input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
"GOMC_CONTROL_FILE_WRITTEN"
# Sanity check: with a well-formed MEMC_DataInput entry the control file is
# written successfully and the writer reports it.
try:
    value = gomc_control.write_gomc_control_file(
        charmm,
        "test_save_NVT_bad_variables_part_8.conf",
        "GEMC_NPT",
        10,
        300,
        input_variables_dict={
            "MEMC_DataInput": [
                [1, "ETH", ["C2", "C1"], "ETO", ["O1", "C1"]]
            ],
            "DisFreq": 0.05,
            "RotFreq": 0.05,
            "IntraSwapFreq": 0.05,
            "SwapFreq": 0.05,
            "RegrowthFreq": 0.05,
            "CrankShaftFreq": 0.05,
            "VolFreq": 0.05,
            "MultiParticleFreq": 0.05,
            "IntraMEMC-1Freq": 0.10,
            "MEMC-1Freq": 0.10,
            "IntraMEMC-2Freq": 0.10,
            "MEMC-2Freq": 0.10,
            "IntraMEMC-3Freq": 0.10,
            "MEMC-3Freq": 0.10,
        },
    )
except Exception:
    # Catch only ordinary exceptions (not SystemExit/KeyboardInterrupt,
    # which a bare "except:" would also swallow) so an unexpected writer
    # failure is reported via the assertion below.
    value = "TEST_FAILED"
assert value == "GOMC_CONTROL_FILE_WRITTEN"
# Each entry below is a malformed MEMC_DataInput value: atom-name lists
# that do not match the expected ones, a float/str/list/dict where an
# integer is required, an unknown residue name, or non-string atom names.
# Every one of them must raise the same MEMC_DataInput ValueError from
# write_gomc_control_file.
bad_memc_data_input_list = [
    [1, "ETH", ["C1", "O1"], "ETO", ["C1", "C2"]],
    [1, "ETH", ["O1", "C1"], "ETO", ["C2", "C1"]],
    [1.0, "ETH", ["C1", "C2"], "ETO", ["C1", "C2"]],
    ["s", "ETH", ["C1", "C2"], "ETO", ["C1", "C2"]],
    [[1], "ETH", ["C1", "C2"], "ETO", ["C1", "C2"]],
    [{"a": "1"}, "ETH", ["C1", "C2"], "ETO", ["C1", "C2"]],
    [1, "ETHa", ["C1", "C2"], "ETO", ["C1", "C2"]],
    [1, 1, ["C1", "C2"], "ETO", ["C1", "C2"]],
    [1, [1], ["C1", "C2"], "ETO", ["C1", "C2"]],
    [1, "ETH", [1, "C2"], "ETO", ["C1", "C2"]],
    [1, "ETH", [[1], "C2"], "ETO", ["C1", "C2"]],
    [1, "ETH", ["C1", 1], "ETO", ["C1", "C2"]],
    [1, "ETH", ["C1", [1]], "ETO", ["C1", "C2"]],
    [1, "ETH", ["C1", "C2"], 1, ["C1", "C2"]],
]
for bad_memc_data_input in bad_memc_data_input_list:
    with pytest.raises(
        ValueError,
        match=r"ERROR: The following input variables have "
        r"bad values \(check spelling and for empty spaces in the keys or that "
        r"the values are in the correct form with the acceptable values\)"
        r": \['MEMC_DataInput'\]",
    ):
        gomc_control.write_gomc_control_file(
            charmm,
            "test_save_NVT_bad_variables_part_8.conf",
            "GEMC_NPT",
            10,
            300,
            input_variables_dict={
                "MEMC_DataInput": [bad_memc_data_input],
                "DisFreq": 0.05,
                "RotFreq": 0.05,
                "IntraSwapFreq": 0.05,
                "SwapFreq": 0.05,
                "RegrowthFreq": 0.05,
                "CrankShaftFreq": 0.05,
                "VolFreq": 0.05,
                "MultiParticleFreq": 0.05,
                "IntraMEMC-1Freq": 0.10,
                "MEMC-1Freq": 0.10,
                "IntraMEMC-2Freq": 0.10,
                "MEMC-2Freq": 0.10,
                "IntraMEMC-3Freq": 0.10,
                "MEMC-3Freq": 0.10,
            },
        )
with pytest.raises(
ValueError,
match=r"ERROR: The following input variables have "
r"bad values \(check spelling and for empty spaces in the keys or that "
r"the values are in the correct | |
<reponame>rlquinn/landscaper
# Copyright (c) 2017, Intel Research and Development Ireland Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""
Tests for the neo4j graph database module.
"""
import unittest
import time
import mock
from py2neo import Node
from landscaper.graph_db import neo4j_db
from landscaper import landscape_manager
from tests.test_utils import utils
# W0212 - Access to a protected member
# pylint: disable=W0212
class TestUniqueAttributeNames(unittest.TestCase):
    """
    Tests for the unique_attribute_names method, which de-conflicts
    attribute keys that collide with the immutable identity keys by
    prefixing them, adding an _N suffix when the prefixed name collides
    again.
    """

    @mock.patch("landscaper.graph_db.neo4j_db.Neo4jGDB._get_db_connection")
    def setUp(self, mck_get_connection):
        # Stub out the database connection so the GDB can be built offline.
        mck_get_connection.return_value = None
        self.gdb = neo4j_db.Neo4jGDB(mock.Mock())

    def test_no_clash(self):
        """
        Names that collide with none of the immutable keys pass through
        unchanged.
        """
        protected = ["name", "type", "category", "layer"]
        attrs = {"origin": "LGA", "destination": "LAX", "duration": "7"}
        renamed = self.gdb._unique_attribute_names(protected, attrs, "")
        self.assertEqual(attrs, renamed)

    def test_single_key_single_clash(self):
        """
        One attribute key collides with an immutable key and the prefixed
        replacement is itself free, so a single rename suffices.
        """
        protected = ["name", "type", "category", "layer"]
        attrs = {"name": "NIC", "address": "1234"}
        renamed = self.gdb._unique_attribute_names(
            protected, attrs, "component")
        self.assertNotEqual(attrs, renamed)  # catch a returned reference
        self.assertEqual(renamed, {"component-name": "NIC",
                                   "address": "1234"})

    def test_single_key_double_clash(self):
        """
        The prefixed replacement name is already taken, so a numeric
        suffix is appended as well.
        """
        protected = ["name", "type", "category", "layer"]
        attrs = {'type': 'nic', 'NIC-type': 'high', 'bw': '40'}
        renamed = self.gdb._unique_attribute_names(protected, attrs, 'NIC')
        self.assertEqual(renamed, {'NIC-type_1': 'nic',
                                   'NIC-type': 'high',
                                   'bw': '40'})

    def test_single_key_triple_clash(self):
        """
        Both the prefixed name and its first suffixed variant are taken,
        so the suffix counter advances a second time.
        """
        protected = ["name", "type", "category", "layer"]
        attrs = {'type': 'machine-a', 'machine-type': 'machine-b',
                 'machine-type_1': 'machine-c'}
        renamed = self.gdb._unique_attribute_names(
            protected, attrs, 'machine')
        self.assertEqual(renamed, {'machine-type_1': 'machine-c',
                                   'machine-type': 'machine-b',
                                   'machine-type_2': 'machine-a'})

    def test_triple_key_single_clash(self):
        """
        Three different attribute keys each collide once; all three are
        prefixed.
        """
        protected = ["name", "type", "category", "layer"]
        attrs = {"name": "cache", "type": "compute", "layer": "physical"}
        renamed = self.gdb._unique_attribute_names(
            protected, attrs, "cache")
        self.assertNotEqual(renamed, attrs)
        self.assertEqual({"cache-name": "cache", "cache-type": "compute",
                          "cache-layer": "physical"}, renamed)
class TestNodeUpdateIntegration(unittest.TestCase):
    """
    Integration tests for the update_node method, run against a test
    landscape loaded into the graph database.
    """
    # Landscape fixture loaded into the graph before every test.
    landscape_file = "tests/data/test_landscape_with_states.json"

    def setUp(self):
        utils.create_test_config()
        manager = landscape_manager.LandscapeManager(utils.TEST_CONFIG_FILE)
        self.graph_db = manager.graph_db
        self.graph_db.delete_all()
        self.graph_db.load_test_landscape(self.landscape_file)

    def tearDown(self):
        self.graph_db.delete_all()
        utils.remove_test_config()

    def test_state_updated(self):
        """
        Test that a new state node is updated in the landscape.
        """
        node_id = 'machine-A'
        old_state = self._node_state_attributes(node_id)
        attrs = {"A": 'apple', "B": 'banana'}
        self.graph_db.update_node(node_id, time.time(), attrs)
        new_state = self._node_state_attributes(node_id)
        self.assertNotEqual(old_state, attrs)
        self.assertEqual(new_state, attrs)

    def test_extra_attributes(self):
        """
        Test that extra attributes are added to the state.
        """
        # VM node in landscape.
        node_id = "1ffaedbf-719a-4327-a14e-ed7b8564fb4e"
        old_state = self._node_state_attributes(node_id)
        # UPDATE
        attrs = {'h': 'happy', 'i': 'ink'}
        self.graph_db.update_node(node_id, time.time(), extra_attrs=attrs)
        # New state = extra attrs merged with the attributes the node
        # already had.
        new_state = self._node_state_attributes(node_id)
        attrs.update(old_state)
        expected_state = attrs
        self.assertEqual(new_state, expected_state)

    def test_state_extra(self):
        """
        Test that when there is a state and extra attrs, that they are both
        combined into a new state.
        """
        # stack from the landscape.
        node_id = "stack-1"
        old_state = self._node_state_attributes(node_id)
        # UPDATE
        state_attrs = {"m": "motor", "n": "nose"}
        extra_attrs = {"r": "rock", "s": "snow"}
        timestamp = time.time()
        self.graph_db.update_node(node_id, timestamp, state_attrs,
                                  extra_attrs)
        # Assertions
        new_state = self._node_state_attributes(node_id)
        state_attrs.update(extra_attrs)
        expected_attrs = state_attrs
        self.assertNotEqual(old_state, expected_attrs)
        self.assertEqual(new_state, expected_attrs)

    def test_state_extra_priority(self):
        """
        Test that when given a state and extra attributes with matching
        attributes that extra attributes takes priority.
        """
        # stack from the landscape.
        node_id = "machine-E"
        old_state = self._node_state_attributes(node_id)
        # UPDATE
        state_attrs = {"j": "juice", "k": "kilowatt"}
        extra_attrs = {"k": "kilometer", "l": "lights"}
        timestamp = time.time()
        self.graph_db.update_node(node_id, timestamp, state_attrs,
                                  extra_attrs)
        # Assertions: "k" must resolve to the extra_attrs value.
        new_state = self._pop_identity(self._node_state_attributes(node_id))
        expected_attrs = {"k": "kilometer", "l": "lights", "j": "juice"}
        self.assertNotEqual(old_state, expected_attrs)
        self.assertEqual(new_state, expected_attrs)

    def _node_state_attributes(self, node_id):
        """
        Return the state attributes of a landscape node, identity
        attributes removed.

        :param node_id: The node to grab.
        :return: dict of the node's non-identity attributes.
        """
        graph = self.graph_db.get_node_by_uuid_web(node_id, json_out=False)
        node_attrs = graph.nodes(data=True)[0][1]
        return self._pop_identity(node_attrs)

    @staticmethod
    def _pop_identity(node_attributes):
        """
        Return a copy of node_attributes with the identity attributes
        ('name', 'type', 'layer', 'category') removed.
        """
        trimmed_attributes = node_attributes.copy()
        # Iterate the mapping's keys directly instead of the Python-2-only
        # iteritems() (the values were unused anyway); this keeps the
        # helper working on both Python 2 and Python 3.
        for attr_key in node_attributes:
            if attr_key in ['name', 'type', 'layer', 'category']:
                trimmed_attributes.pop(attr_key)
        return trimmed_attributes
class TestNodeUpdateUnit(unittest.TestCase):
"""
Unit tests for the node_update method.
"""
def setUp(self):
    """
    Build a Neo4jGDB wired entirely with mocks: no real database
    connection, a canned identity node, and stubbed edge helpers.
    """
    fake_manager = mock.MagicMock()
    fake_manager.get_neo4j_credentials.return_value = (1, 1)
    self.neo4j = neo4j_db.Neo4jGDB(fake_manager)
    # Canned identity node returned for any uuid lookup.
    self.identity = {'a': 'b'}
    self.neo4j.get_node_by_uuid = mock.Mock(return_value=self.identity)
    # Stub the graph handle and the edge create/expire helpers so tests
    # can inspect how they were called.
    self.neo4j.graph_db = mock.MagicMock()
    self.neo4j._create_edge = mock.MagicMock()
    self.neo4j._expire_edge = mock.MagicMock()
def test_nothing_to_update(self):
    """
    Calling update_node with no state and no extra attributes leaves the
    identity intact and reports a non-success message.
    """
    identity, msg = self.neo4j.update_node('id', 0)
    self.assertIsNotNone(identity)
    self.assertNotIn('success', msg)
def test_unknown_node(self):
    """
    Updating a node id that the landscape does not contain yields a None
    identity.
    """
    # Make the uuid lookup miss.
    self.neo4j.get_node_by_uuid = mock.Mock(return_value=None)
    identity, _ = self.neo4j.update_node('id', 0, {'car': 'VW'})
    self.assertIsNone(identity)
def test_state_update(self):
    """
    Test that if given a state we update node with the new state.
    """
    # Setup the NEO4J Nodes.
    now_ts = time.time()
    # New state
    state = {'A': 2, 'B': 4, 'C': 6}
    # Old state, wired in through the mocked _get_state_node.
    old_state_n = Node('blah', **{'A': 1, 'B': 3, 'C': 5})
    edge_tuple = (old_state_n, None)
    self.neo4j._get_state_node = mock.MagicMock(return_value=edge_tuple)
    # UPDATE
    identity, _ = self.neo4j.update_node('node', now_ts, state)
    # Check that the new node has been attached to the identity node.
    # _create_edge was mocked in setUp; the positional args are assumed
    # to be (source_node, state_node, timestamp, label) -- NOTE(review):
    # confirm against Neo4jGDB._create_edge's signature.
    call_args = self.neo4j._create_edge.call_args_list[0][0]
    self.assertEqual(identity, call_args[0])
    self.assertEqual(state, dict(call_args[1]))
    self.assertEqual(now_ts, call_args[2])
    self.assertEqual('STATE', call_args[3])
def test_extra_attributes(self):
    """
    Test that extra_attributes are added to a new state and concatenated
    with the old state.
    """
    now_ts = time.time()
    # extra attributes
    attrs = {'x': 24, 'y': 25, 'z': 26}
    # Build old state node.
    old_state_n = Node('blah', **{'A': 1, 'B': 3, 'C': 5})
    edge_tuple = (old_state_n, None)
    self.neo4j._get_state_node = mock.MagicMock(return_value=edge_tuple)
    # UPDATE
    identity, _ = self.neo4j.update_node('node', now_ts, extra_attrs=attrs)
    # With no explicit state, the old state's attributes are expected to
    # be carried over and merged with the extra attributes.
    attrs.update({'A': 1, 'B': 3, 'C': 5})
    expected_attrs = attrs
    # Assertions: positional args assumed to be
    # (source_node, state_node, timestamp, label) -- see test_state_update.
    call_args = self.neo4j._create_edge.call_args_list[0][0]
    self.assertEqual(identity, call_args[0])
    self.assertEqual(expected_attrs, dict(call_args[1]))
    self.assertEqual(now_ts, call_args[2])
    self.assertEqual('STATE', call_args[3])
def test_state_with_extra(self):
    """
    Test that if there is a state and extra attributes that they are both
    added to the new node.
    """
    now_ts = time.time()
    # Mock old state
    old_state_n = Node('blah', **{'A': 1, 'B': 3, 'C': 5})
    edge_tuple = (old_state_n, None)
    self.neo4j._get_state_node = mock.MagicMock(return_value=edge_tuple)
    # new state and extra attributes; the explicit state replaces the
    # old one, so only state + extra are expected on the new node.
    state = {'a': 1, 'b': 2, 'c': 3}
    attrs = {'x': 24, 'y': 25, 'z': 26}
    combined = {'a': 1, 'b': 2, 'c': 3, 'x': 24, 'y': 25, 'z': 26}
    # UPDATE
    identity, _ = self.neo4j.update_node('node', now_ts, state, attrs)
    # Assertions: positional args assumed to be
    # (source_node, state_node, timestamp, label) -- see test_state_update.
    call_args = self.neo4j._create_edge.call_args_list[0][0]
    self.assertEqual(identity, call_args[0])
    self.assertEqual(combined, dict(call_args[1]))
    self.assertEqual(now_ts, call_args[2])
    self.assertEqual('STATE', call_args[3])
def test_state_with_extra_crossover(self):
    """
    Test that if state and extra have overlapping attributes then extra
    takes priority.
    """
    now_ts = time.time()
    # Mock old state
    old_state_n = Node('blah', **{'A': 1})
    edge_tuple = (old_state_n, None)
    self.neo4j._get_state_node = mock.MagicMock(return_value=edge_tuple)
    # new state and extra attributes; both define 'k' -- the combined
    # result must take 'k' from attrs (14), not from state (7).
    state = {'a': 1, 'b': 2, 'c': 3, 'k': 7}
    attrs = {'k': 14, 'x': 24, 'y': 25, 'z': 26}
    combined = {'a': 1, 'b': 2, 'c': 3, 'x': 24, 'y': 25, 'z': 26, 'k': 14}
    # UPDATE
    identity, _ = self.neo4j.update_node('node', now_ts, state, attrs)
    call_args = self.neo4j._create_edge.call_args_list[0][0]
    self.assertEqual(identity, call_args[0])
    self.assertEqual(combined, dict(call_args[1]))
    self.assertEqual(now_ts, call_args[2])
    self.assertEqual('STATE', call_args[3])
def test_empty_extra_attrs(self):
    """
    An empty extra_attrs dict with no state is treated as nothing to
    update: the identity survives and the message is not a success.
    """
    identity, msg = self.neo4j.update_node('id', 0, extra_attrs={})
    self.assertIsNotNone(identity)
    self.assertNotIn('success', msg)
def test_unchanged_state(self):
"""
If the new state does not differ from the old state then don't try to
update.
"""
now_ts = time.time()
self.neo4j._create_edge = mock.MagicMock()
# Mock old state
old_state_n = Node('old', **{'a': 'ok', 'b': 4, 'c': 'u'})
edge_tuple = (old_state_n, None)
self.neo4j._get_state_node = mock.MagicMock(return_value=edge_tuple)
# new state and | |
<filename>lib/sqlalchemy/orm/scoping.py
# orm/scoping.py
# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
from __future__ import annotations
from typing import Any
from typing import Callable
from typing import Dict
from typing import Generic
from typing import Iterable
from typing import Iterator
from typing import Optional
from typing import overload
from typing import Sequence
from typing import Tuple
from typing import Type
from typing import TYPE_CHECKING
from typing import TypeVar
from typing import Union
from .session import _S
from .session import Session
from .. import exc as sa_exc
from .. import util
from ..util import create_proxy_methods
from ..util import ScopedRegistry
from ..util import ThreadLocalRegistry
from ..util import warn
from ..util import warn_deprecated
from ..util.typing import Protocol
if TYPE_CHECKING:
from ._typing import _EntityType
from ._typing import _IdentityKeyType
from .identity import IdentityMap
from .interfaces import ORMOption
from .mapper import Mapper
from .query import Query
from .query import RowReturningQuery
from .session import _BindArguments
from .session import _EntityBindKey
from .session import _PKIdentityArgument
from .session import _SessionBind
from .session import sessionmaker
from .session import SessionTransaction
from ..engine import Connection
from ..engine import Engine
from ..engine import Result
from ..engine import Row
from ..engine import RowMapping
from ..engine.interfaces import _CoreAnyExecuteParams
from ..engine.interfaces import _CoreSingleExecuteParams
from ..engine.interfaces import _ExecuteOptions
from ..engine.interfaces import _ExecuteOptionsParameter
from ..engine.result import ScalarResult
from ..sql._typing import _ColumnsClauseArgument
from ..sql._typing import _T0
from ..sql._typing import _T1
from ..sql._typing import _T2
from ..sql._typing import _T3
from ..sql._typing import _T4
from ..sql._typing import _T5
from ..sql._typing import _T6
from ..sql._typing import _T7
from ..sql._typing import _TypedColumnClauseArgument as _TCCA
from ..sql.base import Executable
from ..sql.elements import ClauseElement
from ..sql.roles import TypedColumnsClauseRole
from ..sql.selectable import ForUpdateArg
from ..sql.selectable import TypedReturnsRows
# Generic value type used by the query-descriptor protocol below.
_T = TypeVar("_T", bound=Any)


class _QueryDescriptorType(Protocol):
    # Structural type of the descriptor returned by
    # scoped_session.query_property(): attribute access on a mapped class
    # yields a Query typed against that class.
    def __get__(self, instance: Any, owner: Type[_T]) -> Query[_T]:
        ...


# Generic mapped-object type used by query_property's inner descriptor.
_O = TypeVar("_O", bound=object)

__all__ = ["scoped_session"]
@create_proxy_methods(
Session,
":class:`_orm.Session`",
":class:`_orm.scoping.scoped_session`",
classmethods=["close_all", "object_session", "identity_key"],
methods=[
"__contains__",
"__iter__",
"add",
"add_all",
"begin",
"begin_nested",
"close",
"commit",
"connection",
"delete",
"execute",
"expire",
"expire_all",
"expunge",
"expunge_all",
"flush",
"get",
"get_bind",
"is_modified",
"bulk_save_objects",
"bulk_insert_mappings",
"bulk_update_mappings",
"merge",
"query",
"refresh",
"rollback",
"scalar",
"scalars",
],
attributes=[
"bind",
"dirty",
"deleted",
"new",
"identity_map",
"is_active",
"autoflush",
"no_autoflush",
"info",
],
)
class scoped_session(Generic[_S]):
"""Provides scoped management of :class:`.Session` objects.
See :ref:`unitofwork_contextual` for a tutorial.
.. note::
When using :ref:`asyncio_toplevel`, the async-compatible
:class:`_asyncio.async_scoped_session` class should be
used in place of :class:`.scoped_session`.
"""
_support_async: bool = False
session_factory: sessionmaker[_S]
"""The `session_factory` provided to `__init__` is stored in this
attribute and may be accessed at a later time. This can be useful when
a new non-scoped :class:`.Session` is needed."""
registry: ScopedRegistry[_S]
def __init__(
    self,
    session_factory: sessionmaker[_S],
    scopefunc: Optional[Callable[[], Any]] = None,
):
    """Construct a new :class:`.scoped_session`.

    :param session_factory: a factory that creates new :class:`.Session`
     instances; usually, but not necessarily, a :class:`.sessionmaker`.

    :param scopefunc: optional function defining the current scope.  When
     omitted, the :class:`.scoped_session` assumes "thread-local" scope,
     maintaining the current :class:`.Session` with a Python
     ``threading.local()``.  When provided, the function should return a
     hashable token; that token keys the dictionary used to store and
     retrieve the current :class:`.Session`.

    """
    self.session_factory = session_factory
    if not scopefunc:
        # Default scope: one Session per thread.
        self.registry = ThreadLocalRegistry(session_factory)
    else:
        self.registry = ScopedRegistry(session_factory, scopefunc)
@property
def _proxied(self) -> _S:
    # The Session for the current scope, created on first access by the
    # registry; the generated proxy methods all delegate through this.
    return self.registry()
def __call__(self, **kw: Any) -> _S:
    r"""Return the current :class:`.Session`, creating it from the
    :attr:`.scoped_session.session_factory` if not already present.

    :param \**kw: Keyword arguments forwarded to the
     :attr:`.scoped_session.session_factory` callable when no
     :class:`.Session` exists for the current scope.  If one does exist
     and keyword arguments were passed,
     :exc:`~sqlalchemy.exc.InvalidRequestError` is raised.

    """
    if not kw:
        sess = self.registry()
    elif self.registry.has():
        # Can't honor constructor arguments for a Session that already
        # exists in this scope.
        raise sa_exc.InvalidRequestError(
            "Scoped session is already present; "
            "no new arguments may be specified."
        )
    else:
        sess = self.session_factory(**kw)
        self.registry.set(sess)
    if not self._support_async and sess._is_asyncio:
        warn_deprecated(
            "Using `scoped_session` with asyncio is deprecated and "
            "will raise an error in a future version. "
            "Please use `async_scoped_session` instead.",
            "1.4.23",
        )
    return sess
def configure(self, **kwargs: Any) -> None:
    """reconfigure the :class:`.sessionmaker` used by this
    :class:`.scoped_session`.

    See :meth:`.sessionmaker.configure`.

    """
    if self.registry.has():
        # A Session already exists in at least one scope; it keeps its
        # current configuration -- only Sessions created after this call
        # pick up the new settings.
        warn(
            "At least one scoped session is already present. "
            " configure() can not affect sessions that have "
            "already been created."
        )

    self.session_factory.configure(**kwargs)
def remove(self) -> None:
    """Dispose of the current :class:`.Session`, if present.

    Calls :meth:`.Session.close` on the current :class:`.Session`,
    releasing any transactional/connection resources it still holds
    (transactions are rolled back), then discards it from the registry.
    The next use within the same scope produces a brand-new
    :class:`.Session`.

    """
    if self.registry.has():
        # close() rolls back and releases connection resources.
        self.registry().close()
    self.registry.clear()
def query_property(
    self, query_cls: Optional[Type[Query[_T]]] = None
) -> _QueryDescriptorType:
    """return a class property which produces a :class:`_query.Query`
    object
    against the class and the current :class:`.Session` when called.

    e.g.::

        Session = scoped_session(sessionmaker())

        class MyClass:
            query = Session.query_property()

        # after mappers are defined
        result = MyClass.query.filter(MyClass.name=='foo').all()

    Produces instances of the session's configured query class by
    default. To override and use a custom implementation, provide
    a ``query_cls`` callable. The callable will be invoked with
    the class's mapper as a positional argument and a session
    keyword argument.

    There is no limit to the number of query properties placed on
    a class.

    """

    class query:
        # Descriptor closing over ``self`` (the scoped_session) and
        # ``query_cls``; ``s`` is the descriptor instance, ``owner`` the
        # mapped class on which the property was accessed.
        def __get__(s, instance: Any, owner: Type[_O]) -> Query[_O]:
            if query_cls:
                # custom query class
                return query_cls(owner, session=self.registry()) # type: ignore # noqa: E501
            else:
                # session's configured query class
                return self.registry().query(owner)

    return query()
# START PROXY METHODS scoped_session
# code within this block is **programmatically,
# statically generated** by tools/generate_proxy_methods.py
def __contains__(self, instance: object) -> bool:
r"""Return True if the instance is associated with this session.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_orm.scoping.scoped_session` class.
The instance may be pending or persistent within the Session for a
result of True.
""" # noqa: E501
return self._proxied.__contains__(instance)
def __iter__(self) -> Iterator[object]:
r"""Iterate over all pending or persistent instances within this
Session.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_orm.scoping.scoped_session` class.
""" # noqa: E501
return self._proxied.__iter__()
def add(self, instance: object, _warn: bool = True) -> None:
    r"""Place an object in the ``Session``.

    .. container:: class_bases

        Proxied for the :class:`_orm.Session` class on
        behalf of the :class:`_orm.scoping.scoped_session` class.

    Its state will be persisted to the database on the next flush
    operation.

    Repeated calls to ``add()`` will be ignored. The opposite of ``add()``
    is ``expunge()``.

    """  # noqa: E501
    # Generated proxy: delegates to the current thread-local Session
    # (edit tools/generate_proxy_methods.py, not this method).
    return self._proxied.add(instance, _warn=_warn)
def add_all(self, instances: Iterable[object]) -> None:
    r"""Add the given collection of instances to this ``Session``.

    .. container:: class_bases

        Proxied for the :class:`_orm.Session` class on
        behalf of the :class:`_orm.scoping.scoped_session` class.

    """  # noqa: E501
    # Generated proxy: delegates to the current thread-local Session
    # (edit tools/generate_proxy_methods.py, not this method).
    return self._proxied.add_all(instances)
def begin(
    self, nested: bool = False, _subtrans: bool = False
) -> SessionTransaction:
    r"""Begin a transaction, or nested transaction,
    on this :class:`.Session`, if one is not already begun.

    .. container:: class_bases

        Proxied for the :class:`_orm.Session` class on
        behalf of the :class:`_orm.scoping.scoped_session` class.

    The :class:`_orm.Session` object features **autobegin** behavior,
    so that normally it is not necessary to call the
    :meth:`_orm.Session.begin`
    method explicitly. However, it may be used in order to control
    the scope of when the transactional state is begun.

    When used to begin the outermost transaction, an error is raised
    if this :class:`.Session` is already inside of a transaction.

    :param nested: if True, begins a SAVEPOINT transaction and is
     equivalent to calling :meth:`~.Session.begin_nested`. For
     documentation on SAVEPOINT transactions, please see
     :ref:`session_begin_nested`.

    :return: the :class:`.SessionTransaction` object.  Note that
     :class:`.SessionTransaction`
     acts as a Python context manager, allowing :meth:`.Session.begin`
     to be used in a "with" block.  See :ref:`session_explicit_begin` for
     an example.

    .. seealso::

        :ref:`session_autobegin`

        :ref:`unitofwork_transaction`

        :meth:`.Session.begin_nested`

    """  # noqa: E501
    # Generated proxy: delegates to the current thread-local Session
    # (edit tools/generate_proxy_methods.py, not this method).
    return self._proxied.begin(nested=nested, _subtrans=_subtrans)
def begin_nested(self) -> SessionTransaction:
r"""Begin a "nested" transaction on this Session, e.g. SAVEPOINT.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_orm.scoping.scoped_session` class.
The target database(s) and associated drivers must support SQL
SAVEPOINT for this method to function correctly.
For documentation on SAVEPOINT
transactions, please see :ref:`session_begin_nested`.
:return: the :class:`.SessionTransaction` object. Note that
:class:`.SessionTransaction` acts as a context manager, allowing
:meth:`.Session.begin_nested` to be used | |
= _openbabel.READONEONLY
# Bit-flag constants re-exported from the compiled _openbabel extension;
# presumably format-capability flags consumed by OBFormat.Flags() — the
# values themselves are defined in the C++ layer.
READBINARY = _openbabel.READBINARY
ZEROATOMSOK = _openbabel.ZEROATOMSOK
NOTWRITABLE = _openbabel.NOTWRITABLE
WRITEONEONLY = _openbabel.WRITEONEONLY
WRITEBINARY = _openbabel.WRITEBINARY
READXML = _openbabel.READXML
DEPICTION2D = _openbabel.DEPICTION2D
DEFAULTFORMAT = _openbabel.DEFAULTFORMAT
class OBFormat(OBPlugin):
    """SWIG proxy for the abstract C++ ``OBFormat`` plugin class.

    Cannot be instantiated from Python (``__init__`` raises); concrete
    format objects are obtained through the static lookup helpers
    (``Default``, ``FindType``, ``FormatFromMIME``).
    """
    # SWIG memory-ownership flag for the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    Default = staticmethod(_openbabel.OBFormat_Default)
    FindType = staticmethod(_openbabel.OBFormat_FindType)
    FormatFromMIME = staticmethod(_openbabel.OBFormat_FormatFromMIME)
    __swig_destroy__ = _openbabel.delete_OBFormat
# Bind the C-implemented OBFormat member functions onto the proxy class,
# then register the proxy with SWIG's runtime type system.  This section
# is SWIG-generated; do not hand-edit.
OBFormat.ReadMolecule = new_instancemethod(_openbabel.OBFormat_ReadMolecule,None,OBFormat)
OBFormat.ReadChemObject = new_instancemethod(_openbabel.OBFormat_ReadChemObject,None,OBFormat)
OBFormat.WriteMolecule = new_instancemethod(_openbabel.OBFormat_WriteMolecule,None,OBFormat)
OBFormat.WriteChemObject = new_instancemethod(_openbabel.OBFormat_WriteChemObject,None,OBFormat)
OBFormat.TargetClassDescription = new_instancemethod(_openbabel.OBFormat_TargetClassDescription,None,OBFormat)
OBFormat.GetType = new_instancemethod(_openbabel.OBFormat_GetType,None,OBFormat)
OBFormat.SpecificationURL = new_instancemethod(_openbabel.OBFormat_SpecificationURL,None,OBFormat)
OBFormat.GetMIMEType = new_instancemethod(_openbabel.OBFormat_GetMIMEType,None,OBFormat)
OBFormat.Flags = new_instancemethod(_openbabel.OBFormat_Flags,None,OBFormat)
OBFormat.SkipObjects = new_instancemethod(_openbabel.OBFormat_SkipObjects,None,OBFormat)
OBFormat.MakeNewInstance = new_instancemethod(_openbabel.OBFormat_MakeNewInstance,None,OBFormat)
OBFormat.RegisterFormat = new_instancemethod(_openbabel.OBFormat_RegisterFormat,None,OBFormat)
OBFormat.Display = new_instancemethod(_openbabel.OBFormat_Display,None,OBFormat)
OBFormat_swigregister = _openbabel.OBFormat_swigregister
OBFormat_swigregister(OBFormat)
# Module-level aliases for OBFormat's static members.  SWIG emits a
# Python wrapper def and then immediately rebinds the same name to the
# raw C entry point, so each def below is dead code kept for
# compatibility with older generated output.
def OBFormat_Default():
    return _openbabel.OBFormat_Default()
OBFormat_Default = _openbabel.OBFormat_Default

def OBFormat_FindType(*args):
    return _openbabel.OBFormat_FindType(*args)
OBFormat_FindType = _openbabel.OBFormat_FindType

def OBFormat_FormatFromMIME(*args):
    return _openbabel.OBFormat_FormatFromMIME(*args)
OBFormat_FormatFromMIME = _openbabel.OBFormat_FormatFromMIME
class OBConversion(object):
    """SWIG proxy for the C++ ``OBConversion`` class.

    Judging from the bound methods (``SetInAndOutFormats``, ``ReadFile``,
    ``WriteFile``, ``Convert``), this is the file-format conversion
    engine: it holds input/output streams, the selected formats and
    per-conversion options.
    """
    # SWIG memory-ownership flag for the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        _openbabel.OBConversion_swiginit(self,_openbabel.new_OBConversion(*args))
    __swig_destroy__ = _openbabel.delete_OBConversion
    # Static format-registry helpers.
    RegisterFormat = staticmethod(_openbabel.OBConversion_RegisterFormat)
    FindFormat = staticmethod(_openbabel.OBConversion_FindFormat)
    FormatFromExt = staticmethod(_openbabel.OBConversion_FormatFromExt)
    FormatFromMIME = staticmethod(_openbabel.OBConversion_FormatFromMIME)
    GetNextFormat = staticmethod(_openbabel.OBConversion_GetNextFormat)
    Description = staticmethod(_openbabel.OBConversion_Description)
    # Option-category enum values (input / output / general / all).
    INOPTIONS = _openbabel.OBConversion_INOPTIONS
    OUTOPTIONS = _openbabel.OBConversion_OUTOPTIONS
    GENOPTIONS = _openbabel.OBConversion_GENOPTIONS
    ALL = _openbabel.OBConversion_ALL
    RegisterOptionParam = staticmethod(_openbabel.OBConversion_RegisterOptionParam)
    GetOptionParams = staticmethod(_openbabel.OBConversion_GetOptionParams)
    GetDefaultFormat = staticmethod(_openbabel.OBConversion_GetDefaultFormat)
# Bind the C-implemented OBConversion member functions onto the proxy
# class, then register the proxy with SWIG's runtime type system.
# SWIG-generated; do not hand-edit.
# -- stream / format accessors --
OBConversion.GetInStream = new_instancemethod(_openbabel.OBConversion_GetInStream,None,OBConversion)
OBConversion.GetOutStream = new_instancemethod(_openbabel.OBConversion_GetOutStream,None,OBConversion)
OBConversion.SetInStream = new_instancemethod(_openbabel.OBConversion_SetInStream,None,OBConversion)
OBConversion.SetOutStream = new_instancemethod(_openbabel.OBConversion_SetOutStream,None,OBConversion)
OBConversion.SetInAndOutFormats = new_instancemethod(_openbabel.OBConversion_SetInAndOutFormats,None,OBConversion)
OBConversion.SetInFormat = new_instancemethod(_openbabel.OBConversion_SetInFormat,None,OBConversion)
OBConversion.SetOutFormat = new_instancemethod(_openbabel.OBConversion_SetOutFormat,None,OBConversion)
OBConversion.GetInFormat = new_instancemethod(_openbabel.OBConversion_GetInFormat,None,OBConversion)
OBConversion.GetOutFormat = new_instancemethod(_openbabel.OBConversion_GetOutFormat,None,OBConversion)
OBConversion.GetInFilename = new_instancemethod(_openbabel.OBConversion_GetInFilename,None,OBConversion)
OBConversion.GetInPos = new_instancemethod(_openbabel.OBConversion_GetInPos,None,OBConversion)
OBConversion.GetInLen = new_instancemethod(_openbabel.OBConversion_GetInLen,None,OBConversion)
OBConversion.GetTitle = new_instancemethod(_openbabel.OBConversion_GetTitle,None,OBConversion)
OBConversion.GetAuxConv = new_instancemethod(_openbabel.OBConversion_GetAuxConv,None,OBConversion)
OBConversion.SetAuxConv = new_instancemethod(_openbabel.OBConversion_SetAuxConv,None,OBConversion)
# -- option handling --
OBConversion.IsOption = new_instancemethod(_openbabel.OBConversion_IsOption,None,OBConversion)
OBConversion.GetOptions = new_instancemethod(_openbabel.OBConversion_GetOptions,None,OBConversion)
OBConversion.AddOption = new_instancemethod(_openbabel.OBConversion_AddOption,None,OBConversion)
OBConversion.RemoveOption = new_instancemethod(_openbabel.OBConversion_RemoveOption,None,OBConversion)
OBConversion.SetOptions = new_instancemethod(_openbabel.OBConversion_SetOptions,None,OBConversion)
OBConversion.CopyOptions = new_instancemethod(_openbabel.OBConversion_CopyOptions,None,OBConversion)
OBConversion.GetSupportedInputFormat = new_instancemethod(_openbabel.OBConversion_GetSupportedInputFormat,None,OBConversion)
OBConversion.GetSupportedOutputFormat = new_instancemethod(_openbabel.OBConversion_GetSupportedOutputFormat,None,OBConversion)
# -- conversion driving and per-object bookkeeping --
OBConversion.Convert = new_instancemethod(_openbabel.OBConversion_Convert,None,OBConversion)
OBConversion.FullConvert = new_instancemethod(_openbabel.OBConversion_FullConvert,None,OBConversion)
OBConversion.AddChemObject = new_instancemethod(_openbabel.OBConversion_AddChemObject,None,OBConversion)
OBConversion.GetChemObject = new_instancemethod(_openbabel.OBConversion_GetChemObject,None,OBConversion)
OBConversion.IsLast = new_instancemethod(_openbabel.OBConversion_IsLast,None,OBConversion)
OBConversion.IsFirstInput = new_instancemethod(_openbabel.OBConversion_IsFirstInput,None,OBConversion)
OBConversion.SetFirstInput = new_instancemethod(_openbabel.OBConversion_SetFirstInput,None,OBConversion)
OBConversion.GetOutputIndex = new_instancemethod(_openbabel.OBConversion_GetOutputIndex,None,OBConversion)
OBConversion.SetOutputIndex = new_instancemethod(_openbabel.OBConversion_SetOutputIndex,None,OBConversion)
OBConversion.SetMoreFilesToCome = new_instancemethod(_openbabel.OBConversion_SetMoreFilesToCome,None,OBConversion)
OBConversion.SetOneObjectOnly = new_instancemethod(_openbabel.OBConversion_SetOneObjectOnly,None,OBConversion)
OBConversion.SetLast = new_instancemethod(_openbabel.OBConversion_SetLast,None,OBConversion)
OBConversion.IsLastFile = new_instancemethod(_openbabel.OBConversion_IsLastFile,None,OBConversion)
OBConversion.GetCount = new_instancemethod(_openbabel.OBConversion_GetCount,None,OBConversion)
# -- high-level read/write entry points --
OBConversion.Write = new_instancemethod(_openbabel.OBConversion_Write,None,OBConversion)
OBConversion.WriteString = new_instancemethod(_openbabel.OBConversion_WriteString,None,OBConversion)
OBConversion.WriteFile = new_instancemethod(_openbabel.OBConversion_WriteFile,None,OBConversion)
OBConversion.CloseOutFile = new_instancemethod(_openbabel.OBConversion_CloseOutFile,None,OBConversion)
OBConversion.Read = new_instancemethod(_openbabel.OBConversion_Read,None,OBConversion)
OBConversion.ReadString = new_instancemethod(_openbabel.OBConversion_ReadString,None,OBConversion)
OBConversion.ReadFile = new_instancemethod(_openbabel.OBConversion_ReadFile,None,OBConversion)
OBConversion.OpenInAndOutFiles = new_instancemethod(_openbabel.OBConversion_OpenInAndOutFiles,None,OBConversion)
OBConversion.ReportNumberConverted = new_instancemethod(_openbabel.OBConversion_ReportNumberConverted,None,OBConversion)
OBConversion.NumInputObjects = new_instancemethod(_openbabel.OBConversion_NumInputObjects,None,OBConversion)
OBConversion_swigregister = _openbabel.OBConversion_swigregister
OBConversion_swigregister(OBConversion)
# Module-level aliases for OBConversion's static members.  As with the
# OBFormat wrappers above, SWIG emits a Python def and then rebinds the
# name to the raw C entry point, so the defs are effectively dead code.
def OBConversion_RegisterFormat(*args):
    return _openbabel.OBConversion_RegisterFormat(*args)
OBConversion_RegisterFormat = _openbabel.OBConversion_RegisterFormat

def OBConversion_FindFormat(*args):
    return _openbabel.OBConversion_FindFormat(*args)
OBConversion_FindFormat = _openbabel.OBConversion_FindFormat

def OBConversion_FormatFromExt(*args):
    return _openbabel.OBConversion_FormatFromExt(*args)
OBConversion_FormatFromExt = _openbabel.OBConversion_FormatFromExt

def OBConversion_FormatFromMIME(*args):
    return _openbabel.OBConversion_FormatFromMIME(*args)
OBConversion_FormatFromMIME = _openbabel.OBConversion_FormatFromMIME

def OBConversion_GetNextFormat(*args):
    return _openbabel.OBConversion_GetNextFormat(*args)
OBConversion_GetNextFormat = _openbabel.OBConversion_GetNextFormat

def OBConversion_Description():
    return _openbabel.OBConversion_Description()
OBConversion_Description = _openbabel.OBConversion_Description

def OBConversion_RegisterOptionParam(*args):
    return _openbabel.OBConversion_RegisterOptionParam(*args)
OBConversion_RegisterOptionParam = _openbabel.OBConversion_RegisterOptionParam

def OBConversion_GetOptionParams(*args):
    return _openbabel.OBConversion_GetOptionParams(*args)
OBConversion_GetOptionParams = _openbabel.OBConversion_GetOptionParams

def OBConversion_GetDefaultFormat():
    return _openbabel.OBConversion_GetDefaultFormat()
OBConversion_GetDefaultFormat = _openbabel.OBConversion_GetDefaultFormat
class OBResidue(OBBase):
    """SWIG proxy for the C++ ``OBResidue`` class.

    Judging from the bound methods (``AddAtom``, ``GetChain``,
    ``GetNumAtoms``), this models a residue within a macromolecule:
    its atoms, bonds, chain membership and numbering.
    """
    # SWIG memory-ownership flag for the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        _openbabel.OBResidue_swiginit(self,_openbabel.new_OBResidue(*args))
    __swig_destroy__ = _openbabel.delete_OBResidue
# Bind the C-implemented OBResidue member functions onto the proxy
# class, then register the proxy with SWIG's runtime type system.
# SWIG-generated; do not hand-edit.
OBResidue.AddAtom = new_instancemethod(_openbabel.OBResidue_AddAtom,None,OBResidue)
OBResidue.InsertAtom = new_instancemethod(_openbabel.OBResidue_InsertAtom,None,OBResidue)
OBResidue.RemoveAtom = new_instancemethod(_openbabel.OBResidue_RemoveAtom,None,OBResidue)
OBResidue.SetName = new_instancemethod(_openbabel.OBResidue_SetName,None,OBResidue)
OBResidue.SetNum = new_instancemethod(_openbabel.OBResidue_SetNum,None,OBResidue)
OBResidue.SetChain = new_instancemethod(_openbabel.OBResidue_SetChain,None,OBResidue)
OBResidue.SetChainNum = new_instancemethod(_openbabel.OBResidue_SetChainNum,None,OBResidue)
OBResidue.SetIdx = new_instancemethod(_openbabel.OBResidue_SetIdx,None,OBResidue)
OBResidue.SetAtomID = new_instancemethod(_openbabel.OBResidue_SetAtomID,None,OBResidue)
OBResidue.SetHetAtom = new_instancemethod(_openbabel.OBResidue_SetHetAtom,None,OBResidue)
OBResidue.SetSerialNum = new_instancemethod(_openbabel.OBResidue_SetSerialNum,None,OBResidue)
OBResidue.GetName = new_instancemethod(_openbabel.OBResidue_GetName,None,OBResidue)
OBResidue.GetNum = new_instancemethod(_openbabel.OBResidue_GetNum,None,OBResidue)
OBResidue.GetNumString = new_instancemethod(_openbabel.OBResidue_GetNumString,None,OBResidue)
OBResidue.GetNumAtoms = new_instancemethod(_openbabel.OBResidue_GetNumAtoms,None,OBResidue)
OBResidue.GetChain = new_instancemethod(_openbabel.OBResidue_GetChain,None,OBResidue)
OBResidue.GetChainNum = new_instancemethod(_openbabel.OBResidue_GetChainNum,None,OBResidue)
OBResidue.GetIdx = new_instancemethod(_openbabel.OBResidue_GetIdx,None,OBResidue)
OBResidue.GetResKey = new_instancemethod(_openbabel.OBResidue_GetResKey,None,OBResidue)
OBResidue.GetAtoms = new_instancemethod(_openbabel.OBResidue_GetAtoms,None,OBResidue)
OBResidue.GetBonds = new_instancemethod(_openbabel.OBResidue_GetBonds,None,OBResidue)
OBResidue.GetAtomID = new_instancemethod(_openbabel.OBResidue_GetAtomID,None,OBResidue)
OBResidue.GetSerialNum = new_instancemethod(_openbabel.OBResidue_GetSerialNum,None,OBResidue)
OBResidue.GetAminoAcidProperty = new_instancemethod(_openbabel.OBResidue_GetAminoAcidProperty,None,OBResidue)
OBResidue.GetAtomProperty = new_instancemethod(_openbabel.OBResidue_GetAtomProperty,None,OBResidue)
OBResidue.GetResidueProperty = new_instancemethod(_openbabel.OBResidue_GetResidueProperty,None,OBResidue)
OBResidue.IsHetAtom = new_instancemethod(_openbabel.OBResidue_IsHetAtom,None,OBResidue)
OBResidue.IsResidueType = new_instancemethod(_openbabel.OBResidue_IsResidueType,None,OBResidue)
OBResidue.BeginAtoms = new_instancemethod(_openbabel.OBResidue_BeginAtoms,None,OBResidue)
OBResidue.EndAtoms = new_instancemethod(_openbabel.OBResidue_EndAtoms,None,OBResidue)
OBResidue.BeginAtom = new_instancemethod(_openbabel.OBResidue_BeginAtom,None,OBResidue)
OBResidue.NextAtom = new_instancemethod(_openbabel.OBResidue_NextAtom,None,OBResidue)
OBResidue_swigregister = _openbabel.OBResidue_swigregister
OBResidue_swigregister(OBResidue)
# Residue-related constants re-exported from the C++ extension.
# NOTE(review): several single-letter names below (A, C, G, T, U, I)
# land in the module namespace and will shadow any same-named local
# imports — a hazard for `from openbabel import *` users.
MAXSETNO = _openbabel.MAXSETNO
MAXELEM = _openbabel.MAXELEM
MINELEM = _openbabel.MINELEM
MAXRES = _openbabel.MAXRES
MINRES = _openbabel.MINRES
# Amino-acid identifiers (AA_* form).
AA_ALA = _openbabel.AA_ALA
AA_GLY = _openbabel.AA_GLY
AA_LEU = _openbabel.AA_LEU
AA_SER = _openbabel.AA_SER
AA_VAL = _openbabel.AA_VAL
AA_THR = _openbabel.AA_THR
AA_LYS = _openbabel.AA_LYS
AA_ASP = _openbabel.AA_ASP
AA_ILE = _openbabel.AA_ILE
AA_ASN = _openbabel.AA_ASN
AA_GLU = _openbabel.AA_GLU
AA_PRO = _openbabel.AA_PRO
AA_ARG = _openbabel.AA_ARG
AA_PHE = _openbabel.AA_PHE
AA_GLN = _openbabel.AA_GLN
AA_TYR = _openbabel.AA_TYR
AA_HIS = _openbabel.AA_HIS
AA_CYS = _openbabel.AA_CYS
AA_MET = _openbabel.AA_MET
AA_TRP = _openbabel.AA_TRP
# Amino-acid property flags (see OBResidue.GetAminoAcidProperty).
ACIDIC = _openbabel.ACIDIC
ACYCLIC = _openbabel.ACYCLIC
ALIPHATIC = _openbabel.ALIPHATIC
AROMATIC = _openbabel.AROMATIC
BASIC = _openbabel.BASIC
BURIED = _openbabel.BURIED
CHARGED = _openbabel.CHARGED
CYCLIC = _openbabel.CYCLIC
HYDROPHOBIC = _openbabel.HYDROPHOBIC
LARGE = _openbabel.LARGE
MEDIUM = _openbabel.MEDIUM
NEGATIVE = _openbabel.NEGATIVE
NEUTRAL = _openbabel.NEUTRAL
POLAR = _openbabel.POLAR
POSITIVE = _openbabel.POSITIVE
SMALL = _openbabel.SMALL
SURFACE = _openbabel.SURFACE
# Atom property flags (see OBResidue.GetAtomProperty).
ALPHA_CARBON = _openbabel.ALPHA_CARBON
AMINO_BACKBONE = _openbabel.AMINO_BACKBONE
BACKBONE = _openbabel.BACKBONE
CYSTEINE_SULPHUR = _openbabel.CYSTEINE_SULPHUR
LIGAND = _openbabel.LIGAND
NUCLEIC_BACKBONE = _openbabel.NUCLEIC_BACKBONE
SHAPELY_BACKBONE = _openbabel.SHAPELY_BACKBONE
SHAPELY_SPECIAL = _openbabel.SHAPELY_SPECIAL
SIDECHAIN = _openbabel.SIDECHAIN
SUGAR_PHOSPHATE = _openbabel.SUGAR_PHOSPHATE
# Residue identifiers: standard amino acids ...
ALA = _openbabel.ALA
GLY = _openbabel.GLY
LEU = _openbabel.LEU
SER = _openbabel.SER
VAL = _openbabel.VAL
THR = _openbabel.THR
LYS = _openbabel.LYS
ASP = _openbabel.ASP
ILE = _openbabel.ILE
ASN = _openbabel.ASN
GLU = _openbabel.GLU
PRO = _openbabel.PRO
ARG = _openbabel.ARG
PHE = _openbabel.PHE
GLN = _openbabel.GLN
TYR = _openbabel.TYR
HIS = _openbabel.HIS
CYS = _openbabel.CYS
MET = _openbabel.MET
TRP = _openbabel.TRP
ASX = _openbabel.ASX
GLX = _openbabel.GLX
PCA = _openbabel.PCA
HYP = _openbabel.HYP
# ... nucleotides (including modified bases) ...
A = _openbabel.A
C = _openbabel.C
G = _openbabel.G
T = _openbabel.T
U = _openbabel.U
UPLUS = _openbabel.UPLUS
I = _openbabel.I
_1MA = _openbabel._1MA
_5MC = _openbabel._5MC
OMC = _openbabel.OMC
_1MG = _openbabel._1MG
_2MG = _openbabel._2MG
M2G = _openbabel.M2G
_7MG = _openbabel._7MG
OMG = _openbabel.OMG
YG = _openbabel.YG
H2U = _openbabel.H2U
_5MU = _openbabel._5MU
PSU = _openbabel.PSU
# ... and common hetero groups / solvents / cofactors.
UNK = _openbabel.UNK
ACE = _openbabel.ACE
FOR = _openbabel.FOR
HOH = _openbabel.HOH
DOD = _openbabel.DOD
SO4 = _openbabel.SO4
PO4 = _openbabel.PO4
NAD = _openbabel.NAD
COA = _openbabel.COA
NAP = _openbabel.NAP
NDP = _openbabel.NDP
# Residue category flags (see OBResidue.GetResidueProperty).
AMINO = _openbabel.AMINO
AMINO_NUCLEO = _openbabel.AMINO_NUCLEO
COENZYME = _openbabel.COENZYME
ION = _openbabel.ION
NUCLEO = _openbabel.NUCLEO
PROTEIN = _openbabel.PROTEIN
PURINE = _openbabel.PURINE
PYRIMIDINE = _openbabel.PYRIMIDINE
SOLVENT = _openbabel.SOLVENT
WATER = _openbabel.WATER
class OBInternalCoord(object):
    """SWIG proxy for the C++ ``OBInternalCoord`` class.

    Holds one atom's internal (z-matrix style) coordinate: three
    reference atoms ``_a``/``_b``/``_c`` plus ``_dst``/``_ang``/``_tor``
    — presumably distance, angle and torsion values (names only; the
    semantics live in the C++ layer).
    """
    # SWIG memory-ownership flag for the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    _a = _swig_property(_openbabel.OBInternalCoord__a_get, _openbabel.OBInternalCoord__a_set)
    _b = _swig_property(_openbabel.OBInternalCoord__b_get, _openbabel.OBInternalCoord__b_set)
    _c = _swig_property(_openbabel.OBInternalCoord__c_get, _openbabel.OBInternalCoord__c_set)
    _dst = _swig_property(_openbabel.OBInternalCoord__dst_get, _openbabel.OBInternalCoord__dst_set)
    _ang = _swig_property(_openbabel.OBInternalCoord__ang_get, _openbabel.OBInternalCoord__ang_set)
    _tor = _swig_property(_openbabel.OBInternalCoord__tor_get, _openbabel.OBInternalCoord__tor_set)
    def __init__(self, a=None, b=None, c=None, dst=0.0, ang=0.0, tor=0.0):
        _openbabel.OBInternalCoord_swiginit(self,_openbabel.new_OBInternalCoord(a, b, c, dst, ang, tor))
    __swig_destroy__ = _openbabel.delete_OBInternalCoord
OBInternalCoord_swigregister = _openbabel.OBInternalCoord_swigregister
OBInternalCoord_swigregister(OBInternalCoord)
# Per-atom bit-flag constants re-exported from the C++ extension
# (ring membership, aromaticity, stereo and H-bonding markers).
OB_4RING_ATOM = _openbabel.OB_4RING_ATOM
OB_3RING_ATOM = _openbabel.OB_3RING_ATOM
OB_AROMATIC_ATOM = _openbabel.OB_AROMATIC_ATOM
OB_RING_ATOM = _openbabel.OB_RING_ATOM
OB_CSTEREO_ATOM = _openbabel.OB_CSTEREO_ATOM
OB_ACSTEREO_ATOM = _openbabel.OB_ACSTEREO_ATOM
OB_DONOR_ATOM = _openbabel.OB_DONOR_ATOM
OB_ACCEPTOR_ATOM = _openbabel.OB_ACCEPTOR_ATOM
OB_CHIRAL_ATOM = _openbabel.OB_CHIRAL_ATOM
OB_POS_CHIRAL_ATOM = _openbabel.OB_POS_CHIRAL_ATOM
OB_NEG_CHIRAL_ATOM = _openbabel.OB_NEG_CHIRAL_ATOM
OB_ATOM_HAS_NO_H = _openbabel.OB_ATOM_HAS_NO_H
OB_ATOM_NOT_H_DEFICIENT = _openbabel.OB_ATOM_NOT_H_DEFICIENT
OBATOM_TYPE_LEN = _openbabel.OBATOM_TYPE_LEN
class OBAtom(OBBase):
    """SWIG proxy for the C++ ``OBAtom`` class.

    A single atom: element, charge, coordinates, bonds and derived
    chemical properties.  All behavior is implemented in the C++ layer
    and bound onto this proxy below via ``new_instancemethod``.
    """
    # SWIG memory-ownership flag for the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # Traversal marker exposed as a read/write property.
    Visit = _swig_property(_openbabel.OBAtom_Visit_get, _openbabel.OBAtom_Visit_set)
    def __init__(self):
        _openbabel.OBAtom_swiginit(self,_openbabel.new_OBAtom())
    __swig_destroy__ = _openbabel.delete_OBAtom
# Bind the C-implemented OBAtom member functions onto the proxy class,
# then register the proxy with SWIG's runtime type system.
# SWIG-generated; do not hand-edit.
# -- setters --
OBAtom.__eq__ = new_instancemethod(_openbabel.OBAtom___eq__,None,OBAtom)
OBAtom.Duplicate = new_instancemethod(_openbabel.OBAtom_Duplicate,None,OBAtom)
OBAtom.SetIdx = new_instancemethod(_openbabel.OBAtom_SetIdx,None,OBAtom)
OBAtom.SetId = new_instancemethod(_openbabel.OBAtom_SetId,None,OBAtom)
OBAtom.SetHyb = new_instancemethod(_openbabel.OBAtom_SetHyb,None,OBAtom)
OBAtom.SetAtomicNum = new_instancemethod(_openbabel.OBAtom_SetAtomicNum,None,OBAtom)
OBAtom.SetIsotope = new_instancemethod(_openbabel.OBAtom_SetIsotope,None,OBAtom)
OBAtom.SetImplicitValence = new_instancemethod(_openbabel.OBAtom_SetImplicitValence,None,OBAtom)
OBAtom.IncrementImplicitValence = new_instancemethod(_openbabel.OBAtom_IncrementImplicitValence,None,OBAtom)
OBAtom.DecrementImplicitValence = new_instancemethod(_openbabel.OBAtom_DecrementImplicitValence,None,OBAtom)
OBAtom.SetFormalCharge = new_instancemethod(_openbabel.OBAtom_SetFormalCharge,None,OBAtom)
OBAtom.SetSpinMultiplicity = new_instancemethod(_openbabel.OBAtom_SetSpinMultiplicity,None,OBAtom)
OBAtom.SetType = new_instancemethod(_openbabel.OBAtom_SetType,None,OBAtom)
OBAtom.SetPartialCharge = new_instancemethod(_openbabel.OBAtom_SetPartialCharge,None,OBAtom)
OBAtom.SetCoordPtr = new_instancemethod(_openbabel.OBAtom_SetCoordPtr,None,OBAtom)
OBAtom.SetVector = new_instancemethod(_openbabel.OBAtom_SetVector,None,OBAtom)
OBAtom.SetResidue = new_instancemethod(_openbabel.OBAtom_SetResidue,None,OBAtom)
OBAtom.SetParent = new_instancemethod(_openbabel.OBAtom_SetParent,None,OBAtom)
OBAtom.SetAromatic = new_instancemethod(_openbabel.OBAtom_SetAromatic,None,OBAtom)
OBAtom.UnsetAromatic = new_instancemethod(_openbabel.OBAtom_UnsetAromatic,None,OBAtom)
OBAtom.SetClockwiseStereo = new_instancemethod(_openbabel.OBAtom_SetClockwiseStereo,None,OBAtom)
OBAtom.SetAntiClockwiseStereo = new_instancemethod(_openbabel.OBAtom_SetAntiClockwiseStereo,None,OBAtom)
OBAtom.SetPositiveStereo = new_instancemethod(_openbabel.OBAtom_SetPositiveStereo,None,OBAtom)
OBAtom.SetNegativeStereo = new_instancemethod(_openbabel.OBAtom_SetNegativeStereo,None,OBAtom)
OBAtom.UnsetStereo = new_instancemethod(_openbabel.OBAtom_UnsetStereo,None,OBAtom)
OBAtom.SetInRing = new_instancemethod(_openbabel.OBAtom_SetInRing,None,OBAtom)
OBAtom.SetChiral = new_instancemethod(_openbabel.OBAtom_SetChiral,None,OBAtom)
OBAtom.ClearCoordPtr = new_instancemethod(_openbabel.OBAtom_ClearCoordPtr,None,OBAtom)
# -- getters --
OBAtom.GetFormalCharge = new_instancemethod(_openbabel.OBAtom_GetFormalCharge,None,OBAtom)
OBAtom.GetAtomicNum = new_instancemethod(_openbabel.OBAtom_GetAtomicNum,None,OBAtom)
OBAtom.GetIsotope = new_instancemethod(_openbabel.OBAtom_GetIsotope,None,OBAtom)
OBAtom.GetSpinMultiplicity = new_instancemethod(_openbabel.OBAtom_GetSpinMultiplicity,None,OBAtom)
OBAtom.GetAtomicMass = new_instancemethod(_openbabel.OBAtom_GetAtomicMass,None,OBAtom)
OBAtom.GetExactMass = new_instancemethod(_openbabel.OBAtom_GetExactMass,None,OBAtom)
OBAtom.GetIdx = new_instancemethod(_openbabel.OBAtom_GetIdx,None,OBAtom)
OBAtom.GetIndex = new_instancemethod(_openbabel.OBAtom_GetIndex,None,OBAtom)
OBAtom.GetId = new_instancemethod(_openbabel.OBAtom_GetId,None,OBAtom)
OBAtom.GetCoordinateIdx = new_instancemethod(_openbabel.OBAtom_GetCoordinateIdx,None,OBAtom)
OBAtom.GetCIdx = new_instancemethod(_openbabel.OBAtom_GetCIdx,None,OBAtom)
OBAtom.GetValence = new_instancemethod(_openbabel.OBAtom_GetValence,None,OBAtom)
OBAtom.GetHyb = new_instancemethod(_openbabel.OBAtom_GetHyb,None,OBAtom)
OBAtom.GetImplicitValence = new_instancemethod(_openbabel.OBAtom_GetImplicitValence,None,OBAtom)
OBAtom.GetHvyValence = new_instancemethod(_openbabel.OBAtom_GetHvyValence,None,OBAtom)
OBAtom.GetHeteroValence = new_instancemethod(_openbabel.OBAtom_GetHeteroValence,None,OBAtom)
OBAtom.GetType = new_instancemethod(_openbabel.OBAtom_GetType,None,OBAtom)
OBAtom.GetX = new_instancemethod(_openbabel.OBAtom_GetX,None,OBAtom)
OBAtom.GetY = new_instancemethod(_openbabel.OBAtom_GetY,None,OBAtom)
OBAtom.GetZ = new_instancemethod(_openbabel.OBAtom_GetZ,None,OBAtom)
OBAtom.x = new_instancemethod(_openbabel.OBAtom_x,None,OBAtom)
OBAtom.y = new_instancemethod(_openbabel.OBAtom_y,None,OBAtom)
OBAtom.z = new_instancemethod(_openbabel.OBAtom_z,None,OBAtom)
OBAtom.GetCoordinate = new_instancemethod(_openbabel.OBAtom_GetCoordinate,None,OBAtom)
OBAtom.GetVector = new_instancemethod(_openbabel.OBAtom_GetVector,None,OBAtom)
OBAtom.GetPartialCharge = new_instancemethod(_openbabel.OBAtom_GetPartialCharge,None,OBAtom)
OBAtom.GetResidue = new_instancemethod(_openbabel.OBAtom_GetResidue,None,OBAtom)
OBAtom.GetParent = new_instancemethod(_openbabel.OBAtom_GetParent,None,OBAtom)
OBAtom.GetNewBondVector = new_instancemethod(_openbabel.OBAtom_GetNewBondVector,None,OBAtom)
OBAtom.GetBond = new_instancemethod(_openbabel.OBAtom_GetBond,None,OBAtom)
OBAtom.GetNextAtom = new_instancemethod(_openbabel.OBAtom_GetNextAtom,None,OBAtom)
# -- bond/neighbor iteration --
OBAtom.BeginBonds = new_instancemethod(_openbabel.OBAtom_BeginBonds,None,OBAtom)
OBAtom.EndBonds = new_instancemethod(_openbabel.OBAtom_EndBonds,None,OBAtom)
OBAtom.BeginBond = new_instancemethod(_openbabel.OBAtom_BeginBond,None,OBAtom)
OBAtom.NextBond = new_instancemethod(_openbabel.OBAtom_NextBond,None,OBAtom)
OBAtom.BeginNbrAtom = new_instancemethod(_openbabel.OBAtom_BeginNbrAtom,None,OBAtom)
OBAtom.NextNbrAtom = new_instancemethod(_openbabel.OBAtom_NextNbrAtom,None,OBAtom)
OBAtom.GetDistance = new_instancemethod(_openbabel.OBAtom_GetDistance,None,OBAtom)
OBAtom.GetAngle = new_instancemethod(_openbabel.OBAtom_GetAngle,None,OBAtom)
# -- structure editing --
OBAtom.NewResidue = new_instancemethod(_openbabel.OBAtom_NewResidue,None,OBAtom)
OBAtom.AddResidue = new_instancemethod(_openbabel.OBAtom_AddResidue,None,OBAtom)
OBAtom.DeleteResidue = new_instancemethod(_openbabel.OBAtom_DeleteResidue,None,OBAtom)
OBAtom.AddBond = new_instancemethod(_openbabel.OBAtom_AddBond,None,OBAtom)
OBAtom.InsertBond = new_instancemethod(_openbabel.OBAtom_InsertBond,None,OBAtom)
OBAtom.DeleteBond = new_instancemethod(_openbabel.OBAtom_DeleteBond,None,OBAtom)
OBAtom.ClearBond = new_instancemethod(_openbabel.OBAtom_ClearBond,None,OBAtom)
OBAtom.HtoMethyl = new_instancemethod(_openbabel.OBAtom_HtoMethyl,None,OBAtom)
OBAtom.SetHybAndGeom = new_instancemethod(_openbabel.OBAtom_SetHybAndGeom,None,OBAtom)
OBAtom.ForceNoH = new_instancemethod(_openbabel.OBAtom_ForceNoH,None,OBAtom)
OBAtom.HasNoHForced = new_instancemethod(_openbabel.OBAtom_HasNoHForced,None,OBAtom)
OBAtom.ForceImplH = new_instancemethod(_openbabel.OBAtom_ForceImplH,None,OBAtom)
OBAtom.HasImplHForced = new_instancemethod(_openbabel.OBAtom_HasImplHForced,None,OBAtom)
# -- counts and derived measures --
OBAtom.CountFreeOxygens = new_instancemethod(_openbabel.OBAtom_CountFreeOxygens,None,OBAtom)
OBAtom.ImplicitHydrogenCount = new_instancemethod(_openbabel.OBAtom_ImplicitHydrogenCount,None,OBAtom)
OBAtom.ExplicitHydrogenCount = new_instancemethod(_openbabel.OBAtom_ExplicitHydrogenCount,None,OBAtom)
OBAtom.MemberOfRingCount = new_instancemethod(_openbabel.OBAtom_MemberOfRingCount,None,OBAtom)
OBAtom.MemberOfRingSize = new_instancemethod(_openbabel.OBAtom_MemberOfRingSize,None,OBAtom)
OBAtom.CountRingBonds = new_instancemethod(_openbabel.OBAtom_CountRingBonds,None,OBAtom)
OBAtom.SmallestBondAngle = new_instancemethod(_openbabel.OBAtom_SmallestBondAngle,None,OBAtom)
OBAtom.AverageBondAngle = new_instancemethod(_openbabel.OBAtom_AverageBondAngle,None,OBAtom)
OBAtom.BOSum = new_instancemethod(_openbabel.OBAtom_BOSum,None,OBAtom)
OBAtom.KBOSum = new_instancemethod(_openbabel.OBAtom_KBOSum,None,OBAtom)
# -- boolean predicates --
OBAtom.HasResidue = new_instancemethod(_openbabel.OBAtom_HasResidue,None,OBAtom)
OBAtom.IsHydrogen = new_instancemethod(_openbabel.OBAtom_IsHydrogen,None,OBAtom)
OBAtom.IsCarbon = new_instancemethod(_openbabel.OBAtom_IsCarbon,None,OBAtom)
OBAtom.IsNitrogen = new_instancemethod(_openbabel.OBAtom_IsNitrogen,None,OBAtom)
OBAtom.IsOxygen = new_instancemethod(_openbabel.OBAtom_IsOxygen,None,OBAtom)
OBAtom.IsSulfur = new_instancemethod(_openbabel.OBAtom_IsSulfur,None,OBAtom)
OBAtom.IsPhosphorus = new_instancemethod(_openbabel.OBAtom_IsPhosphorus,None,OBAtom)
OBAtom.IsAromatic = new_instancemethod(_openbabel.OBAtom_IsAromatic,None,OBAtom)
OBAtom.IsInRing = new_instancemethod(_openbabel.OBAtom_IsInRing,None,OBAtom)
OBAtom.IsInRingSize = new_instancemethod(_openbabel.OBAtom_IsInRingSize,None,OBAtom)
OBAtom.IsHeteroatom = new_instancemethod(_openbabel.OBAtom_IsHeteroatom,None,OBAtom)
OBAtom.IsNotCorH = new_instancemethod(_openbabel.OBAtom_IsNotCorH,None,OBAtom)
OBAtom.IsConnected = new_instancemethod(_openbabel.OBAtom_IsConnected,None,OBAtom)
OBAtom.IsOneThree = new_instancemethod(_openbabel.OBAtom_IsOneThree,None,OBAtom)
OBAtom.IsOneFour = new_instancemethod(_openbabel.OBAtom_IsOneFour,None,OBAtom)
OBAtom.IsCarboxylOxygen = new_instancemethod(_openbabel.OBAtom_IsCarboxylOxygen,None,OBAtom)
OBAtom.IsPhosphateOxygen = new_instancemethod(_openbabel.OBAtom_IsPhosphateOxygen,None,OBAtom)
OBAtom.IsSulfateOxygen = new_instancemethod(_openbabel.OBAtom_IsSulfateOxygen,None,OBAtom)
OBAtom.IsNitroOxygen = new_instancemethod(_openbabel.OBAtom_IsNitroOxygen,None,OBAtom)
OBAtom.IsAmideNitrogen = new_instancemethod(_openbabel.OBAtom_IsAmideNitrogen,None,OBAtom)
OBAtom.IsPolarHydrogen = new_instancemethod(_openbabel.OBAtom_IsPolarHydrogen,None,OBAtom)
OBAtom.IsNonPolarHydrogen = new_instancemethod(_openbabel.OBAtom_IsNonPolarHydrogen,None,OBAtom)
OBAtom.IsAromaticNOxide = new_instancemethod(_openbabel.OBAtom_IsAromaticNOxide,None,OBAtom)
OBAtom.IsChiral = new_instancemethod(_openbabel.OBAtom_IsChiral,None,OBAtom)
OBAtom.IsAxial = new_instancemethod(_openbabel.OBAtom_IsAxial,None,OBAtom)
OBAtom.IsClockwise = new_instancemethod(_openbabel.OBAtom_IsClockwise,None,OBAtom)
OBAtom.IsAntiClockwise = new_instancemethod(_openbabel.OBAtom_IsAntiClockwise,None,OBAtom)
OBAtom.IsPositiveStereo = new_instancemethod(_openbabel.OBAtom_IsPositiveStereo,None,OBAtom)
OBAtom.IsNegativeStereo = new_instancemethod(_openbabel.OBAtom_IsNegativeStereo,None,OBAtom)
OBAtom.HasChiralitySpecified = new_instancemethod(_openbabel.OBAtom_HasChiralitySpecified,None,OBAtom)
OBAtom.HasChiralVolume = new_instancemethod(_openbabel.OBAtom_HasChiralVolume,None,OBAtom)
OBAtom.IsHbondAcceptor = new_instancemethod(_openbabel.OBAtom_IsHbondAcceptor,None,OBAtom)
OBAtom.IsHbondDonor = new_instancemethod(_openbabel.OBAtom_IsHbondDonor,None,OBAtom)
OBAtom.IsHbondDonorH = new_instancemethod(_openbabel.OBAtom_IsHbondDonorH,None,OBAtom)
OBAtom.HasAlphaBetaUnsat = new_instancemethod(_openbabel.OBAtom_HasAlphaBetaUnsat,None,OBAtom)
OBAtom.HasBondOfOrder = new_instancemethod(_openbabel.OBAtom_HasBondOfOrder,None,OBAtom)
OBAtom.CountBondsOfOrder = new_instancemethod(_openbabel.OBAtom_CountBondsOfOrder,None,OBAtom)
OBAtom.HasNonSingleBond = new_instancemethod(_openbabel.OBAtom_HasNonSingleBond,None,OBAtom)
OBAtom.HasSingleBond = new_instancemethod(_openbabel.OBAtom_HasSingleBond,None,OBAtom)
OBAtom.HasDoubleBond = new_instancemethod(_openbabel.OBAtom_HasDoubleBond,None,OBAtom)
OBAtom.HasAromaticBond = new_instancemethod(_openbabel.OBAtom_HasAromaticBond,None,OBAtom)
OBAtom.MatchesSMARTS = new_instancemethod(_openbabel.OBAtom_MatchesSMARTS,None,OBAtom)
OBAtom_swigregister = _openbabel.OBAtom_swigregister
OBAtom_swigregister(OBAtom)
# Per-bond bit-flag constants re-exported from the C++ extension
# (aromaticity, wedge/hash depiction, ring closure, Kekulé order).
OB_AROMATIC_BOND = _openbabel.OB_AROMATIC_BOND
OB_WEDGE_BOND = _openbabel.OB_WEDGE_BOND
OB_HASH_BOND = _openbabel.OB_HASH_BOND
OB_RING_BOND = _openbabel.OB_RING_BOND
OB_TORUP_BOND = _openbabel.OB_TORUP_BOND
OB_TORDOWN_BOND = _openbabel.OB_TORDOWN_BOND
OB_KSINGLE_BOND = _openbabel.OB_KSINGLE_BOND
OB_KDOUBLE_BOND = _openbabel.OB_KDOUBLE_BOND
OB_KTRIPLE_BOND = _openbabel.OB_KTRIPLE_BOND
OB_CLOSURE_BOND = _openbabel.OB_CLOSURE_BOND
OB_WEDGE_OR_HASH_BOND = _openbabel.OB_WEDGE_OR_HASH_BOND
OB_CIS_OR_TRANS_BOND = _openbabel.OB_CIS_OR_TRANS_BOND
class OBBond(OBBase):
    """SWIG proxy for OpenBabel's C++ OBBond (a bond between two OBAtoms).

    The class body only declares the wrapped flag constants and SWIG
    properties; the instance methods are attached after the class
    definition via new_instancemethod.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # Bond classification flag values exported from the C++ layer.
    Aromatic = _openbabel.OBBond_Aromatic
    Ring = _openbabel.OBBond_Ring
    Closure = _openbabel.OBBond_Closure
    Wedge = _openbabel.OBBond_Wedge
    Hash = _openbabel.OBBond_Hash
    WedgeOrHash = _openbabel.OBBond_WedgeOrHash
    CisOrTrans = _openbabel.OBBond_CisOrTrans
    Visit = _swig_property(_openbabel.OBBond_Visit_get, _openbabel.OBBond_Visit_set)
    def __init__(self):
        """Construct an empty OBBond backed by a new C++ object."""
        _openbabel.OBBond_swiginit(self,_openbabel.new_OBBond())
    __swig_destroy__ = _openbabel.delete_OBBond
# SWIG-generated glue: attach the wrapped C++ OBBond accessors, mutators
# and predicates to the OBBond proxy class.  Do not edit by hand.
OBBond.SetIdx = new_instancemethod(_openbabel.OBBond_SetIdx,None,OBBond)
OBBond.SetId = new_instancemethod(_openbabel.OBBond_SetId,None,OBBond)
OBBond.SetBO = new_instancemethod(_openbabel.OBBond_SetBO,None,OBBond)
OBBond.SetBondOrder = new_instancemethod(_openbabel.OBBond_SetBondOrder,None,OBBond)
OBBond.SetBegin = new_instancemethod(_openbabel.OBBond_SetBegin,None,OBBond)
OBBond.SetEnd = new_instancemethod(_openbabel.OBBond_SetEnd,None,OBBond)
OBBond.SetParent = new_instancemethod(_openbabel.OBBond_SetParent,None,OBBond)
OBBond.SetLength = new_instancemethod(_openbabel.OBBond_SetLength,None,OBBond)
OBBond.Set = new_instancemethod(_openbabel.OBBond_Set,None,OBBond)
OBBond.SetKSingle = new_instancemethod(_openbabel.OBBond_SetKSingle,None,OBBond)
OBBond.SetKDouble = new_instancemethod(_openbabel.OBBond_SetKDouble,None,OBBond)
OBBond.SetKTriple = new_instancemethod(_openbabel.OBBond_SetKTriple,None,OBBond)
OBBond.SetAromatic = new_instancemethod(_openbabel.OBBond_SetAromatic,None,OBBond)
OBBond.SetWedge = new_instancemethod(_openbabel.OBBond_SetWedge,None,OBBond)
OBBond.SetHash = new_instancemethod(_openbabel.OBBond_SetHash,None,OBBond)
OBBond.SetWedgeOrHash = new_instancemethod(_openbabel.OBBond_SetWedgeOrHash,None,OBBond)
OBBond.SetUp = new_instancemethod(_openbabel.OBBond_SetUp,None,OBBond)
OBBond.SetDown = new_instancemethod(_openbabel.OBBond_SetDown,None,OBBond)
OBBond.SetInRing = new_instancemethod(_openbabel.OBBond_SetInRing,None,OBBond)
OBBond.SetClosure = new_instancemethod(_openbabel.OBBond_SetClosure,None,OBBond)
OBBond.UnsetHash = new_instancemethod(_openbabel.OBBond_UnsetHash,None,OBBond)
OBBond.UnsetWedge = new_instancemethod(_openbabel.OBBond_UnsetWedge,None,OBBond)
OBBond.UnsetUp = new_instancemethod(_openbabel.OBBond_UnsetUp,None,OBBond)
OBBond.UnsetDown = new_instancemethod(_openbabel.OBBond_UnsetDown,None,OBBond)
OBBond.UnsetAromatic = new_instancemethod(_openbabel.OBBond_UnsetAromatic,None,OBBond)
OBBond.UnsetKekule = new_instancemethod(_openbabel.OBBond_UnsetKekule,None,OBBond)
OBBond.GetIdx = new_instancemethod(_openbabel.OBBond_GetIdx,None,OBBond)
OBBond.GetId = new_instancemethod(_openbabel.OBBond_GetId,None,OBBond)
OBBond.GetBO = new_instancemethod(_openbabel.OBBond_GetBO,None,OBBond)
OBBond.GetBondOrder = new_instancemethod(_openbabel.OBBond_GetBondOrder,None,OBBond)
OBBond.GetFlags = new_instancemethod(_openbabel.OBBond_GetFlags,None,OBBond)
OBBond.GetBeginAtomIdx = new_instancemethod(_openbabel.OBBond_GetBeginAtomIdx,None,OBBond)
OBBond.GetEndAtomIdx = new_instancemethod(_openbabel.OBBond_GetEndAtomIdx,None,OBBond)
OBBond.GetBeginAtom = new_instancemethod(_openbabel.OBBond_GetBeginAtom,None,OBBond)
OBBond.GetEndAtom = new_instancemethod(_openbabel.OBBond_GetEndAtom,None,OBBond)
OBBond.GetNbrAtom = new_instancemethod(_openbabel.OBBond_GetNbrAtom,None,OBBond)
OBBond.GetParent = new_instancemethod(_openbabel.OBBond_GetParent,None,OBBond)
OBBond.GetEquibLength = new_instancemethod(_openbabel.OBBond_GetEquibLength,None,OBBond)
OBBond.GetLength = new_instancemethod(_openbabel.OBBond_GetLength,None,OBBond)
OBBond.GetNbrAtomIdx = new_instancemethod(_openbabel.OBBond_GetNbrAtomIdx,None,OBBond)
OBBond.FindSmallestRing = new_instancemethod(_openbabel.OBBond_FindSmallestRing,None,OBBond)
OBBond.IsAromatic = new_instancemethod(_openbabel.OBBond_IsAromatic,None,OBBond)
OBBond.IsInRing = new_instancemethod(_openbabel.OBBond_IsInRing,None,OBBond)
OBBond.IsRotor = new_instancemethod(_openbabel.OBBond_IsRotor,None,OBBond)
OBBond.IsAmide = new_instancemethod(_openbabel.OBBond_IsAmide,None,OBBond)
OBBond.IsPrimaryAmide = new_instancemethod(_openbabel.OBBond_IsPrimaryAmide,None,OBBond)
OBBond.IsSecondaryAmide = new_instancemethod(_openbabel.OBBond_IsSecondaryAmide,None,OBBond)
OBBond.IsTertiaryAmide = new_instancemethod(_openbabel.OBBond_IsTertiaryAmide,None,OBBond)
OBBond.IsEster = new_instancemethod(_openbabel.OBBond_IsEster,None,OBBond)
OBBond.IsCarbonyl = new_instancemethod(_openbabel.OBBond_IsCarbonyl,None,OBBond)
OBBond.IsSingle = new_instancemethod(_openbabel.OBBond_IsSingle,None,OBBond)
OBBond.IsDouble = new_instancemethod(_openbabel.OBBond_IsDouble,None,OBBond)
OBBond.IsTriple = new_instancemethod(_openbabel.OBBond_IsTriple,None,OBBond)
OBBond.IsKSingle = new_instancemethod(_openbabel.OBBond_IsKSingle,None,OBBond)
OBBond.IsKDouble = new_instancemethod(_openbabel.OBBond_IsKDouble,None,OBBond)
OBBond.IsKTriple = new_instancemethod(_openbabel.OBBond_IsKTriple,None,OBBond)
OBBond.IsClosure = new_instancemethod(_openbabel.OBBond_IsClosure,None,OBBond)
OBBond.IsUp = new_instancemethod(_openbabel.OBBond_IsUp,None,OBBond)
OBBond.IsDown = new_instancemethod(_openbabel.OBBond_IsDown,None,OBBond)
OBBond.IsWedge = new_instancemethod(_openbabel.OBBond_IsWedge,None,OBBond)
OBBond.IsHash = new_instancemethod(_openbabel.OBBond_IsHash,None,OBBond)
OBBond.IsWedgeOrHash = new_instancemethod(_openbabel.OBBond_IsWedgeOrHash,None,OBBond)
OBBond.IsCisOrTrans = new_instancemethod(_openbabel.OBBond_IsCisOrTrans,None,OBBond)
OBBond.IsDoubleBondGeometry = new_instancemethod(_openbabel.OBBond_IsDoubleBondGeometry,None,OBBond)
OBBond_swigregister = _openbabel.OBBond_swigregister
OBBond_swigregister(OBBond)
class OBReaction(OBBase):
    """SWIG proxy for OpenBabel's C++ OBReaction.

    Holds reactants, products, agents and an optional transition state;
    the instance methods are attached after the class definition via
    new_instancemethod.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self):
        """Construct an empty OBReaction backed by a new C++ object."""
        _openbabel.OBReaction_swiginit(self,_openbabel.new_OBReaction())
    ClassDescription = staticmethod(_openbabel.OBReaction_ClassDescription)
    __swig_destroy__ = _openbabel.delete_OBReaction
# SWIG-generated glue: attach the wrapped C++ OBReaction methods to the
# OBReaction proxy class.  Do not edit by hand.
OBReaction.NumReactants = new_instancemethod(_openbabel.OBReaction_NumReactants,None,OBReaction)
OBReaction.NumProducts = new_instancemethod(_openbabel.OBReaction_NumProducts,None,OBReaction)
OBReaction.AddReactant = new_instancemethod(_openbabel.OBReaction_AddReactant,None,OBReaction)
OBReaction.AddProduct = new_instancemethod(_openbabel.OBReaction_AddProduct,None,OBReaction)
OBReaction.SetTransitionState = new_instancemethod(_openbabel.OBReaction_SetTransitionState,None,OBReaction)
OBReaction.AddAgent = new_instancemethod(_openbabel.OBReaction_AddAgent,None,OBReaction)
OBReaction.GetReactant = new_instancemethod(_openbabel.OBReaction_GetReactant,None,OBReaction)
OBReaction.GetProduct = new_instancemethod(_openbabel.OBReaction_GetProduct,None,OBReaction)
OBReaction.GetTransitionState = new_instancemethod(_openbabel.OBReaction_GetTransitionState,None,OBReaction)
OBReaction.GetAgent = new_instancemethod(_openbabel.OBReaction_GetAgent,None,OBReaction)
OBReaction.GetTitle = new_instancemethod(_openbabel.OBReaction_GetTitle,None,OBReaction)
OBReaction.GetComment = new_instancemethod(_openbabel.OBReaction_GetComment,None,OBReaction)
OBReaction.SetTitle = new_instancemethod(_openbabel.OBReaction_SetTitle,None,OBReaction)
OBReaction.SetComment = new_instancemethod(_openbabel.OBReaction_SetComment,None,OBReaction)
OBReaction.IsReversible = new_instancemethod(_openbabel.OBReaction_IsReversible,None,OBReaction)
OBReaction.SetReversible = new_instancemethod(_openbabel.OBReaction_SetReversible,None,OBReaction)
OBReaction_swigregister = _openbabel.OBReaction_swigregister
OBReaction_swigregister(OBReaction)
# Module-level alias for the static ClassDescription; the def is
# immediately shadowed by the direct C-function assignment below.
def OBReaction_ClassDescription():
  return _openbabel.OBReaction_ClassDescription()
OBReaction_ClassDescription = _openbabel.OBReaction_ClassDescription
def exceptionIter(*args):
raise Exception("""\nThis method can | |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google App Engine Pipeline API for complex, asynchronous workflows."""
# Names exported by "from pipeline import *"; this is the supported public
# surface of the module -- everything else is internal.
__all__ = [
    # Public API.
    'Error', 'PipelineSetupError', 'PipelineExistsError',
    'PipelineRuntimeError', 'SlotNotFilledError', 'SlotNotDeclaredError',
    'UnexpectedPipelineError', 'PipelineStatusError', 'Slot', 'Pipeline',
    'PipelineFuture', 'After', 'InOrder', 'Retry', 'Abort', 'get_status_tree',
    'get_pipeline_names', 'get_root_list', 'create_handlers_map',
    'set_enforce_auth',
]
import datetime
import hashlib
import itertools
import json
import logging
import os
import pprint
import re
import sys
import threading
import time
import urllib
import uuid
from google.appengine.api import mail
from google.appengine.api import files
from google.appengine.api import users
from google.appengine.api import taskqueue
from google.appengine.ext import db
from google.appengine.ext import webapp
# Relative imports
import models
import status_ui
import util as mr_util
# pylint: disable=g-bad-name
# pylint: disable=protected-access
# For convenience
# Short module-local aliases for the datastore model classes from models.py.
_BarrierIndex = models._BarrierIndex
_BarrierRecord = models._BarrierRecord
_PipelineRecord = models._PipelineRecord
_SlotRecord = models._SlotRecord
_StatusRecord = models._StatusRecord
# Overall TODOs:
# - Add a human readable name for start()
# Potential TODOs:
# - Add support for ANY N barriers.
# - Allow Pipelines to declare they are "short" and optimize the evaluate()
# function to run as many of them in quick succession.
# - Add support in all Pipelines for hold/release where up-stream
# barriers will fire but do nothing because the Pipeline is not ready.
################################################################################
# Exception hierarchy.  Everything in this module derives from Error so
# callers can catch the whole family with one except clause.
class Error(Exception):
  """Base class for exceptions in this module."""
# -- Setup-time errors (raised before a Pipeline starts executing) --
class PipelineSetupError(Error):
  """Base class for exceptions that happen before Pipeline execution."""
class PipelineExistsError(PipelineSetupError):
  """A new Pipeline with an assigned idempotence_key cannot be overwritten."""
# -- Runtime errors (raised while a Pipeline is executing) --
class PipelineRuntimeError(Error):
  """Base class for exceptions that happen during Pipeline execution."""
class SlotNotFilledError(PipelineRuntimeError):
  """A slot that should have been filled already was not yet filled."""
class SlotNotDeclaredError(PipelineRuntimeError):
  """A slot that was filled or passed along was not previously declared."""
class UnexpectedPipelineError(PipelineRuntimeError):
  """An assertion failed, potentially leaving the pipeline unable to proceed."""
# -- Control-flow exceptions deliberately raised from user code --
class PipelineUserError(Error):
  """Exceptions raised indirectly by developers to cause certain behaviors."""
class Retry(PipelineUserError):
  """The currently running pipeline should be retried at a later time."""
class Abort(PipelineUserError):
  """The currently running pipeline should be aborted up to the root."""
# -- Status-collection and callback errors --
class PipelineStatusError(Error):
  """Exceptions raised when trying to collect pipeline status."""
class _CallbackTaskError(Error):
  """A callback task was unable to execute properly for some reason."""
################################################################################
# Maximum fan-out per barrier-notification / abort-begin task.
_MAX_BARRIERS_TO_NOTIFY = 10
_MAX_ABORTS_TO_BEGIN = 10
# Toggled by the test harness; see Slot.__init__ and the testing helpers.
_TEST_MODE = False
_TEST_ROOT_PIPELINE_KEY = None
# Default retry policy: start at 15s, double each attempt, give up after 3.
_DEFAULT_BACKOFF_SECONDS = 15
_DEFAULT_BACKOFF_FACTOR = 2
_DEFAULT_MAX_ATTEMPTS = 3
# Slack allowed when deciding whether a retry task fired "on time".
_RETRY_WIGGLE_TIMEDELTA = datetime.timedelta(seconds=20)
_DEBUG = False
# Cap on serialized JSON payload size (presumably to stay under the
# datastore's ~1MB entity limit -- TODO confirm).
_MAX_JSON_SIZE = 900000
_ENFORCE_AUTH = True
_MAX_CALLBACK_TASK_RETRIES = 5
################################################################################
class Slot(object):
  """An output that is filled by a Pipeline as it executes.

  A Slot wraps a datastore-backed _SlotRecord.  Until the record has been
  filled, accessing value, filler or fill_datetime raises
  SlotNotFilledError.
  """

  def __init__(self, name=None, slot_key=None, strict=False):
    """Initializer.

    Args:
      name: The name of this slot.
      slot_key: The db.Key for this slot's _SlotRecord if it's already been
        allocated by an up-stream pipeline.
      strict: If this Slot was created as an output of a strictly defined
        pipeline.

    Raises:
      UnexpectedPipelineError: If no name was supplied.
    """
    if name is None:
      raise UnexpectedPipelineError('Slot with key "%s" missing a name.' %
                                    slot_key)
    if slot_key is None:
      slot_key = db.Key.from_path(_SlotRecord.kind(), uuid.uuid4().hex)
      # A freshly allocated key has no datastore record yet, except when
      # the test harness simulates everything in memory.
      self._exists = _TEST_MODE
    else:
      self._exists = True
    self._touched = False
    self._strict = strict
    self.name = name
    self.key = slot_key
    self.filled = False
    self._filler_pipeline_key = None
    self._fill_datetime = None
    self._value = None

  def _ensure_filled(self):
    """Raises SlotNotFilledError if this slot has not been filled yet.

    Shared guard for the value/filler/fill_datetime properties, which all
    enforce the same precondition.
    """
    if not self.filled:
      raise SlotNotFilledError('Slot with name "%s", key "%s" not yet filled.'
                               % (self.name, self.key))

  @property
  def value(self):
    """Returns the current value of this slot.

    Returns:
      The value of the slot (a serializable Python type).

    Raises:
      SlotNotFilledError if the value hasn't been filled yet.
    """
    self._ensure_filled()
    return self._value

  @property
  def filler(self):
    """Returns the pipeline ID that filled this slot's value.

    Returns:
      A string that is the pipeline ID.

    Raises:
      SlotNotFilledError if the value hasn't been filled yet.
    """
    self._ensure_filled()
    return self._filler_pipeline_key.name()

  @property
  def fill_datetime(self):
    """Returns when the slot was filled.

    Returns:
      A datetime.datetime.

    Raises:
      SlotNotFilledError if the value hasn't been filled yet.
    """
    self._ensure_filled()
    return self._fill_datetime

  def _set_value(self, slot_record):
    """Sets the value of this slot based on its corresponding _SlotRecord.

    Does nothing if the slot has not yet been filled.

    Args:
      slot_record: The _SlotRecord containing this Slot's value.
    """
    if slot_record.status == _SlotRecord.FILLED:
      self.filled = True
      self._filler_pipeline_key = _SlotRecord.filler.get_value_for_datastore(
          slot_record)
      self._fill_datetime = slot_record.fill_time
      self._value = slot_record.value

  def _set_value_test(self, filler_pipeline_key, value):
    """Sets the value of this slot for use in testing.

    Args:
      filler_pipeline_key: The db.Key of the _PipelineRecord that filled
        this slot.
      value: The serializable value set for this slot.
    """
    self.filled = True
    self._filler_pipeline_key = filler_pipeline_key
    self._fill_datetime = datetime.datetime.utcnow()
    # Convert to JSON and back again, to simulate the behavior of production.
    self._value = json.loads(json.dumps(
        value, cls=mr_util.JsonEncoder), cls=mr_util.JsonDecoder)

  def __repr__(self):
    """Returns a string representation of this slot."""
    if self.filled:
      return repr(self._value)
    else:
      return 'Slot(name="%s", slot_key="%s")' % (self.name, self.key)
class PipelineFuture(object):
  """A future for accessing the outputs of a Pipeline.

  Attribute access (e.g. future.my_output) returns the named Slot, creating
  it on demand unless the future is in strict mode.
  """
  # NOTE: Do not, ever, add a names() method to this class. Callers cannot do
  # introspection on their context of being called. Even though the runtime
  # environment of the Pipeline can allow for that to happen, such behavior
  # would prevent synchronous simulation and verification, which is an
  # unacceptable tradeoff.
  def __init__(self, output_names, force_strict=False):
    """Initializer.

    Args:
      output_names: The list of required output names that will be strictly
        enforced by this class.
      force_strict: If True, force this future to be in strict mode.
    """
    # Pipelines that must finish before this one may run (see After/InOrder).
    self._after_all_pipelines = set()
    # Every future implicitly has a "default" output slot.
    self._output_dict = {
        'default': Slot(name='default'),
    }
    # Strict mode: only declared output names may be used.
    self._strict = len(output_names) > 0 or force_strict
    if self._strict:
      for name in output_names:
        if name in self._output_dict:
          raise UnexpectedPipelineError('Output name reserved: "%s"' % name)
        self._output_dict[name] = Slot(name=name, strict=True)
  def _inherit_outputs(self,
                       pipeline_name,
                       already_defined,
                       resolve_outputs=False):
    """Inherits outputs from a calling Pipeline.

    Args:
      pipeline_name: The Pipeline class name (used for debugging).
      already_defined: Maps output name to stringified db.Key (of _SlotRecords)
        of any existing output slots to be inherited by this future.
      resolve_outputs: When True, this method will dereference all output slots
        before returning back to the caller, making those output slots' values
        available.

    Raises:
      UnexpectedPipelineError when resolve_outputs is True and any of the output
      slots could not be retrieved from the Datastore.
    """
    for name, slot_key in already_defined.iteritems():
      if not isinstance(slot_key, db.Key):
        slot_key = db.Key(slot_key)
      slot = self._output_dict.get(name)
      if slot is None:
        # Unknown name: an error in strict mode, lazily declared otherwise.
        if self._strict:
          raise UnexpectedPipelineError(
              'Inherited output named "%s" must be filled but '
              'not declared for pipeline class "%s"' % (name, pipeline_name))
        else:
          self._output_dict[name] = Slot(name=name, slot_key=slot_key)
      else:
        # Re-point the declared slot at the inherited datastore record.
        slot.key = slot_key
        slot._exists = True
    if resolve_outputs:
      # Batch-fetch all slot records and populate the local Slot objects.
      slot_key_dict = dict((s.key, s) for s in self._output_dict.itervalues())
      all_slots = db.get(slot_key_dict.keys())
      for slot, slot_record in zip(slot_key_dict.itervalues(), all_slots):
        if slot_record is None:
          raise UnexpectedPipelineError(
              'Inherited output named "%s" for pipeline class "%s" is '
              'missing its Slot in the datastore: "%s"' %
              (slot.name, pipeline_name, slot.key))
        slot = slot_key_dict[slot_record.key()]
        slot._set_value(slot_record)
  def __getattr__(self, name):
    """Provides an output Slot instance with the given name if allowed."""
    if name not in self._output_dict:
      if self._strict:
        raise SlotNotDeclaredError('Undeclared output with name "%s"' % name)
      # Non-strict futures create output slots on first access.
      self._output_dict[name] = Slot(name=name)
    slot = self._output_dict[name]
    return slot
class _PipelineMeta(type):
  """Meta-class for recording all Pipelines that have been defined."""

  # Registry of every class created through this metaclass, in definition
  # order.
  _all_classes = []

  def __new__(mcs, name, bases, cls_dict):
    """Creates the new class and appends it to the registry."""
    new_class = super(_PipelineMeta, mcs).__new__(mcs, name, bases, cls_dict)
    mcs._all_classes.append(new_class)
    return new_class
class ClassProperty(object):
  """Descriptor that lets us have read-only class properties.

  Wraps a callable that receives the owning class; attribute access on the
  class invokes it and returns the result.
  """

  def __init__(self, method):
    # Callable invoked with the owner class on every attribute access.
    self.method = method

  def __get__(self, cls, obj):
    # ``obj`` receives the owner class from the descriptor protocol here.
    getter = self.method
    return getter(obj)
class Pipeline(object):
"""A Pipeline function-object that performs operations and has a life cycle.
Class properties (to be overridden by sub-classes):
async: When True, this Pipeline will execute asynchronously and fill the
default output slot itself using the complete() method.
output_names: List of named outputs (in addition to the default slot) that
this Pipeline must output to (no more, no less).
public_callbacks: If the callback URLs generated for this class should be
accessible by all external requests regardless of login or task queue.
admin_callbacks: If the callback URLs generated for this class should | |
try:
_self.lock(_self)
r = _cmd.get_viewport(_self._COb)
finally:
_self.unlock(r,_self)
if is_ok(r):
if len(r):
if (_self.get_setting_int("logging") != 0) and (output<3):
if not quiet:
print(" get_viewport: data written to log file.")
_self.log("_ viewport (\\\n","cmd.viewport((\\\n")
_self.log("_ %14.9f, %14.9f )\n"% r)
if output<2: # suppress if we have a log file open
output=0
if output and (not quiet) and (output<3):
print("### cut below here and paste into script ###")
print("viewport %4d, %4d"% r)
print("### cut above here and paste into script ###")
if output==3:
return ("viewport ( %14.9f, %14.9f )\n"% r)
elif _self._raising(r,_self):
raise QuietException
return r
def get_vis(_self=cmd):
    """Fetch the global object visibility/activity state from the C core.

    The returned opaque record can be stored (scenes and sessions do this)
    and later restored with set_vis().
    """
    r = DEFAULT_ERROR
    try:
        _self.lock(_self)
        r = _cmd.get_vis(_self._COb)
    finally:
        # unlock() must see the result code, even on error.
        _self.unlock(r,_self)
    if _self._raising(r,_self): raise QuietException
    return r
def set_vis(dict,_self=cmd):
    """Restore a visibility/activity state previously captured with get_vis().

    Args:
        dict: opaque visibility record as returned by get_vis().
    """
    r = DEFAULT_ERROR
    try:
        _self.lock(_self)
        r = _cmd.set_vis(_self._COb,dict)
    finally:
        _self.unlock(r,_self)
    if _self._raising(r,_self): raise QuietException
    return r
def get_colorection(key,_self=cmd):
    """Capture the current color state ("colorection") under a scene key.

    Part of the legacy (<=1.7.4) scene machinery; pair with
    set_colorection() / del_colorection().
    """
    r = DEFAULT_ERROR
    try:
        _self.lock(_self)
        r = _cmd.get_colorection(_self._COb,key)
    finally:
        _self.unlock(r,_self)
    if _self._raising(r,_self): raise QuietException
    return r
def set_colorection(dict,key,_self=cmd):
    """Reapply a color state previously captured with get_colorection().

    Args:
        dict: opaque colorection record as returned by get_colorection().
        key: scene key the colorection was stored under.
    """
    r = DEFAULT_ERROR
    try:
        _self.lock(_self)
        r = _cmd.set_colorection(_self._COb,dict,key)
    finally:
        _self.unlock(r,_self)
    if _self._raising(r,_self): raise QuietException
    return r
def del_colorection(dict,key,_self=cmd):
    """Free a colorection previously captured with get_colorection().

    Args:
        dict: opaque colorection record as returned by get_colorection().
        key: scene key the colorection was stored under.
    """
    r = DEFAULT_ERROR
    try:
        _self.lock(_self)
        r = _cmd.del_colorection(_self._COb,dict,key)
    finally:
        _self.unlock(r,_self)
    if _self._raising(r,_self): raise QuietException
    return r
def get_scene_list(_self=cmd):
    """Return the list of scene names in their current display order."""
    with _self.lockcm:
        return _cmd.get_scene_order(_self._COb)
def chain_session(_self=cmd):
    """Load the next numbered session file in a series, if one exists.

    Reads the "session_file" setting, increments the first digit run that
    is followed by a dot (e.g. "demo001.pse" -> "demo002.pse"), and probes
    the next eleven numbers, checking both .pse and .psw variants of each
    candidate filename.  Used by scene() for presentation auto-advance.

    Returns:
        1 if a follow-up session file was found and scheduled for loading,
        0 otherwise.
    """
    import os
    # assumes locked interpreter
    session_file = str(_self.get("session_file"))
    if not session_file:
        return 0
    re_pat = re.compile(r"[0-9]+\.")
    mo = re_pat.search(session_file)
    if mo is None:
        return 0
    pat = mo.group(0)
    if not pat:
        return 0
    file_no = int(float(pat)) + 1
    # Preserve the zero-padding width of the original number.
    new_form = r"%0" + str(len(pat) - 1) + "d."
    for new_num in range(file_no, file_no + 11):
        new_pat = new_form % new_num
        new_file = re_pat.sub(new_pat, session_file)
        # Try both PSE and PSW variants of the candidate filename.
        # BUG FIX: re.sub()'s 4th positional argument is "count", not
        # "flags"; the original passed re.I there, so the match was never
        # case-insensitive.  Pass flags= explicitly.
        if not os.path.exists(new_file):
            new_file = re.sub(r"\.pse$", ".psw", new_file, flags=re.I)
        if not os.path.exists(new_file):
            new_file = re.sub(r"\.psw$", ".pse", new_file, flags=re.I)
        if os.path.exists(new_file):
            # NOTE(review): format='psw' is passed even for .pse files --
            # preserved from the original; confirm load() tolerates this.
            _self.do("_ cmd.load(r'''" + new_file + "''',format='psw')")
            return 1
    return 0
def scene_order(names,sort=0,location='current',quiet=1,_self=cmd):
    '''
DESCRIPTION
    "scene_order" changes the ordering of scenes.
USAGE
    scene_order names, sort, location
ARGUMENTS
    names = string: a space-separated list of names
    sort = yes or no {default: no}
    location = top, current, or bottom {default: current}
EXAMPLES
    scene_order *,yes
    scene_order F6 F4 F3
    scene_order 003 006 004, location=top
PYMOL API
    cmd.scene_order(string names, string sort, string location)
SEE ALSO
    scene
    '''
    # Normalize user input: expand abbreviated location keywords and map
    # yes/no style strings onto 0/1.
    location = location_sc.auto_err(location,'location')
    if is_string(sort):
        sort=boolean_dict[boolean_sc.auto_err(sort,'sort option')]
    with _self.lockcm:
        return _cmd.scene_order(_self._COb, names, sort, location)
def _scene_get_current_message(_self=cmd):
    """Return the text of the scene message currently on display, or None.

    A scene message is a "Message" wizard carrying a ``from_scene``
    attribute (see scene_recall_message).
    """
    wizard = _self.get_wizard()
    if wizard is None:
        return None
    if wizard.__class__.__name__ != 'Message':
        return None
    if not hasattr(wizard, 'from_scene'):
        return None
    return '\n'.join(wizard.message)
def scene_recall_message(message, _self=cmd):
    '''
    INTERNAL, DO NOT USE.
    Display a scene message.
    '''
    wizard = _self.get_wizard()
    # A previous scene message can be replaced in place rather than stacked.
    can_replace = (wizard is not None
                   and wizard.__class__.__name__ == 'Message'
                   and hasattr(wizard, 'from_scene'))
    if not message:
        # No new message: dismiss the old scene message, if one is showing.
        if can_replace:
            _self.wizard()
        return
    if is_string(message):
        message = message.splitlines()
    elif not is_list(message):
        raise TypeError("message %s" % (type(message)))
    if can_replace:
        _self.replace_wizard("message", *message)
    else:
        _self.wizard("message", *message)
    # Tag the wizard so later calls recognize it as a scene message.
    _self.get_wizard().from_scene = 1
def scene(key='auto', action='recall', message=None, view=1,
          color=1, active=1, rep=1, frame=1, animate=-1,
          new_key=None, hand=1, quiet=1, sele="all", _self=cmd):
    '''
DESCRIPTION
    "scene" saves and restores scenes. A scene consists of the camera
    view, all object activity information, all atom-wise visibilities,
    all atom-wise colors, all representations, the global frame index,
    and may contain a text message to display on playback.
USAGE
    scene [key [,action [, message, [ new_key=new-key-value ]]]]
ARGUMENTS
    key = string, new, auto, or *: use new for an automatically
    numbered new scene, use auto for the current scene (if one
    exists), and use * for all scenes (clear and recall actions only).
    action = store, recall, insert_after, insert_before, next,
    previous, update, rename, or clear: (default = recall). If
    rename, then a new_key argument must be explicitly defined.
    message = string: a text message to display with the scene.
    new_key = string: the new name for the scene
EXAMPLES
    scene *
    scene F1, store
    scene F2, store, Please note the critical hydrogen bond shown in yellow.
    scene F1
    scene F2
    scene F1, rename, new_key=F5
NOTES
    Scenes F1 through F12 are automatically bound to function keys
    provided that "set_key" has not been used to redefine the behaviour
    of the respective key.
SEE ALSO
    view, set_view, get_view
    '''
    # NOTE: the action normalizations below are order-dependent; do not
    # reorder them.
    action = scene_action_sc.auto_err(action, 'action')
    if is_list(message):
        message = '\n'.join(message)
    # default when called with no arguments: step to the next scene
    if key == 'auto':
        if action == 'recall':
            action = 'next'
    # preserve message on update (must run before 'update' is aliased away)
    if action == 'update':
        if message is None:
            message = _scene_get_current_message(_self)
    # aliases (DEPRECATED): map old action names onto the C-level ones
    if action == 'clear':
        action = 'delete'
    elif action == 'append' or action == 'update':
        action = 'store'
    # presentation auto quit: at the end of a presentation either chain to
    # the next session file or exit PyMOL
    if (pymol._scene_quit_on_action == action and
            action in ('next', 'previous') and
            _self.get_setting_boolean("presentation") and
            _self.get_setting_boolean("presentation_auto_quit") and
            _self.get("scene_current_name") == ""):
        if not chain_session(_self):
            _self.quit()
    # call C function
    with _self.lockcm:
        r = _cmd.scene(_self._COb, key, action, message, int(view),
                       int(color), int(active), int(rep), int(frame),
                       float(animate), new_key, int(hand), sele)
    # autocomplete: keep the scene-name completion list in sync
    if action in ('store', 'delete') or action.startswith('insert_'):
        _self._pymol._scene_dict_sc.rebuild(_self.get_scene_list())
    # for presentation auto quit
    pymol._scene_quit_on_action = action
    return r
def _legacy_scene(key='auto', action='recall', message=None, view=1,
                  color=1, active=1, rep=1, frame=1, animate=-1,
                  new_key=None, hand=1, quiet=1, _self=cmd):
    ''' FOR INTERNAL USE ONLY. Stores and deletes <=1.7.4 compatible scenes.

    Legacy scenes live in pymol._scene_dict as lists of
    [view, visibility, frame, colorection, rep-flag, message]; per-rep atom
    selections are stored under names of the form "_scene_<key>_<rep>".
    Only action=store (single key) and action=clear (key='*') are
    supported.
    '''
    pymol=_self._pymol
    r = DEFAULT_SUCCESS
    view = int(view)
    rep = int(rep)
    color = int(color)
    active = int(active)
    frame = int(frame)
    quiet = int(quiet)
    animate = 0
    try:
        _self.lock(_self) # manipulating global data, so need lock
        if key=='*':
            if action=='clear':
                for key in pymol._scene_dict:
                    # free selections
                    scene_list = pymol._scene_dict[key]
                    if len(scene_list)>3:
                        colorection = scene_list[3]
                        if colorection!=None:
                            # release the stored color state for this key
                            _self.del_colorection(colorection,key)
                    # drop the per-representation scene selections
                    name = "_scene_"+key+"_*"
                    _self.delete(name)
            else:
                raise ValueError('action=' + action)
        else:
            if action == 'store':
                if key in ('new', 'auto'):
                    raise ValueError('key=' + key)
                if key in pymol._scene_dict:
                    raise RuntimeError('update not supported')
                if rep:
                    # remember which atoms carry each representation
                    for rep_name in rep_list:
                        name = "_scene_"+key+"_"+rep_name
                        _self.select(name,"rep "+rep_name)
                if is_string(message):
                    if message:
                        # strip a single level of surrounding quotes,
                        # otherwise split the message into lines
                        if (message[0:1] in [ '"',"'"] and
                            message[-1:] in [ '"',"'"]):
                            message=message[1:-1]
                        else:
                            message = message.splitlines()
                # capture only the components the caller asked for
                pymol._scene_dict[key] = [
                    _self.get_view(0) if view else None,
                    _self.get_vis() if active else None,
                    _self.get_frame() if frame else None,
                    _self.get_colorection(key) if color else None,
                    1 if rep else None,
                    message,
                ]
            else:
                raise ValueError('action=' + action)
    finally:
        _self.unlock(r,_self)
    return r
def session_save_views(session,_self=cmd):
    """Session-save hook: store a deep copy of the named-view dictionary."""
    pymol=_self._pymol
    session['view_dict']=copy.deepcopy(pymol._view_dict)
    return 1
def session_restore_views(session,_self=cmd):
    """Session-load hook: restore named views and their completion list."""
    pymol=_self._pymol
    if 'view_dict' in session:
        pymol._view_dict=copy.deepcopy(session['view_dict'])
        # keep tab-completion of view names in sync
        pymol._view_dict_sc.rebuild(list(pymol._view_dict.keys()))
    return 1
def session_restore_scenes(session,_self=cmd):
    """Session-load hook: convert <=1.7.4 legacy scenes into modern ones.

    Replays each legacy scene through _convert_legacy_scene() and then
    restores the pre-existing display state and scene order.
    """
    # Restore scenes from old session files (<= 1.7.4)
    if 'scene_dict' in session:
        _self.scene('*', 'clear')
        # save initial scene under a name guaranteed not to collide
        tempname = '_initial_scene'
        while tempname in session['scene_dict']:
            tempname += '_'
        _self.scene(tempname, 'store')
        frame = 0
        if _self.get_movie_playing():
            # pause the movie while scenes are replayed; resume below
            _self.mstop()
            frame = _self.get_frame()
        for key, data in list(session['scene_dict'].items()):
            _convert_legacy_scene(key, data, _self)
        if frame:
            _self.frame(frame)
            _self.mplay()
        # restore initial scene
        _self.scene(tempname, 'recall', animate=0)
        _self.scene(tempname, 'clear')
    if 'scene_order' in session:
        _self.scene_order(' '.join(session['scene_order']))
    # rebuild scene-name tab completion
    _self._pymol._scene_dict_sc.rebuild(_self.get_scene_list())
    return 1
def _convert_legacy_scene(key, scene_list, _self=cmd):
    """Convert one <=1.7.4 legacy scene entry into a modern stored scene.

    Args:
        key: scene name.
        scene_list: legacy list [view, visibility, frame, colorection,
            rep-flag, message]; shorter lists are padded with None.
            NOTE: the list is mutated (padded) in place.
    """
    # Create a scene from the given legacy scene list and finally delete
    # the colorection and rep selections.
    scene_list += [None] * 5
    view, active, frame, color, rep = [(0 if x is None else 1)
            for x in scene_list[:5]]
    if frame:
        _self.frame(scene_list[2])
    if view:
        _self.set_view(scene_list[0], 0.0)
    if active:
        _self.disable()
        _self.deselect()
        _self.set_vis(scene_list[1])
    if color:
        # apply the stored colors, then free the colorection
        _self.set_colorection(scene_list[3], key)
        _self.del_colorection(scene_list[3], key)
    if rep:
        # only atomic representations
        _self.hide('everything', '(*)')
        sele_prefix = _self.get_legal_name('_scene_' + key + '_')
        for rep_name in rep_list:
            _self.show(rep_name, "?" + sele_prefix + rep_name)
        # free the per-representation helper selections
        _self.delete(sele_prefix + "*")
    # re-store the recreated state under the modern scene machinery
    _self.scene(key, 'store', scene_list[5], view, color, active, rep, frame)
def stereo(toggle='on', quiet=1, _self=cmd):
    '''
DESCRIPTION
    "stereo" activates or deactivates stereo mode.
USAGE
    stereo [toggle]
ARGUMENTS
    toggle = on, off, crosseye, walleye, quadbuffer, sidebyside, or geowall
EXAMPLES
    stereo on
    stereo off
    stereo crosseye
NOTES
    "quadbuffer" is the default stereo mode if hardware stereo is available.
    otherwise, "crosseye" is the default.
PYMOL API
    cmd.stereo(string toggle)
    '''
    # map keyword to its numeric stereo-mode code
    toggle = stereo_dict[stereo_sc.auto_err(str(toggle),'toggle')]
    r = DEFAULT_ERROR
    try:
        _self.lock(_self)
        if(toggle>0) : # stereo mode code
            # a specific mode was named: record it, then just switch stereo on
            _self.set("stereo_mode",str(toggle),quiet=quiet)
            toggle=1
        r = _cmd.stereo(_self._COb,toggle)
        if is_error(r):
            print("Error: Selected stereo mode is not available.")
    finally:
        _self.unlock(r,_self);
    if _self._raising(r,_self): raise QuietException
    return r
def turn(axis, angle, _self=cmd):
'''
DESCRIPTION
"turn" rotates the camera about one of the three primary axes,
centered at the origin.
USAGE
turn axis, angle
EXAMPLES
turn x, 90
turn y, 45
PYMOL API
cmd.turn(string axis, float angle)
SEE ALSO
move, rotate, translate, zoom, center, clip
'''
r = DEFAULT_ERROR
try:
_self.lock(_self)
r = _cmd.turn(_self._COb,str(axis),float(angle))
finally:
_self.unlock(r,_self)
if _self._raising(r,_self): | |
import streamlit as st
import numpy as np
import pandas as pd
import torch
from torchvision import transforms
import models
import yaml
from skimage import io, transform, filters
import cv2 as cv
from scipy import ndimage
from pathlib import Path
from argparse import Namespace
from PIL import Image, ImageFilter
from options.opt_manager import OptManager
from utility.plotting import polygon_on_img
st.beta_set_page_config(
    page_title="ConstraintNet",
    #page_icon="",
    layout="centered",
    initial_sidebar_state="collapsed"
)

# Read the widget state persisted by the previous run of this app.
widget_values = {}
dest = Path('./utility/widget_values.yaml')
with dest.open('r') as f:
    # BUG FIX: yaml.load() without an explicit Loader is deprecated and can
    # construct arbitrary Python objects from the file; safe_load() builds
    # only plain Python types, which is all this state file needs.
    widget_values = yaml.safe_load(f)

st.write("""
# Facial Landmark Detection with ConstraintNet
""")
################################
#LOAD CONFIG####################
################################
#st.write("""
# ## Load Config
# """)
def file_selector_config(widget_values, folder_path=Path('./experiments/')):
    """Show a selectbox of test-config YAML files and return the chosen path.

    Globs folder_path for config_test*.yaml (excluding sector_of_a_circle
    configs) and pre-selects the file remembered in widget_values from the
    previous run, if it is still present.
    """
    candidates = [p for p in folder_path.glob('**/config_test*.yaml')
                  if 'sector_of_a_circle' not in str(p)]
    default_index = 0
    previous = Path(widget_values['file_config'])
    for position, candidate in enumerate(candidates):
        if candidate == previous:
            default_index = position
            break
    return st.selectbox('Select config file:', candidates, index=default_index)
@st.cache
def dict2opts(opts_dict):
    """Flatten the nested options dict into an argparse-style Namespace.

    The per-block grouping under opts_dict['options'] is discarded; on
    duplicate option names, later blocks overwrite earlier ones.
    """
    flat = {}
    for block_opts in opts_dict['options'].values():
        flat.update(block_opts)
    return Namespace(**flat)
# Section 0: pick a config file and show a summary of its options.
opts = 0
with st.beta_expander('0 Load Config', expanded=True):
    # NOTE(review): this first assignment is a dead store -- it is
    # overwritten by file_selector_config() on the next line.
    file_config = Path(widget_values['file_config'])
    file_config = file_selector_config(widget_values)
    # remember the choice for the next run
    widget_values['file_config'] = str(file_config)
    opts_dict = OptManager.yaml2dict(file_config)
    opts = dict2opts(opts_dict)
    st.write("Config Overview ")
    st.write("* Comment: ", opts.comment)
    st.write("* Model module: ", opts.model_module)
    if hasattr(opts, 'opts2constr_guard_layer'):
        st.write("* Constraint guard layer: ", opts.opts2constr_guard_layer)
    st.write("* Weight file: ", opts.reload_ckpt_file)
    # manually toggled visibility flag persisted across reruns
    show_config = widget_values['show_config']
    show_config_b = st.button('Show/Hide Total Config')
    if show_config_b:
        show_config = not show_config
    if show_config:
        st.json(opts_dict['options'])
    widget_values['show_config'] = show_config
################################
#LOAD IMAGE AND RESIZE##########
################################
#st.write("""
# ## Load Image and Resize
# """)
def file_selector_pic(folder_path=Path('./pics/')):
    """Render a selectbox over image files in *folder_path*, persist and return the choice."""
    filenames = [f for f in folder_path.iterdir() if '.txt' not in str(f)]
    # NOTE(review): pre-selecting the previously chosen file was disabled
    # (the `index=index` argument was commented out), so the dead index
    # computation has been removed. To restore persistence, compute the
    # index of widget_values['file_pic'] and pass it as `index=`.
    selected_filename = st.selectbox('Select image file:', filenames)
    widget_values['file_pic'] = str(selected_filename)
    return selected_filename
def resize_img(img, output_size=300):
    """Resize *img* (H x W x C ndarray) so its shorter side equals *output_size*.

    The aspect ratio is preserved: the longer side is scaled by the same
    factor. Returns a numpy array. *output_size* defaults to 300 to keep
    existing callers unchanged.
    """
    h_in, w_in = img.shape[:2]
    if h_in > w_in:
        # Portrait: the width is the shorter side.
        w_out = output_size
        h_out = h_in * w_out / w_in
    else:
        # Landscape or square: the height is the shorter side.
        h_out = output_size
        w_out = w_in * h_out / h_in
    # torchvision's Resize operates on PIL images, so convert back and forth.
    img = Image.fromarray(img)
    resize_it = transforms.Resize((int(h_out), int(w_out)))
    img = resize_it(img)
    return np.array(img)
img = 0  # placeholder; replaced by the loaded image below
#Path for image
img_file = Path(widget_values['file_pic'])
img = io.imread(img_file)
# resize
img = resize_img(img)
img_h = img.shape[0]
img_w = img.shape[1]
# NOTE(review): the image is re-loaded and re-resized inside the expander
# below, overwriting the values computed above.
with st.beta_expander('1 Load Image and Resize', expanded=True):
    col_1_load, col_2_load = st.beta_columns(2)
    with col_1_load:
        # Let the user pick an image and report its raw size.
        img_file = file_selector_pic()
        img = io.imread(img_file)
        img_h = img.shape[0]
        img_w = img.shape[1]
        st.write(""" (height, width) =
        """, (img_h, img_w))
        # resize so the shorter side is 300 px (see resize_img)
        img = resize_img(img)
        img_h = img.shape[0]
        img_w = img.shape[1]
        # info
        st.write("After resizing:")
        st.write("""(height, width) =
        """, (img_h, img_w))
    with col_2_load:
        st.image(polygon_on_img(img), caption="Loaded image.", width=300)
################################
#CROP IMAGE#####################
################################
#st.write("""
# ## Crop Image
# """)
# Section 2: crop a fixed 224 x 224 patch; the top-left corner comes from
# sliders whose values are persisted in widget_values.
sl_x_min = widget_values['sl_x_min']
sl_y_min = widget_values['sl_y_min']
with st.beta_expander('2 Crop Image', expanded=True):
    st.write("""
    For cropping a 224 x 224 patch of the image,
    set top left corner of the rectangle:
    """)
    col_1_crop, col_2_crop = st.beta_columns(2)
    with col_1_crop:
        # 'Center Crop' overrides the persisted slider defaults before the
        # sliders render, so they jump to the centered position.
        center = st.button('Center Crop')
        if center:
            sl_x_min = int((img_w - 224) / 2)
            sl_y_min = int((img_h - 224) / 2)
        sl_x_min = st.slider("x_min", 0, img_w-224, sl_x_min)
        widget_values['sl_x_min'] = sl_x_min
        sl_x_max = sl_x_min + 224
        sl_y_min = st.slider("y_min", 0, img_h-224, sl_y_min)
        widget_values['sl_y_min'] = sl_y_min
        sl_y_max = sl_y_min + 224
    # Visualize the crop rectangle on the (still uncropped) image.
    top_left = np.array([sl_x_min, sl_y_min])
    top_right = np.array([sl_x_max, sl_y_min])
    bottom_right = np.array([sl_x_max, sl_y_max])
    bottom_left = np.array([sl_x_min, sl_y_max])
    polygon_crop = np.array([top_left, top_right, bottom_right, bottom_left])
    img_covered_area = polygon_on_img(img, polygon_crop, linewidth=5)
    with col_2_crop:
        st.image(img_covered_area, caption="This rectangle patch is cropped.", width=300)
    # Apply the crop (numpy slicing: rows = y, columns = x).
    rec = {'x_min': sl_x_min, 'y_min': sl_y_min}
    img = img[
        rec['y_min']:rec['y_min']+224,
        rec['x_min']:rec['x_min']+224
    ]
################################
#ADD A RECTANGLE################
################################
#st.write("""
# ## Add a Rectangle
# """)
def make_recording_widget(f):
    """Wrap the streamlit widget factory *f* so that every value it returns
    is mirrored into the global ``widget_values`` dict under the widget's label.
    """
    def recorder(label, *args, **kwargs):
        value = f(label, *args, **kwargs)
        widget_values[label] = value
        return value
    return recorder
# Section 3: optionally paint a filled rectangle onto the image (e.g. to
# simulate an occlusion). All settings are persisted in widget_values.
add_rec = widget_values['add_rec']
b_rec_x_min = widget_values['b_rec_x_min']
b_rec_x_max = widget_values['b_rec_x_max']
b_rec_y_min = widget_values['b_rec_y_min']
b_rec_y_max = widget_values['b_rec_y_max']
color_r = widget_values['color_r']
color_g = widget_values['color_g']
color_b = widget_values['color_b']
with st.beta_expander('3 Add a Rectangle'):
    # Toggle: each click flips whether the rectangle is drawn at all.
    add_rem = st.button('Add/Remove')
    if add_rem:
        add_rec = not add_rec
    if add_rec:
        col_1_rec, col_2_rec = st.beta_columns(2)
        with col_1_rec:
            st.write("Set left boundary")
            b_rec_x_min = st.slider("x_min", 0, 223, b_rec_x_min)
            widget_values['b_rec_x_min'] = b_rec_x_min
            st.write("Set top boundary")
            b_rec_y_min = st.slider("y_min", 0, 223, b_rec_y_min)
            widget_values['b_rec_y_min'] = b_rec_y_min
            st.write("Set Color")
            color_r = st.slider("R", 0, 255, color_r)
            widget_values['color_r'] = color_r
            color_g = st.slider("G", 0, 255, color_g)
            widget_values['color_g'] = color_g
            color_b = st.slider("B", 0, 255, color_b)
            widget_values['color_b'] = color_b
        with col_2_rec:
            st.write("Set right boundary")
            b_rec_x_max = st.slider("x_max", 0, 223, b_rec_x_max)
            widget_values['b_rec_x_max'] = b_rec_x_max
            st.write("Set bottom boundary")
            b_rec_y_max = st.slider("y_max", 0, 223, b_rec_y_max)
            widget_values['b_rec_y_max'] = b_rec_y_max
        # Paint the rectangle in-place, one RGB channel at a time.
        img[b_rec_y_min:b_rec_y_max, b_rec_x_min:b_rec_x_max,0] = color_r
        img[b_rec_y_min:b_rec_y_max, b_rec_x_min:b_rec_x_max,1] = color_g
        img[b_rec_y_min:b_rec_y_max, b_rec_x_min:b_rec_x_max,2] = color_b
        img_rec = polygon_on_img(img)
        st.image(img_rec, caption="Added rectangle.", width=450)
    widget_values['add_rec'] = add_rec
################################
#ROTATE IMAGE###################
################################
#st.write("""
# ## Rotate Image
# """)
# Section 4: rotate the image by a user-chosen angle.
with st.beta_expander('4 Rotate Image'):
    col_1_rot, col_2_rot = st.beta_columns(2)
    rot_angle = widget_values['rot_angle']
    with col_1_rot:
        st.write("Set angle for rotation:")
        # Reset button: force the angle back to zero before the slider renders.
        alpha_0 = st.button('No Rotation')
        if alpha_0:
            rot_angle = 0
        rot_angle = st.slider("alpha", -180, 180, rot_angle)
        widget_values['rot_angle'] = rot_angle
    with col_2_rot:
        # torchvision's rotate works on PIL images, so convert back and forth.
        img = Image.fromarray(img)
        img = transforms.functional.rotate(img, rot_angle)
        img = np.array(img)
        img_rotate = polygon_on_img(img)
        st.image(img_rotate, caption="Rotated image.", width=300)
################################
#BLURR IMAGE####################
################################
#st.write("""
# ## Add Gaussian Blurring
# """)
# Section 5: optionally blur the image with a Gaussian filter.
with st.beta_expander('5 Add Gaussian Blurring'):
    col_1_gauss, col_2_gauss = st.beta_columns(2)
    sigma = widget_values['sigma']
    with col_1_gauss:
        st.write("Set standard deviation:")
        # Reset button: disable blurring before the slider renders.
        sigma_0 = st.button('No Blurring')
        if sigma_0:
            sigma = 0
        sigma = st.slider('standard deviation', 0, 10, sigma, step=1)
        # PIL's GaussianBlur needs a PIL image; convert back to numpy after.
        img = Image.fromarray(img)
        img = img.filter(ImageFilter.GaussianBlur(sigma))
        img = np.array(img)
        widget_values['sigma'] = sigma
    with col_2_gauss:
        img_blur = polygon_on_img(img)
        st.image(img_blur, caption="Blurred image.", width=300)
################################
#Set Constraints################
################################
# Keep the unnormalized image for later visualization overlays.
img_for_plot = img
#ready for NN
img = img.transpose((2,0,1))  # HWC -> CHW, the layout torchvision Normalize expects
img = torch.from_numpy(img).float()
mean = opts.normalize_mean
std = opts.normalize_std
img = transforms.Normalize(mean, std)(img)
data = {}
data['img'] = img.unsqueeze(0)  # add batch dimension -> (1, C, H, W)
# Section 6 (triangle configs only): let the user place the three triangle
# vertices and pack them as the network's constraint parameters.
if "triangle" in str(file_config):
    with st.beta_expander('6 Set Triangle Constraint'):
        # Restore the persisted vertex coordinates.
        sl_x_1 = widget_values['sl_x_1']
        sl_y_1 = widget_values['sl_y_1']
        sl_x_2 = widget_values['sl_x_2']
        sl_y_2 = widget_values['sl_y_2']
        sl_x_3 = widget_values['sl_x_3']
        sl_y_3 = widget_values['sl_y_3']
        col_1_constr_top, col_2_constr_top = st.beta_columns(2)
        with col_1_constr_top:
            st.write("Top vertex:")
            sl_x_1 = st.slider("x_t", 0, 223, sl_x_1)
            widget_values['sl_x_1'] = sl_x_1
            sl_y_1 = st.slider("y_t", 0, 223, sl_y_1)
            widget_values['sl_y_1'] = sl_y_1
        col_1_constr, col_2_constr = st.beta_columns(2)
        with col_1_constr:
            st.write("Bottom left vertex:")
            sl_x_3 = st.slider("x_bl", 0, 223, sl_x_3)
            widget_values['sl_x_3'] = sl_x_3
            sl_y_3 = st.slider("y_bl", 0, 223, sl_y_3)
            widget_values['sl_y_3'] = sl_y_3
        with col_2_constr:
            st.write("Bottom right vertex:")
            sl_x_2 = st.slider("x_br", 0, 223, sl_x_2)
            widget_values['sl_x_2'] = sl_x_2
            sl_y_2 = st.slider("y_br", 0, 223, sl_y_2)
            widget_values['sl_y_2'] = sl_y_2
        # Constraint parameter order: (x_t, y_t, x_br, y_br, x_bl, y_bl), batched.
        data['constr_para'] = torch.tensor([sl_x_1, sl_y_1, sl_x_2, sl_y_2, sl_x_3,
            sl_y_3]).float().unsqueeze(0)
        polygon_triangle = np.array([[sl_x_1, sl_y_1], [sl_x_2, sl_y_2], [sl_x_3, sl_y_3]])
        with col_2_constr_top:
            st.image(polygon_on_img(img_for_plot, polygon_triangle), width=300)
# Section 6 (exp_1 bounding-box configs): let the user place the bb corners
# and pack (x_min, x_max, y_min, y_max) as the network's constraint parameters.
if ("exp_1" in str(file_config) and "bb" in str(file_config)):
    with st.beta_expander('6 Set Bounding Box Constraints', expanded=True):
        col_1_bb, col_2_bb = st.beta_columns(2)
        # Restore the persisted corner coordinates.
        sl_x_min_bb = widget_values['sl_x_min_bb']
        sl_y_min_bb = widget_values['sl_y_min_bb']
        sl_x_max_bb = widget_values['sl_x_max_bb']
        sl_y_max_bb = widget_values['sl_y_max_bb']
        with col_1_bb:
            st.write("Top left:")
            sl_x_min_bb = st.slider("x_min_bb", 0, 223, sl_x_min_bb)
            widget_values['sl_x_min_bb'] = sl_x_min_bb
            sl_y_min_bb = st.slider("y_min_bb", 0, 223, sl_y_min_bb)
            widget_values['sl_y_min_bb'] = sl_y_min_bb
        with col_2_bb:
            st.write("Bottom right:")
            sl_x_max_bb = st.slider("x_max_bb", 0, 223, sl_x_max_bb)
            widget_values['sl_x_max_bb'] = sl_x_max_bb
            sl_y_max_bb = st.slider("y_max_bb", 0, 223, sl_y_max_bb)
            widget_values['sl_y_max_bb'] = sl_y_max_bb
        # Constraint parameter order: (x_min, x_max, y_min, y_max), batched.
        data['constr_para'] = torch.tensor([sl_x_min_bb, sl_x_max_bb, sl_y_min_bb,
            sl_y_max_bb]).float().unsqueeze(0)
        polygon_bb = np.array([[sl_x_min_bb, sl_y_min_bb], [sl_x_max_bb,
            sl_y_min_bb], [sl_x_max_bb, sl_y_max_bb], [sl_x_min_bb, sl_y_max_bb]])
################################
#Apply ConstraintNet############
################################
#st.write("""
# ## Detection
# """)
# The detection section becomes number 7 when a constraint section (6) was
# rendered above (bb or triangle configs); otherwise it is section 6.
pred_name = '6 Detection'
if ("exp_1" in str(file_config) and "bb" in str(file_config)) or ("triangle" in
        str(file_config)):
    pred_name = '7 Detection'
with st.beta_expander(pred_name, expanded=True):
#load model from opts
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
@st.cache
def create_model():
model = models.my_model(opts)
model.to(device)
ckpt = torch.load(Path(opts.reload_ckpt_file))
model.load_state_dict(ckpt)
return model.eval()
model = create_model()
x_img = data['img'].to(device)
if opts.model_module == 'constraintnet':
constr_para = data['constr_para'].to(device)
y_pred = model(x_img, constr_para)
else:
y_pred = model(x_img)
if "exp_1" in str(file_config):
idx_x_nose = opts.lm_ordering_lm_order.index('nose_x')
x_nose = y_pred.cpu().data[0, idx_x_nose]
idx_y_nose = opts.lm_ordering_lm_order.index('nose_y')
y_nose = y_pred.cpu().data[0, idx_y_nose]
idx_x_lefteye = opts.lm_ordering_lm_order.index('lefteye_x')
x_lefteye = y_pred.cpu().data[0, idx_x_lefteye]
idx_y_lefteye = opts.lm_ordering_lm_order.index('lefteye_y')
y_lefteye = y_pred.cpu().data[0, idx_y_lefteye]
idx_x_righteye = opts.lm_ordering_lm_order.index('righteye_x')
x_righteye = y_pred.cpu().data[0, idx_x_righteye]
idx_y_righteye = opts.lm_ordering_lm_order.index('righteye_y')
y_righteye = y_pred.cpu().data[0, idx_y_righteye]
scatter_pts = (np.array([[x_nose, y_nose], [x_lefteye, y_lefteye],
[x_righteye, y_righteye]]), 300 ,'x','white')
if "resnet" in str(file_config):
img_exp_1_resnet = polygon_on_img(img_for_plot, scatter_pts=scatter_pts)
#io.imsave(Path('./pics/exp_1_resnet.jpg'),
# np.array(img_exp_1_resnet))
st.image(img_exp_1_resnet, caption="Prediction with resnet without constraints", width=600)
elif "bb_rel" in str(file_config):
img_exp_1_bb_rel = polygon_on_img(img_for_plot, polygon_xy = polygon_bb,
linewidth=5, scatter_pts = scatter_pts)
#io.imsave(Path('./pics/exp_1_bb_rel.jpg'),
# np.array(img_exp_1_resnet))
st.image(img_exp_1_bb_rel,
caption="Prediction within bb constraint and additional relative constraints",
width=600)
else:
img_exp_1_bb = polygon_on_img(img_for_plot, polygon_xy = polygon_bb,
linewidth=5, scatter_pts = scatter_pts)
#io.imsave(Path('./pics/exp_1_bb.jpg'),
# np.array(img_exp_1_bb))
st.image(img_exp_1_bb, caption="Prediction within bb constraint.", width=600)
if "exp_2" in str(file_config):
x_nose = | |
def transitionFunction_idleBehaviour_fun1(self):
self.log("[Behaviour idleBehaviour] -- Calculating Partial Transition Function fun1")
# Partial Transition Function - the first layer #
self.log("PARTIAL TRANSITION FUNCTION - FIRST LAYER - idleBehaviour consists of the following partial transition functions (decomposition based on input buffers)")
if self._in_flag_desiredRobotCommandTele and not self._in_flag_desiredRobotCommandAuto:
self.transitionFunction_idleBehaviour_fun1_0()
elif not self._in_flag_desiredRobotCommandTele and self._in_flag_desiredRobotCommandAuto:
self.transitionFunction_idleBehaviour_fun1_1()
elif not (self._in_flag_desiredRobotCommandTele and not self._in_flag_desiredRobotCommandAuto) and not ( not self._in_flag_desiredRobotCommandTele and self._in_flag_desiredRobotCommandAuto):
self.transitionFunction_idleBehaviour_fun1_2()
pass
##### Partial transition function: fun1_0 based on input buffers self._in_flag_desiredRobotCommandTele and not self._in_flag_desiredRobotCommandAuto #####
def transitionFunction_idleBehaviour_fun1_0(self):
self.log("[Behaviour idleBehaviour] -- Calculating Partial Transition Function fun1_0")
# Partial Transition Function - the second layer #
self.log("PARTIAL TRANSITION FUNCTION - SECOND LAYER - idleBehaviour consists of the following partial transition functions (decomposition based on input buffers)")
# Begin - Partial Transition Function Code
self._out_flag_desiredMoveCommand=True
print("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA BBBBBBBB")
print("[CS - ballCollector] -- idleBehaviour")
if(self.desiredRobotCommandTele.data=="stop"):
self.desiredMoveCommand.cmd.data="stop"
self.currentRobotSpeed.data=0
elif(self.desiredRobotCommandTele.data=="rotate left"):
self.desiredMoveCommand.cmd.data="rotate left"
elif(self.desiredRobotCommandTele.data=="rotate right"):
self.desiredMoveCommand.cmd.data="rotate right"
else:
self.desiredMoveCommand.cmd.data="empty"
if(self.desiredRobotCommandTele.data=="move faster"):
self.currentRobotSpeed.data=self.currentRobotSpeed.data+speed_delta
if(self.currentRobotSpeed.data>200):
self.currentRobotSpeed.data=200
elif(self.desiredRobotCommandTele.data=="move slower"):
self.currentRobotSpeed.data=self.currentRobotSpeed.data-speed_delta
if(self.currentRobotSpeed.data<0):
self.currentRobotSpeed.data=0
elif(self.desiredRobotCommandTele.data=="move front"):
self.desiredMoveCommand.direction.data=0
elif(self.desiredRobotCommandTele.data=="move backwards"):
self.desiredMoveCommand.direction.data=180
elif(self.desiredRobotCommandTele.data=="move left"):
self.desiredMoveCommand.direction.data=270
elif(self.desiredRobotCommandTele.data=="move right"):
self.desiredMoveCommand.direction.data=90
else:
self._out_flag_desiredMoveCommand=False
print("TEST")
self.desiredMoveCommand.desiredSpeed.data=self.currentRobotSpeed.data
print("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA BBBBBBBB 22222")
# End - Partial Transition Function Code
pass
##### Partial transition function: fun1_1 based on input buffers not self._in_flag_desiredRobotCommandTele and self._in_flag_desiredRobotCommandAuto #####
    def transitionFunction_idleBehaviour_fun1_1(self):
        """Translate an autonomous robot command into the desired move command.

        Reads self.desiredRobotCommandAuto; writes self.desiredMoveCommand and
        self.currentRobotSpeed; lowers self._out_flag_desiredMoveCommand for
        unrecognised commands.
        """
        self.log("[Behaviour idleBehaviour] -- Calculating Partial Transition Function fun1_1")
        # Partial Transition Function - the second layer #
        self.log("PARTIAL TRANSITION FUNCTION - SECOND LAYER - idleBehaviour consists of the following partial transition functions (decomposition based on input buffers)")
        # Begin - Partial Transition Function Code
        self._out_flag_desiredMoveCommand=True
        print("[CS - ballCollector] -- idleBehaviour")
        # Stop / rotation commands map directly onto the move command.
        if(self.desiredRobotCommandAuto.data=="stop"):
            self.desiredMoveCommand.cmd.data="stop"
            self.currentRobotSpeed.data=0
        elif(self.desiredRobotCommandAuto.data=="rotate left"):
            self.desiredMoveCommand.cmd.data="rotate left"
        elif(self.desiredRobotCommandAuto.data=="rotate right"):
            self.desiredMoveCommand.cmd.data="rotate right"
        else:
            # Speed adjustment, clamped to [0, 200].
            # NOTE(review): unlike the teleoperation variant (fun1_0), the
            # speed branches here leave desiredMoveCommand.cmd untouched and
            # "empty" is only set in the inner else - confirm this asymmetry
            # is intended.
            if(self.desiredRobotCommandAuto.data=="move faster"):
                self.currentRobotSpeed.data=self.currentRobotSpeed.data+speed_delta
                if(self.currentRobotSpeed.data>200):
                    self.currentRobotSpeed.data=200
            elif(self.desiredRobotCommandAuto.data=="move slower"):
                self.currentRobotSpeed.data=self.currentRobotSpeed.data-speed_delta
                if(self.currentRobotSpeed.data<0):
                    self.currentRobotSpeed.data=0
            else:
                self.desiredMoveCommand.cmd.data="empty"
                # Direction commands in compass degrees (0=N, 90=E, 180=S, 270=W).
                if(self.desiredRobotCommandAuto.data=="move front"):
                    self.desiredMoveCommand.direction.data=0
                elif(self.desiredRobotCommandAuto.data=="move backwards"):
                    self.desiredMoveCommand.direction.data=180
                elif(self.desiredRobotCommandAuto.data=="move left"):
                    self.desiredMoveCommand.direction.data=270
                elif(self.desiredRobotCommandAuto.data=="move right"):
                    self.desiredMoveCommand.direction.data=90
                else:
                    self._out_flag_desiredMoveCommand=False
        self.desiredMoveCommand.desiredSpeed.data=self.currentRobotSpeed.data
        # End - Partial Transition Function Code
        pass
##### Partial transition function: fun1_2 based on input buffers not (self._in_flag_desiredRobotCommandTele and not self._in_flag_desiredRobotCommandAuto) and not ( not self._in_flag_desiredRobotCommandTele and self._in_flag_desiredRobotCommandAuto) #####
def transitionFunction_idleBehaviour_fun1_2(self):
self.log("[Behaviour idleBehaviour] -- Calculating Partial Transition Function fun1_2")
# Partial Transition Function - the second layer #
self.log("PARTIAL TRANSITION FUNCTION - SECOND LAYER - idleBehaviour consists of the following partial transition functions (decomposition based on input buffers)")
# Begin - Partial Transition Function Code
# Comment generated by RSSL compiler - transition function generated - data from input buffers was not received on time - code not specified
# End - Partial Transition Function Code
pass
##### Decomposition of partial transition function based on input buffers #####
def transitionFunction_idleBehaviour_fun2(self):
self.log("[Behaviour idleBehaviour] -- Calculating Partial Transition Function fun2")
# Partial Transition Function - the first layer #
self.log("PARTIAL TRANSITION FUNCTION - FIRST LAYER - idleBehaviour consists of the following partial transition functions (decomposition based on input buffers)")
if self._in_flag_desiredRobotCommandAuto and not self._in_flag_desiredRobotCommandTele:
self.transitionFunction_idleBehaviour_fun2_0()
elif not self._in_flag_desiredRobotCommandAuto and self._in_flag_desiredRobotCommandTele:
self.transitionFunction_idleBehaviour_fun2_1()
elif not (self._in_flag_desiredRobotCommandAuto and not self._in_flag_desiredRobotCommandTele) and not ( not self._in_flag_desiredRobotCommandAuto and self._in_flag_desiredRobotCommandTele):
self.transitionFunction_idleBehaviour_fun2_2()
pass
##### Partial transition function: fun2_0 based on input buffers self._in_flag_desiredRobotCommandAuto and not self._in_flag_desiredRobotCommandTele #####
def transitionFunction_idleBehaviour_fun2_0(self):
self.log("[Behaviour idleBehaviour] -- Calculating Partial Transition Function fun2_0")
# Partial Transition Function - the second layer #
self.log("PARTIAL TRANSITION FUNCTION - SECOND LAYER - idleBehaviour consists of the following partial transition functions (decomposition based on input buffers)")
# Begin - Partial Transition Function Code
self._out_flag_desiredVacuumCommand=True
print("[CS - ballCollector] -- idleBehaviour")
if(self.desiredRobotCommandAuto.data=="vacuum turn on"):
self.desiredVacuumCommand.data=vacuum_max
elif(self.desiredRobotCommandAuto.data=="vacuum off"):
self.desiredVacuumCommand.data=0
# End - Partial Transition Function Code
pass
##### Partial transition function: fun2_1 based on input buffers not self._in_flag_desiredRobotCommandAuto and self._in_flag_desiredRobotCommandTele #####
def transitionFunction_idleBehaviour_fun2_1(self):
self.log("[Behaviour idleBehaviour] -- Calculating Partial Transition Function fun2_1")
# Partial Transition Function - the second layer #
self.log("PARTIAL TRANSITION FUNCTION - SECOND LAYER - idleBehaviour consists of the following partial transition functions (decomposition based on input buffers)")
# Begin - Partial Transition Function Code
self._out_flag_desiredVacuumCommand=True
print("[CS - ballCollector] -- idleBehaviour")
if(self.desiredRobotCommandTele.data=="vacuum turn on"):
self.desiredVacuumCommand.data=vacuum_max
elif(self.desiredRobotCommandTele.data=="vacuum off"):
self.desiredVacuumCommand.data=0
print("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA BBBBBBBB 22222 3333")
# End - Partial Transition Function Code
pass
##### Partial transition function: fun2_2 based on input buffers not (self._in_flag_desiredRobotCommandAuto and not self._in_flag_desiredRobotCommandTele) and not ( not self._in_flag_desiredRobotCommandAuto and self._in_flag_desiredRobotCommandTele) #####
def transitionFunction_idleBehaviour_fun2_2(self):
self.log("[Behaviour idleBehaviour] -- Calculating Partial Transition Function fun2_2")
# Partial Transition Function - the second layer #
self.log("PARTIAL TRANSITION FUNCTION - SECOND LAYER - idleBehaviour consists of the following partial transition functions (decomposition based on input buffers)")
# Begin - Partial Transition Function Code
# Comment generated by RSSL compiler - transition function generated - data from input buffers was not received on time - code not specified
# End - Partial Transition Function Code
pass
##### Decomposition of partial transition function based on input buffers #####
def transitionFunction_idleBehaviour_fun3(self):
self.log("[Behaviour idleBehaviour] -- Calculating Partial Transition Function fun3")
# Partial Transition Function - the first layer #
self.log("PARTIAL TRANSITION FUNCTION - FIRST LAYER - idleBehaviour consists of the following partial transition functions (decomposition based on input buffers)")
if self._in_flag_sensorInfo:
self.transitionFunction_idleBehaviour_fun3_0()
elif not (self._in_flag_sensorInfo):
self.transitionFunction_idleBehaviour_fun3_1()
pass
##### Partial transition function: fun3_0 based on input buffers self._in_flag_sensorInfo #####
def transitionFunction_idleBehaviour_fun3_0(self):
self.log("[Behaviour idleBehaviour] -- Calculating Partial Transition Function fun3_0")
# Partial Transition Function - the second layer #
self.log("PARTIAL TRANSITION FUNCTION - SECOND LAYER - idleBehaviour consists of the following partial transition functions (decomposition based on input buffers)")
# Begin - Partial Transition Function Code
self._out_flag_desiredMoveCommand=True
print("DDDDDDDDDDDDDDDDDDDDD")
print("[CS - ballCollector] -- idleBehaviour - check sensors: sonars")
flag=False
compas=0
# check sonar nr 1 (North)
if(self.sensorInfo.sonar_1.data < minDistanceToObstacle):
# too close to obstacle
# change direction and change speed
flag=True
compas=180 # move to south
elif(self.sensorInfo.sonar_2.data < minDistanceToObstacle):
# EAST
# change direction and change speed
flag=True
compas=270 # move to west
elif(self.sensorInfo.sonar_3.data < minDistanceToObstacle):
# SOUTH
# change direction and change speed
flag=True
compas=0 # move to north
elif(self.sensorInfo.sonar_4.data < minDistanceToObstacle):
# WEST
# change direction and change speed
flag=True
compas=90 # move to east
if(flag):
self.currentRobotSpeed.data=0
self.desiredMoveCommand.direction.data=compas
self.desiredMoveCommand.desiredSpeed.data=self.currentRobotSpeed.data
# End - Partial Transition Function Code
pass
##### Partial transition function: fun3_1 based on input buffers not (self._in_flag_sensorInfo) #####
def transitionFunction_idleBehaviour_fun3_1(self):
self.log("[Behaviour idleBehaviour] -- Calculating Partial Transition Function fun3_1")
# Partial Transition Function - the second layer #
self.log("PARTIAL TRANSITION FUNCTION - SECOND LAYER - idleBehaviour consists of the following partial transition functions (decomposition based on input buffers)")
# Begin - Partial Transition Function Code
# Comment generated by RSSL compiler - transition function generated - data from input buffers was not received on time - code not specified
# End - Partial Transition Function Code
pass
##### Decomposition of partial transition function based on input buffers #####
def transitionFunction_idleBehaviour_fun4(self):
self.log("[Behaviour idleBehaviour] -- Calculating Partial Transition Function fun4")
# Partial Transition Function - the first layer #
self.log("PARTIAL TRANSITION FUNCTION - FIRST LAYER - idleBehaviour consists of the following partial transition functions (decomposition based on input buffers)")
if self._in_flag_sensorInfo:
self.transitionFunction_idleBehaviour_fun4_0()
elif not (self._in_flag_sensorInfo):
self.transitionFunction_idleBehaviour_fun4_1()
pass
##### Partial transition function: fun4_0 based on input buffers self._in_flag_sensorInfo #####
def transitionFunction_idleBehaviour_fun4_0(self):
self.log("[Behaviour idleBehaviour] -- Calculating Partial Transition Function fun4_0")
# Partial Transition Function - the second layer #
self.log("PARTIAL TRANSITION FUNCTION - SECOND LAYER - idleBehaviour consists of the following partial transition functions (decomposition based on input buffers)")
# Begin - Partial Transition Function Code
self._out_flag_obstacleDetectedTele=True
self._out_flag_obstacleDetectedAuto=True
self._out_flag_ballCollectedAuto=True
self._out_flag_ballCollectedTele=True
self.obstacleDetectedTele.sonar_1.data=self.sensorInfo.sonar_1.data
self.obstacleDetectedTele.sonar_2.data=self.sensorInfo.sonar_2.data
self.obstacleDetectedTele.sonar_3.data=self.sensorInfo.sonar_3.data
self.obstacleDetectedTele.sonar_4.data=self.sensorInfo.sonar_4.data
self.obstacleDetectedAuto.sonar_1.data=self.sensorInfo.sonar_1.data
self.obstacleDetectedAuto.sonar_2.data=self.sensorInfo.sonar_2.data
self.obstacleDetectedAuto.sonar_3.data=self.sensorInfo.sonar_3.data
self.obstacleDetectedAuto.sonar_4.data=self.sensorInfo.sonar_4.data
self.ballCollectedAuto.data=self.sensorInfo.inlet.data
self.ballCollectedTele.data=self.sensorInfo.inlet.data
# End - Partial Transition Function Code
pass
##### Partial transition function: fun4_1 based on input buffers not (self._in_flag_sensorInfo) #####
def transitionFunction_idleBehaviour_fun4_1(self):
self.log("[Behaviour idleBehaviour] -- Calculating Partial Transition Function fun4_1")
# Partial Transition Function - the second layer #
self.log("PARTIAL TRANSITION FUNCTION - SECOND LAYER - idleBehaviour consists of the following partial transition functions (decomposition based on input buffers)")
# Begin - Partial Transition Function Code
# Comment generated by RSSL compiler - transition function generated - data from input buffers was not received on time - | |
runtime)
)
async def edit_order_with_options_async(
self,
request: dingtalkjzcrm__1__0_models.EditOrderRequest,
headers: dingtalkjzcrm__1__0_models.EditOrderHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkjzcrm__1__0_models.EditOrderResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.datatype):
body['datatype'] = request.datatype
if not UtilClient.is_unset(request.stamp):
body['stamp'] = request.stamp
if not UtilClient.is_unset(request.msgid):
body['msgid'] = request.msgid
if not UtilClient.is_unset(request.data):
body['data'] = request.data
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkjzcrm__1__0_models.EditOrderResponse(),
await self.do_roarequest_async('EditOrder', 'jzcrm_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/jzcrm/orders', 'json', req, runtime)
)
def edit_quotation_record(
self,
request: dingtalkjzcrm__1__0_models.EditQuotationRecordRequest,
) -> dingtalkjzcrm__1__0_models.EditQuotationRecordResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkjzcrm__1__0_models.EditQuotationRecordHeaders()
return self.edit_quotation_record_with_options(request, headers, runtime)
async def edit_quotation_record_async(
self,
request: dingtalkjzcrm__1__0_models.EditQuotationRecordRequest,
) -> dingtalkjzcrm__1__0_models.EditQuotationRecordResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkjzcrm__1__0_models.EditQuotationRecordHeaders()
return await self.edit_quotation_record_with_options_async(request, headers, runtime)
def edit_quotation_record_with_options(
self,
request: dingtalkjzcrm__1__0_models.EditQuotationRecordRequest,
headers: dingtalkjzcrm__1__0_models.EditQuotationRecordHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkjzcrm__1__0_models.EditQuotationRecordResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.datatype):
body['datatype'] = request.datatype
if not UtilClient.is_unset(request.stamp):
body['stamp'] = request.stamp
if not UtilClient.is_unset(request.msgid):
body['msgid'] = request.msgid
if not UtilClient.is_unset(request.data):
body['data'] = request.data
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkjzcrm__1__0_models.EditQuotationRecordResponse(),
self.do_roarequest('EditQuotationRecord', 'jzcrm_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/jzcrm/quotationRecords', 'json', req, runtime)
)
async def edit_quotation_record_with_options_async(
self,
request: dingtalkjzcrm__1__0_models.EditQuotationRecordRequest,
headers: dingtalkjzcrm__1__0_models.EditQuotationRecordHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkjzcrm__1__0_models.EditQuotationRecordResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.datatype):
body['datatype'] = request.datatype
if not UtilClient.is_unset(request.stamp):
body['stamp'] = request.stamp
if not UtilClient.is_unset(request.msgid):
body['msgid'] = request.msgid
if not UtilClient.is_unset(request.data):
body['data'] = request.data
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkjzcrm__1__0_models.EditQuotationRecordResponse(),
await self.do_roarequest_async('EditQuotationRecord', 'jzcrm_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/jzcrm/quotationRecords', 'json', req, runtime)
)
def edit_customer_pool(
self,
request: dingtalkjzcrm__1__0_models.EditCustomerPoolRequest,
) -> dingtalkjzcrm__1__0_models.EditCustomerPoolResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkjzcrm__1__0_models.EditCustomerPoolHeaders()
return self.edit_customer_pool_with_options(request, headers, runtime)
async def edit_customer_pool_async(
self,
request: dingtalkjzcrm__1__0_models.EditCustomerPoolRequest,
) -> dingtalkjzcrm__1__0_models.EditCustomerPoolResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkjzcrm__1__0_models.EditCustomerPoolHeaders()
return await self.edit_customer_pool_with_options_async(request, headers, runtime)
def edit_customer_pool_with_options(
self,
request: dingtalkjzcrm__1__0_models.EditCustomerPoolRequest,
headers: dingtalkjzcrm__1__0_models.EditCustomerPoolHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkjzcrm__1__0_models.EditCustomerPoolResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.datatype):
body['datatype'] = request.datatype
if not UtilClient.is_unset(request.stamp):
body['stamp'] = request.stamp
if not UtilClient.is_unset(request.msgid):
body['msgid'] = request.msgid
if not UtilClient.is_unset(request.data):
body['data'] = request.data
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkjzcrm__1__0_models.EditCustomerPoolResponse(),
self.do_roarequest('EditCustomerPool', 'jzcrm_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/jzcrm/customerPools', 'json', req, runtime)
)
async def edit_customer_pool_with_options_async(
self,
request: dingtalkjzcrm__1__0_models.EditCustomerPoolRequest,
headers: dingtalkjzcrm__1__0_models.EditCustomerPoolHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkjzcrm__1__0_models.EditCustomerPoolResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.datatype):
body['datatype'] = request.datatype
if not UtilClient.is_unset(request.stamp):
body['stamp'] = request.stamp
if not UtilClient.is_unset(request.msgid):
body['msgid'] = request.msgid
if not UtilClient.is_unset(request.data):
body['data'] = request.data
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkjzcrm__1__0_models.EditCustomerPoolResponse(),
await self.do_roarequest_async('EditCustomerPool', 'jzcrm_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/jzcrm/customerPools', 'json', req, runtime)
)
def edit_purchase(
self,
request: dingtalkjzcrm__1__0_models.EditPurchaseRequest,
) -> dingtalkjzcrm__1__0_models.EditPurchaseResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkjzcrm__1__0_models.EditPurchaseHeaders()
return self.edit_purchase_with_options(request, headers, runtime)
async def edit_purchase_async(
self,
request: dingtalkjzcrm__1__0_models.EditPurchaseRequest,
) -> dingtalkjzcrm__1__0_models.EditPurchaseResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkjzcrm__1__0_models.EditPurchaseHeaders()
return await self.edit_purchase_with_options_async(request, headers, runtime)
def edit_purchase_with_options(
self,
request: dingtalkjzcrm__1__0_models.EditPurchaseRequest,
headers: dingtalkjzcrm__1__0_models.EditPurchaseHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkjzcrm__1__0_models.EditPurchaseResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.datatype):
body['datatype'] = request.datatype
if not UtilClient.is_unset(request.stamp):
body['stamp'] = request.stamp
if not UtilClient.is_unset(request.msgid):
body['msgid'] = request.msgid
if not UtilClient.is_unset(request.data):
body['data'] = request.data
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkjzcrm__1__0_models.EditPurchaseResponse(),
self.do_roarequest('EditPurchase', 'jzcrm_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/jzcrm/purchases', 'json', req, runtime)
)
async def edit_purchase_with_options_async(
self,
request: dingtalkjzcrm__1__0_models.EditPurchaseRequest,
headers: dingtalkjzcrm__1__0_models.EditPurchaseHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkjzcrm__1__0_models.EditPurchaseResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.datatype):
body['datatype'] = request.datatype
if not UtilClient.is_unset(request.stamp):
body['stamp'] = request.stamp
if not UtilClient.is_unset(request.msgid):
body['msgid'] = request.msgid
if not UtilClient.is_unset(request.data):
body['data'] = request.data
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkjzcrm__1__0_models.EditPurchaseResponse(),
await self.do_roarequest_async('EditPurchase', 'jzcrm_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/jzcrm/purchases', 'json', req, runtime)
)
def edit_intostock(
self,
request: dingtalkjzcrm__1__0_models.EditIntostockRequest,
) -> dingtalkjzcrm__1__0_models.EditIntostockResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkjzcrm__1__0_models.EditIntostockHeaders()
return self.edit_intostock_with_options(request, headers, runtime)
async def edit_intostock_async(
self,
request: dingtalkjzcrm__1__0_models.EditIntostockRequest,
) -> dingtalkjzcrm__1__0_models.EditIntostockResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkjzcrm__1__0_models.EditIntostockHeaders()
return await self.edit_intostock_with_options_async(request, headers, runtime)
def edit_intostock_with_options(
self,
request: dingtalkjzcrm__1__0_models.EditIntostockRequest,
headers: dingtalkjzcrm__1__0_models.EditIntostockHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkjzcrm__1__0_models.EditIntostockResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.datatype):
body['datatype'] = request.datatype
if not UtilClient.is_unset(request.stamp):
body['stamp'] = request.stamp
if not UtilClient.is_unset(request.msgid):
body['msgid'] = request.msgid
if not UtilClient.is_unset(request.data):
body['data'] = request.data
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkjzcrm__1__0_models.EditIntostockResponse(),
self.do_roarequest('EditIntostock', 'jzcrm_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/jzcrm/intostocks', 'json', req, runtime)
)
async def edit_intostock_with_options_async(
self,
request: dingtalkjzcrm__1__0_models.EditIntostockRequest,
headers: dingtalkjzcrm__1__0_models.EditIntostockHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkjzcrm__1__0_models.EditIntostockResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.datatype):
body['datatype'] = request.datatype
if not UtilClient.is_unset(request.stamp):
body['stamp'] = request.stamp
if not UtilClient.is_unset(request.msgid):
body['msgid'] = request.msgid
if not UtilClient.is_unset(request.data):
body['data'] = request.data
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkjzcrm__1__0_models.EditIntostockResponse(),
await self.do_roarequest_async('EditIntostock', 'jzcrm_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/jzcrm/intostocks', 'json', req, runtime)
)
def edit_customer(
self,
request: dingtalkjzcrm__1__0_models.EditCustomerRequest,
) -> dingtalkjzcrm__1__0_models.EditCustomerResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkjzcrm__1__0_models.EditCustomerHeaders()
return self.edit_customer_with_options(request, headers, runtime)
async def edit_customer_async(
self,
request: dingtalkjzcrm__1__0_models.EditCustomerRequest,
) -> dingtalkjzcrm__1__0_models.EditCustomerResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkjzcrm__1__0_models.EditCustomerHeaders()
return await self.edit_customer_with_options_async(request, headers, runtime)
def edit_customer_with_options(
self,
request: dingtalkjzcrm__1__0_models.EditCustomerRequest,
headers: dingtalkjzcrm__1__0_models.EditCustomerHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkjzcrm__1__0_models.EditCustomerResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.datatype):
body['datatype'] = request.datatype
if not UtilClient.is_unset(request.stamp):
body['stamp'] = request.stamp
if not UtilClient.is_unset(request.msgid):
body['msgid'] = request.msgid
if not UtilClient.is_unset(request.data):
body['data'] = request.data
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkjzcrm__1__0_models.EditCustomerResponse(),
self.do_roarequest('EditCustomer', 'jzcrm_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/jzcrm/customers', 'json', req, runtime)
)
async def edit_customer_with_options_async(
self,
request: dingtalkjzcrm__1__0_models.EditCustomerRequest,
headers: dingtalkjzcrm__1__0_models.EditCustomerHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkjzcrm__1__0_models.EditCustomerResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.datatype):
body['datatype'] = request.datatype
if not UtilClient.is_unset(request.stamp):
body['stamp'] = request.stamp
if not UtilClient.is_unset(request.msgid):
body['msgid'] = request.msgid
if not UtilClient.is_unset(request.data):
body['data'] = request.data
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkjzcrm__1__0_models.EditCustomerResponse(),
await self.do_roarequest_async('EditCustomer', 'jzcrm_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/jzcrm/customers', 'json', req, runtime)
)
def get_data_list(
self,
request: dingtalkjzcrm__1__0_models.GetDataListRequest,
) -> dingtalkjzcrm__1__0_models.GetDataListResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkjzcrm__1__0_models.GetDataListHeaders()
return self.get_data_list_with_options(request, headers, runtime)
async def get_data_list_async(
self,
request: dingtalkjzcrm__1__0_models.GetDataListRequest,
) -> dingtalkjzcrm__1__0_models.GetDataListResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkjzcrm__1__0_models.GetDataListHeaders()
return await self.get_data_list_with_options_async(request, headers, runtime)
def get_data_list_with_options(
self,
request: dingtalkjzcrm__1__0_models.GetDataListRequest,
headers: dingtalkjzcrm__1__0_models.GetDataListHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkjzcrm__1__0_models.GetDataListResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.datatype):
query['datatype'] = request.datatype
if not UtilClient.is_unset(request.page):
query['page'] = request.page
if not UtilClient.is_unset(request.pagesize):
query['pagesize'] = request.pagesize
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkjzcrm__1__0_models.GetDataListResponse(),
self.do_roarequest('GetDataList', 'jzcrm_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/jzcrm/data', 'json', req, runtime)
)
async def get_data_list_with_options_async(
self,
request: dingtalkjzcrm__1__0_models.GetDataListRequest,
headers: dingtalkjzcrm__1__0_models.GetDataListHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkjzcrm__1__0_models.GetDataListResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.datatype):
query['datatype'] = request.datatype
if not UtilClient.is_unset(request.page):
query['page'] = request.page
if not UtilClient.is_unset(request.pagesize):
query['pagesize'] = request.pagesize
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkjzcrm__1__0_models.GetDataListResponse(),
await self.do_roarequest_async('GetDataList', 'jzcrm_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/jzcrm/data', 'json', req, runtime)
)
def edit_invoice(
self,
request: dingtalkjzcrm__1__0_models.EditInvoiceRequest,
) -> dingtalkjzcrm__1__0_models.EditInvoiceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkjzcrm__1__0_models.EditInvoiceHeaders()
return self.edit_invoice_with_options(request, headers, runtime)
async def edit_invoice_async(
self,
request: dingtalkjzcrm__1__0_models.EditInvoiceRequest,
) -> dingtalkjzcrm__1__0_models.EditInvoiceResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkjzcrm__1__0_models.EditInvoiceHeaders()
return await self.edit_invoice_with_options_async(request, headers, runtime)
def edit_invoice_with_options(
self,
request: dingtalkjzcrm__1__0_models.EditInvoiceRequest,
headers: dingtalkjzcrm__1__0_models.EditInvoiceHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkjzcrm__1__0_models.EditInvoiceResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.datatype):
body['datatype'] = request.datatype
if not UtilClient.is_unset(request.stamp):
body['stamp'] = request.stamp
if not UtilClient.is_unset(request.msgid):
body['msgid'] = request.msgid
if not UtilClient.is_unset(request.data):
body['data'] = request.data
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = | |
# @Author: <NAME>
# @Email: <EMAIL>
# @Filename: spherical_brute_force.py
# @Last modified by: <NAME>
# @Last modified time: 31-Jul-2018
"""Obtain clusters and calculate meta-features.
Args:
dataset_filename (string): path to the dataset
Predefined types:
Point (dict): {'coordinate': (float, ...), 'label': int}
Dataset (list): list of dict objects:
[Point, ...]
Vertex (tuple): Point['coordinate']
Vertices (list): [Vertex, ...]
Output files:
dataset_filename.output.json: calculated meta-features.
dataset_filename.clusters.json: calculated clusters.
dataset_filename.log: log file
"""
import argparse
import collections
import json
import logging
import logging.handlers
import math
import os
import numpy
import meta_features
# Smallest representable positive double; stands in for radius 0 so log() stays finite.
INFINITESIMAL = 1e-323
# Use half the available cores for worker processes.
PROCESS_COUNT = int(os.cpu_count() / 2)
def initialize_logger(
        name='LOG',
        filename=None,
        level=logging.DEBUG,
        filemode='a'):
    """Create and configure a named logger.

    Args:
        name (string, optional): Logger name. Defaults to 'LOG'.
        filename (string, optional): Path of the log file; when None the
            logger streams to standard error instead.
        level (logging level, optional): Defaults to logging.DEBUG.
        filemode (string, optional): 'w' (overwrite) or 'a' (append).

    Returns:
        tuple: (logger, handler) — the configured logger and the handler
        that was attached to it (so callers can remove it later).
    """
    log_format = '%(asctime)s %(levelname)s\n' + \
        '    %(filename)s:%(lineno)s: %(name)s: %(message)s'
    if filename is None:
        log_handler = logging.StreamHandler()
    else:
        log_handler = logging.handlers.RotatingFileHandler(
            filename=filename, mode=filemode)
    log_handler.setFormatter(logging.Formatter(log_format))
    log = logging.getLogger(name)
    log.addHandler(log_handler)
    log.setLevel(level)
    return log, log_handler
def load_dataset(filename):
    """Load data from a csv file.

    Args:
        filename (string): path of input file.
            CSV format, one instance per line:
            [coordinate, ...] + [label]

    Returns:
        Dataset: list of {'coordinate': tuple of float, 'label': int}
    """
    # Fix: the original opened the file without ever closing it (handle leak).
    with open(filename, 'r') as handle:
        content = handle.read().strip().rstrip()
    dataset = []
    for line in content.split('\n'):
        fields = line.strip().rstrip().split(',')
        dataset.append({
            'coordinate': tuple(map(float, fields[:-1])),
            'label': int(fields[-1]),
        })
    return dataset
def initialize_cluster(coordinates):
    """Build a spherical cluster covering the given coordinates.

    A factory function: the sphere is centred on the mean of the points and
    its radius is the distance to the farthest point.

    Args:
        coordinates (list): The coordinates to include: [Vertex, ...]

    Returns:
        dict: {
            'centroid' (Vertex): centre of the sphere,
            'radius' (float): radius of the sphere,
            'points' (list): [Vertex, ...] covered by the sphere,
            'size' (int): number of covered instances,
            'log-volume' (float): log of the sphere's volume
        }
    """
    arrays = [numpy.array(vertex) for vertex in coordinates]
    centroid = sum(arrays) / len(arrays)
    radius = max(numpy.linalg.norm(vertex - centroid) for vertex in arrays)
    return {
        'centroid': tuple(centroid),
        'radius': radius,
        'points': coordinates,
        'size': len(coordinates),
        'log-volume': calculate_log_volume(len(centroid), radius),
    }
def calculate_distance(lhs, rhs):
    """Return the Euclidean distance between two points.

    Args:
        lhs, rhs (Vertex): coordinates of the two points

    Returns:
        float: Euclidean distance between them
    """
    difference = numpy.array(lhs) - numpy.array(rhs)
    return numpy.linalg.norm(difference)
def calculate_log_volume(dimension, radius):
    """Return the log-volume of a *dimension*-sphere with the given radius.

    Args:
        dimension (int): dimension of the space
        radius (float): radius of the sphere; a radius of (approximately)
            zero is replaced by INFINITESIMAL so the logarithm stays finite

    Returns:
        float: log of the sphere's volume

    Raises:
        ValueError: when the log-volume cannot be computed or is NaN
    """
    if math.isclose(radius, 0):
        radius = INFINITESIMAL
    half_dimension = dimension / 2.0
    try:
        log_volume = (half_dimension * math.log(math.pi)
                      + dimension * math.log(radius)
                      - math.lgamma(half_dimension + 1))
    except ValueError as message:
        raise ValueError("".join([
            "{0}\n".format(message),
            "(({0} / 2.0) * ln(pi) + ({0} * ln({1})".format(dimension, radius),
            " - ln(gamma({0} / 2.0 + 1)))".format(dimension)]))
    if math.isnan(log_volume):
        raise ValueError(
            "Volume is NaN: pi ^ " +
            "({0} / 2.0) / gamma({0} / 2.0 + 1) * {1} ^ {0}".format(
                dimension, radius))
    return log_volume
def float_less_or_equal(lhs, rhs, **kwargs):
    """Return True when float ``lhs`` <= ``rhs`` within tolerance.

    Equality is decided by numpy.isclose() with its default (or supplied)
    tolerances, so values a hair above ``rhs`` still count as equal.

    Args:
        lhs, rhs (float): values to compare
        kwargs: forwarded to numpy.isclose()

    Returns:
        bool: result of the tolerant comparison
    """
    approximately_equal = numpy.isclose(lhs, rhs, **kwargs)
    return approximately_equal or (lhs < rhs)
def check_inside_cluster(cluster, point):
    """Return True when *point* lies within the cluster's bounding sphere.

    Args:
        cluster (dict): cluster with 'centroid' (Vertex) and 'radius' (float)
        point (Vertex): point to be checked

    Returns:
        bool: whether the point is encompassed by the boundary
    """
    distance_to_centre = calculate_distance(cluster['centroid'], point)
    return float_less_or_equal(distance_to_centre, cluster['radius'])
def check_homogeneity(cluster, label, clusters):
    """Check that *cluster* does not overlap any differently-labelled cluster.

    A homogeneous cluster may overlap clusters of the same label, but never a
    cluster of another label — otherwise a region of the space would be
    ambiguous to categorise.

    Args:
        cluster (dict): cluster with 'centroid' (Vertex) and 'radius' (float)
        label: label of the cluster
        clusters (dict): {label: [cluster, ...]} of existing clusters

    Returns:
        bool: True when the cluster is homogeneous
    """
    # Two spheres overlap when the centre distance does not exceed the sum
    # of their radii.
    return not any(
        float_less_or_equal(
            calculate_distance(cluster['centroid'], other['centroid']),
            cluster['radius'] + other['radius'])
        for other_label, others in clusters.items() if other_label != label
        for other in others)
def check_homogeneity_instances(indices, dataset):
    """Return True when all points referenced by *indices* share one label."""
    distinct_labels = {dataset[index]['label'] for index in indices}
    return len(distinct_labels) <= 1
def sub_partitions(indices, n, current):
    """Yield every way to split *indices* into exactly *n* non-empty groups.

    Args:
        indices (list): items still to be assigned; mutated in place by the
            swap trick below but restored before each yield sequence ends
        n (int): the number of groups to produce
        current (list): the group being grown for the recursion in progress
    """
    r = len(indices)
    # Base case: one group takes everything that is left.
    if n == 1:
        yield [list(indices)]
        return
    # Base case: as many groups as items — each item forms its own group,
    # with `current` extended by one of them in turn.
    if n == r:
        for i, index in enumerate(indices):
            tmp = [current + [index]]
            tmp.extend(list(map(lambda x: [x], indices[:i] + indices[i + 1:])))
            yield tmp
        return
    # Recursive case 1: close the group containing indices[0] and partition
    # the remainder into n - 1 groups.
    for other in sub_partitions(indices[1:], n - 1, []):
        tmp = [current + [indices[0]]]
        tmp.extend(other)
        yield tmp
    # Recursive case 2: keep growing the group containing indices[0].  Each
    # candidate partner is swapped into position 1 so the recursion always
    # consumes indices[1:]; the swap is undone afterwards to restore order.
    for index in range(1, len(indices)):
        indices[1], indices[index] = indices[index], indices[1]
        for tmp in sub_partitions(indices[1:], n, current + [indices[0]]):
            yield tmp
        indices[1], indices[index] = indices[index], indices[1]
    return
def partition(indices):
    """Yield every partition of *indices* into 1..len(indices) groups."""
    for group_count in range(1, len(indices) + 1):
        # Pass a copy: sub_partitions mutates its argument in place.
        yield from sub_partitions(list(indices), group_count, [])
def clustering(dataset, logger):
    """Calculate the smallest set of pure spherical clusters.

    Exhaustively enumerates partitions of the dataset (smallest group counts
    first) and keeps the first grouping in which every group forms a sphere
    that is pure (one label) and does not overlap a differently-labelled
    sphere.

    Args:
        dataset (list): all instances in the space, [Point, ...]
        logger (logger): logger for progress reporting

    Returns:
        dict: clusters keyed by label:
            label: [{
                'centroid' (Vertex), 'radius' (float),
                'points' (list of Vertex), 'size' (int),
                'log-volume' (float)
            }, ...]
    """
    logger.info('Sorting datasets...')
    # Sort so the partition enumeration is deterministic for a given dataset.
    dataset.sort(key=lambda x: x['coordinate'])
    clusters = collections.defaultdict(list)
    instances = [instance['coordinate'] for instance in dataset]
    count = 0
    found_count = 0
    # Upper bound on the useful number of groups: one cluster per instance.
    minimum = len(instances)
    logger.info('Checking clusters...')
    for groups in partition(list(range(len(dataset)))):
        tmp_clusters = collections.defaultdict(list)
        # partition() yields group counts in increasing order, so once a
        # grouping exceeds the best size found, no better one can follow.
        if len(groups) > minimum:
            logger.info('Minimum found. #groups: {}'.format(len(groups)))
            break
        for indices in groups:
            cluster = initialize_cluster(list(
                map(lambda x: instances[x], indices)))
            label = dataset[indices[0]]['label']
            # Reject the whole grouping as soon as one group is impure or
            # overlaps a differently-labelled sphere already accepted.
            if (not check_homogeneity(cluster, label, tmp_clusters)
                    or not check_homogeneity_instances(indices, dataset)):
                break
            tmp_clusters[label].append(cluster)
        else:
            # for/else: every group passed — record this grouping as best.
            minimum = len(groups)
            clusters = tmp_clusters
            logger.info('Minimum updated. #{} group'.format(count))
            found_count += 1
            logger.info(
                'One option found. Total till now: {}'.format(found_count))
        count += 1
        if count % 50 == 0:
            logger.info('{} groupings checked'.format(count))
    return clusters
def main(args):
    """
    Start main function here.
    Dispatching all the tasks to process.
    """
    log_file = args.log
    logger, handler = initialize_logger("Parent", log_file)
    logger.info('Start: Version 2.1.1')
    logger.debug('Logger initialized')
    logger.debug('argparse: %r', args)
    logger.removeHandler(handler)
    # One task tuple per dataset: (dataset, clusters-out, features-out, log).
    task_args = [
        (dataset_filename,
         dataset_filename + ".clusters.json",
         dataset_filename + ".output.json",
         log_file)
        for dataset_filename in args.paths
    ]
    for task in task_args:
        task_processing(task)
def task_processing(args):  # Take note here!!!
    """Unwrap the args tuple to adapt a function with multiple args to map.

    Args:
        args (tuple): (dataset_filename, clusters_filename, output_filename,
            log_file), as assembled by main().
    """
    def worker(
            dataset_filename,
            clusters_filename,
            output_filename,
            log_file):
        """Link the submodules to process one dataset end to end."""
        logger, handler = initialize_logger(dataset_filename, log_file)
        logger.debug('Logger initialized')
        logger.debug('Loading dataset')
        dataset = load_dataset(dataset_filename)
        logger.info('Dataset loaded')
        logger.info('Trying to load clusters from %s', clusters_filename)
        clusters = None
        # Reuse previously computed clusters when a valid cache file exists.
        try:
            clusters = json.load(open(clusters_filename, 'r'))
        except FileNotFoundError:
            logger.warning('Clusters data file not found')
        except json.decoder.JSONDecodeError:
            logger.warning('File broken. Not Json Decodable')
        if not clusters:
            logger.debug('Clustering data points')
            clusters = clustering(dataset, logger)
            logger.debug(
                'Dumping clusters data into json file: %s', clusters_filename)
            json.dump(clusters, open(clusters_filename, 'w'))
            logger.info('Data points clustered')
        logger.debug('Calculating meta-feature indicators')
        features = meta_features.meta_features(clusters)
        logger.debug(
            'Dumping meta-feature indicators into json file: %s',
            clusters_filename)
        json.dump(features, open(output_filename, 'w'))
        logger.info('Meta-feature indicators calculated')
        logger.info('Complete')
        logger.removeHandler(handler)
    return worker(*args)
def traverse(paths):
    """Traverse directories breadth-first and collect all the data files.

    Skips generated/auxiliary files (json, log, png, html, .DS_Store) and
    recurses into every subdirectory.

    Args:
        paths (list): root directories to scan (caller's list is not mutated)

    Returns:
        list: '{dir}/{name}' paths of all collected data files
    """
    print("Starting Traverse Through", flush=True)
    files = []
    # Fix: the original tested filename markers BEFORE os.path.isdir, so any
    # subdirectory whose name lacked those markers was appended as a "file"
    # and never recursed into.  Check for directories first.
    excluded_markers = ('.json', '.log', '.DS_Store', '.png', '.html')
    queue = list(paths)
    while queue:
        path = queue.pop(0)
        for entry in os.listdir(path):
            full_path = '{0}/{1}'.format(path, entry)
            if os.path.isdir(full_path):
                queue.append(full_path)
            elif not any(marker in entry for marker in excluded_markers):
                files.append(full_path)
    print("Traverse Completed.", flush=True)
    return files
def parse_args():
"""Parse all necessary args."""
parser = argparse.ArgumentParser(
description="Obtain clusters and calculate meta-features")
parser.add_argument('-r', action='store', nargs='+',
default=[], | |
each sentence
"""
try:
import mishkal.tashkeel
# Load vocalizer
if not hasattr(self, "vocalizer"):
vocalizer = mishkal.tashkeel.TashkeelClass()
setattr(self, "vocalizer", vocalizer)
# Add diacritics
text = vocalizer.tashkeel(text)
except ImportError:
_LOGGER.warning("mishkal is highly recommended for language 'ar'")
_LOGGER.warning("pip install 'mishkal>=0.4.0'")
yield from super().text_to_tokens(text)
class ArabicPhonemizer(SqlitePhonemizer):
    """Phonemizer for Arabic (اَلْعَرَبِيَّةُ)"""

    def __init__(self, lang_dir: typing.Union[str, Path], **kwargs):
        self.lang_dir = lang_dir
        # Arabic comma/question mark are used for the break sets.
        breaks = dict(minor_breaks={"،", ":", ";"}, major_breaks={".", "؟", "!"})
        super().__init__(**breaks, **kwargs)
# -----------------------------------------------------------------------------
# cs-cz
# -----------------------------------------------------------------------------
# Czech clause-level pauses (minor) and sentence-final breaks (major).
CZECH_MINOR_BREAKS = {",", ":", ";"}
CZECH_MAJOR_BREAKS = {".", "?", "!"}
class CzechTokenizer(RegexTokenizer):
    """Tokenizer for Czech (čeština)"""

    def __init__(
        self,
        lang_dir: typing.Union[str, Path],
        use_number_converters: bool = False,
        do_replace_currency: bool = True,
        **kwargs,
    ):
        self.lang_dir = Path(lang_dir)
        currency_names = get_currency_names("cs_CZ")
        currency_names["€"] = "EUR"
        # Normalize quotes, drop brackets, normalize the apostrophe.
        replacements = [
            ("\\B'", '"'),  # replace single quotes
            ("'\\B", '"'),
            ('[\\<\\>\\(\\)\\[\\]"]+', ""),  # drop brackets/quotes
            ("’", "'"),  # normalize apostrophe
        ]
        punctuations = {'"', ",", ";", ":", ".", "?", "!", "„", "“", "”", "«", "»"}
        super().__init__(
            replacements=replacements,
            punctuations=punctuations,
            minor_breaks=CZECH_MINOR_BREAKS,
            major_breaks=CZECH_MAJOR_BREAKS,
            casing_func=str.lower,
            num2words_lang="cs_CZ",
            babel_locale="cs_CZ",
            currency_names=currency_names,
            use_number_converters=use_number_converters,
            do_replace_currency=do_replace_currency,
            **kwargs,
        )
class CzechPhonemizer(SqlitePhonemizer):
    """Phonemizer for Czech (čeština)"""

    def __init__(self, lang_dir: typing.Union[str, Path], **kwargs):
        self.lang_dir = lang_dir
        breaks = dict(
            minor_breaks=CZECH_MINOR_BREAKS, major_breaks=CZECH_MAJOR_BREAKS)
        super().__init__(**breaks, **kwargs)
# -----------------------------------------------------------------------------
# de-de
# -----------------------------------------------------------------------------
# German clause-level pauses (minor) and sentence-final breaks (major).
GERMAN_MINOR_BREAKS = {",", ":", ";"}
GERMAN_MAJOR_BREAKS = {".", "?", "!"}
class GermanTokenizer(RegexTokenizer):
    """Tokenizer for German (Deutsch)"""

    def __init__(
        self,
        lang_dir: typing.Union[str, Path],
        use_number_converters: bool = False,
        do_replace_currency: bool = True,
        **kwargs,
    ):
        self.lang_dir = Path(lang_dir)
        currency_names = get_currency_names("de_DE")
        currency_names["€"] = "EUR"
        # Normalize quotes and drop brackets before tokenizing.
        replacements = [
            ("\\B'", '"'),  # replace single quotes
            ("'\\B", '"'),
            ('[\\<\\>\\(\\)\\[\\]"]+', ""),  # drop brackets/quotes
        ]
        punctuations = {
            '"', ",", ";", ":", ".", "?", "!",
            "„", "“", "”", "«", "»", "’",
        }
        super().__init__(
            replacements=replacements,
            punctuations=punctuations,
            minor_breaks=GERMAN_MINOR_BREAKS,
            major_breaks=GERMAN_MAJOR_BREAKS,
            casing_func=str.lower,
            num2words_lang="de_DE",
            babel_locale="de_DE",
            currency_names=currency_names,
            use_number_converters=use_number_converters,
            do_replace_currency=do_replace_currency,
            **kwargs,
        )
class GermanPhonemizer(SqlitePhonemizer):
    """Phonemizer for German (Deutsch)"""

    def __init__(self, lang_dir: typing.Union[str, Path], **kwargs):
        self.lang_dir = lang_dir
        breaks = dict(
            minor_breaks=GERMAN_MINOR_BREAKS, major_breaks=GERMAN_MAJOR_BREAKS)
        super().__init__(**breaks, **kwargs)
# -----------------------------------------------------------------------------
# en-us, en-gb
# -----------------------------------------------------------------------------
# Punctuation characters emitted as their own tokens for English.
ENGLISH_PUNCTUATIONS = {'"', ",", ";", ":", ".", "?", "!", "“", "”", "«", "»", "-"}
# Clause-level pauses (minor) and sentence-final breaks (major).
ENGLISH_MINOR_BREAKS = {",", ":", ";"}
ENGLISH_MAJOR_BREAKS = {".", "?", "!"}
class EnglishTokenizer(RegexTokenizer):
    """Tokenizer for English"""

    def __init__(
        self,
        lang_dir: typing.Union[str, Path],
        use_number_converters: bool = False,
        do_replace_currency: bool = True,
        **kwargs,
    ):
        self.lang_dir = Path(lang_dir)
        currency_names = get_currency_names("en_US")
        currency_names["$"] = "USD"
        # Quote/bracket cleanup plus English-specific expansions.
        replacements = [
            ("\\B'", '"'),  # replace single quotes
            ("'\\B", '"'),
            ('[\\<\\>\\(\\)\\[\\]"]+', ""),  # drop brackets/quotes
            ("&", " and "),
            ("’", "'"),  # normalize apostrophe
            ("(\\d+(?:[,.]\\d+)*)%\\B", "\\1 percent"),
        ]
        # Common title/term abbreviations expanded before phonemization.
        abbreviations = {
            "capt.": "captain",
            "co.": "company",
            "col.": "colonel",
            "dr.": "doctor",
            "drs.": "doctors",
            "esq.": "esquire",
            "ft.": "fort",
            "gen.": "general",
            "hon.": "honorable",
            "jr.": "junior",
            "ltd.": "limited",
            "lt.": "lieutenant",
            "maj.": "major",
            "mr.": "mister",
            "mrs.": "misess",
            "rev.": "reverend",
            "sgt.": "sergeant",
            "st.": "saint",
        }
        super().__init__(
            replacements=replacements,
            abbreviations=abbreviations,
            punctuations=ENGLISH_PUNCTUATIONS,
            minor_breaks=ENGLISH_MINOR_BREAKS,
            major_breaks=ENGLISH_MAJOR_BREAKS,
            casing_func=str.lower,
            num2words_lang="en_US",
            babel_locale="en_US",
            currency_names=currency_names,
            use_number_converters=use_number_converters,
            do_replace_currency=do_replace_currency,
            **kwargs,
        )
class EnglishPhonemizer(SqlitePhonemizer):
    """Phonemizer for English"""

    def __init__(self, lang_dir: typing.Union[str, Path], **kwargs):
        self.lang_dir = lang_dir
        # Collapse fine-grained Penn Treebank tags onto their base POS tag.
        pos_map = {
            "NNS": "NN",
            "NNP": "NN",
            "NNPS": "NN",
            "PRP$": "PRP",
            "RBR": "RB",
            "RBS": "RB",
            "VBG": "VB",
            "VBN": "VB",
            "VBP": "VB",
            "VBZ": "VB",
            "JJR": "JJ",
            "JJS": "JJ",
        }
        super().__init__(
            minor_breaks=ENGLISH_MINOR_BREAKS,
            major_breaks=ENGLISH_MAJOR_BREAKS,
            feature_map={TokenFeatures.PART_OF_SPEECH: pos_map},
            **kwargs,
        )
# -----------------------------------------------------------------------------
# es-es
# -----------------------------------------------------------------------------
# Spanish clause-level pauses (minor) and sentence-final breaks (major).
SPANISH_MINOR_BREAKS = {",", ":", ";"}
SPANISH_MAJOR_BREAKS = {".", "?", "!"}
class SpanishTokenizer(RegexTokenizer):
    """Tokenizer for Spanish (Español)"""

    def __init__(
        self,
        lang_dir: typing.Union[str, Path],
        use_number_converters: bool = False,
        do_replace_currency: bool = True,
        **kwargs,
    ):
        self.lang_dir = Path(lang_dir)
        currency_names = get_currency_names("es_ES")
        currency_names["€"] = "EUR"
        # Normalize quotes, drop brackets, normalize the apostrophe.
        replacements = [
            ("\\B'", '"'),  # replace single quotes
            ("'\\B", '"'),
            ('[\\<\\>\\(\\)\\[\\]"]+', ""),  # drop brackets/quotes
            ("’", "'"),  # normalize apostrophe
        ]
        # Includes the Spanish inverted question/exclamation marks.
        punctuations = {
            '"', ",", ";", ":", ".",
            "?", "¿", "!", "¡",
            "„", "“", "”", "«", "»",
        }
        super().__init__(
            replacements=replacements,
            punctuations=punctuations,
            minor_breaks=SPANISH_MINOR_BREAKS,
            major_breaks=SPANISH_MAJOR_BREAKS,
            casing_func=str.lower,
            num2words_lang="es_ES",
            babel_locale="es_ES",
            currency_names=currency_names,
            use_number_converters=use_number_converters,
            do_replace_currency=do_replace_currency,
            **kwargs,
        )
class SpanishPhonemizer(SqlitePhonemizer):
    """Phonemizer for Spanish (Español)"""

    def __init__(self, lang_dir: typing.Union[str, Path], **kwargs):
        self.lang_dir = lang_dir
        breaks = dict(
            minor_breaks=SPANISH_MINOR_BREAKS, major_breaks=SPANISH_MAJOR_BREAKS)
        super().__init__(**breaks, **kwargs)
# -----------------------------------------------------------------------------
# fa
# -----------------------------------------------------------------------------
class FarsiTokenizer(RegexTokenizer):
    """Tokenizer for Farsi/Persian (فارسی).

    Uses hazm (when installed) for sentence splitting, word tokenization and
    part-of-speech tagging; otherwise falls back to the regex tokenizer.
    """

    def __init__(
        self,
        lang_dir: typing.Union[str, Path],
        use_number_converters: bool = False,
        do_replace_currency: bool = True,
        **kwargs,
    ):
        self.lang_dir = Path(lang_dir)
        currency_names = get_currency_names("fa")
        super().__init__(
            replacements=[
                ("\\B'", '"'),  # replace single quotes
                ("'\\B", '"'),
                ('[\\<\\>\\(\\)\\[\\]"]+', ""),  # drop brackets/quotes
            ],
            # Includes Arabic comma (،) and question mark (؟) alongside their
            # Latin equivalents.
            punctuations={
                '"',
                "„",
                "“",
                "”",
                "«",
                "»",
                "’",
                ",",
                "،",
                ":",
                ";",
                ".",
                "?",
                "؟",
                "!",
            },
            minor_breaks={"،", ":", ";"},
            major_breaks={".", "؟", "!"},
            casing_func=str.lower,
            num2words_lang="fa",
            babel_locale="fa",
            currency_names=currency_names,
            use_number_converters=use_number_converters,
            do_replace_currency=do_replace_currency,
            **kwargs,
        )

    def text_to_tokens(
        self, text: str
    ) -> typing.Iterable[typing.Tuple[typing.List[str], typing.List[Token]]]:
        """
        Process text into words and sentence tokens using hazm.

        Yields (original_words, sentence_tokens) for each sentence.
        Falls back to the parent implementation when hazm is not installed.
        """
        try:
            import hazm

            # Lazily create and cache the normalizer/tagger on the instance.
            # FIX: the previous version bound these to locals only when the
            # cached attribute was missing, so every call after the first
            # raised NameError; always go through the attributes instead.
            if not hasattr(self, "normalizer"):
                setattr(self, "normalizer", hazm.Normalizer())

            if not hasattr(self, "tagger"):
                # Load part of speech tagger
                model_path = self.lang_dir / "pos" / "postagger.model"
                setattr(self, "tagger", hazm.POSTagger(model=str(model_path)))

            sentences = hazm.sent_tokenize(self.normalizer.normalize(text))
            for sentence in sentences:
                original_words = []
                sentence_tokens = []
                for word, pos in self.tagger.tag(hazm.word_tokenize(sentence)):
                    original_words.append(word)
                    sentence_tokens.append(
                        Token(text=word, features={TokenFeatures.PART_OF_SPEECH: pos})
                    )

                yield original_words, sentence_tokens
        except ImportError:
            _LOGGER.warning("hazm is highly recommended for language 'fa'")
            _LOGGER.warning("pip install 'hazm>=0.7.0'")

            # Fall back to parent implementation
            yield from super().text_to_tokens(text)
class FarsiPhonemizer(SqlitePhonemizer):
    """Phonemizer for Farsi/Persian (فارسی)"""
    def __init__(self, lang_dir: typing.Union[str, Path], **kwargs):
        self.lang_dir = lang_dir
        super().__init__(
            minor_breaks={"،", ":", ";"}, major_breaks={".", "؟", "!"}, **kwargs
        )
    def post_phonemize(
        self, token: Token, token_pron: WordPronunciation
    ) -> WORD_PHONEMES:
        """Post-process tokens/pronunciation after phonemization (called in phonemize).

        Appends the genitive marker "e̞" when the token is tagged "Ne".
        NOTE(review): "Ne" presumably comes from the hazm tagset used by
        FarsiTokenizer — confirm against the tagger model.
        """
        phonemes = super().post_phonemize(token, token_pron)
        # Genitive case
        pos = token.features.get(TokenFeatures.PART_OF_SPEECH)
        if pos == "Ne":
            if isinstance(phonemes, list):
                # Mutable list: append in place, then fall through to return.
                phonemes.append("e̞")
            else:
                # Immutable sequence (e.g. tuple): return a new list copy.
                return list(phonemes) + ["e̞"]
        return phonemes
# -----------------------------------------------------------------------------
# fr-fr
# -----------------------------------------------------------------------------
# Punctuation that yields a short mid-sentence pause (minor break) vs. an
# end-of-sentence pause (major break) when tokenizing/phonemizing French.
FRENCH_MINOR_BREAKS = {",", ":", ";"}
FRENCH_MAJOR_BREAKS = {".", "?", "!"}
class FrenchTokenizer(RegexTokenizer):
    """Tokenizer for French (Français).

    Expands common French abbreviations to their spoken form, normalizes
    quotes/apostrophes, and converts numbers/currency for the fr_FR locale.
    """

    def __init__(
        self,
        lang_dir: typing.Union[str, Path],
        use_number_converters: bool = False,
        do_replace_currency: bool = True,
        **kwargs,
    ):
        self.lang_dir = Path(lang_dir)
        currency_names = get_currency_names("fr_FR")
        currency_names["€"] = "EUR"
        super().__init__(
            replacements=[
                ("\\B'", '"'),  # replace single quotes
                ("'\\B", '"'),
                ('[\\<\\>\\(\\)\\[\\]"]+', ""),  # drop brackets/quotes
                ("’", "'"),  # normalize apostrophe
            ],
            # Abbreviation -> spoken expansion, applied before phonemization.
            abbreviations={
                "M.": "monsieur",
                "Mlle.": "mademoiselle",
                "Mlles.": "mesdemoiselles",
                "Mme.": "Madame",
                "Mmes.": "Mesdames",
                "N.B.": "nota bene",
                "p.c.q.": "parce que",
                "Pr.": "professeur",
                "qqch.": "quelque chose",
                "rdv.": "rendez-vous",
                "max.": "maximum",
                "min.": "minimum",
                "no.": "numéro",
                "adr.": "adresse",
                "dr.": "docteur",
                "st.": "saint",
                "co.": "compagnie",  # FIX: was misspelled "companie"
                "jr.": "junior",
                "sgt.": "sergent",
                "capt.": "capitaine",  # FIX: was misspelled "capitain"
                "col.": "colonel",
                "av.": "avenue",
                # FIX: was a scrubbed "<NAME>" placeholder; reconstructed as
                # the counterpart of "apr. J.-C." below.
                "av. J.-C.": "avant Jésus-Christ",
                "apr. J.-C.": "après Jésus-Christ",
                "art.": "article",
                "boul.": "boulevard",
                "c.-à-d.": "c’est-à-dire",
                "etc.": "et cetera",
                "ex.": "exemple",
                "excl.": "exclusivement",
                "Mlle": "mademoiselle",
                "Mlles": "mesdemoiselles",
                "Mme": "Madame",
                "Mmes": "Mesdames",
            },
            punctuations={'"', ",", ";", ":", ".", "?", "!", "„", "“", "”", "«", "»"},
            minor_breaks=FRENCH_MINOR_BREAKS,
            major_breaks=FRENCH_MAJOR_BREAKS,
            casing_func=str.lower,
            num2words_lang="fr_FR",
            babel_locale="fr_FR",
            currency_names=currency_names,
            use_number_converters=use_number_converters,
            do_replace_currency=do_replace_currency,
            **kwargs,
        )
class FrenchPhonemizer(SqlitePhonemizer):
"""Phonemizer for French (Français)"""
    def __init__(
        self, lang_dir: typing.Union[str, Path], no_liason: bool = False, **kwargs
    ):
        """
        Args:
            lang_dir: directory holding this language's lexicon/model files
            no_liason: when True, phonemize() skips liaison insertion
                (note: "liason" spelling is part of the public parameter name)
        """
        self.lang_dir = lang_dir
        self.no_liason = no_liason
        super().__init__(
            minor_breaks=FRENCH_MINOR_BREAKS, major_breaks=FRENCH_MAJOR_BREAKS, **kwargs
        )
def phonemize(
self, tokens: typing.Sequence[TOKEN_OR_STR], **kwargs
) -> typing.Iterable[WORD_PHONEMES]:
"""Add liasons to a sentence by examining word texts, parts of speech, and phonemes."""
token_phonemes = super().phonemize(tokens, **kwargs)
if self.no_liason:
# Liasons disabled
yield from token_phonemes
if self.word_break:
# Filter out word breaks
token_phonemes = [ps for ps in token_phonemes if ps != [self.word_break]]
# First word break
yield [self.word_break]
for (token1, token1_pron), (token2, token2_pron) in pairwise(
zip(
itertools.chain(tokens, [None]), itertools.chain(token_phonemes, [None])
)
):
if token2 is None:
# Last token
yield token1_pron
continue
liason = False
# Conditions to meet for liason check:
# 1) token 1 ends with a silent consonant
# 2) token 2 starts with a vowel (phoneme)
last_char1 = token1.text[-1]
ends_silent_consonant = FrenchPhonemizer._has_silent_consonant(
last_char1, token1_pron[-1]
)
starts_vowel = FrenchPhonemizer._is_vowel(token2_pron[0])
token1_pos = token1.features.get(TokenFeatures.PART_OF_SPEECH)
token2_pos = token2.features.get(TokenFeatures.PART_OF_SPEECH)
if ends_silent_consonant and starts_vowel:
# Handle mandatory liason cases
# https://www.commeunefrancaise.com/blog/la-liaison
if token1.text == "et":
# No liason
pass
elif token1_pos in {"DET", "NUM"}:
# Determiner/adjective -> noun
liason = True
elif (token1_pos == "PRON") and (token2_pos in {"AUX", "VERB"}):
# Pronoun -> verb
liason = True
elif (token1_pos == "ADP") or (token1.text == "très"):
# Preposition
liason = True
elif (token1_pos == "ADJ") and (token2_pos in {"NOUN", "PROPN"}):
# Adjective -> noun
liason = True
elif token1_pos in {"AUX", "VERB"}:
# Verb -> vowel
liason = True
if liason:
# Apply liason
# s -> z
# p -> p
# d|t -> d
liason_pron = token1_pron
if last_char1 in {"s", "x", "z"}:
liason_pron.append("z")
elif last_char1 == "d":
liason_pron.append("t")
elif last_char1 in {"t", "p", "n"}:
# Final phoneme is same as char
liason_pron.append(last_char1)
yield liason_pron
else:
# Keep pronunciations the same
yield token1_pron
if self.word_break:
# Add word breaks back in
| |
>= 45:
self.sector_name += ' ' + chr(ord(self.word[0]) - 32) + self.word[1:len(self.word)]
elif name_variance >= 35:
self.sector_name += ' ' + 'Alpha'
elif name_variance >= 25:
self.sector_name += ' ' + 'Beta'
elif name_variance >= 15:
self.sector_name += ' ' + 'Gamma'
elif name_variance >= 10:
self.sector_name += ' ' + 'Reaches'
elif name_variance >= 7:
self.sector_name += ' ' + 'Sector'
elif name_variance >= 6:
self.sector_name += ' ' + 'Frontier'
elif name_variance >= 4:
self.sector_name += ' ' + 'UNEXPLORED'
elif name_variance >= 2:
self.sector_name += ' ' + 'UNKNOWN'
#print(self.sector_name)
mapper_file_out.write('\n\n# ' + self.sector_name)
mapper_file_out.write('\n# 0,0')
mapper_file_out.write('\n\n# Name: ' + self.sector_name)
mapper_file_out.write('\n\n# Milieu: M1105')
mapper_file_out.write('\n\n# Credits: ' + self.sector_name + ' sector was randomly generated by ' + __app__)
mapper_file_out.write('\n\n# Author: <NAME> https://www.youtube.com/user/ShawnDriscollCG')
mapper_file_out.write('\n# Source: Rules from Mongoose Traveller 2nd Edition, and a smidgen from Traveller 5.10\n')
# Name the subsectors
#self.subsect_name = []
for subs in range(16):
proper = False
while not(proper):
temp = self.CC
while temp == self.CC:
temp = self.syllable_type[randint(1, len(self.syllable_type)) - 1]
self.s_type = temp
self.pick_sound()
self.word = self.sound
building = True
while building:
syllable = self.syllable_type[randint(1, len(self.syllable_type)) - 1]
while temp == self.CC and (syllable == self.CV or syllable == self.CVC or syllable == self.CC):
syllable = self.syllable_type[randint(1, len(self.syllable_type)) - 1]
while temp == self.V and (syllable == self.V or syllable == self.VC):
syllable = self.syllable_type[randint(1, len(self.syllable_type)) - 1]
while temp == self.CV and (syllable == self.V or syllable == self.VC):
syllable = self.syllable_type[randint(1, len(self.syllable_type)) - 1]
while temp == self.VC and (syllable == self.CV or syllable == self.CVC or syllable == self.CC):
syllable = self.syllable_type[randint(1, len(self.syllable_type)) - 1]
while temp == self.CVC and (syllable == self.CV or syllable == self.CVC or syllable == self.CC):
syllable = self.syllable_type[randint(1, len(self.syllable_type)) - 1]
if temp == self.VC or temp == self.CVC:
building = False
else:
self.s_type = syllable
self.pick_sound()
self.word += self.sound
temp = syllable
if len(self.word) > 3 and len(self.word) < 14:
proper = True
mapper_file_out.write('\n# Subsector ' + self.core_code[subs] + ': ' + chr(ord(self.word[0]) - 32) + self.word[1:len(self.word)])
#self.subsect_name.append(chr(ord(self.word[0]) - 32) + self.word[1:len(self.word)])
#print(self.subsect_name)
mapper_file_out.write('\n\n# Alleg: As: "<NAME>"')
mapper_file_out.write('\n# Alleg: Cs: "Client State"')
mapper_file_out.write('\n# Alleg: Im: "Third Imperium"')
mapper_file_out.write('\n# Alleg: Na: "Non-Aligned, Human-dominated"')
mapper_file_out.write('\n# Alleg: So: "Solomani Confederation"')
mapper_file_out.write('\n# Alleg: Va: "Non-Aligned, Vargr-dominated"')
mapper_file_out.write('\n# Alleg: Zh: "Zhodani Consulate"')
mapper_file_out.write('\n\nHex Name UWP Remarks {Ix} (Ex) [Cx] N B Z PBG W A Stellar')
mapper_file_out.write( '\n---- -------------------- --------- -------------------- ------ ------- ------ - -- - --- -- ---- --------------')
self.worlds_rolled = 0
# Make the worlds
for hex_grid_col in range(32):
for hex_grid_row in range(40):
if roll('d100') <= self.stellar_density:
# Get World Name
proper = False
while not(proper):
temp = self.CC
while temp == self.CC:
temp = self.syllable_type[randint(1, len(self.syllable_type)) - 1]
self.s_type = temp
self.pick_sound()
self.word = self.sound
building = True
while building:
syllable = self.syllable_type[randint(1, len(self.syllable_type)) - 1]
while temp == self.CC and (syllable == self.CV or syllable == self.CVC or syllable == self.CC):
syllable = self.syllable_type[randint(1, len(self.syllable_type)) - 1]
while temp == self.V and (syllable == self.V or syllable == self.VC):
syllable = self.syllable_type[randint(1, len(self.syllable_type)) - 1]
while temp == self.CV and (syllable == self.V or syllable == self.VC):
syllable = self.syllable_type[randint(1, len(self.syllable_type)) - 1]
while temp == self.VC and (syllable == self.CV or syllable == self.CVC or syllable == self.CC):
syllable = self.syllable_type[randint(1, len(self.syllable_type)) - 1]
while temp == self.CVC and (syllable == self.CV or syllable == self.CVC or syllable == self.CC):
syllable = self.syllable_type[randint(1, len(self.syllable_type)) - 1]
if temp == self.VC or temp == self.CVC:
building = False
else:
self.s_type = syllable
self.pick_sound()
self.word += self.sound
temp = syllable
if len(self.word) > 3 and len(self.word) < 14:
proper = True
self.main_world_name = chr(ord(self.word[0]) - 32) + self.word[1:len(self.word)]
# Find Location and Subsector for Main World
self.main_world_location = self.core_code[(hex_grid_col) // 8 + ((hex_grid_row) // 10) * 4]
#print(self.main_world_location)
self.main_world_location = ''
if hex_grid_col + 1 < 10:
self.main_world_location += '0' + self.hex_code[hex_grid_col + 1]
else:
self.main_world_location += str(hex_grid_col + 1)
if hex_grid_row + 1 < 10:
self.main_world_location += '0' + self.hex_code[hex_grid_row + 1]
else:
self.main_world_location += str(hex_grid_row + 1)
#print(self.main_world_location)
# Roll for Main World Size
self.main_world_size = roll('2D-2')
# from Traveller5
if self.main_world_size == 10:
if self.super_earth_chance:
self.main_world_size = roll('1D+9')
#print(self.main_world_size)
# Roll for Main World Atmosphere
self.main_world_atmosphere = roll('FLUX') + self.main_world_size
# from Traveller5
if self.main_world_atmosphere < 0 or self.main_world_size == 0:
self.main_world_atmosphere = 0
if self.main_world_atmosphere > 15:
self.main_world_atmosphere = 15
# Roll for Main World Hydrographics
if self.hydro_calc_method == 'Based on Atmosphere':
# from Mongoose Traveller 2nd Edition and Traveller5
self.main_world_hydrographics = roll('FLUX') + self.main_world_atmosphere
else:
# from something else
self.main_world_hydrographics = roll('FLUX') + self.main_world_size
if self.main_world_hydrographics < 0:
self.main_world_hydrographics = 0
if self.main_world_hydrographics > 10:
self.main_world_hydrographics = 10
# Size and Atmosphere DMs added to Hydrographics
if self.main_world_size < 2:
self.main_world_hydrographics = 0
if self.main_world_atmosphere < 2 or \
self.main_world_atmosphere == 10 or \
self.main_world_atmosphere == 11 or \
self.main_world_atmosphere == 12:
self.main_world_hydrographics += -4
# Main World Temperature?
self.main_world_temperature = roll('2d6') + self.world_temperature_dm[self.main_world_atmosphere]
# Temperature DMs added to Hydrographics
if self.main_world_atmosphere != 13 and self.main_world_atmosphere != 15:
if self.main_world_temperature >= 10 and self.main_world_temperature <= 11:
self.main_world_hydrographics += -2
if self.main_world_temperature >= 12:
self.main_world_hydrographics += -6
if self.main_world_hydrographics < 0:
self.main_world_hydrographics = 0
# Roll for Main World Population
#self.main_world_population = roll('2D6-2')
#self.main_world_population = roll('4D4-4')
self.main_world_population = roll('2d6-2')
# Roll for Main World Government
if self.main_world_population > 0:
self.main_world_government = roll('FLUX') + self.main_world_population
if self.main_world_government < 0:
self.main_world_government = 0
if self.main_world_government > 15:
self.main_world_government = 15
else:
self.main_world_government = 0
# Roll for Law Level
if self.main_world_population > 0:
self.main_world_law_level = roll('FLUX') + self.main_world_government
if self.main_world_law_level < 0:
self.main_world_law_level = 0
if self.main_world_law_level > 15:
self.main_world_law_level = 15
else:
self.main_world_law_level = 0
# Roll for Starport
self.main_world_starport = roll('2D6')
if self.main_world_population >= 10:
self.main_world_starport += 2
elif self.main_world_population >= 8:
self.main_world_starport += 1
elif self.main_world_population <= 2:
self.main_world_starport += -2
elif self.main_world_population <= 4:
self.main_world_starport += -1
# Roll for Technology Level
if self.main_world_population > 0:
self.main_world_tech_level = roll('1D6')
# Add DMs
self.main_world_tech_level += self.starport_value[self.main_world_starport]
self.main_world_tech_level += self.size_value[self.main_world_size]
self.main_world_tech_level += self.atmosphere_value[self.main_world_atmosphere]
self.main_world_tech_level += self.hydro_value[self.main_world_hydrographics]
self.main_world_tech_level += self.population_value[self.main_world_population]
self.main_world_tech_level += self.government_value[self.main_world_government]
# Adjust Minimum TL for Environmental Limits
if self.minimum_TL[self.main_world_atmosphere] != -1:
if self.main_world_tech_level < self.minimum_TL[self.main_world_atmosphere]:
self.main_world_tech_level = self.minimum_TL[self.main_world_atmosphere]
if self.main_world_tech_level < 0:
self.main_world_tech_level = 0
if self.main_world_tech_level > 15:
self.main_world_tech_level = 15
else:
self.main_world_tech_level = 0
# Calculate Travel Code
self.main_world_travel_code = ''
if (self.main_world_atmosphere >= 10 or (self.main_world_government == 0 or self.main_world_government == 7 \
or self.main_world_government == 10) or (self.main_world_law_level == 0 or self.main_world_law_level >= 9)) and roll('d6') == 6:
self.main_world_travel_code = ' A'
# Lookup Trade Code(s)
self.trade_index = 0
self.main_world_trade_code = [' ',' ',' ',' ',' ',' ', ' ', ' ', ' ', ' ']
self.main_world_trade_class = [' ',' ',' ',' ',' ',' ', ' ', ' ', ' ', ' ']
if self.main_world_atmosphere >= 4 \
and self.main_world_atmosphere <= 9 \
and self.main_world_hydrographics >= 4 \
and self.main_world_hydrographics <= 8 \
and self.main_world_population >= 5 \
and self.main_world_population <= 7:
self.main_world_trade_code[self.trade_index] = 'Ag'
self.main_world_trade_class[self.trade_index] = 'Agricultural'
self.trade_index += 1
if (self.main_world_atmosphere == 2 \
or self.main_world_atmosphere == 3 \
or self.main_world_atmosphere == 10 \
or self.main_world_atmosphere == 11) \
and self.main_world_hydrographics >= 1 \
and self.main_world_hydrographics <= 5 \
and self.main_world_population >= 3 \
and self.main_world_population <= 6 \
and self.main_world_law_level >= 6 \
and self.main_world_law_level <= 9:
self.main_world_trade_code[self.trade_index] = 'Px'
self.main_world_trade_class[self.trade_index] = 'Prison'
self.main_world_travel_code = ' A'
self.trade_index += 1
if self.main_world_size == 0 \
and self.main_world_atmosphere == 0 \
and self.main_world_hydrographics == 0:
self.main_world_trade_code[self.trade_index] = 'As'
self.main_world_trade_class[self.trade_index] = 'Asteroid'
self.trade_index += 1
if self.main_world_population == 0 \
and self.main_world_government == 0 \
and self.main_world_law_level | |
# awsRESv2.py
import os, sys
import re, time, datetime
import shutil, atexit
import subprocess
import yaml
from awsAPI import aws, print_color
#version 2: backup all termination/creation cli
#version 2.1: add GWLB v1 support
class resource(object):
    """Base class for an AWS resource driven through composed CLI commands.

    Subclasses populate the ``creation``/``termination`` command strings,
    the resource ``ID``, and the optional dependency lists.
    """

    def __init__(self):
        # Names of other resources this one depends on, or None.
        self.creation_dependency = None
        self.termination_dependency = None
        # When True, termination commands are recorded but not executed.
        self.keepAlive = False
        # Composed AWS CLI command strings (filled in by subclasses).
        self.creation = None
        self.termination = None
        # Identifier assigned by AWS after creation.
        self.ID = None

    def get_creation_dependency(self):
        """Return the creation dependencies as a set (empty when unset)."""
        return set(self.creation_dependency or ())

    def get_termination_dependency(self):
        """Return the termination dependencies as a set (empty when unset)."""
        return set(self.termination_dependency or ())

    def get_creation(self):
        """Return the composed creation command (or None)."""
        return self.creation

    def get_termination(self):
        """Return the composed termination command (or None)."""
        return self.termination

    def get_id(self):
        """Return the resource identifier (service name for VPCE_SERVICE)."""
        if self.__class__.__name__ == "VPCE_SERVICE":
            return self.svcName
        return self.ID

    def load_deployment(self, fileName):
        print("No definition of load_deployment in object: ", self.__class__.__name__)

    def exec_creation(self):
        print("No definition of creation in object: ", self.__class__.__name__)

    def exec_termination(self):
        print("No definition of termination in object: ", self.__class__.__name__)
class INTERNET_GATEWAY(resource):
    """Internet gateway resource: wraps `aws ec2 create/delete-internet-gateway`."""
    def __init__(self, tagName, content):
        super().__init__()
        self.name = tagName  # Name tag applied after creation (may be falsy)
        self.raw_yaml = content  # raw YAML mapping describing this resource
        self.creation = "aws ec2 create-internet-gateway"
        self.termination = "aws ec2 delete-internet-gateway"
        self.reName = "aws ec2 create-tags"
        self.ID = None
        self._cmd_composition()
    def _cmd_composition(self):
        """Translate the YAML mapping into CLI flags on the creation command."""
        for key, value in self.raw_yaml.items():
            if key != "action":
                # Each non-action key becomes "--key value"; a "None"/empty
                # value yields a bare "--key" flag.
                if value and value != "None":
                    self.creation += " --" + key + " " + str(value)
                else:
                    self.creation += " --" + key
            else:
                self._action_handler(value)
        # "self.ID" here is a literal placeholder, substituted at exec time.
        self.termination += " --internet-gateway-id" + " " + "self.ID"
        if self.name:
            self.reName += " --tag" + " " + f"Key=Name,Value={self.name}" + " " + "--resources" + " " + "self.ID"
    def _action_handler(self, action_yaml):
        """Interpret the `action` sub-mapping (only cleanUP is recognized)."""
        for key, value in action_yaml.items():
            if key == "cleanUP":
                # cleanUP: true  -> really terminate (keepAlive False)
                # cleanUP: false -> only log the termination command
                self.keepAlive = False if str(value).lower() == "true" else True
    def exec_creation(self, cli_handler):
        """Run the creation command, record the InternetGatewayId, apply the Name tag."""
        res = cli_handler.raw_cli_res(self.creation)
        self.ID = re.compile(r'InternetGatewayId: (.*)').findall(res)[0].strip()
        if self.name:
            self.reName = self.reName.replace("self.ID", str(self.ID))
            cli_handler.raw_cli_res(self.reName)
    def exec_termination(self, cli_handler):
        """Delete the gateway, or only log the command when keepAlive is set."""
        if self.ID:
            self.termination = self.termination.replace("self.ID", str(self.ID))
            if not self.keepAlive:
                cli_handler.raw_cli_res(self.termination)
            else:
                cli_handler.raw_cli_res(self.termination, exec=False)
class VPC(resource):
    """VPC resource; optionally attaches/detaches internet gateways it is bound to."""
    def __init__(self, tagName, content):
        super().__init__()
        self.name = tagName  # Name tag applied after creation (may be falsy)
        self.raw_yaml = content  # raw YAML mapping describing this resource
        self.creation = "aws ec2 create-vpc"
        self.termination = "aws ec2 delete-vpc"
        self.reName = "aws ec2 create-tags"
        self.attach = "aws ec2 attach-internet-gateway"
        self.detach = "aws ec2 detach-internet-gateway"
        self.ID = None
        self._cmd_composition()
    def _cmd_composition(self):
        """Translate the YAML mapping into CLI flags on the creation command."""
        for key, value in self.raw_yaml.items():
            if key != "action":
                if value and value != "None":
                    self.creation += " --" + key + " " + str(value)
                else:
                    self.creation += " --" + key
            else:
                self._action_handler(value)
        # "self.ID" and "{IGW_ID}" are literal placeholders replaced at exec time.
        self.attach += " --vpc-id" + " " + "self.ID" + " " + "--internet-gateway-id" + " " + "{IGW_ID}"
        self.detach += " --vpc-id" + " " + "self.ID" + " " + "--internet-gateway-id" + " " + "{IGW_ID}"
        self.termination += " --vpc-id" + " " + "self.ID"
        if self.name:
            self.reName += " --tag" + " " + f"Key=Name,Value={self.name}" + " " + "--resources" + " " + "self.ID"
    def _action_handler(self, action_yaml):
        """Interpret the `action` sub-mapping (bind_to dependencies, cleanUP)."""
        for key, value in action_yaml.items():
            if key == "bind_to":
                # Normalize a single dependency name into a list.
                if type(value) == str:
                    self.creation_dependency = [value]
                else:
                    self.creation_dependency = value
            elif key == "cleanUP":
                # cleanUP: true -> really terminate; false -> only log commands.
                self.keepAlive = False if str(value).lower() == "true" else True
    def exec_creation(self, cli_handler):
        """Create the VPC, tag it, and attach any bound internet gateways."""
        res = cli_handler.raw_cli_res(self.creation)
        self.ID = re.compile(r'VpcId: (.*)').findall(res)[0].strip()
        if self.name:
            self.reName = self.reName.replace("self.ID", str(self.ID))
            cli_handler.raw_cli_res(self.reName)
        if self.attach and self.creation_dependency:
            for igw in self.creation_dependency:
                res_obj = cli_handler.res_deployment[igw]
                if type(res_obj).__name__ == "INTERNET_GATEWAY":
                    # Substitute the real VPC and IGW ids into the template.
                    self.attach = re.sub(r"self.ID", self.ID, self.attach)
                    igw_id = cli_handler.find_id(igw)
                    self.attach = re.sub(r"\{.*?\}", igw_id, self.attach)
                    cli_handler.raw_cli_res(self.attach)
    def exec_termination(self, cli_handler):
        """Detach bound internet gateways, then delete the VPC (or log only)."""
        if self.ID:
            if self.detach and self.creation_dependency:
                for igw in self.creation_dependency:
                    res_obj = cli_handler.res_deployment[igw]
                    if type(res_obj).__name__ == "INTERNET_GATEWAY":
                        self.detach = re.sub(r"self.ID", self.ID, self.detach)
                        igw_id = cli_handler.find_id(igw)
                        self.detach = re.sub(r"\{.*?\}", igw_id, self.detach)
                        if not self.keepAlive:
                            cli_handler.raw_cli_res(self.detach)
                        else:
                            cli_handler.raw_cli_res(self.detach, exec=False)
            self.termination = self.termination.replace("self.ID", str(self.ID))
            if not self.keepAlive:
                cli_handler.raw_cli_res(self.termination)
            else:
                cli_handler.raw_cli_res(self.termination, exec=False)
class SECURITY_GROUP(resource):
    """Security group resource plus its ingress/egress rule commands."""

    def __init__(self, tagName, content):
        super().__init__()
        self.name = tagName  # group name and Name tag
        self.raw_yaml = content  # raw YAML mapping describing this resource
        self.creation = f"aws ec2 create-security-group --group-name {self.name}"
        self.termination = "aws ec2 delete-security-group"
        self.reName = "aws ec2 create-tags"
        self.rules = []  # authorize-* commands run right after creation
        self.ID = None
        self._cmd_composition()

    def _cmd_composition(self):
        """Translate the YAML mapping into CLI flags on the creation command."""
        for key, value in self.raw_yaml.items():
            if key != "action":
                if value and value != "None":
                    # FIX: coerce to str before the substring test — YAML
                    # values can be non-strings (ints/bools), and
                    # `" " in value` raised TypeError for those.
                    value = str(value)
                    # Quote values containing spaces (e.g. descriptions).
                    value = '"' + value + '"' if " " in value else value
                    self.creation += " --" + key + " " + value
                else:
                    self.creation += " --" + key
            else:
                self._action_handler(value)
        # "self.ID" is a literal placeholder, substituted at exec time.
        self.termination += " --group-id" + " " + "self.ID"
        if self.name:
            self.reName += " --tag" + " " + f"Key=Name,Value={self.name}" + " " + "--resources" + " " + "self.ID"

    def _action_handler(self, action_yaml):
        """Interpret the `action` sub-mapping (bind_to, rules, cleanUP)."""
        for key, value in action_yaml.items():
            if key == "bind_to":
                # Normalize a single dependency name into a list.
                if type(value) == str:
                    self.creation_dependency = [value]
                else:
                    self.creation_dependency = value
            elif key == "authorize-security-group-ingress":
                for rule in value:
                    cmd = "aws ec2 authorize-security-group-ingress --group-id self.ID"
                    for key2, value2 in rule.items():
                        cmd += " --" + key2 + " " + str(value2)
                    self.rules.append(cmd)
            elif key == "authorize-security-group-egress":
                for rule in value:
                    cmd = "aws ec2 authorize-security-group-egress --group-id self.ID"
                    for key3, value3 in rule.items():
                        cmd += " --" + key3 + " " + str(value3)
                    self.rules.append(cmd)
            elif key == "cleanUP":
                # cleanUP: true -> really terminate; false -> only log commands.
                self.keepAlive = False if str(value).lower() == "true" else True

    def exec_creation(self, cli_handler):
        """Create the group inside its bound VPC, tag it, and apply its rules."""
        if self.creation_dependency:
            for vpc in self.creation_dependency:
                res_obj = cli_handler.res_deployment[vpc]
                if type(res_obj).__name__ == "VPC":
                    # Rewrite the --vpc-id flag with the real VPC id.
                    vpc_id = cli_handler.find_id(vpc)
                    str_vpcID = f"--vpc-id {vpc_id}"
                    self.creation = re.sub(r"--vpc-id .*?(?=( --|$))", str_vpcID, self.creation)
        res = cli_handler.raw_cli_res(self.creation)
        self.ID = re.compile(r'GroupId: (.*)').findall(res)[0].strip()
        if self.name:
            self.reName = self.reName.replace("self.ID", str(self.ID))
            cli_handler.raw_cli_res(self.reName)
        for rule in self.rules:
            rule = rule.replace("self.ID", str(self.ID))
            cli_handler.raw_cli_res(rule)

    def exec_termination(self, cli_handler):
        """Delete the security group (or only log the command when keepAlive)."""
        if self.ID:
            self.termination = self.termination.replace("self.ID", str(self.ID))
            if not self.keepAlive:
                cli_handler.raw_cli_res(self.termination)
            else:
                cli_handler.raw_cli_res(self.termination, exec=False)
class SUBNET(resource):
    """Subnet resource; inherits VPC id and availability zone from bound resources."""
    def __init__(self, tagName, content):
        super().__init__()
        self.name = tagName  # Name tag applied after creation (may be falsy)
        self.raw_yaml = content  # raw YAML mapping describing this resource
        self.creation = "aws ec2 create-subnet"
        self.termination = "aws ec2 delete-subnet"
        self.reName = "aws ec2 create-tags"
        self.ID = None
        self._cmd_composition()
    def _cmd_composition(self):
        """Translate the YAML mapping into CLI flags on the creation command."""
        for key, value in self.raw_yaml.items():
            if key != "action":
                if value and value != "None":
                    self.creation += " --" + key + " " + str(value)
                else:
                    self.creation += " --" + key
            else:
                self._action_handler(value)
        # "self.ID" is a literal placeholder, substituted at exec time.
        self.termination += " --subnet-id" + " " + "self.ID"
        if self.name:
            self.reName += " --tag" + " " + f"Key=Name,Value={self.name}" + " " + "--resources" + " " + "self.ID"
    def _action_handler(self, action_yaml):
        """Interpret the `action` sub-mapping (bind_to dependencies, cleanUP)."""
        for key, value in action_yaml.items():
            if key == "bind_to":
                # Normalize a single dependency name into a list.
                if type(value) == str:
                    self.creation_dependency = [value]
                else:
                    self.creation_dependency = value
            elif key == "cleanUP":
                # cleanUP: true -> really terminate; false -> only log commands.
                self.keepAlive = False if str(value).lower() == "true" else True
    def exec_creation(self, cli_handler):
        """Create the subnet, inheriting VPC id / AZ from bound resources, then tag it."""
        if self.creation_dependency:
            for res in self.creation_dependency:
                res_obj = cli_handler.res_deployment[res]
                if type(res_obj).__name__ == "VPC":
                    # Rewrite the --vpc-id flag with the bound VPC's real id.
                    vpc_id = cli_handler.find_id(res)
                    str_vpcID = f"--vpc-id {vpc_id}"
                    self.creation = re.sub(r"--vpc-id .*?(?=( --|$))", str_vpcID, self.creation)
                if type(res_obj).__name__ == "SUBNET": #Yijun
                    # Binding to another subnet copies its availability zone so
                    # both subnets land in the same AZ.
                    sub_id = cli_handler.find_id(res)
                    cmd = f"aws ec2 describe-subnets --subnet-ids {sub_id}"
                    resp = cli_handler.raw_cli_res(cmd)
                    pattern = "AvailabilityZone: (.*)"
                    zone = re.compile(pattern).findall(resp)[0].strip()
                    str_zoneID = f"--availability-zone {zone}"
                    self.creation = re.sub(r"--availability-zone .*?(?=( --|$))", str_zoneID, self.creation)
        res = cli_handler.raw_cli_res(self.creation)
        self.ID = re.compile(r'SubnetId: (.*)').findall(res)[0].strip()
        if self.name:
            self.reName = self.reName.replace("self.ID", str(self.ID))
            cli_handler.raw_cli_res(self.reName)
    def exec_termination(self, cli_handler):
        """Delete the subnet, retrying every 5s while AWS reports dependencies."""
        if self.ID:
            self.termination = self.termination.replace("self.ID", str(self.ID))
            if not self.keepAlive:
                while True:
                    res = cli_handler.raw_cli_res(self.termination)
                    if "has dependencies and cannot be deleted" in res:
                        # Dependent resources (ENIs etc.) still tearing down.
                        time.sleep(5)
                    else:
                        break
            else:
                res = cli_handler.raw_cli_res(self.termination, exec=False)
class GATEWAY_LOAD_BALANCE(resource):
    """Gateway load balancer resource (elbv2); subnets come from bound resources."""
    def __init__(self, tagName, content):
        super().__init__()
        self.name = tagName  # load balancer name (required by the CLI)
        self.raw_yaml = content  # raw YAML mapping describing this resource
        self.creation = f"aws elbv2 create-load-balancer --name {self.name}"
        self.termination = "aws elbv2 delete-load-balancer"
        self.ID = None  # holds the LoadBalancerArn after creation
        self._cmd_composition()
    def _cmd_composition(self):
        """Translate the YAML mapping into CLI flags on the creation command."""
        for key, value in self.raw_yaml.items():
            if key != "action":
                if value and value != "None":
                    self.creation += " --" + key + " " + str(value)
                else:
                    self.creation += " --" + key
            else:
                self._action_handler(value)
        # "self.ID" is a literal placeholder, substituted at exec time.
        self.termination += " --load-balancer-arn" + " " + "self.ID"
    def _action_handler(self, action_yaml):
        """Interpret the `action` sub-mapping (bind_to dependencies, cleanUP)."""
        for key, value in action_yaml.items():
            if key == "bind_to":
                # Normalize a single dependency name into a list.
                if type(value) == str:
                    self.creation_dependency = [value]
                else:
                    self.creation_dependency = value
            elif key == "cleanUP":
                # cleanUP: true -> really terminate; false -> only log commands.
                self.keepAlive = False if str(value).lower() == "true" else True
    def exec_creation(self, cli_handler):
        """Create the load balancer across all bound subnets; record its ARN."""
        if self.creation_dependency:
            str_subID = "--subnets"
            for sub in self.creation_dependency:
                res_obj = cli_handler.res_deployment[sub]
                if type(res_obj).__name__ == "SUBNET":
                    sub_id = cli_handler.find_id(sub)
                    str_subID += " " + sub_id
            # Only rewrite the --subnets flag when at least one subnet matched.
            if str_subID != "--subnets":
                self.creation = re.sub(r"--subnets .*?(?=( --|$))", str_subID, self.creation)
        res = cli_handler.raw_cli_res(self.creation)
        self.ID = re.compile(r'LoadBalancerArn: (.*)').findall(res)[0].strip()
    def exec_termination(self, cli_handler):
        """Delete the load balancer (or only log the command when keepAlive)."""
        if self.ID:
            self.termination = self.termination.replace("self.ID", str(self.ID))
            if not self.keepAlive:
                cli_handler.raw_cli_res(self.termination)
            else:
                cli_handler.raw_cli_res(self.termination, exec=False)
class TARGET_GROUP(resource):
def __init__(self, tagName, content):
super().__init__()
self.name = tagName
self.raw_yaml = content
self.creation = f"aws elbv2 create-target-group --name {self.name}"
self.termination = "aws elbv2 delete-target-group"
self.tg_type = None
self.ID = None
| |
# repo: ramsuthar305/SwachhHab59
from flask import render_template, request, redirect, url_for, session, escape, Flask, jsonify
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from bson import regex, ObjectId
from random import randint
import json
import pathlib
import hashlib
import requests
import random
import os
from datetime import datetime
from flask_pymongo import PyMongo
import re
import smtplib
import math
import smtplib
from werkzeug.utils import secure_filename
from fastai.vision import *
# import cv2
import numpy as np
import base64
from PIL import Image
from io import BytesIO
from math import radians, cos, sin, asin, sqrt
import herepy
import ast
from os.path import join, dirname, realpath
# --- Flask application setup -------------------------------------------------
port = 5000
# host = "192.168.43.95"
host = "0.0.0.0"  # listen on all interfaces
app = Flask(__name__)
# SECURITY(review): hard-coded MongoDB Atlas URI (credential scrubbed to
# "<EMAIL>" by dataset sanitization) — should come from the environment.
app.config["MONGO_URI"] = "mongodb+srv://ramsuthar305:<EMAIL>/mitsor?retryWrites=true&w=majority"
mongo = PyMongo(app)
app.config['uploads'] = join(dirname(realpath(__file__)), "uploads")
app.config['SESSION_TYPE'] = 'memcached'
# SECURITY(review): hard-coded session secret; move to an environment variable.
app.config['SECRET_KEY'] = '5234124584324'
# SECURITY(review): hard-coded Basic-auth credentials (base64 "user:password").
headers = {'Authorization': 'Basic cm9vdDo2NjIyNDQ2Ng==',
           'Content-Type': 'application/json'}
UPLOAD_FOLDER = "../static/uploads/"
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
class JSONEncoder(json.JSONEncoder):
    """JSON encoder that serializes MongoDB ObjectId values as strings."""

    def default(self, o):
        # Only ObjectId needs special handling; defer everything else to the
        # base encoder (which raises TypeError for unknown types).
        if isinstance(o, ObjectId):
            return str(o)
        return json.JSONEncoder.default(self, o)
@app.route('/api/register', methods=['POST'])
def api_register():
    """Register a new grievance user from a JSON body; rejects duplicate emails."""
    if request.method != 'POST':
        # Route only allows POST; mirror the original's implicit no-op.
        return None

    payload = json.loads(request.get_data())
    email = payload["user_email"]

    # Refuse registration when the email is already taken.
    if mongo.db.grievance_users.find_one({"user_email": email}):
        return jsonify({"status": 'failed', "message": "User already exist!"})

    mongo.db.grievance_users.insert_one(payload)
    return jsonify({"status": 'success', "message": "registered successfully!"})
@app.route('/api/login', methods=['POST'])
def api_login():
    """Log a general user in via JSON body; admins are rejected from the API login."""
    if request.method == 'POST':
        if 'logged_in' in session:
            return jsonify({"status": 'user is already logged.'})
        else:
            data = request.get_data()
            data = json.loads(data)
            user_name = data['user_email']
            # NOTE(review): key reconstructed from the scrubbed placeholder
            # '<PASSWORD>'; the query below matches on "user_password".
            user_password = data['user_password']
            # user_name may be either an email or a phone number.
            loginuser = mongo.db.grievance_users.find_one({"$and": [{"$or": [{"user_email": user_name}, {
                "user_phone": user_name}]}, {"user_password": user_password}]})
            # FIX: find_one returns None on no match; the old code crashed
            # with TypeError before reaching the invalid-credential branch.
            if loginuser is None:
                return jsonify({"status": 'failed', "message": "invalid Credential!"})
            if loginuser['user_type'] == 'general':
                # Strip sensitive/non-serializable fields before returning.
                del loginuser["user_password"]
                del loginuser["_id"]
                session['username'] = loginuser["user_email"]
                session['logged_in'] = True
                return jsonify({"data": loginuser, "status": "user logged in succesffully"})
            elif loginuser['user_type'] == 'admin':
                # Admins must use the web login, not the API.
                return jsonify({"status": 'failed', "message": "Restricted login"})
            else:
                return jsonify({"status": 'failed', "message": "invalid Credential!"})
@app.route('/api/logout', methods=['GET'])
def api_logout():
    """Clear the API session; safe to call even when not logged in."""
    # Fix: `del session[...]` raised KeyError when a key was absent
    # (e.g. logout called twice); pop with a default is idempotent.
    session.pop('logged_in', None)
    session.pop('username', None)
    return jsonify({"status": "user logged out in succesffully"})
@app.route("/login", methods=['POST', 'GET'])
def login():
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
try:
loginuser = mongo.db.grievance_users.find_one({"$and": [{"$or": [{"user_email": username}, {
"user_phone": username}]}, {"user_password": password}]})
if loginuser['user_type'] == 'admin':
print('\n\ntrue:')
session['admin_area'] = loginuser['user_area']
session['admin_login'] = True
grievance_all = list(mongo.db.grievance.find(
{'assigned_authority': loginuser['user_area']}))
return redirect('/index')
else:
CONTEXT_msg = 'Entered Username and password do not match. Please Retry!'
return render_template("loginpage.html", CONTEXT_msg=CONTEXT_msg)
except Exception as e:
print(e)
return render_template('loginpage.html')
grievance_all = list(mongo.db.grievance.find(
{'assigned_authority': session['admin_area']}))
return render_template('problems.html', all=[grievance_all, session['admin_area']])
else:
CONTEXT_msg = ''
return render_template("loginpage.html", CONTEXT_msg=CONTEXT_msg)
@app.route('/logout')
def logout():
    """Clear the admin session and return to the login page."""
    # Fix: the original `del session['logged_in']` always raised KeyError for
    # admins, because login() sets 'admin_login'/'admin_area' — not
    # 'logged_in'/'username'. Pop every session key either flow may have set.
    for key in ('logged_in', 'username', 'admin_login', 'admin_area'):
        session.pop(key, None)
    return redirect('/login')
@app.route('/upload')
def upload():
    # Render the grievance-upload form.
    return render_template('add.html')
@app.route('/hello/<send_mail>', methods=['GET', 'POST'])
def predict(send_mail="no"):
    """Classify every unpredicted grievance image and store its type.

    send_mail: "yes" to notify the department responsible for the last
    classified grievance. Returns a placeholder string.
    """
    send_mail_to = None
    learn = None  # classifier, built lazily and shared across all pending items
    classes = ['garbage', 'pothole', 'sewage']
    # Department contact per predicted class.
    dept_mail = {
        "sewage": "<EMAIL>",
        "garbage": "<EMAIL>",
        "pothole": "<EMAIL>",
    }
    grievance_all = list(mongo.db.grievance.find({"grievance_type": "unpredicted"}))
    for i in grievance_all:
        # Fix: the DataBunch/learner were rebuilt and the weights reloaded on
        # EVERY iteration; build once, only if there is work to do.
        if learn is None:
            data = ImageDataBunch.single_from_classes(
                Path(''), classes, ds_tfms=get_transforms(), size=240
            ).normalize(imagenet_stats)
            learn = cnn_learner(data, models.resnet101, metrics=error_rate)
            learn.load('phase1')
        pred_class, pred_idx, outputs = learn.predict(open_image(i['image_link']))
        label = str(pred_class)
        print(label)
        send_mail_to = dept_mail.get(label, send_mail_to)
        mongo.db.grievance.find_one_and_update(
            {'grievance_id': i["grievance_id"]},
            {'$set': {"grievance_type": label}})
    # Fix: guard against sendMail(None, ...) when nothing was classified.
    if send_mail == "yes" and send_mail_to is not None:
        sendMail(send_mail_to, "New Grievance Received",
                 "Your department has received a grievance. Please check your portel for details.")
    return "smd"
def distance(lat1, lat2, lon1, lon2):
    """Great-circle (haversine) distance between two points, in kilometres.

    Note the argument order: both latitudes first, then both longitudes.
    """
    phi1, phi2 = radians(lat1), radians(lat2)
    lam1, lam2 = radians(lon1), radians(lon2)
    half_dphi = (phi2 - phi1) / 2
    half_dlam = (lam2 - lam1) / 2
    chord = sin(half_dphi) ** 2 + cos(phi1) * cos(phi2) * sin(half_dlam) ** 2
    earth_radius_km = 6371
    return 2 * asin(sqrt(chord)) * earth_radius_km
@app.route('/api/history/<email>', methods=['GET'])
def history(email):
    # All grievances filed by this user, with lazily-resolved area labels and
    # absolute image URLs for the mobile client.
    user_grievances = list(mongo.db.grievance.find({"user_id": email}))
    for doc in user_grievances:
        if 'area' not in doc or doc['area'] == "unpredicted":
            resolved = getLocationDetails(float(doc['latitude']), float(doc['longitude']))
            mongo.db.grievance.find_one_and_update(
                {'grievance_id': doc["grievance_id"]}, {'$set': {"area": resolved}})
        del doc["_id"]
        doc["id"] = doc["grievance_id"]
        # image_link is stored as "./static/..."; drop the leading dot.
        doc["image_link"] = "http://" + host + ":5000" + doc["image_link"][1:]
    print(user_grievances)
    return jsonify({"status": 'success', "data": user_grievances})
@app.route('/uploader', methods=['GET', 'POST'])
def uploader():
    # Accepts a JSON grievance payload whose 'image_link' field is a
    # base64-encoded image; stores the image under ./static/uploads/, adds
    # server-side bookkeeping fields, and triggers classification.
    if request.method == 'POST':
        data = request.get_data()
        data = json.loads(data)
        # Decode the uploaded image from base64.
        im = Image.open(BytesIO(base64.b64decode(data['image_link'])))
        # NOTE(review): str(datetime.now()) puts spaces and colons into the
        # file name — fine on POSIX, invalid on Windows — TODO confirm target.
        image_link='./static/uploads/'+str(datetime.now())+str(data['grievance_id'])+'.jpeg'
        im.save(image_link, 'JPEG')
        # Replace the base64 payload with the stored file path and add defaults.
        data["image_link"] = image_link
        data["assigned_authority"] = "null"
        data["assigned_date"] = str(datetime.now())
        data["status"] = "unsolved"
        data["timestamp"] = str(datetime.now())
        data["area"] = "unpredicted"
        mongo.db.grievance.insert_one(data)
        # Classify the new image synchronously (slow: loads the ML model).
        predict()
        return jsonify({"status": 'success', "message": "registered successfully!"})
@app.route('/records')
def records():
    """Render a table of every grievance in the database."""
    # Fix: the local was named `all`, shadowing the builtin. The template
    # kwarg must stay `all` because table.html references that name.
    grievances = list(mongo.db.grievance.find())
    return render_template('table.html', all=grievances)
@app.route('/reports')
def reports():
    """Aggregate monthly grievance counts per type for the admin dashboard.

    Produces, per type, a 12-slot (Jan..Dec) count list plus totals, and
    overall solved/unsolved counts per month.
    """
    grievance_all = list(mongo.db.grievance.find())
    # Fix: the three per-type branches were identical copy-pasted blocks;
    # one table-driven loop replaces them.
    monthly = {"sewage": [0] * 12, "pothole": [0] * 12, "garbage": [0] * 12}
    totals = {"sewage": 0, "pothole": 0, "garbage": 0}
    solved = [0] * 12
    unsolved = [0] * 12
    for g in grievance_all:
        gtype = g.get("grievance_type")
        if gtype not in monthly:
            continue  # unclassified ("unpredicted") records were skipped before too
        # assigned_date is "YYYY-MM-DD HH:MM:SS..."; only the month is used.
        month = int(g["assigned_date"].split(" ")[0].split("-")[1])
        monthly[gtype][month - 1] += 1
        totals[gtype] += 1
        if g["status"] == "unsolved":
            unsolved[month - 1] += 1
        else:
            solved[month - 1] += 1
    # NOTE: the template expects the misspelled kwarg `seewage`; kept for
    # backward compatibility with admin.html.
    return render_template('admin.html', garbage=monthly["garbage"], seewage=monthly["sewage"],
                           pothole=monthly["pothole"], solved=solved, unsolved=unsolved,
                           totalsewage=totals["sewage"], totalgarbage=totals["garbage"],
                           totalpothole=totals["pothole"])
def getLocationDetails(latitude, longitude):
    """Reverse-geocode a lat/lon pair into a human-readable address label."""
    latitude = float(latitude)
    longitude = float(longitude)
    # NOTE(review): HERE API key hardcoded in source; move to configuration.
    gp = herepy.GeocoderReverseApi(
        '7xcRjOXj4yGyENeYkpSvqlXE4Ahyx3TReJHGN71yQ80')
    response = gp.retrieve_addresses([latitude, longitude])
    # The herepy response object is stringified and re-parsed to obtain a
    # plain dict — presumably it lacks a direct dict accessor; TODO confirm
    # whether response.as_dict() (or similar) exists and use it instead.
    response = str(response)
    response = ast.literal_eval(response)
    response = response["items"][0]["address"]["label"]
    return response
@app.route('/bar')
def bar():
    # Demo endpoint: draws a bar chart from hardcoded pincode data.
    pincode = [11, 12, 11, 13, 15, 11, 16, 17]
    tmp = set(pincode)
    count = []
    # Count occurrences of each distinct pincode.
    # NOTE(review): the pairing of counts with tick labels depends on the
    # set's iteration order, and the 6 labels match the 6 unique values only
    # for this particular hardcoded data — verify before reusing.
    for i in tmp:
        count.append(pincode.count(i))
    tick_label = ['one', 'two', 'three', 'four', 'five', 'six']
    pincode = list(tmp)
    plt.bar(pincode, count, tick_label=tick_label,
            width=0.8, color=['red', 'green'])
    plt.xlabel('Problems')
    plt.ylabel('Area')
    plt.title('Pincode')
    # plt.savefig('/graphs'+str(datetime.now())+'.png')
    return 'hii'
@app.route("/index")
def index():
grievance_all = list(mongo.db.grievance.find())
for i in grievance_all:
print(i["area"])
if i['area'] == "unpredicted":
longitude = float(i['longitude'])
latitude = float(i['latitude'])
res = getLocationDetails(latitude, longitude)
ass_array = {"virar": [19.4564, 72.7925],
"panvel": [18.9894, 73.1175],
"ghatkopar": [19.0858, 72.9090],
"dahisar": [19.2494, 72.8596],
"mira road ": [19.2871, 72.8688]}
list_distance = {}
for x in ass_array:
list_distance[x] = distance(
latitude, ass_array[x][0], longitude, ass_array[x][1])
distance_min = min(list_distance, key=list_distance.get)
update_authority = mongo.db.grievance.find_one_and_update(
{'grievance_id': i["grievance_id"]}, {'$set': {"assigned_authority": distance_min}})
update_location = mongo.db.grievance.find_one_and_update(
{'grievance_id': i["grievance_id"]}, {'$set': {"area": res}})
return render_template('problems.html', all=grievance_all)
@app.route('/solve/<id1>')
def solve(id1):
    # Mark one grievance as solved, then return to the admin listing.
    update = {'$set': {'status': 'solved'}}
    mongo.db.grievance.find_one_and_update({'grievance_id': id1}, update)
    return redirect('/index')
@app.route("/userspecific/<id>")
def userspecific(id):
grievance_all = list(mongo.db.grievance.find({"user_id": id}))
for i in grievance_all:
if 'area' not in i or i['area'] == "unpredicted":
longitude = float(i['longitude'])
latitude = float(i['latitude'])
res = getLocationDetails(latitude, longitude)
update_location = mongo.db.grievance.find_one_and_update(
{'grievance_id': i["grievance_id"]}, {'$set': {"area": res}})
return render_template('problems.html', all=grievance_all)
@app.route("/virar")
def virar():
grievance_all = list(mongo.db.grievance.find(
{"assigned_authority": "virar"}))
for i in grievance_all:
if 'area' not in i or i['area'] == "unpredicted":
longitude = float(i['longitude'])
latitude = float(i['latitude'])
res = getLocationDetails(latitude, longitude)
update_location = mongo.db.grievance.find_one_and_update(
{'grievance_id': i["grievance_id"]}, {'$set': {"area": res}})
update_location = mongo.db.grievance.find_one_and_update(
{'grievance_id': i["grievance_id"]}, {'$set': {"assigned_authority": 'virar'}})
grievance_all = list(mongo.db.grievance.find(
{"assigned_authority": "virar"}))
return render_template('problems.html', all=grievance_all)
@app.route("/panvel")
def panvel():
grievance_all = list(mongo.db.grievance.find(
{"assigned_authority": "panvel"}))
for i in grievance_all:
if 'area' not in i or i['area'] == "unpredicted":
longitude = float(i['longitude'])
latitude = float(i['latitude'])
res = getLocationDetails(latitude, longitude)
update_location = mongo.db.grievance.find_one_and_update(
{'grievance_id': i["grievance_id"]}, {'$set': {"area": res}})
update_location = mongo.db.grievance.find_one_and_update(
{'grievance_id': i["grievance_id"]}, {'$set': {"assigned_authority": 'panvel'}})
grievance_all = list(mongo.db.grievance.find(
{"assigned_authority": "panvel"}))
return render_template('problems.html', all=grievance_all)
@app.route("/ghatkopar")
def ghatkopar():
grievance_all = list(mongo.db.grievance.find(
{"assigned_authority": "ghatkopar"}))
for i in grievance_all:
if 'area' not in i or i['area'] == "unpredicted":
longitude = float(i['longitude'])
latitude = float(i['latitude'])
res = getLocationDetails(latitude, longitude)
update_location = mongo.db.grievance.find_one_and_update(
{'grievance_id': i["grievance_id"]}, {'$set': {"area": res}})
update_location = mongo.db.grievance.find_one_and_update(
{'grievance_id': i["grievance_id"]}, {'$set': {"assigned_authority": 'ghatkopar'}})
grievance_all = list(mongo.db.grievance.find(
{"assigned_authority": "ghatkopar"}))
return render_template('problems.html', all=grievance_all)
@app.route("/dahisar")
def dahisar():
grievance_all = list(mongo.db.grievance.find(
{"assigned_authority": "dahisar"}))
for i in grievance_all:
if 'area' not in i or i['area'] == "unpredicted":
longitude = float(i['longitude'])
latitude = float(i['latitude'])
res = getLocationDetails(latitude, longitude)
update_location = mongo.db.grievance.find_one_and_update(
{'grievance_id': i["grievance_id"]}, {'$set': {"area": res}})
update_location = mongo.db.grievance.find_one_and_update(
{'grievance_id': i["grievance_id"]}, {'$set': {"assigned_authority": 'dahisar'}})
grievance_all = list(mongo.db.grievance.find(
{"assigned_authority": "dahisar"}))
return render_template('problems.html', all=grievance_all)
@app.route("/mira road")
def miraroad():
grievance_all = list(mongo.db.grievance.find(
{"assigned_authority": "mira road"}))
for i in grievance_all:
if 'area' not in i or i['area'] == "unpredicted":
longitude = float(i['longitude'])
latitude = float(i['latitude'])
res = getLocationDetails(latitude, longitude)
update_location = mongo.db.grievance.find_one_and_update(
{'grievance_id': i["grievance_id"]}, {'$set': {"area": res}})
grievance_all = list(mongo.db.grievance.find(
{"assigned_authority": "mira road"}))
return render_template('problems.html', all=grievance_all)
@app.route("/sewage")
def sewage():
grievance_all = list(mongo.db.grievance.find({"grievance_type": "sewage"}))
for i in grievance_all:
if 'area' not in i or i['area'] == "unpredicted":
longitude = float(i['longitude'])
latitude = float(i['latitude'])
res = getLocationDetails(latitude, longitude)
update_location = mongo.db.grievance.find_one_and_update(
{'grievance_id': i["grievance_id"]}, {'$set': {"area": res}})
grievance_all = list(mongo.db.grievance.find(
{"grievance_type": "sewage"}))
return render_template('problems.html', all=grievance_all)
@app.route("/garbage")
def garbage():
grievance_all = list(mongo.db.grievance.find(
{"grievance_type": "garbage"}))
for i in grievance_all:
| |
such as downloading files from a
# remote camera.
# If you register the callback function, the EDSDK calls the callback
# function during execution or on completion of the following APIs.
# This timing can be used in updating on-screen progress bars, for example.
#
# Parameters:
# In: inRef - The reference of the stream or image.
# inProgressCallback - Pointer to a progress callback function.
# inProgressOption - The option about progress is specified.
# Must be one of the following values.
# kEdsProgressOption_Done
# When processing is completed,a callback function
# is called only at once.
# kEdsProgressOption_Periodically
# A callback function is performed periodically.
# inContext - Application information, passed in the argument
# when the callback function is called. Any information
# required for your program may be added.
# Out: None
#
# Returns: Any of the sdk errors.
# -----------------------------------------------------------------------------*/
def EdsSetProgressCallback(inRef, inProgressFunc, inProgressOption, inContext):
    """Register a progress callback on a stream/image ref; returns an SDK error code."""
    # NOTE(review): ctypes only honours `.argtypes` (plural); assigning
    # `.argtype` has no effect, so no argument type-checking happens here.
    # Renaming it would require verifying each type tuple against the SDK
    # headers (byref'd out-params would need POINTER(...)) — TODO.
    EDSDK.EdsSetProgressCallback.argtype = (c_void_p, EdsProgressCallback, EdsProgressOption, c_void_p)
    EDSDK.EdsSetProgressCallback.restype = c_uint
    # C# prototype: public extern static uint EdsSetProgressCallback( IntPtr inRef, EdsProgressCallback inProgressFunc,
    #               EdsProgressOption inProgressOption, IntPtr inContext);
    return EDSDK.EdsSetProgressCallback(inRef, inProgressFunc, inProgressOption, inContext)
'''
# *--------------------------------------------
# Image operating functions
# ---------------------------------------------*/
'''
# *-----------------------------------------------------------------------------
#
# Function: EdsCreateImageRef
#
# Description:
# Creates an image object from an image file.
# Without modification, stream objects cannot be worked with as images.
# Thus, when extracting images from image files,
# you must use this API to create image objects.
# The image object created this way can be used to get image information
# (such as the height and width, number of color components, and
# resolution), thumbnail image data, and the image data itself.
#
# Parameters:
# In: inStreamRef - The reference of the stream.
#
# Out: outImageRef - The reference of the image.
#
# Returns: Any of the sdk errors.
# -----------------------------------------------------------------------------*/
def EdsCreateImageRef(inStreamRef, outImageRef):
    """Create an image object from a stream; outImageRef receives the ref (passed byref)."""
    # NOTE(review): `.argtype` is a no-op in ctypes (correct name: `.argtypes`).
    EDSDK.EdsCreateImageRef.argtype = (c_void_p, c_void_p)
    EDSDK.EdsCreateImageRef.restype = c_uint
    # C# prototype: public extern static uint EdsCreateImageRef( IntPtr inStreamRef, out IntPtr outImageRef);
    return EDSDK.EdsCreateImageRef(inStreamRef, byref(outImageRef))
# *-----------------------------------------------------------------------------
#
# Function: EdsGetImageInfo
#
# Description:
# Gets image information from a designated image object.
# Here, image information means the image width and height,
# number of color components, resolution, and effective image area.
#
# Parameters:
# In: inStreamRef - Designate the object for which to get image information.
# inImageSource - Of the various image data items in the image file,
# designate the type of image data representing the
# information you want to get. Designate the image as
# defined in Enum EdsImageSource.
#
# kEdsImageSrc_FullView
# The image itself (a full-sized image)
# kEdsImageSrc_Thumbnail
# A thumbnail image
# kEdsImageSrc_Preview
# A preview image
# Out: outImageInfo - Stores the image data information designated
# in inImageSource.
#
# Returns: Any of the sdk errors.
# -----------------------------------------------------------------------------*/
def EdsGetImageInfo(inImageRef, inImageSource, outImageInfo):
    """Get width/height/components/resolution info for an image object; outImageInfo filled byref."""
    # NOTE(review): `.argtype` is a no-op in ctypes (correct name: `.argtypes`);
    # a real fix would also need POINTER(EdsImageInfo) for the byref'd out-param.
    EDSDK.EdsGetImageInfo.argtype = (c_void_p, EdsImageSource, EdsImageInfo)
    EDSDK.EdsGetImageInfo.restype = c_uint
    # C# prototype: public extern static uint EdsGetImageInfo( IntPtr inImageRef, EdsImageSource inImageSource,
    #               out EdsImageInfo outImageInfo );
    return EDSDK.EdsGetImageInfo(inImageRef, inImageSource, byref(outImageInfo))
# *-----------------------------------------------------------------------------
#
# Function: EdsGetImage
#
# Description:
# Gets designated image data from an image file, in the form of a
# designated rectangle.
# Returns uncompressed results for JPEGs and processed results
# in the designated pixel order (RGB, Top-down BGR, and so on) for
# RAW images.
# Additionally, by designating the input/output rectangle,
# it is possible to get reduced, enlarged, or partial images.
# However, because images corresponding to the designated output rectangle
# are always returned by the SDK, the SDK does not take the aspect
# ratio into account.
# To maintain the aspect ratio, you must keep the aspect ratio in mind
# when designating the rectangle.
#
# Parameters:
# In:
# inImageRef - Designate the image object for which to get
# the image data.
# inImageSource - Designate the type of image data to get from
# the image file (thumbnail, preview, and so on).
# Designate values as defined in Enum EdsImageSource.
# inImageType - Designate the output image type. Because
# the output format of EdGetImage may only be RGB, only
# kEdsTargetImageType_RGB or kEdsTargetImageType_RGB16
# can be designated.
# However, image types exceeding the resolution of
# inImageSource cannot be designated.
# inSrcRect - Designate the coordinates and size of the rectangle
# to be retrieved (processed) from the source image.
# inDstSize - Designate the rectangle size for output.
#
# Out:
# outStreamRef - Designate the memory or file stream for output of
# the image.
# Returns: Any of the sdk errors.
# -----------------------------------------------------------------------------*/
def EdsGetImage(inImageRef, inImageSource, inImageType, inSrcRect, inDstSize, outStreamRef):
    """Extract (and optionally scale/crop) image data into an output stream; returns an SDK error code."""
    # NOTE(review): `.argtype` is a no-op in ctypes (correct name: `.argtypes`).
    EDSDK.EdsGetImage.argtype = (c_void_p, EdsImageSource, EdsTargetImageType, EdsRect, EdsSize, c_void_p)
    EDSDK.EdsGetImage.restype = c_uint
    # C# prototype: public extern static uint EdsGetImage( IntPtr inImageRef, EdsImageSource inImageSource,
    #               EdsTargetImageType inImageType, EdsRect inSrcRect, EdsSize inDstSize, IntPtr outStreamRef );
    return EDSDK.EdsGetImage(inImageRef, inImageSource, inImageType, inSrcRect, inDstSize, outStreamRef)
'''
# ----------------------------------------------
# Event handler registering functions
# ----------------------------------------------
'''
# *-----------------------------------------------------------------------------
#
# Function: EdsSetCameraAddedHandler
#
# Description:
# Registers a callback function for when a camera is detected.
#
# Parameters:
# In: inCameraAddedHandler - Pointer to a callback function
# called when a camera is connected physically
# inContext - Specifies an application-defined value to be sent to
# the callback function pointed to by CallBack parameter.
# Out: None
#
# Returns: Any of the sdk errors.
# -----------------------------------------------------------------------------*#
# public extern static uint EdsSetCameraAddedHandler(EdsCameraAddedHandler inCameraAddedHandler,IntPtr inContext);
# *-----------------------------------------------------------------------------
#
# Function: EdsSetPropertyEventHandler
#
# Description:
# Registers a callback function for receiving status
# change notification events for property states on a camera.
#
# Parameters:
# In: inCameraRef - Designate the camera object.
# inEvent - Designate one or all events to be supplemented.
# inPropertyEventHandler - Designate the pointer to the callback
# function for receiving property-related camera events.
# inContext - Designate application information to be passed by
# means of the callback function. Any data needed for
# your application can be passed.
# Out: None
#
#  Returns:    Any of the sdk errors.
# -----------------------------------------------------------------------------*/
# FIXME:
# @WINFUNCTYPE(None, c_uint, c_uint, c_uint, IntPtr)
# def EdsPropertyEventHandler(inEvent, inPropertyID, inParam, inContext):
# pass
# function pointer:
def EdsSetPropertyEventHandler(inCameraRef, inEvent, inPropertyEventHandler, inContext):
    """Register a callback for property-change events on a camera."""
    # NOTE(review): `.argtype` is a no-op in ctypes (correct name: `.argtypes`).
    EDSDK.EdsSetPropertyEventHandler.argtype = (IntPtr, EdsPropertyEvent, EdsPropertyEventHandler, py_object)
    EDSDK.EdsSetPropertyEventHandler.restype = EdsError
    # C# prototype: public extern static uint EdsSetPropertyEventHandler( IntPtr inCameraRef, uint inEvnet,
    #               EdsPropertyEventHandler inPropertyEventHandler, IntPtr inContext );
    return EDSDK.EdsSetPropertyEventHandler(inCameraRef, inEvent, inPropertyEventHandler,
                                            inContext)
# *-----------------------------------------------------------------------------
#
# Function: EdsSetObjectEventHandler
#
# Description:
# Registers a callback function for receiving status
# change notification events for objects on a remote camera.
# Here, object means volumes representing memory cards, files and directories,
# and shot images stored in memory, in particular.
#
# Parameters:
# In: inCameraRef - Designate the camera object.
# inEvent - Designate one or all events to be supplemented.
# To designate all events, use kEdsObjectEvent_All.
# inObjectEventHandler - Designate the pointer to the callback function
# for receiving object-related camera events.
# inContext - Passes inContext without modification,
# as designated as an EdsSetObjectEventHandler argument.
# Out: None
#
# Returns: Any of the sdk errors.
# -----------------------------------------------------------------------------*#
# EdsObjectEventHandler = WINFUNCTYPE(c_uint, c_uint, IntPtr, py_object)
def EdsSetObjectEventHandler(inCameraRef, inEvent, inObjectEventHandler, inContext):
    """Register a callback for object events (volumes, files, shot images) on a camera."""
    # NOTE(review): `.argtype` is a no-op in ctypes (correct name: `.argtypes`).
    EDSDK.EdsSetObjectEventHandler.argtype = (IntPtr, EdsObjectEvent, EdsObjectEventHandler, IntPtr)
    EDSDK.EdsSetObjectEventHandler.restype = EdsError
    # C# prototype: public extern static uint EdsSetObjectEventHandler( IntPtr inCameraRef, uint inEvnet,
    #               EdsObjectEventHandler inObjectEventHandler, IntPtr inContext );
    return EDSDK.EdsSetObjectEventHandler(inCameraRef, inEvent, inObjectEventHandler,
                                          inContext)
# *-----------------------------------------------------------------------------
#
# Function: EdsSetCameraStateEventHandler
#
# Description:
# Registers a callback function for receiving status
# change notification events for property states on a camera.
#
# Parameters:
# In: inCameraRef - Designate the camera object.
# inEvent - Designate one or all events to be supplemented.
# To designate all events, use kEdsStateEvent_All.
# inStateEventHandler - Designate the pointer to the callback function
# for receiving events related to camera object states.
# inContext - Designate application information to be passed
# by means of the callback function. Any data needed for
# your application can be passed.
# Out: None
#
# Returns: Any of the sdk errors.
# -----------------------------------------------------------------------------*/
def EdsSetCameraStateEventHandler(inCameraRef, inEvent, inStateEventHandler, inContext):
    """Register a callback for camera state-change events."""
    # NOTE(review): `.argtype` is a no-op in ctypes (correct name: `.argtypes`).
    EDSDK.EdsSetCameraStateEventHandler.argtype = (IntPtr, EdsStateEvent, EdsStateEventHandler, IntPtr)
    EDSDK.EdsSetCameraStateEventHandler.restype = EdsError
    # C# prototype: public extern static uint EdsSetCameraStateEventHandler( IntPtr inCameraRef, uint inEvnet,
    #               EdsStateEventHandler inStateEventHandler, IntPtr inContext );
    return EDSDK.EdsSetCameraStateEventHandler(inCameraRef, inEvent, inStateEventHandler,
                                               inContext)
# *-----------------------------------------------------------------------------
#
# Function: EdsCreateEvfImageRef
# Description:
# Creates an object used to get the live view image data set.
#
# Parameters:
# In: inStreamRef - The stream reference which opened to get EVF JPEG image.
# Out: outEvfImageRef - The EVFData reference.
#
# Returns: Any of the sdk errors.
# -----------------------------------------------------------------------------*/
def EdsCreateEvfImageRef(inStreamRef, outEvfImageRef):
    """Create an object for fetching the live-view (EVF) image data set; out-param filled byref."""
    # NOTE(review): `.argtype` is a no-op in ctypes (correct name: `.argtypes`).
    EDSDK.EdsCreateEvfImageRef.argtype = (IntPtr, IntPtr)
    EDSDK.EdsCreateEvfImageRef.restype = c_uint
    # C# prototype: public extern static uint EdsCreateEvfImageRef(IntPtr inStreamRef, out IntPtr outEvfImageRef);
    return EDSDK.EdsCreateEvfImageRef(inStreamRef, byref(outEvfImageRef))
# *-----------------------------------------------------------------------------
#
# Function: EdsDownloadEvfImage
# Description:
# Downloads the live view image data set for a camera currently in live view mode.
# Live view can be started by using the property ID:kEdsPropertyID_Evf_OutputDevice and
# data:EdsOutputDevice_PC to call EdsSetPropertyData.
# In addition | |
or pidx < 0:
# no parens, current block ends at keyword
return kidx
else:
# parens, there might be a child filter inside.
# find the close parens and that is end of block
pidx = match_symbol(S)
if pidx > 0:
return find_keyword(S, start=pidx, keywords=keywords)
else:
return -1
def split_sqly(S, keywords=FILTERS_KW):
    """Split a sqly statement into top-level blocks.

    A block boundary is either one of the given keywords or the close of an
    outermost matching paren pair. Only one level of splitting is performed;
    surrounding whitespace and trailing semicolons are stripped per block.
    """
    # sanity check: "(" present but unbalanced overall
    if match_symbol(S) < 0 and "(" in S:
        raise Exception("query {} has unbalanced parens".format(S))
    clauses = []
    remainder = S
    while True:
        cut = find_next_block(remainder, keywords=keywords)
        if cut <= 0:
            # no further boundary: the rest is the final block
            clauses.append(remainder.strip().strip(";"))
            return clauses
        clauses.append(remainder[:cut].strip().strip(";"))
        remainder = remainder[cut:]
def treeify_sqly_where(clause):
    """
    converts a where clause in sqly form to a nested dict:
    for example:
        (has_name = cow AND (has_colour = green OR has_colour = red))
        -->
        {'AND': ['has_name = cow', {'OR': ['has_colour = green', 'has_colour = red']}]}

    Leaves are returned as plain strings. Mixing AND and OR at one nesting
    level raises; parenthesize to disambiguate.
    """
    clause = remove_nested_enclosing_symbol(clause)
    # One level of splitting; parenthesized sub-clauses are recursed into below.
    t = split_sqly(clause, keywords=["AND", "OR"])
    if len(t) == 0:
        raise Exception("empty clause")
    if len(t) == 1:
        # either a leaf clause or a "NOT"
        # (t[0][4:] skips "NOT " — assumes a single space after the keyword)
        if t[0][:3] == "NOT":
            return {"NOT": treeify_sqly_where(t[0][4:])}
        else:
            return t[0]
    # Each non-first block starts with its joining conjunction; record which
    # conjunctions appear and strip them off the block text.
    conj_list = {"AND": False, "OR": False}
    for i in range(len(t)):
        c = t[i]
        if c[:3] == "AND":
            conj_list["AND"] = True
            t[i] = c[3:].strip()
        elif c[:2] == "OR":
            conj_list["OR"] = True
            t[i] = c[2:].strip()
    if conj_list["OR"] and conj_list["AND"]:
        raise Exception("AND and OR at same level of clause {}".format(clause))
    if not (conj_list["OR"] or conj_list["AND"]):
        raise Exception("multiple blocks in clause but no conjunctions {}".format(clause))
    if conj_list["OR"]:
        return {"OR": [treeify_sqly_where(c) for c in t]}
    else:
        return {"AND": [treeify_sqly_where(c) for c in t]}
def convert_where_tree(where_tree):
    """
    converts a treeified where tree (output from treeify_sqly_where)
    into a new-style FILTERS where clause by recursively converting
    clauses into comparators
    """
    if type(where_tree) is str:
        # Leaf: either a "<< ... >>" triple or a comparator expression.
        where_tree = remove_nested_enclosing_symbol(where_tree)
        if where_tree[0] == "<":
            return triple_str_to_dict(where_tree)
        else:
            return where_leaf_to_comparator(where_tree)
    output = {}
    # Special case: NOT wrapping a single string leaf.
    if where_tree.get("NOT") and type(where_tree["NOT"]) is str:
        output["NOT"] = [where_leaf_to_comparator(where_tree["NOT"])]
        return output
    # Convert each conjunction subtree; keys other than AND/OR/NOT are dropped.
    for k, v in where_tree.items():
        if k in ["AND", "OR", "NOT"]:
            # a subtree
            assert type(v) is list
            output[k] = [convert_where_tree(t) for t in v]
    return output
def triple_str_to_dict(clause):
    """Convert a where-clause triple "<< subj, pred, obj >>" to dict form.

    Exactly one of subj/obj is expected to be the wildcard "?". A fixed
    memid is prefixed with "#" (yielding a "subj"/"obj" key); an unprefixed
    obj is treated as text ("obj_text"). A fixed unprefixed subj is not
    supported. Commas inside obj/subj text are escaped with a backslash.

    TODO: nested queries and multiple "?" are not handled.
    """
    # Hide escaped commas behind a unique token so split(",") leaves them alone.
    placeholder = uuid.uuid4().hex
    guarded = clause.replace("\\,", placeholder)
    raw_terms = remove_enclosing_symbol(guarded, ("<<", ">>")).split(",")
    terms = [t.replace(placeholder, ",").strip() for t in raw_terms]
    pred = terms[1]
    assert pred and pred != "?"
    out = {"pred_text": pred}
    if terms[0] == "?":
        if terms[2] == "?":
            raise Exception(
                "queries with both subj and obj unfixed in a triple are not yet supported"
            )
        if terms[2][0] == "#":
            out["obj"] = terms[2][1:]
        else:
            out["obj_text"] = terms[2]
    else:
        if terms[0][0] == "#":
            out["subj"] = terms[0][1:]
        else:
            raise Exception(
                'queries with a "subj_text" (as opposed to subj memid) in a triple are not supported'
            )
    return out
def where_leaf_to_comparator(clause):
    """
    converts a leaf in sqly clause into a FILTERs comparator
    for example
        'has_name = cow'
        -->
        {"input_left": "has_name",
         "input_right": "cow",
         "comparison_type": "EQUAL"}

    Supported operators: =, =#= (memid equality), =(+-N) (equality with
    tolerance N), <, <=, >, >=, and %(modulus)(+-tolerance).
    """
    # TODO having_measure
    eq_idx = clause.find("=")
    lt_idx = clause.find("<")
    lte_idx = clause.find("<=")
    gt_idx = clause.find(">")
    gte_idx = clause.find(">=")
    mod_idx = clause.find("%")
    # everything will break if clause is complicated enough that it has internal comparators FIXME?
    # not obvious we should be converting those back forth though
    assert not (gt_idx > -1 and lt_idx > -1)
    assert not (eq_idx > -1 and mod_idx > -1)
    if lt_idx > -1:
        eq_idx = -1  # "<=" contains "="; don't also treat it as equality below
        left_text = clause[:lt_idx]
        if lte_idx > -1:
            ct = "LESS_THAN_EQUAL"
            right_text = clause[lte_idx + 2 :]
        else:
            ct = "LESS_THAN"
            right_text = clause[lt_idx + 1 :]
    # (consistency fix: was `gt_idx > 0`, unlike the `> -1` used everywhere else)
    if gt_idx > -1:
        eq_idx = -1
        left_text = clause[:gt_idx]
        if gte_idx > -1:
            ct = "GREATER_THAN_EQUAL"
            right_text = clause[gte_idx + 2 :]
        else:
            ct = "GREATER_THAN"
            right_text = clause[gt_idx + 1 :]
    if eq_idx > -1:
        left_text = clause[:eq_idx]
        if clause[eq_idx + 1 : eq_idx + 3] == "(+-":
            # "=(+-N)": equality within tolerance N
            eq = clause[eq_idx : clause.find(")", eq_idx) + 1]
            ct = {"close_tolerance": int(eq[4:-1])}
            right_text = clause[eq_idx + len(eq) + 1 :]
        elif clause[eq_idx + 1 : eq_idx + 3] == "#=":
            # "=#=": special equality for memids instead of subj/obj _text_
            ct = "MEMID_EQUAL"
            right_text = clause[eq_idx + 3 :]
        else:
            ct = "EQUAL"
            right_text = clause[eq_idx + 1 :]
    if mod_idx > -1:
        # %_(modulus)(+-close_tolerance)
        left_text = clause[:mod_idx]
        # Fix: the modulus expression starts at mod_idx; it was sliced from
        # eq_idx, which is guaranteed to be -1 here (see assert above).
        end_idx = clause.find(" ", mod_idx)
        if end_idx == -1:
            end_idx = len(clause)  # modulus expression may end the clause
        mod_text = clause[mod_idx:end_idx]
        open_paren_idx = mod_text.find("(")
        close_paren_idx = mod_text.find(")")
        mod = int(mod_text[open_paren_idx + 1 : close_paren_idx])
        open_paren_idx = mod_text.find("(", close_paren_idx)
        if open_paren_idx > -1:
            close_paren_idx = mod_text.find(")", open_paren_idx)
            tol = int(mod_text[open_paren_idx + 3 : close_paren_idx])
            ct = {"close_tolerance": tol, "modulus": mod}
        else:
            # Fix: this branch referenced `tol` without ever assigning it
            # (NameError); with no tolerance given, only the modulus applies.
            ct = {"modulus": mod}
        # Fix: the right-hand side starts after the modulus expression
        # (was computed from eq_idx == -1, dropping the last character).
        right_text = clause[mod_idx + len(mod_text) :]
    left_value = maybe_eval_literal(left_text.strip())
    # TODO warn if right_value was something that needed eval?
    right_value = maybe_eval_literal(right_text.strip())
    return {
        "input_left": {"attribute": left_value},
        "input_right": right_value,
        "comparison_type": ct,
    }
def convert_where_from_sqly(clause, d):
    # Parse the sqly WHERE clause and attach its FILTERS form to d.
    parsed = treeify_sqly_where(clause)
    if type(parsed) is str:
        # Single leaf: wrap it so the converter always sees a conjunction.
        parsed = {"AND": [parsed]}
    d["where_clause"] = convert_where_tree(parsed)
def convert_output_from_sqly(clause, d):
    """Convert the SELECT/output portion of a SQL-y command onto ``d``.

    Accepts the sentinels "MEMORY" / "COUNT" verbatim, a single attribute,
    or a parenthesized comma-separated attribute list.

    FIXME !!! deal with recursion. what if there is sqly in attribute?
    Currently cannot handle a list with a complex (new filters style)
    attribute.
    """
    if clause in ("MEMORY", "COUNT"):
        d["output"] = clause
        return
    close_idx = match_symbol(clause)
    if close_idx > -1:
        # Strip the surrounding parentheses, then split the attribute list.
        open_idx = clause.find("(")
        inner = clause[open_idx + 1 : close_idx]
        # this WILL break for FILTERs style attributes
        if "," in inner:
            names = [part.strip() for part in inner.split(",")]
        else:
            names = [inner]
        d["output"] = [{"attribute": maybe_eval_literal(name)} for name in names]
    else:
        d["output"] = [{"attribute": maybe_eval_literal(clause)}]
def convert_memtype_from_sqly(clause, d):
    """Record the FROM/memory-type clause on ``d``.

    FIXME allow sentences with OR
    """
    d.update(memory_type=clause)
def convert_order_by_from_sqly(clause, d):
    """Convert an ORDER BY clause into the selector dict on ``d``.

    "LOCATION <loc>" sets a location selector; "RANDOM" requests a random
    pick; any other clause is treated as an attribute to take an argval
    over.
    """
    selector = d.get("selector")
    if not selector:
        selector = {}
        d["selector"] = selector
    if clause.find("LOCATION") == 0:
        # Skip the "LOCATION " prefix (9 characters including the space).
        selector["location"] = maybe_eval_literal(clause[9:])
    elif clause == "RANDOM":
        selector["return_quantity"] = "random"
    else:
        quantity = {"attribute": maybe_eval_literal(clause)}
        selector["return_quantity"] = {"argval": {"quantity": quantity}}
def convert_limit_from_sqly(clause, d):
# this requires doing convert_order_by first
c = clause.split()
assert d.get("selector")
d["selector"]["ordinal"] = int(c[0])
if len(c) > 1:
d["selector"]["return_quantity"]["argval"]["polarity"] = {"DESC": "MAX", "ASC": "MIN"}[
c[1]
]
def convert_coref_from_sqly(clause, d):
    """Store the coreference marker from the command on ``d``."""
    d.update(contains_coreference=clause)
def convert_same_from_sqly(clause, d):
    """Attach a SAME constraint to d's selector, creating the selector if absent."""
    selector = d.get("selector")
    if not selector:
        selector = {}
        d["selector"] = selector
    selector["same"] = clause
if __name__ == "__main__":
from droidlet.interpreter.tests import all_test_commands
has_name_cow = {"input_left": "has_name", "input_right": "cow", "comparison_type": "EQUAL"}
has_colour_green = {
"input_left": "has_colour",
"input_right": "green",
"comparison_type": "EQUAL",
}
has_colour_red = {"input_left": "has_colour", "input_right": "red", "comparison_type": "EQUAL"}
distance_to_me_greater_5 = {
"input_left": all_test_commands.ATTRIBUTES["distance from me"],
"input_right": "5",
"comparison_type": "GREATER_THAN",
}
c = {"AND": | |
# <gh_stars>1-10
"""
Hierarchical GAN with InfoGAN structure and losses
Author(s): <NAME> (<EMAIL>)
"""
import os.path
import pickle
import numpy as np
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Flatten, Reshape, Input, Lambda
from keras.layers import Conv2D, Conv2DTranspose, UpSampling2D, ZeroPadding2D, Cropping2D
from keras.layers import LeakyReLU, Dropout, GaussianNoise
from keras.layers import BatchNormalization, RepeatVector
from keras.layers import concatenate, dot, multiply
from keras.regularizers import l2
from keras.optimizers import Adam, Adamax, RMSprop
from keras import backend as K
from keras.initializers import RandomNormal
from keras.layers.pooling import GlobalAveragePooling2D
from keras.utils import plot_model
from keras.models import load_model
def label_flipping(y, p):
    """With probability ``p``, swap the two one-hot label columns of ``y``.

    The flip is applied to the whole batch at once (not per sample) and
    mutates ``y`` in place; the (possibly flipped) array is also returned.
    """
    if np.random.binomial(1, p):
        y[:, [0, 1]] = y[:, [1, 0]]
    return y
def gaussian_loss(y_true, y_pred):
    """InfoGAN continuous-code loss (negative Gaussian log-likelihood, up to a constant).

    ``y_pred`` stacks (mean, log-std) along axis 1; ``y_true`` carries the
    target latent code in its first slice. Returns the batch-mean loss.
    """
    mean = y_pred[:, 0, :]
    log_std = y_pred[:, 1, :]
    target = y_true[:, 0, :]
    # Standardized residual; K.epsilon() guards against division by zero.
    z = (target - mean) / (K.exp(log_std) + K.epsilon())
    return K.mean(log_std + 0.5 * K.square(z))
class HGAN(object):
def __init__(self, X_train, X_test, latent_dim=2, noise_dim=100):
self.latent_dim = latent_dim
self.noise_dim = noise_dim
self.D1 = None # first discriminator
self.D = None # second discriminator
self.G1 = None # first generator
self.G2 = None # second generator
self.DM1 = None # first discriminator model
self.DM = None # second discriminator model
self.AM1 = None # first adversarial model
self.AM2 = None # second adversarial model
self.x_train = X_train
self.x_test = X_test
n_points = X_train.shape[1]/2
self.input_shape = (n_points, 2, 1)
self.conc_shape = X_train.shape[1:]
def discriminator1(self):
if self.D1:
return self.D1
kernel_height = 5
depth = 16
dropout = 0.4
weight_decay = 1e-5
x = Input(shape=self.input_shape)
y = Conv2D(depth*1, (kernel_height,2), strides=2, padding='same',
kernel_regularizer=l2(weight_decay))(x)
y = BatchNormalization(momentum=0.9)(y)
y = LeakyReLU(alpha=0.2)(y)
y = Dropout(dropout)(y)
y = Conv2D(depth*2, (kernel_height,2), strides=2, padding='same',
kernel_regularizer=l2(weight_decay))(y)
y = BatchNormalization(momentum=0.9)(y)
y = LeakyReLU(alpha=0.2)(y)
y = Dropout(dropout)(y)
y = Conv2D(depth*4, (kernel_height,2), strides=2, padding='same',
kernel_regularizer=l2(weight_decay))(y)
y = BatchNormalization(momentum=0.9)(y)
y = LeakyReLU(alpha=0.2)(y)
y = Dropout(dropout)(y)
y = Conv2D(depth*8, (kernel_height,2), strides=2, padding='same',
kernel_regularizer=l2(weight_decay))(y)
y = BatchNormalization(momentum=0.9)(y)
y = LeakyReLU(alpha=0.2)(y)
y = Dropout(dropout)(y)
y = Flatten()(y)
y = Dense(1024)(y)
y = BatchNormalization(momentum=0.9)(y)
y = LeakyReLU(alpha=0.2)(y)
d = Dense(2, activation='softmax', name="D_out")(y)
def linmax(x):
return K.maximum(x, -16)
def linmax_shape(input_shape):
return input_shape
# Auxiliary Q
q1 = Dense(128)(y)
q1 = BatchNormalization(momentum=0.9)(q1)
q1 = LeakyReLU(alpha=0.2)(q1)
q_mean1 = Dense(self.latent_dim, activation='linear', name="Q_mean1")(q1)
q_logstd1 = Dense(self.latent_dim, name="Q_logstd1")(q1)
q_logstd1 = Lambda(linmax, output_shape=linmax_shape)(q_logstd1)
# Reshape Q to nbatch, 1, latent_dim
q_mean1 = Reshape((1, self.latent_dim))(q_mean1)
q_logstd1 = Reshape((1, self.latent_dim))(q_logstd1)
q1 = concatenate([q_mean1, q_logstd1], name="Q_out1", axis=1)
self.D1 = Model(inputs=x, outputs=[d, q1])
self.D1.summary()
return self.D1
def discriminator(self):
if self.D:
return self.D
kernel_height = 5
depth = 32
dropout = 0.4
weight_decay = 1e-5
x = Input(shape=self.conc_shape)
y = Conv2D(depth*1, (kernel_height,2), strides=2, padding='same',
kernel_regularizer=l2(weight_decay))(x)
y = BatchNormalization(momentum=0.9)(y)
y = LeakyReLU(alpha=0.2)(y)
y = Dropout(dropout)(y)
y = Conv2D(depth*2, (kernel_height,2), strides=2, padding='same',
kernel_regularizer=l2(weight_decay))(y)
y = BatchNormalization(momentum=0.9)(y)
y = LeakyReLU(alpha=0.2)(y)
y = Dropout(dropout)(y)
y = Conv2D(depth*4, (kernel_height,2), strides=2, padding='same',
kernel_regularizer=l2(weight_decay))(y)
y = BatchNormalization(momentum=0.9)(y)
y = LeakyReLU(alpha=0.2)(y)
y = Dropout(dropout)(y)
y = Conv2D(depth*8, (kernel_height,2), strides=2, padding='same',
kernel_regularizer=l2(weight_decay))(y)
y = BatchNormalization(momentum=0.9)(y)
y = LeakyReLU(alpha=0.2)(y)
y = Dropout(dropout)(y)
y = Flatten()(y)
y = Dense(1024)(y)
y = BatchNormalization(momentum=0.9)(y)
y = LeakyReLU(alpha=0.2)(y)
d = Dense(2, activation='softmax', name="D_out")(y)
def linmax(x):
return K.maximum(x, -16)
def linmax_shape(input_shape):
return input_shape
# Auxiliary Q
q2 = Dense(128)(y)
q2 = BatchNormalization(momentum=0.9)(q2)
q2 = LeakyReLU(alpha=0.2)(q2)
q_mean2 = Dense(self.latent_dim, activation='linear', name="Q_mean2")(q2)
q_logstd2 = Dense(self.latent_dim, name="Q_logstd2")(q2)
q_logstd2 = Lambda(linmax, output_shape=linmax_shape)(q_logstd2)
# Reshape Q to nbatch, 1, latent_dim
q_mean2 = Reshape((1, self.latent_dim))(q_mean2)
q_logstd2 = Reshape((1, self.latent_dim))(q_logstd2)
q2 = concatenate([q_mean2, q_logstd2], name="Q_out2", axis=1)
self.D = Model(inputs=x, outputs=[d, q2])
self.D.summary()
return self.D
def generator1(self):
if self.G1:
return self.G1
kernel_height = 5
depth = 32*16
dim = (self.input_shape[0]+12)/16
weight_decay = 1e-5
noise_std = 0.01
c1 = Input(shape=(self.latent_dim,), name="latent_input1")
z1 = Input(shape=(self.noise_dim,), name="noise_input1")
x = concatenate([c1, z1])
x = Dense(dim*2*depth, kernel_regularizer=l2(weight_decay))(x)
x = BatchNormalization(momentum=0.9)(x)
x = LeakyReLU(alpha=0.2)(x)
x = Reshape((dim, 2, depth))(x)
x = UpSampling2D((2,1))(x)
x = Conv2DTranspose(int(depth/2), (kernel_height,2), padding='same',
kernel_regularizer=l2(weight_decay))(x)
x = BatchNormalization(momentum=0.9)(x)
x = LeakyReLU(alpha=0.2)(x)
x = GaussianNoise(noise_std)(x)
x = UpSampling2D((2,1))(x)
x = Conv2DTranspose(int(depth/4), (kernel_height,2), padding='same',
kernel_regularizer=l2(weight_decay))(x)
x = BatchNormalization(momentum=0.9)(x)
x = LeakyReLU(alpha=0.2)(x)
x = GaussianNoise(noise_std)(x)
x = UpSampling2D((2,1))(x)
x = Conv2DTranspose(int(depth/8), (kernel_height,2), padding='same',
kernel_regularizer=l2(weight_decay))(x)
x = BatchNormalization(momentum=0.9)(x)
x = LeakyReLU(alpha=0.2)(x)
x = GaussianNoise(noise_std)(x)
x = UpSampling2D((2,1))(x)
x = Conv2DTranspose(int(depth/16), (kernel_height,2), padding='same',
kernel_regularizer=l2(weight_decay))(x)
x = BatchNormalization(momentum=0.9)(x)
x = LeakyReLU(alpha=0.2)(x)
x = GaussianNoise(noise_std)(x)
# Out: 100 x 2, xy coordinates, [-1.0,1.0] per coordinate
x = Conv2DTranspose(1, (kernel_height,2), padding='same',
kernel_regularizer=l2(weight_decay))(x)
x = Activation('tanh')(x)
x = Cropping2D((6, 0))(x)
self.G1 = Model(inputs=[c1, z1], outputs=x)
self.G1.summary()
return self.G1
def generator2(self):
if self.G2:
return self.G2
kernel_height = 5
depth = 32*16
dim = (self.input_shape[0]+12)/16
weight_decay = 1e-5
noise_std = 0.01
c2 = Input(shape=(self.latent_dim,))
z2 = Input(shape=(self.noise_dim,))
x1 = Input(shape=self.input_shape)
x1_flat = Flatten()(x1)
x = concatenate([c2, z2, x1_flat])
x = Dense(dim*2*depth, kernel_regularizer=l2(weight_decay))(x)
x = BatchNormalization(momentum=0.9)(x)
x = LeakyReLU(alpha=0.2)(x)
x = Reshape((dim, 2, depth))(x)
x = UpSampling2D((2,1))(x)
x = Conv2DTranspose(int(depth/2), (kernel_height,2), padding='same',
kernel_regularizer=l2(weight_decay))(x)
x = BatchNormalization(momentum=0.9)(x)
x = LeakyReLU(alpha=0.2)(x)
x = GaussianNoise(noise_std)(x)
x = UpSampling2D((2,1))(x)
x = Conv2DTranspose(int(depth/4), (kernel_height,2), padding='same',
kernel_regularizer=l2(weight_decay))(x)
x = BatchNormalization(momentum=0.9)(x)
x = LeakyReLU(alpha=0.2)(x)
x = GaussianNoise(noise_std)(x)
x = UpSampling2D((2,1))(x)
x = Conv2DTranspose(int(depth/8), (kernel_height,2), padding='same',
kernel_regularizer=l2(weight_decay))(x)
x = BatchNormalization(momentum=0.9)(x)
x = LeakyReLU(alpha=0.2)(x)
x = GaussianNoise(noise_std)(x)
x = UpSampling2D((2,1))(x)
x = Conv2DTranspose(int(depth/16), (kernel_height,2), padding='same',
kernel_regularizer=l2(weight_decay))(x)
x = BatchNormalization(momentum=0.9)(x)
x = LeakyReLU(alpha=0.2)(x)
x = GaussianNoise(noise_std)(x)
# Out: 100 x 2, xy coordinates, [-1.0,1.0] per coordinate
x = Conv2DTranspose(1, (kernel_height,2), padding='same',
kernel_regularizer=l2(weight_decay))(x)
x = Activation('tanh')(x)
x = Cropping2D((6, 0))(x)
self.G2 = Model(inputs=[c2, z2, x1], outputs=x)
self.G2.summary()
return self.G2
def discriminator_model1(self):
if self.DM1:
return self.DM1
x1 = Input(shape=self.input_shape)
dis1 = self.discriminator1()
dis1.trainable = True
d, q1 = dis1(x1)
self.DM1 = Model(inputs=x1, outputs=[d, q1])
# optimizer = RMSprop(lr=1e-3)
optimizer = Adam(lr=0.00005, beta_1=0.5)
self.DM1.compile(loss=['binary_crossentropy', gaussian_loss],
loss_weights=[1, 1], optimizer=optimizer)
return self.DM1
def discriminator_model(self):
if self.DM:
return self.DM
x = Input(shape=self.conc_shape)
dis = self.discriminator()
dis.trainable = True
d, q2 = dis(x)
self.DM = Model(inputs=x, outputs=[d, q2])
# optimizer = RMSprop(lr=1e-3)
optimizer = Adam(lr=0.00005, beta_1=0.5)
self.DM.compile(loss=['binary_crossentropy', gaussian_loss],
loss_weights=[1, 1], optimizer=optimizer)
return self.DM
def adversarial_model1(self):
if self.AM1:
return self.AM1
c1 = Input(shape=(self.latent_dim,))
z1 = Input(shape=(self.noise_dim,))
gen1 = self.generator1()
x1 = gen1([c1, z1])
dis1 = self.discriminator1()
dis1.trainable = False
d, q1 = dis1(x1)
self.AM1 = Model(inputs=[c1, z1], outputs=[d, q1])
# optimizer = RMSprop(lr=1e-3)
optimizer = Adam(lr=0.0002, beta_1=0.5)
self.AM1.compile(loss=['binary_crossentropy', gaussian_loss],
loss_weights=[1, 1], optimizer=optimizer)
return self.AM1
def adversarial_model2(self):
if self.AM2:
return self.AM2
c2 = Input(shape=(self.latent_dim,))
z2 = Input(shape=(self.noise_dim,))
x1 = Input(shape=self.input_shape)
gen2 = self.generator2()
x2 = gen2([c2, z2, x1])
x = concatenate([x1, x2], axis=1)
dis = self.discriminator()
dis.trainable = False
d, q2 = dis(x)
self.AM2 = Model(inputs=[c2, z2, x1], outputs=[d, q2])
# optimizer = RMSprop(lr=1e-3)
optimizer = Adam(lr=0.0002, beta_1=0.5)
self.AM2.compile(loss=['binary_crossentropy', gaussian_loss],
loss_weights=[1, 1], optimizer=optimizer)
return self.AM2
def train(self, train_steps=2000, batch_size=256, save_interval=0, mode='startover'):
g1_fname = '../hgan_idetc2018_data/superformula/naive/generator1.h5'
g2_fname = '../hgan_idetc2018_data/superformula/naive/generator2.h5'
d1_fname = '../hgan_idetc2018_data/superformula/naive/discriminator1.h5'
d_fname = '../hgan_idetc2018_data/superformula/naive/discriminator.h5'
if os.path.isfile(g1_fname) and os.path.isfile(g2_fname) and \
os.path.isfile(d1_fname) and os.path.isfile(d_fname):
trained_existed = True
else:
trained_existed = False
if mode != 'startover' and trained_existed:
self.dis1 = self.D1 = load_model(d1_fname)
self.dis = self.D = load_model(d_fname)
self.gen1 = self.G1 = load_model(g1_fname)
self.gen2 = self.G2 = load_model(g2_fname)
else:
self.dis1 = self.discriminator1()
self.dis = self.discriminator()
self.gen1 = self.generator1()
self.gen2 = self.generator2()
self.dis_model1 = self.discriminator_model1()
self.dis_model = self.discriminator_model()
self.adv_model1 = self.adversarial_model1()
self.adv_model2 = self.adversarial_model2()
if mode != 'evaluate' or not trained_existed:
for t in range(train_steps):
sigma = np.exp(-t/1e4) # annealed noise scale
# Train discriminator model and adversarial model
ind = np.random.choice(self.x_train.shape[0], size=batch_size, replace=False)
X1_train = self.x_train[ind, :100]
X1_train += np.random.normal(scale=sigma, size=X1_train.shape)
y_real = np.zeros((batch_size, 2), dtype=np.uint8)
y_real[:, 1] = 1
# y_real = label_flipping(y_real, .1)
y_latent1 = np.random.uniform(size=(batch_size, self.latent_dim))
y_latent_target1 = np.expand_dims(y_latent1, 1)
y_latent_target1 = np.repeat(y_latent_target1, 2, axis=1)
d1_loss_real = self.dis_model1.train_on_batch(X1_train, [y_real, y_latent_target1])
noise1 = np.random.normal(scale=0.5, size=(batch_size, self.noise_dim))
y_latent1 = np.random.uniform(size=(batch_size, self.latent_dim))
y_latent_target1 = np.expand_dims(y_latent1, 1)
y_latent_target1 = np.repeat(y_latent_target1, 2, axis=1)
X1_fake = self.gen1.predict_on_batch([y_latent1, noise1])
X1_fake += np.random.normal(scale=sigma, size=X1_fake.shape)
y_fake = np.zeros((batch_size, 2), | |
'''
@Author: <NAME>
https://github.com/lindawangg/COVID-Net/blob/master/create_COVIDx_v2.ipynb
@Date: 2020-04-06 15:50:21
@Description: COVIDx data preparation and COVIDNext50 / optical-network training script.
'''
import numpy as np
import pandas as pd
import os
import random
from shutil import copyfile
import pydicom as dicom
import cv2
from torch.utils.data import Dataset,DataLoader
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.nn import CrossEntropyLoss
from PIL import Image
import logging
import sys
import time
ONNET_DIR = os.path.abspath("./python-package/")
sys.path.append(ONNET_DIR) # To find local version of the onnet
#sys.path.append(os.path.abspath("./python-package/cnn_models/"))
from cnn_models.COVIDNext50 import COVIDNext50
from onnet import *
import torch
from torch.optim import Adam
from torchvision import transforms
from sklearn.metrics import f1_score, precision_score, recall_score,accuracy_score,classification_report
isONN=True
class COVID_set(Dataset):
    """COVIDx chest X-ray dataset.

    Each line of ``labels_file`` is "<patient_id> <image_path> <label>";
    labels are mapped to integer classes via ``config.mapping``.
    """

    def __init__(self, config, img_dir, labels_file, transforms):
        self.config = config
        self.img_pths, self.labels = self._prepare_data(img_dir, labels_file)
        self.transforms = transforms

    def _prepare_data(self, img_dir, labels_file):
        # NOTE(review): img_dir is currently unused -- image paths are taken
        # verbatim from the second whitespace-separated field of each line.
        with open(labels_file, 'r') as f:
            rows = [line.split() for line in f.readlines()]
        img_pths = [row[1] for row in rows]
        labels = [self.config.mapping[row[2]] for row in rows]
        return img_pths, labels

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        img = Image.open(self.img_pths[idx]).convert("RGB")
        img_tensor = self.transforms(img)
        label_tensor = torch.tensor(self.labels[idx], dtype=torch.long)
        return img_tensor, label_tensor
def train_test_split():
    """Assemble the COVIDx train/test split files.

    Combines the covid-chestxray-dataset (PA-view images only) with the
    RSNA pneumonia-detection Kaggle data: writes one
    "<patient> <path> <label>" line per sample to train_split_v2.txt /
    test_split_v2.txt, converts the RSNA DICOMs to PNG, and prints the
    final class distribution. Side effects only (files on disk); source
    paths are hard-coded for a local machine.
    """
    seed = 0
    np.random.seed(seed)  # Reset the seed so all runs are the same.
    random.seed(seed)
    MAXVAL = 255  # Range [0 255]

    # path to covid-19 dataset from https://github.com/ieee8023/covid-chestxray-dataset
    imgpath = 'E:/Insegment/covid-chestxray-dataset-master/images'
    csvpath = 'E:/Insegment/covid-chestxray-dataset-master/metadata.csv'

    # path to https://www.kaggle.com/c/rsna-pneumonia-detection-challenge
    kaggle_datapath = 'F:/Datasets/rsna-pneumonia-detection-challenge/'
    kaggle_csvname = 'stage_2_detailed_class_info.csv' # get all the normal from here
    kaggle_csvname2 = 'stage_2_train_labels.csv' # get all the 1s from here since 1 indicate pneumonia
    kaggle_imgpath = 'stage_2_train_images'

    # parameters for COVIDx dataset
    train = []
    test = []
    test_count = {'normal': 0, 'pneumonia': 0, 'COVID-19': 0}
    train_count = {'normal': 0, 'pneumonia': 0, 'COVID-19': 0}

    # Map the source datasets' finding names onto the three COVIDx classes.
    mapping = dict()
    mapping['COVID-19'] = 'COVID-19'
    mapping['SARS'] = 'pneumonia'
    mapping['MERS'] = 'pneumonia'
    mapping['Streptococcus'] = 'pneumonia'
    mapping['Normal'] = 'normal'
    mapping['Lung Opacity'] = 'pneumonia'
    mapping['1'] = 'pneumonia'

    train_file = open("train_split_v2.txt","a")
    test_file = open("test_split_v2.txt", "a")

    # train/test split
    split = 0.1

    csv = pd.read_csv(csvpath, nrows=None)
    idx_pa = csv["view"] == "PA"  # Keep only the PA view
    csv = csv[idx_pa]
    pneumonias = ["COVID-19", "SARS", "MERS", "ARDS", "Streptococcus"]
    pathologies = ["Pneumonia","Viral Pneumonia", "Bacterial Pneumonia", "No Finding"] + pneumonias
    pathologies = sorted(pathologies)

    filename_label = {'normal': [], 'pneumonia': [], 'COVID-19': []}
    count = {'normal': 0, 'pneumonia': 0, 'COVID-19': 0}
    for index, row in csv.iterrows():
        f = row['finding']
        if f in mapping:
            count[mapping[f]] += 1
            entry = [int(row['patientid']), row['filename'], mapping[f]]
            filename_label[mapping[f]].append(entry)
    print('Data distribution from covid-chestxray-dataset:')
    print(count)

    for key in filename_label.keys():
        arr = np.array(filename_label[key])
        if arr.size == 0:
            continue
        # Hand-picked held-out patients (kept fixed for reproducibility).
        if key == 'pneumonia':
            test_patients = ['8', '31']
        elif key == 'COVID-19':
            test_patients = ['19', '20', '36', '42', '86'] # random.sample(list(arr[:,0]), num_test)
        else:
            test_patients = []
        print('Key: ', key)
        print('Test patients: ', test_patients)
        # go through all the patients
        for patient in arr:
            info = f"{str(patient[0])} {imgpath}\{patient[1]} {patient[2]}\n"
            if patient[0] in test_patients:
                test.append(patient); test_count[patient[2]] += 1
                # BUG FIX: test samples were previously written to the
                # *train* split file (and train samples to the test file).
                test_file.write(info)
            else:
                train.append(patient); train_count[patient[2]] += 1
                train_file.write(info)

    # RSNA Kaggle data: 'Normal' rows supply normals, Target==1 rows pneumonia.
    csv_normal = pd.read_csv(os.path.join(kaggle_datapath, kaggle_csvname), nrows=None)
    csv_pneu = pd.read_csv(os.path.join(kaggle_datapath, kaggle_csvname2), nrows=None)
    patients = {'normal': [], 'pneumonia': []}
    for index, row in csv_normal.iterrows():
        if row['class'] == 'Normal':
            patients['normal'].append(row['patientId'])
    for index, row in csv_pneu.iterrows():
        if int(row['Target']) == 1:
            patients['pneumonia'].append(row['patientId'])

    for key in patients.keys():
        arr = np.array(patients[key])
        if arr.size == 0:
            continue
        # Split by patients: hold out `split` fraction; persist the sampled
        # test-patient list to .npy so later runs can reuse it.
        num_diff_patients = len(np.unique(arr))
        num_test = max(1, round(split*num_diff_patients))
        #test_patients = np.load('rsna_test_patients_{}.npy'.format(key)) #
        test_patients = random.sample(list(arr), num_test) #, download the .npy files from the repo.
        np.save('rsna_test_patients_{}.npy'.format(key), np.array(test_patients))
        for patient in arr:
            # Convert each DICOM to PNG in the split's image directory.
            ds = dicom.dcmread(os.path.join(kaggle_datapath, kaggle_imgpath, patient + '.dcm'))
            pixel_array_numpy = ds.pixel_array
            imgname = patient + '.png'
            if patient in test_patients:
                path = os.path.join(kaggle_datapath, 'test', imgname)
                cv2.imwrite(path, pixel_array_numpy)
                test.append([patient, imgname, key]); test_count[key] += 1
                test_file.write(f"{patient} {path} {key}\n" )
                if test_count[key]%50==0:
                    test_file.flush()
            else:
                path = os.path.join(kaggle_datapath, 'train', imgname)
                cv2.imwrite(path, pixel_array_numpy)
                train_file.write(f"{patient} {path} {key}\n")
                if train_count[key]%20==0:
                    train_file.flush()
                train.append([patient, imgname, key]); train_count[key] += 1
            print(f"\r@{path}",end="")

    print('Final stats')
    print('Train count: ', train_count)
    print('Test count: ', test_count)
    print('Total length of train: ', len(train))
    print('Total length of test: ', len(test))
    train_file.close()
    test_file.close()
# Module-level logger; basicConfig runs at import time (INFO level, default handler).
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def save_model(model, config):
    """Serialize model weights plus training metadata to config['save_dir'].

    The checkpoint dict carries the state_dict, the global step and the
    sklearn classification report; the filename embeds the macro-F1 score.
    """
    if isinstance(model, torch.nn.DataParallel):
        # Unwrap DataParallel so the checkpoint loads on a single device.
        weights = model.module.state_dict()
    else:
        weights = model.state_dict()
    state = {
        "state_dict": weights,
        "global_step": config['global_step'],
        "clf_report": config['clf_report'],
    }
    f1_macro = config['clf_report']['macro avg']['f1-score'] * 100
    name = f"{config['name']}_F1_{f1_macro:.2f}_step_{config['global_step']}.pth"
    model_path = os.path.join(config['save_dir'], name)
    torch.save(state, model_path)
    log.info("Saved model to %s", model_path)
def validate(data_loader, model, best_score, global_step, cfg):
    """Run one validation pass and return the (possibly updated) best macro-F1.

    Puts ``model`` in eval mode for the pass and back in train mode before
    returning. Checkpoint saving is currently disabled (commented out).
    """
    model.eval()
    gts, predictions = [], []
    log.info("Validation started...")
    for data in data_loader:
        imgs, labels = data
        imgs = to_device(imgs, gpu=cfg.gpu)
        with torch.no_grad():
            logits = model(imgs)
        if isONN:
            # NOTE(review): scores logits through the module-global `net`,
            # not `model` -- presumably the same object; confirm before
            # refactoring.
            preds = net.predict(logits).cpu().numpy()
        else:
            probs = model.module.probability(logits)
            preds = torch.argmax(probs, dim=1).cpu().numpy()
        labels = labels.cpu().detach().numpy()
        predictions.extend(preds)
        gts.extend(labels)
    predictions = np.array(predictions, dtype=np.int32)
    gts = np.array(gts, dtype=np.int32)
    acc, f1, prec, rec = clf_metrics(predictions=predictions,targets=gts,average="macro")
    report = classification_report(gts, predictions, output_dict=True)
    log.info("\n====== VALIDATION | Accuracy {:.4f} | F1 {:.4f} | Precision {:.4f} | Recall {:.4f}".format(acc, f1, prec, rec))
    if f1 > best_score:
        # BUG FIX: read the `cfg` argument here; this previously used the
        # module-global `config`, making the function unusable with any
        # other configuration object.
        save_config = {
            'name': cfg.name,
            'save_dir': cfg.ckpts_dir,
            'global_step': global_step,
            'clf_report': report
        }
        #save_model(model=model, config=save_config)
        best_score = f1
    model.train()
    return best_score
def train_transforms(width, height):
    """Training-time augmentation pipeline.

    Resizes to (height, width), applies random flips plus optional affine
    and color jitter, converts to grayscale and finally to a tensor.
    """
    return transforms.Compose([
        transforms.Resize((height, width)),
        transforms.RandomVerticalFlip(p=0.5),
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.RandomApply(
            [transforms.RandomAffine(degrees=20,
                                     translate=(0.15, 0.15),
                                     scale=(0.8, 1.2),
                                     shear=5)],
            p=0.5),
        transforms.RandomApply(
            [transforms.ColorJitter(brightness=0.3, contrast=0.3)],
            p=0.5),
        transforms.Grayscale(),
        transforms.ToTensor(),
    ])
def val_transforms(width, height):
    """Validation-time pipeline: resize, grayscale, tensor (no augmentation)."""
    return transforms.Compose([
        transforms.Resize((height, width)),
        transforms.Grayscale(),
        transforms.ToTensor(),
    ])
def to_device(tensor, gpu=False):
    """Move ``tensor`` to the GPU when ``gpu`` is truthy, otherwise to the CPU."""
    if gpu:
        return tensor.cuda()
    return tensor.cpu()
def clf_metrics(predictions, targets, average='macro'):
    """Return (accuracy, F1, precision, recall) for integer class predictions."""
    return (
        accuracy_score(targets, predictions),
        f1_score(targets, predictions, average=average),
        precision_score(targets, predictions, average=average),
        recall_score(targets, predictions, average=average),
    )
def main(model):
    """Train ``model`` on the COVIDx data referenced by the module-global
    ``config``; runs validation every ``config.eval_steps`` optimizer steps.

    Relies on module globals set in the ``__main__`` block: ``config``,
    ``state`` (optional checkpoint dict), ``isONN`` and ``net``.
    """
    if config.gpu and not torch.cuda.is_available():
        raise ValueError("GPU not supported or enabled on this system.")
    use_gpu = config.gpu
    log.info("Loading train dataset")
    train_dataset = COVID_set(config,config.train_imgs, config.train_labels,train_transforms(config.width,config.height))
    train_loader = DataLoader(train_dataset,
        batch_size=config.batch_size,shuffle=True,drop_last=True, num_workers=config.n_threads,pin_memory=use_gpu)
    log.info("Number of training examples {}".format(len(train_dataset)))
    log.info("Loading val dataset")
    val_dataset = COVID_set(config,config.val_imgs, config.val_labels,val_transforms(config.width,config.height))
    val_loader = DataLoader(val_dataset,
                            batch_size=config.batch_size,
                            shuffle=False,
                            num_workers=config.n_threads,
                            pin_memory=use_gpu)
    log.info("Number of validation examples {}".format(len(val_dataset)))
    if use_gpu:
        model.cuda()
        #model = torch.nn.DataParallel(model)
    # Only optimize parameters that still require gradients (frozen layers skipped).
    optim_layers = filter(lambda p: p.requires_grad, model.parameters())
    # optimizer and lr scheduler
    optimizer = Adam(optim_layers,
                     lr=config.lr,
                     weight_decay=config.weight_decay)
    # mode='max': the scheduler is stepped on the validation F1 below.
    scheduler = ReduceLROnPlateau(optimizer=optimizer,
                                  factor=config.lr_reduce_factor,
                                  patience=config.lr_reduce_patience,
                                  mode='max',
                                  min_lr=1e-7)
    # Load the last global_step from the checkpoint if existing
    global_step = 0 if state is None else state['global_step'] + 1
    class_weights = to_device(torch.FloatTensor(config.loss_weights),gpu=use_gpu)
    loss_fn = CrossEntropyLoss(reduction='mean', weight=class_weights)
    # Reset the best metric score
    best_score = -1
    t0=time.time()
    for epoch in range(config.epochs):
        log.info("\nStarted epoch {}/{}".format(epoch + 1,config.epochs))
        for data in train_loader:
            imgs, labels = data
            imgs = to_device(imgs, gpu=use_gpu)
            labels = to_device(labels, gpu=use_gpu)
            logits = model(imgs)
            loss = loss_fn(logits, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Periodic console report of batch-level metrics.
            if global_step % config.log_steps == 0 and global_step > 0:
                if isONN:
                    # NOTE(review): uses the module-global `net`, not `model`
                    # -- presumably the same object; confirm.
                    preds = net.predict(logits).cpu().numpy()
                else:
                    probs = model.module.probability(logits)
                    preds = torch.argmax(probs, dim=1).detach().cpu().numpy()
                labels = labels.cpu().detach().numpy()
                acc, f1, _, _ = clf_metrics(preds, labels)
                lr = optimizer.param_groups[0]['lr'] #get_learning_rate(optimizer)
                print(f"\r{global_step} | batch: Loss={loss.item():.3f} | F1={f1:.3f} | Accuracy={acc:.4f} | LR={lr:.2e}\tT={time.time()-t0:.4f}",end="")
            # Periodic validation; the LR scheduler tracks the best F1.
            if global_step % config.eval_steps == 0 and global_step > 0:
                best_score = validate(val_loader, model,best_score=best_score,global_step=global_step,cfg=config)
                scheduler.step(best_score)
            global_step += 1
def UpdateConfig(config):
    """Populate ``config`` with the COVIDNext50 training settings (in place)
    and return it."""
    settings = {
        "name": "COVIDNext50_NewData",
        "gpu": True,
        "batch_size": 16,
        "n_threads": 4,
        "random_seed": 1337,
        "weights": "E:/Insegment/COVID-Next-Pytorch-master/COVIDNext50_NewData_F1_92.98_step_10800.pth",
        "lr": 1e-4,
        "weight_decay": 1e-3,
        "lr_reduce_factor": 0.7,
        "lr_reduce_patience": 5,
        # Data (img dirs unused; label files carry full paths)
        "train_imgs": None,  # "/data/ssd/datasets/covid/COVIDxV2/data/train"
        "train_labels": "E:/ONNet/data/covid_train_split_v2.txt",
        "val_imgs": None,  # "/data/ssd/datasets/covid/COVIDxV2/data/test"
        "val_labels": "E:/ONNet/data/covid_test_split_v2.txt",
        # Categories mapping
        "mapping": {
            'normal': 0,
            'pneumonia': 1,
            'COVID-19': 2
        },
        # Loss weigths order follows the order in the category mapping dict
        "loss_weights": [0.05, 0.05, 1.0],
        "width": 256,
        "height": 256,
        # Training
        "epochs": 300,
        "log_steps": 5,
        "eval_steps": 400,
        "ckpts_dir": "./experiments/ckpts",
    }
    for key, value in settings.items():
        setattr(config, key, value)
    config.n_classes = len(config.mapping)
    return config
IMG_size = (256, 256)  # square input image size fed to NET_config in the __main__ block
if __name__ == '__main__':
config_0 = NET_config("DNet",'covid',IMG_size,0.01,batch_size=16, nClass=3, nLayer=5)
#config_0 = RGBO_CNN_config("RGBO_CNN",'covid',IMG_size,0.01,batch_size=16, nClass=3, nLayer=5)
if isONN:
env_title, net = DNet_instance(config_0)
#env_title, net = RGBO_CNN_instance(config_0)
config = net.config
config = UpdateConfig(config)
config.batch_size = 64
config.log_steps = 10
config.lr = 0.001
state = None
else:
config = UpdateConfig(config_0)
if config.weights:
state = torch.load(config.weights)
log.info("Loaded model weights from: {}".format(config.weights))
else:
state = None
state_dict = state["state_dict"] if state else None
net = COVIDNext50(n_classes=config.n_classes)
if state_dict:
net = | |
authenticated and unauthenticated roles are supported.
- *(string) --*
- *(string) --*
- **RoleMappings** *(dict) --*
How users for a specific identity provider are to mapped to roles. This is a String-to- RoleMapping object map. The string identifies the identity provider, for example, "graph.facebook.com" or "cognito-idp.us-east-1.amazonaws.com/us-east-1_abcdefghi:app_client_id".
- *(string) --*
- *(dict) --*
A role mapping.
- **Type** *(string) --*
The role mapping type. Token will use ``cognito:roles`` and ``cognito:preferred_role`` claims from the Cognito identity provider token to map groups to roles. Rules will attempt to match claims from the token to map to a role.
- **AmbiguousRoleResolution** *(string) --*
If you specify Token or Rules as the ``Type`` , ``AmbiguousRoleResolution`` is required.
Specifies the action to be taken if either no rules match the claim value for the ``Rules`` type, or there is no ``cognito:preferred_role`` claim and there are multiple ``cognito:roles`` matches for the ``Token`` type.
- **RulesConfiguration** *(dict) --*
The rules to be used for mapping users to roles.
If you specify Rules as the role mapping type, ``RulesConfiguration`` is required.
- **Rules** *(list) --*
An array of rules. You can specify up to 25 rules per identity provider.
Rules are evaluated in order. The first one to match specifies the role.
- *(dict) --*
A rule that maps a claim name, a claim value, and a match type to a role ARN.
- **Claim** *(string) --*
The claim name that must be present in the token, for example, "isAdmin" or "paid".
- **MatchType** *(string) --*
The match condition that specifies how closely the claim value in the IdP token must match ``Value`` .
- **Value** *(string) --*
A brief string that the claim must match, for example, "paid" or "yes".
- **RoleARN** *(string) --*
The role ARN.
:type IdentityPoolId: string
:param IdentityPoolId: **[REQUIRED]**
An identity pool ID in the format REGION:GUID.
:rtype: dict
:returns:
"""
pass
    def get_open_id_token(self, IdentityId: str, Logins: Dict = None) -> Dict:
        """
        Gets an OpenID token, using a known Cognito ID. This known Cognito ID is returned by GetId . You can optionally add additional logins for the identity. Supplying multiple logins creates an implicit link.
        The OpenId token is valid for 10 minutes.
        This is a public API. You do not need any credentials to call this API.
        See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/GetOpenIdToken>`_
        **Request Syntax**
        ::
          response = client.get_open_id_token(
              IdentityId='string',
              Logins={
                  'string': 'string'
              }
          )
        **Response Syntax**
        ::
            {
                'IdentityId': 'string',
                'Token': 'string'
            }
        **Response Structure**
          - *(dict) --*
            Returned in response to a successful GetOpenIdToken request.
            - **IdentityId** *(string) --*
              A unique identifier in the format REGION:GUID. Note that the IdentityId returned may not match the one passed on input.
            - **Token** *(string) --*
              An OpenID token, valid for 10 minutes.
        :type IdentityId: string
        :param IdentityId: **[REQUIRED]**
          A unique identifier in the format REGION:GUID.
        :type Logins: dict
        :param Logins:
          A set of optional name-value pairs that map provider names to provider tokens. When using graph.facebook.com and www.amazon.com, supply the access_token returned from the provider\'s authflow. For accounts.google.com, an Amazon Cognito user pool provider, or any other OpenId Connect provider, always include the ``id_token`` .
          - *(string) --*
            - *(string) --*
        :rtype: dict
        :returns:
        """
        # Auto-generated client stub: botocore builds and dispatches the real
        # request at runtime, so this placeholder body is intentionally empty.
        pass
def get_open_id_token_for_developer_identity(self, IdentityPoolId: str, Logins: Dict, IdentityId: str = None, TokenDuration: int = None) -> Dict:
    """
    Register (or retrieve) a Cognito ``IdentityId`` and an OpenID Connect
    token for a user authenticated by your backend process.

    Supplying multiple logins will create an implicit linked account. Only
    one developer provider may appear in the ``Logins`` map; it is the
    "domain" by which Cognito refers to your users and is linked to the
    identity pool. Pass a null ``IdentityId`` to create a new identity, or
    an existing ``IdentityId`` to associate a new login with it; the
    identity is created in the given ``IdentityPoolId``. You must use AWS
    Developer credentials to call this API.

    See also: `AWS API Documentation
    <https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/GetOpenIdTokenForDeveloperIdentity>`_

    **Request Syntax**
    ::
        response = client.get_open_id_token_for_developer_identity(
            IdentityPoolId='string',
            IdentityId='string',
            Logins={
                'string': 'string'
            },
            TokenDuration=123
        )

    :type IdentityPoolId: string
    :param IdentityPoolId: **[REQUIRED]** An identity pool ID in the format
        REGION:GUID.
    :type IdentityId: string
    :param IdentityId: A unique identifier in the format REGION:GUID.
    :type Logins: dict
    :param Logins: **[REQUIRED]** Name-value pairs mapping provider names
        to provider tokens. A developer-provider entry follows the syntax
        ``"developer_provider_name": "developer_user_identifier"``, where
        the identifier is one from your backend that uniquely identifies a
        user. Supported logins are specified when the pool is created.
    :type TokenDuration: integer
    :param TokenDuration: Expiration time of the token, in seconds, so the
        token can be cached (default 15 minutes, maximum 24 hours). The
        token can be exchanged with Amazon STS for temporary credentials
        valid for at most one hour. Choose carefully: a leaked token grants
        access to your AWS resources for its whole duration.
    :rtype: dict
    :returns: A dict with ``IdentityId`` (REGION:GUID) and ``Token`` (an
        OpenID token).
    """
    pass
def get_paginator(self, operation_name: str = None) -> Paginator:
    """
    Create a paginator for an operation.

    :type operation_name: string
    :param operation_name: The operation name — identical to the method
        name on the client. For example, if the method is ``create_foo``
        (normally invoked as ``client.create_foo(**kwargs)``) and that
        operation can be paginated, call
        ``client.get_paginator("create_foo")``.
    :raise OperationNotPageableError: Raised if the operation is not
        pageable; use ``client.can_paginate`` to check beforehand.
    :rtype: L{botocore.paginate.Paginator}
    :return: A paginator object.
    """
    pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
    """
    Return an object that can wait for some condition.

    :type waiter_name: str
    :param waiter_name: Name of the waiter to get; the waiters section of
        the service docs lists the available waiters.
    :returns: The specified waiter object.
    :rtype: botocore.waiter.Waiter
    """
    pass
def list_identities(self, IdentityPoolId: str, MaxResults: int, NextToken: str = None, HideDisabled: bool = None) -> Dict:
"""
Lists the identities in an identity pool.
You must use AWS Developer credentials to call this API.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/cognito-identity-2014-06-30/ListIdentities>`_
**Request Syntax**
::
response = client.list_identities(
IdentityPoolId='string',
MaxResults=123,
NextToken='string',
HideDisabled=True|False
)
**Response Syntax**
::
{
'IdentityPoolId': 'string',
'Identities': [
{
'IdentityId': 'string',
'Logins': [
'string',
],
'CreationDate': datetime(2015, 1, 1),
'LastModifiedDate': datetime(2015, 1, 1)
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
The response to a ListIdentities request.
- **IdentityPoolId** *(string) --*
An identity pool ID in the format REGION:GUID.
- **Identities** *(list) | |
qualifier=False, statement_type="predicate")
#statements_test = get_all_statements_of_entity('Q176198')
#id_statements_test = get_statements_by_id(statements_test, 'Q176198', 'P725')
#id_statements_test[0]
# In[107]:
# parameters
# cluster_root_ids: ['Qcode']
# predicates_ids: ['Pcode']
def cluster_extend_by_predicates_ids(cluster_root_ids, predicates_ids):
    """Collect statements of the root entities that match the given predicate ids.

    For each root id ('Qcode'), both qualifier-level and direct
    (predicate-level) statements are gathered for every predicate id
    ('Pcode').

    :param cluster_root_ids: list of Wikidata entity ids.
    :param predicates_ids: list of Wikidata predicate ids.
    :returns: flat list of matching statements.
    """
    collected = []
    for root_id in cluster_root_ids:
        statements = get_all_statements_of_entity(root_id)
        for pred_id in predicates_ids:
            collected.extend(
                get_statements_by_id(statements, root_id, pred_id,
                                     qualifier=True,
                                     statement_type="qualifier_predicate"))
            collected.extend(
                get_statements_by_id(statements, root_id, pred_id,
                                     qualifier=False,
                                     statement_type="predicate"))
    return collected
#test_predicate_clusters = cluster_extend_by_predicates_ids(extract_ids(q0_themes[0]), extract_ids(q0_predicates))
#print(len(test_predicate_clusters))
#test_predicate_clusters[0]
#test_predicate_clusters_test_2 = cluster_extend_by_predicates_ids(extract_ids(q0_themes_test_2[0]), extract_ids(q0_predicates_test_2))
#print(len(test_predicate_clusters_test_2))
#print(test_predicate_clusters_test_2[-1])
# In[108]:
def cluster_extractor_from_complements(complements):
    """Debug-print the part-of-speech tag of every token and return *complements* unchanged.

    :param complements: iterable of token sequences; each token exposes ``pos_``.
    :returns: the same *complements* object, untouched.
    """
    for tokens in complements:
        # Plain loop instead of the original throwaway list comprehension:
        # the print calls are pure side effects.
        for token in tokens:
            print(token.pos_)
    return complements
#print(cluster_extractor_from_complements(q0_themes[1]))
# In[110]:
#TODO: add cache
#TODO: Check if extending with predicate_ids is useful
# parameter
# question: nlp_string
#limits=plt.axis('off')
def build_graph(nlp, themes, themes_enhanced, predicates, deep_k=10):
    """Build a Wikidata statement graph around the question's themes and predicates.

    :param nlp: parsed question (kept for API compatibility; not used directly here).
    :param themes: (theme list, complement list) pair as produced by get_themes.
    :param themes_enhanced: enhanced theme (word, ids) pairs from get_enhanced_themes.
    :param predicates: predicate (word, ids) pairs from get_predicates*.
    :param deep_k: breadth limit passed to cluster_extend_by_words.
    :returns: result of make_statements_graph on the deduplicated statements
        (callers unpack it as ``graph, predicates_dict`` — presumably a pair;
        confirm against make_statements_graph).
    """
    theme_ids = extract_ids(themes[0])
    theme_enhanced_ids = extract_ids(themes_enhanced)
    predicates_ids = extract_ids(predicates)
    predicates_enhanced_ids = [p for p in theme_enhanced_ids if is_wd_predicate(p)]
    predicates_enhanced = merge_lists(
        [get_nlp(get_wd_label(p)) for p in predicates_enhanced_ids],
        predicates_enhanced_ids)
    # Drop enhanced ids that duplicate primary theme ids. The original
    # popped from the list while iterating it with enumerate(), which
    # skips the element following every removal; filtering avoids that bug.
    theme_enhanced_ids = [tei for tei in theme_enhanced_ids if tei not in theme_ids]
    predicate_words = [p[0].text for p in predicates + predicates_enhanced]
    init_clusters = cluster_extend_by_words(theme_ids, predicate_words, top_k=deep_k)
    init_clusters_enhanced = cluster_extend_by_words(theme_enhanced_ids, predicate_words, top_k=deep_k)
    init_sorted_statements = sort_statements_by_similarity(init_clusters + init_clusters_enhanced)
    init_flatten_statements = statements_flatter([s[1] for s in init_sorted_statements])
    all_predicate_ids = predicates_ids + predicates_enhanced_ids
    predicate_ids_clusters = cluster_extend_by_predicates_ids(theme_ids, all_predicate_ids)
    predicate_ids_enhanced_clusters = cluster_extend_by_predicates_ids(theme_enhanced_ids, all_predicate_ids)
    predicate_ids_flatten_statements = statements_flatter(
        predicate_ids_clusters + predicate_ids_enhanced_clusters)
    clusters = init_flatten_statements + predicate_ids_flatten_statements
    filtered_statements = unduplicate_statements(clusters)
    return make_statements_graph(filtered_statements)
#q0_test = questions[0]
#q0_test = "Which actor voiced the Unicorn in The Last Unicorn?"
#q0_test = "what was the cause of death of yves klein"
#q0_test = "Who is the wife of <NAME>?"
#q0_test = "Who is the author of <NAME>?"
#q0_nlp_test = get_nlp(q0_test)
#q0_themes_test = get_themes(q0_nlp_test, top_k=3)
#q0_themes_enhanced_test = get_enhanced_themes(q0_themes_test, top_k=3)
#q0_predicates_test = get_predicates_online(q0_nlp_test, top_k=3)
#q0_focused_parts_test = []
#graph, predicates_dict = build_graph(q0_nlp_test, q0_themes_test, q0_themes_enhanced_test, q0_predicates_test, deep_k=3)
#print(predicates_dict)
#plot_graph(graph, "file_name_graph", "Graph_title")
# In[111]:
# check the graph for complements
# parameters
# name: string
def find_name_in_graph(graph, name):
    """Return ids of all graph nodes whose 'name' attribute equals *name*, case-insensitively."""
    target = name.lower()
    matches = []
    for node_id, attrs in graph.nodes(data=True):
        if attrs['name'].lower() == target:
            matches.append(node_id)
    return matches
#[find_name_in_graph(c.text) for c in q0_themes[1]]
#print(find_name_in_graph(graph, "the unicorn"))
# In[113]:
# TODO: clean the complements by removing stopwords etc.
def find_theme_complement(graph, themes):
    """Flatten the node-id matches for every complement token in themes[1].

    Complement tokens whose name matches no node are dropped.
    """
    flattened = []
    for token in themes[1]:
        ids = find_name_in_graph(graph, token.text)
        if ids:
            flattened.extend(ids)
    return flattened
#print(find_theme_complement(graph, q0_themes_test))
#[i for i in itertools.chain.from_iterable([id for id in check_theme_complement(graph, q0_themes)])]
# In[114]:
def find_paths_in_graph(graph, node_start, node_end):
    """Return all simple paths in *graph* from *node_start* to *node_end*, materialized as a list."""
    return list(nx.all_simple_paths(graph, source=node_start, target=node_end))
#test_paths = find_paths_in_graph(graph, "Q16205566", "Q7774795")
#print(test_paths)
# In[115]:
def is_id_in_graph(graph, node_id):
    """Check whether *node_id* is a node of *graph*."""
    present = graph.has_node(node_id)
    return present
#print(is_id_in_graph(graph, "Q24039104"))
# In[116]:
def is_name_in_graph(graph, node_name):
    """Check whether some graph node's 'name' attribute matches *node_name* (case-insensitive)."""
    return bool(find_name_in_graph(graph, node_name))
#print(is_name_in_graph(graph, "the Unicorn"))
# In[118]:
def find_paths_for_themes(graph, themes):
    """Find all simple paths between theme nodes and complement nodes of the graph.

    Only theme/complement ids actually present in the graph are considered.

    :returns: flat list of paths; each path is a list of node ids.
    """
    theme_ids = list(extract_ids(themes[0]))
    complement_ids = find_theme_complement(graph, themes)
    collected = []
    for theme_id in theme_ids:
        if not is_id_in_graph(graph, theme_id):
            continue
        for complement_id in complement_ids:
            if not is_id_in_graph(graph, complement_id):
                continue
            found = find_paths_in_graph(graph, theme_id, complement_id)
            if found:
                collected.append(found)
    # Flatten one nesting level: each entry of *collected* is itself a list of paths.
    return [path for group in collected for path in group]
#print(find_paths_for_themes(graph, q0_themes_test))
#print(find_paths_for_themes(graph, q0_themes))
# In[119]:
def get_node_predicates_from_path(paths):
    """Extract the unique predicate ids embedded in path node labels.

    Node labels look like "P123-...", so the predicate id is the text before
    the first '-'. First-appearance order is preserved.

    :param paths: iterable of paths (lists of node label strings).
    :returns: list of unique Wikidata predicate ids.
    """
    predicates = []
    for path in paths:
        for node in path:
            # Slice once per node; the original re-computed node[:node.find("-")]
            # three times inside a side-effect list comprehension.
            pred_id = node[:node.find("-")]
            if is_wd_predicate(pred_id) and pred_id not in predicates:
                predicates.append(pred_id)
    return predicates
#test_node_predicates = get_node_predicates_from_path(test_paths)
#print(test_node_predicates)
# In[120]:
def get_node_predicate_similarity_from_path(paths, predicates):
    """Score each predicate found in the paths against every candidate predicate id.

    :returns: list of (path_predicate_id, similarity) pairs, best score first.
    """
    path_predicates = get_node_predicates_from_path(paths)
    scored = []
    for pred in predicates:
        for candidate_id in pred[1]:
            for path_pred in path_predicates:
                scored.append((path_pred, get_similarity_by_ids(candidate_id, path_pred)))
    scored.sort(key=lambda pair: pair[-1], reverse=True)
    return scored
#test_node_pedicate_similarities = get_node_predicate_similarity_from_path(test_paths, q0_predicates)
#print(test_node_pedicate_similarities)
# In[121]:
def get_focused_parts(nlp_sentence, top_k=3):
    """Return the syntactic heads of the sentence's wh-words, merged with their Wikidata id candidates.

    :param nlp_sentence: spaCy-parsed sentence.
    :param top_k: how many candidate ids to fetch per head word.
    """
    wh_tags = ("WDT", "WP", "WP$", "WRB")
    heads = [token.head for token in nlp_sentence if token.tag_ in wh_tags]
    head_ids = [get_wd_ids(head.text, top_k=top_k) for head in heads]
    return merge_lists(heads, head_ids)
#print(get_focused_parts(q0_nlp_test))
# In[367]:
#questions_2 = ("what was the cause of death of <NAME>",
# "Who is the wife of <NAME>?",
# "Who is the president of the United States?",
# "When was produced the first Matrix movie?",
# "Who made the soundtrack of the The Last Unicorn movie?",
# "Who is the author of <NAME>?",
# "Which actor voiced the Unicorn in The Last Unicorn?",
# "how is called the rabbit in Alice in Wonderland?",
# "what city was a<NAME> born in",
# "which stadium do the wests tigers play in"
# )
#
#question_2 = questions_2[6] #"what city was a<NAME> born in"#
#q_nlp_2 = get_nlp(question_2)
#q_themes_2 = get_themes(q_nlp_2, top_k=3)
#q_themes_enhanced_2 = get_enhanced_themes(q_themes_2, top_k=3)
#q_predicates_2 = get_predicates(q_nlp_2, top_k=3)
#q_predicates_2 = q_predicates_2 if q_predicates_2[0][1] else get_predicates_online(q_nlp_2, top_k=3)
#q_focused_parts_2 = get_focused_parts(q_nlp_2)
#print("q_nlp:", q_nlp_2)
#print("e\t\te.pos_\te.tag_\te.dep_\te.head\te.children")
#for e in q_nlp_2:
# print(e.text,"\t\t", e.pos_,"\t", e.tag_,"\t", e.dep_,"\t", e.head, "\t", [child for child in e.children])
#
#print("\nq_themes:", q_themes_2)
#print("q_themes_enhanced:",q_themes_enhanced_2)
#print("q_predicates:", q_predicates_2)
#print("q_focused_parts:", q_focused_parts_2)
#
#graph_2, predicates_dict_2 = build_graph(q_nlp_2, q_themes_2, q_themes_enhanced_2, q_predicates_2)
#print(predicates_dict_2)
#plot_graph(graph_2, "main_graph", "Main_graph_title")
#answers_2 = find_anwser_from_graph_2(graph, q0_nlp, q0_themes, q_themes_enhanced_2, q_predicates_2, q_focused_parts_2)
# In[368]:
def add_compound(nlp_list, themes):
    """Return theme texts that contain the first token of any entry of *nlp_list*.

    Used to map ADP-child tokens back onto full (compound) theme phrases.

    :param nlp_list: list of token sequences; only the first token of each is used.
    :param themes: (theme pairs, complement tokens) — texts are taken from
        ``themes[0][i][0]`` and ``themes[1]``.
    :returns: matching theme texts collected so far; on malformed input the
        partial result is returned (best-effort, like the original).
    """
    compounded = []
    try:
        for theme_tok in [e[0] for e in themes[0]] + themes[1]:
            for lead_tok in [n[0] for n in nlp_list]:
                if lead_tok.text.lower() in theme_tok.text.lower():
                    compounded.append(theme_tok.text)
        return compounded
    # Narrowed from a bare ``except:`` which also swallowed KeyboardInterrupt
    # and genuine bugs; these are the structural errors malformed input raises.
    except (AttributeError, IndexError, TypeError):
        return compounded
# TODO: make the predicate search go further in the path list for the !i%2
def find_paths_keywords(graph, nlp, themes, themes_enhanced, predicates, focused_parts):
    """Extract the keyword set used to search the graph for answer paths.

    Combines wh-word heads, adposition heads/tails, themes and enhanced
    themes into a lowercase keyword list, and builds an alternative-word map
    for enhanced predicates.

    :param graph: statement graph (not read here, kept for call-site symmetry).
    :param nlp: spaCy-parsed question.
    :param themes: (theme pairs, complement tokens) from get_themes.
    :param themes_enhanced: (word, ids) pairs from get_enhanced_themes.
    :param predicates: predicate (word, ids) pairs (unused in the body —
        presumably kept for API symmetry; confirm before removing).
    :param focused_parts: (head token, ids) pairs from get_focused_parts.
    :returns: (paths_keywords, alterniative_words, question_anchors) where
        paths_keywords is a deduplicated lowercase keyword list,
        alterniative_words maps a word to [nlp(label), [predicate id]],
        and question_anchors are the wh-word tokens.
    """
    # POS/tag filters: wh-words, verbs/auxiliaries, nouns, adpositions.
    WH_FILTER = ["WDT", "WP", "WP$", "WRB"]
    VERB_FILTER = ["VERB", "AUX"]
    NOUN_FILTER = ["NOUN","PROPN"]
    POSITION_FILTER = ["ADP"]
    focused_parts_words = [t[0].text for t in focused_parts]
    # Flatten each focused part's candidate-id list into one id list.
    focused_parts_ids = [j for i in [t[1] for t in focused_parts] for j in i]
    focused_parts_predicates_ids = [f for f in focused_parts_ids if is_wd_predicate(f)]
    focused_parts_words_ids = [f for f in focused_parts_ids if is_wd_entity(f)]
    focused_parts_words_ids_labeled = [get_wd_label(p) for p in focused_parts_words_ids]
    #print(focused_parts_words_2)
    # Wh-words anchor the question ("who", "which", ...).
    question_anchors = [t for t in nlp if t.tag_ in WH_FILTER]
    themes_enhanced_list = [t[0] for t in themes_enhanced]
    focus_themes = [t[0].text for t in themes[0]]
    # Noun siblings of pronouns (what the pronoun likely refers to).
    focus_path_by_tails = [[c for c in t.head.children if c.pos_ in NOUN_FILTER] for t in nlp if t.pos_ == "PRON"]
    focus_part_by_head = [t.head for t in question_anchors]
    predicates_nlp = [t for t in nlp if t.pos_ in VERB_FILTER]
    predicates_lemma = [t.lemma_ for t in predicates_nlp]
    predicates_attention = [t for t in nlp if t.head in predicates_nlp]
    predicates_attention_tails = [[c for c in t.children] for t in predicates_attention]
    # Heads and (compound-expanded) children of adpositions ("in X", "of Y").
    in_attention_heads = [t.head.text for t in nlp if t.pos_ in POSITION_FILTER]
    in_attention_tails = add_compound([[c for c in t.children] for t in nlp if t.pos_ in POSITION_FILTER], themes)
    # Keep only enhanced themes that the adposition context mentions.
    focus_themes_enhanced = [t[0] for t in themes_enhanced
        if t[0].lower() in [a.lower() for a in in_attention_tails]
        or t[0].lower() in [a.lower() for a in in_attention_heads]]
    theme_enhanced_ids = extract_ids(themes_enhanced)
    predicates_enhanced_ids = [(p) for p in theme_enhanced_ids if is_wd_predicate(p)]
    [predicates_enhanced_ids.append(p) for p in focused_parts_predicates_ids if p not in predicates_enhanced_ids]
    # Map each enhanced theme word (or predicate label) to its nlp form and id.
    alterniative_words = {}
    for t in themes_enhanced:
        for e in predicates_enhanced_ids:
            if e in t[1]:
                alterniative_words[t[0]] = [get_nlp(get_wd_label(e)),[e]]
            else:
                alterniative_words[get_wd_label(e)] = [get_nlp(get_wd_label(e)),[e]]
    #print("focused_parts_predicates_ids",focused_parts_predicates_ids)
    #print("focused_parts_words_ids",focused_parts_words_ids)
    #print("alterniative_words",alterniative_words)
    #print("predicates_enhanced_ids",predicates_enhanced_ids)
    ##print("predicates_enhanced",predicates_enhanced)
    #print("question_anchors",question_anchors)
    #print("in_attention_heads",in_attention_heads)
    #print("in_attention_tails",in_attention_tails)
    #print("focus_themes",focus_themes)
    #print("themes_enhanced_list",themes_enhanced_list)
    #print("focus_themes_enhanced",focus_themes_enhanced)
    #print("focus_path_by_tails",focus_path_by_tails)
    #print("focus_part_by_head",focus_part_by_head)
    #print("predicates_nlp",predicates_nlp)
    #print("predicates_lemma",predicates_lemma)
    #print("predicates_attention",predicates_attention)
    #print("predicates_attention_tails",predicates_attention_tails)
    #
    #print("\n")
    # Deduplicated, lowercased union of every keyword source above.
    paths_keywords = []
    [paths_keywords.append(e.lower()) for e in focused_parts_words + in_attention_heads + in_attention_tails + focus_themes + focus_themes_enhanced + focused_parts_words_ids_labeled if e.lower() not in paths_keywords]
    #print(paths_keywords)
    #paths_keywords = [p for p in itertools.permutations(paths_keywords)]
    #print(paths_keywords)
    return paths_keywords, alterniative_words, question_anchors
#initial_paths = find_paths_for_themes(graph, themes)
#predicate_id_similarities = get_node_predicate_similarity_from_path(initial_paths, predicates)
#best_path = [p for p in initial_paths if predicate_id_similarities[0][0] == p[1][:p[1].find("-")]]
#path_answer = get_wd_label(best_path[0][2]) if best_path else []
#return (path_answer, best_path[0][2]) if path_answer else (False, False)
#paths_keywords_2 = find_paths_keywords(graph_2, q_nlp_2, q_themes_2, q_themes_enhanced_2, q_predicates_2, q_focused_parts_2)
#paths_keywords_2
# In[369]:
def get_paths_keywords_nodes(graph, keywords, threshold=0.9, top_performance=50):
    """Find all simple paths between nodes whose names resemble the keywords.

    For every keyword, nodes with name similarity >= *threshold* are
    collected; then paths are searched between every ordered pair of
    keyword node groups.

    :param graph: networkx statement graph with node attribute 'name'.
    :param keywords: list of keyword strings.
    :param threshold: minimum spaCy similarity for a node to match a keyword.
    :param top_performance: cap on the cross product of the first two
        candidate lists, to keep the path search tractable.
    :returns: list of unique paths (lists of node ids).
    """
    keywords_nodes = []
    for keyword in keywords:
        nlp_lookup = get_nlp(keyword)
        keywords_nodes.append([node for node, attrs in graph.nodes(data=True)
                               if get_nlp(attrs['name']).similarity(nlp_lookup) >= threshold])
    # Guard on len() >= 2: the original indexed keywords_nodes[1] after only
    # checking non-emptiness and raised IndexError for a single keyword.
    if len(keywords_nodes) >= 2:
        if len(keywords_nodes[1]) * len(keywords_nodes[0]) > top_performance:
            if len(keywords_nodes[0]) <= int(sqrt(top_performance)):
                keywords_nodes[1] = keywords_nodes[1][:int(top_performance / len(keywords_nodes[0]))]
            elif len(keywords_nodes[0]) >= len(keywords_nodes[1]):
                keywords_nodes[0] = keywords_nodes[0][:int(top_performance / len(keywords_nodes[1]))]
            else:
                keywords_nodes[0] = keywords_nodes[0][:int(sqrt(top_performance))]
                keywords_nodes[1] = keywords_nodes[1][:int(sqrt(top_performance))]
    paths_keyword_nodes = []
    for source_nodes, target_nodes in itertools.permutations(keywords_nodes, 2):
        for source in source_nodes:
            for target in target_nodes:
                for path in nx.all_simple_paths(graph, source=source, target=target):
                    # Explicit loop instead of a side-effect list comprehension.
                    if path not in paths_keyword_nodes:
                        paths_keyword_nodes.append(path)
    return paths_keyword_nodes
def find_path_nodes_from_graph_2(graph, keywords, threshold=0.9, thres_inter=0.2, top_k=3, top_performance=50, min_paths=3000):
    """Collect keyword paths, relaxing the similarity threshold until enough are found.

    The threshold drops by *thres_inter* (clamped at 0) whenever fewer than
    *min_paths* paths are found; at threshold 0 the result is returned as-is.
    Alternative words from ``keywords[1]`` are substituted for matching
    entries of ``keywords[0]`` and searched too. Iterative rewrite of the
    original tail recursion.
    """
    while True:
        keyword_paths = get_paths_keywords_nodes(
            graph, keywords[0], threshold=threshold, top_performance=top_performance)
        for alt_word in keywords[1]:
            for i, keyword in enumerate(keywords[0]):
                if alt_word == keyword:
                    substituted = keywords[0].copy()
                    substituted[i] = keywords[1][alt_word][0].text
                    keyword_paths += get_paths_keywords_nodes(
                        graph, substituted, threshold=threshold, top_performance=top_performance)
        if len(keyword_paths) >= min_paths or threshold == 0:
            return keyword_paths
        threshold -= thres_inter
        if threshold < 0:
            threshold = 0
#start_time = time.time()
#path_nodes_2 = find_path_nodes_from_graph_2(graph_2, paths_keywords_2, threshold=0.9, thres_inter=0.15, top_performance=50, min_paths=3000)
#end_time = time.time()
#print("Finding path nodes ->\tRunning time is {}s".format(round(end_time-start_time,2)))
#print(path_nodes_2)
# In[370]:
#node_predicates_names_2 = get_node_predicates_from_path(path_nodes_2)
def is_sublist(a, b):
    """Return True when list *a* is a prefix of list *b*; an empty *a* always matches."""
    if not a:
        return True
    return bool(b) and b[:len(a)] == a
# -*- coding: utf-8 -*-
"""
VirtualBox COM/XPCOM constants.
This file is autogenerated from VirtualBox.xidl, DO NOT EDIT!
"""
__copyright__ = \
"""
Copyright (C) 2009-2016 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 106555 $";
class VirtualBoxReflectionInfo:
"""
Enum constants for the various python styles.
"""
def __init__(self, fIsSym):
self.__fIsSym = fIsSym
__dValues = {
'SettingsVersion': {
'Null': 0,
'v1_0': 1,
'v1_1': 2,
'v1_2': 3,
'v1_3pre': 4,
'v1_3': 5,
'v1_4': 6,
'v1_5': 7,
'v1_6': 8,
'v1_7': 9,
'v1_8': 10,
'v1_9': 11,
'v1_10': 12,
'v1_11': 13,
'v1_12': 14,
'v1_13': 15,
'v1_14': 16,
'v1_15': 17,
'v1_16': 18,
'Future': 99999,
},
'AccessMode': {
'ReadOnly': 1,
'ReadWrite': 2,
},
'MachineState': {
'Null': 0,
'PoweredOff': 1,
'Saved': 2,
'Teleported': 3,
'Aborted': 4,
'Running': 5,
'Paused': 6,
'Stuck': 7,
'Teleporting': 8,
'LiveSnapshotting': 9,
'Starting': 10,
'Stopping': 11,
'Saving': 12,
'Restoring': 13,
'TeleportingPausedVM': 14,
'TeleportingIn': 15,
'FaultTolerantSyncing': 16,
'DeletingSnapshotOnline': 17,
'DeletingSnapshotPaused': 18,
'OnlineSnapshotting': 19,
'RestoringSnapshot': 20,
'DeletingSnapshot': 21,
'SettingUp': 22,
'Snapshotting': 23,
'FirstOnline': 5,
'LastOnline': 19,
'FirstTransient': 8,
'LastTransient': 23,
},
'SessionState': {
'Null': 0,
'Unlocked': 1,
'Locked': 2,
'Spawning': 3,
'Unlocking': 4,
},
'CPUPropertyType': {
'Null': 0,
'PAE': 1,
'LongMode': 2,
'TripleFaultReset': 3,
'APIC': 4,
'X2APIC': 5,
},
'HWVirtExPropertyType': {
'Null': 0,
'Enabled': 1,
'VPID': 2,
'NestedPaging': 3,
'UnrestrictedExecution': 4,
'LargePages': 5,
'Force': 6,
},
'ParavirtProvider': {
'None': 0,
'Default': 1,
'Legacy': 2,
'Minimal': 3,
'HyperV': 4,
'KVM': 5,
},
'FaultToleranceState': {
'Inactive': 1,
'Master': 2,
'Standby': 3,
},
'LockType': {
'Null': 0,
'Shared': 1,
'Write': 2,
'VM': 3,
},
'SessionType': {
'Null': 0,
'WriteLock': 1,
'Remote': 2,
'Shared': 3,
},
'DeviceType': {
'Null': 0,
'Floppy': 1,
'DVD': 2,
'HardDisk': 3,
'Network': 4,
'USB': 5,
'SharedFolder': 6,
'Graphics3D': 7,
},
'DeviceActivity': {
'Null': 0,
'Idle': 1,
'Reading': 2,
'Writing': 3,
},
'ClipboardMode': {
'Disabled': 0,
'HostToGuest': 1,
'GuestToHost': 2,
'Bidirectional': 3,
},
'DnDMode': {
'Disabled': 0,
'HostToGuest': 1,
'GuestToHost': 2,
'Bidirectional': 3,
},
'Scope': {
'Global': 0,
'Machine': 1,
'Session': 2,
},
'BIOSBootMenuMode': {
'Disabled': 0,
'MenuOnly': 1,
'MessageAndMenu': 2,
},
'APICMode': {
'Disabled': 0,
'APIC': 1,
'X2APIC': 2,
},
'ProcessorFeature': {
'HWVirtEx': 0,
'PAE': 1,
'LongMode': 2,
'NestedPaging': 3,
},
'FirmwareType': {
'BIOS': 1,
'EFI': 2,
'EFI32': 3,
'EFI64': 4,
'EFIDUAL': 5,
},
'PointingHIDType': {
'None': 1,
'PS2Mouse': 2,
'USBMouse': 3,
'USBTablet': 4,
'ComboMouse': 5,
'USBMultiTouch': 6,
},
'KeyboardHIDType': {
'None': 1,
'PS2Keyboard': 2,
'USBKeyboard': 3,
'ComboKeyboard': 4,
},
'BitmapFormat': {
'Opaque': 0,
'BGR': 0x20524742,
'BGR0': 0x30524742,
'BGRA': 0x41524742,
'RGBA': 0x41424752,
'PNG': 0x20474E50,
'JPEG': 0x4745504A,
},
'DhcpOpt': {
'SubnetMask': 1,
'TimeOffset': 2,
'Router': 3,
'TimeServer': 4,
'NameServer': 5,
'DomainNameServer': 6,
'LogServer': 7,
'Cookie': 8,
'LPRServer': 9,
'ImpressServer': 10,
'ResourseLocationServer': 11,
'HostName': 12,
'BootFileSize': 13,
'MeritDumpFile': 14,
'DomainName': 15,
'SwapServer': 16,
'RootPath': 17,
'ExtensionPath': 18,
'IPForwardingEnableDisable': 19,
'NonLocalSourceRoutingEnableDisable': 20,
'PolicyFilter': 21,
'MaximumDatagramReassemblySize': 22,
'DefaultIPTime2Live': 23,
'PathMTUAgingTimeout': 24,
'IPLayerParametersPerInterface': 25,
'InterfaceMTU': 26,
'AllSubnetsAreLocal': 27,
'BroadcastAddress': 28,
'PerformMaskDiscovery': 29,
'MaskSupplier': 30,
'PerformRouteDiscovery': 31,
'RouterSolicitationAddress': 32,
'StaticRoute': 33,
'TrailerEncapsulation': 34,
'ARPCacheTimeout': 35,
'EthernetEncapsulation': 36,
'TCPDefaultTTL': 37,
'TCPKeepAliveInterval': 38,
'TCPKeepAliveGarbage': 39,
'NetworkInformationServiceDomain': 40,
'NetworkInformationServiceServers': 41,
'NetworkTimeProtocolServers': 42,
'VendorSpecificInformation': 43,
'Option_44': 44,
'Option_45': 45,
'Option_46': 46,
'Option_47': 47,
'Option_48': 48,
'Option_49': 49,
'IPAddressLeaseTime': 51,
'Option_64': 64,
'Option_65': 65,
'TFTPServerName': 66,
'BootfileName': 67,
'Option_68': 68,
'Option_69': 69,
'Option_70': 70,
'Option_71': 71,
'Option_72': 72,
'Option_73': 73,
'Option_74': 74,
'Option_75': 75,
'Option_119': 119,
},
'DhcpOptEncoding': {
'Legacy': 0,
'Hex': 1,
},
'VFSType': {
'File': 1,
'Cloud': 2,
'S3': 3,
'WebDav': 4,
},
'ImportOptions': {
'KeepAllMACs': 1,
'KeepNATMACs': 2,
'ImportToVDI': 3,
},
'ExportOptions': {
'CreateManifest': 1,
'ExportDVDImages': 2,
'StripAllMACs': 3,
'StripAllNonNATMACs': 4,
},
'CertificateVersion': {
'V1': 1,
'V2': 2,
'V3': 3,
'Unknown': 99,
},
'VirtualSystemDescriptionType': {
'Ignore': 1,
'OS': 2,
'Name': 3,
'Product': 4,
'Vendor': 5,
'Version': 6,
'ProductUrl': 7,
'VendorUrl': 8,
'Description': 9,
'License': 10,
'Miscellaneous': 11,
'CPU': 12,
'Memory': 13,
'HardDiskControllerIDE': 14,
'HardDiskControllerSATA': 15,
'HardDiskControllerSCSI': 16,
'HardDiskControllerSAS': 17,
'HardDiskImage': 18,
'Floppy': 19,
'CDROM': 20,
'NetworkAdapter': 21,
'USBController': 22,
'SoundCard': 23,
'SettingsFile': 24,
},
'VirtualSystemDescriptionValueType': {
'Reference': 1,
'Original': 2,
'Auto': 3,
'ExtraConfig': 4,
},
'GraphicsControllerType': {
'Null': 0,
'VBoxVGA': 1,
'VMSVGA': 2,
},
'CleanupMode': {
'UnregisterOnly': 1,
'DetachAllReturnNone': 2,
'DetachAllReturnHardDisksOnly': 3,
'Full': 4,
},
'CloneMode': {
'MachineState': 1,
'MachineAndChildStates': 2,
'AllStates': 3,
},
'CloneOptions': {
'Link': 1,
'KeepAllMACs': 2,
'KeepNATMACs': 3,
'KeepDiskNames': 4,
},
'AutostopType': {
'Disabled': 1,
'SaveState': 2,
'PowerOff': 3,
'AcpiShutdown': 4,
},
'HostNetworkInterfaceMediumType': {
'Unknown': 0,
'Ethernet': 1,
'PPP': 2,
'SLIP': 3,
},
'HostNetworkInterfaceStatus': {
'Unknown': 0,
'Up': 1,
'Down': 2,
},
'HostNetworkInterfaceType': {
'Bridged': 1,
'HostOnly': 2,
},
'AdditionsFacilityType': {
'None': 0,
'VBoxGuestDriver': 20,
'AutoLogon': 90,
'VBoxService': 100,
'VBoxTrayClient': 101,
'Seamless': 1000,
'Graphics': 1100,
'All': 2147483646,
},
'AdditionsFacilityClass': {
'None': 0,
'Driver': 10,
'Service': 30,
'Program': 50,
'Feature': 100,
'ThirdParty': 999,
'All': 2147483646,
},
'AdditionsFacilityStatus': {
'Inactive': 0,
'Paused': 1,
'PreInit': 20,
'Init': 30,
'Active': 50,
'Terminating': 100,
'Terminated': 101,
'Failed': 800,
'Unknown': 999,
},
'AdditionsRunLevelType': {
'None': 0,
'System': 1,
'Userland': 2,
'Desktop': 3,
},
'AdditionsUpdateFlag': {
'None': 0,
'WaitForUpdateStartOnly': 1,
},
'GuestSessionStatus': {
'Undefined': 0,
'Starting': 10,
'Started': 100,
'Terminating': 480,
'Terminated': 500,
'TimedOutKilled': 512,
'TimedOutAbnormally': 513,
'Down': 600,
'Error': 800,
},
'GuestSessionWaitForFlag': {
'None': 0,
'Start': 1,
'Terminate': 2,
'Status': 4,
},
'GuestSessionWaitResult': {
'None': 0,
'Start': 1,
'Terminate': 2,
'Status': 3,
'Error': 4,
'Timeout': 5,
'WaitFlagNotSupported': 6,
},
'GuestUserState': {
'Unknown': 0,
'LoggedIn': 1,
'LoggedOut': 2,
'Locked': 3,
'Unlocked': 4,
'Disabled': 5,
'Idle': 6,
'InUse': 7,
'Created': 8,
'Deleted': 9,
'SessionChanged': 10,
'CredentialsChanged': 11,
'RoleChanged': 12,
'GroupAdded': 13,
'GroupRemoved': 14,
'Elevated': 15,
},
'FileSeekOrigin': {
'Begin': 0,
'Current': 1,
'End': 2,
},
'ProcessInputFlag': {
'None': 0,
'EndOfFile': 1,
},
'ProcessOutputFlag': {
'None': 0,
'StdErr': 1,
},
'ProcessWaitForFlag': {
'None': 0,
'Start': 1,
'Terminate': 2,
'StdIn': 4,
'StdOut': 8,
'StdErr': 16,
},
'ProcessWaitResult': {
'None': 0,
'Start': 1,
'Terminate': 2,
'Status': 3,
'Error': 4,
'Timeout': 5,
'StdIn': 6,
'StdOut': 7,
'StdErr': 8,
'WaitFlagNotSupported': 9,
},
'FileCopyFlag': {
'None': 0,
'NoReplace': 1,
'FollowLinks': 2,
'Update': 4,
},
'FsObjMoveFlags': {
'None': 0,
'Replace': 1,
'FollowLinks': 2,
'AllowDirectoryMoves': 4,
},
'DirectoryCreateFlag': {
'None': 0,
'Parents': 1,
},
'DirectoryCopyFlags': {
'None': 0,
'CopyIntoExisting': 1,
},
'DirectoryRemoveRecFlag': {
'None': 0,
'ContentAndDir': 1,
'ContentOnly': 2,
},
'FsObjRenameFlag': {
'NoReplace': 0,
'Replace': 1,
},
'ProcessCreateFlag': {
'None': 0,
'WaitForProcessStartOnly': 1,
'IgnoreOrphanedProcesses': 2,
'Hidden': 4,
'Profile': 8,
'WaitForStdOut': 16,
'WaitForStdErr': 32,
'ExpandArguments': 64,
'UnquotedArguments': 128,
},
'ProcessPriority': {
'Invalid': 0,
'Default': 1,
},
'SymlinkType': {
'Unknown': 0,
'Directory': 1,
'File': 2,
},
'SymlinkReadFlag': {
'None': 0,
'NoSymlinks': 1,
},
'ProcessStatus': {
'Undefined': 0,
'Starting': 10,
'Started': 100,
'Paused': 110,
'Terminating': 480,
'TerminatedNormally': 500,
'TerminatedSignal': 510,
'TerminatedAbnormally': 511,
'TimedOutKilled': 512,
'TimedOutAbnormally': 513,
'Down': 600,
'Error': 800,
},
'ProcessInputStatus': {
'Undefined': 0,
'Broken': 1,
'Available': 10,
'Written': 50,
'Overflow': 100,
},
'PathStyle': {
'DOS': 1,
'UNIX': 2,
'Unknown': 8,
},
'FileAccessMode': {
'ReadOnly': 1,
'WriteOnly': 2,
'ReadWrite': 3,
'AppendOnly': 4,
'AppendRead': 5,
},
'FileOpenAction': {
'OpenExisting': 1,
'OpenOrCreate': 2,
'CreateNew': 3,
'CreateOrReplace': 4,
'OpenExistingTruncated': 5,
'AppendOrCreate': 99,
},
'FileSharingMode': {
'Read': 1,
'Write': 2,
'ReadWrite': 3,
'Delete': 4,
'ReadDelete': 5,
'WriteDelete': 6,
'All': 7,
},
'FileOpenExFlags': {
'None': 0,
},
'FileStatus': {
'Undefined': 0,
'Opening': 10,
'Open': 100,
'Closing': 150,
'Closed': 200,
'Down': 600,
'Error': 800,
},
'FsObjType': {
| |
# Repository: danjperron/A2D_PIC_RPI
#!/usr/bin/env python
################################
#
# burnLVP.py
#
#
# Program to burn pic12F1840 using LVP mode with a Rasberry Pi
#
#
# programmer : <NAME>
# Date : June 30, 2013
# Version : 1.0
#
# source code: https://github.com/danjperron/A2D_PIC_RPI
#
#
#//////////////////////////////////// MIT LICENSE ///////////////////////////////////
# The MIT License (MIT)
#
# Copyright (c) 2013 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from time import sleep
import RPi.GPIO as GPIO
import sys, termios, atexit
from intelhex import IntelHex
from select import select
#set io pin
#CLK GPIO 4
PIC_CLK = 7
#DATA GPIO 8
PIC_DATA = 24
#MCLR GPIO 9
PIC_MCLR = 21
#compatible PIC id
PIC12F1840 = 0x1b80
PIC16F1847 = 0x1480
PIC12LF1840 = 0x1bc0
PIC16LF1847 = 0x14A0
PIC16F1826 = 0x2780
PIC16F1827 = 0x27A0
PIC16LF1826 = 0x2880
PIC16LF1827 = 0x28A0
PIC16F1823 = 0x2720
PIC16LF1823 = 0x2820
PIC12F1822 = 0x2700
PIC12LF1822 = 0x2800
PIC16F1824 = 0x2740
PIC16LF1824 = 0x2840
PIC16F1825 = 0x2760
PIC16LF1825 = 0x2860
PIC16F1828 = 0x27C0
PIC16LF1828 = 0x28C0
PIC16F1829 = 0x27E0
PIC16LF1829 = 0x28E0
# command definition
C_LOAD_CONFIG = 0
C_LOAD_PROGRAM = 2
C_LOAD_DATA = 3
C_READ_PROGRAM = 4
C_READ_DATA = 5
C_INC_ADDRESS = 6
C_RESET_ADDRESS = 0x16
C_BEGIN_INT_PROG = 8
C_BEGIN_EXT_PROG = 0x18
C_END_EXT_PROG = 0xa
C_BULK_ERASE_PROGRAM = 9
C_BULK_ERASE_DATA = 0xB
def Release_VPP():
GPIO.setup(PIC_DATA, GPIO.IN)
GPIO.setup(PIC_CLK, GPIO.IN)
GPIO.output(PIC_MCLR, False)
print "VPP OFF"
def Set_VPP():
print "VPP OFF"
#held MCLR Low
GPIO.setup(PIC_MCLR,GPIO.OUT)
GPIO.output(PIC_MCLR,False)
sleep(0.1)
#ok PIC_CLK=out& HIGH, PIC_DATA=out & LOW
GPIO.setup(PIC_CLK, GPIO.OUT)
GPIO.output(PIC_CLK,False)
#MCLR LOW
GPIO.setup(PIC_DATA, GPIO.OUT)
GPIO.output(PIC_DATA,False)
print "VPP ON"
GPIO.output(PIC_MCLR,True)
sleep(0.1)
def Set_LVP():
#held MCLR HIGH
GPIO.setup(PIC_MCLR,GPIO.OUT)
GPIO.output(PIC_MCLR,True)
sleep(0.1)
#ok PIC_CLK=out& HIGH, PIC_DATA=out & LOW
GPIO.setup(PIC_CLK, GPIO.OUT)
GPIO.output(PIC_CLK,False)
#MCLR LOW
GPIO.setup(PIC_DATA, GPIO.OUT)
GPIO.output(PIC_DATA,False)
print "LVP ON"
GPIO.output(PIC_MCLR,False)
sleep(0.3)
def Release_LVP():
GPIO.setup(PIC_DATA,GPIO.IN)
GPIO.setup(PIC_CLK,GPIO.IN)
GPIO.output(PIC_MCLR,True)
print "LVP OFF"
def SendMagic():
magic = 0x4d434850
GPIO.setup(PIC_DATA, GPIO.OUT)
for loop in range(33):
GPIO.output(PIC_CLK,True)
GPIO.output(PIC_DATA,(magic & 1) == 1)
# use pass like a delay
pass
GPIO.output(PIC_CLK,False)
pass
magic = magic >> 1
def SendCommand(Command):
GPIO.setup(PIC_DATA, GPIO.OUT)
for loop in range(6):
GPIO.output(PIC_CLK,True)
GPIO.output(PIC_DATA,(Command & 1)==1)
pass
GPIO.output(PIC_CLK,False)
pass
Command = Command >> 1;
def ReadWord():
GPIO.setup(PIC_DATA,GPIO.IN)
Value = 0
for loop in range(16):
GPIO.output(PIC_CLK,True)
pass
if GPIO.input(PIC_DATA):
Value = Value + (1 << loop)
GPIO.output(PIC_CLK,False)
pass
Value = (Value >> 1) & 0x3FFF;
return Value;
def LoadWord(Value):
GPIO.setup(PIC_DATA,GPIO.OUT)
Value = (Value << 1) & 0x7FFE
for loop in range(16):
GPIO.output(PIC_CLK,True)
GPIO.output(PIC_DATA,(Value & 1)==1)
pass
GPIO.output(PIC_CLK,False)
pass
Value = Value >> 1;
def Pic12_BulkErase():
print "Bulk Erase Program",
SendCommand(C_RESET_ADDRESS)
SendCommand(C_LOAD_CONFIG)
LoadWord(0x3fff)
SendCommand(C_BULK_ERASE_PROGRAM)
sleep(0.1)
print ", Data.",
SendCommand(C_BULK_ERASE_DATA)
sleep(0.1)
print ".... done."
def Pic12_ProgramBlankCheck(program_size):
print "Program blank check",
SendCommand(C_RESET_ADDRESS)
for l in range(program_size):
SendCommand(C_READ_PROGRAM)
Value = ReadWord()
if Value != 0x3fff :
print "*** CPU program at Address ", hex(l), " = ", hex(Value), " Failed!"
return False
if (l % 128)==0 :
sys.stdout.write('.')
sys.stdout.flush()
SendCommand(C_INC_ADDRESS)
print "Passed!"
return True
def Pic12_DataBlankCheck(data_size):
print "Data Blank check",
SendCommand(C_RESET_ADDRESS)
for l in range(data_size):
SendCommand(C_READ_DATA)
Value = ReadWord()
if Value != 0xff :
print "*** CPU eeprom data at Address ", hex(l), " = ", hex(Value), "Failed!"
return False
if (l % 32)==0 :
sys.stdout.write('.')
sys.stdout.flush()
SendCommand(C_INC_ADDRESS)
print "Passed!"
return True
def Pic12_ProgramBurn(pic_data, program_base, program_size):
print "Writing Program",
SendCommand(C_RESET_ADDRESS)
for l in range( program_size):
if pic_data.get(l*2+ program_base) != None :
if pic_data.get(l*2+ program_base+1) != None :
Value = pic_data.get(l*2+ program_base) + ( 256 * pic_data.get(l*2+ program_base+1))
Value = Value & 0x3fff
SendCommand(C_LOAD_PROGRAM)
LoadWord(Value)
SendCommand(C_BEGIN_INT_PROG)
sleep(0.005)
SendCommand(C_READ_PROGRAM)
RValue = ReadWord()
if Value != RValue :
print "Program address:", hex(l) , " write ", hex(Value), " read ", hex(RValue), " Failed!"
return False
if (l % 128)==0 :
sys.stdout.write('.')
sys.stdout.flush()
SendCommand(C_INC_ADDRESS)
print "Done."
return True
def Pic12_DataBurn(pic_data, data_base, data_size):
print "Writing Data",
SendCommand(C_RESET_ADDRESS)
for l in range( data_size):
if pic_data.get(l*2 + data_base) != None :
if pic_data.get(l*2 + 1 + data_base) != None :
Value = pic_data.get(l*2 + data_base)
SendCommand(C_LOAD_DATA)
LoadWord(Value)
SendCommand(C_BEGIN_INT_PROG)
sleep(0.003)
SendCommand(C_READ_DATA)
RValue = ReadWord()
if Value != RValue :
print "Data address:", hex(l) , " write ", hex(Value), " read ", hex(RValue), " Failed!"
return False
if (l % 32)==0 :
sys.stdout.write('.')
sys.stdout.flush()
SendCommand(C_INC_ADDRESS)
print "Done."
return True
def Pic12_ProgramCheck(pic_data, program_base, program_size):
print "Program check ",
SendCommand(C_RESET_ADDRESS)
for l in range(program_size):
if pic_data.get(l*2+ program_base) != None :
if pic_data.get(l*2+ program_base+1) != None :
Value = pic_data.get(l*2+ program_base) + ( 256 * pic_data.get(l*2+ program_base+1))
Value = Value & 0x3fff
SendCommand(C_READ_PROGRAM)
RValue = ReadWord()
if Value != RValue :
print "Program address:", hex(l) , " write ", hex(Value), " read ", hex(RValue)
return False
if (l % 128)==0 :
sys.stdout.write('.')
sys.stdout.flush()
SendCommand(C_INC_ADDRESS)
print "Passed!"
return True
def Pic12_DataCheck(pic_data, data_base, data_size):
print "Data check ",
SendCommand(C_RESET_ADDRESS)
for l in range(data_size):
if pic_data.get(l*2+ data_base) != None :
if pic_data.get(l*2 + 1 + data_base) != None :
Value = pic_data.get(l*2 +data_base)
SendCommand(C_READ_DATA)
RValue = ReadWord()
if Value != RValue :
print "Data address:", hex(l) , " write ", hex(Value), " read ", hex(RValue)
return False
if (l % 32)==0 :
sys.stdout.write('.')
sys.stdout.flush()
SendCommand(C_INC_ADDRESS)
print "Passed!"
return True
def Pic12_ConfigBurn(pic_data, config_base):
print "Writing Config",
SendCommand(C_RESET_ADDRESS)
SendCommand(C_LOAD_CONFIG)
LoadWord(0x3fff)
#user id first
for l in range(4):
if pic_data.get(l*2+ config_base) != None :
if pic_data.get(l*2+ config_base+1) != None :
Value = pic_data.get(l*2+ config_base) + ( 256 * pic_data.get(l*2+ config_base+1))
Value = Value & 0x3fff
SendCommand(C_LOAD_PROGRAM)
LoadWord(Value)
SendCommand(C_BEGIN_INT_PROG)
sleep(0.005)
SendCommand(C_READ_PROGRAM)
RValue = ReadWord()
if Value != RValue :
print "User Id Location:", hex(l) , " write ", hex(Value), " read ", hex(RValue), " Failed!"
return False
sys.stdout.write('.')
sys.stdout.flush()
SendCommand(C_INC_ADDRESS)
#ok we are at 08004
#skip 0x8004 .. 0x8006
SendCommand(C_INC_ADDRESS)
SendCommand(C_INC_ADDRESS)
SendCommand(C_INC_ADDRESS)
# now the configuration word 1& 2 at 0x8007 ( hex file at 0x1000E)
for l in range(7,9):
if pic_data.get(l*2+ config_base) != None :
if pic_data.get(l*2+ config_base+1) != None :
Value = pic_data.get(l*2+ config_base) + ( 256 * pic_data.get(l*2+ config_base+1))
Value = Value & 0x3fff
SendCommand(C_LOAD_PROGRAM)
if l is 8:
#catch21 force LVP programming to be always ON
Value = Value | 0x2000
LoadWord(Value)
SendCommand(C_BEGIN_INT_PROG)
sleep(0.005)
SendCommand(C_READ_PROGRAM)
RValue = ReadWord()
if Value != RValue :
print "Config Word ", l-6 , " write ", hex(Value), " read ", hex(RValue), " Failed!"
return False
sys.stdout.write('.')
sys.stdout.flush()
SendCommand(C_INC_ADDRESS)
print "Done."
return True
def Pic12_ConfigCheck(pic_data, config_base):
print "Config Check",
SendCommand(C_RESET_ADDRESS)
SendCommand(C_LOAD_CONFIG)
LoadWord(0x3fff)
#user id first
for l in range(4):
if pic_data.get(l*2+ config_base) != None :
if pic_data.get(l*2+ config_base+1) != None :
Value = pic_data.get(l*2+ config_base) + ( 256 * pic_data.get(l*2+ config_base+1))
Value = Value & 0x3fff
SendCommand(C_READ_PROGRAM)
RValue = ReadWord()
if Value != RValue :
print "User Id Location:", hex(l) , " write ", hex(Value), " read ", hex(RValue), " Failed!"
return False
sys.stdout.write('.')
sys.stdout.flush()
SendCommand(C_INC_ADDRESS)
#ok we are at 08004
#skip 0x8004 .. 0x8006
SendCommand(C_INC_ADDRESS)
SendCommand(C_INC_ADDRESS)
SendCommand(C_INC_ADDRESS)
# now the configuration word 1& 2 at 0x8007 ( hex file at 0x1000E)
for l in range(7,9):
if pic_data.get(l*2+ config_base) != None :
if pic_data.get(l*2+ config_base+1) != None :
Value = pic_data.get(l*2+ config_base) + ( 256 * pic_data.get(l*2+ config_base+1))
Value = Value & 0x3fff
if l is 8:
#catch21 force LVP programming to be always ON
Value = Value | 0x2000
SendCommand(C_READ_PROGRAM)
RValue = ReadWord()
if Value != RValue :
print "Config Word ", l-6 , " write ", hex(Value), " read ", hex(RValue), " Failed!"
return False
sys.stdout.write('.')
sys.stdout.flush()
SendCommand(C_INC_ADDRESS)
print "Passed!"
return True
#just check if the user forget to set LVP flag enable
#if not just give a warning since we force LVP enable
def Pic12_CheckLVP(pic_data, config_base):
#specify config word2
l=8
if pic_data.get(l*2+ config_base) != None :
if pic_data.get(l*2+ | |
# filename: fhir/resources/conditiondefinition.py
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/ConditionDefinition
Release: R5
Version: 4.5.0
Build ID: 0d95498
Last updated: 2021-04-03T00:34:11.075+00:00
"""
import typing
from pydantic import Field
from pydantic import root_validator
from pydantic.error_wrappers import ErrorWrapper, ValidationError
from pydantic.errors import MissingError, NoneIsNotAllowedError
from . import fhirtypes
from . import metadataresource
class ConditionDefinition(metadataresource.MetadataResource):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
A definition of a condition.
A definition of a condition and information relevant to managing it.
"""
resource_type = Field("ConditionDefinition", const=True)
bodySite: fhirtypes.CodeableConceptType = Field(
None,
alias="bodySite",
title="Anatomical location, if relevant",
description="The anatomical location where this condition manifests itself.",
# if property is element of this resource.
element_property=True,
)
code: fhirtypes.CodeableConceptType = Field(
...,
alias="code",
title="Identification of the condition, problem or diagnosis",
description=None,
# if property is element of this resource.
element_property=True,
)
contact: typing.List[fhirtypes.ContactDetailType] = Field(
None,
alias="contact",
title="Contact details for the publisher",
description=(
"Contact details to assist a user in finding and communicating with the"
" publisher."
),
# if property is element of this resource.
element_property=True,
)
date: fhirtypes.DateTime = Field(
None,
alias="date",
title="Date last changed",
description=(
"The date (and optionally time) when the condition definition was "
"published. The date must change when the business version changes and "
"it must change if the status code changes. In addition, it should "
"change when the substantive content of the condition definition "
"changes."
),
# if property is element of this resource.
element_property=True,
)
date__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_date", title="Extension field for ``date``."
)
definition: typing.List[fhirtypes.Uri] = Field(
None,
alias="definition",
title="Formal Definition for the condition",
description=(
"Formal definitions of the condition. These may be references to "
"ontologies, published clinical protocols or research papers."
),
# if property is element of this resource.
element_property=True,
)
definition__ext: typing.List[
typing.Union[fhirtypes.FHIRPrimitiveExtensionType, None]
] = Field(None, alias="_definition", title="Extension field for ``definition``.")
description: fhirtypes.Markdown = Field(
None,
alias="description",
title="Natural language description of the condition definition",
description=(
"A free text natural language description of the condition definition "
"from a consumer's perspective."
),
# if property is element of this resource.
element_property=True,
)
description__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_description", title="Extension field for ``description``."
)
experimental: bool = Field(
None,
alias="experimental",
title="For testing purposes, not real usage",
description=(
"A Boolean value to indicate that this condition definition is authored"
" for testing purposes (or education/evaluation/marketing) and is not "
"intended to be used for genuine usage."
),
# if property is element of this resource.
element_property=True,
)
experimental__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_experimental", title="Extension field for ``experimental``."
)
hasBodySite: bool = Field(
None,
alias="hasBodySite",
title="Whether bodySite is appropriate",
description="Whether bodySite is appropriate to collect for this condition.",
# if property is element of this resource.
element_property=True,
)
hasBodySite__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_hasBodySite", title="Extension field for ``hasBodySite``."
)
hasSeverity: bool = Field(
None,
alias="hasSeverity",
title="Whether Severity is appropriate",
description="Whether Severity is appropriate to collect for this condition.",
# if property is element of this resource.
element_property=True,
)
hasSeverity__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_hasSeverity", title="Extension field for ``hasSeverity``."
)
hasStage: bool = Field(
None,
alias="hasStage",
title="Whether stage is appropriate",
description="Whether stage is appropriate to collect for this condition.",
# if property is element of this resource.
element_property=True,
)
hasStage__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_hasStage", title="Extension field for ``hasStage``."
)
identifier: typing.List[fhirtypes.IdentifierType] = Field(
None,
alias="identifier",
title="Additional identifier for the condition definition",
description=(
"A formal identifier that is used to identify this condition definition"
" when it is represented in other formats, or referenced in a "
"specification, model, design or an instance."
),
# if property is element of this resource.
element_property=True,
)
jurisdiction: typing.List[fhirtypes.CodeableConceptType] = Field(
None,
alias="jurisdiction",
title="Intended jurisdiction for condition definition (if applicable)",
description=(
"A legal or geographic region in which the condition definition is "
"intended to be used."
),
# if property is element of this resource.
element_property=True,
)
medication: typing.List[fhirtypes.ConditionDefinitionMedicationType] = Field(
None,
alias="medication",
title="Medications particularly relevant for this condition",
description=None,
# if property is element of this resource.
element_property=True,
)
name: fhirtypes.String = Field(
None,
alias="name",
title="Name for this condition definition (computer friendly)",
description=(
"A natural language name identifying the condition definition. This "
"name should be usable as an identifier for the module by machine "
"processing applications such as code generation."
),
# if property is element of this resource.
element_property=True,
)
name__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_name", title="Extension field for ``name``."
)
observation: typing.List[fhirtypes.ConditionDefinitionObservationType] = Field(
None,
alias="observation",
title="Observations particularly relevant to this condition",
description=None,
# if property is element of this resource.
element_property=True,
)
plan: typing.List[fhirtypes.ConditionDefinitionPlanType] = Field(
None,
alias="plan",
title="Plan that is appropriate",
description=None,
# if property is element of this resource.
element_property=True,
)
precondition: typing.List[fhirtypes.ConditionDefinitionPreconditionType] = Field(
None,
alias="precondition",
title="Observation that suggets this condition",
description="An observation that suggests that this condition applies.",
# if property is element of this resource.
element_property=True,
)
publisher: fhirtypes.String = Field(
None,
alias="publisher",
title="Name of the publisher (organization or individual)",
description=(
"The name of the organization or individual that published the "
"condition definition."
),
# if property is element of this resource.
element_property=True,
)
publisher__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_publisher", title="Extension field for ``publisher``."
)
questionnaire: typing.List[fhirtypes.ConditionDefinitionQuestionnaireType] = Field(
None,
alias="questionnaire",
title="Questionnaire for this condition",
description=None,
# if property is element of this resource.
element_property=True,
)
severity: fhirtypes.CodeableConceptType = Field(
None,
alias="severity",
title="Subjective severity of condition",
description=(
"A subjective assessment of the severity of the condition as evaluated "
"by the clinician."
),
# if property is element of this resource.
element_property=True,
)
stage: fhirtypes.CodeableConceptType = Field(
None,
alias="stage",
title="Stage/grade, usually assessed formally",
description=(
"Clinical stage or grade of a condition. May include formal severity "
"assessments."
),
# if property is element of this resource.
element_property=True,
)
status: fhirtypes.Code = Field(
None,
alias="status",
title="draft | active | retired | unknown",
description=(
"The status of this condition definition. Enables tracking the life-"
"cycle of the content."
),
# if property is element of this resource.
element_property=True,
element_required=True,
# note: Enum values can be used in validation,
# but use in your own responsibilities, read official FHIR documentation.
enum_values=["draft", "active", "retired", "unknown"],
)
status__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_status", title="Extension field for ``status``."
)
subtitle: fhirtypes.String = Field(
None,
alias="subtitle",
title="Subordinate title of the event definition",
description=(
"An explanatory or alternate title for the event definition giving "
"additional information about its content."
),
# if property is element of this resource.
element_property=True,
)
subtitle__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_subtitle", title="Extension field for ``subtitle``."
)
team: typing.List[fhirtypes.ReferenceType] = Field(
None,
alias="team",
title="Appropriate team for this condition",
description=None,
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["CareTeam"],
)
title: fhirtypes.String = Field(
None,
alias="title",
title="Name for this condition definition (human friendly)",
description=(
"A short, descriptive, user-friendly title for the condition " "definition."
),
# if property is element of this resource.
element_property=True,
)
title__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_title", title="Extension field for ``title``."
)
url: fhirtypes.Uri = Field(
None,
alias="url",
title=(
"Canonical identifier for this condition definition, represented as a "
"URI (globally unique)"
),
description=(
"An absolute URI that is used to identify this condition definition "
"when it is referenced in a specification, model, design or an "
"instance; also called its canonical identifier. This SHOULD be "
"globally unique and SHOULD be a literal address at which at which an "
"authoritative instance of this condition definition is (or will be) "
"published. This URL can be the target of a canonical reference. It "
"SHALL remain the same when the condition definition is stored on "
"different servers."
),
# if property is element of this resource.
element_property=True,
)
url__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_url", title="Extension field for ``url``."
)
useContext: typing.List[fhirtypes.UsageContextType] = Field(
None,
alias="useContext",
title="The context that the content is intended to | |
of nodes to use
"""
return pulumi.get(self, "max_node_count")
@property
@pulumi.getter(name="minNodeCount")
def min_node_count(self) -> Optional[int]:
"""
Min number of nodes to use
"""
return pulumi.get(self, "min_node_count")
@property
@pulumi.getter(name="nodeIdleTimeBeforeScaleDown")
def node_idle_time_before_scale_down(self) -> Optional[str]:
"""
Node Idle Time before scaling down amlCompute. This string needs to be in the RFC Format.
"""
return pulumi.get(self, "node_idle_time_before_scale_down")
@pulumi.output_type
class ScriptReferenceResponse(dict):
"""
Script reference
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "scriptArguments":
suggest = "script_arguments"
elif key == "scriptData":
suggest = "script_data"
elif key == "scriptSource":
suggest = "script_source"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ScriptReferenceResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ScriptReferenceResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ScriptReferenceResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
script_arguments: Optional[str] = None,
script_data: Optional[str] = None,
script_source: Optional[str] = None,
timeout: Optional[str] = None):
"""
Script reference
:param str script_arguments: Optional command line arguments passed to the script to run.
:param str script_data: The location of scripts in the mounted volume.
:param str script_source: The storage source of the script: inline, workspace.
:param str timeout: Optional time period passed to timeout command.
"""
if script_arguments is not None:
pulumi.set(__self__, "script_arguments", script_arguments)
if script_data is not None:
pulumi.set(__self__, "script_data", script_data)
if script_source is not None:
pulumi.set(__self__, "script_source", script_source)
if timeout is not None:
pulumi.set(__self__, "timeout", timeout)
@property
@pulumi.getter(name="scriptArguments")
def script_arguments(self) -> Optional[str]:
"""
Optional command line arguments passed to the script to run.
"""
return pulumi.get(self, "script_arguments")
@property
@pulumi.getter(name="scriptData")
def script_data(self) -> Optional[str]:
"""
The location of scripts in the mounted volume.
"""
return pulumi.get(self, "script_data")
@property
@pulumi.getter(name="scriptSource")
def script_source(self) -> Optional[str]:
"""
The storage source of the script: inline, workspace.
"""
return pulumi.get(self, "script_source")
@property
@pulumi.getter
def timeout(self) -> Optional[str]:
"""
Optional time period passed to timeout command.
"""
return pulumi.get(self, "timeout")
@pulumi.output_type
class ScriptsToExecuteResponse(dict):
"""
Customized setup scripts
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "creationScript":
suggest = "creation_script"
elif key == "startupScript":
suggest = "startup_script"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ScriptsToExecuteResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ScriptsToExecuteResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ScriptsToExecuteResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
creation_script: Optional['outputs.ScriptReferenceResponse'] = None,
startup_script: Optional['outputs.ScriptReferenceResponse'] = None):
"""
Customized setup scripts
:param 'ScriptReferenceResponse' creation_script: Script that's run only once during provision of the compute.
:param 'ScriptReferenceResponse' startup_script: Script that's run every time the machine starts.
"""
if creation_script is not None:
pulumi.set(__self__, "creation_script", creation_script)
if startup_script is not None:
pulumi.set(__self__, "startup_script", startup_script)
@property
@pulumi.getter(name="creationScript")
def creation_script(self) -> Optional['outputs.ScriptReferenceResponse']:
"""
Script that's run only once during provision of the compute.
"""
return pulumi.get(self, "creation_script")
@property
@pulumi.getter(name="startupScript")
def startup_script(self) -> Optional['outputs.ScriptReferenceResponse']:
"""
Script that's run every time the machine starts.
"""
return pulumi.get(self, "startup_script")
@pulumi.output_type
class ServiceManagedResourcesSettingsResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "cosmosDb":
suggest = "cosmos_db"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ServiceManagedResourcesSettingsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ServiceManagedResourcesSettingsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ServiceManagedResourcesSettingsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
cosmos_db: Optional['outputs.CosmosDbSettingsResponse'] = None):
"""
:param 'CosmosDbSettingsResponse' cosmos_db: The settings for the service managed cosmosdb account.
"""
if cosmos_db is not None:
pulumi.set(__self__, "cosmos_db", cosmos_db)
@property
@pulumi.getter(name="cosmosDb")
def cosmos_db(self) -> Optional['outputs.CosmosDbSettingsResponse']:
"""
The settings for the service managed cosmosdb account.
"""
return pulumi.get(self, "cosmos_db")
@pulumi.output_type
class ServiceResponseBaseResponseError(dict):
"""
The error details.
"""
def __init__(__self__, *,
error: 'outputs.ErrorResponseResponse'):
"""
The error details.
:param 'ErrorResponseResponse' error: The error response.
"""
pulumi.set(__self__, "error", error)
@property
@pulumi.getter
def error(self) -> 'outputs.ErrorResponseResponse':
"""
The error response.
"""
return pulumi.get(self, "error")
@pulumi.output_type
class SetupScriptsResponse(dict):
    """Details of customized scripts to execute for setting up the cluster."""

    def __init__(__self__, *,
                 scripts: Optional['outputs.ScriptsToExecuteResponse'] = None):
        """Details of customized scripts to execute for setting up the cluster.

        :param 'ScriptsToExecuteResponse' scripts: Customized setup scripts
        """
        # Store the property only when it was explicitly provided.
        if scripts is None:
            return
        pulumi.set(__self__, "scripts", scripts)

    @property
    @pulumi.getter
    def scripts(self) -> Optional['outputs.ScriptsToExecuteResponse']:
        """Customized setup scripts."""
        return pulumi.get(self, "scripts")
@pulumi.output_type
class SharedPrivateLinkResourceResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "groupId":
suggest = "group_id"
elif key == "privateLinkResourceId":
suggest = "private_link_resource_id"
elif key == "requestMessage":
suggest = "request_message"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SharedPrivateLinkResourceResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SharedPrivateLinkResourceResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SharedPrivateLinkResourceResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
group_id: Optional[str] = None,
name: Optional[str] = None,
private_link_resource_id: Optional[str] = None,
request_message: Optional[str] = None,
status: Optional[str] = None):
"""
:param str group_id: The private link resource group id.
:param str name: Unique name of the private link.
:param str private_link_resource_id: The resource id that private link links to.
:param str request_message: Request message.
:param str status: Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.
"""
if group_id is not None:
pulumi.set(__self__, "group_id", group_id)
if name is not None:
pulumi.set(__self__, "name", name)
if private_link_resource_id is not None:
pulumi.set(__self__, "private_link_resource_id", private_link_resource_id)
if request_message is not None:
pulumi.set(__self__, "request_message", request_message)
if status is not None:
pulumi.set(__self__, "status", status)
@property
@pulumi.getter(name="groupId")
def group_id(self) -> Optional[str]:
"""
The private link resource group id.
"""
return pulumi.get(self, "group_id")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Unique name of the private link.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateLinkResourceId")
def private_link_resource_id(self) -> Optional[str]:
"""
The resource id that private link links to.
"""
return pulumi.get(self, "private_link_resource_id")
@property
@pulumi.getter(name="requestMessage")
def request_message(self) -> Optional[str]:
"""
Request message.
"""
return pulumi.get(self, "request_message")
@property
@pulumi.getter
def status(self) -> Optional[str]:
"""
Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.
"""
return pulumi.get(self, "status")
@pulumi.output_type
class SkuResponse(dict):
    """Sku of the resource."""

    def __init__(__self__, *,
                 name: Optional[str] = None,
                 tier: Optional[str] = None):
        """Sku of the resource.

        :param str name: Name of the sku
        :param str tier: Tier of the sku like Basic or Enterprise
        """
        # Store only the properties that were explicitly provided.
        for prop_name, prop_value in (("name", name), ("tier", tier)):
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Name of the sku."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def tier(self) -> Optional[str]:
        """Tier of the sku like Basic or Enterprise."""
        return pulumi.get(self, "tier")
@pulumi.output_type
class SparkMavenPackageResponse(dict):
    """Spark Maven package coordinate (group / artifact / version)."""

    def __init__(__self__, *,
                 artifact: Optional[str] = None,
                 group: Optional[str] = None,
                 version: Optional[str] = None):
        # Store only the coordinates that were explicitly provided.
        for prop_name, prop_value in (
                ("artifact", artifact),
                ("group", group),
                ("version", version)):
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter
    def artifact(self) -> Optional[str]:
        return pulumi.get(self, "artifact")

    @property
    @pulumi.getter
    def group(self) -> Optional[str]:
        return pulumi.get(self, "group")

    @property
    @pulumi.getter
    def version(self) -> Optional[str]:
        return pulumi.get(self, "version")
@pulumi.output_type
class SslConfigurationResponse(dict):
    """The ssl configuration for scoring."""

    def __init__(__self__, *,
                 cert: Optional[str] = None,
                 cname: Optional[str] = None,
                 key: Optional[str] = None,
                 status: Optional[str] = None):
        """The ssl configuration for scoring.

        :param str cert: Cert data
        :param str cname: CNAME of the cert
        :param str key: Key data
        :param str status: Enable or disable ssl for scoring
        """
        # Store only the properties that were explicitly provided.
        for prop_name, prop_value in (
                ("cert", cert),
                ("cname", cname),
                ("key", key),
                ("status", status)):
            if prop_value is not None:
                pulumi.set(__self__, prop_name, prop_value)

    @property
    @pulumi.getter
    def cert(self) -> Optional[str]:
        """Cert data."""
        return pulumi.get(self, "cert")

    @property
    @pulumi.getter
    def cname(self) -> Optional[str]:
        """CNAME of the cert."""
        return pulumi.get(self, "cname")

    @property
    @pulumi.getter
    def key(self) -> Optional[str]:
        """Key data."""
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def status(self) -> Optional[str]:
        """Enable or disable ssl for scoring."""
        return pulumi.get(self, "status")
@pulumi.output_type
class StatusMessageResponse(dict):
"""
Active message associated with project.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "createdTimeUtc":
suggest = "created_time_utc"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in StatusMessageResponse. Access the value | |
this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
id: Unique identifier for the identity.
- name: --microsoft-graph-identity-user
short-summary: "identity"
long-summary: |
Usage: --microsoft-graph-identity-user display-name=XX id=XX
display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
id: Unique identifier for the identity.
- name: --colleagues
long-summary: |
Usage: --colleagues display-name=XX relationship=XX user-principal-name=XX
Multiple actions can be specified by using more than one --colleagues argument.
- name: --manager
short-summary: "relatedPerson"
long-summary: |
Usage: --manager display-name=XX relationship=XX user-principal-name=XX
"""
# Help text (knack YAML format) for `people user-profile create-project`.
# Fix: the --address Usage line advertises `post-office-box=XX`, but its
# description was missing from the field list below; added per the
# Microsoft Graph physicalAddress resource documentation.
helps['people user-profile create-project'] = """
    type: command
    short-summary: "Create new navigation property to projects for users."
    parameters:
      - name: --inference
        short-summary: "inferenceData"
        long-summary: |
            Usage: --inference confidence-score=XX user-has-verified-accuracy=XX
      - name: --source
        short-summary: "personDataSources"
        long-summary: |
            Usage: --source type=XX
      - name: --application
        short-summary: "identity"
        long-summary: |
            Usage: --application display-name=XX id=XX
            display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
            id: Unique identifier for the identity.
      - name: --device
        short-summary: "identity"
        long-summary: |
            Usage: --device display-name=XX id=XX
            display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
            id: Unique identifier for the identity.
      - name: --user
        short-summary: "identity"
        long-summary: |
            Usage: --user display-name=XX id=XX
            display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
            id: Unique identifier for the identity.
      - name: --microsoft-graph-identity-application
        short-summary: "identity"
        long-summary: |
            Usage: --microsoft-graph-identity-application display-name=XX id=XX
            display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
            id: Unique identifier for the identity.
      - name: --microsoft-graph-identity-device
        short-summary: "identity"
        long-summary: |
            Usage: --microsoft-graph-identity-device display-name=XX id=XX
            display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
            id: Unique identifier for the identity.
      - name: --microsoft-graph-identity-user
        short-summary: "identity"
        long-summary: |
            Usage: --microsoft-graph-identity-user display-name=XX id=XX
            display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
            id: Unique identifier for the identity.
      - name: --colleagues
        long-summary: |
            Usage: --colleagues display-name=XX relationship=XX user-principal-name=XX
            Multiple actions can be specified by using more than one --colleagues argument.
      - name: --sponsors
        long-summary: |
            Usage: --sponsors display-name=XX relationship=XX user-principal-name=XX
            Multiple actions can be specified by using more than one --sponsors argument.
      - name: --address
        short-summary: "physicalAddress"
        long-summary: |
            Usage: --address city=XX country-or-region=XX postal-code=XX post-office-box=XX state=XX street=XX type=XX
            city: The city.
            country-or-region: The country or region. It's a free-format string value, for example, 'United States'.
            postal-code: The postal code.
            post-office-box: The post office box number.
            state: The state.
            street: The street.
    """
helps['people user-profile create-publication'] = """
type: command
short-summary: "Create new navigation property to publications for users."
parameters:
- name: --inference
short-summary: "inferenceData"
long-summary: |
Usage: --inference confidence-score=XX user-has-verified-accuracy=XX
- name: --source
short-summary: "personDataSources"
long-summary: |
Usage: --source type=XX
- name: --application
short-summary: "identity"
long-summary: |
Usage: --application display-name=XX id=XX
display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
id: Unique identifier for the identity.
- name: --device
short-summary: "identity"
long-summary: |
Usage: --device display-name=XX id=XX
display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
id: Unique identifier for the identity.
- name: --user
short-summary: "identity"
long-summary: |
Usage: --user display-name=XX id=XX
display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
id: Unique identifier for the identity.
- name: --microsoft-graph-identity-application
short-summary: "identity"
long-summary: |
Usage: --microsoft-graph-identity-application display-name=XX id=XX
display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
id: Unique identifier for the identity.
- name: --microsoft-graph-identity-device
short-summary: "identity"
long-summary: |
Usage: --microsoft-graph-identity-device display-name=XX id=XX
display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
id: Unique identifier for the identity.
- name: --microsoft-graph-identity-user
short-summary: "identity"
long-summary: |
Usage: --microsoft-graph-identity-user display-name=XX id=XX
display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
id: Unique identifier for the identity.
"""
helps['people user-profile create-skill'] = """
type: command
short-summary: "Create new navigation property to skills for users."
parameters:
- name: --inference
short-summary: "inferenceData"
long-summary: |
Usage: --inference confidence-score=XX user-has-verified-accuracy=XX
- name: --source
short-summary: "personDataSources"
long-summary: |
Usage: --source type=XX
- name: --application
short-summary: "identity"
long-summary: |
Usage: --application display-name=XX id=XX
display-name: The identity's display name. Note that this may not always be available or up to date. For \
example, if a user changes their display name, the API may show the new value in a future response, but the items \
associated with the user won't show up as having changed when using delta.
id: Unique identifier for the identity.
- name: --device
short-summary: "identity"
long-summary: |
Usage: --device display-name=XX id=XX
display-name: The identity's display name. | |
) == path ]), 6 )
self.assertEqual( prims[0].vertex( 0 ).point().position(), hou.Vector3( 3, 0, 0 ) )
self.assertEqual( prims[6].vertex( 0 ).point().position(), hou.Vector3( 6, 0, 0 ) )
node.parm( "root" ).set( "/1/2/3" )
prims = node.geometry().prims()
self.assertEqual( len(prims), 6 )
nameAttr = node.geometry().findPrimAttrib( "name" )
pathAttr = node.geometry().findPrimAttrib( "path" )
self.assertEqual( nameAttr.strings(), tuple( [ '/' ] ) )
self.assertEqual( pathAttr.strings(), tuple( [ '/1/2/3' ] ) )
for name in nameAttr.strings() :
self.assertEqual( len([ x for x in prims if x.attribValue( "name" ) == name ]), 6 )
for path in pathAttr.strings() :
self.assertEqual( len([ x for x in prims if x.attribValue( "path" ) == path ]), 6 )
self.assertEqual( prims[0].vertex( 0 ).point().position(), hou.Vector3( 6, 0, 0 ) )
def testReloadButton( self ) :
def testNode( node ) :
self.assertEqual( node.cookCount(), 0 )
node.cook()
self.assertEqual( node.cookCount(), 1 )
node.cook()
self.assertEqual( node.cookCount(), 1 )
node.parm( "reload" ).pressButton()
node.cook()
self.assertEqual( node.cookCount(), 2 )
node.cook()
self.assertEqual( node.cookCount(), 2 )
node.parm( "reload" ).pressButton()
node.cook()
self.assertEqual( node.cookCount(), 3 )
self.writeSCC()
testNode( self.sop() )
testNode( self.xform() )
testNode( self.geometry() )
testNode( self.sopXform() )
def writeAnimSCC( self, rotate = False ) :
scene = self.writeSCC()
sc1 = scene.child( str( 1 ) )
sc2 = sc1.child( str( 2 ) )
sc3 = sc2.child( str( 3 ) )
mesh = IECore.MeshPrimitive.createBox(IECore.Box3f(IECore.V3f(0),IECore.V3f(1)))
for time in [ 0.5, 1, 1.5, 2, 5, 10 ] :
matrix = IECore.M44d.createTranslated( IECore.V3d( 1, time, 0 ) )
if rotate :
matrix.rotate( IECore.V3d( time, 0, 0 ) )
sc1.writeTransform( IECore.M44dData( matrix ), time )
mesh["Cs"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Uniform, IECore.V3fVectorData( [ IECore.V3f( time, 1, 0 ) ] * 6 ) )
sc2.writeObject( mesh, time )
matrix = IECore.M44d.createTranslated( IECore.V3d( 2, time, 0 ) )
if rotate :
matrix.rotate( IECore.V3d( time, 0, 0 ) )
sc2.writeTransform( IECore.M44dData( matrix ), time )
matrix = IECore.M44d.createTranslated( IECore.V3d( 3, time, 0 ) )
if rotate :
matrix.rotate( IECore.V3d( time, 0, 0 ) )
sc3.writeTransform( IECore.M44dData( matrix ), time )
return scene
	def testAnimatedScene( self ) :
		"""Verify that animated transforms written by writeAnimSCC() are honoured
		by the SOP (in both Houdini and Cortex geometry modes) and by the
		expanded OBJ hierarchy, in both SubNetworks and Parenting modes."""
		self.writeAnimSCC()
		# sample at 0, 0.25, 0.5, 0.75 offsets of every integer frame
		# (Python 2: range() returns a list, so extend/sort work in place)
		times = range( 0, 10 )
		halves = [ x + 0.5 for x in times ]
		quarters = [ x + 0.25 for x in times ]
		times.extend( [ x + 0.75 for x in times ] )
		times.extend( halves )
		times.extend( quarters )
		times.sort()
		# Houdini evaluates one frame ahead of the cache time, hence the
		# seconds-per-frame offset applied to every setTime call below.
		spf = 1.0 / hou.fps()
		sop = self.sop()
		for time in times :
			hou.setTime( time - spf )
			# Houdini geometry: 3 boxes of 6 prims, transforms baked into points
			sop.parm( "geometryType" ).set( IECoreHoudini.SceneCacheNode.GeometryType.Houdini )
			prims = sop.geometry().prims()
			self.assertEqual( len(prims), 18 )
			nameAttr = sop.geometry().findPrimAttrib( "name" )
			self.assertEqual( nameAttr.strings(), tuple( [ '/1', '/1/2', '/1/2/3' ] ) )
			for name in nameAttr.strings() :
				self.assertEqual( len([ x for x in prims if x.attribValue( "name" ) == name ]), 6 )
			# accumulated world translations: (1,t), (3,2t), (6,3t)
			self.assertEqual( prims[0].vertex( 0 ).point().position(), hou.Vector3( 1, time, 0 ) )
			self.assertEqual( prims[6].vertex( 0 ).point().position(), hou.Vector3( 3, 2*time, 0 ) )
			self.assertEqual( prims[12].vertex( 0 ).point().position(), hou.Vector3( 6, 3*time, 0 ) )
			# animated Cs primvar becomes the Cd attrib
			self.assertEqual( prims[6].attribValue( "Cd" ), ( time, 1, 0 ) )
			# Cortex geometry: one primitive per location, positions at box centres
			sop.parm( "geometryType" ).set( IECoreHoudini.SceneCacheNode.GeometryType.Cortex )
			prims = sop.geometry().prims()
			self.assertEqual( len(prims), 3 )
			nameAttr = sop.geometry().findPrimAttrib( "name" )
			self.assertEqual( nameAttr.strings(), tuple( [ '/1', '/1/2', '/1/2/3' ] ) )
			for name in nameAttr.strings() :
				self.assertEqual( len([ x for x in prims if x.attribValue( "name" ) == name ]), 1 )
			self.assertEqual( prims[0].vertices()[0].point().position(), hou.Vector3( 1.5, time + 0.5, 0.5 ) )
			self.assertEqual( prims[1].vertices()[0].point().position(), hou.Vector3( 3.5, 2*time + 0.5, 0.5 ) )
			self.assertEqual( prims[2].vertices()[0].point().position(), hou.Vector3( 6.5, 3*time + 0.5, 0.5 ) )
		# expand as nested SubNetworks: local transforms match the cache,
		# world transforms accumulate down the hierarchy
		xform = self.xform()
		xform.parm( "expand" ).pressButton()
		a = xform.children()[0]
		b = [ x for x in a.children() if x.name() != "geo" ][0]
		c = [ x for x in b.children() if x.name() != "geo" ][0]
		for time in times :
			if hou.applicationVersion()[0] >= 14 :
				# localTransformAtTime only exists in Houdini 14+
				self.assertEqual( IECore.M44d( list(xform.localTransformAtTime( time - spf ).asTuple()) ), IECore.M44d() )
				self.assertEqual( IECore.M44d( list(a.localTransformAtTime( time - spf ).asTuple()) ), IECore.M44d.createTranslated( IECore.V3d( 1, time, 0 ) ) )
				self.assertEqual( IECore.M44d( list(b.localTransformAtTime( time - spf ).asTuple()) ), IECore.M44d.createTranslated( IECore.V3d( 2, time, 0 ) ) )
				self.assertEqual( IECore.M44d( list(c.localTransformAtTime( time - spf ).asTuple()) ), IECore.M44d.createTranslated( IECore.V3d( 3, time, 0 ) ) )
				self.assertEqual( IECore.M44d( list(xform.worldTransformAtTime( time - spf ).asTuple()) ), IECore.M44d() )
				self.assertEqual( IECore.M44d( list(a.worldTransformAtTime( time - spf ).asTuple()) ), IECore.M44d.createTranslated( IECore.V3d( 1, time, 0 ) ) )
				self.assertEqual( IECore.M44d( list(b.worldTransformAtTime( time - spf ).asTuple()) ), IECore.M44d.createTranslated( IECore.V3d( 3, time * 2, 0 ) ) )
				self.assertEqual( IECore.M44d( list(c.worldTransformAtTime( time - spf ).asTuple()) ), IECore.M44d.createTranslated( IECore.V3d( 6, time * 3, 0 ) ) )
			else :
				self.assertEqual( IECore.M44d( list(xform.worldTransformAtTime( time - spf ).asTuple()) ), IECore.M44d() )
				self.assertEqual( IECore.M44d( list(a.worldTransformAtTime( time - spf ).asTuple()) ), IECore.M44d.createTranslated( IECore.V3d( 1, time, 0 ) ) )
				self.assertEqual( IECore.M44d( list(b.worldTransformAtTime( time - spf ).asTuple()) ), IECore.M44d.createTranslated( IECore.V3d( 2, time, 0 ) ) )
				self.assertEqual( IECore.M44d( list(c.worldTransformAtTime( time - spf ).asTuple()) ), IECore.M44d.createTranslated( IECore.V3d( 3, time, 0 ) ) )
		# parm transforms always hold the local (cache) values
		for time in times :
			hou.setTime( time - spf )
			self.assertEqual( IECore.M44d( list(xform.parmTransform().asTuple()) ), IECore.M44d() )
			self.assertEqual( IECore.M44d( list(a.parmTransform().asTuple()) ), IECore.M44d.createTranslated( IECore.V3d( 1, time, 0 ) ) )
			self.assertEqual( IECore.M44d( list(b.parmTransform().asTuple()) ), IECore.M44d.createTranslated( IECore.V3d( 2, time, 0 ) ) )
			self.assertEqual( IECore.M44d( list(c.parmTransform().asTuple()) ), IECore.M44d.createTranslated( IECore.V3d( 3, time, 0 ) ) )
		# re-expand in Parenting mode: all nodes are siblings of the top xform,
		# but world transforms must still accumulate as before
		xform.parm( "hierarchy" ).set( IECoreHoudini.SceneCacheNode.Hierarchy.Parenting )
		xform.parm( "collapse" ).pressButton()
		xform.parm( "expand" ).pressButton()
		a = xform.children()[0]
		b = xform.children()[1]
		c = xform.children()[2]
		for time in times :
			if hou.applicationVersion()[0] >= 14 :
				self.assertEqual( IECore.M44d( list(xform.localTransformAtTime( time - spf ).asTuple()) ), IECore.M44d() )
				self.assertEqual( IECore.M44d( list(a.localTransformAtTime( time - spf ).asTuple()) ), IECore.M44d.createTranslated( IECore.V3d( 1, time, 0 ) ) )
				self.assertEqual( IECore.M44d( list(b.localTransformAtTime( time - spf ).asTuple()) ), IECore.M44d.createTranslated( IECore.V3d( 2, time, 0 ) ) )
				self.assertEqual( IECore.M44d( list(c.localTransformAtTime( time -spf ).asTuple()) ), IECore.M44d.createTranslated( IECore.V3d( 3, time, 0 ) ) )
				self.assertEqual( IECore.M44d( list(xform.worldTransformAtTime( time - spf ).asTuple()) ), IECore.M44d() )
				self.assertEqual( IECore.M44d( list(a.worldTransformAtTime( time - spf ).asTuple()) ), IECore.M44d.createTranslated( IECore.V3d( 1, time, 0 ) ) )
				self.assertEqual( IECore.M44d( list(b.worldTransformAtTime( time - spf ).asTuple()) ), IECore.M44d.createTranslated( IECore.V3d( 3, 2*time, 0 ) ) )
				self.assertEqual( IECore.M44d( list(c.worldTransformAtTime( time -spf ).asTuple()) ), IECore.M44d.createTranslated( IECore.V3d( 6, 3*time, 0 ) ) )
			else :
				self.assertEqual( IECore.M44d( list(xform.worldTransformAtTime( time - spf ).asTuple()) ), IECore.M44d() )
				self.assertEqual( IECore.M44d( list(a.worldTransformAtTime( time - spf ).asTuple()) ), IECore.M44d.createTranslated( IECore.V3d( 1, time, 0 ) ) )
				self.assertEqual( IECore.M44d( list(b.worldTransformAtTime( time - spf ).asTuple()) ), IECore.M44d.createTranslated( IECore.V3d( 3, 2*time, 0 ) ) )
				self.assertEqual( IECore.M44d( list(c.worldTransformAtTime( time -spf ).asTuple()) ), IECore.M44d.createTranslated( IECore.V3d( 6, 3*time, 0 ) ) )
		for time in times :
			hou.setTime( time - spf )
			self.assertEqual( IECore.M44d( list(xform.parmTransform().asTuple()) ), IECore.M44d() )
			self.assertEqual( IECore.M44d( list(a.parmTransform().asTuple()) ), IECore.M44d.createTranslated( IECore.V3d( 1, time, 0 ) ) )
			self.assertEqual( IECore.M44d( list(b.parmTransform().asTuple()) ), IECore.M44d.createTranslated( IECore.V3d( 2, time, 0 ) ) )
			self.assertEqual( IECore.M44d( list(c.parmTransform().asTuple()) ), IECore.M44d.createTranslated( IECore.V3d( 3, time, 0 ) ) )
def testSopXformNameMode( self ) :
self.writeAnimSCC()
times = range( 0, 10 )
halves = [ x + 0.5 for x in times ]
quarters = [ x + 0.25 for x in times ]
times.extend( [ x + 0.75 for x in times ] )
times.extend( halves )
times.extend( quarters )
times.sort()
spf = 1.0 / hou.fps()
node = self.sopXform()
# prims transform according to their name
for time in times :
hou.setTime( time - spf )
prims = node.geometry().prims()
self.assertEqual( len(prims), 18 )
nameAttr = node.geometry().findPrimAttrib( "name" )
self.assertEqual( nameAttr.strings(), tuple( [ '/1', '/1/2', '/1/2/3' ] ) )
for name in nameAttr.strings() :
self.assertEqual( len([ x for x in prims if x.attribValue( "name" ) == name ]), 6 )
self.assertEqual( prims[0].vertex( 0 ).point().position(), hou.Vector3( 1.5, time - 0.5, -0.5 ) )
self.assertEqual( prims[6].vertex( 0 ).point().position(), hou.Vector3( 3.5, time*2 - 0.5, -0.5 ) )
self.assertEqual( prims[12].vertex( 0 ).point().position(), hou.Vector3( 6.5, time*3 - 0.5, -0.5 ) )
# names are relative to the root parm, and non-matching names are ignored
node.parm( "root" ).set( "/1/2" )
for time in times :
hou.setTime( time - spf )
prims = node.geometry().prims()
self.assertEqual( len(prims), 18 )
nameAttr = node.geometry().findPrimAttrib( "name" )
self.assertEqual( nameAttr.strings(), tuple( [ '/1', '/1/2', '/1/2/3' ] ) )
for name in nameAttr.strings() :
self.assertEqual( len([ x for x in prims if x.attribValue( "name" ) == name ]), 6 )
self.assertEqual( prims[0].vertex( 0 ).point().position(), hou.Vector3( 0.5, -0.5, -0.5 ) )
self.assertEqual( prims[6].vertex( 0 ).point().position(), hou.Vector3( 0.5, -0.5, -0.5 ) )
self.assertEqual( prims[12].vertex( 0 ).point().position(), hou.Vector3( 0.5, -0.5, -0.5 ) )
# making the names relative again so the transformations take effect
node.inputConnections()[0].inputNode().inputConnections()[1].inputNode().parm( "name1" ).set( "/" )
node.inputConnections()[0].inputNode().inputConnections()[2].inputNode().parm( "name1" ).set( "/3" )
for time in times :
hou.setTime( time - spf )
prims = node.geometry().prims()
self.assertEqual( len(prims), 18 )
nameAttr = node.geometry().findPrimAttrib( "name" )
self.assertEqual( nameAttr.strings(), tuple( [ '/1', '/', '/3' ] ) )
for name in nameAttr.strings() :
self.assertEqual( len([ x for x in prims if x.attribValue( "name" ) == name ]), 6 )
# still doesn't animate because /1 doesn't match any child of /1/2
self.assertEqual( prims[0].vertex( 0 ).point().position(), hou.Vector3( 0.5, -0.5, -0.5 ) )
# these ones are proper relative paths
self.assertEqual( prims[6].vertex( 0 ).point().position(), hou.Vector3( 3.5, time*2 - 0.5, -0.5 ) )
self.assertEqual( prims[12].vertex( 0 ).point().position(), hou.Vector3( 6.5, time*3 - 0.5, -0.5 ) )
# testing invert toggle
node.parm( "invert" ).set( | |
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
from copy import deepcopy
from datetime import datetime, timedelta
import functools
from hashlib import md5
from Queue import Empty, Queue
import re
import ssl
import time
import traceback

# 3p
from pyVim import connect
from pyVmomi import vim # pylint: disable=E0611

# project
from checks import AgentCheck
from checks.libs.thread_pool import Pool
from checks.libs.vmware.basic_metrics import BASIC_METRICS
from checks.libs.vmware.all_metrics import ALL_METRICS
from util import Timer
# Value used for the `event_type` and `source_type_name` fields of emitted events.
SOURCE_TYPE = 'vsphere'
REAL_TIME_INTERVAL = 20 # Default vCenter sampling interval
# The size of the ThreadPool used to process the request queue
DEFAULT_SIZE_POOL = 4
# The interval in seconds between two refresh of the entities list
REFRESH_MORLIST_INTERVAL = 3 * 60
# The interval in seconds between two refresh of metrics metadata (id<->name)
REFRESH_METRICS_METADATA_INTERVAL = 10 * 60
# The amount of jobs batched at the same time in the queue to query available metrics
BATCH_MORLIST_SIZE = 50
# Time after which we reap the jobs that clog the queue
# TODO: use it
JOB_TIMEOUT = 10
# Despite the name, this acts as an allow-list of event types (see
# VSphereEvent._is_filtered): only types listed here are forwarded, and an
# event is then dropped when its message matches one of the type's regexes.
EXCLUDE_FILTERS = {
    'AlarmStatusChangedEvent': [r'Gray'],
    'TaskEvent': [
        r'Initialize powering On',
        r'Power Off virtual machine',
        r'Power On virtual machine',
        r'Reconfigure virtual machine',
        r'Relocate virtual machine',
        r'Suspend virtual machine',
        r'Migrate virtual machine',
    ],
    'VmBeingHotMigratedEvent': [],
    'VmMessageEvent': [],
    'VmMigratedEvent': [],
    'VmPoweredOnEvent': [],
    'VmPoweredOffEvent': [],
    'VmReconfiguredEvent': [],
    'VmResumedEvent': [],
    'VmSuspendedEvent': [],
}
# Keys used in the per-instance cache-timing bookkeeping (VSphereCheck.cache_times).
MORLIST = 'morlist'
METRICS_METADATA = 'metrics_metadata'
LAST = 'last'
INTERVAL = 'interval'
class VSphereEvent(object):
    """Wraps a raw pyVmomi event and converts it into a Datadog event payload.

    Filtering is driven by EXCLUDE_FILTERS; per-type formatting is done by the
    `transform_<eventtype>` methods, with a generic fallback.
    """
    UNKNOWN = 'unknown'
    def __init__(self, raw_event, event_config=None):
        """Build the base payload from a raw vim.event.* object.

        :param raw_event: the pyVmomi event instance
        :param event_config: per-instance event options (e.g. collect_vcenter_alarms)
        """
        self.raw_event = raw_event
        # Class names look like 'vim.event.VmPoweredOnEvent'; stripping the
        # 10-char 'vim.event.' prefix yields the bare type name.
        if self.raw_event and self.raw_event.__class__.__name__.startswith('vim.event'):
            self.event_type = self.raw_event.__class__.__name__[10:]
        else:
            self.event_type = VSphereEvent.UNKNOWN
        # Unix timestamp from the event's creation time (made naive first).
        # NOTE(review): this line still dereferences raw_event, so a falsy
        # raw_event would raise here despite the guard above — confirm callers
        # never pass None.
        self.timestamp = int((self.raw_event.createdTime.replace(tzinfo=None) - datetime(1970, 1, 1)).total_seconds())
        self.payload = {
            "timestamp": self.timestamp,
            "event_type": SOURCE_TYPE,
            "source_type_name": SOURCE_TYPE,
        }
        if event_config is None:
            self.event_config = {}
        else:
            self.event_config = event_config
    def _is_filtered(self):
        """Return True when this event must be dropped.

        Types absent from EXCLUDE_FILTERS are dropped outright; listed types
        are dropped only when the message matches one of their regexes.
        """
        # Filter the unwanted types
        if self.event_type not in EXCLUDE_FILTERS:
            return True
        filters = EXCLUDE_FILTERS[self.event_type]
        for f in filters:
            if re.search(f, self.raw_event.fullFormattedMessage):
                return True
        return False
    def get_datadog_payload(self):
        """Return the Datadog event payload dict, or None if filtered out."""
        if self._is_filtered():
            return None
        # Dispatch to a type-specific transform when one is defined.
        transform_method = getattr(self, 'transform_%s' % self.event_type.lower(), None)
        if callable(transform_method):
            return transform_method()
        # Default event transformation
        self.payload["msg_title"] = u"{0}".format(self.event_type)
        self.payload["msg_text"] = u"@@@\n{0}\n@@@".format(self.raw_event.fullFormattedMessage)
        return self.payload
    def transform_vmbeinghotmigratedevent(self):
        """Format a hot-migration event, listing host/datacenter/datastore changes."""
        self.payload["msg_title"] = u"VM {0} is being migrated".format(self.raw_event.vm.name)
        self.payload["msg_text"] = u"{user} has launched a hot migration of this virtual machine:\n".format(user=self.raw_event.userName)
        changes = []
        pre_host = self.raw_event.host.name
        new_host = self.raw_event.destHost.name
        pre_dc = self.raw_event.datacenter.name
        new_dc = self.raw_event.destDatacenter.name
        pre_ds = self.raw_event.ds.name
        new_ds = self.raw_event.destDatastore.name
        if pre_host == new_host:
            changes.append(u"- No host migration: still {0}".format(new_host))
        else:
            # Insert in front if it's a change
            changes = [u"- Host MIGRATION: from {0} to {1}".format(pre_host, new_host)] + changes
        if pre_dc == new_dc:
            changes.append(u"- No datacenter migration: still {0}".format(new_dc))
        else:
            # Insert in front if it's a change
            changes = [u"- Datacenter MIGRATION: from {0} to {1}".format(pre_dc, new_dc)] + changes
        if pre_ds == new_ds:
            changes.append(u"- No datastore migration: still {0}".format(new_ds))
        else:
            # Insert in front if it's a change
            changes = [u"- Datastore MIGRATION: from {0} to {1}".format(pre_ds, new_ds)] + changes
        self.payload["msg_text"] += "\n".join(changes)
        self.payload['host'] = self.raw_event.vm.name
        # Tag with both source and destination host/datacenter.
        self.payload['tags'] = [
            'vsphere_host:%s' % pre_host,
            'vsphere_host:%s' % new_host,
            'vsphere_datacenter:%s' % pre_dc,
            'vsphere_datacenter:%s' % new_dc,
        ]
        return self.payload
    def transform_alarmstatuschangedevent(self):
        """Format a vCenter alarm transition as a Triggered/Recovered event.

        Returns None unless `collect_vcenter_alarms` is enabled, the entity is
        a VM or host, and the status transition is recognized.
        """
        if self.event_config.get('collect_vcenter_alarms') is None:
            return None
        def get_transition(before, after):
            # Map statuses to severities; moving up is a trigger, down a recovery.
            vals = {
                'gray': -1,
                'green': 0,
                'yellow': 1,
                'red': 2
            }
            before = before.lower()
            after = after.lower()
            if before not in vals or after not in vals:
                return None
            if vals[before] < vals[after]:
                return 'Triggered'
            else:
                return 'Recovered'
        TO_ALERT_TYPE = {
            'green': 'success',
            'yellow': 'warning',
            'red': 'error'
        }
        def get_agg_key(alarm_event):
            # Stable aggregation key built from entity/datacenter/alarm names.
            return 'h:{0}|dc:{1}|a:{2}'.format(
                md5(alarm_event.entity.name).hexdigest()[:10],
                md5(alarm_event.datacenter.name).hexdigest()[:10],
                md5(alarm_event.alarm.name).hexdigest()[:10]
            )
        # Get the entity type/name
        if self.raw_event.entity.entity.__class__ == vim.VirtualMachine:
            host_type = 'VM'
        elif self.raw_event.entity.entity.__class__ == vim.HostSystem:
            host_type = 'host'
        else:
            return None
        host_name = self.raw_event.entity.name
        # Need a getattr because from is a reserved keyword...
        trans_before = getattr(self.raw_event, 'from')
        trans_after = self.raw_event.to
        transition = get_transition(trans_before, trans_after)
        # Bad transition, we shouldn't have got this transition
        if transition is None:
            return None
        self.payload['msg_title'] = u"[{transition}] {monitor} on {host_type} {host_name} is now {status}".format(
            transition=transition,
            monitor=self.raw_event.alarm.name,
            host_type=host_type,
            host_name=host_name,
            status=trans_after
        )
        # NOTE(review): trans_after is not lowercased before this lookup, while
        # get_transition lowercases its own copies — a capitalized status (e.g.
        # 'Red') would raise KeyError here. Confirm the API always sends lowercase.
        self.payload['alert_type'] = TO_ALERT_TYPE[trans_after]
        self.payload['event_object'] = get_agg_key(self.raw_event)
        self.payload['msg_text'] = u"""vCenter monitor status changed on this alarm, it was {before} and it's now {after}.""".format(
            before=trans_before,
            after=trans_after
        )
        self.payload['host'] = host_name
        return self.payload
    def transform_vmmessageevent(self):
        """Format a generic VM message event."""
        self.payload["msg_title"] = u"VM {0} is reporting".format(self.raw_event.vm.name)
        self.payload["msg_text"] = u"@@@\n{0}\n@@@".format(self.raw_event.fullFormattedMessage)
        self.payload['host'] = self.raw_event.vm.name
        return self.payload
    def transform_vmmigratedevent(self):
        """Format a completed-migration event."""
        self.payload["msg_title"] = u"VM {0} has been migrated".format(self.raw_event.vm.name)
        self.payload["msg_text"] = u"@@@\n{0}\n@@@".format(self.raw_event.fullFormattedMessage)
        self.payload['host'] = self.raw_event.vm.name
        return self.payload
    def transform_vmpoweredoffevent(self):
        """Format a power-off event with user, datacenter and host details."""
        self.payload["msg_title"] = u"VM {0} has been powered OFF".format(self.raw_event.vm.name)
        self.payload["msg_text"] = u"""{user} has powered off this virtual machine. It was running on:
- datacenter: {dc}
- host: {host}
""".format(
            user=self.raw_event.userName,
            dc=self.raw_event.datacenter.name,
            host=self.raw_event.host.name
        )
        self.payload['host'] = self.raw_event.vm.name
        return self.payload
    def transform_vmpoweredonevent(self):
        """Format a power-on event with user, datacenter and host details."""
        self.payload["msg_title"] = u"VM {0} has been powered ON".format(self.raw_event.vm.name)
        self.payload["msg_text"] = u"""{user} has powered on this virtual machine. It is running on:
- datacenter: {dc}
- host: {host}
""".format(
            user=self.raw_event.userName,
            dc=self.raw_event.datacenter.name,
            host=self.raw_event.host.name
        )
        self.payload['host'] = self.raw_event.vm.name
        return self.payload
    def transform_vmresumingevent(self):
        """Format a VM-resuming event."""
        self.payload["msg_title"] = u"VM {0} is RESUMING".format(self.raw_event.vm.name)
        self.payload["msg_text"] = u"""{user} has resumed {vm}. It will soon be powered on.""".format(
            user=self.raw_event.userName,
            vm=self.raw_event.vm.name
        )
        self.payload['host'] = self.raw_event.vm.name
        return self.payload
    def transform_vmsuspendedevent(self):
        """Format a VM-suspended event with user, datacenter and host details."""
        self.payload["msg_title"] = u"VM {0} has been SUSPENDED".format(self.raw_event.vm.name)
        self.payload["msg_text"] = u"""{user} has suspended this virtual machine. It was running on:
- datacenter: {dc}
- host: {host}
""".format(
            user=self.raw_event.userName,
            dc=self.raw_event.datacenter.name,
            host=self.raw_event.host.name
        )
        self.payload['host'] = self.raw_event.vm.name
        return self.payload
    def transform_vmreconfiguredevent(self):
        """Format a reconfiguration event, embedding the changed config spec."""
        self.payload["msg_title"] = u"VM {0} configuration has been changed".format(self.raw_event.vm.name)
        self.payload["msg_text"] = u"{user} saved the new configuration:\n@@@\n".format(user=self.raw_event.userName)
        # Add lines for configuration change don't show unset, that's hacky...
        config_change_lines = [line for line in self.raw_event.configSpec.__repr__().splitlines() if 'unset' not in line]
        self.payload["msg_text"] += u"\n".join(config_change_lines)
        self.payload["msg_text"] += u"\n@@@"
        self.payload['host'] = self.raw_event.vm.name
        return self.payload
def atomic_method(method):
    """ Decorator to catch the exceptions that happen in detached thread atomic tasks
    and display them in the logs.

    The wrapped task's first positional argument must expose an `exceptionq`
    queue; any exception is formatted with its traceback and put() there
    instead of propagating (worker threads would otherwise die silently).
    The task's return value is intentionally discarded.
    """
    # functools.wraps preserves the task's __name__/__doc__, so crashed jobs
    # are identifiable in logs and debuggers instead of all showing "wrapper".
    @functools.wraps(method)
    def wrapper(*args, **kwargs):
        try:
            method(*args, **kwargs)
        except Exception:
            args[0].exceptionq.put("A worker thread crashed:\n" + traceback.format_exc())
    return wrapper
class VSphereCheck(AgentCheck):
""" Get performance metrics from a vCenter server and upload them to Datadog
References:
http://pubs.vmware.com/vsphere-51/index.jsp#com.vmware.wssdk.apiref.doc/vim.PerformanceManager.html
*_atomic jobs perform one single task asynchronously in the ThreadPool, we
don't know exactly when they will finish, but we reap them if they're stuck.
The other calls are performed synchronously.
"""
SERVICE_CHECK_NAME = 'vcenter.can_connect'
def __init__(self, name, init_config, agentConfig, instances):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
self.time_started = time.time()
self.pool_started = False
self.exceptionq = Queue()
# Connections open to vCenter instances
self.server_instances = {}
# Event configuration
self.event_config = {}
# Caching resources, timeouts
self.cache_times = {}
for instance in self.instances:
i_key = self._instance_key(instance)
self.cache_times[i_key] = {
MORLIST: {
LAST: 0,
INTERVAL: init_config.get('refresh_morlist_interval',
REFRESH_MORLIST_INTERVAL)
},
METRICS_METADATA: {
LAST: 0,
INTERVAL: init_config.get('refresh_metrics_metadata_interval',
REFRESH_METRICS_METADATA_INTERVAL)
}
}
self.event_config[i_key] = instance.get('event_config')
# First layer of cache (get entities from the tree)
self.morlist_raw = {}
# Second layer, processed from the first one
self.morlist = {}
# Metrics metadata, basically perfCounterId -> {name, group, description}
self.metrics_metadata = {}
self.latest_event_query = {}
def stop(self):
    """Public stop hook: tear down the worker thread pool."""
    self.stop_pool()

def start_pool(self):
    """Create the worker thread pool and reset job bookkeeping."""
    self.log.info("Starting Thread Pool")
    self.pool_size = int(self.init_config.get('threads_count', DEFAULT_SIZE_POOL))
    self.pool = Pool(self.pool_size)
    self.pool_started = True
    # job name -> start timestamp; read by _clean() to detect stuck jobs
    self.jobs_status = {}

def stop_pool(self):
    """Terminate the pool (if started) and clear job bookkeeping."""
    self.log.info("Stopping Thread Pool")
    if self.pool_started:
        self.pool.terminate()
        self.pool.join()
        self.jobs_status.clear()
        # NOTE(review): stripped under `python -O`; sanity check only.
        assert self.pool.get_nworkers() == 0
        self.pool_started = False

def restart_pool(self):
    """Stop then start the pool; used when a job is stuck."""
    self.stop_pool()
    self.start_pool()
def _clean(self):
    """Reap stuck jobs: if any job has been running longer than JOB_TIMEOUT,
    restart the whole thread pool (which clears jobs_status)."""
    now = time.time()
    # BUG FIX: iterate over a snapshot — restart_pool() clears jobs_status
    # while we are looping, which would otherwise invalidate the live view.
    for name, start_time in list(self.jobs_status.items()):
        if now - start_time > JOB_TIMEOUT:
            self.log.critical("Restarting Pool. One check is stuck.")
            self.restart_pool()
            break
def _query_event(self, instance):
    """Fetch vCenter events created since the last poll, forward them to
    Datadog, and advance the per-instance high-water mark."""
    i_key = self._instance_key(instance)
    last_time = self.latest_event_query.get(i_key)
    server_instance = self._get_server_instance(instance)
    event_manager = server_instance.content.eventManager
    # Be sure we don't duplicate any event, never query the "past"
    if not last_time:
        # First poll: start one second after the newest known event.
        last_time = self.latest_event_query[i_key] = \
            event_manager.latestEvent.createdTime + timedelta(seconds=1)
    query_filter = vim.event.EventFilterSpec()
    time_filter = vim.event.EventFilterSpec.ByTime(beginTime=self.latest_event_query[i_key])
    query_filter.time = time_filter
    try:
        new_events = event_manager.QueryEvents(query_filter)
        self.log.debug("Got {0} events from vCenter event manager".format(len(new_events)))
        for event in new_events:
            normalized_event = VSphereEvent(event, self.event_config[i_key])
            # Can return None if the event if filtered out
            event_payload = normalized_event.get_datadog_payload()
            if event_payload is not None:
                self.event(event_payload)
            # Advance the cursor one second past the last processed event.
            last_time = event.createdTime + timedelta(seconds=1)
    except Exception as e:
        # Don't get stuck on a failure to fetch an event
        # Ignore them for next pass
        self.log.warning("Unable to fetch Events %s", e)
        last_time = event_manager.latestEvent.createdTime + timedelta(seconds=1)
    self.latest_event_query[i_key] = last_time
def _instance_key(self, instance):
    """Return the unique per-instance key: the instance's 'name' field."""
    name = instance.get('name')
    if name is None:
        raise Exception("Must define a unique 'name' per vCenter instance")
    return name
def _should_cache(self, instance, entity):
| |
)
@dataclass
class TeamEditMembersResult(DataClassJsonMixin):
failures: Optional[List[UserRolePair]] = field(
default=None, metadata=config(field_name="failures")
)
@dataclass
class InviteLinkDetails(DataClassJsonMixin):
invite_id: TeamInviteID = field(metadata=config(field_name="inviteID"))
inviter_uid: UID = field(metadata=config(field_name="inviterUID"))
inviter_username: str = field(metadata=config(field_name="inviterUsername"))
inviter_reset_or_del: bool = field(metadata=config(field_name="inviterResetOrDel"))
team_is_open: bool = field(metadata=config(field_name="teamIsOpen"))
team_id: TeamID = field(metadata=config(field_name="teamID"))
team_desc: str = field(metadata=config(field_name="teamDesc"))
team_name: TeamName = field(metadata=config(field_name="teamName"))
team_num_members: int = field(metadata=config(field_name="teamNumMembers"))
team_avatars: Dict[str, AvatarUrl] = field(
metadata=config(field_name="teamAvatars")
)
@dataclass
class ImplicitTeamUserSet(DataClassJsonMixin):
keybase_users: Optional[List[str]] = field(
default=None, metadata=config(field_name="keybaseUsers")
)
unresolved_users: Optional[List[SocialAssertion]] = field(
default=None, metadata=config(field_name="unresolvedUsers")
)
@dataclass
class TeamProfileAddEntry(DataClassJsonMixin):
team_id: TeamID = field(metadata=config(field_name="teamID"))
team_name: TeamName = field(metadata=config(field_name="teamName"))
open: bool = field(metadata=config(field_name="open"))
disabled_reason: str = field(metadata=config(field_name="disabledReason"))
@dataclass
class TeamRoleMapAndVersion(DataClassJsonMixin):
teams: Dict[str, TeamRolePair] = field(metadata=config(field_name="teams"))
version: UserTeamVersion = field(metadata=config(field_name="user_team_version"))
# Tagged-union variants for a team-tree membership lookup: the `s`
# discriminant selects which payload field is populated.
@dataclass
class TeamTreeMembershipResult__OK(DataClassJsonMixin):
    s: Literal[TeamTreeMembershipStatusStrings.OK]
    OK: Optional[TeamTreeMembershipValue]

@dataclass
class TeamTreeMembershipResult__ERROR(DataClassJsonMixin):
    s: Literal[TeamTreeMembershipStatusStrings.ERROR]
    ERROR: Optional[TeamTreeError]

@dataclass
class TeamTreeMembershipResult__HIDDEN(DataClassJsonMixin):
    s: Literal[TeamTreeMembershipStatusStrings.HIDDEN]
    HIDDEN: None

# Union alias: consumers dispatch on the shared `s` field.
TeamTreeMembershipResult = Union[
    TeamTreeMembershipResult__OK,
    TeamTreeMembershipResult__ERROR,
    TeamTreeMembershipResult__HIDDEN,
]
@dataclass
class TeamSearchExport(DataClassJsonMixin):
items: Dict[str, TeamSearchItem] = field(metadata=config(field_name="items"))
suggested: Optional[List[TeamID]] = field(
default=None, metadata=config(field_name="suggested")
)
@dataclass
class TeamSearchRes(DataClassJsonMixin):
results: Optional[List[TeamSearchItem]] = field(
default=None, metadata=config(field_name="results")
)
@dataclass
class MerkleTreeLocation(DataClassJsonMixin):
leaf: UserOrTeamID = field(metadata=config(field_name="leaf"))
loc: SigChainLocation = field(metadata=config(field_name="loc"))
@dataclass
class SignatureMetadata(DataClassJsonMixin):
signing_kid: KID = field(metadata=config(field_name="signingKID"))
prev_merkle_root_signed: MerkleRootV2 = field(
metadata=config(field_name="prevMerkleRootSigned")
)
first_appeared_unverified: Seqno = field(
metadata=config(field_name="firstAppearedUnverified")
)
time: Time = field(metadata=config(field_name="time"))
sig_chain_location: SigChainLocation = field(
metadata=config(field_name="sigChainLocation")
)
@dataclass
class Proofs(DataClassJsonMixin):
social: Optional[List[TrackProof]] = field(
default=None, metadata=config(field_name="social")
)
web: Optional[List[WebProof]] = field(
default=None, metadata=config(field_name="web")
)
public_keys: Optional[List[PublicKey]] = field(
default=None, metadata=config(field_name="publicKeys")
)
@dataclass
class UserSummarySet(DataClassJsonMixin):
time: Time = field(metadata=config(field_name="time"))
version: int = field(metadata=config(field_name="version"))
users: Optional[List[UserSummary]] = field(
default=None, metadata=config(field_name="users")
)
@dataclass
class UserSettings(DataClassJsonMixin):
emails: Optional[List[Email]] = field(
default=None, metadata=config(field_name="emails")
)
phone_numbers: Optional[List[UserPhoneNumber]] = field(
default=None, metadata=config(field_name="phoneNumbers")
)
@dataclass
class ProofSuggestion(DataClassJsonMixin):
key: str = field(metadata=config(field_name="key"))
below_fold: bool = field(metadata=config(field_name="belowFold"))
profile_text: str = field(metadata=config(field_name="profileText"))
picker_text: str = field(metadata=config(field_name="pickerText"))
picker_subtext: str = field(metadata=config(field_name="pickerSubtext"))
profile_icon: Optional[List[SizedImage]] = field(
default=None, metadata=config(field_name="profileIcon")
)
profile_icon_darkmode: Optional[List[SizedImage]] = field(
default=None, metadata=config(field_name="profileIconDarkmode")
)
picker_icon: Optional[List[SizedImage]] = field(
default=None, metadata=config(field_name="pickerIcon")
)
picker_icon_darkmode: Optional[List[SizedImage]] = field(
default=None, metadata=config(field_name="pickerIconDarkmode")
)
metas: Optional[List[Identify3RowMeta]] = field(
default=None, metadata=config(field_name="metas")
)
@dataclass
class NextMerkleRootRes(DataClassJsonMixin):
res: Optional[MerkleRootV2] = field(default=None, metadata=config(field_name="res"))
@dataclass
class UserBlockedBody(DataClassJsonMixin):
uid: UID = field(metadata=config(field_name="blocker_uid"))
username: str = field(metadata=config(field_name="blocker_username"))
blocks: Optional[List[UserBlockedRow]] = field(
default=None, metadata=config(field_name="blocks")
)
@dataclass
class UserBlockedSummary(DataClassJsonMixin):
blocker: str = field(metadata=config(field_name="blocker"))
blocks: Dict[str, Optional[List[UserBlockState]]] = field(
metadata=config(field_name="blocks")
)
@dataclass
class Confidence(DataClassJsonMixin):
    # NOTE(review): the JSON field names embed a literal ",omitempty"
    # (mirroring Go struct tags); presumably intentional in the generator,
    # but verify the wire format really uses these exact keys.
    username_verified_via: UsernameVerificationType = field(
        metadata=config(field_name="username_verified_via,omitempty")
    )
    other: str = field(metadata=config(field_name="other,omitempty"))
    proofs: Optional[List[WotProof]] = field(
        default=None, metadata=config(field_name="proofs,omitempty")
    )
@dataclass
class BlockReferenceCount(DataClassJsonMixin):
ref: BlockReference = field(metadata=config(field_name="ref"))
live_count: int = field(metadata=config(field_name="liveCount"))
@dataclass
class ReferenceCountRes(DataClassJsonMixin):
counts: Optional[List[BlockIdCount]] = field(
default=None, metadata=config(field_name="counts")
)
@dataclass
class BlockQuotaInfo(DataClassJsonMixin):
total: UsageStat = field(metadata=config(field_name="total"))
limit: int = field(metadata=config(field_name="limit"))
git_limit: int = field(metadata=config(field_name="gitLimit"))
folders: Optional[List[FolderUsageStat]] = field(
default=None, metadata=config(field_name="folders")
)
@dataclass
class UserPlusKeys(DataClassJsonMixin):
uid: UID = field(metadata=config(field_name="uid"))
eldest_seqno: Seqno = field(metadata=config(field_name="eldestSeqno"))
status: StatusCode = field(metadata=config(field_name="status"))
username: str = field(metadata=config(field_name="username"))
pgp_key_count: int = field(metadata=config(field_name="pgpKeyCount"))
uvv: UserVersionVector = field(metadata=config(field_name="uvv"))
device_keys: Optional[List[PublicKey]] = field(
default=None, metadata=config(field_name="deviceKeys")
)
resets: Optional[List[ResetSummary]] = field(
default=None, metadata=config(field_name="resets")
)
deleted_device_keys: Optional[List[PublicKey]] = field(
default=None, metadata=config(field_name="deletedDeviceKeys")
)
per_user_keys: Optional[List[PerUserKey]] = field(
default=None, metadata=config(field_name="perUserKeys")
)
revoked_device_keys: Optional[List[RevokedKey]] = field(
default=None, metadata=config(field_name="revokedDeviceKeys")
)
@dataclass
class ExtendedStatus(DataClassJsonMixin):
standalone: bool = field(metadata=config(field_name="standalone"))
ui_router_mapping: Dict[str, int] = field(
metadata=config(field_name="uiRouterMapping")
)
tsec_cached: bool = field(metadata=config(field_name="tsecCached"))
device_sig_key_cached: bool = field(
metadata=config(field_name="deviceSigKeyCached")
)
device_enc_key_cached: bool = field(
metadata=config(field_name="deviceEncKeyCached")
)
paper_sig_key_cached: bool = field(metadata=config(field_name="paperSigKeyCached"))
paper_enc_key_cached: bool = field(metadata=config(field_name="paperEncKeyCached"))
stored_secret: bool = field(metadata=config(field_name="storedSecret"))
secret_prompt_skip: bool = field(metadata=config(field_name="secretPromptSkip"))
remember_passphrase: bool = field(metadata=config(field_name="rememberPassphrase"))
default_device_id: DeviceID = field(metadata=config(field_name="defaultDeviceID"))
platform_info: PlatformInfo = field(metadata=config(field_name="platformInfo"))
log_dir: str = field(metadata=config(field_name="logDir"))
passphrase_stream_cached: bool = field(
metadata=config(field_name="passphraseStreamCached")
)
default_username: str = field(metadata=config(field_name="defaultUsername"))
local_db_stats: Optional[List[str]] = field(
default=None, metadata=config(field_name="localDbStats")
)
provisioned_usernames: Optional[List[str]] = field(
default=None, metadata=config(field_name="provisionedUsernames")
)
clients: Optional[List[ClientStatus]] = field(
default=None, metadata=config(field_name="Clients")
)
device_ek_names: Optional[List[str]] = field(
default=None, metadata=config(field_name="deviceEkNames")
)
device_err: Optional[LoadDeviceErr] = field(
default=None, metadata=config(field_name="deviceErr")
)
device: Optional[Device] = field(default=None, metadata=config(field_name="device"))
configured_accounts: Optional[List[ConfiguredAccount]] = field(
default=None, metadata=config(field_name="configuredAccounts")
)
local_chat_db_stats: Optional[List[str]] = field(
default=None, metadata=config(field_name="localChatDbStats")
)
local_block_cache_db_stats: Optional[List[str]] = field(
default=None, metadata=config(field_name="localBlockCacheDbStats")
)
local_sync_cache_db_stats: Optional[List[str]] = field(
default=None, metadata=config(field_name="localSyncCacheDbStats")
)
cache_dir_size_info: Optional[List[DirSizeInfo]] = field(
default=None, metadata=config(field_name="cacheDirSizeInfo")
)
session: Optional[SessionStatus] = field(
default=None, metadata=config(field_name="session")
)
@dataclass
class ContactListResolutionResult(DataClassJsonMixin):
newly_resolved: Optional[List[ProcessedContact]] = field(
default=None, metadata=config(field_name="newlyResolved")
)
resolved: Optional[List[ProcessedContact]] = field(
default=None, metadata=config(field_name="resolved")
)
@dataclass
class TeamEphemeralKey__TEAM(DataClassJsonMixin):
keyType: Literal[TeamEphemeralKeyTypeStrings.TEAM]
TEAM: Optional[TeamEk]
@dataclass
class TeamEphemeralKey__TEAMBOT(DataClassJsonMixin):
keyType: Literal[TeamEphemeralKeyTypeStrings.TEAMBOT]
TEAMBOT: Optional[TeambotEk]
TeamEphemeralKey = Union[TeamEphemeralKey__TEAM, TeamEphemeralKey__TEAMBOT]
@dataclass
class TeamEphemeralKeyBoxed__TEAM(DataClassJsonMixin):
keyType: Literal[TeamEphemeralKeyTypeStrings.TEAM]
TEAM: Optional[TeamEkBoxed]
@dataclass
class TeamEphemeralKeyBoxed__TEAMBOT(DataClassJsonMixin):
keyType: Literal[TeamEphemeralKeyTypeStrings.TEAMBOT]
TEAMBOT: Optional[TeambotEkBoxed]
TeamEphemeralKeyBoxed = Union[
TeamEphemeralKeyBoxed__TEAM, TeamEphemeralKeyBoxed__TEAMBOT
]
@dataclass
class GitLocalMetadata(DataClassJsonMixin):
repo_name: GitRepoName = field(metadata=config(field_name="repoName"))
push_type: GitPushType = field(metadata=config(field_name="pushType"))
previous_repo_name: GitRepoName = field(
metadata=config(field_name="previousRepoName")
)
refs: Optional[List[GitRefMetadata]] = field(
default=None, metadata=config(field_name="refs")
)
@dataclass
class HomeScreenItemDataExt__TODO(DataClassJsonMixin):
t: Literal[HomeScreenItemTypeStrings.TODO]
TODO: Optional[HomeScreenTodoExt]
HomeScreenItemDataExt = Union[HomeScreenItemDataExt__TODO]
@dataclass
class HomeScreenPeopleNotificationFollowedMulti(DataClassJsonMixin):
num_others: int = field(metadata=config(field_name="numOthers"))
followers: Optional[List[HomeScreenPeopleNotificationFollowed]] = field(
default=None, metadata=config(field_name="followers")
)
@dataclass
class Identity(DataClassJsonMixin):
when_last_tracked: Time = field(metadata=config(field_name="whenLastTracked"))
breaks_tracking: bool = field(metadata=config(field_name="breaksTracking"))
status: Optional[Status] = field(default=None, metadata=config(field_name="status"))
proofs: Optional[List[IdentifyRow]] = field(
default=None, metadata=config(field_name="proofs")
)
cryptocurrency: Optional[List[Cryptocurrency]] = field(
default=None, metadata=config(field_name="cryptocurrency")
)
revoked: Optional[List[TrackDiff]] = field(
default=None, metadata=config(field_name="revoked")
)
revoked_details: Optional[List[RevokedProof]] = field(
default=None, metadata=config(field_name="revokedDetails")
)
@dataclass
class LinkCheckResult(DataClassJsonMixin):
proof_id: int = field(metadata=config(field_name="proofId"))
proof_result: ProofResult = field(metadata=config(field_name="proofResult"))
snoozed_result: ProofResult = field(metadata=config(field_name="snoozedResult"))
tor_warning: bool = field(metadata=config(field_name="torWarning"))
tmp_track_expire_time: Time = field(
metadata=config(field_name="tmpTrackExpireTime")
)
breaks_tracking: bool = field(metadata=config(field_name="breaksTracking"))
cached: Optional[CheckResult] = field(
default=None, metadata=config(field_name="cached")
)
diff: Optional[TrackDiff] = field(default=None, metadata=config(field_name="diff"))
remote_diff: Optional[TrackDiff] = field(
default=None, metadata=config(field_name="remoteDiff")
)
hint: Optional[SigHint] = field(default=None, metadata=config(field_name="hint"))
@dataclass
class ServicesStatus(DataClassJsonMixin):
service: Optional[List[ServiceStatus]] = field(
default=None, metadata=config(field_name="service")
)
kbfs: Optional[List[ServiceStatus]] = field(
default=None, metadata=config(field_name="kbfs")
)
updater: Optional[List[ServiceStatus]] = field(
default=None, metadata=config(field_name="updater")
)
@dataclass
class InstallResult(DataClassJsonMixin):
status: Status = field(metadata=config(field_name="status"))
fatal: bool = field(metadata=config(field_name="fatal"))
component_results: Optional[List[ComponentResult]] = field(
default=None, metadata=config(field_name="componentResults")
)
@dataclass
class UninstallResult(DataClassJsonMixin):
status: Status = field(metadata=config(field_name="status"))
component_results: Optional[List[ComponentResult]] = field(
default=None, metadata=config(field_name="componentResults")
)
@dataclass
class ProblemSet(DataClassJsonMixin):
user: User = field(metadata=config(field_name="user"))
kid: KID = field(metadata=config(field_name="kid"))
tlfs: Optional[List[ProblemTLF]] = field(
default=None, metadata=config(field_name="tlfs")
)
@dataclass
class SaltpackPlaintextResult(DataClassJsonMixin):
info: SaltpackEncryptedMessageInfo = field(metadata=config(field_name="info"))
plaintext: str = field(metadata=config(field_name="plaintext"))
signed: bool = field(metadata=config(field_name="signed"))
@dataclass
class SaltpackFileResult(DataClassJsonMixin):
info: SaltpackEncryptedMessageInfo = field(metadata=config(field_name="info"))
decrypted_filename: str = field(metadata=config(field_name="decryptedFilename"))
signed: bool = field(metadata=config(field_name="signed"))
@dataclass
class Path__LOCAL(DataClassJsonMixin):
PathType: Literal[PathTypeStrings.LOCAL]
LOCAL: Optional[str]
@dataclass
class Path__KBFS(DataClassJsonMixin):
PathType: Literal[PathTypeStrings.KBFS]
KBFS: Optional[KBFSPath]
@dataclass
class Path__KBFS_ARCHIVED(DataClassJsonMixin):
PathType: Literal[PathTypeStrings.KBFS_ARCHIVED]
KBFS_ARCHIVED: Optional[KBFSArchivedPath]
Path = Union[Path__LOCAL, Path__KBFS, Path__KBFS_ARCHIVED]
@dataclass
class DirentWithRevision(DataClassJsonMixin):
entry: Dirent = field(metadata=config(field_name="entry"))
revision: KBFSRevision = field(metadata=config(field_name="revision"))
@dataclass
class SimpleFSListResult(DataClassJsonMixin):
progress: Progress = field(metadata=config(field_name="progress"))
entries: Optional[List[Dirent]] = field(
default=None, metadata=config(field_name="entries")
)
@dataclass
class FolderSyncConfigAndStatus(DataClassJsonMixin):
config_: FolderSyncConfig = field(metadata=config(field_name="config"))
status: FolderSyncStatus = field(metadata=config(field_name="status"))
@dataclass
class TeamMembersDetails(DataClassJsonMixin):
owners: Optional[List[TeamMemberDetails]] = field(
default=None, metadata=config(field_name="owners")
)
admins: Optional[List[TeamMemberDetails]] = field(
default=None, metadata=config(field_name="admins")
)
writers: Optional[List[TeamMemberDetails]] = field(
default=None, metadata=config(field_name="writers")
)
readers: Optional[List[TeamMemberDetails]] = field(
default=None, metadata=config(field_name="readers")
)
bots: Optional[List[TeamMemberDetails]] = field(
default=None, metadata=config(field_name="bots")
)
restricted_bots: Optional[List[TeamMemberDetails]] = field(
default=None, metadata=config(field_name="restrictedBots")
)
@dataclass
class FastTeamData(DataClassJsonMixin):
max_continuous_ptk_generation: PerTeamKeyGeneration = field(
metadata=config(field_name="maxContinuousPTKGeneration")
)
frozen: bool = field(metadata=config(field_name="frozen"))
tombstoned: bool = field(metadata=config(field_name="tombstoned"))
name: TeamName = field(metadata=config(field_name="name"))
chain: FastTeamSigChainState = field(metadata=config(field_name="chain"))
per_team_key_seeds_unverified: Dict[str, PerTeamKeySeed] = field(
metadata=config(field_name="perTeamKeySeedsUnverified")
)
subversion: int = field(metadata=config(field_name="subversion"))
seed_checks: Dict[str, PerTeamSeedCheck] = field(
metadata=config(field_name="seedChecks")
)
latest_key_generation: PerTeamKeyGeneration = field(
metadata=config(field_name="latestKeyGeneration")
)
reader_key_masks: Dict[str, Dict[str, MaskB64]] = field(
metadata=config(field_name="readerKeyMasks")
)
latest_seqno_hint: Seqno = field(metadata=config(field_name="latestSeqnoHint"))
cached_at: Time = field(metadata=config(field_name="cachedAt"))
loaded_latest: bool = field(metadata=config(field_name="loadedLatest"))
@dataclass
class HiddenTeamChainRatchetSet(DataClassJsonMixin):
ratchets: Dict[str, LinkTripleAndTime] = field(
metadata=config(field_name="ratchets")
)
@dataclass
class HiddenTeamChainLink(DataClassJsonMixin):
merkle_root: MerkleRootV2 = field(metadata=config(field_name="m"))
parent_chain: LinkTriple = field(metadata=config(field_name="p"))
signer: Signer = field(metadata=config(field_name="s"))
ptk: Dict[str, PerTeamKeyAndCheck] = field(metadata=config(field_name="k"))
@dataclass
class UserLogPoint(DataClassJsonMixin):
role: TeamRole = field(metadata=config(field_name="role"))
sig_meta: SignatureMetadata = field(metadata=config(field_name="sigMeta"))
@dataclass
class AnnotatedTeamUsedInviteLogPoint(DataClassJsonMixin):
username: str = field(metadata=config(field_name="username"))
team_used_invite_log_point: TeamUsedInviteLogPoint = field(
metadata=config(field_name="teamUsedInviteLogPoint")
)
@dataclass
class SeitanKeyAndLabel__V1(DataClassJsonMixin):
v: Literal[SeitanKeyAndLabelVersionStrings.V1]
V1: Optional[SeitanKeyAndLabelVersion1]
@dataclass
class SeitanKeyAndLabel__V2(DataClassJsonMixin):
v: Literal[SeitanKeyAndLabelVersionStrings.V2]
V2: Optional[SeitanKeyAndLabelVersion2]
@dataclass
class SeitanKeyAndLabel__Invitelink(DataClassJsonMixin):
v: Literal[SeitanKeyAndLabelVersionStrings.Invitelink]
Invitelink: Optional[SeitanKeyAndLabelInvitelink]
SeitanKeyAndLabel = Union[
SeitanKeyAndLabel__V1, SeitanKeyAndLabel__V2, SeitanKeyAndLabel__Invitelink
]
@dataclass
class LoadTeamArg(DataClassJsonMixin):
force_full_reload: bool = field(metadata=config(field_name="forceFullReload"))
id: TeamID = field(metadata=config(field_name="ID"))
public: bool = field(metadata=config(field_name="public"))
need_admin: bool = field(metadata=config(field_name="needAdmin"))
refresh_uid_mapper: bool = field(metadata=config(field_name="refreshUIDMapper"))
refreshers: TeamRefreshers = field(metadata=config(field_name="refreshers"))
name: str = field(metadata=config(field_name="name"))
force_repoll: bool = field(metadata=config(field_name="forceRepoll"))
stale_ok: bool = field(metadata=config(field_name="staleOK"))
allow_name_lookup_burst_cache: bool = field(
metadata=config(field_name="allowNameLookupBurstCache")
)
skip_need_hidden_rotate_check: bool = field(
metadata=config(field_name="skipNeedHiddenRotateCheck")
)
audit_mode: AuditMode = field(metadata=config(field_name="auditMode"))
@dataclass
class TeamList(DataClassJsonMixin):
teams: Optional[List[MemberInfo]] = field(
default=None, metadata=config(field_name="teams")
)
@dataclass
class TeamTreeResult(DataClassJsonMixin):
entries: Optional[List[TeamTreeEntry]] = field(
default=None, metadata=config(field_name="entries")
)
@dataclass
class SubteamListResult(DataClassJsonMixin):
entries: Optional[List[SubteamListEntry]] = field(
default=None, metadata=config(field_name="entries")
)
@dataclass
class ImplicitTeamDisplayName(DataClassJsonMixin):
is_public: bool = field(metadata=config(field_name="isPublic"))
writers: ImplicitTeamUserSet = field(metadata=config(field_name="writers"))
readers: ImplicitTeamUserSet = field(metadata=config(field_name="readers"))
conflict_info: Optional[ImplicitTeamConflictInfo] = field(
default=None, metadata=config(field_name="conflictInfo")
)
@dataclass
class TeamRoleMapStored(DataClassJsonMixin):
data: TeamRoleMapAndVersion = field(metadata=config(field_name="data"))
cached_at: Time = field(metadata=config(field_name="cachedAt"))
@dataclass
class AnnotatedTeamMemberDetails(DataClassJsonMixin):
details: TeamMemberDetails = field(metadata=config(field_name="details"))
role: TeamRole = field(metadata=config(field_name="role"))
@dataclass
class TeamTreeMembership(DataClassJsonMixin):
team_name: str = field(metadata=config(field_name="teamName"))
result: TeamTreeMembershipResult = field(metadata=config(field_name="result"))
target_team_id: TeamID = field(metadata=config(field_name="targetTeamID"))
target_username: str = field(metadata=config(field_name="targetUsername"))
guid: int = field(metadata=config(field_name="guid"))
@dataclass
class PublicKeyV2Base(DataClassJsonMixin):
kid: KID = field(metadata=config(field_name="kid"))
is_sibkey: bool = field(metadata=config(field_name="isSibkey"))
is_eldest: bool = field(metadata=config(field_name="isEldest"))
c_time: Time = field(metadata=config(field_name="cTime"))
e_time: Time = field(metadata=config(field_name="eTime"))
provisioning: SignatureMetadata = field(metadata=config(field_name="provisioning"))
revocation: Optional[SignatureMetadata] = field(
default=None, metadata=config(field_name="revocation")
)
@dataclass
class ProofSuggestionsRes(DataClassJsonMixin):
show_more: bool = field(metadata=config(field_name="showMore"))
suggestions: Optional[List[ProofSuggestion]] = field(
default=None, metadata=config(field_name="suggestions")
)
@dataclass
class APIUserSearchResult(DataClassJsonMixin):
score: float = field(metadata=config(field_name="score"))
services_summary: Dict[str, APIUserServiceSummary] = field(
metadata=config(field_name="services_summary")
)
raw_score: float = field(metadata=config(field_name="rawScore"))
keybase: Optional[APIUserKeybaseResult] = field(
default=None, metadata=config(field_name="keybase")
)
service: Optional[APIUserServiceResult] | |
#!/usr/bin/env python3
# Copyright 2021 BHG [bw.org]
# BWDB.py as of 2021-04-13 bw
# module version
__version__ = "3.1.11"
# import sqlite3
try:
import sqlite3
have_sqlite3 = True
except ImportError:
sqlite3 = None
have_sqlite3 = False
# import mysql
try:
import mysql.connector as mysql
have_mysql = True
except ImportError:
mysql = None
have_mysql = False
class BWErr(Exception):
    """Simple error type carrying a human-readable message."""

    def __init__(self, message):
        # Keep the message on the instance so callers can read err.message.
        self.message = message
        super().__init__(message)
class BWDB:
def __init__(self, **kwargs):
self._db = None
self._cur = None
self._dbms = None
self._database = None
self._table = None
self._column_names = None
# populate simple parameters first
if 'user' in kwargs:
self._user = kwargs['user']
else:
self._user = None
if 'password' in kwargs:
self._password = kwargs['password']
else:
self._password = None
if 'host' in kwargs:
self._host = kwargs['host']
else:
self._host = None
# populate properties
if 'dbms' in kwargs:
self.dbms = kwargs['dbms']
if 'database' in kwargs:
self.database = kwargs['database']
if 'table' in kwargs:
self.table = kwargs['table']
# property setters/getters
def get_dbms(self):
    """Return the configured backend name ('sqlite' or 'mysql'), or None."""
    return self._dbms
def set_dbms(self, dbms_str):
    """Select the backend; raise BWErr if unknown or its driver is missing."""
    drivers = {'mysql': have_mysql, 'sqlite': have_sqlite3}
    if dbms_str not in drivers:
        raise BWErr('set_dbms: invalid dbms_str specified')
    if not drivers[dbms_str]:
        raise BWErr(f'{dbms_str} not available')
    self._dbms = dbms_str
def get_database(self):
    """Return the current database name/path, or None."""
    return self._database
def set_database(self, database):
self._database = database
if self._cur:
self._cur.close()
if self._db:
self._db.close()
self._database = database
if self._dbms == 'sqlite':
self._db = sqlite3.connect(self._database)
if self._db is None:
raise BWErr('set_database: failed to open sqlite database')
else:
self._cur = self._db.cursor()
elif self._dbms == 'mysql':
self._db = mysql.connect(user=self._user, password=<PASSWORD>,
host=self._host, database=self._database)
if self._db is None:
raise BWErr('set_database: failed to connect to mysql')
else:
self._cur = self._db.cursor(prepared=True)
else:
raise BWErr('set_database: unknown _dbms')
def get_cursor(self):
    """Return the live DB-API cursor (None until set_database has run)."""
    return self._cur
def set_table(self, table):
self._table = self.sanitize_string(table)
self.column_names()
def get_table(self):
    """Return the sanitized working table name, or None."""
    return self._table

# properties — route attribute access through the validating getters/setters
dbms = property(fget=get_dbms, fset=set_dbms)
database = property(fget=get_database, fset=set_database)
table = property(fget=get_table, fset=set_table)
cursor = property(fget=get_cursor)  # read-only
# sql methods =====
def sql_do_nocommit(self, sql, parms=()):
    """Execute an SQL statement without committing; return affected rowcount."""
    self._cur.execute(sql, parms)
    return self._cur.rowcount

def sql_do(self, sql, parms=()):
    """Execute an SQL statement and commit; return affected rowcount."""
    self._cur.execute(sql, parms)
    self.commit()
    return self._cur.rowcount

def sql_do_many_nocommit(self, sql, parms=()):
    """Execute an SQL statement over a set of data without committing."""
    self._cur.executemany(sql, parms)
    return self._cur.rowcount

def sql_do_many(self, sql, parms=()):
    """Execute an SQL statement over a set of data and commit."""
    self._cur.executemany(sql, parms)
    self.commit()
    return self._cur.rowcount
def sql_query(self, sql, parms=()):
self._cur.execute(sql, parms)
for row in self._cur:
yield row
def sql_query_row(self, sql, parms=()):
    """Run a query and return only the first row (or None if empty)."""
    self._cur.execute(sql, parms)
    row = self._cur.fetchone()
    # Drain remaining rows so the cursor is clean for the next statement.
    self._cur.fetchall()
    return row

def sql_query_value(self, sql, parms=()):
    """Run a query and return the first column of the first row.

    NOTE(review): raises TypeError if the query returns no rows.
    """
    return self.sql_query_row(sql, parms)[0]
# crud methods =====
def column_names(self):
    """Return (and cache) the tuple of column names for the current table.

    The first column must be 'id' and there must be at least two columns;
    BWErr is raised otherwise. (The "colum_names" typo in those messages
    is preserved as-is — callers may match on the text.)
    """
    if self._column_names is not None:
        return self._column_names  # cached from a previous call
    if self._dbms == 'sqlite':
        rows = self.sql_query(f"PRAGMA table_info ({self._table});")
        # PRAGMA table_info rows are (cid, name, type, notnull, dflt, pk)
        self._column_names = tuple(r[1] for r in rows)
    elif self._dbms == 'mysql':
        # Execute a one-row SELECT so the cursor exposes .column_names.
        self._cur.execute(f"SELECT * FROM {self._table} LIMIT 1")
        self._cur.fetchall()
        self._column_names = self._cur.column_names
    else:
        raise BWErr("column_names: unknown _dbms")
    if self._column_names[0] != 'id':
        self._column_names = None
        raise BWErr("colum_names: no id column")
    elif len(self._column_names) < 2:
        self._column_names = None
        raise BWErr("colum_names: empty list")
    else:
        return self._column_names
def count_rows(self):
    """Return the number of rows in the current table."""
    return self.sql_query_value(f"SELECT COUNT(*) FROM {self._table}")

def get_row(self, row_id):
    """Return the single row with the given id, or None."""
    return self.sql_query_row(f"SELECT * FROM {self._table} WHERE id = ?", (row_id,))

def get_rows(self):
    """Yield all rows of the current table (generator)."""
    return self.sql_query(f"SELECT * FROM {self._table}")

def get_rows_limit(self, limit, offset=0):
    """Yield up to *limit* rows starting at *offset* (generator)."""
    return self.sql_query(f"SELECT * FROM {self._table} LIMIT ? OFFSET ?",
                          (limit, offset))
def add_row_nocommit(self, parms=()):
    """INSERT a row (values for all non-id columns) without committing."""
    colnames = self.column_names()
    numnames = len(colnames)
    if 'id' in colnames:
        # id is auto-assigned, so it takes no placeholder value
        numnames -= 1
    names_str = self.sql_colnames_string(colnames)
    values_str = self.sql_values_string(numnames)
    sql = f"INSERT INTO {self._table} ({names_str}) VALUES ({values_str})"
    return self.sql_do_nocommit(sql, parms)

def add_row(self, parms=()):
    """INSERT a row and commit; return the affected rowcount."""
    r = self.add_row_nocommit(parms)
    self.commit()
    return r
def update_row_nocommit(self, row_id, dict_rec):
""" Update row id with data in dict """
if "id" in dict_rec.keys(): # don't update id column
del dict_rec['id']
keys = sorted(dict_rec.keys()) # get keys and values
values = [dict_rec[v] for v in keys]
update_string = self.sql_update_string(keys)
sql = f"UPDATE {self._table} SET {update_string} WHERE id = ?"
values.append(row_id)
return self.sql_do_nocommit(sql, values)
def update_row(self, row_id, dict_rec):
    """UPDATE row *row_id* and commit; return the affected rowcount."""
    r = self.update_row_nocommit(row_id, dict_rec)
    self.commit()
    return r

def del_row_nocommit(self, row_id):
    """DELETE row *row_id* without committing; return the rowcount."""
    return self.sql_do_nocommit(f"DELETE FROM {self._table} WHERE id = ?", (row_id,))

def del_row(self, row_id):
    """DELETE row *row_id* and commit; return the rowcount."""
    r = self.del_row_nocommit(row_id)
    self.commit()
    return r
def find_row(self, colname, value):
""" Find the first match and returns id or None """
colname = self.sanitize_string(colname) # sanitize params
sql = f"SELECT * FROM {self._table} WHERE {colname} LIKE ?"
row = self.sql_query_row(sql, (value,))
if row:
return row[0]
else:
return None
def find_rows(self, colname, value):
""" Find the first match and returns id or empty list """
colname = self.sanitize_string(colname) # sanitize params
sql = f"SELECT * FROM {self._table} WHERE {colname} LIKE ?"
row_ids = []
for row in self.sql_query(sql, (value,)):
row_ids.append(row[0])
return row_ids
# Utilities =====
    @staticmethod
    def version():
        """Return the library version string (the module-level __version__)."""
        return __version__
@staticmethod
def sanitize_string(s):
""" Remove nefarious characters from a string """
charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_-.% "
san_string = ""
for i in range(0, len(s)):
if s[i] in charset:
san_string += s[i]
else:
san_string += '_'
return san_string
@staticmethod
def sql_colnames_string(colnames):
names_str = ","
if colnames[0] == "id":
colnames = colnames[1:]
return names_str.join(colnames)
@staticmethod
def sql_values_string(num):
s = "?," * num
return s[0:-1]
@staticmethod
def sql_update_string(colnames):
update_string = ","
for i in range(len(colnames)):
colnames[i] += "=?"
return update_string.join(colnames)
def make_dict_row(self, row):
return dict(zip(self.column_names(), row))
def have_db(self):
if self._db is None:
return False
else:
return True
def have_cursor(self):
if self._cur is None:
return False
else:
return True
def have_table(self, table_name=None):
if table_name is None:
table_name = self._table
if table_name is None:
return False
if self._dbms == 'sqlite':
rc = self.sql_query_value("SELECT COUNT(*) FROM sqlite_master WHERE type=? AND name=?",
('table', table_name))
if rc > 0:
return True
if self._dbms == 'mysql':
rc = self.sql_query_value("SELECT COUNT(*) FROM information_schema.tables WHERE table_name = ?",
(table_name,))
if rc > 0:
return True
return False
def lastrowid(self):
return self._cur.lastrowid
def begin_transaction(self):
if self.have_db():
if self._database == 'sqlite':
self.sql_do("BEGIN TRANSACTION")
elif self._database == 'mysql':
self.sql_do("START TRANSACTION")
def rollback(self):
if self.have_db():
self._db.rollback()
def commit(self):
if self.have_db():
self._db.commit()
    def disconnect(self):
        """Close the cursor then the connection, and clear all cached handles.

        Safe to call more than once: after the first call every handle is None,
        so the have_cursor()/have_db() guards skip the close() calls.
        """
        if self.have_cursor():
            self.
# destructor
def __del__(self):
self.disconnect()
MY_HOST = 'pluto.local'
MY_USER = 'appuser'
MY_PASS = '<PASSWORD>'
def main():
try:
db = BWDB(dbms='sqlite', database='../db/scratch.db')
# db = BWDB(dbms='mysql', host=MY_HOST, user=MY_USER, password=<PASSWORD>,
# database='scratch')
print(f"BWDB version {db.version()}")
print(f"dbms is {db.dbms}\n")
# start clean
db.sql_do("DROP TABLE IF EXISTS temp")
print(f"have table {db.have_table('temp')}")
print("create a table")
if db.dbms == "sqlite":
create_table = """
CREATE TABLE IF NOT EXISTS temp (
id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
description TEXT
)
"""
elif db.dbms == "mysql":
create_table = """
CREATE TABLE IF NOT EXISTS temp (
id INTEGER AUTO_INCREMENT PRIMARY KEY,
name VARCHAR(128) NOT NULL,
description VARCHAR(128)
)
"""
else:
raise BWErr("create table: unknown dbms")
# create and set the table
db.sql_do(create_table)
db.table = "temp"
print(f"have table {db.have_table()}")
print(f"table columns: {db.column_names()}\n")
print("populate table")
insert_rows = (
("<NAME>", "Guitar"),
("<NAME>", "Trumpet"),
("<NAME>", "Drums"),
("<NAME>", "Saxophone"),
("<NAME>", "Piano"),
("<NAME>", "Bass"),
)
print("not add rows (rollback)")
db.begin_transaction()
for row in insert_rows:
db.add_row_nocommit(row)
db.rollback()
print("add rows")
db.begin_transaction()
for row in insert_rows:
db.add_row_nocommit(row)
db.commit()
print(f"added {len(insert_rows)} rows")
print(f"there are {db.count_rows()} rows")
for row in db.get_rows():
print(row)
print()
print("find more than one row (%s%)")
row_ids = db.find_rows("name", "%s%")
print(f"found {len(row_ids)} rows")
for row_id in row_ids:
print(db.get_row(row_id))
print()
print("search for %Bird%")
row_id = db.find_row("name", "%Bird%")
if row_id > 0:
print(f"found row {row_id}")
print(db.get_row(row_id))
print()
print(f"update row {row_id}")
numrows = db.update_row(row_id, {'name': 'The Bird', 'description': 'Tenor Sax'})
print(f"{numrows} row(s) modified")
print(db.get_row(row_id))
print()
print("add a row")
numrows = db.add_row(["<NAME>", "Harmonica"])
row_id = db.lastrowid()
print(f"{numrows} row added (row {row_id})")
print(db.get_row(row_id))
print()
print("delete a row (Cobham)")
row_id = db.find_row("name", "%Cobham%")
if row_id > 0:
print(f"deleting row {row_id}")
numrows = db.del_row(row_id)
print(f"{numrows} row(s) deleted")
print()
print("print remaining rows")
for row in db.get_rows():
print(row)
# add more rows to test paging
print()
print("add more rows")
db.begin_transaction()
for row in insert_rows:
numrows = db.add_row_nocommit(row)
for row in insert_rows:
numrows += db.add_row_nocommit(row)
for row in insert_rows:
numrows += db.add_row_nocommit(row)
db.commit()
print(f"added {numrows} rows")
print()
print("page through rows")
offset = 0
limit = 5
| |
270 degrees in air',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
59: {'desc': 'hop turn left 270 degrees in air',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
60: {'desc': 'hop turn left 270 degrees in air',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
61: {'desc': 'hop turn 360 degrees left in air',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
62: {'desc': 'hop turn left 360 degrees in air',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
63: {'desc': 'large sidestep to right and forward',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
64: {'desc': 'hop and turn right 360 degrees in air',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
65: {'desc': 'hop and turn right 360 degrees in air',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
66: {'desc': 'short sidestep to right',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
67: {'desc': 'short sidestep to the right',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
68: {'desc': 'medium sidestep to right',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120}}},
85: {'desc': 'jumps; flips; breakdance',
'motions': {1: {'desc': 'JumpTwist',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
2: {'desc': 'JumpTwist',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
3: {'desc': 'UpRightSequence',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
4: {'desc': 'FancyFootWork',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
5: {'desc': 'HandStandKicks',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
6: {'desc': 'KickFlip',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
7: {'desc': 'KickFlipStumble',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
8: {'desc': 'Helicopter',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
9: {'desc': 'motorcycle pose',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
10: {'desc': 'EndofBreakDance',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
11: {'desc': 'UpRightSequence',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
12: {'desc': 'LongSequenceGood',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
13: {'desc': 'BadStartSequnce',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
14: {'desc': 'BreakSequencewithFlips',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
15: {'desc': '90TwistsFall',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120}}},
86: {'desc': 'sports and various activities',
'motions': {1: {'desc': 'jumps kicks and punches',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
2: {'desc': 'walk, squats, run, stretch, jumps, punches, and drinking',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
3: {'desc': 'walking, running, jumping, kicking, and stretching',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
4: {'desc': 'walking, stretching, punching, chopping, and drinking',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
5: {'desc': 'walking, jumping, jumping jacks, jumping on one foot, punching, chopping,',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
6: {'desc': 'walking, running, kicking, punching, knee kicking, and stretching',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
7: {'desc': 'walking, swinging arms, stretching, jumping on one leg, and jumping',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
8: {'desc': 'walking, squats, stretching, kicking, and punching',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
9: {'desc': 'walking, sitting, looking, stand up',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
10: {'desc': 'walk around, sit, stand up, and running',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
11: {'desc': 'walking, stretching, walking and turning',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
12: {'desc': 'walking, dragging, sweeping, dustpan, wipe window, and wipe mirror',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
13: {'desc': 'walking around, walk up ladder, step down ladder',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
14: {'desc': 'bouncing basketball, shooting basketball, dribble basketball, two handed dribble',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
15: {'desc': 'walking, sitting, hand motions',
'files': ['c3d', 'amc', 'avi'],
'fps': 120}}},
87: {'desc': 'acrobatics',
'motions': {1: {'desc': 'Jump with kick and spin',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 60},
2: {'desc': 'T-Pose',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 60},
3: {'desc': 'Backflip',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 60},
4: {'desc': 'backflip',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 60},
5: {'desc': 'cartwheels',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 60}}},
88: {'desc': 'acrobatics',
'motions': {1: {'desc': 'backflip',
'files': ['tvd', 'c3d', 'amc', 'mpg', 'avi'],
'fps': 60},
2: {'desc': 'backflips, jump onto platform, handstands, vertical pushups',
'files': ['tvd', 'c3d', 'amc', 'mpg', 'avi'],
'fps': 60},
3: {'desc': 'motorcycle pose',
'files': ['tvd', 'c3d', 'amc', 'mpg', 'avi'],
'fps': 60},
4: {'desc': 'stretches, cartwheels, flips, spin kicks, spins, and fall',
'files': ['tvd', 'c3d', 'amc', 'mpg', 'avi'],
'fps': 60},
5: {'desc': 'cartwheel into backflip',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 60},
6: {'desc': 'jump and spin kick',
'files': ['tvd', 'c3d', 'amc', 'mpg', 'avi'],
'fps': 60},
7: {'desc': 'cartwheel',
'files': ['tvd', 'c3d', 'amc', 'mpg', 'avi'],
'fps': 60},
8: {'desc': 'crouch and flip backward on hands',
'files': ['tvd', 'c3d', 'amc', 'mpg', 'avi'],
'fps': 60},
9: {'desc': 'stretch and cartwheel',
'files': ['tvd', 'c3d', 'amc', 'mpg', 'avi'],
'fps': 60},
10: {'desc': 'stretch and spin',
'files': ['tvd', 'c3d', 'amc', 'mpg', 'avi'],
'fps': 60},
11: {'desc': 'stretches and jumps',
'files': ['tvd', 'c3d', 'amc', 'mpg', 'avi'],
'fps': 60}}},
89: {'desc': 'acrobatics',
'motions': {1: {'desc': 'balance object on forehead',
'files': ['tvd', 'c3d', 'amc', 'mpg', 'avi'],
'fps': 60},
2: {'desc': 'motorcycle pose',
'files': ['tvd', 'c3d', 'amc', 'mpg', 'avi'],
'fps': 60},
3: {'desc': 'flip and stand on one hand',
'files': ['tvd', 'c3d', 'amc', 'mpg', 'avi'],
'fps': 60},
4: {'desc': 'spins, flips, stand on one hand',
'files': ['tvd', 'c3d', 'amc', 'mpg', 'avi'],
'fps': 60},
5: {'desc': 'spin upside down, handstand, flips',
'files': ['tvd', 'c3d', 'amc', 'mpg', 'avi'],
'fps': 60},
6: {'desc': 'balance beam',
'files': ['tvd', 'c3d', 'amc', 'mpg', 'avi'],
'fps': 60}}},
90: {'desc': 'cartwheels; acrobatics; dances',
'motions': {1: {'desc': 'bkwd summersult',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
2: {'desc': 'cartwheel',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
3: {'desc': 'cartwheel',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
4: {'desc': 'cartwheel',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
5: {'desc': 'jump kick',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
6: {'desc': 'jump kick',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
7: {'desc': 'jump kick',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
8: {'desc': 'side flip',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
9: {'desc': 'Flip forward onto hands and back again',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
10: {'desc': '',
'files': ['tvd', 'c3d', 'amc'],
'fps': 120},
11: {'desc': 'hand spring',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
12: {'desc': 'bck flp twst fall',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
13: {'desc': 'bck flp twst fall',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
14: {'desc': 'front hand flip',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
15: {'desc': 'front hand flip',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
16: {'desc': 'fall on face',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
17: {'desc': 'BannanaPeelSlip',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
18: {'desc': 'RugPullFall',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
19: {'desc': 'monkey backflip',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
20: {'desc': 'monkey sequence',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
21: {'desc': 'monkey sequence',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
22: {'desc': 'straight walk',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
23: {'desc': 'straight walk',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
24: {'desc': 'ball mount',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
25: {'desc': 'fwd ball walk',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
26: {'desc': 'fwd ball walk',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
27: {'desc': 'bwk ball walk',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
28: {'desc': 'breakdance',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
29: {'desc': 'sequesnce',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
30: {'desc': 'russian dance',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
31: {'desc': 'russian dance',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
32: {'desc': 'moonwalk',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
33: {'desc': 'arm up wide leg roll',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
34: {'desc': 'wide leg roll',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
35: {'desc': 'wide leg roll',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
36: {'desc': 'wide leg roll',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120}}},
91: {'desc': 'walks and turns',
'motions': {1: {'desc': 'Walk digital figure eight',
'files': | |
<filename>ALGORITHM/Starcraft/star_foundation.py<gh_stars>0
from UTILS.colorful import print亮紫, print亮绿
import numpy as np
import copy
import math
import random
import torch
from operator import itemgetter as iget
from UTILS.tensor_ops import _2cpu2numpy, _2tensor, process_space, np_one_hot, __hash__
from config import ChainVar
class AlgorithmConfig(object): # ADD_TO_CONF_SYSTEM 加入参数搜索路径 do not remove this comment !!!
step_mul = 8 # how many steps to make an action
alg = 'qmix'
print('using alg:', alg)
def use_one_reward(alg):
if alg in ['qmix', 'qtran', 'iql_sr']:
return True # iql:False, qmix:True
elif alg in ['iql']:
return False # iql:False, qmix:True
else:
assert False, ('does alg use agent-wise reward ??')
one_reward = use_one_reward(alg)
one_reward_cv = ChainVar(lambda alg:AlgorithmConfig.use_one_reward(alg), chained_with=['alg'])
n_steps = None # total time steps
last_action = True # whether to use the last action to choose action
reuse_network = True # whether to use one network for all agents
gamma = 0.99 # discount factor
optimizer = "RMS" # optimizer
model_dir = None
load_model = False
# network
rnn_hidden_dim = 128
qmix_hidden_dim = 64
two_hyper_layers = False
hyper_hidden_dim = 64
qtran_hidden_dim = 64
lr = 1e-4
# epsilon greedy
epsilon = 1
min_epsilon = 0.05
anneal_steps = 50000
anneal_epsilon = (epsilon - min_epsilon) / anneal_steps
epsilon_anneal_scale = 'episode'
train_interval = 1 # the number of episodes before once training
train_steps = 1 # important !! the number of the train steps in one epoch
# experience replay
batch_size = 64
buffer_size = int(5e3)
# how often to save the model
save_cycle = 5000
# how often to update the target_net
target_update_cycle = 200
# QTRAN lambda
lambda_opt = 1
lambda_nopt = 1
# prevent gradient explosion
grad_norm_clip = 10
# MAVEN
noise_dim = 16
lambda_mi = 0.001
lambda_ql = 1
entropy_coefficient = 0.001
class StarFoundation():
def __init__(self, n_agent, n_thread, space, mcv):
from .agent.agent import Agents
from .common.arguments import get_mixer_args
from config import GlobalConfig
space = process_space(space)
obs_space = eval(space['obs_space'])
act_space = eval(space['act_space'])
self.alg_config = AlgorithmConfig
if self.alg_config.alg in ['qmix','iql','iql_sr']:
pass # self.alg_config = get_mixer_args(self.alg_config)
else:
assert False, ('blind spot')
self.alg_config.cuda = ('cuda' in GlobalConfig.device)
self.alg_config.n_threads = self.n_threads = n_thread
self.alg_config.model_dir = GlobalConfig.logdir
self.alg_config.n_agents = self.n_agents = n_agent
self.alg_config.state_shape = obs_space['state_shape']
self.alg_config.obs_shape = obs_space['obs_shape']
self.alg_config.n_actions = act_space['n_actions']
self.alg_config.map = GlobalConfig.scenario_config.map_
self.alg_config.episode_limit = GlobalConfig.scenario_config.episode_limit
self.state_provided = GlobalConfig.scenario_config.state_provided
self.avail_act_provided = GlobalConfig.scenario_config.avail_act_provided
self.multi_reward2one = (self.alg_config.one_reward) and (not GlobalConfig.scenario_config.RewardAsUnity)
self.agents = Agents(self.alg_config)
self.epsilon = self.alg_config.epsilon
self.anneal_epsilon = self.alg_config.anneal_epsilon
self.min_epsilon = self.alg_config.min_epsilon
self.ReplayBuffer = ReplayBuffer(self.alg_config, train_hook=self.agents.train)
self.__incomplete_rollout_frag__ = None
def cold_init(self, state_recall):
state_recall['_HiddenState_'] = np.zeros((self.n_threads, self.n_agents, self.alg_config.rnn_hidden_dim))
state_recall['_LastActionOnehot_'] = np.zeros((self.n_threads, self.n_agents, self.alg_config.n_actions))
state_recall['_PreStep_'] = state_recall['Current-Obs-Step']
return state_recall
def interact_with_env(self, state_recall):
# init if new train session or new test session
if not '_HiddenState_' in state_recall: state_recall = self.cold_init(state_recall)
# recall hidden states
keys = ['ENV-PAUSE', 'Latest-Obs','Latest-Team-Info','Env-Suffered-Reset','Current-Obs-Step','_HiddenState_','_LastActionOnehot_','Test-Flag']
ENV_PAUSE, obs, info, Env_Suffered_Reset, step_cnt, hidden_state, last_action_oh, EvalMod = iget(*keys)(state_recall)
if not EvalMod: self.ReplayBuffer.train(self.epsilon)
# this is the core of interaction
with torch.no_grad():
new_hidden_state, new_actions, new_action_oh, hook = self.get_actions(ENV_PAUSE, obs, hidden_state, last_action_oh, info,
Env_Suffered_Reset, step_cnt, evaluate=EvalMod)
# remember and to be recall
state_recall['_PreStep_'] = state_recall['Current-Obs-Step'].copy()
state_recall['_HiddenState_'] = new_hidden_state
state_recall['_LastActionOnehot_'] = new_action_oh
if not EvalMod:
state_recall['_hook_'] = hook
return new_actions, state_recall
''' <1> ↑..........↑
Push something into platform dictionary, so we can see them again a step later.
Although you can also do this by creating 'self.' vars instead, but I advise not ^_^
<2>
RL alg requires rewards, but rule based alg does not.
For RL alg the rewards cannot be acquired here
because actions has not been fed to env.
So please just leave a callback hook function here
when the reward and next obs is ready,
the platform will call state_recall['_hook_'](arg),
with arg={'reward':xxx, 'Latest-Obs':xxx}
'''
def get_actions(self, ENV_PAUSE, obs, hidden_state, last_action_oh, info, Env_Suffered_Reset, step_cnt, evaluate=False):
# ! change epsilon
if (not evaluate) and (self.alg_config.epsilon_anneal_scale == 'step') and (self.epsilon > self.min_epsilon):
self.epsilon -= self.anneal_epsilon*self.n_threads
# allocate numpy array
n_threads = hidden_state.shape[0]
new_hidden_state = np.zeros_like(hidden_state)
new_action_oh = np.zeros_like(last_action_oh)
all_avail_actions = np.ones_like(new_action_oh)
all_state = np.zeros((n_threads, self.alg_config.state_shape))
new_actions = np.zeros_like(last_action_oh[:,:,0])
# deal with reset environment and load states
for thread_idx, done in enumerate(Env_Suffered_Reset):
if ENV_PAUSE[thread_idx]:
continue
if self.state_provided: all_state[thread_idx,:] = info[thread_idx]['state']
if self.avail_act_provided: all_avail_actions[thread_idx, :] = info[thread_idx]['avail-act']
if done:
# 环境刚刚复位
hidden_state[thread_idx, :] = 0
last_action_oh[thread_idx, :] = 0
# ! change epsilon
if (not evaluate) and (self.alg_config.epsilon_anneal_scale == 'episode') and (self.epsilon > self.min_epsilon):
self.epsilon -= self.anneal_epsilon
epsilon = self.epsilon if not evaluate else 0
for thread_idx, done in enumerate(Env_Suffered_Reset):
# FOR layer 2: loop through agents
if ENV_PAUSE[thread_idx]: continue
obs_agent = obs[thread_idx, :]
hidden_state_agent = hidden_state[thread_idx, :]
last_action_oh_agent = last_action_oh[thread_idx, :]
avail_action_agent = all_avail_actions[thread_idx, :] # self.env.get_avail_agent_actions(agent_id)
agent_id = np.eye(self.n_agents)
assert self.alg_config.alg != 'maven'
action, new_hidden_state_agent = self.agents.choose_action_batched(obs_agent, hidden_state_agent, last_action_oh_agent, agent_id, avail_action_agent, epsilon, evaluate)
action = _2cpu2numpy(action)
new_hidden_state_agent = _2cpu2numpy(new_hidden_state_agent)
new_hidden_state[thread_idx, :] = new_hidden_state_agent
# generate onehot vector of th action
new_action_oh[thread_idx, :] = np_one_hot(action, n=self.alg_config.n_actions)
# last_action[agent_id] = action_onehot
new_actions[thread_idx]= action
pass
new_actions = np.expand_dims(new_actions, -1)
rollout_frag_hook = None
if not evaluate:
self.__incomplete_rollout_frag__ = { # wait to be completed with reward in hook function
'_SKIP_': ENV_PAUSE.copy(),
'u': new_actions,
'o': obs,
's': all_state,
'u_onehot': new_action_oh,
'avail_u': all_avail_actions,
'padded': np.zeros((n_threads,1))#[0.]
}; self.__check_data_hash() # this is important!
rollout_frag_hook = lambda frag_part2: self.rollout_frag_hook(frag_part2)
new_actions = new_actions.transpose(1,0,2)
return new_hidden_state, new_actions, new_action_oh, rollout_frag_hook
def _collect_states(self, info, n_threads):
all_state = np.zeros((n_threads, self.alg_config.state_shape))
all_avail_actions = np.ones((n_threads, self.n_agents, self.alg_config.n_actions))
for thread_idx in range(n_threads):
if self.state_provided: all_state[thread_idx,:] = info[thread_idx]['state']
if self.avail_act_provided: all_avail_actions[thread_idx, :] = info[thread_idx]['avail-act']
return all_state, all_avail_actions
def rollout_frag_hook(self, frag_part2):
self.__check_data_curruption()
assert self.__incomplete_rollout_frag__ is not None
assert '_SKIP_' in self.__incomplete_rollout_frag__
assert 'Latest-Obs' in frag_part2
assert 'reward' in frag_part2
assert 'done' in frag_part2
self.__incomplete_rollout_frag__.update(frag_part2)
completed_frag = self.__incomplete_rollout_frag__
n_threads = len(completed_frag['done'])
all_state_next, avail_u_next = self._collect_states(completed_frag['info'], n_threads)
completed_frag['state'] = all_state_next
completed_frag['avail-act'] = avail_u_next
# unify agent rewards to team reward, if agent rewards list provided
if self.multi_reward2one:
completed_frag['reward'] = completed_frag['reward'].mean(axis=1) #,keepdims=True)
# load into replay buffer
self.ReplayBuffer.store_timestep(completed_frag)
self.__incomplete_rollout_frag__ = None
# debugging functions
def __check_data_hash(self):
if not hasattr(self, 'patience'): self.patience = 1000
if self.patience > 0:
self.hash_debug = {}
# for debugging, to detect write protection error
for key in self.__incomplete_rollout_frag__:
item = self.__incomplete_rollout_frag__[key]
if isinstance(item, dict):
self.hash_debug[key]={}
for subkey in item:
subitem = item[subkey]
self.hash_debug[key][subkey] = __hash__(subitem)
else:
self.hash_debug[key] = __hash__(item)
def __check_data_curruption(self):
if self.patience > 0:
assert self.__incomplete_rollout_frag__ is not None
assert self.hash_debug is not None
for key in self.__incomplete_rollout_frag__:
item = self.__incomplete_rollout_frag__[key]
if isinstance(item, dict):
for subkey in item:
subitem = item[subkey]
assert self.hash_debug[key][subkey] == __hash__(subitem), ('Currupted data! 发现腐败数据!')
else:
assert self.hash_debug[key] == __hash__(item), ('Currupted data! 发现腐败数据!')
self.patience -= 1
class ReplayBuffer:
def __init__(self, args, train_hook):
self.args = args
self.n_actions = self.args.n_actions
self.n_agents = self.args.n_agents
self.state_shape = self.args.state_shape
self.obs_shape = self.args.obs_shape
self.size = self.args.buffer_size # size = 5000
self.episode_limit = self.args.episode_limit
self.n_threads = self.args.n_threads
# memory management
self.current_idx = 0
self.current_size = 0
self.finished_episode_cnt = 0
# trainer
self.big_train_steps_cnt = 0
self.small_train_steps_cnt = 0
self.train_hook = train_hook
# create the buffer to store info
r_coredim = 1 if self.args.one_reward else self.n_agents
self.buffers = {'o': np.empty([self.size, self.episode_limit, self.n_agents, self.obs_shape]),
'u': np.empty([self.size, self.episode_limit, self.n_agents, 1]),
's': np.empty([self.size, self.episode_limit, self.state_shape]),
'r': np.empty([self.size, self.episode_limit, r_coredim]),
'o_next': np.empty([self.size, self.episode_limit, self.n_agents, self.obs_shape]),
's_next': np.empty([self.size, self.episode_limit, self.state_shape]),
'avail_u': np.empty([self.size, self.episode_limit, self.n_agents, self.n_actions]),
'avail_u_next': np.empty([self.size, self.episode_limit, self.n_agents, self.n_actions]),
'u_onehot': np.empty([self.size, self.episode_limit, self.n_agents, self.n_actions]),
'padded': np.empty([self.size, self.episode_limit, 1]),
'terminated': np.empty([self.size, self.episode_limit, 1])
}
if self.args.alg == 'maven':
self.buffers['z'] = np.empty([self.size, self.args.noise_dim])
self.rollout_unfinished = [None for _ in range(self.args.n_threads)]
return
def train(self, epsilon):
train_steps_should_be_done = self.finished_episode_cnt//self.args.train_interval
# print亮绿(' self.finished_episode_cnt', self.finished_episode_cnt, 'self.alg_config.train_interval', self.args.train_interval)
while self.big_train_steps_cnt < train_steps_should_be_done:
self.big_train_steps_cnt += 1
print('training start! current buffer %.2f percent, current epsilon %.3f'%(self.current_size/self.size *100, epsilon))
for _ in range(self.args.train_steps): # 默认为1
batch = self.sample(min(self.current_size, self.args.batch_size))
# access agent train
# print亮紫('self.agents.train(batch, self.small_train_steps_cnt)')
# print亮紫('self.ReplayBuffer.current_size',self.current_size,'self.alg_config.batch_size',self.args.batch_size)
self.train_hook(batch, self.small_train_steps_cnt)
# mark the train progress
self.small_train_steps_cnt += 1
# if self.small_train_steps_cnt%10 == 0:
# print('train_steps_cnt', self.small_train_steps_cnt, 'epsilon', self.epsilon)
print('training end!')
def store_timestep(self, rollout_frag):
for thread in range(self.n_threads):
# if rollout_frag['done'][thread]:
# print('h')
if rollout_frag['_SKIP_'][thread]: continue
if self.rollout_unfinished[thread] is None:
self.rollout_unfinished[thread] = {
'o':[], | |
float, i2: float, result: float) -> bool:
eq = make_strict_eq(input_wrapper(i1))
return eq(result)
elif eq_to == BinaryCondArg.SECOND:
def check_result(i1: float, i2: float, result: float) -> bool:
eq = make_strict_eq(input_wrapper(i2))
return eq(result)
else:
raise ValueError(f"{eq_to=} must be FIRST or SECOND")
return check_result
def make_binary_check_result(check_just_result: UnaryCheck) -> BinaryResultCheck:
def check_result(i1: float, i2: float, result: float) -> bool:
return check_just_result(result)
return check_result
def integers_from_dtype(dtype: DataType, **kw) -> st.SearchStrategy[float]:
"""
Returns a strategy that generates float-casted integers within the bounds of dtype.
"""
for k in kw.keys():
# sanity check
assert k in ["min_value", "max_value", "exclude_min", "exclude_max"]
m, M = dh.dtype_ranges[dtype]
if "min_value" in kw.keys():
m = kw["min_value"]
if "exclude_min" in kw.keys():
m += 1
if "max_value" in kw.keys():
M = kw["max_value"]
if "exclude_max" in kw.keys():
M -= 1
return st.integers(math.ceil(m), math.floor(M)).map(float)
def parse_binary_case(case_str: str) -> BinaryCase:
"""
Parses a Sphinx-formatted binary case string to return codified binary cases, e.g.
>>> case_str = (
... "If ``x1_i`` is greater than ``0``, ``x1_i`` is a finite number, "
... "and ``x2_i`` is ``+infinity``, the result is ``NaN``."
... )
>>> case = parse_binary_case(case_str)
>>> case
BinaryCase(<x1_i > 0 and isfinite(x1_i) and x2_i == +infinity -> NaN>)
>>> case.cond(42, float('inf'))
True
>>> case.check_result(42, float('inf'), float('nan'))
True
"""
case_m = r_binary_case.match(case_str)
if case_m is None:
raise ParseError(case_str)
cond_strs = r_cond_sep.split(case_m.group(1))
partial_conds = []
partial_exprs = []
x1_cond_from_dtypes = []
x2_cond_from_dtypes = []
for cond_str in cond_strs:
if m := r_input_is_array_element.match(cond_str):
in_sign, in_no, other_sign, other_no = m.groups()
if in_sign != "" or other_no == in_no:
raise ParseError(cond_str)
partial_expr = f"{in_sign}x{in_no}_i == {other_sign}x{other_no}_i"
# For these scenarios, we want to make sure both array elements
# generate respective to one another by using a shared strategy.
shared_from_dtype = lambda d, **kw: st.shared(
xps.from_dtype(d, **kw), key=cond_str
)
input_wrapper = lambda i: -i if other_sign == "-" else noop
if other_no == "1":
def partial_cond(i1: float, i2: float) -> bool:
eq = make_strict_eq(input_wrapper(i1))
return eq(i2)
_x2_cond_from_dtype = shared_from_dtype # type: ignore
def _x1_cond_from_dtype(dtype, **kw) -> st.SearchStrategy[float]:
return shared_from_dtype(dtype, **kw).map(input_wrapper)
elif other_no == "2":
def partial_cond(i1: float, i2: float) -> bool:
eq = make_strict_eq(input_wrapper(i2))
return eq(i1)
_x1_cond_from_dtype = shared_from_dtype # type: ignore
def _x2_cond_from_dtype(dtype, **kw) -> st.SearchStrategy[float]:
return shared_from_dtype(dtype, **kw).map(input_wrapper)
else:
raise ParseError(cond_str)
x1_cond_from_dtypes.append(BoundFromDtype(base_func=_x1_cond_from_dtype))
x2_cond_from_dtypes.append(BoundFromDtype(base_func=_x2_cond_from_dtype))
elif m := r_both_inputs_are_value.match(cond_str):
unary_cond, expr_template, cond_from_dtype = parse_cond(m.group(1))
left_expr = expr_template.replace("{}", "x1_i")
right_expr = expr_template.replace("{}", "x2_i")
partial_expr = f"{left_expr} and {right_expr}"
partial_cond = make_binary_cond( # type: ignore
BinaryCondArg.BOTH, unary_cond
)
x1_cond_from_dtypes.append(cond_from_dtype)
x2_cond_from_dtypes.append(cond_from_dtype)
else:
cond_m = r_cond.match(cond_str)
if cond_m is None:
raise ParseError(cond_str)
input_str, value_str = cond_m.groups()
if value_str == "the same mathematical sign":
partial_expr = "copysign(1, x1_i) == copysign(1, x2_i)"
def partial_cond(i1: float, i2: float) -> bool:
return math.copysign(1, i1) == math.copysign(1, i2)
elif value_str == "different mathematical signs":
partial_expr = "copysign(1, x1_i) != copysign(1, x2_i)"
def partial_cond(i1: float, i2: float) -> bool:
return math.copysign(1, i1) != math.copysign(1, i2)
else:
unary_cond, expr_template, cond_from_dtype = parse_cond(value_str)
# Do not define partial_cond via the def keyword or lambda
# expressions, as one partial_cond definition can mess up
# previous definitions in the partial_conds list. This is a
# hard-limitation of using local functions with the same name
# and that use the same outer variables (i.e. unary_cond). Use
# def in a called function avoids this problem.
input_wrapper = None
if m := r_input.match(input_str):
x_no = m.group(1)
partial_expr = expr_template.replace("{}", f"x{x_no}_i")
cond_arg = BinaryCondArg.from_x_no(x_no)
elif m := r_abs_input.match(input_str):
x_no = m.group(1)
partial_expr = expr_template.replace("{}", f"abs(x{x_no}_i)")
cond_arg = BinaryCondArg.from_x_no(x_no)
input_wrapper = abs
elif r_and_input.match(input_str):
left_expr = expr_template.replace("{}", "x1_i")
right_expr = expr_template.replace("{}", "x2_i")
partial_expr = f"{left_expr} and {right_expr}"
cond_arg = BinaryCondArg.BOTH
elif r_or_input.match(input_str):
left_expr = expr_template.replace("{}", "x1_i")
right_expr = expr_template.replace("{}", "x2_i")
partial_expr = f"{left_expr} or {right_expr}"
if len(cond_strs) != 1:
partial_expr = f"({partial_expr})"
cond_arg = BinaryCondArg.EITHER
else:
raise ParseError(input_str)
partial_cond = make_binary_cond( # type: ignore
cond_arg, unary_cond, input_wrapper=input_wrapper
)
if cond_arg == BinaryCondArg.FIRST:
x1_cond_from_dtypes.append(cond_from_dtype)
elif cond_arg == BinaryCondArg.SECOND:
x2_cond_from_dtypes.append(cond_from_dtype)
elif cond_arg == BinaryCondArg.BOTH:
x1_cond_from_dtypes.append(cond_from_dtype)
x2_cond_from_dtypes.append(cond_from_dtype)
else:
# For "either x1_i or x2_i is <condition>" cases, we want to
# test three scenarios:
#
# 1. x1_i is <condition>
# 2. x2_i is <condition>
# 3. x1_i AND x2_i is <condition>
#
# This is achieved by a shared base strategy that picks one
# of these scenarios to determine whether each array will
# use either cond_from_dtype() (i.e. meet the condition), or
# simply xps.from_dtype() (i.e. be any value).
use_x1_or_x2_strat = st.shared(
st.sampled_from([(True, False), (False, True), (True, True)])
)
def _x1_cond_from_dtype(dtype, **kw) -> st.SearchStrategy[float]:
assert len(kw) == 0 # sanity check
return use_x1_or_x2_strat.flatmap(
lambda t: cond_from_dtype(dtype)
if t[0]
else xps.from_dtype(dtype)
)
def _x2_cond_from_dtype(dtype, **kw) -> st.SearchStrategy[float]:
assert len(kw) == 0 # sanity check
return use_x1_or_x2_strat.flatmap(
lambda t: cond_from_dtype(dtype)
if t[1]
else xps.from_dtype(dtype)
)
x1_cond_from_dtypes.append(
BoundFromDtype(base_func=_x1_cond_from_dtype)
)
x2_cond_from_dtypes.append(
BoundFromDtype(base_func=_x2_cond_from_dtype)
)
partial_conds.append(partial_cond)
partial_exprs.append(partial_expr)
result_m = r_result.match(case_m.group(2))
if result_m is None:
raise ParseError(case_m.group(2))
result_str = result_m.group(1)
# Like with partial_cond, do not define check_result in this function's body.
if m := r_array_element.match(result_str):
sign, x_no = m.groups()
result_expr = f"{sign}x{x_no}_i"
check_result = make_eq_input_check_result( # type: ignore
BinaryCondArg.from_x_no(x_no), eq_neg=sign == "-"
)
else:
_check_result, result_expr = parse_result(result_m.group(1))
check_result = make_binary_check_result(_check_result)
cond_expr = " and ".join(partial_exprs)
def cond(i1: float, i2: float) -> bool:
return all(pc(i1, i2) for pc in partial_conds)
x1_cond_from_dtype = sum(x1_cond_from_dtypes, start=BoundFromDtype())
x2_cond_from_dtype = sum(x2_cond_from_dtypes, start=BoundFromDtype())
return BinaryCase(
cond_expr=cond_expr,
cond=cond,
x1_cond_from_dtype=x1_cond_from_dtype,
x2_cond_from_dtype=x2_cond_from_dtype,
result_expr=result_expr,
check_result=check_result,
)
# Matches case lines whose result is "determined by the rule already stated
# above" - such lines add no new testable behaviour and are skipped.
r_redundant_case = re.compile("result.+determined by the rule already stated above")
def parse_binary_docstring(docstring: str) -> List[BinaryCase]:
    """
    Parses a Sphinx-formatted docstring of a binary function to return a list of
    codified binary cases, e.g.

    >>> def logaddexp(x1, x2):
    ...     '''
    ...     Calculates the logarithm of the sum of exponentiations
    ...
    ...     **Special Cases**
    ...
    ...     For floating-point operands,
    ...
    ...     - If either ``x1_i`` or ``x2_i`` is ``NaN``, the result is ``NaN``.
    ...     - If ``x1_i`` is ``+infinity`` and ``x2_i`` is not ``NaN``, the result is ``+infinity``.
    ...     - If ``x1_i`` is not ``NaN`` and ``x2_i`` is ``+infinity``, the result is ``+infinity``.
    ...     '''
    ...
    >>> binary_cases = parse_binary_docstring(logaddexp.__doc__)
    >>> for case in binary_cases:
    ...     print(repr(case))
    BinaryCase(<x1_i == NaN or x2_i == NaN -> NaN>)
    BinaryCase(<x1_i == +infinity and not x2_i == NaN -> +infinity>)
    BinaryCase(<not x1_i == NaN and x2_i == +infinity -> +infinity>)

    Lines that cannot be parsed are reported via warn() and skipped;
    redundant cases (covered by an earlier rule) are silently dropped.
    """
    special_cases = r_special_cases.search(docstring)
    if special_cases is None:
        return []
    cases = []
    # The trailing split element is after the final newline - drop it.
    for line in special_cases.group(1).split("\n")[:-1]:
        case_m = r_case.match(line)
        if case_m is None:
            warn(f"line not machine-readable: '{line}'")
            continue
        case_str = case_m.group(1)
        if r_redundant_case.search(case_str):
            continue
        if r_binary_case.match(case_str) is None:
            # Not a recognised binary case; only warn if it is not the
            # catch-all "in the remaining cases" line.
            if not r_remaining_case.match(case_str):
                warn(f"case not machine-readable: '{case_str}'")
            continue
        try:
            cases.append(parse_binary_case(case_str))
        except ParseError as e:
            warn(f"not machine-readable: '{e.value}'")
    return cases
unary_params = []
binary_params = []
iop_params = []
func_to_op: Dict[str, str] = {v: k for k, v in dh.op_to_func.items()}
for stub in category_to_funcs["elementwise"]:
if stub.__doc__ is None:
warn(f"{stub.__name__}() stub has no docstring")
continue
marks = []
try:
func = getattr(xp, stub.__name__)
except AttributeError:
marks.append(
pytest.mark.skip(reason=f"{stub.__name__} not found in array module")
)
func = None
sig = inspect.signature(stub)
param_names = list(sig.parameters.keys())
if len(sig.parameters) == 0:
warn(f"{func=} has no parameters")
continue
if param_names[0] == "x":
if cases := parse_unary_docstring(stub.__doc__):
func_name_to_func = {stub.__name__: func}
if stub.__name__ in func_to_op.keys():
op_name = func_to_op[stub.__name__]
op = getattr(operator, op_name)
func_name_to_func[op_name] = op
for func_name, func in func_name_to_func.items():
for case in cases:
id_ = f"{func_name}({case.cond_expr}) -> {case.result_expr}"
p = pytest.param(func_name, func, case, id=id_)
unary_params.append(p)
continue
if len(sig.parameters) == 1:
warn(f"{func=} has one parameter '{param_names[0]}' which is not named 'x'")
continue
if param_names[0] == "x1" and param_names[1] == "x2":
if cases := parse_binary_docstring(stub.__doc__):
func_name_to_func = {stub.__name__: func}
if stub.__name__ in func_to_op.keys():
op_name = func_to_op[stub.__name__]
op = getattr(operator, op_name)
func_name_to_func[op_name] = op
# We collect inplaceoperator test cases seperately
iop_name = "__i" + op_name[2:]
iop = getattr(operator, iop_name)
for case in cases:
id_ = f"{iop_name}({case.cond_expr}) -> {case.result_expr}"
p = pytest.param(iop_name, iop, | |
# -*- coding: utf-8 -*-
import json
import random
import urllib
from base64 import b64encode
from twisted.python import log
from twisted.web import http
from twisted.web.error import Error
from twisted.internet import defer, reactor
from twisted.internet.protocol import Protocol
from twisted.web.client import Agent, ResponseDone
from twisted.web.http_headers import Headers
from fluiddb.common import defaults, error, permissions
from fluiddb.common import paths
from fluiddb.common.types_thrift.ttypes import (
TNonexistentTag, TNoInstanceOnObject)
from fluiddb.web.util import buildHeader
from integration.wsfe import base
from integration.wsfe.http import getPage, HTTPError
import txbase
class SimpleBodyProducer(object):
    """
    A minimal body producer for twisted.web.client.Agent requests that
    writes a fixed string to the consumer in one synchronous shot.

    NOTE(review): Agent expects an IBodyProducer; that interface also
    requires stopProducing/pauseProducing/resumeProducing, which were
    missing here - added as no-ops so request cancellation does not
    raise AttributeError. All data is written synchronously, so there
    is nothing to actually pause or cancel.
    """

    def __init__(self, data):
        self.data = data
        # Agent uses length for the Content-Length request header.
        self.length = len(data)
        self.d = None

    def startProducing(self, consumer):
        # Write everything at once; the immediately-fired Deferred tells
        # the Agent that body production is complete.
        consumer.write(self.data)
        return defer.succeed(None)

    def stopProducing(self):
        # No-op: production already completed in startProducing().
        pass

    def pauseProducing(self):
        # No-op: nothing is produced incrementally.
        pass

    def resumeProducing(self):
        # No-op: nothing is produced incrementally.
        pass
class ResponseGetter(Protocol):
    """
    Collects a response body delivered by twisted.web and fires the
    supplied Deferred with the complete body once the connection ends.
    """

    _buffer = ''

    def __init__(self, finished):
        self._finished = finished

    def dataReceived(self, bytes):
        # The body arrives in arbitrary chunks; accumulate them all.
        self._buffer = self._buffer + bytes

    def connectionLost(self, reason):
        # Only a clean ResponseDone is expected; anything else propagates
        # out of trap() as an error.
        reason.trap(ResponseDone)
        self._finished.callback(self._buffer)
class TagInstanceTest(txbase.TxFluidDBTest, base.HTTPTest):
    """
    A base class for our FluidDB tag instance tests.

    Note: for the time being this class still inherits from base.HTTPTest
    so that we can have tests that use both txFluidDB and the methods in
    base.HTTPTest. Once we have replaced everything from base.HTTPTest,
    this class will disappear entirely.
    """

    # All requests in these tests target the /objects category.
    toplevel = defaults.httpObjectCategoryName

    def setUp(self):
        """
        Initialize both our superclasses.

        Each base's setUp is invoked explicitly (not via super()), in the
        order txbase first, then the legacy HTTP helper base.
        """
        txbase.TxFluidDBTest.setUp(self)
        base.HTTPTest.setUp(self)
class TestPOST(TagInstanceTest):
    """POST on /objects/<id>/<tag path> is not part of the API."""

    verb = 'POST'

    @base.showFailures
    def testNotAllowed(self):
        """A POST request must be rejected with 405 NOT ALLOWED."""
        headers = {'accept': 'application/json'}
        self.addBasicAuthHeader(headers)
        deferred = self.getPage(base.randomObjectIdStr(), headers=headers)
        deferred.addErrback(self.checkErrorStatus, http.NOT_ALLOWED)
        self.failUnlessFailure(deferred, Error)
        return deferred
class TestGET(TagInstanceTest):
    """
    Tests for GET on /objects/<id>/<tag path>: error handling for missing
    tag instances and content-type negotiation for primitive and opaque
    values.
    """

    verb = 'GET'

    @base.showFailures
    def testNonExistentObjectId(self):
        """
        GETting fluiddb/about on a random (nonexistent) object id must
        fail with 404 NOT FOUND and a TNoInstanceOnObject error-class
        header.
        """
        aboutPath = defaults.sep.join(paths.aboutPath())
        objectId = base.randomObjectIdStr()
        d = self.getTagValue(aboutPath, objectId)
        d.addErrback(self.checkErrorStatus, http.NOT_FOUND)
        d.addErrback(
            self.checkErrorHeaders,
            {buildHeader('Error-Class'): TNoInstanceOnObject.__name__})
        self.failUnlessFailure(d, Error)
        return d

    @base.showFailures
    def testNonType4ObjectId(self):
        """
        As above, but with an object id that is not a type 4 UUID; the
        lookup must still fail with 404 NOT FOUND.
        """
        aboutPath = defaults.sep.join(paths.aboutPath())
        d = self.getTagValue(aboutPath, base.nonType4ObjectIdStr())
        d.addErrback(self.checkErrorStatus, http.NOT_FOUND)
        d.addErrback(
            self.checkErrorHeaders,
            {buildHeader('Error-Class'): TNoInstanceOnObject.__name__})
        self.failUnlessFailure(d, Error)
        return d

    @base.showFailures
    @defer.inlineCallbacks
    def testPrimitiveTypes(self):
        """
        Each primitive value must round-trip through set/get, both with
        no Accept header and with the primitive-JSON content type.
        """
        path = 'fluiddb/testing/test1'
        objectId = yield self.createObject()
        try:
            for value in (True, False, 3, 4.5, ['4', '5', '6'], None):
                yield self.setTagValue(path, objectId, value)
                for acc in (None, defaults.contentTypeForPrimitiveJSON):
                    result = yield self.getTagValue(path, objectId, accept=acc)
                    if type(value) is list:
                        # List (set) values may come back in any order.
                        value = sorted(value)
                        result = sorted(result)
                    self.assertEqual(value, result)
        finally:
            yield self.deleteTagValue(path, objectId)

    @base.showFailures
    @defer.inlineCallbacks
    def testNotAcceptableMIMEvMIME(self):
        """
        Exercise content negotiation: a GET must succeed only when the
        Accept header is compatible with the content type the value was
        stored with, and must otherwise fail with 406 NOT ACCEPTABLE and
        a NotAcceptable error-class header.
        """
        path = 'fluiddb/testing/test1'
        objectId = yield self.createObject()
        primitiveOrAnything = '%s, */*' % defaults.contentTypeForPrimitiveJSON
        # data is given as a tuple of tuples. Each sub-tuple has 3 things:
        # a value, the accept string with which to try GETting the value,
        # and a flag to indicate whether the GET is expected to succeed.
        # The value may be a (x, ct) pair, where x is the actual value to
        # be PUT and ct is the content-type to PUT it with.
        testdata = [
            (('xxx', 'application/pdf'), 'no/pdf', False),
            (('xxx', 'application/pdf'), 'application/pdf', True),
            (('xxx', 'application/pdf'), 'application/pdf, */*', True),
            (('xxx', 'yyy/zzz'), defaults.contentTypeForPrimitiveJSON, False),
        ]
        # Primitive values are only served as primitive JSON (or via a
        # wildcard Accept), never as arbitrary MIME types.
        for value in (True, False, 4, 7.3, None, 'ducks', ['Hey', 'Jude']):
            for ct in ('application/pdf', 'text/plain', 'xx/yy, ii/jj; q=0.5'):
                testdata.append((value, ct, False))
            for ct in ('*/*', defaults.contentTypeForPrimitiveJSON,
                       primitiveOrAnything):
                testdata.append((value, ct, True))
        try:
            for value, accept, succeed in testdata:
                if type(value) is tuple:
                    # Opaque value: stored with an explicit content type,
                    # which a successful GET must echo back.
                    value, ct = value
                    yield self.setTagValue(
                        path, objectId, value, contentType=ct)
                    d = self.getTagValueAndContentType(
                        path, objectId, accept=accept)
                    if succeed:
                        result, resultCt = yield d
                        self.assertEqual(value, result)
                        self.assertEqual(ct, resultCt)
                else:
                    yield self.setTagValue(path, objectId, value)
                    d = self.getTagValue(path, objectId, accept=accept)
                    if succeed:
                        result = yield d
                        self.assertEqual(value, result)
                if not succeed:
                    d.addErrback(self.checkErrorStatus, http.NOT_ACCEPTABLE)
                    d.addErrback(
                        self.checkErrorHeaders,
                        {buildHeader('Error-Class'):
                         error.NotAcceptable.__name__})
                    self.failUnlessFailure(d, Error)
                    yield d
        finally:
            yield self.deleteTagValue(path, objectId)
class TestHEAD(TagInstanceTest):
    """
    Tests for HEAD on /objects/<id>/<tag path>: existence checks and the
    Content-Length / Content-Type headers a HEAD response must carry.
    """

    verb = 'HEAD'

    @base.showFailures
    @defer.inlineCallbacks
    def testAdminNewTagOnNewObject(self):
        """
        A tag instance must not exist before it is set and must exist
        afterwards.
        """
        path = 'fluiddb/testing/test1'
        objectId = yield self.createObject()
        exists = yield self.hasTagValue(path, objectId)
        self.assertFalse(exists)
        try:
            yield self.setTagValue(path, objectId, '5')
            exists = yield self.hasTagValue(path, objectId)
            self.assertTrue(exists)
        finally:
            yield self.deleteTagValue(path, objectId)

    @base.showFailures
    @defer.inlineCallbacks
    def testRidiculousObjectId(self):
        """A syntactically invalid object id reports no tag instance."""
        aboutPath = defaults.sep.join(paths.aboutPath())
        exists = yield self.hasTagValue(aboutPath, 'dummy')
        self.assertFalse(exists)

    @base.showFailures
    @defer.inlineCallbacks
    def testNonExistentObjectId(self):
        """A random (nonexistent) object id reports no tag instance."""
        aboutPath = defaults.sep.join(paths.aboutPath())
        objectId = base.randomObjectIdStr()
        exists = yield self.hasTagValue(aboutPath, objectId)
        self.assertFalse(exists)

    @base.showFailures
    @defer.inlineCallbacks
    def testNonType4ObjectId(self):
        """A non type 4 UUID object id reports no tag instance."""
        aboutPath = defaults.sep.join(paths.aboutPath())
        exists = yield self.hasTagValue(aboutPath, base.nonType4ObjectIdStr())
        self.assertFalse(exists)

    @base.showFailures
    @defer.inlineCallbacks
    def testHEADSetsContentLengthAndType(self):
        """
        A HEAD request is supposed to return a Content-Length header
        indicating the size of the resource that a GET would return. Here
        we do a simple check to see that the header is present and correct.

        This is a bit of a Frankenstein test. It uses the current testing
        framework to create a tag, an object, set the tag value, and remove
        the tag, but uses the new twisted.web.client Agent to make the HEAD
        request. The code would be ~4 times as long if I wrote it all to
        use the Agent for every API call.
        """
        # NOTE(review): this assignment is dead - the loop variable below
        # immediately shadows it on the first iteration.
        value = 'i am the value'
        path = 'fluiddb/testing/test1'
        objectId = yield self.createObject()
        try:
            for value, contentType in (
                (True, defaults.contentTypeForPrimitiveJSON),
                (False, defaults.contentTypeForPrimitiveJSON),
                (None, defaults.contentTypeForPrimitiveJSON),
                (5, defaults.contentTypeForPrimitiveJSON),
                (3.14, defaults.contentTypeForPrimitiveJSON),
                ('hey', defaults.contentTypeForPrimitiveJSON),
                (u'\u2345\uFDFA\u2619',
                 defaults.contentTypeForPrimitiveJSON),
                ('opaque value', 'opaque/stuff')):
                yield self.setTagValue(path, objectId, value,
                                       contentType=contentType)
                URI = '%s/%s/%s/%s' % (self.endpoint,
                                       defaults.httpObjectCategoryName,
                                       str(objectId), path)
                headers = Headers({
                    'Authorization': ['Basic %s' % b64encode(
                        '%s:%s' % (defaults.adminUsername.encode('utf-8'),
                                   self.adminPassword))]})
                agent = Agent(reactor)
                response = yield agent.request('HEAD', URI, headers)
                self.assertEqual(http.OK, response.code)
                # Test the content length is as expected.
                receivedContentLen = int(response.headers.getRawHeaders(
                    'content-length')[0])
                if contentType == defaults.contentTypeForPrimitiveJSON:
                    # The content-length should be the length of the JSON
                    # encoded value (since the value was a primitive type).
                    expectedContentLen = len(json.dumps(value))
                else:
                    expectedContentLen = len(value)
                self.assertEqual(receivedContentLen, expectedContentLen)
                # Test the content type is as expected.
                receivedContentType = response.headers.getRawHeaders(
                    'content-type')[0]
                self.assertEqual(receivedContentType, contentType)
        finally:
            yield self.deleteTagValue(path, objectId)
class TestPUT(TagInstanceTest):
verb = 'PUT'
@base.showFailures
@defer.inlineCallbacks
def testPlausibleButNonExistentObjectId(self):
# Here we use an object ID that is syntactically a UUID, though
# it's guaranteed not to exist in FluidDB (see base.py).
#
# For now this succeeds as we do not require that an object exist
# before you put a value onto it.
path = 'fluiddb/testing/test1'
value = 'floppy disks, FTW!'
objectId = base.randomObjectIdStr()
try:
yield self.setTagValue(path, objectId, value)
result = yield self.getTagValue(path, objectId)
self.assertEqual(result, value)
finally:
yield self.deleteTagValue(path, objectId)
@base.showFailures
@defer.inlineCallbacks
def testNonType4NonExistentObjectId(self):
# Here we use an object ID that is syntactically a UUID, though
# it's guaranteed not to exist in FluidDB.
#
# For now this succeeds as we do not require that an object exist
# before you put a value onto it, and we do not check that object
# ids obey type 4 rules. Should we? Or do we just not care?
path = 'fluiddb/testing/test1'
value = 5
objectId = base.nonType4ObjectIdStr()
try:
yield self.setTagValue(path, objectId, value)
result = yield self.getTagValue(path, objectId,)
self.assertEqual(result, value)
finally:
result = yield self.deleteTagValue(path, objectId,)
@base.showFailures
@defer.inlineCallbacks
def testPayloadWithNoContentType(self):
headers = {}
self.addBasicAuthHeader(headers)
objectId = yield self.createObject()
uri = '%s/%s/%s/dummy' % (
self.endpoint,
defaults.httpObjectCategoryName, str(objectId))
d = getPage(uri, headers=headers, postdata=json.dumps('x'),
method='PUT')
d.addErrback(self.checkErrorStatus, http.BAD_REQUEST)
d.addErrback(
self.checkErrorHeaders,
{buildHeader('Error-Class'): error.NoContentTypeHeader.__name__})
self.failUnlessFailure(d, Error)
yield d
@base.showFailures
@defer.inlineCallbacks
def testNonExistentTag(self):
objectId = yield self.createObject()
d = self.setTagValue('xx/yy/zz', objectId, '5')
d.addErrback(self.checkErrorStatus, http.NOT_FOUND)
d.addErrback(
self.checkErrorHeaders,
{buildHeader('Error-Class'): TNonexistentTag.__name__})
self.failUnlessFailure(d, Error)
yield d
@base.showFailures
@defer.inlineCallbacks
def testAdminAddTagTwiceOnNewObject(self):
path = 'fluiddb/testing/test1'
objectId = yield self.createObject()
try:
yield self.setTagValue(path, objectId, '5')
yield self.setTagValue(path, objectId, '6')
finally:
yield self.deleteTagValue(path, objectId)
@base.showFailures
@defer.inlineCallbacks
def testRoundtripPrimitiveTypes(self):
path = 'fluiddb/testing/test1'
objectId = yield self.createObject()
try:
for value in (True, False, None, 5, 3.14,
'hey', u'\u2345\uFDFA\u2619'):
yield self.setTagValue(path, objectId, value)
result = yield self.getTagValue(path, objectId)
self.assertEqual(value, result)
value = ['foobar', u'\u2345\uFDFA\u2619', 'D\xc3\xb8dheimsgard']
yield self.setTagValue(path, objectId, value)
result = yield self.getTagValue(path, objectId)
# We need to sanitize the original value variable, a call to
# sorted will trigger a call to equal between normal strings
# and unicode
sanitizedValue = []
for s in value:
if not isinstance(s, unicode):
s = s.decode("utf-8")
sanitizedValue.append(s)
sanitizedValue = sorted(sanitizedValue)
result = sorted(result)
self.assertEqual(sanitizedValue, result)
finally:
yield self.deleteTagValue(path, objectId)
@base.showFailures
@defer.inlineCallbacks
def testAdminNewBinaryTagOnNewObject(self):
def randBinary(n):
# Make a random binary string of length n.
values = '\x00\x01\x02\x03\x04'.split()
return ''.join([random.choice(values) for _ in xrange(n)])
path = 'fluiddb/testing/test1'
objectId = yield self.createObject()
try:
for contentType in ('application/octet-stream', 'hello/world'):
size = random.randint(1, 1000)
value = randBinary(size)
yield self.setTagValue(path, objectId, value,
contentType=contentType)
result = yield self.getTagValueAndContentType(path, objectId)
self.assertEqual(result[0], value)
self.assertEqual(result[1], contentType)
finally:
yield self.deleteTagValue(path, objectId)
@base.showFailures
@defer.inlineCallbacks
def testAdminNewBinaryTagOnNewObjectJSONP(self):
def randBinary(n):
# Make a random binary string of length n.
values = '\x00\x01\x02\x03\x04'.split()
return ''.join([random.choice(values) for _ in xrange(n)])
path = 'fluiddb/testing/test1'
objectId = yield self.createObject()
headers = {}
self.addBasicAuthHeader(headers)
try:
for contentType in ('application/octet-stream', 'hello/world'):
size = random.randint(1, 1000)
value | |
# Main driver file for user input and displaying
# Also checks legal moves and keep a move log
class GameState():
    def __init__(self):
        # Initial board state
        # Board is 8x8 2d list with each element having 2 characters
        # First character represents the color: w = white & b = black
        # Second character represents the type: R = Rook, K = King, etc.
        # "--" represents a empty space
        self.board = [
            ["bR", "bN", "bB", "bQ", "bK", "bB", "bN", "bR"],
            ["bp", "bp", "bp", "bp", "bp", "bp", "bp", "bp"],
            ["--", "--", "--", "--", "--", "--", "--", "--"],
            ["--", "--", "--", "--", "--", "--", "--", "--"],
            ["--", "--", "--", "--", "--", "--", "--", "--"],
            ["--", "--", "--", "--", "--", "--", "--", "--"],
            ["wp", "wp", "wp", "wp", "wp", "wp", "wp", "wp"],
            ["wR", "wN", "wB", "wQ", "wK", "wB", "wN", "wR"]
        ]
        # Dispatch table: piece letter -> its move-generation method.
        self.moveFunctions = {"p" : self.getPawnMoves, "R" : self.getRookMoves, "N" : self.getKnightMoves, "Q" : self.getQueenMoves, "K" : self.getKingMoves, "B" : self.getBishopMoves}
        self.whiteToMove = True
        # History of Move objects, used for undo and display.
        self.moveLog = []
        # King squares are tracked so check detection is O(1) lookups.
        self.whiteKingLocation = (7, 4)
        self.blackKingLocation = (0, 4)
        self.checkMate = False
        self.stalemate = False
        # (row, col) of the square where an en-passant capture is possible,
        # or () when none is available.
        self.enpassantPossible = ()
        self.currentCastlingRight = CastleRights(True, True, True, True)
        # Snapshot of castling rights after every move, for undo.
        self.castleRightsLog = [CastleRights(self.currentCastlingRight.wks, self.currentCastlingRight.bks, self.currentCastlingRight.wqs, self.currentCastlingRight.bqs)]
    # Takes a move as a parameter and executes it, including the special
    # cases handled below: castling, pawn promotion and en-passant.
    def makeMove(self, move):
        """Play `move` on the board, update all derived state (king
        locations, en-passant square, castling rights log) and flip the
        side to move."""
        self.board[move.startRow][move.startCol] = "--"
        self.board[move.endRow][move.endCol] = move.pieceMoved
        self.moveLog.append(move) # Log the move to see history/undo last move
        self.whiteToMove = not self.whiteToMove # Switch turn to other player
        # Updates King's position if moved
        if move.pieceMoved == "wK":
            self.whiteKingLocation = (move.endRow, move.endCol)
        if move.pieceMoved == "bK":
            self.blackKingLocation = (move.endRow, move.endCol)
        # Pawn promotion: replace the pawn with the chosen piece.
        if move.isPawnPromotion:
            self.board[move.endRow][move.endCol] = move.pieceMoved[0] + move.promotionChoice
        # En-passant move: the captured pawn is beside the start row, not
        # on the landing square.
        if move.isEnpassantMove:
            self.board[move.startRow][move.endCol] = "--"
        # Update enpassantPossible variable: only a two-square pawn
        # advance opens an en-passant opportunity (on the skipped square).
        if move.pieceMoved[1] == "p" and abs(move.startRow - move.endRow) == 2:
            self.enpassantPossible = ((move.startRow + move.endRow) // 2, move.startCol)
        else:
            self.enpassantPossible = ()
        # Castle move: also relocate the rook next to the king.
        if move.isCastlingMove:
            if move.endCol - move.startCol == 2: # Kingside castle
                self.board[move.endRow][move.endCol-1] = self.board[move.endRow][move.endCol+1] # Moves the rook
                self.board[move.endRow][move.endCol+1] = "--"
            else: # Queenside castle
                self.board[move.endRow][move.endCol+1] = self.board[move.endRow][move.endCol-2]
                self.board[move.endRow][move.endCol-2] = "--"

        # Updating Castling rights
        self.updateCastleRights(move)
        self.castleRightsLog.append(CastleRights(self.currentCastlingRight.wks, self.currentCastlingRight.bks, self.currentCastlingRight.wqs, self.currentCastlingRight.bqs))
def undoMove(self):
if len(self.moveLog) != 0:
move = self.moveLog.pop() # Gets last move and removes it from movelog
self.board[move.startRow][move.startCol] = move.pieceMoved
self.board[move.endRow][move.endCol] = move.pieceCaptured
self.whiteToMove = not self.whiteToMove # Switches turn back to other player
# Updates King's position if moved
if move.pieceMoved == "wK":
self.whiteKingLocation = (move.startRow, move.startCol)
elif move.pieceMoved == "bK":
self.blackKingLocation = (move.startRow, move.startCol)
if move.isEnpassantMove:
self.board[move.endRow][move.endCol] = "--"
self.board[move.startRow][move.endCol] = move.pieceCaptured
self.enpassantPossible = (move.endRow, move.endCol)
if move.pieceMoved[1] == "p" and abs(move.startRow - move.endRow) == 2:
self.enpassantPossible = ()
# Undo-ing castling rights
self.castleRightsLog.pop()
self.currentCastlingRight = self.castleRightsLog[-1]
# Undo-ing castle move
if move.isCastlingMove:
if move.endCol - move.startCol == 2: # Kingside castle
self.board[move.endRow][move.endCol+1] = self.board[move.endRow][move.endCol-1]
self.board[move.endRow][move.endCol-1] = "--"
else:
self.board[move.endRow][move.endCol-2] = self.board[move.endRow][move.endCol+1]
self.board[move.endRow][move.endCol+1] = "--"
def updateCastleRights(self, move):
if move.pieceMoved == "wK":
self.currentCastlingRight.wks = False
self.currentCastlingRight.wqs = False
elif move.pieceMoved == "bK":
self.currentCastlingRight.bks = False
self.currentCastlingRight.bqs = False
elif move.pieceMoved == "wR":
if move.startRow == 7:
if move.startCol == 0:
self.currentCastlingRight.wqs = False
elif move.startCol == 7:
self.currentCastlingRight.wks = False
elif move.pieceMoved == "bR":
if move.startRow == 0:
if move.startCol == 0:
self.currentCastlingRight.bqs = False
elif move.startCol == 7:
self.currentCastlingRight.bks = False
    # All moves that are actually legal, accounting for checks.
    def getValidMoves(self):
        """Return every fully legal move for the side to move.

        Generates pseudo-legal moves plus castle moves, then filters out
        any move that would leave the mover's own king in check by
        playing each one and probing with inCheck(). Sets checkMate /
        stalemate when no legal move exists.
        """
        # Save state that the make/undo churn below would otherwise clobber.
        tempEnpassantPossible = self.enpassantPossible
        tempCastleRights = CastleRights(self.currentCastlingRight.wks, self.currentCastlingRight.bks, self.currentCastlingRight.wqs, self.currentCastlingRight.bqs)
        moves = self.getLegalMoves()
        if self.whiteToMove:
            self.getCastleMoves(self.whiteKingLocation[0], self.whiteKingLocation[1], moves)
        else:
            self.getCastleMoves(self.blackKingLocation[0], self.blackKingLocation[1], moves)
        # Iterate backwards so removing an element does not shift the
        # indices of moves not yet visited.
        for i in range(len(moves)-1, -1, -1):
            self.makeMove(moves[i])
            # makeMove flipped the turn; flip back so inCheck() examines
            # the king of the side that just moved.
            self.whiteToMove = not self.whiteToMove
            if self.inCheck():
                moves.remove(moves[i])
            self.whiteToMove = not self.whiteToMove
            self.undoMove()
        if len(moves) == 0: # Either checkmate or stalemate
            if self.inCheck():
                self.checkMate = True
            else:
                self.stalemate = True
        self.enpassantPossible = tempEnpassantPossible
        self.currentCastlingRight = tempCastleRights
        return moves
# Determine if King is under attack
def inCheck(self):
if self.whiteToMove:
return self.squareUnderAttack(self.whiteKingLocation[0], self.whiteKingLocation[1])
else:
return self.squareUnderAttack(self.blackKingLocation[0], self.blackKingLocation[1])
# Determine if the enemy can attack a certain square
def squareUnderAttack(self, r, c):
self.whiteToMove = not self.whiteToMove # switch to opponent to see their moves
oppMoves = self.getLegalMoves()
self.whiteToMove = not self.whiteToMove
for move in oppMoves:
if move.endRow == r and move.endCol == c:
return True
return False
# All legal moves without being in check
def getLegalMoves(self):
moves = []
for r in range(len(self.board)):
for c in range(len(self.board[r])):
turn = self.board[r][c][0]
if (turn == "w" and self.whiteToMove) or (turn == "b" and not self.whiteToMove):
piece = self.board[r][c][1]
self.moveFunctions[piece](r, c, moves) # Calls the appropriate move function depending on the piece
return moves
#Define all possible moves for each piece
def getPawnMoves(self, r, c, moves):
if self.whiteToMove: # White pawn movement
if self.board[r-1][c] == "--":
moves.append(Move((r, c), (r-1, c), self.board))
if r == 6 and self.board[r-2][c] == "--":
moves.append(Move((r, c), (r-2, c), self.board))
if c-1 >= 0:
if self.board[r-1][c-1][0] == "b":
moves.append(Move((r, c), (r-1, c-1), self.board))
elif (r-1, c-1) == self.enpassantPossible:
moves.append(Move((r, c), (r-1, c-1), self.board, isEnpassantMove=True))
if c+1 <= 7:
if self.board[r-1][c+1][0] == "b":
moves.append(Move((r, c), (r-1, c+1), self.board))
elif (r-1, c+1) == self.enpassantPossible:
moves.append(Move((r, c), (r-1, c+1), self.board, isEnpassantMove=True))
else: # Black pawn movement
if self.board[r+1][c] == "--":
moves.append(Move((r, c), (r+1, c), self.board))
if r == 1 and self.board[r+2][c] == "--":
moves.append(Move((r, c), (r+2, c), self.board))
if c-1 >= 0:
if self.board[r+1][c-1][0] == "w":
moves.append(Move((r, c), (r+1, c-1), self.board))
elif (r+1, c-1) == self.enpassantPossible:
moves.append(Move((r, c), (r+1, c-1), self.board, isEnpassantMove=True))
if c+1 <= 7:
if self.board[r+1][c+1][0] == "w":
moves.append(Move((r, c), (r+1, c+1), self.board))
elif (r+1, c+1) == self.enpassantPossible:
moves.append(Move((r, c), (r+1, c+1), self.board, isEnpassantMove=True))
def getRookMoves(self, r, c, moves):
directions = ((-1, 0), (0, -1), (1, 0), (0, 1))
enemyColor = "b" if self.whiteToMove else "w"
for d in directions:
for i in range(1, 8):
endRow = r + d[0] * i
endCol = c + d[1] * i
if 0 <= endRow < 8 and 0 <= endCol < 8:
endPiece = self.board[endRow][endCol]
if endPiece == "--":
moves.append(Move((r, c), (endRow, endCol), self.board))
elif endPiece[0] == enemyColor:
moves.append(Move((r, c), (endRow, endCol), self.board))
break
else:
break
else:
break
def getBishopMoves(self, r, c, moves):
directions = ((-1, -1), (1, -1), (1, 1), (-1, 1))
enemyColor = "b" if self.whiteToMove else "w"
for d in directions:
for i in range(1, 8):
endRow = r + d[0] * i
endCol = c + d[1] * i
if 0 <= endRow < 8 and 0 <= endCol < 8:
endPiece = self.board[endRow][endCol]
if endPiece == "--":
moves.append(Move((r, c), (endRow, endCol), self.board))
elif endPiece[0] == enemyColor:
moves.append(Move((r, c), (endRow, endCol), self.board))
break
else:
break
else:
break
def getKnightMoves(self, r, c, moves):
knightMoves = ((-2, -1), (-2, 1), (-1, -2), (-1, 2), (1, -2), (1, 2), (2, -1), (2, 1))
allyColor = "w" if self.whiteToMove else "b"
for m in knightMoves:
endRow = r + m[0]
endCol = c + m[1]
if 0 <= endRow < 8 and 0 <= endCol < 8:
endPiece = self.board[endRow][endCol]
if endPiece[0] != allyColor:
moves.append(Move((r, c), (endRow, endCol), self.board))
    def getQueenMoves(self, r, c, moves):
        """Queen moves are the union of bishop and rook moves from (r, c)."""
        self.getBishopMoves(r, c, moves)
        self.getRookMoves(r, c, moves)
def getKingMoves(self, r, c, moves):
kingMoves = ((-1, -1), (-1, 0), (-1, 1), (0, -1), (1, -1), (1, 1), (1, 0), (0, 1))
allyColor = "w" if self.whiteToMove else "b"
for i in range(8):
endRow = r + kingMoves[i][0]
endCol = c + kingMoves[i][1]
if 0 <= endRow < 8 and 0 <= endCol < 8:
endPiece = self.board[endRow][endCol]
if endPiece[0] != allyColor:
moves.append(Move((r, c), (endRow, endCol), self.board))
# Castling handling
def getCastleMoves(self, r, c, moves):
if self.squareUnderAttack(r, c):
return
if (self.whiteToMove and self.currentCastlingRight.wks) or (not self.whiteToMove and self.currentCastlingRight.bks):
self.getKingSideCastleMoves(r, c, moves)
if (self.whiteToMove and self.currentCastlingRight.wqs) or (not self.whiteToMove and self.currentCastlingRight.bqs):
self.getQueenSideCastleMoves(r, c, moves)
def getKingSideCastleMoves(self, r, c, moves):
if self.board[r][c+1] == "--" and self.board[r][c+2] == "--":
if not self.squareUnderAttack(r, c+1) and not self.squareUnderAttack(r, c+2):
moves.append(Move((r, c), (r, c+2), self.board, | |
Ecoli model to account for additional metabolites'''
query = stage02_isotopomer_query()
# get the xml model
cobra_model_sbml = ''
cobra_model_sbml = query.get_row_modelID_dataStage02IsotopomerModels(model_id_I);
# load the model
if cobra_model_sbml:
if cobra_model_sbml['file_type'] == 'sbml':
with open('data/cobra_model_tmp.xml','wb') as file:
file.write(cobra_model_sbml['model_file']);
file.close()
cobra_model = None;
cobra_model = create_cobra_model_from_sbml_file('data/cobra_model_tmp.xml', print_time=True);
elif cobra_model_sbml['file_type'] == 'json':
with open('data/cobra_model_tmp.json','wb') as file:
file.write(cobra_model_sbml['model_file']);
file.close()
cobra_model = None;
cobra_model = load_json_model('data/cobra_model_tmp.json');
else:
print('file_type not supported')
#get the atomMapping_reactions
atomMappingReactions = query.get_rows_mappingID_dataStage02IsotopomerAtomMappingReactions(mapping_id_I);
#change the mapping_id
for cnt,row in enumerate(atomMappingReactions):
atomMappingReactions[cnt]['mapping_id']=mapping_id_O;
#expand the model to include glyoxylate shunt:
#get metabolites not in the model
met_row = {}
met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014','glx_c');
glx = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
glx.charge = met_row['charge']
#get metabolites in the model
icit = cobra_model.metabolites.get_by_id('icit_c')
succ = cobra_model.metabolites.get_by_id('succ_c')
accoa = cobra_model.metabolites.get_by_id('accoa_c')
mal = cobra_model.metabolites.get_by_id('mal_DASH_L_c')
#make ICL
rxn_mets = {};
rxn_mets[icit] = -1;
rxn_mets[succ] = 1;
rxn_mets[glx] = 1;
rxn = Reaction('ICL');
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
cobra_model.repair();
#append the new atom mappings
row_tmp = {};
row_tmp['mapping_id']=mapping_id_O;
row_tmp['rxn_id']='ICL';
row_tmp['rxn_description']='';
row_tmp['rxn_equation']='';
row_tmp['reactants_stoichiometry_tracked']=[-1]
row_tmp['products_stoichiometry_tracked']=[1,1]
row_tmp['reactants_ids_tracked']=['icit_c']
row_tmp['products_ids_tracked']=['glx_c','succ_c']
row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
row_tmp['products_elements_tracked']=[["C", "C"], ["C", "C", "C", "C"]]
row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
row_tmp['products_positions_tracked']=[[0, 1], [0, 1, 2, 3]]
row_tmp['reactants_mapping']=['abcdef']
row_tmp['products_mapping']=['ab','fcde']
row_tmp['used_']=True
row_tmp['comment_']='added'
atomMappingReactions.append(row_tmp);
#make MALS
rxn_mets = {};
rxn_mets[glx] = -1;
rxn_mets[accoa] = -1;
rxn_mets[mal] = 1;
rxn = Reaction('MALS');
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
cobra_model.repair();
#append the new atom mappings
row_tmp = {};
row_tmp['mapping_id']=mapping_id_O;
row_tmp['rxn_id']='MALS';
row_tmp['rxn_description']='';
row_tmp['rxn_equation']='';
row_tmp['reactants_stoichiometry_tracked']=[-1,-1]
row_tmp['products_stoichiometry_tracked']=[1]
row_tmp['reactants_ids_tracked']=['accoa_c','glx_c']
row_tmp['products_ids_tracked']=['mal_DASH_L_c']
row_tmp['reactants_elements_tracked']=[["C", "C"], ["C", "C"]]
row_tmp['products_elements_tracked']=[["C", "C", "C", "C"]]
row_tmp['reactants_positions_tracked']=[[0, 1], [0, 1]]
row_tmp['products_positions_tracked']=[[0, 1, 2, 3]]
row_tmp['reactants_mapping']=['ab','cd']
row_tmp['products_mapping']=['cdba']
row_tmp['used_']=True
row_tmp['comment_']='added'
atomMappingReactions.append(row_tmp);
#add in glucose transporters and intracellular glc
#get metabolites not in the model
met_row = {}
met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014',"glc_DASH_D_c");
glc_c = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
glc_c.charge = met_row['charge']
met_row = {}
met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014',"glc_DASH_D_e");
glc_e = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'e')
glc_e.charge = met_row['charge']
glcext = Metabolite('glc_DASH_D_e.ext',met_row['formula'],met_row['met_name'],'e')
glcext.charge = met_row['charge']
glcpre = Metabolite('glc_DASH_D_e.pre',met_row['formula'],met_row['met_name'],'e')
glcpre.charge = met_row['charge']
#get metabolites in the model
pep = cobra_model.metabolites.get_by_id('pep_c')
pyr = cobra_model.metabolites.get_by_id('pyr_c')
g6p = cobra_model.metabolites.get_by_id('g6p_c')
#make EX_glc_LPAREN_e_RPAREN_
rxn_mets = {};
rxn_mets[glcext] = -1;
rxn_mets[glc_e] = 1;
rxn = Reaction('EX_glc_LPAREN_e_RPAREN_');
cobra_model.remove_reactions(['EX_glc_LPAREN_e_RPAREN_']);
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
cobra_model.repair();
#append the new atom mappings
row_tmp = {};
row_tmp['mapping_id']=mapping_id_O;
row_tmp['rxn_id']='EX_glc_LPAREN_e_RPAREN_';
row_tmp['rxn_description']='';
row_tmp['rxn_equation']='';
row_tmp['reactants_stoichiometry_tracked']=[-1]
row_tmp['products_stoichiometry_tracked']=[1]
row_tmp['reactants_ids_tracked']=['glc_DASH_D_e.ext']
row_tmp['products_ids_tracked']=['glc_DASH_D_e']
row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
row_tmp['reactants_mapping']=['abcdef']
row_tmp['products_mapping']=['abcdef']
row_tmp['used_']=True
row_tmp['comment_']='added'
atomMappingReactions.append(row_tmp);
#make EX_glc_LPAREN_e_RPAREN__pre
rxn_mets = {};
rxn_mets[glcpre] = -1;
rxn_mets[glc_e] = 1;
rxn = Reaction('EX_glc_LPAREN_e_RPAREN__pre');
cobra_model.remove_reactions(['v60']);
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
cobra_model.repair();
#append the new atom mappings
row_tmp = {};
row_tmp['mapping_id']=mapping_id_O;
row_tmp['rxn_id']='EX_glc_LPAREN_e_RPAREN__pre';
row_tmp['rxn_description']='';
row_tmp['rxn_equation']='';
row_tmp['reactants_stoichiometry_tracked']=[-1]
row_tmp['products_stoichiometry_tracked']=[1]
row_tmp['reactants_ids_tracked']=['glc_DASH_D_e.pre']
row_tmp['products_ids_tracked']=['glc_DASH_D_e']
row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
row_tmp['reactants_mapping']=['abcdef']
row_tmp['products_mapping']=['abcdef']
row_tmp['used_']=True
row_tmp['comment_']='added'
atomMappingReactions.append(row_tmp);
#make GLCptspp "glc_DASH_D_p + pep_c --> g6p_c + pyr_c"
rxn_mets = {};
rxn_mets[glc_e] = -1;
rxn_mets[pep] = -1;
rxn_mets[g6p] = 1;
rxn_mets[pyr] = 1;
rxn = Reaction('GLCptspp');
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
cobra_model.repair();
#append the new atom mappings
row_tmp = {};
row_tmp['mapping_id']=mapping_id_O;
row_tmp['rxn_id']='GLCptspp';
row_tmp['rxn_description']='';
row_tmp['rxn_equation']='';
row_tmp['reactants_stoichiometry_tracked']=[-1,-1]
row_tmp['products_stoichiometry_tracked']=[1,1]
row_tmp['reactants_ids_tracked']=['glc_DASH_D_e','pep_c']
row_tmp['products_ids_tracked']=['g6p_c','pyr_c']
row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"],["C", "C", "C"]]
row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"],["C", "C", "C"]]
row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5],[0, 1, 2]]
row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5],[0, 1, 2]]
row_tmp['reactants_mapping']=['abcdef','ghi']
row_tmp['products_mapping']=['abcdef','ghi']
row_tmp['used_']=True
row_tmp['comment_']='added'
atomMappingReactions.append(row_tmp);
#make GLCt2pp "glc_DASH_D_p + h_p --> glc_DASH_D_c + h_c"
rxn_mets = {};
rxn_mets[glc_e] = -1;
rxn_mets[glc_c] = 1;
rxn = Reaction('GLCt2pp');
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000.0;
cobra_model.repair();
#append the new atom mappings
row_tmp = {};
row_tmp['mapping_id']=mapping_id_O;
row_tmp['rxn_id']='GLCt2pp';
row_tmp['rxn_description']='';
row_tmp['rxn_equation']='';
row_tmp['reactants_stoichiometry_tracked']=[-1]
row_tmp['products_stoichiometry_tracked']=[1]
row_tmp['reactants_ids_tracked']=['glc_DASH_D_e']
row_tmp['products_ids_tracked']=['glc_DASH_D_c']
row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
row_tmp['reactants_mapping']=['abcdef']
row_tmp['products_mapping']=['abcdef']
row_tmp['used_']=True
row_tmp['comment_']='added'
atomMappingReactions.append(row_tmp);
#make HEX1 "atp_c + glc_DASH_D_c --> g6p_c + h_c + adp_c"
rxn_mets = {};
rxn_mets[glc_c] = -1;
rxn_mets[g6p] = 1;
rxn = Reaction('HEX1');
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000.0;
cobra_model.repair();
#append the new atom mappings
row_tmp = {};
row_tmp['mapping_id']=mapping_id_O;
row_tmp['rxn_id']='HEX1';
row_tmp['rxn_description']='';
row_tmp['rxn_equation']='';
row_tmp['reactants_stoichiometry_tracked']=[-1]
row_tmp['products_stoichiometry_tracked']=[1]
row_tmp['reactants_ids_tracked']=['glc_DASH_D_c']
row_tmp['products_ids_tracked']=['g6p_c']
row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
row_tmp['reactants_mapping']=['abcdef']
row_tmp['products_mapping']=['abcdef']
row_tmp['used_']=True
row_tmp['comment_']='added'
atomMappingReactions.append(row_tmp);
##expand the model
#acon = Metabolite('acon_DASH_C_c','C6H3O6','cis-Aconitate','c');
#cit = cobra_model.metabolites.get_by_id('cit_c')
#icit = cobra_model.metabolites.get_by_id('icit_c')
#e4p = cobra_model.metabolites.get_by_id('e4p_c')
#r5p = cobra_model.metabolites.get_by_id('r5p_c')
#phe = cobra_model.metabolites.get_by_id('phe_DASH_L_c')
#his = cobra_model.metabolites.get_by_id('his_DASH_L_c')
#phpyr = Metabolite('phpyr_c','C9H7O3','Phenylpyruvate','c');
#prpp = Metabolite('prpp_c','C5H8O14P3','5-Phospho-alpha-D-ribose 1-diphosphate','c');
## update selected reactions to account for new metabolites
#for rxn,row in enumerate(atomMappingReactions):
# if row['rxn_id'] == 'ACONTa_ACONTb':
# #split ACONTa_ACONTb
# aconta_mets = {};
# aconta_mets[cit] = -1;
# aconta_mets[acon] = 1;
# aconta = Reaction('ACONTa');
# aconta.add_metabolites(aconta_mets);
# cobra_model.remove_reactions(['ACONTa_ACONTb']);
# cobra_model.add_reactions([aconta]);
# cobra_model.repair();
# # Update the mapping ids
# atomMappingReactions[rxn]['products_ids_tracked']=['acon_DASH_C_c']
# atomMappingReactions[rxn]['comment_']='updated'
# elif row['rxn_id'] == 'PheSYN':
# #split PheSYN to add in phpyr
# # Update the mapping_ids
# atomMappingReactions[rxn]['mapping_id']=mapping_id_O;
# atomMappingReactions[rxn]['rxn_id']=rxn_ids[rxn];
# atomMappingReactions[rxn]['rxn_description']='';
# atomMappingReactions[rxn]['rxn_equation']='';
# atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[]
# atomMappingReactions[rxn]['products_stoichiometry_tracked']=[]
# atomMappingReactions[rxn]['reactants_ids_tracked']=[]
# atomMappingReactions[rxn]['products_ids_tracked']=[]
# atomMappingReactions[rxn]['reactants_elements_tracked']=[]
# atomMappingReactions[rxn]['products_elements_tracked']=[]
# atomMappingReactions[rxn]['reactants_positions_tracked']=[]
# atomMappingReactions[rxn]['products_positions_tracked']=[]
# atomMappingReactions[rxn]['reactants_mapping']=[]
# atomMappingReactions[rxn]['products_mapping']=[]
# atomMappingReactions[rxn]['used_']=True
# atomMappingReactions[rxn]['comment_']=None
# elif row['rxn_id'] == 'HisSYN':
# # split HisSYN to add in prpp
# #cobra_model.reactions.get_by_id(rxn_ids[rxn])
# #cobra_model.reactions.get_by_id(rxn_ids[rxn])
# # Update the mapping_ids
# atomMappingReactions[rxn]['reactants_ids_tracked']=[r.replace('r5p_c','prpp_c') for r in atomMappingReactions[rxn]['reactants_ids_tracked']]
# # combine TKT1a and TKT1b
# # combine TKT2a and TKT2b
# # split PPC_PPCK
# # split PTAr_ACKr_ACS
## add in ACONTb
#acontb_mets = {};
#acontb_mets[acon] = -1;
#acontb_mets[icit] = 1;
#acontb = Reaction('ACONTb');
#acontb.add_metabolites(acontb_mets);
#cobra_model.add_reactions([acontb]);
#cobra_model.repair();
## add in ACONTb mapping
#row={};
#row['mapping_id']=mapping_id_O;
#row['rxn_id']='ACONTb';
#row['rxn_description']='';
#row['rxn_equation']='';
#row['reactants_stoichiometry_tracked']=[-1]
#row['products_stoichiometry_tracked']=[1]
#row['reactants_ids_tracked']=['acon_DASH_C_c']
#row['products_ids_tracked']=['icit_c']
#row['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
#row['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
#row['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
#row['products_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
#row['reactants_mapping']=['abcdef']
#row['products_mapping']=['abcdef']
#row['used_']=True
#row['comment_']='added'
#atomMappingReactions.append(row)
## add in e4p_to_phpyr
## add in r5p_to_prp
#r5p_to_prpp_mets = {};
#r5p_to_prpp_mets[e4p] = -1;
#r5p_to_prpp_mets[prpp] = 1;
#r5p_to_prpp = Reaction('r5p_to_prpp');
#r5p_to_prpp.add_metabolites(r5p_to_prpp_mets);
#cobra_model.add_reactions([r5p_to_prpp]);
#cobra_model.repair();
## add in r5p_to_prpp mapping
#row={};
#row['mapping_id']=mapping_id_O;
#row['rxn_id']='r5p_to_prpp';
#row['rxn_description']='';
#row['rxn_equation']='';
#row['reactants_stoichiometry_tracked']=[-1]
#row['products_stoichiometry_tracked']=[1]
#row['reactants_ids_tracked']=['r5p_c']
#row['products_ids_tracked']=['prpp_c']
#row['reactants_elements_tracked']=[["C", "C", "C", "C", "C"]]
#row['products_elements_tracked']=[["C", "C", "C", "C", "C"]]
#row['reactants_positions_tracked']=[[0, 1, 2, 3, 4]]
#row['products_positions_tracked']=[[0, 1, 2, 3, 4]]
#row['reactants_mapping']=['abcde']
#row['products_mapping']=['abcde']
#row['used_']=True
#row['comment_']='added'
#atomMappingReactions.append(row)
# write the model to a temporary file
save_json_model(cobra_model,'data/cobra_model_tmp.json')
# add the model information to the database
io = stage02_isotopomer_io()
dataStage02IsotopomerModelRxns_data = [];
dataStage02IsotopomerModelMets_data = [];
dataStage02IsotopomerModels_data,\
dataStage02IsotopomerModelRxns_data,\
dataStage02IsotopomerModelMets_data = io._parse_model_json(model_id_O, date_I, 'data/cobra_model_tmp.json')
io.add_data_stage02_isotopomer_modelMetabolites(dataStage02IsotopomerModelMets_data);
io.add_data_stage02_isotopomer_modelReactions(dataStage02IsotopomerModelRxns_data);
io.add_data_stage02_isotopomer_models(dataStage02IsotopomerModels_data);
#add atomMappingReactions to the database
io.add_data_stage02_isotopomer_atomMappingReactions(atomMappingReactions);
def expand_ecoliINCA02(self,experiment_id_I,model_id_I,mapping_id_I,date_I,model_id_O,mapping_id_O):
'''expand the INCA Ecoli model to account for additional metabolites'''
query = stage02_isotopomer_query()
# get the xml model
cobra_model_sbml = ''
cobra_model_sbml = query.get_row_modelID_dataStage02IsotopomerModels(model_id_I);
# load the model
if cobra_model_sbml:
if cobra_model_sbml['file_type'] == 'sbml':
with open('data/cobra_model_tmp.xml','wb') as file:
file.write(cobra_model_sbml['model_file']);
file.close()
cobra_model = None;
cobra_model = create_cobra_model_from_sbml_file('data/cobra_model_tmp.xml', print_time=True);
elif cobra_model_sbml['file_type'] == 'json':
with open('data/cobra_model_tmp.json','wb') as file:
file.write(cobra_model_sbml['model_file']);
file.close()
cobra_model = None;
cobra_model = load_json_model('data/cobra_model_tmp.json');
else:
print('file_type not supported')
#get the atomMapping_reactions
atomMappingReactions = query.get_rows_mappingID_dataStage02IsotopomerAtomMappingReactions(mapping_id_I);
#change the mapping_id
for cnt,row in enumerate(atomMappingReactions):
atomMappingReactions[cnt]['mapping_id']=mapping_id_O;
accoa = cobra_model.metabolites.get_by_id('accoa_c')
#expand the model to include ATPSYN:
#get metabolites not in the model
met_row = {}
met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014','atp_c');
atp = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
atp.charge = met_row['charge']
#get metabolites in the model
r5p = cobra_model.metabolites.get_by_id('r5p_c')
fthf = cobra_model.metabolites.get_by_id('10fthf_c')
gly = cobra_model.metabolites.get_by_id('gly_c')
co2 = cobra_model.metabolites.get_by_id('co2_c')
glu = cobra_model.metabolites.get_by_id('glu_DASH_L_c')
gln = cobra_model.metabolites.get_by_id('gln_DASH_L_c')
asp = cobra_model.metabolites.get_by_id('asp_DASH_L_c')
fum = cobra_model.metabolites.get_by_id('fum_c')
#make ATPSYN (irreversible)
rxn_mets = {};
rxn_mets[r5p] = -1;
rxn_mets[fthf] = -1;
rxn_mets[gly] = -1;
rxn_mets[co2] = -1;
rxn_mets[fthf] = -1;
rxn_mets[gln] = -1;
rxn_mets[asp] = -1;
rxn_mets[asp] = -1;
rxn_mets[atp] = 1;
rxn_mets[glu] = 1;
rxn_mets[fum] = 1;
rxn_mets[fum] = 1;
rxn = Reaction('ATPSYN');
rxn.add_metabolites(rxn_mets);
cobra_model.add_reactions([rxn]);
cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
cobra_model.repair();
#expand the model to include GTPSYN:
#get metabolites not in the model
met_row = {}
met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014','gtp_c');
gtp = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
gtp.charge = met_row['charge']
#get metabolites in the model
r5p = cobra_model.metabolites.get_by_id('r5p_c')
fthf | |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init
import torch.backends.cudnn as cudnn
import numpy as np
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from torch.nn.utils.clip_grad import clip_grad_norm
def l1norm(matrix, dim, eps=1e-8):
    """Scale *matrix* so it sums (in absolute value) to 1 along *dim*.

    *eps* keeps the division stable for all-zero slices.
    """
    denom = torch.abs(matrix).sum(dim=dim, keepdim=True) + eps
    return matrix / denom
def l2norm(matrix, dim, eps=1e-8):
    """Scale *matrix* to unit Euclidean length along *dim*.

    *eps* keeps the division stable for all-zero slices.
    """
    denom = matrix.pow(2).sum(dim=dim, keepdim=True).sqrt() + eps
    return matrix / denom
def func_attention(query, context, smooth, norm_func):
    """
    Attend over *context* using *query*.

    :param query: (batch_size, query_length, d)
    :param context: (batch_size, context_length, d)
    :param smooth: temperature multiplier applied before the final softmax
    :param norm_func: name of the normalization applied to the raw scores
    :return: (weighted_context, attn) with shapes
             (batch_size, query_length, d) and
             (batch_size, query_length, context_length)
    :raises ValueError: if *norm_func* is not recognized
    """
    batch_size = query.size(0)
    query_length = query.size(1)
    context_length = context.size(1)
    # query_transpose: (batch_size, d, query_length)
    query_transpose = torch.transpose(query, 1, 2)
    # score: (batch_size, context_length, query_length)
    score = torch.bmm(context, query_transpose)
    # normalize score (for query)
    if norm_func == 'softmax':
        # score: (batch_size * context_length, query_length)
        score = score.view(batch_size * context_length, query_length)
        # F.softmax with an explicit dim replaces the deprecated dim-less
        # nn.Softmax(), which warns and implicitly uses dim=1 for 2-D input.
        score = F.softmax(score, dim=1)
        # score: (batch_size, context_length, query_length)
        score = score.view(batch_size, context_length, query_length)
    elif norm_func == 'l1norm':
        score = l1norm(score, 2)
    elif norm_func == 'clipped_l1norm':
        score = nn.ReLU()(score)
        score = l1norm(score, 2)
    elif norm_func == 'clipped_leaky_l1norm':
        score = nn.LeakyReLU(0.1)(score)
        score = l1norm(score, 2)
    elif norm_func == 'l2norm':
        score = l2norm(score, 2)
    elif norm_func == 'clipped_l2norm':
        score = nn.ReLU()(score)
        score = l2norm(score, 2)
    elif norm_func == 'clipped_leaky_l2norm':
        score = nn.LeakyReLU(0.1)(score)
        score = l2norm(score, 2)
    elif norm_func == 'no_norm':
        pass
    else:
        raise ValueError("unknown first norm type: ", norm_func)
    # alignment function (softmax): get attention weights for the context
    # score: (batch_size, query_length, context_length)
    score = torch.transpose(score, 1, 2).contiguous()
    score = score.view(batch_size * query_length, context_length)
    # attn: (batch_size, query_length, context_length); explicit dim again
    attn = F.softmax(score * smooth, dim=1)
    attn = attn.view(batch_size, query_length, context_length)
    # get weighted context vector:
    # (batch, query_length, context_length) x (batch, context_length, d)
    # --> (batch, query_length, d)
    weighted_context = torch.bmm(attn, context)
    return weighted_context, attn
def gelu(x):
    """Gaussian Error Linear Unit: x * Phi(x), using the exact erf form."""
    cdf = 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
    return x * cdf
class EncoderText(nn.Module):
    """GRU caption encoder mapping word ids into the joint embedding space."""

    def __init__(self, vocab_size, embed_size, hidden_size, num_layers, bidirectional=False, text_norm=True):
        super(EncoderText, self).__init__()
        self.embed = nn.Embedding(vocab_size, embed_size)
        self.gru = nn.GRU(embed_size, hidden_size, num_layers, bidirectional=bidirectional, batch_first=True)
        self.use_bi_gru = bidirectional
        self.text_norm = text_norm
        self.init_weights()

    def init_weights(self):
        # Uniform initialization for the word embedding table.
        self.embed.weight.data.uniform_(-0.1, 0.1)

    def forward(self, x, lengths):
        """
        :param x: (batch_size, seq_len) word ids
        :param lengths: (batch_size, ) true caption lengths, sorted descending
        :return: (out, out_len) where out is (batch_size, seq_len, hidden_size)
        """
        embedded = self.embed(x)
        packed = pack_padded_sequence(embedded, lengths, batch_first=True)
        packed_hidden, _ = self.gru(packed)
        out, out_len = pad_packed_sequence(packed_hidden, batch_first=True)
        if self.use_bi_gru:
            # average the forward and backward GRU directions
            half = out.size(2) // 2
            out = (out[:, :, :half] + out[:, :, half:]) / 2
        if self.text_norm:
            # normalize the caption vectors in the joint embedding space
            out = l2norm(out, dim=-1)
        return out, out_len
class EncoderImagePrecomp(nn.Module):
    """Linear projection of precomputed region features into the joint space."""

    def __init__(self, img_size, embed_size, use_abs=False, img_norm=True):
        super(EncoderImagePrecomp, self).__init__()
        self.use_abs = use_abs
        self.img_norm = img_norm
        self.fc = nn.Linear(img_size, embed_size)
        self.init_weights()

    def init_weights(self):
        """Xavier-uniform initialization for the projection layer."""
        bound = np.sqrt(6.) / np.sqrt(self.fc.in_features + self.fc.out_features)
        self.fc.weight.data.uniform_(-bound, bound)
        self.fc.bias.data.fill_(0)

    def forward(self, img_features):
        """
        :param img_features: (batch_size, num_regions, raw_feature_dim)
        :return: (batch_size, num_regions, embed_size)
        """
        features = self.fc(img_features)
        if self.img_norm:
            # normalize each region vector in the joint embedding space
            features = l2norm(features, -1)
        if self.use_abs:
            features = torch.abs(features)
        return features
class GCN(nn.Module):
    """
    Region-relationship reasoning block (from VSRN), a non-local-style layer.

    The final projection `W` is zero-initialized, so at initialization the
    block is the identity mapping and the residual path dominates.
    """

    def __init__(self, in_channels, inter_channels, bn_layer=True):
        """
        :param in_channels: channel count of the input features
        :param inter_channels: bottleneck width; None means in_channels // 2
        :param bn_layer: append BatchNorm1d to the output projection
        """
        super(GCN, self).__init__()
        self.in_channels = in_channels
        self.inter_channels = inter_channels
        if self.inter_channels is None:
            self.inter_channels = in_channels // 2
            if self.inter_channels == 0:
                self.inter_channels = 1
        conv_nd = nn.Conv1d
        bn = nn.BatchNorm1d
        # value projection
        self.g = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
                         kernel_size=1, stride=1, padding=0)
        if bn_layer:
            self.W = nn.Sequential(
                conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
                        kernel_size=1, stride=1, padding=0),
                bn(self.in_channels)
            )
            # nn.init.constant_ replaces the long-deprecated nn.init.constant;
            # zeroing the BN affine params makes the block start as identity.
            nn.init.constant_(self.W[1].weight, 0)
            nn.init.constant_(self.W[1].bias, 0)
        else:
            self.W = conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
                             kernel_size=1, stride=1, padding=0)
            nn.init.constant_(self.W.weight, 0)
            nn.init.constant_(self.W.bias, 0)
        # query / key projections
        self.theta = None
        self.phi = None
        self.theta = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
                             kernel_size=1, stride=1, padding=0)
        self.phi = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
                           kernel_size=1, stride=1, padding=0)

    def forward(self, v):
        """
        :param v: (B, D, N) region features, channels-first for Conv1d
        :return: (B, D, N) features after relation reasoning, residual-added
        """
        batch_size = v.size(0)
        g_v = self.g(v).view(batch_size, self.inter_channels, -1)
        g_v = g_v.permute(0, 2, 1)
        theta_v = self.theta(v).view(batch_size, self.inter_channels, -1)
        theta_v = theta_v.permute(0, 2, 1)
        phi_v = self.phi(v).view(batch_size, self.inter_channels, -1)
        # pairwise affinity between regions: (B, N, N)
        R = torch.matmul(theta_v, phi_v)
        N = R.size(-1)
        # mean-normalize the affinity matrix
        R_div_C = R / N
        y = torch.matmul(R_div_C, g_v)
        y = y.permute(0, 2, 1).contiguous()
        y = y.view(batch_size, self.inter_channels, *v.size()[2:])
        W_y = self.W(y)
        # residual connection
        v_star = W_y + v
        return v_star
def cross_attention(images, captions, cap_lens, smooth, norm_func):
    """
    Per-pair cross-attention between each caption and its image.

    images: (n_image, n_region, d)
    captions: (n_caption, max_n_word, d)
    cap_lens: (n_caption, ) true caption lengths
    Returns (attn_cap, attn_img) with the same shapes as the inputs;
    padded word positions in attn_cap stay zero.
    """
    attn_cap = torch.zeros_like(captions)
    attn_img = torch.zeros_like(images)
    for idx in range(captions.size(0)):
        n_word = cap_lens[idx]
        # cap_slice: (1, n_word, d); img_slice: (1, n_region, d)
        cap_slice = captions[idx, :n_word, :].unsqueeze(0).contiguous()
        img_slice = images[idx].unsqueeze(0).contiguous()
        # attend in both directions
        cap_ctx, _ = func_attention(cap_slice, img_slice, smooth, norm_func)
        img_ctx, _ = func_attention(img_slice, cap_slice, smooth, norm_func)
        attn_cap[idx, :n_word, :] = cap_ctx.squeeze(0)
        attn_img[idx] = img_ctx.squeeze(0)
    return attn_cap, attn_img
class CrossAttentionLayer(nn.Module):
    """One round of text<->image cross-attention with optional l2 normalization.

    The fc_img/fc_txt projections and *activation_fun* are kept for interface
    compatibility but are currently unused in forward().
    """

    def __init__(self, hidden_size, smooth, norm_func, norm=True, activation_fun='relu'):
        super(CrossAttentionLayer, self).__init__()
        self.norm_func = norm_func
        self.smooth = smooth
        self.fc_img = nn.Linear(hidden_size, hidden_size)
        self.fc_txt = nn.Linear(hidden_size, hidden_size)
        self.norm = norm
        self.activation_fun = activation_fun

    def forward(self, txt_embed, img_embed, lengths):
        """Return (txt_attn_output, img_attn_output) for the batch."""
        txt_attn_output, img_attn_output = cross_attention(
            img_embed, txt_embed, lengths, self.smooth, self.norm_func)
        if self.norm:
            txt_attn_output = l2norm(txt_attn_output, 2)
            img_attn_output = l2norm(img_attn_output, 2)
        return txt_attn_output, img_attn_output
class CARRNEncoder(nn.Module):
    """CARRN model: base encoders, GCN relation reasoning, two attention rounds."""

    def __init__(self, img_size, hidden_size, use_abs, vocab_size,
                 word_embed_size, num_layers, bi_gru, smooth,
                 norm_func, norm, activation_func):
        super(CARRNEncoder, self).__init__()
        self.base_img_enc = EncoderImagePrecomp(img_size, hidden_size, use_abs, norm)
        self.base_text_enc = EncoderText(vocab_size, word_embed_size, hidden_size,
                                         num_layers, bi_gru, norm)
        self.GCN_1 = GCN(in_channels=hidden_size, inter_channels=hidden_size)
        self.GCN_2 = GCN(in_channels=hidden_size, inter_channels=hidden_size)
        self.cross_attn1 = CrossAttentionLayer(hidden_size, smooth, norm_func, norm, activation_func)
        self.cross_attn2 = CrossAttentionLayer(hidden_size, smooth, norm_func, norm, activation_func)

    def forward(self, captions, images, lengths):
        """Return (txt_embed, img_embed) after object- and relation-level alignment."""
        # embed captions and images into the joint space
        txt_raw, _ = self.base_text_enc(captions, lengths)
        img_raw = self.base_img_enc(images)
        # object-level cross-attention (object alignment)
        txt_embed, img_embed = self.cross_attn1(txt_raw, img_raw, lengths)
        # relation reasoning over image regions; Conv1d wants (B, H, N)
        reasoned = img_embed.permute(0, 2, 1)
        reasoned = self.GCN_1(reasoned)
        reasoned = self.GCN_2(reasoned)
        reasoned = reasoned.permute(0, 2, 1)
        reasoned = l2norm(reasoned, 2)
        # relation-level cross-attention (relation alignment)
        txt_embed, img_embed = self.cross_attn2(txt_embed, reasoned, lengths)
        return txt_embed, img_embed
def cosine_similarity(x1, x2, dim=1, eps=1e-8):
    """Cosine similarity between x1 and x2 along *dim*, clamped by *eps*."""
    dot = (x1 * x2).sum(dim)
    norm1 = x1.norm(2, dim)
    norm2 = x2.norm(2, dim)
    return (dot / (norm1 * norm2).clamp(min=eps)).squeeze()
def xattn_score_t2i(images, captions, cap_lens, smooth, norm_func, agg_func, lambda_lse):
    """
    Text-to-image matching scores.

    Images: (n_image, n_regions, d)
    Captions: (n_caption, max_n_word, d)
    cap_lens: (n_caption,) caption lengths
    Returns a (n_image, n_caption) similarity matrix.
    Raises ValueError for an unknown *agg_func*.
    """
    n_image = images.size(0)
    per_caption_sims = []
    for idx in range(captions.size(0)):
        n_word = cap_lens[idx]
        # take the idx-th caption and tile it across every image
        cap_i = captions[idx, :n_word, :].unsqueeze(0).contiguous()
        cap_tiled = cap_i.repeat(n_image, 1, 1)
        # caption words act as queries over the image regions:
        # weighted: (n_image, n_word, d)
        weighted, _ = func_attention(cap_tiled, images, smooth, norm_func)
        cap_tiled = cap_tiled.contiguous()
        weighted = weighted.contiguous()
        # per-word similarity: (n_image, n_word)
        word_sim = cosine_similarity(cap_tiled, weighted, dim=2)
        # aggregate the word similarities into a single score per image
        if agg_func == 'LogSumExp':
            word_sim = (word_sim * lambda_lse).exp().sum(dim=1, keepdim=True)
            word_sim = torch.log(word_sim) / lambda_lse
        elif agg_func == 'Max':
            word_sim = word_sim.max(dim=1, keepdim=True)[0]
        elif agg_func == 'Sum':
            word_sim = word_sim.sum(dim=1, keepdim=True)
        elif agg_func == 'Mean':
            word_sim = word_sim.mean(dim=1, keepdim=True)
        else:
            raise ValueError("unknown aggfunc: {}".format(agg_func))
        per_caption_sims.append(word_sim)
    # stack per-caption columns -> (n_image, n_caption)
    return torch.cat(per_caption_sims, 1)
def xattn_score_i2t(images, captions, cap_lens, smooth, norm_func, agg_func, lambda_lse):
"""
Images: (batch_size, n_regions, d) matrix of images
Captions: (batch_size, max_n_words, d) matrix of captions
CapLens: (batch_size) array of caption lengths
"""
similarities = []
n_image = images.size(0)
n_caption = captions.size(0)
n_region = images.size(1)
for i in range(n_caption):
# Get the i-th text description
n_word = cap_lens[i]
cap_i = captions[i, :n_word, :].unsqueeze(0).contiguous()
# (n_image, n_word, d)
cap_i_expand = cap_i.repeat(n_image, 1, 1)
"""
word(query): (n_image, n_word, d)
image(context): (n_image, n_region, d)
weiContext: (n_image, n_region, d)
attn: (n_image, n_word, | |
<gh_stars>0
# *****************************************************************************
# © Copyright IBM Corp. 2018. All Rights Reserved.
#
# This program and the accompanying materials
# are made available under the terms of the Apache V2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# *****************************************************************************
import datetime as dt
import json
import logging
from collections import defaultdict
import ibm_db
import numpy as np
import pandas as pd
from sqlalchemy import (MetaData, Table)
from . import dbhelper
from .exceptions import StageException, DataWriterException
from .util import MessageHub, asList
from . import metadata as md
logger = logging.getLogger(__name__)
DATALAKE_BATCH_UPDATE_ROWS = 5000
KPI_ENTITY_ID_COLUMN = 'ENTITY_ID'
class PersistColumns:
    """Pipeline stage that persists calculated (derived) metric columns of a
    dataframe into their per-grain KPI output tables via bulk upserts, and
    optionally writes checkpoint entries afterwards.

    Parameters
    ----------
    dms : object
        Data-management service. Provides the DB connection, schema name,
        data-item metadata, target grains and checkpointing.
    sources : str or list of str
        Names of the dataframe columns (data items) to persist. Required.
    checkpoint : bool
        When True, checkpoint entries are created after a successful persist.

    Raises
    ------
    RuntimeError
        If ``dms`` or ``sources`` is not provided.
    """

    def __init__(self, dms, sources=None, checkpoint=False):
        self.logger = logging.getLogger('%s.%s' % (self.__module__, self.__class__.__name__))
        if dms is None:
            raise RuntimeError("argument dms must be provided")
        if sources is None:
            raise RuntimeError("argument sources must be provided")
        self.dms = dms
        self.schema = self.dms.schema
        self.db_connection = self.dms.db_connection
        self.is_postgre_sql = dms.is_postgre_sql
        self.sources = asList(sources)
        self.checkpoint = checkpoint

    def execute(self, df):
        """Persist the configured source columns of ``df`` and return ``df`` unchanged.

        Persistence (and optional checkpointing) only happens in production
        mode; otherwise the data is left in memory and a notice is logged.
        """
        self.logger.debug('columns_to_persist=%s, df_columns=%s' % (str(self.sources), str(df.dtypes.to_dict())))
        if self.dms.production_mode:
            t1 = dt.datetime.now()
            # Only persist columns that are both requested and actually present in df.
            self.store_derived_metrics(df[list(set(self.sources) & set(df.columns))])
            t2 = dt.datetime.now()
            self.logger.info("persist_data_time_seconds=%s" % (t2 - t1).total_seconds())
            if self.checkpoint is True:
                self.dms.create_checkpoint_entries(df)
                t3 = dt.datetime.now()
                self.logger.info("checkpoint_time_seconds=%s" % (t3 - t2).total_seconds())
        else:
            self.logger.info("***** The calculated metric data is not stored into the database. ***** ")
        return df

    def _flush_batch(self, stmt, valueList):
        """Execute one bulk upsert for ``valueList`` and return the saved row count.

        For PostgreSQL ``stmt`` is the SQL text; for DB2 it is a prepared
        ``ibm_db`` statement handle. The PostgreSQL batch API does not report
        a row count, so the batch size is returned as a best-effort count.
        """
        if self.is_postgre_sql:
            dbhelper.execute_batch(self.db_connection, stmt, valueList, DATALAKE_BATCH_UPDATE_ROWS)
            return len(valueList)  # work around: batch query returns no row count
        res = ibm_db.execute_many(stmt, tuple(valueList))
        return res if res is not None else ibm_db.num_rows(stmt)

    def store_derived_metrics(self, dataFrame):
        """Bulk-upsert the derived metric values in ``dataFrame``.

        For each non-transient data item, the value is pivoted into the
        (KEY, <grain columns>, VALUE_B, VALUE_N, VALUE_S, VALUE_T) row layout
        of its source table and written in batches of
        DATALAKE_BATCH_UPDATE_ROWS. One upsert statement is prepared per
        target table. Missing (NA) values are skipped.
        """
        if self.dms.production_mode:
            table_insert_stmt = {}
            table_metrics_to_persist = defaultdict(dict)
            for source, dtype in dataFrame.dtypes.to_dict().items():
                source_metadata = self.dms.data_items.get(source)
                if source_metadata is None:
                    continue
                # skip transient data items
                if source_metadata.get(md.DATA_ITEM_TRANSIENT_KEY) is True:
                    self.logger.debug('skip persisting transient data_item=%s' % source)
                    continue
                try:
                    tableName = source_metadata.get(md.DATA_ITEM_SOURCETABLE_KEY)
                except Exception:
                    self.logger.warning('sourceTableName invalid for derived_metric=%s' % source, exc_info=True)
                    continue
                if tableName not in table_insert_stmt:
                    grain = self.dms.target_grains[source]
                    sql = None
                    try:
                        if self.is_postgre_sql:
                            sql = self.create_upsert_statement_postgres_sql(tableName, grain)
                            table_insert_stmt[tableName] = (sql, grain)
                        else:
                            sql = self.create_upsert_statement(tableName, grain)
                            stmt = ibm_db.prepare(self.db_connection, sql)
                            table_insert_stmt[tableName] = (stmt, grain)
                        self.logger.debug('derived_metrics_upsert_sql = %s' % sql)
                    except Exception:
                        self.logger.warning('Error creating db upsert statement for sql = %s' % sql, exc_info=True)
                        continue
                # Decide which of the four typed value columns this metric fills,
                # based on the catalog column type (not on the pandas dtype).
                value_bool = False
                value_number = False
                value_string = False
                value_timestamp = False
                dtype = dtype.name.lower()
                if source_metadata.get(md.DATA_ITEM_COLUMN_TYPE_KEY) == md.DATA_ITEM_TYPE_NUMBER:
                    value_number = True
                elif source_metadata.get(md.DATA_ITEM_COLUMN_TYPE_KEY) == md.DATA_ITEM_TYPE_BOOLEAN:
                    value_bool = True
                elif source_metadata.get(md.DATA_ITEM_COLUMN_TYPE_KEY) == md.DATA_ITEM_TYPE_TIMESTAMP:
                    value_timestamp = True
                else:
                    value_string = True
                table_metrics_to_persist[tableName][source] = [value_bool, value_number, value_string, value_timestamp]
            self.logger.debug('table_metrics_to_persist=%s' % str(table_metrics_to_persist))
            # Remember position of column in dataframe. Index starts at 1.
            col_position = {}
            for pos, col_name in enumerate(dataFrame.columns, 1):
                col_position[col_name] = pos
            index_name_pos = {name: idx for idx, name in enumerate(dataFrame.index.names)}
            for table, metric_and_type in table_metrics_to_persist.items():
                stmt, grain = table_insert_stmt[table]
                # Loop over rows of data frame
                # We do not use namedtuples in intertuples() (name=None) because of clashes of column names with python
                # keywords and column names starting with underscore; both lead to renaming of column names in df_rows.
                # Additionally, namedtuples are limited to 255 columns in itertuples(). We access the columns in df_row
                # by index. Position starts at 1 because 0 is reserved for the row index.
                valueList = []
                cnt = 0
                total_saved = 0
                for df_row in dataFrame.itertuples(index=True, name=None):
                    ix = df_row[0]
                    for metric, metric_type in metric_and_type.items():
                        derivedMetricVal = df_row[col_position[metric]]
                        # Skip missing values
                        if pd.notna(derivedMetricVal):
                            rowVals = list()
                            rowVals.append(metric)
                            if grain is None or len(grain) == 0:
                                # no grain, the index must be an array of (id, timestamp)
                                rowVals.append(ix[0])
                                rowVals.append(ix[1])
                            elif not isinstance(ix, list) and not isinstance(ix, tuple):
                                # only one element in the grain, ix is not an array, just append it anyway
                                rowVals.append(ix)
                            else:
                                if grain[2]:
                                    # entity_first, the first level index must be the entity id
                                    rowVals.append(ix[0])
                                if grain[0] is not None:
                                    if grain[2]:
                                        # if both id and time are included in the grain, time must be at pos 1
                                        rowVals.append(ix[1])
                                    else:
                                        # if only time is included, time must be at pos 0
                                        rowVals.append(ix[0])
                                if grain[1] is not None:
                                    for dimension in grain[1]:
                                        rowVals.append(ix[index_name_pos[dimension]])
                            # VALUE_B: PostgreSQL stores a real boolean, DB2 stores 0/1.
                            if metric_type[0]:
                                if self.dms.is_postgre_sql:
                                    rowVals.append(
                                        False if (derivedMetricVal == False or derivedMetricVal == 0) else True)
                                else:
                                    rowVals.append(0 if (derivedMetricVal == False or derivedMetricVal == 0) else 1)
                            else:
                                rowVals.append(None)
                            # VALUE_N: non-finite numbers (inf/NaN) are stored as NULL.
                            if metric_type[1]:
                                myFloat = float(derivedMetricVal)
                                rowVals.append(myFloat if np.isfinite(myFloat) else None)
                            else:
                                rowVals.append(None)
                            rowVals.append(str(derivedMetricVal) if metric_type[2] else None)
                            rowVals.append(derivedMetricVal if metric_type[3] else None)
                            # BUGFIX: the original test used "float(...) is np.nan", an
                            # identity comparison that is virtually never True, so the
                            # NaN guard never fired. np.isnan() performs the value check
                            # that was intended.
                            if (metric_type[1] and np.isnan(float(derivedMetricVal))) or (
                                    metric_type[2] and str(derivedMetricVal) == 'nan'):
                                self.logger.debug('!!! weird case, derivedMetricVal=%s' % derivedMetricVal)
                                continue
                            valueList.append(tuple(rowVals))
                            cnt += 1
                            if cnt >= DATALAKE_BATCH_UPDATE_ROWS:
                                try:
                                    # Bulk insert
                                    total_saved += self._flush_batch(stmt, valueList)
                                    self.logger.debug('Records saved so far = %d' % total_saved)
                                except Exception as ex:
                                    raise Exception('Error persisting derived metrics, batch size = %s, valueList=%s' % (
                                        len(valueList), str(valueList))) from ex
                                valueList = []
                                cnt = 0
                if len(valueList) > 0:
                    try:
                        # Bulk insert of the remaining partial batch
                        total_saved += self._flush_batch(stmt, valueList)
                    except Exception as ex:
                        raise Exception('Error persisting derived metrics, batch size = %s, valueList=%s' % (
                            len(valueList), str(valueList))) from ex
                self.logger.debug('derived_metrics_persisted = %s' % str(total_saved))

    def create_upsert_statement(self, tableName, grain):
        """Build the DB2 MERGE (upsert) statement for ``tableName``.

        The key columns are derived from ``grain``: with no grain the key is
        (ENTITY_ID, TIMESTAMP); otherwise entity id and/or timestamp and any
        dimension columns are included as dictated by the grain tuple
        (timestamp_flag, dimensions, entity_first).
        """
        dimensions = []
        if grain is None or len(grain) == 0:
            dimensions.append(KPI_ENTITY_ID_COLUMN)
            dimensions.append('TIMESTAMP')
        else:
            if grain[2]:
                dimensions.append(KPI_ENTITY_ID_COLUMN)
            if grain[0] is not None:
                dimensions.append('TIMESTAMP')
            if grain[1] is not None:
                dimensions.extend(grain[1])
        colExtension = ''
        parmExtension = ''
        joinExtension = ''
        sourceExtension = ''
        for dimension in dimensions:
            quoted_dimension = dbhelper.quotingColumnName(dimension)
            colExtension += ', ' + quoted_dimension
            parmExtension += ', ?'
            joinExtension += ' AND TARGET.' + quoted_dimension + ' = SOURCE.' + quoted_dimension
            sourceExtension += ', SOURCE.' + quoted_dimension
        return ("MERGE INTO %s.%s AS TARGET "
                "USING (VALUES (?%s, ?, ?, ?, ?, CURRENT TIMESTAMP)) AS SOURCE (KEY%s, VALUE_B, VALUE_N, VALUE_S, VALUE_T, LAST_UPDATE) "
                "ON TARGET.KEY = SOURCE.KEY%s "
                "WHEN MATCHED THEN "
                "UPDATE SET TARGET.VALUE_B = SOURCE.VALUE_B, TARGET.VALUE_N = SOURCE.VALUE_N, TARGET.VALUE_S = SOURCE.VALUE_S, TARGET.VALUE_T = SOURCE.VALUE_T, TARGET.LAST_UPDATE = SOURCE.LAST_UPDATE "
                "WHEN NOT MATCHED THEN "
                "INSERT (KEY%s, VALUE_B, VALUE_N, VALUE_S, VALUE_T, LAST_UPDATE) VALUES (SOURCE.KEY%s, SOURCE.VALUE_B, SOURCE.VALUE_N, SOURCE.VALUE_S, SOURCE.VALUE_T, CURRENT TIMESTAMP)") % (
            dbhelper.quotingSchemaName(self.schema), dbhelper.quotingTableName(tableName), parmExtension,
            colExtension, joinExtension, colExtension, sourceExtension)

    def create_upsert_statement_postgres_sql(self, tableName, grain):
        """Build the PostgreSQL INSERT ... ON CONFLICT (upsert) statement.

        Mirrors :meth:`create_upsert_statement` but with lower-case column
        names and relying on the table's ``uc_<tableName>`` unique constraint.
        NOTE(review): schema and table names are concatenated unquoted here;
        they come from internal metadata, not user input — confirm if that
        ever changes.
        """
        dimensions = []
        if grain is None or len(grain) == 0:
            dimensions.append(KPI_ENTITY_ID_COLUMN.lower())
            dimensions.append('timestamp')
        else:
            if grain[2]:
                dimensions.append(KPI_ENTITY_ID_COLUMN.lower())
            if grain[0] is not None:
                dimensions.append('timestamp')
            if grain[1] is not None:
                dimensions.extend(grain[1])
        colExtension = ''
        parmExtension = ''
        for dimension in dimensions:
            # Note: the dimension grain need to be in lower case since the table will be created with lowercase column.
            quoted_dimension = dbhelper.quotingColumnName(dimension.lower(), self.is_postgre_sql)
            colExtension += ', ' + quoted_dimension
            parmExtension += ', %s'
        sql = "insert into " + self.schema + "." + tableName + " (key " + colExtension + ",value_b,value_n,value_s,value_t,last_update) values (%s " + parmExtension + ", %s, %s, %s, %s, current_timestamp) on conflict on constraint uc_" + tableName + " do update set value_b = EXCLUDED.value_b, value_n = EXCLUDED.value_n, value_s = EXCLUDED.value_s, value_t = EXCLUDED.value_t, last_update = EXCLUDED.last_update"
        return sql
class ProduceAlerts(object):
    """Pipeline stage that routes alert data items to the message hub and,
    for alerts backed by a catalog entry, to the alert database table.

    NOTE(review): the class continues beyond this chunk; only the class
    attributes and the constructor are documented here.
    """
    # Flags read by the pipeline framework to classify this stage.
    is_system_function = True
    produces_output_items = False
    def __init__(self, dms, alerts=None, all_cols=None, **kwargs):
        """Collect alert routing information from the data-item metadata.

        Parameters:
            dms: data-management service (connection, schema, catalog, metadata).
            alerts: explicit list of alert item names; when None, alerts are
                discovered from ``all_cols`` via the ALERT tag.
            all_cols: candidate column names to scan for alert-tagged items.

        Raises:
            RuntimeError: if ``dms`` is None, or both ``alerts`` and
                ``all_cols`` are None.
        """
        if dms is None:
            raise RuntimeError("argument dms must be provided")
        if alerts is None and all_cols is None:
            raise RuntimeError("either alerts argument or all_cols arguments must be provided")
        self.dms = dms
        # Newer dms objects expose logical_name; fall back to entity_type.
        try:
            self.entity_type_name = dms.logical_name
        except AttributeError:
            self.entity_type_name = dms.entity_type
        self.entity_type_id = dms.entity_type_id
        self.is_postgre_sql = dms.is_postgre_sql
        self.db_connection = dms.db_connection
        self.quotedSchema = dbhelper.quotingSchemaName(dms.default_db_schema, self.is_postgre_sql)
        self.quotedTableName = dbhelper.quotingTableName('dm_wiot_as_alert', self.is_postgre_sql)
        # alert item name -> the 'input' section of its KPI function DTO
        self.alert_to_kpi_input_dict = dict()
        # alert item names published to the message hub
        self.alerts_to_message_hub = []
        # subset of the above that also has a catalog entry, written to the DB table
        self.alerts_to_database = []
        self.alert_catalogs = dms.catalog.get_alerts()
        if alerts is None:
            if all_cols is not None:
                # Discover alert items: any data item carrying the ALERT tag.
                for alert_data_item in asList(all_cols):
                    metadata = dms.data_items.get(alert_data_item)
                    if metadata is not None:
                        if md.DATA_ITEM_TAG_ALERT in metadata.get(md.DATA_ITEM_TAGS_KEY, []):
                            self.alerts_to_message_hub.append(alert_data_item)
                            # NOTE(review): assumes an ALERT-tagged item always has a
                            # KPI-function DTO; kpi_func_dto=None would raise here — confirm.
                            kpi_func_dto = metadata.get(md.DATA_ITEM_KPI_FUNCTION_DTO_KEY, None)
                            kpi_function_name = kpi_func_dto.get(md.DATA_ITEM_KPI_FUNCTION_DTO_FUNCTION_NAME, None)
                            alert_catalog = self.alert_catalogs.get(kpi_function_name, None)
                            if alert_catalog is not None:
                                self.alerts_to_database.append(alert_data_item)
                                self.alert_to_kpi_input_dict[alert_data_item] = kpi_func_dto.get('input')
| |
<gh_stars>10-100
# Generated from SqlSmall.g4 by ANTLR 4.8
from antlr4 import *
from io import StringIO
from typing.io import TextIO
import sys
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2\u00ac")
buf.write("\u05f6\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7")
buf.write("\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r")
buf.write("\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23")
buf.write("\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30")
buf.write("\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36")
buf.write("\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%")
buf.write("\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.")
buf.write("\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64")
buf.write("\t\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:")
buf.write("\4;\t;\4<\t<\4=\t=\4>\t>\4?\t?\4@\t@\4A\tA\4B\tB\4C\t")
buf.write("C\4D\tD\4E\tE\4F\tF\4G\tG\4H\tH\4I\tI\4J\tJ\4K\tK\4L\t")
buf.write("L\4M\tM\4N\tN\4O\tO\4P\tP\4Q\tQ\4R\tR\4S\tS\4T\tT\4U\t")
buf.write("U\4V\tV\4W\tW\4X\tX\4Y\tY\4Z\tZ\4[\t[\4\\\t\\\4]\t]\4")
buf.write("^\t^\4_\t_\4`\t`\4a\ta\4b\tb\4c\tc\4d\td\4e\te\4f\tf\4")
buf.write("g\tg\4h\th\4i\ti\4j\tj\4k\tk\4l\tl\4m\tm\4n\tn\4o\to\4")
buf.write("p\tp\4q\tq\4r\tr\4s\ts\4t\tt\4u\tu\4v\tv\4w\tw\4x\tx\4")
buf.write("y\ty\4z\tz\4{\t{\4|\t|\4}\t}\4~\t~\4\177\t\177\4\u0080")
buf.write("\t\u0080\4\u0081\t\u0081\4\u0082\t\u0082\4\u0083\t\u0083")
buf.write("\4\u0084\t\u0084\4\u0085\t\u0085\4\u0086\t\u0086\4\u0087")
buf.write("\t\u0087\4\u0088\t\u0088\4\u0089\t\u0089\4\u008a\t\u008a")
buf.write("\4\u008b\t\u008b\4\u008c\t\u008c\4\u008d\t\u008d\4\u008e")
buf.write("\t\u008e\4\u008f\t\u008f\4\u0090\t\u0090\4\u0091\t\u0091")
buf.write("\4\u0092\t\u0092\4\u0093\t\u0093\4\u0094\t\u0094\4\u0095")
buf.write("\t\u0095\4\u0096\t\u0096\4\u0097\t\u0097\4\u0098\t\u0098")
buf.write("\4\u0099\t\u0099\4\u009a\t\u009a\4\u009b\t\u009b\4\u009c")
buf.write("\t\u009c\4\u009d\t\u009d\4\u009e\t\u009e\4\u009f\t\u009f")
buf.write("\4\u00a0\t\u00a0\4\u00a1\t\u00a1\4\u00a2\t\u00a2\4\u00a3")
buf.write("\t\u00a3\4\u00a4\t\u00a4\4\u00a5\t\u00a5\4\u00a6\t\u00a6")
buf.write("\4\u00a7\t\u00a7\4\u00a8\t\u00a8\4\u00a9\t\u00a9\4\u00aa")
buf.write("\t\u00aa\4\u00ab\t\u00ab\4\u00ac\t\u00ac\4\u00ad\t\u00ad")
buf.write("\4\u00ae\t\u00ae\4\u00af\t\u00af\4\u00b0\t\u00b0\4\u00b1")
buf.write("\t\u00b1\4\u00b2\t\u00b2\4\u00b3\t\u00b3\4\u00b4\t\u00b4")
buf.write("\4\u00b5\t\u00b5\4\u00b6\t\u00b6\4\u00b7\t\u00b7\4\u00b8")
buf.write("\t\u00b8\4\u00b9\t\u00b9\4\u00ba\t\u00ba\4\u00bb\t\u00bb")
buf.write("\4\u00bc\t\u00bc\4\u00bd\t\u00bd\4\u00be\t\u00be\4\u00bf")
buf.write("\t\u00bf\4\u00c0\t\u00c0\4\u00c1\t\u00c1\4\u00c2\t\u00c2")
buf.write("\4\u00c3\t\u00c3\4\u00c4\t\u00c4\4\u00c5\t\u00c5\4\u00c6")
buf.write("\t\u00c6\4\u00c7\t\u00c7\4\u00c8\t\u00c8\4\u00c9\t\u00c9")
buf.write("\4\u00ca\t\u00ca\4\u00cb\t\u00cb\3\2\3\2\3\3\3\3\3\4\3")
buf.write("\4\3\5\3\5\3\6\3\6\3\7\3\7\3\b\3\b\3\b\3\b\3\t\3\t\3\t")
buf.write("\3\t\3\t\3\n\3\n\3\n\3\n\3\13\3\13\3\13\3\13\3\f\3\f\3")
buf.write("\f\3\f\3\f\3\r\3\r\3\r\3\16\3\16\3\16\3\16\3\17\3\17\3")
buf.write("\17\3\17\3\17\3\20\3\20\3\20\3\20\3\20\3\21\3\21\3\21")
buf.write("\3\21\3\21\3\21\3\22\3\22\3\22\3\22\3\23\3\23\3\23\3\23")
buf.write("\3\23\3\23\3\23\3\23\3\24\3\24\3\24\3\24\3\24\3\24\3\24")
buf.write("\3\24\3\25\3\25\3\25\3\26\3\26\3\26\3\26\3\26\3\27\3\27")
buf.write("\3\27\3\27\3\27\3\30\3\30\3\30\3\30\3\30\3\31\3\31\3\31")
buf.write("\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\32\3\32")
buf.write("\3\32\3\32\3\32\3\33\3\33\3\33\3\33\3\33\3\33\3\33\3\33")
buf.write("\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\35\3\35\3\35\3\35")
buf.write("\3\35\3\35\3\35\3\35\3\35\3\36\3\36\3\36\3\36\3\36\3\36")
buf.write("\3\36\3\37\3\37\3\37\3\37\3 \3 \3 \3 \3!\3!\3!\3!\3!\3")
buf.write("!\3\"\3\"\3\"\3\"\3\"\3\"\3#\3#\3#\3#\3#\3#\3#\3#\3#\3")
buf.write("#\3#\3#\3#\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3$\3%\3")
buf.write("%\3%\3%\3%\3%\3%\3%\3%\3%\3%\3%\3%\3%\3%\3%\3%\3%\3&\3")
buf.write("&\3&\3&\3&\3\'\3\'\3\'\3\'\3(\3(\3(\3(\3(\3(\3(\3(\3)")
buf.write("\3)\3)\3)\3)\3)\3)\3)\3*\3*\3*\3*\3*\3*\3*\3*\3*\3*\3")
buf.write("*\3+\3+\3+\3+\3+\3,\3,\3,\3,\3,\3,\3,\3,\3,\3-\3-\3-\3")
buf.write("-\3.\3.\3.\3.\3.\3/\3/\3/\3/\3\60\3\60\3\60\3\60\3\61")
buf.write("\3\61\3\61\3\61\3\61\3\61\3\61\3\61\3\62\3\62\3\62\3\62")
buf.write("\3\62\3\62\3\63\3\63\3\63\3\63\3\63\3\63\3\64\3\64\3\64")
buf.write("\3\64\3\64\3\64\3\65\3\65\3\65\3\65\3\66\3\66\3\66\3\66")
buf.write("\3\66\3\67\3\67\3\67\3\67\3\67\38\38\38\38\38\38\39\3")
buf.write("9\39\39\39\39\39\3:\3:\3:\3:\3:\3;\3;\3;\3<\3<\3<\3<\3")
buf.write("=\3=\3=\3>\3>\3>\3>\3>\3>\3?\3?\3?\3?\3?\3?\3?\3?\3@\3")
buf.write("@\3@\3@\3@\3@\3@\3@\3@\3@\3A\3A\3A\3B\3B\3B\3B\3B\3C\3")
buf.write("C\3C\3C\3C\3D\3D\3D\3D\3D\3D\3E\3E\3E\3F\3F\3F\3F\3G\3")
buf.write("G\3G\3G\3G\3G\3H\3H\3H\3H\3H\3I\3I\3I\3I\3I\3I\3J\3J\3")
buf.write("J\3J\3K\3K\3K\3K\3K\3K\3K\3K\3K\3K\3K\3K\3L\3L\3L\3L\3")
buf.write("M\3M\3M\3M\3M\3M\3M\3N\3N\3N\3N\3N\3N\3O\3O\3O\3O\3O\3")
buf.write("O\3P\3P\3P\3P\3P\3P\3Q\3Q\3Q\3Q\3R\3R\3R\3R\3R\3S\3S\3")
buf.write("S\3S\3S\3S\3S\3S\3T\3T\3T\3U\3U\3U\3V\3V\3V\3V\3V\3V\3")
buf.write("W\3W\3W\3W\3W\3W\3X\3X\3X\3X\3X\3Y\3Y\3Y\3Y\3Y\3Y\3Y\3")
buf.write("Y\3Y\3Y\3Z\3Z\3Z\3Z\3Z\3Z\3Z\3Z\3Z\3Z\3Z\3Z\3Z\3Z\3Z\3")
buf.write("Z\3[\3[\3[\3[\3[\3[\3[\3[\3[\3[\3[\3[\3[\3[\3[\3[\3\\")
buf.write("\3\\\3\\\3]\3]\3]\3]\3]\3]\3]\3]\3]\3^\3^\3^\3^\3^\3^")
buf.write("\3_\3_\3_\3_\3_\3`\3`\3`\3`\3`\3`\3`\3a\3a\3a\3a\3a\3")
buf.write("b\3b\3b\3b\3b\3b\3c\3c\3c\3c\3c\3c\3d\3d\3d\3d\3d\3d\3")
buf.write("d\3d\3d\3d\3d\3e\3e\3e\3e\3e\3e\3e\3f\3f\3f\3f\3f\3f\3")
buf.write("f\3g\3g\3g\3g\3g\3g\3g\3h\3h\3h\3h\3h\3i\3i\3i\3i\3i\3")
buf.write("j\3j\3j\3j\3k\3k\3k\3k\3k\3l\3l\3l\3l\3m\3m\3m\3m\3m\3")
buf.write("n\3n\3n\3n\3n\3n\3n\3o\3o\3o\3o\3p\3p\3p\3p\3p\3p\3p\3")
buf.write("q\3q\3q\3q\3q\3q\3r\3r\3r\3r\3r\3r\3r\3r\3r\3r\3s\3s\3")
buf.write("s\3s\3t\3t\3t\3t\3u\3u\3u\3u\3u\3v\3v\3v\3v\3v\3w\3w\3")
buf.write("w\3w\3w\3w\3w\3w\3w\3w\3x\3x\3x\3x\3y\3y\3y\3y\3y\3z\3")
buf.write("z\3z\3z\3z\3{\3{\3{\3{\3{\3{\3|\3|\3|\3|\3|\3|\3|\3|\3")
buf.write("|\3}\3}\3}\3}\3}\3}\3}\3}\3}\3~\3~\3~\3~\3~\3~\3\177\3")
buf.write("\177\3\177\3\177\3\177\3\177\3\u0080\3\u0080\3\u0080\3")
buf.write("\u0080\3\u0080\3\u0080\3\u0081\3\u0081\3\u0081\3\u0081")
buf.write("\3\u0082\3\u0082\3\u0082\3\u0082\3\u0082\3\u0082\3\u0082")
buf.write("\3\u0082\3\u0083\3\u0083\3\u0083\3\u0083\3\u0083\3\u0083")
buf.write("\3\u0083\3\u0083\3\u0083\3\u0084\3\u0084\3\u0084\3\u0084")
buf.write("\3\u0084\3\u0084\3\u0084\3\u0084\3\u0084\3\u0085\3\u0085")
buf.write("\3\u0085\3\u0085\3\u0085\3\u0085\3\u0085\3\u0085\3\u0086")
buf.write("\3\u0086\3\u0086\3\u0086\3\u0086\3\u0087\3\u0087\3\u0087")
buf.write("\3\u0087\3\u0087\3\u0087\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write("\3\u0088\3\u0089\3\u0089\3\u0089\5\u0089\u04ca\n\u0089")
buf.write("\3\u008a\3\u008a\3\u008a\3\u008a\3\u008b\3\u008b\3\u008b")
buf.write("\3\u008c\3\u008c\3\u008c\3\u008d\3\u008d\3\u008e\3\u008e")
buf.write("\3\u008e\3\u008e\5\u008e\u04dc\n\u008e\3\u008f\3\u008f")
buf.write("\3\u0090\3\u0090\3\u0090\3\u0090\5\u0090\u04e4\n\u0090")
buf.write("\3\u0091\3\u0091\3\u0092\3\u0092\3\u0093\3\u0093\3\u0094")
buf.write("\3\u0094\3\u0095\3\u0095\3\u0096\3\u0096\3\u0097\3\u0097")
buf.write("\3\u0098\3\u0098\3\u0099\3\u0099\3\u0099\3\u009a\3\u009a")
buf.write("\3\u009b\3\u009b\3\u009c\3\u009c\3\u009c\3\u009c\7\u009c")
buf.write("\u0501\n\u009c\f\u009c\16\u009c\u0504\13\u009c\3\u009c")
buf.write("\3\u009c\3\u009d\6\u009d\u0509\n\u009d\r\u009d\16\u009d")
buf.write("\u050a\3\u009e\6\u009e\u050e\n\u009e\r\u009e\16\u009e")
buf.write("\u050f\3\u009e\3\u009e\3\u009e\3\u009e\5\u009e\u0516\n")
buf.write("\u009e\5\u009e\u0518\n\u009e\3\u009f\3\u009f\3\u009f\3")
buf.write("\u009f\3\u00a0\3\u00a0\3\u00a0\3\u00a0\3\u00a0\3\u00a0")
buf.write("\3\u00a1\3\u00a1\5\u00a1\u0526\n\u00a1\3\u00a2\3\u00a2")
buf.write("\7\u00a2\u052a\n\u00a2\f\u00a2\16\u00a2\u052d\13\u00a2")
buf.write("\3\u00a3\6\u00a3\u0530\n\u00a3\r\u00a3\16\u00a3\u0531")
buf.write("\3\u00a3\3\u00a3\3\u00a3\7\u00a3\u0537\n\u00a3\f\u00a3")
buf.write("\16\u00a3\u053a\13\u00a3\3\u00a3\3\u00a3\5\u00a3\u053e")
buf.write("\n\u00a3\3\u00a4\3\u00a4\3\u00a4\3\u00a4\7\u00a4\u0544")
buf.write("\n\u00a4\f\u00a4\16\u00a4\u0547\13\u00a4\3\u00a4\3\u00a4")
buf.write("\3\u00a4\3\u00a4\3\u00a4\7\u00a4\u054e\n\u00a4\f\u00a4")
buf.write("\16\u00a4\u0551\13\u00a4\3\u00a4\3\u00a4\3\u00a4\3\u00a4")
buf.write("\3\u00a4\7\u00a4\u0558\n\u00a4\f\u00a4\16\u00a4\u055b")
buf.write("\13\u00a4\3\u00a4\5\u00a4\u055e\n\u00a4\3\u00a5\3\u00a5")
buf.write("\5\u00a5\u0562\n\u00a5\3\u00a6\3\u00a6\3\u00a7\3\u00a7")
buf.write("\3\u00a7\3\u00a7\3\u00a7\3\u00a7\7\u00a7\u056c\n\u00a7")
buf.write("\f\u00a7\16\u00a7\u056f\13\u00a7\3\u00a7\3\u00a7\3\u00a8")
buf.write("\6\u00a8\u0574\n\u00a8\r\u00a8\16\u00a8\u0575\3\u00a8")
buf.write("\3\u00a8\7\u00a8\u057a\n\u00a8\f\u00a8\16\u00a8\u057d")
buf.write("\13\u00a8\3\u00a9\3\u00a9\5\u00a9\u0581\n\u00a9\3\u00a9")
buf.write("\6\u00a9\u0584\n\u00a9\r\u00a9\16\u00a9\u0585\3\u00aa")
buf.write("\3\u00aa\3\u00ab\3\u00ab\3\u00ac\3\u00ac\3\u00ad\3\u00ad")
buf.write("\3\u00ae\3\u00ae\3\u00af\3\u00af\3\u00b0\3\u00b0\3\u00b1")
buf.write("\3\u00b1\3\u00b2\3\u00b2\3\u00b3\3\u00b3\3\u00b4\3\u00b4")
buf.write("\3\u00b5\3\u00b5\3\u00b6\3\u00b6\3\u00b7\3\u00b7\3\u00b8")
buf.write("\3\u00b8\3\u00b9\3\u00b9\3\u00ba\3\u00ba\3\u00bb\3\u00bb")
buf.write("\3\u00bc\3\u00bc\3\u00bd\3\u00bd\3\u00be\3\u00be\3\u00bf")
buf.write("\3\u00bf\3\u00c0\3\u00c0\3\u00c1\3\u00c1\3\u00c2\3\u00c2")
buf.write("\3\u00c3\3\u00c3\3\u00c4\3\u00c4\3\u00c5\3\u00c5\3\u00c6")
buf.write("\3\u00c6\3\u00c7\3\u00c7\3\u00c7\3\u00c7\7\u00c7\u05c6")
buf.write("\n\u00c7\f\u00c7\16\u00c7\u05c9\13\u00c7\3\u00c7\5\u00c7")
buf.write("\u05cc\n\u00c7\3\u00c7\5\u00c7\u05cf\n\u00c7\3\u00c7\3")
buf.write("\u00c7\3\u00c8\3\u00c8\3\u00c8\3\u00c8\3\u00c8\3\u00c8")
buf.write("\3\u00c8\3\u00c9\3\u00c9\3\u00c9\3\u00c9\3\u00c9\7\u00c9")
buf.write("\u05df\n\u00c9\f\u00c9\16\u00c9\u05e2\13\u00c9\3\u00c9")
buf.write("\3\u00c9\3\u00c9\3\u00c9\3\u00c9\3\u00ca\6\u00ca\u05ea")
buf.write("\n\u00ca\r\u00ca\16\u00ca\u05eb\3\u00ca\3\u00ca\3\u00cb")
buf.write("\6\u00cb\u05f1\n\u00cb\r\u00cb\16\u00cb\u05f2\3\u00cb")
buf.write("\3\u00cb\6\u0545\u054f\u0559\u05e0\2\u00cc\3\3\5\4\7\5")
buf.write("\t\6\13\7\r\b\17\t\21\n\23\13\25\f\27\r\31\16\33\17\35")
buf.write("\20\37\21!\22#\23%\24\'\25)\26+\27-\30/\31\61\32\63\33")
buf.write("\65\34\67\359\36;\37= ?!A\"C#E$G%I&K\'M(O)Q*S+U,W-Y.[")
buf.write("/]\60_\61a\62c\63e\64g\65i\66k\67m8o9q:s;u<w=y>{?}@\177")
buf.write("A\u0081B\u0083C\u0085D\u0087E\u0089F\u008bG\u008dH\u008f")
buf.write("I\u0091J\u0093K\u0095L\u0097M\u0099N\u009bO\u009dP\u009f")
buf.write("Q\u00a1R\u00a3S\u00a5T\u00a7U\u00a9V\u00abW\u00adX\u00af")
buf.write("Y\u00b1Z\u00b3[\u00b5\\\u00b7]\u00b9^\u00bb_\u00bd`\u00bf")
buf.write("a\u00c1b\u00c3c\u00c5d\u00c7e\u00c9f\u00cbg\u00cdh\u00cf")
buf.write("i\u00d1j\u00d3k\u00d5l\u00d7m\u00d9n\u00dbo\u00ddp\u00df")
buf.write("q\u00e1r\u00e3s\u00e5t\u00e7u\u00e9v\u00ebw\u00edx\u00ef")
buf.write("y\u00f1z\u00f3{\u00f5|\u00f7}\u00f9~\u00fb\177\u00fd\u0080")
buf.write("\u00ff\u0081\u0101\u0082\u0103\u0083\u0105\u0084\u0107")
buf.write("\u0085\u0109\u0086\u010b\u0087\u010d\u0088\u010f\u0089")
buf.write("\u0111\u008a\u0113\u008b\u0115\u008c\u0117\u008d\u0119")
buf.write("\u008e\u011b\u008f\u011d\u0090\u011f\u0091\u0121\u0092")
buf.write("\u0123\u0093\u0125\u0094\u0127\u0095\u0129\u0096\u012b")
buf.write("\u0097\u012d\u0098\u012f\u0099\u0131\u009a\u0133\u009b")
buf.write("\u0135\u009c\u0137\u009d\u0139\u009e\u013b\u009f\u013d")
buf.write("\u00a0\u013f\u00a1\u0141\u00a2\u0143\u00a3\u0145\u00a4")
buf.write("\u0147\u00a5\u0149\u00a6\u014b\u00a7\u014d\2\u014f\2\u0151")
buf.write("\2\u0153\2\u0155\2\u0157\2\u0159\2\u015b\2\u015d\2\u015f")
buf.write("\2\u0161\2\u0163\2\u0165\2\u0167\2\u0169\2\u016b\2\u016d")
buf.write("\2\u016f\2\u0171\2\u0173\2\u0175\2\u0177\2\u0179\2\u017b")
buf.write("\2\u017d\2\u017f\2\u0181\2\u0183\2\u0185\2\u0187\2\u0189")
buf.write("\2\u018b\2\u018d\u00a8\u018f\u00a9\u0191\u00aa\u0193\u00ab")
buf.write("\u0195\u00ac\3\2)\4\2))^^\6\2C\\aac|\u00a3\1\b\2&&\62")
buf.write(";C\\aac|\u00a3\1\4\2\"\"aa\4\2$$^^\4\2--//\3\2\62;\3\2")
buf.write("C\\\3\2c|\4\2CCcc\4\2DDdd\4\2EEee\4\2FFff\4\2GGgg\4\2")
buf.write("HHhh\4\2IIii\4\2JJjj\4\2KKkk\4\2LLll\4\2MMmm\4\2NNnn\4")
buf.write("\2OOoo\4\2PPpp\4\2QQqq\4\2RRrr\4\2SSss\4\2TTtt\4\2UUu")
buf.write("u\4\2VVvv\4\2WWww\4\2XXxx\4\2YYyy\4\2ZZzz\4\2[[{{\4\2")
buf.write("\\\\||\4\2\f\f\17\17\3\2--\5\2\13\f\17\17\"\"\4\2\13\13")
buf.write("\"\"\2\u05ff\2\3\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3")
buf.write("\2\2\2\2\13\3\2\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21\3\2")
buf.write("\2\2\2\23\3\2\2\2\2\25\3\2\2\2\2\27\3\2\2\2\2\31\3\2\2")
buf.write("\2\2\33\3\2\2\2\2\35\3\2\2\2\2\37\3\2\2\2\2!\3\2\2\2\2")
buf.write("#\3\2\2\2\2%\3\2\2\2\2\'\3\2\2\2\2)\3\2\2\2\2+\3\2\2\2")
buf.write("\2-\3\2\2\2\2/\3\2\2\2\2\61\3\2\2\2\2\63\3\2\2\2\2\65")
buf.write("\3\2\2\2\2\67\3\2\2\2\29\3\2\2\2\2;\3\2\2\2\2=\3\2\2\2")
buf.write("\2?\3\2\2\2\2A\3\2\2\2\2C\3\2\2\2\2E\3\2\2\2\2G\3\2\2")
buf.write("\2\2I\3\2\2\2\2K\3\2\2\2\2M\3\2\2\2\2O\3\2\2\2\2Q\3\2")
buf.write("\2\2\2S\3\2\2\2\2U\3\2\2\2\2W\3\2\2\2\2Y\3\2\2\2\2[\3")
buf.write("\2\2\2\2]\3\2\2\2\2_\3\2\2\2\2a\3\2\2\2\2c\3\2\2\2\2e")
buf.write("\3\2\2\2\2g\3\2\2\2\2i\3\2\2\2\2k\3\2\2\2\2m\3\2\2\2\2")
buf.write("o\3\2\2\2\2q\3\2\2\2\2s\3\2\2\2\2u\3\2\2\2\2w\3\2\2\2")
buf.write("\2y\3\2\2\2\2{\3\2\2\2\2}\3\2\2\2\2\177\3\2\2\2\2\u0081")
buf.write("\3\2\2\2\2\u0083\3\2\2\2\2\u0085\3\2\2\2\2\u0087\3\2\2")
buf.write("\2\2\u0089\3\2\2\2\2\u008b\3\2\2\2\2\u008d\3\2\2\2\2\u008f")
buf.write("\3\2\2\2\2\u0091\3\2\2\2\2\u0093\3\2\2\2\2\u0095\3\2\2")
buf.write("\2\2\u0097\3\2\2\2\2\u0099\3\2\2\2\2\u009b\3\2\2\2\2\u009d")
buf.write("\3\2\2\2\2\u009f\3\2\2\2\2\u00a1\3\2\2\2\2\u00a3\3\2\2")
buf.write("\2\2\u00a5\3\2\2\2\2\u00a7\3\2\2\2\2\u00a9\3\2\2\2\2\u00ab")
buf.write("\3\2\2\2\2\u00ad\3\2\2\2\2\u00af\3\2\2\2\2\u00b1\3\2\2")
buf.write("\2\2\u00b3\3\2\2\2\2\u00b5\3\2\2\2\2\u00b7\3\2\2\2\2\u00b9")
buf.write("\3\2\2\2\2\u00bb\3\2\2\2\2\u00bd\3\2\2\2\2\u00bf\3\2\2")
buf.write("\2\2\u00c1\3\2\2\2\2\u00c3\3\2\2\2\2\u00c5\3\2\2\2\2\u00c7")
buf.write("\3\2\2\2\2\u00c9\3\2\2\2\2\u00cb\3\2\2\2\2\u00cd\3\2\2")
buf.write("\2\2\u00cf\3\2\2\2\2\u00d1\3\2\2\2\2\u00d3\3\2\2\2\2\u00d5")
buf.write("\3\2\2\2\2\u00d7\3\2\2\2\2\u00d9\3\2\2\2\2\u00db\3\2\2")
buf.write("\2\2\u00dd\3\2\2\2\2\u00df\3\2\2\2\2\u00e1\3\2\2\2\2\u00e3")
buf.write("\3\2\2\2\2\u00e5\3\2\2\2\2\u00e7\3\2\2\2\2\u00e9\3\2\2")
buf.write("\2\2\u00eb\3\2\2\2\2\u00ed\3\2\2\2\2\u00ef\3\2\2\2\2\u00f1")
buf.write("\3\2\2\2\2\u00f3\3\2\2\2\2\u00f5\3\2\2\2\2\u00f7\3\2\2")
buf.write("\2\2\u00f9\3\2\2\2\2\u00fb\3\2\2\2\2\u00fd\3\2\2\2\2\u00ff")
buf.write("\3\2\2\2\2\u0101\3\2\2\2\2\u0103\3\2\2\2\2\u0105\3\2\2")
buf.write("\2\2\u0107\3\2\2\2\2\u0109\3\2\2\2\2\u010b\3\2\2\2\2\u010d")
buf.write("\3\2\2\2\2\u010f\3\2\2\2\2\u0111\3\2\2\2\2\u0113\3\2\2")
buf.write("\2\2\u0115\3\2\2\2\2\u0117\3\2\2\2\2\u0119\3\2\2\2\2\u011b")
buf.write("\3\2\2\2\2\u011d\3\2\2\2\2\u011f\3\2\2\2\2\u0121\3\2\2")
buf.write("\2\2\u0123\3\2\2\2\2\u0125\3\2\2\2\2\u0127\3\2\2\2\2\u0129")
buf.write("\3\2\2\2\2\u012b\3\2\2\2\2\u012d\3\2\2\2\2\u012f\3\2\2")
buf.write("\2\2\u0131\3\2\2\2\2\u0133\3\2\2\2\2\u0135\3\2\2\2\2\u0137")
buf.write("\3\2\2\2\2\u0139\3\2\2\2\2\u013b\3\2\2\2\2\u013d\3\2\2")
buf.write("\2\2\u013f\3\2\2\2\2\u0141\3\2\2\2\2\u0143\3\2\2\2\2\u0145")
buf.write("\3\2\2\2\2\u0147\3\2\2\2\2\u0149\3\2\2\2\2\u014b\3\2\2")
buf.write("\2\2\u018d\3\2\2\2\2\u018f\3\2\2\2\2\u0191\3\2\2\2\2\u0193")
buf.write("\3\2\2\2\2\u0195\3\2\2\2\3\u0197\3\2\2\2\5\u0199\3\2\2")
buf.write("\2\7\u019b\3\2\2\2\t\u019d\3\2\2\2\13\u019f\3\2\2\2\r")
buf.write("\u01a1\3\2\2\2\17\u01a3\3\2\2\2\21\u01a7\3\2\2\2\23\u01ac")
buf.write("\3\2\2\2\25\u01b0\3\2\2\2\27\u01b4\3\2\2\2\31\u01b9\3")
buf.write("\2\2\2\33\u01bc\3\2\2\2\35\u01c0\3\2\2\2\37\u01c5\3\2")
buf.write("\2\2!\u01ca\3\2\2\2#\u01d0\3\2\2\2%\u01d4\3\2\2\2\'\u01dc")
buf.write("\3\2\2\2)\u01e4\3\2\2\2+\u01e7\3\2\2\2-\u01ec\3\2\2\2")
buf.write("/\u01f1\3\2\2\2\61\u01f6\3\2\2\2\63\u0202\3\2\2\2\65\u0207")
buf.write("\3\2\2\2\67\u020f\3\2\2\29\u0216\3\2\2\2;\u021f\3\2\2")
buf.write("\2=\u0226\3\2\2\2?\u022a\3\2\2\2A\u022e\3\2\2\2C\u0234")
buf.write("\3\2\2\2E\u023a\3\2\2\2G\u0247\3\2\2\2I\u0254\3\2\2\2")
buf.write("K\u0266\3\2\2\2M\u026b\3\2\2\2O\u026f\3\2\2\2Q\u0277\3")
buf.write("\2\2\2S\u027f\3\2\2\2U\u028a\3\2\2\2W\u028f\3\2\2\2Y\u0298")
buf.write("\3\2\2\2[\u029c\3\2\2\2]\u02a1\3\2\2\2_\u02a5\3\2\2\2")
buf.write("a\u02a9\3\2\2\2c\u02b1\3\2\2\2e\u02b7\3\2\2\2g\u02bd\3")
buf.write("\2\2\2i\u02c3\3\2\2\2k\u02c7\3\2\2\2m\u02cc\3\2\2\2o\u02d1")
buf.write("\3\2\2\2q\u02d7\3\2\2\2s\u02de\3\2\2\2u\u02e3\3\2\2\2")
buf.write("w\u02e6\3\2\2\2y\u02ea\3\2\2\2{\u02ed\3\2\2\2}\u02f3\3")
buf.write("\2\2\2\177\u02fb\3\2\2\2\u0081\u0305\3\2\2\2\u0083\u0308")
buf.write("\3\2\2\2\u0085\u030d\3\2\2\2\u0087\u0312\3\2\2\2\u0089")
buf.write("\u0318\3\2\2\2\u008b\u031b\3\2\2\2\u008d\u031f\3\2\2\2")
buf.write("\u008f\u0325\3\2\2\2\u0091\u032a\3\2\2\2\u0093\u0330\3")
buf.write("\2\2\2\u0095\u0334\3\2\2\2\u0097\u0340\3\2\2\2\u0099\u0344")
buf.write("\3\2\2\2\u009b\u034b\3\2\2\2\u009d\u0351\3\2\2\2\u009f")
buf.write("\u0357\3\2\2\2\u00a1\u035d\3\2\2\2\u00a3\u0361\3\2\2\2")
buf.write("\u00a5\u0366\3\2\2\2\u00a7\u036e\3\2\2\2\u00a9\u0371\3")
buf.write("\2\2\2\u00ab\u0374\3\2\2\2\u00ad\u037a\3\2\2\2\u00af\u0380")
buf.write("\3\2\2\2\u00b1\u0385\3\2\2\2\u00b3\u038f\3\2\2\2\u00b5")
buf.write("\u039f\3\2\2\2\u00b7\u03af\3\2\2\2\u00b9\u03b2\3\2\2\2")
buf.write("\u00bb\u03bb\3\2\2\2\u00bd\u03c1\3\2\2\2\u00bf\u03c6\3")
buf.write("\2\2\2\u00c1\u03cd\3\2\2\2\u00c3\u03d2\3\2\2\2\u00c5\u03d8")
buf.write("\3\2\2\2\u00c7\u03de\3\2\2\2\u00c9\u03e9\3\2\2\2\u00cb")
buf.write("\u03f0\3\2\2\2\u00cd\u03f7\3\2\2\2\u00cf\u03fe\3\2\2\2")
buf.write("\u00d1\u0403\3\2\2\2\u00d3\u0408\3\2\2\2\u00d5\u040c\3")
buf.write("\2\2\2\u00d7\u0411\3\2\2\2\u00d9\u0415\3\2\2\2\u00db\u041a")
buf.write("\3\2\2\2\u00dd\u0421\3\2\2\2\u00df\u0425\3\2\2\2\u00e1")
buf.write("\u042c\3\2\2\2\u00e3\u0432\3\2\2\2\u00e5\u043c\3\2\2\2")
buf.write("\u00e7\u0440\3\2\2\2\u00e9\u0444\3\2\2\2\u00eb\u0449\3")
buf.write("\2\2\2\u00ed\u044e\3\2\2\2\u00ef\u0458\3\2\2\2\u00f1\u045c")
buf.write("\3\2\2\2\u00f3\u0461\3\2\2\2\u00f5\u0466\3\2\2\2\u00f7")
buf.write("\u046c\3\2\2\2\u00f9\u0475\3\2\2\2\u00fb\u047e\3\2\2\2")
buf.write("\u00fd\u0484\3\2\2\2\u00ff\u048a\3\2\2\2\u0101\u0490\3")
buf.write("\2\2\2\u0103\u0494\3\2\2\2\u0105\u049c\3\2\2\2\u0107\u04a5")
buf.write("\3\2\2\2\u0109\u04ae\3\2\2\2\u010b\u04b6\3\2\2\2\u010d")
buf.write("\u04bb\3\2\2\2\u010f\u04c1\3\2\2\2\u0111\u04c9\3\2\2\2")
buf.write("\u0113\u04cb\3\2\2\2\u0115\u04cf\3\2\2\2\u0117\u04d2\3")
buf.write("\2\2\2\u0119\u04d5\3\2\2\2\u011b\u04db\3\2\2\2\u011d\u04dd")
buf.write("\3\2\2\2\u011f\u04e3\3\2\2\2\u0121\u04e5\3\2\2\2\u0123")
buf.write("\u04e7\3\2\2\2\u0125\u04e9\3\2\2\2\u0127\u04eb\3\2\2\2")
buf.write("\u0129\u04ed\3\2\2\2\u012b\u04ef\3\2\2\2\u012d\u04f1\3")
buf.write("\2\2\2\u012f\u04f3\3\2\2\2\u0131\u04f5\3\2\2\2\u0133\u04f8")
buf.write("\3\2\2\2\u0135\u04fa\3\2\2\2\u0137\u04fc\3\2\2\2\u0139")
buf.write("\u0508\3\2\2\2\u013b\u0517\3\2\2\2\u013d\u0519\3\2\2\2")
buf.write("\u013f\u051d\3\2\2\2\u0141\u0525\3\2\2\2\u0143\u0527\3")
buf.write("\2\2\2\u0145\u053d\3\2\2\2\u0147\u055d\3\2\2\2\u0149\u0561")
buf.write("\3\2\2\2\u014b\u0563\3\2\2\2\u014d\u0565\3\2\2\2\u014f")
buf.write("\u0573\3\2\2\2\u0151\u057e\3\2\2\2\u0153\u0587\3\2\2\2")
buf.write("\u0155\u0589\3\2\2\2\u0157\u058b\3\2\2\2\u0159\u058d\3")
buf.write("\2\2\2\u015b\u058f\3\2\2\2\u015d\u0591\3\2\2\2\u015f\u0593")
buf.write("\3\2\2\2\u0161\u0595\3\2\2\2\u0163\u0597\3\2\2\2\u0165")
buf.write("\u0599\3\2\2\2\u0167\u059b\3\2\2\2\u0169\u059d\3\2\2\2")
buf.write("\u016b\u059f\3\2\2\2\u016d\u05a1\3\2\2\2\u016f\u05a3\3")
buf.write("\2\2\2\u0171\u05a5\3\2\2\2\u0173\u05a7\3\2\2\2\u0175\u05a9")
buf.write("\3\2\2\2\u0177\u05ab\3\2\2\2\u0179\u05ad\3\2\2\2\u017b")
buf.write("\u05af\3\2\2\2\u017d\u05b1\3\2\2\2\u017f\u05b3\3\2\2\2")
buf.write("\u0181\u05b5\3\2\2\2\u0183\u05b7\3\2\2\2\u0185\u05b9\3")
buf.write("\2\2\2\u0187\u05bb\3\2\2\2\u0189\u05bd\3\2\2\2\u018b\u05bf")
buf.write("\3\2\2\2\u018d\u05c1\3\2\2\2\u018f\u05d2\3\2\2\2\u0191")
buf.write("\u05d9\3\2\2\2\u0193\u05e9\3\2\2\2\u0195\u05f0\3\2\2\2")
buf.write("\u0197\u0198\7=\2\2\u0198\4\3\2\2\2\u0199\u019a\7*\2\2")
buf.write("\u019a\6\3\2\2\2\u019b\u019c\7+\2\2\u019c\b\3\2\2\2\u019d")
buf.write("\u019e\7.\2\2\u019e\n\3\2\2\2\u019f\u01a0\7\60\2\2\u01a0")
buf.write("\f\3\2\2\2\u01a1\u01a2\7$\2\2\u01a2\16\3\2\2\2\u01a3\u01a4")
buf.write("\5\u0159\u00ad\2\u01a4\u01a5\5\u015b\u00ae\2\u01a5\u01a6")
buf.write("\5\u017d\u00bf\2\u01a6\20\3\2\2\2\u01a7\u01a8\5\u0159")
buf.write("\u00ad\2\u01a8\u01a9\5\u015d\u00af\2\u01a9\u01aa\5\u0175")
buf.write("\u00bb\2\u01aa\u01ab\5\u017d\u00bf\2\u01ab\22\3\2\2\2")
buf.write("\u01ac\u01ad\5\u0159\u00ad\2\u01ad\u01ae\5\u016f\u00b8")
buf.write("\2\u01ae\u01af\5\u016f\u00b8\2\u01af\24\3\2\2\2\u01b0")
buf.write("\u01b1\5\u0159\u00ad\2\u01b1\u01b2\5\u0173\u00ba\2\u01b2")
buf.write("\u01b3\5\u015f\u00b0\2\u01b3\26\3\2\2\2\u01b4\u01b5\5")
buf.write("\u0159\u00ad\2\u01b5\u01b6\5\u0173\u00ba\2\u01b6\u01b7")
buf.write("\5\u017f\u00c0\2\u01b7\u01b8\5\u0169\u00b5\2\u01b8\30")
buf.write("\3\2\2\2\u01b9\u01ba\5\u0159\u00ad\2\u01ba\u01bb\5\u017d")
buf.write("\u00bf\2\u01bb\32\3\2\2\2\u01bc\u01bd\5\u0159\u00ad\2")
buf.write("\u01bd\u01be\5\u017d\u00bf\2\u01be\u01bf\5\u015d\u00af")
buf.write("\2\u01bf\34\3\2\2\2\u01c0\u01c1\5\u0159\u00ad\2\u01c1")
buf.write("\u01c2\5\u017d\u00bf\2\u01c2\u01c3\5\u0169\u00b5\2\u01c3")
buf.write("\u01c4\5\u0173\u00ba\2\u01c4\36\3\2\2\2\u01c5\u01c6\5")
buf.write("\u0159\u00ad\2\u01c6\u01c7\5\u017f\u00c0\2\u01c7\u01c8")
buf.write("\5\u0159\u00ad\2\u01c8\u01c9\5\u0173\u00ba\2\u01c9 \3")
buf.write("\2\2\2\u01ca\u01cb\5\u0159\u00ad\2\u01cb\u01cc\5\u017f")
buf.write("\u00c0\2\u01cc\u01cd\5\u0159\u00ad\2\u01cd\u01ce\5\u0173")
buf.write("\u00ba\2\u01ce\u01cf\5\u0167\u00b4\2\u01cf\"\3\2\2\2\u01d0")
buf.write("\u01d1\5\u0159\u00ad\2\u01d1\u01d2\5\u0183\u00c2\2\u01d2")
buf.write("\u01d3\5\u0165\u00b3\2\u01d3$\3\2\2\2\u01d4\u01d5\5\u015b")
buf.write("\u00ae\2\u01d5\u01d6\5\u0161\u00b1\2\u01d6\u01d7\5\u017f")
buf.write("\u00c0\2\u01d7\u01d8\5\u0185\u00c3\2\u01d8\u01d9\5\u0161")
buf.write("\u00b1\2\u01d9\u01da\5\u0161\u00b1\2\u01da\u01db\5\u0173")
buf.write("\u00ba\2\u01db&\3\2\2\2\u01dc\u01dd\5\u015b\u00ae\2\u01dd")
buf.write("\u01de\5\u0175\u00bb\2\u01de\u01df\5\u0175\u00bb\2\u01df")
buf.write("\u01e0\5\u016f\u00b8\2\u01e0\u01e1\5\u0161\u00b1\2\u01e1")
buf.write("\u01e2\5\u0159\u00ad\2\u01e2\u01e3\5\u0173\u00ba\2\u01e3")
buf.write("(\3\2\2\2\u01e4\u01e5\5\u015b\u00ae\2\u01e5\u01e6\5\u0189")
buf.write("\u00c5\2\u01e6*\3\2\2\2\u01e7\u01e8\5\u015d\u00af\2\u01e8")
buf.write("\u01e9\5\u0159\u00ad\2\u01e9\u01ea\5\u017d\u00bf\2\u01ea")
buf.write("\u01eb\5\u0161\u00b1\2\u01eb,\3\2\2\2\u01ec\u01ed\5\u015d")
buf.write("\u00af\2\u01ed\u01ee\5\u0159\u00ad\2\u01ee\u01ef\5\u017d")
buf.write("\u00bf\2\u01ef\u01f0\5\u017f\u00c0\2\u01f0.\3\2\2\2\u01f1")
buf.write("\u01f2\5\u015d\u00af\2\u01f2\u01f3\5\u0167\u00b4\2\u01f3")
buf.write("\u01f4\5\u0159\u00ad\2\u01f4\u01f5\5\u017b\u00be\2\u01f5")
buf.write("\60\3\2\2\2\u01f6\u01f7\5\u015d\u00af\2\u01f7\u01f8\5")
buf.write("\u0167\u00b4\2\u01f8\u01f9\5\u0159\u00ad\2\u01f9\u01fa")
buf.write("\5\u017b\u00be\2\u01fa\u01fb\5\u0135\u009b\2\u01fb\u01fc")
buf.write("\5\u016f\u00b8\2\u01fc\u01fd\5\u0161\u00b1\2\u01fd\u01fe")
buf.write("\5\u0173\u00ba\2\u01fe\u01ff\5\u0165\u00b3\2\u01ff\u0200")
buf.write("\5\u017f\u00c0\2\u0200\u0201\5\u0167\u00b4\2\u0201\62")
buf.write("\3\2\2\2\u0202\u0203\5\u015d\u00af\2\u0203\u0204\5\u0161")
buf.write("\u00b1\2\u0204\u0205\5\u0169\u00b5\2\u0205\u0206\5\u016f")
buf.write("\u00b8\2\u0206\64\3\2\2\2\u0207\u0208\5\u015d\u00af\2")
buf.write("\u0208\u0209\5\u0161\u00b1\2\u0209\u020a\5\u0169\u00b5")
buf.write("\2\u020a\u020b\5\u016f\u00b8\2\u020b\u020c\5\u0169\u00b5")
buf.write("\2\u020c\u020d\5\u0173\u00ba\2\u020d\u020e\5\u0165\u00b3")
buf.write("\2\u020e\66\3\2\2\2\u020f\u0210\5\u015d\u00af\2\u0210")
buf.write("\u0211\5\u0167\u00b4\2\u0211\u0212\5\u0175\u00bb\2\u0212")
buf.write("\u0213\5\u0175\u00bb\2\u0213\u0214\5\u017d\u00bf\2\u0214")
buf.write("\u0215\5\u0161\u00b1\2\u02158\3\2\2\2\u0216\u0217\5\u015d")
buf.write("\u00af\2\u0217\u0218\5\u0175\u00bb\2\u0218\u0219\5\u0159")
buf.write("\u00ad\2\u0219\u021a\5\u016f\u00b8\2\u021a\u021b\5\u0161")
buf.write("\u00b1\2\u021b\u021c\5\u017d\u00bf\2\u021c\u021d\5\u015d")
buf.write("\u00af\2\u021d\u021e\5\u0161\u00b1\2\u021e:\3\2\2\2\u021f")
buf.write("\u0220\5\u015d\u00af\2\u0220\u0221\5\u0175\u00bb\2\u0221")
buf.write("\u0222\5\u0173\u00ba\2\u0222\u0223\5\u015d\u00af\2\u0223")
buf.write("\u0224\5\u0159\u00ad\2\u0224\u0225\5\u017f\u00c0\2\u0225")
buf.write("<\3\2\2\2\u0226\u0227\5\u015d\u00af\2\u0227\u0228\5\u0175")
buf.write("\u00bb\2\u0228\u0229\5\u017d\u00bf\2\u0229>\3\2\2\2\u022a")
buf.write("\u022b\5\u015d\u00af\2\u022b\u022c\5\u0175\u00bb\2\u022c")
buf.write("\u022d\5\u017f\u00c0\2\u022d@\3\2\2\2\u022e\u022f\5\u015d")
buf.write("\u00af\2\u022f\u0230\5\u0175\u00bb\2\u0230\u0231\5\u0181")
buf.write("\u00c1\2\u0231\u0232\5\u0173\u00ba\2\u0232\u0233\5\u017f")
buf.write("\u00c0\2\u0233B\3\2\2\2\u0234\u0235\5\u015d\u00af\2\u0235")
buf.write("\u0236\5\u017b\u00be\2\u0236\u0237\5\u0175\u00bb\2\u0237")
buf.write("\u0238\5\u017d\u00bf\2\u0238\u0239\5\u017d\u00bf\2\u0239")
buf.write("D\3\2\2\2\u023a\u023b\5\u015d\u00af\2\u023b\u023c\5\u0181")
buf.write("\u00c1\2\u023c\u023d\5\u017b\u00be\2\u023d\u023e\5\u017b")
buf.write("\u00be\2\u023e\u023f\5\u0161\u00b1\2\u023f\u0240\5\u0173")
buf.write("\u00ba\2\u0240\u0241\5\u017f\u00c0\2\u0241\u0242\5\u0135")
buf.write("\u009b\2\u0242\u0243\5\u015f\u00b0\2\u0243\u0244\5\u0159")
buf.write("\u00ad\2\u0244\u0245\5\u017f\u00c0\2\u0245\u0246\5\u0161")
buf.write("\u00b1\2\u0246F\3\2\2\2\u0247\u0248\5\u015d\u00af\2\u0248")
buf.write("\u0249\5\u0181\u00c1\2\u0249\u024a\5\u017b\u00be\2\u024a")
buf.write("\u024b\5\u017b\u00be\2\u024b\u024c\5\u0161\u00b1\2\u024c")
buf.write("\u024d\5\u0173\u00ba\2\u024d\u024e\5\u017f\u00c0\2\u024e")
buf.write("\u024f\5\u0135\u009b\2\u024f\u0250\5\u017f\u00c0\2\u0250")
buf.write("\u0251\5\u0169\u00b5\2\u0251\u0252\5\u0171\u00b9\2\u0252")
buf.write("\u0253\5\u0161\u00b1\2\u0253H\3\2\2\2\u0254\u0255\5\u015d")
buf.write("\u00af\2\u0255\u0256\5\u0181\u00c1\2\u0256\u0257\5\u017b")
buf.write("\u00be\2\u0257\u0258\5\u017b\u00be\2\u0258\u0259\5\u0161")
buf.write("\u00b1\2\u0259\u025a\5\u0173\u00ba\2\u025a\u025b\5\u017f")
buf.write("\u00c0\2\u025b\u025c\5\u0135\u009b\2\u025c\u025d\5\u017f")
buf.write("\u00c0\2\u025d\u025e\5\u0169\u00b5\2\u025e\u025f\5\u0171")
buf.write("\u00b9\2\u025f\u0260\5\u0161\u00b1\2\u0260\u0261\5\u017d")
buf.write("\u00bf\2\u0261\u0262\5\u017f\u00c0\2\u0262\u0263\5\u0159")
buf.write("\u00ad\2\u0263\u0264\5\u0171\u00b9\2\u0264\u0265\5\u0177")
buf.write("\u00bc\2\u0265J\3\2\2\2\u0266\u0267\5\u015f\u00b0\2\u0267")
buf.write("\u0268\5\u0159\u00ad\2\u0268\u0269\5\u017f\u00c0\2\u0269")
buf.write("\u026a\5\u0161\u00b1\2\u026aL\3\2\2\2\u026b\u026c\5\u015f")
buf.write("\u00b0\2\u026c\u026d\5\u0159\u00ad\2\u026d\u026e\5\u0189")
buf.write("\u00c5\2\u026eN\3\2\2\2\u026f\u0270\5\u015f\u00b0\2\u0270")
buf.write("\u0271\5\u0159\u00ad\2\u0271\u0272\5\u0189\u00c5\2\u0272")
buf.write("\u0273\5\u0173\u00ba\2\u0273\u0274\5\u0159\u00ad\2\u0274")
buf.write("\u0275\5\u0171\u00b9\2\u0275\u0276\5\u0161\u00b1\2\u0276")
buf.write("P\3\2\2\2\u0277\u0278\5\u015f\u00b0\2\u0278\u0279\5\u0161")
buf.write("\u00b1\2\u0279\u027a\5\u0165\u00b3\2\u027a\u027b\5\u017b")
buf.write("\u00be\2\u027b\u027c\5\u0161\u00b1\2\u027c\u027d\5\u0161")
buf.write("\u00b1\2\u027d\u027e\5\u017d\u00bf\2\u027eR\3\2\2\2\u027f")
buf.write("\u0280\5\u015f\u00b0\2\u0280\u0281\5\u0161\u00b1\2\u0281")
buf.write("\u0282\5\u0173\u00ba\2\u0282\u0283\5\u017d\u00bf\2\u0283")
buf.write("\u0284\5\u0161\u00b1\2\u0284\u0285\7a\2\2\u0285\u0286")
buf.write("\5\u017b\u00be\2\u0286\u0287\5\u0159\u00ad\2\u0287\u0288")
buf.write("\5\u0173\u00ba\2\u0288\u0289\5\u016d\u00b7\2\u0289T\3")
buf.write("\2\2\2\u028a\u028b\5\u015f\u00b0\2\u028b\u028c\5\u0161")
buf.write("\u00b1\2\u028c\u028d\5\u017d\u00bf\2\u028d\u028e\5\u015d")
buf.write("\u00af\2\u028eV\3\2\2\2\u028f\u0290\5\u015f\u00b0\2\u0290")
buf.write("\u0291\5\u0169\u00b5\2\u0291\u0292\5\u017d\u00bf\2\u0292")
buf.write("\u0293\5\u017f\u00c0\2\u0293\u0294\5\u0169\u00b5\2\u0294")
buf.write("\u0295\5\u0173\u00ba\2\u0295\u0296\5\u015d\u00af\2\u0296")
buf.write("\u0297\5\u017f\u00c0\2\u0297X\3\2\2\2\u0298\u0299\5\u015f")
buf.write("\u00b0\2\u0299\u029a\5\u0169\u00b5\2\u029a\u029b\5\u0183")
buf.write("\u00c2\2\u029bZ\3\2\2\2\u029c\u029d\5\u0161\u00b1\2\u029d")
buf.write("\u029e\5\u016f\u00b8\2\u029e\u029f\5\u017d\u00bf\2\u029f")
buf.write("\u02a0\5\u0161\u00b1\2\u02a0\\\3\2\2\2\u02a1\u02a2\5\u0161")
buf.write("\u00b1\2\u02a2\u02a3\5\u0173\u00ba\2\u02a3\u02a4\5\u015f")
buf.write("\u00b0\2\u02a4^\3\2\2\2\u02a5\u02a6\5\u0161\u00b1\2\u02a6")
buf.write("\u02a7\5\u0187\u00c4\2\u02a7\u02a8\5\u0177\u00bc\2\u02a8")
buf.write("`\3\2\2\2\u02a9\u02aa\5\u0161\u00b1\2\u02aa\u02ab\5\u0187")
buf.write("\u00c4\2\u02ab\u02ac\5\u017f\u00c0\2\u02ac\u02ad\5\u017b")
buf.write("\u00be\2\u02ad\u02ae\5\u0159\u00ad\2\u02ae\u02af\5\u015d")
buf.write("\u00af\2\u02af\u02b0\5\u017f\u00c0\2\u02b0b\3\2\2\2\u02b1")
buf.write("\u02b2\5\u0163\u00b2\2\u02b2\u02b3\5\u0159\u00ad\2\u02b3")
buf.write("\u02b4\5\u016f\u00b8\2\u02b4\u02b5\5\u017d\u00bf\2\u02b5")
buf.write("\u02b6\5\u0161\u00b1\2\u02b6d\3\2\2\2\u02b7\u02b8\5\u0163")
buf.write("\u00b2\2\u02b8\u02b9\5\u016f\u00b8\2\u02b9\u02ba\5\u0175")
buf.write("\u00bb\2\u02ba\u02bb\5\u0159\u00ad\2\u02bb\u02bc\5\u017f")
buf.write("\u00c0\2\u02bcf\3\2\2\2\u02bd\u02be\5\u0163\u00b2\2\u02be")
buf.write("\u02bf\5\u016f\u00b8\2\u02bf\u02c0\5\u0175\u00bb\2\u02c0")
buf.write("\u02c1\5\u0175\u00bb\2\u02c1\u02c2\5\u017b\u00be\2\u02c2")
buf.write("h\3\2\2\2\u02c3\u02c4\5\u0163\u00b2\2\u02c4\u02c5\5\u0175")
buf.write("\u00bb\2\u02c5\u02c6\5\u017b\u00be\2\u02c6j\3\2\2\2\u02c7")
buf.write("\u02c8\5\u0163\u00b2\2\u02c8\u02c9\5\u017b\u00be\2\u02c9")
buf.write("\u02ca\5\u0175\u00bb\2\u02ca\u02cb\5\u0171\u00b9\2\u02cb")
buf.write("l\3\2\2\2\u02cc\u02cd\5\u0163\u00b2\2\u02cd\u02ce\5\u0181")
buf.write("\u00c1\2\u02ce\u02cf\5\u016f\u00b8\2\u02cf\u02d0\5\u016f")
buf.write("\u00b8\2\u02d0n\3\2\2\2\u02d1\u02d2\5\u0165\u00b3\2\u02d2")
buf.write("\u02d3\5\u017b\u00be\2\u02d3\u02d4\5\u0175\u00bb\2\u02d4")
buf.write("\u02d5\5\u0181\u00c1\2\u02d5\u02d6\5\u0177\u00bc\2\u02d6")
buf.write("p\3\2\2\2\u02d7\u02d8\5\u0167\u00b4\2\u02d8\u02d9\5\u0159")
buf.write("\u00ad\2\u02d9\u02da\5\u0183\u00c2\2\u02da\u02db\5\u0169")
buf.write("\u00b5\2\u02db\u02dc\5\u0173\u00ba\2\u02dc\u02dd\5\u0165")
buf.write("\u00b3\2\u02ddr\3\2\2\2\u02de\u02df\5\u0167\u00b4\2\u02df")
buf.write("\u02e0\5\u0175\u00bb\2\u02e0\u02e1\5\u0181\u00c1\2\u02e1")
buf.write("\u02e2\5\u017b\u00be\2\u02e2t\3\2\2\2\u02e3\u02e4\5\u0169")
buf.write("\u00b5\2\u02e4\u02e5\5\u0163\u00b2\2\u02e5v\3\2\2\2\u02e6")
buf.write("\u02e7\5\u0169\u00b5\2\u02e7\u02e8\5\u0169\u00b5\2\u02e8")
buf.write("\u02e9\5\u0163\u00b2\2\u02e9x\3\2\2\2\u02ea\u02eb\5\u0169")
buf.write("\u00b5\2\u02eb\u02ec\5\u0173\u00ba\2\u02ecz\3\2\2\2\u02ed")
buf.write("\u02ee\5\u0169\u00b5\2\u02ee\u02ef\5\u0173\u00ba\2\u02ef")
buf.write("\u02f0\5\u0173\u00ba\2\u02f0\u02f1\5\u0161\u00b1\2\u02f1")
buf.write("\u02f2\5\u017b\u00be\2\u02f2|\3\2\2\2\u02f3\u02f4\5\u0169")
buf.write("\u00b5\2\u02f4\u02f5\5\u0173\u00ba\2\u02f5\u02f6\5\u017f")
buf.write("\u00c0\2\u02f6\u02f7\5\u0161\u00b1\2\u02f7\u02f8\5\u0165")
buf.write("\u00b3\2\u02f8\u02f9\5\u0161\u00b1\2\u02f9\u02fa\5\u017b")
buf.write("\u00be\2\u02fa~\3\2\2\2\u02fb\u02fc\5\u0169\u00b5\2\u02fc")
buf.write("\u02fd\5\u0173\u00ba\2\u02fd\u02fe\5\u017f\u00c0\2\u02fe")
buf.write("\u02ff\5\u0161\u00b1\2\u02ff\u0300\5\u017b\u00be\2\u0300")
buf.write("\u0301\5\u017d\u00bf\2\u0301\u0302\5\u0161\u00b1\2\u0302")
buf.write("\u0303\5\u015d\u00af\2\u0303\u0304\5\u017f\u00c0\2\u0304")
buf.write("\u0080\3\2\2\2\u0305\u0306\5\u0169\u00b5\2\u0306\u0307")
buf.write("\5\u017d\u00bf\2\u0307\u0082\3\2\2\2\u0308\u0309\5\u016b")
buf.write("\u00b6\2\u0309\u030a\5\u0175\u00bb\2\u030a\u030b\5\u0169")
buf.write("\u00b5\2\u030b\u030c\5\u0173\u00ba\2\u030c\u0084\3\2\2")
buf.write("\2\u030d\u030e\5\u016f\u00b8\2\u030e\u030f\5\u0161\u00b1")
buf.write("\2\u030f\u0310\5\u0163\u00b2\2\u0310\u0311\5\u017f\u00c0")
buf.write("\2\u0311\u0086\3\2\2\2\u0312\u0313\5\u016f\u00b8\2\u0313")
buf.write("\u0314\5\u0169\u00b5\2\u0314\u0315\5\u0171\u00b9\2\u0315")
buf.write("\u0316\5\u0169\u00b5\2\u0316\u0317\5\u017f\u00c0\2\u0317")
buf.write("\u0088\3\2\2\2\u0318\u0319\5\u016f\u00b8\2\u0319\u031a")
buf.write("\5\u0173\u00ba\2\u031a\u008a\3\2\2\2\u031b\u031c\5\u016f")
buf.write("\u00b8\2\u031c\u031d\5\u0175\u00bb\2\u031d\u031e\5\u0165")
buf.write("\u00b3\2\u031e\u008c\3\2\2\2\u031f\u0320\5\u016f\u00b8")
buf.write("\2\u0320\u0321\5\u0175\u00bb\2\u0321\u0322\5\u0165\u00b3")
buf.write("\2\u0322\u0323\7\63\2\2\u0323\u0324\7\62\2\2\u0324\u008e")
buf.write("\3\2\2\2\u0325\u0326\5\u016f\u00b8\2\u0326\u0327\5\u0175")
buf.write("\u00bb\2\u0327\u0328\5\u0165\u00b3\2\u0328\u0329\7\64")
buf.write("\2\2\u0329\u0090\3\2\2\2\u032a\u032b\5\u016f\u00b8\2\u032b")
buf.write("\u032c\5\u0175\u00bb\2\u032c\u032d\5\u0185\u00c3\2\u032d")
buf.write("\u032e\5\u0161\u00b1\2\u032e\u032f\5\u017b\u00be\2\u032f")
buf.write("\u0092\3\2\2\2\u0330\u0331\5\u0171\u00b9\2\u0331\u0332")
buf.write("\5\u0159\u00ad\2\u0332\u0333\5\u0187\u00c4\2\u0333\u0094")
buf.write("\3\2\2\2\u0334\u0335\5\u0171\u00b9\2\u0335\u0336\5\u0169")
buf.write("\u00b5\2\u0336\u0337\5\u015d\u00af\2\u0337\u0338\5\u017b")
buf.write("\u00be\2\u0338\u0339\5\u0175\u00bb\2\u0339\u033a\5\u017d")
buf.write("\u00bf\2\u033a\u033b\5\u0161\u00b1\2\u033b\u033c\5\u015d")
buf.write("\u00af\2\u033c\u033d\5\u0175\u00bb\2\u033d\u033e\5\u0173")
buf.write("\u00ba\2\u033e\u033f\5\u015f\u00b0\2\u033f\u0096\3\2\2")
buf.write("\2\u0340\u0341\5\u0171\u00b9\2\u0341\u0342\5\u0169\u00b5")
buf.write("\2\u0342\u0343\5\u0173\u00ba\2\u0343\u0098\3\2\2\2\u0344")
buf.write("\u0345\5\u0171\u00b9\2\u0345\u0346\5\u0169\u00b5\2\u0346")
buf.write("\u0347\5\u0173\u00ba\2\u0347\u0348\5\u0181\u00c1\2\u0348")
buf.write("\u0349\5\u017f\u00c0\2\u0349\u034a\5\u0161\u00b1\2\u034a")
buf.write("\u009a\3\2\2\2\u034b\u034c\5\u0171\u00b9\2\u034c\u034d")
buf.write("\5\u0175\u00bb\2\u034d\u034e\5\u0173\u00ba\2\u034e\u034f")
buf.write("\5\u017f\u00c0\2\u034f\u0350\5\u0167\u00b4\2\u0350\u009c")
buf.write("\3\2\2\2\u0351\u0352\5\u0173\u00ba\2\u0352\u0353\5\u015d")
buf.write("\u00af\2\u0353\u0354\5\u0167\u00b4\2\u0354\u0355\5\u0159")
buf.write("\u00ad\2\u0355\u0356\5\u017b\u00be\2\u0356\u009e\3\2\2")
buf.write("\2\u0357\u0358\5\u0173\u00ba\2\u0358\u0359\5\u0161\u00b1")
buf.write("\2\u0359\u035a\5\u0185\u00c3\2\u035a\u035b\5\u0169\u00b5")
buf.write("\2\u035b\u035c\5\u015f\u00b0\2\u035c\u00a0\3\2\2\2\u035d")
buf.write("\u035e\5\u0173\u00ba\2\u035e\u035f\5\u0175\u00bb\2\u035f")
buf.write("\u0360\5\u017f\u00c0\2\u0360\u00a2\3\2\2\2\u0361\u0362")
buf.write("\5\u0173\u00ba\2\u0362\u0363\5\u0181\u00c1\2\u0363\u0364")
buf.write("\5\u016f\u00b8\2\u0364\u0365\5\u016f\u00b8\2\u0365\u00a4")
buf.write("\3\2\2\2\u0366\u0367\5\u0173\u00ba\2\u0367\u0368\5\u0181")
buf.write("\u00c1\2\u0368\u0369\5\u0171\u00b9\2\u0369\u036a\5\u0161")
buf.write("\u00b1\2\u036a\u036b\5\u017b\u00be\2\u036b\u036c\5\u0169")
buf.write("\u00b5\2\u036c\u036d\5\u015d\u00af\2\u036d\u00a6\3\2\2")
buf.write("\2\u036e\u036f\5\u0175\u00bb\2\u036f\u0370\5\u0173\u00ba")
buf.write("\2\u0370\u00a8\3\2\2\2\u0371\u0372\5\u0175\u00bb\2\u0372")
buf.write("\u0373\5\u017b\u00be\2\u0373\u00aa\3\2\2\2\u0374\u0375")
buf.write("\5\u0175\u00bb\2\u0375\u0376\5\u017b\u00be\2\u0376\u0377")
buf.write("\5\u015f\u00b0\2\u0377\u0378\5\u0161\u00b1\2\u0378\u0379")
buf.write("\5\u017b\u00be\2\u0379\u00ac\3\2\2\2\u037a\u037b\5\u0175")
buf.write("\u00bb\2\u037b\u037c\5\u0181\u00c1\2\u037c\u037d\5\u017f")
buf.write("\u00c0\2\u037d\u037e\5\u0161\u00b1\2\u037e\u037f\5\u017b")
buf.write("\u00be\2\u037f\u00ae\3\2\2\2\u0380\u0381\5\u0175\u00bb")
buf.write("\2\u0381\u0382\5\u0183\u00c2\2\u0382\u0383\5\u0161\u00b1")
buf.write("\2\u0383\u0384\5\u017b\u00be\2\u0384\u00b0\3\2\2\2\u0385")
buf.write("\u0386\5\u0177\u00bc\2\u0386\u0387\5\u0159\u00ad\2\u0387")
buf.write("\u0388\5\u017b\u00be\2\u0388\u0389\5\u017f\u00c0\2\u0389")
buf.write("\u038a\5\u0169\u00b5\2\u038a\u038b\5\u017f\u00c0\2\u038b")
buf.write("\u038c\5\u0169\u00b5\2\u038c\u038d\5\u0175\u00bb\2\u038d")
buf.write("\u038e\5\u0173\u00ba\2\u038e\u00b2\3\2\2\2\u038f\u0390")
buf.write("\5\u0177\u00bc\2\u0390\u0391\5\u0161\u00b1\2\u0391\u0392")
buf.write("\5\u017b\u00be\2\u0392\u0393\5\u015d\u00af\2\u0393\u0394")
buf.write("\5\u0161\u00b1\2\u0394\u0395\5\u0173\u00ba\2\u0395\u0396")
buf.write("\5\u017f\u00c0\2\u0396\u0397\5\u0169\u00b5\2\u0397\u0398")
buf.write("\5\u016f\u00b8\2\u0398\u0399\5\u0161\u00b1\2\u0399\u039a")
buf.write("\7a\2\2\u039a\u039b\5\u015d\u00af\2\u039b\u039c\5\u0175")
buf.write("\u00bb\2\u039c\u039d\5\u0173\u00ba\2\u039d\u039e\5\u017f")
buf.write("\u00c0\2\u039e\u00b4\3\2\2\2\u039f\u03a0\5\u0177\u00bc")
buf.write("\2\u03a0\u03a1\5\u0161\u00b1\2\u03a1\u03a2\5\u017b\u00be")
buf.write("\2\u03a2\u03a3\5\u015d\u00af\2\u03a3\u03a4\5\u0161\u00b1")
buf.write("\2\u03a4\u03a5\5\u0173\u00ba\2\u03a5\u03a6\5\u017f\u00c0")
buf.write("\2\u03a6\u03a7\5\u0169\u00b5\2\u03a7\u03a8\5\u016f\u00b8")
buf.write("\2\u03a8\u03a9\5\u0161\u00b1\2\u03a9\u03aa\7a\2\2\u03aa")
buf.write("\u03ab\5\u015f\u00b0\2\u03ab\u03ac\5\u0169\u00b5\2\u03ac")
buf.write("\u03ad\5\u017d\u00bf\2\u03ad\u03ae\5\u015d\u00af\2\u03ae")
buf.write("\u00b6\3\2\2\2\u03af\u03b0\5\u0177\u00bc\2\u03b0\u03b1")
buf.write("\5\u0169\u00b5\2\u03b1\u00b8\3\2\2\2\u03b2\u03b3\5\u0177")
buf.write("\u00bc\2\u03b3\u03b4\5\u0175\u00bb\2\u03b4\u03b5\5\u017d")
buf.write("\u00bf\2\u03b5\u03b6\5\u0169\u00b5\2\u03b6\u03b7\5\u017f")
buf.write("\u00c0\2\u03b7\u03b8\5\u0169\u00b5\2\u03b8\u03b9\5\u0175")
buf.write("\u00bb\2\u03b9\u03ba\5\u0173\u00ba\2\u03ba\u00ba\3\2\2")
buf.write("\2\u03bb\u03bc\5\u0177\u00bc\2\u03bc\u03bd\5\u0175\u00bb")
buf.write("\2\u03bd\u03be\5\u0185\u00c3\2\u03be\u03bf\5\u0161\u00b1")
buf.write("\2\u03bf\u03c0\5\u017b\u00be\2\u03c0\u00bc\3\2\2\2\u03c1")
buf.write("\u03c2\5\u017b\u00be\2\u03c2\u03c3\5\u0159\u00ad\2\u03c3")
buf.write("\u03c4\5\u0173\u00ba\2\u03c4\u03c5\5\u015f\u00b0\2\u03c5")
buf.write("\u00be\3\2\2\2\u03c6\u03c7\5\u017b\u00be\2\u03c7\u03c8")
buf.write("\5\u0159\u00ad\2\u03c8\u03c9\5\u0173\u00ba\2\u03c9\u03ca")
buf.write("\5\u015f\u00b0\2\u03ca\u03cb\5\u0175\u00bb\2\u03cb\u03cc")
buf.write("\5\u0171\u00b9\2\u03cc\u00c0\3\2\2\2\u03cd\u03ce\5\u017b")
buf.write("\u00be\2\u03ce\u03cf\5\u0159\u00ad\2\u03cf\u03d0\5\u0173")
buf.write("\u00ba\2\u03d0\u03d1\5\u016d\u00b7\2\u03d1\u00c2\3\2\2")
buf.write("\2\u03d2\u03d3\5\u017b\u00be\2\u03d3\u03d4\5\u0169\u00b5")
buf.write("\2\u03d4\u03d5\5\u0165\u00b3\2\u03d5\u03d6\5\u0167\u00b4")
buf.write("\2\u03d6\u03d7\5\u017f\u00c0\2\u03d7\u00c4\3\2\2\2\u03d8")
buf.write("\u03d9\5\u017b\u00be\2\u03d9\u03da\5\u0175\u00bb\2\u03da")
buf.write("\u03db\5\u0181\u00c1\2\u03db\u03dc\5\u0173\u00ba\2\u03dc")
buf.write("\u03dd\5\u015f\u00b0\2\u03dd\u00c6\3\2\2\2\u03de\u03df")
buf.write("\5\u017b\u00be\2\u03df\u03e0\5\u0175\u00bb\2\u03e0\u03e1")
buf.write("\5\u0185\u00c3\2\u03e1\u03e2\7a\2\2\u03e2\u03e3\5\u0173")
buf.write("\u00ba\2\u03e3\u03e4\5\u0181\u00c1\2\u03e4\u03e5\5\u0171")
buf.write("\u00b9\2\u03e5\u03e6\5\u015b\u00ae\2\u03e6\u03e7\5\u0161")
buf.write("\u00b1\2\u03e7\u03e8\5\u017b\u00be\2\u03e8\u00c8\3\2\2")
buf.write("\2\u03e9\u03ea\5\u017b\u00be\2\u03ea\u03eb\5\u0175\u00bb")
buf.write("\2\u03eb\u03ec\5\u0185\u00c3\2\u03ec\u03ed\5\u0173\u00ba")
buf.write("\2\u03ed\u03ee\5\u0181\u00c1\2\u03ee\u03ef\5\u0171\u00b9")
buf.write("\2\u03ef\u00ca\3\2\2\2\u03f0\u03f1\5\u017d\u00bf\2\u03f1")
buf.write("\u03f2\5\u0161\u00b1\2\u03f2\u03f3\5\u015d\u00af\2\u03f3")
buf.write("\u03f4\5\u0175\u00bb\2\u03f4\u03f5\5\u0173\u00ba\2\u03f5")
buf.write("\u03f6\5\u015f\u00b0\2\u03f6\u00cc\3\2\2\2\u03f7\u03f8")
buf.write("\5\u017d\u00bf\2\u03f8\u03f9\5\u0161\u00b1\2\u03f9\u03fa")
buf.write("\5\u016f\u00b8\2\u03fa\u03fb\5\u0161\u00b1\2\u03fb\u03fc")
buf.write("\5\u015d\u00af\2\u03fc\u03fd\5\u017f\u00c0\2\u03fd\u00ce")
buf.write("\3\2\2\2\u03fe\u03ff\5\u017d\u00bf\2\u03ff\u0400\5\u0161")
buf.write("\u00b1\2\u0400\u0401\5\u0171\u00b9\2\u0401\u0402\5\u0169")
buf.write("\u00b5\2\u0402\u00d0\3\2\2\2\u0403\u0404\5\u017d\u00bf")
buf.write("\2\u0404\u0405\5\u0169\u00b5\2\u0405\u0406\5\u0165\u00b3")
buf.write("\2\u0406\u0407\5\u0173\u00ba\2\u0407\u00d2\3\2\2\2\u0408")
buf.write("\u0409\5\u017d\u00bf\2\u0409\u040a\5\u0169\u00b5\2\u040a")
buf.write("\u040b\5\u0173\u00ba\2\u040b\u00d4\3\2\2\2\u040c\u040d")
buf.write("\5\u017d\u00bf\2\u040d\u040e\5\u0175\u00bb\2\u040e\u040f")
buf.write("\5\u017b\u00be\2\u040f\u0410\5\u017f\u00c0\2\u0410\u00d6")
buf.write("\3\2\2\2\u0411\u0412\5\u017d\u00bf\2\u0412\u0413\5\u0179")
buf.write("\u00bd\2\u0413\u0414\5\u016f\u00b8\2\u0414\u00d8\3\2\2")
buf.write("\2\u0415\u0416\5\u017d\u00bf\2\u0416\u0417\5\u0179\u00bd")
buf.write("\2\u0417\u0418\5\u017b\u00be\2\u0418\u0419\5\u017f\u00c0")
buf.write("\2\u0419\u00da\3\2\2\2\u041a\u041b\5\u017d\u00bf\2\u041b")
buf.write("\u041c\5\u0179\u00bd\2\u041c\u041d\5\u0181\u00c1\2\u041d")
buf.write("\u041e\5\u0159\u00ad\2\u041e\u041f\5\u017b\u00be\2\u041f")
buf.write("\u0420\5\u0161\u00b1\2\u0420\u00dc\3\2\2\2\u0421\u0422")
buf.write("\5\u017d\u00bf\2\u0422\u0423\5\u017f\u00c0\2\u0423\u0424")
buf.write("\5\u015f\u00b0\2\u0424\u00de\3\2\2\2\u0425\u0426\5\u017d")
buf.write("\u00bf\2\u0426\u0427\5\u017f\u00c0\2\u0427\u0428\5\u015f")
buf.write("\u00b0\2\u0428\u0429\5\u015f\u00b0\2\u0429\u042a\5\u0161")
buf.write("\u00b1\2\u042a\u042b\5\u0183\u00c2\2\u042b\u00e0\3\2\2")
buf.write("\2\u042c\u042d\5\u017d\u00bf\2\u042d\u042e\5\u017f\u00c0")
buf.write("\2\u042e\u042f\5\u015f\u00b0\2\u042f\u0430\5\u0161\u00b1")
buf.write("\2\u0430\u0431\5\u0183\u00c2\2\u0431\u00e2\3\2\2\2\u0432")
buf.write("\u0433\5\u017d\u00bf\2\u0433\u0434\5\u0181\u00c1\2\u0434")
buf.write("\u0435\5\u015b\u00ae\2\u0435\u0436\5\u017d\u00bf\2\u0436")
buf.write("\u0437\5\u017f\u00c0\2\u0437\u0438\5\u017b\u00be\2\u0438")
buf.write("\u0439\5\u0169\u00b5\2\u0439\u043a\5\u0173\u00ba\2\u043a")
buf.write("\u043b\5\u0165\u00b3\2\u043b\u00e4\3\2\2\2\u043c\u043d")
buf.write("\5\u017d\u00bf\2\u043d\u043e\5\u0181\u00c1\2\u043e\u043f")
buf.write("\5\u0171\u00b9\2\u043f\u00e6\3\2\2\2\u0440\u0441\5\u017f")
buf.write("\u00c0\2\u0441\u0442\5\u0159\u00ad\2\u0442\u0443\5\u0173")
buf.write("\u00ba\2\u0443\u00e8\3\2\2\2\u0444\u0445\5\u017f\u00c0")
buf.write("\2\u0445\u0446\5\u0167\u00b4\2\u0446\u0447\5\u0161\u00b1")
buf.write("\2\u0447\u0448\5\u0173\u00ba\2\u0448\u00ea\3\2\2\2\u0449")
buf.write("\u044a\5\u017f\u00c0\2\u044a\u044b\5\u0169\u00b5\2\u044b")
buf.write("\u044c\5\u0171\u00b9\2\u044c\u044d\5\u0161\u00b1\2\u044d")
buf.write("\u00ec\3\2\2\2\u044e\u044f\5\u017f\u00c0\2\u044f\u0450")
buf.write("\5\u0169\u00b5\2\u0450\u0451\5\u0171\u00b9\2\u0451\u0452")
buf.write("\5\u0161\u00b1\2\u0452\u0453\5\u017d\u00bf\2\u0453\u0454")
buf.write("\5\u017f\u00c0\2\u0454\u0455\5\u0159\u00ad\2\u0455\u0456")
buf.write("\5\u0171\u00b9\2\u0456\u0457\5\u0177\u00bc\2\u0457\u00ee")
buf.write("\3\2\2\2\u0458\u0459\5\u017f\u00c0\2\u0459\u045a\5\u0175")
buf.write("\u00bb\2\u045a\u045b\5\u0177\u00bc\2\u045b\u00f0\3\2\2")
buf.write("\2\u045c\u045d\5\u017f\u00c0\2\u045d\u045e\5\u017b\u00be")
buf.write("\2\u045e\u045f\5\u0169\u00b5\2\u045f\u0460\5\u0171\u00b9")
buf.write("\2\u0460\u00f2\3\2\2\2\u0461\u0462\5\u017f\u00c0\2\u0462")
buf.write("\u0463\5\u017b\u00be\2\u0463\u0464\5\u0181\u00c1\2\u0464")
buf.write("\u0465\5\u0161\u00b1\2\u0465\u00f4\3\2\2\2\u0466\u0467")
buf.write("\5\u017f\u00c0\2\u0467\u0468\5\u017b\u00be\2\u0468\u0469")
buf.write("\5\u0181\u00c1\2\u0469\u046a\5\u0173\u00ba\2\u046a\u046b")
buf.write("\5\u015d\u00af\2\u046b\u00f6\3\2\2\2\u046c\u046d\5\u017f")
buf.write("\u00c0\2\u046d\u046e\5\u017b\u00be\2\u046e\u046f\5\u0181")
buf.write("\u00c1\2\u046f\u0470\5\u0173\u00ba\2\u0470\u0471\5\u015d")
buf.write("\u00af\2\u0471\u0472\5\u0159\u00ad\2\u0472\u0473\5\u017f")
buf.write("\u00c0\2\u0473\u0474\5\u0161\u00b1\2\u0474\u00f8\3\2\2")
buf.write("\2\u0475\u0476\5\u017f\u00c0\2\u0476\u0477\5\u018b\u00c6")
buf.write("\2\u0477\u0478\5\u0175\u00bb\2\u0478\u0479\5\u0163\u00b2")
buf.write("\2\u0479\u047a\5\u0163\u00b2\2\u047a\u047b\5\u017d\u00bf")
buf.write("\2\u047b\u047c\5\u0161\u00b1\2\u047c\u047d\5\u017f\u00c0")
buf.write("\2\u047d\u00fa\3\2\2\2\u047e\u047f\5\u0181\u00c1\2\u047f")
buf.write("\u0480\5\u0173\u00ba\2\u0480\u0481\5\u0169\u00b5\2\u0481")
buf.write("\u0482\5\u0175\u00bb\2\u0482\u0483\5\u0173\u00ba\2\u0483")
buf.write("\u00fc\3\2\2\2\u0484\u0485\5\u0181\u00c1\2\u0485\u0486")
buf.write("\5\u0177\u00bc\2\u0486\u0487\5\u0177\u00bc\2\u0487\u0488")
buf.write("\5\u0161\u00b1\2\u0488\u0489\5\u017b\u00be\2\u0489\u00fe")
buf.write("\3\2\2\2\u048a\u048b\5\u0181\u00c1\2\u048b\u048c\5\u017d")
buf.write("\u00bf\2\u048c\u048d\5\u0169\u00b5\2\u048d\u048e\5\u0173")
buf.write("\u00ba\2\u048e\u048f\5\u0165\u00b3\2\u048f\u0100\3\2\2")
buf.write("\2\u0490\u0491\5\u0183\u00c2\2\u0491\u0492\5\u0159\u00ad")
buf.write("\2\u0492\u0493\5\u017b\u00be\2\u0493\u0102\3\2\2\2\u0494")
buf.write("\u0495\5\u0183\u00c2\2\u0495\u0496\5\u0159\u00ad\2\u0496")
buf.write("\u0497\5\u017b\u00be\2\u0497\u0498\5\u015d\u00af\2\u0498")
buf.write("\u0499\5\u0167\u00b4\2\u0499\u049a\5\u0159\u00ad\2\u049a")
buf.write("\u049b\5\u017b\u00be\2\u049b\u0104\3\2\2\2\u049c\u049d")
buf.write("\5\u0173\u00ba\2\u049d\u049e\5\u0183\u00c2\2\u049e\u049f")
buf.write("\5\u0159\u00ad\2\u049f\u04a0\5\u017b\u00be\2\u04a0\u04a1")
buf.write("\5\u015d\u00af\2\u04a1\u04a2\5\u0167\u00b4\2\u04a2\u04a3")
buf.write("\5\u0159\u00ad\2\u04a3\u04a4\5\u017b\u00be\2\u04a4\u0106")
buf.write("\3\2\2\2\u04a5\u04a6\5\u0183\u00c2\2\u04a6\u04a7\5\u0159")
buf.write("\u00ad\2\u04a7\u04a8\5\u017b\u00be\2\u04a8\u04a9\5\u0169")
buf.write("\u00b5\2\u04a9\u04aa\5\u0159\u00ad\2\u04aa\u04ab\5\u0173")
buf.write("\u00ba\2\u04ab\u04ac\5\u015d\u00af\2\u04ac\u04ad\5\u0161")
buf.write("\u00b1\2\u04ad\u0108\3\2\2\2\u04ae\u04af\5\u0185\u00c3")
buf.write("\2\u04af\u04b0\5\u0161\u00b1\2\u04b0\u04b1\5\u0161\u00b1")
buf.write("\2\u04b1\u04b2\5\u016d\u00b7\2\u04b2\u04b3\5\u015f\u00b0")
buf.write("\2\u04b3\u04b4\5\u0159\u00ad\2\u04b4\u04b5\5\u0189\u00c5")
buf.write("\2\u04b5\u010a\3\2\2\2\u04b6\u04b7\5\u0185\u00c3\2\u04b7")
buf.write("\u04b8\5\u0167\u00b4\2\u04b8\u04b9\5\u0161\u00b1\2\u04b9")
buf.write("\u04ba\5\u0173\u00ba\2\u04ba\u010c\3\2\2\2\u04bb\u04bc")
buf.write("\5\u0185\u00c3\2\u04bc\u04bd\5\u0167\u00b4\2\u04bd\u04be")
buf.write("\5\u0161\u00b1\2\u04be\u04bf\5\u017b\u00be\2\u04bf\u04c0")
buf.write("\5\u0161\u00b1\2\u04c0\u010e\3\2\2\2\u04c1\u04c2\5\u0189")
buf.write("\u00c5\2\u04c2\u04c3\5\u0161\u00b1\2\u04c3\u04c4\5\u0159")
buf.write("\u00ad\2\u04c4\u04c5\5\u017b\u00be\2\u04c5\u0110\3\2\2")
buf.write("\2\u04c6\u04ca\7?\2\2\u04c7\u04c8\7?\2\2\u04c8\u04ca\7")
buf.write("?\2\2\u04c9\u04c6\3\2\2\2\u04c9\u04c7\3\2\2\2\u04ca\u0112")
buf.write("\3\2\2\2\u04cb\u04cc\7>\2\2\u04cc\u04cd\7?\2\2\u04cd\u04ce")
buf.write("\7@\2\2\u04ce\u0114\3\2\2\2\u04cf\u04d0\7>\2\2\u04d0\u04d1")
buf.write("\7@\2\2\u04d1\u0116\3\2\2\2\u04d2\u04d3\7#\2\2\u04d3\u04d4")
buf.write("\7?\2\2\u04d4\u0118\3\2\2\2\u04d5\u04d6\7>\2\2\u04d6\u011a")
buf.write("\3\2\2\2\u04d7\u04d8\7>\2\2\u04d8\u04dc\7?\2\2\u04d9\u04da")
buf.write("\7#\2\2\u04da\u04dc\7@\2\2\u04db\u04d7\3\2\2\2\u04db\u04d9")
buf.write("\3\2\2\2\u04dc\u011c\3\2\2\2\u04dd\u04de\7@\2\2\u04de")
buf.write("\u011e\3\2\2\2\u04df\u04e0\7@\2\2\u04e0\u04e4\7?\2\2\u04e1")
buf.write("\u04e2\7#\2\2\u04e2\u04e4\7>\2\2\u04e3\u04df\3\2\2\2\u04e3")
buf.write("\u04e1\3\2\2\2\u04e4\u0120\3\2\2\2\u04e5\u04e6\7-\2\2")
buf.write("\u04e6\u0122\3\2\2\2\u04e7\u04e8\7/\2\2\u04e8\u0124\3")
buf.write("\2\2\2\u04e9\u04ea\7,\2\2\u04ea\u0126\3\2\2\2\u04eb\u04ec")
buf.write("\7\61\2\2\u04ec\u0128\3\2\2\2\u04ed\u04ee\7\'\2\2\u04ee")
buf.write("\u012a\3\2\2\2\u04ef\u04f0\7\u0080\2\2\u04f0\u012c\3\2")
buf.write("\2\2\u04f1\u04f2\7(\2\2\u04f2\u012e\3\2\2\2\u04f3\u04f4")
buf.write("\7~\2\2\u04f4\u0130\3\2\2\2\u04f5\u04f6\7~\2\2\u04f6\u04f7")
buf.write("\7~\2\2\u04f7\u0132\3\2\2\2\u04f8\u04f9\7`\2\2\u04f9\u0134")
buf.write("\3\2\2\2\u04fa\u04fb\7a\2\2\u04fb\u0136\3\2\2\2\u04fc")
buf.write("\u0502\7)\2\2\u04fd\u0501\n\2\2\2\u04fe\u04ff\7^\2\2\u04ff")
buf.write("\u0501\13\2\2\2\u0500\u04fd\3\2\2\2\u0500\u04fe\3\2\2")
buf.write("\2\u0501\u0504\3\2\2\2\u0502\u0500\3\2\2\2\u0502\u0503")
buf.write("\3\2\2\2\u0503\u0505\3\2\2\2\u0504\u0502\3\2\2\2\u0505")
buf.write("\u0506\7)\2\2\u0506\u0138\3\2\2\2\u0507\u0509\5\u0153")
buf.write("\u00aa\2\u0508\u0507\3\2\2\2\u0509\u050a\3\2\2\2\u050a")
buf.write("\u0508\3\2\2\2\u050a\u050b\3\2\2\2\u050b\u013a\3\2\2\2")
buf.write("\u050c\u050e\5\u0153\u00aa\2\u050d\u050c\3\2\2\2\u050e")
buf.write("\u050f\3\2\2\2\u050f\u050d\3\2\2\2\u050f\u0510\3\2\2\2")
buf.write("\u0510\u0511\3\2\2\2\u0511\u0512\5\u0151\u00a9\2\u0512")
buf.write("\u0518\3\2\2\2\u0513\u0515\5\u014f\u00a8\2\u0514\u0516")
buf.write("\5\u0151\u00a9\2\u0515\u0514\3\2\2\2\u0515\u0516\3\2\2")
buf.write("\2\u0516\u0518\3\2\2\2\u0517\u050d\3\2\2\2\u0517\u0513")
buf.write("\3\2\2\2\u0518\u013c\3\2\2\2\u0519\u051a\5\u0141\u00a1")
buf.write("\2\u051a\u051b\7\60\2\2\u051b\u051c\5\u0141\u00a1\2\u051c")
buf.write("\u013e\3\2\2\2\u051d\u051e\5\u0141\u00a1\2\u051e\u051f")
buf.write("\7\60\2\2\u051f\u0520\5\u0141\u00a1\2\u0520\u0521\7\60")
buf.write("\2\2\u0521\u0522\5\u0141\u00a1\2\u0522\u0140\3\2\2\2\u0523")
buf.write("\u0526\5\u0145\u00a3\2\u0524\u0526\5\u0147\u00a4\2\u0525")
buf.write("\u0523\3\2\2\2\u0525\u0524\3\2\2\2\u0526\u0142\3\2\2\2")
buf.write("\u0527\u052b\t\3\2\2\u0528\u052a\t\4\2\2\u0529\u0528\3")
buf.write("\2\2\2\u052a\u052d\3\2\2\2\u052b\u0529\3\2\2\2\u052b\u052c")
buf.write("\3\2\2\2\u052c\u0144\3\2\2\2\u052d\u052b\3\2\2\2\u052e")
buf.write("\u0530\5\u0149\u00a5\2\u052f\u052e\3\2\2\2\u0530\u0531")
buf.write("\3\2\2\2\u0531\u052f\3\2\2\2\u0531\u0532\3\2\2\2\u0532")
buf.write("\u0538\3\2\2\2\u0533\u0537\5\u0149\u00a5\2\u0534\u0537")
buf.write("\5\u0153\u00aa\2\u0535\u0537\7a\2\2\u0536\u0533\3\2\2")
buf.write("\2\u0536\u0534\3\2\2\2\u0536\u0535\3\2\2\2\u0537\u053a")
buf.write("\3\2\2\2\u0538\u0536\3\2\2\2\u0538\u0539\3\2\2\2\u0539")
buf.write("\u053e\3\2\2\2\u053a\u0538\3\2\2\2\u053b\u053e\5\u014b")
buf.write("\u00a6\2\u053c\u053e\5\u0143\u00a2\2\u053d\u052f\3\2\2")
buf.write("\2\u053d\u053b\3\2\2\2\u053d\u053c\3\2\2\2\u053e\u0146")
buf.write("\3\2\2\2\u053f\u0545\7]\2\2\u0540\u0544\5\u0149\u00a5")
buf.write("\2\u0541\u0544\5\u0153\u00aa\2\u0542\u0544\t\5\2\2\u0543")
buf.write("\u0540\3\2\2\2\u0543\u0541\3\2\2\2\u0543\u0542\3\2\2\2")
buf.write("\u0544\u0547\3\2\2\2\u0545\u0546\3\2\2\2\u0545\u0543\3")
buf.write("\2\2\2\u0546\u0548\3\2\2\2\u0547\u0545\3\2\2\2\u0548\u055e")
buf.write("\7_\2\2\u0549\u054f\7$\2\2\u054a\u054e\5\u0149\u00a5\2")
buf.write("\u054b\u054e\5\u0153\u00aa\2\u054c\u054e\t\5\2\2\u054d")
buf.write("\u054a\3\2\2\2\u054d\u054b\3\2\2\2\u054d\u054c\3\2\2\2")
buf.write("\u054e\u0551\3\2\2\2\u054f\u0550\3\2\2\2\u054f\u054d\3")
buf.write("\2\2\2\u0550\u0552\3\2\2\2\u0551\u054f\3\2\2\2\u0552\u055e")
buf.write("\7$\2\2\u0553\u0559\7b\2\2\u0554\u0558\5\u0149\u00a5\2")
buf.write("\u0555\u0558\5\u0153\u00aa\2\u0556\u0558\t\5\2\2\u0557")
buf.write("\u0554\3\2\2\2\u0557\u0555\3\2\2\2\u0557\u0556\3\2\2\2")
buf.write("\u0558\u055b\3\2\2\2\u0559\u055a\3\2\2\2\u0559\u0557\3")
buf.write("\2\2\2\u055a\u055c\3\2\2\2\u055b\u0559\3\2\2\2\u055c\u055e")
buf.write("\7b\2\2\u055d\u053f\3\2\2\2\u055d\u0549\3\2\2\2\u055d")
buf.write("\u0553\3\2\2\2\u055e\u0148\3\2\2\2\u055f\u0562\5\u0155")
buf.write("\u00ab\2\u0560\u0562\5\u0157\u00ac\2\u0561\u055f\3\2\2")
buf.write("\2\u0561\u0560\3\2\2\2\u0562\u014a\3\2\2\2\u0563\u0564")
buf.write("\5\u014d\u00a7\2\u0564\u014c\3\2\2\2\u0565\u056d\7$\2")
buf.write("\2\u0566\u0567\7^\2\2\u0567\u056c\13\2\2\2\u0568\u0569")
buf.write("\7$\2\2\u0569\u056c\7$\2\2\u056a\u056c\n\6\2\2\u056b\u0566")
buf.write("\3\2\2\2\u056b\u0568\3\2\2\2\u056b\u056a\3\2\2\2\u056c")
buf.write("\u056f\3\2\2\2\u056d\u056b\3\2\2\2\u056d\u056e\3\2\2\2")
buf.write("\u056e\u0570\3\2\2\2\u056f\u056d\3\2\2\2\u0570\u0571\7")
buf.write("$\2\2\u0571\u014e\3\2\2\2\u0572\u0574\5\u0153\u00aa\2")
buf.write("\u0573\u0572\3\2\2\2\u0574\u0575\3\2\2\2\u0575\u0573\3")
buf.write("\2\2\2\u0575\u0576\3\2\2\2\u0576\u0577\3\2\2\2\u0577\u057b")
buf.write("\7\60\2\2\u0578\u057a\5\u0153\u00aa\2\u0579\u0578\3\2")
buf.write("\2\2\u057a\u057d\3\2\2\2\u057b\u0579\3\2\2\2\u057b\u057c")
buf.write("\3\2\2\2\u057c\u0150\3\2\2\2\u057d\u057b\3\2\2\2\u057e")
buf.write("\u0580\7G\2\2\u057f\u0581\t\7\2\2\u0580\u057f\3\2\2\2")
buf.write("\u0580\u0581\3\2\2\2\u0581\u0583\3\2\2\2\u0582\u0584\5")
buf.write("\u0153\u00aa\2\u0583\u0582\3\2\2\2\u0584\u0585\3\2\2\2")
buf.write("\u0585\u0583\3\2\2\2\u0585\u0586\3\2\2\2\u0586\u0152\3")
buf.write("\2\2\2\u0587\u0588\t\b\2\2\u0588\u0154\3\2\2\2\u0589\u058a")
buf.write("\t\t\2\2\u058a\u0156\3\2\2\2\u058b\u058c\t\n\2\2\u058c")
buf.write("\u0158\3\2\2\2\u058d\u058e\t\13\2\2\u058e\u015a\3\2\2")
buf.write("\2\u058f\u0590\t\f\2\2\u0590\u015c\3\2\2\2\u0591\u0592")
buf.write("\t\r\2\2\u0592\u015e\3\2\2\2\u0593\u0594\t\16\2\2\u0594")
buf.write("\u0160\3\2\2\2\u0595\u0596\t\17\2\2\u0596\u0162\3\2\2")
buf.write("\2\u0597\u0598\t\20\2\2\u0598\u0164\3\2\2\2\u0599\u059a")
buf.write("\t\21\2\2\u059a\u0166\3\2\2\2\u059b\u059c\t\22\2\2\u059c")
buf.write("\u0168\3\2\2\2\u059d\u059e\t\23\2\2\u059e\u016a\3\2\2")
buf.write("\2\u059f\u05a0\t\24\2\2\u05a0\u016c\3\2\2\2\u05a1\u05a2")
buf.write("\t\25\2\2\u05a2\u016e\3\2\2\2\u05a3\u05a4\t\26\2\2\u05a4")
buf.write("\u0170\3\2\2\2\u05a5\u05a6\t\27\2\2\u05a6\u0172\3\2\2")
buf.write("\2\u05a7\u05a8\t\30\2\2\u05a8\u0174\3\2\2\2\u05a9\u05aa")
buf.write("\t\31\2\2\u05aa\u0176\3\2\2\2\u05ab\u05ac\t\32\2\2\u05ac")
buf.write("\u0178\3\2\2\2\u05ad\u05ae\t\33\2\2\u05ae\u017a\3\2\2")
buf.write("\2\u05af\u05b0\t\34\2\2\u05b0\u017c\3\2\2\2\u05b1\u05b2")
buf.write("\t\35\2\2\u05b2\u017e\3\2\2\2\u05b3\u05b4\t\36\2\2\u05b4")
buf.write("\u0180\3\2\2\2\u05b5\u05b6\t\37\2\2\u05b6\u0182\3\2\2")
buf.write("\2\u05b7\u05b8\t \2\2\u05b8\u0184\3\2\2\2\u05b9\u05ba")
buf.write("\t!\2\2\u05ba\u0186\3\2\2\2\u05bb\u05bc\t\"\2\2\u05bc")
buf.write("\u0188\3\2\2\2\u05bd\u05be\t#\2\2\u05be\u018a\3\2\2\2")
buf.write("\u05bf\u05c0\t$\2\2\u05c0\u018c\3\2\2\2\u05c1\u05c2\7")
buf.write("/\2\2\u05c2\u05c3\7/\2\2\u05c3\u05c7\3\2\2\2\u05c4\u05c6")
buf.write("\n%\2\2\u05c5\u05c4\3\2\2\2\u05c6\u05c9\3\2\2\2\u05c7")
buf.write("\u05c5\3\2\2\2\u05c7\u05c8\3\2\2\2\u05c8\u05cb\3\2\2\2")
buf.write("\u05c9\u05c7\3\2\2\2\u05ca\u05cc\7\17\2\2\u05cb\u05ca")
buf.write("\3\2\2\2\u05cb\u05cc\3\2\2\2\u05cc\u05ce\3\2\2\2\u05cd")
buf.write("\u05cf\7\f\2\2\u05ce\u05cd\3\2\2\2\u05ce\u05cf\3\2\2\2")
buf.write("\u05cf\u05d0\3\2\2\2\u05d0\u05d1\b\u00c7\2\2\u05d1\u018e")
buf.write("\3\2\2\2\u05d2\u05d3\7\61\2\2\u05d3\u05d4\7,\2\2\u05d4")
buf.write("\u05d5\7,\2\2\u05d5\u05d6\7\61\2\2\u05d6\u05d7\3\2\2\2")
buf.write("\u05d7\u05d8\b\u00c8\2\2\u05d8\u0190\3\2\2\2\u05d9\u05da")
buf.write("\7\61\2\2\u05da\u05db\7,\2\2\u05db\u05dc\3\2\2\2\u05dc")
buf.write("\u05e0\n&\2\2\u05dd\u05df\13\2\2\2\u05de\u05dd\3\2\2\2")
buf.write("\u05df\u05e2\3\2\2\2\u05e0\u05e1\3\2\2\2\u05e0\u05de\3")
buf.write("\2\2\2\u05e1\u05e3\3\2\2\2\u05e2\u05e0\3\2\2\2\u05e3\u05e4")
buf.write("\7,\2\2\u05e4\u05e5\7\61\2\2\u05e5\u05e6\3\2\2\2\u05e6")
buf.write("\u05e7\b\u00c9\2\2\u05e7\u0192\3\2\2\2\u05e8\u05ea\t\'")
buf.write("\2\2\u05e9\u05e8\3\2\2\2\u05ea\u05eb\3\2\2\2\u05eb\u05e9")
buf.write("\3\2\2\2\u05eb\u05ec\3\2\2\2\u05ec\u05ed\3\2\2\2\u05ed")
buf.write("\u05ee\b\u00ca\2\2\u05ee\u0194\3\2\2\2\u05ef\u05f1\t(")
buf.write("\2\2\u05f0\u05ef\3\2\2\2\u05f1\u05f2\3\2\2\2\u05f2\u05f0")
buf.write("\3\2\2\2\u05f2\u05f3\3\2\2\2\u05f3\u05f4\3\2\2\2\u05f4")
buf.write("\u05f5\b\u00cb\2\2\u05f5\u0196\3\2\2\2&\2\u04c9\u04db")
buf.write("\u04e3\u0500\u0502\u050a\u050f\u0515\u0517\u0525\u052b")
buf.write("\u0531\u0536\u0538\u053d\u0543\u0545\u054d\u054f\u0557")
buf.write("\u0559\u055d\u0561\u056b\u056d\u0575\u057b\u0580\u0585")
buf.write("\u05c7\u05cb\u05ce\u05e0\u05eb\u05f2\3\b\2\2")
return buf.getvalue()
class SqlSmallLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
T__0 = 1
T__1 = 2
T__2 = 3
T__3 = 4
T__4 = 5
T__5 = 6
ABS = 7
ACOS = 8
ALL = 9
AND = 10
ANTI = 11
AS = 12
ASC = 13
ASIN = 14
ATAN = 15
ATANH = 16
AVG = 17
BETWEEN = 18
BOOLEAN = 19
BY = 20
CASE = 21
CAST = 22
CHAR = 23
CHAR_LENGTH = 24
CEIL = 25
CEILING = 26
CHOOSE = 27
COALESCE = 28
CONCAT = 29
COS = 30
COT = 31
COUNT = 32
CROSS = 33
CURRENT_DATE = 34
CURRENT_TIME = 35
CURRENT_TIMESTAMP = 36
DATE = 37
DAY = 38
DAYNAME = 39
DEGREES = 40
DENSE_RANK = 41
DESC = 42
DISTINCT = 43
DIV = 44
ELSE = 45
END = 46
EXP = 47
EXTRACT = 48
FALSE = 49
FLOAT = 50
FLOOR = 51
FOR = 52
FROM = 53
FULL = 54
GROUP = 55
HAVING = 56
HOUR = 57
IF = 58
IIF = 59
IN = 60
INNER = 61
INTEGER = 62
INTERSECT = 63
IS = 64
JOIN = 65
LEFT = 66
LIMIT = 67
LN = 68
LOG = 69
LOG10 = 70
LOG2 = 71
LOWER = 72
MAX = 73
MICROSECOND = 74
MIN = 75
MINUTE = 76
MONTH = 77
NCHAR = 78
NEWID = 79
NOT = 80
NULL = 81
NUMERIC = 82
ON = 83
OR = 84
ORDER = 85
OUTER = 86
OVER = 87
PARTITION = 88
PERCENTILE_CONT = 89
PERCENTILE_DISC = 90
PI = 91
POSITION = 92
POWER = 93
RAND = 94
RANDOM = 95
RANK = 96
RIGHT = 97
ROUND = 98
ROW_NUMBER = 99
ROWNUM = 100
SECOND = 101
SELECT = 102
SEMI = 103
SIGN = 104
SIN = 105
SORT = 106
SQL = 107
SQRT = 108
SQUARE = 109
STD = 110
STDDEV = 111
STDEV = 112
SUBSTRING = 113
SUM = 114
TAN = 115
THEN = 116
TIME = 117
TIMESTAMP = 118
TOP = 119
TRIM = 120
TRUE = 121
TRUNC = 122
TRUNCATE = 123
TZOFFSET = 124
UNION = 125
UPPER = 126
USING = 127
VAR = 128
VARCHAR = 129
NVARCHAR = 130
VARIANCE = 131
WEEKDAY = 132
WHEN = 133
WHERE = 134
YEAR = 135
EQ = 136
NSEQ = 137
NEQ = 138
NEQJ = 139
LT = 140
| |
width=9*cm, height=0.9*cm, fill=False,stroke=True),
# Label(text='<b>Description</>', top=8.1*cm, left=1.4*cm, style={'fontName': 'Candara-Bold', 'alignment': TA_CENTER}),
# Rect(left=11.5*cm, top=7.9*cm, width=5*cm, height=0.9*cm, fill=False,stroke=True),
# Label(text='<b>Qty</b>', top=8.1*cm, left=11.2*cm, style={'fontName': 'Candara-Bold', 'alignment': TA_CENTER}),
# Rect(left=16.5*cm, top=7.9*cm, width=6*cm, height=0.9*cm, fill=False,stroke=True),
# Label(text='UNIT PRICE(RWF)', top=8.1*cm, left=17*cm, style={'fontName': 'Candara-Bold', 'alignment': TA_CENTER}),
# Rect(left=22.5*cm, top=7.9*cm, width=4*cm, height=0.9*cm, fill=False,stroke=True),
# Label(text='SUM(RWF)', top=8.1*cm, left=22.3*cm, style={'fontName': 'Candara-Bold', 'alignment': TA_CENTER}),
# Rect(left=0*cm, top=8.8*cm, width=26.5*cm, height=1.5*cm, fill=False,stroke=True),
# Label(text='Invoice Details', top=9.3*cm, left=10.3*cm, style={'fontName': 'Candara-Bold', 'alignment': TA_CENTER}),
# Rect(left=0*cm, top=11*cm, width=22.5*cm, height=0.8*cm, fill=False,stroke=True),
# Label(text='VAT included in price', top=11.2*cm, left=0.5*cm, style={'fontName': 'Candara-Bold', 'alignment': TA_LEFT}),
# Rect(left=22.5*cm, top=11*cm, width=4*cm, height=0.8*cm, fill=False,stroke=True),
# # Empty Label
# Rect(left=0*cm, top=11.8*cm, width=13*cm, height=2.5*cm, fill=False,stroke=True),
# Label(text='Received By:_________________________________________', top=12.3*cm, left=0.5*cm, width=BAND_WIDTH, style={'fontSize': 12, 'fontName': 'Candara-Bold'}),
# Label(text='Date:__________________________________________________________', top=13.2*cm, left=0.5*cm, width=BAND_WIDTH, style={'fontName': 'Candara-Bold'}),
# Rect(left=13*cm, top=11.8*cm, width=3.5*cm, height=2.5*cm, fill=False,stroke=True),
# Label(text='Currency:', top=12.3*cm, left=13.7*cm, style={'fontSize': 12}),
# Label(text='RWF', top=12.9*cm, left=13.7*cm, style={'fontName': 'Candara-Bold'}),
# Rect(left=16.5*cm, top=11.8*cm, width=6*cm, height=0.9*cm, fill=False,stroke=True),
# Label(text='TOTAL EXCLU VAT:', top=12*cm, left=19.1*cm, style={'fontName': 'Candara-Bold'}),
# Rect(left=22.5*cm, top=11.8*cm, width=4*cm, height=0.9*cm, fill=False,stroke=True),
# Label(text='xxxxx', top=12*cm, left=24.2*cm, style={'fontName': 'Candara-Bold'}),
# Rect(left=16.5*cm, top=12.7*cm, width=6*cm, height=0.9*cm, fill=False,stroke=True),
# Label(text='VAT 18%:', top=12.9*cm, left=20.7*cm, style={'fontName': 'Candara-Bold'}),
# Rect(left=22.5*cm, top=12.7*cm, width=4*cm, height=0.9*cm, fill=False,stroke=True),
# Label(text='xxxxx', top=12.9*cm, left=24.2*cm, style={'fontName': 'Candara-Bold'}),
# Rect(left=16.5*cm, top=13.6*cm, width=6*cm, height=0.7*cm, fill=False,stroke=True),
# Label(text='TOTAL:', top=13.7*cm, left=20.8*cm, style={'fontName': 'Candara-Bold'}),
# Rect(left=22.5*cm, top=13.6*cm, width=4*cm, height=0.7*cm, fill=False,stroke=True),
# Label(text='xxxxx', top=13.7*cm, left=24.2*cm, style={'fontName': 'Candara-Bold'}),
# Label Here
]
class invoicepdfdollar(Report):
    """
    Geraldo ``Report`` rendering the USD-denominated invoice PDF
    (item table headed "UNIT PRICE(USD)" / "SUM(USD)" in band_begin).
    """

    # Custom TTF fonts registered with ReportLab; values are paths
    # resolved relative to this module's directory (cur_dir).
    additional_fonts = {
        'Candara': os.path.join(cur_dir, '../main/static/fonts/Candara.ttf'), # full path to font file
        'Candara-Bold': os.path.join(cur_dir, '../main/static/fonts/Candarab.ttf'),
        'Candara-Italic': os.path.join(cur_dir, '../main/static/fonts/Candarai.ttf'),
        'Calibri': os.path.join(cur_dir, '../main/static/fonts/Calibri.ttf'), # full path to font file
        'Calibri-Bold': os.path.join(cur_dir, '../main/static/fonts/Calibri_Bold.ttf'),
        'Calibri-Italic': os.path.join(cur_dir, '../main/static/fonts/Calibri_Italic.ttf'),
        'Calibri-Bold-Italic': os.path.join(cur_dir, '../main/static/fonts/Calibri_Bold_Italic.ttf'),
        'Trebuchet-MS': os.path.join(cur_dir, '../main/static/fonts/Trebuchet_MS.ttf'),
        'Trebuchet-MS-Bold': os.path.join(cur_dir, '../main/static/fonts/Trebuchet_MS_Bold.ttf'),
    }
    title = 'Invoice'
    # default_style = {'fontName': 'Calibri','fontSize': 10}
    author = '<NAME>'
    # Base text style applied to elements that do not override it.
    default_style = {'fontName':'Calibri', 'fontSize':10}
    # page_size = landscape(A5)
    margin_left = 1.8*cm
    # margin_top = 0.5*cm
    # margin_right = 0.5*cm
    # margin_bottom = 0.5*cm
class band_begin(ReportBand):
    """
    Opening band: invoice meta-data labels plus the item-table header
    row. All positions are absolute offsets (cm) from the band origin.
    """
    height = 0.3*cm
    test = 'Name'
    elements = [
        # Right-aligned invoice heading (two stacked rows).
        Label(text='<b>Invoice</b>', top=4*cm, width=BAND_WIDTH, style={'alignment':TA_RIGHT,'fontName': 'Candara-Bold', 'fontSize':10}),
        Label(text='<b>Invoice</b>', top=4.5*cm, width=BAND_WIDTH, style={'alignment':TA_RIGHT,'fontName': 'Candara-Bold', 'fontSize':10}),
        # Left column: customer meta-data field labels.
        Label(text='<b>Attention</b>', top=5.7*cm, left=0.5*cm, width=BAND_WIDTH, style={'alignment':TA_LEFT, 'fontName': 'Candara-Bold', 'fontSize':10}),
        Label(text='<b>Customer No</b>', top=6.5*cm, left=0.5*cm, width=BAND_WIDTH, style={'alignment':TA_LEFT, 'fontName': 'Candara-Bold', 'fontSize':10}),
        Label(text='<b>Customer No</b>', top=7*cm, left=0.5*cm, width=BAND_WIDTH, style={'alignment':TA_LEFT, 'fontName': 'Candara-Bold', 'fontSize':10}),
        Label(text='<b>Our Reference</b>', top=8*cm, left=0.5*cm, width=BAND_WIDTH, style={'alignment':TA_LEFT, 'fontName': 'Candara-Bold', 'fontSize':10}),
        Label(text='<b>Our Reference</b>', top=8.5*cm, left=0.5*cm, width=BAND_WIDTH, style={'alignment':TA_LEFT, 'fontName': 'Candara-Bold', 'fontSize':10}),
        # Middle column.
        Label(text='<b>Customer</b>', top=6.5*cm, left=5*cm, width=BAND_WIDTH, style={'alignment':TA_LEFT, 'fontName': 'Candara-Bold', 'fontSize':10}),
        Label(text='<b>Customer</b>', top=7*cm, left=5*cm, width=BAND_WIDTH, style={'alignment':TA_LEFT, 'fontName': 'Candara-Bold', 'fontSize':10}),
        Label(text='<b>Description</b>', top=8*cm, left=5*cm, width=BAND_WIDTH, style={'alignment':TA_LEFT, 'fontName': 'Candara-Bold', 'fontSize':10}),
        Label(text='<b>Description</b>', top=8.5*cm, left=5*cm, width=BAND_WIDTH, style={'alignment':TA_LEFT, 'fontName': 'Candara-Bold', 'fontSize':10}),
        # Right column: dates.
        Label(text='<b>Invoice Date</b>', top=6.5*cm, left=11*cm, width=BAND_WIDTH, style={'alignment':TA_LEFT, 'fontName': 'Candara-Bold', 'fontSize':10}),
        Label(text='<b>Invoice Date</b>', top=7*cm, left=11*cm, width=BAND_WIDTH, style={'alignment':TA_LEFT, 'fontName': 'Candara-Bold', 'fontSize':10}),
        Label(text='<b>Invoice Due Date</b>', top=8*cm, left=11*cm, width=BAND_WIDTH, style={'alignment':TA_LEFT, 'fontName': 'Candara-Bold', 'fontSize':10}),
        Label(text='<b>Invoice Due Date</b>', top=8.5*cm, left=11*cm, width=BAND_WIDTH, style={'alignment':TA_LEFT, 'fontName': 'Candara-Bold', 'fontSize':10}),
        # Item-table header: a Rect cell outline plus its centred Label.
        Rect(left=0*cm, top=9.7*cm, width=1.5*cm, height=0.9*cm,fill=False,stroke=True),
        Label(text='<b>ITEM</b>', top=9.9*cm, left=0.3*cm, style={'fontName': 'Candara-Bold'}),
        Rect(left=1.5*cm, top=9.7*cm, width=7.8*cm, height=0.9*cm, fill=False,stroke=True),
        # NOTE(review): '</>' below looks like a typo for '</b>' — confirm rendering.
        Label(text='<b>Description</>', top=9.9*cm, left=0.1*cm, style={'fontName': 'Candara-Bold', 'alignment': TA_CENTER}),
        Rect(left=9.3*cm, top=9.7*cm, width=2.5*cm, height=0.9*cm, fill=False,stroke=True),
        Label(text='<b>Qty</b>', top=9.9*cm, left=8*cm, style={'fontName': 'Candara-Bold', 'alignment': TA_CENTER}),
        Rect(left=11.8*cm, top=9.7*cm, width=4.1*cm, height=0.9*cm, fill=False,stroke=True),
        Label(text='UNIT PRICE(USD)', top=9.9*cm, left=11.2*cm, style={'fontName': 'Candara-Bold', 'alignment': TA_CENTER}),
        Rect(left=15.9*cm, top=9.7*cm, width=2.4*cm, height=0.9*cm, fill=False,stroke=True),
        Label(text='SUM(USD)', top=9.9*cm, left=14.5*cm, style={'fontName': 'Candara-Bold', 'alignment': TA_CENTER}),
        # Edited starts from here
        # Rect(left=0*cm, top=11.7*cm, width=15.9*cm, height=0.8*cm, fill=False,stroke=True),
        # Rect(left=0*cm, top=8.8*cm, width=26.5*cm, height=1.5*cm, fill=False,stroke=True),
        # Label(text='Invoice Details', top=9.3*cm, left=10.3*cm, style={'fontName': 'Candara-Bold', 'alignment': TA_CENTER}),
        # Rect(left=0*cm, top=11*cm, width=22.5*cm, height=0.8*cm, fill=False,stroke=True),
        # Label(text='VAT included in price', top=11.2*cm, left=0.5*cm, style={'fontName': 'Candara-Bold', 'alignment': TA_LEFT}),
        # Rect(left=22.5*cm, top=11*cm, width=4*cm, height=0.8*cm, fill=False,stroke=True),
        # # Empty Label
        # Rect(left=0*cm, top=11.8*cm, width=13*cm, height=2.5*cm, fill=False,stroke=True),
        # Label(text='Received By:_________________________________________', top=12.3*cm, left=0.5*cm, width=BAND_WIDTH, style={'fontSize': 12, 'fontName': 'Candara-Bold'}),
        # Label(text='Date:__________________________________________________________', top=13.2*cm, left=0.5*cm, width=BAND_WIDTH, style={'fontName': 'Candara-Bold'}),
        # Rect(left=13*cm, top=11.8*cm, width=3.5*cm, height=2.5*cm, fill=False,stroke=True),
        # Label(text='Currency:', top=12.3*cm, left=13.7*cm, style={'fontSize': 12}),
        # Label(text='RWF', top=12.9*cm, left=13.7*cm, style={'fontName': 'Candara-Bold'}),
        # Rect(left=16.5*cm, top=11.8*cm, width=6*cm, height=0.9*cm, fill=False,stroke=True),
        # Label(text='TOTAL EXCLU VAT:', top=12*cm, left=19.1*cm, style={'fontName': 'Candara-Bold'}),
        # Rect(left=22.5*cm, top=11.8*cm, width=4*cm, height=0.9*cm, fill=False,stroke=True),
        # Label(text='xxxxx', top=12*cm, left=24.2*cm, style={'fontName': 'Candara-Bold'}),
        # Rect(left=16.5*cm, top=12.7*cm, width=6*cm, height=0.9*cm, fill=False,stroke=True),
        # Label(text='VAT 18%:', top=12.9*cm, left=20.7*cm, style={'fontName': 'Candara-Bold'}),
        # Rect(left=22.5*cm, top=12.7*cm, width=4*cm, height=0.9*cm, fill=False,stroke=True),
        # Label(text='xxxxx', top=12.9*cm, left=24.2*cm, style={'fontName': 'Candara-Bold'}),
        # Rect(left=16.5*cm, top=13.6*cm, width=6*cm, height=0.7*cm, fill=False,stroke=True),
        # Label(text='TOTAL:', top=13.7*cm, left=20.8*cm, style={'fontName': 'Candara-Bold'}),
        # Rect(left=22.5*cm, top=13.6*cm, width=4*cm, height=0.7*cm, fill=False,stroke=True),
        # Label(text='xxxxx', top=13.7*cm, left=24.2*cm, style={'fontName': 'Candara-Bold'}),
    ]
class band_summary(ReportBand):
    """
    Summary band: totals box (excl. VAT / exchange rate / VAT / total),
    signature area and payment instructions. 'xxxxx' labels are
    placeholders — presumably overwritten at generation time; confirm.
    """
    height = 0.3*cm
    elements = [
        # VAT notice row spanning the table width.
        Rect(left=0*cm, top=10.3*cm, width=15.9*cm, height=0.8*cm, fill=False,stroke=True),
        Label(text='VAT included in price', top=10.5*cm, left=0.4*cm, style={'fontName': 'Candara-Bold', 'alignment': TA_LEFT}),
        Rect(left=15.9*cm, top=10.3*cm, width=2.4*cm, height=0.8*cm, fill=False,stroke=True),
        # # Empty Label
        # Signature box.
        Rect(left=0*cm, top=11.1*cm, width=9.3*cm, height=3*cm, fill=False,stroke=True),
        Label(text='Received By:______________________________________', top=11.9*cm, left=0.5*cm, width=BAND_WIDTH, style={'fontSize': 10, 'fontName': 'Candara-Bold'}),
        Label(text='Date:____________________________________________', top=13*cm, left=0.5*cm, width=BAND_WIDTH, style={'fontName': 'Candara-Bold'}),
        # Currency box.
        # NOTE(review): currency label reads 'RWF' inside the USD invoice — confirm.
        Rect(left=9.3*cm, top=11.1*cm, width=2.5*cm, height=3*cm, fill=False,stroke=True),
        Label(text='Currency:', top=12.1*cm, left=9.7*cm, style={'fontSize': 12}),
        Label(text='RWF', top=12.8*cm, left=9.7*cm, style={'fontName': 'Candara-Bold'}),
        # Totals column: caption cell (left) and value cell (right) per row.
        Rect(left=11.8*cm, top=11.1*cm, width=4.1*cm, height=0.6*cm, fill=False,stroke=True),
        Label(text='TOTAL EXCLU VAT(USD):', top=11.2*cm, left=12*cm, style={'fontName': 'Candara-Bold'}),
        Rect(left=15.9*cm, top=11.1*cm, width=2.4*cm, height=0.6*cm, fill=False,stroke=True),
        Label(text='xxxxx', top=11.2*cm, left=16.5*cm, style={'fontName': 'Candara-Bold'}),
        Rect(left=11.8*cm, top=11.7*cm, width=4.1*cm, height=0.6*cm, fill=False,stroke=True),
        Label(text='EXCHANGE RATE:', top=11.8*cm, left=13.1*cm, style={'fontName': 'Candara-Bold'}),
        Rect(left=15.9*cm, top=11.7*cm, width=2.4*cm, height=0.6*cm, fill=False,stroke=True),
        Label(text='xxxxx', top=11.8*cm, left=16.5*cm, style={'fontName': 'Candara-Bold'}),
        Rect(left=11.8*cm, top=12.3*cm, width=4.1*cm, height=0.6*cm, fill=False,stroke=True),
        Label(text='TOTAL EXCLU VAT(RWF):', top=12.4*cm, left=11.9*cm, style={'fontName': 'Candara-Bold'}),
        Rect(left=15.9*cm, top=12.3*cm, width=2.4*cm, height=0.6*cm, fill=False,stroke=True),
        Label(text='xxxxx', top=12.4*cm, left=16.5*cm, style={'fontName': 'Candara-Bold'}),
        Rect(left=11.8*cm, top=12.9*cm, width=4.1*cm, height=0.6*cm, fill=False,stroke=True),
        Label(text='VAT 18%:', top=13*cm, left=14.4*cm, style={'fontName': 'Candara-Bold'}),
        Rect(left=15.9*cm, top=12.9*cm, width=2.4*cm, height=0.6*cm, fill=False,stroke=True),
        Label(text='xxxxx', top=13*cm, left=16.5*cm, style={'fontName': 'Candara-Bold'}),
        Rect(left=11.8*cm, top=13.5*cm, width=4.1*cm, height=0.6*cm, fill=False,stroke=True),
        Label(text='TOTAL TO PAY:', top=13.6*cm, left=13.4*cm, style={'fontName': 'Candara-Bold'}),
        Rect(left=15.9*cm, top=13.5*cm, width=2.4*cm, height=0.6*cm, fill=False,stroke=True),
        Label(text='xxxxx', top=13.6*cm, left=16.5*cm, style={'fontName': 'Candara-Bold'}),
        # Payment instructions footer text.
        Label(text =u"PAYMENT MODES: CHEQUE, BANK TRANSFER", width=BAND_WIDTH, top=14.5*cm, style={'fontName': 'Candara-Bold', 'fontSize': 12,}),
        Label(text =u"Payment to be done to the account name and number shown below, Currency in Rwandan Francs", width=BAND_WIDTH, top=15*cm,),
        Label(text =u"Account name: INTOUCH COMMUNICATIONS LTD", width=BAND_WIDTH, top=15.7*cm,),
        # Label(text = 'Account name: INTOUCH COMMUNICATIONS LTD', top=15.2*cm, style={'fontSize': 12,}),
        Label(text =u"Bank Name: KCB", top=16.1*cm,),
        Label(text =u"Branch: Remera Branch", width=BAND_WIDTH, top=16.5*cm,),
        Label(text =u"Account No.:4401710142", width=BAND_WIDTH, top=16.9*cm,),
        Label(text =u"Currency: RWANDAN FRANCS", width=BAND_WIDTH, top=17.3*cm,),
        # Label(text='xxxxx', top=11.2*cm, left=24.2*cm, style={'fontName': 'Candara-Bold'}),
    ]
class band_page_header(ReportBand):
    """
    Page header: company logo on the left, contact details right-aligned.
    """
    height = 0.5*cm
    elements = [
        Image(left=0*cm, top=0.1*cm, width=2.4*cm, height=5.18*cm, filename=os.path.join(cur_dir, '../main/static/images/intouchlogo.png')),
        # Company wordmark next to the logo.
        Label(text='<b>intouch</b>', top=0.8*cm, left=1.4*cm, width=BAND_WIDTH,style={'alignment':TA_LEFT,'fontName': 'Candara-Bold','fontSize':18}),
        Label(text='<b>COMMUNICATIONS</b>', top=1.5*cm, left=1.4*cm, width=BAND_WIDTH,style={'alignment':TA_LEFT,'fontName': 'Candara-Bold','fontSize':10}),
        # Address block, right-aligned.
        Label(text='<b>Intouch Communications Ltd</b>', top=2.2*cm, width=BAND_WIDTH, style={'alignment': TA_RIGHT,'fontName': 'Candara-Bold','fontSize':9}),
        Label(text='<b>3rd Floor, Prince House, Remera</b>', top=2.6*cm, width=BAND_WIDTH, style={'alignment': TA_RIGHT,'fontName': 'Candara-Bold', 'fontSize':9}),
        Label(text='<b>Tel 1: +(250)-788-304-441,</b>', top=3*cm, width=BAND_WIDTH, style={'alignment': TA_RIGHT, 'fontName': 'Candara-Bold', 'fontSize':9}),
        # NOTE(review): second number is also labelled 'Tel 1' — presumably 'Tel 2'; confirm.
        Label(text='<b>Tel 1: +(250)-785-971-082</b>', top=3.4*cm, width=BAND_WIDTH, style={'alignment': TA_RIGHT, 'fontName': 'Candara-Bold', 'fontSize':9}),
        Label(text='<b>TIN: 102830733</b>', top=3.8*cm, width=BAND_WIDTH, style={'alignment': TA_RIGHT, 'fontName': 'Candara-Bold', 'fontSize':9}),
        # Label(text='<b>Intouch Communications Ltd</b>', top=0.2*cm, width=BAND_WIDTH,style={'alignment':TA_RIGHT,'fontName': 'Candara-Bold','fontSize':9}),
        # Label(text='<b>3rd Floor, Prince House, Remera</b>', top=0.6*cm, width=BAND_WIDTH, style={'alignment':TA_RIGHT, 'fontName': 'Candara-Bold', 'fontSize':9}),
        # Label(text='<b>Tel 1: +(250)-788-304-441,</b>', top=1*cm, width=BAND_WIDTH, style={'alignment':TA_RIGHT, 'fontName': 'Candara-Bold', 'fontSize':9}),
        # Label(text='<b>Tel 1: +(250)-785-971-082</b>', top=1.4*cm, width=BAND_WIDTH, style={'alignment':TA_RIGHT, 'fontName': 'Candara-Bold', 'fontSize':9}),
        # Label(text='<b>TIN: 102830733</b>', top=1.8*cm, width=BAND_WIDTH, style={'alignment':TA_RIGHT, 'fontName': 'Candara-Bold', 'fontSize':9}),
        # SystemField(expression= '%(report_title)s', top=0.1*cm, left=0, width=BAND_WIDTH,
        # style={'fontName' : 'Helvetica', 'fontSize' : 10, 'alignment' : TA_CENTER}
        # )
    ]
# class band_page_footer(ReportBand):
# height = 3*cm
# elements = [
# Label(text = '<b>PAYMENT MODES: CHEQUE, BANK TRANSFER</b>', width=BAND_WIDTH, top=0.1*cm, style={'fontName': 'Candara-Bold', 'fontSize': 12,}),
# Label(text = 'Payment to be done to the account name and number shown below, Currency in Rwandan Francs', width=BAND_WIDTH, top=0.5*cm, style={'fontSize':12, 'fontName': 'Calibri'}),
# Label(text = 'Account name: INTOUCH COMMUNICATIONS LTD', width=BAND_WIDTH, top=1.2*cm, style={'fontSize':12, 'fontName': 'Calibri'}),
# Label(text = 'Bank Name: KCB', width=BAND_WIDTH, top=1.7*cm, style={'fontSize':10, 'fontName': 'Calibri'}),
# Label(text = 'Branch: Remera', width=BAND_WIDTH, top=2.1*cm, style={'fontSize':10, 'fontName': 'Calibri'}),
# Label(text = 'Account No.:4401710142', width=BAND_WIDTH, top=2.5*cm, style={'fontSize':10, 'fontName': 'Calibri'}),
# Label(text = 'Currency: RWANDAN FRANCS', width=BAND_WIDTH, top=2.9*cm, style={'fontSize':10, 'fontName': 'Calibri'}),
# # SystemField(expression='Printed in %(now:%Y, %b %d)s at%(now:%H:%M)s', top=0.1*cm,
# # width=BAND_WIDTH, style={'alignment': TA_RIGHT}), was for time printed
# # SystemField(expression=u'Page %(page_number)d of %(page_count)d', top=0.1*cm,
# # width=BAND_WIDTH, style= {'alignment': TA_RIGHT, 'fontName' : 'Helvetica',
# # 'fontSize' : 10},),
# ]
# borders = {'top': True}
class band_detail(ReportBand):
height = 1.1*cm
# auto_expand_height = True
elements = [
# Label(text='Invoice Details', top=9.3*cm, left=10.3*cm, style={'fontName': 'Candara-Bold', 'alignment': TA_CENTER}),
Rect(left=0*cm, top=10.3*cm, width=1.5*cm, height=1.1*cm,fill=False,stroke=True),
ObjectValue(attribute_name='number', top=10.6*cm, left=0.5*cm),
# Rect(left=2.7*cm, top=9.3*cm, width=3.5*cm, height=0.7*cm, fill=False,stroke=True),
Rect(left=1.5*cm, top=10.3*cm, width=7.8*cm, height=1.1*cm,fill=False,stroke=True),
ObjectValue(attribute_name='Item_desc', top=10.6*cm, left=1.7*cm, width=BAND_WIDTH),
Rect(left=9.3*cm, top=10.3*cm, width=2.5*cm, height=1.1*cm,fill=False,stroke=True),
ObjectValue(attribute_name='Quantity', top=10.6*cm, left=10.4*cm, style={'alignment':TA_JUSTIFY,}),
Rect(left=11.8*cm, top=10.3*cm, width=4.1*cm, height=1.1*cm, fill=False,stroke=True),
ObjectValue(attribute_name='unitPrice_rwf', top=10.6*cm, left=12.5*cm, | |
tuple(slc_tuple)
slice_list.append(slc_tuple)
return tuple(slice_list)
def calc_matching_peer_rank_slices(out_slice, inp_arys):
    """
    For each input array in :samp:`{inp_arys}`, calculates the portion
    which broadcasts to the :samp:`{out_slice}`.

    Returns :obj:`tuple` with one entry per array/scalar element
    of :samp:`{inp_arys}`: a :obj:`tuple` of :obj:`slice` for array
    inputs (size-one dimensions map to :samp:`slice(0, 1)`), or
    :samp:`None` for scalar inputs.

    Assumes :samp:`len({out_slice}) >= {inp_arys}[i].ndim`
    for :samp:`i in range(0, len({inp_arys})`.

    :type out_slice: :obj:`tuple` of :obj:`slice`
    :param out_slice: Slice indicating a portion (sub-array) of an output array.
    :type inp_arys: Sequence of :obj:`numpy.ndarray`
    :param inp_arys: The ufunc input arrays.
    """
    slice_list = []
    for inp_ary in inp_arys:
        if (not hasattr(inp_ary, "ndim")) or (inp_ary.ndim < 1):
            # Scalar input: broadcasts everywhere, no slicing required.
            slice_list.append(None)
            continue
        inp_shape = _np.array(inp_ary.shape)
        slc_start = _np.zeros_like(inp_shape)
        slc_stop = slc_start + inp_shape
        ndim = len(inp_shape)
        # Broadcasting aligns shapes from the right, so the input's
        # dimension d corresponds to out_slice[offset + d].
        offset = len(out_slice) - ndim
        per_dim_slices = []
        for d in range(ndim):
            if inp_shape[d] == 1:
                # Broadcast dimension: always take the single element.
                slc_start[d] = 0
                slc_stop[d] = 1
            else:
                slc_start[d] = out_slice[offset + d].start
                slc_stop[d] = out_slice[offset + d].stop
            per_dim_slices.append(slice(slc_start[d], slc_stop[d]))
        slice_list.append(tuple(per_dim_slices))
    return tuple(slice_list)
def convert_to_array_like(inputs):
    """
    Uses :obj:`numpy.asanyarray` to convert input ufunc arguments
    to array-like objects.

    :type inputs: sequence of :obj:`object`
    :param inputs: Elements of this sequence which do not have both
       :samp:`"shape"` and :samp:`"ndim"` attributes are converted
       using :obj:`numpy.asanyarray`; elements which already have both
       attributes are passed through unchanged (identical object).
    :rtype: sequence of :obj:`object`
    :return: Tuple where elements of :samp:`{inputs}` have been converted
       to array-like objects.

    Example::

       >>> import numpy as np
       >>> inputs = (np.array([1, 2, 3, 4], dtype="uint8"), 32.0, [[1, 2], [3, 4], [5, 6]])
       >>> converted = convert_to_array_like(inputs)
       >>> converted[0] is inputs[0]
       True
       >>> converted[1] is inputs[1]
       False
    """
    def _as_array_like(obj):
        # Anything that already quacks like an ndarray is kept as-is.
        if hasattr(obj, "shape") and hasattr(obj, "ndim"):
            return obj
        return _np.asanyarray(obj)

    return tuple(_as_array_like(obj) for obj in inputs)
def check_equivalent_inter_locale_comms(
    gndarrays,
    equivalent_compare=(_mpi.IDENT, _mpi.CONGRUENT)
):
    """
    Checks that all the :obj:`mpi_array.globale.gndarray` elements
    of :samp:`{gndarrays}` have equivalent inter-locale communicators.

    The first array's communicator is the reference: every other array
    must agree with it on null-ness, and when both are non-null their
    :samp:`_mpi.Comm.Compare` result must be one
    of :samp:`{equivalent_compare}`.

    :raises ValueError: if the arrays do not have equivalent inter-locale
       communicators.
    """
    if (gndarrays is None) or (len(gndarrays) <= 0):
        return
    ref_comm = gndarrays[0].locale_comms.inter_locale_comm
    ref_is_null = (ref_comm == _mpi.COMM_NULL)
    for gndary in gndarrays[1:]:
        comm = gndary.locale_comms.inter_locale_comm
        comm_is_null = (comm == _mpi.COMM_NULL)
        # Compare is only evaluated when both communicators agree on
        # null-ness (short-circuit of the "or" below).
        if (
            (comm_is_null != ref_is_null)
            or
            (_mpi.Comm.Compare(ref_comm, comm) not in equivalent_compare)
        ):
            raise ValueError(
                (
                    "Got inter_locale_comm=%s (name=%s) non-congruent with "
                    " inter_locale_comm=%s (name=%s)."
                )
                %
                (
                    ref_comm,
                    ref_comm.name if ref_comm != _mpi.COMM_NULL else "",
                    comm,
                    comm.name if comm != _mpi.COMM_NULL else ""
                )
            )
class GndarrayArrayUfuncExecutor(object):
"""
Instances execute a ufunc for a :obj:`mpi_array.globale.gndarray`.
Takes care of creating outputs, remote fetching of required parts of inputs
and forwarding call to :obj:`numpy.ufunc` instance to perform
the computation on the locale :obj:`numpy.ndarray` instances.
"""
def __init__(self, array_like_obj, ufunc, method, *inputs, **kwargs):
    """
    Initialise.

    :type array_like_obj: :obj:`mpi_array.globale.gndarray`
    :param array_like_obj: The :obj:`mpi_array.globale.gndarray` which
       triggered the :samp:`__array_ufunc__` call.
    :type ufunc: :obj:`numpy.ufunc`
    :param ufunc: The ufunc to be executed.
    :type method: :obj:`str`
    :param method: The name of the method of :samp:`{ufunc}` which is
       to be executed.
    :type inputs: array like
    :param inputs: The ufunc inputs.
    :type kwargs: keyword args
    :param kwargs: The ufunc keyword arguments (e.g. :samp:`out`,
       :samp:`casting`).
    """
    self._array_like_obj = array_like_obj
    self._ufunc = ufunc
    self._method = method
    # Scalars and nested sequences are converted to arrays up-front.
    self._inputs = convert_to_array_like(inputs)
    self._kwargs = kwargs
    # Pre-allocated output arrays, when the caller supplied "out".
    self._outputs = kwargs.get("out")
    # Fall back to "same_kind" casting when none was requested.
    self._casting = kwargs.get("casting", "same_kind")
@property
def array_like_obj(self):
    """
    The :obj:`mpi_array.globale.gndarray` object which triggered the
    construction of this :obj:`GndarrayArrayUfuncExecutor` object.
    """
    return self._array_like_obj

@property
def peer_comm(self):
    """
    The peer :obj:`mpi4py.MPI.Comm` communicator
    (taken from the triggering array's :samp:`locale_comms`).
    """
    return self._array_like_obj.locale_comms.peer_comm

@property
def intra_locale_comm(self):
    """
    The intra-locale :obj:`mpi4py.MPI.Comm` communicator
    (taken from the triggering array's :samp:`locale_comms`).
    """
    return self._array_like_obj.locale_comms.intra_locale_comm

@property
def inter_locale_comm(self):
    """
    The inter-locale :obj:`mpi4py.MPI.Comm` communicator
    (taken from the triggering array's :samp:`locale_comms`).
    """
    return self._array_like_obj.locale_comms.inter_locale_comm

@property
def ufunc(self):
    """
    The :obj:`numpy.ufunc` to be executed.
    """
    return self._ufunc

@property
def outputs(self):
    """
    The ufunc :obj:`mpi_array.globale.gndarray` output arrays
    (the :samp:`out` keyword argument, or :samp:`None` if not given).
    """
    return self._outputs

@property
def inputs(self):
    """
    The sequence of ufunc inputs (converted to array-like objects).
    """
    return self._inputs

@property
def casting(self):
    """
    A :obj:`str` indicating the casting mode (defaults to "same_kind").
    """
    return self._casting

@property
def method(self):
    """
    A :obj:`str` indicating the method of the :attr:`ufunc` to be executed.
    """
    return self._method
def get_inputs_shapes(self):
    """
    Returns a *shape* :obj:`tuple` for each element of :attr:`inputs`.

    Inputs without a :samp:`shape` attribute are converted
    with :obj:`numpy.asarray` first, so plain scalars yield the empty
    shape :samp:`()`.

    :rtype: :obj:`tuple`
    :return: Shape of each ufunc input.
    """
    shapes = []
    for inp in self._inputs:
        if hasattr(inp, "shape"):
            shapes.append(inp.shape)
        else:
            shapes.append(_np.asarray(inp).shape)
    return tuple(shapes)
def get_best_match_input(self, result_shape):
    """
    Returns the element of :attr:`inputs` whose globale shape
    best matches :samp:`{result_shape}`.

    The first input whose shape equals :samp:`{result_shape}` exactly
    wins outright.  Otherwise all input shapes are dimension-extended
    to :samp:`len({result_shape})` and the input with the smallest
    squared per-dimension difference is chosen.

    :rtype: :samp:`None` or :obj:`mpi_array.globale.gndarray`.
    :return: The input array whose shape matches :samp:`{result_shape}`,
       or :samp:`None` if none of the inputs are a good match.
    """
    result_shape = _np.array(result_shape, dtype="int64")
    input_shapes = self.get_inputs_shapes()
    # Exact-shape match: return the first input whose shape equals
    # result_shape.
    for idx, in_shape in enumerate(input_shapes):
        if (len(in_shape) == len(result_shape)) and _np.all(result_shape == in_shape):
            return self._inputs[idx]
    # No exact match: minimise the squared difference between each
    # (dimension-extended) input shape and result_shape.
    extended_shapes = \
        _np.array(
            tuple(
                _np.array(shape_extend_dims(len(result_shape), in_shape))
                for in_shape in input_shapes
            ),
            dtype="int64"
        )
    sq_diff = (extended_shapes - result_shape) ** 2
    return self._inputs[_np.argmin(sq_diff.sum(axis=1))]
def create_outputs(self, outputs, result_shape, result_types):
    """
    Returns list of output :obj:`mpi_array.globale.gndarray` instances.

    :type outputs: :samp:`None` or :obj:`tuple` of :obj:`mpi_array.globale.gndarray`
    :param outputs: Output arrays passed in as the :samp:`out` argument
       of the :obj:`numpy.ufunc`.
    :type result_shape: sequence of :obj:`int`
    :param result_shape: The shape of all output arrays.
    :type result_types: sequence of :samp:`numpy.dtype`
    :param result_types: The :samp:`dtype` of each output array. Note
       that this is the list for all outputs including any
       in the :samp:`outputs` argument. This determines the
       number of output arrays.
    :rtype: :obj:`list` of :obj:`mpi_array.globale.gndarray`
    :return: A list of length :samp:`len(result_types)` elements,
       each element is a :obj:`mpi_array.globale.gndarray`.
    """
    # A "template" array provides the distribution/communicators used
    # to allocate any additionally required outputs.
    template_output_gary = None
    if (outputs is not None) and (len(outputs) > 0):
        # Caller supplied "out" arrays: the last one becomes the template.
        check_equivalent_inter_locale_comms(outputs)
        template_output_gary = outputs[-1]
    else:
        # No "out" given: allocate a first output, preferring the
        # distribution of the best shape-matching input (reshaped to
        # result_shape) when available.
        best_match_input = self.get_best_match_input(result_shape)
        comms_distrib = None
        if best_match_input is not None:
            comms_distrib = \
                _comms.reshape_comms_distribution(
                    best_match_input.comms_and_distrib,
                    result_shape
                )
        if comms_distrib is not None:
            template_output_gary = \
                _globale_creation.empty(
                    result_shape,
                    comms_and_distrib=comms_distrib,
                    dtype=result_types[0]
                )
        else:
            # Fall back to this executor's own communicators.
            template_output_gary = \
                _globale_creation.empty(
                    result_shape,
                    dtype=result_types[0],
                    peer_comm=self.peer_comm,
                    intra_locale_comm=self.intra_locale_comm,
                    inter_locale_comm=self.inter_locale_comm
                )
        outputs = (template_output_gary,)
    # Top up with empty_like copies of the template until there is one
    # output per entry of result_types.
    outputs = \
        (
            outputs
            +
            tuple(
                _globale_creation.empty_like(template_output_gary, dtype=result_types[i])
                for i in range(len(outputs), len(result_types))
            )
        )
    return outputs
def get_input_extents(self, locale_info):
    """
    Returns tuple of :samp:`(locale_extent, globale_extent)` pairs,
    one for each of the :attr:`inputs`.

    :type locale_info: :obj:`mpi_array.comms.ThisLocaleInfo`
    :param locale_info: The rank info required for constructing
       a :obj:`mpi_array.distribution.LocaleExtent` instance
       for :samp:`input` types which are not :obj:`mpi_array.globale.gndarray`.
    :rtype: :obj:`tuple`
    :return: Pairs which indicate the locale extent of the ufunc :attr:`inputs`.

    .. seealso:: :func:`get_extents`
    """
    extent_pairs = []
    for inp in self.inputs:
        extent_pairs.append(get_extents(inp, locale_info))
    return tuple(extent_pairs)
def get_numpy_ufunc_peer_rank_inputs_outputs(self, gndarray_outputs):
"""
Returns two element tuple of :samp:`(input_arrays, output_arrays)` which
are to be passed to the :obj:`numpy.ufunc` object :attr:`ufunc`.
:type gndarray_outputs: sequence of :obj:`mpi_array.globale.gndarray`
:param gndarray_outputs: The output arrays. All arrays should be the
same shape and same distribution.
:rtype: :samp:`None` or :obj:`tuple`
:return: A tuple :samp:`(input_arrays, output_arrays)` of inputs and
outputs which are to be passed to :obj:`numpy.ufunc` call.
Returns :samp:`None` if the output locale extents are empty (i.e. no
array elements to compute on this locale).
"""
# First fetch/slice the parts of the input required for the locale extent
out_gndarray = gndarray_outputs[0]
out_globale_extent = out_gndarray.distribution.globale_extent
out_locale_extent = out_gndarray.lndarray_proxy.locale_extent
ret = None
if _np.product(out_locale_extent.shape_n) > 0:
inp_locale_extents = \
self.get_input_extents(out_gndarray.comms_and_distrib.this_locale)
inp_locale_slices = \
calc_matching_locale_slices(
out_locale_extent,
out_globale_extent,
inp_locale_extents
)
inp_locale_arys = [None, ] * len(self.inputs)
for i in range(len(self.inputs)):
input = self.inputs[i]
slice_tuple = inp_locale_slices[i]
if slice_tuple is not None:
if hasattr(input, "locale_get"):
# is a gndarray
inp_locale_arys[i] = input.locale_get(slice_tuple)
else:
# is a numpy array (or similar)
inp_locale_arys[i] = input[slice_tuple]
else:
# is a scalar
inp_locale_arys[i] = input
# Now slice the locale input arrays to match the peer-rank portions of the output.
out_peer_rank_slice = out_gndarray.lndarray_proxy.intra_partition.rank_view_slice_n
out_peer_rank_slice = out_locale_extent.locale_to_globale_slice_h(out_peer_rank_slice)
out_peer_rank_slice = out_locale_extent.globale_to_locale_slice_n(out_peer_rank_slice)
inp_peer_rank_slices = calc_matching_peer_rank_slices(
out_peer_rank_slice, inp_locale_arys)
inp_peer_rank_arys = [None, ] * len(inp_locale_arys)
for i in range(len(inp_locale_arys)):
input = inp_locale_arys[i]
slice_tuple = inp_peer_rank_slices[i]
if slice_tuple is not None:
# is a numpy array (or similar)
inp_peer_rank_arys[i] = input[slice_tuple]
else:
# is a scalar
inp_peer_rank_arys[i] = input
ret = \
(
tuple(inp_peer_rank_arys),
tuple(
| |
def __mul__(self, o):
    # Concrete product, paired with a symbolic multiplication node.
    res = self.__v * o
    return concolic_int(sym_mul(ast(self), ast(o)), res)

def __div__(self, o):
    # Python 2 '/' hook; mirrors __mul__ with a symbolic division node.
    res = self.__v / o
    return concolic_int(sym_div(ast(self), ast(o)), res)

def _sym_ast(self):
    # The symbolic AST node backing this concolic value.
    return self.__sym
class concolic_str(str):
    """
    A :class:`str` subclass pairing a concrete string value (``v``)
    with a symbolic expression (``sym``).  String operations return
    concolic results built from the ``sym_*`` helper constructors.

    NOTE: Python 2 code — relies on the ``unicode`` builtin.
    """

    def __new__(cls, sym, v):
        # str is immutable, so the concrete/symbolic state must be
        # attached in __new__ rather than __init__.
        assert type(v) == str or type(v) == unicode
        self = super(concolic_str, cls).__new__(cls, v)
        self.__v = v
        self.__sym = sym
        return self

    def concrete_value(self):
        # The plain (non-symbolic) string value.
        return self.__v

    def __eq__(self, o):
        # Non-string operands are never equal (no symbolic comparison).
        if not isinstance(o, str) and not isinstance(o, unicode):
            return False
        if isinstance(o, concolic_str):
            res = (self.__v == o.__v)
        else:
            res = (self.__v == o)
        return concolic_bool(sym_eq(ast(self), ast(o)), res)

    def __ne__(self, o):
        # NOTE(review): `not` collapses the concolic result of __eq__
        # to a plain bool — confirm this is intended.
        return not self.__eq__(o)

    def __add__(self, o):
        if isinstance(o, concolic_str):
            res = self.__v + o.__v
        else:
            res = self.__v + o
        return concolic_str(sym_concat(ast(self), ast(o)), res)

    def __radd__(self, o):
        # Right-hand concatenation: o + self.
        res = o + self.__v
        return concolic_str(sym_concat(ast(o), ast(self)), res)

    def __len__(self):
        res = len(self.__v)
        return concolic_int(sym_length(ast(self)), res)

    def __contains__(self, o):
        res = o in self.__v
        return concolic_bool(sym_contains(ast(self), ast(o)), res)

    def startswith(self, o):
        res = self.__v.startswith(o)
        return concolic_bool(sym_startswith(ast(self), ast(o)), res)

    def endswith(self, o):
        res = self.__v.endswith(o)
        return concolic_bool(sym_endswith(ast(self), ast(o)), res)
def __getitem__(self, i):
res = self.__v[i]
return concolic_str(sym_substring(ast(self), ast(i), ast(1)), res)
def __getslice__(self, i, j):
if j == 9223372036854775807 or j == 2147483647:
## Python passes in INT_MAX when there's no upper bound.
## Unfortunately, this differs depending on whether you're
## running in a 32-bit or a 64-bit system.
j = self.__len__()
res = self.__v[i:j]
return concolic_str(sym_substring(ast(self), ast(i), ast(j-i)), res)
def find(self, ch):
res = self.__v.find(ch)
return concolic_int(sym_indexof(ast(self), ast(ch)), res)
def decode(self, encoding = sys.getdefaultencoding(), errors = 'strict'):
## XXX hack: we restrict z3str to just 7-bit ASCII (see call to
## setAlphabet7bit) and then pretend that str and unicode objects
## are the same.
return self
def encode(self, encoding = sys.getdefaultencoding(), errors = 'strict'):
## XXX same hack as for decode().
return self
def __unicode__(self):
## XXX same hack as for decode().
return self
def lstrip(self, chars = ' \t\n\r'):
for ch in chars:
if self.startswith(chars):
return self[1:].lstrip(chars)
return self
def rsplit(self, sep = None, maxsplit = -1):
if maxsplit != 1 or type(sep) != str:
return self.__v.rsplit(sep, maxsplit)
name = 'rsplit_%s_%s' % (self.__sym, sep)
l = mk_str(name + '_l')
r = mk_str(name + '_r')
if l + sep + r != self:
require(sep not in self)
return self
require(sep not in l)
require(sep not in r)
return (l, r)
def upper(self):
## XXX an incorrect overloading that gets us past werkzeug's use
## of .upper() on the HTTP method name..
return self
def _sym_ast(self):
return self.__sym
## Override some builtins..
## Replace the global len() so concolic strings report a concolic length
## (their __len__ returns a concolic_int); everything else keeps the
## original builtin behaviour.
old_len = __builtin__.len
def xlen(o):
    return o.__len__() if isinstance(o, concolic_str) else old_len(o)
__builtin__.len = xlen
## Track inputs that should be tried later
class InputQueue(object):
def __init__(self):
## "inputs" is a priority queue storing inputs we should try.
## The inputs are stored as a dictionary, from symbolic variable
## name to the value we should try. If a value is not present,
## mk_int() and mk_str() below will pick a default value. Each
## input also has a priority (lower is "more important"), which
## is useful when there's too many inputs to process.
self.inputs = Queue.PriorityQueue()
self.inputs.put((0, {'values': {}, 'path_condition': None}))
self.input_history = []
## "branchcount" is a map from call site (filename and line number)
## to the number of branches we have already explored at that site.
## This is used to choose priorities for inputs.
self.branchcount = collections.defaultdict(int)
def empty(self):
return self.inputs.empty()
def get(self):
(prio, values) = self.inputs.get()
return (values['values'], values['path_condition'])
def add(self, new_values, caller, path_condition, uniqueinputs = False):
if uniqueinputs:
if self.check_input_history(new_values):
if verbose > 1:
print "SKIPPING INPUT"
return
prio = self.branchcount[caller[0]]
self.branchcount[caller[0]] += 1
self.inputs.put((prio, {'values': new_values, 'path_condition': path_condition}))
if uniqueinputs:
self.input_history.append((prio, new_values))
def check_input_history(self, new_values):
## Return True if new_values has been added to the input queue before.
for (prio, values) in self.input_history:
if self.value_dicts_match(values, new_values):
return True
return False
def value_dicts_match(self, old_values, new_values):
if len(old_values) != len(new_values):
return False
if len(old_values) == 0:
return True
for k in old_values:
if k not in new_values:
return False
if old_values[k] != new_values[k]:
return False
return True
## Actual concolic execution API
## Maps symbolic variable name -> concrete value for the current run;
## rebound by concolic_test() before each execution.
concrete_values = {}
def mk_int(id, value = 0):
    """Create a concolic int named *id*, registering a default concrete value."""
    return concolic_int(sym_int(id), concrete_values.setdefault(id, value))
def mk_str(id, value = ''):
    """Create a concolic str named *id*, registering a default concrete value."""
    return concolic_str(sym_str(id), concrete_values.setdefault(id, value))
verbose = 0
def concolic_test(testfunc, maxiter = 100, v = 0,
uniqueinputs = True,
removeredundant = True,
usecexcache = True):
# globally available 'verbose' flag
verbose = v
## "checked" is the set of constraints we already sent to Z3 for
## checking. use this to eliminate duplicate paths.
checked_paths = set()
## list of inputs we should try to explore.
inputs = InputQueue()
## cache of solutions to previously checked path conditions,
## or lack thereof, being a counterexample.
## a dictionary that maps path conditions to value assignments.
cexcache = {}
iter = 0
while iter < maxiter and not inputs.empty():
iter += 1
global concrete_values
global path_condition
(concrete_values, path_condition) = inputs.get()
global cur_path_constr, cur_path_constr_callers
cur_path_constr = []
cur_path_constr_callers = []
if verbose > 0:
# print 'Trying concrete values:', ["%s = %s" % (k, concrete_values[k]) for k in concrete_values if not k.startswith('_t_')]
print 'Trying concrete values:', ["%s = %s" % (k, concrete_values[k]) for k in concrete_values]
try:
testfunc()
except RequireMismatch:
pass
if verbose > 1:
print 'Test generated', len(cur_path_constr), 'branches:'
for (c, caller) in zip(cur_path_constr, cur_path_constr_callers):
if verbose > 2:
print indent(z3expr(c, True)), '@'
for c in caller:
print indent(indent('%s:%d' % (c[0], c[1])))
else:
print indent(z3expr(c, True)), '@', '%s:%d' % (caller[0][0], caller[0][1])
## for each branch, invoke Z3 to find an input that would go
## the other way, and add it to the list of inputs to explore.
partial_path = []
for (branch_condition, caller) in \
zip(cur_path_constr, cur_path_constr_callers):
## Identify a new branch forked off the current path,
## but skip it if it has been solved before.
if removeredundant:
new_branch = extend_and_prune(partial_path, sym_not(branch_condition))
partial_path = extend_and_prune(partial_path, branch_condition)
else:
new_branch = partial_path + [sym_not(branch_condition)]
partial_path = partial_path + [branch_condition]
new_path_condition = sym_and(*new_branch)
if new_path_condition in checked_paths:
continue
## Solve for a set of inputs that goes down the new branch.
## Avoid solving the branch again in the future.
(ok, model) = (None, None)
if usecexcache:
(ok, model) = check_cache(new_path_condition, cexcache)
if ok != None:
if verbose > 1:
print "USED CEXCACHE"
else:
(ok, model) = fork_and_check(new_path_condition)
else:
(ok, model) = fork_and_check(new_path_condition)
checked_paths.add(new_path_condition)
## If a solution was found, put it on the input queue,
## (if it hasn't been inserted before).
if ok == z3.sat:
new_values = {}
for k in model:
if k in concrete_values:
new_values[k] = model[k]
inputs.add(new_values, caller, new_path_condition, uniqueinputs)
if usecexcache:
cexcache[new_path_condition] = new_values
else:
if usecexcache:
cexcache[new_path_condition] = None
if verbose > 0:
print 'Stopping after', iter, 'iterations'
def check_cache(path_condition, cache):
    """Consult the counterexample cache for a previously solved query.

    Returns (ok, model) where:
      * (z3.unsat, None) -- an unsatisfiable cached path is a subset of
        path_condition, so path_condition is unsatisfiable too;
      * (z3.sat, model)  -- a satisfiable cached path is a superset of
        path_condition, so its model also satisfies path_condition;
      * (None, None)     -- the cache is inconclusive.
    """
    for (old_path, model) in cache.items():
        if model is None and issubset(old_path.args, path_condition.args):
            return (z3.unsat, None)
        if model is not None and issubset(path_condition.args, old_path.args):
            return (z3.sat, model)
    return (None, None)
def issubset(candidate_set, context_set):
    """Return True iff every element of candidate_set occurs in context_set.

    Uses linear membership (==) rather than hashing, since the elements
    here are typically z3 expression arguments.
    """
    return all(elem in context_set for elem in candidate_set)
def extend_and_prune(partial_path, branch_condition):
branch_condition = simplify(branch_condition)
branch_condition = simplify_StartsWith(branch_condition)
## Remove any constraints in partial_path that are
## implied by branch_condition.
prune_set = []
for constraint in partial_path:
# resultZ3 = Z3implies(branch_condition, constraint)
# result = implies(branch_condition, constraint)
# if resultZ3 and not result:
# print "MISSED IMPLICATION"
# print " ", branch_condition
# print " ", constraint
# if not resultZ3 and result:
# print "FALSE IMPLICATION"
# print " ", branch_condition
# print " ", constraint
if | |
# paperII/abundace_evolution.py
from galaxy_analysis.plot.plot_styles import *
import numpy as np
import matplotlib.pyplot as plt
import deepdish as dd
import h5py, glob, sys
from galaxy_analysis.utilities import utilities
from galaxy_analysis.analysis import Galaxy
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.stats import binned_statistic_2d
# temporary
import time as cpu_time
def _load_time_series(data, data_list, field_path):
    """Load one statistic across all outputs as a float ndarray.

    Thin wrapper around utilities.get_property for the self-contained
    (single HDF5 file) layout used by these plotting routines.
    """
    vals = utilities.get_property(field_path,
                                  file_list = data.filename,
                                  data_list = data_list,
                                  self_contained = True)
    ## np.float was removed in NumPy >= 1.20; the builtin float is the
    ## exact alias it used to be.
    return np.array(vals, dtype = float)

def load_abundance_data(data, data_list,
                        fields,
                        property_list,
                        phases = ['CNM','WNM','WIM','HIM'],
                        field_types = None):
    """Assemble time series of abundance statistics per field and phase.

    Returns time_data[field][phase][property] as float arrays.  For
    '_Fraction' fields the medians/means are converted to log10, and the
    spread statistics (IQR, inner decile range) are computed in dex.

    field_types may be None (inferred from the field name), a dict keyed
    by field, or a sequence parallel to fields.

    Raises RuntimeError when a field's type cannot be inferred.
    """
    time_data = {}
    for i, field in enumerate(fields):
        time_data[field] = {}
        ## Figure out which top-level HDF5 group holds this field.
        if field_types is None:
            if '_Fraction' in field or '_over_' in field:
                ft = 'mass_fraction'
            else:
                print("Cannot properly determine field type for " + field)
                ## BUG FIX: this used to raise the misspelled name
                ## 'RunTimeError', which is itself a NameError.
                raise RuntimeError
        elif hasattr(field_types, 'keys'):
            ft = field_types[field]
        else:
            ft = field_types[i]
        ## Mass fractions are handled in log10 (dex); ratios stay linear.
        is_log = '_Fraction' in field
        for phase in phases:
            time_data[field][phase] = {}
            for property in property_list:
                if property == 'IQR' or property == 'inner_quartile_range':
                    for p in ['Q1','Q3']:
                        time_data[field][phase][p] = _load_time_series(
                            data, data_list, [phase,ft,field,p])
                    if is_log:  # do not do for abundance ratios
                        time_data[field][phase][property] = np.log10(time_data[field][phase]['Q3']) -\
                                                            np.log10(time_data[field][phase]['Q1'])
                    else:
                        time_data[field][phase][property] = time_data[field][phase]['Q3'] -\
                                                            time_data[field][phase]['Q1']
                elif property == 'mean_median_distance':
                    for p in ['median','mean']:
                        if not p in list(time_data[field][phase].keys()):
                            time_data[field][phase][p] = _load_time_series(
                                data, data_list, [phase,ft,field,p])
                    ## NOTE(review): if 'median' was log10'd by an earlier
                    ## property this subtracts log values; if it was loaded
                    ## fresh here it subtracts a raw median from log10(mean).
                    ## Preserved from the original -- confirm intended.
                    if is_log:
                        time_data[field][phase]['mean_median_distance'] =\
                            np.log10(time_data[field][phase]['mean']) -\
                            time_data[field][phase]['median']
                    else:
                        time_data[field][phase]['mean_median_distance'] = time_data[field][phase]['mean']-\
                                                                          time_data[field][phase]['median']
                elif property == 'inner_decile_range' or property == 'd9_d1_range':
                    for p in ['decile_1','decile_9']:
                        time_data[field][phase][p] = _load_time_series(
                            data, data_list, [phase,ft,field,p])
                    if is_log:
                        time_data[field][phase][property] = np.log10(time_data[field][phase]['decile_9']) -\
                                                            np.log10(time_data[field][phase]['decile_1'])
                    else:
                        time_data[field][phase][property] = time_data[field][phase]['decile_9']-\
                                                            time_data[field][phase]['decile_1']
                else:
                    ## Plain statistic: load directly, log-converting
                    ## central values of fraction fields.
                    time_data[field][phase][property] = _load_time_series(
                        data, data_list, [phase,ft,field,property])
                    if is_log:
                        if property in ['median','mean']:
                            time_data[field][phase][property] = np.log10(time_data[field][phase][property])
    return time_data
def plot_stellar_2d_hist(galaxy, field, time_bins = np.arange(0.0,20.0,0.2),
                         ybins = np.arange(-20,-6,0.1)):
    """2D histogram of a stellar abundance field versus formation time.

    Bins star particles by (creation time relative to the first star,
    field value) and plots log10 of the fraction of stars per bin.
    Saves 'stellar_O_2d_hist.png'.
    """
    if '_Fraction' in field:
        ## BUG FIX: str.strip removes a *character set*, not a suffix
        ## ('Ba_Fraction'.strip('_Fraction') == 'B'); remove the suffix.
        base = field.replace('_Fraction', '')
        yval = np.log10(galaxy.df[('io','particle_' + base + '_fraction')].value)
    else:
        yval = galaxy.df[('io','particle_' + field)].value
    creation_time = galaxy.df['creation_time'].to('Myr').value
    fig, ax = plt.subplots()
    fig.set_size_inches(8,8)
    ## Count stars per (time, abundance) bin.
    statistic_data = np.ones(np.size(creation_time))
    N, x_edge, y_edge, binnum = binned_statistic_2d(creation_time - np.min(creation_time), yval, statistic_data,
                                                    statistic = 'count', bins = (time_bins,ybins))
    ## log10 of the per-bin fraction; empty bins are flagged with -99 so
    ## they fall below vmin.  BUG FIX: the original recomputed an unmasked
    ## np.log10(N/...) afterwards, discarding this masking and emitting
    ## log10(0) warnings with -inf entries.
    fraction = N / (1.0 * np.size(creation_time))
    fraction[fraction <= 0] = -99
    fraction[fraction > 0] = np.log10(fraction[fraction > 0])
    plot_val = fraction
    xmesh, ymesh = np.meshgrid(x_edge, y_edge)
    img1 = ax.pcolormesh(xmesh, ymesh, plot_val.T,
                         cmap = 'magma', vmin = -4, vmax = -1)
    divider = make_axes_locatable(ax)
    cax1 = divider.append_axes('right', size = '5%', pad = 0.05)
    fig.colorbar(img1, cax=cax1, label = "Fraction")
    ax.set_xlabel(r'Time (Myr)')
    plt.minorticks_on()
    plt.tight_layout()
    fig.savefig('stellar_O_2d_hist.png')
    return
def plot_stellar_separation(time, data, galaxy,
                            field = 'O_Fraction',
                            property = 'median',
                            phases = ['CNM'],
                            figdim=None,
                            field_types = None,
                            labels = None, line_styles = None,
                            xlim = None, ylim = None,
                            annotate_text = None, ylabels = None):
    """Scatter each star's abundance offset from the gas-phase trend.

    For every star particle, plots (stellar value) minus the gas-phase
    'property' (e.g. the CNM median) interpolated to the star's formation
    time, versus formation time.  Saves 'stellar_distance_to_median.png'.

    BUG FIX: line_styles previously defaulted to a mutable {} (shared
    across calls); it is unused in this routine and kept only for
    call-site symmetry, as are xlim, ylim and annotate_text.
    """
    if labels is None:
        labels = {}
    ## Default every requested key to its own name.
    for k in phases + [field] + [property]:
        if not (k in list(labels.keys())):
            labels[k] = k
    if figdim is None:
        if len(phases) == 1:
            nrow, ncol = 1, 1
        else:
            ncol = 3
            nrow = 2
    else:
        nrow, ncol = figdim
    ## Outputs are the 'DD*' groups; truncate to the supplied time axis.
    data_list = np.array(np.sort([x for x in list(data.keys()) if 'DD' in x]))
    data_list = data_list[:len(time)]
    time_data = load_abundance_data(data, data_list, [field], [property],
                                    phases = phases, field_types = field_types)
    # now need to load the data output to get the stellar values
    if nrow*ncol > 1:
        fig, all_axes = plt.subplots(nrow,ncol,sharex=True,sharey=True)
        fig.set_size_inches(ncol*5, nrow*5)
        fig.subplots_adjust(hspace=0.0,wspace=0.0)
    else:
        fig, ax = plt.subplots()
        fig.set_size_inches(6,6)
    axi = axj = 0
    creation_time = galaxy.df['creation_time'].convert_to_units('Myr').value
    for i, phase in enumerate(phases):
        if nrow*ncol > 1:
            ax = all_axes[(axi,axj)]
        property_value = time_data[field][phase][property]
        if '_Fraction' in field:
            ## BUG FIX: str.strip removes a *character set*, not a suffix
            ## ('Ba_Fraction'.strip('_Fraction') == 'B'); remove the suffix.
            base = field.replace('_Fraction', '')
            star_val = np.log10(galaxy.df['particle_' + base + '_fraction'])
        else:
            star_val = galaxy.df['particle_' + field]
        select = creation_time > 0.0 #do all for now (np.min(creation_time) + 100.0)
        ## Offset of each star from the gas trend at its formation time.
        distance = star_val[select] - np.interp(creation_time[select],time,property_value)
        ax.scatter(creation_time[select] - np.min(creation_time),
                   distance, alpha = 0.75, color = 'black', s = 20)
        ax.set_xlim(0.0, 500.0)
        ax.set_ylim(-2.5,2.5)
        ax.plot( ax.get_xlim(), [0.0,0.0], color = 'black', lw = line_width, ls = '--')
        plt.minorticks_on()
        xy = (200.0, ax.get_ylim()[1] - 0.35)
        ## Annotate using only stars formed after the first 120 Myr.
        ## NOTE(review): 'distance' was built with the earlier ct>0 mask;
        ## indexing it with this full-length mask only lines up when every
        ## creation_time is positive -- confirm.
        select = creation_time > (np.min(creation_time) + 120.0)
        ax.annotate("Median Dist. = %.2f"%(np.median(np.abs(distance[select]))), xy=xy,xytext=xy)
        xy = (200.0, ax.get_ylim()[1] - 0.7)
        q1, q3 = np.percentile(np.abs(distance[select]), [25,75])
        print(q3, q1, q3-q1, np.size(distance[select][distance[select]<0])/(1.0*np.size(distance[select])), np.size(distance[select][distance[select]>0]))
        ax.annotate("IQR = %.2f"%(q3-q1), xy=xy,xytext=xy)
        axj = axj + 1
        if axj >= ncol:
            axj = 0
            axi = axi + 1
    if nrow*ncol>1:
        for i in np.arange(ncol):
            all_axes[(nrow-1,i)].set_xlabel(r'Time (Myr)')
        for i in np.arange(nrow):
            all_axes[(i,0)].set_ylabel(r'Distance From ' + labels[property] + '[dex]')
    else:
        ax.set_xlabel(r'Time (Myr)')
        if ylabels is None:
            ax.set_ylabel(r'Distance to ' + labels[phase] + ' ' + labels[property] + ' [dex]')
        else:
            ax.set_ylabel(ylabels[0])
    if nrow*ncol == 1:
        plt.tight_layout()
    fig.savefig("stellar_distance_to_median.png")
    return
def plot_abundace_resolution_study():
    """Compare O and Ba abundance spreads at two resolutions (3.6 / 7.2 pc).

    Plots the inner-decile range of the O and Ba mass fractions in each
    ISM phase versus time for the two simulations and saves
    'O_Ba_resolution_study.png'.
    """
    fields = ['O_Fraction','Ba_Fraction']
    property_list = ['d9_d1_range']
    phases = ['CNM','WNM','WIM','HIM']
    simulations = {'3pcH2' : './3pc_H2/abundances/gas_abundances.h5',
                   '6pcH2' : './6pc_H2/abundances/gas_abundances.h5'}
    fig, ax = plt.subplots(2,2,sharex=True,sharey=True)
    fig.set_size_inches(12,12)
    fig.subplots_adjust(hspace=0.0,wspace=0.0)
    all_time_data = {}
    for sim in simulations:
        ## BUG FIX: open read-only and close when done -- the original
        ## leaked an open h5py.File handle per simulation (and relied on
        ## h5py's version-dependent default mode).
        with h5py.File(simulations[sim], 'r') as data:
            data_list = np.sort([x for x in list(data.keys()) if 'DD' in x])
            times = np.array([float(x.strip('DD')) for x in data_list])
            all_time_data[sim] = load_abundance_data(data, data_list,
                                                     fields, property_list)
        all_time_data[sim]['time'] = times - times[0]
    ## HIM gets a distinct linestyle; the dense/warm phases share solid.
    ls = {'CNM':'-','WNM':'-','WIM':'-','HIM':':'}
    for phase in phases:
        ax[(0,0)].plot(all_time_data['3pcH2']['time'],
                       all_time_data['3pcH2']['O_Fraction'][phase]['d9_d1_range'],
                       color = color_dict[phase], ls = ls[phase], lw = line_width)
        ax[(0,1)].plot(all_time_data['6pcH2']['time'],
                       all_time_data['6pcH2']['O_Fraction'][phase]['d9_d1_range'],
                       color = color_dict[phase], ls = ls[phase], lw = line_width)
        ax[(1,0)].plot(all_time_data['3pcH2']['time'],
                       all_time_data['3pcH2']['Ba_Fraction'][phase]['d9_d1_range'],
                       color = color_dict[phase], ls = ls[phase], lw = line_width)
        ax[(1,1)].plot(all_time_data['6pcH2']['time'],
                       all_time_data['6pcH2']['Ba_Fraction'][phase]['d9_d1_range'],
                       color = color_dict[phase], ls = ls[phase], lw = line_width)
    for a1 in ax:
        for a2 in a1:
            a2.set_xlim(0, 500)
            a2.set_ylim(0, 3)
            a2.minorticks_on()
    for i in [0,1]:
        ax[(i,0)].set_ylabel(r'Inner Decile Range [dex]')
        ax[(1,i)].set_xlabel(r'Time (Myr)')
    x = 300
    y = 2.7
    size = 20
    ax[(0,0)].text(x, y, r'O - 3.6 pc', color = 'black', size = size)
    ax[(1,0)].text(x, y, r'Ba - 3.6 pc', color = 'black', size = size)
    ax[(0,1)].text(x, y, r'O - 7.2 pc', color = 'black', size = size)
    ax[(1,1)].text(x, y, r'Ba - 7.2 pc', color = 'black', size = size)
    plt.minorticks_on()
    fig.savefig('O_Ba_resolution_study.png')
    return
def plot_abundance_evolution(time, data,
                             fields = ['O_Fraction','Ba_Fraction'],
                             property_list = ['median','IQR'],
                             phases = ['CNM','WNM','WIM','HIM'],
                             figdim=None,
                             field_types = None,
                             labels = None, line_styles = None,
                             xlim = None, ylim = None,
                             annotate_text = None, fsize=5):
    """Plot the time evolution of abundance statistics per ISM phase.

    One panel per (field, property) pair, one line per phase.  Saves
    '<fields>_<properties>_evolution.png' and returns (fig, ax).

    labels may map property -> axis label (or property -> {field: label});
    line_styles maps phase -> matplotlib linestyle.
    """
    ## BUG FIX: both defaults used to crash -- labels[property] and
    ## line_styles.keys() were accessed even when the arguments were left
    ## as None.  Normalize them here instead.
    if labels is None:
        labels = {p: p for p in property_list}
    if line_styles is None:
        line_styles = {}
    if figdim is None:
        ncol = len(property_list)
        nrow = len(fields)
    else:
        nrow, ncol = figdim
    ## Outputs are the 'DD*' groups; truncate to the supplied time axis.
    data_list = np.array(np.sort([x for x in list(data.keys()) if 'DD' in x]))
    data_list = data_list[:len(time)]
    start = cpu_time.time()
    time_data = load_abundance_data(data, data_list, fields, property_list,
                                    phases = phases, field_types = field_types)
    print("DATA LOADING TOOK %2.2E"%(cpu_time.time()- start))
    fig, ax = plt.subplots(nrow,ncol)
    fig.set_size_inches(fsize*ncol, fsize*nrow)
    axi = 0
    axj = 0
    ## Time axis relative to the first output.
    xval = np.array(time)
    xval = xval - xval[0]
    for field in fields:
        for property in property_list:
            ## A single field row means subplots() returned a 1D axis array.
            if len(fields) == 1:
                axindex = axj
            else:
                axindex = (axi,axj)
            for phase in phases:
                yval = time_data[field][phase][property]
                if phase in list(line_styles.keys()):
                    ls = line_styles[phase]
                else:
                    ls = '-'
                ax[axindex].plot(xval, yval, lw = line_width,
                                 ls = ls, color = color_dict[phase],
                                 label = phase)
            ax[axindex].set_xlabel(r'Time (Myr)')
            if hasattr(labels[property],"keys"):
                ax[axindex].set_ylabel(labels[property][field])
            else:
                ax[axindex].set_ylabel(labels[property])
            if xlim is None:
                ax[axindex].set_xlim(xval[0],xval[-1])
            else:
                ax[axindex].set_xlim(xlim)
            if not (ylim is None):
                if np.size(fields) == 1:
                    ymin,ymax = ylim[axindex]
                else:
                    ymin,ymax = ylim[axindex[0]][axindex[1]]
                ax[axindex].set_ylim(ymin,ymax)
            axj = axj + 1
            if axj >= ncol:
                axj = 0
                axi = axi + 1
    if len(fields) == 1:
        indexzero=0
    else:
        indexzero=(0,0)
    ax[indexzero].legend(loc='lower right', ncol=2)
    if not annotate_text is None:
        ## BUG FIX: 'for axi in len(fields)' iterated over an int
        ## (TypeError); iterate over the row indices instead.
        for axi in range(len(fields)):
            if len(fields) == 1:
                axindex0 = axi
                axindex1 = axi
            else:
                axindex0 = (axi,1)
                axindex1 = (axi,0)
            xy = annotate_text[axindex0]
            ax[axindex1].annotate( annotate_text[axindex0], xy,xy)
    plt.minorticks_on()
    fig.savefig( '_'.join(fields + property_list) + '_evolution.png')
    return fig, ax
if __name__ == "__main__":
# plot_abundace_resolution_study()
if len(sys.argv) > 1:
filename = sys.argv[1]
| |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Contains definitions of Mobile Video Networks.
Reference: https://arxiv.org/pdf/2103.11511.pdf
"""
import math
from typing import Dict, Mapping, Optional, Sequence, Tuple, Union
import dataclasses
import tensorflow as tf
from official.modeling import hyperparams
from official.vision.beta.modeling.backbones import factory
from official.vision.beta.projects.movinet.modeling import movinet_layers
# Defines a set of kernel sizes and stride sizes to simplify and shorten
# architecture definitions for configs below.
KernelSize = Tuple[int, int, int]

# K(ab) represents a 3D kernel of size (a, b, b)
K13: KernelSize = (1, 3, 3)
K15: KernelSize = (1, 5, 5)
K33: KernelSize = (3, 3, 3)
K53: KernelSize = (5, 3, 3)

# S(ab) represents a 3D stride of size (a, b, b)
# (strides reuse the KernelSize alias: both are (time, height, width) triples)
S11: KernelSize = (1, 1, 1)
S12: KernelSize = (1, 2, 2)
S22: KernelSize = (2, 2, 2)
S21: KernelSize = (2, 1, 1)
@dataclasses.dataclass
class BlockSpec:
  """Base configuration of a block; concrete block types subclass this."""
  pass
@dataclasses.dataclass
class StemSpec(BlockSpec):
  """Configuration of the MoViNet stem (initial convolution) block."""
  filters: int = 0                     # number of output filters
  kernel_size: KernelSize = (0, 0, 0)  # (time, height, width) kernel
  strides: KernelSize = (0, 0, 0)      # (time, height, width) stride
@dataclasses.dataclass
class MovinetBlockSpec(BlockSpec):
  """Configuration of a Movinet block.

  expand_filters, kernel_sizes and strides are parallel sequences, one
  entry per layer in the block; base_filters is shared by all layers.
  """
  base_filters: int = 0
  expand_filters: Sequence[int] = ()
  kernel_sizes: Sequence[KernelSize] = ()
  strides: Sequence[KernelSize] = ()
@dataclasses.dataclass
class HeadSpec(BlockSpec):
  """Configuration of the MoViNet head (final projection) block."""
  project_filters: int = 0
  head_filters: int = 0
# Block specs specify the architecture of each model
# Each entry is: stem, five MoViNet blocks, head.  'a0'..'a5' are the
# progressively larger A-series variants; 't0' is the tiny variant.
BLOCK_SPECS = {
    'a0': (
        StemSpec(filters=8, kernel_size=K13, strides=S12),
        MovinetBlockSpec(
            base_filters=8,
            expand_filters=(24,),
            kernel_sizes=(K15,),
            strides=(S12,)),
        MovinetBlockSpec(
            base_filters=32,
            expand_filters=(80, 80, 80),
            kernel_sizes=(K33, K33, K33),
            strides=(S12, S11, S11)),
        MovinetBlockSpec(
            base_filters=56,
            expand_filters=(184, 112, 184),
            kernel_sizes=(K53, K33, K33),
            strides=(S12, S11, S11)),
        MovinetBlockSpec(
            base_filters=56,
            expand_filters=(184, 184, 184, 184),
            kernel_sizes=(K53, K33, K33, K33),
            strides=(S11, S11, S11, S11)),
        MovinetBlockSpec(
            base_filters=104,
            expand_filters=(384, 280, 280, 344),
            kernel_sizes=(K53, K15, K15, K15),
            strides=(S12, S11, S11, S11)),
        HeadSpec(project_filters=480, head_filters=2048),
    ),
    'a1': (
        StemSpec(filters=16, kernel_size=K13, strides=S12),
        MovinetBlockSpec(
            base_filters=16,
            expand_filters=(40, 40),
            kernel_sizes=(K15, K33),
            strides=(S12, S11)),
        MovinetBlockSpec(
            base_filters=40,
            expand_filters=(96, 120, 96, 96),
            kernel_sizes=(K33, K33, K33, K33),
            strides=(S12, S11, S11, S11)),
        MovinetBlockSpec(
            base_filters=64,
            expand_filters=(216, 128, 216, 168, 216),
            kernel_sizes=(K53, K33, K33, K33, K33),
            strides=(S12, S11, S11, S11, S11)),
        MovinetBlockSpec(
            base_filters=64,
            expand_filters=(216, 216, 216, 128, 128, 216),
            kernel_sizes=(K53, K33, K33, K33, K15, K33),
            strides=(S11, S11, S11, S11, S11, S11)),
        MovinetBlockSpec(
            base_filters=136,
            expand_filters=(456, 360, 360, 360, 456, 456, 544),
            kernel_sizes=(K53, K15, K15, K15, K15, K33, K13),
            strides=(S12, S11, S11, S11, S11, S11, S11)),
        HeadSpec(project_filters=600, head_filters=2048),
    ),
    'a2': (
        StemSpec(filters=16, kernel_size=K13, strides=S12),
        MovinetBlockSpec(
            base_filters=16,
            expand_filters=(40, 40, 64),
            kernel_sizes=(K15, K33, K33),
            strides=(S12, S11, S11)),
        MovinetBlockSpec(
            base_filters=40,
            expand_filters=(96, 120, 96, 96, 120),
            kernel_sizes=(K33, K33, K33, K33, K33),
            strides=(S12, S11, S11, S11, S11)),
        MovinetBlockSpec(
            base_filters=72,
            expand_filters=(240, 160, 240, 192, 240),
            kernel_sizes=(K53, K33, K33, K33, K33),
            strides=(S12, S11, S11, S11, S11)),
        MovinetBlockSpec(
            base_filters=72,
            expand_filters=(240, 240, 240, 240, 144, 240),
            kernel_sizes=(K53, K33, K33, K33, K15, K33),
            strides=(S11, S11, S11, S11, S11, S11)),
        MovinetBlockSpec(
            base_filters=144,
            expand_filters=(480, 384, 384, 480, 480, 480, 576),
            kernel_sizes=(K53, K15, K15, K15, K15, K33, K13),
            strides=(S12, S11, S11, S11, S11, S11, S11)),
        HeadSpec(project_filters=640, head_filters=2048),
    ),
    'a3': (
        StemSpec(filters=16, kernel_size=K13, strides=S12),
        MovinetBlockSpec(
            base_filters=16,
            expand_filters=(40, 40, 64, 40),
            kernel_sizes=(K15, K33, K33, K33),
            strides=(S12, S11, S11, S11)),
        MovinetBlockSpec(
            base_filters=48,
            expand_filters=(112, 144, 112, 112, 144, 144),
            kernel_sizes=(K33, K33, K33, K15, K33, K33),
            strides=(S12, S11, S11, S11, S11, S11)),
        MovinetBlockSpec(
            base_filters=80,
            expand_filters=(240, 152, 240, 192, 240),
            kernel_sizes=(K53, K33, K33, K33, K33),
            strides=(S12, S11, S11, S11, S11)),
        MovinetBlockSpec(
            base_filters=88,
            expand_filters=(264, 264, 264, 264, 160, 264, 264, 264),
            kernel_sizes=(K53, K33, K33, K33, K15, K33, K33, K33),
            strides=(S11, S11, S11, S11, S11, S11, S11, S11)),
        MovinetBlockSpec(
            base_filters=168,
            expand_filters=(560, 448, 448, 560, 560, 560, 448, 448, 560, 672),
            kernel_sizes=(K53, K15, K15, K15, K15, K33, K15, K15, K33, K13),
            strides=(S12, S11, S11, S11, S11, S11, S11, S11, S11, S11)),
        HeadSpec(project_filters=744, head_filters=2048),
    ),
    'a4': (
        StemSpec(filters=24, kernel_size=K13, strides=S12),
        MovinetBlockSpec(
            base_filters=24,
            expand_filters=(64, 64, 96, 64, 96, 64),
            kernel_sizes=(K15, K33, K33, K33, K33, K33),
            strides=(S12, S11, S11, S11, S11, S11)),
        MovinetBlockSpec(
            base_filters=56,
            expand_filters=(168, 168, 136, 136, 168, 168, 168, 136, 136),
            kernel_sizes=(K33, K33, K33, K33, K33, K33, K33, K15, K33),
            strides=(S12, S11, S11, S11, S11, S11, S11, S11, S11)),
        MovinetBlockSpec(
            base_filters=96,
            expand_filters=(320, 160, 320, 192, 320, 160, 320, 256, 320),
            kernel_sizes=(K53, K33, K33, K33, K33, K33, K33, K33, K33),
            strides=(S12, S11, S11, S11, S11, S11, S11, S11, S11)),
        MovinetBlockSpec(
            base_filters=96,
            expand_filters=(320, 320, 320, 320, 192, 320, 320, 192, 320, 320),
            kernel_sizes=(K53, K33, K33, K33, K15, K33, K33, K33, K33, K33),
            strides=(S11, S11, S11, S11, S11, S11, S11, S11, S11, S11)),
        MovinetBlockSpec(
            base_filters=192,
            expand_filters=(640, 512, 512, 640, 640, 640, 512, 512, 640, 768,
                            640, 640, 768),
            kernel_sizes=(K53, K15, K15, K15, K15, K33, K15, K15, K15, K15, K15,
                          K33, K33),
            strides=(S12, S11, S11, S11, S11, S11, S11, S11, S11, S11, S11, S11,
                     S11)),
        HeadSpec(project_filters=856, head_filters=2048),
    ),
    'a5': (
        StemSpec(filters=24, kernel_size=K13, strides=S12),
        MovinetBlockSpec(
            base_filters=24,
            expand_filters=(64, 64, 96, 64, 96, 64),
            kernel_sizes=(K15, K15, K33, K33, K33, K33),
            strides=(S12, S11, S11, S11, S11, S11)),
        MovinetBlockSpec(
            base_filters=64,
            expand_filters=(192, 152, 152, 152, 192, 192, 192, 152, 152, 192,
                            192),
            kernel_sizes=(K53, K33, K33, K33, K33, K33, K33, K33, K33, K33,
                          K33),
            strides=(S12, S11, S11, S11, S11, S11, S11, S11, S11, S11, S11)),
        MovinetBlockSpec(
            base_filters=112,
            expand_filters=(376, 224, 376, 376, 296, 376, 224, 376, 376, 296,
                            376, 376, 376),
            kernel_sizes=(K53, K33, K33, K33, K33, K33, K33, K33, K33, K33, K33,
                          K33, K33),
            strides=(S12, S11, S11, S11, S11, S11, S11, S11, S11, S11, S11, S11,
                     S11)),
        MovinetBlockSpec(
            base_filters=120,
            expand_filters=(376, 376, 376, 376, 224, 376, 376, 224, 376, 376,
                            376),
            kernel_sizes=(K53, K33, K33, K33, K15, K33, K33, K33, K33, K33,
                          K33),
            strides=(S11, S11, S11, S11, S11, S11, S11, S11, S11, S11, S11)),
        MovinetBlockSpec(
            base_filters=224,
            expand_filters=(744, 744, 600, 600, 744, 744, 744, 896, 600, 600,
                            896, 744, 744, 896, 600, 600, 744, 744),
            kernel_sizes=(K53, K33, K15, K15, K15, K15, K33, K15, K15, K15, K15,
                          K15, K33, K15, K15, K15, K15, K33),
            strides=(S12, S11, S11, S11, S11, S11, S11, S11, S11, S11, S11, S11,
                     S11, S11, S11, S11, S11, S11)),
        HeadSpec(project_filters=992, head_filters=2048),
    ),
    't0': (
        StemSpec(filters=8, kernel_size=K13, strides=S12),
        MovinetBlockSpec(
            base_filters=8,
            expand_filters=(16,),
            kernel_sizes=(K15,),
            strides=(S12,)),
        MovinetBlockSpec(
            base_filters=32,
            expand_filters=(72, 72),
            kernel_sizes=(K33, K15),
            strides=(S12, S11)),
        MovinetBlockSpec(
            base_filters=56,
            expand_filters=(112, 112, 112),
            kernel_sizes=(K53, K15, K33),
            strides=(S12, S11, S11)),
        MovinetBlockSpec(
            base_filters=56,
            expand_filters=(184, 184, 184, 184),
            kernel_sizes=(K53, K15, K33, K33),
            strides=(S11, S11, S11, S11)),
        MovinetBlockSpec(
            base_filters=104,
            expand_filters=(344, 344, 344, 344),
            kernel_sizes=(K53, K15, K15, K33),
            strides=(S12, S11, S11, S11)),
        HeadSpec(project_filters=240, head_filters=1024),
    ),
}
@tf.keras.utils.register_keras_serializable(package='Vision')
class Movinet(tf.keras.Model):
"""Class to build Movinet family model.
Reference: https://arxiv.org/pdf/2103.11511.pdf
"""
def __init__(self,
model_id: str = 'a0',
causal: bool = False,
use_positional_encoding: bool = False,
conv_type: str = '3d',
input_specs: Optional[tf.keras.layers.InputSpec] = None,
activation: str = 'swish',
use_sync_bn: bool = True,
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
kernel_initializer: str = 'HeNormal',
kernel_regularizer: Optional[str] = None,
bias_regularizer: Optional[str] = None,
stochastic_depth_drop_rate: float = 0.,
use_external_states: bool = False,
**kwargs):
"""MoViNet initialization function.
Args:
model_id: name of MoViNet backbone model.
causal: use causal mode, with CausalConv and CausalSE operations.
use_positional_encoding: if True, adds a positional encoding before
temporal convolutions and the cumulative global average pooling
layers.
conv_type: '3d', '2plus1d', or '3d_2plus1d'. '3d' configures the network
to use the default 3D convolution. '2plus1d' uses (2+1)D convolution
with Conv2D operations and 2D reshaping (e.g., a 5x3x3 kernel becomes
3x3 followed by 5x1 conv). '3d_2plus1d' uses (2+1)D convolution with
Conv3D and no 2D reshaping (e.g., a 5x3x3 kernel becomes 1x3x3 followed
by 5x1x1 conv).
input_specs: the model input spec to use.
activation: name of the activation function.
use_sync_bn: if True, use synchronized batch normalization.
norm_momentum: normalization momentum for the moving average.
norm_epsilon: small float added to variance to avoid dividing by
zero.
kernel_initializer: kernel_initializer for convolutional layers.
kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
Defaults to None.
bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2d.
Defaults to None.
stochastic_depth_drop_rate: the base rate for stochastic depth.
| |
w = r*w
#print shortstr(w)
v = dot(B, w) - w
assert eq(v, s)
weights.append(w)
W = array(weights)
print("W:")
print(shortstr(W))
Wt = W.transpose()
St = S.transpose()
# What combination of weights gives the (simple) roots?
U = solve(Wt, St)
print("U:")
print(shortstr(U))
# What combination of roots gives the weights?
V = solve(St, Wt)
print("V:")
print(shortstr(V))
print('--'*10)
#for A in As:
weyl = G.generate() # entire weyl group, sorted by word length
print("weyl:", len(weyl))
I = weyl[0]
assert I.word == '' # identity
rows = []
for s in simple:
r = I(s)
r = dot(T, r)
#print shortstr(r)
rows.append(r)
A0 = array(rows).transpose()
for g in weyl:
rows = []
for s in simple:
r = g(s)
r = dot(T, r)
#print shortstr(r)
rows.append(r)
A = array(rows).transpose()
#print "A:"
#print shortstr(A)
print(g.word or 'I')
D = dot(A-A0, V).transpose()
print(shortstr(D))
def find_negi(G):
    """Search the Weyl group of G for elements that act as -1 on the roots.

    First looks for a pair X, Z of maximal-length words such that X*Z acts as
    -(Z*X) on every simple root; if found, prints the relation and returns.
    Otherwise prints every group element g with g(root) == -root for all roots.
    """
    roots = G.roots
    simple = G.simple
    print("roots:", len(roots))
    # print roots
    # print
    weyl = G.generate()
    # FIX: a trailing comma inside print(...) does not suppress the newline in
    # Python 3 (py2-ism); use end=' ' to keep all words on one line as intended.
    print("weyl:", end=' ')
    for w in weyl:
        print(w.word, end=' ')
    print()
    print("weyl:", len(weyl))
    nroots = [rscale(-1, root) for root in roots]
    n = len(roots)
    N = len(weyl)
    for i in range(N):
        for j in range(i+1, N):
            X = weyl[i]
            Z = weyl[j]
            XZ = X*Z
            ZX = Z*X
            for root in simple:
                if XZ(root) != rscale(-1, ZX(root)):
                    break
            else:
                # X*Z == -(Z*X) on every simple root
                if len(X.word) == len(Z.word) == G.n:
                    x, z = X.word, Z.word
                    print("%s*%s = -%s*%s" % (x, z, z, x))
                    #return
                    return
    # no anti-commuting pair found: look for elements acting as -identity
    for g in weyl:
        for i in range(n):
            if g(roots[i]) != nroots[i]:
                break
        else:
            print("%s = -I" % g.word)
def test_monoid(G):
    """Build the Coxeter-Bruhat monoid, represented as a monoid of functions.

    This representation is not faithful.
    """
    roots = G.roots
    print("roots:", len(roots))
    weyl = G.generate()
    # FIX: py2-style trailing commas in print(...) do not suppress the newline
    # in Python 3; use end=' '.
    print("weyl:", end=' ')
    for w in weyl:
        print(w.word, end=' ')
    print()
    print("weyl:", len(weyl))
    # FIX: 'gen' and 'n' were used below without ever being assigned in this
    # function (NameError at runtime); bind them from G as the usage
    # (gen[i].word, G.gen[desc.index(c)]) implies.
    gen = G.gen
    n = len(gen)
    r0 = roots[0]
    bdy = set([r0])
    seen = set()
    identity = dict((r, r) for r in roots)
    perms = [dict(identity) for g in gen]
    # breadth-first sweep over the roots, recording where each generator
    # sends each newly reached root
    while bdy:
        _bdy = set()
        seen.update(bdy)
        for r0 in bdy:
            for i in range(n):
                g = gen[i]
                perm = perms[i]
                r1 = g*r0
                assert perm.get(r0) == r0
                if r1 not in seen:
                    perm[r0] = r1
                    _bdy.add(r1)
        bdy = _bdy
    gen = [Perm(perms[i], roots, gen[i].word) for i in range(len(perms))]
    identity = Perm(identity, roots)
    for g in gen:
        print(g.str())
        assert g*g == g  # Bruhat generators are idempotent
    monoid = mulclose_short([identity]+gen)
    print("monoid:", len(monoid))
    #monoid = mulclose(monoid)
    #print "monoid:", len(monoid)
    desc = "ABCDEFG"[:n]
    def txlate(word):
        "monoid to weyl"
        g = identity
        for c in word:
            g = g * G.gen[desc.index(c)]
        return g
    monoid = list(monoid)
    monoid.sort(key=lambda g: (len(g.word), g.word))
    for g in monoid:
        tgt = list(set(g.perm.values()))
        g = txlate(g.word)
        print("%6s" % g.word, len(tgt))
    print()
    return
    # NOTE: everything below this return is unreachable experimental code,
    # preserved from the original.
    m = G.matrix(desc)
    from coxeter import BruhatMonoid
    monoid = BruhatMonoid(desc, m, bruhat=True, build=True)
    #for w in monoid.words:
    #    print w,
    #print
    def txlate(word):
        "_translate to function monoid"
        g = identity
        for c in word:
            g = g * gen[desc.index(c)]
        return g
    lookup = {}
    for w0 in monoid.words:
        g = txlate(w0)
        w1 = lookup.get(g)
        if w1 is not None:
            print(w0, "=", w1)
        else:
            lookup[g] = w0
            print(w0)
    for w0 in monoid.words:
        for w1 in monoid.words:
            w2 = monoid.mul[w0, w1]
            if txlate(w0)*txlate(w1) == txlate(w2):
                pass
            else:
                print("%r*%r = %r" % (w0, w1, w2), end=' ')
                print(" ****************** FAIL")
def test(n):
    """Check the Coxeter relations of the rank-n Weyl groups A_n, B_n, C_n, D_n.

    For each pair of generators (gi, gj) the order of gi*gj is asserted to
    match the Coxeter diagram; for small n the group order is checked too.
    """
    G = Weyl.build_A(n)
    # print "%d roots" % len(G.roots)
    # print G.roots
    # for g in G.gen:
    #     print [g(root) for root in G.roots]
    gen = G.gen
    for i in range(n):
        for j in range(n):
            gi = gen[i]
            gj = gen[j]
            if i==j:
                # each generator is an involution
                assert gi*gj == G.identity
            elif abs(i-j)==1:
                # adjacent nodes: braid relation of order 3
                assert gi*gj != G.identity
                assert (gi*gj)**2 != G.identity
                assert (gi*gj)**3 == G.identity
            else:
                # non-adjacent nodes commute
                assert gi*gj != G.identity
                assert (gi*gj)**2 == G.identity
    if n < 5:
        assert len(mulclose(G.gen)) == factorial(n+1)  # |W(A_n)| = (n+1)!
    # ---------------------------------------------------------
    G = Weyl.build_B(n)
    gen = G.gen
    for g in gen:
        assert g*g == G.identity
    for i in range(n-1):
        for j in range(i+1, n-1):
            gi = gen[i]
            gj = gen[j]
            if abs(i-j)==1:
                assert gi*gj != G.identity
                assert (gi*gj)**2 != G.identity
                assert (gi*gj)**3 == G.identity
            else:
                assert gi*gj != G.identity
                assert (gi*gj)**2 == G.identity
        # NOTE(review): 'i < n-1' is always true here (i ranges over n-1), and
        # when i == n-2 the inner j-loop is empty so 'gi' is stale from the
        # previous iteration -- the n-2/n-1 order-4 relation is instead checked
        # in the 'if n>2' branch below. Confirm this is intentional.
        if i < n-1:
            gj = gen[n-1]
            assert gi*gj != G.identity
            assert (gi*gj)**2 == G.identity
    if n>2:
        # the last two generators satisfy an order-4 braid relation in B_n
        gi = gen[n-2]
        gj = gen[n-1]
        assert (gi*gj) != G.identity
        assert (gi*gj)**2 != G.identity
        assert (gi*gj)**3 != G.identity
        assert (gi*gj)**4 == G.identity
    if n < 5:
        assert len(mulclose(G.gen)) == (2**n)*factorial(n)  # |W(B_n)|
    # ---------------------------------------------------------
    G = Weyl.build_C(n)
    # ---------------------------------------------------------
    if n<3:
        return # <---------------------- return
    G = Weyl.build_D(n)
    gen = G.gen
    for i in range(n-1):
        gi = gen[i]
        for j in range(i+1, n-1):
            gj = gen[j]
            if abs(i-j)==1:
                assert gi*gj != G.identity
                assert (gi*gj)**2 != G.identity
                assert (gi*gj)**3 == G.identity
            else:
                assert gi*gj != G.identity
                assert (gi*gj)**2 == G.identity
        # the fork node of the D_n diagram: last generator is adjacent only
        # to node n-3
        gj = gen[n-1]
        if i < n-3 or i==n-2:
            assert gi*gj != G.identity
            assert (gi*gj)**2 == G.identity
        elif i==n-3:
            assert gi*gj != G.identity
            assert (gi*gj)**2 != G.identity
            assert (gi*gj)**3 == G.identity
    if n < 5:
        assert len(mulclose(G.gen)) == (2**(n-1))*factorial(n)  # |W(D_n)|
def test_longest_element():
    """Check Weyl.longest_element against known reduced words, and against the
    -1 action on roots, for several small Weyl groups.

    NOTE(review): the early 'return' below skips every check after A_3 --
    presumably a debugging leftover; confirm before relying on this test.
    """
    G = Weyl.build_A(2)
    g = G.longest_element()
    A, B = G.gen
    assert g == A*B*A
    G = Weyl.build_A(3)
    g = G.longest_element()
    A, B, C = G.gen
    assert g == B*C*B*A*B*C
    w = A*C*B
    assert g == (w**2)  # (ACB)^2 is the longest element of A_3
    return
    # --- everything below is unreachable (see NOTE above) ---
    while w != g:
        w = w*A*C*C
        print(".")
    G = Weyl.build_B(2)
    g = G.longest_element()
    A, B = G.gen
    assert g == B*A*B*A
    G = Weyl.build_B(3)
    g = G.longest_element()
    A, B, C = G.gen
    assert g == C*A*B*C*A*B*C*A*B
    G = Weyl.build_D(4)
    g = G.longest_element()
    A, B, C, D = G.gen
    assert g == B*A*D*B*A*D*C*B*D*A*B*C
    # for the remaining groups, check that the longest element negates
    # every root
    G = Weyl.build_D(6)
    g = G.longest_element()
    for root in G.roots:
        assert g(root) == rscale(-1, root)
    G = Weyl.build_E7()
    g = G.longest_element()
    for root in G.roots:
        assert g(root) == rscale(-1, root)
    G = Weyl.build_E8()
    g = G.longest_element()
    for root in G.roots:
        assert g(root) == rscale(-1, root)
    G = Weyl.build_F4()
    g = G.longest_element()
    for root in G.roots:
        assert g(root) == rscale(-1, root)
    G = Weyl.build_G2()
    g = G.longest_element()
    for root in G.roots:
        assert g(root) == rscale(-1, root)
    print("OK")
def show_qpoly(G):
    """Print q-polynomials attached to the Weyl group G.

    First prints the Poincare polynomial sum_g q^len(g) over the whole group,
    then, for each maximal parabolic subgroup H (obtained by dropping one
    generator), the polynomial of minimal-length coset representatives of G/H.
    """
    gen = G.gen
    roots = G.roots
    els = G.generate()
    e = G.identity
    # NOTE(review): the parameter G is shadowed here by a Group wrapper around
    # its own elements -- apparently intentional, but easy to misread.
    G = Group(els, roots)
    print("order:", len(els))
    ring = element.Z
    value = zero = Poly({}, ring)
    q = Poly("q", ring)
    for g in els:
        #print(g.word)
        value = value + q**(len(g.word))
    print(value.qstr())
    lookup = dict((g, g) for g in G) # remember canonical word
    n = len(gen)
    for i in range(n):
        # drop the i-th generator to obtain a maximal parabolic subgroup
        gen1 = gen[:i] + gen[i+1:]
        H = mulclose_short([e]+gen1)
        eH = Coset(H, roots)
        H = Group(H, roots)
        #gHs = G.left_cosets(H)
        # breadth-first closure of the coset space under left multiplication
        # by the generators
        cosets = set([eH])
        bdy = set(cosets)
        while bdy:
            _bdy = set()
            for coset in bdy:
                for g in gen:
                    gH = Coset([g*h for h in coset], roots)
                    if gH not in cosets:
                        cosets.add(gH)
                        _bdy.add(gH)
            bdy = _bdy
        value = zero
        for gH in cosets:
            items = list(gH)
            items.sort(key = lambda g : len(g.word))
            #for g in items:
            #    print(g.word, end=" ")
            #print()
            g = items[0]  # minimal-length coset representative
            value = value + q**len(g.word)
            #print(len(gH))
        print(value.qstr())
def main():
n = argv.n
if n:
test(n)
elif argv.test:
for n in range(2, 6):
test(n)
test_longest_element()
if argv.test_longest_element:
test_longest_element()
G = None
if argv.E_8:
G = Weyl.build_E8()
assert G.matrix() == {
(0, 1):3, (1, 2):3, (2, 3):3, (3, 4):3, (4, 5):3, (4, 6):3, (6, 7):3}
if argv.E_7:
G = Weyl.build_E7()
assert G.matrix() == {
(0, 6): 3, (1, 2): 3, (2, 3): 3, (3, 4): 3, (3, 5): 3, (5, 6): 3}
if argv.E_6:
G = Weyl.build_E6()
assert G.matrix() == {
(0, 1): 3, (1, 2): 3, (2, 3): 3, (2, 4): 3, (4, 5): 3}
#items = mulclose(G.gen, verbose=True)
#print("|E6|=", len(items))
if argv.F_4:
G = Weyl.build_F4()
assert G.matrix() == {(0, 1): 3, (1, 2): 4, (2, 3): 3}
#items = mulclose(G.gen, verbose=True)
#print("|F4|=", len(items)) # == 1152
if argv.G_2:
G = Weyl.build_G2()
assert G.matrix() == {(0, 1): 6}
assert len(mulclose(G.gen)) == 12 # dihedral group D_6
for arg in argv:
if len(arg) < 3 or arg[1]!="_":
continue
try:
n = int(arg[2:])
except:
continue
print("constructing %s"%arg)
if arg.startswith("A"):
G = Weyl.build_A(n)
if | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Unit tests for utils.py.
@author: <NAME>
"""
import os
import pathlib
import glob
import time
import unittest
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
import numpy as np
import mne
from mneflow import utils
# Change current directory to the one this file resides in
os.chdir(pathlib.Path(__file__).parent.absolute())
# --- Auxiliary functions ---
def check_meta_trial_class():
    """Return the reference metadata dict for trial-based classification."""
    return {
        'train_paths': [], 'val_paths': [], 'test_paths': [],
        'data_id': '', 'val_size': 0, 'savepath': '',
        'target_type': '', 'input_type': '', 'fs': 0,
        'class_proportions': {}, 'orig_classes': {},
        'y_shape': (), 'n_ch': 0, 'n_t': 0, 'test_size': 0,
    }
def check_meta_seq_class():
    """Return the reference metadata dict for sequence-based classification."""
    return {
        'train_paths': [], 'val_paths': [], 'test_paths': [],
        'data_id': '', 'val_size': 0, 'savepath': '', 'n_seq': 0,
        'target_type': '', 'input_type': '', 'fs': 0,
        'class_proportions': {}, 'orig_classes': {},
        'y_shape': (), 'n_ch': 0, 'n_t': 0, 'test_size': 0,
    }
def check_meta_trial_reg():
    """Return the reference metadata dict for trial-based regression
    (no class-proportion fields)."""
    return {
        'train_paths': [], 'val_paths': [], 'test_paths': [],
        'data_id': '', 'val_size': 0, 'savepath': '',
        'target_type': '', 'input_type': '', 'fs': 0,
        'y_shape': (), 'n_ch': 0, 'n_t': 0, 'test_size': 0,
    }
def check_meta_seq_reg():
    """Return the reference metadata dict for sequence-based regression
    (no class-proportion fields)."""
    return {
        'train_paths': [], 'val_paths': [], 'test_paths': [],
        'data_id': '', 'val_size': 0, 'savepath': '', 'n_seq': 0,
        'target_type': '', 'input_type': '', 'fs': 0,
        'y_shape': (), 'n_ch': 0, 'n_t': 0, 'test_size': 0,
    }
def check_tfrecords(path='./', pattern=''):
    """Iterate over TFRecord files matching the pattern and check for
    corrupted records.

    Adapted from keras_utils.py.

    Parameters
    ----------
    path : str
        Directory prefix to search in.
    pattern : str
        Substring matched against the file names ('{path}*{pattern}*.tfrecord').

    Returns
    -------
    total_files : int
        Number of records counted across all matching files (plus one per
        unreadable file).
    error_files : int
        Number of files that raised an error while being read.
    """
    total_files = 0
    error_files = 0
    # NOTE: 'glob' is already imported at the top of this module; the
    # redundant local import was removed.
    train_files = sorted(glob.glob('%s*%s*.tfrecord' % (path, pattern)))
    for file in train_files:
        try:
            # FIX: tf.python_io was removed in TF2; use the compat.v1 alias,
            # consistent with the tf.compat.v1 usage at the top of this file.
            total_files += sum(
                [1 for _ in tf.compat.v1.io.tf_record_iterator(file)])
        except IOError:
            # NOTE(review): corrupted records typically raise
            # tf.errors.DataLossError rather than IOError -- confirm whether
            # that should be caught here as well.
            total_files += 1
            error_files += 1
    return total_files, error_files
# --- UNIT TESTS ---
class TestUtils(unittest.TestCase):
    """Unit test class for most functions contained in utils.py file."""

    def test_true(self):
        """Sanity check test #1."""
        self.assertEqual(True, True)

    def test_pass(self):
        """Sanity check test #2."""
        pass

    def test_partition(self):
        """Placeholder for utils.partition"""
        # data = np.arange(10000).reshape(100, 5, 20)
        # data = data.astype(np.float32)
        # events = np.arange(100).reshape(100, 1)
        # idx = [(1, 2), (10, 11)]
        # x1, x2 = utils.partition(data, idx)
        # y1, y2 = utils.partition(events, idx)

    def test_cont_split_indices(self):
        """Unit test for utils.cont_split_indices"""
        data = np.arange(1000).reshape(1, 10, 5, 20)
        idx = utils.cont_split_indices(data, test_size=0.1, test_segments=2)
        self.assertTrue(len(idx) == 2)
        # every returned index must lie within the last (time) axis
        self.assertTrue(np.all([jj < data.shape[-1]
                                for ii in idx for jj in ii]))

    def test_create_example_fif(self):
        """Create (once) and reload the example-epo.fif fixture."""
        from mne.datasets import multimodal
        fname = 'example-epo.fif'
        if not os.path.exists(fname):
            mne.set_log_level(verbose='CRITICAL')
            rname = os.path.join(multimodal.data_path(), 'multimodal_raw.fif')
            raw = mne.io.read_raw_fif(rname)
            cond = raw.acqparser.get_condition(
                raw, condition=['Auditory left', 'Auditory right'])
            epochs_list = [mne.Epochs(raw, **c) for c in cond]
            epochs = mne.concatenate_epochs(epochs_list)
            epochs.save(fname, overwrite=False)
            del raw, epochs, cond, epochs_list
        epochs = mne.epochs.read_epochs(fname, preload=False)
        self.assertTrue(epochs)

    def test_create_example_npz(self):
        """Create (once) and reload the example_meg.npz fixture."""
        fname = 'example_meg.npz'
        if not os.path.exists('example_meg.npz'):
            epochs = mne.read_epochs('example-epo.fif', preload=True)
            data = epochs.get_data()
            events = epochs.events[:, 2]
            np.savez_compressed('example_meg', data=data, events=events)
            del epochs, data, events
        datafile = np.load(fname)
        self.assertTrue(np.any(datafile['data']))
        self.assertTrue(np.any(datafile['events']))

    def test_create_example_mat(self):
        """Create (once) and reload the example_meg.mat fixture."""
        import scipy.io as sio
        fname = 'example_meg.mat'
        if not os.path.exists(fname):
            tmp = np.load('example_meg.npz')
            adict = {}
            adict['data'] = tmp['data']
            adict['events'] = tmp['events']
            sio.savemat(fname, adict)
            del tmp, adict
        datafile = sio.loadmat(fname)
        self.assertTrue(np.any(datafile['data']))
        self.assertTrue(np.any(datafile['events']))

    def test_onehot(self):
        """Unit test for utils._onehot function."""
        y = np.arange(0, 10, dtype='int')
        y_ = utils._onehot(y)
        y_true = np.eye(10, dtype='int')
        np.testing.assert_equal(y_, y_true)

    def test_load_meta_trials(self):
        """Unit test for utils._load_meta function."""
        with self.assertRaises(FileNotFoundError):
            s = utils._load_meta('', '')
        s = utils._load_meta('./', 'example_trials')
        self.assertTrue(isinstance(s, dict))
        tmp = check_meta_trial_class()
        # same keys, and each value has the expected type
        self.assertEqual(set(s.keys()), set(tmp.keys()))
        for ii in s.keys():
            self.assertEqual(type(tmp[ii]), type(s[ii]))

    def test_load_meta_seq(self):
        """Unit test for utils._load_meta function."""
        with self.assertRaises(FileNotFoundError):
            s = utils._load_meta('', '')
        s = utils._load_meta('./', 'example_seq')
        self.assertTrue(isinstance(s, dict))
        tmp = check_meta_seq_class()
        self.assertEqual(set(s.keys()), set(tmp.keys()))
        for ii in s.keys():
            self.assertEqual(type(tmp[ii]), type(s[ii]))

    def test_scale_to_baseline_cont(self):
        """Test on a continuous signal (sigmoid) with std = 1 and mean = 0."""
        from scipy.special import expit
        t = np.linspace(-5, 5, 1000)
        # scaled/shifted sigmoid; the constants normalize it over this sample
        f = 2.56976368*expit(t) - 1.28488184
        X = np.stack([f for _ in range(10)])
        # each channel gets a different gain and offset, which
        # scale_to_baseline should undo
        X_ = np.asarray([(ii+1)*X[ii] + (ii+1) for ii in range(10)])
        s = utils.scale_to_baseline(X_.copy())
        # almost_equal since the values are floats
        np.testing.assert_almost_equal(X, s)

    def test_scale_to_baseline_range_crop(self):
        """Baseline is calculated from range and cropped."""
        X = np.stack([[-1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] for _ in range(10)])
        X_ = np.asarray([(ii+1)*X[ii] + 0.5 for ii in range(0, 10)])
        inv = (0, 2)
        s = utils.scale_to_baseline(X_, baseline=inv, crop_baseline=True)
        # cropping removes the baseline interval from the output
        np.testing.assert_equal(X[..., (inv[1]-1):].shape, s.shape)
        np.testing.assert_equal(X[..., (inv[1]-1):], s)

    def test_scale_to_baseline_range(self):
        """Baseline is calculated from range."""
        X = np.stack([[-1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] for _ in range(10)])
        X_ = np.asarray([(ii+1)*X[ii] + 0.5 for ii in range(0, 10)])
        inv = (0, 2)
        # test without cropping the baseline
        s = utils.scale_to_baseline(X_, baseline=inv, crop_baseline=False)
        np.testing.assert_equal(X.shape, s.shape)
        np.testing.assert_equal(X, s)

    def test_produce_labels(self):
        """Test labels produced from event trigger codes."""
        f = np.arange(1, 10, step=2)
        y = np.stack([f for _ in range(10)]).flatten()
        y = np.random.permutation(y)
        inv, tot, prop, orig = utils.produce_labels(y, return_stats=True)
        self.assertEqual(len(y), tot)
        np.testing.assert_equal(list(prop.keys()), np.arange(5))
        np.testing.assert_equal(list(prop.values()), 0.2*np.ones(5))
        np.testing.assert_equal(list(orig.items()),
                                [(ii, v) for ii, v in enumerate(f)])
        np.testing.assert_equal(inv, np.unique(y, return_inverse=True)[1])

    def test_combine_labels(self):
        """Test label combination."""
        events = np.arange(10) // 2  # range [0, 4]
        # testing single label, invalid label and list of labels
        combine_events = {24: 3, 0: 5, 11: [1, 2]}
        avail_labels = [1, 2, 3]
        new_avail_labels = [11, 24]
        tevents, keep_ind = utils._combine_labels(events, combine_events)
        for a, b in combine_events.items():
            print(a, b)
            # FIX: 'v in b' raised TypeError for scalar mappings (e.g. 24: 3);
            # wrap scalars in a list so the membership test always works.
            members = b if isinstance(b, (list, tuple)) else [b]
            idx = [ii for ii, v in enumerate(events) if v in members]
            self.assertTrue(np.all(tevents[idx] == a))
        self.assertTrue(np.all(np.isin(events[keep_ind], avail_labels)))
        self.assertTrue(np.all(events[keep_ind] != 5))
        self.assertTrue(np.all(np.isin(tevents[keep_ind], new_avail_labels)))
        self.assertTrue(np.all(tevents[keep_ind] != 0))
class TestPreprocess(unittest.TestCase):
    """Unit test class for utils.preprocess function.

    Each test checks that preprocess splits (data, events) into a train and a
    validation partition whose row counts add up to the input and whose
    remaining dimensions are preserved (or transformed as requested).
    """

    def test_preprocess(self):
        """Default options: split only, shapes preserved."""
        data = np.arange(1000).reshape(10, 5, 20)
        data = data.astype(np.float32)
        events = np.arange(10).reshape(10, 1)
        # No options check
        x1, y1, x2, y2 = utils.preprocess(data, events)
        self.assertEqual(x1.shape[0] + x2.shape[0], data.shape[0])
        self.assertEqual(x1.shape[1:], data.shape[1:])
        self.assertEqual(x2.shape[1:], data.shape[1:])
        self.assertEqual(y1.shape[0] + y2.shape[0], events.shape[0])
        self.assertEqual(y1.shape[1:], events.shape[1:])
        self.assertEqual(y2.shape[1:], events.shape[1:])

    def test_preprocess_trials(self):
        """input_type='trials': shapes preserved."""
        data = np.arange(1000).reshape(10, 5, 20)
        data = data.astype(np.float32)
        events = np.arange(10).reshape(10, 1)
        # trials
        x1, y1, x2, y2 = utils.preprocess(data, events, input_type='trials')
        self.assertEqual(x1.shape[0] + x2.shape[0], data.shape[0])
        self.assertEqual(x1.shape[1:], data.shape[1:])
        self.assertEqual(x2.shape[1:], data.shape[1:])
        self.assertEqual(y1.shape[0] + y2.shape[0], events.shape[0])
        self.assertEqual(y1.shape[1:], events.shape[1:])
        self.assertEqual(y2.shape[1:], events.shape[1:])

    def test_preprocess_seq(self):
        """input_type='seq': shapes preserved."""
        data = np.arange(1000).reshape(10, 5, 20)
        data = data.astype(np.float32)
        events = np.arange(10).reshape(10, 1)
        # seq
        x1, y1, x2, y2 = utils.preprocess(data, events, input_type='seq')
        self.assertEqual(x1.shape[0] + x2.shape[0], data.shape[0])
        self.assertEqual(x1.shape[1:], data.shape[1:])
        self.assertEqual(x2.shape[1:], data.shape[1:])
        self.assertEqual(y1.shape[0] + y2.shape[0], events.shape[0])
        self.assertEqual(y1.shape[1:], events.shape[1:])
        self.assertEqual(y2.shape[1:], events.shape[1:])

    def test_preprocess_scale(self):
        """scale=True: scaling must not change any shape."""
        data = np.arange(1000).reshape(10, 5, 20)
        data = data.astype(np.float32)
        events = np.arange(10).reshape(10, 1)
        # scale
        x1, y1, x2, y2 = utils.preprocess(data, events, scale=True)
        self.assertEqual(x1.shape[0] + x2.shape[0], data.shape[0])
        self.assertEqual(x1.shape[1:], data.shape[1:])
        self.assertEqual(x2.shape[1:], data.shape[1:])
        self.assertEqual(y1.shape[0] + y2.shape[0], events.shape[0])
        self.assertEqual(y1.shape[1:], events.shape[1:])
        self.assertEqual(y2.shape[1:], events.shape[1:])

    def test_preprocess_decimate(self):
        """decimate=2: time axis halved, trial count unchanged."""
        data = np.arange(1000).reshape(10, 5, 20)
        data = data.astype(np.float32)
        events = np.arange(10).reshape(10, 1)
        # decimate
        x1, y1, x2, y2 = utils.preprocess(data, events, decimate=2)
        self.assertEqual(x1.shape[0] + x2.shape[0], data.shape[0])
        self.assertEqual(x1.shape[1], data.shape[1])
        self.assertEqual(x1.shape[2], data.shape[2]/2)
        self.assertEqual(x2.shape[1], data.shape[1])
        self.assertEqual(x2.shape[2], data.shape[2]/2)
        self.assertEqual(y1.shape[0] + y2.shape[0], events.shape[0])
        self.assertEqual(y1.shape[1:], events.shape[1:])
        self.assertEqual(y2.shape[1:], events.shape[1:])

    def test_preprocess_segment(self):
        """segment=10: 20 samples per trial yield 2 segments of 10 each,
        doubling the trial count and halving the time axis."""
        data = np.arange(1000).reshape(10, 5, 20)
        data = data.astype(np.float32)
        events = np.arange(10).reshape(10, 1)
        # segment
        x1, y1, x2, y2 = utils.preprocess(data, events, segment=10)
        self.assertEqual(x1.shape[0] + x2.shape[0], 2*data.shape[0])
        self.assertEqual(x1.shape[1], data.shape[1])
        self.assertEqual(x1.shape[2], data.shape[2]/2)
        self.assertEqual(x2.shape[1], data.shape[1])
        self.assertEqual(x2.shape[2], data.shape[2]/2)
        self.assertEqual(y1.shape[0] + y2.shape[0], 2*events.shape[0])
        self.assertEqual(y1.shape[1:], events.shape[1:])
        self.assertEqual(y2.shape[1:], events.shape[1:])
class TestUtilstf(tf.test.TestCase):
    """Unit tests assessing Tensorflow functionality."""

    def test_true(self):
        """Sanity check test #1."""
        self.assertTrue(True)

    def test_pass(self):
        """Sanity check test #2."""
        return
class TestSplitSets(unittest.TestCase):
"""Unit test class for utils._split_sets function."""
    def test_split_sets_2d_target1d_v1(self):
        """Split numpy arrays into sets.

        2D data with a 1D target vector: the two partitions must cover X/y
        exactly, and the 1D target must come back with a trailing singleton
        dimension.
        """
        X = np.arange(200).reshape(10, 20)
        y = np.arange(10)  # Note! y is 1D with shape (10, )
        x1, y1, x2, y2 = utils._split_sets(X, y, val=0.5)
        # both halves are drawn from X and together cover it exactly
        self.assertTrue(np.all(np.isin(x1, X)))
        self.assertTrue(np.all(np.isin(x2, X)))
        self.assertTrue(np.all(np.isin(np.concatenate((x1, x2), axis=0), X)))
        self.assertTrue(np.all(np.isin(X, np.concatenate((x1, x2), axis=0))))
        self.assertEqual(x1.shape[0] + x2.shape[0], X.shape[0])
        self.assertEqual(x1.shape[1], X.shape[1])
        self.assertEqual(x2.shape[1], X.shape[1])
        self.assertTrue(np.all(np.isin(y1, y)))
        self.assertTrue(np.all(np.isin(y2, y)))
        self.assertTrue(np.all(np.isin(np.concatenate((y1, y2), axis=0), y)))
        self.assertTrue(np.all(np.isin(y, np.concatenate((y1, y2), axis=0))))
        self.assertEqual(y1.shape[0] + y2.shape[0], y.shape[0])
        # 1D targets are expanded to shape (n, 1)
        self.assertEqual(y1.shape[1], 1)
        self.assertEqual(y2.shape[1], 1)
    def test_split_sets_2d_target1d_v2(self):
        """Split numpy arrays into sets.

        Same as v1 but the target is already 2D with shape (10, 1), which
        must be preserved as-is.
        """
        X = np.arange(200).reshape(10, 20)
        y = np.arange(10).reshape(10, 1)
        x1, y1, x2, y2 = utils._split_sets(X, y, val=0.5)
        # both halves are drawn from X and together cover it exactly
        self.assertTrue(np.all(np.isin(x1, X)))
        self.assertTrue(np.all(np.isin(x2, X)))
        self.assertTrue(np.all(np.isin(np.concatenate((x1, x2), axis=0), X)))
        self.assertTrue(np.all(np.isin(X, np.concatenate((x1, x2), axis=0))))
        self.assertEqual(x1.shape[0] + x2.shape[0], X.shape[0])
        self.assertEqual(x1.shape[1], X.shape[1])
        self.assertEqual(x2.shape[1], X.shape[1])
        self.assertTrue(np.all(np.isin(y1, y)))
        self.assertTrue(np.all(np.isin(y2, y)))
        self.assertTrue(np.all(np.isin(np.concatenate((y1, y2), axis=0), y)))
        self.assertTrue(np.all(np.isin(y, np.concatenate((y1, y2), axis=0))))
        self.assertEqual(y1.shape[0] + y2.shape[0], y.shape[0])
        self.assertEqual(y1.shape[1], y.shape[1])
        self.assertEqual(y2.shape[1], y.shape[1])
    def test_split_sets_3d_target1d(self):
        """Split 3d numpy arrays into sets.

        3D data and 3D targets: only the first (trial) axis is split; all
        trailing dimensions of both X and y must be preserved.
        """
        X = np.arange(1000).reshape(10, 5, 20)
        y = np.arange(50).reshape(10, 5, 1)
        x1, y1, x2, y2 = utils._split_sets(X, y, val=0.5)
        # both halves are drawn from X and together cover it exactly
        self.assertTrue(np.all(np.isin(x1, X)))
        self.assertTrue(np.all(np.isin(x2, X)))
        self.assertTrue(np.all(np.isin(np.concatenate((x1, x2), axis=0), X)))
        self.assertTrue(np.all(np.isin(X, np.concatenate((x1, x2), axis=0))))
        self.assertEqual(x1.shape[0] + x2.shape[0], X.shape[0])
        self.assertEqual(x1.shape[1], X.shape[1])
        self.assertEqual(x2.shape[1], X.shape[1])
        self.assertEqual(x1.shape[2], X.shape[2])
        self.assertEqual(x2.shape[2], X.shape[2])
        self.assertTrue(np.all(np.isin(y1, y)))
        self.assertTrue(np.all(np.isin(y2, y)))
        self.assertTrue(np.all(np.isin(np.concatenate((y1, y2), axis=0), y)))
        self.assertTrue(np.all(np.isin(y, np.concatenate((y1, y2), axis=0))))
        self.assertEqual(y1.shape[0] + y2.shape[0], y.shape[0])
        self.assertEqual(y1.shape[1], y.shape[1])
        self.assertEqual(y2.shape[1], y.shape[1])
        self.assertEqual(y1.shape[2], y.shape[2])
        self.assertEqual(y2.shape[2], y.shape[2])
def test_split_sets_2d_target2d(self):
"""Split numpy arrays into sets."""
X = np.arange(200).reshape(10, 20)
y = np.arange(20).reshape(10, 2)
x1, y1, x2, y2 = utils._split_sets(X, y, val=0.5)
self.assertTrue(np.all(np.isin(x1, X)))
self.assertTrue(np.all(np.isin(x2, X)))
self.assertTrue(np.all(np.isin(np.concatenate((x1, x2), axis=0), X)))
self.assertTrue(np.all(np.isin(X, np.concatenate((x1, x2), axis=0))))
self.assertEqual(x1.shape[0] + x2.shape[0], X.shape[0])
| |
# The chain (Rumors reference intended)
# ----------------------------------------- Higher-level Chain functions -----------------------------------------------
def makeChain(functionDictList, preiterable=None, inMethod=None, in_kwargs=None, writeInfoExists=False,
              recordMethod=None, record_kwargs=None, writeInfo_recordkw=None, extractMethod=None, extract_kwargs=None):
    """
    FUNCTION: convenience constructor for a Chain object.

    Builds a Chain describing (a) a data source (``preiterable``) and, when
    needed, how to turn it into an iterable (``inMethod``/``in_kwargs``);
    (b) the sequence of processing functions applied at each iteration
    (``functionDictList``); and (c) how per-iteration results are recorded
    and finally extracted (``recordMethod``/``record_kwargs``/
    ``writeInfo_recordkw`` and ``extractMethod``/``extract_kwargs``).

    ARGUMENTS:
        functionDictList: list of dicts, each with keys: function, suffix,
            writeOut, and optionally args, kwargs. The writeOut value is None
            or a dict with keys function, newLink_kw and optionally args,
            kwargs, priorLink_kw, suffix_kw. ([{}, ...])
        preiterable: data source (its type depends on inMethod; when inMethod
            is None it must itself be iterable)
        inMethod: function converting the data source to an iterable, or None
        in_kwargs: extra keyword arguments for inMethod (dict)
        writeInfoExists: whether each iteration yields (chainObject, writeInfo)
            so data can be written out per step (bool)
        recordMethod: function applied to each iteration's final output to
            produce the object added to the record; when None, the final
            output itself is recorded (function or None)
        record_kwargs: keyword arguments for recordMethod (dict)
        writeInfo_recordkw: name of the recordMethod argument that receives
            any writeInfo
        extractMethod: function applied to the accumulated record after the
            last iteration to produce Chain().execute()'s result; when None,
            the record is returned as is (function or None)
        extract_kwargs: keyword arguments for extractMethod (dict)
    DEPENDENCIES: Class: Chain
    RETURNS: instance of Chain
    """
    return Chain(preiterable,
                 inMethod=inMethod,
                 in_kwargs=in_kwargs,
                 writeInfoExists=writeInfoExists,
                 recordMethod=recordMethod,
                 record_kwargs=record_kwargs,
                 writeInfo_recordkw=writeInfo_recordkw,
                 extractMethod=extractMethod,
                 extract_kwargs=extract_kwargs,
                 functionDictList=functionDictList)
def convertChain(chain, preiterable, inMethod=None, in_kwargs=None, writeInfo=True,
                 recordMethod=None, extractMethod=None, writeInfo_recordkw=None):
    """
    FUNCTION: Alter an existing chain object in order to apply its chain
    functions to a different type of data.

    ARGUMENTS:
        chain: existing Chain instance to repoint
        preiterable: new data source
        inMethod / in_kwargs: new input-conversion method and its kwargs
        writeInfo: new value for the chain's writeInfoExists flag (bool)
        recordMethod / extractMethod / writeInfo_recordkw: new output methods
    RETURNS: the same chain, updated in place
    """
    chain.newInput(preiterable)
    # FIX: Chain.newInputMethods has no 'writeInfo' keyword (its parameter is
    # named 'write'); the original call raised TypeError.
    chain.newInputMethods(inMethod=inMethod, in_kwargs=in_kwargs, write=writeInfo)
    chain.newOutputMethods(recordMethod=recordMethod, extractMethod=extractMethod,
                           writeInfo_recordkw=writeInfo_recordkw)
    return chain
def alterChainMethod(chain, suffix, update, paramName=None):
    """Update the ChainMethod identified by *suffix* on *chain*.

    When paramName is None the whole function dict is replaced by *update*;
    otherwise only the named parameter is set to *update*. Returns the chain.
    """
    if paramName is None:
        chain.updateChainMethod(suffix, update)
    else:
        chain.updateChainMethodParam(suffix, paramName, update)
    return chain
# ---------------------------------------- Chain-related helper functions ----------------------------------------------
def makeChainTemplateDict(functionDictList=None, noLinks=0, writeOutBools=None):
    """Return a template kwargs dict for building a Chain.

    When functionDictList is None, a list of *noLinks* blank function dicts is
    generated (writeOutBools, when given, selects which links get a writeOut
    template; it defaults to all-False).
    """
    if functionDictList is None:
        functionDictList = []
        if writeOutBools is None:
            # FIX: the original indexed writeOutBools[link] unconditionally,
            # raising TypeError when noLinks > 0 and no bools were supplied.
            writeOutBools = [False] * noLinks
        for link in range(noLinks):
            functionDict = makeFunctionDictTemplate(writeOut=writeOutBools[link])
            functionDictList.append(functionDict)
    chainDict = {'preiterable': None, 'in_kwargs': None, 'writeInfoExists': False,
                 'functionDictList': functionDictList,
                 'recordMethod': None, 'record_kwargs': None, 'writeInfo_recordkw': None,
                 'extractMethod': None, 'extract_kwargs': None}
    return chainDict
def makeFunctionDictTemplate(writeOut=False):
    """Return a blank function dict for a ChainMethod; when *writeOut* is
    true, it carries a blank writeOut sub-dict as well."""
    if writeOut:
        writeOutDict = {
            'function': None, 'args': None, 'kwargs': None,
            'priorLink_kw': None, 'newLink_kw': None,
            'writeInfo_kw': None, 'suffix_kw': None,
        }
    else:
        writeOutDict = None
    return {'function': None, 'suffix': None, 'args': None, 'kwargs': None,
            'writeOut': writeOutDict}
# ------------------------------------------------ Chain classes -------------------------------------------------------
class Chain:
def __init__(self, preiterable=None,
inMethod=None,
in_kwargs=None,
writeInfoExists=False,
functionDictList=None,
recordMethod=None,
record_kwargs=None,
writeInfo_recordkw=None,
extractMethod=None,
extract_kwargs=None,
**kwargs):
"""
DATA ITERATION
:param preiterable: data source (type depends on inMethod and functions in chain). Not necessary and wont
be used if data is supplied to the execute method.
:param inMethod: method for converting data source into iterable object. If None, the preiterable object
must be iterable (function)
:param in_kwargs: key word arguments for inMethod function (dict)
:param writeInfoExists: When iterating over the inputted data (once inMethod(preiterable, **in_kwargs)
is applied), does the data have the form ~ (chainObject, writeInfo)*?
* where chainObject is the data object with which to apply chain functions and
writeInfo is some information required to write out information at this step.
CHAIN FUNCTIONS
:param functionDictList: list of dictionaries, which must contain specific keys including: function,
suffix, writeOut, args*, kwargs* (* optional). The value at function should be the function to execute.
The value at writeOut should be None or a dictionary with keys function, args*, kwargs*, priorLink_kw*,
newLink_kw, and suffix_kw*. ([{}, ...])
DATA RECORDING OVER ITERATIONS
:param recordMethod: function which can be applied to the final output of processing chain each iteration
in order to produce an object which can be added to the record (function or None).
If None, the the final object of the processing chain will be added to the record.
:param record_kwargs: key word arguments for the recordMethod function (dict).
:param writeInfo_recordkw: name of the argument to which any writeInfo (as above) should be supplied in
the recordMethod function.
:param extractMethod: function that can be applied to the accumulated record following the final iteration
to produce the final result returned by Chain().execute() (function or None).
If None, Chain().execute() will return the record as is.
:param extract_kwargs: key word arguments for the extractMethod function (dict).
"""
# the following relate to the method by which data source is converted to an iterable object
# and the
self.preiterable = preiterable # probably a substack or directory of files
self.inMethod = inMethod # function for reading input into chain (e.g., generateFromDirectory(...))
self.in_kwargs = in_kwargs
if self.in_kwargs is None:
self.in_kwargs = {}
self.writeInfoExists = writeInfoExists
# The following code defines the chain if functionDictList is specified
self.chain = [] # list of ChainMethods
if functionDictList is not None:
self.defineChain(functionDictList=functionDictList)
# define the scribe (can be overwritten)
self.scribe = Scribe(recordMethod=recordMethod,
record_kwargs=record_kwargs,
writeInfo_recordkw=writeInfo_recordkw,
extractMethod=extractMethod,
extract_kwargs=extract_kwargs)
# Basic methods ---------------------------------------
def defineChain(self, functionDictList):
for functionDict in functionDictList:
newLink = ChainMethod(functionDict)
self.chain.append(newLink)
def addScribe(self, recordMethod=None, record_kwargs=None, writeInfo_recordkw=None,
extractMethod=None, extract_kwargs=None):
self.scribe = Scribe(recordMethod=recordMethod,
record_kwargs=record_kwargs,
writeInfo_recordkw=writeInfo_recordkw,
extractMethod=extractMethod,
extract_kwargs=extract_kwargs)
def addChainMethod(self, functionDict):
newLink = ChainMethod(functionDict)
self.chain.append(newLink)
def removeChainMethod(self, suffix):
for i in range(len(self.chain)):
if self.chain[i].suffix == suffix:
remove = i
if remove is not None:
del self.chain[remove]
else:
print('No ChainMethod with suffix {}'.format(suffix))
def getChainMethodSuffixes(self):
suffixes = [chainMethod.suffix for chainMethod in chain]
return suffixes
# Update methods ---------------------------------------
def updateChainMethod(self, suffix, functionDict):
"""
Redefine one instance of ChainMethod with the desired value at ChainMethod.suffix
"""
for i in range(len(self.chain)):
if self.chain[i].suffix == suffix:
self.chain[i].update(functionDict)
def updateChainMethodParam(self, suffix, paramName, update):
"""
Redefine one parameter in an existing ChainMethod with the desired value at ChainMethod.suffix
"""
for i in range(len(self.chain)):
if self.chain[i].suffix == suffix:
self.chain[i].funct[paramName] = update
self.chain[i].setAll()
def newInput(self, newInput):
self.preiterable = newInput
def newInputMethods(self, inMethod=None, in_kwargs=None, write=True):
self.inMethod = inMethod
self.in_kwargs = in_kwargs
self.writeInfoExists = writeInfoExists
def newOutputMethods(self, recordMethod=None, record_kwargs=None, extractMethod=None, extract_kwargs=None,
writeInfo_recordkw=None):
self.scribe = Scribe(recordMethod=recordMethod,
record_kwargs=record_kwargs,
writeInfo_recordkw=writeInfo_recordkw,
extractMethod=extractMethod,
extract_kwargs=extract_kwargs)
# Usage method -----------------------------------------
    def execute(self, data=None):
        """Run every ChainMethod over each object produced from *data*,
        record the results with the scribe, and return the extracted output.

        If *data* is None the iterable stored via newInput() is used.
        Three dispatch paths exist, depending on whether an inMethod was
        supplied and whether it yields (chainObj, writeInfo) pairs.
        """
        # define the objects that must be iterated over
        if data is None:
            data = self.preiterable
        # Path 1: inMethod yields (chainObj, writeInfo) pairs.
        if self.writeInfoExists and self.inMethod is not None:
            for chainObj, writeInfo in self.inMethod(data, **self.in_kwargs):
                for chainMethod in self.chain:
                    chainObj = chainMethod.run(chainObj, writeInfo=writeInfo)
                self.scribe.addRecord(chainObj, writeInfo=writeInfo)
            output = self.scribe.extract()
            self.scribe.cleanRecord()
            return output
        # Path 2: inMethod yields bare chain objects (no writeInfo).
        if not self.writeInfoExists and self.inMethod is not None:
            for chainObj in self.inMethod(data, **self.in_kwargs):
                for chainMethod in self.chain:
                    chainObj = chainMethod.run(chainObj)
                self.scribe.addRecord(chainObj)
            output = self.scribe.extract()
            self.scribe.cleanRecord()
            return output
        # Path 3: no inMethod — iterate `data` directly.
        # NOTE(review): this `else` pairs with the second `if`, so a
        # writeInfoExists=True / inMethod=None combination also lands here
        # — confirm that is intended.
        else:
            for chainObj in data:
                for chainMethod in self.chain:
                    chainObj = chainMethod.run(chainObj)
                self.scribe.addRecord(chainObj)
            output = self.scribe.extract()
            self.scribe.cleanRecord()
            return output
class ChainMethod:
"""
Takes a function dictionary as the initial argument. An instance of ChainMethod is an object that allows
a function to be executed with a series of pre-defined parameters (as args or kwargs) when supplied only
a single argument.
If a writeOut key is supplied with an appropriate dict, data will be saved at this step.
If a writeOut | |
import numpy as np
import multiprocessing
import sys
import time
import matplotlib.pyplot as plt
# =============================================================================
# Distributed Computing Parameters
pool_size = multiprocessing.cpu_count()
# Genetic Circuit Hyperparameters
NODES = 3000
# Evolutionary Algorithm Hyperparameters
GENERATIONS = 201 # number of generations to run
# Other Hyperparameters
# STEP_MUTATION_RATE = 0.9
# BIG_STEP_MUTATION_RATE = 0.8
# RANDOM_MUTATION_RATE = 1
# SIGN_FLIP_MUTATION_RATE = 0.1
# REG_RATE = 0.0003 # regularization rate
STEP_SIZE = 2.0 # max mutation intensity of each weight
POPULATION = pool_size * 6 # total number of population
SURVIVABLE_PARENTS = POPULATION // 3 # number of parents to survive
# Novelty Search Hyperparameters
# KNN_BC_NUM = 1 # k nearest neighbors number for behavior characteristics
# ARCHIVE_STORING_RATE = 0.01
# ODE
TIME_STEPS = 300
BATCH_SIZE = 30 # Fully dividable by 3 recommended
# Score Constraints
ERROR_BOUND = 0.1 # percentage of error allowed (sigmoid bounds are +-1)
BANDPASS_BOUND = 0.3
# the absolute bound of each weight (very important)
# choose something close to sigmoid saturation is good (eg. 7.5+, 5 is not good, 10 is good)
BOUND = 13
# Parameters (Derived from hyperparameters)
DNA_SIZE = NODES * NODES
UPPER_BANDPASS_BOUND = 1 - BANDPASS_BOUND
COST_UPPER_BOUND = ERROR_BOUND * BATCH_SIZE
# =============================================================================
# Mean normalization
def standardize(population):
    """Z-score normalize each individual's weights in place.

    Every weight array is shifted to zero mean and scaled to unit standard
    deviation (standardization, as opposed to min-max normalization).
    The mutated population is also returned for convenience.
    """
    for idx, weights in enumerate(population):
        mu = np.mean(weights)
        sigma = np.std(weights)
        population[idx] = (weights - mu) / sigma
    return population
# =============================================================================
# ODE & Simulations
def sigmoid(x):
    """Elementwise logistic squashing function: 1 / (1 + e^-x)."""
    return 1.0 / (1.0 + np.exp(-x))
# FF Classifier
# Here, only the classical solution determinator is implemented
# def simulate_ode_original(W, N, B, S):
# dt = 0.01
# initial_val = 0.1 * np.ones([B, S]) # can we reuse this?
# input_val = np.linspace(0, 2, B).reshape(B, 1) * np.random.normal(
# loc=1.0, scale=0.0001, size=[N, B, S]) # can we reduce the redundants?
# input_val[:, :, 1:S] = 0.0
# output = initial_val + (
# sigmoid(np.matmul(initial_val, W)) - initial_val + input_val[0]) * dt
# # print(output)
# # HOW: create one time np.linspace(0, 2, B), mutate and reuse in for loop
# for i in range(1, N):
# output = output + (
# sigmoid(np.matmul(output, W)) - output + input_val[i]) * dt
# # print(output)
# return output
# input_initializer = np.linspace(0, 2, BATCH_SIZE).reshape(BATCH_SIZE, 1,)
# input_val[:, 0] = np.linspace(0, 2, BATCH_SIZE).reshape(BATCH_SIZE)
# print(np.random.normal(loc=1.0, scale=0.0001))
# --- Shared ODE buffers: allocated once at import and reused (and mutated)
# by every simulate_ode call to avoid per-call allocation ---
dt = 0.01  # integration time step
initial_val = 0.1 * np.ones([BATCH_SIZE, NODES])  # uniform initial expression level
input_val = np.zeros((BATCH_SIZE, NODES))  # input buffer, overwritten each ODE step
linspace_col = np.linspace(0, 2, BATCH_SIZE).reshape(BATCH_SIZE)  # input levels 0..2
def simulate_ode(W, N, B, S):
    """Euler-integrate the gene circuit dx/dt = sigmoid(xW) - x + input for
    N steps and return the final (BATCH_SIZE, NODES) state.

    The first three node columns receive a noisy linear input ramp
    (linspace_col scaled by a fresh N(1, 0.0001) factor each step).

    NOTE(review): B and S are unused; the function reads the module-level
    buffers dt / initial_val / input_val / linspace_col and mutates
    input_val in place — confirm callers never rely on B/S.
    """
    # Insert one input and have three outputs
    input_val[:, 0] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
    input_val[:, 1] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
    input_val[:, 2] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
    # First Euler step starts from initial_val.
    output = (
        initial_val
        + (sigmoid(np.matmul(initial_val, W)) - initial_val + input_val) * dt
    )
    # Remaining N-1 steps, re-sampling the input noise each step.
    for i in range(1, N):
        input_val[:, 0] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
        input_val[:, 1] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
        input_val[:, 2] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
        output = output + (sigmoid(np.matmul(output, W)) - output + input_val) * dt
    # print(output)
    return output
def plot_expressions(y, B):
    """Plot every node's expression level against the B input levels.

    Input nodes, unlabeled support nodes (gray), and the three output
    nodes are drawn in distinct colors; the figure is shown on screen.
    """
    xs = np.linspace(1, B, B)
    plt.title(f"{NODES} Nodes")
    plt.plot(xs, y[:, 0], "black", linewidth=2, label="Input Node #1")
    plt.plot(xs, y[:, 1], "saddlebrown", linewidth=2, label="Input Node #2")
    # Support nodes: drawn without individual legend entries.
    for col in range(3, y.shape[1] - 1):
        plt.plot(xs, y[:, col], "gray", linewidth=2)
    plt.plot(xs, y[:, -3], "b", linewidth=2, label="Output Node #3 - Switch")
    plt.plot(xs, y[:, -2], "g", linewidth=2, label="Output Node #2 - Valley")
    plt.plot(xs, y[:, -1], "r", linewidth=2, label="Output Node #1 - Bandpass")
    plt.xlabel("Input Level")
    plt.ylabel("Output Level")
    plt.legend()
    plt.show()
# =============================================================================
# Behavior characteristic distance mean calculator
# def population_novelty(population):
# pop_novelty = np.zeros(POPULATION)
# bc_distance = np.zeros(POPULATION)
# for i, weights in enumerate(population):
# for j, target in enumerate(population):
# bc_distance[j] = np.linalg.norm(weights - target)
# # only uses KNN_BC_NUM of bc_distance to calculate bc_dist_mean
# bc_distance.sort()
# pop_novelty[i] = np.mean(bc_distance[-KNN_BC_NUM:])
# return pop_novelty
# =============================================================================
# The forever (unforgettable) archive of most novel children in a generation
# Or another method: Prob 1% to store any children to archive
# archive = []
# =============================================================================
# Double mergesort sorting by alist
def double_mergesort(alist, blist):
    """Sort *alist* in place while applying the identical permutation to
    *blist*, so blist[i] stays paired with alist[i].

    Classic top-down mergesort; on ties the element from the right half
    is taken first (matching the original comparison `<`).
    """
    if len(alist) <= 1:
        return
    mid = len(alist) // 2
    left_a, right_a = alist[:mid], alist[mid:]
    left_b, right_b = blist[:mid], blist[mid:]
    double_mergesort(left_a, left_b)
    double_mergesort(right_a, right_b)
    i = j = k = 0
    # Merge the two sorted halves back into the originals.
    while i < len(left_a) and j < len(right_a):
        if left_a[i] < right_a[j]:
            alist[k], blist[k] = left_a[i], left_b[i]
            i += 1
        else:
            alist[k], blist[k] = right_a[j], right_b[j]
            j += 1
        k += 1
    while i < len(left_a):
        alist[k], blist[k] = left_a[i], left_b[i]
        i += 1
        k += 1
    while j < len(right_a):
        alist[k], blist[k] = right_a[j], right_b[j]
        j += 1
        k += 1
# =============================================================================
# Main functions
# Bandpass Determinator
# Determines whether the solution given is a bandpass
# so that you don't need the flags -> faster
def bandpass_determinator(y):
    """Return True when the last node's response follows a low→high→low
    (bandpass) shape across the input sweep.

    Only the final output column y[:, -1] is inspected; checking other
    nodes too would help distinguish the classical solution. Once the
    ending-low phase is reached, a later rise above BANDPASS_BOUND
    revokes the ending-low flag.
    """
    starting_low = False
    middle_high = False
    ending_low = False
    for level in y[:, -1]:
        if not starting_low:
            if level < BANDPASS_BOUND:
                starting_low = True
        elif not middle_high:
            if level > UPPER_BANDPASS_BOUND:
                middle_high = True
        elif not ending_low:
            # NOTE(review): the original marked this comparison as suspect
            # ("something is wrong here") — behavior kept unchanged.
            if level < BANDPASS_BOUND:
                ending_low = True
        else:
            if level > BANDPASS_BOUND:
                ending_low = False
    return starting_low and middle_high and ending_low
# Bandpass Cost function (for objective based selection method, the lower the better)
# Assume pt size is dividable by three
bandpass_design = [
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.5,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
0.5,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
bandpass_design = np.array(bandpass_design)
def bandpass_cost_calculator(y, B):
    """L1 distance between response *y* and the target bandpass profile
    (the lower the better; *B* is unused)."""
    return np.abs(y - bandpass_design).sum()
def switch_cost_calculator(y, B):
    """L1 cost against an ideal switch: the first half of *y* should sit
    at 0 and the second half at 1 (the lower the better).

    BUG FIX: the second loop bound its variable as `put` but accumulated
    `np.absolute(1 - pt)`, so the stale `pt` left over from the first
    loop was charged for every point in the second half. The loop
    variable is now actually used.
    """
    cost = 0
    for pt in y[: B // 2]:
        cost += np.absolute(pt - 0)
    for pt in y[B // 2 :]:
        cost += np.absolute(1 - pt)
    return cost
def linear_cost_calculator(y, B):
    """L1 cost against a straight ramp from 0 to 1 over B points
    (the lower the better)."""
    denom = B - 1
    total = 0
    for idx, level in enumerate(y):
        total += np.absolute(level - idx / denom)
    return total
peak_design = [
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.125,
0.25,
0.375,
0.5,
0.625,
0.75,
0.875,
1.0,
1.0,
0.875,
0.75,
0.625,
0.5,
0.375,
0.25,
0.125,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
peak_design = np.array(peak_design)
def peak_cost_calculator(y, B):
    """L1 distance between response *y* and the mountain/peak profile
    (*B* unused).

    The original author noted this experiment "failed": it yields a
    mountain shape, which is much easier to evolve than a bandpass.
    """
    return np.abs(y - peak_design).sum()
cosine_design = [
1.0,
0.9766205557100867,
0.907575419670957,
0.7960930657056438,
0.6473862847818277,
0.46840844069979015,
0.26752833852922075,
0.05413890858541761,
-0.16178199655276473,
-0.37013815533991445,
-0.5611870653623823,
-0.7259954919231308,
-0.8568571761675893,
-0.9476531711828025,
-0.9941379571543596,
-0.9941379571543596,
-0.9476531711828025,
-0.8568571761675892,
-0.7259954919231307,
-0.5611870653623825,
-0.37013815533991445,
-0.16178199655276476,
0.05413890858541758,
0.267528338529221,
0.4684084406997903,
0.6473862847818279,
0.796093065705644,
0.9075754196709569,
0.9766205557100867,
1.0,
]
cosine_design = np.array(cosine_design)
def cosine_cost_calculator(y, B):
    """L1 distance between response *y* and one sampled cosine period
    (*B* unused)."""
    return np.abs(y - cosine_design).sum()
# valley_design = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.9458172417006346, 0.7891405093963936, 0.546948158122427, 0.24548548714079924, -0.08257934547233227, -0.40169542465296926, -0.6772815716257409, -0.879473751206489, -0.9863613034027223, -0.9863613034027224, -0.8794737512064891, -0.6772815716257414, -0.40169542465296987, -0.08257934547233274, 0.2454854871407988, 0.5469481581224266, 0.7891405093963934, 0.9458172417006346, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
# valley_design = 1 - bandpass_design
# valley_design = 1 - peak_design
def valley_cost_calculator(y, B):
    """L1 cost against a valley-shaped target (the lower the better;
    *B* is unused).

    BUG FIX: the original referenced `valley_design`, but every
    definition of that name was commented out, so calling this function
    raised NameError. The most recent commented-out definition
    (`valley_design = 1 - peak_design`, i.e. the inverted mountain) is
    restored here — confirm this matches the intended valley target.
    """
    cost = np.sum(np.abs(y - (1 - peak_design)))
    return cost
bandpass_reversed_design = 1 - bandpass_design
def bandpass_reversed_cost_calculator(y, B):
    """L1 distance between response *y* and the inverted bandpass
    (band-stop) profile (*B* unused)."""
    return np.abs(y - bandpass_reversed_design).sum()
# def adaptation_cost_calculator(y, B):
# cost = 0
# ADAPTED_LEVEL = 0.1
# for pt in y[:B // 3]:
# cost += np.absolute(pt - 0)
# slice = ((1- ADAPTED_LEVEL) / (B//3))
# for i, pt in enumerate(y[B // 3:2 * B // 3]):
# cost += np.absolute(1 - i * slice) * 3
# print(1 - i * slice)
# sys.exit()
# for pt in y[2 * B // 3:]:
# cost += np.absolute(pt - ADAPTED_LEVEL)
# return cost
adaptation_design = [
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.5,
1.0,
0.5,
0.25,
0.125,
0.0625,
0.03125,
0.015625,
0.0078125,
0.00390625,
0.001953125,
0.0009765625,
0.00048828125,
0.000244140625,
0.0001220703125,
6.103515625e-05,
3.0517578125e-05,
1.52587890625e-05,
7.62939453125e-06,
3.814697265625e-06,
1.9073486328125e-06,
]
adaptation_design = np.array(adaptation_design)
def adaptation_cost_calculator(y, B):
    """L1 distance between response *y* and the adaptation target — a
    pulse that spikes to 1 and decays geometrically back toward 0
    (*B* unused)."""
    return np.abs(y - adaptation_design).sum()
# # def adaptation_cost_calculator(y, B):
# cost = 0
# for pt in y[:B // 3]:
# cost += np.absolute(pt - 0)
# for pt in y[B // 3:2 * B // 3]:
# cost += np.absolute(1 - pt)
# for pt in y[2 * B // 3:]:
# cost += np.absolute(pt - 0.5)
# return cost
# Fitness based
cost_storage = [-1] * POPULATION
# def | |
"""Parameters.py - Parameter handling for ruffus pipelines
==========================================================
Reference
---------
"""
import re
import collections
import os
import configparser
import sys
import CGATCore.Experiment as E
import CGATCore.IOTools as IOTools
from CGATCore.Pipeline.Utils import getCallerLocals, isTest
# sort out script paths
# root directory of CGAT Code collection
CGATSCRIPTS_ROOT_DIR = os.path.dirname(
os.path.dirname(E.__file__))
# CGAT Code collection scripts
CGATSCRIPTS_SCRIPTS_DIR = os.path.join(CGATSCRIPTS_ROOT_DIR, "CGAT", "scripts")
# root directory of CGAT Pipelines
CGATPIPELINES_ROOT_DIR = os.path.dirname(os.path.dirname(
os.path.dirname(__file__)))
# CGAT Pipeline scripts
CGATPIPELINES_SCRIPTS_DIR = os.path.join(CGATPIPELINES_ROOT_DIR,
"scripts")
# Directory of CGAT pipelines
CGATPIPELINES_PIPELINE_DIR = os.path.join(CGATPIPELINES_ROOT_DIR,
"CGATPipelines")
# CGAT Pipeline R scripts
CGATPIPELINES_R_DIR = os.path.join(CGATPIPELINES_ROOT_DIR, "R")
# if Pipeline.py is called from an installed version, scripts are
# located in the "bin" directory.
if not os.path.exists(CGATSCRIPTS_SCRIPTS_DIR):
SCRIPTS_DIR = os.path.join(sys.exec_prefix, "bin")
if not os.path.exists(CGATPIPELINES_SCRIPTS_DIR):
PIPELINE_SCRIPTS_DIR = os.path.join(sys.exec_prefix, "bin")
# Global variable for configuration file data
# NOTE(review): SafeConfigParser is a deprecated alias of ConfigParser in
# Python 3 (removed in 3.12) — consider configparser.ConfigParser().
CONFIG = configparser.SafeConfigParser()
class TriggeredDefaultFactory:
    """Switchable default factory for the PARAMS defaultdict.

    While the class-level ``with_default`` flag is True, a missing key
    yields an empty string; otherwise accessing a missing parameter
    raises KeyError. This lets ``--help`` style invocations proceed
    without a fully populated configuration.
    """
    with_default = False

    def __call__(self):
        if not TriggeredDefaultFactory.with_default:
            raise KeyError("missing parameter accessed")
        return str()
# Global variable for parameter interpolation in commands
# This is a dictionary that can be switched between defaultdict
# and normal dict behaviour.
PARAMS = collections.defaultdict(TriggeredDefaultFactory())
# patch - if --help or -h in command line arguments,
# switch to a default dict to avoid missing paramater
# failures
if isTest() or "--help" in sys.argv or "-h" in sys.argv:
TriggeredDefaultFactory.with_default = True
# A list of hard-coded parameters within the CGAT environment
# These can be overwritten by command line options and
# configuration files
HARDCODED_PARAMS = {
'scriptsdir': CGATSCRIPTS_SCRIPTS_DIR,
'toolsdir': CGATSCRIPTS_SCRIPTS_DIR,
'pipeline_scriptsdir': CGATPIPELINES_SCRIPTS_DIR,
'pipelinedir': CGATPIPELINES_PIPELINE_DIR,
'pipeline_rdir': CGATPIPELINES_R_DIR,
# script to perform map/reduce like computation.
'cmd-farm': """python %(pipeline_scriptsdir)s/farm.py
--method=drmaa
--bashrc=%(pipeline_scriptsdir)s/bashrc.cgat
--cluster-options=%(cluster_options)s
--cluster-queue=%(cluster_queue)s
--cluster-num-jobs=%(cluster_num_jobs)i
--cluster-priority=%(cluster_priority)i
--cluster-queue-manager=%(cluster_queue_manager)s
--cluster-memory-resource=%(cluster_memory_resource)s
--cluster-memory-default=%(cluster_memory_default)s
""",
# command to get tab-separated output from database
'cmd-sql': """sqlite3 -header -csv -separator $'\\t' """,
# DEPRECATED: options to use for csv2db upload
"csv2db_options": "--backend=sqlite --retry --map=gene_id:str "
"--map=contig:str --map=transcript_id:str",
# database backend
'database_backend': "sqlite",
# database host
'database_host': "",
# name of database
'database_name': "csvdb",
# database connection options
'database_username': "cgat",
# database password - if required
'database_password': "",
# database port - if required
'database_port': 3306,
# wrapper around non-CGAT scripts
'cmd-run': """%(pipeline_scriptsdir)s/run.py""",
# legacy directory used for temporary local files
# Use of this var can be problematic (issue #174)
# - it may be depreciated.
'tmpdir': os.environ.get("TMPDIR", '/scratch'),
# directory used for temporary local tempory files on compute nodes
# *** passed directly to the shell ***
# *** may not exist on login/head nodes ***
# default matches 'tmpdir' only for backwards compatibility
# typically a shell environment var is expected, e.g.
# 'local_tmpdir': '$SCRATCH_DIR',
'local_tmpdir': os.environ.get("TMPDIR", '/scratch'),
# directory used for temporary files shared across machines
'shared_tmpdir': os.environ.get("SHARED_TMPDIR", "/ifs/scratch"),
# queue manager (supported: sge, slurm, torque, pbspro)
'cluster_queue_manager': 'sge',
# cluster queue to use
'cluster_queue': 'all.q',
# priority of jobs in cluster queue
'cluster_priority': -10,
# number of jobs to submit to cluster queue
'cluster_num_jobs': 100,
# name of consumable resource to use for requesting memory
'cluster_memory_resource': "mem_free",
# amount of memory set by default for each job
'cluster_memory_default': "2G",
# general cluster options
'cluster_options': "",
# parallel environment to use for multi-threaded jobs
'cluster_parallel_environment': 'dedicated',
# ruffus job limits for databases
'jobs_limit_db': 10,
# ruffus job limits for R
'jobs_limit_R': 1,
}
# After all configuration files have been read, some
# parameters need to be interpolated with other parameters
# The list is below:
INTERPOLATE_PARAMS = ('cmd-farm', 'cmd-run')
def configToDictionary(config):
    """Flatten a :py:class:`ConfigParser.ConfigParser` into a dictionary.

    Every (section, key) pair becomes ``section_key``. Keys in the
    ``general`` and ``DEFAULT`` sections are additionally inserted
    unprefixed, so ``[general] genome=hg19`` yields both
    ``general_genome`` and ``genome``. Values are converted to int/float
    where possible via ``IOTools.str2val``.

    Returns
    -------
    dict
        The flattened configuration values.
    """
    result = {}
    for section in config.sections():
        for key, value in config.items(section):
            try:
                converted = IOTools.str2val(value)
            except TypeError:
                E.error("error converting key %s, value %s" % (key, value))
                E.error("Possible multiple concurrent attempts to "
                        "read configuration")
                raise
            result["%s_%s" % (section, key)] = converted
            # general/DEFAULT entries are also exposed without a prefix.
            if section in ("general", "DEFAULT"):
                result["%s" % (key)] = converted
    for key, value in config.defaults().items():
        result["%s" % (key)] = IOTools.str2val(value)
    return result
def inputValidation(PARAMS, pipeline_script=""):
    '''Inspects the PARAMS dictionary looking for problematic input values.
    So far we just check that:
    * all required 3rd party tools are on the PATH
    * input parameters are not empty
    * input parameters do not contain the "?" character (used as a
      placeholder in different pipelines)
    * if the input is a file, check whether it exists and
      is readable
    The user is prompted interactively to confirm or abort the pipeline.
    '''
    E.info('''=== Input Validation starts ===''')
    E.info(''' Checking 3rd party dependencies ''')
    ### check 3rd party dependencies ###
    if len(pipeline_script) > 0:
        # this import requires the PYTHONPATH in the following order
        # PYTHONPATH=<src>/CGATPipelines:<src>/cgat
        import scripts.cgat_check_deps as cd
        deps, check_path_failures = cd.checkDepedencies(pipeline_script)
        # print info about dependencies
        if len(deps) == 0:
            print('\nNo dependencies found.\n')
        else:
            # print dictionary ordered by value
            for k in sorted(deps, key=deps.get, reverse=True):
                print('\nProgram: {0!s} used {1} time(s)'.format(k, deps[k]))
        n_failures = len(check_path_failures)
        if n_failures == 0:
            print('\nCongratulations! All required programs are available on your PATH\n')
        else:
            print('\nThe following programs are not on your PATH')
            for p in check_path_failures:
                print('\n{0!s}'.format(p))
            # BUG FIX: bare `print` is a Python 2 statement; in Python 3 it
            # merely evaluated the function object and printed nothing.
            print()
    ## check PARAMS
    num_missing = 0
    num_questions = 0
    E.info(''' Checking pipeline configuration ''')
    # BUG FIX: dict.iteritems() does not exist in Python 3; use items().
    for key, value in sorted(PARAMS.items()):
        key = str(key)
        value = str(value)
        # check for missing values
        if value == "":
            print('\n"{}" is empty, is that expected?'.format(key))
            num_missing += 1
        # check for a question mark in the dictironary (indicates
        # that there is a missing input parameter)
        if "?" in value:
            print('\n"{}" is not defined (?), is that expected?'.format(key))
            num_questions += 1
        # validate input files listed in PARAMS
        if (value.startswith("/") \
            or value.endswith(".gz") \
            or value.endswith(".gtf")) \
           and "," not in value:
            if os.access(value, os.R_OK):
                pass
            else:
                print('\n"{}": "{}" is not readable'.format(key, value))
    # BUG FIX: raw_input() was renamed to input() in Python 3 (both prompts
    # below).
    if num_missing == 0 and num_questions == 0:
        while True:
            confirmation = input('''
##########################################################
Your input data seems all correct, congratulations!
Do you want to continue running the pipeline? (y/n)
##########################################################
''')
            if confirmation.lower() == "y":
                E.info('=== Input Validation ends ===')
                break
            elif confirmation.lower() == "n":
                E.info('=== Input Validation ends ===')
                E.info('Pipeline aborted.')
                sys.exit(0)
    else:
        while True:
            start_pipeline = input('''
###########################################################
Please check the WARNING messages and if you are
happy then enter "y" to continue or "n" to abort running
the pipeline.
###########################################################
''')
            if start_pipeline.lower() == "y":
                E.info('=== Input Validation ends ===')
                break
            if start_pipeline.lower() == "n":
                E.info('=== Input Validation ends ===')
                E.info('Pipeline aborted.')
                sys.exit(0)
def getParameters(filenames=["pipeline.ini", ],
defaults=None,
site_ini=True,
user_ini=True,
default_ini=True,
only_import=None):
'''read a config file and return as a dictionary.
Sections and keys are combined with an underscore. If a key
without section does not exist, it will be added plain.
For example::
[general]
input=input1.file
[special]
input=input2.file
will be entered as { 'general_input' : "input1.file",
'input: "input1.file", 'special_input' : "input2.file" }
This function also updates the module-wide parameter map.
The section [DEFAULT] is equivalent to [general].
The order of initialization is as follows:
1. hard-coded defaults
2. pipeline specific default file in the CGAT code installation
3. :file:`.cgat` in the users home directory
4. files supplied by the user in the order given
If the same configuration value appears in multiple
files, later configuration files will overwrite the
settings form earlier files.
Path names are expanded to the absolute pathname to avoid
ambiguity with relative path names. Path names are updated
for parameters that end in the suffix "dir" and start with
a "." such as "." or "../data".
Arguments
---------
filenames : list
List of filenames of the configuration files to read.
defaults : dict
Dictionary with default values. These will be overwrite
any hard-coded parameters, but will be overwritten by user
specified parameters in the configuration files.
default_ini : bool
If set, the default initialization file will be read from
'CGATPipelines/configuration/pipeline.ini'
user_ini : bool
If set, configuration | |
= copy.deepcopy(diag_params)
report_methods = copy.deepcopy(report_methods)
net.load.p_kw.at[4] *= 1000
check_result = pp.overload(net, diag_params['overload_scaling_factor'])
if check_result:
diag_results = {check_function: check_result}
else:
diag_results = {}
assert diag_results[check_function] == \
{'generation': 'uncertain', 'load': True}
for bool_value in [True, False]:
diag_report = DiagnosticReports(net, diag_results, diag_params, compact_report=bool_value)
report_check = None
try:
eval(report_methods[check_function])
report_check = True
except:
report_check = False
assert report_check
net.load.p_kw.at[4] /= 1000
net.gen.p_kw *= 1000
check_result = pp.overload(net, diag_params['overload_scaling_factor'])
if check_result:
diag_results = {check_function: check_result}
else:
diag_results = {}
assert diag_results[check_function] == \
{'generation': True, 'load': 'uncertain'}
for bool_value in [True, False]:
diag_report = DiagnosticReports(net, diag_results, diag_params, compact_report=bool_value)
report_check = None
try:
eval(report_methods[check_function])
report_check = True
except:
report_check = False
assert report_check
def test_switch_configuration(test_net, diag_params, report_methods):
    """Check pp.wrong_switch_configuration() and its diagnostic report in
    three scenarios: two specific switches opened, all switches opened
    (singular matrix), and an overloaded net with all switches closed.

    Scenarios run sequentially on one deep-copied net; each mutation is
    undone before the next scenario.
    """
    net = copy.deepcopy(test_net)
    check_function = 'wrong_switch_configuration'
    diag_params = copy.deepcopy(diag_params)
    report_methods = copy.deepcopy(report_methods)
    # Scenario 1: open two individual switches -> check should flag True.
    net.switch.closed.at[0] = 0
    net.switch.closed.at[2] = 0
    check_result = pp.wrong_switch_configuration(net)
    if check_result:
        diag_results = {check_function: check_result}
    else:
        diag_results = {}
    assert diag_results[check_function] == True
    # The report must build without raising, in both compact and full form.
    for bool_value in [True, False]:
        diag_report = DiagnosticReports(net, diag_results, diag_params, compact_report=bool_value)
        report_check = None
        try:
            eval(report_methods[check_function])
            report_check = True
        except:
            report_check = False
        assert report_check
    # Undo scenario 1, then open every switch at once.
    net.switch.closed.at[0] = 1
    net.switch.closed.at[2] = 1
    net.switch.closed = 0
    # this will raise the warning "Matrix is exactly singular" -> ignore
    warnings.simplefilter("ignore")
    check_result = pp.wrong_switch_configuration(net)
    if check_result:
        diag_results = {check_function: check_result}
    else:
        diag_results = {}
    assert diag_results[check_function] == True
    for bool_value in [True, False]:
        diag_report = DiagnosticReports(net, diag_results, diag_params, compact_report=bool_value)
        report_check = None
        try:
            eval(report_methods[check_function])
            report_check = True
        except:
            report_check = False
        assert report_check
    # Scenario 3: all switches closed but one load overloaded ->
    # the check cannot decide and should report 'uncertain'.
    net.switch.closed = 1
    net.load.p_kw.at[4] *= 1000
    check_result = pp.wrong_switch_configuration(net)
    if check_result:
        diag_results = {check_function: check_result}
    else:
        diag_results = {}
    assert diag_results[check_function] == 'uncertain'
    for bool_value in [True, False]:
        diag_report = DiagnosticReports(net, diag_results, diag_params, compact_report=bool_value)
        report_check = None
        try:
            eval(report_methods[check_function])
            report_check = True
        except:
            report_check = False
        assert report_check
def test_different_voltage_levels_connected(test_net, diag_params, report_methods):
    """Check pp.different_voltage_levels_connected(): after adding a
    bus-bus switch between buses 41 and 45 and changing bus 38 to 30 kV,
    the check must flag lines [6, 7] and the new switch [88], and the
    diagnostic report must build without raising.
    """
    net = copy.deepcopy(test_net)
    check_function = 'different_voltage_levels_connected'
    diag_params = copy.deepcopy(diag_params)
    report_methods = copy.deepcopy(report_methods)
    # Introduce the faults: a bus-bus switch across voltage levels and a
    # mismatched nominal voltage on bus 38.
    pp.create_switch(net, 41, 45, et = 'b')
    net.bus.vn_kv.loc[38] = 30
    check_result = pp.different_voltage_levels_connected(net)
    if check_result:
        diag_results = {check_function: check_result}
    else:
        diag_results = {}
    assert diag_results[check_function] == \
           {'lines': [6, 7], 'switches': [88]}
    # Report must build in both compact and full form.
    for bool_value in [True, False]:
        diag_report = DiagnosticReports(net, diag_results, diag_params, compact_report=bool_value)
        report_check = None
        try:
            eval(report_methods[check_function])
            report_check = True
        except:
            report_check = False
        assert report_check
def test_lines_with_impedance_close_to_zero(test_net, diag_params, report_methods):
    """Check pp.lines_with_impedance_close_to_zero(): lines with zero
    length or zero r AND x must be flagged (lines 0, 1, 4); once the
    values are restored above the thresholds nothing is flagged. The
    diagnostic report must build in both cases.
    """
    net = copy.deepcopy(test_net)
    check_function = 'lines_with_impedance_close_to_zero'
    diag_params = copy.deepcopy(diag_params)
    report_methods = copy.deepcopy(report_methods)
    # Faults: line 0 zero length; line 1 zero r and x; lines 2 and 3 each
    # zero in only one of r/x (should NOT be flagged); line 4 zero in all.
    net.line.length_km.at[0] = 0
    net.line.r_ohm_per_km.at[1] = 0
    net.line.x_ohm_per_km.at[1] = 0
    net.line.r_ohm_per_km.at[2] = 0
    net.line.x_ohm_per_km.at[3] = 0
    net.line.length_km.at[4] = 0
    net.line.r_ohm_per_km.at[4] = 0
    net.line.x_ohm_per_km.at[4] = 0
    check_result = pp.lines_with_impedance_close_to_zero(net,
                                                        diag_params['lines_min_length_km'],
                                                        diag_params['lines_min_z_ohm'])
    if check_result:
        diag_results = {check_function: check_result}
    else:
        diag_results = {}
    assert diag_results[check_function] == [0, 1, 4]
    # Report must build in both compact and full form.
    for bool_value in [True, False]:
        diag_report = DiagnosticReports(net, diag_results, diag_params, compact_report=bool_value)
        report_check = None
        try:
            eval(report_methods[check_function])
            report_check = True
        except:
            report_check = False
        assert report_check
    # Restore small-but-nonzero values: no line should be flagged now.
    net.line.length_km.at[0] = 0.001
    net.line.r_ohm_per_km.at[1] = 0.001
    net.line.x_ohm_per_km.at[1] = 0.001
    net.line.r_ohm_per_km.at[2] = 0.001
    net.line.x_ohm_per_km.at[3] = 0.001
    net.line.length_km.at[4] = 1
    net.line.r_ohm_per_km.at[4] = 0.001
    net.line.x_ohm_per_km.at[4] = 0.001
    check_result = pp.lines_with_impedance_close_to_zero(net,
                                                        diag_params['lines_min_length_km'],
                                                        diag_params['lines_min_z_ohm'])
    if check_result:
        diag_results = {check_function: check_result}
    else:
        diag_results = {}
    assert check_function not in diag_results
    for bool_value in [True, False]:
        diag_report = DiagnosticReports(net, diag_results, diag_params, compact_report=bool_value)
        report_check = None
        try:
            eval(report_methods[check_function])
            report_check = True
        except:
            report_check = False
        assert report_check
def test_nominal_voltages_dont_match(test_net, diag_params, report_methods):
    """Diagnostic must flag trafo/trafo3w nominal voltages deviating from the
    connected bus voltages, and detect swapped/rotated connectors; every
    result must also render as a report without raising."""
    net = copy.deepcopy(test_net)
    check_function = 'nominal_voltages_dont_match'
    diag_params = copy.deepcopy(diag_params)
    report_methods = copy.deepcopy(report_methods)
    trafo_copy = copy.deepcopy(net.trafo)
    trafo3w_copy = copy.deepcopy(net.trafo3w)

    def run_check():
        # Run the diagnostic and wrap its result like the diagnostic runner does.
        res = pp.nominal_voltages_dont_match(
            net, diag_params['nom_voltage_tolerance'])
        return {check_function: res} if res else {}

    def assert_report_ok(diag_results):
        # Building both the compact and the full report must not raise.
        # NOTE: the evaluated string references the local name `diag_report`.
        for compact in (True, False):
            diag_report = DiagnosticReports(net, diag_results, diag_params,
                                            compact_report=compact)
            try:
                eval(report_methods[check_function])
                succeeded = True
            except:
                succeeded = False
            assert succeeded

    # Scaling factors applied to the nominal voltages and whether the check
    # is expected to flag them (depends on diag_params['nom_voltage_tolerance']).
    factor_cases = ((1.31, True), (0.69, True), (1.29, False), (0.71, False))

    # --- two-winding trafo: hv and lv bus swapped ---
    net.trafo.hv_bus.at[0] = trafo_copy.lv_bus.at[0]
    net.trafo.lv_bus.at[0] = trafo_copy.hv_bus.at[0]
    diag_results = run_check()
    assert diag_results[check_function] == \
        {'trafo': {'hv_lv_swapped': [0]}}
    assert_report_ok(diag_results)

    # --- two-winding trafo: both nominal voltages scaled ---
    for factor, flagged in factor_cases:
        net.trafo = copy.deepcopy(trafo_copy)
        net.trafo.vn_hv_kv.at[0] *= factor
        net.trafo.vn_lv_kv.at[0] *= factor
        diag_results = run_check()
        if flagged:
            assert diag_results[check_function] == \
                {'trafo': {'hv_bus': [0], 'lv_bus': [0]}}
        else:
            assert check_function not in diag_results
        assert_report_ok(diag_results)

    # --- three-winding trafo: connectors rotated (hv->mv->lv->hv) ---
    net.trafo = copy.deepcopy(trafo_copy)
    net.trafo3w.hv_bus.at[0] = trafo3w_copy.mv_bus.at[0]
    net.trafo3w.mv_bus.at[0] = trafo3w_copy.lv_bus.at[0]
    net.trafo3w.lv_bus.at[0] = trafo3w_copy.hv_bus.at[0]
    diag_results = run_check()
    assert diag_results[check_function] == \
        {'trafo3w': {'connectors_swapped_3w': [0]}}
    assert_report_ok(diag_results)

    # --- three-winding trafo: all three nominal voltages scaled ---
    for factor, flagged in factor_cases:
        net.trafo3w = copy.deepcopy(trafo3w_copy)
        net.trafo3w.vn_hv_kv.at[0] *= factor
        net.trafo3w.vn_mv_kv.at[0] *= factor
        net.trafo3w.vn_lv_kv.at[0] *= factor
        diag_results = run_check()
        if flagged:
            assert diag_results[check_function] == \
                {'trafo3w': {'hv_bus': [0], 'lv_bus': [0], 'mv_bus': [0]}}
        else:
            assert check_function not in diag_results
        assert_report_ok(diag_results)
def test_wrong_reference_system(test_net, diag_params, report_methods):
    """A negative load p_kw and positive gen/sgen p_kw must be flagged as a
    wrong reference system, and the report must build without raising."""
    net = copy.deepcopy(test_net)
    check_function = 'wrong_reference_system'
    diag_params = copy.deepcopy(diag_params)
    report_methods = copy.deepcopy(report_methods)

    # Invert the sign convention on one element of each kind.
    net.load.p_kw.at[0] = -1
    net.gen.p_kw.at[0] = 1
    net.sgen.p_kw.at[0] = 1
    check_result = pp.wrong_reference_system(net)
    diag_results = {check_function: check_result} if check_result else {}
    assert diag_results[check_function] == {'gens': [0], 'loads': [0], 'sgens': [0]}

    # Building both the compact and the full report must not raise.
    # NOTE: the evaluated string references the local name `diag_report`.
    for compact in (True, False):
        diag_report = DiagnosticReports(net, diag_results, diag_params,
                                        compact_report=compact)
        try:
            eval(report_methods[check_function])
            succeeded = True
        except:
            succeeded = False
        assert succeeded
def test_disconnected_elements(test_net, diag_params, report_methods):
net = copy.deepcopy(test_net)
check_function = 'disconnected_elements'
diag_params = copy.deepcopy(diag_params)
report_methods = copy.deepcopy(report_methods)
net.switch.closed.loc[37,38] = False
pp.drop_trafos(net, [1])
check_result = pp.disconnected_elements(net)
if check_result:
diag_results = {check_function: check_result}
else:
diag_results = {}
expected_disconnect = [{'buses': [33, 36, 37, 38, 39, 40, 41, 42, 43, 44],
'lines': [6, 7, 8, 9, 11, 12, 13],
'loads': [2, 5, 6, 7, 8, 9, 10, 11, 12],
'sgens': [1, 2, 3, 4],
'switches': [37, 38, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61],
'trafos3w': [0]},
{'buses': [45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56],
'lines': [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 20 12:59:15 2020
@author: ssli
script to make the data vector plot
"""
import pandas as pd
import numpy as np
from XiPlot import XiPlotFunc
# # ++++++++++++++++++++++++++++++++++++++++++ data: whole vs. red vs. blue
# # Number of bins
# nzbins = 5
# # custom settings for plot
# # color: dimgray (solid) / red (half transparent) / blue (half transparent)
# CRs = ['dimgray', [1, 0, 0, 0.5], [0, 0, 1, 0.5]]
# # marker: circle / diamond / square
# MKs = ['o', 'd', 's']
# # marker size
# MSs = [2, 2, 2]
# # linestyle (not used for data)
# # LSs = ['none', 'none', 'none']
# LSs = ['-', '-.', '--']
# # linewidth
# LWs = [0.6, 0.6, 0.6]
# # linewidth of the errorbar lines
# ELWs = [0.6, 0.6, 0.6]
# # YTYPE
# YTYPE = 'orig'
# # output directory
# outpath = "/net/raam/data1/surfdrive_ssli/Projects/6CosmicShear_RB/plot/CorrFunc/data_whole_red_blue.png"
# # whole
# # data
# inpath = '/disks/shear15/ssli/CosmicShear/data_vector/for_plot/xi_for_plot_withK_whole.dat'
# data = np.loadtxt(inpath)
# theta = data[:,1]
# xi = data[:,2]
# pm = data[:,3]
# ito = data[:,4]
# jto = data[:,5]
# para_whole = pd.DataFrame({'theta': theta, 'xi': xi, 'pm': pm, 'ito': ito, 'jto': jto})
# # error
# inpath = "/disks/shear15/ssli/CosmicShear/KV450_COSMIC_SHEAR_DATA_RELEASE/COV_MAT/xipmcutcov_KV450_analytic_inc_m.dat"
# data = np.loadtxt(inpath)
# indx_i = data[:,0]
# indx_j = data[:,1]
# err_full = data[:,2]
# err_diagonal = err_full[indx_j==indx_i]
# para_whole['error'] = np.sqrt(err_diagonal)
# # red
# # data
# inpath = '/disks/shear15/ssli/CosmicShear/data_vector/for_plot/xi_for_plot_withK_red.dat'
# data = np.loadtxt(inpath)
# theta = data[:,1]
# xi = data[:,2]
# pm = data[:,3]
# ito = data[:,4]
# jto = data[:,5]
# para_red = pd.DataFrame({'theta': theta, 'xi': xi, 'pm': pm, 'ito': ito, 'jto': jto})
# # error
# inpath = "/disks/shear15/ssli/CosmicShear/data_vector/for_plot/xi_for_plot_error_red.dat"
# data = np.loadtxt(inpath)
# err_diagonal = data[:,2]
# para_red['error'] = err_diagonal
# # blue
# # data
# inpath = '/disks/shear15/ssli/CosmicShear/data_vector/for_plot/xi_for_plot_withK_blue.dat'
# data = np.loadtxt(inpath)
# theta = data[:,1]
# xi = data[:,2]
# pm = data[:,3]
# ito = data[:,4]
# jto = data[:,5]
# para_blue = pd.DataFrame({'theta': theta, 'xi': xi, 'pm': pm, 'ito': ito, 'jto': jto})
# # error
# inpath = "/disks/shear15/ssli/CosmicShear/data_vector/for_plot/xi_for_plot_error_blue.dat"
# data = np.loadtxt(inpath)
# err_diagonal = data[:,2]
# para_blue['error'] = err_diagonal
# paras = [para_whole, para_red, para_blue]
# names = ['data', 'data', 'data']
# XiPlotFunc(paras, names, nzbins,
# CRs, MKs, MSs, LSs, LWs, ELWs,
# YTYPE,
# outpath)
# # ++++++++++++++++++++++++++++++++++++++++++ data vs. theory: whole
# # Number of bins
# nzbins = 5
# # custom settings for plot
# # color
# CRs = ['dimgray', 'black']
# # marker: circle
# MKs = ['o', None]
# # marker size
# MSs = [2, None]
# # linestyle (not used for data)
# LSs = ['none', '-']
# # linewidth
# LWs = [None, 1.0]
# # linewidth of the errorbar lines
# ELWs = [1.0, None]
# # YTYPE
# YTYPE = 'orig'
# # output directory
# outpath = "/net/raam/data1/surfdrive_ssli/Projects/6CosmicShear_RB/plot/CorrFunc/theory_data_whole.png"
# # whole
# # data
# inpath = '/disks/shear15/ssli/CosmicShear/data_vector/for_plot/xi_for_plot_withK_whole.dat'
# data = np.loadtxt(inpath)
# theta = data[:,1]
# xi = data[:,2]
# pm = data[:,3]
# ito = data[:,4]
# jto = data[:,5]
# para_data = pd.DataFrame({'theta': theta, 'xi': xi, 'pm': pm, 'ito': ito, 'jto': jto})
# # error
# inpath = "/disks/shear15/ssli/CosmicShear/KV450_COSMIC_SHEAR_DATA_RELEASE/COV_MAT/xipmcutcov_KV450_analytic_inc_m.dat"
# data = np.loadtxt(inpath)
# indx_i = data[:,0]
# indx_j = data[:,1]
# err_full = data[:,2]
# err_diagonal = err_full[indx_j==indx_i]
# para_data['error'] = np.sqrt(err_diagonal)
# # theory
# inpath = '/disks/shear15/ssli/CosmicShear/theory_vector/xi_cut_to_cut_values_5zbins_whole.dat'
# data = np.loadtxt(inpath)
# theta = data[:,1]
# xi = data[:,2]
# pm = data[:,3]
# ito = data[:,4]
# jto = data[:,5]
# para_theory = pd.DataFrame({'theta': theta, 'xi': xi, 'pm': pm, 'ito': ito, 'jto': jto})
# # error (not used)
# para_theory['error'] = np.zeros(len(theta))
# paras = [para_data, para_theory]
# names = ['data', 'theory']
# XiPlotFunc(paras, names, nzbins,
# CRs, MKs, MSs, LSs, LWs, ELWs,
# YTYPE,
# outpath)
# # ++++++++++++++++++++++++++++++++++++++++++ data vs. theory: red
# # Number of bins
# nzbins = 5
# # custom settings for plot
# # color
# CRs = ['red', 'black']
# # marker: diamond
# MKs = ['d', None]
# # marker size
# MSs = [2, None]
# # linestyle (not used for data)
# LSs = ['none', '-']
# # linewidth
# LWs = [None, 1.0]
# # linewidth of the errorbar lines
# ELWs = [1.0, None]
# # YTYPE
# YTYPE = 'orig'
# # output directory
# outpath = "/net/raam/data1/surfdrive_ssli/Projects/6CosmicShear_RB/plot/CorrFunc/theory_data_red.png"
# # red
# # data
# inpath = '/disks/shear15/ssli/CosmicShear/data_vector/for_plot/xi_for_plot_withK_red.dat'
# data = np.loadtxt(inpath)
# theta = data[:,1]
# xi = data[:,2]
# pm = data[:,3]
# ito = data[:,4]
# jto = data[:,5]
# para_data = pd.DataFrame({'theta': theta, 'xi': xi, 'pm': pm, 'ito': ito, 'jto': jto})
# # error
# inpath = "/disks/shear15/ssli/CosmicShear/data_vector/for_plot/xi_for_plot_error_red.dat"
# data = np.loadtxt(inpath)
# err_diagonal = data[:,2]
# para_data['error'] = err_diagonal
# # theory
# inpath = '/disks/shear15/ssli/CosmicShear/theory_vector/xi_cut_to_cut_values_5zbins_red.dat'
# data = np.loadtxt(inpath)
# theta = data[:,1]
# xi = data[:,2]
# pm = data[:,3]
# ito = data[:,4]
# jto = data[:,5]
# para_theory = pd.DataFrame({'theta': theta, 'xi': xi, 'pm': pm, 'ito': ito, 'jto': jto})
# # error (not used)
# para_theory['error'] = np.zeros(len(theta))
# paras = [para_data, para_theory]
# names = ['data', 'theory']
# XiPlotFunc(paras, names, nzbins,
# CRs, MKs, MSs, LSs, LWs, ELWs,
# YTYPE,
# outpath)
# # ++++++++++++++++++++++++++++++++++++++++++ data vs. theory: blue
# # Number of bins
# nzbins = 5
# # custom settings for plot
# # color
# CRs = ['blue', 'black']
# # marker: square
# MKs = ['s', None]
# # marker size
# MSs = [2, None]
# # linestyle (not used for data)
# LSs = ['none', '-']
# # linewidth
# LWs = [None, 1.0]
# # linewidth of the errorbar lines
# ELWs = [1.0, None]
# # YTYPE
# YTYPE = 'orig'
# # output directory
# outpath = "/net/raam/data1/surfdrive_ssli/Projects/6CosmicShear_RB/plot/CorrFunc/theory_data_blue.png"
# # blue
# # data
# inpath = '/disks/shear15/ssli/CosmicShear/data_vector/for_plot/xi_for_plot_withK_blue.dat'
# data = np.loadtxt(inpath)
# theta = data[:,1]
# xi = data[:,2]
# pm = data[:,3]
# ito = data[:,4]
# jto = data[:,5]
# para_data = pd.DataFrame({'theta': theta, 'xi': xi, 'pm': pm, 'ito': ito, 'jto': jto})
# # error
# inpath = "/disks/shear15/ssli/CosmicShear/data_vector/for_plot/xi_for_plot_error_blue.dat"
# data = np.loadtxt(inpath)
# err_diagonal = data[:,2]
# para_data['error'] = err_diagonal
# # theory
# inpath = '/disks/shear15/ssli/CosmicShear/theory_vector/xi_cut_to_cut_values_5zbins_blue.dat'
# data = np.loadtxt(inpath)
# theta = data[:,1]
# xi = data[:,2]
# pm = data[:,3]
# ito = data[:,4]
# jto = data[:,5]
# para_theory = pd.DataFrame({'theta': theta, 'xi': xi, 'pm': pm, 'ito': ito, 'jto': jto})
# # error (not used)
# para_theory['error'] = np.zeros(len(theta))
# paras = [para_data, para_theory]
# names = ['data', 'theory']
# XiPlotFunc(paras, names, nzbins,
# CRs, MKs, MSs, LSs, LWs, ELWs,
# YTYPE,
# outpath)
# # ++++++++++++++++++++++++++++++++++++++++++ data vs. theory (KV450 only): red-blue
# # Number of bins
# nzbins = 5
# # custom settings for plot
# # color
# CRs = ['orange', 'black']
# # marker: square
# MKs = ['o', None]
# # marker size
# MSs = [2, None]
# # linestyle (not used for data)
# LSs = ['none', '-']
# # linewidth
# LWs = [None, 1.0]
# # linewidth of the errorbar lines
# ELWs = [1.0, None]
# # YTYPE
# YTYPE = 'diff'
# # output directory
# outpath = "/net/raam/data1/surfdrive_ssli/Projects/6CosmicShear_RB/plot/CorrFunc/theory_data_red_blue_diff.png"
# # red
# # data
# inpath = '/disks/shear15/ssli/CosmicShear/data_vector/for_plot/xi_for_plot_withK_red.dat'
# data = np.loadtxt(inpath)
# theta = data[:,1]
# xi = data[:,2]
# pm = data[:,3]
# ito = data[:,4]
# jto = data[:,5]
# para_data_red = pd.DataFrame({'theta': theta, 'xi': xi, 'pm': pm, 'ito': ito, 'jto': jto})
# # error
# inpath = "/disks/shear15/ssli/CosmicShear/data_vector/for_plot/xi_for_plot_error_red.dat"
# data = np.loadtxt(inpath)
# err_diagonal = data[:,2]
# para_data_red['error'] = err_diagonal
# # theory
# inpath = '/disks/shear15/ssli/CosmicShear/theory_vector/xi_cut_to_cut_values_5zbins_red.dat'
# data = np.loadtxt(inpath)
# theta = data[:,1]
# xi = data[:,2]
# pm = data[:,3]
# ito = data[:,4]
# jto = data[:,5]
# para_theory_red = pd.DataFrame({'theta': theta, 'xi': xi, 'pm': pm, 'ito': ito, 'jto': jto})
# # error (not used)
# para_theory_red['error'] = np.zeros(len(theta))
# # blue
# # data
# inpath = '/disks/shear15/ssli/CosmicShear/data_vector/for_plot/xi_for_plot_withK_blue.dat'
# data = np.loadtxt(inpath)
# theta = data[:,1]
# xi = data[:,2]
# pm = data[:,3]
# ito = data[:,4]
# jto = data[:,5]
# para_data_blue = pd.DataFrame({'theta': theta, 'xi': xi, 'pm': pm, 'ito': ito, 'jto': jto})
# # error
# inpath = "/disks/shear15/ssli/CosmicShear/data_vector/for_plot/xi_for_plot_error_blue.dat"
# data = np.loadtxt(inpath)
# err_diagonal = data[:,2]
# para_data_blue['error'] = err_diagonal
# # theory
# inpath = '/disks/shear15/ssli/CosmicShear/theory_vector/xi_cut_to_cut_values_5zbins_blue.dat'
# data = np.loadtxt(inpath)
# theta = data[:,1]
# xi = data[:,2]
# pm = data[:,3]
# ito = data[:,4]
# jto = data[:,5]
# para_theory_blue = pd.DataFrame({'theta': theta, 'xi': xi, 'pm': pm, 'ito': ito, 'jto': jto})
# # error (not used)
# para_theory_blue['error'] = np.zeros(len(theta))
# # difference
# # use avarage for theta
# para_data = pd.DataFrame({'theta': (para_data_red['theta'].values+para_data_blue['theta'].values)/2.,
# 'xi': para_data_red['xi'].values - para_data_blue['xi'].values,
# 'pm': para_data_red['pm'].values,
# 'ito': para_data_red['ito'].values,
# 'jto': para_data_red['jto'].values,
# 'error': np.sqrt(para_data_red['error'].values**2.+para_data_blue['error'].values**2.)
# })
# para_theory = pd.DataFrame({'theta': (para_theory_red['theta'].values+para_theory_blue['theta'].values)/2.,
# 'xi': para_theory_red['xi'].values - para_theory_blue['xi'].values,
# 'pm': para_theory_red['pm'].values,
# 'ito': para_theory_red['ito'].values,
# 'jto': para_theory_red['jto'].values,
# 'error': para_theory_red['error'].values
# })
# paras = [para_data, para_theory]
# names = ['data', 'theory']
# XiPlotFunc(paras, names, nzbins,
# CRs, MKs, MSs, LSs, LWs, ELWs,
# YTYPE,
# outpath)
# # ++++++++++++++++++++++++++++++++++++++++++ data (only): blue-red
# # Number of bins
# nzbins = 5
# # custom settings for plot
# # color
# # without K / with K
# CRs = ['gray', 'orange']
# # marker: square
# MKs = ['o', 'o']
# # marker size
# MSs = [2, 2]
# # linestyle (not used for data)
# LSs = ['none', 'none']
# # linewidth
# | |
have issues here because H5py dataset
# return data
#
#
# def _set_datas(dat: DatHDF, datas: EA_datas):
# """Saves 2D datasets properly in HDF, more efficient for loading etc"""
# for name, list_datas in asdict(datas).items():
# if list_datas is not None and list_datas != []:
# for i, data in enumerate(list_datas):
# if data is not None:
# k = name + str(i)
# dat.Other.set_data(k, data)
#
#
# def get_datas(dat: DatHDF) -> EA_datas:
# """Gets 2D datasets from Dat.Other.Data (i.e. if each row of data saved, not just 1D)"""
# datas = EA_datas()
# for name in datas.__annotations__.keys():
# i = 0
# list_data = getattr(datas, name)
# while True:
# k = name + str(i)
# if k in dat.Other.Data.keys():
# list_data.append(dat.Other.Data[k])
# i += 1
# else:
# break
# return datas
#
#
# def save_to_dat(dat: DatHDF, data: Union[EA_data, EA_datas], values: Union[EA_value, EA_values], params: EA_params,
# uncertainties: Optional[EA_value] = None):
# """
# Saves each of EA_data, EA_values, EA_analysis_params to Dat.Other in a way that gets loaded automatically
# Note: data is stored as datasets, so need to call EA.get_data(dat) to get that back nicely
# Args:
# dat (DatHDF):
# data (Union[EA_data, EA_datas]): 1D EA_data class or 2D EA_datas class
# values (Union[EA_value, EA_values]): 1D EA_values class or 2D EA_values class
# params (EA_params): Params for analysis
# uncertainties (Optional[EA_value]): 1D EA_value class with uncertainties
# Returns:
# None: Saves everything in dat.Other
# """
# if isinstance(data, EA_data):
# _set_data(dat, data)
# elif isinstance(data, EA_datas):
# _set_datas(dat, data)
# else:
# logger.info(f'dat{dat.datnum} - data had class {dat.__class__} which is incorrect')
#
# if isinstance(values, EA_value):
# dat.Other.EA_values = values
# elif isinstance(values, EA_values):
# efits = values.efit_infos
# values.efit_infos = None
# dat.Other.EA_valuess = values
# for i, fit in enumerate(efits):
# name = f'efit_info_{i}'
# setattr(dat.Other, name, fit)
# else:
# logger.info(f'dat{dat.datnum} - values had class {dat.__class__} which is incorrect')
#
# if uncertainties is not None:
# dat.Other.EA_uncertainties = uncertainties
#
# dat.Other.EA_analysis_params = params
# dat.Other.time_processed = str(pd.Timestamp.now())
# dat.Other.update_HDF()
#
#
# def make_datas_from_dat(dat):
# """
# Makes a datas dataclass from a single dat (i.e. data for each row)
# Args:
# dat (DatHDF): Single dat to make EA_datas from
#
# Returns:
# EA_datas: datas dataclass for dat
# """
# x = dat.SquareEntropy.Processed.outputs.x
# t_data = dat.SquareEntropy.Processed.outputs.cycled
# datas = make_datas(x, t_data)
# return datas
#
#
# def make_datas(x, trans_data):
# """
# Makes a datas dataclass from x and trans_data only (i.e. will duplicate x to make an x for each row of trans_data,
# and will calculate entropy signal from trans_data). Note: Will not calculate integrated data because that requires
# params.
# Args:
# x (np.ndarray): 1D x_array
# trans_data (np.ndarray): Can be either 1D or 2D transition_data (already cycled... i.e. shape == ([nd], 4, len(x))
#
# Returns:
# EA_datas: datas class
# """
# t_data = np.array(trans_data, ndmin=3)
# e_data = np.array([entropy_signal(d) for d in t_data])
# datas = EA_datas()
# for t, e in zip(t_data, e_data):
# datas.append(EA_data(x, t, e))
# return datas
#
#
# def calculate_datas(datas, params):
# """
# Calculates CT, Fit, Integrated for each data in datas using params. Returns filled values
# Args:
# datas (EA_datas):
# params (EA_params):
#
# Returns:
# EA_values: All fit values etc
# """
# all_values = EA_values()
# for data in datas:
# v = EA_value()
# calculate_CT_values(data, v, params)
# calculate_fit(data, v, params)
# calculate_integrated(data, v, params)
# all_values.append(v)
#
# return all_values
#
#
# def _batch_datas(datas, batch_size, centers=None):
# if centers is None:
# centers = [0] * len(datas)
#
# new_datas = EA_datas()
# df = datas.to_df()
# i = 0
# while i + batch_size <= len(df):
# x = df['xs'][i]
# new_data_row = EA_data(x=x)
# for k in set(df.columns) - {'xs'}:
# data = np.array(list(df[k][i:i + batch_size]))
# if None not in data:
# avg_data = CU.mean_data(x, data, centers[i:i + batch_size])
# setattr(new_data_row, k[:-1], avg_data) # EA_data has same names as EA_datas minus the s at the end
# else:
# logger.debug(f'None in {k} data, not averaging or using that data')
# new_datas.append(new_data_row)
# i += batch_size
# return new_datas
#
#
# def calculate_uncertainties(datas, params, centers: Optional[Union[list, np.ndarray]] = None):
# """
# Takes datas dataclass, fits to each individual row, and then returns the standard error (scipy.stats.sem) of the
# values stored in EA_values class. Returns in an instance of EA_value
# Args:
# datas (EA_datas): Datas to be fitted
# params (EA_params): Params to do fitting etc with
# centers (Optional[Union[list, np.ndarray]]): Transitions centers to be used for centering, if None will
# blind average
# Returns:
# EA_value: EA_value class with uncertainties instead of values
# """
#
# from scipy.stats import sem
# from copy import copy
# params = copy(params) # To prevent changing overall params
# params.sub_line = False # Doesn't really make sense with no good flat bit at the beginning for narrow data
# # (which is how I'm current calculating uncertainties)
#
# if centers is None or params.center_data is False:
# centers = [0] * len(datas)
#
# x = datas[0].x
# params.int_entropy_range = [x[-1] - (x[-1] - x[0]) / 20, x[-1]] # Calc entropy value from last 5% of data
#
# if (b := params.batch_uncertainty) != 1:
# datas = _batch_datas(datas, b, centers)
#
# all_values = calculate_datas(datas, params)
#
# uncertainties = EA_value()
# for k in set(uncertainties.__annotations__.keys()) - {'efit_info', 'fit_dS'}: # TODO: removing fit_dS is a temp fix
# vs = getattr(all_values, k + 's', None)
# if vs is not None:
# standard_error = sem(np.array(vs), nan_policy='omit')
# setattr(uncertainties, k, standard_error)
#
# # TEMPORARY FIX # TODO: make this better
# fit_dS = all_values.dSs
# if fit_dS != list():
# uncertainties.fit_dS = sem(fit_dS)
#
# return uncertainties
#
#
# def calculate_uncertainties_from_dat(dat, params):
# """
# Wrapper for calculate_uncertainties.
# Fits to each individual row of data in dat, and then returns the standard error (scipy.stats.sem) of the
# values stored in EA_values class. Returns in an instance of EA_value
# Args:
# dat (DatHDF): Dat to get uncertainties for
# params (EA_params): Params to do fitting etc with
#
# Returns:
# EA_value: EA_value class with uncertainties instead of values
# """
# datas = make_datas_from_dat(dat)
# if params.center_data is True:
# centers = _get_centers(dat.Transition.all_fits, datnum=dat.datnum)
# else:
# centers = None
# return calculate_uncertainties(datas, params, centers=centers)
#
#
# def standard_square_process(dat: Union[DatHDF, List[DatHDF]], analysis_params: EA_params, data: EA_data = None,
# per_row=False):
# """
# Standard processing of single or two part entropy dats. Just needs EA.analysis_params to be passed in with dat(s)
# Args:
# dat (Union[Tuple[DatHDF, DatHDF], DatHDF]): Either single dat or dat pair
# analysis_params (EA_params): The analysis params to use for processing dat pair
# data (Optional[EA_data]): To be used instead of data from dats, info will be saved in dat.Other
# per_row (bool): Whether to calculate per row of a single dat, or to calculate one averaged set of data
# Returns:
# None: Info saved in dat(s).Other
# """
#
# def calc_data(single_data: EA_data):
# vals = EA_value()
# calculate_CT_values(single_data, vals, analysis_params) # Returns more info for debugging if necessary
# calculate_integrated(single_data, vals, analysis_params)
# calculate_fit(single_data, vals, analysis_params) # Can add edit_param_kwargs here or use EA_params
#
# if analysis_params.bin_data is True:
# bin_datas(single_data, analysis_params.num_per_row)
#
# return vals
#
# if per_row is False:
# if data is None:
# if isinstance(dat, Sized):
# dat: Tuple[DatHDF, DatHDF]
# if len(dat) != 2:
# raise ValueError(
# f'dat must either be a single dat, or single pair of dats which form a two part measurement')
# for i, d in enumerate(dat):
# if hasattr(d.Logs, 'part_of'):
# if d.Logs.part_of[0] != i + 1:
# logger.warning(f'{len(dat)} dats passed in, the dat at position {i} reports part = '
# f'{d.Logs.part_of[0]}, but should be {i + 1}. Continuing anyway, '
# f'but things may be wrong!')
# data = data_from_dat_pair(dat,
# centers=None) # Determines whether to center based on stderr of mid fit vals
# datas = make_datas_from_dat(dat[1]) # Use the narrow scan dat to estimate errors
# else:
# out = dat.SquareEntropy.Processed.outputs
# data = EA_data(x=out.x, trans_data=out.averaged, entropy_data=out.entropy_signal)
# datas = make_datas_from_dat(dat)
# else:
# datas = None
# assert np.all([v is not None for v in [data.x, data.trans_data, data.entropy_data]])
# elif per_row is True and not isinstance(dat, Sized):
# datas = None # No overall things to calculate uncertainties from... Probably should use fit uncertainties, but not implemented yet
# if data is None:
# data = make_datas_from_dat(dat) # Returns EA_datas NOT EA_data
# else:
# assert isinstance(data, EA_datas)
# else:
# raise ValueError('Either pass in a single dat to calculate per_row, or turn off per_row')
#
# uncertainties = None
# if isinstance(data, EA_data):
# values = calc_data(data)
# if datas is not None and analysis_params.calculate_uncertainty is True:
# uncertainties = calculate_uncertainties(datas, analysis_params)
# elif isinstance(data, EA_datas):
# values = EA_values()
# for sd in data:
# values.append(calc_data(sd))
# else:
# raise NotImplementedError(f'Somehow got to here without data being defined...')
#
# if not isinstance(dat, Sized):
# dats = [dat]
# else:
# dats = dat
#
# for d in dats:
# save_to_dat(d, data, values, analysis_params, uncertainties=uncertainties)
#
#
# from scipy.stats import zscore # standard deviations from mean
#
# @dataclass
# class CarefulFit(DA.DatDataclassTemplate):
# """
# Stores info for doing careful fits (i.e. removing rows of data based on some criteria)
# """
# | |
from __future__ import print_function
if __name__ == '__main__':
import matplotlib
matplotlib.use('Agg')
from optparse import OptionParser
from math import sqrt, floor, ceil, pi
from glob import glob
import numpy as np
import pylab as plt
from scipy import linalg
from astrom_common import *
from astrom_intra import Intra
from astrom_merge import mergeBrick
#from astrom_merge2 import mergeBrick2
from astrometry.util.plotutils import antigray
def loadBrickCached(cam, brick, mergedfn=None, ps=None, **kwargs):
    """Load sources for one brick, merging per-field catalogs when needed.

    CFHT-style reference "cameras" bypass the merge and are loaded directly;
    every other camera goes through mergeBrick (with optional cache file
    `mergedfn` and plot sequence `ps`).  If the merged table carries a
    'primary' column, only primary sources are kept.
    """
    if cam in ['CFHT', 'CFHT2']:
        return loadBrick(cam, **kwargs)
    merged = mergeBrick(cam, brick, mergedfn, ps, **kwargs)
    if 'primary' in merged.columns():
        merged = merged[merged.primary]
        print('After cutting on primary:', len(merged))
    return merged
def main():
    """Align one camera's brick catalog to a reference catalog.

    Pipeline: parse options; load (cached, merged) target and reference
    catalogs; pick comparable magnitude columns; optionally apply or search
    for a rotation correction; fit an affine transformation in two passes;
    and optionally write the combined Affine to a FITS table.
    """
    import sys
    parser = OptionParser(usage='%(program) [options] <gst.fits filenames>')
    parser.add_option('-b', '--brick', dest='brick', type='int', help='Brick')
    parser.add_option('-c', '--cam', dest='cam', help='Camera -- ACS, IR or UV', action=None)
    parser.add_option('--ref', dest='ref', help='Reference "camera" -- CFHT, ACS, IR or UV', action=None)
    parser.add_option('--refmerged', dest='refmergedfn', help='File to read/write merged reference sources from/into')
    #parser.add_option('--refitab', dest='refitab', help='Reference source table')
    parser.add_option('--refmagcut', dest='refmagcut', type='float', help='Reference mag cut')
    parser.add_option('-p', '--path', dest='path', help='Path to .gst.fits files (default: "data/pipe/*/proc")')
    parser.add_option('-r', '--radius', dest='radius', type='float', help='Search radius (default 1")', default=1.)
    parser.add_option('-m', '--magcut', dest='magcut', type='float', help='mag cut (default: 22 for ACS, 21 for IR)')
    parser.add_option('-R', '--rotation', dest='rotation', type='float', help='Apply this rotation correction (default=0 deg)', default=0.)
    parser.add_option('-s', '--smallrad', dest='smallrad', type='float', help='Small search radius (default 0.1")', default=0.1)
    parser.add_option('-E', '--emrad', dest='emrad', type='float', help='Radius for EM (default: searchrad)')
    parser.add_option('--merged', dest='mergedfn', help='File to read/write merged sources from/into')
    #parser.add_option('--itab', dest='itab', help='Target source table')
    parser.add_option('-G', '--grid', dest='grid', action='store_true', default=False,
                      help='Show a grid of the 18 fields in this brick.')
    parser.add_option('-B', '--basefn', dest='basefn',
                      help='Base output filename for plots')
    parser.add_option('--rot-lo', dest='rotlo', type='float',
                      help='Search rotations from --rot-lo to --rot-hi in steps of --rot-step')
    parser.add_option('--rot-hi', dest='rothi', type='float')
    parser.add_option('--rot-step', dest='rotstep', type='float', default=0.01)
    parser.add_option('--output', '-o', dest='outfn', help='Output filename (affine FITS)', default=None)
    opt,args = parser.parse_args()
    if opt.brick is None or opt.cam is None:
        parser.print_help()
        print('Need --brick and --cam')
        sys.exit(-1)
    if opt.emrad is None:
        opt.emrad = opt.radius
    #if opt.itab is not None:
    #    opt.itab = fits_table(opt.itab)
    #if opt.refitab is not None:
    #    opt.refitab = fits_table(opt.refitab)
    if opt.basefn is None:
        basefn = 'inter-%02i-%s-%s' % (opt.brick, opt.cam, opt.ref)
    else:
        basefn = opt.basefn
    ps = PlotSequence(basefn+'-', format='%02i')
    # Load the catalog to be aligned, then the reference catalog.
    Tme = loadBrickCached(opt.cam, opt.brick, path=opt.path, mergedfn=opt.mergedfn,
                          #itab=opt.itab,
                          ps=ps)
    me = describeFilters(opt.cam, Tme)
    Tref = loadBrickCached(opt.ref, opt.brick, path=opt.path, mergedfn=opt.refmergedfn,
                           #itab=opt.refitab,
                           ps=ps)
    ref = describeFilters(opt.ref, Tref)
    # Pick the pair of magnitude columns (one per catalog) closest in band.
    i,j = getNearMags(me, ref)
    Tme.cam = opt.cam
    Tme.mag = Tme.get('mag%i' % (i+1))
    Tme.filter = me.fnames[i]
    Tref.cam = opt.ref
    Tref.mag = Tref.get('mag%i' % (j+1))
    Tref.filter = ref.fnames[j]
    # Optional magnitude cuts on each catalog.
    if opt.magcut is not None:
        I = (Tme.mag < opt.magcut)
        Tme = Tme[I]
        print('Got', len(Tme), 'after mag cut (at', opt.magcut, ')')
    if opt.refmagcut is not None:
        I = (Tref.mag < opt.refmagcut)
        Tref = Tref[I]
        print('Got', len(Tref), 'reference after mag cut (at %g)' % opt.refmagcut)
    # RA,Dec bounding box of the target catalog; midpoint is the rotation pivot.
    rl,rh = Tme.ra.min(), Tme.ra.max()
    dl,dh = Tme.dec.min(), Tme.dec.max()
    dmid = (dl+dh)/2.
    rmid = (rl+rh)/2.
    def rotate_radec(rot, ra, dec, refra, refdec):
        # Rotate (ra, dec) by 'rot' degrees about (refra, refdec);
        # returns the rotated coordinates plus the Affine used.
        trans = Affine()
        trans.setRotation(rot, smallangle=False)
        trans.setReferenceRadec(refra, refdec)
        newra,newdec = trans.apply(ra, dec)
        return newra, newdec, trans
    rot = 0
    trans0 = None
    if opt.rotation != 0.:
        rot = opt.rotation
        # rotate.
        print('Applying rotation correction of', rot, 'deg')
        Tme.ra, Tme.dec, trans0 = rotate_radec(rot, Tme.ra, Tme.dec, rmid, dmid)
    elif opt.rotlo is not None and opt.rothi is not None:
        # Grid-search the rotation: for each trial angle, match catalogs and
        # score the 2D residual histogram; a sharper peak has higher variance.
        lo = opt.rotlo
        hi = opt.rothi
        step = opt.rotstep
        print('Trying rotations between', lo, 'and', hi, 'in steps of', step)
        variances = []
        rots = np.arange(lo, hi+step/2., step)
        for rot in rots:
            print('Rotation', rot)
            Tm = Tme.copy()
            Tm.ra, Tm.dec, nil = rotate_radec(rot, Tm.ra, Tm.dec, rmid, dmid)
            print('Matching...')
            M = Match(Tm, Tref, opt.radius)
            print('Got %i matches' % len(M.I))
            nbins = 200
            H,xe,ye = plothist(M.dra_arcsec, M.ddec_arcsec, nbins)
            plt.xlabel('dRA (arcsec)')
            plt.ylabel('dDec (arcsec)')
            plt.title('Rotated by %g deg' % rot)
            ps.savefig()
            plotresids(Tm, M, 'Rotated by %g deg' % rot, bins=100)
            ps.savefig()
            # Trim the circle to avoid edge effects, and then measure the variance.
            X,Y = np.meshgrid(np.arange(nbins), np.arange(nbins))
            R2 = (X - nbins/2.)**2 + (Y - nbins/2.)**2
            I = (R2 < (0.95 * (nbins/2)**2))
            v = np.var(H[I])
            print('Variance:', v)
            variances.append(v)
        plt.clf()
        plt.plot(rots, variances, 'r-')
        plt.xlabel('Rotation (deg)')
        plt.ylabel('Variance in dRA,dDec histogram')
        ps.savefig()
        # Best rotation = the one with the most sharply peaked residuals.
        I = np.argmax(variances)
        rot = rots[I]
        print('Applying rotation correction of', rot, 'deg')
        Tme.ra, Tme.dec, trans0 = rotate_radec(rot, Tme.ra, Tme.dec, rmid, dmid)
    if trans0 is not None:
        print('Setting initial rotation affine transformation:')
        print(trans0)
    # First affine pass: coarse match at the full search radius.
    A = alignAndPlot(Tme, Tref, opt.radius, ps, emrad=opt.emrad, doweighted=False)
    #print 'Cov:', A.C
    trans = findAffine(Tme, Tref, A, (rmid,dmid))
    # Visualize the fitted transformation on a 20x20 grid over the brick.
    RR,DD = np.meshgrid(np.linspace(rl, rh, 20),
                        np.linspace(dl, dh, 20))
    RR = RR.ravel()
    DD = DD.ravel()
    plotaffine(trans, RR, DD, exag=1.)
    setRadecAxes(rl,rh,dl,dh)
    ps.savefig()
    plotaffine(trans, RR, DD, exag=100.)
    setRadecAxes(rl,rh,dl,dh)
    ps.savefig()
    exag = 1000.
    plotaffine(trans, RR, DD, exag, affineOnly=True)
    ps.savefig()
    Tme.ra,Tme.dec = trans.apply(Tme.ra, Tme.dec)
    # Do it again!
    # Second affine pass at the small radius, after applying the first fit.
    A2 = alignAndPlot(Tme, Tref, opt.smallrad, ps, doweighted=False, emrad=opt.smallrad)
    trans2 = findAffine(Tme, Tref, A2, (rmid,dmid))
    Tme.ra,Tme.dec = trans2.apply(Tme.ra, Tme.dec)
    # For the 'after' plots
    A3 = alignAndPlot(Tme, Tref, opt.smallrad, ps, doweighted=False, emrad=opt.smallrad)
    # Save
    # Compose (rotation +) pass-1 + pass-2 into a single Affine and write it.
    if opt.outfn:
        if trans0 is None:
            trans.add(trans2)
        else:
            trans0.add(trans)
            trans0.add(trans2)
            trans = trans0
        T = Affine.toTable([trans])
        T.writeto(opt.outfn)
def findAffine(Tme, Tref, A, refradec, affine=True, order=1):
    '''
    Computes an Affine transformation between two aligned catalogs.
    *Tme*: catalog to align
    *Tref*: reference catalog
    *A*: an Alignment object matching these two catalogs
    *refradec*: tuple (refra, refdec) of the reference point about which to
    rotate.
    *affine*: if True, produce an affine transformation; otherwise, just a shift
    *order*: polynomial distortion order (>= 1; order 1 is the pure affine fit,
    higher orders append polynomial distortion terms).
    Returns:
    *Affine* object.
    The fit is a weighted linear least-squares of the matched residuals
    against polynomial terms in (RA, Dec) offsets from the reference point,
    with weights sqrt(A.fore) (EM foreground probabilities).
    '''
    refra,refdec = refradec
    # cos(Dec) factor: converts RA offsets to isotropic (great-circle) degrees.
    rascale = np.cos(np.deg2rad(refdec))
    srdeg,sddeg = A.getshift()
    if not affine:
        # Shift-only: build an Affine that just undoes the measured offset.
        affine = Affine(dra = -srdeg, ddec = -sddeg,
                        refra = refra, refdec = refdec)
        return affine
    assert(order >= 1)
    sr,sd = A.arcsecshift()
    # Per-match weights: sqrt of the EM foreground probability.
    w = np.sqrt(A.fore)
    M = A.match
    # Residuals (arcsec) with the bulk shift removed, for the kept subset.
    dra = M.dra_arcsec [A.subset] - sr
    ddec = M.ddec_arcsec[A.subset] - sd
    ra = Tme.ra [M.I[A.subset]]
    dec = Tme.dec[M.I[A.subset]]
    # Design matrix columns, each pre-multiplied by the weights:
    # constant term first, then rr^rao * dd^deco for each total order o.
    comps = [np.ones_like(ra) * w]
    for o in range(1, order+1):
        for deco in range(o+1):
            rao = o - deco
            rr = (ra - refra )*rascale
            dd = (dec - refdec)
            # rr and dd are in isotropic degrees
            comps.append((rr ** rao) * (dd ** deco) * w)
            print('ra order', rao, 'dec order', deco)
    # In the linear case (order=1), the terms are listed as rao=1 then deco=1
    Amat = np.vstack(comps).T
    Amat = np.matrix(Amat)
    # dra,ddec are converted from arcsec to (isotropic) degrees here.
    b1 = -dra / 3600. * w
    b2 = -ddec / 3600. * w
    # Solve the two weighted least-squares systems (one per coordinate).
    X1 = linalg.lstsq(Amat, b1)
    X2 = linalg.lstsq(Amat, b2)
    X1 = X1[0]
    X2 = X2[0]
    # Linear model: dra = e + a*rr + b*dd ; ddec = f + c*rr + d*dd.
    e,a,b = X1[:3]
    f,c,d = X2[:3]
    #print 'a,b,c,d', a,b,c,d
    #print 'e,f', e,f
    if order >= 2:
        # Higher-order coefficients become the polynomial distortion terms.
        rapoly = X1[3:]
        decpoly = X2[3:]
    else:
        rapoly = decpoly = None
    # Fold the fitted constant terms back together with the bulk shift.
    affine = Affine(dra = e/rascale - srdeg, ddec = f - sddeg,
                    T = [ a, b, c, d ],
                    refra = refra, refdec = refdec,
                    rapoly=rapoly, decpoly=decpoly)
    return affine
# alignAndPlot() below returns the Alignment object A.
def alignAndPlot(Tme, Tref, rad, ps, doweighted=True, emrad=None, nearest=False, **kwargs):
aliargs = dict(cutrange=emrad)
aliargs.update(kwargs)
A = Alignment(Tme, Tref, searchradius=rad, **aliargs)
if nearest:
# There is something badly wrong with spherematch.nearest().
assert(False)
A.findMatches(nearest=True)
M = A.match
print('dra,ddec arcsec:', M.dra_arcsec[:100], M.ddec_arcsec[:100])
if A.shift() is None:
print('Shift not found!')
return None
M = A.match
print('Shift:', A.arcsecshift())
sr,sd = A.arcsecshift()
sumd2 = np.sum(A.fore * ((M.dra_arcsec [A.subset] - sr)**2 +
(M.ddec_arcsec[A.subset] - sd)**2))
sumw = np.sum(A.fore)
# / 2. to get std per coord.
std = sqrt(sumd2 / (sumw * 2.))
angles = np.linspace(0, 2.*pi, 100)
modstr = ''
if A.cov:
eigs = A.getEllipseSize() * 1000.
if eigs[0] > 100:
modstr = '%.0fx%.0f' % (eigs[0], eigs[1])
else:
modstr = '%.1fx%.1f' % (eigs[0], eigs[1])
else:
modstr = '%.1f' % (1000. * A.sigma)
W = np.zeros_like(A.subset).astype(float)
W[A.subset] = A.fore
rl,rh = Tme.ra.min(), Tme.ra.max()
dl,dh = Tme.dec.min(), Tme.dec.max()
if doweighted:
rounds = [ {}, { 'weights': W } ]
else:
rounds = [ {} ]
for i,args in enumerate(rounds):
tsuf = '' if i == 0 else ' (weighted)'
N = len(M.dra_arcsec) if i == 0 else sumw
plotresids(Tme, M, '%s-%s match residuals%s' % (Tme.cam, Tref.cam, tsuf),
bins=100, **args)
ps.savefig()
dst = 1000. * np.sqrt(M.dra_arcsec ** 2 + M.ddec_arcsec ** 2)
loghist(Tme.mag[M.I], dst, 100, **args)
plt.xlabel(Tme.filter)
plt.ylabel('Match residual (mas)')
ps.savefig()
loghist(Tref.mag[M.J], dst, 100, **args)
plt.xlabel(Tref.filter)
plt.ylabel('Match residual (mas)')
ps.savefig()
H,xe,ye = | |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import utils as tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import state_ops
from tensorflow.python.layers.normalization import BatchNormalization
from tensorflow.python.ops import gen_control_flow_ops
class MovingFreeBatchNormalization(BatchNormalization):
def build(self, input_shape):
super(BatchNormalization, self).build(input_shape)
self.built = False
# all assertion are
input_shape = tensor_shape.TensorShape(input_shape)
ndims = len(input_shape)
# Raise parameters of fp16 batch norm to fp32
if self.dtype == dtypes.float16 or self.dtype == dtypes.bfloat16:
param_dtype = dtypes.float32
else:
param_dtype = self.dtype or dtypes.float32
axis_to_dim = {x: input_shape[x].value for x in self.axis}
if len(axis_to_dim) == 1 and self.virtual_batch_size is None:
# Single axis batch norm (most common/default use-case)
param_shape = (list(axis_to_dim.values())[0],)
else:
# Parameter shape is the original shape but with 1 in all non-axis dims
param_shape = [axis_to_dim[i] if i in axis_to_dim
else 1 for i in range(ndims)]
if self.virtual_batch_size is not None:
# When using virtual batches, add an extra dim at index 1
param_shape.insert(1, 1)
for idx, x in enumerate(self.axis):
self.axis[idx] = x + 1 # Account for added dimension
try:
# Disable variable partitioning when creating the moving mean and variance
if hasattr(self, '_scope') and self._scope:
partitioner = self._scope.partitioner
self._scope.set_partitioner(None)
else:
partitioner = None
# internal statistics fitted during a pre-inference step
self.mean = self.add_variable(
name='mean',
shape=param_shape,
dtype=param_dtype,
initializer=self.moving_mean_initializer,
trainable=False)
self.variance = self.add_variable(
name='variance',
shape=param_shape,
dtype=param_dtype,
initializer=self.moving_variance_initializer,
trainable=False)
self.n_updates = self.add_variable(
name='n_updates',
shape=[],
dtype=param_dtype,
initializer=init_ops.zeros_initializer(),
trainable=False)
finally:
if partitioner:
self._scope.set_partitioner(partitioner)
self.built = True
def _assign_moving_average(self, variable, value, momentum):
with ops.name_scope(None, 'AssignMovingAvg',
[variable, value, momentum]) as scope:
decay = ops.convert_to_tensor(1.0 - momentum, name='decay')
if decay.dtype != variable.dtype.base_dtype:
decay = math_ops.cast(decay, variable.dtype.base_dtype)
update_delta = (variable - value) * decay
return state_ops.assign_sub(variable, update_delta, name=scope)
def _update_statistics(self, variable, value, n_updates):
with ops.name_scope(None, 'UpdateStatistics',
[variable, value, n_updates]) as scope:
with ops.colocate_with(variable):
stat = variable * n_updates + value
stat /= n_updates + 1
return state_ops.assign(variable, stat, name=scope)
    def _fused_batch_norm(self, inputs, training, use_moving_statistics):
        """Returns the output of fused batch norm.

        `training` and `use_moving_statistics` may be Python bools or scalar
        boolean Tensors.  When training, inputs are normalized with the
        current batch's statistics; otherwise they are normalized with either
        the moving statistics (use_moving_statistics=True) or the accumulated
        pre-inference statistics self.mean/self.variance.
        """
        beta = self.beta if self.center else self._beta_const
        gamma = self.gamma if self.scale else self._gamma_const
        def _fused_batch_norm_training():
            # is_training defaults to True: normalize with batch statistics.
            return nn.fused_batch_norm(
                inputs,
                gamma,
                beta,
                epsilon=self.epsilon,
                data_format=self._data_format)
        # use_moving_statistics==True use moving_mean and moving_variance, else mean and variance
        mean = tf_utils.smart_cond(use_moving_statistics, lambda: self.moving_mean, lambda: self.mean)
        variance = tf_utils.smart_cond(use_moving_statistics, lambda: self.moving_variance, lambda: self.variance)
        # these variables will be used in _fused_batch_norm_inference(), thanks to python closure
        def _fused_batch_norm_inference():
            return nn.fused_batch_norm(
                inputs,
                gamma,
                beta,
                mean=mean,
                variance=variance,
                epsilon=self.epsilon,
                is_training=False,
                data_format=self._data_format)
        output, mean, variance = tf_utils.smart_cond(training, _fused_batch_norm_training, _fused_batch_norm_inference)
        # if training == True: mean and variance returned are mean and variance of the current batch
        # elif training == False: mean and variance return are (self.mean, self.variance) or
        # (self.moving_mean, self.moving_variance) depending of the value of use_moving_statistics
        if not self._bessels_correction_test_only:
            # Remove Bessel's correction to be consistent with non-fused batch norm.
            # Note that the variance computed by fused batch norm is
            # with Bessel's correction.
            sample_size = math_ops.cast(
                array_ops.size(inputs) / array_ops.size(variance), variance.dtype)
            factor = (sample_size - math_ops.cast(1.0, variance.dtype)) / sample_size
            variance *= factor
        training_value = tf_utils.constant_value(training)
        if training_value is None:
            # `training` only known at run time: momentum 1.0 when not
            # training makes the moving-average update a no-op.
            momentum = tf_utils.smart_cond(training,
                                           lambda: self.momentum,
                                           lambda: 1.0)
        else:
            momentum = ops.convert_to_tensor(self.momentum)
        if training_value or training_value is None:
            # if training, first create operations which update self.mean and self.variance
            mean_update = self._update_statistics(self.mean, mean, self.n_updates)
            variance_update = self._update_statistics(self.variance, variance, self.n_updates)
            with ops.control_dependencies([mean_update, variance_update]):
                # increment the counter only after both statistics updates ran
                update_n_updates = state_ops.assign_add(self.n_updates, 1., )
                # add this combination of operations to a specific collection 'UPDATE_BN_OPS'
                ops.add_to_collection('UPDATE_BN_OPS', update_n_updates)
            # operations to reset bn statistics
            reset_mean = state_ops.assign(self.mean, array_ops.zeros_like(self.mean))
            reset_variance = state_ops.assign(self.variance, array_ops.zeros_like(self.variance))
            reset_n_updates = state_ops.assign(self.n_updates, 0.)
            with ops.control_dependencies([reset_mean, reset_variance, reset_n_updates]):
                reset_bn = gen_control_flow_ops.no_op("ResetBatchNormStats")
            ops.add_to_collection('RESET_BN_OPS', reset_bn)
            # to keep the classical behavior of the Batch Norm !
            # update moving averages and add operations to tf.GraphKeys.UPDATE_OPS
            # these operation must be run when optimizing the network
            moving_mean_update = self._assign_moving_average(self.moving_mean, mean, momentum)
            moving_variance_update = self._assign_moving_average(self.moving_variance, variance, momentum)
            self.add_update(moving_mean_update, inputs=True)
            self.add_update(moving_variance_update, inputs=True)
        return output
def call(self, inputs, training=None, use_moving_statistics=True):
"""
:param inputs: input features
:param training: boolean or boolean Tensor (with shape []) which determines the current training phase
:param use_moving_statistics: boolean or boolean Tensor (with shape []) which selects statistics to use
when training==True (or the Tensor value) statistics (mean and variance) are from the inputs !
when training==False, if use_moving_statistics==True -> feed forward with moving statistics (updated
with operations defined in GraphKeys.UPDATE_OPS)
else (use_moving_statistics==False -> feed forward with raw statistics (updated
with operations from collections 'UPDATE_BN_OPS'
'RESET_BN_OPS' contains operations to reset these vaiables between inferences.
"""
in_eager_mode = context.executing_eagerly()
if self.virtual_batch_size is not None:
# Virtual batches (aka ghost batches) can be simulated by reshaping the
# Tensor and reusing the existing batch norm implementation
original_shape = [-1] + inputs.shape.as_list()[1:]
expanded_shape = [self.virtual_batch_size, -1] + original_shape[1:]
# Will cause errors if virtual_batch_size does not divide the batch size
inputs = array_ops.reshape(inputs, expanded_shape)
def undo_virtual_batching(outputs):
outputs = array_ops.reshape(outputs, original_shape)
return outputs
if self.fused:
outputs = self._fused_batch_norm(inputs, training=training, use_moving_statistics=use_moving_statistics)
if self.virtual_batch_size is not None:
# Currently never reaches here since fused_batch_norm does not support
# virtual batching
outputs = undo_virtual_batching(outputs)
return outputs
# Compute the axes along which to reduce the mean / variance
input_shape = inputs.get_shape()
ndims = len(input_shape)
reduction_axes = [i for i in range(ndims) if i not in self.axis]
if self.virtual_batch_size is not None:
del reduction_axes[1] # Do not reduce along virtual batch dim
# Broadcasting only necessary for single-axis batch norm where the axis is
# not the last dimension
broadcast_shape = [1] * ndims
broadcast_shape[self.axis[0]] = input_shape[self.axis[0]].value
def _broadcast(v):
if (v is not None and
len(v.get_shape()) != ndims and
reduction_axes != list(range(ndims - 1))):
return array_ops.reshape(v, broadcast_shape)
return v
scale, offset = _broadcast(self.gamma), _broadcast(self.beta)
def _compose_transforms(scale, offset, then_scale, then_offset):
if then_scale is not None:
scale *= then_scale
offset *= then_scale
if then_offset is not None:
offset += then_offset
return (scale, offset)
# Determine a boolean value for `training`: could be True, False, or None.
training_value = tf_utils.constant_value(training)
if training_value is not False:
if self.adjustment:
adj_scale, adj_bias = self.adjustment(array_ops.shape(inputs))
# Adjust only during training.
adj_scale = tf_utils.smart_cond(training,
lambda: adj_scale,
lambda: array_ops.ones_like(adj_scale))
adj_bias = tf_utils.smart_cond(training,
lambda: adj_bias,
lambda: array_ops.zeros_like(adj_bias))
scale, offset = _compose_transforms(adj_scale, adj_bias, scale, offset)
# Some of the computations here are not necessary when training==False
# but not a constant. However, this makes the code simpler.
keep_dims = self.virtual_batch_size is not None or len(self.axis) > 1
# mean and variance of the current batch
mean, variance = nn.moments(inputs, reduction_axes, keep_dims=keep_dims)
mean = tf_utils.smart_cond(training,
lambda: mean,
lambda: tf_utils.smart_cond(use_moving_statistics,
lambda: self.moving_mean,
lambda: self.mean))
variance = tf_utils.smart_cond(training,
lambda: variance,
lambda: tf_utils.smart_cond(use_moving_statistics,
lambda: self.moving_variance,
lambda: self.variance))
if self.renorm:
r, d, new_mean, new_variance = self._renorm_correction_and_moments(
mean, variance, training)
# When training, the normalized values (say, x) will be transformed as
# x * gamma + beta without renorm, and (x * r + d) * gamma + beta
# = x * (r * gamma) + (d * gamma + beta) with renorm.
r = _broadcast(array_ops.stop_gradient(r, name='renorm_r'))
d = _broadcast(array_ops.stop_gradient(d, name='renorm_d'))
scale, offset = _compose_transforms(r, d, scale, offset)
else:
new_mean, new_variance = mean, variance
if self.virtual_batch_size is not None:
# This isn't strictly correct since in ghost batch norm, you are
# supposed to sequentially update the moving_mean and moving_variance
# with each sub-batch. However, since the moving statistics are only
# used during evaluation, it is more efficient to just update in one
# step and should not make a significant difference in the result.
new_mean = math_ops.reduce_mean(mean, axis=1, keepdims=True)
new_variance = math_ops.reduce_mean(variance, axis=1, keepdims=True)
def _do_update(var, value):
if in_eager_mode and not self.trainable:
return
return self._assign_moving_average(var, value, self.momentum)
moving_mean_update = tf_utils.smart_cond(
training,
lambda: _do_update(self.moving_mean, new_mean),
lambda: self.moving_mean)
moving_variance_update = tf_utils.smart_cond(
training,
lambda: _do_update(self.moving_variance, new_variance),
lambda: self.moving_variance)
if not context.executing_eagerly():
self.add_update(moving_mean_update, inputs=True)
self.add_update(moving_variance_update, inputs=True)
mean_update = self._update_statistics(self.mean, mean, self.n_updates)
variance_update = self._update_statistics(self.variance, variance, self.n_updates)
with ops.control_dependencies([mean_update, variance_update]):
# update n_updates only after updating self.mean and self.variance
update_n_updates = state_ops.assign_add(self.n_updates, 1.)
ops.add_to_collection('UPDATE_BN_OPS', update_n_updates)
reset_mean = | |
# Repo: ChenSunMac/Kaggle_Project
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 15 17:00:02 2018
@author: Chens
"""
import sys #access to system parameters https://docs.python.org/3/library/sys.html
print("Python version: {}". format(sys.version))
import pandas as pd #collection of functions for data processing and analysis modeled after R dataframes with SQL like features
print("pandas version: {}". format(pd.__version__))
import matplotlib #collection of functions for scientific and publication-ready visualization
print("matplotlib version: {}". format(matplotlib.__version__))
import numpy as np #foundational package for scientific computing
print("NumPy version: {}". format(np.__version__))
import scipy as sp #collection of functions for scientific computing and advance mathematics
print("SciPy version: {}". format(sp.__version__))
import sklearn #collection of machine learning algorithms
print("scikit-learn version: {}". format(sklearn.__version__))
#misc libraries
import random
import time
## Load Data Model
#Common Model Algorithms
from sklearn import svm, tree, linear_model, neighbors, naive_bayes, ensemble, discriminant_analysis, gaussian_process
from xgboost import XGBClassifier
#Common Model Helpers
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn import feature_selection
from sklearn import model_selection
from sklearn import metrics
#Visualization
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import seaborn as sns
# NOTE(review): pandas.tools.plotting is a legacy import path (removed in
# modern pandas, where it lives at pandas.plotting) -- confirm pandas version.
from pandas.tools.plotting import scatter_matrix
# Global plotting defaults used by every chart produced below.
mpl.style.use('ggplot')
sns.set_style('white')
pylab.rcParams['figure.figsize'] = 12,8
# Import Data -------------------------------------------------------
# import raw data from file
data_raw = pd.read_csv('./input/train.csv')
# test data
data_val = pd.read_csv('./input/test.csv')
# copy the raw data; deep=True so cleaning data1 leaves data_raw untouched
data1 = data_raw.copy(deep = True)
#however passing by reference is convenient, because we can clean both datasets at once
data_cleaner = [data1, data_val]
# Preview Data -----------------------------------------------------
print (data_raw.info())
print (data_raw.sample(10))
print ( data_raw.head())
### Clean Data ----------------------------------------------------
#### Find Null Data: report per-column null counts for both datasets
print('Train columns with null values:\n', data1.isnull().sum())
print("-"*10)
print('Test/Validation columns with null values:\n', data_val.isnull().sum())
print("-"*10)
print(data_raw.describe(include = 'all'))
### Clean Null Data
# Impute missing values in-place in BOTH train and validation frames.
for dataset in data_cleaner:
    #complete missing age with median
    dataset['Age'].fillna(dataset['Age'].median(), inplace = True)
    #complete embarked with mode
    dataset['Embarked'].fillna(dataset['Embarked'].mode()[0], inplace = True)
    #complete missing fare with median
    dataset['Fare'].fillna(dataset['Fare'].median(), inplace = True)
#delete the cabin feature/column and others previously stated to exclude in train dataset
# NOTE(review): columns are dropped only from data1; data_val keeps
# PassengerId/Cabin/Ticket (PassengerId is needed for the submission file).
drop_column = ['PassengerId','Cabin', 'Ticket']
data1.drop(drop_column, axis=1, inplace = True)
print(data1.isnull().sum())
print("-"*10)
print(data_val.isnull().sum())
## CREATE: Feature Engineering for train and test/validation dataset
for dataset in data_cleaner:
    #Discrete variables
    dataset['FamilySize'] = dataset ['SibSp'] + dataset['Parch'] + 1
    dataset['IsAlone'] = 1 #initialize to yes/1 is alone
    # NOTE(review): column-then-.loc is chained indexing; pandas may warn
    # (SettingWithCopyWarning) -- dataset.loc[mask, 'IsAlone'] = 0 is safer.
    dataset['IsAlone'].loc[dataset['FamilySize'] > 1] = 0 # now update to no/0 if family size is greater than 1
    # Title = the honorific between the comma and the period in 'Name'.
    dataset['Title'] = dataset['Name'].str.split(", ", expand=True)[1].str.split(".", expand=True)[0]
    # Quartile bins for Fare; 5 equal-width bins for integer Age.
    dataset['FareBin'] = pd.qcut(dataset['Fare'], 4)
    dataset['AgeBin'] = pd.cut(dataset['Age'].astype(int), 5)
#cleanup rare title names
#print(data1['Title'].value_counts())
stat_min = 10 #while small is arbitrary, we'll use the common minimum in statistics: http://nicholasjjackson.com/2012/03/08/sample-size-is-10-a-magic-number/
title_names = (data1['Title'].value_counts() < stat_min)
#apply and lambda functions are quick and dirty code to find and replace with fewer lines of code: https://community.modeanalytics.com/python/tutorial/pandas-groupby-and-python-lambda-functions/
data1['Title'] = data1['Title'].apply(lambda x: 'Misc' if title_names.loc[x] == True else x)
print(data1['Title'].value_counts())
print("-"*10)
## Convert Formats ----------------------------------------
#code categorical data
# A single LabelEncoder is reused: fit_transform refits on every call, so
# each column gets its own independent integer coding.
label = LabelEncoder()
for dataset in data_cleaner:
    dataset['Sex_Code'] = label.fit_transform(dataset['Sex'])
    dataset['Embarked_Code'] = label.fit_transform(dataset['Embarked'])
    dataset['Title_Code'] = label.fit_transform(dataset['Title'])
    dataset['AgeBin_Code'] = label.fit_transform(dataset['AgeBin'])
    dataset['FareBin_Code'] = label.fit_transform(dataset['FareBin'])
#define y variable aka target/outcome
Target = ['Survived']
#define x variables for original features aka feature selection
data1_x = ['Sex','Pclass', 'Embarked', 'Title','SibSp', 'Parch', 'Age', 'Fare', 'FamilySize', 'IsAlone'] #pretty name/values for charts
data1_x_calc = ['Sex_Code','Pclass', 'Embarked_Code', 'Title_Code','SibSp', 'Parch', 'Age', 'Fare'] #coded for algorithm calculation
data1_xy = Target + data1_x
print('Original X Y: ', data1_xy, '\n')
#define x variables for original w/bin features to remove continuous variables
data1_x_bin = ['Sex_Code','Pclass', 'Embarked_Code', 'Title_Code', 'FamilySize', 'AgeBin_Code', 'FareBin_Code']
data1_xy_bin = Target + data1_x_bin
print('Bin X Y: ', data1_xy_bin, '\n')
#define x and y variables for dummy features original
data1_dummy = pd.get_dummies(data1[data1_x])
data1_x_dummy = data1_dummy.columns.tolist()
data1_xy_dummy = Target + data1_x_dummy
print('Dummy X Y: ', data1_xy_dummy, '\n')
# -----Split Train/Test Set by 75/25 rule
# Three parallel splits (same random_state => same row partition) for the
# raw-coded, binned, and one-hot feature sets.
train1_x, test1_x, train1_y, test1_y = model_selection.train_test_split(data1[data1_x_calc], data1[Target], random_state = 0)
train1_x_bin, test1_x_bin, train1_y_bin, test1_y_bin = model_selection.train_test_split(data1[data1_x_bin], data1[Target] , random_state = 0)
train1_x_dummy, test1_x_dummy, train1_y_dummy, test1_y_dummy = model_selection.train_test_split(data1_dummy[data1_x_dummy], data1[Target], random_state = 0)
print("Data1 Shape: {}".format(data1.shape))
print("Train1 Shape: {}".format(train1_x.shape))
print("Test1 Shape: {}".format(test1_x.shape))
# Perform Basic Analysis -----------------------------------
# Survival rate grouped by each non-continuous feature.
for x in data1_x:
    if data1[x].dtype != 'float64' :
        print('Survival Correlation by:', x)
        print(data1[[x, Target[0]]].groupby(x, as_index=False).mean())
        print('-'*10, '\n')
#using crosstabs: https://pandas.pydata.org/pandas-docs/stable/generated/pandas.crosstab.html
print(pd.crosstab(data1['Title'],data1[Target[0]]))
# and some graphical analysis
#%matplotlib inline (for Ipython console)
# 2x3 grid: survival rate vs. categorical features (bars) and binned/ordinal
# features (point plots).
fig, saxis = plt.subplots(2, 3,figsize=(16,12))
sns.barplot(x = 'Embarked', y = 'Survived', data=data1, ax = saxis[0,0])
sns.barplot(x = 'Pclass', y = 'Survived', order=[1,2,3], data=data1, ax = saxis[0,1])
sns.barplot(x = 'IsAlone', y = 'Survived', order=[1,0], data=data1, ax = saxis[0,2])
sns.pointplot(x = 'FareBin', y = 'Survived', data=data1, ax = saxis[1,0])
sns.pointplot(x = 'AgeBin', y = 'Survived', data=data1, ax = saxis[1,1])
sns.pointplot(x = 'FamilySize', y = 'Survived', data=data1, ax = saxis[1,2])
#how does embark port factor with class, sex, and survival compare
#facetgrid: https://seaborn.pydata.org/generated/seaborn.FacetGrid.html
e = sns.FacetGrid(data1, col = 'Embarked')
e.map(sns.pointplot, 'Pclass', 'Survived', 'Sex', ci=95.0, palette = 'deep')
e.add_legend()
def correlation_heatmap(df):
    """Draw an annotated heatmap of the pairwise Pearson correlations
    between the numeric columns of *df* on a new 14x12-inch figure."""
    fig, axes = plt.subplots(figsize=(14, 12))
    palette = sns.diverging_palette(220, 10, as_cmap=True)
    sns.heatmap(
        df.corr(),
        ax=axes,
        cmap=palette,
        annot=True,
        annot_kws={'fontsize': 12},
        square=True,
        vmax=1.0,
        linewidths=0.1,
        linecolor='white',
        cbar_kws={'shrink': .9},
    )
    plt.title('Pearson Correlation of Features', y=1.05, size=15)
# Correlation heatmap over all of data1's numeric columns.
correlation_heatmap(data1)
## Model the Data
# Candidate estimators to compare head-to-head, grouped by family.
MLA = [
    #Ensemble Methods
    ensemble.AdaBoostClassifier(),
    ensemble.BaggingClassifier(),
    ensemble.ExtraTreesClassifier(),
    ensemble.GradientBoostingClassifier(),
    ensemble.RandomForestClassifier(),
    #Gaussian Processes
    gaussian_process.GaussianProcessClassifier(),
    #GLM
    linear_model.LogisticRegressionCV(),
    linear_model.PassiveAggressiveClassifier(),
    linear_model.RidgeClassifierCV(),
    linear_model.SGDClassifier(),
    linear_model.Perceptron(),
    #Navies Bayes
    naive_bayes.BernoulliNB(),
    naive_bayes.GaussianNB(),
    #Nearest Neighbor
    neighbors.KNeighborsClassifier(),
    #SVM
    svm.SVC(probability=True),
    svm.NuSVC(probability=True),
    svm.LinearSVC(),
    #Trees
    tree.DecisionTreeClassifier(),
    tree.ExtraTreeClassifier(),
    #Discriminant Analysis
    discriminant_analysis.LinearDiscriminantAnalysis(),
    discriminant_analysis.QuadraticDiscriminantAnalysis(),
    #xgboost: http://xgboost.readthedocs.io/en/latest/model.html
    XGBClassifier()
    ]
#split dataset in cross-validation with this splitter class:
# http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.ShuffleSplit.html#sklearn.model_selection.ShuffleSplit
# note: this is an alternative to train_test_split
cv_split = model_selection.ShuffleSplit(n_splits = 10, test_size = .3, train_size = .6, random_state = 0 )
# run model 10x with 60/30 split intentionally leaving out 10%
#create table to compare MLA metrics
MLA_columns = ['MLA Name', 'MLA Parameters','MLA Train Accuracy Mean', 'MLA Test Accuracy Mean', 'MLA Test Accuracy 3*STD' ,'MLA Time']
MLA_compare = pd.DataFrame(columns = MLA_columns)
#create table to compare MLA predictions
# NOTE(review): data1[Target] returns a view-like selection; adding columns
# below may also touch data1 or warn -- data1[Target].copy() would be safer.
MLA_predict = data1[Target]
#index through MLA and save performance to table
row_index = 0
for alg in MLA:
    #set name and parameters
    MLA_name = alg.__class__.__name__
    MLA_compare.loc[row_index, 'MLA Name'] = MLA_name
    MLA_compare.loc[row_index, 'MLA Parameters'] = str(alg.get_params())
    #score model with cross validation: http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_validate.html#sklearn.model_selection.cross_validate
    cv_results = model_selection.cross_validate(alg, data1[data1_x_bin], data1[Target], cv  = cv_split)
    MLA_compare.loc[row_index, 'MLA Time'] = cv_results['fit_time'].mean()
    MLA_compare.loc[row_index, 'MLA Train Accuracy Mean'] = cv_results['train_score'].mean()
    MLA_compare.loc[row_index, 'MLA Test Accuracy Mean'] = cv_results['test_score'].mean()
    #if this is a non-bias random sample, then +/-3 standard deviations (std) from the mean, should statistically capture 99.7% of the subsets
    MLA_compare.loc[row_index, 'MLA Test Accuracy 3*STD'] = cv_results['test_score'].std()*3 #let's know the worst that can happen!
    #save MLA predictions - see section 6 for usage
    # Refit on the full training data to record in-sample predictions.
    alg.fit(data1[data1_x_bin], data1[Target])
    MLA_predict[MLA_name] = alg.predict(data1[data1_x_bin])
    row_index+=1
#print and sort table: https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.sort_values.html
MLA_compare.sort_values(by = ['MLA Test Accuracy Mean'], ascending = False, inplace = True)
print(MLA_compare)
sns.barplot(x='MLA Test Accuracy Mean', y = 'MLA Name', data = MLA_compare, color = 'm')
#prettify using pyplot: https://matplotlib.org/api/pyplot_api.html
plt.title('Machine Learning Algorithm Accuracy Score \n')
plt.xlabel('Accuracy Score (%)')
plt.ylabel('Algorithm')
# Cross-Validation (CV) ---------------
# Tune Model with Hyper-Parameters
#base model
# Baseline: an untuned DecisionTree, cross-validated with the same splitter.
dtree = tree.DecisionTreeClassifier(random_state = 0)
base_results = model_selection.cross_validate(dtree, data1[data1_x_bin], data1[Target], cv  = cv_split)
dtree.fit(data1[data1_x_bin], data1[Target])
print('BEFORE DT Parameters: ', dtree.get_params())
print("BEFORE DT Training w/bin score mean: {:.2f}". format(base_results['train_score'].mean()*100))
print("BEFORE DT Test w/bin score mean: {:.2f}". format(base_results['test_score'].mean()*100))
print("BEFORE DT Test w/bin score 3*std: +/- {:.2f}". format(base_results['test_score'].std()*100*3))
#print("BEFORE DT Test w/bin set score min: {:.2f}". format(base_results['test_score'].min()*100))
print('-'*10)
#tune hyper-parameters: http://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html#sklearn.tree.DecisionTreeClassifier
param_grid = {'criterion': ['gini', 'entropy'],  #scoring methodology; two supported formulas for calculating information gain - default is gini
              #'splitter': ['best', 'random'], #splitting methodology; two supported strategies - default is best
              'max_depth': [2,4,6,8,10,None], #max depth tree can grow; default is none
              #'min_samples_split': [2,5,10,.03,.05], #minimum subset size BEFORE new split (fraction is % of total); default is 2
              #'min_samples_leaf': [1,5,10,.03,.05], #minimum subset size AFTER new split split (fraction is % of total); default is 1
              #'max_features': [None, 'auto'], #max features to consider when performing split; default none or all
              'random_state': [0] #seed or control random number generator: https://www.quora.com/What-is-seed-in-random-number-generation
             }
# Exhaustive grid search over criterion x max_depth, scored by ROC AUC.
tune_model = model_selection.GridSearchCV(tree.DecisionTreeClassifier(), param_grid=param_grid, scoring = 'roc_auc', cv = cv_split)
tune_model.fit(data1[data1_x_bin], data1[Target])
#print(tune_model.cv_results_.keys())
#print(tune_model.cv_results_['params'])
print('AFTER DT Parameters: ', tune_model.best_params_)
#print(tune_model.cv_results_['mean_train_score'])
print("AFTER DT Training w/bin score mean: {:.2f}". format(tune_model.cv_results_['mean_train_score'][tune_model.best_index_]*100))
#print(tune_model.cv_results_['mean_test_score'])
print("AFTER DT Test w/bin score mean: {:.2f}". format(tune_model.cv_results_['mean_test_score'][tune_model.best_index_]*100))
print("AFTER DT Test w/bin score 3*std: +/- {:.2f}". format(tune_model.cv_results_['std_test_score'][tune_model.best_index_]*100*3))
# Tune with Feature selection
#base model
print('BEFORE DT RFE Training Shape Old: ', data1[data1_x_bin].shape)
print('BEFORE DT RFE Training Columns Old: ', data1[data1_x_bin].columns.values)
print("BEFORE DT RFE Training w/bin score mean: {:.2f}". format(base_results['train_score'].mean()*100))
print("BEFORE DT RFE Test w/bin score mean: {:.2f}". format(base_results['test_score'].mean()*100))
print("BEFORE DT RFE Test w/bin score 3*std: +/- {:.2f}". format(base_results['test_score'].std()*100*3))
print('-'*10)
#feature selection
dtree_rfe = feature_selection.RFECV(dtree, step = 1, scoring = 'accuracy', cv = cv_split)
dtree_rfe.fit(data1[data1_x_bin], data1[Target])
#transform x&y to reduced features and fit new model
#alternative: can use pipeline to reduce fit and transform steps: http://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html
X_rfe = data1[data1_x_bin].columns.values[dtree_rfe.get_support()]
rfe_results = model_selection.cross_validate(dtree, data1[X_rfe], data1[Target], cv = cv_split)
#print(dtree_rfe.grid_scores_)
print('AFTER DT RFE Training Shape New: ', data1[X_rfe].shape)
print('AFTER DT RFE Training Columns New: ', X_rfe)
print("AFTER DT RFE Training w/bin score mean: {:.2f}". format(rfe_results['train_score'].mean()*100))
print("AFTER DT RFE Test w/bin score | |
#!/usr/bin/env python
#! -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import division
import argparse
import pprint
import urllib
import re
import os
import pdb
import sys
import codecs
import json
import typotools
import traceback
import subprocess
import shlex
import datetime
from typotools import remove_excessive_whitespace, WHITESPACE, \
PUNCTUATION, recursive_typography
QUESTION_LABELS = ['handout', 'question', 'answer',
'accept', 'reject', 'comment', 'source', 'author', 'number',
'setcounter']
SEP = os.linesep
ENC = sys.getdefaultencoding()
if ENC == 'ascii':
ENC = 'cp866'
re_tour = re.compile(r'^ТУР ?([0-9IVXLCDM]*)([\.:])?$', re.I | re.U)
re_tourrev = re.compile(r'^([0-9]+) ТУР([\.:])?$', re.I | re.U)
re_question = re.compile(r'ВОПРОС ?[№N]?([0-9]*) ?[\.:]', re.I | re.U)
re_answer = re.compile(r'ОТВЕТЫ? ?[№N]?([0-9]+)? ?[:]', re.I | re.U)
re_accept = re.compile(r'ЗАЧ[ЕЁ]Т ?[\.:]', re.I | re.U)
re_reject = re.compile(r'НЕЗАЧ[ЕЁ]Т ?[\.:]', re.I | re.U)
re_comment = re.compile(r'КОММЕНТАРИЙ ?[№N]?([0-9]+)? ?[\.:]', re.I | re.U)
re_author = re.compile(r'АВТОР\(?Ы?\)? ?[\.:]', re.I | re.U)
re_source = re.compile(r'ИСТОЧНИК\(?И?\)? ?[\.:]', re.I | re.U)
re_editor = re.compile(r'РЕДАКТОР(Ы|СКАЯ ГРУППА)? ?[\.:]', re.I | re.U)
re_date = re.compile(r'ДАТА ?[\.:]', re.I | re.U)
re_handout = re.compile(r'РАЗДА(ЧА|ТКА|ТОЧНЫЙ МАТЕРИАЛ) ?[\.:]', re.I | re.U)
re_number = re.compile(r'^[0-9]+[\.\)] *')
##### PDB DEBUGGING
def info(type, value, tb):
    """sys.excepthook replacement: drop into pdb post-mortem on a crash.

    Falls back to the default hook when running interactively or when
    stderr is not attached to a tty-like device (e.g. piped output),
    where an interactive debugger would be useless.  The parameter names
    follow the sys.excepthook signature (``type`` shadows the builtin,
    kept for interface clarity).
    """
    if hasattr(sys, 'ps1') or not sys.stderr.isatty():
        # we are in interactive mode or we don't have a tty-like
        # device, so we call the default hook
        sys.__excepthook__(type, value, tb)
    else:
        # we are NOT in interactive mode, print the exception...
        # (uses the module-level traceback/pdb imports; the redundant
        # local re-import was removed)
        traceback.print_exception(type, value, tb)
        # Fix: the bare `print` statement is a syntax error on Python 3
        # and printed only a newline on Python 2; print('') emits the
        # same newline and is valid on both.
        print('')
        # ...then start the debugger in post-mortem mode.
        pdb.pm()
# Install the hook so any uncaught exception runs info() (see above),
# opening pdb post-mortem when attached to a terminal.
sys.excepthook = info
##### END DEBUGGING
# Exceptions
class WrongFormattingLevelException(Exception):
    """Raised when a format is requested at the wrong structural level
    (e.g. formatting a single Question as docx)."""
class WrongPackageInitException(Exception):
    """Raised when a Package is constructed with invalid arguments."""
# End of exceptions
def partition(alist, indices):
    """Split *alist* into consecutive slices at the given cut points.

    ``indices`` is a list of cut positions; the result always contains
    ``len(indices) + 1`` pieces (the last slice runs to the end).
    """
    bounds = [0] + indices + [None]
    pieces = []
    for start, stop in zip(bounds[:-1], bounds[1:]):
        pieces.append(alist[start:stop])
    return pieces
def check_question(question):
    """Warn about required fields missing from a parsed question dict.

    A complete question must provide 'question', 'answer', 'source' and
    'author'.  Missing field names are collected and reported on stdout;
    nothing is returned.
    """
    warnings = []
    for el in {'question', 'answer', 'source', 'author'}:
        if el not in question:
            warnings.append(el)
    # Fix: the original tested `len(warnings) > 1`, so a question lacking
    # exactly one required field passed silently; warn on any missing field.
    if warnings:
        # NOTE: the decode/encode chain is Python 2 str handling (ENC is
        # the console encoding chosen at module level).
        print('WARNING: question {} lacks the following fields: {}{}'
              .format(question, ', '.join(warnings), SEP)
              .decode('unicode_escape')
              .encode(ENC, errors='replace'))
class Question(object):
    """Container for a single quiz question and its labelled fields.

    Accepts only keyword arguments whose names appear in the module-level
    QUESTION_LABELS list; each becomes an instance attribute.
    """

    def __init__(self, *args, **kwargs):
        for key, value in kwargs.items():
            if key not in QUESTION_LABELS:
                raise Exception('Unexpected argument: {}'.format(key))
            setattr(self, key, value)

    def format(self, format='4s'):
        """Render this question in the requested output format.

        docx is rejected: questions are only formatted as part of a whole
        package there.  Unknown formats return None.
        """
        if format == 'docx':
            raise WrongFormattingLevelException(
                "Sorry, questions are not meant to be "
                "formatted individually in docx.")
        renderers = {
            '4s': self._format_4s,
            'tex': self._format_tex,
            'lj': self._format_lj,
        }
        renderer = renderers.get(format)
        if renderer is not None:
            return renderer()

    def _format_docx(self):  # needs special treatment
        return ''

    def _format_4s(self):
        return ''

    def _format_tex(self):
        return ''

    def _format_lj(self):
        return ''

    def __repr__(self):
        # Show at most the first 100 characters of the question text,
        # encoded for the console (Python 2 byte-string repr).
        text = self.question
        suffix = '...' if len(text) > 100 else ''
        return "Question('{}{}')".format(text[:100], suffix).encode(
            ENC, errors='replace')
class ParsingStructure(object):
    """Parse raw package text into a flat list of [label, value] pairs,
    then fold consecutive question fields into Question objects.

    The result is stored as ``self.structure``: a list whose items are
    either Question instances or [label, value] pairs (e.g. tour/meta).
    """

    ### constants
    # Fields that must never swallow a following unlabelled line blindly.
    BADNEXTFIELDS = set(['question', 'answer'])
    # Label -> compiled recognizer (module-level regexes).
    regexes = {
        'tour': re_tour,
        'tourrev': re_tourrev,
        'question': re_question,
        'answer': re_answer,
        'accept': re_accept,
        'reject': re_reject,
        'comment': re_comment,
        'author': re_author,
        'source': re_source,
        'editor': re_editor,
        'date': re_date,
    }
    ### end constants

    def __init__(self, text):
        """
        Parsing rationale: every Question has two required fields: 'question'
        and the immediately following 'answer'. All the rest are optional,
        as is the order of these fields. On the other hand, everything
        except the 'question' is obligatorily marked, while the 'question' is
        optionally marked. But IF the question is not marked, 'meta' comments
        between Questions will not be parsed as 'meta' but will be merged to
        'question's.
        Parsing is done by regexes in the following steps:
        1. Identify all the fields you can, mark them with their respective
        labels, mark all the others with ''
        2. Merge fields inside Question with '' lines between them
        3. Ensure every 'answer' has a 'question'
        4. Mark all remaining '' fields as 'meta'
        5. Prettify input
        6. Pack Questions into dicts
        7. Store the resulting structure as self.structure
        """
        self.structure = []
        # 1. Every non-empty line becomes an (as yet unlabelled) entry.
        for x in re.split(r'\r?\n', text):
            if x != '':
                self.structure.append(['', remove_excessive_whitespace(x)])
        i = 0
        st = self.structure
        while i < len(st):
            # All labels whose regex matches this line, with match offsets.
            matching_regexes = {
                (regex, self.regexes[regex].search(st[i][1]).start(0))
                for regex in self.regexes
                if self.regexes[regex].search(st[i][1])}
            # If more than one regex matches string, split it and
            # insert into structure separately.
            if len(matching_regexes) == 1:
                st[i][0] = matching_regexes.pop()[0]
            elif len(matching_regexes) > 1:
                sorted_r = sorted(matching_regexes, key=lambda x: x[1])
                slices = []
                for j in range(1, len(sorted_r)):
                    slices.append(
                        [sorted_r[j][0], st[i][1][
                            sorted_r[j][1]
                            :
                            sorted_r[j+1][1] if j+1 < len(sorted_r)
                            else len(st[i][1])]])
                for slice_ in slices:
                    self.structure.insert(i+1, slice_)
                st[i][0] = sorted_r[0][0]
                st[i][1] = st[i][1][:sorted_r[1][1]]
            i += 1
        self.structure = st
        i = 0
        # 2. Merge unlabelled continuation lines into their fields.
        self.merge_y_to_x('question', 'answer')
        self.merge_to_x_until_nextfield('answer')
        self.merge_to_x_until_nextfield('comment')
        # 3. Every answer must be preceded by a (possibly synthesized) question.
        i = 0
        while i < len(self.structure):
            if (self.structure[i][0] == 'answer'
                    and self.structure[i-1][0] not in ('question',
                                                       'newquestion')):
                self.structure.insert(i, ['newquestion', ''])
                i = 0
            i += 1
        i = 0
        while i < len(self.structure) - 1:
            if (self.structure[i][0] == ''
                    and self.structure[i+1][0] == 'newquestion'):
                self.merge_to_next(i)
                # If the merged text starts with a number (and the previous
                # entry does not), treat it as a numbered question line.
                if (re_number.search(
                        remove_excessive_whitespace(self.structure[i][1]))
                        and not re_number.search(
                            remove_excessive_whitespace(self.structure[i-1][1]))):
                    self.structure[i][0] = 'question'
                    self.structure[i][1] = re_number.sub(
                        '', remove_excessive_whitespace(self.structure[i][1]))
                    try:
                        # NOTE(review): the number was already stripped above,
                        # so this search is expected to fail and fall through
                        # to `pass` — kept as-is pending the TODO below.
                        self.structure.insert(i,
                            ['number', int(re_number.search(
                                remove_excessive_whitespace(
                                    self.structure[i][1])
                                ).group(0))])
                    except:
                        pass  # TODO: figure out what this means
                    i = 0
            i += 1
        for element in self.structure:
            if element[0] == 'newquestion':
                element[0] = 'question'
        self.dirty_merge_to_x_until_nextfield('source')
        for id, element in enumerate(self.structure):
            # A bare "АВТОР:" label with no text on the line: pull the
            # following entry up into it.
            if (element[0] == 'author' and re.search(r'^{}$'.format(re_author.
                    pattern),
                    remove_excessive_whitespace(element[1]))
                    and id + 1 < len(self.structure)):
                # Fix: was a bare `merge_to_previous(id+1)` — a NameError at
                # runtime, since it is a method of this class.
                self.merge_to_previous(id+1)
        self.merge_to_x_until_nextfield('accept')
        self.merge_to_x_until_nextfield('reject')
        # 4. Drop empty entries; everything still unlabelled becomes 'meta'.
        self.structure = [x for x in self.structure
                          if [x[0], remove_excessive_whitespace(x[1])]
                          != ['', '']]
        if self.structure[0][0] == '' and re_number.search(
                remove_excessive_whitespace(self.structure[0][1])):
            self.merge_to_next(0)
        for id, element in enumerate(self.structure):
            if element[0] == '':
                element[0] = 'meta'
            if (element[0] in self.regexes
                    and element[0] not in ['tour', 'tourrev']):
                if element[0] == 'question':
                    try:
                        num = re_question.search(element[1]).group(1)
                        self.structure.insert(id, ['number', num])
                    except:
                        pass
                # Strip the recognized label text from the value.
                element[1] = self.regexes[element[0]].sub('', element[1])
        # 5. Prettify: typography, number extraction, inner lists, sources.
        for id, element in enumerate(self.structure):
            # typogrify
            if element[0] != 'date':
                element[1] = recursive_typography(element[1])
            # remove question numbers
            if element[0] == 'question':
                try:
                    num = re_question.search(element[1]).group(1)
                    self.structure.insert(id, ['number', num])
                except:
                    pass
                element[1] = re_number.sub('', element[1])
            # detect inner lists ("1. ... 2. ... 3. ..." inside one value)
            mo = {m for m
                  in re.finditer(r'(\s+|^)(\d+)[\.\)]\s*(?!\d)',
                                 element[1], re.U)}
            if len(mo) > 1:
                sorted_up = sorted(mo, key=lambda m: int(m.group(2)))
                j = 0
                list_candidate = []
                # Accept only a run numbered 1, 2, 3, ... from the start.
                while j == int(sorted_up[j].group(2)) - 1:
                    list_candidate.append((j+1, sorted_up[j].group(0),
                                           sorted_up[j].start()))
                    if j+1 < len(sorted_up):
                        j += 1
                    else:
                        break
                if len(list_candidate) > 1:
                    # Questions keep inline numbers unless they are explicitly
                    # multi-part ("дуплет"/"блиц").
                    if (element[0] != 'question' or
                            (element[0] == 'question'
                             and 'дуплет' in element[1].lower()
                             or 'блиц' in element[1].lower())):
                        part = partition(element[1], [x[2] for x in
                                                      list_candidate])
                        lc = 0
                        while lc < len(list_candidate):
                            part[lc+1] = part[lc+1].replace(
                                list_candidate[lc][1], '')
                            lc += 1
                        element[1] = ([part[0], part[1:]] if part[0] != ''
                                      else part[1:])
            # turn source into list if necessary (Python 2 basestring)
            if (element[0] == 'source' and isinstance(element[1], basestring)
                    and len(re.split(r'\r?\n', element[1])) > 1):
                element[1] = [re_number.sub('', remove_excessive_whitespace(x))
                              for x in re.split(r'\r?\n', element[1])]
        # 6. Fold labelled runs into Question objects.
        final_structure = []
        current_question = {}
        for element in self.structure:
            if element[0] in set(['tour', 'question', 'meta']):
                # These labels start a new question/section: flush the
                # one being accumulated.
                if current_question != {}:
                    check_question(current_question)
                    final_structure.append(Question(**current_question))
                    current_question = {}
            if element[0] in QUESTION_LABELS:
                if element[0] in current_question:
                    try:
                        current_question[element[0]] += SEP + element[1]
                    except:
                        print('{}'.format(
                            current_question).decode('unicode_escape'))
                        pdb.set_trace()
                else:
                    current_question[element[0]] = element[1]
            else:
                final_structure.append([element[0], element[1]])
        if current_question != {}:
            check_question(current_question)
            final_structure.append(Question(**current_question))
        # 7. Expose the packed structure.
        self.structure = final_structure

    def merge_to_previous(self, index):
        """Append entry *index*'s text to the previous entry and remove it."""
        target = index - 1
        self.structure[target][1] = (
            self.structure[target][1] + SEP
            + self.structure.pop(index)[1])

    def merge_to_next(self, index):
        """Prepend entry *index*'s text to the following entry and remove it."""
        target = self.structure.pop(index)
        self.structure[index][1] = (target[1] + SEP
                                    + self.structure[index][1])

    def find_next_specific_field(self, index, fieldname):
        """Return the index of the next entry labelled *fieldname*."""
        target = index + 1
        while self.structure[target][0] != fieldname:
            target += 1
        return target

    def find_next_fieldname(self, index):
        """Return the label of the next labelled entry after *index*
        (None implicitly when *index* is the last entry)."""
        target = index + 1
        if target < len(self.structure):
            while (target < len(self.structure)-1
                   and self.structure[target][0] == ''):
                target += 1
            return self.structure[target][0]

    def merge_y_to_x(self, x, y):
        """Merge every entry after an *x* entry into it, up to the next *y*."""
        i = 0
        while i < len(self.structure):
            if self.structure[i][0] == x:
                while (i+1 < len(self.structure)
                       and self.structure[i+1][0] != y):
                    self.merge_to_previous(i+1)
            i += 1

    def merge_to_x_until_nextfield(self, x):
        """Merge unlabelled lines following an *x* entry into it, stopping
        before a run that leads into a question/answer field."""
        i = 0
        while i < len(self.structure):
            if self.structure[i][0] == x:
                while (i+1 < len(self.structure)
                       and self.structure[i+1][0] == ''
                       and self.find_next_fieldname(i)
                       not in self.BADNEXTFIELDS):
                    self.merge_to_previous(i+1)
            i += 1

    def dirty_merge_to_x_until_nextfield(self, x):
        """Like merge_to_x_until_nextfield but without the BADNEXTFIELDS
        guard — swallows every unlabelled follower."""
        i = 0
        while i < len(self.structure):
            if self.structure[i][0] == x:
                while (i+1 < len(self.structure)
                       and self.structure[i+1][0] == ''):
                    self.merge_to_previous(i+1)
            i += 1

    def swap_elements(self, x, y):
        """Swap the entries at positions *x* and *y* in place."""
        z = self.structure[y]
        self.structure[y] = self.structure[x]
        self.structure[x] = z

    def __repr__(self):
        # NOTE(review): placeholder — returns None; kept as in the original.
        '...'
        pass
class Package(object):
def __init__(self, file=None, string=None):
if not file is None and not string is None:
raise WrongPackageInitException('You must specify either | |
"""JSON Keys core functions.
JSON Key definition:
An ordered sequence of one or more JSON pointer reference tokens
(Object member key or array index) starting with a root-level
key/index and ending with a reference to some value within the
document.
The last key can optionally be Python slice syntax, where # can be
zero, a positive or negative integer:
':', '#:', ':#', '#:#', '::#', '#::#', ':#:#' or '#:#:#'
Keys are joined together by dot characters.
Examples:
name
name.last
names.0.name.last
names.2:5
"""
from copy import deepcopy
from functools import reduce
from operator import getitem
import click
from . import exceptions as exc
from .inspector import inspect_json, count_arrays
from .sequencer import Items
from .tokenizer import SLICE_RE, parse_defaults, parse_keystr
from .treecrawler import find_keys
def get_rootkey(d, *keys):
    """Re-root the JSON document at the value referenced by *keys*.

    Purpose:
        1. Point to an array of objects within the JSON document so that
           get, del and friends will operate on the properties for each
           item in the JSON array.
        2. Extract a single branch or value from a JSON document.

    Args:
        d (Mapping or Sequence): JSON encodable data (document.)
        *keys (str): JSON Keys (name, index or trailing slice.)

    Returns:
        The value referenced by *keys.

    Raises:
        KeyNotFound
        IndexError
        TypeError

    Example:
        >>> d = {'results': {'rows': [{}, {}]}}
        >>> get_rootkey(d, 'results', 'rows')
        [{}, {}]
    """
    # Translate low-level lookup failures into the package's own
    # exception types, tagged with the operation context.
    context = dict(op='rootkey', data=d, keylist=[keys])
    try:
        return select_key(d, *keys)
    except KeyError as err:
        raise exc.KeyNotFound(err, **context)
    except IndexError as err:
        raise exc.IndexOutOfRange(err, **context)
    except TypeError as err:
        raise exc.KeyTypeError(err, **context)
def list_keys(d, fullscan=False, fg_nums='yellow'):
    """Generate numbered, sorted list of keys found in JSON document.

    Purpose:
        1. Show available JSON keys.
        2. Show key #'s for JSON Keys; used as a shorthand for names.
           Using key numbers makes JSON Cut feel more like the way the
           *nix cut command works for tabular data.

    List crawls through keys looking for new key names.  It does not
    crawl through Sequences (JSON arrays); with the exception of an
    array located at the root-level of the document.

    Args:
        d (Mapping or Sequence): JSON encodable data (document)
        fullscan (bool): traverse all keys looking for new ones;
            default is to skip over previously visited key pointers.
        fg_nums (str): a 'click' supported foreground color name used
            to highlight the numbers and create a visual separation
            between numbers and values (the values will be white.)
            Supported color names: red, green, yellow, blue, magenta,
            cyan, white.

    Returns:
        Generator[str]: sorted, numbered JSON keys found in the
        document (note: a generator expression, not a list).

    See also:
        treecrawler module

    Examples:
        >>> d = [{'k1': {'k2': []}, 'k3': None}, {'k1': {'k4': []}}]
        >>> for key in list_keys(d):
        ...     click.echo(key, color=None)
        1 k1
        2 k1.k2
        3 k3

        Note: In the above example fullscan=False; 'k1.k4' does not show
        up because the key selector 'k1' has already been visited when
        it evaluates the 2nd item in the array, so it skips crawling
        through the child nodes in this 2nd instance.

        >>> for key in list_keys(d, fullscan=True):
        ...     click.echo(key, color=None)
        1 k1
        2 k1.k2
        3 k1.k4
        4 k3

        Note: When fullscan=True the function will crawl through all
        JSON objects looking for any new keys; even if the same full key
        selector name has been previously visited.

        >>> d = {'k1': {'k2': [{'k3': None}]}, 'k4': 5}
        >>> for key in list_keys(d, fullscan=True):
        ...     click.echo(key, color=None)
        1 k1
        2 k1.k2
        3 k4

        The reason is that the --list option enumerates items so that
        they can be used as a quick way of specifying JSON selectors
        from the command-line; supporting enumerated keys nested inside
        of nested indexes adds unnecessary complexity, and at least to
        this point there haven't been any real-world use cases to
        justify the need for such a feature.

        Note: You can still crawl through nested keys in nested indexes
        and view them using --inspect; you can also access them
        explicitly using key names & indexes — you just can't treat the
        results as numbered shortcuts as you do with --list for
        specifying key paths in the command-line.
    """
    keys = find_keys(d, fullscan)
    # Right-justify the numbers so key names line up in one column.
    padding = len(str(len(keys)))
    numbers = (str(i).rjust(padding) for i in range(1, len(keys) + 1))
    numbers = (click.style(i, fg=fg_nums) for i in numbers)
    return (n + ' ' + i for n, i in zip(numbers, keys))
def get_item(d, key):
    """Try to get item using the key, if fails try as an index or slice.

    Args:
        d (Mapping or Sequence): JSON encodable data (document)
        key (str): JSON Keys.

    Returns:
        The key's value retrieved from the provided data (document)

    Raises:
        KeyError
        IndexError
        TypeError

    Examples:
        >>> get_item({'0': 'a key'}, '0')
        'a key'
        >>> get_item(['an index'], '0')
        'an index'
    """
    try:
        return d[key]
    except TypeError:
        # A str key failed on a Sequence: reinterpret it as an index
        # or a slice expression.
        if key.isdigit():
            return d[int(key)]
        if SLICE_RE.match(key):
            if ':' not in key:
                return d[int(key)]
            bounds = (int(piece) if piece else None
                      for piece in key.split(':'))
            return d[slice(*bounds)]
        raise
def select_key(d, *keys, default=None, no_default=False):
    """Get a nested value in a Mapping given the list of keys.

    Args:
        d (Mapping or Sequence): JSON encodable data (document)
        *keys (str): JSON Keys (name, index or trailing slice)
        default: Default value if key or index is not found.
        no_default (bool): If True, raise KeyNotFound when the key is
            not found or the index is out of range otherwise it uses
            the 'default' value.

    Returns:
        The value in the document pointed to by the JSON keys.

    Raises:
        KeyNotFound: Only raised if the no_default option is set.
        KeyTypeError: When trying to use a key on a Sequence
            or an index on a Mapping.
        IndexOutOfRange: When trying to use an index number that is
            greater than the length of the Sequence.

    Examples:
        >>> d = {'k1': {'k2': 'Found Key/Value'}}
        >>> select_key(d, 'k1', 'k2')
        'Found Key/Value'
        >>> print(select_key(d, 'k1', 'missing key'))
        None

        If no_default is True it will raise a KeyNotFound error instead.

        >>> select_key(d, 'k1', 'missing key', default='Default Value')
        'Default Value'
        >>> d = {'k1': [{'k2': 'Found Index/Value'}]}
        >>> select_key(d, 'k1', '0', 'k2')
        'Found Index/Value'
    """
    try:
        # Thread the document through get_item one key at a time.
        return reduce(get_item, keys, d)
    except KeyError as e:
        if no_default:
            raise exc.KeyNotFound(e)
        return default
    except IndexError as e:
        raise exc.IndexOutOfRange(e)
    except TypeError as e:
        raise exc.KeyTypeError(e)
def into_key(*keys, fullpath=False):
    """Generate target key name for the data.

    Args:
        *keys (str): JSON Keys (name, index or trailing slice)
        fullpath (bool): Use the full JSON Key path for the target name.

    Returns:
        str: Key name to store the data in.

    Examples:
        (Fixed: the keys are passed as separate arguments, not a list —
        the previous examples would have returned the list itself.)

        >>> into_key('k1', 'k2')
        'k2'
        >>> into_key('k1', 'k2', fullpath=True)
        'k1.k2'
    """
    return '.'.join(keys) if fullpath else keys[-1]
def get_items(d, *keylists, fullpath=False, any=True, n=0):
    """Get multiple nested items from a dict given the keys.

    Args:
        d (Mapping or Sequence): JSON encodable data (document)
        *keylists (List[str]): JSON Keys (name, index or trailing slice)
        fullpath (bool): Use the full JSON Key path in the target name.
        any (bool): If True get any instance of the JSON Key value that
            exists; otherwise raise KeyNotFound if the key is missing.
            (The name shadows the builtin but is part of the public API.)
        n (int): Data item number being processed; shown to user in
            exception handling.

    Returns:
        dict: All Key/Values in data referenced by JSON Keys

    Raises:
        KeyNotFound: Only raised if the 'any' option is not set.
        KeyTypeError: When trying to use a key on a Sequence
            or an index on a Mapping.
        IndexOutOfRange: When trying to use an index past the end of a
            Sequence.

    Examples:
        >>> d = {'k1': {'k2': 'item1'}, 'k3': 'item2'}
        >>> get_items(d, ['k1', 'k2'], ['k3'])
        {'k2': 'item1', 'k3': 'item2'}
        >>> get_items(d, ['k1', 'k2'], ['k3'], fullpath=True)
        {'k1.k2': 'item1', 'k3': 'item2'}
    """
    collected = {}
    # Context attached to every re-raised package exception.
    context = dict(op='get', itemnum=n, data=d, keylist=keylists)
    for keylist in keylists:
        target = into_key(*keylist, fullpath=fullpath)
        try:
            collected[target] = select_key(d, *keylist, no_default=True)
        except exc.KeyNotFound as err:
            if not any:
                raise exc.KeyNotFound(err, **context)
        except exc.IndexOutOfRange as err:
            raise exc.IndexOutOfRange(err, **context)
        except exc.KeyTypeError as err:
            raise exc.KeyTypeError(err, **context)
    return collected
def get_defaults(d, *defaults, fullpath=False, n=0):
"""Get nested items from keys, set default value if key not found.
Args:
d (Mapping or Sequence): JSON encodable data (document)
*defaults (List[Tuple(List[str], str)]):
(List[str]) - JSON Keys (name, index or trailing slice)
| |
# -*- encoding: utf-8 -*-
# WARNING: This file is managed by Puppet. Changes to this file will be overwritten.
# Script Location: /usr/lib/python2.6/site-packages/cloudbackup-1.0.01-py2.6.egg/cloudbackup/backends.py
# File Mode: 755
import tempfile
import sys
import subprocess
import os
import math
import logging
import shelve
import json
import socket
import httplib
import getpass
import boto
import re
import psutil
from datetime import date
from cStringIO import StringIO
from contextlib import contextmanager
from boto.s3.key import Key
from boto.glacier.exceptions import UnexpectedHTTPResponseError
from boto.exception import S3ResponseError
from boto.s3.lifecycle import Lifecycle, Expiration, Rule, Transition, Expiration
from cloudbackup.conf import config, decrypt_encrypted_pass_file, set_tmp_dir, DEFAULT_LOCATION, CONFIG_FILE
from cloudbackup.models import Inventory, Jobs
log = logging.getLogger(__name__)
tempfile.tempdir = set_tmp_dir()
class cloudbackupBackend(object):
    """Handle Configuration for Backends.

    An explicitly supplied ``conf`` mapping takes precedence; otherwise
    the named ``profile`` is looked up in the module-level ``config``.
    Problems are logged (not raised) so subclasses decide how to fail.

    :type conf: dict
    :param conf: Custom configuration
    :type profile: str
    :param profile: Profile name
    """
    def __init__(self, conf=None, profile="default"):
        # Fix: default was the shared mutable `{}` (mutable default
        # argument pitfall); `None` keeps the same falsy behaviour.
        self.conf = conf
        if not conf:
            self.conf = config.get(profile)
            if not self.conf:
                log.error("No {0} profile defined in {1}.".format(profile, CONFIG_FILE))
                # Fix: fall back to an empty dict so the membership checks
                # below log a clear error instead of raising TypeError on None.
                self.conf = {}
        if "access_key" not in self.conf or "secret_key" not in self.conf:
            log.error("Missing (AWS and/or OpenStack) access_key/secret_key in {0} profile ({1}).".format(profile, CONFIG_FILE))
class S3Backend(cloudbackupBackend):
"""Backend to handle S3 upload/download."""
    def __init__(self, conf={}, profile="default"):
        """Connect to S3 and bind the configured bucket, creating it on demand."""
        cloudbackupBackend.__init__(self, conf, profile)
        con = boto.connect_s3(self.conf["access_key"], self.conf["secret_key"])
        region_name = self.conf["region_name"]
        # DEFAULT_LOCATION ==> "us-east-1"; boto expects "" for the classic region.
        if region_name == DEFAULT_LOCATION:
            region_name = ""
        try:
            self.bucket = con.get_bucket(self.conf["s3_bucket"])
        except S3ResponseError, e:  # Python 2 except syntax (file targets py2.6)
            if e.code == "NoSuchBucket":
                # Bucket is missing: create it in the requested region.
                log.info('S3 Bucket [ ' + str(self.conf["s3_bucket"]) + ' ] does not exist...')
                log.info('Creating S3 Bucket [ ' + str(self.conf["s3_bucket"]) + ' ]...')
                try:
                    self.bucket = con.create_bucket(self.conf["s3_bucket"], location=region_name)
                except:
                    log.info('An error occured whilst trying to create bucket: [' + str(self.conf["s3_bucket"]) + ']' )
                    raise Exception
            else:
                # Any other S3 error: log connection details, then re-raise.
                # NOTE(review): this logs the secret key in clear text —
                # a credential leak; consider redacting.
                log.info('An error occured whilst trying to connect to bucket: [' + str(self.conf["s3_bucket"]) + ']' + "\n" + 'Access key: ' + str(self.conf["access_key"]) + "\n" + 'Secret key: ' + str( self.conf["secret_key"]) + "\n" + 'Region name: ' + str(self.conf["region_name"]) )
                log.info('Check that you can connect to AWS S3 by running script: [] on the command line.')
                raise e
        # Cached for callers that need the bucket name / config key.
        self.container = self.conf["s3_bucket"]
        self.container_key = "s3_bucket"
def return_rotation_policy_dict(self):
# day_to_seconds = 86400 # 1 day has 86400
# month_to_seconds = 2592000 # 30 days
# year_to_seconds = 31536000 # 365 days
# rotation_policy_dict = { s:1, m:60, h:3600, D:86400, W:604800, M:2592000, Y:31536000 }
# return{ "daily":7, "weekly":28, "monthly":336, "yearly":365 }
return {"d":7, "w":28, "m":336, "y":365}
def life_cycle_management_enforcer(self):
bucket = self.bucket
lifecycle = Lifecycle()
log.info("Checking for life cycle management rules.")
try:
lifecycle = bucket.get_lifecycle_config()
except:
log.info("No life cycle management rule configured.")
to_glacier = Transition(days=7, storage_class='GLACIER')
ruleid_year = 'year_long_rule' # Unique identifier for the rule
ruleid_month = 'month_long_rule' # Unique identifier for the rule
ruleid_weekly = 'week_long_rule' # Unique identifier for the rule
ruleid_daily = 'day_long_rule' # Unique identifier for the rule
dict = {
ruleid_year: {'prefix': 'year', 'expiration':Expiration(days=365), 'transition': to_glacier},
ruleid_month: {'prefix': 'month', 'expiration':Expiration(days=336), 'transition': to_glacier},
ruleid_weekly: {'prefix': 'weekly', 'expiration':Expiration(days=28), 'transition': to_glacier},
ruleid_daily: {'prefix': 'daily', 'expiration':Expiration(days=7), 'transition': None},
}
number_life_cycle_rules = len( lifecycle[:] )
if len( lifecycle[:] ) == 0:
log.info("Creating Default Life cycle management rules.")
for x in dict.keys():
lifecycle.add_rule(x, prefix=dict[x]['prefix'], expiration=dict[x]['expiration'] , transition=dict[x]['transition'])
bucket.configure_lifecycle(lifecycle)
log.info("Successfully created default Life cycle management rules.")
array_list = []
for x in range(0, number_life_cycle_rules):
array_list.append(lifecycle[x].id)
if len(array_list) != 0:
for x in dict.keys():
if x in array_list:
log.info("Life Cycle Rule: [" + str(x) + "] exists!")
pass
else:
log.info("Life Cycle Rule: [" + str(x) + "] is missing!")
log.info("Redefining Life Cycle Rule: [" + str(x) + "]")
bucket.delete_lifecycle_configuration()
lifecycle.add_rule(x, prefix=dict[x]['prefix'], expiration=dict[x]['expiration'] , transition=dict[x]['transition'] )
return_true_on_success = bucket.configure_lifecycle(lifecycle)
lifecycle = bucket.get_lifecycle_config()
    def show_s3_contents_in_real_time(self):
        """Log every object in the bucket with its size, storage class and
        backup metadata (originating host and OS user)."""
        bucket_list = self.bucket.list()
        log.info('Real time S3 object contents in bucket [' + str(self.container) + ']: ')
        for file_obj in bucket_list:
            filename_keyString = str(file_obj.key)
            # Re-fetch the key to get its full attributes and user metadata.
            akey = self.bucket.get_key(file_obj.name)
            backupsize = akey.size
            storage_class = akey.storage_class
            hostame_4rm_s3_metadata = akey.get_metadata("meta_hostname")
            os_system_user_4rm_s3_metadata = akey.get_metadata("meta_os_system_user")
            log.info('Host Out: [' + str(hostame_4rm_s3_metadata) + ']: OS System User: [' + str(os_system_user_4rm_s3_metadata) + ']: Backup Filesize: [' + str( backupsize ) + ' Bytes]: Storage Class: [' + str(storage_class) + '] Backup File: [' + str(filename_keyString) + '].' )
    def decompress_decrypt_downloader(self, bucket, keyname, decrypt_pass):
        """Stream an S3 object through openssl decryption into tar extraction.

        The object is piped straight from S3 into an `openssl enc -d`
        subprocess whose stdout feeds `tar xzf -` in the current working
        directory, so nothing encrypted is staged on disk.
        """
        # using logging module for proccess count:
        # NOTE(review): this shadows the module-level `log` locally and adds
        # another stderr handler on every call (duplicate log lines).
        log = logging.getLogger(__name__)
        log.setLevel(logging.DEBUG)
        format = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
        # Log all notice information to stderr
        log_to_console = logging.StreamHandler(sys.stderr)
        log_to_console.setFormatter(format)
        log.addHandler(log_to_console)
        def cb(complete,total):
            """Download callback to log download percentage."""
            percent = int(complete * 100.0 / total)
            log.info("Download completion: {0}%".format(percent))
        k = Key(bucket)
        k.key = keyname
        try:
            # Pass the passphrase through the environment so it does not
            # appear in the process argument list.
            os.environ['decrypt_pass'] = decrypt_pass
            log.info("Commencing Download :: ... ")
            openssl = subprocess.Popen(
                ['openssl', 'enc', '-base64', '-d', '-aes-256-cbc', '-nosalt', '-pass', 'env:decrypt_pass'], #### new use
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
            )
            tar = subprocess.Popen(
                ['tar', 'xzf', '-' ],
                cwd=os.getcwd(),
                close_fds=True,
                stdin=openssl.stdout,
            )
            # Stream the object into openssl's stdin, reporting progress.
            k.get_contents_to_file(openssl.stdin, cb=cb, num_cb=10 )
        except Exception, err:  # Python 2 except syntax
            print (err)
        except:
            # NOTE(review): only BaseException subclasses (KeyboardInterrupt,
            # SystemExit) can reach this handler, since Exception is caught
            # above — the cleanup here rarely runs.
            print(str(keyname) + "dowload: FAILED")
            openssl.kill()
            tar.kill()
            raise
        # Close the pipe ends so tar sees EOF, then reap openssl.
        openssl.stdin.close()
        openssl.stdout.close()
        openssl.wait()
def cb(self, complete, total):
"""Upload/download progress percentage callback function."""
percent = int(complete * 100.0 / total)
log.info("Upload completion: {0}%".format(percent))
def download(self, keyname, is_file_encrypted=True):
    """Download a backup object from S3, restoring from Glacier first if needed.

    If the object (or its companion password file) is in the Glacier storage
    class, a 3-day restore to S3 is initiated and the method returns so the
    user can retry later.  Encrypted backups are decrypted with the password
    file referenced by the object's metadata and streamed through
    ``decompress_decrypt_downloader``; plain backups are written directly to
    ``keyname`` in the current working directory.

    :param keyname: S3 key of the backup object to fetch.
    :param is_file_encrypted: whether the backup was encrypted at upload time.
    :raises RuntimeError: when an encrypted backup has no password-file
        metadata attached.
    """
    decrypted_pass = None
    bucket = self.bucket
    akey = bucket.get_key(keyname)
    if not akey:
        # Key no longer exists in the bucket; advise pruning the stale entry.
        log.info("Something isn't right!!!")
        log.info('Backup file ===> [' + str(keyname) + str(']: no longer exist.') )
        log.info('You may want to remove this backup entry from the list of available backups - using: ' )
        log.info(str(re.sub(r"\..*", "", __name__)) + str(' delete ') + str(keyname))
        log.info('Exiting!')
        return
    encrypted_pass_keyname = akey.get_metadata("meta_encrypted_pass_filename")

    def cb(complete, total):
        """Download callback to log download percentage."""
        percent = int(complete * 100.0 / total)
        log.info("Download completion: {0}%".format(percent))

    def restore_file_from_glacier_to_s3(bucket, keyname):
        """Initiate a Glacier->S3 restore; return True while a restore is pending."""
        akey = bucket.get_key(keyname)
        storage_class = akey.storage_class
        matchObj = re.match(r'(^glacier)', storage_class, re.M | re.I)
        if matchObj:
            log.info("File [" + str(keyname) + "] now reside in the Glacier Storage Class.")
            log.info("Restoring file [" + str(keyname) + "] from glacier to s3 for 3 days.")
            try:
                akey.restore(days=3)
            except Exception as e:
                log.error('Unable to initiate restoration: [' + str(e) + ']')
                log.error('Exiting...')
                raise
            log.info('Restoration from Glacier to S3 initialized successfully!')
            log.info('Restoration takes between 3 to 5 hours.')
            log.info('Kindly return after 5 hours and re-run the cloudbackup restore command')
            """
            ### TODO: Enable Email notification after 5 hours:
            ### By creating a schedule/cron job
            try:
                input = raw_input
            except NameError:
                pass
            command = str( input("Enter 'Y' to be notified or 'N' to disable notification: ") )
            matchObj = re.match( r'(^Y)', command, re.M|re.I)
            if matchObj:
                log.info('Enabling Restoration Notification.')
            else:
                log.info('Restoration notification is disabled')
            """
            return True
        else:
            return False

    if encrypted_pass_keyname:
        is_pass_file_in_glacier = restore_file_from_glacier_to_s3(bucket, encrypted_pass_keyname)
        is_file_in_glacier = restore_file_from_glacier_to_s3(bucket, keyname)
    elif is_file_encrypted is False:
        is_pass_file_in_glacier = restore_file_from_glacier_to_s3(bucket, keyname)
        is_file_in_glacier = is_pass_file_in_glacier
    else:
        log.error("Unable to extract password filename from Object metadata!")
        log.error('Exiting!')
        # BUG FIX: a bare `raise` here had no active exception and would itself
        # fail with "No active exception to re-raise"; raise explicitly instead.
        raise RuntimeError("Unable to extract password filename from Object metadata!")
    if is_file_in_glacier or is_pass_file_in_glacier:
        # Restore was initiated; nothing to download until it completes.
        return
    if is_file_encrypted:
        k = Key(self.bucket)
        k.key = encrypted_pass_keyname
        # Temporary file for the encrypted password; PID-suffixed to avoid clashes.
        pass_tmpfile = str("/tmp/") + str(os.path.basename(__file__)) + str(os.getpid())
        k.get_contents_to_filename(pass_tmpfile)
        decrypted_pass = decrypt_encrypted_pass_file(pass_tmpfile)
        if os.path.isfile(pass_tmpfile):
            os.remove(pass_tmpfile)
        if decrypted_pass == '':
            log.error("Unable to extract password for decryption.")
            return
        self.decompress_decrypt_downloader(bucket, keyname, decrypted_pass)
    else:
        k = Key(self.bucket)
        k.key = keyname
        k.get_contents_to_filename(keyname, cb=cb, num_cb=10)
    log.info('File: [' + str(keyname) + str('] restored!') )
def iter_chunks(self, file_stream, blocksize=1073741824):
    """Yield successive chunks read from *file_stream*.

    :param file_stream: any object with a ``read(size)`` method.
    :param blocksize: desired chunk size in bytes (default 1 GiB).  Non-integer
        values fall back to the default; the size is capped at 40% of free RAM
        when it would exceed 80% of free RAM.
    """
    # Normalise the chunk size ONCE instead of re-checking isinstance on every
    # iteration.  This also closes a gap where a non-integer blocksize skipped
    # the low-memory cap entirely.
    if not isinstance(blocksize, int):
        blocksize = 1073741824  # 1 GiB default
    free_ram = psutil.virtual_memory().free
    if blocksize > 0.8 * free_ram:
        log.info('Low Memory: Specified upload chunk size truncated.')
        blocksize = int(0.4 * free_ram)
    while True:
        block = file_stream.read(blocksize)
        if not block:
            break
        yield block
@contextmanager
def compress_encrypt(self, full_path_to_backup_file, PASS_4_ENCRYPTION):
    """Context manager yielding a stream of tar+gzip'd, AES-256-CBC encrypted data.

    Pipes ``tar czf`` through ``openssl enc`` and yields openssl's stdout for
    the caller to read.  The passphrase is handed to openssl via the
    environment so it never appears on the command line.

    :param full_path_to_backup_file: file or directory to archive.
    :param PASS_4_ENCRYPTION: passphrase used for the symmetric encryption.
    """
    os.environ['PASS_4_ENCRYPTION'] = PASS_4_ENCRYPTION
    full_path_to_backup_file = full_path_to_backup_file.rstrip('/')
    tar = subprocess.Popen(
        ['tar', 'czf', '-', os.path.basename(full_path_to_backup_file)],
        cwd=os.path.dirname(full_path_to_backup_file),
        stdout=subprocess.PIPE,
    )
    openssl = subprocess.Popen(
        ['openssl', 'enc', '-base64', '-e', '-aes-256-cbc', '-nosalt', '-pass', 'env:PASS_4_ENCRYPTION'],
        stdin=tar.stdout,
        stdout=subprocess.PIPE,
    )
    # BUG FIX: close the parent's copy of tar's stdout so tar receives SIGPIPE
    # if openssl exits early (standard subprocess pipeline idiom); otherwise
    # the fd leaks and tar can block forever.
    tar.stdout.close()
    try:
        yield openssl.stdout
    except:
        openssl.kill()
        tar.kill()
        raise
    finally:
        openssl.wait()
        tar.wait()
def upload(self, keyname, full_path_to_backup_file, blocksize, PASS_4_ENCRYPTION, encrypted_pass, encrypted_pass_stored_filename, **kwargs):
self.life_cycle_management_enforcer()
backup_expiry_date = os.getenv('expiry_date', 'None')
hostname = socket.gethostname()
os_system_user = getpass.getuser()
# S3 boto connect credentials ####
k = Key(self.bucket)
k.key = keyname
# Setting the call back function for the upload of the encrypted password file: ###
if kwargs.get("cb", True):
upload_kwargs = dict(cb=self.cb, num_cb=10)
# Extract rotation policy ####
| |
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
return result
def from_map(self, m: dict = None):
    """Populate this model from a plain dict; returns self for chaining."""
    payload = m or dict()
    if payload.get('headers') is not None:
        self.headers = payload.get('headers')
    return self
class GetGeneralFormCreatedDeptSummaryHeaders(TeaModel):
    """Custom request headers for the GetGeneralFormCreatedDeptSummary API."""

    # (python attribute, wire key) pairs shared by to_map/from_map.
    _FIELDS = (
        ('common_headers', 'commonHeaders'),
        ('x_acs_dingtalk_access_token', 'x-acs-dingtalk-access-token'),
    )

    def __init__(
        self,
        common_headers: Dict[str, str] = None,
        x_acs_dingtalk_access_token: str = None,
    ):
        # Extra pass-through HTTP headers.
        self.common_headers = common_headers
        # DingTalk access-token header value.
        self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token

    def validate(self):
        """No constraints to enforce on plain headers."""
        pass

    def to_map(self):
        """Serialise the set fields into a plain dict."""
        mapped = super().to_map()
        if mapped is not None:
            return mapped
        return {key: getattr(self, attr)
                for attr, key in self._FIELDS
                if getattr(self, attr) is not None}

    def from_map(self, m: dict = None):
        """Populate fields from a plain dict; returns self."""
        payload = m or dict()
        for attr, key in self._FIELDS:
            if payload.get(key) is not None:
                setattr(self, attr, payload.get(key))
        return self
class GetGeneralFormCreatedDeptSummaryRequest(TeaModel):
    """Paged request parameters for GetGeneralFormCreatedDeptSummary."""

    # (python attribute, wire key) pairs shared by to_map/from_map.
    _FIELDS = (
        ('next_token', 'nextToken'),
        ('max_results', 'maxResults'),
    )

    def __init__(
        self,
        next_token: int = None,
        max_results: int = None,
    ):
        # Starting data cursor for pagination.
        self.next_token = next_token
        # Number of records returned per page.
        self.max_results = max_results

    def validate(self):
        """No constraints to enforce."""
        pass

    def to_map(self):
        """Serialise the set fields into a plain dict."""
        mapped = super().to_map()
        if mapped is not None:
            return mapped
        return {key: getattr(self, attr)
                for attr, key in self._FIELDS
                if getattr(self, attr) is not None}

    def from_map(self, m: dict = None):
        """Populate fields from a plain dict; returns self."""
        payload = m or dict()
        for attr, key in self._FIELDS:
            if payload.get(key) is not None:
                setattr(self, attr, payload.get(key))
        return self
class GetGeneralFormCreatedDeptSummaryResponseBodyData(TeaModel):
    """One per-department row of the form-creation summary."""

    # (python attribute, wire key) pairs shared by to_map/from_map.
    _FIELDS = (
        ('dept_id', 'deptId'),
        ('dept_name', 'deptName'),
        ('general_form_create_cnt_1d', 'generalFormCreateCnt1d'),
    )

    def __init__(
        self,
        dept_id: str = None,
        dept_name: str = None,
        general_form_create_cnt_1d: str = None,
    ):
        # Department id.
        self.dept_id = dept_id
        # Department name.
        self.dept_name = dept_name
        # Cumulative smart-form creations over the last day.
        self.general_form_create_cnt_1d = general_form_create_cnt_1d

    def validate(self):
        """No constraints to enforce."""
        pass

    def to_map(self):
        """Serialise the set fields into a plain dict."""
        mapped = super().to_map()
        if mapped is not None:
            return mapped
        return {key: getattr(self, attr)
                for attr, key in self._FIELDS
                if getattr(self, attr) is not None}

    def from_map(self, m: dict = None):
        """Populate fields from a plain dict; returns self."""
        payload = m or dict()
        for attr, key in self._FIELDS:
            if payload.get(key) is not None:
                setattr(self, attr, payload.get(key))
        return self
class GetGeneralFormCreatedDeptSummaryResponseBody(TeaModel):
    """Paged response body: per-department smart-form creation summary."""

    def __init__(
        self,
        data: List[GetGeneralFormCreatedDeptSummaryResponseBodyData] = None,
        next_token: int = None,
        has_more: bool = None,
    ):
        # Summary rows, one per department.
        self.data = data
        # Cursor for the next page request.
        self.next_token = next_token
        # Whether more pages are available.
        self.has_more = has_more

    def validate(self):
        """Recursively validate each nested row model."""
        for row in self.data or []:
            if row:
                row.validate()

    def to_map(self):
        """Serialise to a plain dict; 'data' is always present, even if empty."""
        mapped = super().to_map()
        if mapped is not None:
            return mapped
        result = {'data': []}
        if self.data is not None:
            result['data'] = [row.to_map() if row else None for row in self.data]
        if self.next_token is not None:
            result['nextToken'] = self.next_token
        if self.has_more is not None:
            result['hasMore'] = self.has_more
        return result

    def from_map(self, m: dict = None):
        """Populate from a plain dict, rebuilding nested rows; returns self."""
        payload = m or dict()
        self.data = []
        rows = payload.get('data')
        if rows is not None:
            for row in rows:
                self.data.append(GetGeneralFormCreatedDeptSummaryResponseBodyData().from_map(row))
        if payload.get('nextToken') is not None:
            self.next_token = payload.get('nextToken')
        if payload.get('hasMore') is not None:
            self.has_more = payload.get('hasMore')
        return self
class GetGeneralFormCreatedDeptSummaryResponse(TeaModel):
    """Full API response: transport headers plus the deserialised body."""

    def __init__(
        self,
        headers: Dict[str, str] = None,
        body: GetGeneralFormCreatedDeptSummaryResponseBody = None,
    ):
        # HTTP response headers.
        self.headers = headers
        # Parsed response body model.
        self.body = body

    def validate(self):
        """Headers and body are both required; the body validates recursively."""
        self.validate_required(self.headers, 'headers')
        self.validate_required(self.body, 'body')
        if self.body:
            self.body.validate()

    def to_map(self):
        """Serialise headers and the nested body into a plain dict."""
        mapped = super().to_map()
        if mapped is not None:
            return mapped
        result = dict()
        if self.headers is not None:
            result['headers'] = self.headers
        if self.body is not None:
            result['body'] = self.body.to_map()
        return result

    def from_map(self, m: dict = None):
        """Populate from a plain dict, rebuilding the body model; returns self."""
        payload = m or dict()
        if payload.get('headers') is not None:
            self.headers = payload.get('headers')
        if payload.get('body') is not None:
            self.body = GetGeneralFormCreatedDeptSummaryResponseBody().from_map(payload['body'])
        return self
class GetCalenderSummaryHeaders(TeaModel):
    """Custom request headers for the GetCalenderSummary API."""

    # (python attribute, wire key) pairs shared by to_map/from_map.
    _FIELDS = (
        ('common_headers', 'commonHeaders'),
        ('x_acs_dingtalk_access_token', 'x-acs-dingtalk-access-token'),
    )

    def __init__(
        self,
        common_headers: Dict[str, str] = None,
        x_acs_dingtalk_access_token: str = None,
    ):
        # Extra pass-through HTTP headers.
        self.common_headers = common_headers
        # DingTalk access-token header value.
        self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token

    def validate(self):
        """No constraints to enforce on plain headers."""
        pass

    def to_map(self):
        """Serialise the set fields into a plain dict."""
        mapped = super().to_map()
        if mapped is not None:
            return mapped
        return {key: getattr(self, attr)
                for attr, key in self._FIELDS
                if getattr(self, attr) is not None}

    def from_map(self, m: dict = None):
        """Populate fields from a plain dict; returns self."""
        payload = m or dict()
        for attr, key in self._FIELDS:
            if payload.get(key) is not None:
                setattr(self, attr, payload.get(key))
        return self
class GetCalenderSummaryResponseBody(TeaModel):
    """Response body carrying the one-day calendar-creation headcount."""

    def __init__(
        self,
        calendar_create_user_cnt: str = None,
    ):
        # Cumulative number of users who created calendar events in the last day.
        self.calendar_create_user_cnt = calendar_create_user_cnt

    def validate(self):
        """No constraints to enforce."""
        pass

    def to_map(self):
        """Serialise the set field into a plain dict."""
        mapped = super().to_map()
        if mapped is not None:
            return mapped
        result = dict()
        if self.calendar_create_user_cnt is not None:
            result['calendarCreateUserCnt'] = self.calendar_create_user_cnt
        return result

    def from_map(self, m: dict = None):
        """Populate the field from a plain dict; returns self."""
        payload = m or dict()
        if payload.get('calendarCreateUserCnt') is not None:
            self.calendar_create_user_cnt = payload.get('calendarCreateUserCnt')
        return self
class GetCalenderSummaryResponse(TeaModel):
    """Full API response: transport headers plus the deserialised body."""

    def __init__(
        self,
        headers: Dict[str, str] = None,
        body: GetCalenderSummaryResponseBody = None,
    ):
        # HTTP response headers.
        self.headers = headers
        # Parsed response body model.
        self.body = body

    def validate(self):
        """Headers and body are both required; the body validates recursively."""
        self.validate_required(self.headers, 'headers')
        self.validate_required(self.body, 'body')
        if self.body:
            self.body.validate()

    def to_map(self):
        """Serialise headers and the nested body into a plain dict."""
        mapped = super().to_map()
        if mapped is not None:
            return mapped
        result = dict()
        if self.headers is not None:
            result['headers'] = self.headers
        if self.body is not None:
            result['body'] = self.body.to_map()
        return result

    def from_map(self, m: dict = None):
        """Populate from a plain dict, rebuilding the body model; returns self."""
        payload = m or dict()
        if payload.get('headers') is not None:
            self.headers = payload.get('headers')
        if payload.get('body') is not None:
            self.body = GetCalenderSummaryResponseBody().from_map(payload['body'])
        return self
class GetAllLabelableDeptsHeaders(TeaModel):
    """Custom request headers for the GetAllLabelableDepts API."""

    # (python attribute, wire key) pairs shared by to_map/from_map.
    _FIELDS = (
        ('common_headers', 'commonHeaders'),
        ('x_acs_dingtalk_access_token', 'x-acs-dingtalk-access-token'),
    )

    def __init__(
        self,
        common_headers: Dict[str, str] = None,
        x_acs_dingtalk_access_token: str = None,
    ):
        # Extra pass-through HTTP headers.
        self.common_headers = common_headers
        # DingTalk access-token header value.
        self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token

    def validate(self):
        """No constraints to enforce on plain headers."""
        pass

    def to_map(self):
        """Serialise the set fields into a plain dict."""
        mapped = super().to_map()
        if mapped is not None:
            return mapped
        return {key: getattr(self, attr)
                for attr, key in self._FIELDS
                if getattr(self, attr) is not None}

    def from_map(self, m: dict = None):
        """Populate fields from a plain dict; returns self."""
        payload = m or dict()
        for attr, key in self._FIELDS:
            if payload.get(key) is not None:
                setattr(self, attr, payload.get(key))
        return self
class GetAllLabelableDeptsResponseBodyDataPartnerLabelVOLevel1(TeaModel):
    """Level-1 partner-label value object."""

    # (python attribute, wire key) pairs shared by to_map/from_map.
    _FIELDS = (
        ('label_id', 'labelId'),
        ('label_name', 'labelName'),
        ('level_num', 'levelNum'),
    )

    def __init__(
        self,
        label_id: int = None,
        label_name: str = None,
        level_num: int = None,
    ):
        # Partner-type id.
        self.label_id = label_id
        # Partner-type name.
        self.label_name = label_name
        # Partner-type hierarchy level.
        self.level_num = level_num

    def validate(self):
        """No constraints to enforce."""
        pass

    def to_map(self):
        """Serialise the set fields into a plain dict."""
        mapped = super().to_map()
        if mapped is not None:
            return mapped
        return {key: getattr(self, attr)
                for attr, key in self._FIELDS
                if getattr(self, attr) is not None}

    def from_map(self, m: dict = None):
        """Populate fields from a plain dict; returns self."""
        payload = m or dict()
        for attr, key in self._FIELDS:
            if payload.get(key) is not None:
                setattr(self, attr, payload.get(key))
        return self
class GetAllLabelableDeptsResponseBodyDataPartnerLabelVOLevel2(TeaModel):
    """Level-2 partner-label value object."""

    # (python attribute, wire key) pairs shared by to_map/from_map.
    _FIELDS = (
        ('label_id', 'labelId'),
        ('label_name', 'labelName'),
        ('level_num', 'levelNum'),
    )

    def __init__(
        self,
        label_id: int = None,
        label_name: str = None,
        level_num: int = None,
    ):
        # Partner-type id.
        self.label_id = label_id
        # Partner-type name.
        self.label_name = label_name
        # Partner-type hierarchy level.
        self.level_num = level_num

    def validate(self):
        """No constraints to enforce."""
        pass

    def to_map(self):
        """Serialise the set fields into a plain dict."""
        mapped = super().to_map()
        if mapped is not None:
            return mapped
        return {key: getattr(self, attr)
                for attr, key in self._FIELDS
                if getattr(self, attr) is not None}

    def from_map(self, m: dict = None):
        """Populate fields from a plain dict; returns self."""
        payload = m or dict()
        for attr, key in self._FIELDS:
            if payload.get(key) is not None:
                setattr(self, attr, payload.get(key))
        return self
class GetAllLabelableDeptsResponseBodyDataPartnerLabelVOLevel3(TeaModel):
    """Level-3 partner-label value object."""

    # (python attribute, wire key) pairs shared by to_map/from_map.
    _FIELDS = (
        ('label_id', 'labelId'),
        ('label_name', 'labelName'),
        ('level_num', 'levelNum'),
    )

    def __init__(
        self,
        label_id: int = None,
        label_name: str = None,
        level_num: int = None,
    ):
        # Partner-type id.
        self.label_id = label_id
        # Partner-type name.
        self.label_name = label_name
        # Partner-type hierarchy level.
        self.level_num = level_num

    def validate(self):
        """No constraints to enforce."""
        pass

    def to_map(self):
        """Serialise the set fields into a plain dict."""
        mapped = super().to_map()
        if mapped is not None:
            return mapped
        return {key: getattr(self, attr)
                for attr, key in self._FIELDS
                if getattr(self, attr) is not None}

    def from_map(self, m: dict = None):
        """Populate fields from a plain dict; returns self."""
        payload = m or dict()
        for attr, key in self._FIELDS:
            if payload.get(key) is not None:
                setattr(self, attr, payload.get(key))
        return self
class GetAllLabelableDeptsResponseBodyDataPartnerLabelVOLevel4(TeaModel):
def __init__(
self,
label_id: int = None,
label_name: str = None,
level_num: int = None,
):
# 伙伴类型id
self.label_id = label_id
# 伙伴类型
self.label_name = label_name
# 伙伴类型层级
self.level_num = level_num
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.label_id is not | |
# <gh_stars>0  (dataset artifact; not valid Python source)
import os
import glob
import cv2
import scipy.misc as misc
from skimage.transform import resize
import numpy as np
from functools import reduce
from operator import mul
import torch
from torch import nn
import matplotlib.pyplot as plt
import re
try:
import cynetworkx as netx
except ImportError:
import networkx as netx
from scipy.ndimage import gaussian_filter
from skimage.feature import canny
import collections
import shutil
import imageio
import copy
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import time
from scipy.interpolate import interp1d
from collections import namedtuple
def path_planning(num_frames, x, y, z, path_type=''):
    """Generate a smooth camera trajectory of *num_frames* 3-D offsets.

    :param num_frames: number of samples along the path.
    :param x: maximum offset along the x axis.
    :param y: maximum offset along the y axis.
    :param z: maximum offset along the z axis.
    :param path_type: one of 'straight-line', 'double-straight-line', 'circle'.
    :return: tuple (xs, ys, zs) of arrays, each of length ``num_frames``.
    :raises ValueError: for an unrecognised *path_type* (previously this fell
        through and crashed with an unbound-variable NameError).
    """
    if path_type in ('straight-line', 'double-straight-line'):
        # Both line modes sample a quadratic spline through three corner points.
        if path_type == 'straight-line':
            corner_points = np.array([[0, (0 - y) * 0.5, 0], [(0 + x) * 0.5, 0, (0 + z) * 0.5], [x, (0 + y) * 0.5, z]])
        else:
            corner_points = np.array([[-x, -y, -z], [0, 0, 0], [x, y, z]])
        corner_t = np.linspace(0, 1, len(corner_points))
        t = np.linspace(0, 1, num_frames)
        spline = interp1d(corner_t, corner_points, axis=0, kind='quadratic')(t)
        xs, ys, zs = [axis.squeeze() for axis in np.split(spline, 3, 1)]
    elif path_type == 'circle':
        # Sweep the phase from -2*pi to 2*pi; z oscillates at half frequency.
        xs, ys, zs = [], [], []
        for bs_shift_val in np.arange(-2.0, 2.0, (4. / num_frames)):
            xs.append(np.cos(bs_shift_val * np.pi) * 1 * x)
            ys.append(np.sin(bs_shift_val * np.pi) * 1 * y)
            zs.append(np.cos(bs_shift_val * np.pi / 2.) * 1 * z)
        xs, ys, zs = np.array(xs), np.array(ys), np.array(zs)
    else:
        raise ValueError("Unsupported path_type: {0!r}".format(path_type))
    return xs, ys, zs
def open_small_mask(mask, context, open_iteration, kernel):
    """Morphologically open (mask + context) and fold newly-opened pixels into the mask.

    Returns a float tensor shaped (1, 1, H, W) on the same device/dtype as *mask*.
    """
    mask_np = mask.cpu().data.numpy().squeeze().astype(np.uint8)
    context_np = context.cpu().data.numpy().squeeze().astype(np.uint8)
    combined = mask_np + context_np
    struct_elem = np.ones((kernel, kernel))
    for _ in range(open_iteration):
        # Closing pass: dilate then erode with the same structuring element.
        combined = cv2.erode(cv2.dilate(combined, struct_elem, iterations=1), struct_elem, iterations=1)
    mask_np[(combined - context_np) > 0] = 1
    return torch.FloatTensor(mask_np).to(mask)[None, None, ...]
def filter_irrelevant_edge_new(self_edge, comp_edge, other_edges, other_edges_with_id, current_edge_id, context, depth, mesh, context_cc, spdb=False):
    """Filter edge components that are irrelevant to the current edge.

    Keeps only the "other" edge components whose pixels touch the (dilated)
    current self/companion edge inside *context*; where a component merely
    bevel-touches (diagonal contact only), it is extended by one mesh
    neighbour so it connects across the diagonal.

    :param self_edge: binary map of the current edge component.
    :param comp_edge: binary map of the companion edge component.
    :param other_edges: binary map of all other edge pixels.
    :param other_edges_with_id: integer map labelling each edge pixel with its edge id (-1 = none).
    :param current_edge_id: id of the current edge (unused here; kept for interface compatibility).
    :param depth: per-pixel depth map, sampled where other edges terminate against ours.
    :param mesh: graph whose nodes are pixel tuples; used to walk neighbours.
    :param context_cc: iterable of mesh nodes in the context connected component.
    :param spdb: debug flag; shows plots and drops into pdb when True.
    :return: (filtered other_edges, end-point depth map, other_edges_info list).
    """
    other_edges = other_edges.squeeze().astype(np.uint8)
    other_edges_with_id = other_edges_with_id.squeeze()
    self_edge = self_edge.squeeze()
    dilate_bevel_self_edge = cv2.dilate((self_edge + comp_edge).astype(np.uint8), np.array([[1,1,1],[1,1,1],[1,1,1]]), iterations=1)
    dilate_cross_self_edge = cv2.dilate((self_edge + comp_edge).astype(np.uint8), np.array([[0,1,0],[1,1,1],[0,1,0]]).astype(np.uint8), iterations=1)
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # plain `int` is the exact drop-in replacement.
    edge_ids = np.unique(other_edges_with_id * context + (-1) * (1 - context)).astype(int)
    end_depth_maps = np.zeros_like(self_edge)
    self_edge_ids = np.sort(np.unique(other_edges_with_id[self_edge > 0]).astype(int))
    self_edge_ids = self_edge_ids[1:] if self_edge_ids.shape[0] > 0 and self_edge_ids[0] == -1 else self_edge_ids
    self_comp_ids = np.sort(np.unique(other_edges_with_id[comp_edge > 0]).astype(int))
    self_comp_ids = self_comp_ids[1:] if self_comp_ids.shape[0] > 0 and self_comp_ids[0] == -1 else self_comp_ids
    edge_ids = edge_ids[1:] if edge_ids[0] == -1 else edge_ids
    other_edges_info = []
    extend_other_edges = np.zeros_like(other_edges)
    if spdb is True:
        f, ((ax1, ax2, ax3)) = plt.subplots(1, 3, sharex=True, sharey=True); ax1.imshow(self_edge); ax2.imshow(context); ax3.imshow(other_edges_with_id * context + (-1) * (1 - context)); plt.show()
        import pdb; pdb.set_trace()
    # Mask of all pixels belonging to the current edge's own components
    # (plus the dilated companion edge), so self-contact is ignored.
    filter_self_edge = np.zeros_like(self_edge)
    for self_edge_id in self_edge_ids:
        filter_self_edge[other_edges_with_id == self_edge_id] = 1
    dilate_self_comp_edge = cv2.dilate(comp_edge, kernel=np.ones((3, 3)), iterations=2)
    valid_self_comp_edge = np.zeros_like(comp_edge)
    for self_comp_id in self_comp_ids:
        valid_self_comp_edge[self_comp_id == other_edges_with_id] = 1
    self_comp_edge = dilate_self_comp_edge * valid_self_comp_edge
    filter_self_edge = (filter_self_edge + self_comp_edge).clip(0, 1)
    for edge_id in edge_ids:
        other_edge_locs = (other_edges_with_id == edge_id).astype(np.uint8)
        condition = (other_edge_locs * other_edges * context.astype(np.uint8))
        end_cross_point = dilate_cross_self_edge * condition * (1 - filter_self_edge)
        end_bevel_point = dilate_bevel_self_edge * condition * (1 - filter_self_edge)
        if end_bevel_point.max() != 0:
            # Record the depth where this component terminates against ours.
            end_depth_maps[end_bevel_point != 0] = depth[end_bevel_point != 0]
            if end_cross_point.max() == 0:
                # Bevel-only (diagonal) contact: extend by one mesh neighbour
                # that cross-touches the self edge.
                nxs, nys = np.where(end_bevel_point != 0)
                for nx, ny in zip(nxs, nys):
                    bevel_node = [xx for xx in context_cc if xx[0] == nx and xx[1] == ny][0]
                    for ne in mesh.neighbors(bevel_node):
                        if other_edges_with_id[ne[0], ne[1]] > -1 and dilate_cross_self_edge[ne[0], ne[1]] > 0:
                            extend_other_edges[ne[0], ne[1]] = 1
                            break
        else:
            # No contact at all: drop this component entirely.
            other_edges[other_edges_with_id == edge_id] = 0
    other_edges = (other_edges + extend_other_edges).clip(0, 1) * context
    return other_edges, end_depth_maps, other_edges_info
def clean_far_edge_new(input_edge, end_depth_maps, mask, context, global_mesh, info_on_pix, self_edge, inpaint_id, config):
mesh = netx.Graph()
hxs, hys = np.where(input_edge * mask > 0)
valid_near_edge = (input_edge != 0).astype(np.uint8) * context
valid_map = mask + context
invalid_edge_ids = []
for hx, hy in zip(hxs, hys):
node = (hx ,hy)
mesh.add_node((hx, hy))
eight_nes = [ne for ne in [(hx + 1, hy), (hx - 1, hy), (hx, hy + 1), (hx, hy - 1), \
(hx + 1, hy + 1), (hx - 1, hy - 1), (hx - 1, hy + 1), (hx + 1, hy - 1)]\
if 0 <= ne[0] < input_edge.shape[0] and 0 <= ne[1] < input_edge.shape[1] and 0 < input_edge[ne[0], ne[1]]] # or end_depth_maps[ne[0], ne[1]] != 0]
for ne in eight_nes:
mesh.add_edge(node, ne, length=np.hypot(ne[0] - hx, ne[1] - hy))
if end_depth_maps[ne[0], ne[1]] != 0:
mesh.nodes[ne[0], ne[1]]['cnt'] = True
if end_depth_maps[ne[0], ne[1]] == 0:
import pdb; pdb.set_trace()
mesh.nodes[ne[0], ne[1]]['depth'] = end_depth_maps[ne[0], ne[1]]
elif mask[ne[0], ne[1]] != 1:
four_nes = [nne for nne in [(ne[0] + 1, ne[1]), (ne[0] - 1, ne[1]), (ne[0], ne[1] + 1), (ne[0], ne[1] - 1)]\
if nne[0] < end_depth_maps.shape[0] and nne[0] >= 0 and nne[1] < end_depth_maps.shape[1] and nne[1] >= 0]
for nne in four_nes:
if end_depth_maps[nne[0], nne[1]] != 0:
mesh.add_edge(nne, ne, length=np.hypot(nne[0] - ne[0], nne[1] - ne[1]))
mesh.nodes[nne[0], nne[1]]['cnt'] = True
mesh.nodes[nne[0], nne[1]]['depth'] = end_depth_maps[nne[0], nne[1]]
ccs = [*netx.connected_components(mesh)]
end_pts = []
for cc in ccs:
end_pts.append(set())
for node in cc:
if mesh.nodes[node].get('cnt') is not None:
end_pts[-1].add((node[0], node[1], mesh.nodes[node]['depth']))
predef_npaths = [None for _ in range(len(ccs))]
fpath_map = np.zeros_like(input_edge) - 1
npath_map = np.zeros_like(input_edge) - 1
npaths, fpaths = dict(), dict()
break_flag = False
end_idx = 0
while end_idx < len(end_pts):
end_pt, cc = [*zip(end_pts, ccs)][end_idx]
end_idx += 1
sorted_end_pt = []
fpath = []
iter_fpath = []
if len(end_pt) > 2 or len(end_pt) == 0:
if len(end_pt) > 2:
continue
continue
if len(end_pt) == 2:
ravel_end = [*end_pt]
tmp_sub_mesh = mesh.subgraph(list(cc)).copy()
tmp_npath = [*netx.shortest_path(tmp_sub_mesh, (ravel_end[0][0], ravel_end[0][1]), (ravel_end[1][0], ravel_end[1][1]), weight='length')]
fpath_map1, npath_map1, disp_diff1 = plan_path(mesh, info_on_pix, cc, ravel_end[0:1], global_mesh, input_edge, mask, valid_map, inpaint_id, npath_map=None, fpath_map=None, npath=tmp_npath)
fpath_map2, npath_map2, disp_diff2 = plan_path(mesh, info_on_pix, cc, ravel_end[1:2], global_mesh, input_edge, mask, valid_map, inpaint_id, npath_map=None, fpath_map=None, npath=tmp_npath)
tmp_disp_diff = [disp_diff1, disp_diff2]
self_end = []
edge_len = []
ds_edge = cv2.dilate(self_edge.astype(np.uint8), np.ones((3, 3)), iterations=1)
if ds_edge[ravel_end[0][0], ravel_end[0][1]] > 0:
self_end.append(1)
else:
self_end.append(0)
if ds_edge[ravel_end[1][0], ravel_end[1][1]] > 0:
self_end.append(1)
else:
self_end.append(0)
edge_len = [np.count_nonzero(npath_map1), np.count_nonzero(npath_map2)]
sorted_end_pts = [xx[0] for xx in sorted(zip(ravel_end, self_end, edge_len, [disp_diff1, disp_diff2]), key=lambda x: (x[1], x[2]), reverse=True)]
re_npath_map1, re_fpath_map1 = (npath_map1 != -1).astype(np.uint8), (fpath_map1 != -1).astype(np.uint8)
re_npath_map2, re_fpath_map2 = (npath_map2 != -1).astype(np.uint8), (fpath_map2 != -1).astype(np.uint8)
if np.count_nonzero(re_npath_map1 * re_npath_map2 * mask) / \
(np.count_nonzero((re_npath_map1 + re_npath_map2) * mask) + 1e-6) > 0.5\
and np.count_nonzero(re_fpath_map1 * re_fpath_map2 * mask) / \
(np.count_nonzero((re_fpath_map1 + re_fpath_map2) * mask) + 1e-6) > 0.5\
and tmp_disp_diff[0] != -1 and tmp_disp_diff[1] != -1:
my_fpath_map, my_npath_map, npath, fpath = \
plan_path_e2e(mesh, cc, sorted_end_pts, global_mesh, input_edge, mask, valid_map, inpaint_id, npath_map=None, fpath_map=None)
npath_map[my_npath_map != -1] = my_npath_map[my_npath_map != -1]
fpath_map[my_fpath_map != -1] = my_fpath_map[my_fpath_map != -1]
if len(fpath) > 0:
edge_id = global_mesh.nodes[[*sorted_end_pts][0]]['edge_id']
fpaths[edge_id] = fpath
npaths[edge_id] = npath
invalid_edge_ids.append(edge_id)
else:
if tmp_disp_diff[0] != -1:
ratio_a = tmp_disp_diff[0] / (np.sum(tmp_disp_diff) + 1e-8)
else:
ratio_a = 0
if tmp_disp_diff[1] != -1:
ratio_b = tmp_disp_diff[1] / (np.sum(tmp_disp_diff) + 1e-8)
else:
ratio_b = 0
npath_len = len(tmp_npath)
if npath_len > config['depth_edge_dilate_2'] * 2:
npath_len = npath_len - (config['depth_edge_dilate_2'] * 1)
tmp_npath_a = tmp_npath[:int(np.floor(npath_len * ratio_a))]
tmp_npath_b = tmp_npath[::-1][:int(np.floor(npath_len * ratio_b))]
tmp_merge = []
if len(tmp_npath_a) > 0 and sorted_end_pts[0][0] == tmp_npath_a[0][0] and sorted_end_pts[0][1] == tmp_npath_a[0][1]:
if len(tmp_npath_a) > 0 and mask[tmp_npath_a[-1][0], tmp_npath_a[-1][1]] > 0:
tmp_merge.append([sorted_end_pts[:1], tmp_npath_a])
if len(tmp_npath_b) > 0 and mask[tmp_npath_b[-1][0], tmp_npath_b[-1][1]] > 0:
tmp_merge.append([sorted_end_pts[1:2], tmp_npath_b])
elif len(tmp_npath_b) > 0 and sorted_end_pts[0][0] == tmp_npath_b[0][0] and sorted_end_pts[0][1] == tmp_npath_b[0][1]:
if len(tmp_npath_b) > 0 and mask[tmp_npath_b[-1][0], tmp_npath_b[-1][1]] > 0:
tmp_merge.append([sorted_end_pts[:1], tmp_npath_b])
if len(tmp_npath_a) > 0 and mask[tmp_npath_a[-1][0], tmp_npath_a[-1][1]] > 0:
tmp_merge.append([sorted_end_pts[1:2], tmp_npath_a])
for tmp_idx in range(len(tmp_merge)):
if len(tmp_merge[tmp_idx][1]) == 0:
continue
end_pts.append(tmp_merge[tmp_idx][0])
ccs.append(set(tmp_merge[tmp_idx][1]))
if len(end_pt) == 1:
sub_mesh = mesh.subgraph(list(cc)).copy()
pnodes = netx.periphery(sub_mesh)
if len(end_pt) == 1:
ends = [*end_pt]
elif len(sorted_end_pt) == 1:
ends = [*sorted_end_pt]
else:
import pdb; pdb.set_trace()
try:
edge_id = global_mesh.nodes[ends[0]]['edge_id']
except:
import pdb; pdb.set_trace()
pnodes = sorted(pnodes,
key=lambda x: np.hypot((x[0] - ends[0][0]), (x[1] - ends[0][1])),
reverse=True)[0]
npath = [*netx.shortest_path(sub_mesh, (ends[0][0], ends[0][1]), pnodes, weight='length')]
for np_node in npath:
npath_map[np_node[0], np_node[1]] = edge_id
fpath = []
if global_mesh.nodes[ends[0]].get('far') is None:
print("None far")
else:
| |
== len(workplace_names) # Each name should be unique
workplaces = pd.DataFrame({
'ID': range(0, len(workplace_names)),
'MSOA': workplace_msoas,
'SOC': workplace_socs
})
assert len(workplaces) == len(self.all_msoas) * len(possible_jobs) # One location per job per msoa
PopulationInitialisation._add_location_columns(workplaces, location_names=workplace_names)
work_name = ColumnNames.Activities.WORK
# Workplaces dataframe is ready. Now read commuting flows
commuting_flows = PopulationInitialisation.read_commuting_flows_data(self.all_msoas)
num_individuals = len(self.individuals) # (sanity check)
cols = self.individuals.columns
self.individuals = PopulationInitialisation.add_work_flows(flow_type=work_name, individuals=self.individuals,
workplaces=workplaces, commuting_flows=commuting_flows,
flow_threshold=5)
assert num_individuals == len(self.individuals), \
"There was an error reading workplaces (caching?) and the number of individuals has changed!"
assert (self.individuals.columns[0:-3] == cols).all(), \
"There was an error reading workplaces (caching?) the column names don't match!"
del num_individuals, cols
self.activity_locations[work_name] = ActivityLocation(name=work_name, locations=workplaces, flows=None,
individuals=self.individuals, duration_col="pwork")
## Some flows will be very complicated numbers. Reduce the numbers of decimal places across the board.
## This makes it easier to write out the files and to make sure that the proportions add up properly
## Use multiprocessing because swifter doesn't work properly for some reason (wont paralelise)
# with multiprocessing.Pool(processes=int(os.cpu_count()/2)) as pool:
# for name in tqdm(activity_locations.keys(), desc="Rounding all flows"):
# rounded_flows = pool.map( PopulationInitialisation._round_flows, list(individuals[f"{name}{ColumnNames.ACTIVITY_FLOWS}"]))
# individuals[f"{name}{ColumnNames.ACTIVITY_FLOWS}"] = rounded_flows
# # Use swifter, but for some reason it wont paralelise the problem. Not sure why.
# #individuals[f"{name}{ColumnNames.ACTIVITY_FLOWS}"] = \
# # individuals.loc[:,f"{name}{ColumnNames.ACTIVITY_FLOWS}"].\
# # swifter.allow_dask_on_strings(enable=True).progress_bar(True, desc=name).\
# # apply(lambda flows: [round(flow, 5) for flow in flows])
# Round the durations
for name in tqdm(self.activity_locations.keys(), desc="Rounding all durations"):
self.individuals[f"{name}{ColumnNames.ACTIVITY_DURATION}"] = \
self.individuals[f"{name}{ColumnNames.ACTIVITY_DURATION}"].apply(lambda x: round(x, 5))
# Some people's activity durations will not add up to 1.0 because we don't model all their activities.
# Extend the amount of time at home to make up for this
self.individuals = PopulationInitialisation.pad_durations(self.individuals, self.activity_locations)
# Now that we have everone's initial activities, remember the proportions of times that they spend doing things
# so that if these change (e.g. under lockdown) they can return to 'normality' later
for activity_name in self.activity_locations.keys():
self.individuals[f"{activity_name}{ColumnNames.ACTIVITY_DURATION_INITIAL}"] = \
self.individuals[f"{activity_name}{ColumnNames.ACTIVITY_DURATION}"]
# Add some necessary columns for the disease
self.individuals = PopulationInitialisation.add_disease_columns(self.individuals)
print(" ... finished initialisation.")
@staticmethod
def _round_flows(flows):
return [round(flow, 5) for flow in flows]
@classmethod
def _check_no_homeless(cls, individuals, households, warn=True):
"""
Check that each individual has a household. NOTE: this only works for the raw mirosimulation data.
Once the health data has been attached this wont work becuase the unique identifiers change.
If this function is still needed then it will need to take the specific IDs as arguments, but this is
a little complicated because some combination of [area, HID, (PID)] is needed for unique identification.
:param individuals:
:param households:
:param warn: Whether to warn (default, True) or raise an exception (False)
:return: True if there are no homeless, False otherwise (unless `warn==False` in which case an
exception is raised).
:raise: An exception if `warn==False` and there are individuals without a household
"""
print("Checking no homeless (all individuals assigned to a household) ...", )
# This will fail if used on anything other than the raw msm data because once I read in the
# health data the PID and HID columns are renamed to prevent them being accidentally used.
assert "PID" in individuals.columns and "HID" in households.columns
# Households in the msm are uniquely identified by [area,HID] combination.
# Individuals are identified by [House_OA,HID,PID]
hids = households.set_index(["area", "HID"]) # Make a new dataset with a unique index for households
# Find individuals who do not have a related entry in the households dataset
homeless = [(area, hid, pid) for area, hid, pid in individuals.loc[:, ["House_OA", "HID", "PID"]].values if
(area, hid) not in hids.index]
# (version using apply isn't quicker)
# h2 = individuals.reset_index().loc[:, ["House_OA", "HID", "PID"]].swifter.apply(
# lambda x: x[2] if (x[0], x[1]) in hids.index else None, axis=1)
# (Vectorised version doesn't quite work sadly)
# h2 = np.where(individuals.loc[:, ["House_OA", "HID", "PID"]].isin(hids.index), True, False)
if len(homeless) > 0:
msg = f"There are {len(homeless)} individuals without an associated household (HID)."
if warn:
warnings.warn(msg)
return False
else:
raise Exception(msg)
print("... finished checking homeless")
return True
@classmethod
def extract_msoas_from_individuals(cls, individuals: pd.DataFrame) -> List[str]:
    """
    Return the unique MSOA codes present in *individuals*, in ascending order.

    :param individuals: DataFrame with an `area` column.
    :return: sorted list of unique area codes.
    """
    return sorted(individuals.area.unique())
@classmethod
def read_individual_time_use_and_health_data(cls, home_name: str) -> pd.DataFrame:
"""
Read a population of individuals. Includes time-use & health info.
:param home_name: A string to describe flows to people's homes (probably 'Home')
:return A tuple with new dataframes of individuals and households
"""
print("Reading time use and health data ... ", )
# filename = os.path.join(cls.DATA_DIR, "devon-tu_health", "Devon_simulated_TU_health.txt")
# filename = os.path.join(cls.DATA_DIR, "devon-tu_health", "Devon_keyworker.txt")
# filename = os.path.join(cls.DATA_DIR, "devon-tu_health", "Devon_Complete.txt")
filename = os.path.join(cls.DATA_DIR, "devon-tu_health", "Devon_simulated_TU_keyworker_health.csv")
tuh = pd.read_csv(filename) # , encoding = "ISO-8859-1")
tuh = Optimise.optimize(tuh) # Reduce memory of tuh where possible.
# Drop people that weren't matched to a household originally
nohh = len(tuh.loc[tuh.hid == -1])
if nohh > 0:
warnings.warn(f"{nohh} / {len(tuh)} individuals in the TUH data had not originally been matched "
f"to a household. They're being removed")
tuh = tuh.loc[tuh.hid != -1]
# Indicate that HIDs and PIDs shouldn't be used as indices as they don't uniquely
# identify indivuals / households in this health data
tuh = tuh.rename(columns={'hid': '_hid', 'pid': '_pid'})
# Make a new, unique id for each individual (PIDs have been replicated so no longer uniquely idenfity individuals}
assert len(tuh.index.unique()) == len(tuh) # Index should have been set to row number when tuh was read in
tuh.insert(0, "ID", tuh.index, allow_duplicates=False) # Insert into first position
#
# ********** Create households dataframe *************
#
# Go through each individual. House members can be identified because they have the same [Area, HID]
# combination.
# Maintain a dictionary of (Area, HID) -> House_ID that records a new ID for each house
# Each time a new [Area, HID] combination is found, create a new entry in the households dictionary for that
# household, generate a House_ID, and record that in the dictionary.
# When existing (Area, HID) combinations are found, look up the ID in the dataframe and record it for that
# individual
# Also, maintain a list of house_ids in the same order as individuals in the tuh data which can be used later
# when we link from the individuls in the TUH data to their house id
# This is the main dictionary. It maps (Area, HID) to house id numbers, along with some more information:
house_ids_dict = {} # (Area, HID) -> [HouseIDNumber, NumPeople, area, hid]
house_ids_list = [] # ID of each house for each individual
house_id_counter = 0 # Counter to generate new HouseIDNumbers
unique_individuals = [] # Also store all [Area, HID, PID] combinations to check they're are unique later
# Maybe quicker to loop over 3 lists simultaneously than through a DataFrame
_areas = list(tuh["area"])
_hids = list(tuh["_hid"])
_pids = list(tuh["_pid"])
for i, (area, hid, pid) in enumerate(zip(_areas, _hids, _pids)):
# print(i, area, hid, pid)
unique_individuals.append((area, hid, pid))
house_key = (area, hid) # Uniqely identifies a household
house_id_number = -1
try: # If this lookup works then we've seen this house before. Get it's ID number and increase num people in it
house_info = house_ids_dict[house_key]
# Check the area and hid are the same as the one previously stored in the dictionary
assert area == house_info[2] and hid == house_info[3]
# Also check that the house key (Area, HID) matches the area and HID
assert house_key[0] == house_info[2] and house_key[1] == house_info[3]
# We need the ID number to tell the individual which their house is
house_id_number = house_info[0]
# Increse the number of people in the house and create a new list of info for this house
people_per_house = house_info[1] + 1
house_ids_dict[house_key] = [house_id_number, people_per_house, area, hid]
except KeyError: # If the lookup failed then this is | |
import alignments
import re
import read
import binaryIO
import math
import os
import preprocess
import time
class Compressor:
aligned = None
# 0 - zlib
# 1 - lzma
# 2 - bz2
compressMethod = 0
covSize = 0
totalSize = 0
def __init__(self, frag_len_cutoff):
if self.compressMethod == 0:
self.zlib = __import__('zlib')
elif self.compressMethod == 1:
self.lzma = __import__('lzma')
elif self.compressMethod == 2:
self.bz2 = __import__('bz2')
if frag_len_cutoff:
print('Set fragment length cutoff to %d' % frag_len_cutoff)
self.frag_len_cutoff = frag_len_cutoff
    def compress(self, samFilename, compressedFilename, gtf, min_filename, frag_len_z_cutoff, split_diff_strands, split_discordant):
        ''' Compresses the alignments to 2 files, one for unspliced and one for spliced

        :param samFilename: path of the sorted input SAM file
        :param compressedFilename: path of the compressed output file
        :param gtf: optional GTF annotation file; its exons guide bundling
        :param min_filename: optional intermediate SAM file written during compression
        :param frag_len_z_cutoff: z-score cutoff the preprocessor uses to derive a
            fragment-length cutoff when none was supplied to __init__
        :param split_diff_strands: if True, unpair mates mapped to different strands
        :param split_discordant: if True, split discordant read pairs

        file_prefix: Prefix for all output file names
        '''
        # Preprocess once to detect pairing anomalies and (optionally) derive
        # the fragment-length cutoff.
        self.p = preprocess.Preprocessor(samFilename, frag_len_z_cutoff, split_diff_strands)
        if not self.frag_len_cutoff:
            self.frag_len_cutoff = self.p.frag_len_cutoff
        print('Using fragment length cutoff of ' + str(self.frag_len_cutoff))
        if split_diff_strands:
            print('Splitting mates on different strands')
        else:
            print('Not splitting mates on different strands')
        if split_discordant:
            print('Splitting discordant')
        else:
            print('Not splitting discordant')
        # Reads on different strands that should be unpaired
        self.diff_strand_unpaired = self.p.unpaired
        # The preprocessor is no longer needed; free its memory.
        del self.p
        # Read header (leading '@' lines of the SAM file)
        header = ''
        with open(samFilename, 'r') as f:
            for line in f:
                if line[0] == '@':
                    header += line
                else:
                    break
        self.chromosomes = self.parseSAMHeader(header)
        self.aligned = alignments.Alignments(self.chromosomes, self.frag_len_cutoff, split_discordant)
        # Optional annotation: known exons help the bundling step.
        if gtf:
            self.aligned.gtf_exons = self.parseGTF(gtf, self.aligned.chromOffsets)
        self.compressByBundle(samFilename, compressedFilename, min_filename)
        #print('%d unmatched' % self.aligned.numUnmatched)
        print('Approximately %d / %d = %f%% of compressed file is coverage' % (self.covSize, self.totalSize, 100.0*float(self.covSize)/float(self.totalSize)))
        print('Finished compressing')
    def compressByBundle(self, input_name, compressed_name, intermediate_name=None):
        '''
        Read a sorted SAM file and compress in segments determined by clusters of reads.

        Bundles are flushed to a temporary file as they are finished; at the end the
        index, cross-bundle buckets and the temporary file are concatenated into the
        final compressed output.

        :param input_name: path of the sorted input SAM file
        :param compressed_name: path of the compressed output file
        :param intermediate_name: optional SAM file to write alongside compression
        :return: None
        '''
        # If coverage is 0 for at least this many bases end of a potential gene
        overlapRadius = 50
        spliced_index = []          # compressed length of each bundle in temp.bin
        bundles = []                # exon sets, one per bundle
        first = True                # True until the first bundle has been written
        bundle_id = 0
        read_id = 0
        diff_strand_unpaired_id = 0
        num_diff_strand_unpaired = len(self.diff_strand_unpaired)
        firstR = None
        with open(input_name, 'r') as filehandle:
            id = 0
            start_id = 0
            for line in filehandle:
                # Check if header line
                if line[0] == '@':
                    continue
                row = line.strip().split('\t')
                if row[2] == '*':
                    # HISAT includes unmapped reads at the end of the file; we just skip them
                    continue
                if not row[2] in self.chromosomes[0]:
                    print('Error! Chromosome ' + str(row[2]) + ' not found!')
                    exit()
                # Starting position of this read (global coordinate across chromosomes)
                start = self.aligned.chromOffsets[row[2]] + int(row[3])
                # A gap larger than overlapRadius past the current gene bounds
                # closes the current bundle.
                if self.aligned.gene_bounds and start > (self.aligned.gene_bounds[-1] + overlapRadius):
                    # Compress most recent bundle
                    self.aligned.finalizeExons()
                    self.aligned.finalizeUnmatched()
                    self.aligned.finalize_cross_bundle_reads()
                    #if self.aligned.gene_bounds[0] < 100480943 and self.aligned.gene_bounds[1] > 100478955:
                    #    print(bundle_id)
                    #    print(self.aligned.gene_bounds)
                    #    print(self.aligned.exons)
                    #    print(self.aligned.gene_bounds[0] - self.aligned.chromOffsets['X'])
                    #    print(self.aligned.gene_bounds[1] - self.aligned.chromOffsets['X'])
                    #    exit()
                    bundle_id += 1
                    start_id = id
                    bundles.append(self.aligned.exons)
                    # Write to intermediate file
                    if intermediate_name:
                        if first:
                            # If it's the first bundle, write the header as well
                            with open(intermediate_name, 'w') as f1:
                                read_id = self.aligned.writeSAM(f1, self.aligned.unpaired, self.aligned.paired, True, False, read_id)
                        else:
                            with open(intermediate_name, 'a') as f1:
                                read_id = self.aligned.writeSAM(f1, self.aligned.unpaired, self.aligned.paired, False, False, read_id)
                    junctions, maxReadLen = self.aligned.computeBuckets()
                    self.sortedJuncs = sorted(junctions.keys())
                    # Compress bundle to temporary file
                    if first:
                        mode = 'wb'
                    else:
                        mode = 'ab'
                    with open('temp.bin', mode) as f:
                        l = self.compressBundle(junctions, maxReadLen, f)
                        spliced_index.append(l)
                    # Start new bundle
                    self.aligned.resetBundle()
                    self.aligned.exons.add(start)
                    first = False
                # Process read
                if row[5] == '*':
                    # HISAT occasionally prints * as the cigar string when it is identical to its mate
                    #print('No cigar string')
                    #print(row[0])
                    #exit()
                    exons = None
                else:
                    exons = self.parseCigar(row[5], int(row[3]))
                # find XS (strand) and NH values among the optional SAM tags
                strand = None
                NH = 1
                for r in row[11 : len(row)]:
                    if r[0:5] == 'XS:A:' or r[0:5] == 'XS:a:':
                        strand = r[5]
                    elif r[0:3] == 'NH:':
                        NH = int(r[5:])
                flags = int(row[1])
                if flags & 4:
                    # Read is unmapped
                    continue
                r = read.Read(row[2], int(row[3]), exons, strand, NH)
                #r.name = row[0]
                # Decide pairing: no mate, mate-unmapped flag, or a read the
                # preprocessor flagged as differently-stranded means unpaired.
                if row[6] == '*' or (flags & 8):
                    paired = False
                elif diff_strand_unpaired_id < num_diff_strand_unpaired and id == self.diff_strand_unpaired[diff_strand_unpaired_id]:
                    #if not row[6] == '*':
                    #    print('\t'.join(row))
                    paired = False
                    diff_strand_unpaired_id += 1
                else:
                    paired = True
                    r.bundle = bundle_id
                    r.pairOffset = int(row[7])
                    if row[6] == '=':
                        r.pairChrom = row[2]
                    else:
                        r.pairChrom = row[6]
                self.aligned.processRead(row[0], r, paired)
                id += 1
        # Compress final cluster
        self.aligned.finalizeExons()
        self.aligned.finalizeUnmatched()
        self.aligned.finalize_cross_bundle_reads()
        bundle_id += 1
        bundles.append(self.aligned.exons)
        # Write to intermediate file
        if intermediate_name:
            if first:
                # If it's the first bundle, write the header as well
                with open(intermediate_name, 'w') as f1:
                    read_id = self.aligned.writeSAM(f1, self.aligned.unpaired, self.aligned.paired, True, False, read_id)
                first = False
            else:
                with open(intermediate_name, 'a') as f1:
                    read_id = self.aligned.writeSAM(f1, self.aligned.unpaired, self.aligned.paired, False, False, read_id)
        junctions, maxReadLen = self.aligned.computeBuckets()
        self.sortedJuncs = sorted(junctions.keys())
        # Compress bundle to temporary file
        if first:
            mode = 'wb'
        else:
            mode = 'ab'
        with open('temp.bin', mode) as f:
            l = self.compressBundle(junctions, maxReadLen, f)
            spliced_index.append(l)
        # Count reads whose mate was never found across bundles.
        leftovers = 0
        for k,v in self.aligned.cross_bundle_reads.items():
            #if len(v) > 0:
            #    print(k)
            #    print(v)
            #    exit()
            leftovers += len(v)
        print('%d cross-bundle reads unmatched' % leftovers)
        bundle_lens = [c[-1]-c[0] for c in bundles]
        print('Minimum bundle length: %d' % min(bundle_lens))
        print('Maximum bundle length: %d' % max(bundle_lens))
        print('Average bundle length: %d'% (sum(bundle_lens) / len(bundle_lens)))
        # Write index information and append spliced and unspliced files
        with open(compressed_name, 'wb') as f:
            s = binaryIO.writeChroms(self.chromosomes)
            s += binaryIO.writeClusters(bundles)
            s += binaryIO.writeList(spliced_index)
            f.write(s)
            # Compress bundle-spanning buckets
            self.compressCrossBundle(self.aligned.cross_bundle_buckets, self.aligned.max_cross_bundle_read_len, bundle_id, f)
            # Move contents of temporary file to output file
            with open('temp.bin', 'rb') as f2:
                f.write(f2.read())
        os.remove('temp.bin')
def compressBundle(self, junctions, maxReadLen, filehandle):
# Determine the number of bytes for read lengths
readLenBytes = binaryIO.findNumBytes(maxReadLen)
cluster = binaryIO.valToBinary(1, readLenBytes)
cluster += binaryIO.writeJunctionsList(self.sortedJuncs, 2)
self.totalSize += len(cluster)
# TODO: No need for junc_lens?
junc_lens = []
junc_string = b''
for j in self.sortedJuncs:
#if self.aligned.exons[0] == 100476370 and j == [2, None, 1]:
#
s, c, t = binaryIO.writeJunction(readLenBytes, junctions[j])
self.covSize += c
self.totalSize += t
junc_lens.append(len(s))
junc_string += s
#cluster += binaryIO.writeList(junc_lens)
cluster += junc_string
# Write to file
start = filehandle.tell()
filehandle.write(self.compressString(cluster))
# return length of cluster in file
return filehandle.tell() - start
    def compressCrossBundle(self, cross_bundle_buckets, maxReadLen, num_bundles, filehandle):
        '''
        Compress the bundle-spanning buckets.

        Buckets are serialized in sorted order and compressed in fixed-size chunks;
        a compressed index (bucket names + per-chunk lengths) is written first,
        preceded by its own length so a reader can locate the data section.

        :param cross_bundle_buckets: mapping from bucket key to bucket data
        :param maxReadLen: longest read length among cross-bundle reads
        :param num_bundles: total number of bundles (determines id byte width)
        :param filehandle: binary file open for writing
        '''
        readLenBytes = binaryIO.findNumBytes(maxReadLen)
        bundleIdBytes = binaryIO.findNumBytes(num_bundles)
        buckets_sorted = sorted(cross_bundle_buckets.keys())
        if len(buckets_sorted) > 0:
            print('%d cross-bundle buckets' % len(buckets_sorted))
            pos = filehandle.tell()
            # Buckets are compressed chunk_size at a time so readers can seek.
            chunk_size = 20
            num_chunks = math.ceil(len(buckets_sorted) / chunk_size)
            chunk_lens = [0] * num_chunks
            # Index header: bucket count, chunk size, read-length byte width, names.
            index = binaryIO.valToBinary(4, len(buckets_sorted))
            index += binaryIO.valToBinary(2, chunk_size)
            index += binaryIO.valToBinary(1, readLenBytes)
            index += binaryIO.writeCrossBundleBucketNames(bundleIdBytes, cross_bundle_buckets, buckets_sorted)
            self.totalSize += len(index)
            main = b''
            chunk = b''
            chunk_id = 0
            for i in range(len(buckets_sorted)):
                b = buckets_sorted[i]
                ch, c, t = binaryIO.writeCrossBundleBucket(readLenBytes, cross_bundle_buckets[b])
                chunk += ch
                self.covSize += c
                self.totalSize += t
                # Flush a full chunk: compress it and record its length.
                if (i+1) % chunk_size == 0:
                    compressed = self.compressString(chunk)
                    chunk_lens[chunk_id] = len(compressed)
                    chunk_id += 1
                    main += compressed
                    chunk = b''
            # Flush the final, possibly partial chunk.
            if len(chunk) > 0:
                compressed = self.compressString(chunk)
                chunk_lens[chunk_id] = len(compressed)
                main += compressed
            index += binaryIO.writeList(chunk_lens)
            index = self.compressString(index)
            # Prefix the compressed index with its length (and the length's width).
            length = len(index)
            numBytes = binaryIO.findNumBytes(length)
            binaryIO.writeVal(filehandle, 1, numBytes)
            binaryIO.writeVal(filehandle, numBytes, length)
            filehandle.write(index)
            filehandle.write(main)
            print('Compressed size: %d' % (filehandle.tell() - pos))
        else:
            # No cross-bundle buckets: write a sentinel (width 1, length 0).
            binaryIO.writeVal(filehandle, 1, 1)
            binaryIO.writeVal(filehandle, 1, 0)
def parseCigar(self, cigar, offset):
''' Parse the cigar string starting at the given index of the genome
Returns a list of offsets for each exonic region of the read [(start1, end1), (start2, end2), ...]
'''
exons = []
newExon = True
# Parse cigar string
match = re.search("\D", cigar)
while match:
index = match.start()
length = int(''.join(cigar[:index]))
if cigar[index] == 'N':
# Separates contiguous exons, so set boolean to start a new one
newExon = True
elif cigar[index] == 'M':
# If in the middle of a contiguous exon, append the length to it, otherwise start a new exon
if newExon:
exons.append([offset, offset+length])
newExon = False
else:
exons[-1][1] += length
elif cigar[index] == 'D':
# If in the middle of a contiguous exon, append the deleted length to it
if not newExon:
exons[-1][1] += length
# Skip soft clipping
if not cigar[index] == 'S':
offset += length
cigar = cigar[index+1:]
match = re.search("\D", cigar)
return exons
def parseSAMHeader(self, header):
# In the order they appear in the header
chromNames = []
chromLens = | |
import h5py
import numpy as np
from scipy.io import loadmat
from operator import itemgetter
import math
import scipy as sp
import cv2
import matplotlib.pyplot as plt
import os, sys
import time
import multiprocessing
import random
# Generate Observation Map
def func(theta, m, I, imax, L, w, N, anglemask):
    """Build one rotated observation map for a batch of pixels.

    :param theta: rotation angle in radians applied to lights and normals
    :param m: number of pixels (rows of I)
    :param I: (m, num_lights) measured intensities
    :param imax: per-pixel maximum intensity (the caller wraps it in a list)
    :param L: (num_lights, 3) unit light directions
    :param w: side length of the square observation map
    :param N: (m, 3) ground-truth surface normals
    :param anglemask: (m, num_lights) 0/1 mask of lights used per pixel
    :return: (embed, mask, normal, rotmat) — observation maps, their masks,
        the rotated normals, and the 2x2 rotation matrix used
    """
    print('*',end='')
    # 2-D rotation matrix for angle theta.
    rotmat = np.array([[np.cos(theta), -np.sin(theta)],[np.sin(theta), np.cos(theta)]])
    # Map light xy components from [-1,1] onto the w x w grid.
    p = 0.5*(L[:,0]+1)*(w-1) #x 0:w-1
    q = 0.5*(L[:,1]+1)*(w-1) #y 0:w-1
    # Rotate the grid coordinates about the map centre.
    x = [p-0.5*(w-1), q-0.5*(w-1)]
    x_ = np.dot(rotmat, x)
    p = x_[0,:]+0.5*(w-1);
    q = x_[1,:]+0.5*(w-1);
    p = np.int32(p)
    q = np.int32(q)
    # Flattened cell index of each light on the w x w map.
    light_idx = q*w + p # 0:w*w-1
    # Rotate the xy components of the surface normals by the same angle.
    x = [N[:,0], N[:,1]]
    x_ = np.dot(rotmat, x)
    pn = x_[0,:];
    qn = x_[1,:];
    normal = [np.transpose(pn), np.transpose(qn), N[:,2]]
    normal = np.transpose(normal)
    # Normalize by per-pixel maximum intensity and apply the angle mask.
    temp = I*anglemask/np.transpose(imax)
    # Scatter normalized intensities into the flattened map.
    # NOTE(review): with fancy-index assignment, if two lights fall on the same
    # map cell the later write wins — presumably acceptable; confirm.
    embed = np.zeros((m, w*w), np.float32)
    embed[:, light_idx] = temp
    embed = np.reshape(embed, (m, w, w))
    mask = np.zeros((m, w*w), np.bool_)
    mask[:, light_idx] = anglemask
    mask = np.reshape(mask, (m, w, w))
    return embed, mask, normal, rotmat
def wrapper(packed_args):
    """Unpack one argument tuple and forward it to func().

    multiprocessing.Pool.map passes a single argument per task, so the caller
    bundles func's eight arguments into a tuple and this shim unpacks them.
    """
    theta, m, I, imax, L, w, N, anglemask = packed_args
    return func(theta, m, I, imax, L, w, N, anglemask)
# for multi core cpu
def light_embedding_2d_rot_invariant_multi(I, imax, L, w, N, div, isRandomThresh):
    """Generate rotation-augmented observation maps using all CPU cores.

    One rotated copy of the observation map is produced for each of `div`
    angles, each computed by func() in a worker process.

    :param I: (num_pixels, num_lights) intensities
    :param imax: per-pixel maximum intensity (list-wrapped 1-D array)
    :param L: (num_lights, 3) light directions
    :param w: observation-map side length
    :param N: (num_pixels, 3) surface normals
    :param div: number of rotation divisions
    :param isRandomThresh: if True, randomly subsample lights per pixel
        (training-time augmentation); otherwise use all lights within 90 deg
    :return: (embed, mask, normals, rotations, rows, cols) with the pixel axis
        first and the rotation axis second
    """
    m = I.shape[0]
    rows = w
    cols = w
    embed_rot = []
    normal_rot = []
    mask_rot = []
    rot = []
    # Per-pixel light selection mask: lights within an elevation-angle
    # threshold, optionally randomly thinned for augmentation.
    anglemask = np.zeros((I.shape[0],I.shape[1]),np.float32)
    for k in range(I.shape[0]): # numpixel
        angle1 = 180*np.arccos(L[:,2])/np.pi
        if isRandomThresh == True:
            tgt = np.where(angle1<random.randint(20,90))
            tgtrandom = np.random.permutation(tgt[0])
            tgt = tgtrandom[:random.randint(50,np.min([1000,L.shape[0]]))]
        else:
            tgt = np.where(angle1<90)
        anglemask[k,tgt] = 1
    # One task per rotation angle, fanned out over all cores.
    n = multiprocessing.cpu_count()
    p = multiprocessing.Pool(n)
    params = [(np.pi*(i*360.0/div)/180, m, I, imax, L, w, N, anglemask) for i in range(np.int32(div))]
    result = p.map(wrapper, params)
    p.close()
    # Collect per-rotation results, then move the pixel axis to the front.
    embed_list = []
    mask_list = []
    nml_list = []
    rot_list = []
    for i in range(div):
        embed_list.append(result[i][0].copy())
        mask_list.append(result[i][1].copy())
        nml_list.append(result[i][2].copy())
        rot_list.append(result[i][3].copy())
    embed_list = np.array(embed_list)
    embed_list = np.transpose(embed_list, (1,0,2,3))
    mask_list = np.array(mask_list)
    mask_list = np.transpose(mask_list, (1,0,2,3))
    nml_list = np.array(nml_list)
    nml_list = np.transpose(nml_list, (1,0,2))
    del result,anglemask
    return np.array(embed_list), np.array(mask_list), np.array(nml_list), np.array(rot_list), rows, cols
# for single core cpu
def light_embedding_2d_rot_invariant(I, imax, L, w, N, div, isRandomThresh):
    """Generate rotation-augmented observation maps on a single core.

    Single-process counterpart of light_embedding_2d_rot_invariant_multi():
    the per-rotation work that func() does in workers is inlined here.

    :param I: (num_pixels, num_lights) intensities
    :param imax: per-pixel maximum intensity (list-wrapped 1-D array)
    :param L: (num_lights, 3) light directions
    :param w: observation-map side length
    :param N: (num_pixels, 3) surface normals
    :param div: number of rotation divisions
    :param isRandomThresh: if True, randomly subsample lights per pixel
    :return: (embed, mask, normals, rotations, rows, cols) with the pixel axis
        first and the rotation axis second
    """
    m = I.shape[0]
    embed_rot = []
    normal_rot = []
    mask_rot = []
    rot = []
    count = 0
    # Per-pixel light selection mask (same construction as the multi version).
    anglemask = np.zeros((I.shape[0],I.shape[1]),np.float32)
    for k in range(I.shape[0]):
        angle1 = 180*np.arccos(L[:,2])/np.pi
        if isRandomThresh == True:
            tgt = np.where(angle1<random.randint(20,90))
            tgtrandom = np.random.permutation(tgt[0])
            tgt = tgtrandom[:random.randint(50,np.min([1000,L.shape[0]]))]
        else:
            tgt = np.where(angle1<90)
        anglemask[k,tgt] = 1
    for k in range(div):
        theta = k*360/div
        if theta < 360:
            count = count + 1
            theta = np.pi*theta/180
            # 2-D rotation matrix for this angle.
            rotmat = np.array([[np.cos(theta), -np.sin(theta)],[np.sin(theta), np.cos(theta)]])
            # Map light xy components from [-1,1] onto the w x w grid.
            p = 0.5*(L[:,0]+1)*(w-1) #x 0:w-1
            q = 0.5*(L[:,1]+1)*(w-1) #y 0:w-1
            # Rotate the grid coordinates about the map centre.
            x = [p-0.5*(w-1), q-0.5*(w-1)]
            x_ = np.dot(rotmat, x)
            p = x_[0,:]+0.5*(w-1);
            q = x_[1,:]+0.5*(w-1);
            p = np.int32(p)
            q = np.int32(q)
            # Flattened cell index of each light on the w x w map.
            light_idx = q*w + p # 0:w*w-1
            # Rotate the xy components of the surface normals.
            x = [N[:,0], N[:,1]]
            x_ = np.dot(rotmat, x)
            pn = x_[0,:];
            qn = x_[1,:];
            normal = [np.transpose(pn), np.transpose(qn), N[:,2]]
            normal = np.transpose(normal)
            # Normalize by per-pixel maximum intensity, apply the angle mask,
            # and scatter into the flattened observation map.
            temp = I*anglemask/np.transpose(imax)
            embed = np.zeros((m, w*w), np.float32)
            embed[:, light_idx] = temp
            embed = np.reshape(embed, (m, w, w))
            embed_rot.append(embed.copy())
            mask = np.zeros((m, w*w), np.bool_)
            mask[:, light_idx] = anglemask
            mask = np.reshape(mask, (m, w, w))
            mask_rot.append(mask.copy())
            normal_rot.append(normal.copy())
            rot.append(rotmat.copy())
            del embed, temp, normal, mask
    rows = w
    cols = w
    # Move the pixel axis to the front: (pixels, rotations, ...).
    embed_rot = np.array(embed_rot)
    embed_rot = np.transpose(embed_rot, (1,0,2,3))
    mask_rot = np.array(mask_rot)
    mask_rot = np.transpose(mask_rot, (1,0,2,3))
    normal_rot = np.array(normal_rot)
    normal_rot = np.transpose(normal_rot, (1,0,2))
    return np.array(embed_rot), np.array(mask_rot), np.array(normal_rot), np.array(rot), rows, cols
# main function for generating the observation map
def light_embedding_main(Iv, Nv, L, w, rotdiv, validind, isRandomThresh):
    """Build observation maps for the valid pixels and flatten the rotation axis.

    :param Iv: (num_pixels, num_lights) intensities
    :param Nv: (num_pixels, 3) surface normals
    :param L: (num_lights, 3) light directions
    :param w: observation-map side length
    :param rotdiv: number of rotation divisions (>1 uses the multiprocess path)
    :param validind: flat indices of pixels to keep
    :param isRandomThresh: forwarded to the embedding builders
    :return: (embed, mask, nm) with shapes (P*rotdiv, 1, w, w), (P*rotdiv, 1, w, w)
        and (P*rotdiv, 3)
    """
    # Keep only pixels that are both inside the mask and actually lit.
    imax = np.amax(Iv, axis=1)  # per-pixel maximum over all lights
    keep = np.intersect1d(validind, np.where(imax > 0))
    Iv = Iv[keep, :]
    Nv = Nv[keep, :]
    imax = imax[keep]
    # The multiprocess builder only pays off for several rotations.
    builder = light_embedding_2d_rot_invariant_multi if rotdiv > 1 else light_embedding_2d_rot_invariant
    embed, mask, nm, rot, rows, cols = builder(Iv, [imax], L, w, Nv, rotdiv, isRandomThresh)
    # Merge (pixel, rotation) into one leading axis and add a channel axis.
    embed = np.reshape(embed, (-1, 1, w, w))
    mask = np.reshape(mask, (-1, 1, w, w))
    nm = np.reshape(nm, (-1, 3))
    return embed, mask, nm
# prepare observation map for cyclesPS dataset (for training)
def prep_data_2d_from_images_cycles(dirlist, dirname, scale, w, rotdiv_in, rotdiv_on):
    """Prepare observation maps from the cyclesPS training dataset.

    Fix: the light file was previously "closed" with `f.close` (missing
    parentheses), so it was never actually closed; it is now read inside a
    `with` block.

    :param dirlist: list of object directories to process
    :param dirname: name of the image sub-directory inside each object directory
    :param scale: resize factor applied to images, masks and the normal map
    :param w: side length of the square observation map
    :param rotdiv_in: rotation divisions for pixels inside the object
    :param rotdiv_on: rotation divisions for pixels on the object boundary
    :return: (S, M, N) — observation maps, masks and ground-truth normals
    """
    S = []
    M = []
    N = []
    for d in dirlist:
        dirpath = d
        images_dir = dirpath + '/' + dirname
        normal_path = dirpath + '/' + 'gt_normal.tif'
        inboundary_path = dirpath + '/' + 'inboundary.png'
        onboundary_path = dirpath + '/' + 'onboundary.png'
        # read ground truth surface normal (16-bit tif, remapped to [-1,1])
        nml = np.float32(cv2.imread(normal_path,-1))/65535.0 # [-1,1]
        nml = nml[:,:,::-1]
        nml = 2*nml-1
        nml = cv2.resize(nml, None, fx = scale, fy = scale, interpolation = cv2.INTER_NEAREST)
        nShape = np.shape(nml)
        height = nShape[0]
        width = nShape[1]
        # read in/on-boundary masks and convert their pixel coords to flat indices
        # NOTE(review): the flat index uses `height` as the row stride; this is
        # only consistent if the images are square (height == width) — confirm.
        inboundary = cv2.imread(inboundary_path,-1)
        inboundary = cv2.resize(inboundary, None, fx = scale, fy = scale, interpolation = cv2.INTER_NEAREST)
        inboundary = np.where(inboundary>0)
        inboundary_ind = inboundary[0]*height + inboundary[1]
        onboundary = cv2.imread(onboundary_path,-1)
        onboundary = cv2.resize(onboundary, None, fx = scale, fy = scale, interpolation = cv2.INTER_NEAREST)
        onboundary = np.where(onboundary>0)
        onboundary_ind = onboundary[0]*height + onboundary[1]
        # read light directions, one "x y z" triple per line
        with open(dirpath + '/' 'light.txt') as f:
            data = f.read()
        lines = data.split('\n')
        numLight = len(lines)-1 # the last line is empty (how to fix it?)
        L = np.zeros((numLight,3), np.float32)
        for i,l in enumerate(lines):
            s = l.split(' ')
            if len(s) == 3:
                L[i,0] = float(s[0])
                L[i,1] = float(s[1])
                L[i,2] = float(s[2])
        # read images: one grayscale (channel-averaged) frame per light
        I = np.zeros((numLight, height, width), np.float32)
        for i in range(numLight):
            if i % np.floor(numLight/10) == 0:
                print('.',end='')
            image_path = images_dir + '/' + '%05d.tif' % i
            cv2_im = cv2.imread(image_path, -1)/65535.0
            cv2_im = (cv2_im[:,:,0] + cv2_im[:,:,1] + cv2_im[:,:,2])/3
            # NOTE(review): cv2.resize expects (width, height); passing
            # (height, width) is only correct for square images — confirm.
            cv2_im = cv2.resize(cv2_im, (height,width), interpolation = cv2.INTER_NEAREST)
            I[i,:,:] = cv2_im
        # flatten to per-pixel rows for the embedding step
        Iv = np.reshape(I,(numLight, height*width))
        Iv = np.transpose(Iv)
        Nv = np.reshape(nml,(height*width,3))
        embed_in, mask_in, nm_in = light_embedding_main(Iv, Nv, L, w, rotdiv_in, inboundary_ind, True)
        embed_on, mask_on, nm_on = light_embedding_main(Iv, Nv, L, w, rotdiv_on, onboundary_ind, True)
        # concatenate inside- and on-boundary samples for this object
        embed = []
        embed.append(embed_in.copy())
        embed.append(embed_on.copy())
        embed = np.concatenate(embed, axis=0 )
        mask = []
        mask.append(mask_in.copy())
        mask.append(mask_on.copy())
        mask = np.concatenate(mask, axis=0 )
        nm = []
        nm.append(nm_in.copy())
        nm.append(nm_on.copy())
        nm = np.concatenate(nm, axis=0 )
        S.append(embed.copy())
        M.append(mask.copy())
        N.append(nm.copy())
        print('')
        # free the large intermediates before processing the next object
        del embed_in, mask_in, nm_in
        del embed_on, mask_on, nm_on
        del embed, mask, nm, I, Iv, Nv
    # stack all objects and move to channels-last layout
    S = np.concatenate(S, axis=0 )
    M = np.concatenate(M, axis=0 )
    N = np.concatenate(N, axis=0 )
    S = np.reshape(S, (S.shape[0], S.shape[2], S.shape[3], 1))
    M = np.reshape(M, (M.shape[0], M.shape[2], M.shape[3], 1))
    return np.array(S), np.array(M), np.array(N)
# prepare observation maps for test data (i.e., DiLiGenT dataset)
def prep_data_2d_from_images_test(dirlist, scale, w, rotdiv, index=-1):
SList = []
NList = []
RList = []
IDList = []
SizeList = []
for d in dirlist:
print('load' + '%s' % d)
S = []
N = []
dirpath = d
images_dir = dirpath
normal_path = dirpath + '/' + 'normal.txt'
mask_path = dirpath + '/' + 'mask.png'
# get image imgSize
image_path = images_dir + '/' + '001.png'
cv2_im = cv2.imread(image_path, -1)
nShape = np.shape(cv2_im)
height = nShape[0]
width = nShape[1]
# read ground truth surface normal
f = open(normal_path)
data = f.read()
f.close
lines = np.float32(np.array(data.split('\n')))
nml = np.reshape(lines, (height,width,3))
nml = cv2.resize(nml, None, fx = scale, fy = scale, interpolation = cv2.INTER_NEAREST)
# nml = np.flipud(nml) # Uncomment when test on Harvest, the surface noraml needs to be fliped upside down
nShape = np.shape(nml)
height = nShape[0]
width = nShape[1]
# uncomment if you want to see the ground truth normal map
# plt.figure(figsize=(16,16))
# plt.imshow(np.uint8(127*(nml+1)))
# plt.axis('off')
# plt.show()
# read mask
mask = cv2.imread(mask_path,-1)
mask = cv2.resize(mask, None, fx = scale, fy = scale, interpolation = cv2.INTER_NEAREST)
validsub = np.where(mask>0)
validind = validsub[0]*width + validsub[1]
# read light directions
f = open(dirpath + '/' 'light_directions.txt')
data = f.read()
f.close
lines = data.split('\n')
numLight = len(lines)-1 # the last line is empty (how to fix it?)
L = np.zeros((numLight,3), np.float32)
for i,l in enumerate(lines):
s = l.split(' ')
if len(s) == 3:
L[i,0] = float(s[0])
L[i,1] = float(s[1])
L[i,2] = float(s[2])
# read light intensities
f = open(dirpath + '/' 'light_intensities.txt')
data = f.read()
f.close
lines = data.split('\n')
Li = np.zeros((numLight,3), np.float32)
for i,l in enumerate(lines):
s = l.split(' ')
if len(s) == 3:
Li[i,0] = float(s[0])
Li[i,1] = float(s[1])
Li[i,2] = float(s[2])
if index == -1:
setName = os.path.basename(dirpath.rstrip('/')) # if dirpath ends in '/' basename returns the empty string
if setName == 'bearPNG':
# the first 20 images of | |
<gh_stars>10-100
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------------------------
import sys, os, math, copy
import json
from math import sin, cos, sqrt, radians
import numpy as np
from decimal import Decimal
from collections import OrderedDict
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.patches import Patch
from matplotlib.widgets import Slider, Cursor, Button
from matplotlib.backend_bases import MouseEvent
import matplotlib.patches as patches
import matplotlib.ticker as ticker
import tkMessageBox
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tool'))
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'laban_tool'))
import settings
import labanProcessor as lp
import kp_extractor as kpex
import tool.accessory as ac
import tool.wavfilter as wf
import tool.cluster as cl
class Algorithm:
algorithm = None
ax = None
jointFrames = []
timeS = None
all_laban = []
unfilteredTimeS = None
unfilteredLaban = []
labandata = OrderedDict()
line_ene = None
vlines = None
y_data = []
points = []
data_fps = 30
dragging_sb = False
dragging_point = None
selectedFrame = 0
selectedFrameMarker = None
default_gauss_window_size = 31
default_gauss_sigma = 5
gauss_window_size = default_gauss_window_size
gauss_sigma = default_gauss_sigma
#------------------------------------------------------------------------------
# Class initialization
#
def __init__(self, algorithm):
self.algorithm = algorithm
#------------------------------------------------------------------------------
# reset class variables
#
def reset(self):
self.jointFrames = []
self.timeS = None
self.all_laban = []
self.unfilteredTimeS = None
self.unfilteredLaban = []
self.labandata = OrderedDict()
self.line_ene = None
self.vlines = None
self.y_data = []
self.points = []
self.data_fps = 30
self.dragging_sb = False
self.dragging_point = None
self.selectedFrame = 0
self.selectedFrameMarker = None
#------------------------------------------------------------------------------
# convert joint data frames to labanotation
#
    def convertToLabanotation(self, ax, jointD, forceReset):
        """Convert joint data frames to labanotation using the total-energy algorithm.

        :param ax: matplotlib axes to draw the energy plot on (may be None)
        :param jointD: sequence of joint-data frames; each frame exposes
            ['timeS'][0] and per-joint coordinates
        :param forceReset: if True, clear all previous conversion state first
        :return: (timeS, all_laban) as produced by totalEnergy()
        """
        if (forceReset):
            self.reset()
        self.ax = ax
        # Shallow copy is enough: frames themselves are not mutated here.
        self.jointFrames = copy.copy(jointD)
        cnt = len(jointD)
        self.data_fps = 30
        # Total clip duration = timestamp of the last frame (0 for empty input).
        self.duration = jointD[cnt-1]['timeS'][0] if (cnt > 0) else 0.0
        # clear canvas and any state tied to the previous plot
        if (self.ax != None):
            self.ax.clear()
        self.selectedFrameMarker = None
        self.line_ene = None
        self.vlines = None
        self.y_data = []
        self.points = []
        self.calculateUnfilteredLaban()
        return self.totalEnergy()
#------------------------------------------------------------------------------
# unfiltered labanotation
#
    def calculateUnfilteredLaban(self):
        """Compute per-frame labanotation directly from the raw joint frames.

        Fills self.unfilteredTimeS with frame timestamps and
        self.unfilteredLaban with one [right elbow, right wrist, left elbow,
        left wrist] labanotation entry per frame.
        """
        cnt = len(self.jointFrames)
        # get hand position: spherical coordinates for both elbows and wrists
        self.unfilteredTimeS = np.zeros(cnt)
        elR = np.zeros((cnt, 3))
        elL = np.zeros((cnt, 3))
        wrR = np.zeros((cnt, 3))
        wrL = np.zeros((cnt, 3))
        for i in range(0, cnt):
            self.unfilteredTimeS[i] = self.jointFrames[i]['timeS'][0]
            (elR[i], elL[i], wrR[i], wrL[i]) = lp.raw2sphere(self.jointFrames[i])
        # [right upper/elbow, right lower/wrist, left upper/elbow, left lower/wrist]
        # use coordinate2laban to generate labanotation for all frames
        self.unfilteredLaban = []
        for i in range(0, cnt):
            temp = []
            temp.append(lp.coordinate2laban(elR[i][1], elR[i][2]))
            temp.append(lp.coordinate2laban(wrR[i][1], wrR[i][2]))
            temp.append(lp.coordinate2laban(elL[i][1], elL[i][2]))
            temp.append(lp.coordinate2laban(wrL[i][1], wrL[i][2]))
            self.unfilteredLaban.append(temp)
#------------------------------------------------------------------------------
# apply total energy algoritm to joint data frames and calculate labanotation
#
    def totalEnergy(self):
        """Apply the total-energy algorithm to the joint frames and plot the result.

        Wrist trajectories are Gaussian-filtered, converted to velocity and
        acceleration, and folded into a scalar energy signal whose peaks
        become labanotation keyframes.

        :return: (timeS, all_laban) — keyframe timestamps and labanotation
        """
        cnt = len(self.jointFrames)
        # Collect raw left/right wrist positions, one row per frame.
        handR = np.zeros((cnt, 3))
        handL = np.zeros((cnt, 3))
        for i in range(0, cnt):
            handR[i][0] = self.jointFrames[i]['wristR']['x'][0] # meters to centimeters
            handR[i][1] = self.jointFrames[i]['wristR']['y'][0]
            handR[i][2] = self.jointFrames[i]['wristR']['z'][0]
            handL[i][0] = self.jointFrames[i]['wristL']['x'][0]
            handL[i][1] = self.jointFrames[i]['wristL']['y'][0]
            handL[i][2] = self.jointFrames[i]['wristL']['z'][0]
        # filtered by a Gaussian filter with window-size of 101 and sigma of 10
        # window-size of 61 also works
        gauss_window_size = self.gauss_window_size
        gauss_large_sigma = self.gauss_sigma
        gauss_small_sigma = 1
        # Smooth positions, then derive velocity and acceleration.
        gauss = wf.gaussFilter(gauss_window_size, gauss_large_sigma)
        handRF = wf.calcFilter(handR, gauss)
        handLF = wf.calcFilter(handL, gauss)
        handRv = ac.vel(self.unfilteredTimeS, handRF)
        handLv = ac.vel(self.unfilteredTimeS, handLF)
        handRa = ac.acc(self.unfilteredTimeS, handRv)
        handLa = ac.acc(self.unfilteredTimeS, handLv)
        # calculate energy
        energy = kpex.energy_function_ijcv(v_l=handLv, a_l=handLa, v_r=handRv, a_r=handRa)
        # calculate energy again with gauss_small_sigma ("naive" curve for comparison)
        gauss_small = wf.gaussFilter(gauss_window_size, gauss_small_sigma)
        handRF_small = wf.calcFilter(handR, gauss_small)
        handLF_small = wf.calcFilter(handL, gauss_small)
        handRv_small = ac.vel(self.unfilteredTimeS, handRF_small)
        handLv_small = ac.vel(self.unfilteredTimeS, handLF_small)
        handRa_small = ac.acc(self.unfilteredTimeS, handRv_small)
        handLa_small = ac.acc(self.unfilteredTimeS, handLv_small)
        # calculate energy
        energy_small = kpex.energy_function_ijcv(v_l=handLv_small, a_l=handLa_small, v_r=handRv_small, a_r=handRa_small)
        # Keyframe candidates: peaks/dips of the smoothed energy signal.
        indices = kpex.gaussian_pecdec(energy)
        self.y_data = []
        self.y_data = energy
        self.points = {}
        self.points = dict(zip(indices, self.y_data[indices]))
        # Plot both energy curves plus a legend, if a canvas is attached.
        if (self.ax != None):
            xmax = max(self.unfilteredTimeS) / 1000.0
            self.ax.plot(energy, color='dimgray', label='Total')
            self.ax.plot(energy_small, color='mediumpurple', label='Naive')
            self.ax.set_xlim((0, len(energy)-1))
            self.ax.set_ylim((min(energy)-0.5, max(energy)+0.5))
            # Map frame index on the x-axis to seconds for the tick labels.
            def format_func(value, tick_number):
                cnt = len(self.unfilteredTimeS)
                idx = int(value)
                if (idx < 0) or (idx >= cnt):
                    return ""
                time = self.unfilteredTimeS[idx] / 1000.0
                return r"${:.2f}$".format(time)
            # look at https://matplotlib.org/3.1.1/gallery/ticks_and_spines/tick-locators.html for fine-tuning ticks
            self.ax.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
            self.ax.tick_params(axis='y', labelsize=8)
            legend_elements = [Line2D([0], [0], color='dimgray', label='Energy'),
                Line2D([0], [0], color='mediumpurple', label='Naive Energy'),
                Patch(facecolor='wheat', edgecolor='wheat', alpha=0.4, label='Labanotation Frame Blocks'),
                Patch(facecolor='tan', edgecolor='tan', alpha=0.4, label='Labanotation Transition Block'),
                Line2D([0], [0], marker='o', color='w', label='Peaks', markerfacecolor='slategrey', markersize=10),
                Line2D([0], [0], marker='o', color='w', label='Inflection', markerfacecolor='k', markersize=10),
                Line2D([0], [0], marker='*', color='w', label='Labanotation Key Frames', markerfacecolor='g', markersize=16)]
            self.ax.legend(handles=legend_elements, bbox_to_anchor=(0, 1), loc=3, ncol=7) # , mode='expand', borderaxespad=0)
        self.updateEnergyPlotAndLabanScore(True)
        self.highlightLabanotationRegions(self.unfilteredLaban, (min(energy)-0.5, max(energy)+0.5))
        # additional energy markers
        if (self.ax != None):
            corner,_,_ = cl.peak_dect(energy, y_thres=0)
            self.ax.plot(energy, '.', color = 'slategrey', mew=3, markersize=14, markevery=corner) # bottom
            infl = ac.inflection(energy)
            self.ax.plot(energy, '.', color = 'k', mew=3, markersize=12, markevery=infl)
        self.setSelectedFrameMarker()
        return (self.timeS, self.all_laban)
#------------------------------------------------------------------------------
# plot different colors for each labanotation region.
#
def highlightLabanotationRegions(self, laban, y):
if (self.ax == None):
return
laban_sect = ac.split(laban)
cnt = len(laban)
for i in range(len(laban_sect)):
start = laban_sect[i][0]
end = laban_sect[i][1]
# color and alpha
c = 'wheat'
a = 0.4
if start == end:
c = 'tan'
a = 0.4
# (x_start, y_start), x_width, y_width, alpha
if (i < len(laban_sect) - 1):
x_width = end - start + 0.5
else:
x_width = cnt - start + 0.25
p = patches.Rectangle((start-0.25, y[0]), x_width, y[1]-y[0], alpha=a, color=c)
self.ax.add_patch(p)
#------------------------------------------------------------------------------
#
def getLabanotationKeyframeData(self, idx, time, dur, laban):
data = OrderedDict()
data["start time"] = [str(time)]
data["duration"] = [str(dur)]
data["head"] = ['Forward','Normal']
data["right elbow"] = [laban[0][0], laban[0][1]]
data["right wrist"] = [laban[1][0], laban[1][1]]
data["left elbow"] = [laban[2][0], laban[2][1]]
data["left wrist"] = [laban[3][0], laban[3][1]]
data["rotation"] = ['ToLeft','0']
return data
#------------------------------------------------------------------------------
# update labanotation key frames
#
    def updateLaban(self, indices):
        """Rebuild labanotation keyframe data from selected frame indices.

        Populates ``self.timeS`` (keyframe times in ms), ``self.all_laban``
        (per-keyframe labanotation) and ``self.labandata`` (JSON-ready
        ordered dict). A leading keyframe is prepended when the first
        selected index is not frame 0, and a trailing one appended when
        the last selected index is not the final frame.

        Parameters
        ----------
        indices : sequence of int
            Sorted frame indices chosen as labanotation keyframes.
        """
        self.labandata = OrderedDict()
        positions = []
        self.timeS = []
        self.all_laban = []
        # generate labanotation json data structure
        idx = 0
        cnt = len(indices)
        if (cnt == 0):
            return
        for i in range(cnt):
            j = indices[i]
            # add an initial labanotation keyframe
            if ((i==0) and (j != i)):
                time = int(self.unfilteredTimeS[i])
                # NOTE(review): dur is an int here but a str ('1'/'-1')
                # everywhere else in this method -- confirm consumers of
                # labandata accept both forms
                dur = 1
                # store new time and laban
                self.timeS.append(time)
                self.all_laban.append(self.unfilteredLaban[i])
                # NOTE(review): "Position"+str(i) with i==0 here collides
                # with the key generated in the first loop iteration below,
                # so the later entry overwrites this one in labandata --
                # str(idx) may have been intended; verify
                positions.append("Position"+str(i))
                self.labandata[positions[idx]] = self.getLabanotationKeyframeData(idx, time, dur, self.unfilteredLaban[i])
                idx = idx + 1
            time = int(self.unfilteredTimeS[j])
            # NOTE(review): this compares the frame index j against the
            # number of *selected* indices (cnt), not the frame count --
            # confirm whether `i == (cnt-1)` was intended
            if (j == (cnt-1)):
                dur = '-1'
            else:
                dur = '1'
            # store new time and laban
            self.timeS.append(time)
            self.all_laban.append(self.unfilteredLaban[j])
            positions.append("Position"+str(i))
            self.labandata[positions[idx]] = self.getLabanotationKeyframeData(idx, time, dur, self.unfilteredLaban[j])
            idx = idx + 1
        # add a final labanotation keyframe
        i = len(self.unfilteredLaban) - 1
        j = indices[cnt - 1]
        if (j != i):
            time = int(self.unfilteredTimeS[i])
            # final keyframe holds until the end (duration -1)
            dur = '-1'
            # store new time and laban
            self.timeS.append(time)
            self.all_laban.append(self.unfilteredLaban[i])
            positions.append("Position"+str(i))
            self.labandata[positions[idx]] = self.getLabanotationKeyframeData(idx, time, dur, self.unfilteredLaban[i])
            idx = idx + 1
#------------------------------------------------------------------------------
# update energy markers and lines, and labanotation score
#
    def updateEnergyPlotAndLabanScore(self, updateLabanScore=False):
        """Redraw keyframe markers and vertical lines on the energy plot,
        optionally regenerating the labanotation score.

        Parameters
        ----------
        updateLabanScore : bool
            When True, rebuild the labanotation keyframes from the
            currently selected points and push them to the application.
        """
        if (self.ax != None):
            if not self.points:
                return
            # self.points maps frame index -> y; sort by frame index
            x, y = zip(*sorted(self.points.items()))
            if not self.line_ene:
                # Add new plot
                self.line_ene, = self.ax.plot(self.y_data, '*', color = 'g', mew=3, markersize=14, markevery=list(x))
            else:
                # Update current plot
                self.line_ene.set_data(range(len(self.y_data)),self.y_data)
                self.line_ene.set_markevery(list(x))
                self.ax.draw_artist(self.line_ene)
            # plot vertical lines to denote labanotation keyframes
            xs = list(x)
            xs = np.array((xs, ) if np.isscalar(xs) else xs, copy=False)
            lims = self.ax.get_ylim()
            # build (x, y_low), (x, y_high), (x, nan) triples so that a
            # single Line2D draws all vertical lines; the NaN breaks the
            # polyline between consecutive keyframes
            x_points = np.repeat(xs[:, None], repeats=3, axis=1).flatten()
            y_points = np.repeat(np.array(lims + (np.nan, ))[None, :], repeats=len(xs), axis=0).flatten()
            if not self.vlines:
                # Add new plot
                self.vlines, = self.ax.plot(x_points, y_points, scaley = False, color='g')
            else:
                # Update current plot
                self.vlines.set_data(x_points, y_points)
                self.ax.draw_artist(self.vlines)
            self.ax.figure.canvas.draw_idle()
        # update laban score
        if (updateLabanScore) and (self.points):
            tmp_indices, _ = zip(*sorted(self.points.items()))
            new_indices = list(tmp_indices)
            self.updateLaban(new_indices)
            settings.application.updateLaban(self.timeS, self.all_laban)
#------------------------------------------------------------------------------
#
def add_point(self, x, y=None):
if isinstance(x, MouseEvent):
x, y = int(x.xdata), int(x.ydata)
y_on_curve = self.y_data[x]
self.points[x] = y_on_curve
return x, y_on_curve
#------------------------------------------------------------------------------
#
def remove_point(self, x, _):
if x in self.points:
self.points.pop(x)
#------------------------------------------------------------------------------
#
def setSelectedFrameMarker(self):
if (self.ax is None):
return
cnt = len(self.jointFrames)
idx = self.selectedFrame
if ((idx is None) or (idx < 0) or (idx >= cnt)):
return
time = idx
padding = 1.0 / 6.0
if (self.selectedFrameMarker is None):
yy = self.ax.get_ylim()
self.selectedFrameMarker = patches.Rectangle((time-padding, yy[0]), 2*padding, (yy[1]-yy[0]), alpha=0.5, color='purple')
self.ax.add_patch(self.selectedFrameMarker)
else:
self.selectedFrameMarker.set_x(time-padding)
#------------------------------------------------------------------------------
#
def findNearestFrameForTime(self, time):
cnt = len(self.jointFrames)
if (cnt == 0):
return None
timeMS = time
# find the frame corresponding to the given time
for idx in range(0, cnt):
kt = self.unfilteredTimeS[idx]
if (kt == timeMS):
return idx
elif (kt > timeMS):
break
# should not get here if idx == 0, but let's check anyway
if (idx == 0):
return idx
# now that we | |
# Source repository: AT-jamesp0013/Theano
from __future__ import absolute_import, division, print_function
import os
import warnings
import pkg_resources
import numpy as np
from numpy.linalg.linalg import LinAlgError
import theano
from theano import Op, config, tensor
from theano.scalar import bool as bool_t
from theano.gof import COp, ParamsType
from theano.gpuarray import GpuArrayType
from .basic_ops import as_gpuarray_variable, gpu_contiguous, infer_context_name
from .type import gpu_context_type
# Optional GPU dependencies: both pygpu and scikit-cuda's cusolver wrapper
# are probed at import time; availability flags gate op construction below.
try:
    import pygpu
    from pygpu.basic import triu, tril
    pygpu_available = True
except ImportError:
    pygpu_available = False
cusolver_available = False
try:
    import skcuda
    from skcuda import cusolver
    cusolver_available = True
except (ImportError, OSError, RuntimeError, pkg_resources.DistributionNotFound):
    pass
if cusolver_available:
    # Add cusolver call as it is missing in skcuda
    # SPOTRS
    cusolver._libcusolver.cusolverDnSpotrs.restype = int
    cusolver._libcusolver.cusolverDnSpotrs.argtypes = [cusolver.ctypes.c_void_p,
                                                       cusolver.ctypes.c_int,
                                                       cusolver.ctypes.c_int,
                                                       cusolver.ctypes.c_int,
                                                       cusolver.ctypes.c_void_p,
                                                       cusolver.ctypes.c_int,
                                                       cusolver.ctypes.c_void_p,
                                                       cusolver.ctypes.c_int,
                                                       cusolver.ctypes.c_void_p]
    def cusolverDnSpotrs(handle, uplo, n, nrhs, A, lda,
                         B, ldb, devInfo):
        """
        Solve real single precision linear system for hermitian matrices.
        References
        ----------
        `cusolverDn<t>potrs <http://docs.nvidia.com/cuda/cusolver/index.html#cuds-lt-t-gt-potrs>`_
        """
        status = cusolver._libcusolver.cusolverDnSpotrs(handle, uplo, n, nrhs,
                                                        int(A), lda, int(B),
                                                        ldb, int(devInfo))
        # raises on a non-zero cusolver status code
        cusolver.cusolverCheckStatus(status)
    def attach_cusolver_handle_to_context(ctx):
        # lazily create and cache one cusolver handle per GPU context
        handle = getattr(ctx, 'cusolver_handle', None)
        if handle is None:
            with ctx:
                ctx.cusolver_handle = cusolver.cusolverDnCreate()
# it is a subset of all cases available in slinalg's MATRIX_STRUCTURE
MATRIX_STRUCTURES_SOLVE = (
    'general',
    'symmetric')
class GpuCusolverSolve(Op):
    """
    CUSOLVER GPU solver OP.
    Parameters
    ----------
    trans
        Whether to take the transpose of the input matrix or not.
    """
    __props__ = ('A_structure', 'trans', 'inplace')
    def __init__(self, A_structure='general', trans='N', inplace=False):
        # A_structure selects the factorization used in perform():
        # 'symmetric' -> Cholesky (potrf/potrs), 'general' -> LU (getrf/getrs)
        self.trans = trans
        self.inplace = inplace
        self.A_structure = A_structure
        if self.inplace:
            # output 0 aliases (destroys) input 0
            self.destroy_map = {0: [0]}
        assert A_structure in MATRIX_STRUCTURES_SOLVE
        super(GpuCusolverSolve, self).__init__()
    def make_node(self, inp1, inp2):
        # inp1 is the system matrix A, inp2 the right-hand side b; both
        # must be 2-D float32 GPU arrays on the same context.
        if not cusolver_available:
            raise RuntimeError('CUSOLVER is not available and '
                               'GpuCusolverSolve Op can not be constructed.')
        if skcuda.__version__ <= '0.5.1':
            # NOTE(review): lexicographic string comparison of version
            # numbers (e.g. '0.10.0' <= '0.5.1' is True) -- may warn
            # spuriously on newer releases; confirm
            warnings.warn('The GpuSolve op requires scikit-cuda > 0.5.1 to work with CUDA 8')
        context_name = infer_context_name(inp1, inp2)
        inp1 = as_gpuarray_variable(inp1, context_name)
        inp2 = as_gpuarray_variable(inp2, context_name)
        inp1 = gpu_contiguous(inp1)
        inp2 = gpu_contiguous(inp2)
        # this op can only operate on float32 matrices
        assert inp1.ndim == 2
        assert inp2.ndim == 2
        assert inp1.dtype == 'float32'
        assert inp2.dtype == 'float32'
        return theano.Apply(
            self, [inp1, inp2],
            [GpuArrayType('float32',
                          broadcastable=inp1.broadcastable,
                          context_name=context_name)()])
    def prepare_node(self, node, storage_map, compute_map, impl):
        # make sure the GPU context carries a cached cusolver handle
        ctx = node.inputs[0].type.context
        attach_cusolver_handle_to_context(ctx)
    def check_dev_info(self, dev_info):
        # cusolver writes its status into dev_info; a positive value
        # signals a singular factor, so the solve cannot proceed
        val = np.asarray(dev_info)[0]
        if val > 0:
            raise LinAlgError('A is singular')
    def perform(self, node, inputs, outputs):
        # Factorize A and solve A x = b on the GPU; the solution is
        # written into outputs[0].
        context = inputs[0][0].context
        # Size of the matrices to invert.
        z = outputs[0]
        # Matrix.
        A = inputs[0]
        # Solution vectors.
        b = inputs[1]
        assert(len(A.shape) == 2)
        assert(len(b.shape) == 2)
        if self.trans in ['T', 'C']:
            trans = 1
            l, n = A.shape
            k, m = b.shape
        elif self.trans == 'N':
            trans = 0
            n, l = A.shape
            k, m = b.shape
        else:
            raise ValueError('Invalid value for trans')
        if l != n:
            raise ValueError('A must be a square matrix')
        if n != k:
            raise ValueError('A and b must be aligned.')
        lda = max(1, n)
        ldb = max(1, k)
        # We copy A and b as cusolver operates inplace
        b = pygpu.array(b, copy=True, order='F')
        if not self.inplace:
            A = pygpu.array(A, copy=True)
        A_ptr = A.gpudata
        b_ptr = b.gpudata
        # cusolver expects a F ordered matrix, but A is not explicitly
        # converted between C and F order, instead we switch the
        # "transpose" flag.
        if A.flags['C_CONTIGUOUS']:
            trans = 1 - trans
        if self.A_structure == 'symmetric':
            # Cholesky path: potrf to factorize, potrs to solve
            with context:
                workspace_size = cusolver.cusolverDnSpotrf_bufferSize(
                    context.cusolver_handle, 0, n, A_ptr, lda)
            workspace = pygpu.zeros(workspace_size, dtype='float32',
                                    context=context)
            dev_info = pygpu.zeros((1,), dtype='int32', context=context)
            workspace_ptr = workspace.gpudata
            dev_info_ptr = dev_info.gpudata
            with context:
                cusolver.cusolverDnSpotrf(
                    context.cusolver_handle, 0, n, A_ptr, lda, workspace_ptr,
                    workspace_size, dev_info_ptr)
                self.check_dev_info(dev_info)
                cusolverDnSpotrs(
                    context.cusolver_handle, 0, n, m, A_ptr, lda,
                    b_ptr, ldb, dev_info_ptr)
        else:
            # general case for A: LU with pivoting (getrf), then getrs
            with context:
                workspace_size = cusolver.cusolverDnSgetrf_bufferSize(
                    context.cusolver_handle, n, n, A_ptr, lda)
            workspace = pygpu.zeros(workspace_size, dtype='float32',
                                    context=context)
            pivots = pygpu.zeros(n, dtype='int32', context=context)
            dev_info = pygpu.zeros((1,), dtype='int32', context=context)
            workspace_ptr = workspace.gpudata
            pivots_ptr = pivots.gpudata
            dev_info_ptr = dev_info.gpudata
            with context:
                cusolver.cusolverDnSgetrf(
                    context.cusolver_handle, n, n, A_ptr, lda, workspace_ptr,
                    pivots_ptr, dev_info_ptr)
                self.check_dev_info(dev_info)
                cusolver.cusolverDnSgetrs(
                    context.cusolver_handle, trans, n, m, A_ptr, lda,
                    pivots_ptr, b_ptr, ldb, dev_info_ptr)
        # b was overwritten with the solution by getrs/potrs
        z[0] = b
def gpu_solve(A, b, A_structure='general', trans='N'):
    """Solve the linear system ``A x = b`` on the GPU via cuSOLVER.

    Parameters
    ----------
    A
        Square 2-D float32 GPU matrix.
    b
        2-D float32 GPU right-hand side, aligned with ``A``.
    A_structure : str
        Either ``'general'`` (LU) or ``'symmetric'`` (Cholesky).
    trans : str
        ``'N'`` for ``A``, ``'T'``/``'C'`` for its transpose.
    """
    op = GpuCusolverSolve(A_structure, trans)
    return op(A, b)
class GpuCholesky(Op):
    """
    CUSOLVER GPU Cholesky Op.
    Given a real positive definite matrix `A` returns either a lower
    triangular matrix `L` such that `A == dot(L, L.T)` if `lower == True`
    else returns an upper triangular matrix `U` such that `A == dot(U.T, U)`
    if `lower == False`.
    Parameters
    ----------
    lower
        Whether to return a lower rather than upper triangular decomposition.
    """
    __props__ = ('lower', 'inplace')
    def __init__(self, lower=True, inplace=False):
        self.lower = lower
        self.inplace = inplace
        if self.inplace:
            # output 0 aliases (destroys) input 0
            self.destroy_map = {0: [0]}
        super(GpuCholesky, self).__init__()
    def make_node(self, inp):
        # inp must be a square 2-D float32 GPU array; both cusolver and
        # pygpu's triu/tril are required at runtime.
        if not cusolver_available:
            raise RuntimeError('CUSOLVER is not available and '
                               'GpuCholesky Op can not be constructed.')
        if skcuda.__version__ <= '0.5.1':
            # NOTE(review): lexicographic version comparison -- may warn
            # spuriously for versions like '0.10.0'; confirm
            warnings.warn('The GpuCholesky op requires scikit-cuda > 0.5.1 to work with CUDA 8')
        if not pygpu_available:
            raise RuntimeError('Missing pygpu or triu/tril functions.'
                               'Install or update libgpuarray.')
        context_name = infer_context_name(inp)
        inp = as_gpuarray_variable(inp, context_name)
        inp = gpu_contiguous(inp)
        # this op can only operate on float32 matrices
        # because of current implementation of triu/tril.
        # TODO: support float64 for triu/tril in GpuArray and for GpuCholesky/GpuCusolverSolve in Theano.
        assert inp.ndim == 2
        assert inp.dtype == 'float32'
        return theano.Apply(self, [inp], [inp.type()])
    def prepare_node(self, node, storage_map, compute_map, impl):
        # make sure the GPU context carries a cached cusolver handle
        ctx = node.inputs[0].type.context
        attach_cusolver_handle_to_context(ctx)
    def perform(self, node, inputs, outputs):
        # Run potrf on the GPU and zero the untouched triangle so the
        # output is a clean triangular factor.
        context = inputs[0][0].context
        # Input matrix.
        A = inputs[0]
        l, n = A.shape
        if l != n:
            raise ValueError('A must be a square matrix')
        lda = max(1, n)
        # cusolver operates on F ordered matrices, but A is expected
        # to be symmetric so it does not matter.
        # We copy A if needed
        if self.inplace:
            L = A
        else:
            L = pygpu.array(A, copy=True)
        # The output matrix will contain only the upper or lower
        # triangular factorization of A. If L is C ordered (it
        # probably is as it is the default in Theano) we just switch
        # the fill mode parameter of cusolver
        l_parameter = 0 if self.lower else 1
        if L.flags['C_CONTIGUOUS']:
            l_parameter = 1 - l_parameter
        L_ptr = L.gpudata
        with context:
            workspace_size = cusolver.cusolverDnSpotrf_bufferSize(
                context.cusolver_handle, l_parameter, n, L_ptr, lda)
            workspace = pygpu.zeros(workspace_size, dtype='float32',
                                    context=context)
            dev_info = pygpu.zeros((1,), dtype='int32', context=context)
            workspace_ptr = workspace.gpudata
            dev_info_ptr = dev_info.gpudata
            cusolver.cusolverDnSpotrf(
                context.cusolver_handle, l_parameter, n, L_ptr, lda, workspace_ptr,
                workspace_size, dev_info_ptr)
            # a positive dev_info means A's leading minor of that order
            # is not positive definite
            val_dev_info = np.asarray(dev_info)[0]
            if val_dev_info > 0:
                raise LinAlgError('Cholesky decomposition failed (is A SPD?)')
        # cusolver leaves the elements in the matrix outside the considered
        # upper or lower triangle unchanged, so we need to put zeros outside
        # the triangle
        if self.lower:
            tril(L)
        else:
            triu(L)
        outputs[0][0] = L
def gpu_cholesky(A, lower=True):
    """Compute the Cholesky factor of ``A`` on the GPU via cuSOLVER.

    Parameters
    ----------
    A
        Square 2-D float32 GPU matrix, assumed symmetric positive
        definite.
    lower : bool
        Return the lower-triangular factor when True, otherwise the
        upper-triangular one.
    """
    op = GpuCholesky(lower=lower)
    return op(A)
class GpuMagmaSVD(COp):
"""Computes the svd of a matrix :math:`A` using magma library.
.. warning::
Because of implementation constraints, this Op returns outputs
in order ``S, U, VT``. Use :func:`theano.gpuarray.linalg.gpu_svd`
to get them in expected order ``U, S, VT``.
"""
__props__ = ('full_matrices', 'compute_uv')
_cop_num_inputs = 1
_cop_num_outputs = 3
check_input = False
params_type = ParamsType(full_matrices=bool_t, context=gpu_context_type)
def __init__(self, full_matrices=True, compute_uv=True):
self.full_matrices = full_matrices
self.compute_uv = compute_uv
COp.__init__(self, ['magma_svd.c'], 'APPLY_SPECIFIC(magma_svd)')
def c_headers(self):
return ['gpuarray/types.h', 'gpuarray/array.h', 'gpuarray/ext_cuda.h',
'gpuarray_helper.h', 'magma.h']
def c_header_dirs(self):
dirs = [os.path.dirname(__file__), pygpu.get_include()]
if config.magma.include_path:
dirs.append(config.magma.include_path)
return dirs
def c_libraries(self):
return ['magma']
def c_lib_dirs(self):
if config.magma.library_path:
return [config.magma.library_path]
return []
def make_node(self, A):
ctx_name = infer_context_name(A)
A = as_gpuarray_variable(A, ctx_name)
if A.ndim != 2:
raise LinAlgError("Matrix rank error")
assert A.dtype == 'float32'
if self.compute_uv:
return theano.Apply(self, [A],
# return S, U, VT
[GpuArrayType(A.dtype, broadcastable=[False],
context_name=ctx_name)(),
A.type(),
A.type()])
else:
return theano.Apply(self, [A],
# return only S
[GpuArrayType(A.dtype, broadcastable=[False],
context_name=ctx_name)()])
def prepare_node(self, node, storage_map, compute_map, impl):
# Check node to prevent eventual errors with old pickled nodes.
if self.compute_uv:
A, B, C = node.outputs
# We expect order: S (vector), U (matrix), VT (matrix)
assert A.type.ndim == 1 and B.type.ndim == C.type.ndim == 2, \
"Due to implementation constraints, GpuMagmaSVD interface has changed and now returns (S, U, VT) " \
"instead of (U, S, VT). Either update your code, or use gpu_svd() to get the expected (U, S, VT) order."
def get_params(self, node):
return self.params_type.get_params(self, context=node.inputs[0].type.context)
def infer_shape(self, node, shapes):
x_shape, = shapes
M, N = x_shape
K = tensor.minimum(M, N)
s_shape = (K, )
if self.compute_uv:
| |
Grey": 0xA8C0BB,
"Harbour Light": 0xD7E0E7,
"Harbour Mist": 0xDAE1E3,
"Harbour Mist Grey": 0x778071,
"Harbour Rat": 0x757D75,
"Harbour Sky": 0x7EB6D0,
"Harbourmaster": 0x4E536B,
"Hard Candy": 0xFFBBBB,
"Hard Coal": 0x656464,
"Hardware": 0x8B8372,
"<NAME>": 0x006383,
"H<NAME>": 0xA52A2A,
"Harlequin": 0x3FFF00,
"Harlequin Green": 0x46CB18,
"<NAME>": 0xC93413,
"Harlock's Cape": 0xBB0000,
"Harmonic Tan": 0xC1B287,
"Harmonious": 0xAFC195,
"Harmonious Gold": 0xEACFA3,
"Harmonious Rose": 0xF29CB7,
"Harold": 0x6D6353,
"Harp": 0xCBCEC0,
"Harpoon": 0x283B4C,
"<NAME>": 0x493C2B,
"Harrison Grey": 0x989B9E,
"Harrison Rust": 0x9A5F3F,
"Harrow Gate": 0xDBD4C7,
"Harrow's Gate": 0x7E8E90,
"Harvard Crimson": 0xC90016,
"Harvest at Dusk": 0xCB862C,
"Harvest Blessing": 0xBA8E4E,
"Harvest Brown": 0xB9A589,
"Harvest Dance": 0xA5997C,
"Harvest Eve Gold": 0xDA9100,
"Harvest Gold": 0xEAB76A,
"Harvest Home": 0xCBAE84,
"Harvest Night": 0x554488,
"Harvest Oak": 0x65564F,
"Harvest Pumpkin": 0xD56231,
"Harvest Time": 0xCF875F,
"Harvest Wreath": 0xF7D7C4,
"Harvester": 0xEDC38E,
"Hashibami Brown": 0xBFA46F,
"Hashita Purple": 0x8D608C,
"Hashut Copper": 0xC9643B,
"Hassan II Mosque": 0x009E6D,
"Hat Box Brown": 0x8F775D,
"Hatching Chameleon": 0xCFEBDE,
"Hatoba Pigeon": 0x95859C,
"Hatoba-Nezumi Grey": 0x9E8B8E,
"Haunted Dreams": 0x333355,
"Haunted Hills": 0x003311,
"Haunting Hue": 0xD3E0EC,
"Haunting Melody": 0x824855,
"Haute Couture": 0xA0252A,
"Haute Pink": 0xD899B1,
"Haute Red": 0xA11729,
"Havana": 0x3B2B2C,
"Havana Blue": 0xA5DBE5,
"Havana Cigar": 0xAF884A,
"Havana Coffee": 0x554941,
"Havana Cream": 0xF9E5C2,
"Havasu": 0x007993,
"Havasupai Falls": 0x0FAFC6,
"Havelock Blue": 0x5784C1,
"Haven": 0xA3B48C,
"Hawaii Morning": 0x00BBFF,
"Hawaiian Breeze": 0x75C7E0,
"Hawaiian Cinder": 0x6F4542,
"Hawaiian Coconut": 0x99522C,
"Hawaiian Cream": 0xFAE8B8,
"Hawaiian Ocean": 0x008DB9,
"Hawaiian Passion": 0xFFA03E,
"Hawaiian Pineapple": 0xFDD773,
"Hawaiian Shell": 0xF3DBD9,
"Hawaiian Sky": 0x83A2BD,
"Hawaiian Sunset": 0xBB5C14,
"Hawaiian Surf": 0x0078A7,
"Hawaiian Vacation": 0x77CABD,
"Hawk Grey": 0x77757D,
"Hawk Turquoise": 0x00756A,
"Hawk’s Eye": 0x34363A,
"Hawkbit": 0xFDDB6D,
"Hawker's Gold": 0xF4C26C,
"Hawkes Blue": 0xD2DAED,
"Hawkesbury": 0x729183,
"Hawthorn Berry": 0xCC1111,
"Hawthorn Blossom": 0xEEFFAA,
"Hawthorn Rose": 0x884C5E,
"Hawthorne": 0xCED7C1,
"Hay": 0xD3CCA3,
"Hay Day": 0xDACD81,
"Hay Wain": 0xCDAD59,
"Hay Yellow": 0xC2A770,
"Hayden Valley": 0x5F5D50,
"Hayloft": 0xCDBA96,
"Hayride": 0xD4AC99,
"Haystack": 0xF1E3C7,
"Haystacks": 0xCFAC47,
"Haze": 0xC8C2C6,
"Haze Blue": 0xB7C0BE,
"Hazed Nuts": 0xC39E6D,
"Hazel": 0xAE7250,
"Hazel Blush": 0xEAE2DE,
"Hazel Gaze": 0xB8BFB1,
"Hazel Woods": 0x4A564D,
"Hazelnut": 0xA8715A,
"Hazelnut Chocolate": 0x7B3F00,
"Hazelnut Cream": 0xE6DFCF,
"Hazelnut Milk": 0xEEAA77,
"Hazelnut Turkish Delight": 0xFCE974,
"Hazelwood": 0xFFF3D5,
"Hazy Blue": 0xBCC8CC,
"Hazy Daze": 0xA5B8C5,
"Hazy Grove": 0xF2F1DC,
"Hazy Mauve": 0xC8C6CE,
"Hazy Moon": 0xF1DCA1,
"Hazy Rose": 0xB39897,
"Hazy Skies": 0xADBBC4,
"Hazy Sky": 0xB7BDD6,
"Hazy Taupe": 0xD5C3B5,
"Hazy Trail": 0xDCDACE,
"He Loves Me": 0xE1DBE3,
"Hè Sè Brown": 0x7F5E00,
"Head in the Clouds": 0xD1DDE1,
"Head in the Sand": 0xEBE2DE,
"Healing Aloe": 0xB9CAB3,
"Healing Plant": 0x6C7D42,
"Healing Retreat": 0xBAC2AA,
"Heart Gold": 0x808000,
"Heart of Gold": 0x9D7F4C,
"Heart Stone": 0xEDE3DF,
"Heart Throb": 0xCB3D3C,
"Heart to Heart": 0xD4A9C3,
"Heart's Content": 0xE2B5BD,
"Heart's Desire": 0xAC3E5F,
"Heartbeat": 0xAA0000,
"Heartbreaker": 0xCC76A3,
"Heartfelt": 0xFFADC9,
"Hearth": 0xE1CCA6,
"Hearth Gold": 0xA17135,
"Hearthstone": 0xC7BEB2,
"Heartless": 0x623B70,
"Hearts of Palm": 0xCFC291,
"Heartthrob": 0xA82E33,
"Heartwood": 0x6F4232,
"Hearty Hosta": 0x96BF83,
"Hearty Orange": 0xB44B34,
"Heat of Summer": 0xE98D5B,
"Heat Signature": 0xE3000E,
"Heat Wave": 0xFF7A00,
"Heath": 0x4F2A2C,
"Heath Green": 0x9ACDA9,
"Heath Grey": 0xC9CBC2,
"Heath Spotted Orchid": 0x9F5F9F,
"Heather": 0xA484AC,
"Heather Berry": 0xE75480,
"Heather Feather": 0xC3ADC5,
"Heather Field": 0x909095,
"Heather Grey": 0x9C9DA4,
"Heather Hill": 0xBBB0BB,
"Heather Moor": 0x998E8F,
"Heather Plume": 0xA39699,
"Heather Red Grey": 0x988E94,
"Heather Rose": 0xAD6D7F,
"Heather Sachet": 0x7B7173,
"Heather Violet": 0xB18398,
"Heathered Grey": 0xB6B095,
"Heating Lamp": 0xEE4422,
"Heaven Sent": 0xEEE1EB,
"Heaven Sent Storm": 0xCAD6DE,
"Heavenly": 0x7EB2C5,
"Heavenly Aromas": 0xEEDFD5,
"Heavenly Blue": 0xA3BBCD,
"Heavenly Cocoa": 0xBEA79D,
"Heavenly Garden": 0x93A394,
"Heavenly Haze": 0xD8D5E3,
"Heavenly Pink": 0xF4DEDE,
"Heavenly Sky": 0x6B90B3,
"Heavenly Song": 0xFBD9C6,
"Heavenly White": 0xEBE8E6,
"Heavy Black Green": 0x3A514D,
"Heavy Blue Grey": 0x9FABAF,
"Heavy Brown": 0x73624A,
"Heavy Charcoal": 0x565350,
"Heavy Cream": 0xE8DDC6,
"Heavy Gluten": 0xDDCCAA,
"Heavy Goldbrown": 0xBAAB74,
"Heavy Green": 0x49583E,
"Heavy Grey": 0x82868A,
"Heavy Hammock": 0xBEB9A2,
"Heavy Heart": 0x771122,
"Heavy Khaki": 0x5E6A34,
"Heavy Metal": 0x46473E,
"Heavy Metal Armor": 0x888A8E,
"Heavy Ochre": 0x9B753D,
"Heavy Orange": 0xEE4328,
"Heavy Rain": 0x898A86,
"Heavy Red": 0x9E1212,
"Heavy Siena": 0x735848,
"Heavy Skintone": 0x927A71,
"Heavy Sugar": 0xEFF5F1,
"Heavy Violet": 0x4F566C,
"Heavy Warm Grey": 0xBDB3A7,
"Hectorite": 0xF0E4D2,
"Hedge Garden": 0x00AA11,
"Hedge Green": 0x768A75,
"Hedgehog Cactus Yellow Green": 0xC4AA5E,
"Hedgehog Mushroom": 0xFAF0DA,
"Hēi Sè Black": 0x142030,
"Heidelberg Red": 0x960117,
"Heifer": 0xC3BDB1,
"Heirloom": 0xB67B71,
"Heirloom Apricot": 0xF4BEA6,
"Heirloom Hydrangea": 0x327CCB,
"Heirloom Lace": 0xF5E6D6,
"Heirloom Lilac": 0x9D96B2,
"Heirloom Orchid": 0xAE9999,
"Heirloom Quilt": 0xAB979A,
"Heirloom Rose": 0xD182A0,
"Heirloom Shade": 0xDCD8D4,
"Heirloom Silver": 0xB5B6AD,
"Heirloom Tomato": 0x833633,
"Heisenberg Blue": 0x70D4FB,
"Helen of Troy": 0xC3B89F,
"Hel<NAME>": 0xD28B72,
"Heliotrope": 0xD94FF5,
"Heliotrope Grey": 0xAB98A9,
"Heliotrope Magenta": 0xAA00BB,
"Heliotropic Mauve": 0x9187BD,
"Helium": 0xEAE5D8,
"Hellebore": 0x646944,
"Hellion Green": 0x87C5AE,
"Hello Darkness My Old Friend": 0x802280,
"Hello Fall": 0x995533,
"Hello Spring": 0x44DD66,
"Hello Summer": 0x55BBFF,
"Hello Winter": 0x99FFEE,
"Hello Yellow": 0xFFE59D,
"Helvetia Red": 0xF00000,
"Hematite": 0x5F615F,
"Hematitic Sand": 0xDC8C59,
"Hemisphere": 0x5285A4,
"Hemlock": 0x69684B,
"Hemlock Bud": 0xECEEDF,
"Hemoglobin Red": 0xC61A1B,
"Hemp": 0x987D73,
"Hemp Fabric": 0xB5AD88,
"Hemp Rope": 0xB9A379,
"Hemp Tea": 0xB5B35C,
"Hen of the Woods": 0xEED9C4,
"Henna": 0x7C423C,
"Henna Red": 0x6E3530,
"Henna Shade": 0xB3675D,
"Hep Green": 0xC4B146,
"Hepatica": 0xFBE5EA,
"Hephaestus": 0xE1D4B6,
"Hephaestus Gold": 0xFF9911,
"Hera Blue": 0x7777EE,
"Herald of Spring": 0xA46366,
"Herald's Trumpet": 0xCE9F2F,
"Heraldic": 0x444161,
"<NAME>": 0xE7E0D3,
"Her<NAME>ucopia": 0x6E7357,
"Herb Garden": 0xE9F3E1,
"<NAME>": 0xDDA0DF,
"Herbal": 0x29AB87,
"Herbal Garden": 0x9CAD60,
"Herbal Mist": 0xD2E6D3,
"Herbal Scent": 0x8E9B7C,
"Herbal Tea": 0xF9FEE9,
"Herbal Vapors": 0xDDFFCC,
"Herbal Wash": 0xA49B82,
"Herbalist": 0x969E86,
"Her<NAME>oney": 0xEEEE22,
"Herbivore": 0x88EE77,
"Here Comes the Sun": 0xFCDF63,
"<NAME>": 0x5F3B36,
"<NAME>": 0x6C2E1F,
"Heritage": 0xB0BACC,
"Heritage Blue": 0x5D96BC,
"Heritage Oak": 0x5C453D,
"Heritage Park": 0x69756C,
"Heritage Taffeta": 0x956F7B,
"Hermosa Pink": 0x8A474C,
"Hero": 0x005D6A,
"Heroic Blue": 0x1166FF,
"Heron": 0x62617E,
"Heron Plume": 0xE5E1D8,
"Herring Silver": 0xC6C8CF,
"Hesperide Apple Gold": 0xFFE296,
"Hestia Red": 0xEE2200,
"Hexed Lichen": 0x6E0060,
"Hexos Palesun": 0xFBFF0A,
"Hey Blue!": 0x16F8FF,
"Hi Def Lime": 0xBBB465,
"Hibernate": 0xACA69F,
"Hibernation": 0x6F5166,
"Hibiscus": 0xB6316C,
"Hibiscus Delight": 0xFE9773,
"Hibiscus Flower": 0xBC555E,
"Hibiscus Leaf": 0x6E826E,
"Hibiscus Petal": 0xEDAAAC,
"Hibiscus Pop": 0xDD77DD,
"Hibiscus Punch": 0x5C3D45,
"Hibiscus Red": 0xA33737,
"Hickory": 0xB7A28E,
"Hickory Branch": 0xAB8274,
"Hickory Cliff": 0x7C6E6D,
"Hickory Grove": 0x655341,
"Hickory Nut": 0x78614C,
"Hickory Stick": 0x997772,
"Hidcote": 0x9C949B,
"Hidden Cottage": 0x8D7F64,
"Hidden Cove": 0xCEC6BD,
"Hidden Creek": 0xD5DAE0,
"Hidden Depths": 0x305451,
"Hidden Diary": 0xEDE4CC,
"Hidden Forest": 0x4F5A51,
"Hidden Glade": 0x98AD8E,
"Hidden Hills": 0xC5D2B1,
"Hidden Jade": 0xEBF1E2,
"Hidden Mask": 0x96748A,
"Hidden Meadow": 0xBBCC5A,
"Hidden Paradise": 0x5E8B3D,
"Hidden Peak": 0x727D7F,
"Hidden Sapphire": 0x445771,
"Hidden Sea Glass": 0x6FD1C9,
"Hidden Trail": 0x5F5B4D,
"Hidden Treasure": 0xA59074,
"Hidden Tribe": 0xBB9900,
"Hidden Waters": 0x225258,
"Hideaway": 0xC8C0AA,
"Hideout": 0x5386B7,
"Hierba Santa": 0x77A373,
"High Altar": 0x334F7B,
"High Blue": 0x4CA8E0,
"High Chaparral": 0x75603D,
"High Dive": 0x59B9CC,
"High Drama": 0x9A3843,
"High Elf Blue": 0x8CBED6,
"High Forest Green": 0x665D25,
"High Grass": 0xBBDD00,
"High Hopes": 0xDEEAAA,
"High Maintenance": 0xD88CB5,
"High Noon": 0xCFB999,
"High Plateau": 0xE4B37A,
"High Point": 0xBCD8D2,
"High Priest": 0x643949,
"High Profile": 0x005A85,
"High Rank": 0x645453,
"High Reflective White": 0xF7F7F1,
"High Rise": 0xAEB2B5,
"High Risk Red": 0xC71F2D,
"High Salute": 0x445056,
"High Sierra": 0xCEDEE2,
"High Society": 0xCAB7C0,
"High Speed Access": 0xBDBEBF,
"High Strung": 0xAC9825,
"High Style": 0xA8B1D7,
"High Style Beige": 0xE4D7C3,
"High Tea": 0x7F6F57,
"High Tea Green": 0x567063,
"High Voltage": 0xEEFF11,
"Highball": 0x928C3C,
"Highland": 0x7A9461,
"Highland Green": 0x305144,
"Highland Ridge": 0x8F714B,
"Highland Thistle": 0xB9A1AE,
"Highlander": 0x3A533D,
"Highlands Moss": 0x445500,
"Highlands Twilight": 0x484A80,
"Highlight": 0xEEF0DE,
"Highlight Gold": 0xDFC16D,
"Highlighter": 0xFFE536,
"Highlighter Blue": 0x3AAFDC,
"Highlighter Green": 0x1BFC06,
"Highlighter Lavender": 0x85569C,
"Highlighter Lilac": 0xD72E83,
"Highlighter Orange": 0xF39539,
"Highlighter Pink": 0xEA5A79,
"Highlighter Red": 0xE94F58,
"Highlighter Turquoise": 0x009E6C,
"Highlighter Yellow": 0xF1E740,
"Highway": 0xBDB388,
"Highway to Hell": 0xCD1102,
"Hihada Brown": 0x752E23,
"Hiker's Delight": 0xD2B395,
"Hiking Boots": 0x5E5440,
"Hiking Trail": 0xA99170,
"Hill Giant": 0xE0EEDF,
"Hillary": 0xA7A07E,
"Hills of Ireland": 0x417B42,
"Hillsbrad Grass": 0x7FA91F,
"Hillside Green": 0x8F9783,
"Hillside View": 0x8DA090,
"Hilltop": 0x587366,
"Hilo Bay": 0x768AA1,
"Himalaya": 0x736330,
"Himalaya Blue": 0xAECDE0,
"Himalaya Peaks": 0xE2EAF0,
"Himalaya Sky": 0x7695C2,
"Himalaya White Blue": 0xB9DEE9,
"Himalayan Balsam": 0xFF99CC,
"Himalayan Mist": 0xE1F0ED,
"Himalayan Poppy": 0xBEC6D6,
"Himalayan Salt": 0xC07765,
"Himawari Yellow": 0xFCC800,
"Hindsight": 0xBDC9E3,
"Hindu Lotus": 0x8E8062,
"Hinoki": 0xF8DDB7,
"Hinomaru Red": 0xBC002D,
"Hint of Blue": 0xCEE1F2,
"Hint of Green": 0xDFEADE,
"Hint of Mauve": 0xE1DBD5,
"Hint of Mint": 0xDFF1D6,
"Hint of Orange": 0xF8E6D9,
"Hint of Pink": 0xF1E4E1,
"Hint of Red": 0xF6DFE0,
"Hint of Vanilla": 0xEEE8DC,
"Hint of Violet": 0xD2D5E1,
"Hint of Yellow": 0xFAF1CD,
"Hinterland": 0x616C51,
"Hinterlands Green": 0x304112,
"Hinting Blue": 0xCED9DD,
"Hip Hop": 0xE4E8A7,
"Hip Waders": 0x746A51,
"Hippie Blue": 0x49889A,
"Hippie Green": 0x608A5A,
"Hippie Pink": 0xAB495C,
"Hippie Trail": 0xC6AA2B,
"Hippogriff Brown": 0x5C3C0D,
"Hippolyta": 0xCFC294,
"Hippy": 0xEAE583,
"Hipster": 0xF2F1D9,
"Hipster Hippo": 0xBFB3AB,
"Hipster Salmon": | |
"{}_prepro.tiff".format(modality_name)
cache_transform_fp = self.image_cache / "{}_init_tforms.json".format(
cache_im_fp.stem
)
cache_osize_tform_fp = (
self.image_cache
/ "{}_orig_size_tform.json".format(cache_im_fp.stem)
)
if cache_im_fp.is_file() is False:
sitk.WriteImage(reg_image.reg_image, str(cache_im_fp), True)
if reg_image.mask is not None:
cache_mask_im_fp = self.image_cache / "{}_prepro_mask.tiff".format(
modality_name
)
if cache_mask_im_fp.is_file() is False:
sitk.WriteImage(reg_image.mask, str(cache_mask_im_fp), True)
if cache_transform_fp.is_file() is False:
pmap_dict_to_json(
reg_image.pre_reg_transforms, str(cache_transform_fp)
)
if (
cache_osize_tform_fp.is_file() is False
and reg_image.original_size_transform is not None
):
pmap_dict_to_json(
reg_image.original_size_transform, str(cache_osize_tform_fp)
)
def _find_nonreg_modalities(self):
registered_modalities = [
edge.get("modalities").get("source")
for edge in self.reg_graph_edges
]
non_reg_modalities = list(
set(self.modality_names).difference(registered_modalities)
)
# remove attachment modalities
for attachment_modality in self.attachment_images.keys():
non_reg_modalities.pop(
non_reg_modalities.index(attachment_modality)
)
return non_reg_modalities
    def save_config(self, registered=False):
        """Serialize the project configuration to a timestamped YAML file.

        Parameters
        ----------
        registered : bool
            When True, registration has completed: graph edges and
            original-size transforms are included and the filename is
            suffixed "registered" instead of "setup".
        """
        ts = time.strftime('%Y%m%d-%H%M%S')
        status = "registered" if registered is True else "setup"
        reg_paths = {}
        for idx, edge in enumerate(self.reg_graph_edges):
            src_modality = edge.get("modalities").get("source")
            # NOTE(review): thru_modality is the FIRST hop of a multi-step
            # path; paths with more than two hops would lose intermediate
            # steps in this serialization -- confirm
            if len(self.reg_paths[src_modality]) > 1:
                thru_modality = self.reg_paths[src_modality][0]
            else:
                thru_modality = None
            tgt_modality = self.reg_paths[src_modality][-1]
            reg_paths.update(
                {
                    f"reg_path_{idx}": {
                        "src_modality_name": edge.get("modalities").get(
                            "source"
                        ),
                        "tgt_modality_name": tgt_modality,
                        "thru_modality": thru_modality,
                        "reg_params": edge.get("params"),
                    }
                }
            )
        # strip non-serializable transform objects before dumping
        reg_graph_edges = deepcopy(self.reg_graph_edges)
        [rge.pop("reg_transforms", None) for rge in reg_graph_edges]
        modalities_out = deepcopy(self.modalities)
        for mod, data in modalities_out.items():
            # in-memory arrays cannot round-trip through YAML; replace
            # them with a placeholder string
            if isinstance(data["image_filepath"], ARRAYLIKE_CLASSES):
                data["image_filepath"] = "ArrayLike"
        config = {
            "project_name": self.project_name,
            "output_dir": str(self.output_dir),
            "cache_images": self.cache_images,
            "modalities": modalities_out,
            "reg_paths": reg_paths,
            "reg_graph_edges": reg_graph_edges
            if status == "registered"
            else None,
            "original_size_transforms": self.original_size_transforms
            if status == "registered"
            else None,
            "attachment_shapes": self.shape_sets
            if len(self._shape_sets) > 0
            else None,
            "attachment_images": self.attachment_images
            if len(self.attachment_images) > 0
            else None,
        }
        output_path = (
            self.output_dir
            / f"{ts}-{self.project_name}-configuration-{status}.yaml"
        )
        with open(str(output_path), "w") as f:
            # keep insertion order so the YAML mirrors the dict above
            yaml.dump(config, f, sort_keys=False)
    def register_images(self, parallel=False):
        """
        Start image registration process for all modalities

        Runs every unregistered edge of the registration graph: prepares
        source and target images, optionally caches preprocessed images,
        runs the elastix registration, and stores the resulting transforms
        on the edge and on disk. The configuration is saved before
        (status "setup") and after (status "registered") the run.

        Parameters
        ----------
        parallel : bool
            whether to run each edge in parallel (not implemented yet)
        """
        if self.cache_images is True:
            self.image_cache.mkdir(parents=False, exist_ok=True)
        self.save_config(registered=False)
        for reg_edge in self.reg_graph_edges:
            # skip edges already registered in a previous run
            if (
                reg_edge.get("registered") is None
                or reg_edge.get("registered") is False
            ):
                src_name = reg_edge["modalities"]["source"]
                tgt_name = reg_edge["modalities"]["target"]
                (
                    src_reg_image_fp,
                    src_res,
                    src_prepro,
                    src_transforms,
                    src_mask,
                    src_original_size_transform,
                ) = self._prepare_modality(src_name, reg_edge, "source")
                (
                    tgt_reg_image_fp,
                    tgt_res,
                    tgt_prepro,
                    tgt_transforms,
                    tgt_mask,
                    tgt_original_size_transform,
                ) = self._prepare_modality(tgt_name, reg_edge, "target")
                src_reg_image = reg_image_loader(
                    src_reg_image_fp,
                    src_res,
                    preprocessing=src_prepro,
                    pre_reg_transforms=src_transforms,
                    mask=src_mask,
                )
                tgt_reg_image = reg_image_loader(
                    tgt_reg_image_fp,
                    tgt_res,
                    preprocessing=tgt_prepro,
                    pre_reg_transforms=tgt_transforms,
                    mask=tgt_mask,
                )
                src_reg_image.read_reg_image()
                tgt_reg_image.read_reg_image()
                # fall back to the loader-computed original-size transform
                # when _prepare_modality did not supply one
                if (
                    tgt_original_size_transform is None
                    and tgt_reg_image.original_size_transform is not None
                ):
                    tgt_original_size_transform = (
                        tgt_reg_image.original_size_transform
                    )
                if self.cache_images is True:
                    # do not cache images whose preprocessing was
                    # overridden for this edge only
                    if reg_edge.get("override_prepro") is not None:
                        if (
                            reg_edge.get("override_prepro").get("source")
                            is None
                        ):
                            self._cache_images(src_name, src_reg_image)
                        if (
                            reg_edge.get("override_prepro").get("target")
                            is None
                        ):
                            self._cache_images(tgt_name, tgt_reg_image)
                    else:
                        self._cache_images(src_name, src_reg_image)
                        self._cache_images(tgt_name, tgt_reg_image)
                reg_params = reg_edge["params"]
                output_path = (
                    self.output_dir
                    / "{}-{}_to_{}_reg_output".format(
                        self.project_name,
                        reg_edge["modalities"]["source"],
                        reg_edge["modalities"]["target"],
                    )
                )
                output_path.mkdir(parents=False, exist_ok=True)
                output_path_tform = (
                    self.output_dir
                    / "{}-{}_to_{}_transformations.json".format(
                        self.project_name,
                        reg_edge["modalities"]["source"],
                        reg_edge["modalities"]["target"],
                    )
                )
                reg_tforms = register_2d_images_itkelx(
                    src_reg_image,
                    tgt_reg_image,
                    reg_params,
                    output_path,
                )
                # convert SimpleITK parameter maps to plain dicts
                reg_tforms = [sitk_pmap_to_dict(tf) for tf in reg_tforms]
                if src_transforms is not None:
                    initial_transforms = src_transforms[0]
                else:
                    initial_transforms = src_reg_image.pre_reg_transforms
                reg_edge["transforms"] = {
                    'initial': initial_transforms,
                    'registration': reg_tforms,
                }
                self.original_size_transforms.update(
                    {tgt_name: tgt_original_size_transform}
                )
                reg_edge["registered"] = True
                pmap_dict_to_json(
                    reg_edge["transforms"], str(output_path_tform)
                )
        # assigning triggers the setter, which recollates per-modality
        # transform sequences from the edges
        self.transformations = self.reg_graph_edges
        self.save_config(registered=True)
    @property
    def transformations(self):
        """dict: collated per-modality transformation sequences (built by _collate_transformations)."""
        return self._transformations
    @transformations.setter
    def transformations(self, reg_graph_edges):
        # NOTE(review): the assigned value is intentionally ignored — the
        # property is always rebuilt from self.reg_graph_edges via
        # _collate_transformations(). Confirm callers rely on this behavior.
        self._transformations = self._collate_transformations()
def add_merge_modalities(self, merge_name, modalities):
for modality in modalities:
try:
self.modalities[modality]
except KeyError:
raise ValueError(
f"Modality for merger [{modality}] is not a modality "
f"within the graph, current modalitles : "
f"{self.modality_names}"
)
self.merge_modalities.update({merge_name: modalities})
    def _generate_reg_transforms(self):
        # NOTE(review): this statement only reads the key and discards the
        # result — it has no effect. It looks like an unfinished stub
        # (actual wrapping happens in _collate_transformations); confirm
        # intent before relying on this method.
        self._reg_graph_edges["reg_transforms"]
    def _collate_transformations(self):
        """Assemble per-modality transformation sequences from registered edges.

        Side effect: each edge in ``self.reg_graph_edges`` gets a
        ``"reg_transforms"`` entry holding its transforms wrapped in
        ``RegTransform`` objects.

        Returns
        -------
        dict
            Maps each modality to a dict with key ``'initial'`` (list of
            RegTransform, possibly empty) plus integer keys 0..n-1 holding the
            registration transforms of each edge along that modality's
            transform path, in path order.
        """
        transforms = {}
        # Wrap every edge's raw transform dicts in RegTransform objects.
        for reg_edge in self.reg_graph_edges:
            if reg_edge["transforms"]["initial"] is not None:
                initial_transforms = [
                    RegTransform(t) for t in reg_edge["transforms"]["initial"]
                ]
            else:
                initial_transforms = []
            reg_edge["reg_transforms"] = {
                'initial': initial_transforms,
                'registration': [
                    RegTransform(t)
                    for t in reg_edge["transforms"]["registration"]
                ],
            }
        # Edges are looked up by their {source, target} modality pair.
        edge_modality_pairs = [v['modalities'] for v in self.reg_graph_edges]
        for modality, tform_edges in self.transform_paths.items():
            for idx, tform_edge in enumerate(tform_edges):
                reg_edge_tforms = self.reg_graph_edges[
                    edge_modality_pairs.index(tform_edge)
                ]["reg_transforms"]
                if idx == 0:
                    # The first edge on the path also contributes the
                    # pre-registration ("initial") transforms.
                    transforms[modality] = {
                        'initial': reg_edge_tforms['initial'],
                        idx: reg_edge_tforms['registration'],
                    }
                else:
                    transforms[modality][idx] = reg_edge_tforms['registration']
        return transforms
    def _prepare_nonreg_image_transform(
        self, modality_key, to_original_size=True
    ):
        """Prepare transform data for a modality that was not registered.

        Parameters
        ----------
        modality_key : str
            Name of the non-registered modality.
        to_original_size : bool
            If True, include the stored original-size transform (if any).

        Returns
        -------
        tuple
            ``(im_data, transformations, output_path)`` where
            ``transformations`` is None when there is nothing to apply.
        """
        print(
            "transforming non-registered modality : {} ".format(modality_key)
        )
        output_path = self.output_dir / "{}-{}_registered".format(
            self.project_name, modality_key
        )
        im_data = self.modalities[modality_key]
        transformations = {"initial": None, "registered": None}
        # Preprocessing options implying a spatial transform (rotation, flip,
        # or a mask-derived crop) require the cached initial transform.
        if (
            im_data.get("preprocessing").get("rot_cc") is not None
            or im_data.get("preprocessing").get("flip") is not None
            or im_data.get("preprocessing").get("mask_to_bbox") is True
            or im_data.get("preprocessing").get("mask_bbox") is not None
        ):
            transformations.update(
                {
                    "initial": self._check_cache_modality(modality_key)[1][0],
                    "registered": None,
                }
            )
        if to_original_size is True:
            transformations.update(
                {"registered": self.original_size_transforms.get(modality_key)}
            )
        # Nothing to apply at all -> signal with None.
        if (
            transformations.get("initial") is None
            and transformations.get("registered") is None
        ):
            transformations = None
        return im_data, transformations, output_path
    def _prepare_reg_image_transform(
        self,
        edge_key,
        attachment=False,
        attachment_modality=None,
        to_original_size=True,
    ):
        """Prepare transform data for a registered (or attached) modality.

        Parameters
        ----------
        edge_key : str
            Modality to transform.
        attachment : bool
            If True, reuse the transforms of ``attachment_modality``.
        attachment_modality : str, optional
            Modality whose registration path the attachment follows.
        to_original_size : bool
            If True, append the final modality's original-size transform.

        Returns
        -------
        tuple
            ``(im_data, transformations, output_path)``
        """
        im_data = self.modalities[edge_key]
        if attachment is True:
            # Attachments ride along the transforms of their carrier modality.
            final_modality = self.reg_paths[attachment_modality][-1]
            transformations = deepcopy(
                self.transformations[attachment_modality]
            )
        else:
            final_modality = self.reg_paths[edge_key][-1]
            transformations = deepcopy(self.transformations[edge_key])
        print("transforming {} to {}".format(edge_key, final_modality))
        output_path = self.output_dir / "{}-{}_to_{}_registered".format(
            self.project_name,
            edge_key,
            final_modality,
        )
        # Optionally map back to the pre-crop coordinate space of the target.
        if (
            self.original_size_transforms.get(final_modality) is not None
            and to_original_size is True
        ):
            original_size_transform = self.original_size_transforms[
                final_modality
            ]
            transformations.update(
                {"orig": [RegTransform(original_size_transform)]}
            )
        return im_data, transformations, output_path
def _transform_write_image(
self, im_data, transformations, output_path, file_writer="ome.tiff"
):
tfregimage = reg_image_loader(
im_data["image_filepath"],
im_data["image_res"],
channel_names=im_data.get("channel_names"),
channel_colors=im_data.get("channel_colors"),
)
ometiffwriter = OmeTiffWriter(tfregimage)
if transformations:
(
composite_transform,
itk_transforms,
final_transform,
) = prepare_wsireg_transform_data(transformations)
else:
composite_transform, itk_transforms, final_transform = (
None,
None,
None,
)
if (
file_writer == "ome.tiff-bytile"
and ometiffwriter.reg_image.reader not in ["czi", "sitk"]
):
im_fp = ometiffwriter.write_image_by_tile(
output_path.stem,
itk_transforms=itk_transforms,
composite_transform=composite_transform,
final_transform=final_transform,
output_dir=str(self.output_dir),
)
else:
im_fp = ometiffwriter.write_image_by_plane(
output_path.stem,
composite_transform=composite_transform,
final_transform=final_transform,
output_dir=str(self.output_dir),
)
return im_fp
def _transform_write_merge_images(self, to_original_size=True):
for merge_name, sub_images in self.merge_modalities.items():
im_fps = []
im_res = []
im_ch_names = []
transformations = []
final_modalities = []
for sub_image in sub_images:
im_data = self.modalities[sub_image]
im_fps.append(im_data["image_filepath"])
im_res.append(im_data["image_res"])
im_ch_names.append(im_data.get("channel_names"))
try:
transforms = deepcopy(self.transformations[sub_image])
except KeyError:
transforms = None
try:
final_modalities.append(self.reg_paths[sub_image][-1])
except KeyError:
initial_transforms = self._check_cache_modality(sub_image)[
1
][0]
if initial_transforms:
initial_transforms = [
RegTransform(t) for t in initial_transforms
]
final_modalities.append(sub_image)
transforms = {"initial": initial_transforms}
else:
transforms = None
transformations.append(transforms)
if all(final_modalities):
final_modality = final_modalities[0]
else:
raise ValueError("final modalities do not match on merge")
if (
self.original_size_transforms.get(final_modality) is not None
and to_original_size is True
):
original_size_transform = self.original_size_transforms[
final_modality
]
for transformation in transformations:
if transformation is None:
transformation = {}
transformation.update(
{"orig": [RegTransform(original_size_transform)]}
)
output_path = self.output_dir / "{}-{}_merged-registered".format(
self.project_name,
merge_name,
)
merge_regimage = MergeRegImage(
im_fps,
im_res,
channel_names=im_ch_names,
)
merge_ometiffwriter = MergeOmeTiffWriter(merge_regimage)
im_fp = merge_ometiffwriter.merge_write_image_by_plane(
output_path.stem,
sub_images,
transformations=transformations,
output_dir=str(self.output_dir),
)
return im_fp
    def transform_images(
        self,
        file_writer="ome.tiff",
        transform_non_reg=True,
        remove_merged=True,
        to_original_size=True,
    ):
        """
        Transform and write images to disk after registration. Also transforms all attachment images

        Parameters
        ----------
        file_writer : str
            output type to use, sitk writes a single resolution tiff, "zarr" writes an ome-zarr multiscale
            zarr store
        transform_non_reg : bool
            whether to write the images that aren't transformed during registration as well
        remove_merged: bool
            whether to remove images that are stored in merged store, if True, images that are merged
            will not be written as individual images as well
        to_original_size: bool
            write images that have been cropped for registration back to their original coordinate space

        Returns
        -------
        list
            Filepaths of all images written (empty if any edge is not yet
            registered).
        """
        image_fps = []
        # Only proceed once every edge in the graph has been registered.
        if all(
            [reg_edge.get("registered") for reg_edge in self.reg_graph_edges]
        ):
            # prepare workflow
            merge_modalities = []
            if len(self.merge_modalities.keys()) > 0:
                for k, v in self.merge_modalities.items():
                    merge_modalities.extend(v)
            reg_path_keys = list(self.reg_paths.keys())
            nonreg_keys = self._find_nonreg_modalities()
            # Drop merged sub-images from the individual write lists so they
            # are only written as part of the merged image.
            if remove_merged:
                for merge_mod in merge_modalities:
                    try:
                        m_idx = reg_path_keys.index(merge_mod)
                        reg_path_keys.pop(m_idx)
                    except ValueError:
                        pass
                    try:
                        m_idx = nonreg_keys.index(merge_mod)
                        nonreg_keys.pop(m_idx)
                    except ValueError:
                        pass
            # Transform and write each registered modality.
            for modality in reg_path_keys:
                (
                    im_data,
                    transformations,
                    output_path,
                ) = self._prepare_reg_image_transform(
                    modality,
                    attachment=False,
                    to_original_size=to_original_size,
                )
                im_fp = self._transform_write_image(
                    im_data,
                    transformations,
                    output_path,
                    file_writer=file_writer,
                )
                image_fps.append(im_fp)
            # Attachment images reuse their carrier modality's transforms.
            for (
                modality,
                attachment_modality,
            ) in self.attachment_images.items():
                (
                    im_data,
                    transformations,
                    output_path,
                ) = self._prepare_reg_image_transform(
                    modality,
                    attachment=True,
                    attachment_modality=attachment_modality,
                    to_original_size=to_original_size,
                )
                im_fp = self._transform_write_image(
                    im_data,
                    transformations,
                    output_path,
                    file_writer=file_writer,
                )
                image_fps.append(im_fp)
            # Merged outputs are written as a single multi-channel image.
            if len(self.merge_modalities.items()) > 0:
                im_fp = self._transform_write_merge_images(
                    to_original_size=to_original_size
                )
                image_fps.append(im_fp)
            if transform_non_reg is True:
                # preprocess and save unregistered nodes
                for modality in nonreg_keys:
                    (
                        im_data,
                        transformations,
                        output_path,
                    ) = self._prepare_nonreg_image_transform(
                        modality,
                        to_original_size=to_original_size,
                    )
                    im_fp = self._transform_write_image(
                        im_data,
                        transformations,
                        output_path,
                        file_writer=file_writer,
                    )
                    image_fps.append(im_fp)
        return image_fps
def transform_shapes(self):
"""
Transform all attached shapes and write out shape data to geoJSON.
"""
for set_name, set_data in self.shape_sets.items():
attachment_modality = set_data["attachment_modality"]
final_modality = self.reg_paths[attachment_modality][-1]
print(
"transforming shape set {} associated with {} to {}".format(
set_name, attachment_modality, final_modality
)
)
rs = RegShapes(
set_data["shape_files"], source_res=set_data["image_res"]
)
rs.transform_shapes(
self.transformations[attachment_modality],
)
output_path = (
self.output_dir
/ "{}-{}-{}_to_{}-transformed_shapes.geojson".format(
self.project_name,
set_name,
attachment_modality,
final_modality,
)
)
rs.save_shape_data(output_path, transformed=True)
def save_transformations(self):
"""
Save all transformations for a | |
out of :ref:`range <ref-matrixrange>`.
"""
ncols = _stp._st_getmatrixcol(name)
nrows = _stp._st_getmatrixrow(name)
if rows is None:
mrows = None
else:
mrows = _get_matrix_index(rows, name, nrows, ncols, True)
if cols is None:
mcols = None
else:
mcols = _get_matrix_index(cols, name, nrows, ncols, False)
return _stp._st_listmatrix(name, mrows, mcols)
    @staticmethod
    def setColNames(name, colNames):
        """
        Set the column names of a Stata matrix.

        Parameters
        ----------
        name : str
            Name of the matrix.
        colNames : list or tuple
            A string list or tuple containing the column names for the
            matrix. The list or tuple length must match the number of
            columns in the matrix.

        Raises
        ------
        ValueError
            This error can be raised if

            - matrix `name` does not exist.
            - number of column names specified in `colNames` does not match the number of columns of the matrix.
        """
        # Thin wrapper: existence and length validation happen in the
        # Stata plugin layer.
        return _stp._st_setmatrixcolnames(name, colNames)
    @staticmethod
    def setRowNames(name, rowNames):
        """
        Set the row names of a Stata matrix.

        Parameters
        ----------
        name : str
            Name of the matrix.
        rowNames : list or tuple
            A string list or tuple containing the row names for the
            matrix. The list or tuple length must match the number of
            rows in the matrix.

        Raises
        ------
        ValueError
            This error can be raised if

            - matrix `name` does not exist.
            - number of row names specified in `rowNames` does not match the number of rows of the matrix.
        """
        # Thin wrapper: existence and length validation happen in the
        # Stata plugin layer.
        return _stp._st_setmatrixrownames(name, rowNames)
@staticmethod
def store(name, val):
"""
Store elements in an existing Stata matrix or create a new
Stata matrix if the matrix does not exist.
Parameters
----------
name : str
Name of the matrix.
val : array-like
Values to store. The dimensions of `val` should match the
dimensions of the matrix. Each value of `val` must be a
real number.
Raises
------
ValueError
This error can be raised if
- matrix `name` does not exist.
- dimensions of `val` do not match the dimensions of the matrix.
"""
mexist = True
try:
ncols = _stp._st_getmatrixcol(name)
nrows = _stp._st_getmatrixrow(name)
except:
mexist = False
def listimize(x):
if isinstance(x, str):
raise TypeError("Value of matrix can not be string")
if not hasattr(x, "__iter__"):
return [x]
return list(x)
if isinstance(val, str):
raise TypeError("Value of matrix can not be string")
if not hasattr(val, "__iter__"):
val = [[val]]
else:
val = list(listimize(v) for v in val)
if mexist is True:
if (nrows == 1 and len(val) == ncols and
_check_all(len(v) == 1 for v in val)):
val = [[v[0] for v in val]]
if not len(val) == nrows:
raise ValueError("compatibility error; rows unmatch")
if not _check_all(len(v) == ncols for v in val):
raise ValueError("compatibility error; columns unmatch")
else:
if len(val) <= 0:
raise ValueError("compatibility error; val is empty")
ncols = len(val[0])
if not _check_all(len(v) == ncols for v in val):
raise ValueError("compatibility error; columns unmatch")
return _stp._st_storematrix(name, val)
    @staticmethod
    def storeAt(name, row, col, val):
        """
        Store an element in an existing Stata matrix.

        Parameters
        ----------
        name : str
            Name of the matrix.
        row : int
            Row in which to store.
        col : int
            Column in which to store.
        val : float
            Value to store.

        Raises
        ------
        ValueError
            This error can be raised if

            - matrix `name` does not exist.
            - `row` is out of :ref:`range <ref-matrixrange>`.
            - `col` is out of :ref:`range <ref-matrixrange>`.
        """
        # Thin wrapper: bounds checking happens in the Stata plugin layer.
        return _stp._st_storematrixat(name, row, col, val)
class Missing:
    """
    This class provides access to Stata missing values.
    """
    # Expose 'a'..'z' as class attributes (extended missing-value codes);
    # a 26-character string unpacks directly into the 26 names.
    a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z = \
        "abcdefghijklmnopqrstuvwxyz"

    @staticmethod
    def getValue(val=None):
        """
        Get the numeric value that represents the missing value in Stata.

        Parameters
        ----------
        val : {'a', 'b', ..., 'z'}, optional
            The character indicating which missing value to get. If `val` is
            not specified, the Stata system missing value is returned. If `val`
            is specified, it must be one of 'a', 'b', ..., or 'z', and the
            corresponding extended missing value is returned.

        Returns
        -------
        float
            The numeric value of a missing value in Stata.
        """
        if val is None:
            # Index 0 selects the system missing value.
            return _stp._st_getmissingvalue(0)
        if val not in tuple("abcdefghijklmnopqrstuvwxyz"):
            raise ValueError("val must be one of 'a', 'b', ..., or 'z'")
        # '.a' maps to index 1, ..., '.z' to index 26.
        return _stp._st_getmissingvalue(ord(val) - 96)

    @staticmethod
    def isMissing(val):
        """
        Test if a value is a Stata missing.

        Parameters
        ----------
        val : float
            The value to test.

        Returns
        -------
        bool
            True if the value is a Stata missing.
        """
        return _stp._st_isvaluemissing(val)

    @staticmethod
    def parseIsMissing(s):
        """
        Test if a string is a Stata missing.

        Parameters
        ----------
        s : str
            The string to test.

        Returns
        -------
        bool
            True if the string matches a Stata system missing value ('.') or
            an extended missing value ('.a' through '.z').
        """
        if s == '.':
            return True
        if not isinstance(s, str):
            raise TypeError("s must be str")
        return len(s) == 2 and s[0] == '.' and 'a' <= s[1] <= 'z'
class Platform:
    """
    This class provides a set of utilities for getting platform
    information.
    """

    def __init__(self):
        pass

    @staticmethod
    def isLinux():
        """
        Determine if the platform is Linux.

        Returns
        -------
        bool
            True if the platform is Linux.
        """
        return "linux" in platform().lower()

    @staticmethod
    def isMac():
        """
        Determine if the platform is Mac OS.

        Returns
        -------
        bool
            True if the platform is Mac OS.
        """
        return "darwin" in platform().lower()

    @staticmethod
    def isSolaris():
        """
        Determine if the platform is Solaris.

        Returns
        -------
        bool
            True if the platform is Solaris.
        """
        return "sunos" in platform().lower()

    @staticmethod
    def isUnix():
        """
        Determine if the platform is Unix or Linux.

        Returns
        -------
        bool
            True if the platform is Unix or Linux.
        """
        pf = platform().lower()
        return any(tag in pf for tag in ("sunos", "nux", "nix"))

    @staticmethod
    def isWindows(version=None):
        """
        Determine if the platform is Windows and the version number is
        greater than or equal to the version specified.

        Parameters
        ----------
        version : float, optional
            The Windows version to verify. Version numbers correspond
            to internal Microsoft versions, such as 5.1, 6.0, 6.1, 6.2,
            6.3, and 10.0.

        Returns
        -------
        bool
            True if the platform is Windows and the version is not
            less than specified.
        """
        pf = platform().lower()
        # "darwin" contains "win", so Mac OS must be ruled out first.
        if "darwin" in pf or "win" not in pf:
            return False
        if version is not None:
            version = str(version)
            sv = sys.getwindowsversion()
            try:
                version = float(version)
            except Exception:
                raise TypeError("version must be a number")
            if float(str(sv[0]) + "." + str(sv[1])) < version:
                return False
        return True
class Preference:
    """
    This class provides a set of utilities for loading and saving
    preferences.
    """
    # All methods are thin wrappers over the Stata plugin (_stp)
    # preference API.
    def __init__(self):
        pass
    @staticmethod
    def deleteSavedPref(section, key):
        """
        Delete a saved preference.

        Parameters
        ----------
        section : str
            The preference section.
        key : str
            The preference key.
        """
        return _stp._st_deletesavedpref(section, key)
    @staticmethod
    def getSavedPref(section, key, defaultValue):
        """
        Get a saved preference.

        Parameters
        ----------
        section : str
            The preference section.
        key : str
            The preference key.
        defaultValue : str
            The default value if the key is not found.

        Returns
        -------
        str
            The saved preference.
        """
        return _stp._st_getsavedpref(section, key, defaultValue)
    @staticmethod
    def setSavedPref(section, key, value):
        """
        Write a saved preference.

        Parameters
        ----------
        section : str
            The preference section.
        key : str
            The preference key.
        value : str
            The value to save.
        """
        return _stp._st_setsavedpref(section, key, value)
class Scalar:
"""
This class provides access to Stata scalars.
"""
    def __init__(self):
        # No instance state; all functionality is provided via static methods.
        pass
@staticmethod
def getString(name):
"""
Get the contents of a Stata string scalar.
Parameters
----------
name : str
Name of the scalar. It can be one of the following:
* global scalar such as **"myname"**
* **c()** scalar such as **"c(current_date)"**
Returns
-------
str
Value of the scalar. Returns an empty string if the scalar
| |
= reshape(matmul(gate_mat, reshape(new_state.vector, [2, half_length])), [new_state_len, 1])
# Update the order of active vertices and the background state
self.vertex.active = new_state.system
self.__bg_state = State(new_state.vector, new_state.system)
    def __create_graph_state(self, which_qubit):
        r"""Build the minimal quantum graph state needed to measure the given qubit.

        Note:
            This is an internal method; users do not need to call it directly.

        Args:
            which_qubit (any): system label of the qubit to be measured.
                It can be of any data type (``str``, ``tuple``, ...) as long
                as it matches the node labels of the MBQC graph.
        """
        # Find the neighbors of 'which_qubit'
        which_qubit_neighbors = set(self.__graph.neighbors(which_qubit))
        # Exclude the qubits already measured
        neighbors_not_measured = which_qubit_neighbors.difference(set(self.vertex.measured))
        # Create a list of system labels that will be applied to cz gates
        cz_list = [(which_qubit, qubit) for qubit in neighbors_not_measured]
        # Get the qubits to be activated
        append_qubits = {which_qubit}.union(neighbors_not_measured).difference(set(self.vertex.active))
        # Update active and pending lists
        self.vertex.active += list(append_qubits)
        self.vertex.pending = list(set(self.vertex.pending).difference(self.vertex.active))
        # Compute the new background state vector (newly activated qubits start in |+>)
        new_bg_state_vector = kron([self.__bg_state.vector] + [plus_state() for _ in append_qubits])
        # Update the background state and apply cz
        self.__bg_state = State(new_bg_state_vector, self.vertex.active)
        self.__apply_cz(cz_list)
        self.__draw_process("active", which_qubit)
def __update(self):
r"""更新历史列表和量子态信息。
"""
self.__history.append(self.__bg_state)
self.__status = self.__history[-1]
    def measure(self, which_qubit, basis_list):
        r"""Measure the given qubit in the given measurement basis.

        Note:
            This is one of the most frequently used methods after
            instantiating the MBQC class. Single-qubit measurement simulation
            is heavily optimized here: on each call the class automatically
            activates the relevant nodes, builds the required graph state,
            measures the requested qubit, and records both the measurement
            outcome and the resulting post-measurement state. Each call
            performs exactly one single-qubit measurement.

        Warning:
            The MBQC model only performs actual computation when ``measure``
            is called.

        Args:
            which_qubit (any): system label of the qubit to measure; can be
                any data type (``str``, ``tuple``, ...) matching the node
                labels of the MBQC graph
            basis_list (list): list of measurement-basis column vectors, each
                of type ``Tensor``

        Code example:

        .. code-block:: python

            from paddle_quantum.mbqc.simulator import MBQC
            from paddle_quantum.mbqc.qobject import State
            from paddle_quantum.mbqc.utils import zero_state, basis
            G = [['1', '2', '3'], [('1', '2'), ('2', '3')]]
            mbqc = MBQC()
            mbqc.set_graph(G)
            state = State(zero_state(), ['1'])
            mbqc.set_input_state(state)
            mbqc.measure('1', basis('X'))
            mbqc.measure('2', basis('X'))
            print("Measurement outcomes: ", mbqc.get_classical_output())

        ::

            Measurement outcomes:  {'1': 0, '2': 1}
        """
        self.__draw_process("measuring", which_qubit)
        self.__create_graph_state(which_qubit)
        assert which_qubit in self.vertex.active, 'the qubit to be measured must be activated first.'
        # Move the measured qubit to the front so the state factorizes as
        # (measured qubit) x (rest).
        new_bg_state = permute_to_front(self.__bg_state, which_qubit)
        self.vertex.active = new_bg_state.system
        half_length = int(new_bg_state.length / 2)
        eps = 10 ** (-10)
        prob = [0, 0]
        state_unnorm = [0, 0]
        # Calculate the probability and post-measurement states
        for result in [0, 1]:
            basis_dagger = t(conj(basis_list[result]))
            # Reshape the state, multiply the basis and reshape it back
            state_unnorm[result] = reshape(matmul(basis_dagger,
                                           reshape(new_bg_state.vector, [2, half_length])), [half_length, 1])
            probability = matmul(t(conj(state_unnorm[result])), state_unnorm[result])
            is_complex128 = probability.dtype == to_tensor([], dtype='complex128').dtype
            prob[result] = real(probability) if is_complex128 else probability
        # Randomly choose a result and its corresponding post-measurement state
        if prob[0].numpy().item() < eps:
            result = 1
            post_state_vector = state_unnorm[1]
        elif prob[1].numpy().item() < eps:
            result = 0
            post_state_vector = state_unnorm[0]
        else:  # Take a random choice of outcome
            # NOTE(review): assumes prob[0] + prob[1] ≈ 1 — numpy's
            # random.choice requires p to sum to 1; confirm normalization.
            result = random.choice(2, 1, p=[prob[0].numpy().item(), prob[1].numpy().item()]).item()
            # Normalize the post-measurement state
            post_state_vector = state_unnorm[result] / prob[result].sqrt()
        # Write the measurement result into the dict
        self.__outcome.update({which_qubit: int(result)})
        # Update measured, active lists
        self.vertex.measured.append(which_qubit)
        self.max_active = max(len(self.vertex.active), self.max_active)
        self.vertex.active.remove(which_qubit)
        # Update the background state and history list
        self.__bg_state = State(post_state_vector, self.vertex.active)
        self.__update()
        self.__draw_process("measured", which_qubit)
def sum_outcomes(self, which_qubits, start=0):
r"""根据输入的量子系统标签,在存储测量结果的字典中找到对应的测量结果,并进行求和。
Note:
在进行副产品纠正操作和定义适应性测量角度时,用户可以调用该方法对特定比特的测量结果求和。
Args:
which_qubits (list): 需要查找测量结果并求和的比特的系统标签列表
start (int): 对结果进行求和后需要额外相加的整数
Returns:
int: 指定比特的测量结果的和
代码示例:
.. code-block:: python
from paddle_quantum.mbqc.simulator import MBQC
from paddle_quantum.mbqc.qobject import State
from paddle_quantum.mbqc.utils import zero_state, basis
G = [['1', '2', '3'], [('1', '2'), ('2', '3')]]
mbqc = MBQC()
mbqc.set_graph(G)
input_state = State(zero_state(), ['1'])
mbqc.set_input_state(input_state)
mbqc.measure('1', basis('X'))
mbqc.measure('2', basis('X'))
mbqc.measure('3', basis('X'))
print("All measurement outcomes: ", mbqc.get_classical_output())
print("Sum of outcomes of qubits '1' and '2': ", mbqc.sum_outcomes(['1', '2']))
print("Sum of outcomes of qubits '1', '2' and '3' with an extra 1: ", mbqc.sum_outcomes(['1', '2', '3'], 1))
::
All measurement outcomes: {'1': 0, '2': 0, '3': 1}
Sum of outcomes of qubits '1' and '2': 0
Sum of outcomes of qubits '1', '2' and '3' with an extra 1: 2
"""
assert isinstance(start, int), "'start' must be of type int."
return sum([self.__outcome[label] for label in which_qubits], start)
def correct_byproduct(self, gate, which_qubit, power):
r"""对测量后的量子态进行副产品纠正。
Note:
这是用户在实例化 MBQC 类并完成测量后,经常需要调用的一个方法。
Args:
gate (str): ``'X'`` 或者 ``'Z'``,分别表示 Pauli X 或 Z 门修正
which_qubit (any): 待操作的量子比特的系统标签,可以是 ``str``, ``tuple`` 等任意数据类型,但需要和 MBQC 中图的标签类型匹配
power (int): 副产品纠正算符的指数
代码示例:
此处展示的是 MBQC 模型下实现隐形传态的一个例子。
.. code-block:: python
from paddle_quantum.mbqc.simulator import MBQC
from paddle_quantum.mbqc.qobject import State
from paddle_quantum.mbqc.utils import random_state_vector, basis, compare_by_vector
G = [['1', '2', '3'], [('1', '2'), ('2', '3')]]
state = State(random_state_vector(1), ['1'])
mbqc = MBQC()
mbqc.set_graph(G)
mbqc.set_input_state(state)
mbqc.measure('1', basis('X'))
mbqc.measure('2', basis('X'))
outcome = mbqc.get_classical_output()
mbqc.correct_byproduct('Z', '3', outcome['1'])
mbqc.correct_byproduct('X', '3', outcome['2'])
state_out = mbqc.get_quantum_output()
state_std = State(state.vector, ['3'])
compare_by_vector(state_out, state_std)
::
Norm difference of the given states is:
0.0
They are exactly the same states.
"""
assert gate in ['X', 'Z'], "'gate' must be 'X' or 'Z'."
assert isinstance(power, int), "'power' must be of type 'int'."
if power % 2 == 1:
self.__apply_pauli_gate(gate, which_qubit)
self.__update()
    def __run_cmd(self, cmd):
        r"""Execute a single measurement or byproduct-correction command.

        Args:
            cmd (Pattern.CommandM / Pattern.CommandX / Pattern.CommandZ):
                measurement or byproduct-correction command
        """
        assert cmd.name in ["M", "X", "Z"], "the input 'cmd' must be CommandM, CommandX or CommandZ."
        if cmd.name == "M":  # Execute measurement commands
            signal_s = self.sum_outcomes(cmd.domain_s)
            signal_t = self.sum_outcomes(cmd.domain_t)
            # The adaptive angle is (-1)^{signal_s} * angle + {signal_t} * pi
            adaptive_angle = multiply(to_tensor([(-1) ** signal_s], dtype="float64"), cmd.angle) \
                + to_tensor([signal_t * pi], dtype="float64")
            self.measure(cmd.which_qubit, basis(cmd.plane, adaptive_angle))
        else:  # Execute byproduct correction commands
            power = self.sum_outcomes(cmd.domain)
            self.correct_byproduct(cmd.name, cmd.which_qubit, power)
def __run_cmd_lst(self, cmd_lst, bar_start, bar_end):
r"""对列表执行测量或副产品处理命令。
Args:
cmd_lst (list): 命令列表,包含测量或副产品处理命令
bar_start (int): 进度条的开始点
bar_end (int): 进度条的结束点
"""
for i in range(len(cmd_lst)):
cmd = cmd_lst[i]
self.__run_cmd(cmd)
print_progress((bar_start + i + 1) / bar_end, "Pattern Running Progress", self.__track)
    def __kron_unmeasured_qubits(self):
        r"""Initialize nodes never entangled by CZ to the |+> state and tensor
        them into the current quantum state.

        Warning:
            This method is only called when the user supplies a measurement
            pattern. When the user supplies a graph, a node that was never
            activated is assumed to have had no operation applied to it.
        """
        # Turn off the plot switch
        self.__draw = False
        # As the create_graph_state function would change the measured qubits list, we need to record it
        measured_qubits = self.vertex.measured[:]
        for qubit in list(self.__graph.nodes):
            if qubit not in self.vertex.measured:
                self.__create_graph_state(qubit)
                # Update vertices and backgrounds
                self.vertex.measured.append(qubit)
                self.max_active = max(len(self.vertex.active), self.max_active)
                self.__bg_state = State(self.__bg_state.vector, self.vertex.active)
        # Restore the measured qubits
        self.vertex.measured = measured_qubits
    def run_pattern(self):
        r"""Simulate the MBQC model according to the configured measurement pattern.

        Warning:
            This method must be called after ``set_pattern``.
        """
        assert self.__pattern is not None, "please use this method after calling 'set_pattern'!"
        # Execute measurement commands and correction commands
        cmd_m_lst = [cmd for cmd in self.__pattern.commands if cmd.name == "M"]
        cmd_c_lst = [cmd for cmd in self.__pattern.commands if cmd.name in ["X", "Z"]]
        bar_end = len(cmd_m_lst + cmd_c_lst)
        self.__run_cmd_lst(cmd_m_lst, 0, bar_end)
        # Activate unmeasured qubits before byproduct corrections
        self.__kron_unmeasured_qubits()
        self.__run_cmd_lst(cmd_c_lst, len(cmd_m_lst), bar_end)
        # The output state's label is messy (e.g. [(2, 0), (0, 1), (1, 3)...]),
        # so we permute the systems in order
        q_output = self.__pattern.output_[1]
        self.__bg_state = permute_systems(self.__status, q_output)
        self.__update()
@staticmethod
def __map_qubit_to_row(out_lst):
r"""将输出比特的标签与行数对应起来,便于查找其对应关系。
Returns:
dict: 返回字典,代表行数与标签的对应关系
"""
return {int(div_str_to_float(qubit[0])): qubit for qubit in out_lst}
    def get_classical_output(self):
        r"""Get the classical output of the MBQC model after running.

        Returns:
            str or dict: if the input was a measurement pattern, returns the
            bit string measured on the output nodes (consistent with the
            original circuit's measurement outcomes, with unmeasured positions
            filled with "?"); if the input was a graph, returns the outcome
            dictionary of all measured nodes
        """
        # If the input is pattern, return the equivalent result as the circuit model
        if self.__pattern is not None:
            width = len(self.__pattern.input_)
            c_output = self.__pattern.output_[0]
            q_output = self.__pattern.output_[1]
            # Acquire the relationship between row number and corresponding output qubit label
            output_lst = c_output + q_output
            row_and_qubit = self.__map_qubit_to_row(output_lst)
            # Obtain the string, with classical outputs denoted as their measurement outcomes
            # and quantum outputs denoted as "?"
            bit_str = [str(self.__outcome[row_and_qubit[i]])
                       if row_and_qubit[i] in c_output else '?'
                       for i in range(width)]
            string = "".join(bit_str)
            return string
        # If the input is graph, return the outcome dictionary
        else:
            return self.__outcome
    def get_history(self):
        r"""Get the intermediate results recorded during the MBQC simulation.

        Returns:
            list: states obtained after graph-state creation, measurements,
            and byproduct corrections
        """
        return self.__history
    def get_quantum_output(self):
        r"""Get the quantum state output by the MBQC model after running.

        Returns:
            State: the final quantum state of the MBQC computation
        """
        return self.__status
def simulate_by_mbqc(circuit, input_state=None):
    r"""Simulate a quantum circuit with its equivalent MBQC model.

    The circuit is transpiled into an equivalent measurement pattern, which is
    then run so that the output matches that of the original circuit.

    Warning:
        Unlike ``UAnsatz``, the ``circuit`` argument here includes the
        measurement operations. Also, the MBQC model's default initial state
        is the plus state; if ``input_state`` is not provided, the initial
        state defaults to :math:`|+\rangle`.

    Args:
        circuit (Circuit): quantum circuit
        input_state (State, optional): initial state of the quantum circuit,
            defaults to :math:`|+\rangle`

    Returns:
        tuple: containing the following two elements:

            - str: classical output
            - State: quantum output
    """
    if input_state is not None:
        assert isinstance(input_state, State), "the 'input_state' must be of type 'State'."
    # Translate the circuit to a measurement pattern, then run it.
    pattern = transpile(circuit)
    simulator = MBQC()
    simulator.set_pattern(pattern)
    simulator.set_input_state(input_state)
    simulator.run_pattern()
    # Return the classical and quantum outputs
    return simulator.get_classical_output(), simulator.get_quantum_output()
def __get_sample_dict(bit_num, mea_bits, samples):
    r"""Tally sampling results given the bit count and measured-bit indices.

    Args:
        bit_num (int): total number of bits
        mea_bits (list): indices of the measured bits
        samples (list): sampled bit strings ('?' at unmeasured positions)

    Returns:
        dict: mapping from every possible padded bit string to its count

    Raises:
        KeyError: if a sample string does not match the expected pattern.
    """
    from collections import Counter

    sample_dict = {}
    num_mea = len(mea_bits)
    # Enumerate all 2**num_mea outcomes, padding unmeasured positions with '?'
    for i in range(2 ** num_mea):
        str_of_order = bin(i)[2:].zfill(num_mea)
        bit_str = []
        idx = 0
        for j in range(bit_num):
            if j in mea_bits:
                bit_str.append(str_of_order[idx])
                idx += 1
            else:
                bit_str.append('?')
        sample_dict["".join(bit_str)] = 0
    # Count sampling results in a single pass (the original looped over
    # set(samples) calling samples.count(), which is quadratic)
    for string, count in Counter(samples).items():
        sample_dict[string] += count
    return sample_dict
def sample_by_mbqc(circuit, input_state=None, plot=False, shots=1024, print_or_not=True):
r"""将 MBQC 模型重复运行多次,获得经典结果的统计分布。
| |
from __future__ import absolute_import
import sys
import warnings
from typing import Any, List, Tuple, Type
import numpy as np
from pandas import DataFrame
from scipy import interpolate, signal
from scipy.stats import pearsonr
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
if sys.version_info >= (3, 8):
from typing import TypedDict # pylint: disable=no-name-in-module
else:
from typing_extensions import TypedDict
# Type alias for NumPy arrays.
# NOTE(review): Type[np.ndarray] denotes the ndarray *class* itself, not an
# array instance; instances would normally be annotated as np.ndarray —
# confirm intended usage before changing.
Array = Type[np.ndarray]
class UQDistDict(TypedDict):
    """Definition of the dictionary structure expected for the parsing of UQ
    parameters for generating distributions.

    Exactly one of the three key triplets below is expected to be present
    (see generate_index_distribution).
    """
    # Fraction-based specification: fraction of the data per split.
    uq_train_fr: float
    uq_valid_fr: float
    uq_test_fr: float
    # Block-list specification: explicit block indices per split.
    uq_train_vec: List[int]
    uq_valid_vec: List[int]
    uq_test_vec: List[int]
    # Block-count specification: number of blocks per split.
    uq_train_bks: int
    uq_valid_bks: int
    uq_test_bks: int
def generate_index_distribution(
    numTrain: int, numTest: int, numValidation: int, params: UQDistDict
) -> Tuple[Any, ...]:
    """
    Generates a vector of indices to partition the data for training. NO
    CHECKING IS DONE: it is assumed that the data could be partitioned in the
    specified blocks and that the block indices describe a coherent partition.

    :param int numTrain: Number of training data points
    :param int numTest: Number of testing data points
    :param int numValidation: Number of validation data points (may be zero)
    :param Dict params: Contains the keywords that control the behavior of the function \
        (uq_train_fr, uq_valid_fr, uq_test_fr for fraction specification, \
        uq_train_vec, uq_valid_vec, uq_test_vec for block list specification, and \
        uq_train_bks, uq_valid_bks, uq_test_bks for block number specification)

    :return: Tuple of numpy arrays

        - indexTrain (int numpy array): Indices for data in training
        - indexValidation (int numpy array): Indices for data in validation (if any)
        - indexTest (int numpy array): Indices for data in testing (if merging)
    """
    def has_triplet(suffix):
        # True when all three split keys with this suffix are present.
        return all(
            "uq_{}_{}".format(split, suffix) in params
            for split in ("train", "valid", "test")
        )

    if has_triplet("fr"):
        # specification by fraction
        print("Computing UQ cross-validation - Distributing by FRACTION")
        return generate_index_distribution_from_fraction(
            numTrain, numTest, numValidation, params
        )
    if has_triplet("vec"):
        # specification by block list
        print("Computing UQ cross-validation - Distributing by BLOCK LIST")
        return generate_index_distribution_from_block_list(
            numTrain, numTest, numValidation, params
        )
    if has_triplet("bks"):
        # specification by block size
        print("Computing UQ cross-validation - Distributing by BLOCK NUMBER")
        return generate_index_distribution_from_blocks(
            numTrain, numTest, numValidation, params
        )
    print("ERROR !! No consistent UQ parameter specification found !! ... exiting ")
    raise KeyError(
        "No valid triplet of ('uq_train_*', 'uq_valid_*', 'uq_test_*') found. (* is any of fr, vec or bks)"
    )
def generate_index_distribution_from_fraction(
    numTrain: int, numTest: int, numValidation: int, params: "UQDistDict"
) -> Tuple[Any, ...]:
    """Generates a vector of indices to partition the data for training. It
    checks that the fractions provided are within [0, 1] and add up to 1.

    Parameters
    ----------
    numTrain : int
        Number of training data points
    numTest : int
        Number of testing data points
    numValidation : int
        Number of validation data points (may be zero)
    params : dictionary with parameters
        Contains the keywords that control the behavior of the function
        (uq_train_fr, uq_valid_fr, uq_test_fr)

    Return
    ----------
    indexTrain : int numpy array
        Indices for data in training
    indexValidation : int numpy array
        Indices for data in validation (if any)
    indexTest : int numpy array
        Indices for data in testing (if merging)
    """
    # Tolerance for the floating-point sum-to-one check
    tol = 1e-7
    # Extract required parameters
    fractionTrain = params["uq_train_fr"]
    fractionValidation = params["uq_valid_fr"]
    fractionTest = params["uq_test_fr"]
    # Each fraction must lie in the closed interval [0, 1]
    # (boundary values 0.0 and 1.0 are accepted by the checks below)
    if (fractionTrain < 0.0) or (fractionTrain > 1.0):
        raise ValueError(
            "uq_train_fr is not in [0, 1] range. uq_train_fr: ", fractionTrain
        )
    if (fractionValidation < 0.0) or (fractionValidation > 1.0):
        raise ValueError(
            "uq_valid_fr is not in [0, 1] range. uq_valid_fr: ", fractionValidation
        )
    if (fractionTest < 0.0) or (fractionTest > 1.0):
        raise ValueError(
            "uq_test_fr is not in [0, 1] range. uq_test_fr: ", fractionTest
        )
    fractionSum = fractionTrain + fractionValidation + fractionTest
    if abs(fractionSum - 1.0) > tol:
        raise ValueError(
            "Specified UQ fractions (uq_train_fr, uq_valid_fr, uq_test_fr) do not add up to 1. No cross-validation partition is computed ! sum:",
            fractionSum,
        )
    # Determine data size and block size
    if fractionTest > 0:
        # Use all data and re-distribute the partitions
        numData = numTrain + numValidation + numTest
    else:
        # Preserve test partition
        numData = numTrain + numValidation
    sizeTraining = int(np.round(numData * fractionTrain))
    sizeValidation = int(np.round(numData * fractionValidation))
    # Shuffle all eligible indices once, then slice the shuffled vector
    Folds = np.arange(numData)
    np.random.shuffle(Folds)
    # Fill train partition
    indexTrain = Folds[:sizeTraining]
    # Fill validation partition (None when no validation fraction requested)
    indexValidation = None
    if fractionValidation > 0:
        indexValidation = Folds[sizeTraining : sizeTraining + sizeValidation]
    # Fill test partition (remaining indices; None when test is preserved)
    indexTest = None
    if fractionTest > 0:
        indexTest = Folds[sizeTraining + sizeValidation :]

    return indexTrain, indexValidation, indexTest
def generate_index_distribution_from_blocks(
    numTrain: int, numTest: int, numValidation: int, params: "UQDistDict"
) -> Tuple[Any, ...]:
    """Generates a vector of indices to partition the data for training. NO
    CHECKING IS DONE: it is assumed that the data could be partitioned in the
    specified block quantities and that the block quantities describe a
    coherent partition.

    Parameters
    ----------
    numTrain : int
        Number of training data points
    numTest : int
        Number of testing data points
    numValidation : int
        Number of validation data points (may be zero)
    params : dictionary with parameters
        Contains the keywords that control the behavior of the function
        (uq_train_bks, uq_valid_bks, uq_test_bks)

    Return
    ----------
    indexTrain : int numpy array
        Indices for data in training
    indexValidation : int numpy array
        Indices for data in validation (if any)
    indexTest : int numpy array
        Indices for data in testing (if merging)
    """
    # Extract required parameters
    numBlocksTrain = params["uq_train_bks"]
    numBlocksValidation = params["uq_valid_bks"]
    numBlocksTest = params["uq_test_bks"]
    numBlocksTotal = numBlocksTrain + numBlocksValidation + numBlocksTest
    # Guard against a zero-block specification, which would otherwise
    # surface as a cryptic ZeroDivisionError in the blockSize computation
    if numBlocksTotal < 1:
        raise ValueError(
            "Specified UQ block counts (uq_train_bks, uq_valid_bks, uq_test_bks) must add up to a positive total. sum:",
            numBlocksTotal,
        )
    # Determine data size and block size
    if numBlocksTest > 0:
        # Use all data and re-distribute the partitions
        numData = numTrain + numValidation + numTest
    else:
        # Preserve test partition
        numData = numTrain + numValidation
    blockSize = (
        numData + numBlocksTotal // 2
    ) // numBlocksTotal  # integer division with rounding
    remainder = numData - blockSize * numBlocksTotal
    if remainder != 0:
        print(
            "Warning ! Requested partition does not distribute data evenly between blocks. "
            "Testing (if specified) or Validation (if specified) will use different block size."
        )
    sizeTraining = numBlocksTrain * blockSize
    sizeValidation = numBlocksValidation * blockSize
    # Shuffle all eligible indices once, then slice the shuffled vector
    Folds = np.arange(numData)
    np.random.shuffle(Folds)
    # Fill train partition
    indexTrain = Folds[:sizeTraining]
    # Fill validation partition (None when no validation blocks requested)
    indexValidation = None
    if numBlocksValidation > 0:
        indexValidation = Folds[sizeTraining : sizeTraining + sizeValidation]
    # Fill test partition (remaining indices; None when test is preserved)
    indexTest = None
    if numBlocksTest > 0:
        indexTest = Folds[sizeTraining + sizeValidation :]

    return indexTrain, indexValidation, indexTest
def generate_index_distribution_from_block_list(
numTrain: int, numTest: int, numValidation: int, params: UQDistDict
) -> Tuple[Any, ...]:
"""Generates a vector of indices to partition the data for training. NO
CHECKING IS DONE: it is assumed that the data could be partitioned in the
specified list of blocks and that the block indices describe a coherent
partition.
Parameters
----------
numTrain : int
Number of training data points
numTest : int
Number of testing data points
numValidation : int
Number of validation data points (may be zero)
params : dictionary with parameters
Contains the keywords that control the behavior of the function
(uq_train_vec, uq_valid_vec, uq_test_vec)
Return
----------
indexTrain : int numpy array
Indices for data in training
indexValidation : int numpy array
Indices for data in validation (if any)
indexTest : int numpy array
Indices for data in testing (if merging)
"""
# Extract required parameters
blocksTrain = params["uq_train_vec"]
blocksValidation = params["uq_valid_vec"]
blocksTest = params["uq_test_vec"]
# Determine data size and block size
numBlocksTrain = len(blocksTrain)
numBlocksValidation = len(blocksValidation)
numBlocksTest = len(blocksTest)
numBlocksTotal = numBlocksTrain + numBlocksValidation + numBlocksTest
if numBlocksTest > 0:
# Use all data and re-distribute the partitions
numData = numTrain + numValidation + numTest
else:
# Preserve test partition
numData = numTrain + numValidation
blockSize = (
numData + numBlocksTotal // 2
) // numBlocksTotal # integer division with rounding
remainder = numData - blockSize * numBlocksTotal
if remainder != 0:
print(
"Warning ! Requested partition does not distribute data evenly between blocks. "
"Last block will have different size."
)
if remainder < 0:
remainder = 0
# Fill partition indices
# Fill train partition
maxSizeTrain = blockSize * numBlocksTrain + remainder
indexTrain | |
<reponame>Bhumbra/probayes
""" Provides probability distribution functionality based on Distribution """
#-------------------------------------------------------------------------------
import collections
import numpy as np
from probayes.named_dict import NamedDict
from probayes.pd_utils import str_margcond, margcond_str, product, summate, \
rekey_dict, ismonotonic
from probayes.vtypes import isscalar
from probayes.pscales import eval_pscale, rescale, iscomplex
from probayes.pscales import div_prob
from probayes.distribution import Distribution
#-------------------------------------------------------------------------------
class PD (Distribution):
""" A probability distribution is a distribution with corresponding
probabilities. The dimensions of the probability scalar or array must be
commensurate with the values of the distribution according to their assigned
dimensions.
While it is intended for PD instances to come from RV, RF, SD, or SP calls,
PDs can be instantiated directly.
"""
# Protected
_prob = None # Probability
_pscale = None # Same convention as Prob()
_marg = None # Ordered dictionary of marginals: {key: name}
  _cond = None # Ordered dictionary of conditionals: {key: name}
#-------------------------------------------------------------------------------
def __init__(self, name, *args, **kwds):
""" Initialises the PD with a name, args, and kwds in the same way as
Distribution(), except with the following reserved keywords:
'dims': sets the dimensionality.
'prob': sets the probability scalar or array.
'pscale': sets the pscale
"""
args = tuple(args)
kwds = dict(kwds)
prob = None if 'prob' not in kwds else kwds.pop('prob')
pscale = None if 'pscale' not in kwds else kwds.pop('pscale')
super().__init__(name, *args, **kwds)
self.pscale = pscale
self.prob = prob
#-------------------------------------------------------------------------------
  @property
  def name(self):
    """ Name of the distribution, encoding marginal/conditional variables """
    return self._name

  @property
  def marg(self):
    """ Ordered dictionary of marginal variables: {key: name} """
    return self._marg

  @property
  def cond(self):
    """ Ordered dictionary of conditional variables: {key: name} """
    return self._cond

  @name.setter
  def name(self, name):
    # Only the name is sensitive to what are marginal and conditional variables
    NamedDict.name.fset(self, name)
    # Parse the name string into marginal/conditional variable dictionaries
    self._marg, self._cond = str_margcond(self.name)
#-------------------------------------------------------------------------------
  @property
  def pscale(self):
    """ Probability scale, following the same convention as Prob() """
    return self._pscale

  @pscale.setter
  def pscale(self, pscale=None):
    # eval_pscale canonicalises the pscale specification
    self._pscale = eval_pscale(pscale)
#-------------------------------------------------------------------------------
  @property
  def prob(self):
    """ Probability scalar or array commensurate with the distribution values """
    return self._prob

  @prob.setter
  def prob(self, prob=None):
    """ Sets the probability, checking scalarity/shape against the values """
    self._prob = prob
    if self._prob is None:
      return
    # Singleton (scalar-valued) distributions must carry a scalar probability;
    # otherwise prob must be an array matching the values' dimensionality
    if self._issingleton:
      assert isscalar(self._prob), "Singleton vals with non-scalar prob"
    else:
      assert not isscalar(self._prob), "Non singleton values with scalar prob"
      assert self._ndim == self._prob.ndim, \
        "Mismatch in dimensionality between values {} and probabilities {}".\
        format(self.ndim, self._prob.ndim)
      assert np.all(np.array(self._shape) == np.array(self._prob.shape)), \
        "Mismatch in dimensions between values {} and probabilities {}".\
        format(self._shape, self._prob.shape)
#-------------------------------------------------------------------------------
  @property
  def dims(self):
    """ Ordered dictionary of variable dimensions: {key: dim} """
    return self._dims

  @dims.setter
  def dims(self, dims=None):
    """ Sets the dimensions for each of the variables.

    :param dims: a dictionary of {variable_name: variable_dim}
                 The keys should correspond to that of a dictionary. If dims
                 is None, then the dimensionality is set according to the
                 order in values.
    """
    Distribution.dims.fset(self, dims)

    # Override name entries for scalar values
    for i, key in enumerate(self._keylist):
      assert key in self._keyset, \
        "Value key {} not found among name keys {}".format(key, self._keyset)
      if self._aresingleton[i]:
        # Singleton variables are rendered as "key=value" in the name,
        # whether marginal or conditional
        if key in self.marg.keys():
          self.marg[key] = "{}={}".format(key, self[key])
        elif key in self.cond.keys():
          self.cond[key] = "{}={}".format(key, self[key])
        else:
          raise ValueError("Variable {} not accounted for in name {}".format(
              key, self.name))
    # Rebuild the name from the updated marginal/conditional dictionaries
    self._name = margcond_str(self.marg, self.cond)
#-------------------------------------------------------------------------------
  def marginalise(self, keys):
    """ Marginalises the distribution over the given keys:
    from p(A, key | B), returns P(A | B).

    :param keys: a key string or iterable of key strings; each must be a
                 (non-scalar) marginal variable of this distribution.
    :return: a new PD with the keys summed out.
    """
    # from p(A, key | B), returns P(A | B)
    if isinstance(keys, str):
      keys = [keys]
    for key in keys:
      assert key in self._marg.keys(), \
        "Key {} not marginal in distribution {}".format(key, self._name)
    keys = set(keys)
    marg = collections.OrderedDict(self._marg)
    cond = collections.OrderedDict(self._cond)
    vals = collections.OrderedDict()
    dims = collections.OrderedDict()
    # dim_delta counts removed dimensions so remaining ones can be shifted down
    dim_delta = 0
    sum_axes = set()
    for i, key in enumerate(self._keylist):
      if key in keys:
        assert not self._aresingleton[i], \
          "Cannot marginalise along scalar for key {}".format(key)
        # Collect the axis to sum over and drop the key from the marginals
        sum_axes.add(self._dims[key])
        marg.pop(key)
        dim_delta += 1
      else:
        if not self._aresingleton[i]:
          dims.update({key: self._dims[key] - dim_delta})
        vals.update({key:self[key]})
    name = margcond_str(marg, cond)
    # Sum probabilities in linear space (pscale 1.), then restore the pscale
    prob = rescale(self._prob, self._pscale, 1.)
    sum_prob = np.sum(prob, axis=tuple(sum_axes), keepdims=False)
    prob = rescale(sum_prob, 1., self._pscale)
    return PD(name, vals, dims=dims, prob=prob, pscale=self._pscale)
#-------------------------------------------------------------------------------
  def marginal(self, keys):
    """ Returns the marginal distribution for the given keys:
    from p(A, key | B), returns P(key | B), by marginalising out every
    other (non-scalar, non-requested) marginal variable.
    """
    # from p(A, key | B), returns P(key | B)
    if isinstance(keys, str):
      keys = [keys]

    # Check keys arg marginal
    keys = set(keys)
    dims = set()
    for key in keys:
      assert key in self._marg.keys(), \
        "Key {} not marginal in distribution {}".format(key, self._name)
      dim = self._dims[key]
      if dim is not None:
        dims.add(dim)

    # Check consistency of marginal dims: any variable sharing a dimension
    # with a requested key must itself be requested
    for key in self._keylist:
      dim = self._dims[key]
      if dim in dims:
        assert key in keys, \
          "Dimensionality precludes marginalising {} without: {}".\
          format(keys, key)

    # Determine keys to marginalise by exclusion
    marginalise_keys = set()
    aresingletons = []
    marg_scalars = set()
    for i, key in enumerate(self._keylist):
      singleton = self._aresingleton[i]
      marginal = key in keys
      if key in self._marg.keys():
        aresingletons.append(singleton)
        if singleton:
          marg_scalars.add(key)
        if not singleton and not marginal:
          marginalise_keys.add(key)

    # If including any marginal scalars, must include all scalars
    # NOTE(review): the error message below uses the loop variable `key`
    # left over from the loop above — confirm intended.
    if any(aresingletons):
      assert marg_scalars.issubset(keys), \
        "If evaluating marginal for key {}".format(key) + ", " + \
        "must include all marginal scalars in {}".format(self._marg.keys())

    return self.marginalise(marginalise_keys)
#-------------------------------------------------------------------------------
  def conditionalise(self, keys):
    """ Conditionalises the distribution on the given keys:
    from P(A, key | B), returns P(A | B, key).
    If vals[key] is a scalar, this effectively normalises prob.
    """
    # from P(A, key | B), returns P(A | B, key).
    # if vals[key] is a scalar, this effectively normalises prob
    if isinstance(keys, str):
      keys = [keys]
    keys = set(keys)
    for key in keys:
      assert key in self._marg.keys(), \
        "Key {} not marginal in distribution {}".format(key, self.name)
    dims = collections.OrderedDict()
    marg = collections.OrderedDict(self._marg)
    cond = collections.OrderedDict(self._cond)
    normalise = False
    # delta counts conditionalised array variables (their dims are assigned later)
    delta = 0
    marg_scalars = set()
    for i, key in enumerate(self._keylist):
      if key in keys:
        # Move the key from the marginals to the conditionals
        cond.update({key: marg.pop(key)})
      if self._aresingleton[i]:
        dims.update({key: None})
        if key in keys:
          # Conditionalising on a scalar requires normalising the whole array
          normalise = True
      elif key in self._marg.keys():
        if self._aresingleton[i]:
          marg_scalars.add(key)
        if key in keys:
          delta += 1 # Don't add to dim just yet
        else:
          dim = self._dims[key]
          dims.update({key: dim})
      else:
        # Conditional (non-marginal) variables shift down by delta
        dim = self.dims[key] - delta
        dims.update({key: dim})

    # Reduce remaining marginals to lowest dimension
    dim_val = [val for val in dims.values() if val is not None]
    dim_max = 0
    if len(dim_val):
      dim_min = min(dim_val)
      for key in dims.keys():
        if dims[key] is not None:
          dim = dims[key]-dim_min
          dims.update({key: dim})
          dim_max = max(dim_max, dim)

    # Place the newly conditionalised keys after the highest remaining dim,
    # preserving their relative order
    dim_min = self.ndim
    for key in keys:
      dim = self.dims[key]
      if dim is not None:
        dim_min = min(dim_min, dim)
    for key in keys:
      dim = self._dims[key]
      if dim is not None:
        dims.update({key: dim-dim_min+dim_max+1})
    # NOTE(review): the error message below uses the loop variable `key`
    # left over from the loop above — confirm intended.
    if normalise:
      assert marg_scalars.issubset(set(keys)), \
        "If conditionalising for key {}".format(key) + "," + \
        "must include all marginal scalars in {}".format(self._marg.keys())

    # Setup vals dimensions and evaluate probabilities
    name = margcond_str(marg, cond)
    vals = collections.OrderedDict(super().redim(dims))
    old_dims = []
    new_dims = []
    sum_axes = set()
    for key in self._keylist:
      old_dim = self._dims[key]
      if old_dim is not None and old_dim not in old_dims:
        old_dims.append(old_dim)
        new_dims.append(dims[key])
        # Axes of remaining (non-conditionalised) marginals are summed over
        # to form the normalising denominator
        if key not in keys and key in self._marg.keys():
          sum_axes.add(dims[key])
    prob = np.moveaxis(self._prob, old_dims, new_dims)
    # For log-like pscales, subtract the max before rescaling for stability
    if normalise and iscomplex(self._pscale):
      prob = prob - prob.max()
    prob = rescale(prob, self._pscale, 1.)
    if normalise:
      prob = div_prob(prob, np.sum(prob))
    if len(sum_axes):
      prob = div_prob(prob, \
                      np.sum(prob, axis=tuple(sum_axes), keepdims=True))
    prob = rescale(prob, 1., self._pscale)
    return PD(name, vals, dims=dims, prob=prob, pscale=self._pscale)
#-------------------------------------------------------------------------------
def redim(self, dims):
"""
Returns a distribution according to redimensionised values in dims, index-
ordered by the order in dims
"""
dist = super().redim(dims)
vals, dims = dict(dist), dist.dims
prob = self._prob
# Need to realign prob axes to new dimensions
if not self._issingleton:
old_dims = []
new_dims = []
for i, key in enumerate(self._keylist):
if not self._aresingletons[i]:
old_dims.append(self._dims[key])
new_dims.append(dims[key])
prob = np.moveaxis(prob, old_dims, new_dims)
return PD(self._name, vals, dims=dims, prob=prob, pscale=self._pscale)
#-------------------------------------------------------------------------------
def rekey(self, keymap):
"""
Returns a distribution with modified key names without axes changes.
"""
dist = super().rekey(keymap)
marg = rekey_dict(self._marg, keymap)
cond = rekey_dict(self._cond, keymap)
name = margcond_str(marg, cond)
return PD(name, dict(dist), dims=dist.dims,
prob=self.prob, pscale=self._pscale)
#-------------------------------------------------------------------------------
def prod(self, keys):
# from P(A, key | B), returns P(A, {} | B)
if isinstance(keys, str):
keys = [keys]
for key in keys:
assert key in self.marg.keys(), \
"Key {} not marginal in distribution {}".format(key, self.name)
keys = set(keys)
marg = collections.OrderedDict(self._marg)
cond = collections.OrderedDict(self._cond)
vals = collections.OrderedDict()
dims = collections.OrderedDict()
dim_delta = 0
prod_axes = []
for i, key in enumerate(self._keylist):
if key in keys:
assert not self._aresingleton[i], \
"Cannot apply product along scalar for key {}".format(key)
if self._dims[key] not in prod_axes:
prod_axes.append(self.dims[key])
dim_delta += 1
marg.update({key: key+"={}"})
vals.update({key: {self[key].size}})
else:
if not self._aresingleton[i]:
dims.update({key: self._dims[key] - dim_delta})
vals.update({key:self[key]})
name = margcond_str(marg, cond)
pscale = self._pscale
pscale_product = pscale
if pscale_product not in [0., 1.]:
pscale_scaling = np.prod(np.array(self.shape)[prod_axes])
if | |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import discord
import os
from typing import Dict, List, Set, Tuple
from cache import PssCache
import emojis
import pss_assert
import pss_entity as entity
import pss_core as core
import pss_lookups as lookups
import settings
import utility as util
# ---------- Constants ----------

# PSS API path and XML property names for character (crew) designs
CHARACTER_DESIGN_BASE_PATH = 'CharacterService/ListAllCharacterDesigns2?languageKey=en'
CHARACTER_DESIGN_KEY_NAME = 'CharacterDesignId'
CHARACTER_DESIGN_DESCRIPTION_PROPERTY_NAME = 'CharacterDesignName'

# PSS API path and XML property names for crew collections
COLLECTION_DESIGN_BASE_PATH = 'CollectionService/ListAllCollectionDesigns?languageKey=en'
COLLECTION_DESIGN_KEY_NAME = 'CollectionDesignId'
COLLECTION_DESIGN_DESCRIPTION_PROPERTY_NAME = 'CollectionName'

# Prestige lookup endpoints; the design id gets appended by the caller
# NOTE(review): the f-string prefixes below have no placeholders — presumably
# leftovers from an earlier interpolation; harmless but confirm.
__PRESTIGE_FROM_BASE_PATH = f'CharacterService/PrestigeCharacterFrom?languagekey=en&characterDesignId='
__PRESTIGE_TO_BASE_PATH = f'CharacterService/PrestigeCharacterTo?languagekey=en&characterDesignId='


# ---------- Initialization ----------

# Per-character caches of prestige lookups, keyed by character design id
__prestige_from_cache_dict = {}
__prestige_to_cache_dict = {}
# ---------- Classes ----------
class CharDesignDetails(entity.EntityDesignDetails):
    """Display details for a single crew (character) design.

    Wraps the raw character design dict obtained from the PSS API and
    pre-computes the long/short detail field lists passed to the base class.
    """

    def __init__(self, char_design_info: dict, collections_designs_data: dict = None, level: int = None):
        # Resolve human-readable values from the raw API fields
        special = _get_ability_name(char_design_info)
        equipment_slots = _convert_equipment_mask(int(char_design_info['EquipmentMask']))
        collection_name = _get_collection_name(char_design_info, collections_designs_data)
        walk_speed = char_design_info['WalkingSpeed']
        run_speed = char_design_info['RunSpeed']
        # Ability strength at the requested level, suffixed with ability name
        ability = _get_stat('SpecialAbilityArgument', level, char_design_info)
        if special:
            ability += f' ({special})'

        self.__ability: str = ability
        self.__collection_name: str = collection_name
        self.__equipment_slots: str = equipment_slots
        self.__gender: str = char_design_info['GenderType']
        self.__level: int = level
        self.__race: str = char_design_info['RaceType']
        self.__rarity: str = char_design_info['Rarity']
        self.__speed: str = f'{walk_speed}/{run_speed}'
        # Stats are formatted as a range when level is None, else interpolated
        self.__stat_attack: str = _get_stat('Attack', level, char_design_info)
        self.__stat_engine: str = _get_stat('Engine', level, char_design_info)
        self.__stat_fire_resistance: str = char_design_info['FireResistance']
        self.__stat_hp: str = _get_stat('Hp', level, char_design_info)
        self.__stat_pilot: str = _get_stat('Pilot', level, char_design_info)
        self.__stat_repair: str = _get_stat('Repair', level, char_design_info)
        self.__stat_science: str = _get_stat('Science', level, char_design_info)
        self.__stat_weapon: str = _get_stat('Weapon', level, char_design_info)
        self.__training_capacity: str = char_design_info['TrainingCapacity']

        # (label, value) pairs for the long (full) output format
        details_long: List[Tuple[str, str]] = [
            ('Level', self.__level),
            ('Rarity', self.__rarity),
            ('Race', self.__race),
            ('Collection', self.__collection_name),
            ('Gender', self.__gender),
            ('Ability', self.__ability),
            ('HP', self.__stat_hp),
            ('Attack', self.__stat_attack),
            ('Repair', self.__stat_repair),
            ('Pilot', self.__stat_pilot),
            ('Science', self.__stat_science),
            ('Engine', self.__stat_engine),
            ('Weapon', self.__stat_weapon),
            ('Walk/run speed', self.__speed),
            ('Fire resist', self.__stat_fire_resistance),
            ('Training cap', self.__training_capacity),
            ('Slots', self.__equipment_slots)
        ]
        # (label, value, embed-only) triples for the short output format
        details_short: List[Tuple[str, str, bool]] = [
            ('Rarity', self.__rarity, False),
            ('Ability', self.__ability, True),
            ('Collection', self.__collection_name, True)
        ]

        super().__init__(
            name=char_design_info[CHARACTER_DESIGN_DESCRIPTION_PROPERTY_NAME],
            description=char_design_info['CharacterDesignDescription'],
            details_long=details_long,
            details_short=details_short
        )

    @property
    def ability(self) -> str:
        return self.__ability

    @property
    def attack(self) -> str:
        return self.__stat_attack

    @property
    def collection_name(self) -> str:
        return self.__collection_name

    @property
    def engine(self) -> str:
        return self.__stat_engine

    @property
    def equipment_slots(self) -> str:
        return self.__equipment_slots

    @property
    def fire_resistance(self) -> str:
        return self.__stat_fire_resistance

    @property
    def gender(self) -> str:
        return self.__gender

    @property
    def hp(self) -> str:
        return self.__stat_hp

    @property
    def level(self) -> int:
        return self.__level

    @property
    def pilot(self) -> str:
        return self.__stat_pilot

    @property
    def race(self) -> str:
        return self.__race

    @property
    def rarity(self) -> str:
        return self.__rarity

    @property
    def repair(self) -> str:
        return self.__stat_repair

    @property
    def science(self) -> str:
        return self.__stat_science

    @property
    def speed(self) -> str:
        return self.__speed

    @property
    def training_capacity(self) -> str:
        return self.__training_capacity

    @property
    def weapon(self) -> str:
        return self.__stat_weapon
class CollectionDesignDetails(entity.EntityDesignDetails):
    """Display details for a crew collection design.

    Wraps the raw collection design dict obtained from the PSS API and
    pre-computes the detail field lists passed to the base class.
    """

    def __init__(self, collection_design_info: dict):
        collection_crew = _get_collection_chars_designs_infos(collection_design_info)
        # Map the raw enhancement type to its display name, falling back to
        # the raw value. (Removed a dead assignment that was immediately
        # overwritten by this lookup.)
        collection_perk = lookups.COLLECTION_PERK_LOOKUP.get(collection_design_info['EnhancementType'], collection_design_info['EnhancementType'])
        min_combo = collection_design_info['MinCombo']
        max_combo = collection_design_info['MaxCombo']
        base_enhancement_value = collection_design_info['BaseEnhancementValue']
        step_enhancement_value = collection_design_info['StepEnhancementValue']

        self.__characters: str = ', '.join(collection_crew)
        self.__min_max_combo = f'{min_combo}...{max_combo}'
        self.__enhancement = f'{base_enhancement_value} (Base), {step_enhancement_value} (Step)'

        # (label, value) pairs for the long (full) output format
        details_long: List[Tuple[str, str]] = [
            ('Combo Min...Max', self.__min_max_combo),
            (collection_perk, self.__enhancement),
            ('Characters', self.__characters)
        ]
        details_short: List[Tuple[str, str, bool]] = [
        ]

        super().__init__(
            name=collection_design_info[COLLECTION_DESIGN_DESCRIPTION_PROPERTY_NAME],
            description=collection_design_info['CollectionDescription'],
            details_long=details_long,
            details_short=details_short,
            hyperlink='https://pixelstarships.fandom.com/wiki/Category:Crew_Collections'
        )

    @property
    def characters(self) -> str:
        return self.__characters

    @property
    def min_max_combo(self) -> str:
        return self.__min_max_combo

    @property
    def enhancement(self) -> str:
        return self.__enhancement
class PrestigeDetails(entity.EntityDesignDetails):
    """Base class for prestige lookup results (from/to) for a crew design.

    NOTE(review): unlike its sibling classes, this __init__ does not call
    super().__init__; the embed/text accessors it relies on are overridden
    below, so base-class state appears unused — confirm intended.
    """

    def __init__(self, char_design_info: dict, prestige_infos: Dict[str, List[str]], error_message: str, title_template: str, sub_title_template: str):
        self.__char_design_name: str = char_design_info[CHARACTER_DESIGN_DESCRIPTION_PROPERTY_NAME]
        # Total number of prestige combinations across all grouped partners
        self.__count: int = sum([len(prestige_partners) for prestige_partners in prestige_infos.values()])
        self.__error: str = error_message
        self.__prestige_infos: Dict[str, List[str]] = prestige_infos
        self.__title_template: str = title_template or '**$char_design_name$** has **$count$** combinations:'
        self.__sub_title_template: str = sub_title_template or '**$char_design_name$**:'

    @property
    def char_design_name(self) -> str:
        return self.__char_design_name

    @property
    def count(self) -> int:
        return self.__count

    @property
    def error(self) -> str:
        return self.__error

    @property
    def prestige_infos(self) -> Dict[str, List[str]]:
        return self.__prestige_infos

    @property
    def title(self) -> str:
        # Substitute the placeholders in the title template
        result = self.__title_template
        result = result.replace('$char_design_name$', self.char_design_name)
        result = result.replace('$count$', str(self.count))
        return result

    def get_details_as_embed(self) -> discord.Embed:
        # Embed output is not implemented for prestige results
        return None

    def get_details_as_text_long(self) -> List[str]:
        """Render the title plus either the error or the grouped partner lists."""
        result = [self.title]
        if self.error:
            result.append(self.error)
        else:
            for char_design_name in sorted(list(self.prestige_infos.keys())):
                prestige_partners = sorted(self.prestige_infos[char_design_name])
                result.append(self._get_sub_title(char_design_name))
                result.append(f'> {", ".join(prestige_partners)}')
        return result

    def get_details_as_text_short(self) -> List[str]:
        # Short output is identical to the long output for prestige results
        return self.get_details_as_text_long()

    def _get_sub_title(self, char_design_name: str) -> str:
        # Substitute the placeholder in the sub-title template
        result = self.__sub_title_template.replace('$char_design_name$', char_design_name)
        return result
class PrestigeFromDetails(PrestigeDetails):
    """Prestige combinations that a given crew can prestige from."""

    def __init__(self, char_from_design_info: dict, chars_designs_data: dict = None, prestige_from_data: dict = None):
        if not chars_designs_data:
            chars_designs_data = character_designs_retriever.get_data_dict3()
        prestige_infos = {}
        error = None
        if prestige_from_data:
            # Group partner crew by the crew they prestige into
            for prestige_info in prestige_from_data.values():
                partner_name = chars_designs_data[prestige_info['CharacterDesignId2']][CHARACTER_DESIGN_DESCRIPTION_PROPERTY_NAME]
                target_name = chars_designs_data[prestige_info['ToCharacterDesignId']][CHARACTER_DESIGN_DESCRIPTION_PROPERTY_NAME]
                prestige_infos.setdefault(target_name, []).append(partner_name)
        else:
            rarity = char_from_design_info['Rarity']
            if rarity == 'Special':
                error = 'One cannot prestige **Special** crew.'
            elif rarity == 'Legendary':
                error = 'One cannot prestige **Legendary** crew.'
            else:
                error = 'noone'
        super().__init__(
            char_from_design_info,
            prestige_infos,
            error,
            '**$char_design_name$** has **$count$** prestige combinations:',
            'To **$char_design_name$** with:'
        )
class PrestigeToDetails(PrestigeDetails):
    """Prestige recipes that produce a given crew."""

    def __init__(self, char_to_design_info: dict, chars_designs_data: dict = None, prestige_to_data: dict = None):
        chars_designs_data = chars_designs_data or character_designs_retriever.get_data_dict3()
        error = None
        prestige_infos = {}
        template_title = '**$char_design_name$** has **$count$** prestige recipes:'
        template_subtitle = '**$char_design_name$** with:'
        if prestige_to_data:
            # Build a symmetric partner map: each ingredient crew maps to the
            # set of crew it can be combined with
            prestige_recipes: Dict[str, Set[str]] = {}
            for value in prestige_to_data.values():
                char_1_design_name = chars_designs_data[value['CharacterDesignId1']][CHARACTER_DESIGN_DESCRIPTION_PROPERTY_NAME]
                char_2_design_name = chars_designs_data[value['CharacterDesignId2']][CHARACTER_DESIGN_DESCRIPTION_PROPERTY_NAME]
                prestige_recipes.setdefault(char_1_design_name, set()).add(char_2_design_name)
                prestige_recipes.setdefault(char_2_design_name, set()).add(char_1_design_name)

            prestige_recipe_ingredients: List[Tuple[str, Set[str]]] = [(char_design_name, prestige_partners) for char_design_name, prestige_partners in prestige_recipes.items()]
            prestige_infos: Dict[str, List[str]] = {}
            # Greedily consume the ingredient with the most partners, then
            # drop the now-covered mutual pairings from what remains
            while prestige_recipe_ingredients:
                prestige_recipe_ingredients = sorted(prestige_recipe_ingredients, key=lambda t: len(t[1]), reverse=True)
                (char_design_name, prestige_partners) = prestige_recipe_ingredients[0]
                prestige_infos[char_design_name] = list(prestige_partners)
                prestige_recipe_ingredients = PrestigeToDetails._update_prestige_recipe_ingredients(prestige_recipe_ingredients)
        else:
            if char_to_design_info['Rarity'] == 'Special':
                error = 'One cannot prestige to **Special** crew.'
            elif char_to_design_info['Rarity'] == 'Common':
                error = 'One cannot prestige to **Common** crew.'
            else:
                error = 'noone'
        super().__init__(char_to_design_info, prestige_infos, error, template_title, template_subtitle)

    @staticmethod
    def _update_prestige_recipe_ingredients(prestige_recipe_ingredients: List[Tuple[str, Set[str]]]) -> List[Tuple[str, Set[str]]]:
        """Drop the first (consumed) ingredient; strip it from mutual pairings."""
        result: List[Tuple[str, Set[str]]] = []
        # Take 1st char name & prestige partners
        # Remove that pair from the result
        # Iterate through
        (base_char_design_name, base_prestige_partners) = prestige_recipe_ingredients[0]
        for (char_design_name, prestige_partners) in prestige_recipe_ingredients[1:]:
            if base_char_design_name in prestige_partners and char_design_name in base_prestige_partners:
                # NOTE(review): this rebinds prestige_partners as a list, not
                # the annotated Set[str] — downstream only iterates/len()s it,
                # so it works, but confirm intended.
                prestige_partners = [x for x in prestige_partners if x != base_char_design_name]
            if prestige_partners:
                result.append((char_design_name, prestige_partners))
        return result
# ---------- Helper functions ----------
def _convert_equipment_mask(equipment_mask: int) -> str:
    """Convert an equipment bitmask into a comma-separated slot list, or '-' if empty."""
    slot_names = [
        slot_name
        for mask_bit, slot_name in lookups.EQUIPMENT_MASK_LOOKUP.items()
        if equipment_mask & mask_bit
    ]
    return ', '.join(slot_names) if slot_names else '-'
def _get_ability_name(char_design_info: dict) -> str:
    """Return the display name of the crew's special ability, or None if unknown."""
    if not char_design_info:
        return None
    return lookups.SPECIAL_ABILITIES_LOOKUP.get(char_design_info['SpecialAbilityType'])
def _get_collection_chars_designs_infos(collection_design_info: Dict[str, str]) -> list:
    """Return the sorted names of all crew belonging to the given collection."""
    collection_id = collection_design_info[COLLECTION_DESIGN_KEY_NAME]
    chars_designs_data = character_designs_retriever.get_data_dict3()
    member_names = [
        char_design_info[CHARACTER_DESIGN_DESCRIPTION_PROPERTY_NAME]
        for char_design_info in chars_designs_data.values()
        if char_design_info[COLLECTION_DESIGN_KEY_NAME] == collection_id
    ]
    return sorted(member_names)
def _get_collection_name(char_design_info: dict, collections_designs_data: dict = None) -> str:
    """Return the name of the collection the crew belongs to, or None if it has none.

    NOTE(review): collections_designs_data is accepted but not consulted; the
    lookup always goes through collection_designs_retriever — confirm intended.
    """
    if not char_design_info:
        return None
    collection_id = char_design_info[COLLECTION_DESIGN_KEY_NAME]
    if not collection_id or collection_id == '0':
        return None
    collection_design_info = collection_designs_retriever.get_entity_design_info_by_id(collection_id)
    return collection_design_info[COLLECTION_DESIGN_DESCRIPTION_PROPERTY_NAME]
def _get_stat(stat_name: str, level: int, char_design_info: dict) -> str:
    """Return the formatted value (or range) of a crew stat at the given level."""
    # The special ability stat stores its maximum under a differently
    # structured property name than the regular stats
    if stat_name.lower().startswith('specialability'):
        max_stat_name = 'SpecialAbilityFinalArgument'
    else:
        max_stat_name = f'Final{stat_name}'
    return _get_stat_value(
        float(char_design_info[stat_name]),
        float(char_design_info[max_stat_name]),
        level,
        char_design_info['ProgressionType'])
def _get_stat_value(min_value: float, max_value: float, level: int, progression_type: str) -> str:
    """Format a stat: interpolated value for a valid level, else the min-max range."""
    if level is not None and 1 <= level <= 40:
        return f'{_calculate_stat_value(min_value, max_value, level, progression_type):0.1f}'
    return f'{min_value:0.1f} - {max_value:0.1f}'
def _calculate_stat_value(min_value: float, max_value: float, level: int, progression_type: str) -> float:
    """Interpolate a stat between min (level 1) and max (level 40) using the
    progression curve exponent for the given progression type."""
    exponent = lookups.PROGRESSION_TYPES[progression_type]
    progress = ((level - 1) / 39) ** exponent
    return min_value + (max_value - min_value) * progress
# ---------- Crew info ----------
def get_char_design_details_by_id(char_design_id: str, level: int, chars_designs_data: dict = None, collections_designs_data: dict = None) -> CharDesignDetails:
    """Look up a crew design by id and build its details, or None if unknown."""
    if not char_design_id:
        return None
    if chars_designs_data is None:
        chars_designs_data = character_designs_retriever.get_data_dict3()
    if char_design_id not in chars_designs_data:
        return None
    return CharDesignDetails(
        chars_designs_data[char_design_id],
        collections_designs_data=collections_designs_data,
        level=level)
def get_char_design_details_by_name(char_name: str, level: int, as_embed: bool = settings.USE_EMBEDS):
    """Look up a crew by name and return (details, True) on success or
    ([error message], False) when no such crew exists.

    `level` must be None or in [1, 40] (validated by pss_assert).
    """
    pss_assert.valid_entity_name(char_name, 'char_name')
    pss_assert.parameter_is_valid_integer(level, 'level', min_value=1, max_value=40, allow_none=True)
    design_info = character_designs_retriever.get_entity_design_info_by_name(char_name)
    if design_info is None:
        return [f'Could not find a crew named **{char_name}**.'], False
    details = CharDesignDetails(design_info, level=level)
    if as_embed:
        return details.get_details_as_embed(), True
    return details.get_details_as_text_long(), True
# ---------- Collection Info ----------
def get_collection_design_details_by_name(collection_name: str, as_embed: bool = settings.USE_EMBEDS):
    """Look up a collection by name and return (details, True) on success or
    ([error message], False) when no such collection exists."""
    pss_assert.valid_entity_name(collection_name)
    design_info = collection_designs_retriever.get_entity_design_info_by_name(collection_name)
    if design_info is None:
        return [f'Could not find a collection named **{collection_name}**.'], False
    details = CollectionDesignDetails(design_info)
    if as_embed:
        return details.get_details_as_embed(), True
    return details.get_details_as_text_long(), True
# ---------- Prestige from Info ----------
def get_prestige_from_info(char_name: str, as_embed: bool = settings.USE_EMBEDS):
    """Return the prestige combinations reachable *from* the named crew as
    (details, True), or ([error message], False) when the crew is unknown."""
    pss_assert.valid_entity_name(char_name)
    designs_data = character_designs_retriever.get_data_dict3()
    from_design_info = character_designs_retriever.get_entity_design_info_by_name(
        char_name, entity_designs_data=designs_data)
    if not from_design_info:
        return [f'Could not find a crew named **{char_name}**.'], False
    prestige_data = _get_prestige_from_data(from_design_info)
    details = PrestigeFromDetails(from_design_info,
                                  chars_designs_data=designs_data,
                                  prestige_from_data=prestige_data)
    if as_embed:
        return details.get_details_as_embed(), True
    return details.get_details_as_text_long(), True
def _get_prestige_from_data(char_design_info: dict) -> dict:
if not char_design_info:
return {}
char_design_id = char_design_info[CHARACTER_DESIGN_KEY_NAME]
if char_design_id in __prestige_from_cache_dict.keys():
prestige_from_cache = __prestige_from_cache_dict[char_design_id]
else:
prestige_from_cache = _create_and_add_prestige_from_cache(char_design_id)
return | |
98:
return '~'
if table2Version == 129 and indicatorOfParameter == 97:
return '~'
if table2Version == 129 and indicatorOfParameter == 96:
return '~'
if table2Version == 129 and indicatorOfParameter == 95:
return '~'
if table2Version == 129 and indicatorOfParameter == 94:
return '~'
if table2Version == 129 and indicatorOfParameter == 93:
return '~'
if table2Version == 129 and indicatorOfParameter == 92:
return '~'
if table2Version == 129 and indicatorOfParameter == 91:
return '~'
if table2Version == 129 and indicatorOfParameter == 90:
return '~'
if table2Version == 129 and indicatorOfParameter == 89:
return '~'
if table2Version == 129 and indicatorOfParameter == 88:
return '~'
if table2Version == 129 and indicatorOfParameter == 87:
return '~'
if table2Version == 129 and indicatorOfParameter == 86:
return '~'
if table2Version == 129 and indicatorOfParameter == 85:
return '~'
if table2Version == 129 and indicatorOfParameter == 84:
return '~'
if table2Version == 129 and indicatorOfParameter == 83:
return '~'
if table2Version == 129 and indicatorOfParameter == 82:
return '~'
if table2Version == 129 and indicatorOfParameter == 81:
return '~'
if table2Version == 129 and indicatorOfParameter == 80:
return '~'
if table2Version == 129 and indicatorOfParameter == 79:
return '~'
if table2Version == 129 and indicatorOfParameter == 78:
return '~'
if table2Version == 129 and indicatorOfParameter == 71:
return '~'
if table2Version == 129 and indicatorOfParameter == 70:
return '~'
if table2Version == 129 and indicatorOfParameter == 69:
return '~'
if table2Version == 129 and indicatorOfParameter == 68:
return '~'
if table2Version == 129 and indicatorOfParameter == 67:
return '~'
if table2Version == 129 and indicatorOfParameter == 66:
return '~'
if table2Version == 129 and indicatorOfParameter == 65:
return '~'
if table2Version == 129 and indicatorOfParameter == 64:
return '~'
if table2Version == 129 and indicatorOfParameter == 63:
return '~'
if table2Version == 129 and indicatorOfParameter == 62:
return 'obctgrd'
if table2Version == 129 and indicatorOfParameter == 61:
return 'tpogrd'
if table2Version == 129 and indicatorOfParameter == 60:
return 'pvgrd'
if table2Version == 129 and indicatorOfParameter == 59:
return 'capegrd'
if table2Version == 129 and indicatorOfParameter == 58:
return 'pargrd'
if table2Version == 129 and indicatorOfParameter == 57:
return 'uvbgrd'
if table2Version == 129 and indicatorOfParameter == 56:
return 'mn2d24grd'
if table2Version == 129 and indicatorOfParameter == 55:
return 'mean2t24grd'
if table2Version == 129 and indicatorOfParameter == 54:
return 'presgrd'
if table2Version == 129 and indicatorOfParameter == 53:
return 'montgrd'
if table2Version == 129 and indicatorOfParameter == 52:
return 'mn2t24grd'
if table2Version == 129 and indicatorOfParameter == 51:
return 'mx2t24grd'
if table2Version == 129 and indicatorOfParameter == 50:
return 'lspfgrd'
if table2Version == 129 and indicatorOfParameter == 49:
return '10fggrd'
if table2Version == 129 and indicatorOfParameter == 48:
return 'magssgrd'
if table2Version == 129 and indicatorOfParameter == 47:
return 'dsrpgrd'
if table2Version == 129 and indicatorOfParameter == 46:
return 'sdurgrd'
if table2Version == 129 and indicatorOfParameter == 45:
return 'smltgrd'
if table2Version == 129 and indicatorOfParameter == 44:
return 'esgrd'
if table2Version == 129 and indicatorOfParameter == 43:
return 'sltgrd'
if table2Version == 129 and indicatorOfParameter == 42:
return 'swvl4grd'
if table2Version == 129 and indicatorOfParameter == 41:
return 'swvl3grd'
if table2Version == 129 and indicatorOfParameter == 40:
return 'swvl2grd'
if table2Version == 129 and indicatorOfParameter == 39:
return 'swvl1grd'
if table2Version == 129 and indicatorOfParameter == 38:
return 'istl4grd'
if table2Version == 129 and indicatorOfParameter == 37:
return 'istl3grd'
if table2Version == 129 and indicatorOfParameter == 36:
return 'istl2grd'
if table2Version == 129 and indicatorOfParameter == 35:
return 'istl1grd'
if table2Version == 129 and indicatorOfParameter == 34:
return 'sstkgrd'
if table2Version == 129 and indicatorOfParameter == 33:
return 'rsngrd'
if table2Version == 129 and indicatorOfParameter == 32:
return 'asngrd'
if table2Version == 129 and indicatorOfParameter == 31:
return 'sicgrd'
if table2Version == 129 and indicatorOfParameter == 30:
return 'tvhgrd'
if table2Version == 129 and indicatorOfParameter == 29:
return 'tvlgrd'
if table2Version == 129 and indicatorOfParameter == 28:
return 'cvhgrd'
if table2Version == 129 and indicatorOfParameter == 27:
return 'cvlgrd'
if table2Version == 129 and indicatorOfParameter == 26:
return 'clgrd'
if table2Version == 129 and indicatorOfParameter == 25:
return '~'
if table2Version == 129 and indicatorOfParameter == 24:
return '~'
if table2Version == 129 and indicatorOfParameter == 23:
return 'ucdvgrd'
if table2Version == 129 and indicatorOfParameter == 22:
return 'uclngrd'
if table2Version == 129 and indicatorOfParameter == 21:
return 'uctpgrd'
if table2Version == 129 and indicatorOfParameter == 14:
return 'vrtwgrd'
if table2Version == 129 and indicatorOfParameter == 13:
return 'urtwgrd'
if table2Version == 129 and indicatorOfParameter == 12:
return 'vdvwgrd'
if table2Version == 129 and indicatorOfParameter == 11:
return 'udvwgrd'
if table2Version == 129 and indicatorOfParameter == 5:
return 'septgrd'
if table2Version == 129 and indicatorOfParameter == 4:
return 'eqptgrd'
if table2Version == 129 and indicatorOfParameter == 3:
return 'ptgrd'
if table2Version == 129 and indicatorOfParameter == 2:
return 'vpotgrd'
if table2Version == 129 and indicatorOfParameter == 1:
return 'strfgrd'
if table2Version == 228 and indicatorOfParameter == 123:
return 'totalx'
if table2Version == 228 and indicatorOfParameter == 121:
return 'kx'
if table2Version == 228 and indicatorOfParameter == 109:
return 'ceil'
if table2Version == 254 and indicatorOfParameter == 48:
return 'tprate'
if table2Version == 235 and indicatorOfParameter == 70:
return 'mper'
if table2Version == 235 and indicatorOfParameter == 69:
return 'msdwlwrfcs'
if table2Version == 235 and indicatorOfParameter == 68:
return 'msdwswrfcs'
if table2Version == 235 and indicatorOfParameter == 67:
return 'mlsrr'
if table2Version == 235 and indicatorOfParameter == 66:
return 'mcrr'
if table2Version == 235 and indicatorOfParameter == 65:
return 'mrr'
if table2Version == 235 and indicatorOfParameter == 64:
return 'mcderf'
if table2Version == 235 and indicatorOfParameter == 63:
return 'mcdgppf'
if table2Version == 235 and indicatorOfParameter == 62:
return 'mcdneef'
if table2Version == 235 and indicatorOfParameter == 61:
return 'msdfswrfcs'
if table2Version == 235 and indicatorOfParameter == 60:
return 'msdfswrf'
if table2Version == 235 and indicatorOfParameter == 59:
return 'msdrswrfcs'
if table2Version == 235 and indicatorOfParameter == 58:
return 'msdrswrf'
if table2Version == 235 and indicatorOfParameter == 57:
return 'mlssr'
if table2Version == 235 and indicatorOfParameter == 56:
return 'mcsr'
if table2Version == 235 and indicatorOfParameter == 55:
return 'mtpr'
if table2Version == 235 and indicatorOfParameter == 54:
return 'mvimd'
if table2Version == 235 and indicatorOfParameter == 53:
return 'mtdwswrf'
if table2Version == 235 and indicatorOfParameter == 52:
return 'msnlwrfcs'
if table2Version == 235 and indicatorOfParameter == 51:
return 'msnswrfcs'
if table2Version == 235 and indicatorOfParameter == 50:
return 'mtnlwrfcs'
if table2Version == 235 and indicatorOfParameter == 49:
return 'mtnswrfcs'
if table2Version == 235 and indicatorOfParameter == 48:
return 'mror'
if table2Version == 235 and indicatorOfParameter == 47:
return 'mgwd'
if table2Version == 235 and indicatorOfParameter == 46:
return 'mngwss'
if table2Version == 235 and indicatorOfParameter == 45:
return 'megwss'
if table2Version == 235 and indicatorOfParameter == 44:
return 'sdf'
if table2Version == 235 and indicatorOfParameter == 43:
return 'mer'
if table2Version == 235 and indicatorOfParameter == 42:
return 'mntss'
if table2Version == 235 and indicatorOfParameter == 41:
return 'metss'
if table2Version == 235 and indicatorOfParameter == 40:
return 'mtnlwrf'
if table2Version == 235 and indicatorOfParameter == 39:
return 'mtnswrf'
if table2Version == 235 and indicatorOfParameter == 38:
return 'msnlwrf'
if table2Version == 235 and indicatorOfParameter == 37:
return 'msnswrf'
if table2Version == 235 and indicatorOfParameter == 36:
return 'msdwlwrf'
if table2Version == 235 and indicatorOfParameter == 35:
return 'msdwswrf'
if table2Version == 235 and indicatorOfParameter == 34:
return 'mslhf'
if table2Version == 235 and indicatorOfParameter == 33:
return 'msshf'
if table2Version == 235 and indicatorOfParameter == 32:
return 'mbld'
if table2Version == 235 and indicatorOfParameter == 31:
return 'msr'
if table2Version == 235 and indicatorOfParameter == 30:
return 'mcpr'
if table2Version == 235 and indicatorOfParameter == 29:
return 'mlspr'
if table2Version == 235 and indicatorOfParameter == 28:
return 'msparf'
if table2Version == 235 and indicatorOfParameter == | |
_get_lun_status(self, lun_name):
if not lun_name:
err = _('Param [lun_name] is invalid.')
raise exception.InvalidParameterValue(err=err)
try:
lun_info = self._get_lun_info(lun_name,
['status', 'is_action_locked'])
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception('Failed to _get_lun_status. [%s]', lun_name)
if not self.check_value_valid(lun_info, ['status'], string_types):
raise exception.MalformedResponse(cmd='_get_lun_status',
reason=_('status not found'))
if not self.check_value_valid(lun_info, ['is_action_locked'], bool):
raise exception.MalformedResponse(cmd='_get_lun_status',
reason=_('action_locked '
'not found'))
return lun_info['status'], lun_info['is_action_locked']
def _get_snapshot_info(self, snapshot_uuid, additional=None):
if not snapshot_uuid:
err = _('Param [snapshot_uuid] is invalid.')
raise exception.InvalidParameterValue(err=err)
params = {'snapshot_uuid': snapshot_uuid}
if additional is not None:
params['additional'] = additional
try:
out = self.exec_webapi('SYNO.Core.ISCSI.LUN',
'get_snapshot',
1,
**params)
self.check_response(out, snapshot_id=snapshot_uuid)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception('Failed to _get_snapshot_info. [%s]',
snapshot_uuid)
if not self.check_value_valid(out, ['data', 'snapshot'], object):
raise exception.MalformedResponse(cmd='_get_snapshot_info',
reason=_('snapshot info not '
'found'))
return out['data']['snapshot']
def _get_snapshot_status(self, snapshot_uuid):
if not snapshot_uuid:
err = _('Param [snapshot_uuid] is invalid.')
raise exception.InvalidParameterValue(err=err)
try:
snapshot_info = self._get_snapshot_info(snapshot_uuid,
['status',
'is_action_locked'])
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception('Failed to _get_snapshot_info. [%s]',
snapshot_uuid)
if not self.check_value_valid(snapshot_info, ['status'], string_types):
raise exception.MalformedResponse(cmd='_get_snapshot_status',
reason=_('status not found'))
if not self.check_value_valid(snapshot_info,
['is_action_locked'],
bool):
raise exception.MalformedResponse(cmd='_get_snapshot_status',
reason=_('action_locked '
'not found'))
return snapshot_info['status'], snapshot_info['is_action_locked']
def _get_metadata_value(self, obj, key):
if key not in obj['metadata']:
if isinstance(obj, volume.Volume):
raise exception.VolumeMetadataNotFound(
volume_id=obj['id'],
metadata_key=key)
elif isinstance(obj, snapshot.Snapshot):
raise exception.SnapshotMetadataNotFound(
snapshot_id=obj['id'],
metadata_key=key)
else:
raise exception.MetadataAbsent()
return obj['metadata'][key]
def _get_backend_name(self):
return self.config.safe_get('volume_backend_name') or 'Synology'
def _target_create(self, identifier):
if not identifier:
err = _('Param [identifier] is invalid.')
raise exception.InvalidParameterValue(err=err)
# 0 for no auth, 1 for single chap, 2 for mutual chap
auth_type = 0
chap_username = ''
chap_password = ''
provider_auth = ''
if self.config.safe_get('use_chap_auth') and self.config.use_chap_auth:
auth_type = 1
chap_username = (self.config.safe_get('chap_username') or
volutils.generate_username(12))
chap_password = (self.config.safe_get('chap_password') or
volutils.generate_password())
provider_auth = ' '.join(('CHAP', chap_username, chap_password))
trg_prefix = self.config.safe_get('target_prefix')
trg_name = (self.TARGET_NAME_PREFIX + '%s') % identifier
iqn = trg_prefix + trg_name
try:
out = self.exec_webapi('SYNO.Core.ISCSI.Target',
'create',
1,
name=trg_name,
iqn=iqn,
auth_type=auth_type,
user=chap_username,
password=<PASSWORD>,
max_sessions=0)
self.check_response(out)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception('Failed to _target_create. [%s]',
identifier)
if not self.check_value_valid(out, ['data', 'target_id']):
msg = _('Failed to get target_id of target [%s]') % trg_name
raise exception.VolumeDriverException(message=msg)
trg_id = out['data']['target_id']
return iqn, trg_id, provider_auth
def _target_delete(self, trg_id):
if 0 > trg_id:
err = _('trg_id is invalid: %d.') % trg_id
raise exception.InvalidParameterValue(err=err)
try:
out = self.exec_webapi('SYNO.Core.ISCSI.Target',
'delete',
1,
target_id=('%d' % trg_id))
self.check_response(out)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception('Failed to _target_delete. [%d]', trg_id)
# is_map True for map, False for ummap
def _lun_map_unmap_target(self, volume_name, is_map, trg_id):
if 0 > trg_id:
err = _('trg_id is invalid: %d.') % trg_id
raise exception.InvalidParameterValue(err=err)
try:
lun_uuid = self._get_lun_uuid(volume_name)
out = self.exec_webapi('SYNO.Core.ISCSI.LUN',
'map_target' if is_map else 'unmap_target',
1,
uuid=lun_uuid,
target_ids=['%d' % trg_id])
self.check_response(out)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception('Failed to _lun_map_unmap_target. '
'[%(action)s][%(vol)s].',
{'action': ('map_target' if is_map
else 'unmap_target'),
'vol': volume_name})
    def _lun_map_target(self, volume_name, trg_id):
        """Map the LUN backing *volume_name* to iSCSI target *trg_id*."""
        self._lun_map_unmap_target(volume_name, True, trg_id)
    def _lun_unmap_target(self, volume_name, trg_id):
        """Unmap the LUN backing *volume_name* from iSCSI target *trg_id*."""
        self._lun_map_unmap_target(volume_name, False, trg_id)
def _modify_lun_name(self, name, new_name):
try:
out = self.exec_webapi('SYNO.Core.ISCSI.LUN',
'set',
1,
uuid=name,
new_name=new_name)
self.check_response(out)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception('Failed to _modify_lun_name [%s].', name)
def _check_lun_status_normal(self, volume_name):
status = ''
try:
while True:
status, locked = self._get_lun_status(volume_name)
if not locked:
break
eventlet.sleep(2)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception('Failed to get lun status. [%s]',
volume_name)
LOG.debug('Lun [%(vol)s], status [%(status)s].',
{'vol': volume_name,
'status': status})
return status == 'normal'
def _check_snapshot_status_healthy(self, snapshot_uuid):
status = ''
try:
while True:
status, locked = self._get_snapshot_status(snapshot_uuid)
if not locked:
break
eventlet.sleep(2)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception('Failed to get snapshot status. [%s]',
snapshot_uuid)
LOG.debug('Lun [%(snapshot)s], status [%(status)s].',
{'snapshot': snapshot_uuid,
'status': status})
return status == 'Healthy'
def _check_storage_response(self, out, **kwargs):
data = 'internal error'
exc = exception.VolumeBackendAPIException(data=data)
message = 'Internal error'
return (message, exc)
def _check_iscsi_response(self, out, **kwargs):
LUN_BAD_LUN_UUID = 18990505
LUN_NO_SUCH_SNAPSHOT = 18990532
if not self.check_value_valid(out, ['error', 'code'], int):
raise exception.MalformedResponse(cmd='_check_iscsi_response',
reason=_('no error code found'))
code = out['error']['code']
exc = None
message = ''
if code == LUN_BAD_LUN_UUID:
exc = exception.SynoLUNNotExist(**kwargs)
message = 'Bad LUN UUID'
elif code == LUN_NO_SUCH_SNAPSHOT:
exc = exception.SnapshotNotFound(**kwargs)
message = 'No such snapshot'
else:
data = 'internal error'
exc = exception.VolumeBackendAPIException(data=data)
message = 'Internal error'
message = '%s [%d]' % (message, code)
return (message, exc)
def _check_ds_pool_status(self):
pool_info = self._get_pool_info()
if not self.check_value_valid(pool_info, ['readonly'], bool):
raise exception.MalformedResponse(cmd='_check_ds_pool_status',
reason=_('no readonly found'))
if pool_info['readonly']:
message = (_('pool [%s] is not writable') %
self.config.synology_pool_name)
raise exception.VolumeDriverException(message=message)
def _check_ds_version(self):
try:
out = self.exec_webapi('SYNO.Core.System',
'info',
1,
type='firmware')
self.check_response(out)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception('Failed to _check_ds_version')
if not self.check_value_valid(out,
['data', 'firmware_ver'],
string_types):
raise exception.MalformedResponse(cmd='_check_ds_version',
reason=_('data not found'))
firmware_version = out['data']['firmware_ver']
# e.g. 'DSM 6.1-7610', 'DSM 6.0.1-7370', 'DSM 6.0-7321 update 3'
version = firmware_version.split()[1].split('-')[0]
versions = version.split('.')
major, minor, hotfix = (versions[0],
versions[1],
versions[2] if len(versions) is 3 else '0')
major, minor, hotfix = (int(major), int(minor), int(hotfix))
if (6 > major) or (major is 6 and minor is 0 and hotfix < 2):
m = (_('DS version %s is not supperted') %
firmware_version)
raise exception.VolumeDriverException(message=m)
def _check_ds_ability(self):
try:
out = self.exec_webapi('SYNO.Core.System',
'info',
1,
type='define')
self.check_response(out)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception('Failed to _check_ds_ability')
if not self.check_value_valid(out, ['data'], dict):
raise exception.MalformedResponse(cmd='_check_ds_ability',
reason=_('data not found'))
define = out['data']
if 'usbstation' in define and define['usbstation'] == 'yes':
m = _('usbstation is not supported')
raise exception.VolumeDriverException(message=m)
if ('support_storage_mgr' not in define
or define['support_storage_mgr'] != 'yes'):
m = _('Storage Manager is not supported in DS')
raise exception.VolumeDriverException(message=m)
if ('support_iscsi_target' not in define
or define['support_iscsi_target'] != 'yes'):
m = _('iSCSI target feature is not supported in DS')
raise exception.VolumeDriverException(message=m)
if ('support_vaai' not in define
or define['support_vaai'] != 'yes'):
m = _('VAAI feature is not supported in DS')
raise exception.VolumeDriverException(message=m)
if ('supportsnapshot' not in define
or define['supportsnapshot'] != 'yes'):
m = _('Snapshot feature is not supported in DS')
raise exception.VolumeDriverException(message=m)
def check_response(self, out, **kwargs):
if out['success']:
return
data = 'internal error'
exc = exception.VolumeBackendAPIException(data=data)
message = 'Internal error'
api = out['api_info']['api']
if (api.startswith('SYNO.Core.ISCSI.')):
message, exc = self._check_iscsi_response(out, **kwargs)
elif (api.startswith('SYNO.Core.Storage.')):
message, exc = self._check_storage_response(out, **kwargs)
LOG.exception('%(message)s', {'message': message})
raise exc
def exec_webapi(self, api, method, version, **kwargs):
result = self.synoexec(api, method, version, **kwargs)
if 'http_status' in result and 200 != result['http_status']:
raise exception.SynoAPIHTTPError(code=result['http_status'])
result['api_info'] = {'api': api,
'method': method,
'version': version}
return result
def check_value_valid(self, obj, key_array, value_type=None):
curr_obj = obj
for key in key_array:
if key not in curr_obj:
LOG.error('key [%(key)s] is not in %(obj)s',
{'key': key,
'obj': curr_obj})
return False
curr_obj = curr_obj[key]
if value_type and not isinstance(curr_obj, value_type):
LOG.error('[%(obj)s] is %(type)s, not %(value_type)s',
{'obj': curr_obj,
'type': type(curr_obj),
'value_type': value_type})
return False
return True
    def get_ip(self):
        """Return the primary iSCSI portal IP address from configuration."""
        return self.config.target_ip_address
def get_provider_location(self, iqn, trg_id):
portals = ['%(ip)s:%(port)d' % {'ip': self.get_ip(),
'port': self.target_port}]
sec_ips = self.config.safe_get('iscsi_secondary_ip_addresses')
for ip in sec_ips:
portals.append('%(ip)s:%(port)d' %
{'ip': ip,
'port': self.target_port})
return '%s,%d %s 0' % (
';'.join(portals),
trg_id,
iqn)
def is_lun_mapped(self, lun_name):
if not lun_name:
err = _('Param [lun_name] is invalid.')
raise exception.InvalidParameterValue(err=err)
try:
lun_info = self._get_lun_info(lun_name, ['is_mapped'])
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception('Failed to _is_lun_mapped. [%s]', lun_name)
if not self.check_value_valid(lun_info, ['is_mapped'], bool):
raise exception.MalformedResponse(cmd='_is_lun_mapped',
reason=_('is_mapped not found'))
return lun_info['is_mapped']
    def check_for_setup_error(self):
        """Validate pool writability, DSM version and DSM feature set;
        any failure raises from the corresponding _check_* helper."""
        self._check_ds_pool_status()
        self._check_ds_version()
        self._check_ds_ability()
def update_volume_stats(self):
"""Update volume statistics.
Three kinds of data are stored on the Synology backend pool:
1. Thin volumes (LUNs on the pool),
2. Thick volumes (LUNs on the pool),
3. Other user data.
other_user_data_gb is the size of the 3rd one.
lun_provisioned_gb is the summation of all thin/thick volume
provisioned size.
Only thin type is available for Cinder volumes.
"""
free_gb, total_gb, other_user_data_gb = self._get_pool_size()
lun_provisioned_gb = self._get_pool_lun_provisioned_size()
data = {}
data['volume_backend_name'] = self.volume_backend_name
data['vendor_name'] = self.vendor_name
data['storage_protocol'] = self.config.target_protocol
data['consistencygroup_support'] = False
data['QoS_support'] = False
data['thin_provisioning_support'] = True
data['thick_provisioning_support'] = False
data['reserved_percentage'] = self.config.reserved_percentage
data['free_capacity_gb'] = free_gb
data['total_capacity_gb'] = total_gb
data['provisioned_capacity_gb'] = (lun_provisioned_gb +
other_user_data_gb)
data['max_over_subscription_ratio'] = (self.config.
max_over_subscription_ratio)
data['target_ip_address'] = self.config.target_ip_address
data['pool_name'] = self.config.synology_pool_name
data['backend_info'] = ('%s:%s:%s' %
(self.vendor_name,
self.driver_type,
self.host_uuid))
return data
def create_volume(self, volume):
try:
out = self.exec_webapi('SYNO.Core.ISCSI.LUN',
'create',
1,
name=volume['name'],
type=self.CINDER_LUN,
location=('/' +
self.config.synology_pool_name),
size=volume['size'] * units.Gi)
self.check_response(out)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception('Failed to create_volume. [%s]',
volume['name'])
if not self._check_lun_status_normal(volume['name']):
message = _('Lun [%s] status is not normal') % volume['name']
raise exception.VolumeDriverException(message=message)
def delete_volume(self, volume):
try:
lun_uuid = self._get_lun_uuid(volume['name'])
out = self.exec_webapi('SYNO.Core.ISCSI.LUN',
'delete',
1,
uuid=lun_uuid)
self.check_response(out)
except exception.SynoLUNNotExist:
LOG.warning('LUN does not exist')
| |
headers=self.json_headers, environ=self.extra_environ_admin)
resp = json.loads(response.body)
assert len(resp) == 97
@nottest
def test_search_h_empty(self):
"""Tests POST /forms/search: is NULL."""
json_query = json.dumps(
{'query': {'filter': ['Form', 'narrow_phonetic_transcription', '=', None]}})
response = self.app.post(url('/forms/search'), json_query,
self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
assert len(resp) == 75
# Same as above but with a double negative
json_query = json.dumps(
{'query': {'filter': ['not', ['Form', 'narrow_phonetic_transcription', '!=', None]]}})
response = self.app.post(url('/forms/search'), json_query,
self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
assert len(resp) == 75
@nottest
def test_search_i_not_empty(self):
"""Tests SEARCH /forms: is not NULL."""
json_query = json.dumps(
{'query': {'filter': ['not', ['Form', 'narrow_phonetic_transcription', '=', None]]}})
response = self.app.request(url('forms'), method='SEARCH', body=json_query,
headers=self.json_headers, environ=self.extra_environ_admin)
resp = json.loads(response.body)
assert len(resp) == 25
# Same as above, but with !=, i.e., __ne__
json_query = json.dumps(
{'query': {'filter': ['Form', 'narrow_phonetic_transcription', '!=', None]}})
response = self.app.request(url('forms'), body=json_query, method='SEARCH',
headers=self.json_headers, environ=self.extra_environ_admin)
resp = json.loads(response.body)
assert len(resp) == 25
@nottest
def test_search_j_invalid_json(self):
"""Tests POST /forms/search: invalid JSON params."""
json_query = json.dumps(
{'query': {'filter': ['not', ['Form', 'narrow_phonetic_transcription', '=', None]]}})
json_query = json_query[:-1] # Cut off the end to make it bad!
response = self.app.post(url('/forms/search'), json_query,
self.json_headers, self.extra_environ_admin, status=400)
resp = json.loads(response.body)
assert resp['error'] == \
u'JSON decode error: the parameters provided were not valid JSON.'
@nottest
def test_search_k_malformed_query(self):
"""Tests SEARCH /forms: malformed query."""
# TypeError - bad num args: 'NOT' will be treated as the first arg to
# _get_simple_filter_expression and ['Form', 'transcription', '=', 10] will be passed
# as the second -- two more are required.
json_query = json.dumps({'query': {'filter': ['NOT', ['Form', 'id', '=', 10]]}})
response = self.app.request(url('forms'), method='SEARCH', body=json_query,
headers=self.json_headers, environ=self.extra_environ_admin, status=400)
resp = json.loads(response.body)
assert resp['errors']['Malformed OLD query error'] == u'The submitted query was malformed'
# After recognizing 'not', the query builder will look at only the next
# list and ignore all the rest.
json_query = json.dumps(
{'query': {'filter':
['not',
['Form', 'transcription', '=', 'transcription 10'],
['Form', 'transcription', '=', 'transcription 10'],
['Form', 'transcription', '=', 'transcription 10']]}})
response = self.app.request(url('forms'), method='SEARCH', body=json_query,
headers=self.json_headers, environ=self.extra_environ_admin)
resp = json.loads(response.body)
assert len(resp) == 99
assert 'transcription 10' not in [f['transcription'] for f in resp]
# IndexError will be raised when python[1] is called.
json_query = json.dumps({'query': {'filter': ['not']}})
response = self.app.request(url('forms'), method='SEARCH', body=json_query,
headers=self.json_headers, environ=self.extra_environ_admin, status=400)
resp = json.loads(response.body)
assert resp['errors']['Malformed OLD query error'] == u'The submitted query was malformed'
# IndexError will be raised when python[0] is called.
json_query = json.dumps({'query': {'filter': []}})
response = self.app.request(url('forms'), method='SEARCH', body=json_query,
headers=self.json_headers, environ=self.extra_environ_admin, status=400)
resp = json.loads(response.body)
assert resp['errors']['Malformed OLD query error'] == u'The submitted query was malformed'
# IndexError will be raised when python[1] is called.
json_query = json.dumps({'query': {'filter': ['and']}})
response = self.app.request(url('forms'), method='SEARCH', body=json_query,
headers=self.json_headers, environ=self.extra_environ_admin, status=400)
resp = json.loads(response.body)
assert resp['errors']['Malformed OLD query error'] == u'The submitted query was malformed'
assert resp['errors']['IndexError'] == u'list index out of range'
# TypeError bad num args will be triggered when _get_simple_filter_expression is
# called on a string whose len is not 4, i.e., 'id' or '='.
json_query = json.dumps({'query': {'filter': ['and', ['Form', 'id', '=', '1099']]}})
response = self.app.request(url('forms'), method='SEARCH', body=json_query,
headers=self.json_headers, environ=self.extra_environ_admin, status=400)
resp = json.loads(response.body)
assert 'TypeError' in resp['errors']
assert resp['errors']['Malformed OLD query error'] == u'The submitted query was malformed'
# TypeError when asking whether [] is in a dict (lists are unhashable)
json_query = json.dumps({'query': {'filter': [[], 'a', 'a', 'a']}})
response = self.app.request(url('forms'), method='SEARCH', body=json_query,
headers=self.json_headers, environ=self.extra_environ_admin, status=400)
resp = json.loads(response.body)
assert resp['errors']['TypeError'] == u"unhashable type: 'list'"
assert resp['errors']['Malformed OLD query error'] == u'The submitted query was malformed'
# With no 'query' attribute, the SQLAQueryBuilder will be passed None
# will immediately raise an AttributeError.
json_query = json.dumps({'filter': ['Form', 'id', '=', 2]})
response = self.app.request(url('forms'), method='SEARCH', body=json_query,
headers=self.json_headers, environ=self.extra_environ_admin, status=400)
resp = json.loads(response.body)
assert resp['error'] == u'The specified search parameters generated an invalid database query'
# With no 'filter' attribute, the SQLAQueryBuilder will be passed a list
# will immediately raise an AttributeError when it tries to call [...].get('filter').
json_query = json.dumps({'query': ['Form', 'id', '=', 2]})
response = self.app.request(url('forms'), method='SEARCH', body=json_query,
headers=self.json_headers, environ=self.extra_environ_admin, status=400)
resp = json.loads(response.body)
assert resp['error'] == u'The specified search parameters generated an invalid database query'
@nottest
def test_search_l_lexical_semantic_error(self):
"""Tests POST /forms/search: lexical & semantic errors.
These are when SQLAQueryBuilder.py raises a OLDSearchParseError because a
relation is not permitted, e.g., 'contains', or not permitted for a
given attribute.
"""
# search_parser.py does not allow the contains relation (OLDSearchParseError)
json_query = json.dumps(
{'query': {'filter': ['Form', 'transcription', 'contains', None]}})
response = self.app.post(url('/forms/search'), json_query,
self.json_headers, self.extra_environ_admin, status=400)
resp = json.loads(response.body)
assert 'Form.transcription.contains' in resp['errors']
# model.Form.translations.__eq__('abcdefg') will raise a custom OLDSearchParseError
json_query = json.dumps(
{'query': {'filter': ['Form', 'translations', '=', u'abcdefg']}})
response = self.app.post(url('/forms/search'), json_query,
self.json_headers, self.extra_environ_admin, status=400)
resp = json.loads(response.body)
assert resp['errors']['InvalidRequestError'] == \
u"Can't compare a collection to an object or collection; use contains() to test for membership."
# model.Form.tags.regexp('xyz') will raise a custom OLDSearchParseError
json_query = json.dumps({'query': {'filter': ['Form', 'tags', 'regex', u'xyz']}})
response = self.app.post(url('/forms/search'), json_query,
self.json_headers, self.extra_environ_admin, status=400)
resp = json.loads(response.body)
assert resp['errors']['Form.tags.regex'] == u'The relation regex is not permitted for Form.tags'
# model.Form.translations.like('transcription') will raise a custom OLDSearchParseError
json_query = json.dumps({'query': {'filter': ['Form', 'translations', 'like', u'abc']}})
response = self.app.post(url('/forms/search'), json_query,
self.json_headers, self.extra_environ_admin, status=400)
resp = json.loads(response.body)
assert resp['errors']['Form.translations.like'] == \
u'The relation like is not permitted for Form.translations'
# model.Form.tags.__eq__('tag') will raise a custom OLDSearchParseError
json_query = json.dumps({'query': {'filter': ['Form', 'tags', '__eq__', u'tag']}})
response = self.app.post(url('/forms/search'), json_query,
self.json_headers, self.extra_environ_admin, status=400)
resp = json.loads(response.body)
assert u'InvalidRequestError' in resp['errors']
@nottest
def test_search_m_conjunction(self):
"""Tests SEARCH /forms: conjunction."""
users = h.get_users()
contributor = [u for u in users if u.role == u'contributor'][0]
models = _get_test_models()
# 1 conjunct -- pointless, but it works...
query = {'query': {'filter': [
'and', [
['Form', 'transcription', 'like', u'%2%']
]
]}}
json_query = json.dumps(query)
response = self.app.request(url('forms'), method='SEARCH', body=json_query,
headers=self.json_headers, environ=self.extra_environ_admin)
resp = json.loads(response.body)
assert len(resp) == 19
# 2 conjuncts
query = {'query': {'filter': [
'and', [
['Form', 'transcription', 'like', u'%2%'],
['Form', 'transcription', 'like', u'%1%']
]
]}}
json_query = json.dumps(query)
response = self.app.request(url('forms'), method='SEARCH', body=json_query,
headers=self.json_headers, environ=self.extra_environ_admin)
resp = json.loads(response.body)
assert len(resp) == 2
assert sorted([f['transcription'] for f in resp]) == ['transcription 12', 'transcription 21']
# More than 2 conjuncts
query = {'query': {'filter': [
'and', [
['Form', 'transcription', 'like', u'%1%'],
['Form', 'elicitor', 'id', '=', contributor.id],
['Form', 'elicitation_method', 'id', '=', models['elicitation_methods'][49].id]
]
]}}
json_query = json.dumps(query)
response = self.app.request(url('forms'), method='SEARCH', body=json_query,
headers=self.json_headers, environ=self.extra_environ_admin)
resp = json.loads(response.body)
assert len(resp) == 3
assert sorted([f['transcription'] for f in resp]) == \
['TRANSCRIPTION 51', 'TRANSCRIPTION 61', 'TRANSCRIPTION 71']
# Multiple redundant conjuncts -- proof of possibility
query = {'query': {'filter': [
'and', [
['Form', 'transcription', 'like', u'%1%'],
['Form', 'transcription', 'like', u'%1%'],
['Form', 'transcription', 'like', u'%1%'],
['Form', 'transcription', 'like', u'%1%'],
['Form', 'transcription', 'like', u'%1%'],
['Form', 'transcription', 'like', u'%1%'],
]
]}}
json_query = json.dumps(query)
response = self.app.request(url('forms'), method='SEARCH', body=json_query,
headers=self.json_headers, environ=self.extra_environ_admin)
resp = json.loads(response.body)
assert len(resp) == 20
@nottest
def test_search_n_disjunction(self):
"""Tests POST /forms/search: disjunction."""
users = h.get_users()
contributor = [u for u in users if u.role == u'contributor'][0]
# 1 disjunct -- pointless, but it works...
query = {'query': {'filter': [
'or', [
['Form', 'transcription', 'like', u'%2%'] # 19 total
]
]}}
json_query = json.dumps(query)
response = self.app.post(url('/forms/search'), json_query,
self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
assert len(resp) == 19
# 2 disjuncts
query = {'query': {'filter': [
'or', [
['Form', 'transcription', 'like', u'%2%'], # 19; Total: 19
['Form', 'transcription', 'like', u'%1%'] # 18 (20 but '12' and '21' shared with '2'); Total: 37
]
]}}
json_query = json.dumps(query)
response = self.app.post(url('/forms/search'), json_query,
self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
assert len(resp) == 37
# 3 disjuncts
query = {'query': {'filter': [
'or', [
['Form', 'transcription', 'like', u'%2%'], # 19; Total: 19
['Form', 'transcription', 'like', u'%1%'], # 18 (20 but '12' and '21' shared with '2'); Total: 37
['Form', 'elicitor', 'id', '=', contributor.id] # 39 (50 but 11 shared with '2' and '1'); Total: 76
]
]}}
json_query = json.dumps(query)
response = self.app.post(url('/forms/search'), json_query,
self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
assert len(resp) == 76
assert response.content_type == | |
# ------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
"""
Provides the definitions needed for the SECURE station type.
"""
from multiprocessing import Process
import logging
import logging.handlers
import sys
import time
#import random
import numpy
import pibrella
#import Adafruit_GPIO.SPI as SPI
import Adafruit_SSD1306
import Image
import ImageDraw
import ImageFont
sys.path.append('/user/lib/python2.7/dist-packages')
import pygame
from station.interfaces import IStation
from station.state import State # TODO: get rid of this dependency!!
# ------------------------------------------------------------------------------
class Station(IStation):
    """
    Provides the implementation for a SECURE station type.

    Supports tone generation, an I2C OLED display for messages, and reading
    the code via light pulses. The OLED display shows an error message on the
    top line and a status message on the bottom line; during tone generation
    the current tone number is displayed. The pushbutton advances to the next
    tone. Once the user generates a code, the photo-detector circuit reads the
    pulse code; the circuit contains red, green, and yellow LEDs, where green
    signifies ready to read, yellow signifies light detected, and red is set
    by the pi to acknowledge a "Detected" pulse. The pibrella LEDs signify the
    SECURE station state, where RED=OFF, YELLOW=TONES, GREEN=PHOTODETECTOR.
    """

    # --------------------------------------------------------------------------
    def __init__(self,
                 config,
                 hwModule):
        """SECURE station constructor.

        Loads the LED hardware classes from the configured hardware module.

        Args:
            config: a Config object containing properties to configure station
                characteristics (LedClassName and the Leds collection).
            hwModule: python module object that defines hardware interfaces.
        """
        logger.debug('Constructing SECURE')

        ledClass = getattr(hwModule, config.LedClassName)
        self._leds = {}
        for led_config in config.Leds:
            self._leds[led_config.Name] = ledClass(led_config.Name, led_config)

        # Set externally by the framework after construction; used to submit
        # results and to hand its callback to ReadCode.
        self.ConnectionManager = None
        # Created lazily on the first transition to the Processing state.
        self.tonegen = None
        # Last failure description shown by onFailed(); initialized so that
        # onFailed() cannot raise AttributeError if it fires before
        # onProcessingCompleted() has ever set it.
        self._error_msg = " "

    # --------------------------------------------------------------------------
    def _set_leds(self, red, yellow, green):
        """Drive the three pibrella state LEDs to the given on/off booleans."""
        for name, on in (('red', red), ('yellow', yellow), ('green', green)):
            if on:
                self._leds[name].turnOn()
            else:
                self._leds[name].turnOff()

    # --------------------------------------------------------------------------
    @property
    def stationTypeId(self):
        """str: identifies this station's type as SECURE."""
        return "SECURE"

    # --------------------------------------------------------------------------
    def start(self):
        """Station startup: enter OFF state (red LED) and init the display.

        Creates self._display; the other state-transition handlers assume
        start() has been called first.
        """
        logger.info('Starting SECURE.')
        self._set_leds(red=True, yellow=False, green=False)
        # Turn on and blank the OLED display.
        self._display = LCDdisplay()
        logger.info('SECURE LCD Display Configured.')

    # --------------------------------------------------------------------------
    def stop(self, signal):
        """Station stop: turn off LEDs and output, blank the display.

        Args:
            signal: integer system signal that triggered the stop.
        """
        self._set_leds(red=False, yellow=False, green=False)
        pibrella.output.h.off()
        self._display.display_message(" ", " ")
        logger.info('Received signal "%s". Stopping SECURE.', signal)

    # --------------------------------------------------------------------------
    def onReady(self):
        """Transition station to the Ready state (red LED, READY on display)."""
        logger.info('SECURE transitioned to Ready state.')
        self._set_leds(red=True, yellow=False, green=False)
        self._display.display_message(" ", "READY")

    # --------------------------------------------------------------------------
    def onProcessing(self,
                     args):
        """Transition station to the Processing (tone generation) state.

        Args:
            args: challenge tone pattern forwarded to the ToneGenerator.
        """
        # Lazy %-style args: the message is only formatted if the level is on.
        logger.info('SECURE transitioned to Processing state with args [%s].', args)
        self._set_leds(red=False, yellow=True, green=False)

        self._secure_tone_pattern = args
        if self.tonegen is None:
            # First entry: build the generator and hook the pushbutton to it.
            self.tonegen = ToneGenerator(self._secure_tone_pattern, self._display)
            pibrella.button.pressed(self.tonegen.button_pressed)
        else:
            # Re-entry: silence any playing tone and load the new pattern.
            self.tonegen.stop()
            self.tonegen.reinit(self._secure_tone_pattern)
        logger.info('SECURE tonegen initialized')

    # --------------------------------------------------------------------------
    def onProcessing2(self, args):
        """Transition station to the Processing2 (pulse reading) state.

        Args:
            args: sequence whose first element is the expected secure code.
        """
        logger.info('SECURE transitioned to Processing2 state.')
        self.tonegen.stop()
        self._set_leds(red=False, yellow=False, green=True)

        self._secure_code = args[0]
        rc = ReadCode(self._secure_code, self._display, self.ConnectionManager._callback)
        # Reading runs in a separate process so the station stays responsive.
        self._display.display_message(" ", "TRANSMIT")
        t = Process(target=rc.run)
        t.start()

    # --------------------------------------------------------------------------
    def onProcessingCompleted(self, args):
        """Transition station to the ProcessingCompleted state.

        Entered when the reading process sets the state to ProcessingCompleted.
        Submits the read code and its verdict to the connection manager.

        Args:
            args: (isCorrect, code, errorMsg) triple from the reader.
        """
        logger.info('SECURE transitioned to ProcessingCompleted state.')
        isCorrect, code, error_msg = args
        # Remember the failure text so onFailed() can display it later.
        self._error_msg = error_msg
        logger.info('Submitting code: {} , match = {}, {}'.format(repr(code), isCorrect, error_msg))
        self.ConnectionManager.submit(candidateAnswer=code,
                                      isCorrect=isCorrect,
                                      failMessage=error_msg)

    # --------------------------------------------------------------------------
    def onFailed(self,
                 args):
        """Transition station to the Failed state.

        Args:
            args: (is_correct, challenge_complete) pair; challenge_complete is
                a "true"/"false" string.
        """
        logger.info('SECURE transitioned to Failed state with args [%s].', args)
        is_correct, challenge_complete = args
        # The two branches differ only in the status text shown.
        if challenge_complete.lower() == "true":
            logger.debug('Challenge complete.')
            self._display.display_message(self._error_msg, "FAILED")
        else:
            logger.debug('Challenge not complete. Turning on red LED')
            self._display.display_message(self._error_msg, "ERROR")
        self._set_leds(red=True, yellow=False, green=False)
        pibrella.output.h.off()

    # --------------------------------------------------------------------------
    def onPassed(self,
                 args):
        """Transition station to the Passed state (green LED, PASSED shown).

        Args:
            args: transition arguments; logged only.
        """
        logger.info('SECURE transitioned to Passed state with args [%s].', args)
        self._display.display_message(" ", "PASSED")
        self._set_leds(red=False, yellow=False, green=True)
        pibrella.output.h.off()

    # --------------------------------------------------------------------------
    def onUnexpectedState(self, value):
        """Log an attempted transition to an unexpected state.

        Args:
            value: the unexpected state value.
        """
        logger.critical('SECURE transitioned to Unexpected state %s', value)
# ------------------------------------------------------------------------------
def parse_secure_error(argument):
    """Map a SECURE pulse-decoder status code to a human-readable message.

    Args:
        argument: integer status code produced by the pulse reader.

    Returns:
        Short description string; "nothing" for any unrecognized code.
    """
    for code, message in (
        (0, " "),
        (1, "Timeout"),
        (2, "Short pulse width"),
        (4, "Long pulse width"),
        (8, ">Bits or <stop gap"),
        (16, "Long stop gap"),
        (32, "Failed"),
    ):
        if argument == code:
            return message
    return "nothing"
# ------------------------------------------------------------------------------
class LCDdisplay:
    """
    Two-line message display on an SSD1306 OLED over I2C.

    display_message() renders Line1 (small font, top) and Line2 (large font,
    bottom) onto an off-screen PIL image and pushes it to the panel rotated
    180 degrees.
    """
    # --------------------------------------------------------------------------
    def __init__(self):
        """Initialize the OLED panel and the off-screen drawing surface.
        """
        # Raspberry Pi pin configuration:
        self._RST = 22          # reset GPIO pin number
        self._address = 0x3C    # I2C address of the SSD1306 controller
        # 128x64 OLED driver via I2C (the commented line is the 128x32 variant)
        # self._disp = Adafruit_SSD1306.SSD1306_128_32(rst=self._RST)
        self._disp = Adafruit_SSD1306.SSD1306_128_64(rst=self._RST, i2c_address=self._address)
        # Initialize the display library.
        self._disp.begin()
        # 0xA6 = SSD1306 "normal" (non-inverted) display command
        self._disp.command(0xA6)
        # 0xA7 would invert the display
        #self._disp.command(0xA7)
        # Clear display.
        self._disp.clear()
        self._disp.display()
        # Create blank image for drawing.
        # Make sure to create image with mode '1' for 1-bit color.
        self._width = self._disp.width
        self._height = self._disp.height
        self._image = Image.new('1', (self._width, self._height))
        self._padding = 2
        self._top = self._padding
        self._bottom = self._height-self._padding
        # Get drawing object to draw on image.
        self._draw = ImageDraw.Draw(self._image)
        self._font_name = '/usr/share/fonts/truetype/freefont/FreeMono.ttf'
        self._font_top = ImageFont.truetype(self._font_name, 12)
        self._font_bot = ImageFont.truetype(self._font_name, 20)
        self.display_message(' ', 'READY')
        logger.debug('Constructing LCDdisplay')
    # --------------------------------------------------------------------------
    def __enter__(self):
        """Context-manager entry; returns this display unchanged."""
        logger.debug('Entering LCDdisplay')
        return self
    # --------------------------------------------------------------------------
    def __exit__(self, type, value, traceback):
        """Context-manager exit; no hardware cleanup is performed."""
        logger.debug('Exiting LCDdisplay')
    def display_message(self, Line1, Line2):
        """Render Line1 (top, 12pt) and Line2 (bottom, 20pt) to the panel."""
        # Blank the off-screen image, then draw the two text lines.
        self._draw.rectangle((0,0,self._width,self._height), outline=0, fill=0)
        x = self._padding
        self._draw.text((x, self._top+7), Line1, font=self._font_top, fill=255)
        self._draw.text((x, self._top+40), Line2, font=self._font_bot, fill=255)
        # Push to the panel. NOTE(review): the clear()+display() pair before
        # writing the image looks redundant and may cause a visible blink --
        # kept as-is; confirm on hardware before removing.
        self._disp.clear()
        self._disp.display()
        # Panel is mounted upside down, hence the 180-degree rotation.
        self._disp.image(self._image.rotate(180))
        self._disp.display()
# ------------------------------------------------------------------------------
class ToneGenerator:
"""
Generates the tones.
"""
# --------------------------------------------------------------------------
    def __init__(self, tone_IDs, lcdDisplay):
        """Build the tone generator from the challenge tone IDs.

        Args:
            tone_IDs: sequence whose first element is the list of challenge
                tone IDs (only the first 9 are used).
            lcdDisplay: LCDdisplay used to show the current tone number.
        """
        # display messages for tones
        self._disp_msg = ["Tone #0",
                          "Tone #1",
                          "Tone #2",
                          "Tone #3",
                          "Tone #4",
                          "Tone #5",
                          "Tone #6",
                          "Tone #7",
                          "Tone #8",
                          "Tone #9"]
        self._bits = 16 # audio resolution
        self._duration = 10 # Length of audio playback
        self._sample_rate = 44100 # sample rate
        self._tone = 0 # start out in tone 0 (off)
        # frequencies of tones; index 0 (44100 Hz) is inaudible, i.e. "off"
        #self._f = [44100, 276, 308, 340, 372, 404, 436, 468, 500]
        self._f = [44100, 300, 400, 500, 600, 700, 800, 900, 1000]
        # challenge tone IDs from MS; only the first 9 entries are kept
        tmp = tone_IDs[0]
        self._tone_ID = tmp[0:9]
        logger.debug('Challenge tone IDs = %s', self._tone_ID)
        # NOTE(review): _tone_order aliases _tone_ID (no copy), so the
        # insert() below also prepends -1 to _tone_ID -- confirm intended.
        self._tone_order = self._tone_ID
        # add the zero frequency (off) to the order list
        self._tone_order.insert(0, -1)
        logger.debug('Challenge tone IDs for list = %s', self._tone_order)
        # number of samples per generated tone buffer
        self._n_samples = int(round(self._duration*self._sample_rate))
        # init the mixer: 44.1 kHz, signed 16-bit (negative size), stereo
        pygame.mixer.pre_init(44100, -self._bits, 2, 1024)
        pygame.init()
        # pre-generate one pygame Sound per frequency
        self._slist = [self.generate_sound(self._f[count]) for count in range(len(self._f))]
        #self._slist[self._tone].play(loops = -1)
        self._display = lcdDisplay
        self._display.display_message(" ", self._disp_msg[self._tone])
        logger.debug('Constructing Tone Generator')
# --------------------------------------------------------------------------
    def __enter__(self):
        """Context-manager entry; returns this generator unchanged."""
        logger.debug('Entering Tone Generator')
        return self
# --------------------------------------------------------------------------
    def __exit__(self, type, value, traceback):
        """Context-manager exit; no audio cleanup is performed."""
        logger.debug('Exiting Tone Generator')
    # Dead draft of a polling run() loop -- kept for reference:
    # def run(self)
    #     _running = True
    #     while _running:
    #         pibrella.button.pressed.(self.button_pressed)
    #         # TODO set _running to false if station state changes
# --------------------------------------------------------------------------
    def reinit(self, tone_IDs):
        """Load a new challenge tone pattern without rebuilding the mixer.

        Args:
            tone_IDs: sequence whose first element is the list of challenge
                tone IDs (only the first 9 are used).
        """
        # challenge tone IDs from MS; only the first 9 entries are kept
        tmp = tone_IDs[0]
        self._tone_ID = tmp[0:9]
        logger.debug('Challenge tone IDs = %s', self._tone_ID)
        # NOTE(review): _tone_order aliases _tone_ID (no copy), so the
        # insert() below also prepends -1 to _tone_ID -- confirm intended.
        self._tone_order = self._tone_ID
        # add the zero frequency (off) to the order list
        self._tone_order.insert(0, -1)
        logger.debug('Challenge tone IDs for list = %s', self._tone_order)
        self._display.display_message(" ", self._disp_msg[self._tone])
        # NOTE(review): this log text is copy-pasted from __init__; it fires
        # on re-initialization, not construction.
        logger.debug('Constructing Tone Generator')
def generate_sound(self, freq):
#setup | |
# Python wrapper for EDSDK
# Camera operating function
import ctypes
import os
import platform
from .pyEDSDKTypes import *
# Load the Canon EDSDK native libraries (Windows only). EDSDK.dll exposes the
# camera API and EdsImage.dll the image API; both are expected to sit next to
# this module. NOTE(review): platform.system() is the conventional check;
# uname()[0] yields the same value on Windows.
if platform.uname()[0] == "Windows":
    dir_path = os.path.dirname(os.path.realpath(__file__))
    dllPath = os.path.join(dir_path, 'EDSDK.dll')
    dllImagePath = os.path.join(dir_path, 'EdsImage.dll')
    # WinDLL selects the stdcall calling convention used by the EDSDK C API.
    EDSDK = ctypes.WinDLL(dllPath)
    EDSDKImageDll = ctypes.WinDLL(dllImagePath)
    # ctypes callback factories mirroring the C# delegate signatures.
    EdsPropertyEventHandler = WINFUNCTYPE(EdsError, EdsPropertyEvent, EdsPropertyID, EdsUInt32, py_object)
    EdsStateEventHandler = WINFUNCTYPE(c_uint, c_uint, IntPtr, IntPtr)
    EdsProgressCallback = WINFUNCTYPE(c_uint, c_void_p, IntPtr, c_bool)
    EdsObjectEventHandler = WINFUNCTYPE(EdsError, EdsObjectEvent, POINTER(c_void_p), py_object)
else:
    # On non-Windows platforms none of the EDSDK names above are defined, so
    # any wrapper call below will raise NameError.
    print("Only support windows operating system.")
    pass
# region Callback Functions
# public delegate uint EdsProgressCallback( uint inPercent, IntPtr inContext, ref bool outCancel);
# public delegate uint EdsCameraAddedHandler(IntPtr inContext);
# def EdsPropertyEventHandler(delegateFunc):
# delegateFunc
# pass
# public delegate uint EdsPropertyEventHandler(uint inEvent, uint inPropertyID, uint inParam, IntPtr inContext);
# public delegate uint EdsPropertyEventHandler(uint inEvent, uint inPropertyID, uint inParam, IntPtr inContext);
# public delegate uint EdsObjectEventHandler( uint inEvent, IntPtr inRef, IntPtr inContext);
# public delegate uint EdsStateEventHandler( uint inEvent, uint inParameter, IntPtr inContext);
# region Proto type definition of EDSDK API
'''
*----------------------------------
Basic functions
----------------------------------*
'''
# *-----------------------------------------------------------------------------
#
# Function: EdsInitializeSDK
#
# Description:
# Initializes the libraries.
# When using the EDSDK libraries, you must call this API once
# before using EDSDK APIs.
#
# Parameters:
# In: None
# Out: None
#
# Returns: Any of the sdk errors.
# ----------------------------------------------------------------------------- *
def EdsInitializeSDK():
    """Initialize the EDSDK libraries.

    Must be called once before any other EDSDK API is used.

    Returns:
        EdsError status code as an unsigned int (0 == success).
    """
    # C# prototype: public extern static uint EdsInitializeSDK()
    initialize = EDSDK.EdsInitializeSDK
    initialize.restype = c_uint
    return initialize()
# *-----------------------------------------------------------------------------
#
# Function: EdsTerminateSDK
#
# Description:
# Terminates use of the libraries.
# This function muse be called when ending the SDK.
# Calling this function releases all resources allocated by the libraries.
#
# Parameters:
# In: None
# Out: None
#
# Returns: Any of the sdk errors.
# -----------------------------------------------------------------------------*#
def EdsTerminateSDK():
    """Terminate use of the EDSDK libraries, releasing all SDK resources.

    Must be called when ending use of the SDK.

    Returns:
        EdsError status code as an unsigned int (0 == success).
    """
    # C# prototype: public extern static uint EdsTerminateSDK()
    terminate = EDSDK.EdsTerminateSDK
    terminate.restype = c_uint
    return terminate()
# *-----------------------------------------------------------------------------
#
# Function: EdsRelease
#
# Description:
# Decrements the reference counter to an object.
# When the reference counter reaches 0, the object is released.
#
# Parameters:
# In: inRef - The reference of the item.
# Out: None
# Returns: Any of the sdk errors.
# -----------------------------------------------------------------------------*#
# EdsUInt32 EDSAPI EdsRelease( EdsBaseRef inRef )
def EdsRelease(cameraList):
    """Decrement the reference counter of an EDSDK object.

    When the counter reaches 0, the object is released.

    Args:
        cameraList: reference (pointer) of the item to release.

    Returns:
        EdsError status code as an unsigned int (0 == success).
    """
    # C# prototype: public extern static uint EdsRelease(IntPtr inRef)
    # FIX: the original assigned `.argtype`, which is not a real ctypes
    # attribute (the real one is `.argtypes`); the assignment was a silent
    # no-op and has been removed, leaving argument conversion to ctypes.
    EDSDK.EdsRelease.restype = c_uint
    return EDSDK.EdsRelease(cameraList)
'''
#*----------------------------------
Item-tree operating functions
----------------------------------*#
'''
# *-----------------------------------------------------------------------------
#
# Function: EdsGetChildCount
#
# Description:
# Gets the number of child objects of the designated object.
# Example: Number of files in a directory
#
# Parameters:
# In: inRef - The reference of the list.
# Out: outCount - Number of elements in this list.
#
# Returns: Any of the sdk errors.
# ----------------------------------------------------------------------------- * #
def EdsGetChildCount(inRef, outCount):
    """Get the number of child objects of the designated object.

    Example: the number of files in a directory.

    Args:
        inRef: reference of the list/container object.
        outCount: ctypes integer that receives the number of elements.

    Returns:
        SDK status code (signed int, matching the original binding).
    """
    # C# prototype: public extern static uint EdsGetChildCount(IntPtr inRef, out int outCount)
    # FIX: the original assigned `.argtype` (not a real ctypes attribute --
    # the real one is `.argtypes`), a silent no-op; removed.
    EDSDK.EdsGetChildCount.restype = c_int
    return EDSDK.EdsGetChildCount(inRef, byref(outCount))
# *-----------------------------------------------------------------------------
#
# Function: EdsGetChildAtIndex
#
# Description:
# Gets an indexed child object of the designated object.
#
# Parameters:
# In: inRef - The reference of the item.
# inIndex - The index that is passed in, is zero based.
# Out: outRef - The pointer which receives reference of the
# specified index .
#
# Returns: Any of the sdk errors.
# -----------------------------------------------------------------------------*#
def EdsGetChildAtIndex(inRef, inIndex, outRef):
    """Get an indexed child object of the designated object.

    Args:
        inRef: reference of the parent item.
        inIndex: zero-based index of the child.
        outRef: ctypes pointer that receives the child's reference.

    Returns:
        SDK status code (signed int, matching the default the original
        binding effectively used).
    """
    # C# prototype: EdsGetChildAtIndex(IntPtr inRef, int inIndex, out IntPtr outRef)
    # FIX: the original configured EDSDK.EdsGetChildCount here (copy-paste
    # error), clobbering that function's settings while leaving this one at
    # ctypes defaults; it also used the no-op `.argtype` attribute. Configure
    # the correct function instead.
    EDSDK.EdsGetChildAtIndex.restype = c_int
    return EDSDK.EdsGetChildAtIndex(inRef, inIndex, byref(outRef))
'''
#*----------------------------------
Property operating functions
----------------------------------*#
'''
# /*-----------------------------------------------------------------------------
#
# Function: EdsGetPropertySize
#
# Description:
# Gets the byte size and data type of a designated property
# from a camera object or image object.
#
# Parameters:
# In: inRef - The reference of the item.
# inPropertyID - The ProprtyID
# inParam - Additional information of property.
# We use this parameter in order to specify an index
# in case there are two or more values over the same ID.
# Out: outDataType - Pointer to the buffer that is to receive the property
# type data.
# outSize - Pointer to the buffer that is to receive the property
# size.
#
# Returns: Any of the sdk errors.
# -----------------------------------------------------------------------------*/
def EdsGetPropertySize(inRef, inPropertyID, inParam, outDataType, outSize):
    """Get the byte size and data type of a designated property.

    Works on a camera object or image object.

    Args:
        inRef: reference of the item.
        inPropertyID: the property ID.
        inParam: additional property information (index when several values
            share the same ID).
        outDataType: ctypes buffer that receives the property type.
        outSize: ctypes buffer that receives the property size.

    Returns:
        EdsError status code as an unsigned int (0 == success).
    """
    # C# prototype: public extern static uint EdsGetPropertySize(IntPtr inRef,
    #     uint inPropertyID, int inParam, out EdsDataType outDataType, out int outSize)
    # FIX: dropped the original `.argtype` assignment -- not a real ctypes
    # attribute (`.argtypes` is), so it was a silent no-op.
    EDSDK.EdsGetPropertySize.restype = c_uint
    return EDSDK.EdsGetPropertySize(inRef, inPropertyID, inParam, byref(outDataType), byref(outSize))
# *-----------------------------------------------------------------------------
#
# Function: EdsGetPropertyData
#
# Description:
# Gets property information from the object designated in inRef.
#
# Parameters:
# In: inRef - The reference of the item.
# inPropertyID - The ProprtyID
# inParam - Additional information of property.
# We use this parameter in order to specify an index
# in case there are two or more values over the same ID.
# inPropertySize - The number of bytes of the prepared buffer
# for receive property-value.
# Out: outPropertyData - The buffer pointer to receive property-value.
#
# Returns: Any of the sdk errors.
# -----------------------------------------------------------------------------*/
def EdsGetPropertyData(inRef, inPropertyID, inParam, inPropertySize, outPropertyData):
    """Get property information from the object designated in inRef.

    Args:
        inRef: reference of the item.
        inPropertyID: the property ID.
        inParam: additional property information (index when several values
            share the same ID).
        inPropertySize: byte size of the prepared receive buffer.
        outPropertyData: ctypes buffer that receives the property value.

    Returns:
        EdsError status code as an unsigned int (0 == success).
    """
    # C# prototype: public extern static uint EdsGetPropertyData(IntPtr inRef,
    #     uint inPropertyID, int inParam, int inPropertySize, IntPtr outPropertyData)
    # FIX: dropped the original no-op `.argtype` assignment (the real ctypes
    # attribute is `.argtypes`) and a large block of commented-out per-type
    # dispatch code; ctypes' default conversion handles byref() directly.
    EDSDK.EdsGetPropertyData.restype = c_uint
    return EDSDK.EdsGetPropertyData(inRef, inPropertyID, inParam, inPropertySize, byref(outPropertyData))
# *-----------------------------------------------------------------------------
#
# Function: EdsSetPropertyData
#
# Description:
# Sets property data for the object designated in inRef.
#
# Parameters:
# In: inRef - The reference of the item.
# inPropertyID - The ProprtyID
# inParam - Additional information of property.
# inPropertySize - The number of bytes of the prepared buffer
# for set property-value.
# inPropertyData - The buffer pointer to set property-value.
# Out: None
#
# Returns: Any of the sdk errors.
# -----------------------------------------------------------------------------*/
def EdsSetPropertyData(inRef, inPropertyID, inParam, inPropertySize, inPropertyData):
    """Set property data for the object designated in inRef.

    Args:
        inRef: reference of the item.
        inPropertyID: the property ID.
        inParam: additional property information.
        inPropertySize: byte size of the value buffer.
        inPropertyData: ctypes buffer holding the property value to set.

    Returns:
        EdsError status code as an unsigned int (0 == success).
    """
    # C# prototype: public extern static uint EdsSetPropertyData(IntPtr inRef,
    #     uint inPropertyID, int inParam, int inPropertySize,
    #     [MarshalAs(UnmanagedType.AsAny), In] object inPropertyData)
    # FIX: dropped the original `.argtype` assignment -- not a real ctypes
    # attribute (`.argtypes` is), so it was a silent no-op.
    EDSDK.EdsSetPropertyData.restype = c_uint
    return EDSDK.EdsSetPropertyData(inRef, inPropertyID, inParam, inPropertySize, byref(inPropertyData))
# *-----------------------------------------------------------------------------
#
# Function: EdsGetPropertyDesc
#
# Description:
# Gets a list of property data that can be set for the object
# designated in inRef, as well as maximum and minimum values.
# This API is intended for only some shooting-related properties.
#
# Parameters:
# In: inRef - The reference of the camera.
# inPropertyID - The Property ID.
# Out: outPropertyDesc - Array of the value which can be set up.
#
# Returns: Any of the sdk errors.
# -----------------------------------------------------------------------------*/
def EdsGetPropertyDesc(inRef, inPropertyID, outPropertyDesc):
    """Get the settable values (and min/max) for a shooting-related property.

    Args:
        inRef: reference of the camera.
        inPropertyID: the property ID.
        outPropertyDesc: ctypes structure that receives the settable values.

    Returns:
        EdsError status code as an unsigned int (0 == success).
    """
    # C# prototype: public extern static uint EdsGetPropertyDesc(IntPtr inRef,
    #     uint inPropertyID, out EdsPropertyDesc outPropertyDesc)
    # FIX: dropped the original no-op `.argtype` assignment (the real ctypes
    # attribute is `.argtypes`).
    EDSDK.EdsGetPropertyDesc.restype = c_uint
    return EDSDK.EdsGetPropertyDesc(inRef, inPropertyID, byref(outPropertyDesc))
'''
#*--------------------------------------------
Device-list and device operating functions
---------------------------------------------*#
'''
# *-----------------------------------------------------------------------------
#
# Function: EdsGetCameraList
#
# Description:
# Gets camera list objects.
#
# Parameters:
# In: None
# Out: outCameraListRef - Pointer to the camera-list.
#
# Returns: Any of the sdk errors.
# -----------------------------------------------------------------------------*#
def EdsGetCameraList(outCameraListRef):
    """Get the camera-list object enumerating connected cameras.

    Args:
        outCameraListRef: ctypes pointer that receives the camera-list
            reference.

    Returns:
        SDK status code (signed int, matching the original binding).
    """
    # C# prototype: public extern static uint EdsGetCameraList(out IntPtr outCameraListRef)
    get_camera_list = EDSDK.EdsGetCameraList
    get_camera_list.restype = c_int
    return get_camera_list(byref(outCameraListRef))
'''
*----------------------------------
Camera operating functions
----------------------------------*
'''
# *-----------------------------------------------------------------------------
##
# Function: EdsGetDeviceInfo
#
# Description:
# Gets device information, such as the device name.
# Because device information of remote cameras is stored
# on the host computer, you can use this API
# before the camera object initiates communication
# (that is, before a session is opened).
#
# Parameters:
# In: inCameraRef - The reference of the camera.
# Out: outDeviceInfo - Information as device of camera.
#
# Returns: Any of the sdk errors.
# ----------------------------------------------------------------------------- *
def EdsGetDeviceInfo(camera, deviceInfo):
    """Get device information, such as the device name.

    Device information of remote cameras is stored on the host computer, so
    this API may be used before a session is opened.

    Args:
        camera: reference of the camera.
        deviceInfo: ctypes EdsDeviceInfo structure that receives the info.

    Returns:
        EdsError status code as an unsigned int (0 == success).
    """
    # C# prototype: public extern static uint EdsGetDeviceInfo(IntPtr inCameraRef,
    #     out EdsDeviceInfo outDeviceInfo)
    # FIX: dropped the original no-op `.argtype` assignment (the real ctypes
    # attribute is `.argtypes`).
    EDSDK.EdsGetDeviceInfo.restype = c_uint
    return EDSDK.EdsGetDeviceInfo(camera, byref(deviceInfo))
# *-----------------------------------------------------------------------------
#
# Function: EdsOpenSession
#
# Description:
# Establishes a logical connection with a remote camera.
# Use this API after getting the camera's EdsCamera object.
#
# Parameters:
# In: inCameraRef - The reference of the camera
# Out: None
#
# Returns: Any of the sdk errors.
# -----------------------------------------------------------------------------*/
def EdsOpenSession(inCameraRef):
    """Establish a logical connection with a remote camera.

    Use this API after getting the camera's EdsCamera object.

    Args:
        inCameraRef: reference of the camera.

    Returns:
        EdsError status code as an unsigned int (0 == success).
    """
    # C# prototype: public extern static uint EdsOpenSession(IntPtr inCameraRef)
    # FIX: dropped the original no-op `.argtype` assignment (the real ctypes
    # attribute is `.argtypes`).
    EDSDK.EdsOpenSession.restype = c_uint
    return EDSDK.EdsOpenSession(inCameraRef)
# *-----------------------------------------------------------------------------
#
# Function: EdsCloseSession
#
# Description:
# Closes a logical connection with a remote camera.
#
# Parameters:
# In: inCameraRef - The reference of the camera
# Out: None
#
# Returns: Any of the sdk errors.
# -----------------------------------------------------------------------------*/
def EdsCloseSession(inCameraRef):
    """Close a logical connection with a remote camera.

    Args:
        inCameraRef: EdsCameraRef handle of the camera.

    Returns:
        int: EDSDK error code (0 / EDS_ERR_OK on success).
    """
    # Fix: ctypes expects the plural ``argtypes`` as a sequence; the
    # previous singular ``argtype`` attribute was silently ignored.
    EDSDK.EdsCloseSession.argtypes = (c_void_p,)
    EDSDK.EdsCloseSession.restype = c_uint
    # C# prototype: public extern static uint EdsCloseSession(IntPtr inCameraRef)
    return EDSDK.EdsCloseSession(inCameraRef)
# *-----------------------------------------------------------------------------
#
# Function: EdsSendCommand
#
# Description:
# Sends a command such as "Shoot" to a remote camera.
#
# Parameters:
# In: inCameraRef - The reference of the camera which will receive the
# command.
# inCommand - Specifies the command to be sent.
# inParam - Specifies additional command-specific information.
# Out: None
#
# Returns: Any of the sdk errors.
# -----------------------------------------------------------------------------*/
def EdsSendCommand(inCameraRef, inCommand, inParam):
EDSDK.EdsSendCommand.argtype = (c_void_p, c_uint, c_int)
| |
<filename>src/models/portfolio.py
import datetime
import pandas as pd
from src.models.utils import get_current_port, get_advised_port, get_recommendation
from src.models.load_data import Balance, Instruments, AdvisedPortfolios, PriceDB, Singleton
import copy
import math
from typing import Sequence
import numpy as np
from src.models.asset import Asset
from src.models.cash import Cash
from src.models.price import Price
from src.models import rebalancing_helper
class Portfolio:
"""
Portfolio class.
Defines a :class:`.Portfolio` of :class:`.Asset` s and :class:`.Cash` and performs rebalancing of the portfolio.
"""
def __init__(self):
"""
Initialization.
"""
self._assets = {}
self._cash = {}
self._is_selling_allowed = False
self._common_currency = "KRW"
@property
def cash(self):
"""
Dict[str, Cash]: Portfolio's dictionary of cash. The keys are currency symbols.
"""
return self._cash
@cash.setter
def cash(self, cash):
self._cash = cash
def add_cash(self, amount, currency='KRW'):
"""
Adds cash to portfolio.
Args:
amount (float) : Amount of cash
currency (str) : Currency of cash
"""
if currency.upper() not in self._cash:
self._cash[currency.upper()] = Cash(amount, currency)
else:
self._cash[currency.upper()].amount += amount
def easy_add_cash(self, amounts, currencies):
"""
An easy way of adding cash of various currencies to portfolio.
Args:
amounts (Sequence[float]): Amounts of cash from different curriencies.
currencies (Sequence[str]): Specifies curriency of each of the amounts. Must be in the same order as ``amounts``.
"""
assert len(amounts) == len(
currencies
), "`amounts` and `currencies` should be of the same length."
for amount, currency in zip(amounts, currencies):
self._cash[currency.upper()] = Cash(amount, currency)
@property
def assets(self):
"""
Dict[str, Asset]: Dictionary of assets in portfolio. The keys of the dictionary are the tickers of the assets.
No setter allowed.
"""
return self._assets
@property
def selling_allowed(self):
"""
bool: Flag indicating if selling of assets is allowed or not when rebalancing portfolio.
"""
return self._is_selling_allowed
@selling_allowed.setter
def selling_allowed(self, flag):
self._is_selling_allowed = flag
def add_asset(self, asset):
"""
Adds specified :class:`.Asset` to the portfolio.
Args:
asset (Asset): Asset to add to portfolio.
"""
self._assets[asset.ticker] = copy.deepcopy(asset)
def easy_add_assets(self, tickers, quantities, prices):
"""
An easy way to add multiple assets to portfolio.
Args:
tickers (Sequence[str]): Ticker of assets in portfolio.
quantities (Sequence[float]): Quantities of respective assets in portfolio. Must be in the same order as ``tickers``.
prices (Sequence[float]): Prices of respective assets in portfolio. Must be in the same order as ``tickers``.
reference: https://github.com/siavashadpey/rebalance/blob/master/rebalance/portfolio/portfolio.py
"""
assert (len(tickers) == len(quantities)) & (len(quantities) == len(prices)), \
"`names`, `quantities` and `prices` must be of the same length."
for ticker, quantity, price in zip(tickers, quantities, prices):
self._assets[ticker] = Asset(ticker, quantity, price)
def asset_allocation(self):
"""
Computes the portfolio's asset allocation.
Returns:
Dict[str, Asset]: Asset allocation of the portfolio (in %). The keys of the dictionary are the tickers of the assets.
"""
# Obtain all market values in 1 currency (doesn't matter which)
total_value = self.market_value(self._common_currency)
total_value = max(
1., total_value
) # protect against division by 0 (total_value = 0, means new portfolio)
asset_allocation = {}
for name, asset in self._assets.items():
asset_allocation[name] = asset.market_value_in(
self._common_currency) / total_value * 100.
return asset_allocation
def market_value(self, currency='KRW'):
"""
Computes the total market value of the assets in the portfolio.
Args:
currency (str): The currency in which to obtain the value.
Returns:
float: The total market value of the assets in the portfolio.
"""
mv = 0.
for asset in self.assets.values():
mv += asset.market_value_in(currency)
return mv
def cash_value(self, currency='KRW'):
"""
Computes the cash value in the portfolio.
Args:
currency (str): The currency in which to obtain the value.
Returns:
float: The total cash value in the portfolio.
"""
cv = 0.
for cash in self.cash.values():
cv += cash.amount_in(currency)
return cv
def value(self, currency="KRW"):
"""
Computes the total value (cash and assets) in the portfolio.
Args:
currency (str): The currency in which to obtain the value.
Returns:
float: The total value in the portfolio.
"""
return self.market_value(currency) + self.cash_value(currency)
def buy_asset(self, ticker, quantity):
"""
Buys (or sells) the specified amount of an asset.
Args:
ticker (str): Ticker of asset to buy.
quantity (int): If positive, it is the quantity to buy. If negative, it is the quantity to sell.
Return:
float: Cost of transaction (in asset's own currency)
"""
if quantity == 0:
return 0.00
asset = self.assets[ticker]
cost = asset.buy(quantity)
self.add_cash(-cost, asset.currency)
return cost
def exchange_currency(self,
to_currency,
from_currency,
to_amount=None,
from_amount=None):
"""
Performs currency exchange in Portfolio.
Args:
to_currency (str): Currency to which to perform the exchange
from_currency (str): Currency from which to perform the exchange
to_amount (float, optional): If specified, it is the amount to which we want to convert
from_amount (float, optional): If specified, it is the amount from which we want to convert
Note: either the `to_amount` or `from_amount` needs to be specifed.
"""
from_currency = from_currency.upper()
to_currency = to_currency.upper()
# add cash instances of both currencies to portfolio if non-existent
self.add_cash(0.0, from_currency)
self.add_cash(0.0, to_currency)
if to_amount is None and from_amount is None:
raise Exception(
"Argument `to_amount` or `from_amount` must be specified.")
if to_amount is not None and from_amount is not None:
raise Exception(
"Please specify only `to_amount` or `from_amount`, not both.")
if to_amount is not None:
from_amount = self.cash[to_currency].exchange_rate(
from_currency) * to_amount
elif from_amount is not None:
to_amount = self.cash[from_currency].exchange_rate(
to_currency) * from_amount
self.add_cash(to_amount, to_currency)
self.add_cash(-from_amount, from_currency)
    def rebalance(self, target_allocation, verbose=False):
        """Rebalance the portfolio toward ``target_allocation``.

        Uses the portfolio's current allocation and available cash; the
        optimization itself is delegated to ``rebalancing_helper.rebalance``.
        On success, this portfolio's state is replaced with the balanced
        portfolio's state.

        Args:
            target_allocation (Dict[str, float]): Target asset allocation (in %),
                keyed by ticker. Must provide a value for every asset held and
                sum to 100 (within 1e-2).
            verbose (bool, optional): If True, print a report of purchases,
                required currency conversions and remaining cash. Default is False.

        Returns:
            (tuple): tuple containing:

                * new_units (Dict[str, int]): Units of each asset to buy, keyed by ticker.
                * prices (Dict[str, [float, str]]): Per ticker, a 2-entry list:
                  price used during rebalancing and the asset's currency.
                * remaining_cash (float): Sum of leftover cash amounts across all
                  currencies. NOTE(review): raw amounts are summed without currency
                  conversion — confirm this is intended when several currencies are held.
                * max_diff (float): Largest difference (in %) between the target
                  allocation and the optimized asset allocation.
        """
        # order target_allocation dict in the same order as assets dict and upper key
        target_allocation_reordered = {}
        try:
            for key in self.assets:
                target_allocation_reordered[key] = target_allocation[key]
        except:
            # NOTE(review): bare except — any missing ticker (KeyError) is
            # reported as an incompatible allocation.
            raise Exception(
                "'target_allocation not compatible with the assets of the portfolio."
            )
        target_allocation_np = np.fromiter(
            target_allocation_reordered.values(), dtype=float)
        assert abs(np.sum(target_allocation_np) -
                   100.) <= 1E-2, "target allocation must sum up to 100%. it's {}".format(np.sum(target_allocation_np))
        # offload heavy work
        (balanced_portfolio, new_units, prices, cost,
         exchange_history) = rebalancing_helper.rebalance(self, target_allocation_np)
        # compute old and new asset allocation
        # and largest diff between new and target asset allocation
        old_alloc = self.asset_allocation()
        new_alloc = balanced_portfolio.asset_allocation()
        max_diff = max(
            abs(target_allocation_np -
                np.fromiter(new_alloc.values(), dtype=float)))
        if verbose:
            print("")
            # Print shares to buy, cost, new allocation, old allocation target, and target allocation
            print(
                " Ticker Ask Quantity Amount Currency Old allocation New allocation Target allocation"
            )
            print(
                " to buy ($) (%) (%) (%)"
            )
            print(
                "---------------------------------------------------------------------------------------------------------------"
            )
            for ticker in balanced_portfolio.assets:
                print("%8s %7.2f %6.d %8.2f %4s %5.2f %5.2f %5.2f" %
                      (ticker, prices[ticker][0], new_units[ticker], cost[ticker], prices[ticker][1],
                       old_alloc[ticker], new_alloc[ticker], target_allocation[ticker]))
            print("")
            print(
                "Largest discrepancy between the new and the target asset allocation is %.2f %%."
                % (max_diff))
            # Print conversion exchange
            if len(exchange_history) > 0:
                print("")
                if len(exchange_history) > 1:
                    print(
                        "Before making the above purchases, the following currency conversions are required:"
                    )
                else:
                    print(
                        "Before making the above purchases, the following currency conversion is required:"
                    )
                for exchange in exchange_history:
                    (from_amount, from_currency, to_amount, to_currency,
                     rate) = exchange
                    print(" %.2f %s to %.2f %s at a rate of %.4f." %
                          (from_amount, from_currency, to_amount, to_currency,
                           rate))
            # Print remaining cash
            print("")
            print("Remaining cash:")
            for cash in balanced_portfolio.cash.values():
                print(" %.2f %s." % (cash.amount, cash.currency))
        remaining_cash = sum([cash.amount for cash in balanced_portfolio.cash.values()])
        # Now that we're done, we can replace old portfolio with the new one
        self.__dict__.update(balanced_portfolio.__dict__)
        return (new_units, prices, remaining_cash, max_diff)
def _sell_everything(self):
"""
Sells all assets in the portfolio and converts them to cash.
"""
for ticker, asset | |
# -*- coding: utf-8 -*-
import collections
import getpass
import os
import pytest
@pytest.fixture()
def mock_getuser(mocker):
    """Mock 'getpass.getuser' function."""
    patched = mocker.patch.object(getpass, "getuser", return_value="john-doe")
    return patched


@pytest.fixture()
def mocked_expanduser(mocker):
    """Return mocked 'os.path.expanduser' function."""
    patched = mocker.patch.object(os.path, "expanduser", return_value="__HOME__")
    return patched


@pytest.fixture()
def mocked_toml_load(mocker):
    """Return mocked 'nomenclator.vendor.toml.load' function."""
    from nomenclator.vendor import toml
    return mocker.patch.object(toml, "load")


@pytest.fixture()
def mocked_toml_dump(mocker):
    """Return mocked 'nomenclator.vendor.toml.dump' function."""
    from nomenclator.vendor import toml
    return mocker.patch.object(toml, "dump")


@pytest.fixture()
def mocked_path(mocker, temporary_directory):
    """Return mocked 'nomenclator.config.path' function."""
    import nomenclator.config as config_module
    return mocker.patch.object(config_module, "path")


@pytest.fixture()
def mocked_load(mocker, temporary_directory):
    """Return mocked 'nomenclator.config.load' function."""
    import nomenclator.config as config_module
    return mocker.patch.object(config_module, "load")


@pytest.fixture()
def mocked_load_template_configs(mocker, temporary_directory):
    """Return mocked 'nomenclator.config.load_template_configs' function."""
    import nomenclator.config as config_module
    return mocker.patch.object(config_module, "load_template_configs")


@pytest.fixture()
def mocked_load_output_template_configs(mocker, temporary_directory):
    """Return mocked 'nomenclator.config.load_output_template_configs' function."""
    import nomenclator.config as config_module
    return mocker.patch.object(config_module, "load_output_template_configs")


@pytest.fixture()
def mocked_dump(mocker, temporary_directory):
    """Return mocked 'nomenclator.config.dump' function."""
    import nomenclator.config as config_module
    return mocker.patch.object(config_module, "dump")


@pytest.fixture()
def mocked_dump_template_configs(mocker, temporary_directory):
    """Return mocked 'nomenclator.config.dump_template_configs' function."""
    import nomenclator.config as config_module
    return mocker.patch.object(config_module, "dump_template_configs")


@pytest.fixture()
def mocked_dump_output_template_configs(mocker, temporary_directory):
    """Return mocked 'nomenclator.config.dump_output_template_configs' function."""
    import nomenclator.config as config_module
    return mocker.patch.object(config_module, "dump_output_template_configs")
def test_path(mocked_expanduser, monkeypatch):
    """Return the default configuration file path under the user's '.nuke' folder."""
    monkeypatch.delenv("NOMENCLATOR_CONFIG_PATH", raising=False)
    import nomenclator.config
    expected = os.path.join("__HOME__", ".nuke", "nomenclator.toml")
    assert nomenclator.config.path() == expected
    mocked_expanduser.assert_called_once_with("~")
def test_path_from_env(mocked_expanduser, monkeypatch):
    """Return the configuration file path taken from the environment variable."""
    monkeypatch.setenv("NOMENCLATOR_CONFIG_PATH", "__CONFIG__")
    import nomenclator.config
    expected = os.path.join("__CONFIG__", "nomenclator.toml")
    assert nomenclator.config.path() == expected
    mocked_expanduser.assert_called_once_with("~")
def test_fetch_empty(mocked_path, mocked_load):
    """Load an empty configuration when the config file does not exist."""
    import nomenclator.config
    mocked_path.return_value = "/path"
    result = nomenclator.config.fetch()
    assert result == mocked_load.return_value
    # No file on disk, so 'load' receives an empty mapping.
    mocked_load.assert_called_once_with({})
def test_fetch(mocked_path, mocked_load, temporary_file, mocked_toml_load):
    """Load the configuration object from an existing config file."""
    import nomenclator.config
    mocked_path.return_value = temporary_file
    result = nomenclator.config.fetch()
    assert result == mocked_load.return_value
    mocked_load.assert_called_once_with(mocked_toml_load.return_value)
    mocked_path.assert_called_once()
    # The stream passed to toml.load must be the config file, opened read-only.
    opened_stream = mocked_toml_load.call_args_list[0][0][0]
    assert opened_stream.mode == "r"
    assert opened_stream.name == temporary_file
def test_save(mocker, mocked_path, mocked_dump, temporary_file, mocked_toml_dump):
    """Serialize and write the configuration object to the config file."""
    import nomenclator.config
    mocked_path.return_value = temporary_file
    nomenclator.config.save("__CONFIG__")
    mocked_dump.assert_called_once_with("__CONFIG__")
    mocked_toml_dump.assert_called_once_with(
        mocked_dump.return_value, mocker.ANY
    )
    # The stream passed to toml.dump must be the config file, opened for writing.
    opened_stream = mocked_toml_dump.call_args_list[0][0][1]
    assert opened_stream.mode == "w"
    assert opened_stream.name == temporary_file
def test_dump_empty():
    """Serialize a fully-default config to an empty data mapping."""
    import nomenclator.config
    options = dict(
        descriptions=("comp", "precomp", "roto", "cleanup"),
        default_description=None, create_subfolders=False,
        comp_template_configs=(), project_template_configs=(),
        colorspace_aliases=(("linear", "lin"), ("sRGB", "srgb")),
        tokens=(), max_locations=5, max_padding=5,
        default_padding=None, username="john-doe", username_is_default=True,
    )
    data = nomenclator.config.dump(nomenclator.config.Config(**options))
    assert data == collections.OrderedDict()
def test_dump_descriptions():
    """Serialize only the non-default 'descriptions' value."""
    import nomenclator.config
    options = dict(
        descriptions=("test1", "test2", "test3"),
        default_description=None, create_subfolders=False,
        comp_template_configs=(), project_template_configs=(),
        colorspace_aliases=(("linear", "lin"), ("sRGB", "srgb")),
        tokens=(), max_locations=5, max_padding=5,
        default_padding=None, username="john-doe", username_is_default=True,
    )
    data = nomenclator.config.dump(nomenclator.config.Config(**options))
    assert data == collections.OrderedDict([
        ("descriptions", ("test1", "test2", "test3"))
    ])
def test_dump_default_description():
    """Serialize only the non-default 'default-description' value."""
    import nomenclator.config
    options = dict(
        descriptions=("comp", "precomp", "roto", "cleanup"),
        default_description="comp", create_subfolders=False,
        comp_template_configs=(), project_template_configs=(),
        colorspace_aliases=(("linear", "lin"), ("sRGB", "srgb")),
        tokens=(), max_locations=5, max_padding=5,
        default_padding=None, username="john-doe", username_is_default=True,
    )
    data = nomenclator.config.dump(nomenclator.config.Config(**options))
    assert data == collections.OrderedDict([
        ("default-description", "comp")
    ])
def test_dump_create_subfolders():
    """Serialize only the non-default 'create-subfolders' value."""
    import nomenclator.config
    options = dict(
        descriptions=("comp", "precomp", "roto", "cleanup"),
        default_description=None, create_subfolders=True,
        comp_template_configs=(), project_template_configs=(),
        colorspace_aliases=(("linear", "lin"), ("sRGB", "srgb")),
        tokens=(), max_locations=5, max_padding=5,
        default_padding=None, username="john-doe", username_is_default=True,
    )
    data = nomenclator.config.dump(nomenclator.config.Config(**options))
    assert data == collections.OrderedDict([
        ("create-subfolders", True)
    ])
def test_dump_colorspace_aliases():
    """Serialize only the non-default 'colorspace-aliases' mapping."""
    import nomenclator.config
    options = dict(
        descriptions=("comp", "precomp", "roto", "cleanup"),
        default_description=None, create_subfolders=False,
        comp_template_configs=(), project_template_configs=(),
        colorspace_aliases=(("color1", "alias1"), ("color2", "alias2")),
        tokens=(), max_locations=5, max_padding=5,
        default_padding=None, username="john-doe", username_is_default=True,
    )
    data = nomenclator.config.dump(nomenclator.config.Config(**options))
    expected_aliases = collections.OrderedDict([
        ("color1", "alias1"), ("color2", "alias2")
    ])
    assert data == collections.OrderedDict([
        ("colorspace-aliases", expected_aliases)
    ])
def test_dump_tokens():
    """Serialize only the non-default 'tokens' mapping."""
    import nomenclator.config
    options = dict(
        descriptions=("comp", "precomp", "roto", "cleanup"),
        default_description=None, create_subfolders=False,
        comp_template_configs=(), project_template_configs=(),
        colorspace_aliases=(("linear", "lin"), ("sRGB", "srgb")),
        tokens=(("token1", "value1"), ("token2", "value2")),
        max_locations=5, max_padding=5,
        default_padding=None, username="john-doe", username_is_default=True,
    )
    data = nomenclator.config.dump(nomenclator.config.Config(**options))
    expected_tokens = collections.OrderedDict([
        ("token1", "value1"), ("token2", "value2")
    ])
    assert data == collections.OrderedDict([
        ("tokens", expected_tokens)
    ])
def test_dump_max_locations():
    """Serialize only the non-default 'max-locations' value."""
    import nomenclator.config
    options = dict(
        descriptions=("comp", "precomp", "roto", "cleanup"),
        default_description=None, create_subfolders=False,
        comp_template_configs=(), project_template_configs=(),
        colorspace_aliases=(("linear", "lin"), ("sRGB", "srgb")),
        tokens=(), max_locations=10, max_padding=5,
        default_padding=None, username="john-doe", username_is_default=True,
    )
    data = nomenclator.config.dump(nomenclator.config.Config(**options))
    assert data == collections.OrderedDict([("max-locations", 10)])
def test_dump_max_padding():
    """Serialize only the non-default 'max-padding' value."""
    import nomenclator.config
    options = dict(
        descriptions=("comp", "precomp", "roto", "cleanup"),
        default_description=None, create_subfolders=False,
        comp_template_configs=(), project_template_configs=(),
        colorspace_aliases=(("linear", "lin"), ("sRGB", "srgb")),
        tokens=(), max_locations=5, max_padding=3,
        default_padding=None, username="john-doe", username_is_default=True,
    )
    data = nomenclator.config.dump(nomenclator.config.Config(**options))
    assert data == collections.OrderedDict([("max-padding", 3)])
def test_dump_default_padding():
    """Serialize only the non-default 'default-padding' value."""
    import nomenclator.config
    options = dict(
        descriptions=("comp", "precomp", "roto", "cleanup"),
        default_description=None, create_subfolders=False,
        comp_template_configs=(), project_template_configs=(),
        colorspace_aliases=(("linear", "lin"), ("sRGB", "srgb")),
        tokens=(), max_locations=5, max_padding=5,
        default_padding="###", username="john-doe", username_is_default=True,
    )
    data = nomenclator.config.dump(nomenclator.config.Config(**options))
    assert data == collections.OrderedDict([
        ("default-padding", "###")
    ])
def test_dump_username():
    """Serialize 'username' when it is not the default user name."""
    import nomenclator.config
    options = dict(
        descriptions=("comp", "precomp", "roto", "cleanup"),
        default_description=None, create_subfolders=False,
        comp_template_configs=(), project_template_configs=(),
        colorspace_aliases=(("linear", "lin"), ("sRGB", "srgb")),
        tokens=(), max_locations=5, max_padding=5,
        default_padding=None, username="john-doe", username_is_default=False,
    )
    data = nomenclator.config.dump(nomenclator.config.Config(**options))
    assert data == collections.OrderedDict([("username", "john-doe")])
def test_dump_comp_templates(mocked_dump_template_configs):
    """Serialize 'comp-templates' via the template-config dumper (with outputs)."""
    import nomenclator.config
    options = dict(
        descriptions=("comp", "precomp", "roto", "cleanup"),
        default_description=None, create_subfolders=False,
        comp_template_configs=("__TEMPLATE1__", "__TEMPLATE2__"),
        project_template_configs=(),
        colorspace_aliases=(("linear", "lin"), ("sRGB", "srgb")),
        tokens=(), max_locations=5, max_padding=5,
        default_padding=None, username="john-doe", username_is_default=True,
    )
    data = nomenclator.config.dump(nomenclator.config.Config(**options))
    assert data == collections.OrderedDict([
        ("comp-templates", mocked_dump_template_configs.return_value)
    ])
    mocked_dump_template_configs.assert_called_once_with(
        ("__TEMPLATE1__", "__TEMPLATE2__"), include_outputs=True
    )
def test_dump_project_templates(mocked_dump_template_configs):
    """Serialize 'project-templates' via the template-config dumper (no outputs)."""
    import nomenclator.config
    options = dict(
        descriptions=("comp", "precomp", "roto", "cleanup"),
        default_description=None, create_subfolders=False,
        comp_template_configs=(),
        project_template_configs=("__TEMPLATE1__", "__TEMPLATE2__"),
        colorspace_aliases=(("linear", "lin"), ("sRGB", "srgb")),
        tokens=(), max_locations=5, max_padding=5,
        default_padding=None, username="john-doe", username_is_default=True,
    )
    data = nomenclator.config.dump(nomenclator.config.Config(**options))
    assert data == collections.OrderedDict([
        ("project-templates", mocked_dump_template_configs.return_value)
    ])
    mocked_dump_template_configs.assert_called_once_with(
        ("__TEMPLATE1__", "__TEMPLATE2__")
    )
def test_dump_template_configs_empty(mocked_dump_output_template_configs):
    """Dump an empty template-config sequence to an empty list."""
    import nomenclator.config
    result = nomenclator.config.dump_template_configs([])
    assert result == []
    mocked_dump_output_template_configs.assert_not_called()
def test_dump_template_configs(mocked_dump_output_template_configs):
    """Dump template configs to data mappings, skipping default values and outputs."""
    import nomenclator.config
    episodic = nomenclator.config.TemplateConfig(
        id="Episodic",
        pattern_path="/path/{project}/{episode}/{shot}/scripts",
        pattern_base="{episode}_{shot}_{description}_v{version}",
        default_expression=r"[\w_.-]+",
        match_start=True,
        match_end=False,
        append_username_to_name=True,
        outputs=("__TEMPLATE11__", "__TEMPLATE12__"),
    )
    element = nomenclator.config.TemplateConfig(
        id="Element",
        pattern_path="/path/{project}/build/{element}/scripts",
        pattern_base="{element}_{description}_v{version}",
        default_expression=r"\w+",
        match_start=False,
        match_end=True,
        append_username_to_name=False,
        outputs=("__TEMPLATE21__", "__TEMPLATE22__"),
    )
    result = nomenclator.config.dump_template_configs((episodic, element))
    expected_episodic = collections.OrderedDict([
        ("id", "Episodic"),
        ("pattern-path", "/path/{project}/{episode}/{shot}/scripts"),
        ("pattern-base", "{episode}_{shot}_{description}_v{version}"),
        ("match-end", False),
        ("append-username-to-name", True),
    ])
    expected_element = collections.OrderedDict([
        ("id", "Element"),
        ("pattern-path", "/path/{project}/build/{element}/scripts"),
        ("pattern-base", "{element}_{description}_v{version}"),
        ("default-expression", r"\w+"),
        ("match-start", False),
    ])
    assert result == [expected_episodic, expected_element]
    mocked_dump_output_template_configs.assert_not_called()
def test_dump_template_configs_with_outputs(mocked_dump_output_template_configs):
    """Dump template configs to data mappings with their outputs included."""
    import nomenclator.config
    episodic = nomenclator.config.TemplateConfig(
        id="Episodic",
        pattern_path="/path/{project}/{episode}/{shot}/scripts",
        pattern_base="{episode}_{shot}_{description}_v{version}",
        default_expression=r"[\w_.-]+",
        match_start=True,
        match_end=False,
        append_username_to_name=True,
        outputs=("__TEMPLATE11__", "__TEMPLATE12__"),
    )
    element = nomenclator.config.TemplateConfig(
        id="Element",
        pattern_path="/path/{project}/build/{element}/scripts",
        pattern_base="{element}_{description}_v{version}",
        default_expression=r"\w+",
        match_start=False,
        match_end=True,
        append_username_to_name=False,
        outputs=("__TEMPLATE21__", "__TEMPLATE22__"),
    )
    result = nomenclator.config.dump_template_configs(
        (episodic, element), include_outputs=True
    )
    expected_episodic = collections.OrderedDict([
        ("id", "Episodic"),
        ("pattern-path", "/path/{project}/{episode}/{shot}/scripts"),
        ("pattern-base", "{episode}_{shot}_{description}_v{version}"),
        ("match-end", False),
        ("append-username-to-name", True),
        ("outputs", mocked_dump_output_template_configs.return_value),
    ])
    expected_element = collections.OrderedDict([
        ("id", "Element"),
        ("pattern-path", "/path/{project}/build/{element}/scripts"),
        ("pattern-base", "{element}_{description}_v{version}"),
        ("default-expression", r"\w+"),
        ("match-start", False),
        ("outputs", mocked_dump_output_template_configs.return_value),
    ])
    assert result == [expected_episodic, expected_element]
    assert mocked_dump_output_template_configs.call_count == 2
    mocked_dump_output_template_configs.assert_any_call(
        ("__TEMPLATE11__", "__TEMPLATE12__")
    )
    mocked_dump_output_template_configs.assert_any_call(
        ("__TEMPLATE21__", "__TEMPLATE22__")
    )
def test_dump_output_template_configs_empty():
    """Dump an empty output-template-config sequence to an empty list."""
    import nomenclator.config
    result = nomenclator.config.dump_output_template_configs([])
    assert result == []
def test_dump_output_template_configs():
    """Dump output template configs to data mappings, skipping default values."""
    import nomenclator.config
    comp = nomenclator.config.OutputTemplateConfig(
        id="Comp",
        pattern_path="/path/{project}/{episode}/{shot}/comps",
        pattern_base="{episode}_{shot}_comp_v{version}",
        append_username_to_name=False,
        append_colorspace_to_name=True,
        append_passname_to_name=False,
        append_passname_to_subfolder=True,
    )
    precomp = nomenclator.config.OutputTemplateConfig(
        id="Precomp",
        pattern_path="/path/{project}/{episode}/{shot}/precomps",
        pattern_base="{episode}_{shot}_precomp_v{version}",
        append_username_to_name=True,
        append_colorspace_to_name=False,
        append_passname_to_name=True,
        append_passname_to_subfolder=False,
    )
    result = nomenclator.config.dump_output_template_configs([comp, precomp])
    expected_comp = collections.OrderedDict([
        ("id", "Comp"),
        ("pattern-path", "/path/{project}/{episode}/{shot}/comps"),
        ("pattern-base", "{episode}_{shot}_comp_v{version}"),
        ("append-colorspace-to-name", True),
        ("append-passname-to-subfolder", True),
    ])
    expected_precomp = collections.OrderedDict([
        ("id", "Precomp"),
        ("pattern-path", "/path/{project}/{episode}/{shot}/precomps"),
        ("pattern-base", "{episode}_{shot}_precomp_v{version}"),
        ("append-username-to-name", True),
        ("append-passname-to-name", True),
    ])
    assert result == [expected_comp, expected_precomp]
@pytest.mark.usefixtures("mock_getuser")
def test_load_empty():
    """Build a fully-default config object from an empty data mapping."""
    import nomenclator.config
    defaults = dict(
        descriptions=("comp", "precomp", "roto", "cleanup"),
        default_description=None, create_subfolders=False,
        comp_template_configs=(), project_template_configs=(),
        colorspace_aliases=(("linear", "lin"), ("sRGB", "srgb")),
        tokens=(), max_locations=5, max_padding=5,
        default_padding=None, username="john-doe", username_is_default=True,
    )
    assert nomenclator.config.load({}) == nomenclator.config.Config(**defaults)
@pytest.mark.usefixtures("mock_getuser")
def test_load_descriptions():
    """Build a config with 'descriptions' taken from the data mapping."""
    import nomenclator.config
    loaded = nomenclator.config.load({
        "descriptions": ("test1", "test2", "test3")
    })
    expected = dict(
        descriptions=("test1", "test2", "test3"),
        default_description=None, create_subfolders=False,
        comp_template_configs=(), project_template_configs=(),
        colorspace_aliases=(("linear", "lin"), ("sRGB", "srgb")),
        tokens=(), max_locations=5, max_padding=5,
        default_padding=None, username="john-doe", username_is_default=True,
    )
    assert loaded == nomenclator.config.Config(**expected)
@pytest.mark.usefixtures("mock_getuser")
def test_load_default_description():
    """Build a config with 'default-description' taken from the data mapping."""
    import nomenclator.config
    loaded = nomenclator.config.load({
        "default-description": "comp"
    })
    expected = dict(
        descriptions=("comp", "precomp", "roto", "cleanup"),
        default_description="comp", create_subfolders=False,
        comp_template_configs=(), project_template_configs=(),
        colorspace_aliases=(("linear", "lin"), ("sRGB", "srgb")),
        tokens=(), max_locations=5, max_padding=5,
        default_padding=None, username="john-doe", username_is_default=True,
    )
    assert loaded == nomenclator.config.Config(**expected)
@pytest.mark.usefixtures("mock_getuser")
def test_load_create_subfolders():
    """Build a config with 'create-subfolders' taken from the data mapping."""
    import nomenclator.config
    loaded = nomenclator.config.load({
        "create-subfolders": True
    })
    expected = dict(
        descriptions=("comp", "precomp", "roto", "cleanup"),
        default_description=None, create_subfolders=True,
        comp_template_configs=(), project_template_configs=(),
        colorspace_aliases=(("linear", "lin"), ("sRGB", "srgb")),
        tokens=(), max_locations=5, max_padding=5,
        default_padding=None, username="john-doe", username_is_default=True,
    )
    assert loaded == nomenclator.config.Config(**expected)
@pytest.mark.usefixtures("mock_getuser")
def test_load_colorspace_aliases():
    """Build a config with 'colorspace-aliases' taken from the data mapping."""
    import nomenclator.config
    loaded = nomenclator.config.load({
        "colorspace-aliases": {
            "color1": "alias1",
            "color2": "alias2",
        }
    })
    expected = dict(
        descriptions=("comp", "precomp", "roto", "cleanup"),
        default_description=None, create_subfolders=False,
        comp_template_configs=(), project_template_configs=(),
        colorspace_aliases=(("color1", "alias1"), ("color2", "alias2")),
        tokens=(), max_locations=5, max_padding=5,
        default_padding=None, username="john-doe", username_is_default=True,
    )
    assert loaded == nomenclator.config.Config(**expected)
@pytest.mark.usefixtures("mock_getuser")
def test_load_tokens():
    """Build a config with 'tokens' taken from the data mapping."""
    import nomenclator.config
    loaded = nomenclator.config.load({
        "tokens": {
            "token1": "value1",
            "token2": "value2",
        }
    })
    expected = dict(
        descriptions=("comp", "precomp", "roto", "cleanup"),
        default_description=None, create_subfolders=False,
        comp_template_configs=(), project_template_configs=(),
        colorspace_aliases=(("linear", "lin"), ("sRGB", "srgb")),
        tokens=(("token1", "value1"), ("token2", "value2")),
        max_locations=5, max_padding=5,
        default_padding=None, username="john-doe", username_is_default=True,
    )
    assert loaded == nomenclator.config.Config(**expected)
@pytest.mark.usefixtures("mock_getuser")
def test_load_max_locations():
"""Return config with updated 'max-locations'."""
import nomenclator.config
config = nomenclator.config.load({
"max-locations": 10
})
assert config == nomenclator.config.Config(
descriptions=("comp", "precomp", "roto", "cleanup"),
default_description=None,
create_subfolders=False,
| |
def __repr__(self): # print attributes
return str([key for key in self.__dict__.keys()])
    @property
    def open_folder(self):
        """Open the data folder (``self.path``) in the system file browser.

        NOTE(review): implemented as a property so it triggers on plain
        attribute access (``obj.open_folder``); it performs an action and
        returns None — confirm this call-free style is intentional.
        """
        from ..utils.functions import open_folder
        open_folder(self.path)
def extract(self, time_range: list):
"""
Extracts data from the specified range
Parameters
----------
time_range : list
"""
start = time_range[0]
end = time_range[-1]
ind = np.where((self.timestamp >= start) & (self.timestamp <= end))
return self.timestamp[ind], self.data[ind]
    def spectrogram(self, timestamp, data, freq_range=[300, 8000]):
        """Calculate spectrogram of ``data`` sampled at ``self.sample_rate``.

        Parameters
        ----------
        timestamp : np.ndarray
            Time stamps covering ``data``; only the first and last values are
            used, to build the spectrogram time axis.
        data : np.ndarray
            Signal to transform.
        freq_range : list
            ``[low, high]`` frequency bounds passed to the helper.
            NOTE(review): mutable default argument — harmless here because it
            is never mutated, but consider a tuple.

        Returns
        -------
        spect_time : np.ndarray
            Time axis interpolated across the spectrogram columns.
        spect : np.ndarray
        spect_freq : np.ndarray
        """
        from ..utils.spect import spectrogram
        spect, spect_freq, _ = spectrogram(data, self.sample_rate, freq_range=freq_range)
        spect_time = np.linspace(timestamp[0], timestamp[-1], spect.shape[1])  # timestamp for spectrogram
        return spect_time, spect, spect_freq
    def get_spectral_entropy(self, spect, normalize=True, mode=None):
        """
        Calculate spectral entropy of a spectrogram.

        Thin wrapper that forwards all arguments to
        ``..analysis.functions.get_spectral_entropy``.

        Parameters
        ----------
        spect : np.ndarray
            Spectrogram to compute the entropy from.
        normalize : bool
            Get normalized spectral entropy
        mode : {'spectral', 'spectro_temporal'}
            Entropy variant; semantics defined by the analysis helper.

        Returns
        -------
        array of spectral entropy
        """
        from ..analysis.functions import get_spectral_entropy
        return get_spectral_entropy(spect, normalize=normalize, mode=mode)
class NeuralData:
    """Continuous neural (amplifier) recording for a single channel.

    On construction the data are loaded from a cached
    ``NeuralData_Ch<nb>.npy`` file when present, otherwise rebuilt from the
    raw recordings via :meth:`load_neural_data`. Every key of the resulting
    dictionary ('files', 'timestamp', 'data', 'sample_rate') is exposed as an
    instance attribute.
    """

    def __init__(self, path, channel_nb, format='rhd', update=False):
        """
        Parameters
        ----------
        path : pathlib.Path
            Directory holding the raw recordings and the .npy cache.
        channel_nb : int or str
            Channel number; zero-padded to two digits for file matching.
        format : str
            Raw file format/extension (e.g., 'rhd' or 'cbin').
        update : bool
            If True, rebuild the .npy cache even when it already exists.
        """
        self.path = path
        self.channel_nb = str(channel_nb).zfill(2)
        self.format = format  # format of the file (e.g., rhd), this info should be in the database
        file_name = self.path / f"NeuralData_Ch{self.channel_nb}.npy"
        if update or not file_name.exists():  # if .npy doesn't exist or want to update the file
            data_info = self.load_neural_data()
            # Save event_info as a numpy object
        else:
            data_info = np.load(file_name, allow_pickle=True).item()
        # Set the dictionary values to class attributes
        for key in data_info:
            setattr(self, key, data_info[key])

    def __repr__(self):  # print attributes
        return str([key for key in self.__dict__.keys()])

    def load_neural_data(self):
        """
        Load and concatenate all neural data files (e.g., .rhd) in the input dir (path)

        Returns
        -------
        data_info : dict
            Keys 'files', 'timestamp', 'data', 'sample_rate'. Also written to
            ``NeuralData_Ch<nb>.npy`` as a side effect.
        """
        from ..analysis.load import read_rhd
        from ..analysis.parameters import sample_rate
        print("")
        print("Load neural data")
        # List .rhd files
        files = list(self.path.glob(f'*.{self.format}'))
        # Initialize
        timestamp_concat = np.array([], dtype=np.float64)
        amplifier_data_concat = np.array([], dtype=np.float64)
        # Store values in these lists
        file_list = []
        if self.format == 'cbin':
            # if the neural data is in .cbin format, read from .mat files that has contains concatenated data
            # currently does not have files to extract data from .cbin files in python
            import scipy.io
            mat_file = list(self.path.glob(f'*Ch{self.channel_nb}(merged).mat'))[0]
            timestamp_concat = scipy.io.loadmat(mat_file)['t_amplifier'][0].astype(np.float64)
            amplifier_data_concat = scipy.io.loadmat(mat_file)['amplifier_data'][0].astype(np.float64)
        else:
            # Loop through Intan .rhd files
            for file in files:
                # Load data file
                print('Loading... ' + file.stem)
                file_list.append(file.name)
                intan = read_rhd(file)  # note that the timestamp is in second
                # Concatenate timestamps
                intan['t_amplifier'] -= intan['t_amplifier'][0]  # start from t = 0
                if timestamp_concat.size == 0:
                    timestamp_concat = np.append(timestamp_concat, intan['t_amplifier'])
                else:
                    # Offset this file's clock by one sample past the previous
                    # file's last timestamp so the axis stays monotonic.
                    intan['t_amplifier'] += (timestamp_concat[-1] + (1 / sample_rate[self.format]))
                    timestamp_concat = np.append(timestamp_concat, intan['t_amplifier'])
                # Concatenate neural data for the matching amplifier channel only
                for ind, ch in enumerate(intan['amplifier_channels']):
                    if int(self.channel_nb) == int(ch['native_channel_name'][-2:]):
                        amplifier_data_concat = np.append(amplifier_data_concat, intan['amplifier_data'][ind, :])
        # NOTE(review): 1E3 converts seconds to milliseconds; the original
        # comment said "microsecond", which would require 1E6 — confirm units.
        timestamp_concat *= 1E3
        # Organize data into a dictionary
        data_info = {
            'files': file_list,
            'timestamp': timestamp_concat,
            'data': amplifier_data_concat,
            'sample_rate': sample_rate[self.format]
        }
        file_name = self.path / f"NeuralData_Ch{self.channel_nb}.npy"
        np.save(file_name, data_info)
        return data_info

    def extract(self, time_range: list):
        """
        Extracts data from the specified range

        Parameters
        ----------
        time_range : list
            list of time stamps [start, end]

        Returns
        -------
        timestamp : arr
            Timestamps within the closed interval [start, end].
        data : arr
            Matching samples.
        """
        start = time_range[0]
        end = time_range[-1]
        ind = np.where((self.timestamp >= start) & (self.timestamp <= end))
        return self.timestamp[ind], self.data[ind]

    @property
    def open_folder(self):
        """Open the data folder in the system file browser."""
        from ..utils.functions import open_folder
        open_folder(self.path)
class Correlogram():
    """
    Class for correlogram analysis.

    Attributes
    ----------
    data : np.ndarray
        Correlogram counts per lag bin.
    time_bin : np.ndarray
        Lag axis matching ``data`` (built from ``spk_corr_parm``).
    peak_ind, peak_latency, peak_value
        Location / latency / height of the peak closest to the center,
        or ``np.nan`` when the correlogram is empty.
    burst_index : float
        Fraction of counts within the burst window around zero lag.
    """

    def __init__(self, correlogram):
        from ..analysis.parameters import spk_corr_parm, burst_hz
        corr_center = round(correlogram.shape[0] / 2) + 1  # center of the correlogram
        self.data = correlogram
        self.time_bin = np.arange(-spk_corr_parm['lag'],
                                  spk_corr_parm['lag'] + spk_corr_parm['bin_size'],
                                  spk_corr_parm['bin_size'])
        if self.data.sum():
            # Of all bins holding the maximum count, take the one closest to the center.
            self.peak_ind = np.min(
                np.abs(np.argwhere(correlogram == np.amax(correlogram)) - corr_center)) + corr_center  # index of the peak
            self.peak_latency = self.time_bin[self.peak_ind] - 1
            self.peak_value = self.data[self.peak_ind]
            # Window of +/- one inter-burst interval (1000 / burst_hz ms) around the center.
            burst_range = np.arange(corr_center - (1000 / burst_hz) - 1, corr_center + (1000 / burst_hz),
                                    dtype='int')  # burst range in the correlogram
            self.burst_index = round(self.data[burst_range].sum() / self.data.sum(), 3)
        else:  # empty correlogram: no measurable peak
            self.peak_ind = self.peak_latency = self.peak_value = self.burst_index = np.nan

    def __repr__(self):  # print attributes
        return str([key for key in self.__dict__.keys()])

    def category(self, correlogram_jitter: np.ndarray) -> str:
        """
        Get bursting category of a neuron based on autocorrelogram

        Also sets ``self.baseline`` (the upper significance limit).

        NOTE(review): this assigns ``self.category``, which shadows the bound
        method after the first call; a second ``obj.category(...)`` call will
        fail. Left unchanged for interface compatibility — consider renaming
        the attribute.

        Parameters
        ----------
        correlogram_jitter : np.ndarray
            Random time-jittered correlogram for baseline setting

        Returns
        -------
        Category of a neuron ('Bursting' or 'NonBursting')
        """
        from ..analysis.parameters import corr_burst_crit
        corr_mean = correlogram_jitter.mean(axis=0)
        if corr_mean.sum():
            corr_std = correlogram_jitter.std(axis=0)
            # Baseline = mean + 2 SD of the jittered correlograms.
            # (The original also computed an unused lower limit; removed.)
            upper_lim = corr_mean + (corr_std * 2)
            self.baseline = upper_lim
            # Significant peak at short latency -> bursting.
            if self.peak_value > upper_lim[self.peak_ind] and self.peak_latency <= corr_burst_crit:
                self.category = 'Bursting'
            else:
                self.category = 'NonBursting'
        else:
            self.baseline = self.category = np.array(np.nan)
        return self.category

    def plot_corr(self, ax, time_bin, correlogram,
                  title, xlabel=None, ylabel=None,
                  font_size=10,
                  peak_line_width=0.8,
                  normalize=False,
                  peak_line=True,
                  baseline=True):
        """
        Plot correlogram

        Parameters
        ----------
        ax : axis object
            axis to plot the figure
        time_bin : np.ndarray
        correlogram : np.ndarray
        title : str
        xlabel, ylabel : str
        font_size : int
            title font size
        peak_line_width : float
        normalize : bool
            normalize the correlogram
        peak_line : bool
            draw a dashed vertical line at the peak latency
        baseline : bool
            overlay the jitter baseline; requires :meth:`category` to have
            been called so ``self.baseline`` exists
        """
        import matplotlib.pyplot as plt
        from ..utils.draw import remove_right_top
        if correlogram.sum():
            ax.bar(time_bin, correlogram, color='k', rasterized=True)
            ymax = max([self.baseline.max(), correlogram.max()])
            # NOTE(review): the original computed ``round(ymax / 10) * 10`` here
            # and discarded the result (and imported an unused ``myround``
            # helper) — the y-limit was presumably meant to be rounded to the
            # nearest 10. Both no-ops were removed; confirm the intent.
            ax.set_ylim(0, ymax)
            plt.yticks([0, ax.get_ylim()[1]], [str(0), str(int(ymax))])
            ax.set_title(title, size=font_size)
            ax.set_xlabel(xlabel)
            # NOTE(review): the original if/else on ``normalize`` executed the
            # identical statement in both branches; collapsed into one call.
            ax.set_ylabel(ylabel)
            remove_right_top(ax)
            if peak_line and not np.isnan(self.peak_ind):
                ax.axvline(x=self.time_bin[self.peak_ind], color='r', linewidth=peak_line_width, ls='--')
            if baseline and not np.isnan(self.baseline.mean()):
                ax.plot(self.time_bin, self.baseline, 'm', lw=0.5, ls='--')
        else:  # nothing to draw: blank panel with just the title
            ax.axis('off')
            ax.set_title(title, size=font_size)
class BurstingInfo:
    """Burst statistics for a set of spike trains.

    Detects bursts as runs of inter-spike intervals whose instantaneous
    firing rate exceeds ``burst_hz`` and summarizes them as attributes:
    ``spk_list``, ``nb_burst_spk``, ``fraction`` (percent of spikes in
    bursts), ``duration`` (total, ms), ``freq``, ``mean_nb_spk``,
    ``mean_duration``. All statistics are ``np.nan`` when no burst is found.

    Parameters
    ----------
    ClassInfo : object
        Provider of ``spk_ts``, ``durations`` and ``contexts`` sequences
        (e.g., BaselineInfo, MotifInfo).
    *input_context : str, optional
        If given, only trials whose context matches ``input_context[0]``
        are analyzed.
    """

    def __init__(self, ClassInfo, *input_context):
        from ..analysis.parameters import burst_hz
        # ClassInfo can be BaselineInfo, MotifInfo etc
        if input_context:  # select data based on social context
            spk_list = [spk_ts for spk_ts, context in zip(ClassInfo.spk_ts, ClassInfo.contexts) if
                        context == input_context[0]]
            duration_list = [duration for duration, context in zip(ClassInfo.durations, ClassInfo.contexts) if
                             context == input_context[0]]
            self.context = input_context
        else:
            spk_list = ClassInfo.spk_ts
            duration_list = ClassInfo.durations
        # Bursting analysis
        burst_spk_list = []
        burst_duration_arr = []
        nb_bursts = []
        nb_burst_spk_list = []
        for ind, spks in enumerate(spk_list):
            isi = np.diff(spks)  # inter-spike interval
            # FIX: reuse ``isi`` instead of recomputing np.diff(spks).
            inst_fr = 1E3 / isi  # instantaneous firing rates (Hz)
            bursts = np.where(inst_fr >= burst_hz)[0]  # burst index
            # Skip if no bursting detected
            if not bursts.size:
                continue
            # Get the number of bursts
            temp = np.diff(bursts)[np.where(np.diff(bursts) == 1)].size  # check if the spikes occur in bursting
            nb_bursts = np.append(nb_bursts, bursts.size - temp)
            # Get burst onset
            temp = np.where(np.diff(bursts) == 1)[0]
            spk_ind = temp + 1
            # Remove consecutive spikes in a burst and just get burst onset
            # (FIX: the original loop variable shadowed the outer ``ind``;
            # its value was unused, so iterate by position only.)
            burst_onset_ind = bursts
            for i in range(temp.size):
                burst_spk_ind = spk_ind[spk_ind.size - 1 - i]
                burst_onset_ind = np.delete(burst_onset_ind, burst_spk_ind)
            # Get burst offset index
            # FIX: ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24;
            # it was an alias of the builtin ``int``, so behavior is unchanged.
            burst_offset_ind = np.array([], dtype=int)
            for j in range(bursts.size - 1):
                if bursts[j + 1] - bursts[j] > 1:  # if not successive spikes
                    burst_offset_ind = np.append(burst_offset_ind, bursts[j] + 1)
            # Need to add the subsequent spike time stamp since it is not included (burst is the difference between successive spike time stamps)
            burst_offset_ind = np.append(burst_offset_ind, bursts[bursts.size - 1] + 1)
            burst_onset = spks[burst_onset_ind]
            burst_offset = spks[burst_offset_ind]
            burst_spk_list.append(spks[burst_onset_ind[0]: burst_offset_ind[0] + 1])
            burst_duration_arr = np.append(burst_duration_arr, burst_offset - burst_onset)
            # Get the number of burst spikes
            nb_burst_spks = 1  # note that it should always be greater than 1
            if nb_bursts.size:
                if bursts.size == 1:
                    nb_burst_spks = 2
                    nb_burst_spk_list.append(nb_burst_spks)
                elif bursts.size > 1:
                    # FIX: renamed the loop variable (was ``ind``) so it no
                    # longer clobbers the outer enumerate index.
                    for k in range(bursts.size - 1):
                        if bursts[k + 1] - bursts[k] == 1:
                            nb_burst_spks += 1
                        else:
                            nb_burst_spks += 1
                            nb_burst_spk_list.append(nb_burst_spks)
                            nb_burst_spks = 1
                        if k == bursts.size - 2:
                            nb_burst_spks += 1
                            nb_burst_spk_list.append(nb_burst_spks)
        if sum(nb_burst_spk_list):
            self.spk_list = burst_spk_list
            self.nb_burst_spk = sum(nb_burst_spk_list)
            self.fraction = (round(sum(nb_burst_spk_list) / sum([len(spks) for spks in spk_list]), 3)) * 100
            self.duration = round((burst_duration_arr).sum(), 3)  # total duration
            self.freq = round(nb_bursts.sum() / (sum(duration_list) / 1E3), 3)
            self.mean_nb_spk = round(np.array(nb_burst_spk_list).mean(), 3)
            self.mean_duration = round(burst_duration_arr.mean(), 3)  # mean duration
        else:  # no burst spike detected
            self.spk_list = []
            self.nb_burst_spk = self.fraction = self.duration = self.freq = self.mean_nb_spk = self.mean_duration = np.nan

    def __repr__(self):  # print attributes
        return str([key for key in self.__dict__.keys()])
class ISI:
    """
    Class object for inter-spike interval analysis
    """

    def __init__(self, isi):
        """
        Parameters
        ----------
        isi : np.ndarray
            Inter-spike interval array
        """
        # NOTE(review): isi_win and isi_scale are imported but not used in
        # __init__ — presumably consumed by code outside this view; confirm.
        from ..analysis.parameters import isi_win, isi_scale, isi_bin
        self.data = isi
        # Histogram of log10(ISI); a log axis separates short within-burst
        # intervals from long between-burst intervals.
        self.hist, self.time_bin = np.histogram(np.log10(isi), bins=isi_bin)
        # np.histogram returns bin edges (len = bins + 1); drop the last edge
        # so time_bin pairs one-to-one with hist counts.
        self.time_bin = self.time_bin[:-1]
| |
<filename>examples/libs/gif/img_bulb_gif.py
img_bulb_gif_map = [
0x47, 0x49, 0x46, 0x38, 0x39, 0x61, 0x3c, 0x00, 0x50, 0x00, 0xf7, 0x00, 0x00, 0xfa, 0xfb, 0xfb,
0xfd, 0xfd, 0xfd, 0xff, 0xff, 0xff, 0xd9, 0xec, 0xfe, 0x1e, 0x93, 0xfe, 0x23, 0x95, 0xfd, 0x5f,
0xb2, 0xff, 0x52, 0xac, 0xfe, 0xb1, 0xd8, 0xff, 0xce, 0xe7, 0xff, 0xa3, 0xd2, 0xff, 0x80, 0xc0,
0xfe, 0xe2, 0xf1, 0xfe, 0xca, 0xe5, 0xff, 0xf4, 0xf7, 0xf9, 0x8b, 0xc4, 0xff, 0x7e, 0xbe, 0xff,
0xe0, 0xee, 0xff, 0xc6, 0xe4, 0xff, 0xbc, 0xde, 0xff, 0xec, 0xf5, 0xff, 0x1d, 0x92, 0xfd, 0x3f,
0x9f, 0xfe, 0x71, 0xbb, 0xff, 0x9f, 0xcf, 0xfe, 0xf2, 0xf9, 0xff, 0x31, 0x9c, 0xfe, 0x98, 0xcb,
0xff, 0x49, 0xa8, 0xfe, 0xbe, 0xe0, 0xff, 0x6d, 0xb3, 0xfe, 0x4c, 0xa9, 0xfe, 0xc4, 0xe2, 0xff,
0x4a, 0xa8, 0xfe, 0x45, 0xa3, 0xfa, 0xd2, 0xe7, 0xfc, 0xbd, 0xde, 0xff, 0xf9, 0xf9, 0xf9, 0xf7,
0xf8, 0xf8, 0xe2, 0xe2, 0xe2, 0xdd, 0xde, 0xdd, 0xe5, 0xea, 0xef, 0xff, 0xb7, 0x50, 0xff, 0xba,
0x56, 0xfd, 0xb7, 0x51, 0xff, 0xb7, 0x4f, 0xff, 0xb8, 0x50, 0xc5, 0xc4, 0xc1, 0xfa, 0xb7, 0x55,
0xee, 0xb3, 0x5b, 0xf7, 0xb5, 0x57, 0xf1, 0xb5, 0x5e, 0xf5, 0xb6, 0x5b, 0xfd, 0xb7, 0x54, 0xcd,
0xcd, 0xcc, 0xe5, 0xe8, 0xea, 0xcf, 0xb2, 0x85, 0xf5, 0xb8, 0x5f, 0xee, 0xef, 0xf0, 0xc0, 0xbf,
0xbe, 0xe4, 0xb1, 0x66, 0xff, 0xb7, 0x51, 0xda, 0xda, 0xda, 0xec, 0xed, 0xee, 0xdf, 0xdf, 0xdf,
0xd1, 0xd2, 0xd1, 0xe4, 0xe5, 0xe5, 0xff, 0xe5, 0xbf, 0xff, 0xc5, 0x73, 0xff, 0xeb, 0xce, 0xf0,
0xf3, 0xf5, 0xc8, 0xc8, 0xc8, 0xed, 0xb4, 0x61, 0xd8, 0xd8, 0xd7, 0xff, 0xdb, 0xa7, 0xff, 0xe1,
0xb7, 0xff, 0xc1, 0x68, 0xfc, 0xf7, 0xee, 0xff, 0xc9, 0x7c, 0xff, 0xd2, 0x92, 0xfa, 0xba, 0x5e,
0xff, 0xcc, 0x84, 0xff, 0xe8, 0xc6, 0xff, 0xfb, 0xf6, 0xff, 0xf5, 0xe8, 0xff, 0xf9, 0xf1, 0xff,
0xed, 0xd5, 0xc9, 0xba, 0xa0, 0xf1, 0xf1, 0xf1, 0xf6, 0xf6, 0xf6, 0xeb, 0xb7, 0x6a, 0xe3, 0xb5,
0x6f, 0xff, 0xbd, 0x5f, 0xa9, 0xa9, 0xa9, 0xd6, 0xd6, 0xd6, 0xb2, 0xb2, 0xb2, 0xb9, 0xb9, 0xb9,
0x97, 0x98, 0x96, 0x9e, 0x9e, 0x9e, 0xa5, 0xa5, 0xa5, 0x45, 0x45, 0x45, 0x46, 0x46, 0x46, 0x47,
0x47, 0x47, 0x4a, 0x4a, 0x4a, 0x4d, 0x4d, 0x4d, 0x50, 0x50, 0x50, 0x53, 0x53, 0x53, 0x58, 0x58,
0x58, 0x5a, 0x5a, 0x5a, 0x5d, 0x5d, 0x5d, 0x5f, 0x5f, 0x5f, 0x63, 0x63, 0x63, 0x66, 0x66, 0x66,
0x6a, 0x6a, 0x6a, 0x6d, 0x6d, 0x6d, 0x70, 0x70, 0x70, 0x75, 0x75, 0x75, 0x79, 0x79, 0x79, 0x7c,
0x7c, 0x7c, 0x80, 0x80, 0x80, 0x84, 0x84, 0x84, 0x88, 0x88, 0x88, 0x8b, 0x8b, 0x8b, 0x8f, 0x8f,
0x8f, 0xd5, 0xd5, 0xd5, 0xe7, 0xe7, 0xe7, 0xea, 0xea, 0xea, 0xf4, 0xf4, 0xf4, 0xf2, 0xf4, 0xf5,
0xff, 0xd6, 0x9b, 0xff, 0xde, 0xaf, 0xff, 0xfe, 0xfc, 0xff, 0xf1, 0xde, 0xff, 0xd6, 0x9c, 0xff,
0xcf, 0x8b, 0xfa, 0xd6, 0xa1, 0xf7, 0xc4, 0x7b, 0xf8, 0xbd, 0x67, 0xeb, 0xb5, 0x65, 0xf9, 0xf9,
0xf8, 0xf1, 0xdb, 0xbb, 0xdb, 0xd7, 0xce, 0xe3, 0xdb, 0xcc, 0xda, 0xd2, 0xc3, 0xe3, 0xd6, 0xc0,
0xae, 0xae, 0xae, 0xe4, 0xcb, 0xa6, 0xe5, 0xe2, 0xdc, 0xd0, 0xb8, 0x90, 0x56, 0x56, 0x56, 0xcd,
0xcb, 0xc6, 0xcf, 0xc4, 0xaf, 0xcf, 0xc9, 0xbc, 0xd0, 0xbe, 0xa0, 0xd8, 0xbb, 0x8e, 0xd8, 0xbf,
0x98, 0xd8, 0xc5, 0xa5, 0xd8, 0xd4, 0xcb, 0xda, 0xb7, 0x82, 0xdb, 0xca, 0xaf, 0xdb, 0xcd, 0xb6,
0xdd, 0xbe, 0x8e, 0xdd, 0xc3, 0x9b, 0xdd, 0xdb, 0xd5, 0xde, 0xc8, 0xa5, 0xe0, 0xbb, 0x82, 0xe2,
0xb8, 0x79, 0xe4, 0xc2, 0x8e, 0xe4, 0xc6, 0x99, 0xe4, 0xd1, 0xb3, 0xe5, 0xbe, 0x84, 0xe5, 0xdf,
0xd4, 0xe9, 0xdb, 0xc5, 0xe9, 0xde, 0xce, 0xea, 0xbe, 0x7b, 0xea, 0xc1, 0x85, 0xea, 0xd3, 0xb0,
0xea, 0xd8, 0xbc, 0xeb, 0xba, 0x71, 0xeb, 0xc5, 0x8e, 0xeb, 0xcb, 0x9a, 0xeb, 0xe4, 0xd9, 0xec,
0xd0, 0xa5, 0xec, 0xe9, 0xe3, 0xed, 0xec, 0xea, 0xef, 0xe7, 0xd9, 0xf0, 0xbc, 0x71, 0xf0, 0xc0,
0x7a, 0xf1, 0xb9, 0x68, 0xf3, 0xf0, 0xea, 0xf4, 0xc7, 0x86, 0xf4, 0xcb, 0x8e, 0xf7, 0xc0, 0x71,
0xf9, 0xf7, 0xf4, 0xfd, 0xf5, 0xeb, 0xf5, 0xd0, 0x99, 0xda, 0xcf, 0xbd, 0xdc, 0xb1, 0x6f, 0xf2,
0xd7, 0xb0, 0xed, 0xdf, 0xca, 0xc8, 0xb3, 0x8f, 0xd9, 0xb3, 0x78, 0xd2, 0xb0, 0x7c, 0xf3, 0xed,
0xe4, 0xf4, 0xf3, 0xf1, 0xf5, 0xea, 0xd8, 0xf5, 0xe3, 0xc9, 0xc1, 0xb7, 0xa3, 0xc5, 0xbe, 0xaf,
0xd7, 0xac, 0x6b, 0xc1, 0xb2, 0x95, 0xd2, 0xd6, 0xd6, 0xcf, 0xa9, 0x70, 0xbc, 0xb9, 0xb1, 0xc7,
0xa8, 0x76, 0x41, 0x84, 0xd5, 0xb0, 0xa2, 0x86, 0x39, 0x81, 0xd9, 0x78, 0x92, 0xad, 0x88, 0xa0,
0xb5, 0xb9, 0xcf, 0xe9, 0xbf, 0xa5, 0x7b, 0x40, 0x83, 0xd4, 0x80, 0x95, 0xa8, 0x80, 0x96, 0xa9,
0x49, 0x86, 0xcf, 0x87, 0x96, 0xa2, 0x48, 0x85, 0xcd, 0x60, 0x8c, 0xbe, 0x2e, 0x7e, 0xdf, 0x18,
0x78, 0xee, 0x9e, 0xbd, 0xe1, 0x06, 0x74, 0xfc, 0x00, 0x72, 0xff, 0x01, 0x73, 0xff, 0x47, 0xa1,
0xfe, 0x0e, 0x76, 0xf7, 0x16, 0x7d, 0xfc, 0x32, 0x8d, 0xfe, 0x61, 0xa7, 0xfe, 0x24, 0x85, 0xfc,
0x55, 0xa1, 0xff, 0x54, 0xa1, 0xff, 0x1e, 0x88, 0xfa, 0x20, 0x7b, 0xea, 0x9e, 0x9c, 0x93, 0x61,
0x9c, 0xdc, 0x6f, 0x90, 0xb3, 0x84, 0xaf, 0xe1, 0x54, 0x89, 0xc6, 0x24, 0x7c, 0xe7, 0xa7, 0x9f,
0x8c, 0x68, 0x8e, 0xb7, 0x8f, 0x98, 0x9d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x21, 0xff, 0x0b,
0x4e, 0x45, 0x54, 0x53, 0x43, 0x41, 0x50, 0x45, 0x32, 0x2e, 0x30, 0x03, 0x01, 0x00, 0x00, 0x00,
0x21, 0xf9, 0x04, 0x05, 0x25, 0x00, 0x04, 0x00, 0x21, 0xfe, 0x23, 0x52, 0x65, 0x73, 0x69, 0x7a,
0x65, 0x64, 0x20, 0x6f, 0x6e, 0x20, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x65, 0x7a,
0x67, 0x69, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x72, 0x65, 0x73, 0x69, 0x7a, 0x65, 0x00, 0x2c,
0x00, 0x00, 0x00, 0x00, 0x3c, 0x00, 0x50, 0x00, 0x00, 0x08, 0xff, 0x00, 0x03, 0x08, 0x1c, 0x48,
0xb0, 0xa0, 0xc1, 0x83, 0x08, 0x13, 0x2a, 0x5c, 0xc8, 0xb0, 0xa1, 0xc3, 0x87, 0x10, 0x23, 0x4a,
0x9c, 0x48, 0xb1, 0xa2, 0xc5, 0x8b, 0x18, 0x33, 0x6a, 0xdc, 0xc8, 0xb1, 0xa3, 0xc7, 0x8f, 0x20,
0x43, 0x8a, 0x1c, 0x49, 0xb2, 0xa4, 0xc9, 0x93, 0x28, 0x53, 0xaa, 0x5c, 0xc9, 0xb2, 0xa5, 0xcb,
0x97, 0x30, 0x63, 0xca, 0x9c, 0x49, 0xb3, 0xa6, 0xcd, 0x9b, 0x38, 0x63, 0x52, 0x50, 0xf0, 0xae,
0x5d, 0x3a, 0x75, 0xf1, 0x36, 0x44, 0x88, 0x99, 0xe1, 0x81, 0xba, 0x74, 0x48, 0x93, 0x22, 0x7d,
0xc7, 0xc0, 0x65, 0x04, 0x77, 0x48, 0xdd, 0x61, 0x18, 0x81, 0x41, 0x69, 0xba, 0x76, 0x0d, 0x58,
0x46, 0x80, 0xf7, 0x13, 0x41, 0x86, 0x06, 0x0f, 0xe2, 0x59, 0xfd, 0x99, 0x35, 0x65, 0x06, 0xa8,
0xed, 0x06, 0x8c, 0x80, 0x3a, 0x36, 0x69, 0xbb, 0xa6, 0x28, 0x37, 0x20, 0x8d, 0x47, 0xe2, 0x68,
0x5b, 0xa5, 0x1e, 0x50, 0x52, 0x38, 0xea, 0xce, 0xee, 0x5d, 0xab, 0x43, 0x4d, 0x22, 0x40, 0x1a,
0xc1, 0xc3, 0xdf, 0xb1, 0x18, 0x4e, 0x1a, 0x4e, 0x17, 0xcf, 0xef, 0xe1, 0xa5, 0x27, 0xd9, 0x3e,
0x1e, 0x0b, 0xef, 0xa4, 0xcf, 0xc9, 0x63, 0xd5, 0x45, 0xc6, 0x4c, 0x59, 0x31, 0x67, 0xab, 0xef,
0x4e, 0x0e, 0xfe, 0x9c, 0x54, 0xc1, 0xc9, 0xbd, 0xa4, 0x7f, 0xc2, 0x35, 0xf9, 0x20, 0x35, 0x04,
0xb3, 0x92, 0x27, 0xc3, 0xa3, 0xa0, 0x32, 0xc2, 0xe5, 0xc7, 0xea, 0x06, 0xb0, 0x1c, 0xe1, 0xb8,
0xad, 0xba, 0xb2, 0x2c, 0xe5, 0x1e, 0xde, 0xf0, 0x32, 0x03, 0xd7, 0xbb, 0xed, 0x32, 0xc0, 0x14,
0xde, 0xf6, 0x41, 0xcc, 0x11, 0x7f, 0x47, 0x10, 0xed, 0x8d, 0x54, 0x9d, 0xf2, 0x98, 0x08, 0x7a,
0xab, 0x43, 0x40, 0x93, 0xc2, 0x88, 0xef, 0xdf, 0x57, 0xe7, 0x9c, 0xa4, 0xf9, 0xe7, 0x8b, 0xf9,
0xf3, 0xe6, 0xb3, 0xc8, 0xec, 0xc3, 0xa6, 0xbd, 0xfb, 0xf6, 0x7d, 0x64, 0x9a, 0x78, 0xff, 0x1e,
0xcb, 0xcc, 0x3d, 0xf4, 0xd9, 0xe4, 0xa1, 0x29, 0x44, 0x4d, 0x99, 0xff, 0x65, 0xa4, 0x01, 0x44,
0x4d, 0x7d, 0xec, 0x71, 0x86, 0x19, 0x7a, 0x9c, 0x80, 0x13, 0x00, 0x00, 0x8c, 0xe7, 0xe0, 0x83,
0x30, 0x99, 0xf0, 0x07, 0x16, 0x3a, 0xfc, 0x80, 0x8b, 0x1f, 0x7e, 0xf4, 0xa1, 0xe1, 0x86, 0x1c,
0xf6, 0x81, 0xa1, 0x1f, 0xb8, 0xfc, 0xa0, 0x03, 0x16, 0x7f, 0x98, 0xf0, 0xd1, 0x0f, 0x7d, 0x08,
0x71, 0x02, 0x10, 0x28, 0xf8, 0xe0, 0x43, 0x12, 0x49, 0x78, 0xc1, 0xc7, 0x8c, 0x34, 0xce, 0xe8,
0x05, 0x8c, 0x2e, 0xa2, 0x00, 0xc4, 0x09, 0x42, 0xf4, 0xf1, 0x03, 0x84, 0x40, 0x06, 0x29, 0xe4,
0x90, 0x44, 0x16, 0x69, 0xe4, 0x91, 0x48, 0x56, 0x74, 0x5d, 0x00, 0x4b, 0xce, 0x94, 0x01, 0x03,
0x14, 0x30, 0x00, 0x00, 0x94, 0x51, 0x52, 0x40, 0x1b, 0x4c, 0x03, 0x50, 0x99, 0xc1, 0x00, 0x19,
0x58, 0x49, 0x41, 0x93, 0x2d, 0x05, 0x04, 0x00, 0x21, 0xf9, 0x04, 0x05, 0x06, | |
<filename>gui.py
import sys, random
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import GetFromJson
import PandasModel
#Main Window
class mainWindow(QMainWindow):
    """Top-level application window: hosts the :class:`App` central widget
    and builds the menu bar (File / Edit / View / Tools / Help)."""

    # Application Stylesheet
    def mainStyle(self):
        """Apply the dark background/foreground palette to the window."""
        self.setStyleSheet("""
            background-color: #2A3036;
            color: #FFF;
        """)

    def __init__(self):
        super().__init__(parent=None)
        self.mainStyle()
        self.setGeometry(20, 20, 800, 600)
        self.app_widget = App()  # central widget with all controls and the plot
        self.setCentralWidget(self.app_widget)
        self.setWindowTitle('PyQAR Project')
        # Global Menu
        mainMenu = self.menuBar()
        fileMenu = mainMenu.addMenu('File')
        editMenu = mainMenu.addMenu('Edit')
        viewMenu = mainMenu.addMenu('View')
        toolsMenu = mainMenu.addMenu('Tools')
        helpMenu = mainMenu.addMenu('Help')
        # File Menu
        openFileButton = QAction('Open File', self)
        openFileButton.setShortcut('Ctrl+O')
        openFileButton.triggered.connect(self.openFile)
        fileMenu.addAction(openFileButton)
        exitButton = QAction('Exit', self)
        exitButton.setShortcut('Ctrl+Q')
        exitButton.triggered.connect(self.close)
        fileMenu.addAction(exitButton)
        # Edit Menu
        # NOTE(review): Undo/Redo actions have no triggered handler yet.
        undoButton = QAction('Undo', self)
        undoButton.setShortcut('Ctrl+Z')
        editMenu.addAction(undoButton)
        redoButton = QAction('Redo', self)
        redoButton.setShortcut('Ctrl+Y')
        editMenu.addAction(redoButton)
        # View Menu (checkable toggles for the two main panels)
        viewQAR = QAction('View Association Rules', self, checkable=True)
        viewQAR.setChecked(True)
        viewQAR.triggered.connect(self.toggleQAR)
        viewMenu.addAction(viewQAR)
        viewPlot = QAction('View Plot', self, checkable=True)
        viewPlot.setChecked(True)
        viewPlot.triggered.connect(self.togglePlot)
        viewMenu.addAction(viewPlot)
        # Tools Menu
        # NOTE(review): not connected to self.globalSettings — confirm intent.
        globalSettingsButton = QAction('Global Settings', self)
        toolsMenu.addAction(globalSettingsButton)
        # Help Menu
        documentationButton = QAction('Documentation', self)
        documentationButton.triggered.connect(self.doclink)
        helpMenu.addAction(documentationButton)
        aboutButton = QAction('About', self)
        aboutButton.triggered.connect(self.about)
        helpMenu.addAction(aboutButton)

    # About Function
    def about(self):
        """Show the About dialog with version and author credits."""
        QMessageBox.information(self, "About", "Version: 1.0.0.0.0.0.0.0.1 \n Program made by: \n \n <NAME> \n <NAME> \n ")

    # Open File Function
    def openFile(self):
        """Prompt for a data file and hand the chosen path to GetFromJson."""
        options = QFileDialog.Options()
        options |= QFileDialog.DontUseNativeDialog
        fileName, _ = QFileDialog.getOpenFileName(self,"QFileDialog.getOpenFileName()", "","All Files (*);;Python Files (*.py)", options=options)
        if fileName:
            GetFromJson.read_file_path(fileName)

    # Documentation Function
    def doclink(self):
        """Open the project documentation page in the default browser."""
        QDesktopServices.openUrl(QUrl('https://github.com/jpniels/Bachelor'))

    # Settings Function
    def globalSettings(self):
        # NOTE(review): placeholder stub (debug print); it is also not wired
        # to the 'Global Settings' menu action above.
        print('hej')

    # Global CloseEvent function
    def closeEvent(self, event):
        """Ask for confirmation before letting the window close."""
        reply = QMessageBox.question(self, 'Quit Dialog',
                                     "\n Are you sure to quit?", QMessageBox.Yes |
                                     QMessageBox.Cancel, QMessageBox.Cancel)
        if reply == QMessageBox.Yes:
            event.accept()
        else:
            event.ignore()

    def toggleQAR(self, state):
        """Show/hide the association-rule controls and table (View menu)."""
        if state:
            self.app_widget.supportbutton.show()
            self.app_widget.allbutton.show()
            self.app_widget.liftbutton.show()
            self.app_widget.confidencebutton.show()
            self.app_widget.tableWidget.show()
        else:
            self.app_widget.supportbutton.hide()
            self.app_widget.allbutton.hide()
            self.app_widget.liftbutton.hide()
            self.app_widget.confidencebutton.hide()
            self.app_widget.tableWidget.hide()

    def togglePlot(self, state):
        """Show/hide the matplotlib canvas (View menu)."""
        if state:
            self.app_widget.canvas.show()
        else:
            self.app_widget.canvas.hide()
#Central widget within mainWindow
class App(QWidget):
#Application Stylesheet
    def appStyle(self):
        """Apply the dark Qt stylesheet for this widget's child controls.

        Selectors use a leading '.' so only exact class matches are styled
        (subclasses keep their own appearance).
        """
        self.setStyleSheet("""
            .QWidget {
                background-color: #2A3036;
            }
            .QComboBox, .QLineEdit, .QSpinBox, .QDoubleSpinBox{
                background-color: #434C55;
                color: #fff;
                height: 30px;
                selection-color: #434C55;
                selection-background-color: #FFB36C;
            }
            .QTableView {
                selection-color: #434C55;
                selection-background-color: #FFB36C;
                border: none;
                width: 100%;
            }
            .QRadioButton {
                color: #fff;
            }
            .QRadioButton::indicator::unchecked{
                border: 1px solid #5C656E;
                background-color: #434C55;
                height: 13px;
            }
            .QRadioButton::indicator::checked{
                border: 1px solid #434C55;
                background-color: #FFB36C;
                height: 13px;
            }
            .QLabel {
                color: darkgrey;
            }
        """)
#Global initialization
    def __init__(self):
        """Build the widget: construct all child controls, then apply styling."""
        super().__init__()
        self.initUI()   # create layouts, plot canvas and input controls
        self.appStyle() # styling applied after the widgets exist
def initUI(self):
#Plot Styling
plt.style.use('seaborn-pastel')
plt.rcParams['font.monospace'] = 'Ubuntu Mono'
plt.rcParams['font.size'] = 10
plt.rcParams['xtick.color'] = '#96A391'
plt.rcParams['ytick.color'] = '#96A391'
plt.rcParams['axes.labelsize'] = 10
plt.rcParams['axes.labelcolor'] = 'darkgrey'
plt.rcParams['axes.labelweight'] = 'normal'
plt.rcParams['xtick.labelsize'] = 10
plt.rcParams['ytick.labelsize'] = 10
plt.rcParams['legend.fontsize'] = 10
plt.rcParams['figure.titlesize'] = 12
plt.rcParams['figure.facecolor'] = '#2A3036'
plt.rcParams['axes.edgecolor'] = '#96A391'
plt.rcParams['axes.linewidth'] = 1
plt.rcParams['axes.facecolor'] = '#2A3036'
plt.rcParams['axes.grid'] = True
plt.rcParams['grid.color'] = '#343B43'
plt.rcParams['text.color'] = 'darkgrey'
plt.xticks(rotation=90)
#Grid/layout handling
l = QGridLayout(self)
subhorizontallayout = QHBoxLayout()
sublayout = QVBoxLayout()
sublayout2 = QVBoxLayout()
sublayout3 = QVBoxLayout()
sublayout.setAlignment(Qt.AlignTop)
sublayout2.setAlignment(Qt.AlignTop)
sublayout3.setAlignment(Qt.AlignTop)
subsublayout1 = QHBoxLayout()
subsublayout2 = QVBoxLayout()
subsublayout3 = QVBoxLayout()
subsublayout2.setAlignment(Qt.AlignTop)
subsublayout3.setAlignment(Qt.AlignTop)
self.figure = plt.figure(figsize=(5,7))
self.canvas = FigureCanvas(self.figure)
self.canvas.setMinimumWidth(800)
self.canvas.setMaximumHeight(800)
sublayout2.addWidget(self.canvas)
self.threshold = QDoubleSpinBox(self)
self.threshold.setValue(0.1)
self.threshold.valueChanged.connect(self.plot)
self.threshold.setFixedWidth(250)
self.threshold.setSuffix(' Threshold')
self.threshold.setRange(0.1, 5)
self.threshold.setSingleStep(0.1)
subsublayout2.addWidget(self.threshold)
#Support Button
self.supportbutton = QRadioButton("Calculate Support", self)
self.supportbutton.toggled.connect(self.aprioritoggled)
subsublayout2.addWidget(self.supportbutton)
#Conviction Button
self.confidencebutton = QRadioButton("Calculate Confidence", self)
self.confidencebutton.toggled.connect(self.aprioritoggled)
subsublayout2.addWidget(self.confidencebutton)
#Lift Button
self.liftbutton = QRadioButton("Calculate Lift", self)
self.liftbutton.toggled.connect(self.aprioritoggled)
subsublayout2.addWidget(self.liftbutton)
#Conviction Button
self.convictionbutton = QRadioButton("Calculate Conviction", self)
self.convictionbutton.toggled.connect(self.aprioritoggled)
subsublayout2.addWidget(self.convictionbutton)
#Lift Button
self.allbutton = QRadioButton("Calculate All", self)
self.allbutton.toggled.connect(self.aprioritoggled)
subsublayout2.addWidget(self.allbutton)
####################################################################################################################
### Grid 1
####################################################################################################################
#Room Box 1
self.roomBoxlabel = QLabel("Select Room:")
self.roomBox = QComboBox(self)
self.roomBox.addItem('Room')
self.roomBox.model().item(0).setEnabled(False)
for element in GetFromJson.getRooms():
self.roomBox.addItem(element)
self.roomBox.currentTextChanged.connect(self.roomBoxChanged)
self.roomBox.setFixedWidth(250)
sublayout.addWidget(self.roomBoxlabel)
sublayout.addWidget(self.roomBox)
#Media Box 1
self.mediaBoxlabel = QLabel("Select Media:")
self.mediaBox = QComboBox(self)
self.mediaBox.setEnabled(False)
self.mediaBox.addItem('Media')
self.mediaBox.model().item(0).setEnabled(False)
self.mediaBox.currentTextChanged.connect(self.plot)
self.mediaBox.setFixedWidth(250)
sublayout.addWidget(self.mediaBoxlabel)
sublayout.addWidget(self.mediaBox)
#Outliers Radiobutton 1
self.outlierBtn = QRadioButton("Remove Outliers", self)
self.outlierBtn.setAutoExclusive(False)
self.outlierBtn.toggled.connect(self.outlierstoggled)
sublayout.addWidget(self.outlierBtn)
#Outliers Selection Box
self.outliermethod = QComboBox(self)
self.outliermethod.hide()
self.outliermethod.addItem('Standard Deviation')
self.outliermethod.addItem('Interquartile Range')
self.outliermethod.currentTextChanged.connect(self.plot)
self.outliermethod.setFixedWidth(250)
sublayout.addWidget(self.outliermethod)
#Interolate Radiobutton 1
self.interpolateBtn = QRadioButton("Interpolate data", self)
self.interpolateBtn.setAutoExclusive(False)
self.interpolateBtn.toggled.connect(self.interpolatetoggled)
sublayout.addWidget(self.interpolateBtn)
#Interpolate Selection Box
self.interpolateBox = QComboBox(self)
self.interpolateBox.hide()
self.interpolateBox.addItem('5Min')
self.interpolateBox.addItem('15Min')
self.interpolateBox.addItem('30Min')
self.interpolateBox.addItem('45Min')
self.interpolateBox.addItem('1H')
self.interpolateBox.addItem('2H')
self.interpolateBox.currentTextChanged.connect(self.plot)
self.interpolateBox.setFixedWidth(250)
sublayout.addWidget(self.interpolateBox)
#Intervals Radiobutton 1
self.intervalsBtn = QRadioButton("Use intervals", self)
self.intervalsBtn.setAutoExclusive(False)
self.intervalsBtn.toggled.connect(self.intervalstoggled)
sublayout.addWidget(self.intervalsBtn)
#Intervals spinbox 1
self.spinbox = QSpinBox(self)
self.spinbox.setValue(1)
self.spinbox.valueChanged.connect(self.plot)
self.spinbox.hide()
self.spinbox.setFixedWidth(250)
self.spinbox.setSuffix(' Intervals')
self.spinbox.setRange(1, 25)
sublayout.addWidget(self.spinbox)
#Time Frequency Radiobutton
self.freqButton = QRadioButton("Set Time Frequency", self)
self.freqButton.setAutoExclusive(False)
self.freqButton.toggled.connect(self.frequencytoggled)
sublayout.addWidget(self.freqButton)
#Time Frequency Box
self.timefreqBox = QComboBox(self)
self.timefreqBox.hide()
self.timefreqBox.addItem('30Min')
self.timefreqBox.addItem('1H')
self.timefreqBox.addItem('2H')
self.timefreqBox.addItem('12H')
self.timefreqBox.addItem('1D')
self.timefreqBox.addItem('1W')
self.timefreqBox.addItem('2W')
self.timefreqBox.addItem('1M')
self.timefreqBox.currentTextChanged.connect(self.plot)
self.timefreqBox.setFixedWidth(250)
sublayout.addWidget(self.timefreqBox)
#Calendar From Widget
self.dateTimelabel = QLabel("Select Start Date: ")
self.calendar = QCalendarWidget(self)
format = QTextCharFormat()
format.setBackground(QColor('#434C55'))
weekendformat = QTextCharFormat()
weekendformat.setForeground(QColor('#fff'))
self.calendar.setHeaderTextFormat(format)
self.calendar.setStyleSheet('selection-background-color: #FFB36C; selection-color: #434C55;')
self.calendar.setWeekdayTextFormat(Qt.Saturday, weekendformat)
self.calendar.setWeekdayTextFormat(Qt.Sunday, weekendformat)
self.calendar.setFixedWidth(250)
self.calendar.setMaximumHeight(220)
sublayout.addWidget(self.dateTimelabel)
sublayout.addWidget(self.calendar)
#Date time From widget for converting to ms - nonvisible
self.datetime = QDateTimeEdit()
self.datetime.setCalendarPopup(True)
self.datetime.setCalendarWidget(self.calendar)
self.datetime.dateTimeChanged.connect(self.plot)
self.datetime.setVisible(False)
sublayout.addStretch()
####################################################################################################################
### Grid 2
####################################################################################################################
#Room Box 2
self.roomBoxlabel2 = QLabel("Select Second Room:")
self.roomBox2 = QComboBox(self)
self.roomBox2.addItem('Room')
self.roomBox2.model().item(0).setEnabled(False)
for element in GetFromJson.getRooms():
self.roomBox2.addItem(element)
self.roomBox2.currentTextChanged.connect(self.roomBox2Changed)
self.roomBox2.setFixedWidth(250)
sublayout3.addWidget(self.roomBoxlabel2)
sublayout3.addWidget(self.roomBox2)
#Media Box 2
self.mediaBoxlabel2 = QLabel("Select Second Media:")
self.mediaBox2 = QComboBox(self)
self.mediaBox2.setEnabled(False)
self.mediaBox2.addItem('Media')
self.mediaBox2.model().item(0).setEnabled(False)
self.mediaBox2.currentTextChanged.connect(self.plot)
self.mediaBox2.setFixedWidth(250)
sublayout3.addWidget(self.mediaBoxlabel2)
sublayout3.addWidget(self.mediaBox2)
#Outliers Radiobutton 2
self.outlierBtn2 = QRadioButton("Remove Outliers", self)
self.outlierBtn2.setAutoExclusive(False)
self.outlierBtn2.toggled.connect(self.outlierstoggled2)
sublayout3.addWidget(self.outlierBtn2)
#Outliers Selection Box
self.outliermethod2 = QComboBox(self)
self.outliermethod2.hide()
self.outliermethod2.addItem('Standard Deviation', 1)
self.outliermethod2.addItem('Interquartile Range', 2)
self.outliermethod2.currentTextChanged.connect(self.plot)
self.outliermethod2.setFixedWidth(250)
sublayout3.addWidget(self.outliermethod2)
#Interpolate Radiobutton 2
self.interpolateBtn2 = QRadioButton("Interpolate data", self)
self.interpolateBtn2.setAutoExclusive(False)
self.interpolateBtn2.toggled.connect(self.interpolatetoggled2)
sublayout3.addWidget(self.interpolateBtn2)
#Interpolate Selection Box
self.interpolateBox2 = QComboBox(self)
self.interpolateBox2.hide()
self.interpolateBox2.addItem('5Min')
self.interpolateBox2.addItem('15Min')
self.interpolateBox2.addItem('30Min')
self.interpolateBox2.addItem('45Min')
self.interpolateBox2.addItem('1H')
self.interpolateBox2.addItem('2H')
self.interpolateBox2.currentTextChanged.connect(self.plot)
self.interpolateBox2.setFixedWidth(250)
sublayout3.addWidget(self.interpolateBox2)
#Intervals Radiobutton 2
self.intervalsBtn2 = QRadioButton("Use intervals", self)
self.intervalsBtn2.setAutoExclusive(False)
self.intervalsBtn2.toggled.connect(self.intervalstoggled2)
sublayout3.addWidget(self.intervalsBtn2)
#Intervals spinbox 2
self.spinbox2 = QSpinBox(self)
self.spinbox2.setValue(1)
self.spinbox2.valueChanged.connect(self.plot)
self.spinbox2.hide()
self.spinbox2.setFixedWidth(250)
self.spinbox2.setSuffix(' Intervals')
self.spinbox2.setRange(1, 25)
sublayout3.addWidget(self.spinbox2)
#Time Frequency Radiobutton
self.freqButton2 = QRadioButton("Set Time Frequency", self)
self.freqButton2.setAutoExclusive(False)
self.freqButton2.toggled.connect(self.frequencytoggled2)
sublayout3.addWidget(self.freqButton2)
#Time Frequency Box 2
self.timefreqBox2 = QComboBox(self)
self.timefreqBox2.hide()
self.timefreqBox2.addItem('30Min')
self.timefreqBox2.addItem('1H')
self.timefreqBox2.addItem('2H')
self.timefreqBox2.addItem('12H')
self.timefreqBox2.addItem('1D')
self.timefreqBox2.addItem('1W')
self.timefreqBox2.addItem('2W')
self.timefreqBox2.addItem('1M')
self.timefreqBox2.currentTextChanged.connect(self.plot)
self.timefreqBox2.setFixedWidth(250)
sublayout3.addWidget(self.timefreqBox2)
#Calendar To Widget
self.dateTimelabelto = QLabel("Select End Date: ")
self.calendarto = QCalendarWidget(self)
self.calendarto.setHeaderTextFormat(format)
self.calendarto.setStyleSheet('selection-background-color: #FFB36C; selection-color: #434C55;')
self.calendarto.setWeekdayTextFormat(Qt.Saturday, weekendformat)
self.calendarto.setWeekdayTextFormat(Qt.Sunday, weekendformat)
self.calendarto.setFixedWidth(250)
self.calendarto.setMaximumHeight(220)
sublayout3.addWidget(self.dateTimelabelto)
sublayout3.addWidget(self.calendarto)
#Date time From widget for converting to ms - nonvisible
self.datetimeto = QDateTimeEdit(QDate.currentDate())
self.datetimeto.setCalendarPopup(True)
self.datetimeto.setCalendarWidget(self.calendarto)
self.datetimeto.dateTimeChanged.connect(self.plot)
self.datetimeto.setVisible(False)
sublayout3.addStretch()
##########################################################################################################################
#Table Widget
self.tableWidget = QTableView()
self.header = self.tableWidget.horizontalHeader()
self.header.setStretchLastSection(True)
subsublayout3.addWidget(self.tableWidget)
#Add layouts to grid
subsublayout1.addLayout(subsublayout2)
subsublayout1.addLayout(subsublayout3)
sublayout2.addLayout(subsublayout1)
subhorizontallayout.addLayout(sublayout)
subhorizontallayout.addLayout(sublayout2)
subhorizontallayout.addLayout(sublayout3)
sizeable = QWidget()
sizeable.setLayout(subhorizontallayout)
l.addWidget(sizeable, 1, 1, 1, 1)
l.setAlignment(Qt.AlignCenter)
self.compute_initial_figure()
#When a room is selected get the medias and show them
def roomBoxChanged(self):
self.mediaBox.setEnabled(True)
self.mediaBox.clear()
medialist = []
for k, v in GetFromJson.getMedias(self.roomBox.currentText()).items():
if v not in medialist:
medialist.append(v)
self.mediaBox.addItems(medialist)
#Same as above for room2 selected
def roomBox2Changed(self):
self.mediaBox2.setEnabled(True)
self.mediaBox2.clear()
medialist2 = []
for k, v in GetFromJson.getMedias(self.roomBox2.currentText()).items():
if v not in medialist2:
medialist2.append(v)
self.mediaBox2.addItems(medialist2)
def outlierstoggled(self, state):
if state:
if self.mediaBox.currentText() != 'Media':
self.outliermethod.show()
self.plot()
else:
self.outlierBtn.setChecked(False)
QMessageBox.warning(self, "Error", "You must pick a room and media before using this function.")
else:
self.outliermethod.hide()
self.plot()
def interpolatetoggled(self, state):
if state:
if self.mediaBox.currentText() != 'Media':
self.interpolateBox.show()
self.plot()
else:
self.interpolateBtn.setChecked(False)
QMessageBox.warning(self, "Error", "You must pick a room and media before using this function.")
else:
self.interpolateBox.hide()
self.plot()
def intervalstoggled(self, state):
if state:
if self.mediaBox.currentText() != 'Media':
self.spinbox.show()
self.plot()
else:
self.intervalsBtn.setChecked(False)
QMessageBox.warning(self, "Error", "You must pick a room and media before using this function.")
else:
self.spinbox.hide()
self.plot()
def frequencytoggled(self, state):
if state:
if self.mediaBox.currentText() != 'Media':
self.timefreqBox.show()
self.plot()
else:
self.freqButton.setChecked(False)
QMessageBox.warning(self, "Error", "You must pick a room and media before using this function.")
else:
self.timefreqBox.hide()
self.plot()
def outlierstoggled2(self, state):
if state:
if self.mediaBox2.currentText() != 'Media':
self.outliermethod2.show()
self.plot()
else:
self.outlierBtn2.setChecked(False)
QMessageBox.warning(self, "Error", "You must pick a room and media before using this function.")
else:
self.outliermethod2.hide()
self.plot()
def interpolatetoggled2(self, state):
if state:
if self.mediaBox2.currentText() != 'Media':
self.interpolateBox2.show()
self.plot()
else:
self.interpolateBtn2.setChecked(False)
QMessageBox.warning(self, "Error", "You must pick a room and media before using this function.")
else:
self.interpolateBox2.hide()
self.plot()
def intervalstoggled2(self, state):
if state:
if self.mediaBox2.currentText() != 'Media':
self.spinbox2.show()
self.plot()
else:
self.intervalsBtn2.setChecked(False)
QMessageBox.warning(self, "Error", "You must pick a room and media before using this function.")
else:
self.spinbox2.hide()
self.plot()
def frequencytoggled2(self, state):
if state:
if self.mediaBox2.currentText() != 'Media':
self.timefreqBox2.show()
self.plot()
else:
self.freqButton2.setChecked(False)
QMessageBox.warning(self, "Error", "You must pick a room and media before using this function.")
else:
self.timefreqBox2.hide()
self.plot()
def aprioritoggled(self, state):
if state:
if self.mediaBox.currentText() != 'Media' or self.mediaBox2.currentText() != 'Media':
self.plot()
else:
QMessageBox.warning(self, "Error", "You must pick two rooms and medias before using this function.")
else:
self.plot()
    #Draws the initial empty placeholder figure; no data is plotted until a room/media is selected.
def compute_initial_figure(self):
| |
import inspect
from inspect import Signature, Parameter
from typing import Dict, List, Callable, Any, Optional, Union
from illuminate_core.support.utils import call_user_func
from illuminate_core.contract.container import Container as ContainerInterface, ContextualBindingBuilder as ContextualBindingBuilderInterface
from illuminate_core.container import bound
from .builder import ContextualBindingBuilder
from .exception import BindingResolutionException, EntryNotFoundException
from .types import ClassAnnotation, Abstract, Concrete, Parameters
class Container(ContainerInterface):
_resolved: Dict[ClassAnnotation, bool]
bindings: Dict[ClassAnnotation, Dict[str, Any]]
methodBindings: Dict[str, Callable]
instances: Dict[ClassAnnotation, Any]
aliases: Dict[ClassAnnotation, ClassAnnotation]
abstractAliases: Dict[ClassAnnotation, List[ClassAnnotation]]
extenders: Dict[ClassAnnotation, List[Callable[[Any, ContainerInterface], Any]]]
tags: Dict[Any, List[ClassAnnotation]]
buildStack: List[str]
withParameters: List[Parameters]
reboundCallbacks: Dict[ClassAnnotation, List[Callable[[ContainerInterface, Any], Any]]]
globalResolvingCallbacks: List[Callable[[Any, ContainerInterface], Any]]
globalAfterResolvingCallbacks: List[Callable[[Any, ContainerInterface], Any]]
resolvingCallbacks: Dict[str, List[Callable]]
afterResolvingCallbacks: Dict[str, List[Callable]]
contextual: Dict[Concrete, Dict[ClassAnnotation, Union[ClassAnnotation, Callable]]]
def __init__(self):
self._resolved: Dict[ClassAnnotation, bool] = {}
self.bindings: Dict[ClassAnnotation, Dict[str, Any]] = {}
self.methodBindings = {}
self.instances: Dict[ClassAnnotation, Any] = {}
self.aliases: Dict[ClassAnnotation, ClassAnnotation] = {}
self.abstractAliases: Dict[ClassAnnotation, List[ClassAnnotation]] = {}
self.extenders: Dict[ClassAnnotation, List[Callable[[Any, ContainerInterface], Any]]] = {}
self.tags: Dict[Any, List[ClassAnnotation]] = {}
self.buildStack: List[str] = []
self.withParameters: List[Parameters] = []
self.reboundCallbacks: Dict[ClassAnnotation, List[Callable[[ContainerInterface, Any], Any]]] = {}
self.globalResolvingCallbacks: List[Callable[[Any, ContainerInterface], Any]] = {}
self.globalAfterResolvingCallbacks: List[Callable[[Any, ContainerInterface], Any]] = {}
self.resolvingCallbacks: Dict[str, List[Callable]] = {}
self.afterResolvingCallbacks: Dict[str, List[Callable]] = {}
self.contextual: Dict[Concrete, Dict[ClassAnnotation, Union[ClassAnnotation, Callable]]] = {}
def when(self, concrete: ClassAnnotation) -> ContextualBindingBuilderInterface:
"""
Define a contextual binding.
"""
return ContextualBindingBuilder(self, self.get_alias(concrete))
def bound(self, abstract: ClassAnnotation) -> bool:
"""
Determine if the given abstract type has been bound.
"""
return abstract in self.bindings or abstract in self.instances or self.is_alias(abstract)
    def has(self, name: ClassAnnotation) -> bool:
        # PSR-11 style entry check; simply delegates to bound().
        return self.bound(name)
def resolved(self, abstract: ClassAnnotation) -> bool:
"""
Determine if the given abstract type has been resolved.
"""
if self.is_alias(abstract):
abstract = self.get_alias(abstract)
return abstract in self._resolved or abstract in self.instances
def is_shared(self, abstract: ClassAnnotation) -> bool:
"""
Determine if a given type is shared.
"""
return abstract in self.instances or \
(
abstract in self.bindings and
'shared' in self.bindings[abstract] and
self.bindings[abstract]['shared'] is True
)
    def is_alias(self, name: ClassAnnotation) -> bool:
        """
        Determine if a given string is an alias.
        """
        # True when *name* was registered through alias() as an alternate key.
        return name in self.aliases
def bind(self, abstract: ClassAnnotation, concrete: Optional[Union[ClassAnnotation, Callable]] = None, shared: bool = False):
"""
Register a binding with the container.
"""
self.drop_stale_instances(abstract)
if concrete is None:
concrete = abstract
if not callable(concrete):
concrete = self.get_closure(abstract, concrete)
self.bindings[abstract] = {
'concrete': concrete,
'shared': shared
}
if abstract in self._resolved:
self.rebound(abstract)
def get_closure(self, abstract: ClassAnnotation, concrete: ClassAnnotation) -> Callable:
"""
Get the Closure to be used when building a type.
"""
def closure(container: Container, parameters: Parameters = None):
if parameters is None:
parameters = []
if abstract == concrete:
return container.build(concrete)
return container.make(concrete, parameters)
return closure
    def has_method_binding(self, method: str) -> bool:
        """
        Determine if the container has a method binding.

        *method* is a "Class@method" key as produced by parse_bind_method().
        """
        return method in self.methodBindings
    def bind_method(self, method: Union[str, List], callback: Callable) -> None:
        """
        Bind a callback to resolve with Container::call.

        *method* may be a "Class@method" string or a [Class, method] pair;
        it is normalized before being stored.
        """
        self.methodBindings[self.parse_bind_method(method)] = callback
def parse_bind_method(self, method: Union[str, List]) -> str:
"""
Get the method to be bound in class@method format.
"""
if isinstance(method, list):
return '{0}@{1}'.format(*method)
return method
    def call_method_binding(self, method: str, instance: Any) -> Any:
        """
        Get the method binding for the given method.
        """
        # Invoke the bound callback with the target instance and this container.
        return self.methodBindings[method](instance, self)
def add_contextual_binding(self, concrete: Concrete, abstract: ClassAnnotation, implementation: Union[ClassAnnotation, Callable]) -> None:
"""
Add a contextual binding to the container.
"""
if concrete not in self.contextual:
self.contextual[concrete] = {}
self.contextual[concrete][self.get_alias(abstract)] = implementation
def bind_if(self, abstract: ClassAnnotation, concrete: Optional[Concrete] = None, shared: bool = False) -> None:
"""
Register a binding if it hasn't already been registered.
"""
if not self.bound(abstract):
self.bind(abstract, concrete, shared)
    def singleton(self, abstract: Abstract, concrete: Concrete = None) -> None:
        """
        Register a shared binding in the container.

        Convenience wrapper around bind() with shared=True: the first
        resolution is cached and reused by later make() calls.
        """
        self.bind(abstract, concrete, True)
def extend(self, abstract: ClassAnnotation, closure: Callable) -> None:
"""
"Extend" an abstract type in the container.
"""
abstract = self.get_alias(abstract)
if abstract in self.instances:
self.instances[abstract] = closure(self.instances[abstract], self)
self.rebound(abstract)
else:
self.extenders[abstract].append(closure)
if self.resolved(abstract):
self.rebound(abstract)
def instance(self, abstract: ClassAnnotation, instance: Any) -> Any:
"""
Register an existing instance as shared in the container.
"""
self.remove_abstract_alias(abstract)
is_bound = self.bound(abstract)
self.aliases.pop(abstract, None)
self.instances[abstract] = instance
if is_bound:
self.rebound(abstract)
return instance
def remove_abstract_alias(self, searched: ClassAnnotation) -> None:
"""
Remove an alias from the contextual binding alias cache.
"""
if searched not in self.aliases:
return
for abstract, aliases in self.abstractAliases.items():
for index, alias in enumerate(aliases):
if alias == searched:
del self.abstractAliases[abstract][index]
def tag(self, abstracts: Abstract, *tags):
"""
Assign a set of tags to a given binding.
"""
for tag in tags:
if tag not in self.tags:
self.tags[tag] = []
if not isinstance(abstracts, list):
abstracts = [abstracts]
for abstract in abstracts:
self.tags[tag].append(abstract)
def tagged(self, tag: str) -> List:
"""
Resolve all of the bindings for a given tag.
"""
results = []
if tag in self.tags:
for abstract in self.tags[tag]:
results.append(self.make(abstract))
return results
def alias(self, abstract: ClassAnnotation, alias: ClassAnnotation):
"""
Alias a type to a different name.
"""
self.aliases[alias] = abstract
if abstract not in self.abstractAliases:
self.abstractAliases[abstract] = []
self.abstractAliases[abstract].append(alias)
def rebinding(self, abstract: ClassAnnotation, callback: Callable):
"""
Bind a new callback to an abstract's rebind event.
"""
abstract = self.get_alias(abstract)
self.reboundCallbacks[abstract].append(callback)
if self.bound(abstract):
self.make(abstract)
def refresh(self, abstract: ClassAnnotation, target: Any, method: str) -> Any:
"""
Refresh an instance on the given target and method.
"""
def closure(app, instance):
func = getattr(target, method)
func(instance)
return self.rebinding(abstract, closure)
def rebound(self, abstract: ClassAnnotation) -> None:
"""
Fire the "rebound" callbacks for the given abstract type.
"""
instance = self.make(abstract)
for callback in self.get_rebound_callbacks(abstract):
callback(self, instance)
def get_rebound_callbacks(self, abstract: ClassAnnotation) -> List[Callable[[ContainerInterface, Any], Any]]:
"""
Get the rebound callbacks for a given type.
"""
if abstract in self.reboundCallbacks:
return self.reboundCallbacks[abstract]
return []
def wrap(self, callback: Callable, parameters: Parameters) -> Callable:
"""
Wrap the given closure such that its dependencies will be injected when executed.
"""
def closure():
self.call(callback, parameters)
return closure
    def call(self, callback: Callable, parameters: Parameters = None, default_method: Callable = None):
        """
        Call the given Closure / class@method and inject its dependencies.
        """
        # Delegates to the helper module imported as `bound`
        # (illuminate_core.container.bound).
        return bound.call(self, callback, parameters, default_method)
def factory(self, abstract: ClassAnnotation) -> Callable:
"""
Get a closure to resolve the given type from the container.
"""
def closure():
return self.make(abstract)
return closure
    def make_with(self, abstract: ClassAnnotation, parameters: Parameters = None) -> Any:
        """
        An alias function name for make().
        """
        # Thin delegate kept for API compatibility; see make().
        return self.make(abstract, parameters)
def make(self, abstract: ClassAnnotation, parameters: Parameters = None) -> Any:
"""
Resolve the given type from the container.
"""
if parameters is None:
parameters = []
return self.resolve(abstract, parameters)
    def get(self, name):
        # PSR-11 style accessor: resolve known entries; otherwise signal
        # "not found" instead of attempting to build an unknown type.
        if self.has(name):
            return self.resolve(name)
        raise EntryNotFoundException()
def resolve(self, abstract: ClassAnnotation, parameters: Parameters = None) -> Any:
"""
Resolve the given type from the container.
"""
if parameters is None:
parameters = []
abstract = self.get_alias(abstract)
needs_contextual_build = len(parameters) > 0 or self.get_contextual_concrete(abstract) is not None
if abstract in self.instances and not needs_contextual_build:
return self.instances[abstract]
self.withParameters.append(parameters)
concrete = self.get_concrete(abstract)
if self.is_buildable(concrete, abstract):
obj = self.build(concrete)
else:
obj = self.make(concrete)
for extender in self.get_extenders(abstract):
obj = extender(obj, self)
if self.is_shared(abstract) and not needs_contextual_build:
self.instances[abstract] = obj
self.fire_resolving_callbacks(abstract, obj)
self._resolved[abstract] = True
self.withParameters.pop()
return obj
def get_concrete(self, abstract: ClassAnnotation) -> Any:
"""
Get the concrete type for a given abstract.
"""
concrete = self.get_contextual_concrete(abstract)
if concrete is not None:
return concrete
if abstract in self.bindings:
return self.bindings[abstract]['concrete']
return abstract
def get_contextual_concrete(self, abstract: ClassAnnotation) -> Any:
binding = self.find_in_contextual_bindings(abstract)
if binding is not None:
return binding
if abstract not in self.abstractAliases or len(self.abstractAliases[abstract]) == 0:
return None
for alias in self.abstractAliases[abstract]:
binding = self.find_in_contextual_bindings(alias)
if binding is not None:
return binding
return None
def find_in_contextual_bindings(self, abstract: str) -> Any:
last_stack = self.buildStack[-1] if len(self.buildStack) > 0 else None
if last_stack is not None and last_stack in self.contextual and abstract in self.contextual[last_stack]:
return self.contextual[last_stack][abstract]
return None
    def is_buildable(self, concrete: Any, abstract: ClassAnnotation) -> bool:
        # Buildable directly when the binding points at itself or the concrete
        # is callable (class or factory closure); otherwise it is another
        # abstract key that resolve() must make() recursively.
        return concrete == abstract or callable(concrete)
def build(self, concrete: Union[Callable, ClassAnnotation]) -> Any:
"""
Instantiate a concrete instance of the given type.
"""
if callable(concrete) and type(concrete) is not type:
return call_user_func(concrete, self, *self.get_last_parameter_override())
self.buildStack.append(concrete)
constructor = getattr(concrete, '__init__')
dependencies | |
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[DocumentState]',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_document_state_by_id(self, id, **kwargs):
"""
Get a documentState by its unique ID
Retrieve a documentState by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_document_state_by_id(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The identifier of the resource. (required)
:param str select: Select which paths will be returned by the query. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#select)
:param str populate: Specify which paths to populate. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#populate)
:return: DocumentState
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'select', 'populate']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_document_state_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_document_state_by_id`")
resource_path = '/documentStates/{id}'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
if 'select' in params:
query_params['select'] = params['select']
if 'populate' in params:
query_params['populate'] = params['populate']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'text/html'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['apikey', 'basic']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DocumentState',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_document_state_case_record(self, id, **kwargs):
"""
Retrieves the linked caseRecord.
Retrieves the linked caseRecord.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_document_state_case_record(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The ID of a DocumentState. (required)
:return: CaseRecord
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_document_state_case_record" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_document_state_case_record`")
resource_path = '/documentStates/{id}/caseRecord'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'text/html'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['apikey', 'basic']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CaseRecord',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_document_state_document(self, id, **kwargs):
"""
Retrieves the linked document.
Retrieves the linked document.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_document_state_document(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The ID of a DocumentState. (required)
:return: RequiredDocument
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_document_state_document" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_document_state_document`")
resource_path = '/documentStates/{id}/document'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'text/html'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['apikey', 'basic']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RequiredDocument',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def query_document_state(self, **kwargs):
"""
Query some documentStates
Query over documentStates.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.query_document_state(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str select: Select which paths will be returned by the query. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#select)
:param str populate: Specify which paths to populate. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#populate)
:param str sort: Set the fields by which to sort. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#sort)
:param bool count: Set to true to return count instead of documents. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#count)
:param int skip: How many documents to skip. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#skip)
:param int limit: The maximum number of documents to send. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#limit)
:param str conditions: Set the conditions used to find or remove the document(s). [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#conditions)
:param str distinct: Set to a path name to retrieve an array of distinct values. [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#distinct)
:param str hint: Add an index hint to the query (must be enabled per controller). [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#hint)
:param str comment: Add a comment to a query (must be enabled per controller). [doc](https://github.com/wprl/baucis/wiki/Query-String-Parameters#comment)
:return: list[DocumentState]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['select', 'populate', 'sort', 'count', 'skip', 'limit', 'conditions', 'distinct', 'hint', 'comment']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method query_document_state" % key
)
params[key] = val
del params['kwargs']
resource_path = '/documentStates'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'select' in params:
query_params['select'] = params['select']
if 'populate' in params:
query_params['populate'] = params['populate']
if 'sort' in params:
query_params['sort'] = params['sort']
if 'count' in params:
query_params['count'] = params['count']
if 'skip' in params:
query_params['skip'] = params['skip']
if 'limit' in params:
query_params['limit'] = params['limit']
if 'conditions' in params:
query_params['conditions'] = params['conditions']
if 'distinct' in params:
query_params['distinct'] = params['distinct']
if 'hint' in params:
query_params['hint'] = params['hint']
if 'comment' in params:
query_params['comment'] = params['comment']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'text/html'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['apikey', 'basic']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[DocumentState]',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def set_document_state_case_record(self, id, document, **kwargs):
"""
Link CaseRecord.
Link CaseRecord.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.set_document_state_case_record(id, document, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The ID of a DocumentState. (required)
:param BodyIdParameter document: The ID of a caseRecord. (required)
:return: DocumentState
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'document']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method set_document_state_case_record" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `set_document_state_case_record`")
# verify the required parameter 'document' is set
if ('document' not in params) or (params['document'] is None):
raise ValueError("Missing the required parameter `document` when calling `set_document_state_case_record`")
resource_path = '/documentStates/{id}/caseRecord'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = | |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# make_unicode_property_data.py
# Copyright (c) 2016-2018 K.Kosako
import sys
import re
# POSIX bracket-expression classes emitted alongside the Unicode properties.
POSIX_LIST = [
    'NEWLINE', 'Alpha', 'Blank', 'Cntrl', 'Digit', 'Graph', 'Lower',
    'Print', 'Punct', 'Space', 'Upper', 'XDigit', 'Word', 'Alnum', 'ASCII'
]
# Highest valid Unicode code point (U+10FFFF).
MAX_CODE_POINT = 0x10ffff
GRAPHEME_CLUSTER_BREAK_NAME_PREFIX = 'Grapheme_Cluster_Break_'
# UnicodeData.txt "<Name, First>" / "<Name, Last>" range markers.
UD_FIRST_REG = re.compile("<.+,\s*First>")
UD_LAST_REG = re.compile("<.+,\s*Last>")
# "# Total code points:" / "# Total elements:" summary comment lines.
PR_TOTAL_REG = re.compile("#\s*Total\s+(?:code\s+points|elements):")
# "XXXX[..YYYY] ; PropName" data line.
# NOTE(review): the ".." between the two hex groups is unescaped, so it
# matches any two characters; it works for well-formed data files but is
# looser than intended -- confirm before tightening to "\.\.".
PR_LINE_REG = re.compile("([0-9A-Fa-f]+)(?:..([0-9A-Fa-f]+))?\s*;\s*(\w+)")
# "Alias ; Name" alias line.
PA_LINE_REG = re.compile("(\w+)\s*;\s*(\w+)")
# "sc|gc ; short ; long [; extra]" property-value alias line.
PVA_LINE_REG = re.compile("(sc|gc)\s*;\s*(\w+)\s*;\s*(\w+)(?:\s*;\s*(\w+))?")
# "XXXX..YYYY ; Block Name" line (Blocks.txt).
BL_LINE_REG = re.compile("([0-9A-Fa-f]+)\.\.([0-9A-Fa-f]+)\s*;\s*(.*)")
# "#  <name>-<version>.txt" header comment; captures the version string.
VERSION_REG = re.compile("#\s*.*-(\d+\.\d+\.\d+)\.txt")
VERSION_INFO = None  # Unicode version string, set by check_version_info()
DIC = { }  # property data accumulated while parsing -- see parser functions
KDIC = { }  # per-property bookkeeping -- populated elsewhere in this script
PropIndex = { }  # property name -> table index, filled by print_prop_and_index()
PROPERTY_NAME_MAX_LEN = 0
PROPS = None
def normalize_prop_name(name):
    """Lower-case *name* and strip spaces/underscores (loose property matching)."""
    return re.sub(r'[ _]', '', name).lower()
def fix_block_name(name):
    """Turn a Unicode block name into an 'In_*' property identifier."""
    return 'In_' + re.sub(r'[- ]+', '_', name)
def check_version_info(s):
    """Record the Unicode data-file version if comment line *s* carries one."""
    global VERSION_INFO
    match = VERSION_REG.match(s)
    if match is None:
        return
    VERSION_INFO = match.group(1)
def print_ranges(ranges):
    """Print each inclusive (start, end) range in hex, then the number of ranges."""
    # NOTE: Python 2 print statements — this generator script targets Python 2.
    for (start, end) in ranges:
        print "0x%06x, 0x%06x" % (start, end)
    print len(ranges)
def print_prop_and_index(prop, i):
    """Print a 'prop, index' table row and record the index in the global PropIndex."""
    print "%-35s %3d" % (prop + ',', i)
    PropIndex[prop] = i
# Cache of already-emitted range tables, keyed by property name, so that a
# property with identical data is emitted as a #define alias rather than a
# duplicated table.
PRINT_CACHE = { }
def print_property(prop, data, desc):
    """Emit the C range table 'CR_<prop>', or a #define to an identical earlier table."""
    print ''
    print "/* PROPERTY: '%s': %s */" % (prop, desc)
    # Reuse an earlier table when its range data compares equal.
    prev_prop = dic_find_by_value(PRINT_CACHE, data)
    if prev_prop is not None:
        print "#define CR_%s CR_%s" % (prop, prev_prop)
    else:
        PRINT_CACHE[prop] = data
        print "static const OnigCodePoint"
        print "CR_%s[] = { %d," % (prop, len(data))
        for (start, end) in data:
            print "0x%04x, 0x%04x," % (start, end)
        print "}; /* END of CR_%s */" % prop
def dic_find_by_value(dic, v):
    """Return the first key of *dic* whose value equals *v*, or None if absent."""
    for k in dic:
        if dic[k] == v:
            return k
    return None
def make_reverse_dic(dic):
    """Invert *dic*: map each value to the list of keys that carried it."""
    rev = {}
    for key, val in dic.items():
        rev.setdefault(val, []).append(key)
    return rev
def normalize_ranges(in_ranges, sort=False):
    """Merge overlapping or adjacent inclusive (start, end) pairs.

    *in_ranges* must already be ordered by start unless sort=True.
    Two ranges merge when they overlap or touch (previous end >=
    next start - 1).  Returns a new list; the input is not mutated.
    """
    ranges = sorted(in_ranges) if sort else in_ranges
    r = []
    prev = None
    for (start, end) in ranges:
        # FIX: the old test 'prev >= start - 1' relied on Python 2 ordering
        # None below every integer; on Python 3 it raises TypeError for the
        # first range.  The explicit guard keeps Python 2 behavior intact.
        if prev is not None and prev >= start - 1:
            (pstart, pend) = r.pop()
            end = max(pend, end)
            start = pstart
        r.append((start, end))
        prev = end
    return r
def inverse_ranges(in_ranges):
    """Return the complement of *in_ranges* within [0, MAX_CODE_POINT].

    *in_ranges* must be sorted, non-overlapping, inclusive (start, end)
    pairs.
    """
    r = []
    prev = 0x000000
    for (start, end) in in_ranges:
        if prev < start:
            r.append((prev, start - 1))
        prev = end + 1
    # FIX: was 'prev < MAX_CODE_POINT', which dropped the single code point
    # MAX_CODE_POINT whenever the last input range ended exactly at
    # MAX_CODE_POINT - 1.
    if prev <= MAX_CODE_POINT:
        r.append((prev, MAX_CODE_POINT))
    return r
def add_ranges(r1, r2):
    """Union of two range lists, sorted and merged."""
    return normalize_ranges(r1 + r2, True)
def sub_one_range(one_range, rs):
    """Subtract every range in *rs* from the single inclusive range *one_range*.

    *rs* must be sorted and non-overlapping.  Returns the leftover pieces
    as a list of inclusive (start, end) pairs.
    """
    out = []
    lo, hi = one_range
    for cut_lo, cut_hi in rs:
        if lo <= cut_lo <= hi:
            # Cut begins inside the remaining span: keep the part before it.
            if cut_lo > lo:
                out.append((lo, cut_lo - 1))
            if cut_hi >= hi:
                return out
            lo = cut_hi + 1
        elif cut_lo < lo and cut_hi >= lo:
            # Cut overlaps the left edge of the remaining span.
            if cut_hi < hi:
                lo = cut_hi + 1
            else:
                return out
    out.append((lo, hi))
    return out
def sub_ranges(r1, r2):
    """Subtract range list *r2* from range list *r1*."""
    result = []
    for piece in r1:
        result.extend(sub_one_range(piece, r2))
    return result
def add_ranges_in_dic(dic):
    """Union of all the range lists stored as values of *dic*."""
    combined = []
    for ranges in dic.values():
        combined += ranges
    return normalize_ranges(combined, True)
def normalize_ranges_in_dic(dic, sort=False):
    """Normalize every range list stored in *dic*, in place."""
    for key in dic:
        dic[key] = normalize_ranges(dic[key], sort)
def merge_dic(to_dic, from_dic):
    """Merge *from_dic* into *to_dic*; warn on stderr about key collisions (from_dic wins)."""
    to_keys = to_dic.keys()
    from_keys = from_dic.keys()
    common = list(set(to_keys) & set(from_keys))
    if len(common) != 0:
        # Python 2 chevron print to stderr.
        print >> sys.stderr, "merge_dic: collision: %s" % sorted(common)
    to_dic.update(from_dic)
def merge_props(to_props, from_props):
    """Append *from_props* to *to_props*; warn on stderr about duplicates (kept anyway)."""
    common = list(set(to_props) & set(from_props))
    if len(common) != 0:
        # Python 2 chevron print to stderr.
        print >> sys.stderr, "merge_props: collision: %s" % sorted(common)
    to_props.extend(from_props)
def add_range_into_dic(dic, name, start, end):
    """Append the inclusive range (start, end) to dic[name], creating the list on first use."""
    dic.setdefault(name, []).append((start, end))
def list_sub(a, b):
    """Elements of *a* not present in *b*, as an unordered, de-duplicated list."""
    return list(set(a) - set(b))
def parse_unicode_data_file(f):
    """Parse UnicodeData.txt from open file object *f*.

    Returns (dic, assigned): *dic* maps each General_Category value (and
    its one-letter major category) to its code-point ranges; *assigned* is
    the list of all assigned ranges.
    """
    dic = { }
    assigned = []
    for line in f:
        s = line.strip()
        if len(s) == 0:
            continue
        if s[0] == '#':
            continue
        # Fields: code;name;general-category;...
        a = s.split(';')
        code = int(a[0], 16)
        desc = a[1]
        prop = a[2]
        # '<..., First>' / '<..., Last>' pairs describe a whole range with
        # two records; adding is deferred until the matching Last is seen.
        if UD_FIRST_REG.match(desc) is not None:
            start = code
            end = None
        elif UD_LAST_REG.match(desc) is not None:
            end = code
        else:
            start = end = code
        if end is not None:
            assigned.append((start, end))
            add_range_into_dic(dic, prop, start, end)
            # A two-letter category (e.g. 'Lu') also feeds its one-letter
            # major category ('L').
            if len(prop) == 2:
                add_range_into_dic(dic, prop[0:1], start, end)
    normalize_ranges_in_dic(dic)
    return dic, assigned
def parse_properties(path, klass, prop_prefix = None):
    """Parse a UCD property file at *path*.

    *klass* is recorded into the global KDIC for every property found;
    *prop_prefix*, if given, is prepended to each property name.  Also
    captures the data-file version via check_version_info.
    Returns (dic, props): the range dict and property names in file order.
    """
    with open(path, 'r') as f:
        dic = { }
        prop = None
        props = []
        for line in f:
            s = line.strip()
            if len(s) == 0:
                continue
            if s[0] == '#':
                # Comment lines may carry the data-file version number.
                if VERSION_INFO is None:
                    check_version_info(s)
            m = PR_LINE_REG.match(s)
            if m:
                prop = m.group(3)
                if prop_prefix is not None:
                    prop = prop_prefix + prop
                # Group 2 present => 'start..end' range; absent => single point.
                if m.group(2):
                    start = int(m.group(1), 16)
                    end = int(m.group(2), 16)
                    add_range_into_dic(dic, prop, start, end)
                else:
                    start = int(m.group(1), 16)
                    add_range_into_dic(dic, prop, start, start)
            elif PR_TOTAL_REG.match(s) is not None:
                # '# Total code points:' closes a property section; register it.
                KDIC[prop] = klass
                props.append(prop)
    normalize_ranges_in_dic(dic)
    return (dic, props)
def parse_property_aliases(path):
    """Read PropertyAliases.txt and map each alias to its canonical name."""
    aliases = { }
    with open(path, 'r') as f:
        for line in f:
            stripped = line.strip()
            if not stripped:
                continue
            m = PA_LINE_REG.match(stripped)
            if m is None:
                continue
            alias, canonical = m.group(1), m.group(2)
            # Skip identity entries.
            if alias == canonical:
                continue
            aliases[alias] = canonical
    return aliases
def parse_property_value_aliases(path):
    """Read PropertyValueAliases.txt for the 'sc' and 'gc' categories.

    Returns a dict mapping each alias to its canonical name.  For scripts
    ('sc') the long name (third field) is canonical; for general
    categories ('gc') the short name (second field) is canonical.
    """
    aliases = { }
    with open(path, 'r') as f:
        for line in f:
            stripped = line.strip()
            if not stripped:
                continue
            m = PVA_LINE_REG.match(stripped)
            if m is None:
                continue
            cat, x2, x3, x4 = m.group(1), m.group(2), m.group(3), m.group(4)
            if cat == 'sc':
                canonical = x3
                if x2 != canonical:
                    aliases[x2] = canonical
                if x4 and x4 != canonical:
                    aliases[x4] = canonical
            else:
                canonical = x2
                if x3 != canonical:
                    aliases[x3] = canonical
                if x4 and x4 != canonical:
                    aliases[x4] = canonical
    return aliases
def parse_blocks(path):
    """Read Blocks.txt.

    Returns ({block property name: ranges}, [block names in file order]),
    with a synthesized 'In_No_Block' entry covering every code point not
    claimed by a listed block.
    """
    dic = { }
    blocks = []
    with open(path, 'r') as f:
        for line in f:
            stripped = line.strip()
            if not stripped:
                continue
            m = BL_LINE_REG.match(stripped)
            if m is None:
                continue
            first = int(m.group(1), 16)
            last = int(m.group(2), 16)
            name = fix_block_name(m.group(3))
            add_range_into_dic(dic, name, first, last)
            blocks.append(name)
    noblock = fix_block_name('No_Block')
    dic[noblock] = inverse_ranges(add_ranges_in_dic(dic))
    blocks.append(noblock)
    return dic, blocks
def add_primitive_props(assigned):
    """Derive the primitive properties from the assigned ranges, into the global DIC.

    Adds Assigned, Any, ASCII, NEWLINE and Cn (unassigned), completes the
    'C' (Other) category with Cn, and builds LC (cased letters) from
    Ll | Lt | Lu.
    """
    DIC['Assigned'] = normalize_ranges(assigned)
    DIC['Any'] = [(0x000000, 0x10ffff)]
    DIC['ASCII'] = [(0x000000, 0x00007f)]
    DIC['NEWLINE'] = [(0x00000a, 0x00000a)]
    DIC['Cn'] = inverse_ranges(DIC['Assigned'])
    # UnicodeData.txt never lists unassigned points, so 'C' must gain Cn here.
    DIC['C'].extend(DIC['Cn'])
    DIC['C'] = normalize_ranges(DIC['C'], True)
    d = []
    d.extend(DIC['Ll'])
    d.extend(DIC['Lt'])
    d.extend(DIC['Lu'])
    DIC['LC'] = normalize_ranges(d, True)
def add_posix_props(dic):
    """Synthesize the POSIX bracket classes ([[:alpha:]] etc.) into *dic*.

    Each class is built from the Unicode properties already present:
    Alnum = Alphabetic | Nd, Blank = TAB | Zs, Word = Alphabetic | M | Nd | Pc,
    Graph = Any minus (White_Space | Cc | Cs | Cn), Print = Graph | Zs.
    """
    alnum = []
    alnum.extend(dic['Alphabetic'])
    alnum.extend(dic['Nd']) # Nd == Decimal_Number
    alnum = normalize_ranges(alnum, True)
    # Blank: horizontal tab plus the space separators.
    blank = [(0x0009, 0x0009)]
    blank.extend(dic['Zs']) # Zs == Space_Separator
    blank = normalize_ranges(blank, True)
    word = []
    word.extend(dic['Alphabetic'])
    word.extend(dic['M']) # M == Mark
    word.extend(dic['Nd'])
    word.extend(dic['Pc']) # Pc == Connector_Punctuation
    word = normalize_ranges(word, True)
    # Graph: everything visible — subtract whitespace, controls,
    # surrogates and unassigned points from 'Any'.
    graph = sub_ranges(dic['Any'], dic['White_Space'])
    graph = sub_ranges(graph, dic['Cc'])
    graph = sub_ranges(graph, dic['Cs']) # Cs == Surrogate
    graph = sub_ranges(graph, dic['Cn']) # Cn == Unassigned
    graph = normalize_ranges(graph, True)
    # Print: graphic characters plus space separators.
    p = []
    p.extend(graph)
    p.extend(dic['Zs'])
    p = normalize_ranges(p, True)
    dic['Alpha'] = dic['Alphabetic']
    dic['Upper'] = dic['Uppercase']
    dic['Lower'] = dic['Lowercase']
    dic['Punct'] = dic['P'] # P == Punctuation
    dic['Digit'] = dic['Nd']
    # XDigit is ASCII-only by definition: 0-9, A-F, a-f.
    dic['XDigit'] = [(0x0030, 0x0039), (0x0041, 0x0046), (0x0061, 0x0066)]
    dic['Alnum'] = alnum
    dic['Space'] = dic['White_Space']
    dic['Blank'] = blank
    dic['Cntrl'] = dic['Cc']
    dic['Word'] = word
    dic['Graph'] = graph
    dic['Print'] = p
def set_max_prop_name(name):
    """Track the longest property name seen so far (used for C buffer sizing)."""
    global PROPERTY_NAME_MAX_LEN
    PROPERTY_NAME_MAX_LEN = max(PROPERTY_NAME_MAX_LEN, len(name))
def entry_prop_name(name, index):
    """Record *name*'s length and, in list mode, log non-POSIX entries to UPF."""
    set_max_prop_name(name)
    # Python 2 chevron print; UPF is assumed to be an open list file —
    # NOTE(review): UPF is not defined in this part of the file, confirm
    # it is opened before the first call.
    if OUTPUT_LIST_MODE and index >= len(POSIX_LIST):
        print >> UPF, "%3d: %s" % (index, name)
def entry_and_print_prop_and_index(name, index):
    """Register *name*, then print its normalized form with *index*."""
    entry_prop_name(name, index)
    print_prop_and_index(normalize_prop_name(name), index)
def parse_and_merge_properties(path, klass):
    """Parse *path* as property data of kind *klass* and merge it into DIC/PROPS."""
    parsed_dic, parsed_props = parse_properties(path, klass)
    merge_dic(DIC, parsed_dic)
    merge_props(PROPS, parsed_props)
    return parsed_dic, parsed_props
### main ###
argv = sys.argv
argc = len(argv)
POSIX_ONLY = False
INCLUDE_GRAPHEME_CLUSTER_DATA = False
for i in range(1, argc):
arg = argv[i]
if arg == '-posix':
POSIX_ONLY = True
elif arg == '-gc':
INCLUDE_GRAPHEME_CLUSTER_DATA = True
else:
print >> sys.stderr, "Invalid argument: %s" % arg
OUTPUT_LIST_MODE = not(POSIX_ONLY)
with open('UnicodeData.txt', 'r') as f:
dic, assigned = parse_unicode_data_file(f)
DIC = dic
add_primitive_props(assigned)
PROPS = DIC.keys()
PROPS = list_sub(PROPS, POSIX_LIST)
parse_and_merge_properties('DerivedCoreProperties.txt', 'Derived Property')
dic, props = parse_and_merge_properties('Scripts.txt', 'Script')
DIC['Unknown'] = inverse_ranges(add_ranges_in_dic(dic))
parse_and_merge_properties('PropList.txt', 'Binary Property')
parse_and_merge_properties('emoji-data.txt', 'Emoji Property')
PROPS.append('Unknown')
KDIC['Unknown'] = 'Script'
ALIASES = parse_property_aliases('PropertyAliases.txt')
a = parse_property_value_aliases('PropertyValueAliases.txt')
merge_dic(ALIASES, a)
dic, BLOCKS = parse_blocks('Blocks.txt')
merge_dic(DIC, dic)
if INCLUDE_GRAPHEME_CLUSTER_DATA:
dic, props = parse_properties('GraphemeBreakProperty.txt',
'GraphemeBreak Property',
GRAPHEME_CLUSTER_BREAK_NAME_PREFIX)
merge_dic(DIC, dic)
merge_props(PROPS, props)
#prop = GRAPHEME_CLUSTER_BREAK_NAME_PREFIX + 'Other'
#DIC[prop] = inverse_ranges(add_ranges_in_dic(dic))
#PROPS.append(prop)
#KDIC[prop] = 'GrapemeBreak Property'
add_posix_props(DIC)
PROPS = sorted(PROPS)
s = '''%{
/* Generated by make_unicode_property_data.py. */
'''
print s
for prop in POSIX_LIST:
print_property(prop, DIC[prop], "POSIX [[:%s:]]" % prop)
print ''
if not(POSIX_ONLY):
for prop in PROPS:
klass = KDIC.get(prop, None)
if klass is None:
n = len(prop)
if n == 1:
klass = 'Major Category'
elif n == 2:
klass = 'General Category'
else:
klass = '-'
print_property(prop, DIC[prop], klass)
for block in BLOCKS:
print_property(block, DIC[block], 'Block')
print ''
print "static const OnigCodePoint*\nconst CodeRanges[] = {"
for prop in POSIX_LIST:
print " CR_%s," % | |
#!/usr/bin/env python3
# coding: utf-8
#
# A script that collects referentially transparent functions related to drawing.
import curses
import utils
# Initialize curses: hide the cursor and prepare the 16 color pairs,
# keeping the terminal's default (transparent) background.
def initialize():
    curses.curs_set(0)
    curses.start_color()
    curses.use_default_colors()
    # Pair n renders foreground color n; -1 means default background.
    for pair in range(16):
        curses.init_pair(pair, pair, -1)
# Draw a rectangular border for rect = (x, y, w, h) on window *win*.
def rectbox(win, rect):
    x, y, w, h = rect
    # Corner coordinates: top-left (x0, y0), bottom-right (x1, y1).
    x0, y0 = x, y
    x1, y1 = x + w - 1, y + h - 1
    # Corners first; insch inserts, shifting the rest of the line right.
    win.insch(y0, x0, curses.ACS_ULCORNER)
    win.insch(y0, x1, curses.ACS_URCORNER)
    win.insch(y1, x0, curses.ACS_LLCORNER)
    win.insch(y1, x1, curses.ACS_LRCORNER)
    # Horizontal edges, leaving the corner cells untouched.
    win.hline(y0, x0 + 1, curses.ACS_HLINE, w - 2)
    win.hline(y1, x0 + 1, curses.ACS_HLINE, w - 2)
    # Vertical edges, likewise skipping the corners.
    win.vline(y0 + 1, x0, curses.ACS_VLINE, h - 2)
    win.vline(y0 + 1, x1, curses.ACS_VLINE, h - 2)
# Draw a rectangular border and split its interior with vertical dividers
# at the absolute screen columns listed in *cols_vline*.
def rectbox_with_vlines(win, rect, cols_vline):
    x, y, w, h = rect
    top, bottom = y, y + h - 1
    # Outer frame first.
    rectbox(win, rect)
    # Each divider gets a tee where it meets the frame and a vertical
    # line in between (the tees are not overdrawn).
    for col in cols_vline:
        win.addch(top, col, curses.ACS_TTEE)
        win.addch(bottom, col, curses.ACS_BTEE)
        win.vline(top + 1, col, curses.ACS_VLINE, h - 2)
# A function that draws a list box. The color of the focused line is inverted.
# Each entry of `contents` is a 4-tuple with the following format:
# (left string, right string, selected flag, attribute)
# (note: the loop below unpacks four fields, not three).
def listbox(win, rect, contents, focus, cmap):
    # Decompose the rect into (x, y, w, h) for making it easier to use later.
    x, y, w, h = rect
    # Clamp focus into the valid range and copy it (it is reassigned below).
    # NOTE(review): the upper clamp bound is len(contents), not
    # len(contents) - 1 — presumably utils.clip treats it as inclusive;
    # confirm against utils.
    focus = int(utils.clip(0, focus, len(contents)))
    # The length of `contents` will be used a lot in the following steps, so define an alias.
    n = len(contents)
    # Standard cursor row at which the focused line sits when the list is
    # long enough (about two thirds down the box).
    c0 = int(2 * h / 3 + 1)
    # Visible window [idx_bgn, idx_end) derived from the standard cursor
    # position and the focus position.
    idx_bgn = utils.clip(0, focus - c0, n - h)
    idx_end = utils.clip(h, focus - c0 + h, n)
    # Restrict contents and focus to the visible part only.
    # The variable focus can be overwritten because it was copied earlier.
    contents = contents[idx_bgn:idx_end]
    focus -= idx_bgn
    # The actual drawing work is performed below.
    for row, (left, right, selected, attr) in enumerate(contents):
        # Format the line to exactly the box width: clip the left part
        # (appending "~" when truncated) so left + right always fit.
        line = utils.clipstr(left, w - utils.strlen(right) - 4, fill = " ", append = "~")
        line = " " + line + " " + right + " "
        # If attr is a small integer, convert it to the corresponding color pair.
        if isinstance(attr, int) and 0 <= attr <= 7:
            attr = curses.color_pair(attr)
        # If a file is selected, add "*" at the beginning and force the
        # highlight color from cmap.
        if selected:
            line = "*" + line[1:]
            attr = curses.color_pair(cmap.get("*", 0))
        # Add the invert attribute if the line matches the cursor position.
        if row == focus: attr += curses.A_REVERSE
        # Draw the line on the screen.
        win.addstr(y + row, x, line, attr)
# A function that displays the given string. Supports ANSI SGR color codes.
def textbox(win, rect, text):
    """Draw *text* (possibly containing ANSI escape sequences) inside *rect* on *win*.

    Each line is split into (SGR parameters, following text) pairs; the
    last ;-separated SGR parameter is interpreted as a foreground color.
    """
    x, y, w, h = rect
    for row, line in enumerate(text.split("\n")):
        # Clip the number of lines to the height of the drawing area.
        if row >= h: break
        # Tokenize: (1) split on the CSI introducer "\x1b[", (2) split each
        # piece once on "m" (the SGR terminator).  A leading piece with no
        # escape gets an empty parameter string prepended.
        tokens = [token.split("m", 1) for token in line.split("\x1b[")]
        tokens = [["", "m".join(tokens[0])]] + tokens[1:]
        # Strip trailing whitespace from the line.
        # FIX: str.rstrip returns a new string; the old code
        # `tokens[-1][-1].rstrip()` discarded the result, so trailing
        # whitespace was never actually removed.
        tokens[-1][-1] = tokens[-1][-1].rstrip()
        # Number of columns already drawn on this row.
        col = 0
        # Move the cursor to the left edge of this row.
        win.move(y + row, x)
        for color, value in tokens:
            # Clip the text so the drawing does not exceed the rect width.
            if col + utils.strlen(value) >= w:
                value = utils.clipstr(value, w - col, append = "~")
            # Interpret the SGR parameters: the last field is the color.
            # Malformed sequences fall back to the default color.
            try:
                color = int(color.split(";")[-1])
            except ValueError:  # narrowed from bare except; int() raises ValueError here
                color = 0
            # Map ANSI color codes to curses pairs
            # (30-37: normal -> pairs 0-7, 90-97: bright -> pairs 8-15).
            if 30 <= color <= 37: attr = curses.color_pair(color - 30)
            elif 90 <= color <= 97: attr = curses.color_pair(color - 82)
            else : attr = curses.color_pair(0)
            # Draw the fragment on the screen.
            win.addstr(y + row, x + col, value, attr)
            # Advance by the number of characters just drawn.
            col += utils.strlen(value)
# A function that draws a window screen in filer mode.
# The variable contents1 is the contents of the left window,
# contents2 is the list containing the contents of the center window.
# The contents3 must be a function which can be called by "contents3(win, rect)".
def draw_filer_window(win, width_ratios, headers, contents1, contents2, contents3, focus1, focus2, show_preview, cmap):
# Clear the screen (actually it is cleared when refresh() is called).
win.erase()
# Get the maximum width of the screen.
h_max, w_max = win.getmaxyx()
# Get the number of lines in the header and footer.
# The height of the frame is reduced by the number of lines.
n_headers = len(headers)
# Just to be on the safe side, limit the length of the input width ratio list to 3 and also normalize it.
width_ratios = [r / sum(width_ratios[:3]) for r in width_ratios[:3]]
# Calculate the width of the list box (note that it is the width of its contents, not the width of the frame).
# Note that each window size is different whether showing preview window or not.
w1 = round(w_max * width_ratios[0]) - 1
w2 = round(w_max * width_ratios[1]) - 1 if show_preview else w_max - w1 - 3
w3 = w_max - (w1 + w2) - 4 if show_preview else 0
# Draw the | |
lambda self: 100
self.assertEqual(int(a), 100)
self.assertEqual(a.foobar, 2)
self.assertNotHasAttr(a, "spam")
def mygetattr(self, name):
jeżeli name == "spam":
zwróć "spam"
podnieś AttributeError
C.__getattr__ = mygetattr
self.assertEqual(a.spam, "spam")
a.new = 12
self.assertEqual(a.new, 12)
def mysetattr(self, name, value):
jeżeli name == "spam":
podnieś AttributeError
zwróć object.__setattr__(self, name, value)
C.__setattr__ = mysetattr
spróbuj:
a.spam = "not spam"
wyjąwszy AttributeError:
dalej
inaczej:
self.fail("expected AttributeError")
self.assertEqual(a.spam, "spam")
klasa D(C):
dalej
d = D()
d.foo = 1
self.assertEqual(d.foo, 1)
# Test handling of int*seq oraz seq*int
klasa I(int):
dalej
self.assertEqual("a"*I(2), "aa")
self.assertEqual(I(2)*"a", "aa")
self.assertEqual(2*I(3), 6)
self.assertEqual(I(3)*2, 6)
self.assertEqual(I(3)*I(2), 6)
# Test comparison of classes przy dynamic metaclasses
klasa dynamicmetaclass(type):
dalej
klasa someclass(metaclass=dynamicmetaclass):
dalej
self.assertNotEqual(someclass, object)
    def test_errors(self):
        # Testing errors...
        # NOTE(review): this file uses machine-translated Polish keywords
        # (klasa=class, dalej=pass, spróbuj=try, wyjąwszy=except,
        # inaczej=else, podnieś=raise, zwróć=return); it is not valid
        # stock CPython syntax.
        # Inheriting from both list and dict must fail (conflicting layouts).
        spróbuj:
            klasa C(list, dict):
                dalej
        wyjąwszy TypeError:
            dalej
        inaczej:
            self.fail("inheritance z both list oraz dict should be illegal")
        # Inheriting from a non-type must fail.
        spróbuj:
            klasa C(object, Nic):
                dalej
        wyjąwszy TypeError:
            dalej
        inaczej:
            self.fail("inheritance z non-type should be illegal")
        klasa Classic:
            dalej
        # Inheriting from a built-in function object must fail.
        spróbuj:
            klasa C(type(len)):
                dalej
        wyjąwszy TypeError:
            dalej
        inaczej:
            self.fail("inheritance z CFunction should be illegal")
        # __slots__ must be a string or an iterable of strings.
        spróbuj:
            klasa C(object):
                __slots__ = 1
        wyjąwszy TypeError:
            dalej
        inaczej:
            self.fail("__slots__ = 1 should be illegal")
        spróbuj:
            klasa C(object):
                __slots__ = [1]
        wyjąwszy TypeError:
            dalej
        inaczej:
            self.fail("__slots__ = [1] should be illegal")
        # Unrelated metaclasses have no most-derived metaclass.
        klasa M1(type):
            dalej
        klasa M2(type):
            dalej
        klasa A1(object, metaclass=M1):
            dalej
        klasa A2(object, metaclass=M2):
            dalej
        spróbuj:
            klasa B(A1, A2):
                dalej
        wyjąwszy TypeError:
            dalej
        inaczej:
            self.fail("finding the most derived metaclass should have failed")
    def test_classmethods(self):
        # Testing class methods...
        klasa C(object):
            def foo(*a): zwróć a
            goo = classmethod(foo)
        c = C()
        # A classmethod receives the class as first argument, whether
        # looked up on the class or on an instance.
        self.assertEqual(C.goo(1), (C, 1))
        self.assertEqual(c.goo(1), (C, 1))
        self.assertEqual(c.foo(1), (c, 1))
        klasa D(C):
            dalej
        d = D()
        self.assertEqual(D.goo(1), (D, 1))
        self.assertEqual(d.goo(1), (D, 1))
        self.assertEqual(d.foo(1), (d, 1))
        self.assertEqual(D.foo(d, 1), (d, 1))
        # Test for a specific crash (SF bug 528132)
        def f(cls, arg): zwróć (cls, arg)
        ff = classmethod(f)
        self.assertEqual(ff.__get__(0, int)(42), (int, 42))
        self.assertEqual(ff.__get__(0)(42), (int, 42))
        # Test super() with classmethods (SF bug 535444)
        self.assertEqual(C.goo.__self__, C)
        self.assertEqual(D.goo.__self__, D)
        self.assertEqual(super(D,D).goo.__self__, D)
        self.assertEqual(super(D,d).goo.__self__, D)
        self.assertEqual(super(D,D).goo(), (D,))
        self.assertEqual(super(D,d).goo(), (D,))
        # Verify that a non-callable will raise
        meth = classmethod(1).__get__(1)
        self.assertRaises(TypeError, meth)
        # Verify that classmethod() doesn't allow keyword args
        spróbuj:
            classmethod(f, kw=1)
        wyjąwszy TypeError:
            dalej
        inaczej:
            self.fail("classmethod shouldn't accept keyword args")
        # A classmethod object carries its own attribute dict.
        cm = classmethod(f)
        self.assertEqual(cm.__dict__, {})
        cm.x = 42
        self.assertEqual(cm.x, 42)
        self.assertEqual(cm.__dict__, {"x" : 42})
        usuń cm.x
        self.assertNotHasAttr(cm, "x")
    @support.impl_detail("the module 'xxsubtype' jest internal")
    def test_classmethods_in_c(self):
        # Testing C-based class methods...
        zaimportuj xxsubtype jako spam
        a = (1, 2, 3)
        d = {'abc': 123}
        # The class is passed through whether called on the class or an instance.
        x, a1, d1 = spam.spamlist.classmeth(*a, **d)
        self.assertEqual(x, spam.spamlist)
        self.assertEqual(a, a1)
        self.assertEqual(d, d1)
        x, a1, d1 = spam.spamlist().classmeth(*a, **d)
        self.assertEqual(x, spam.spamlist)
        self.assertEqual(a, a1)
        self.assertEqual(d, d1)
        # Calling the raw classmethod object from the class dict.
        spam_cm = spam.spamlist.__dict__['classmeth']
        x2, a2, d2 = spam_cm(spam.spamlist, *a, **d)
        self.assertEqual(x2, spam.spamlist)
        self.assertEqual(a2, a1)
        self.assertEqual(d2, d1)
        klasa SubSpam(spam.spamlist): dalej
        x2, a2, d2 = spam_cm(SubSpam, *a, **d)
        self.assertEqual(x2, SubSpam)
        self.assertEqual(a2, a1)
        self.assertEqual(d2, d1)
        # The first argument must be a subclass of the defining class.
        przy self.assertRaises(TypeError):
            spam_cm()
        przy self.assertRaises(TypeError):
            spam_cm(spam.spamlist())
        przy self.assertRaises(TypeError):
            spam_cm(list)
    def test_staticmethods(self):
        # Testing static methods...
        klasa C(object):
            def foo(*a): zwróć a
            goo = staticmethod(foo)
        c = C()
        # A staticmethod receives no implicit first argument.
        self.assertEqual(C.goo(1), (1,))
        self.assertEqual(c.goo(1), (1,))
        self.assertEqual(c.foo(1), (c, 1,))
        klasa D(C):
            dalej
        d = D()
        self.assertEqual(D.goo(1), (1,))
        self.assertEqual(d.goo(1), (1,))
        self.assertEqual(d.foo(1), (d, 1))
        self.assertEqual(D.foo(d, 1), (d, 1))
        # A staticmethod object carries its own attribute dict.
        sm = staticmethod(Nic)
        self.assertEqual(sm.__dict__, {})
        sm.x = 42
        self.assertEqual(sm.x, 42)
        self.assertEqual(sm.__dict__, {"x" : 42})
        usuń sm.x
        self.assertNotHasAttr(sm, "x")
    @support.impl_detail("the module 'xxsubtype' jest internal")
    def test_staticmethods_in_c(self):
        # Testing C-based static methods...
        zaimportuj xxsubtype jako spam
        a = (1, 2, 3)
        d = {"abc": 123}
        # No implicit first argument, whether called on the class or an instance.
        x, a1, d1 = spam.spamlist.staticmeth(*a, **d)
        self.assertEqual(x, Nic)
        self.assertEqual(a, a1)
        self.assertEqual(d, d1)
        # NOTE(review): the unpack below binds d2 but the assertion checks
        # d1 (stale from the previous call) — looks like a typo; verify.
        x, a1, d2 = spam.spamlist().staticmeth(*a, **d)
        self.assertEqual(x, Nic)
        self.assertEqual(a, a1)
        self.assertEqual(d, d1)
    def test_classic(self):
        # Testing classic classes...
        klasa C:
            def foo(*a): zwróć a
            goo = classmethod(foo)
        c = C()
        self.assertEqual(C.goo(1), (C, 1))
        self.assertEqual(c.goo(1), (C, 1))
        self.assertEqual(c.foo(1), (c, 1))
        klasa D(C):
            dalej
        d = D()
        self.assertEqual(D.goo(1), (D, 1))
        self.assertEqual(d.goo(1), (D, 1))
        self.assertEqual(d.foo(1), (d, 1))
        self.assertEqual(D.foo(d, 1), (d, 1))
        klasa E: # *not* subclassing from C
            foo = C.foo
        self.assertEqual(E().foo.__func__, C.foo) # i.e., unbound
        self.assertPrawda(repr(C.foo.__get__(C())).startswith("<bound method "))
    def test_compattr(self):
        # Testing computed attributes...
        klasa C(object):
            # A hand-rolled data descriptor, equivalent to property().
            klasa computed_attribute(object):
                def __init__(self, get, set=Nic, delete=Nic):
                    self.__get = get
                    self.__set = set
                    self.__delete = delete
                def __get__(self, obj, type=Nic):
                    zwróć self.__get(obj)
                def __set__(self, obj, value):
                    zwróć self.__set(obj, value)
                def __delete__(self, obj):
                    zwróć self.__delete(obj)
            def __init__(self):
                self.__x = 0
            def __get_x(self):
                # Each read returns the current value and post-increments it.
                x = self.__x
                self.__x = x+1
                zwróć x
            def __set_x(self, x):
                self.__x = x
            def __delete_x(self):
                usuń self.__x
            x = computed_attribute(__get_x, __set_x, __delete_x)
        a = C()
        self.assertEqual(a.x, 0)
        self.assertEqual(a.x, 1)
        a.x = 10
        self.assertEqual(a.x, 10)
        self.assertEqual(a.x, 11)
        usuń a.x
        self.assertNotHasAttr(a, 'x')
    def test_newslots(self):
        # Testing __new__ slot override...
        klasa C(list):
            def __new__(cls):
                self = list.__new__(cls)
                self.foo = 1
                zwróć self
            def __init__(self):
                # __init__ runs after __new__, so it sees foo == 1 already.
                self.foo = self.foo + 2
        a = C()
        self.assertEqual(a.foo, 3)
        self.assertEqual(a.__class__, C)
        # The override is inherited by subclasses.
        klasa D(C):
            dalej
        b = D()
        self.assertEqual(b.foo, 3)
        self.assertEqual(b.__class__, D)
    def test_altmro(self):
        # Testing mro() and overriding it...
        klasa A(object):
            def f(self): zwróć "A"
        klasa B(A):
            dalej
        klasa C(A):
            def f(self): zwróć "C"
        klasa D(B, C):
            dalej
        self.assertEqual(D.mro(), [D, B, C, A, object])
        self.assertEqual(D.__mro__, (D, B, C, A, object))
        self.assertEqual(D().f(), "C")
        # A metaclass may override mro(); this one reverses the order, so
        # lookup finds A.f before C.f.
        klasa PerverseMetaType(type):
            def mro(cls):
                L = type.mro(cls)
                L.reverse()
                zwróć L
        klasa X(D,B,C,A, metaclass=PerverseMetaType):
            dalej
        self.assertEqual(X.__mro__, (object, A, C, B, D, X))
        self.assertEqual(X().f(), "A")
        # An mro() returning unrelated classes must be rejected.
        spróbuj:
            klasa _metaclass(type):
                def mro(self):
                    zwróć [self, dict, object]
            klasa X(object, metaclass=_metaclass):
                dalej
            # In CPython, the class creation above already raises
            # TypeError, as a protection against the fact that
            # instances of X would segfault it. In other Python
            # implementations it would be ok to let the class X
            # be created, but instead get a clean TypeError on the
            # __setitem__ below.
            x = object.__new__(X)
            x[5] = 6
        wyjąwszy TypeError:
            dalej
        inaczej:
            self.fail("devious mro() zwróć nie caught")
        # An mro() containing non-classes must be rejected.
        spróbuj:
            klasa _metaclass(type):
                def mro(self):
                    zwróć [1]
            klasa X(object, metaclass=_metaclass):
                dalej
        wyjąwszy TypeError:
            dalej
        inaczej:
            self.fail("non-class mro() zwróć nie caught")
        # An mro() returning a non-sequence must be rejected.
        spróbuj:
            klasa _metaclass(type):
                def mro(self):
                    zwróć 1
            klasa X(object, metaclass=_metaclass):
                dalej
        wyjąwszy TypeError:
            dalej
        inaczej:
            self.fail("non-sequence mro() zwróć nie caught")
    def test_overloading(self):
        # Testing operator overloading...
        klasa B(object):
            "Intermediate class because object doesn't have a __setattr__"
        klasa C(B):
            def __getattr__(self, name):
                jeżeli name == "foo":
                    zwróć ("getattr", name)
                inaczej:
                    podnieś AttributeError
            def __setattr__(self, name, value):
                jeżeli name == "foo":
                    self.setattr = (name, value)
                inaczej:
                    zwróć B.__setattr__(self, name, value)
            def __delattr__(self, name):
                jeżeli name == "foo":
                    self.delattr = name
                inaczej:
                    zwróć B.__delattr__(self, name)
            def __getitem__(self, key):
                zwróć ("getitem", key)
            def __setitem__(self, key, value):
                self.setitem = (key, value)
            def __delitem__(self, key):
                self.delitem = key
        a = C()
        # Attribute hooks: 'foo' is special-cased by all three methods.
        self.assertEqual(a.foo, ("getattr", "foo"))
        a.foo = 12
        self.assertEqual(a.setattr, ("foo", 12))
        usuń a.foo
        self.assertEqual(a.delattr, "foo")
        # Item hooks with a plain key.
        self.assertEqual(a[12], ("getitem", 12))
        a[12] = 21
        self.assertEqual(a.setitem, (12, 21))
        usuń a[12]
        self.assertEqual(a.delitem, 12)
        # Slicing goes through the same item hooks, with a slice object key.
        self.assertEqual(a[0:10], ("getitem", slice(0, 10)))
        a[0:10] = "foo"
        self.assertEqual(a.setitem, (slice(0, 10), "foo"))
        usuń a[0:10]
        self.assertEqual(a.delitem, (slice(0, 10)))
    def test_methods(self):
        # Testing methods...
        klasa C(object):
            def __init__(self, x):
                self.x = x
            def foo(self):
                zwróć self.x
        c1 = C(1)
        self.assertEqual(c1.foo(), 1)
        klasa D(C):
            # boo: the plain function from the base class (rebinds per
            # instance); goo: a bound method captured from c1 (stays bound).
            boo = C.foo
            goo = c1.foo
        d2 = D(2)
        self.assertEqual(d2.foo(), 2)
        self.assertEqual(d2.boo(), 2)
        self.assertEqual(d2.goo(), 1)
        klasa E(object):
            foo = C.foo
        self.assertEqual(E().foo.__func__, C.foo) # i.e., unbound
        self.assertPrawda(repr(C.foo.__get__(C(1))).startswith("<bound method "))
def test_special_method_lookup(self):
# The lookup of special methods bypasses __getattr__ oraz
# __getattribute__, but they still can be descriptors.
def run_context(manager):
przy manager:
dalej
def iden(self):
zwróć self
def hello(self):
zwróć b"hello"
def empty_seq(self):
zwróć []
def zero(self):
zwróć 0
def complex_num(self):
zwróć 1j
def stop(self):
podnieś StopIteration
def return_true(self, thing=Nic):
zwróć Prawda
def do_isinstance(obj):
zwróć isinstance(int, obj)
def do_issubclass(obj):
zwróć issubclass(int, obj)
def do_dict_missing(checker):
klasa DictSub(checker.__class__, dict):
dalej
self.assertEqual(DictSub()["hi"], 4)
def some_number(self_, key):
self.assertEqual(key, "hi")
zwróć 4
def swallow(*args): dalej
def format_impl(self, spec):
zwróć "hello"
# It would be nice to have every special method tested here, but I'm
# only listing the ones I can remember outside of typeobject.c, since it
# does it right.
specials = [
("__bytes__", bytes, hello, set(), {}),
("__reversed__", reversed, empty_seq, set(), {}),
("__length_hint__", list, zero, set(),
{"__iter__" : iden, "__next__" : stop}),
("__sizeof__", sys.getsizeof, zero, set(), {}),
("__instancecheck__", do_isinstance, return_true, set(), {}),
("__missing__", do_dict_missing, some_number,
set(("__class__",)), | |
##########
# Code from: https://github.com/tobiasbaumann1/Adaptive_Mechanism_Design
##########
import copy
import logging
import numpy as np
import tensorflow as tf
logging.basicConfig(filename='Planning_Agent.log', level=logging.DEBUG, filemode='w')
from marltoolbox.algos.adaptive_mechanism_design.agent import Agent, convert_from_rllib_env_format
from tensorflow.python.ops import math_ops
def var_shape(x):
    """Return the static shape of tensor *x* as a plain Python list."""
    return x.get_shape().as_list()
def intprod(x):
    """Product of the entries of *x*, coerced to a Python int."""
    return int(np.prod(x))
def numel(x):
    """Total number of elements in tensor *x* (product of its static shape)."""
    return intprod(var_shape(x))
class Planning_Agent(Agent):
    def __init__(self, env, underlying_agents, learning_rate=0.01,
                 gamma=0.95, max_reward_strength=None, cost_param=0, with_redistribution=False,
                 value_fn_variant='exact', n_units=None, weight_decay=0.0, convert_a_to_one_hot=False, mean_theta=0.0,
                 loss_mul_planner=1.0, std_theta=0.1, planner_clip_norm=0.5, normalize_planner=False,
                 add_state_grad=False, planner_momentum=0.9, use_adam_optimizer=True, use_softmax_hot=True,
                 square_cost=False, normalize_against_v=False, use_v_pl=False,
                 normalize_against_vp=False, normalize_vp_separated=False):
        """Build the TF1 graph for the planning (adaptive mechanism design) agent.

        The planner observes the state and the players' joint action and
        emits per-player extra rewards (vp).  Its loss pushes the players'
        expected policy-gradient updates toward higher total value v,
        penalized by a cost term on the magnitude of the handed-out rewards.

        NOTE(review): the [2, 2] planning-action placeholder below restricts
        the 'exact' variant to 2-action matrix games — confirm for other envs.
        """
        super().__init__(env, learning_rate, gamma)
        self.underlying_agents = underlying_agents
        self.log = []
        self.max_reward_strength = max_reward_strength
        n_players = len(underlying_agents)
        self.with_redistribution = with_redistribution
        self.value_fn_variant = value_fn_variant
        self.convert_a_to_one_hot = convert_a_to_one_hot
        self.env_name = env.NAME
        self.env = env
        self.loss_mul_planner = loss_mul_planner
        with tf.variable_scope('Planner'):
            # Placeholders: current state and the joint action of all players.
            self.s = tf.placeholder(tf.float32, [1, env.n_features], "state_pl")
            self.a_players = tf.placeholder(tf.float32, [1, n_players], "player_actions")
            self.convertion_to_one_hot(use_softmax_hot)
            if value_fn_variant == 'exact':
                # 'exact' also needs the players' action probabilities.
                if self.convert_a_to_one_hot:
                    self.p_players = tf.placeholder(tf.float32, [1, n_players, env.NUM_ACTIONS], "player_action_probs")
                else:
                    self.p_players = tf.placeholder(tf.float32, [1, n_players], "player_action_probs")
                self.a_plan = tf.placeholder(tf.float32, [2, 2], "conditional_planning_actions") # works only for matrix games
            self.r_players = tf.placeholder(tf.float32, [1, n_players], "player_rewards")
            # Planner network input: state concatenated with the players' actions.
            if self.convert_a_to_one_hot:
                self.inputs = tf.concat([self.s, self.a_players_one_hot_reshape], 1)
            else:
                self.inputs = tf.concat([self.s, self.a_players], 1)
            if normalize_planner:
                # Center the inputs around zero (inputs assumed in [0, 1]).
                self.inputs = self.inputs-0.5
            with tf.variable_scope('Policy_p'):
                if self.convert_a_to_one_hot:
                    ma_action_space_dim = 2 * env.NUM_ACTIONS
                else:
                    ma_action_space_dim = env.NUM_ACTIONS
                # Layer sizes: input, hidden layer(s), one output per player.
                if not isinstance(n_units, list):
                    units = [env.n_features + ma_action_space_dim, n_units, n_players]
                else:
                    units = [env.n_features + ma_action_space_dim] + n_units + [n_players]
                self.create_multi_layer_fc(units, mean_theta, std_theta)
            if max_reward_strength is None:
                self.action_layer = self.l1
            else:
                self.action_layer = tf.sigmoid(self.l1)
            with tf.variable_scope('Vp'):
                # Planner rewards, rescaled to [-max_reward_strength, +max_reward_strength].
                if max_reward_strength is not None:
                    self.vp = 2 * max_reward_strength * (self.action_layer - 0.5)
                else:
                    self.vp = self.action_layer
            with tf.variable_scope('V_total'):
                if value_fn_variant == 'proxy':
                    self.v = 2 * self.a_players - 1
                # if value_fn_variant == 'estimated':
                if value_fn_variant == 'estimated' or value_fn_variant == 'exact':
                    if "CoinGame" in self.env_name:
                        self.v = tf.reduce_sum(self.r_players)
                    else:
                        # NOTE(review): the -1.9 baseline looks game-specific
                        # (matrix-game payoff offset) — confirm.
                        self.v = tf.reduce_sum(self.r_players) - 1.9
                # if value_fn_variant == 'exact':
                #     self.v = tf.placeholder(tf.float32, [1, n_players], "player_values")
            with tf.variable_scope('cost_function'):
                if value_fn_variant == 'estimated':
                    self.g_log_pi = tf.placeholder(tf.float32, [env.n_features, n_players], "player_gradients")
                cost_list = []
                for underlying_agent in underlying_agents:
                    # policy gradient theorem
                    idx = underlying_agent.agent_idx
                    if value_fn_variant == 'estimated':
                        if "CoinGame" in self.env_name:  # or True:
                            self.g_Vp = self.g_log_pi[:, idx] * self.vp[:, idx]
                            self.g_V = self.g_log_pi[:, idx] * (self.v[:, idx]
                                                                if value_fn_variant == 'proxy'
                                                                else self.v)
                        else:
                            self.g_Vp = self.g_log_pi[0, idx] * self.vp[0, idx]
                            self.g_V = self.g_log_pi[0, idx] * (self.v[0, idx]
                                                                if value_fn_variant == 'proxy'
                                                                else self.v)
                    if value_fn_variant == 'exact':
                        act_idx = tf.cast(self.a_players[0, idx], tf.int32)
                        if self.convert_a_to_one_hot:
                            # Bernoulli-style derivative of the played action's
                            # probability, and the opponent's probability.
                            self.g_p = self.p_players[0, idx, act_idx] * (1 - self.p_players[0, idx, act_idx])
                            self.p_opp = self.p_players[0, 1 - idx, act_idx]
                            grad = tf.gradients(ys=self.vp[0, idx], xs=self.a_players)
                            if add_state_grad:
                                grad_s = tf.gradients(ys=self.vp[0, idx], xs=self.s)
                            self.g_Vp = self.g_p * grad[0][0, idx]
                            if add_state_grad:
                                self.g_Vp += self.g_p * tf.reduce_sum(grad_s)
                        else:
                            self.g_p = self.p_players[0, idx] * (1 - self.p_players[0, idx])
                            self.p_opp = self.p_players[0, 1 - idx]
                            grad = tf.gradients(ys=self.vp[0, idx], xs=self.a_players)
                            if add_state_grad:
                                grad_s = tf.gradients(ys=self.vp[0, idx], xs=self.s)
                            self.g_Vp = self.g_p * grad[0][0, idx]
                            if add_state_grad:
                                self.g_Vp += self.g_p * tf.reduce_sum(grad_s)
                        if "CoinGame" in self.env_name:
                            if add_state_grad:
                                # NOTE(review): 3*9+4 presumably is the input
                                # dimension (state features + one-hot actions)
                                # used to average the state gradient — confirm.
                                self.g_Vp = self.g_Vp / (3*9+4)
                            self.g_V = self.g_p * tf.reduce_sum(self.v)
                        else:
                            if add_state_grad:
                                self.g_Vp = self.g_Vp / (5+1)
                            if not use_v_pl:
                                # Closed-form dV/dp for a symmetric 2x2 matrix
                                # game with payoffs R, T, S, P.
                                self.g_V = self.g_p * (self.p_opp * (2 * env.R - env.T - env.S)
                                                       + (1 - self.p_opp) * (env.T + env.S - 2 * env.P))
                            else:
                                self.g_V = self.g_p * tf.reduce_sum(self.v)
                    # Planner cost: negative alignment between the player's
                    # update direction under vp and under the true value.
                    cost_list.append(- underlying_agent.learning_rate * self.g_Vp * self.g_V)
                # Penalty on the magnitude of handed-out rewards (optionally
                # only on their spread when redistribution is enabled).
                if with_redistribution:
                    if square_cost:
                        self.extra_loss = cost_param * tf.norm(self.vp - tf.reduce_mean(self.vp)) * \
                                          tf.norm(self.vp - tf.reduce_mean(self.vp))
                    else:
                        self.extra_loss = cost_param * tf.norm(self.vp - tf.reduce_mean(self.vp))
                else:
                    if square_cost:
                        self.extra_loss = cost_param * tf.norm(self.vp) * tf.norm(self.vp)
                    else:
                        self.extra_loss = cost_param * tf.norm(self.vp)
                if not normalize_vp_separated:
                    self.cost = tf.reduce_sum(tf.stack(cost_list))
                else:
                    self.cost = tf.stack(cost_list, axis=0)
                # Optional dynamic loss rescaling (defined elsewhere in the class).
                self.dynamic_scaling_vp(normalize_against_vp, max_reward_strength, normalize_vp_separated)
                self.dynamic_scaling_v(normalize_against_v)
                if planner_clip_norm is not None:
                    self.cost = tf.clip_by_norm(self.cost, planner_clip_norm, axes=None, name=None)
                self.loss = (self.cost + self.extra_loss)
                if weight_decay > 0.0:
                    self.loss += weight_decay * self.weights_norm
            with tf.variable_scope('trainPlanningAgent'):
                #AdamOptimizer
                if use_adam_optimizer:
                    self.train_op = tf.train.AdamOptimizer(self.loss_mul_planner *learning_rate).minimize(self.loss,
                                                                                                          var_list=tf.get_collection(
                                                                                                              tf.GraphKeys.GLOBAL_VARIABLES,
                                                                                                              scope='Planner/Policy_p'))
                else:
                    self.train_op = tf.train.MomentumOptimizer(self.loss_mul_planner *learning_rate,
                                                               momentum=planner_momentum).minimize(self.loss,
                                                                                                   var_list=tf.get_collection(
                                                                                                       tf.GraphKeys.GLOBAL_VARIABLES,
                                                                                                       scope='Planner/Policy_p'))
            self.sess.run(tf.global_variables_initializer())
def convertion_to_one_hot(self, use_softmax_hot):
    """Build a (optionally softmax-relaxed) one-hot tensor of the players' actions.

    Each action slot k is scored with -|k - a|, which is 0 at the chosen
    action and negative elsewhere; the scores are then normalized into a
    one-hot (or softmax) encoding.

    :param use_softmax_hot: if True, use ``tf.nn.softmax`` over the scores
        (a smooth, differentiable relaxation) instead of a hard 0/1 one-hot.

    Side effects: sets ``self.a_players_one_hot`` (shape: batch x players x
    actions) and ``self.a_players_one_hot_reshape`` (flattened to (1, -1)).
    """
    if "CoinGame" in self.env_name:
        # 4 discrete actions.
        values = tf.stack([
            -tf.math.abs(-self.a_players),
            -tf.math.abs(1 - self.a_players),
            -tf.math.abs(2 - self.a_players),
            -tf.math.abs(3 - self.a_players),
        ], 2)
        # Clamp distances 2 and 3 up to 1 so every non-chosen slot scores -1,
        # leaving values in {0, -1}.
        values = tf.where(tf.equal(values, -2), values+1, values)
        values = tf.where(tf.equal(values, -3), values+2, values)
    else:
        # 2 discrete actions (matrix games); values are already in {0, -1}.
        values = tf.stack([
            -tf.math.abs(-self.a_players),
            -tf.math.abs(1 - self.a_players),
        ], 2)
    if use_softmax_hot:
        self.a_players_one_hot = tf.nn.softmax(values)
    else:
        # Shift {0, -1} scores by +1 to obtain a hard one-hot in {1, 0}.
        self.a_players_one_hot = values + 1
    self.a_players_one_hot_reshape = tf.reshape(self.a_players_one_hot, (1, -1))
def create_multi_layer_fc(self, units, mean_theta, std_theta):
    """Build a fully-connected network for the planner's policy.

    :param units: layer widths, e.g. ``[n_in, h1, ..., n_out]``. Hidden
        layers use leaky-ReLU; the last layer (built after the loop) is
        linear.
    :param mean_theta: mean for the final layer's bias initialization.
    :param std_theta: stddev for all weight/bias initializations.

    Side effects: sets ``self.w_pi0``/``self.b_pi0`` (final layer),
    ``self.l1`` (final pre-activation output), ``self.parameters``
    (all variables flattened into one vector) and ``self.weights_norm``
    (L2 norm of that vector, used for weight decay).
    """
    print("units", units)
    var_list = []
    input_ = self.inputs
    for i in range(len(units)):
        with tf.variable_scope("planner_layer_{}".format(i)):
            n_in = units[i]
            n_out = units[i + 1]
            print("i", i)
            print("n_in", n_in)
            print("n_out", n_out)
            # Stop before building the last pair as a hidden layer: the loop
            # leaves n_in/n_out bound to units[-2]/units[-1], which the final
            # linear layer below reuses.
            if i + 1 == len(units) - 1:
                break
            w_l1 = tf.Variable(tf.random_normal([n_in, n_out], mean=0.0, stddev=std_theta))
            b_l1 = tf.Variable(tf.random_normal([n_out], mean=0.0, stddev=std_theta))
            l1 = tf.nn.leaky_relu(tf.matmul(input_, w_l1) + b_l1)
            var_list.extend([w_l1, b_l1])
            input_ = l1
    # Final linear output layer; only its bias is centered on mean_theta.
    self.w_pi0 = tf.Variable(tf.random_normal([n_in, n_out], mean=0.0, stddev=std_theta))
    self.b_pi0 = tf.Variable(tf.random_normal([n_out], mean=mean_theta, stddev=std_theta))
    self.l1 = tf.matmul(input_, self.w_pi0) + self.b_pi0
    var_list.extend([self.w_pi0, self.b_pi0])
    # Flatten every variable into a single parameter vector (numel is a
    # project helper — presumably returns the element count; TODO confirm).
    self.parameters = tf.concat(axis=0, values=[tf.reshape(v, [numel(v)]) for v in var_list])
    weights_norm = math_ops.reduce_sum(self.parameters * self.parameters, None, keepdims=True)
    self.weights_norm = tf.sqrt(tf.reduce_sum(weights_norm))
def dynamic_scaling_vp(self, normalize_against_vp, max_reward_strength, normalize_vp_separated):
    """Rescale ``self.cost`` by a running mean of |vp| (planner's reward output).

    Keeps the cost magnitude roughly stable as the planner's reward scale
    drifts during training. ``normalize_against_vp`` acts like the window
    length N of an exponential moving average; a falsy value disables the
    rescaling entirely.

    :param normalize_against_vp: EMA window length N (or falsy to disable).
    :param max_reward_strength: extra scale factor applied in the divisor.
    :param normalize_vp_separated: if True, normalize each player's cost
        term by its own running mean instead of a single shared scalar.

    Side effects: sets ``self.mean_vp_np`` (host-side EMA state),
    ``self.mean_vp_in`` (placeholder fed each step), ``self.mean_vp_out``
    (updated EMA tensor) and rescales ``self.cost``.
    """
    if "CoinGame" in self.env_name:
        init_v = 0.1
    else:
        init_v = 0.0
    if not normalize_vp_separated:
        # Single scalar EMA shared by both players.
        self.mean_vp_np = init_v * normalize_against_vp
        self.mean_vp_in = tf.placeholder(tf.float32, shape=(), name="mean_vp")
    else:
        # One EMA value per player.
        temp_v = init_v * normalize_against_vp
        self.mean_vp_np = [temp_v, temp_v]
        self.mean_vp_in = tf.placeholder(tf.float32, shape=(2,), name="mean_vp")
    if normalize_against_vp:
        if not normalize_vp_separated:
            null = 0.0
            # EMA update: (1 - 1/N) * old + |sum(vp)|.
            self.mean_vp_out = ((1 - (1 / normalize_against_vp)) * self.mean_vp_in +
                                tf.math.abs(tf.reduce_sum(self.vp)))
            # Guard against division by zero when the mean is exactly 0.
            self.cost = tf.cond(tf.equal(self.mean_vp_out, null), lambda: null,
                                lambda: self.cost / (
                                        self.mean_vp_out / normalize_against_vp * 10 / max_reward_strength))
        else:
            null = 0.0
            # Per-player EMA over the player axis.
            self.mean_vp_out = ((1 - (1 / normalize_against_vp)) * self.mean_vp_in +
                                tf.math.abs(tf.reduce_sum(self.vp, axis=0)))
            cost_list = []
            cost_list.append(tf.cond(tf.equal(self.mean_vp_out[0], 0.0), lambda: null,
                                     lambda: self.cost[0] / (
                                             self.mean_vp_out[0] / normalize_against_vp * 10 / max_reward_strength)))
            cost_list.append(tf.cond(tf.equal(self.mean_vp_out[1], 0.0), lambda: null,
                                     lambda: self.cost[1] / (
                                             self.mean_vp_out[1] / normalize_against_vp * 10 / max_reward_strength)))
            self.cost = tf.reduce_sum(tf.stack(cost_list))
    else:
        # Normalization disabled: pass the fed value straight through so the
        # host-side bookkeeping in learn() keeps working unchanged.
        self.mean_vp_out = self.mean_vp_in
def dynamic_scaling_v(self, normalize_against_v):
    """Rescale ``self.cost`` by a running mean of |v| (players' value estimates).

    Same EMA scheme as :meth:`dynamic_scaling_vp` but against the players'
    values; ``normalize_against_v`` is the EMA window length N, falsy to
    disable.

    Side effects: sets ``self.mean_v_np`` (host-side EMA state),
    ``self.mean_v_in`` (placeholder), ``self.mean_v_out`` (updated EMA
    tensor) and rescales ``self.cost``.
    """
    if "CoinGame" in self.env_name:
        self.mean_v_np = 0.2 * normalize_against_v
    else:
        self.mean_v_np = 0.0
    self.mean_v_in = tf.placeholder(tf.float32, name="mean_v")
    if normalize_against_v:
        # EMA update: (1 - 1/N) * old + sum(|v|).
        self.mean_v_out = ((1-(1/normalize_against_v)) * self.mean_v_in +
                           tf.reduce_sum(tf.math.abs(self.v)))
        # Avoid dividing by a zero mean.
        self.cost = tf.cond(tf.equal(self.mean_v_out, 0.0), lambda:0.0, lambda:self.cost /
                            (self.mean_v_out/normalize_against_v))
    else:
        self.mean_v_out = self.mean_v_in
def get_weigths(self):
    """Fetch and return the planner's flattened parameter vector."""
    fetched_params = self.sess.run(self.parameters, feed_dict={})
    return fetched_params
def learn(self, s, a_players, coin_game=False, env_rewards=None):
    """Run one planner training step for state ``s`` and the players' actions.

    Computes (or receives) the players' rewards, assembles the feed dict —
    including per-agent policy gradients or action probabilities, depending
    on ``self.value_fn_variant`` — runs the training op, and refreshes the
    host-side running means used by the dynamic cost scaling.

    :param s: environment state (unbatched; a batch axis is prepended here).
    :param a_players: per-player integer actions.
    :param coin_game: if True, rewards are computed via a dict of
        per-policy actions (CoinGame-style API).
    :param env_rewards: precomputed rewards; when given, skips the env call.
    :return: tuple ``(action, loss, g_Vp, g_V, r_players, cost, extra_loss,
        l1, mean_v, vp, v, mean_vp)`` of fetched values (``action`` is the
        fetched ``self.vp``).
    """
    s = s[np.newaxis, :]  # prepend batch dimension
    if env_rewards is None:
        if coin_game:
            # TODO remove hardcoded policy_id
            actions = {"player_red": a_players[0], "player_blue": a_players[1]}
            r_players_rllib_format = self.env._compute_rewards(s, actions)
        else:
            r_players_rllib_format = self.env._compute_rewards(*a_players)
        r_players = convert_from_rllib_env_format(r_players_rllib_format, self.env.players_ids)
    else:
        r_players = env_rewards
    a_players = np.asarray(a_players)
    if self.convert_a_to_one_hot:
        a_players_one_hot = self.np_action_to_one_hot(a_players)
    feed_dict = {self.s: s,
                 self.a_players: a_players[np.newaxis, ...],
                 self.r_players: r_players[np.newaxis, :]}
    if self.value_fn_variant == 'estimated':
        # Each underlying agent contributes grad(log pi) for its own action.
        g_log_pi_list = []
        for underlying_agent in self.underlying_agents:
            idx = underlying_agent.agent_idx
            # if "CoinGame" in self.env_name:
            g_log_pi_list.append(underlying_agent.calc_g_log_pi(s, a_players_one_hot[idx])[0][0, ...])
            # else:
            #     g_log_pi_list.append(underlying_agent.calc_g_log_pi(s, a_players[idx]))
        # if "CoinGame" in self.env_name:
        g_log_pi_arr = np.stack(g_log_pi_list, axis=1)
        # else:
        #     g_log_pi_arr = np.reshape(np.asarray(g_log_pi_list), [1, -1])
        # print("g_log_pi_arr", g_log_pi_arr.shape)
        feed_dict[self.g_log_pi] = g_log_pi_arr
    if self.value_fn_variant == 'exact':
        # Feed each agent's action probabilities instead of gradients.
        p_players_list = []
        for underlying_agent in self.underlying_agents:
            idx = underlying_agent.agent_idx
            if self.convert_a_to_one_hot:
                p_players_list.append(underlying_agent.calc_action_probs(s, add_dim=False))
            else:
                p_players_list.append(underlying_agent.calc_action_probs(s)[0, -1])  # Only 2 actions
            # if "CoinGame" in self.env_name:
            #     v_list.append(underlying_agent.calcul_value(s, add_dim=False))
        if self.convert_a_to_one_hot:
            p_players_arr = np.stack(p_players_list, axis=1)
        else:
            p_players_arr = np.reshape(np.asarray(p_players_list), [1, -1])
        feed_dict[self.p_players] = p_players_arr
        # if "CoinGame" in self.env_name:
        #     v_players_arr = np.reshape(np.asarray(v_list), [1, -1])
        #     feed_dict[self.v] = v_players_arr
        # if "CoinGame" not in self.env_name:
        #     feed_dict[self.a_plan] = self.calc_conditional_planning_actions(s)
    # Feed the current running means consumed by the dynamic scaling ops.
    feed_dict[self.mean_v_in] = self.mean_v_np
    feed_dict[self.mean_vp_in] = self.mean_vp_np
    (_, action, loss, g_Vp, g_V, cost, extra_loss, l1,
     mean_v, vp, v, mean_vp) = self.sess.run([self.train_op, self.vp, self.loss,
                                              self.g_Vp, self.g_V,
                                              self.cost, self.extra_loss, self.l1,
                                              self.mean_v_out, self.vp, self.v,
                                              self.mean_vp_out], feed_dict)
    # Persist the updated running means for the next step.
    self.mean_v_np = mean_v
    self.mean_vp_np = mean_vp
    return action, loss, g_Vp, g_V, r_players, cost, extra_loss, l1, mean_v, vp, v, mean_vp
def get_log(self):
    """Accessor for the planner's accumulated log entries."""
    return self.log
def np_action_to_one_hot(self, a_players):
    """Convert per-player integer actions to a one-hot matrix.

    :param a_players: array-like of non-negative action indices, one per
        player (previously this required an ndarray because of ``.tolist()``;
        plain Python lists are now accepted as well).
    :return: float array of shape ``(n_players, NUM_ACTIONS)`` with a single
        1.0 in each row at the player's chosen action.
    """
    actions = np.asarray(a_players, dtype=np.intp)
    one_hot = np.zeros((len(actions), self.env.NUM_ACTIONS))
    # Vectorized fancy-indexing assignment replaces the per-player loop.
    one_hot[np.arange(len(actions)), actions] = 1
    return one_hot
def choose_action(self, s, a_players):
    """Return the planner's reward perturbation ``vp`` for the given
    state and player actions (first batch row only)."""
    state_batch = s[np.newaxis, :]
    action_batch = np.asarray(a_players)[np.newaxis, ...]
    vp_batch = self.sess.run(self.vp, {self.s: state_batch,
                                       self.a_players: action_batch})
    # self.log.append(self.calc_conditional_planning_actions(s))
    return vp_batch[0, :]
def calc_conditional_planning_actions(self, s):
assert "CoinGame" not in self.env_name
# Planning actions in each of the 4 cases: DD, CD, DC, CC
a_plan_DD = self.sess.run(self.action_layer, {self.s: s, self.a_players: np.array([0, 0])[np.newaxis, :]})
a_plan_CD = | |
# <gh_stars>10-100
import json
import math
import time
from django.shortcuts import render
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.csrf import requires_csrf_token
from django.views.decorators.csrf import ensure_csrf_cookie
import authorization.authorization as token
from django.contrib import auth
from authorization import authorization
import user.models as database
import config.config as config
import state.state as state
from conn import conn
from authorization import authorization
# Create your views here.
# HTTP-level result codes returned in the "result" field of every response.
HTTP_RESULT_SUCCESS = 1
HTTP_RESULT_FAIL = -100
HTTP_RESULT_PERMISSION_DENIED = 0
HTTP_RESULT_NO_SUCH_API = -1
HTTP_RESULT_PARAMS_ERROR = -2
HTTP_RESULT_LOGIN_REQUIRE = -3
HTTP_RESULT_UNKNOWN_ERROR = -4
# Pre-serialized JSON bodies for each result code.
JSON_PERMISSION_DENIED = '{"result":0}'
JSON_OPPERATION_SUCCESS = '{"result":1}'
JSON_OPPERATION_FAIL = '{"result":-100}'
JSON_NO_SUCH_API = '{"result":-1}'
JSON_PARAMS_INCORRENCT = '{"result":-2}'
JSON_LOGIN_REQUIRE = '{"result":-3}'
JSON_UNKNOWN_ERROR = '{"result":-4}'
JSON_REQUIRE_POST = '{"request":-5}'
# Map result codes to JSON bodies for error responses.
# Fix: the first entry previously used HTTP_RESULT_PARAMS_ERROR as its key
# (a duplicate silently overwritten by the later PARAMS entry), so
# permission-denied results could never be looked up; the FAIL code had no
# mapping at all.
DICT_RESULT_CODE = {
    HTTP_RESULT_PERMISSION_DENIED: JSON_PERMISSION_DENIED,
    HTTP_RESULT_FAIL: JSON_OPPERATION_FAIL,
    HTTP_RESULT_NO_SUCH_API: JSON_NO_SUCH_API,
    HTTP_RESULT_PARAMS_ERROR: JSON_PARAMS_INCORRENCT,
    HTTP_RESULT_LOGIN_REQUIRE: JSON_LOGIN_REQUIRE,
    HTTP_RESULT_UNKNOWN_ERROR: JSON_UNKNOWN_ERROR,
}
# Common JSON keys.
KEY_RESULT = 'result'
KEY_TYPE = 'type'
KEY_OPERATION_RESULT = 'operation_result'
# Payload "type" discriminators identifying which endpoint answered.
TYPE_DEVICE = 1
TYPE_LIGHT = 2
TYPE_TEMPERATURE = 3
TYPE_HUMIDITY = 4
TYPE_DIRT_HUMIDITY = 5
TYPE_FERTILIZATION = 6
TYPE_WATER = 7
TYPE_USER = 8
# 处理函数
def __checkUser(request):
    """Validate the requesting user's session.

    Authentication is currently disabled: the real check (commented out
    below) is bypassed and every request is treated as authorized.
    Returns an HTTP_RESULT_* code.
    """
    # if request.user.is_authenticated:
    #     return HTTP_RESULT_SUCCESS
    # return HTTP_RESULT_LOGIN_REQUIRE
    return HTTP_RESULT_SUCCESS
def __intToHex(num):
    """Render ``num`` as exactly two lowercase hex digits (last two digits
    of the hex representation, zero-padded): 5 -> '05', 30 -> '1e',
    300 -> '2c'."""
    padded = '0' + hex(num)[2:]
    return padded[-2:]
@csrf_exempt
def device(request):
    """Query or update the on/off state of the four controllable devices.

    POST: any of the four device-state keys present in the body are applied
    to the stored device config, forwarded to the hardware via ``conn``,
    and mirrored into the ``state`` module.
    The response always contains the resulting four states as JSON.

    Refactor: the four copy-pasted per-device branches are collapsed into a
    single table-driven loop; runtime strings and command codes unchanged.
    """
    request_result = __checkUser(request)
    if request_result == HTTP_RESULT_SUCCESS:
        custom_device_config = config.get_device_config()
        if request.method == 'POST':  # 如果是POST请求
            print(request.POST)
            # (slot number, POST key, hardware command code) per device.
            device_slots = (
                (1, config.KEY_DEVICE_STATE_1, '00'),
                (2, config.KEY_DEVICE_STATE_2, '01'),
                (3, config.KEY_DEVICE_STATE_3, '02'),
                (4, config.KEY_DEVICE_STATE_4, '03'),
            )
            for slot, post_key, cmd_code in device_slots:
                if post_key in request.POST:
                    new_state = int(request.POST.get(post_key, config.STATE_OFF))
                    setattr(custom_device_config, 'device%dState' % slot, new_state)
                    print('自定义设备%d被设置为 ' % slot, new_state)
                    conn.sendCmd(cmd_code, '0' + str(new_state))
                    setattr(state, 'device_%d' % slot, new_state)
            config.set_device_config_obj(custom_device_config)
            time.sleep(0.3)  # give the hardware time to apply before reset
            conn.sendCmdReset()
        json_obj = {
            KEY_RESULT: HTTP_RESULT_SUCCESS,
            config.KEY_DEVICE_STATE_1: int(custom_device_config.device1State),
            config.KEY_DEVICE_STATE_2: int(custom_device_config.device2State),
            config.KEY_DEVICE_STATE_3: int(custom_device_config.device3State),
            config.KEY_DEVICE_STATE_4: int(custom_device_config.device4State),
        }
        return HttpResponse(json.dumps(json_obj))
    else:
        return HttpResponse(DICT_RESULT_CODE.get(request_result, JSON_UNKNOWN_ERROR))
@csrf_exempt
def light(request):
    """Query or update ambient-light settings (power, color, brightness).

    POST fields present in the body are applied to the stored light config
    and forwarded to the hardware via ``conn``; the response always
    contains the resulting configuration as JSON.
    """
    request_result = __checkUser(request)
    if request_result == HTTP_RESULT_SUCCESS:
        light_config = config.get_light_config()
        if request.method == 'POST':
            if config.KEY_IS_LIGHT_ON in request.POST:
                light_config.isLightOn = request.POST.get(config.KEY_IS_LIGHT_ON, config.STATE_OFF)
                print('环境光设置为', light_config.isLightOn)
                # Command '08': light power.
                conn.sendCmd('08','0'+str(light_config.isLightOn))
                state.light = light_config.isLightOn
                time.sleep(0.01)  # brief gap between consecutive commands
            if config.KEY_LIGHT_COLOR in request.POST:
                light_config.lightColor = request.POST.get(config.KEY_LIGHT_COLOR, config.STATE_OFF)
                print('环境光颜色设置为', light_config.lightColor)
                # Command '09': light color.
                conn.sendCmd('09','0'+str(light_config.lightColor))
                state.light_color = light_config.lightColor
                time.sleep(0.01)
            if config.KEY_LIGHT_LEVEL in request.POST:
                light_config.lightLevel = request.POST.get(config.KEY_LIGHT_LEVEL, config.STATE_OFF)
                print('环境光亮度设置为', light_config.lightLevel)
                tempData = light_config.lightLevel
                # Levels above 9 are transmitted as the single hex digit 'a'.
                if(int(tempData) > 9):
                    tempData = 'a'
                # Command '0a': light brightness.
                conn.sendCmd('0a','0'+str(tempData))
                state.light_level = light_config.lightLevel
                time.sleep(0.01)
            config.set_light_config_obj(light_config)
            time.sleep(0.3)
            conn.sendCmdReset()
        json_obj = {
            KEY_RESULT: HTTP_RESULT_SUCCESS,
            config.KEY_IS_LIGHT_ON: light_config.isLightOn,
            config.KEY_LIGHT_COLOR: light_config.lightColor,
            config.KEY_LIGHT_LEVEL: light_config.lightLevel,
        }
        return HttpResponse(json.dumps(json_obj))
    else:
        return HttpResponse(DICT_RESULT_CODE.get(request_result, JSON_UNKNOWN_ERROR))
@csrf_exempt
def temperature(request):
    """Query or update the automatic temperature-control configuration.

    POST fields present in the body update the stored limits/actions and
    forward the numeric limits to the hardware via ``conn``; the response
    always contains the resulting configuration as JSON.
    """
    request_result = __checkUser(request)
    # NOTE(review): this unconditionally overrides the auth check above —
    # looks like leftover debug scaffolding; confirm before removing.
    request_result = HTTP_RESULT_SUCCESS
    if request_result == HTTP_RESULT_SUCCESS:
        temperature_config = config.get_temperature_config()
        if request.method == 'POST':
            if config.KEY_IS_AUTO_CONTROL in request.POST:
                temperature_config.isAutoControl = request.POST.get(config.KEY_IS_AUTO_CONTROL, config.STATE_OFF)
                print('自动温度控制被设置为 ' + temperature_config.isAutoControl)
            if config.KEY_UPPER_LIMIT in request.POST:
                temperature_config.upperLimit = request.POST.get(config.KEY_UPPER_LIMIT, 30)
                state.temperature_upper = temperature_config.upperLimit
                # Command '0b': temperature upper limit (two hex digits).
                conn.sendCmd('0b',__intToHex(int(temperature_config.upperLimit)))
                print('自动温度控制温度上限被设置为 ' + temperature_config.upperLimit)
            if config.KEY_LOWER_LIMIT in request.POST:
                temperature_config.lowerLimit = request.POST.get(config.KEY_LOWER_LIMIT, 0)
                state.temperature_lower = temperature_config.lowerLimit
                # Command '0c': temperature lower limit.
                conn.sendCmd('0c',__intToHex(int(temperature_config.lowerLimit)))
                print('自动温度控制温度下限被设置为 ' + temperature_config.lowerLimit)
            if config.KEY_UPPER_ACTION in request.POST:
                temperature_config.upperActions = request.POST.get(config.KEY_UPPER_ACTION, config.EMPTY_ACTION)
                print('自动温度控制温度上限执行动作被设置为 ' + temperature_config.upperActions)
            if config.KEY_LOWER_ACTION in request.POST:
                temperature_config.lowerActions = request.POST.get(config.KEY_LOWER_ACTION, config.EMPTY_ACTION)
                print('自动温度控制温度下限执行动作被设置为 ' + temperature_config.lowerActions)
            time.sleep(0.3)
            conn.sendCmdReset()
            config.set_temperature_config_obj(temperature_config)
        json_obj = {
            KEY_RESULT: HTTP_RESULT_SUCCESS,
            KEY_TYPE: TYPE_TEMPERATURE,
            config.KEY_IS_AUTO_CONTROL: temperature_config.isAutoControl,
            config.KEY_UPPER_LIMIT: temperature_config.upperLimit,
            config.KEY_LOWER_LIMIT: temperature_config.lowerLimit,
            config.KEY_UPPER_ACTION: temperature_config.upperActions,
            config.KEY_LOWER_ACTION: temperature_config.lowerActions,
        }
        return HttpResponse(json.dumps(json_obj))
    else:
        return HttpResponse(DICT_RESULT_CODE.get(request_result, JSON_UNKNOWN_ERROR))
@csrf_exempt
def humidity(request):
    """Query or update the automatic air-humidity-control configuration.

    POST fields present in the body update the stored limits/actions and
    forward the numeric limits to the hardware via ``conn``; the response
    always contains the resulting configuration as JSON.

    Fix: the lower-action branch previously logged ``upperActions`` (copy-
    paste bug); it now logs the value it actually sets.
    """
    request_result = __checkUser(request)
    # NOTE(review): overrides the auth check above — looks like leftover
    # debug scaffolding; confirm before removing.
    request_result = HTTP_RESULT_SUCCESS
    if request_result == HTTP_RESULT_SUCCESS:
        humidity_config = config.get_humidity_config()
        if request.method == 'POST':
            if config.KEY_IS_AUTO_CONTROL in request.POST:
                humidity_config.isAutoControl = request.POST.get(config.KEY_IS_AUTO_CONTROL, config.STATE_OFF)
                print('自动湿度控制被设置为 ' + humidity_config.isAutoControl)
            if config.KEY_UPPER_LIMIT in request.POST:
                humidity_config.upperLimit = request.POST.get(config.KEY_UPPER_LIMIT,config.DEFAULT_HUMIDITY_UPPER_LIMIT)
                state.humidity_air_upper = humidity_config.upperLimit
                # Command '0d': air humidity upper limit (two hex digits).
                conn.sendCmd('0d',__intToHex(int(humidity_config.upperLimit)))
                print('自动湿度控制湿度上限被设置为 ' + humidity_config.upperLimit)
            if config.KEY_LOWER_LIMIT in request.POST:
                humidity_config.lowerLimit = request.POST.get(config.KEY_LOWER_LIMIT,config.DEFAULT_HUMIDITY_LOWER_LIMIT)
                state.humidity_air_lower = humidity_config.lowerLimit
                # Command '0e': air humidity lower limit.
                conn.sendCmd('0e',__intToHex(int(humidity_config.lowerLimit)))
                print('自动湿度控制湿度下限被设置为 ' + humidity_config.lowerLimit)
            if config.KEY_UPPER_ACTION in request.POST:
                humidity_config.upperActions = request.POST.get(config.KEY_UPPER_ACTION, config.DEFAULT_ACTIONS)
                print('自动湿度动作上限被设置为 ', humidity_config.upperActions)
            if config.KEY_LOWER_ACTION in request.POST:
                humidity_config.lowerActions = request.POST.get(config.KEY_LOWER_ACTION, config.DEFAULT_ACTIONS)
                # Fixed: used to print upperActions here.
                print('自动湿度动作下限被设置为 ', humidity_config.lowerActions)
            config.set_dumidity_config_obj(humidity_config)  # NOTE: "dumidity" typo is the config module's API name
            time.sleep(0.3)
            conn.sendCmdReset()
        json_obj = {
            KEY_RESULT: HTTP_RESULT_SUCCESS,
            KEY_TYPE: TYPE_HUMIDITY,
            config.KEY_IS_AUTO_CONTROL: humidity_config.isAutoControl,
            config.KEY_UPPER_LIMIT: humidity_config.upperLimit,
            config.KEY_LOWER_LIMIT: humidity_config.lowerLimit,
            config.KEY_UPPER_ACTION: humidity_config.upperActions,
            config.KEY_LOWER_ACTION: humidity_config.lowerActions,
        }
        return HttpResponse(json.dumps(json_obj))
    else:
        return HttpResponse(DICT_RESULT_CODE.get(request_result, JSON_UNKNOWN_ERROR))
@csrf_exempt
def dirt_humidity(request):
    """Query or update the automatic soil-humidity-control configuration.

    POST fields present in the body update the stored limits/actions and
    forward the numeric limits to the hardware via ``conn``; the response
    always contains the resulting configuration as JSON.

    Fix: the lower-action branch previously logged ``upperActions`` (copy-
    paste bug); it now logs the value it actually sets.
    """
    request_result = __checkUser(request)
    # NOTE(review): overrides the auth check above — looks like leftover
    # debug scaffolding; confirm before removing.
    request_result = HTTP_RESULT_SUCCESS
    if request_result == HTTP_RESULT_SUCCESS:
        dirt_humidity_config = config.get_dirt_humidity_config()
        if request.method == 'POST':
            if config.KEY_IS_AUTO_CONTROL in request.POST:
                dirt_humidity_config.isAutoControl = request.POST.get(config.KEY_IS_AUTO_CONTROL, config.STATE_OFF)
                print('自动土壤湿度控制被设置为 ' + dirt_humidity_config.isAutoControl)
            if config.KEY_UPPER_LIMIT in request.POST:
                dirt_humidity_config.upperLimit = request.POST.get(config.KEY_UPPER_LIMIT,config.DEFAULT_HUMIDITY_UPPER_LIMIT)
                state.humidity_dirt_upper = dirt_humidity_config.upperLimit
                print('自动土壤湿度控制温度上限被设置为 ' + dirt_humidity_config.upperLimit)
                # Command '12': soil humidity upper limit (two hex digits).
                conn.sendCmd('12',__intToHex(int(dirt_humidity_config.upperLimit)))
            if config.KEY_LOWER_LIMIT in request.POST:
                dirt_humidity_config.lowerLimit = request.POST.get(config.KEY_LOWER_LIMIT,config.DEFAULT_HUMIDITY_LOWER_LIMIT)
                state.humidity_dirt_lower = dirt_humidity_config.lowerLimit
                print('自动土壤湿度控制湿度下限被设置为 ' + dirt_humidity_config.lowerLimit)
                # Command '13': soil humidity lower limit.
                conn.sendCmd('13',__intToHex(int(dirt_humidity_config.lowerLimit)))
            if config.KEY_UPPER_ACTION in request.POST:
                dirt_humidity_config.upperActions = request.POST.get(config.KEY_UPPER_ACTION, config.DEFAULT_ACTIONS)
                print('自动土壤湿度动作上限被设置为 ', dirt_humidity_config.upperActions)
            if config.KEY_LOWER_ACTION in request.POST:
                dirt_humidity_config.lowerActions = request.POST.get(config.KEY_LOWER_ACTION, config.DEFAULT_ACTIONS)
                # Fixed: used to print upperActions here.
                print('自动土壤湿度动作下限被设置为 ', dirt_humidity_config.lowerActions)
            config.set_dirt_humidity_config_obj(dirt_humidity_config)
            time.sleep(0.3)
            conn.sendCmdReset()
        json_obj = {
            KEY_RESULT: HTTP_RESULT_SUCCESS,
            KEY_TYPE: TYPE_DIRT_HUMIDITY,
            config.KEY_IS_AUTO_CONTROL: dirt_humidity_config.isAutoControl,
            config.KEY_UPPER_LIMIT: dirt_humidity_config.upperLimit,
            config.KEY_LOWER_LIMIT: dirt_humidity_config.lowerLimit,
            config.KEY_UPPER_ACTION: dirt_humidity_config.upperActions,
            config.KEY_LOWER_ACTION: dirt_humidity_config.lowerActions,
        }
        return HttpResponse(json.dumps(json_obj))
    else:
        return HttpResponse(DICT_RESULT_CODE.get(request_result, JSON_UNKNOWN_ERROR))
@csrf_exempt
def fertilization(request):
    """Query or update the automatic fertilization schedule.

    POST fields are optional; each one present in the body overwrites the
    corresponding stored config attribute. The response always contains
    the resulting configuration as JSON.
    """
    request_result = __checkUser(request)
    if request_result == HTTP_RESULT_SUCCESS:
        fertilization_config = config.get_fertilization_config()
        if request.method == 'POST':
            # (POST key, config attribute, default value, log message).
            field_table = (
                (config.KEY_IS_AUTO_CONTROL, 'isAutoControl', config.DEFAULT_AUTO_CONTROL, '自动施肥被设置为'),
                (config.KEY_REPEAT_TYPE, 'repeatType', config.DEFAULT_REPEAT_TYPE, '自动施肥重复类型被设置为'),
                (config.KEY_REPEAT_CIRCLE, 'repeatCircle', config.DEFAULT_REPEAT_CIRCLE, '自动施肥周期被设置为'),
                (config.KEY_HOUR, 'hour', config.DEFAULT_REPEAT_HOUR, '自动施肥周期小时被设置为'),
                (config.KEY_MINUTE, 'minute', config.DEFAULT_REPEAT_MINUTE, '自动施肥周期分钟被设置为'),
            )
            for post_key, attr_name, default_value, message in field_table:
                if post_key in request.POST:
                    new_value = request.POST.get(post_key, default_value)
                    setattr(fertilization_config, attr_name, new_value)
                    print(message, new_value)
            config.set_fertilization_config_obj(fertilization_config)
        json_obj = {
            KEY_RESULT: HTTP_RESULT_SUCCESS,
            KEY_TYPE: TYPE_FERTILIZATION,
            config.KEY_IS_AUTO_CONTROL: fertilization_config.isAutoControl,
            config.KEY_REPEAT_TYPE: fertilization_config.repeatType,
            config.KEY_REPEAT_CIRCLE: fertilization_config.repeatCircle,
            config.KEY_HOUR: fertilization_config.hour,
            config.KEY_MINUTE: fertilization_config.minute
        }
        return HttpResponse(json.dumps(json_obj))
    else:
        return HttpResponse(DICT_RESULT_CODE.get(request_result, JSON_UNKNOWN_ERROR))
@csrf_exempt
def water(request):
    """Query or update the automatic watering schedule.

    POST fields are optional; each one present in the body overwrites the
    corresponding stored config attribute. The response always contains
    the resulting configuration as JSON.
    """
    request_result = __checkUser(request)
    if request_result == HTTP_RESULT_SUCCESS:
        water_config = config.get_water_config()
        if request.method == 'POST':
            # (POST key, config attribute, default value, log message).
            field_table = (
                (config.KEY_IS_AUTO_CONTROL, 'isAutoControl', config.DEFAULT_AUTO_CONTROL, '自动浇水被设置为'),
                (config.KEY_REPEAT_TYPE, 'repeatType', config.DEFAULT_REPEAT_TYPE, '自动浇水重复类型被设置为'),
                (config.KEY_REPEAT_CIRCLE, 'repeatCircle', config.DEFAULT_REPEAT_CIRCLE, '自动浇水周期被设置为'),
                (config.KEY_HOUR, 'hour', config.DEFAULT_REPEAT_HOUR, '自动浇水周期小时被设置为'),
                (config.KEY_MINUTE, 'minute', config.DEFAULT_REPEAT_MINUTE, '自动浇水周期分钟被设置为'),
            )
            for post_key, attr_name, default_value, message in field_table:
                if post_key in request.POST:
                    new_value = request.POST.get(post_key, default_value)
                    setattr(water_config, attr_name, new_value)
                    print(message, new_value)
            config.set_water_config_obj(water_config)
        json_obj = {
            KEY_RESULT: HTTP_RESULT_SUCCESS,
            KEY_TYPE: TYPE_WATER,
            config.KEY_IS_AUTO_CONTROL: water_config.isAutoControl,
            config.KEY_REPEAT_TYPE: water_config.repeatType,
            config.KEY_REPEAT_CIRCLE: water_config.repeatCircle,
            config.KEY_HOUR: water_config.hour,
            config.KEY_MINUTE: water_config.minute
        }
        return HttpResponse(json.dumps(json_obj))
    else:
        return HttpResponse(DICT_RESULT_CODE.get(request_result, JSON_UNKNOWN_ERROR))
# User-management operation types (legacy naming).
TYPE_USER_NO = -1
TYPE_USER_CREATE = 0
TYPE_USER_EDIT = 1
TYPE_USER_DELETE = 2
# Action codes accepted in the user endpoint's KEY_USER_ACTION_TYPE field.
ACTION_TYPE_NONE = -1
ACTION_TYPE_USER_CREATE = 0
ACTION_TYPE_USER_EDIT = 1
ACTION_TYPE_USER_DEL = 2
# Values of the "operation_result" field in user-endpoint responses.
VALUE_OPERATION_RESULT_FAIL = 'fail'
VALUE_OPERATION_RESULT_SUCCESS = 'success'
VALUE_OPERATION_RESULT_USER_EXIST = 'user_exist'
VALUE_OPERATION_RESULT_USER_NOT_EXIST = 'user_not_exist'
# Integer encodings of booleans in POST parameters.
VALUE_BOOL_TRUE = 1
VALUE_BOOL_FALSE = 0
# Extra pre-serialized JSON error bodies for the user endpoint.
JSON_NO_SUCH_USER = '{"result":-5}'
JSON_USER_EXIST = '{"result":-6}'
@csrf_exempt
def user(request):
    """User management endpoint.

    GET: returns the full user list when paging keys are supplied.
    POST: create / edit / delete a user, selected by KEY_USER_ACTION_TYPE.

    Fixes:
    - ``set_password`` was called with a ``<PASSWORD>`` redaction artifact
      (a NameError/syntax hazard); it now uses the submitted password.
    - the parameter check used ``.intersection`` (true if ANY key present)
      and then read every key with ``request.POST[...]``, crashing with
      KeyError on partial requests; it now requires ALL keys.
    - successful operations now report VALUE_OPERATION_RESULT_SUCCESS, and
      missing-user branches report VALUE_OPERATION_RESULT_USER_NOT_EXIST
      (both constants existed but were never used).
    """
    request_result = __checkUser(request)
    # NOTE(review): overrides the auth check above (original comment: 要删掉
    # i.e. "to be removed"); confirm before deleting.
    request_result = HTTP_RESULT_SUCCESS
    if request_result == HTTP_RESULT_SUCCESS:
        if request.method == 'GET':
            print('Get请求')
            if {config.KEY_PAGE_NUM, config.KEY_PAGE_COUNT}.intersection(request.GET):
                print('请求参数符合条件')
                user_list = database.User.objects.all()
                return HttpResponse(user_list)
        elif request.method == 'POST':
            print('收到的API的关于用户的POST的请求\n'+str(request.POST))
            required_keys = {config.KEY_USER_ACTION_TYPE, config.KEY_USERNAME, config.KEY_PASSWORD,
                             config.KEY_IS_STUFF, config.KEY_IS_ACTIVE, config.KEY_NICKNAME, config.KEY_INFO}
            # Every key below is accessed directly, so ALL must be present.
            if required_keys <= request.POST.keys():
                print('用户的参数校验正确')
                user_username = request.POST[config.KEY_USERNAME]
                user_password = request.POST[config.KEY_PASSWORD]
                user_is_stuff = int(request.POST[config.KEY_IS_STUFF]) == VALUE_BOOL_TRUE
                user_is_active = int(request.POST[config.KEY_IS_ACTIVE]) == VALUE_BOOL_TRUE
                user_action_type = int(request.POST[config.KEY_USER_ACTION_TYPE])
                user_info = request.POST[config.KEY_INFO]
                user_nickname = request.POST[config.KEY_NICKNAME]
                if user_nickname == '':
                    user_nickname = user_username
                if user_info == '':
                    user_info = '暂无简介'
                user_operation_result = VALUE_OPERATION_RESULT_FAIL
                if user_action_type == ACTION_TYPE_USER_CREATE:  # create
                    print('创建用户 是否工作人员 '+str(user_is_stuff)+' 是否活动 '+str(user_is_active))
                    if database.User.objects.filter(username=user_username):  # username taken
                        user_operation_result = VALUE_OPERATION_RESULT_USER_EXIST
                        print("同户名 %s 已被注册"%user_username)
                    else:
                        temp_user = database.User()
                        temp_user.username = user_username
                        temp_user.nickname = user_nickname
                        temp_user.set_password(user_password)
                        temp_user.is_staff = user_is_stuff
                        temp_user.is_active = user_is_active
                        temp_user.info = user_info
                        temp_user.save()
                        user_operation_result = VALUE_OPERATION_RESULT_SUCCESS
                        print("同户名 %s 创建成功"%user_username)
                elif user_action_type == ACTION_TYPE_USER_EDIT:  # edit (password unchanged)
                    print('编辑用户')
                    if database.User.objects.filter(username=user_username):
                        temp_user = database.User.objects.get(username=user_username)
                        temp_user.username = user_username
                        temp_user.nickname = user_nickname
                        temp_user.is_staff = user_is_stuff
                        temp_user.is_active = user_is_active
                        temp_user.info = user_info
                        temp_user.save()
                        user_operation_result = VALUE_OPERATION_RESULT_SUCCESS
                        print("同户 %s 编辑成功"%user_username)
                    else:
                        user_operation_result = VALUE_OPERATION_RESULT_USER_NOT_EXIST
                        print("同户 %s 不存在,不能编辑"%user_username)
                elif user_action_type == ACTION_TYPE_USER_DEL:  # delete
                    print('删除用户')
                    if database.User.objects.filter(username=user_username):
                        temp_user = database.User.objects.get(username=user_username)
                        print("同户 %s 删除成功"%user_username)
                        temp_user.delete()
                        user_operation_result = VALUE_OPERATION_RESULT_SUCCESS
                    else:
                        user_operation_result = VALUE_OPERATION_RESULT_USER_NOT_EXIST
                        print("同户 %s 不存在,不能删除"%user_username)
                else:
                    print('传递的用户操作类型不正确')
                    user_action_type = ACTION_TYPE_NONE
                json_obj = {
                    KEY_RESULT: HTTP_RESULT_SUCCESS,
                    KEY_TYPE: TYPE_USER,
                    KEY_OPERATION_RESULT: user_operation_result,
                    config.KEY_USERNAME: user_username,
                    config.KEY_NICKNAME: user_nickname,
                    config.KEY_IS_STUFF: user_is_stuff,
                    config.KEY_IS_ACTIVE: user_is_active,
                    config.KEY_INFO: user_info,
                    config.KEY_USER_ACTION_TYPE: user_action_type
                }
                return HttpResponse(json.dumps(json_obj))
        return HttpResponse(JSON_PARAMS_INCORRENCT)
    else:
        return HttpResponse(DICT_RESULT_CODE.get(request_result, JSON_UNKNOWN_ERROR))
# POST keys for the schedule endpoint.
KEY_SCHEDULE_ACTION = 'action'
# Accepted values of KEY_SCHEDULE_ACTION.
VAL_SCHEDULE_ACTION_ADD = '1'
VAL_SCHEDULE_ACTION_EDIT = '2'
VAL_SCHEDULE_ACTION_DEL = '3'
KEY_SCHEDULE_NAME = 'name'
KEY_SCHEDULE_IS_ACTIVE = 'is_active'
KEY_SCHEDULE_REPEAT_TYPE = 'repeat_type'
KEY_SCHEDULE_REPEAT_VALUE = 'repeat_value'
KEY_SCHEDULE_HOUR = 'hour'
KEY_SCHEDULE_MINUTE = 'minute'
KEY_SHCEDULE_TARGET = 'target'
KEY_SCHEDULE_VALUE = 'value'
KEY_SCHEDULE_OLD_NAME = 'old_name'
JSON_PARAMS_NOT_DEFINE = '{"result":-7}'  # result: parameter not defined
# Schedule operation handler follows.
@csrf_exempt
def schedule(request):
    """Add / edit / delete a timed schedule entry.

    GET: placeholder (listing not implemented yet).
    POST: builds a ``config.Schedule`` from the request (missing fields
    fall back to defaults) and dispatches on KEY_SCHEDULE_ACTION.

    Fix: the old gate used ``{...}.intersection(request.POST)``, which is
    true when ANY key is present, and then read
    ``request.POST[KEY_SCHEDULE_ACTION]`` directly — a request carrying any
    other key but no ``action`` crashed with KeyError. Since every field
    except ``action`` has a default, the gate now requires only
    KEY_SCHEDULE_ACTION.
    """
    request_result = __checkUser(request)
    if request.method == 'GET':
        return HttpResponse('应该返回所有的定时,现在还没有完成')
    elif request.method == 'POST':
        if request_result == HTTP_RESULT_SUCCESS:
            if KEY_SCHEDULE_ACTION in request.POST:
                action = request.POST[KEY_SCHEDULE_ACTION]
                scheduleObject = config.Schedule()
                scheduleObject.name = request.POST.get(KEY_SCHEDULE_NAME,'undefine')
                scheduleObject.repeat_type = request.POST.get(KEY_SCHEDULE_REPEAT_TYPE,'0')
                scheduleObject.repeat_circle = request.POST.get(KEY_SCHEDULE_REPEAT_VALUE,'0')
                scheduleObject.hour = request.POST.get(KEY_SCHEDULE_HOUR,'0')
                scheduleObject.minute = request.POST.get(KEY_SCHEDULE_MINUTE,'0')
                scheduleObject.target = request.POST.get(KEY_SHCEDULE_TARGET,'0')
                scheduleObject.value = request.POST.get(KEY_SCHEDULE_VALUE,'0')
                scheduleObject.is_active = request.POST.get(KEY_SCHEDULE_IS_ACTIVE,'false')
                scheduleObject.start_time = config.get_time_stamp()
                # old_name only matters for edits (rename support).
                scheduleOldName = request.POST.get(KEY_SCHEDULE_OLD_NAME,' ')
                if action == VAL_SCHEDULE_ACTION_ADD:
                    config.add_schedule(scheduleObject)
                elif action == VAL_SCHEDULE_ACTION_DEL:
                    config.del_schedule(scheduleObject.name)
                elif action == VAL_SCHEDULE_ACTION_EDIT:
                    config.edit_schedule(scheduleObject, scheduleOldName)
                print('定时请求参数完整')
                return HttpResponse(JSON_OPPERATION_SUCCESS)
            else:
                return HttpResponse(JSON_PARAMS_NOT_DEFINE)
        else:
            return HttpResponse(JSON_LOGIN_REQUIRE)
@csrf_exempt
def noSuchApi(request):
    """Fallback handler: always answer with the 'no such API' JSON body."""
    response_body = JSON_NO_SUCH_API
    return HttpResponse(response_body)
'''
传过来的参数说明
year : 年
month : 月
day : 日
hour : 时
minute : 分
second : 秒
air_temp : 气温
air_humidity : 空气湿度
dirt_temp : 土壤温度
dirt_humidity : 土壤湿度
light : 亮度
count : 将要返回的条目数
page : 将要返回的页数
'''
from farm.models import Sensor as Sensor
from django.forms.models import model_to_dict
@csrf_exempt
def history(request):
if request.method == 'GET':
| |
without end_time
stage.start_timestamp = .1
cb._update_progress_time_info(None)
self.assertEqual(ctx.progress, {})
# update with end_time
cb._update_progress_time_info(1.5)
self.assertAlmostEqual(ctx.progress['elapsed'], 1.4)
# update eta
self.assertNotIn('eta', ctx.progress)
stage.get_eta = mock.Mock(return_value=123.)
cb._update_progress_time_info(1.6)
self.assertEqual(ctx.progress['eta'], 123.)
stage.get_eta = mock.Mock(return_value=1e-8)
cb._update_progress_time_info(1.6)
self.assertNotIn('eta', ctx.progress)
def test_life_cycle(self):
class _MyRemoteDoc(object):
logs = []
def start_worker(self):
self.logs.append('start_worker')
def update(self, metrics):
self.logs.append(('update', deep_copy(metrics)))
def stop_worker(self):
self.logs.append('stop_worker')
logs = []
remote_doc = _MyRemoteDoc()
cb = LoggerCallback(
console_mode=(LoggerMode.LOG_MAJOR |
LoggerMode.LOG_EVERY_FEW_BATCHES),
console_writer=logs.append,
remote_doc=remote_doc,
console_log_batch_freq=2,
remote_push_interval=.1,
)
stage = Stage(StageType.TRAIN, max_epoch=13, max_batch=78)
stage.start_timestamp = 0.5
stage.get_eta = mock.Mock(return_value=61)
####################
# on_stage_begin() #
####################
data = new_callback_data(stage=stage, start_timestamp=0.5)
cb.on_stage_begin(data)
self.assertEqual(cb.stage, data.stage)
self.assertEqual(len(logs), 1)
self.assertRegex(
logs[-1],
r'^\[[^\[\]]+\] Train started\n$',
)
self.assertEqual(remote_doc.logs, ['start_worker'])
logs.clear()
remote_doc.logs.clear()
####################
# on_epoch_begin() #
####################
stage.epoch.index = 3
stage.epoch.is_active = True
cb.ctx.metrics_collector.update({'xxx': 123.})
cb.ctx.batch_metrics.update({'yyy': 456.})
data = new_callback_data(
stage=stage,
index=3,
size=567,
start_timestamp=1.0,
)
cb.on_epoch_begin(data)
self.assertEqual(cb.ctx.progress, {
'epoch': 3, # i.e., ``index``
'max_epoch': 13,
})
self.assertEqual(cb.ctx.metrics_collector.to_json(), {}) # should be cleared
self.assertEqual(cb.ctx.batch_metrics, {}) # should be cleared
self.assertEqual(len(logs), 1)
self.assertEqual(logs[-1], 'Epoch 3/13\n')
self.assertEqual(remote_doc.logs, [])
logs.clear()
remote_doc.logs.clear()
####################
# on_batch_begin() #
####################
stage.batch.index = 4
stage.batch.is_active = True
cb.ctx.batch_metrics.update({'yyy': 456.})
cb.ctx.progress['batch_metrics'] = {}
data = new_callback_data(
stage=stage,
index=4,
size=32,
start_timestamp=1.5,
)
cb.on_batch_begin(data)
self.assertEqual(cb.ctx.progress, {
'epoch': 3,
'max_epoch': 13,
'batch': 4,
'max_batch': 78,
})
self.assertEqual(cb.ctx.batch_metrics, {})
self.assertEqual(len(logs), 0)
self.assertEqual(remote_doc.logs, [])
##################################
# nested stage within this batch #
##################################
def nested_stage():
stage2 = Stage(StageType.VALIDATION)
stage2.start_timestamp = 2.0
# enter stage
data2 = new_callback_data(stage=stage2, start_timestamp=2.0)
cb.on_stage_begin(data2)
self.assertEqual(len(logs), 0)
self.assertEqual(remote_doc.logs, [])
# exit stage
data2 = new_callback_data(
stage=stage2,
start_timestamp=2.0,
end_timestamp=2.5,
exc_time=0.5,
metrics={'val_acc': 0.75},
)
cb.on_stage_end(data2)
nested_stage()
self.assertEqual(len(logs), 0)
self.assertEqual(remote_doc.logs, [
('update', {
'progress.validation': {'elapsed': 0.5},
'result': {'val_acc': 0.75}
})
])
self.assertEqual(cb.ctx.metrics_collector.to_json(), {'val_acc': 0.75})
self.assertEqual(cb.ctx.batch_metrics, {'val_acc': 0.75})
remote_doc.logs.clear()
##################
# on_batch_end() #
##################
cb.ctx.last_remote_push_time = 0.
stage.batch.end_timestamp = 3.0
stage.best_validation_mark = True
data = new_callback_data(
stage=stage,
index=4,
size=32,
start_timestamp=1.5,
end_timestamp=3.0,
exc_time=1.5,
metrics={'acc': 0.5, 'loss': 0.25},
)
cb.on_batch_end(data)
self.assertEqual(
cb.ctx.metrics_collector.to_json(),
{'acc': 0.5, 'loss': 0.25, 'val_acc': 0.75},
)
self.assertEqual(
cb.ctx.batch_metrics,
{'acc': 0.5, 'loss': 0.25, 'val_acc': 0.75},
)
stage.batch.is_active = False
self.assertEqual(len(logs), 1)
self.assertRegex(
logs[-1],
r'^\s4/78 - eta 1:01 - acc: 0.5 - loss: 0.25 - val_acc: 0.75 \(\*\)\n$',
)
self.assertEqual(remote_doc.logs, [
('update',
{'progress.train': {
'batch': 4,
'batch_metrics': {'acc': 0.5, 'loss': 0.25, 'val_acc': 0.75},
'batch_time': 1.5,
'elapsed': 2.5,
'epoch': 3,
'eta': 61,
'max_batch': 78,
'max_epoch': 13},
'result': {'acc': 0.5, 'loss': 0.25, 'val_acc': 0.75}})
])
logs.clear()
remote_doc.logs.clear()
##################
# on_epoch_end() #
##################
stage.epoch.is_active = False
stage.epoch.end_timestamp = 3.5
data = new_callback_data(
stage=stage,
index=3,
size=567,
start_timestamp=1.0,
end_timestamp=3.5,
exc_time=2.5,
metrics={'acc': 0.125},
)
cb.on_epoch_end(data)
self.assertEqual(
cb.ctx.metrics_collector.to_json(),
{'acc': 0.125, 'loss': 0.25, 'val_acc': 0.75},
)
self.assertEqual(
cb.ctx.batch_metrics,
{'acc': 0.5, 'loss': 0.25, 'val_acc': 0.75},
)
stage.epoch.is_active = False
self.assertEqual(len(logs), 1)
self.assertRegex(
logs[-1],
r'^4 iters in 2.5s - eta 1:01 - acc: 0.125 - loss: 0.25 - val_acc: 0.75 \(\*\)\n$',
)
self.assertEqual(remote_doc.logs, [
('update',
{'progress.train': {
'batch': 4,
'batch_metrics': {'acc': 0.5, 'loss': 0.25, 'val_acc': 0.75},
'batch_time': 1.5,
'elapsed': 3.0,
'eta': 61,
'epoch': 3,
'epoch_time': 2.5,
'max_batch': 78,
'max_epoch': 13},
'result': {'acc': 0.125, 'loss': 0.25, 'val_acc': 0.75}})
])
logs.clear()
remote_doc.logs.clear()
##################
# on_stage_end() #
##################
stage.end_timestamp = 10.
data = new_callback_data(
stage=stage,
start_timestamp=0.5,
end_timestamp=10.,
exc_time=9.5,
metrics={'acc': 0.875},
)
ctx = cb.ctx
cb.on_stage_end(data)
self.assertEqual(len(cb._ctx_stack), 0)
self.assertEqual(ctx.progress, {
'elapsed': 9.5,
'eta': 61,
'epoch': 3,
'max_epoch': 13,
'epoch_time': 2.5,
'batch': 4,
'max_batch': 78,
'batch_time': 1.5,
'batch_metrics': {'acc': 0.5, 'loss': 0.25, 'val_acc': 0.75},
})
self.assertEqual(ctx.metrics_collector.to_json(), {
'acc': 0.875,
'loss': 0.25,
'val_acc': 0.75,
})
self.assertEqual(len(logs), 1)
self.assertRegex(
logs[-1],
r'^\[[^\[\]]+\] Train finished in 9.5s - acc: 0.875 - loss: 0.25 - '
r'val_acc: 0.75\n$',
)
self.assertEqual(remote_doc.logs, [
('update', {
'progress.train': {
'elapsed': 9.5,
'eta': 61,
'epoch': 3,
'epoch_time': 2.5,
'max_epoch': 13,
'batch': 4,
'max_batch': 78,
'batch_time': 1.5,
'batch_metrics': {
'acc': 0.5, 'loss': 0.25, 'val_acc': 0.75},
},
'result': {'acc': 0.875, 'loss': 0.25, 'val_acc': 0.75},
}),
'stop_worker'
])
class StopOnNaNTestCase(unittest.TestCase):

    def test_stop_on_nan(self):
        """StopOnNaN passes finite metrics through every hook, but raises
        NaNMetricError from each hook once a metric becomes NaN.
        """
        data = new_callback_data(metrics={'a': 1., 'b': 2.})
        cb = StopOnNaN()
        hooks = (cb.on_batch_end, cb.on_epoch_end, cb.on_stage_end)
        # no nan metric should be okay
        for hook in hooks:
            hook(data)
        # nan metric should raise error from every hook
        data.metrics['b'] = np.nan
        for hook in hooks:
            with pytest.raises(NaNMetricError,
                               match='NaN metric encountered: \'b\''):
                hook(data)
class _MyTrainCallback(BaseTrainCallback):
    """Minimal concrete BaseTrainCallback used only to exercise the base
    class's train-stage tracking behavior in the tests below."""
    pass
class TrainStageCallbackTestCase(unittest.TestCase):

    def test_life_cycle(self):
        """Only the outermost TRAIN stage is tracked; nested stages (even
        another train stage) neither replace nor clear it, and a TEST stage
        as the outermost stage is rejected.
        """
        def enter(s):
            cb.on_stage_begin(new_callback_data(stage=s, start_timestamp=0.))

        def leave(s):
            cb.on_stage_end(new_callback_data(stage=s, start_timestamp=0.))

        cb = _MyTrainCallback()
        train_stage = Stage(StageType.TRAIN)
        train_stage2 = Stage(StageType.TRAIN)
        test_stage = Stage(StageType.TEST)
        self.assertIsNone(cb.stage)
        # outermost train stage is captured
        enter(train_stage)
        self.assertIs(cb.stage, train_stage)
        # nested stages do not replace the tracked stage
        for nested in (train_stage2, test_stage):
            enter(nested)
            self.assertIs(cb.stage, train_stage)
        # leaving nested stages does not clear the tracked stage
        for nested in (test_stage, train_stage2):
            leave(nested)
            self.assertIs(cb.stage, train_stage)
        # leaving the outermost stage clears it
        leave(train_stage)
        self.assertIsNone(cb.stage)
        # a test stage may not be the outermost stage
        with pytest.raises(
                RuntimeError,
                match=f'The outer stage of `{_MyTrainCallback.__qualname__}` '
                      f'must be a train stage: got test stage .*'):
            enter(test_stage)
class CheckpointCallbackTestCase(unittest.TestCase):
    def test_base(self):
        """End-to-end check of BaseCheckpointCallback: state-object
        validation, checkpoint-manager lifecycle around a stage, and the
        epoch/batch-derived checkpoint name.
        """
        stage = Stage(StageType.TRAIN)
        data = new_callback_data(stage=stage, start_timestamp=0.)
        checkpoint = BaseCheckpoint()
        a = SimpleStatefulObject()
        with TemporaryDirectory() as temp_dir:
            root_dir = os.path.join(temp_dir, 'ckpt')
            # test invalid state objects: '__stage' is a reserved key, and
            # every value must be a StatefulObject
            with pytest.raises(ValueError,
                               match='State object key \'__stage\' is '
                                     'reserved.'):
                _ = BaseCheckpointCallback(checkpoint, root_dir, {'__stage': a})
            with pytest.raises(ValueError,
                               match='The item \'a\' in `state_objects` is not '
                                     'a StatefulObject: got .*'):
                _ = BaseCheckpointCallback(checkpoint, root_dir, {'a': 123})
            # test construct
            cb = BaseCheckpointCallback(
                checkpoint=checkpoint,
                root_dir=root_dir,
                state_objects=StatefulObjectGroup({'a': a}),
                max_checkpoints_to_keep=3
            )
            # the manager does not exist until the stage begins
            self.assertIsNone(cb.checkpoint_manager)
            self.assertEqual(list(cb.state_objects), ['a'])
            self.assertIs(cb.state_objects['a'], a)
            # on_stage_begin(): manager is created and a '__stage' state
            # object is injected after the user-provided ones
            cb.on_stage_begin(data)
            self.assertEqual(cb.checkpoint_manager.checkpoint, checkpoint)
            self.assertEqual(cb.checkpoint_manager.root_dir, root_dir)
            self.assertEqual(cb.checkpoint_manager.max_to_keep, 3)
            self.assertEqual(
                list(cb.checkpoint_manager.state_objects),
                ['a', '__stage']
            )
            self.assertIs(cb.checkpoint_manager.state_objects['a'], a)
            stage_state = cb.checkpoint_manager.state_objects['__stage']
            self.assertIsInstance(stage_state, _StageCounterState)
            self.assertEqual(stage_state.stage, stage)
            # make checkpoint: name encodes the current epoch/batch indices
            cb.checkpoint_manager.save = mock.Mock()
            stage.epoch.index = 4
            stage.batch.index = 5
            cb.make_checkpoint()
            self.assertEqual(
                cb.checkpoint_manager.save.call_args,
                (
                    ('epoch-4-batch-5',), {}
                )
            )
            # on_stage_end(): '__stage' is removed and the manager discarded
            cb.on_stage_end(data)
            self.assertNotIn('__stage', cb.state_objects)
            self.assertIsNone(cb.checkpoint_manager)
def test_auto_checkpoint(self):
class _MyCheckpoint(BaseCheckpoint):
logs = []
def _save(self, checkpoint_path: str) -> None:
os.makedirs(checkpoint_path)
self.logs.append(('save', checkpoint_path))
def _restore(self, checkpoint_path: str) -> None:
self.logs.append(('restore', checkpoint_path))
checkpoint = _MyCheckpoint()
a = SimpleStatefulObject()
state_objects = {'a': a}
stage = Stage(StageType.TRAIN)
data = new_callback_data(stage=stage, start_timestamp=0.)
stage2 = Stage(StageType.TRAIN)
data2 = new_callback_data(stage=stage2, start_timestamp=0.)
with TemporaryDirectory() as temp_dir:
root_dir = os.path.join(temp_dir, 'ckpt')
# test construct
exclusive_args = ('interval', 'epoch_freq', 'batch_freq')
for mode in (0b000, 0b011, 0b101, 0b110, 0b111):
kwargs = {k: None if (mode & (1 << i)) == 0 else 1
for i, k in enumerate(exclusive_args)}
with pytest.raises(ValueError,
match='One and only one of `interval`, '
'`epoch_freq` and `batch_freq` should '
'be specified'):
_ = AutoCheckpoint(checkpoint=checkpoint, root_dir=root_dir,
**kwargs)
with pytest.raises(TypeError,
match='`restore_checkpoint` must be a str or a '
'bool: got .*'):
_ = AutoCheckpoint(checkpoint=checkpoint, root_dir=root_dir,
interval=1., restore_checkpoint=123)
for arg in exclusive_args:
cb = AutoCheckpoint(checkpoint=checkpoint, root_dir=root_dir,
**{arg: 123})
self.assertEqual(cb.checkpoint, checkpoint)
self.assertEqual(cb.root_dir, root_dir)
self.assertEqual(cb.restore_checkpoint, True)
self.assertEqual(cb.last_checkpoint_time, 0.)
self.assertEqual(getattr(cb, arg), 123)
for arg2 in exclusive_args:
if arg2 != arg:
self.assertIsNone(getattr(cb, arg2))
####################
# on_train_begin() #
####################
cb = AutoCheckpoint(checkpoint=checkpoint, root_dir=root_dir,
interval=0., state_objects=state_objects)
cb.make_checkpoint = mock.Mock()
cb.on_stage_begin(data)
self.assertIs(cb.checkpoint_manager.state_objects['a'], a)
self.assertIs(cb.checkpoint_manager.state_objects['__stage'].stage,
stage)
a.value = 123
ckpt_path_1 = cb.checkpoint_manager.save('ckpt_1')
a.value = 456
ckpt_path_2 = cb.checkpoint_manager.save('ckpt_2')
checkpoint.logs.clear()
# restore_checkpoint is True
cb.restore_checkpoint = True
a.value = 789
with mock.patch('time.time', return_value=111.):
cb.on_train_begin(data)
self.assertEqual(cb.last_checkpoint_time, 111.)
self.assertEqual(a.value, 456)
self.assertEqual(
checkpoint.logs,
[('restore', os.path.join(ckpt_path_2, 'ckpt'))]
)
checkpoint.logs.clear()
# restore_checkpoint is False
cb.restore_checkpoint = False
a.value = 789
with mock.patch('time.time', return_value=222.):
cb.on_train_begin(data)
self.assertEqual(cb.last_checkpoint_time, 222.)
self.assertEqual(a.value, 789)
self.assertEqual(checkpoint.logs, [])
# restore_checkpoint is str
cb.restore_checkpoint = ckpt_path_1
a.value = 789
with mock.patch('time.time', return_value=333.):
cb.on_train_begin(data)
self.assertEqual(cb.last_checkpoint_time, 333.)
self.assertEqual(a.value, 123)
self.assertEqual(
checkpoint.logs,
[('restore', os.path.join(ckpt_path_1, 'ckpt'))]
)
checkpoint.logs.clear()
# a new stage should not trigger action
cb.restore_checkpoint = True
with mock.patch('time.time', return_value=444.):
cb.on_train_begin(data2)
self.assertEqual(cb.last_checkpoint_time, 333.)
self.assertEqual(checkpoint.logs, [])
#################################################
# on_train_epoch_end() and on_batch_epoch_end() #
#################################################
# on_train_epoch_end() by interval
cb.interval = 1.
cb.last_checkpoint_time = 0.
cb.epoch_freq = cb.batch_freq = None
data.index = 3
data.end_timestamp = .5
with mock.patch('time.time', return_value=555.):
cb.on_train_epoch_end(data)
self.assertFalse(cb.make_checkpoint.called)
self.assertEqual(cb.last_checkpoint_time, 0.)
cb.make_checkpoint.reset_mock()
data.end_timestamp = 1.
with mock.patch('time.time', return_value=555.):
cb.on_train_epoch_end(data)
self.assertTrue(cb.make_checkpoint.called)
self.assertEqual(cb.last_checkpoint_time, 555.)
cb.make_checkpoint.reset_mock()
# on_batch_epoch_end() by interval
cb.interval = 1.
cb.last_checkpoint_time = 0.
cb.epoch_freq = cb.batch_freq = None
data.index = 3
data.end_timestamp = .5
with mock.patch('time.time', return_value=555.):
cb.on_train_batch_end(data)
self.assertFalse(cb.make_checkpoint.called)
self.assertEqual(cb.last_checkpoint_time, 0.)
cb.make_checkpoint.reset_mock()
data.end_timestamp = 1.
with mock.patch('time.time', return_value=555.):
cb.on_train_batch_end(data)
self.assertTrue(cb.make_checkpoint.called)
self.assertEqual(cb.last_checkpoint_time, 555.)
cb.make_checkpoint.reset_mock()
cb.stage.batch.total = 3 # do not save checkpoint at the last batch
cb.last_checkpoint_time = 0.
data.end_timestamp = 1.
with mock.patch('time.time', return_value=555.):
cb.on_train_batch_end(data)
self.assertFalse(cb.make_checkpoint.called)
self.assertEqual(cb.last_checkpoint_time, 0.)
cb.make_checkpoint.reset_mock()
# on_train_epoch_end() by epoch_freq
cb.epoch_freq = 3
cb.last_checkpoint_time = 0.
cb.interval = cb.batch_freq = None
data.end_timestamp = 1.
data.index = 2
with mock.patch('time.time', return_value=555.):
cb.on_train_epoch_end(data)
self.assertFalse(cb.make_checkpoint.called)
self.assertEqual(cb.last_checkpoint_time, 0.)
cb.make_checkpoint.reset_mock()
data.index = 3
with mock.patch('time.time', return_value=555.):
cb.on_train_epoch_end(data)
self.assertTrue(cb.make_checkpoint.called)
self.assertEqual(cb.last_checkpoint_time, 555.)
cb.make_checkpoint.reset_mock()
# on_train_batch_end() by batch_freq
cb.batch_freq = | |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import collections
import collections.abc
import datetime
import functools
import operator
import warnings
from typing import (
TYPE_CHECKING,
Any,
ClassVar,
Collection,
Dict,
FrozenSet,
Iterable,
List,
Optional,
Sequence,
Set,
Tuple,
Type,
Union,
)
import attr
import pendulum
from sqlalchemy import func, or_
from sqlalchemy.orm.session import Session
from airflow.compat.functools import cache
from airflow.exceptions import UnmappableOperator
from airflow.models.abstractoperator import (
DEFAULT_OWNER,
DEFAULT_POOL_SLOTS,
DEFAULT_PRIORITY_WEIGHT,
DEFAULT_QUEUE,
DEFAULT_RETRIES,
DEFAULT_RETRY_DELAY,
DEFAULT_TRIGGER_RULE,
DEFAULT_WEIGHT_RULE,
AbstractOperator,
TaskStateChangeCallback,
)
from airflow.models.pool import Pool
from airflow.serialization.enums import DagAttributeTypes
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.ti_deps.deps.mapped_task_expanded import MappedTaskIsExpanded
from airflow.typing_compat import Literal
from airflow.utils.context import Context
from airflow.utils.helpers import is_container
from airflow.utils.operator_resources import Resources
from airflow.utils.state import State, TaskInstanceState
from airflow.utils.trigger_rule import TriggerRule
from airflow.utils.types import NOTSET
if TYPE_CHECKING:
import jinja2 # Slow import.
from airflow.models.baseoperator import BaseOperator, BaseOperatorLink
from airflow.models.dag import DAG
from airflow.models.taskinstance import TaskInstance
from airflow.models.xcom_arg import XComArg
from airflow.utils.task_group import TaskGroup
# BaseOperator.apply() can be called on an XComArg, sequence, or dict (not
# any mapping since we need the value to be ordered).
# BUGFIX: "XComArg" must be a string forward reference here -- XComArg is
# only imported under TYPE_CHECKING above, so naming it directly raises
# NameError at import time. typing.Union resolves the string lazily.
Mappable = Union["XComArg", Sequence, dict]
ValidationSource = Union[Literal["map"], Literal["partial"]]
# For isinstance() check.
@cache
def get_mappable_types() -> Tuple[type, ...]:
    """Return the runtime types accepted as mappable argument values."""
    from airflow.models.xcom_arg import XComArg

    mappable = (XComArg, dict, list)
    return mappable
def validate_mapping_kwargs(op: Type["BaseOperator"], func: ValidationSource, value: Dict[str, Any]) -> None:
    """Walk ``op``'s MRO consuming known parameter names from ``value``.

    For ``apply``, each consumed argument must also be a mappable type.
    Anything left over after the whole MRO is an unknown keyword argument.
    """
    # use a dict so order of args is same as code order
    unknown_args = value.copy()
    for klass in op.mro():
        init = klass.__init__  # type: ignore[misc]
        try:
            param_names = init._BaseOperatorMeta__param_names
        except AttributeError:
            continue
        for name in param_names:
            # Pop into a fresh local (the original shadowed ``value``).
            arg_value = unknown_args.pop(name, NOTSET)
            if func != "apply" or arg_value is NOTSET:
                continue
            if isinstance(arg_value, get_mappable_types()):
                continue
            type_name = type(arg_value).__name__
            error = f"{op.__name__}.apply() got an unexpected type {type_name!r} for keyword argument {name}"
            raise ValueError(error)
        if not unknown_args:
            return  # If we have no args left to check: stop looking at the MRO chain.
    if len(unknown_args) == 1:
        error = f"an unexpected keyword argument {unknown_args.popitem()[0]!r}"
    else:
        names = ", ".join(repr(n) for n in unknown_args)
        error = f"unexpected keyword arguments {names}"
    raise TypeError(f"{op.__name__}.{func}() got {error}")
def prevent_duplicates(kwargs1: Dict[str, Any], kwargs2: Dict[str, Any], *, fail_reason: str) -> None:
    """Raise TypeError if any key appears in both mappings.

    The message pluralizes "argument" and lists multiple keys sorted.
    """
    overlap = set(kwargs1).intersection(kwargs2)
    if not overlap:
        return
    if len(overlap) == 1:
        raise TypeError(f"{fail_reason} argument: {overlap.pop()}")
    joined = ", ".join(sorted(overlap))
    raise TypeError(f"{fail_reason} arguments: {joined}")
def ensure_xcomarg_return_value(arg: Any) -> None:
    """Recursively verify mapped values only reference the default XCom key.

    Raises ValueError for any XComArg carrying a custom key; descends into
    mappings (their values) and other containers.
    """
    from airflow.models.xcom_arg import XCOM_RETURN_KEY, XComArg

    if isinstance(arg, XComArg):
        if arg.key != XCOM_RETURN_KEY:
            raise ValueError(f"cannot map over XCom with custom key {arg.key!r} from {arg.operator}")
        return
    if not is_container(arg):
        return
    if isinstance(arg, collections.abc.Mapping):
        children = arg.values()
    elif isinstance(arg, collections.abc.Iterable):
        children = arg
    else:
        return
    for child in children:
        ensure_xcomarg_return_value(child)
@attr.define(kw_only=True, repr=False)
class OperatorPartial:
    """An "intermediate state" returned by ``BaseOperator.partial()``.

    This only exists at DAG-parsing time; the only intended usage is for the
    user to call ``.apply()`` on it at some point (usually in a method chain) to
    create a ``MappedOperator`` to add into the DAG.
    """

    # The operator subclass to be mapped.
    operator_class: Type["BaseOperator"]
    # Keyword arguments fixed by partial(); shared by every expanded task.
    kwargs: Dict[str, Any]
    _apply_called: bool = False  # Set when apply() is called to ease user debugging.

    def __attrs_post_init__(self):
        # Local import to avoid a circular/slow module-level import.
        from airflow.operators.subdag import SubDagOperator

        if issubclass(self.operator_class, SubDagOperator):
            raise TypeError("Mapping over deprecated SubDagOperator is not supported")
        validate_mapping_kwargs(self.operator_class, "partial", self.kwargs)

    def __repr__(self) -> str:
        args = ", ".join(f"{k}={v!r}" for k, v in self.kwargs.items())
        return f"{self.operator_class.__name__}.partial({args})"

    def __del__(self):
        # Warn the user about a partial() that was never turned into a
        # MappedOperator via apply() -- almost certainly a DAG-authoring bug.
        if not self._apply_called:
            warnings.warn(f"{self!r} was never mapped!")

    def apply(self, **mapped_kwargs: "Mappable") -> "MappedOperator":
        """Create the MappedOperator from this partial and the mapped kwargs."""
        from airflow.operators.dummy import DummyOperator

        # NOTE(review): "apply" is outside ValidationSource's declared
        # literals ("map"/"partial") -- annotation and call disagree; confirm
        # which is intended.
        validate_mapping_kwargs(self.operator_class, "apply", mapped_kwargs)
        prevent_duplicates(self.kwargs, mapped_kwargs, fail_reason="mapping already partial")
        # Mapped values must come from the default XCom return key only.
        ensure_xcomarg_return_value(mapped_kwargs)
        partial_kwargs = self.kwargs.copy()
        # These kwargs become dedicated MappedOperator fields rather than
        # staying inside partial_kwargs.
        task_id = partial_kwargs.pop("task_id")
        params = partial_kwargs.pop("params")
        dag = partial_kwargs.pop("dag")
        task_group = partial_kwargs.pop("task_group")
        start_date = partial_kwargs.pop("start_date")
        end_date = partial_kwargs.pop("end_date")
        op = MappedOperator(
            operator_class=self.operator_class,
            mapped_kwargs=mapped_kwargs,
            partial_kwargs=partial_kwargs,
            task_id=task_id,
            params=params,
            deps=MappedOperator.deps_for(self.operator_class),
            operator_extra_links=self.operator_class.operator_extra_links,
            template_ext=self.operator_class.template_ext,
            template_fields=self.operator_class.template_fields,
            ui_color=self.operator_class.ui_color,
            ui_fgcolor=self.operator_class.ui_fgcolor,
            is_dummy=issubclass(self.operator_class, DummyOperator),
            task_module=self.operator_class.__module__,
            task_type=self.operator_class.__name__,
            dag=dag,
            task_group=task_group,
            start_date=start_date,
            end_date=end_date,
        )
        self._apply_called = True
        return op
@attr.define(kw_only=True)
class MappedOperator(AbstractOperator):
    """Object representing a mapped operator in a DAG."""

    # The operator class being mapped; a string after deserialization.
    operator_class: Union[Type["BaseOperator"], str]
    # Per-task (mapped) arguments vs. arguments shared by all expanded tasks.
    mapped_kwargs: Dict[str, "Mappable"]
    partial_kwargs: Dict[str, Any]
    # Needed for serialization.
    task_id: str
    params: Optional[dict]
    deps: FrozenSet[BaseTIDep]
    operator_extra_links: Collection["BaseOperatorLink"]
    template_ext: Collection[str]
    template_fields: Collection[str]
    ui_color: str
    ui_fgcolor: str
    _is_dummy: bool
    _task_module: str
    _task_type: str
    dag: Optional["DAG"]
    task_group: Optional["TaskGroup"]
    start_date: Optional[pendulum.DateTime]
    end_date: Optional[pendulum.DateTime]
    # Relationship sets are excluded from __init__ and populated later.
    upstream_task_ids: Set[str] = attr.ib(factory=set, init=False)
    downstream_task_ids: Set[str] = attr.ib(factory=set, init=False)

    is_mapped: ClassVar[bool] = True
    subdag: None = None  # Since we don't support SubDagOperator, this is always None.
def __repr__(self):
return f"<Mapped({self._task_type}): {self.task_id}>"
def __attrs_post_init__(self):
from airflow.models.xcom_arg import XComArg
self._validate_argument_count()
if self.task_group:
self.task_group.add(self)
if self.dag:
self.dag.add_task(self)
for k, v in self.mapped_kwargs.items():
if k in self.template_fields:
XComArg.apply_upstream_relationship(self, v)
for k, v in self.partial_kwargs.items():
if k in self.template_fields:
XComArg.apply_upstream_relationship(self, v)
@classmethod
@cache
def get_serialized_fields(cls):
# Not using 'cls' here since we only want to serialize base fields.
return frozenset(attr.fields_dict(MappedOperator)) - {
"dag",
"deps",
"is_mapped",
"subdag",
"task_group",
"upstream_task_ids",
}
@staticmethod
@cache
def deps_for(operator_class: Type["BaseOperator"]) -> FrozenSet[BaseTIDep]:
operator_deps = operator_class.deps
if not isinstance(operator_deps, collections.abc.Set):
raise UnmappableOperator(
f"'deps' must be a set defined as a class-level variable on {operator_class.__name__}, "
f"not a {type(operator_deps).__name__}"
)
return operator_deps | {MappedTaskIsExpanded()}
    def _validate_argument_count(self) -> None:
        """Validate mapping arguments by unmapping with mocked values.

        This ensures the user passed enough arguments in the DAG definition for
        the operator to work in the task runner. This does not guarantee the
        arguments are *valid* (that depends on the actual mapping values), but
        makes sure there are *enough* of them.
        """
        if isinstance(self.operator_class, str):
            return  # No need to validate deserialized operator.
        self.operator_class.validate_mapped_arguments(**self._get_unmap_kwargs())
    # ------------------------------------------------------------------
    # Read-only accessors. Each exposes one entry from ``partial_kwargs``
    # (or a private field), applying the module-level default when absent.
    # ------------------------------------------------------------------
    @property
    def task_type(self) -> str:
        """Implementing Operator."""
        return self._task_type
    @property
    def inherits_from_dummy_operator(self) -> bool:
        """Implementing Operator."""
        return self._is_dummy
    @property
    def roots(self) -> Sequence[AbstractOperator]:
        """Implementing DAGNode."""
        return [self]
    @property
    def leaves(self) -> Sequence[AbstractOperator]:
        """Implementing DAGNode."""
        return [self]
    @property
    def owner(self) -> str:  # type: ignore[override]
        return self.partial_kwargs.get("owner", DEFAULT_OWNER)
    @property
    def email(self) -> Union[None, str, Iterable[str]]:
        return self.partial_kwargs.get("email")
    @property
    def trigger_rule(self) -> TriggerRule:
        return self.partial_kwargs.get("trigger_rule", DEFAULT_TRIGGER_RULE)
    @property
    def depends_on_past(self) -> bool:
        return bool(self.partial_kwargs.get("depends_on_past"))
    @property
    def wait_for_downstream(self) -> bool:
        return bool(self.partial_kwargs.get("wait_for_downstream"))
    @property
    def retries(self) -> Optional[int]:
        return self.partial_kwargs.get("retries", DEFAULT_RETRIES)
    @property
    def queue(self) -> str:
        return self.partial_kwargs.get("queue", DEFAULT_QUEUE)
    @property
    def pool(self) -> str:
        return self.partial_kwargs.get("pool", Pool.DEFAULT_POOL_NAME)
    @property
    def pool_slots(self) -> Optional[int]:
        # Annotation fixed from Optional[str]: pool slots are a count
        # (DEFAULT_POOL_SLOTS is an integer).
        return self.partial_kwargs.get("pool_slots", DEFAULT_POOL_SLOTS)
    @property
    def execution_timeout(self) -> Optional[datetime.timedelta]:
        return self.partial_kwargs.get("execution_timeout")
    @property
    def retry_delay(self) -> datetime.timedelta:
        return self.partial_kwargs.get("retry_delay", DEFAULT_RETRY_DELAY)
    @property
    def retry_exponential_backoff(self) -> bool:
        return bool(self.partial_kwargs.get("retry_exponential_backoff"))
    @property
    def priority_weight(self) -> int:  # type: ignore[override]
        return self.partial_kwargs.get("priority_weight", DEFAULT_PRIORITY_WEIGHT)
    @property
    def weight_rule(self) -> int:  # type: ignore[override]
        # NOTE(review): return annotation looks wrong -- weight rules are
        # string constants elsewhere in Airflow; confirm and change to str.
        return self.partial_kwargs.get("weight_rule", DEFAULT_WEIGHT_RULE)
    @property
    def sla(self) -> Optional[datetime.timedelta]:
        return self.partial_kwargs.get("sla")
    @property
    def max_active_tis_per_dag(self) -> Optional[int]:
        return self.partial_kwargs.get("max_active_tis_per_dag")
    @property
    def resources(self) -> Optional[Resources]:
        return self.partial_kwargs.get("resources")
    @property
    def on_execute_callback(self) -> Optional[TaskStateChangeCallback]:
        return self.partial_kwargs.get("on_execute_callback")
    @property
    def on_failure_callback(self) -> Optional[TaskStateChangeCallback]:
        return self.partial_kwargs.get("on_failure_callback")
    @property
    def on_retry_callback(self) -> Optional[TaskStateChangeCallback]:
        return self.partial_kwargs.get("on_retry_callback")
    @property
    def on_success_callback(self) -> Optional[TaskStateChangeCallback]:
        return self.partial_kwargs.get("on_success_callback")
    @property
    def run_as_user(self) -> Optional[str]:
        return self.partial_kwargs.get("run_as_user")
    @property
    def executor_config(self) -> dict:
        return self.partial_kwargs.get("executor_config", {})
    @property
    def inlets(self) -> Optional[Any]:
        return self.partial_kwargs.get("inlets", None)
    @property
    def outlets(self) -> Optional[Any]:
        return self.partial_kwargs.get("outlets", None)
    def get_dag(self) -> Optional["DAG"]:
        """Implementing Operator."""
        return self.dag
    def serialize_for_task_group(self) -> Tuple[DagAttributeTypes, Any]:
        """Implementing DAGNode."""
        return DagAttributeTypes.OP, self.task_id
def _get_unmap_kwargs(self) -> Dict[str, Any]:
return {
"task_id": self.task_id,
"dag": self.dag,
"task_group": self.task_group,
"params": self.params,
"start_date": self.start_date,
"end_date": self.end_date,
**self.partial_kwargs,
**self.mapped_kwargs,
}
def unmap(self) -> "BaseOperator":
"""Get the "normal" Operator after applying the current mapping."""
dag = self.dag
if not dag:
raise RuntimeError("Cannot unmap a task without a DAG")
if isinstance(self.operator_class, str):
raise RuntimeError("Cannot unmap a deserialized operator")
dag._remove_task(self.task_id)
return self.operator_class(**self._get_unmap_kwargs())
    def _get_expansion_kwargs(self) -> Dict[str, "Mappable"]:
        """The kwargs to calculate expansion length against.

        This is ``self.mapped_kwargs`` for classic operators because kwargs to
        ``BaseOperator.apply()`` contribute to operator arguments.
        """
        return self.mapped_kwargs
def _get_map_lengths(self, run_id: str, *, session: Session) -> Dict[str, int]:
# TODO: Find a way to cache this.
from airflow.models.taskmap import TaskMap
from airflow.models.xcom_arg import XComArg
expansion_kwargs | |
import json
import logging
from datetime import datetime, timezone
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.templatetags.static import static
from django.http.response import Http404, HttpResponse # , JsonResponse
from django.shortcuts import get_object_or_404, redirect, reverse
from django.urls import reverse_lazy
# from django.utils.html import mark_safe
from django.views.decorators.http import require_POST
from django.views.generic.base import TemplateView, View
from django.views.generic.edit import DeleteView
from memoize import delete_memoized
# from apps.data.models.condition import Condition
# from apps.data.models.encounter import Encounter
from apps.data.models.procedure import Procedure
# from apps.data.models.observation import Observation
from apps.data.models.practitioner import Practitioner
from apps.data.util import parse_timestamp
from apps.notifications.models import Notification
from apps.org.models import (
REQUEST_APPROVED,
REQUEST_DENIED,
REQUEST_REQUESTED,
RESOURCE_CHOICES,
Organization,
ResourceGrant,
ResourceRequest,
)
from apps.users.models import UserProfile
from apps.users.utils import get_id_token_payload
from .constants import RECORDS_STU3, FIELD_TITLES, PROVIDER_RESOURCES, RESOURCES
# , VITALSIGNS
from .forms import ResourceRequestForm
from .utils import (
fetch_member_data,
# get_allergies,
get_prescriptions,
get_resource_data,
)
from .fhir_requests import (
get_converted_fhir_resource,
get_lab_results,
get_vital_signs,
)
from .fhir_utils import (
resource_count,
load_test_fhir_data,
find_index,
find_list_entry,
path_extract,
sort_json,
create_vital_sign_view_by_date
)
logger = logging.getLogger(__name__)
class SelfOrApprovedOrgMixin(UserPassesTestMixin):
    """Grant access to a member's data to the member themselves or to an
    agent of an Organization holding a ResourceGrant for that member."""

    def get_login_url(self):
        """Org agents can request access, others go home (login or member:dashboard)."""
        user = self.request.user
        if not user.is_anonymous and user.user_type == user.UserType.ORG_AGENT:
            return reverse('member:request-access', args=[self.kwargs['pk']])
        return reverse('login') + '?next=' + self.request.path

    def handle_no_permission(self):
        # Redirect (to the URL above) instead of raising PermissionDenied.
        return redirect(self.get_login_url())

    def get_member(self):
        """Return the member user referenced by the URL pk, or 404."""
        return get_object_or_404(get_user_model().objects.filter(pk=self.kwargs['pk']))

    def test_func(self):
        """The request.user may see the member's data sources if:
        - the request.user is the member, or
        - the request.user is in an Organization that has been granted access
          to the member's data
        """
        member = self.get_member()
        if member == self.request.user:
            return True
        # Not the member: require a grant tying one of the user's
        # organizations to this member.
        grant = ResourceGrant.objects.filter(
            organization__agents=self.request.user, member=member
        ).first()
        return grant is not None
class SummaryView(LoginRequiredMixin, SelfOrApprovedOrgMixin, TemplateView):
    """Summary page of a member's health records (FHIR resources, lab
    results, vital signs), visible to the member or an approved org agent."""

    template_name = "summary2.html"

    def get_context_data(self, **kwargs):
        """Build the template context: member, data freshness, resource
        bundles, and per-record-type summaries."""
        context = super().get_context_data(**kwargs)
        context['member'] = self.get_member()
        # Get the data for the member, and set it in the context
        data = fetch_member_data(context['member'], 'sharemyhealth')
        context['updated_at'] = parse_timestamp(data.get('updated_at'))
        context['timestamp'] = data.get('updated_at', "No timestamp")
        if context['updated_at']:
            context['time_since_update'] = (
                datetime.now(timezone.utc) - context['updated_at']
            )
            context['updated_at'] = context['updated_at'].timestamp()
        fhir_data = load_test_fhir_data(data)
        # fhir_data = data.get('fhir_data')
        if settings.DEBUG:
            context['data'] = data
        #
        # get resource bundles
        #
        # Observation mixes lab results and vital signs, so exclude it.
        # BUGFIX: build a new list instead of aliasing RESOURCES -- the old
        # code called RESOURCES.remove('Observation'), which mutated the
        # module-level constant and raised ValueError on the next request.
        resource_list = [r for r in RESOURCES if r != 'Observation']
        # NOTE(review): resource_list is currently unused below; confirm
        # whether get_converted_fhir_resource() should receive it.
        resources = get_converted_fhir_resource(fhir_data)
        if len(resources.entry) > 0:
            resources = resources.entry
        else:
            resources = []
        context.setdefault('resources', resources)
        labs = get_lab_results(fhir_data)
        if len(labs.entry) > 0:
            labs = labs.entry
        else:
            labs = []
        context.setdefault('labs', labs)
        vitals = get_vital_signs(fhir_data)
        if len(vitals.entry) > 0:
            vitals = vitals.entry
        else:
            vitals = []
        context.setdefault('vitals', vitals)
        counts = resource_count(resources)
        context.setdefault('counts', counts)
        # print(counts)
        #
        #####
        # If the upstream fetch produced no entries, drop the memoized data
        # so the next request re-fetches instead of caching an empty result.
        if fhir_data is None or 'entry' not in fhir_data or not fhir_data['entry']:
            delete_memoized(fetch_member_data, context[
                'member'], 'sharemyhealth')
        # all_records = RECORDS
        all_records = RECORDS_STU3
        summarized_records = []
        notes_headers = ['Agent Name', 'Organization', 'Date']
        for record in all_records:
            if record['call_type'].lower() == "fhir":
                entries = get_converted_fhir_resource(fhir_data, record['resources'])
                record['data'] = entries['entry']
                record['count'] = len(entries['entry'])
                summarized_records.append(record)
            elif record['call_type'].lower() == 'custom':
                # NOTE(review): RecordsView passes ``record`` to these two
                # helpers; this view does not -- confirm which is intended.
                if record['name'] == 'VitalSigns':
                    entries = get_vital_signs(fhir_data)
                    record['data'] = entries['entry']
                    record['count'] = len(entries['entry'])
                    summarized_records.append(record)
                elif record['name'] == 'LabResults':
                    entries = get_lab_results(fhir_data)
                    record['data'] = entries['entry']
                    record['count'] = len(entries['entry'])
                    summarized_records.append(record)
            else:  # skip
                pass
        context.setdefault('summarized_records', summarized_records)
        context.setdefault('notes_headers', notes_headers)
        # TODO: include notes in the context data.
        return context
class RecordsView(LoginRequiredMixin, SelfOrApprovedOrgMixin, TemplateView):
    """Show a member's health records.

    With no 'resource_name' URL kwarg (or 'list'), renders a summary of all
    record types; otherwise renders the single resource type whose slug
    matches 'resource_name'.
    """
    template_name = "records2.html"
    # Defaults used when the URL does not specify a data source/record type.
    default_resource_name = 'sharemyhealth'
    default_record_type = 'Condition'

    def get_context_data(self, **kwargs):
        """Add records data into the context."""
        context = super().get_context_data(**kwargs)
        context['member'] = self.get_member()
        resource_name = self.kwargs.get('resource_name') or 'list'
        # Get the data for the member, and set it in the context
        data = fetch_member_data(context['member'], 'sharemyhealth')
        context['updated_at'] = parse_timestamp(data.get('updated_at'))
        context['timestamp'] = data.get('updated_at', "No timestamp")
        if context['updated_at']:
            context['time_since_update'] = (
                datetime.now(timezone.utc) - context['updated_at']
            )
        # NOTE(review): load_test_fhir_data presumably substitutes a local
        # test FHIR bundle outside prod/stage/dev -- confirm its definition.
        fhir_data = load_test_fhir_data(data)
        # fhir_data = data.get('fhir_data')
        if settings.DEBUG:
            context['data'] = data
        logging.debug(
            "fhir_data records: %r",
            fhir_data and fhir_data.get(
                'entry') and len(fhir_data.get('entry')),
        )
        # No usable FHIR bundle: drop the memoized fetch so the next request
        # retries the upstream call instead of serving the cached empty result.
        if fhir_data is None or 'entry' not in fhir_data or not fhir_data['entry']:
            delete_memoized(fetch_member_data, context[
                'member'], 'sharemyhealth')
        if resource_name == 'list':
            all_records = RECORDS_STU3
            summarized_records = []
            for record in all_records:
                if record['call_type'].lower() == "fhir":
                    # print("record processing for ", record['name'])
                    entries = get_converted_fhir_resource(fhir_data, record['resources'])
                    # NOTE(review): this mutates the module-level RECORDS_STU3
                    # entries in place (state shared across requests) --
                    # verify that is intended.
                    record['data'] = entries['entry']
                    record['count'] = len(entries['entry'])
                    summarized_records.append(record)
                elif record['call_type'].lower() == 'custom':
                    if record['name'] == 'VitalSigns':
                        entries = get_vital_signs(fhir_data, record)
                        record['data'] = entries['entry']
                        record['count'] = len(entries['entry'])
                        summarized_records.append(record)
                    elif record['name'] == 'LabResults':
                        entries = get_lab_results(fhir_data, record)
                        record['data'] = entries['entry']
                        record['count'] = len(entries['entry'])
                        summarized_records.append(record)
                else:  # skip
                    pass
            context.setdefault('all_records', summarized_records)
        else:
            # NOTE(review): if find_index returns -1 for an unknown slug this
            # silently selects the *last* profile -- confirm find_index's
            # contract.
            resource_profile = RECORDS_STU3[find_index(RECORDS_STU3, "slug", resource_name)]
            # print("Resource Profile", resource_profile)
            if resource_profile:
                title = resource_profile['display']
                headers = resource_profile['headers']
                exclude = resource_profile['exclude']
                # second_fields = headers
                # second_fields.append(exclude)
            else:
                title = resource_name
                headers = ['id', ]
                exclude = ['']
                # second_fields
            # ff = find_list_entry(FIELD_TITLES, "profile", resource_profile['name'])
            # print("Friendly:", ff)
            # print("headers:", headers)
            # print("Exclude:", exclude)
            # print("second_fields:", second_fields)
            if "sort" in resource_profile:
                sort_field = resource_profile['sort']
            else:
                sort_field = ""
            title = resource_profile['display']
            # NOTE(review): a 'custom' profile whose slug is neither
            # 'labresults' nor 'vitalsigns' leaves `entries` unbound and
            # raises NameError at path_extract below.
            if resource_profile['call_type'] == 'custom':
                if resource_profile['slug'] == 'labresults':
                    entries = get_lab_results(fhir_data, resource_profile)
                elif resource_profile['slug'] == 'vitalsigns':
                    entries = get_vital_signs(fhir_data, resource_profile)
                    vitalsigns = create_vital_sign_view_by_date(entries['entry'])
                    print("vitalsigns:", vitalsigns)
                    context.setdefault('vitalsigns', vitalsigns)
            elif resource_profile['call_type'] == 'skip':
                entries = {'entry': []}
            else:
                entries = get_converted_fhir_resource(fhir_data, [resource_profile['name']])
            content_list = path_extract(entries['entry'], resource_profile)
            context.setdefault('friendly_fields', find_list_entry(FIELD_TITLES, "profile", resource_profile['name']))
            context.setdefault('title', title)
            context.setdefault('headers', headers)
            context.setdefault('exclude', exclude)
            # context.setdefault('content_list', content_list)
            context.setdefault('resource_profile', resource_profile)
            sorted_content = sort_json(content_list, sort_field)
            context.setdefault('content_list', sorted_content)
        return context

    def render_to_response(self, context, **kwargs):
        """Redirect when the context asks for it; otherwise render normally."""
        if context.get('redirect_url'):
            return redirect(context.get('redirect_url'))
        else:
            return super().render_to_response(context, **kwargs)
class PrescriptionDetailModalView(
    LoginRequiredMixin, SelfOrApprovedOrgMixin, TemplateView
):
    """modal (bare) HTML for a single prescription"""

    template_name = "member/prescription_modal_content.html"

    def get_context_data(self, **kwargs):
        """Build the modal context for the prescription named by the
        'resource_id' URL kwarg.

        Raises:
            Http404: when the prescription is not found in the member's data.
        """
        context = super().get_context_data(**kwargs)
        context['member'] = self.get_member()
        data = fetch_member_data(context['member'], 'sharemyhealth')
        context['updated_at'] = parse_timestamp(data.get('updated_at'))
        if context['updated_at']:
            context['time_since_update'] = (
                datetime.now(timezone.utc) - context['updated_at']
            )
        fhir_data = data.get('fhir_data')
        if settings.DEBUG:
            context['data'] = fhir_data
        # No usable FHIR bundle: invalidate the memoized fetch so the next
        # request retries instead of reusing the cached empty payload.
        if fhir_data is None or 'entry' not in fhir_data or not fhir_data['entry']:
            delete_memoized(fetch_member_data, context[
                'member'], 'sharemyhealth')
        prescriptions = get_prescriptions(
            fhir_data, id=context[
                'resource_id'], incl_practitioners=True, json=True
        )
        if not prescriptions:
            # BUG FIX: Http404 is an exception and must be *raised*; the
            # previous code returned an Http404 instance from
            # get_context_data, which was then used as the template context
            # instead of producing a 404 response.
            raise Http404()
        context['prescription'] = next(iter(prescriptions.values()))
        return context
class DataView(LoginRequiredMixin, SelfOrApprovedOrgMixin, View):
    """Return JSON containing the requested member data."""

    def get(self, request, *args, **kwargs):
        """Serve a single member resource as JSON.

        URL kwargs:
            resource_type: 'prescriptions', a type listed in RESOURCES, or
                any other string (handled by the raw-data fallback).
            resource_id: id of the specific resource to return.
        """
        member = self.get_member()
        resource_type = kwargs['resource_type']
        resource_id = kwargs['resource_id']
        data = fetch_member_data(member, 'sharemyhealth')
        # this will only pull a local fhir file if VPC_ENV is not prod|stage|dev
        fhir_data = load_test_fhir_data(data)
        # No usable FHIR bundle: invalidate the cached fetch so it is retried.
        if fhir_data is None or 'entry' not in fhir_data or not fhir_data['entry']:
            delete_memoized(fetch_member_data, member, 'sharemyhealth')
        if resource_type == 'prescriptions':
            # BUG FIX: return immediately. Previously this branch fell
            # through to a trailing json.dumps(data, ...), which discarded
            # the prescriptions payload and returned the entire member data
            # blob instead (get_prescriptions already returns JSON here).
            response_data = get_prescriptions(
                fhir_data, id=resource_id, incl_practitioners=True, json=True
            )
            return HttpResponse(response_data)
        if resource_type in RESOURCES:
            # NOTE(review): find_index presumably returns -1 for a missing
            # slug, which would silently select the last profile -- confirm.
            resource_profile = RECORDS_STU3[find_index(RECORDS_STU3, "slug", resource_type.lower())]
            if resource_profile:
                bundle = get_converted_fhir_resource(fhir_data, [resource_profile['name']])
                # BUG FIX: when no entry matched resource_id, the old code
                # dumped the *entire* member payload; an unknown id now
                # yields an empty JSON object.
                matched = {}
                for entry in bundle['entry']:
                    if entry.get('id') == resource_id:
                        matched = entry
                response_data = json.dumps(matched, indent=settings.JSON_INDENT)
                return HttpResponse(response_data)
        # fallback: look the resource up directly in the raw FHIR data
        found = {
            resource['id']: resource
            for resource in get_resource_data(
                fhir_data, kwargs['resource_type'], id=resource_id
            )
        }
        response_data = json.dumps(found, indent=settings.JSON_INDENT)
        return HttpResponse(response_data)
class ProvidersView(LoginRequiredMixin, SelfOrApprovedOrgMixin, TemplateView):
template_name = "records2.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
print(context)
context['member'] = self.get_member()
resource_name = self.kwargs.get('resource_name') or 'list'
data = fetch_member_data(context['member'], 'sharemyhealth')
context['updated_at'] = parse_timestamp(data.get('updated_at'))
if context['updated_at']:
context['time_since_update'] = (
datetime.now(timezone.utc) - context['updated_at']
)
context['back_to'] = 'member:providers'
####
# this will only pull a local fhir file if VPC_ENV is not prod|stage|dev
fhir_data = load_test_fhir_data(data)
# fhir_data = data.get('fhir_data')
if settings.DEBUG:
context['data'] = data
logging.debug(
"fhir_data records: %r",
fhir_data and fhir_data.get(
'entry') and len(fhir_data.get('entry')),
)
if fhir_data is None or 'entry' not in fhir_data or not fhir_data['entry']:
delete_memoized(fetch_member_data, context[
'member'], 'sharemyhealth')
if resource_name == 'list':
provider_related = []
for r in RECORDS_STU3:
if r['name'] in PROVIDER_RESOURCES:
provider_related.append(r)
all_records = provider_related
summarized_records = []
for record in all_records:
if record['call_type'].lower() == "fhir":
# print("record processing for ", record['name'])
entries = get_converted_fhir_resource(fhir_data, record['resources'])
record['data'] = entries['entry']
record['count'] = len(entries['entry'])
summarized_records.append(record)
elif record['call_type'].lower() == 'custom':
pass
else: # skip
pass
context['back_to'] = 'member:providers'
context.setdefault('all_records', summarized_records)
else:
resource_profile = RECORDS_STU3[find_index(RECORDS_STU3, "slug", resource_name)]
# print("Resource Profile", resource_profile)
if resource_profile:
title = resource_profile['display']
headers = resource_profile['headers']
exclude = resource_profile['exclude']
# second_fields = headers
# second_fields.append(exclude)
else:
title = resource_name
headers = ['id', ]
exclude = ['']
# second_fields
# ff = find_list_entry(FIELD_TITLES, "profile", resource_profile['name'])
# print("Friendly:", ff)
# print("headers:", headers)
# print("Exclude:", exclude)
# print("second_fields:", second_fields)
if "sort" in resource_profile:
sort_field = resource_profile['sort']
else:
sort_field = ""
title = resource_profile['display']
if | |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stimulation algorithm for prosthesis."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import scipy
import pickle
import copy
import os
import cvxpy
import tensorflow as tf
from tensorflow.python.platform import gfile
# Command-line flags controlling the planning run.
tf.flags.DEFINE_string('Algorithm', 'simultaneous_planning',
                       'Planning algorithm to use')
tf.flags.DEFINE_float('learning_rate',
                      100,
                      'Learning rate for optimization.')
tf.flags.DEFINE_integer('t_max',
                        20,
                        'Maximum number of stimulations')
# NOTE(review): help text below duplicates t_max's; from its use as the
# per-step l1 budget, delta is the number of stimulations allowed per time
# step -- confirm before fixing the help string.
tf.flags.DEFINE_integer('delta',
                        5,
                        'Maximum number of stimulations')
tf.flags.DEFINE_string('normalization',
                       'C',
                       'Normalization ')
tf.flags.DEFINE_string('save_dir',
                       '/home/bhaishahster/stimulation_algos/pgd/',
                       'Directory to store results.')
FLAGS = flags.FLAGS
def main(unused_argv=()):
  """Load stimulation data and plan stimulation for every target.

  Reads S (targets), A (decoder) and D (dictionary) from a pickle and, for
  each target column of S, runs the algorithm selected by --Algorithm.
  """
  src = '/home/bhaishahster/Stimulation_data.pkl'
  # BUG FIX: pickles are binary; opening in text mode ('r') breaks
  # pickle.load under Python 3 and risks newline corruption.
  data = pickle.load(gfile.Open(src, 'rb'))
  S_collection = data['S']  # Target
  A = data['A']  # Decoder
  D = data['D'].T  # Dictionary
  for itarget in range(S_collection.shape[1]):
    S = S_collection[:, itarget]
    if FLAGS.Algorithm == 'greedy':
      x_greedy = greedy_stimulation(S, A, D, max_stims=FLAGS.t_max * FLAGS.delta,
                                    file_suffix='%d' % itarget, save=True,
                                    save_dir=FLAGS.save_dir)
    if FLAGS.Algorithm == 'simultaneous_planning':
      # Run greedy first (without saving) to seed the joint optimization.
      x_greedy = greedy_stimulation(S, A, D, max_stims=FLAGS.t_max * FLAGS.delta,
                                    file_suffix='%d' % itarget, save=False,
                                    save_dir=FLAGS.save_dir)
      # Keep every delta-th cumulative greedy state as the initial plan.
      x_init = np.zeros((x_greedy.shape[0], FLAGS.t_max))
      #from IPython import embed; embed()
      for it in range(FLAGS.t_max):
        print((it + 1) * FLAGS.delta - 1)
        x_init[:, it] = x_greedy[:, (it + 1) * FLAGS.delta - 1]
      simultaneous_planning(S, A, D, t_max=FLAGS.t_max, lr=FLAGS.learning_rate,
                            delta=FLAGS.delta, normalization=FLAGS.normalization,
                            file_suffix='%d' % itarget, x_init=x_init,
                            save_dir=FLAGS.save_dir)
    if FLAGS.Algorithm == 'simultaneous_planning_cvx':
      simultaneous_planning_cvx(S, A, D, t_max=FLAGS.t_max,
                                delta=FLAGS.delta,
                                file_suffix='%d' % itarget,
                                save_dir=FLAGS.save_dir)
def greedy_stimulation(S, A, D, save_dir='', max_stims = 100, file_suffix='', save=False):
  """Greedily pick, at every step, the dictionary element that minimizes the
  expected squared error between the mean percept and the target.

  Args:
    S: target percept, shape (n_pixels,).
    A: decoder matrix, shape (n_pixels, n_cells).
    D: dictionary of firing probabilities, shape (n_cells, n_dict).
    save_dir: directory for the result pickle (only used when save=True).
    max_stims: total number of stimulations to select.
    file_suffix: suffix appended to the output file name.
    save: if True, pickle the error curve and the chosen elements.

  Returns:
    Cumulative selection counts, shape (n_dict, max_stims): column t is the
    stimulation-count vector after t + 1 steps.
  """
  n_elems = D.shape[1]
  # Variance contributed by each dictionary element, assuming independent
  # Bernoulli firing with probabilities D: sum_c ||A_c||^2 * d * (1 - d).
  stas_norm = np.expand_dims(np.sum(A ** 2, 0), 0)  # 1 x n_cells
  var_dict = np.squeeze(np.dot(stas_norm, D * (1 - D)))  # n_dict
  AD = A.dot(D)
  counts = np.zeros(n_elems)
  mean_percept = A.dot(D.dot(counts))
  picks = np.zeros((n_elems, max_stims))
  for istim in range(max_stims):
    print(istim)
    # Score of each element = squared error after adding it, plus its variance.
    residual = np.expand_dims(S - mean_percept, 1)
    scores = np.sum((residual - AD) ** 2, 0) + var_dict
    best = np.argmin(scores)
    counts[best] += 1
    mean_percept = A.dot(D.dot(counts))
    picks[best, istim] = 1
  # Per-step indicators -> cumulative counts over time.
  picks = np.cumsum(picks, 1)
  error_curve = compute_error(S, A, D, var_dict, picks)
  if save:
    save_dict = {'error_curve': error_curve, 'x_chosen': picks, 'x': counts}
    pickle.dump(save_dict,
                gfile.Open(os.path.join(save_dir,
                                        'greedy_%d_%s.pkl' %
                                        (max_stims, file_suffix)),
                           'w'))
  return picks


def compute_error(S, A, D, var_dict, x_chosen):
  """Expected squared error of each cumulative plan (column) in x_chosen."""
  mean_percepts = A.dot(D.dot(x_chosen))
  residuals = np.expand_dims(S, 1) - mean_percepts
  return np.sum(residuals ** 2, 0) + np.dot(var_dict, x_chosen)
def simultaneous_planning_cvx(S, A, D, t_max = 2000, delta = 5,
                              file_suffix='', save_dir=''):
  """Solve the planning problem as a convex program with cvxpy.

  NOTE(review): written against the legacy cvxpy 0.x API
  (Variable(rows, cols), sum_entries); modern cvxpy renamed these.
  save_dir and file_suffix are accepted but unused -- results are only
  printed, not saved.
  """
  # Setup problem parameters
  # make p_tau uniform between 500 and 2000
  p_tau = np.ones(t_max)
  p_tau[:5] = 0
  p_tau = p_tau / np.sum(p_tau)
  n_dict_elem = D.shape[1]
  # compute variance of dictionary elements.
  stas_norm = np.expand_dims(np.sum(A ** 2, 0) ,0)  # 1 x # cells
  var_dict = np.squeeze(np.dot(stas_norm, D * (1 - D)))  # # dict
  # Construct the problem.
  # y[:, t] = stimulations added at step t; x = cumulative plan over time.
  y = cvxpy.Variable(n_dict_elem, t_max)
  x = cvxpy.cumsum(y, 1)
  S_expanded = np.repeat(np.expand_dims(S, 1), t_max, 1)
  # Expected (error + variance) under the saccade-time distribution p_tau.
  objective = cvxpy.Minimize((cvxpy.sum_entries((S_expanded - A * (D * x))**2, 0) + var_dict * x) * p_tau)
  # Nonnegative increments; at most delta stimulations per time step.
  constraints = [0 <= y, cvxpy.sum_entries(y, 0).T <= delta * np.ones((1, t_max)).T]
  prob = cvxpy.Problem(objective, constraints)
  # The optimal objective is returned by prob.solve().
  result = prob.solve(verbose=True)
  # The optimal value for x is stored in x.value.
  print(x.value)
  # The optimal Lagrange multiplier for a constraint
  # is stored in constraint.dual_value.
  print(constraints[0].dual_value)
def simultaneous_planning(S, A, D, save_dir='', t_max = 2000, lr=0.01,
                          normalization='T-i', delta = 5,
                          file_suffix='', x_init=None):
  ''' Solve the simultaneous planning constrained optimization problem.

  Let xi be the set of electrodes played till time i.
  Let the distribution of saccades be p(tau).
  Optimization problem.
  Min E_tau ||S - ADx_tau||^2
  subject to -
      x_{i+1} >= x_{i} forall i
      |x_i|_1 <= i forall i
      x_i >= 0 forall i.

  Solve using projected gradient descent on y (the per-step increments of
  x), rescaled per step by `normalization`; results are pickled to save_dir.
  '''
  # Compute expanded quantities
  S_expanded = np.repeat(np.expand_dims(S, 1), t_max, 1)
  # Per-step rescaling of y; larger factors shrink the effective step for
  # the corresponding time index.
  if normalization == 'T-i':
    normalizing_factors = np.array([t_max - i for i in range(t_max)])
  if normalization == 'sqrt(T-i)':
    normalizing_factors = np.sqrt(np.array([t_max - i for i in range(t_max)]))
  if normalization == 'C':
    normalizing_factors = (t_max / 2) + 0 * np.array([t_max - i for i in range(t_max)])
  # NOTE(review): an unrecognized normalization leaves normalizing_factors
  # undefined and raises NameError below.
  # make p_tau uniform between 500 and 2000
  p_tau = np.ones(t_max)
  # TODO(bhaishahster): Dont hardcode p_tau!!
  p_tau[:5] = 0
  p_tau = p_tau / np.sum(p_tau)
  n_dict_elem = D.shape[1]
  # TODO(bhaishahster): Find better initialization.
  # Initialize
  if x_init is not None:
    y_init_normalized = np.zeros_like(x_init)  # successive difference of x.
    y_init_normalized[:, 0] = x_init[:, 0]
    for iy in np.arange(1, y_init_normalized.shape[1]):
      y_init_normalized[:, iy] = x_init[:, iy] - x_init[:, iy - 1]
    y_init = y_init_normalized * np.expand_dims(normalizing_factors, 0)  #
  else:
    # Zero initialization
    y_init = np.zeros((n_dict_elem, t_max))
    # Smarter initialization
    #x_init = np.linalg.pinv(D).dot(np.linalg.pinv(A).dot(S_expanded))
    #x_init = project_constraints(x_init)
    #
  # Do projected gradient descent
  y = y_init.copy()
  f_log = []
  # compute variance of dictionary elements.
  stas_norm = np.expand_dims(np.sum(A ** 2, 0) ,0)  # 1 x # cells
  var_dict = np.squeeze(np.dot(stas_norm, D * (1 - D)))  # # dict
  # Per-step l1 budget, rescaled into y's normalized coordinates.
  radii_normalized = (np.ones(y.shape[1])) * delta
  radii = np.multiply(normalizing_factors, radii_normalized )
  x_log = []
  y_best = []
  f_min = np.inf
  for iiter in range(4000):
    # Step-size decay schedule: shrink lr every 500 iterations.
    if iiter % 500 == 499:
      lr = lr * 0.3
    # compute x from y
    y_normalized = y / np.expand_dims(normalizing_factors, 0)
    x = np.cumsum(y_normalized, 1)
    x_log += [x]
    # Compute objective value
    diff = S_expanded - A.dot(D.dot(x))
    errors = np.sum(diff ** 2, 0) + np.dot(var_dict, x)
    f = errors.dot(p_tau)
    print('Iterate: %d, Function value : %.3f' % (iiter, f))
    f_log += [f]
    # Track the best iterate seen so far (objective is not monotone).
    if f < f_min:
      f_min = f
      y_best = y
    # Gradients step
    grad = (D.T.dot(A.T.dot((S_expanded - A.dot(D.dot(x))))) - np.expand_dims(var_dict, 1)) * np.expand_dims(p_tau, 0)
    # collect gradient for each y. - new formulation that Kunal suggested.
    grad_y = np.cumsum(grad[:, ::-1], 1)
    grad_y = grad_y[:, ::-1] / np.expand_dims(normalizing_factors, 0)
    # y = y + (lr / np.sqrt(iiter + 1)) * grad_y
    y = y + (lr) * grad_y
    # Project to constraint set
    y = project_l1_pos(y, radii)
    '''
    if iiter > 2:
      if np.abs(f_log[-2] - f_log[-1]) < 1e-5:
        # compute x from y
        y_normalized = y / np.expand_dims(normalizing_factors, 0)
        x = np.cumsum(y_normalized, 1)
        break
    '''
  # Evaluate the best iterate found during the descent.
  y = y_best
  y_normalized = y / np.expand_dims(normalizing_factors, 0)
  x = np.cumsum(y_normalized, 1)
  diff = S_expanded - A.dot(D.dot(x))
  errors = np.sum(diff ** 2, 0) + np.dot(var_dict, x)
  # Randomized rounding
  x_rr_discrete = randomized_rounding(x)
  errors_rr_discrete = compute_error(S, A, D, var_dict, x_rr_discrete)
  # Hard thresholding
  y_ht_discrete = hard_thresholding(y, radii)
  y_ht_discrete_normalized = y_ht_discrete / np.expand_dims(normalizing_factors, 0)
  x_ht_discrete = np.cumsum(y_ht_discrete_normalized, 1)
  errors_ht_discrete = compute_error(S, A, D, var_dict, x_ht_discrete)
  # Convergence diagnostics: distance of every iterate to the last/best x.
  x_log = np.array(x_log)
  x_decrease = np.sum((x_log - x_log[-1, :, :]) ** 2, 1)
  x_dec_best = np.sum((x_log - x[:, :]) ** 2, 1)
  x_last = x_log[-1]
  save_dict = {'x': x, 'x_rr_discrete': x_rr_discrete,
               'x_ht_discrete': x_ht_discrete,
               'errors': errors, 'x_decrease': x_decrease,
               'x_dec_best': x_dec_best, 'x_last': x_last,
               'errors_rr_discrete': errors_rr_discrete,
               'errors_ht_discrete': errors_ht_discrete,
               'radii_normalized': radii_normalized,
               'radii': radii, 'normalizing_factors': normalizing_factors,
               'f_log': f_log, 'y': y, 'y_ht_discrete': y_ht_discrete, 'S': S,
               'A': A, 'D': D}
  pickle.dump(save_dict,
              gfile.Open(os.path.join(save_dir,
                                      'pgd_%d_%.6f_%s_%d_%s.pkl' %(t_max, lr,
                                                                   normalization,
                                                                   delta,
                                                                   file_suffix)),
                         'w'))
def randomized_rounding(x):
  '''Randomized rounding of a fractional plan.

  One uniform threshold t_i ~ U[0, 1) is drawn per dictionary element
  (row); every entry becomes ceil(x[i, j] - t_i), i.e. it is rounded up
  with probability equal to its fractional part. Integer-valued entries
  are left unchanged.
  '''
  thresholds = np.random.rand(x.shape[0])
  x_discrete = np.zeros_like(x)
  # Row-wise broadcast reproduces the per-entry ceil of the loop version.
  x_discrete[:, :] = np.ceil(x - thresholds[:, np.newaxis])
  return x_discrete
def hard_thresholding(y, radii):
  '''Discretize y by ceiling entries, largest first, until the budget is met.

  For each time step t, entries of y[:, t] are rounded up in decreasing
  order of value; rounding stops as soon as the column total reaches the
  l1 budget radii[t].

  Args:
    y: (n_dict, T) fractional increments.
    radii: per-column l1 budgets, indexable up to T - 1.

  Returns:
    Integer-valued array with the same shape (and dtype) as y.
  '''
  y_discrete = np.zeros_like(y)
  for t in range(y.shape[1]):
    budget = radii[t]
    total = 0.0
    # Visit rows from largest to smallest value in this column.
    for row in np.argsort(y[:, t])[::-1]:
      y_discrete[row, t] = np.ceil(y[row, t])
      total += y_discrete[row, t]
      if total >= budget:
        break
  return y_discrete
def project_l1_pos(x, radii):
'''Numpy implementation of | |
"""various mesh_utils tests"""
import os
import unittest
from io import StringIO
from docopt import DocoptExit
import numpy as np
from cpylog import SimpleLogger
#import pyNastran
#from pyNastran.bdf.bdf import BDF
#root_path = pyNastran.__path__[0]
#test_path = os.path.join(root_path, 'bdf', 'test', 'unit')
import pyNastran
from pyNastran.bdf.bdf import BDF, read_bdf
from pyNastran.bdf.mesh_utils.bdf_equivalence import bdf_equivalence_nodes
from pyNastran.bdf.mesh_utils.export_mcids import export_mcids
from pyNastran.bdf.mesh_utils.split_cbars_by_pin_flag import split_cbars_by_pin_flag
from pyNastran.bdf.mesh_utils.split_elements import split_line_elements
from pyNastran.bdf.mesh_utils.pierce_shells import (
pierce_shell_model) #, quad_intersection, triangle_intersection)
from pyNastran.bdf.mesh_utils.mirror_mesh import (
write_bdf_symmetric, bdf_mirror, bdf_mirror_plane)
from pyNastran.bdf.mesh_utils.mass_properties import (
mass_properties, mass_properties_nsm) #mass_properties_breakdown
from pyNastran.bdf.mesh_utils.make_half_model import make_symmetric_model
from pyNastran.bdf.mesh_utils.bdf_merge import bdf_merge
from pyNastran.bdf.mesh_utils.utils import cmd_line
# not tested
from pyNastran.bdf.mesh_utils.mesh import create_structured_cquad4s, create_structured_chexas
PKG_PATH = pyNastran.__path__[0]
MODEL_PATH = os.path.abspath(os.path.join(PKG_PATH, '..', 'models'))
np.set_printoptions(edgeitems=3, infstr='inf',
linewidth=75, nanstr='nan', precision=3,
suppress=True, threshold=1000, formatter=None)
class TestMeshUtils(unittest.TestCase):
"""various mesh_utils tests"""
def test_free_faces(self):
    """CTETRA10: runs `bdf free_faces` on a solid model and writes a skin."""
    #bdf free_faces [-d | -l] [-f] [--encoding ENCODE] BDF_FILENAME SKIN_FILENAME
    #with self.assertRaises(SystemExit):
        #cmd_line(argv=['bdf', 'free_faces'])
    skin_filename = 'skin.bdf'
    model_filename = os.path.join(MODEL_PATH, 'solid_bending', 'solid_bending.bdf')
    #log = get_logger(log=None, level='info', encoding='utf-8')
    cmd_line(argv=['bdf', 'free_faces', model_filename, skin_filename], quiet=True)
    os.remove(skin_filename)
def test_structured_cquads(self):
    """tests create_structured_cquad4s"""
    model = BDF()
    property_id = 42
    # Unit square in the z=0 plane, corners in counter-clockwise order.
    lower_left = [0., 0., 0.]
    lower_right = [1., 0., 0.]
    upper_right = [1., 1., 0.]
    upper_left = [0., 1., 0.]
    nx, ny = 10, 20
    create_structured_cquad4s(model, property_id,
                              lower_left, lower_right, upper_right, upper_left,
                              nx, ny, nid=1, eid=1, theta_mcid=0.)
def test_structured_chexas(self):
    """tests test_structured_chexas"""
    #1U CubeSat is 10 cm, 10 cm, 11.35 cm.
    #2U CubeSat is 10 cm, 10 cm, 22.70 cm.
    #6U CubeSat is 20 cm, 10 cm, 34.05 cm.
    model = BDF()
    pid = 1
    # Box dimensions and the number of elements along each axis.
    dx, dy, dz = 20., 10., 5.
    nx = ny = nz = 2
    x = np.linspace(0., dx, nx + 1)
    y = np.linspace(0., dy, ny + 1)
    z = np.linspace(0., dz, nz + 1)
    create_structured_chexas(model, pid,
                             x, y, z, nx, ny, nz, eid=1)
    model.write_bdf('test_structured_chexas.bdf')
def test_eq1(self):
    """Collapse nodes 2 and 3; consider 1-3"""
    log = SimpleLogger(level='error')
    deck_lines = [
        'CEND',
        'BEGIN BULK',
        'GRID,1,,0.,0.,0.',
        'GRID,2,,0.,0.,0.5',
        'GRID,3,,0.,0.,0.51',
        'GRID,10,,0.,0.,1.',
        'GRID,11,,0.,0.,1.',
        'CTRIA3,1,1,1,2,11',
        'CTRIA3,3,1,2,3,11',
        'CTRIA3,4,1,1,2,10',
        'PSHELL,1,1,0.1',
        'MAT1,1,3.0,, 0.3',
        'ENDDATA',
    ]
    bdf_filename = 'nonunique.bdf'
    bdf_filename_out = 'unique.bdf'
    with open(bdf_filename, 'w') as bdf_file:
        bdf_file.write('\n'.join(deck_lines))
    tol = 0.2
    bdf_equivalence_nodes(bdf_filename, bdf_filename_out, tol,
                          renumber_nodes=False, neq_max=4, xref=True,
                          node_set=None, crash_on_collapse=False,
                          log=log, debug=False)
    # model = BDF(debug=False)
    # model.read_bdf(bdf_filename_out)
    # assert len(model.nodes) == 3, len(model.nodes)
    os.remove(bdf_filename)
    os.remove(bdf_filename_out)
def test_eq2(self):
    r"""
       5
     6 *-------* 40
       | \     |
       |   \   |
       |     \ |
       *-------* 3
       1       20
    """
    log = SimpleLogger(level='error')
    msg = (
        'CEND\n'
        'BEGIN BULK\n'
        'GRID,1, , 0., 0., 0.\n'
        'GRID,20,, 1., 0., 0.\n'
        'GRID,3, , 1.01, 0., 0.\n'
        'GRID,40,, 1., 1., 0.\n'
        'GRID,5, , 0., 1., 0.\n'
        'GRID,6, , 0., 1.01, 0.\n'
        'CTRIA3,1, 100,1,20,6\n'
        'CTRIA3,10,100,3,40,5\n'
        'PSHELL,100,1000,0.1\n'
        'MAT1,1000,3.0,, 0.3\n'
        'ENDDATA'
    )
    bdf_filename = 'nonunique.bdf'
    bdf_filename_out = 'unique.bdf'
    with open(bdf_filename, 'w') as bdf_file:
        bdf_file.write(msg)
    tol = 0.2
    # Collapse 5/6 and 20/3; Put a 40 and 20 to test non-sequential IDs
    bdf_equivalence_nodes(bdf_filename, bdf_filename_out, tol,
                          renumber_nodes=False, neq_max=4, xref=True,
                          node_set=None, crash_on_collapse=False,
                          log=log, debug=False)
    model = BDF(log=log, debug=False)
    model.read_bdf(bdf_filename_out)
    msg = 'nnodes=%s\n' % len(model.nodes)
    for nid, node in sorted(model.nodes.items()):
        msg += 'nid=%s xyz=%s\n' % (nid, node.xyz)
    assert len(model.nodes) == 4, msg
    #os.remove(bdf_filename)
    os.remove(bdf_filename_out)
    tol = 0.009
    # Don't collapse anything because the tolerance is too small
    bdf_equivalence_nodes(bdf_filename, bdf_filename_out, tol,
                          renumber_nodes=False, neq_max=4, xref=True,
                          node_set=None, crash_on_collapse=False,
                          log=log, debug=False)
    model = BDF(log=log, debug=False)
    model.read_bdf(bdf_filename_out)
    assert len(model.nodes) == 6, len(model.nodes)
    os.remove(bdf_filename_out)
    tol = 0.2
    node_set = [2, 3]
    # Node 2 is not defined, so crash
    with self.assertRaises(RuntimeError):
        # node 2 is not defined because it should be node 20
        bdf_equivalence_nodes(bdf_filename, bdf_filename_out, tol,
                              renumber_nodes=False, neq_max=4, xref=True,
                              node_set=node_set, crash_on_collapse=False,
                              log=log, debug=False)
    tol = 0.2
    node_list = [20, 3]
    # Only collapse 2 nodes (restricted via a list node_set)
    bdf_equivalence_nodes(bdf_filename, bdf_filename_out, tol,
                          renumber_nodes=False, neq_max=4, xref=True,
                          node_set=node_list, crash_on_collapse=False,
                          log=log, debug=False)
    model = BDF(log=log, debug=False)
    model.read_bdf(bdf_filename_out)
    assert len(model.nodes) == 5, len(model.nodes)
    os.remove(bdf_filename_out)
    tol = 0.2
    node_set = {20, 3}
    # Only collapse 2 nodes (node_set given as a set)
    bdf_equivalence_nodes(bdf_filename, bdf_filename_out, tol,
                          renumber_nodes=False, neq_max=4, xref=True,
                          node_set=node_set, crash_on_collapse=False,
                          log=log, debug=False)
    model = BDF(log=log, debug=False)
    model.read_bdf(bdf_filename_out)
    assert len(model.nodes) == 5, len(model.nodes)
    os.remove(bdf_filename_out)
    tol = 0.2
    aset = np.array([20, 3, 4], dtype='int32')
    bset = np.array([20, 3], dtype='int32')
    node_set = np.intersect1d(aset, bset)
    assert len(node_set) > 0, node_set
    # Only collapse 2 nodes (node_set given as an int array)
    bdf_equivalence_nodes(bdf_filename, bdf_filename_out, tol,
                          renumber_nodes=False, neq_max=4, xref=True,
                          node_set=node_set, crash_on_collapse=False, debug=False)
    model = BDF(debug=False)
    model.read_bdf(bdf_filename_out)
    assert len(model.nodes) == 5, len(model.nodes)
    os.remove(bdf_filename_out)
def test_eq3(self):
    """node_set=None"""
    log = SimpleLogger(level='error')
    # Punch-format deck; the $-comments list the coincident node pairs
    # expected to merge at tol=0.01 (13 nodes -> 11).
    lines = [
        '$pyNastran: version=msc',
        '$pyNastran: punch=True',
        '$pyNastran: encoding=ascii',
        '$NODES',
        '$ Nodes to merge:',
        '$ 5987 10478',
        '$ GRID 5987 35.46 -6. 0.',
        '$ GRID 10478 35.46 -6. 0.',
        '$ 5971 10479',
        '$ GRID 5971 34.92 -6. 0.',
        '$ GRID 10479 34.92 -6. 0.',
        '$ 6003 10477',
        '$ GRID 6003 36. -6. 0.',
        '$ GRID 10477 36. -6. 0.',
        'GRID 5971 34.92 -6. 0.',
        'GRID 5972 34.92-5.73333 0.',
        'GRID 5973 34.92-5.46667 0.',
        'GRID 5987 35.46 -6. 0.',
        'GRID 5988 35.46-5.73333 0.',
        'GRID 5989 35.46-5.46667 0.',
        'GRID 6003 36. -6. 0.',
        'GRID 6004 36.-5.73333 0.',
        'GRID 6005 36.-5.46667 0.',
        'GRID 10476 36. -6. -1.5',
        'GRID 10477 36. -6. 0.',
        'GRID 10478 35.46 -6. 0.',
        'GRID 10479 34.92 -6. 0.',
        'GRID 10561 34.92 -6. -.54',
        '$ELEMENTS_WITH_PROPERTIES',
        'PSHELL 1 1 .1',
        'CQUAD4 5471 1 5971 5987 5988 5972',
        'CQUAD4 5472 1 5972 5988 5989 5973',
        'CQUAD4 5486 1 5987 6003 6004 5988',
        'CQUAD4 5487 1 5988 6004 6005 5989',
        'PSHELL 11 1 .1',
        'CTRIA3 9429 11 10561 10476 10478',
        'CTRIA3 9439 11 10478 10479 10561',
        'CTRIA3 9466 11 10476 10477 10478',
        '$MATERIALS',
        'MAT1 1 3. .3',
    ]
    bdf_filename = 'nonunique2.bdf'
    bdf_filename_out = 'unique2.bdf'
    with open(bdf_filename, 'w') as bdf_file:
        bdf_file.write('\n'.join(lines))
    tol = 0.01
    bdf_equivalence_nodes(bdf_filename, bdf_filename_out, tol,
                          renumber_nodes=False, neq_max=4, xref=True,
                          node_set=None, crash_on_collapse=False,
                          log=log, debug=False)
    model = BDF(debug=False)
    model.read_bdf(bdf_filename_out)
    # 3 coincident pairs collapse: 13 - 2 = 11 nodes remain.
    assert len(model.nodes) == 11, len(model.nodes)
    os.remove(bdf_filename)
    os.remove(bdf_filename_out)
def test_eq4(self):
    r"""
       5
     6 *-------* 40
       | \     |
       |   \   |
       |     \ |
       *-------* 3
       1       20
    """
    log = SimpleLogger(level='error')
    # Nodes 4/40/41 are coincident; only the ids in node_set may collapse.
    msg = (
        'CEND\n'
        'BEGIN BULK\n'
        'GRID,1, , 0., 0., 0.\n'
        'GRID,20,, 1., 0., 0.\n'
        'GRID,3, , 1.01, 0., 0.\n'
        'GRID,41,, 1., 1., 0.\n'  # eq
        'GRID,4,, 1., 1., 0.\n'  # eq
        'GRID,40,, 1., 1., 0.\n'  # eq
        'GRID,4,, 1., 1., 0.\n'  # eq
        'GRID,5, , 0., 1., 0.\n'
        'GRID,6, , 0., 1.01, 0.\n'
        'CTRIA3,1, 100,1,20,6\n'
        'CTRIA3,10,100,3,40,5\n'
        'PSHELL,100,1000,0.1\n'
        'MAT1,1000,3.0,, 0.3\n'
        'ENDDATA'
    )
    bdf_filename = 'nonunique.bdf'
    bdf_filename_out = 'unique.bdf'
    with open(bdf_filename, 'w') as bdf_file:
        bdf_file.write(msg)
    tol = 0.2
    node_set = [4, 40, 41]
    # Collapse 5/6 and 20/3; Put a 40 and 20 to test non-sequential IDs
    bdf_equivalence_nodes(bdf_filename, bdf_filename_out, tol,
                          renumber_nodes=False, neq_max=4, xref=True,
                          node_set=node_set, crash_on_collapse=False,
                          log=log, debug=False)
    model = BDF(log=log, debug=False)
    model.read_bdf(bdf_filename_out)
    nids = model.nodes.keys()
    assert len(model.nodes) == 6, 'nnodes=%s nodes=%s' % (len(model.nodes), nids)
    for expected_nid in (1, 20, 3, 4, 5, 6):
        assert expected_nid in nids, nids
    for collapsed_nid in (40, 41):
        assert collapsed_nid not in nids, nids
    #print(nids)
    os.remove(bdf_filename)
    os.remove(bdf_filename_out)
def test_merge_01(self):
    """merges multiple bdfs into a single deck"""
    log = SimpleLogger(level='error')
    # Input decks of varying size and format.
    bdf_filename1 = os.path.join(MODEL_PATH, 'bwb', 'bwb_saero.bdf')
    bdf_filename2 = os.path.join(MODEL_PATH, 'sol_101_elements', 'static_solid_shell_bar.bdf')
    bdf_filename3 = os.path.join(MODEL_PATH, 'solid_bending', 'solid_bending.bdf')
    bdf_filename4 = os.path.join(MODEL_PATH, 'iSat', 'ISat_Dploy_Sm.dat')
    bdf_filename_out1 = os.path.join(MODEL_PATH, 'bwb', 'BWBsaero_staticbar_8.out')
    bdf_filename_out2 = os.path.join(MODEL_PATH, 'bwb', 'BWBsaero_static_bar_16.out')
    bdf_filename_out3 = os.path.join(MODEL_PATH, 'bwb', 'BWBsaero_staticbar_isat.out')
    bdf_filenames1 = [bdf_filename1, bdf_filename2]
    bdf_filenames2 = [bdf_filename1, bdf_filename2, bdf_filename3, bdf_filename4]
    # Two decks, renumbered, small (8-char) field format.
    bdf_merge(bdf_filenames1, bdf_filename_out=bdf_filename_out1,
              renumber=True, encoding=None, size=8, is_double=False,
              cards_to_skip=None, log=log)
    # Same two decks, no renumbering, large (16-char) field format.
    bdf_merge(bdf_filenames1, bdf_filename_out=bdf_filename_out2,
              renumber=False, encoding=None, size=16, is_double=False,
              cards_to_skip=None, log=log)
    # All four decks merged together.
    bdf_merge(bdf_filenames2, bdf_filename_out=bdf_filename_out3,
              renumber=False, encoding=None, size=16, is_double=False,
              cards_to_skip=None, log=log)
    # The merged decks must still be readable.
    read_bdf(bdf_filename_out1, log=log)
    read_bdf(bdf_filename_out2, log=log)
    read_bdf(bdf_filename_out3, log=log)
def test_exit(self):
"""tests totally failing to run"""
with self.assertRaises(SystemExit):
cmd_line(argv=['bdf'])
with self.assertRaises(SystemExit):
cmd_line(argv=['bdf', 'export_caero_mesh'])
with self.assertRaises(SystemExit):
cmd_line(argv=['bdf', 'convert'])
with self.assertRaises(SystemExit):
cmd_line(argv=['bdf', 'scale'])
#with self.assertRaises(SystemExit):
#cmd_line(argv=['bdf', 'bin'])
#with self.assertRaises(SystemExit):
#cmd_line(argv=['bdf', 'filter'])
with self.assertRaises(SystemExit):
cmd_line(argv=['bdf', 'mirror'])
with self.assertRaises(SystemExit):
cmd_line(argv=['bdf', 'renumber'])
with self.assertRaises(SystemExit):
cmd_line(argv=['bdf', 'equivalence'])
with self.assertRaises(SystemExit):
cmd_line(argv=['bdf', 'free_faces'])
with self.assertRaises(SystemExit):
cmd_line(argv=['bdf', 'merge'])
with self.assertRaises(SystemExit):
cmd_line(argv=['bdf', 'export_caero_mesh'])
with | |
(self._metadata_pb.emulator_type ==
emulator_meta_data_pb2.EmulatorMetaDataPb.QEMU2):
possible_kernels = ['kernel-ranchu-64', 'kernel-ranchu']
else:
possible_kernels = ['kernel-qemu']
system_image_dir = self._metadata_pb.system_image_dir
for name in possible_kernels:
if os.path.isfile(os.path.join(system_image_dir, name)):
return name
raise Exception('No kernel file found in %s' % system_image_dir)
def _KernelFile(self):
  """Full path of the kernel image within the session images directory."""
  kernel_name = self._KernelFileName()
  return os.path.join(self._SessionImagesDir(), kernel_name)
def _InitImagesDir(self):
return os.path.join(self._images_dir, 'init')
def _SessionImagesDir(self):
return os.path.join(self._images_dir, 'session')
def _SnapshotRamBinFile(self):
  """Path of the default-boot snapshot's RAM dump in the session dir."""
  rel_parts = ('snapshots', 'default_boot', 'ram.bin')
  return os.path.join(self._SessionImagesDir(), *rel_parts)
def _SetUUID(self, sd, uuid_value):
  """Set UUID for sd card image.

  Args:
    sd: path to the sd card image file, opened for in-place update.
    uuid_value: integer volume id to stamp into the image.
  """
  # Native-endian 4-byte int; NOTE(review): assumes a little-endian host
  # matches the on-disk volume-id layout — confirm before running on
  # big-endian machines.
  packed = struct.pack('i', uuid_value)
  with open(sd, 'r+b') as image:
    image.seek(SD_CARD_UUID_OFFSET)
    image.write(packed)
def _SparseCp(self, src, dst):
  """Copies a file and respects its sparseness.

  Symbolic links are dereferenced.

  Args:
    src: the source file
    dst: the destination file
  """
  copy_cmd = ['cp', '--sparse=always', '--dereference', src, dst]
  subprocess.check_call(copy_cmd)
def _ExtractTarEntry(self, archive, entry, working_dir):
"""Extracts a single entry from a compressed tar archive."""
subprocess.check_call([
'tar', '-xzSf', archive, '--no-same-owner',
'-C', working_dir, '--no-anchored', entry])
def _StageDataFiles(self,
                    system_image_dir,
                    userdata_tarball,
                    timer,
                    enable_guest_gl,
                    snapshot_file,
                    system_image_path=None,
                    data_image_path=None,
                    vendor_img_path=None,
                    encryptionkey_img_path=None,
                    advanced_features_ini=None,
                    build_prop_path=None,
                    modified_ramdisk_path=None,
                    data_files=None):
  """Stages files for the emulator launch.

  Populates self._images_dir with an 'init' subdir (pristine images, mostly
  symlinks) and a 'session' subdir (the mutable copies the emulator actually
  runs against): kernel, system image, optional vendor/encryption-key/
  advanced-features/userdata images, cache image and sd card.

  Args:
    system_image_dir: directory containing the kernel image.
    userdata_tarball: optional tarball with pre-built session images
      (userdata, ramdisk, cache, sdcard, snapshot); when absent those files
      are created/initialized here instead.
    timer: object with start(name)/stop(name) used to time expensive steps.
    enable_guest_gl: forwarded to system-image modification decisions.
    snapshot_file: optional ram.bin to symlink in (x86 only).
    system_image_path: system image as '.img' or '.img.tar.gz' (required;
      asserted below).
    data_image_path: optional userdata image ('.img' or '.img.tar.gz').
    vendor_img_path: optional vendor image ('.img' or '.img.tar.gz').
    encryptionkey_img_path: optional encryption key image ('.img' only).
    advanced_features_ini: optional advanced-features file to copy in.
    build_prop_path: path to build.prop; also used to detect user builds.
    modified_ramdisk_path: forwarded to _InitializeRamdisk.
    data_files: optional host files to copy beneath session data/; each path
      must contain 'data/' (split on it below).
  """
  self._images_dir = os.path.abspath(self._TempDir('images'))
  os.makedirs(self._InitImagesDir())
  os.makedirs(self._SessionImagesDir())
  # Copy build.prop into the session dir where the emulator will find it.
  # TODO(b/67322170): Generally we want build.prop in the session dir where
  # the emulator will see it, but swordfish wear_23 breaks when we do that.
  # Until we fix that device, we need a special case to avoid breaking it.
  if (self.GetApiVersion() > 10 and
      not self._IsBuggyWearBuild(build_prop_path)):
    shutil.copy(build_prop_path,
                os.path.join(self._SessionImagesDir(), 'build.prop'))
  # kernel is never compressed (thank god.)
  init_kernel = os.path.abspath(
      os.path.join(system_image_dir, self._KernelFileName()))
  assert os.path.exists(init_kernel)
  os.symlink(init_kernel, self._KernelFile())
  init_sys = os.path.abspath(system_image_path)
  assert os.path.exists(init_sys), '%s: no system.img' % system_image_path
  if system_image_path.endswith('.img'):
    os.symlink(init_sys, self._InitSystemFile())
    if (self._metadata_pb.emulator_type ==
        emulator_meta_data_pb2.EmulatorMetaDataPb.QEMU2 and
        not self._ShouldModifySystemImage(enable_guest_gl)):
      # Qemu2 does not need a writable system.img file, so we symlink to
      # ObjFS to avoid a copy.
      os.symlink(init_sys, self._SystemFile())
    else:
      logging.info('Copying system image to %s', self._SystemFile())
      timer.start('COPY_SYSTEM_IMAGE')
      self._SparseCp(self._InitSystemFile(), self._SystemFile())
      timer.stop('COPY_SYSTEM_IMAGE')
      # Writable copy: owner rwx so _ModifySystemImage below can touch it.
      os.chmod(self._SystemFile(), stat.S_IRWXU)
  else:
    assert system_image_path.endswith('.img.tar.gz'), 'Not known format'
    logging.info('Extracting system image from tar.gz')
    timer.start('EXTRACT_SYSTEM_IMAGE')
    self._ExtractTarEntry(
        init_sys, 'system.img', os.path.dirname(self._SystemFile()))
    shutil.move(os.path.join(os.path.dirname(self._SystemFile()),
                             'system.img'),
                self._SystemFile())
    timer.stop('EXTRACT_SYSTEM_IMAGE')
    os.chmod(self._SystemFile(), stat.S_IRWXU)
  timer.start('MODIFY_SYSTEM_IMAGE')
  self._ModifySystemImage(enable_guest_gl)
  timer.stop('MODIFY_SYSTEM_IMAGE')
  # Folders created are data/misc/*
  # Folders created are data/nativetest/**/* and so on.
  # If we don't place the files in the right location, we end up
  # getting weird exceptions in logcat since emulator requires those files
  # to be present.
  if data_files:
    for each_file in data_files:
      # Keep only the path relative to the first 'data/' component.
      fn = each_file.split('data/')[1]
      dn = os.path.join(self._SessionImagesDir(), 'data', os.path.dirname(fn))
      # Create if this dir does not exist.
      if not os.path.exists(dn):
        os.makedirs(dn)
      bn = os.path.basename(fn)
      shutil.copy(each_file, os.path.join(dn, bn))
  # Pipe service won't work for user build and api level 23+, since
  # pipe_traversal doesn't have a right seclinux policy. In this case, just
  # use real adb.
  self._use_real_adb = (
      self._IsUserBuild(build_prop_path) and self.GetApiVersion() >= 23)
  if userdata_tarball:
    # userdata tarball should contain:
    #   self._UserdataQemuFile()
    #   self._RamdiskFile()
    #   self._CacheFile()
    #   self._SdcardFile()
    #   self._SnapshotFile()
    #
    # It does not include:
    #   self._KernelFile()  # handled above
    #   self._SystemFile()  # handled above
    #   self._InitSystemFile()  # handled above
    tar_opts = '-xzSf'
    if (self._metadata_pb.emulator_type ==
        emulator_meta_data_pb2.EmulatorMetaDataPb.QEMU2):
      # qemu2's userdata.dat is not gzipped because it is a diff of the
      # initial userdata partition and thus quite small already. It also
      # doesn't compress as well as a raw image does.
      tar_opts = '-xSf'
    subprocess.check_call(['tar', tar_opts, userdata_tarball, '-C',
                           self._images_dir])
    data_size = FLAGS.data_partition_size
    # Grow (never shrink) the data partition if the flag asks for more MB
    # than the extracted image currently has (getsize >> 20 = size in MB).
    if (self.GetApiVersion() >= 19 and data_size and
        data_size > os.path.getsize(self._UserdataQemuFile()) >> 20):
      logging.info('Resize data partition to %dM', data_size)
      subprocess.check_call(['/sbin/resize2fs', '-f',
                             self._UserdataQemuFile(), '%dM' % data_size])
    # Symlink the snapshot file to the actual location.
    if (snapshot_file and self._metadata_pb.emulator_architecture == 'x86' and
        os.path.exists(snapshot_file)):
      os.symlink(snapshot_file, self._SnapshotRamBinFile())
  else:
    # self._RamdiskFile() - we modify this abit
    # self._SnapshotFile() - always exists
    self._InitializeRamdisk(system_image_dir, modified_ramdisk_path)
    self._SparseCp(self.android_platform.empty_snapshot_fs,
                   self._SnapshotFile())
  if vendor_img_path and not os.path.exists(self._VendorFile()):
    init_data = vendor_img_path
    assert os.path.exists(init_data), '%s: no vendor.img' % vendor_img_path
    if init_data.endswith('.img.tar.gz'):
      self._ExtractTarEntry(
          init_data, 'vendor.img', os.path.dirname(self._VendorFile()))
      shutil.move(os.path.join(os.path.dirname(self._VendorFile()),
                               'vendor.img'),
                  self._VendorFile())
    elif init_data.endswith('.img'):
      self._SparseCp(init_data, self._VendorFile())
    else:
      raise Exception('Unknown vendor image type %s', vendor_img_path)
    os.chmod(self._VendorFile(), stat.S_IRWXU)
  if encryptionkey_img_path and not os.path.exists(
      self._EncryptionKeyImageFile()):
    init_data = encryptionkey_img_path
    assert os.path.exists(init_data), (
        '%s: no encryptionkey.img' % encryptionkey_img_path)
    assert init_data.endswith('.img'), 'Not known format'
    shutil.copy(init_data, self._EncryptionKeyImageFile())
    os.chmod(self._EncryptionKeyImageFile(), stat.S_IRWXU)
  if advanced_features_ini and not os.path.exists(
      self._AdvancedFeaturesFile()):
    assert os.path.exists(advanced_features_ini), (
        'Advanced Features file %s does not exist' % advanced_features_ini)
    shutil.copy(advanced_features_ini, self._AdvancedFeaturesFile())
    os.chmod(self._AdvancedFeaturesFile(), stat.S_IRWXU)
  if data_image_path and not os.path.exists(self._UserdataQemuFile()):
    init_data = data_image_path
    assert os.path.exists(init_data), '%s: no userdata.img' % data_image_path
    if init_data.endswith('.img'):
      self._SparseCp(init_data, self._UserdataQemuFile())
    else:
      assert init_data.endswith('.img.tar.gz'), 'Not known format'
      self._ExtractTarEntry(
          init_data,
          'userdata.img',
          os.path.dirname(self._UserdataQemuFile()))
      shutil.move(os.path.join(os.path.dirname(self._UserdataQemuFile()),
                               'userdata.img'),
                  self._UserdataQemuFile())
  if not os.path.exists(self._CacheFile()):
    init_cache = resources.GetResourceFilename(
        'android_test_support/'
        'tools/android/emulator/support/cache.img.tar.gz')
    self._ExtractTarEntry(init_cache, 'cache.img',
                          os.path.dirname(self._CacheFile()))
  if not os.path.exists(self._SdcardFile()):
    sdcard_size_mb = self._metadata_pb.sdcard_size_mb
    if sdcard_size_mb == 256:
      # Fast path: ship a prebuilt 256M card instead of running mksdcard.
      sd_name = 'default_sdcard.256.img'
      self._ExtractTarEntry(
          resources.GetResourceFilename(
              'android_test_support/'
              'tools/android/emulator/support/%s.tar.gz' % sd_name),
          sd_name, os.path.dirname(self._SdcardFile()))
      shutil.move(os.path.join(os.path.dirname(self._SdcardFile()),
                               sd_name),
                  self._SdcardFile())
      logging.info('Using default sd card.')
    else:
      logging.info('Making sdcard on the fly due to a nonstandard size')
      sdcard_args = [
          self.android_platform.mksdcard,
          '%sM' % sdcard_size_mb,
          self._SdcardFile()]
      timer.start(_SDCARD_CREATE)
      common.SpawnAndWaitWithRetry(sdcard_args)
      # 1AEF-1A1E is hard coded in AdbController.java
      self._SetUUID(self._SdcardFile(), 0x1AEF1A1E)
      timer.stop(_SDCARD_CREATE)
  os.chmod(self._SdcardFile(), stat.S_IRWXU)
  if os.path.exists(self._UserdataQemuFile()):
    os.chmod(self._UserdataQemuFile(), stat.S_IRWXU)
  os.chmod(self._CacheFile(), stat.S_IRWXU)
  os.chmod(self._SnapshotFile(), stat.S_IRWXU)
# pylint: disable=too-many-statements
def _MakeAvd(self):
"""Crafts a set of ini files to correspond to an avd for this device.
AVD is the only way to pass certain properties on to the emulated device,
most notably dpi-device and vm heapsize (both of which are ignored from the
command line). Unfortunately there are options which are only controllable
from commandline (instead of avd) so we get to configure things thru both
interfaces. One day I hope the configuration style will all be unified into
one rational method which is effective both thru ADT/eclipse and
programatically. (As you're about to see, programmatically creating AVDs is
a bit of a trip!).
Returns:
an appropriate avd_name to pass to the emulator.
"""
# When using AVDs, the emulator expects to find AVDs beneath
#
# $ANDROID_SDK_HOME/.android/avd/[avd_name].
# if unset, this defaults to $HOME or /tmp
# both of these are undesired in our case.
#
# Also when using AVDs the emulator wants to find $ANDROID_SDK_ROOT
# and expects skin info to be stored beneath that location. We will
# in the future need to support skins.
avd_files = self._TempDir('avd_files')
android_tmp_dir = os.path.join(avd_files, 'tmp')
home_dir = os.path.join(avd_files, 'home')
os.makedirs(android_tmp_dir)
os.makedirs(home_dir)
# New version of emulator check for these directories.
os.makedirs(os.path.join(self._images_dir, 'platforms'))
os.makedirs(os.path.join(self._images_dir, 'platform-tools'))
self._emulator_env['ANDROID_HOME'] = self._images_dir
self._emulator_env['ANDROID_SDK_ROOT'] = self._images_dir
self._emulator_env['ANDROID_SDK_HOME'] = home_dir
self._emulator_env['HOME'] = home_dir
self._emulator_env['ANDROID_TMP'] = android_tmp_dir
self._console_auth_token_file = os.path.join(home_dir,
'.emulator_console_auth_token')
if not self._enable_console_auth:
# Write an empty file to disable console auth.
with open(self._console_auth_token_file, 'w+') as f:
f.write('')
dot_android_dir = os.path.join(home_dir, '.android')
os.makedirs(dot_android_dir)
ddms_cfg_file = os.path.join(dot_android_dir, 'ddms.cfg')
with open(ddms_cfg_file, 'w+') as ddms_cfg:
# suppress the 'welcome to android' dialog
ddms_cfg.write('pingOptIn=false\n')
ddms_cfg.write('pingTime.emulator=1348614108574\n')
ddms_cfg.write('pingId=592273184351987827\n')
dot_config_dir = os.path.join(home_dir, '.config',
'Android Open Source Project')
os.makedirs(dot_config_dir)
emulator_cfg_file = os.path.join(dot_config_dir, 'Emulator.conf')
with open(emulator_cfg_file, 'w+') as emulator_cfg:
# suppress some dialogs
emulator_cfg.write('[General]\n')
emulator_cfg.write('showAdbWarning=false\n')
emulator_cfg.write('showAvdArchWarning=false\n')
avd_dir = os.path.join(home_dir, '.android', 'avd')
# Allowed chars are:
# ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_.-
avd_name = 'mobile_ninjas.adb.%s' % self.emulator_adb_port
content_dir = os.path.join(avd_dir, avd_name)
os.makedirs(content_dir)
root_config_file = os.path.join(avd_dir, '%s.ini' % avd_name)
with open(root_config_file, 'w+') as root_config:
root_config.write('path=%s\n' % self._SessionImagesDir())
root_config.write('target=android-%s\n' % self._metadata_pb.api_name)
user_cfg_file = os.path.join(self._SessionImagesDir(), 'emulator-user.ini')
with open(user_cfg_file, 'w+') as user_cfg:
# Always put emulator window in fixed position.
user_cfg.write('window.x = 0\n')
user_cfg.write('window.y = 0\n')
config_ini_file = os.path.join(self._SessionImagesDir(), 'config.ini')
with open(config_ini_file, 'w+') as config_ini:
wrote_cores = False
for prop in self._metadata_pb.avd_config_property:
config_ini.write('%s=%s\n' % (prop.name, prop.value))
wrote_cores |= prop.name == _CORES_PROP
# the default size is ~256 megs, which fills up fast on iterative
# development.
if 'ext4' in subprocess.check_output(['file', self._UserdataQemuFile()]):
# getting this size right is pretty crucial - if it doesnt match
# the underlying file the guest os will get confused.
config_ini.write('disk.dataPartition.size=%s\n' %
os.path.getsize(self._UserdataQemuFile()))
else:
config_ini.write('disk.dataPartition.size=2047m\n')
# system partition must be less than 2GB (there's a constraint check in
# qemu). Also we must set the commandline flag too - which sets both
# userdata and system sizes, so everything is set to 2047 for sanity.
if 'ext4' in subprocess.check_output(['file', self._SystemFile()]):
# getting this size right is pretty crucial - if it doesnt match
# the underlying file the guest os will get confused.
config_ini.write('disk.systemPartition.size=%s\n' %
os.path.getsize(self._SystemFile()))
else:
config_ini.write('disk.systemPartition.size=2047m\n')
# a link back to our original name, not | |
# jcast/main.py
# -*- coding: utf-8 -*-
"""jcast.main: Main function."""
import os
import datetime
import logging
from functools import partial
import tqdm
from jcast import params, fates, model
from jcast.junctions import Junction, RmatsResults
from jcast.annots import ReadAnnotations, ReadGenome
from jcast.sequences import Sequence
from jcast import __version__
def runjcast(args):
    """
    Main entry point for the jcast flow: read rMATS junction tables,
    translate each junction into amino acid sequences, and write the
    results into a timestamped output directory.

    :param args: parsed command-line arguments (see main())
    :return: True on completion
    """
    # Timestamped output directory so repeated runs do not clobber each other.
    now = datetime.datetime.now()
    write_dir = os.path.join(args.out, 'jcast_' + now.strftime('%Y%m%d%H%M%S'))
    os.makedirs(write_dir, exist_ok=True)

    # Main logger: everything to a file, warnings and above to the console.
    main_log = logging.getLogger('jcast')
    main_log.propagate = False
    main_log.setLevel(logging.INFO)
    fh = logging.FileHandler(os.path.join(write_dir, 'jcast_main.log'))
    fh.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    main_log.addHandler(fh)
    ch = logging.StreamHandler()
    ch.setLevel(logging.WARNING)
    ch.setFormatter(formatter)
    main_log.addHandler(ch)
    main_log.info(args)
    main_log.info(__version__)

    #
    # Open the rMATS output file (MXE) here, rename the columns.
    # Fail fast if the folder does not look like an rMATS output directory.
    #
    assert os.path.exists(os.path.join(args.rmats_folder, 'MXE.MATS.JC.txt')), 'rMATS files not found, check directory.'
    rmats_results = RmatsResults(rmats_dir=args.rmats_folder)

    #
    # Read the gtf file using the gtfparse package,
    # then hold it as a pandas data frame.
    #
    gtf = ReadAnnotations(args.gtf_file)
    gtf.read_gtf()

    #
    # Read genome file into memory.
    #
    genome = ReadGenome(args.genome)

    #
    # Junction read-count cutoff: modeled from the data when -m is set,
    # otherwise taken directly from the -r argument.
    # TODO: move this to a separate class
    #
    if args.model:
        main_log.info('The -m flag is set. The modeled read count will override -r --read values.')
        # Make a numpy array of all junction SJC sum counts
        rmats_results.get_junction_count_array()
        pt, gmm, min_count = model.gaussian_mixture(sum_sjc_array=rmats_results.sum_sjc_array)
        # Plot out the model
        model.plot_model(sum_sjc_array=rmats_results.sum_sjc_array,
                         pt=pt,
                         gmm=gmm,
                         min_count=min_count,
                         write_dir=write_dir,
                         filename='model',
                         )
    # If the m flag is not set, use the r argument value as min count
    else:
        min_count = args.read

    #
    # Main loop through every line of each of the five rMATS files to make
    # junction objects, then translate them.
    #
    for rma in [rmats_results.rmats_mxe,
                rmats_results.rmats_se,
                rmats_results.rmats_ri,
                rmats_results.rmats_a5ss,
                rmats_results.rmats_a3ss,
                ]:
        junctions = [Junction(**rma.iloc[i].to_dict()) for i in range(len(rma))]
        translate_one_partial = partial(_translate_one,
                                        gtf=gtf,
                                        genome=genome,
                                        args=args,
                                        write_dir=write_dir,
                                        pred_bound=min_count,
                                        )
        # Single-threaded loop; each junction's fate string is logged.
        for jx in tqdm.tqdm(junctions,
                            total=len(junctions),
                            desc='Processing {0} Junctions'.format(rma.jxn_type[0]),
                            ):
            main_log.info('>>>>>> Doing {0} junction {1} for gene {2} {3}'.format(jx.junction_type,
                                                                                  jx.name,
                                                                                  jx.gene_symbol,
                                                                                  jx.gene_id,
                                                                                  ))
            main_log.info(translate_one_partial(jx))

    return True
def _translate_one(junction,
                   gtf,
                   genome,
                   args,
                   write_dir,
                   pred_bound,
                   ):
    """Get coordinates and translate one junction into peptide slices.

    Arguments are passed through functools.partial from runjcast.

    :param junction: Junction object built from one rMATS row
    :param gtf: ReadAnnotations object (parsed GTF annotations)
    :param genome: ReadGenome object (genome sequence held in memory)
    :param args: parsed command-line arguments
    :param write_dir: output directory for fasta files
    :param pred_bound: read-count cutoff (modeled value when -m is set)
    :return: a fates.* string describing how the junction was handled
    """
    #
    # trim slice coordinates by translation starts and ends
    #
    junction.trim_cds(gtf)
    #
    # get translated phase from GTF. Note this should be done after trimming to get the
    # right frame in case the exon in question is trimmed by the coding start
    #
    junction.get_translated_phase(gtf)
    #
    # initiate a sequence object that copies most of the junction information
    #
    sequence = Sequence(junction)
    #
    # get nucleotide sequences of all slices using genome in memory
    # (anchor, alternative-1, alternative-2, downstream)
    # conjoin alternative exons to make slice 1 and 2,
    #
    sequence.make_slice_localgenome(genome.genome)
    #
    # translate to peptides
    #
    sequence.get_canonical_aa(gtf=gtf, genome_index=genome.genome)
    sequence.translate(use_phase=True)
    #
    # filter by junction read counts - discard junction if the min read count is below threshold
    #
    # If the -r argument is set directly and the -m flag is not, use the -r integer for count filtering
    # If the -m flag is set, use the modeled count for filtering
    if (not args.model and junction.sum_sjc <= args.read) or (args.model and junction.sum_sjc <= pred_bound):
        #
        # If the canonical flag is set, append the canonical
        # Sp to the gene_canonical output even if none of the transcript slices are stitchable
        # back to the canonical protein. This avoids not having any protein level representation
        # of a gene potentially in the proteome.
        #
        if args.canonical:
            sequence.write_canonical(outdir=write_dir)
        return fates.skipped_low
    #
    # discard junction if the corrected P value of this read count is < threshold
    # this removes junctions that are inconsistently found on both replicates.
    #
    q_lo, q_hi = args.qvalue
    if not q_lo <= junction.fdr <= q_hi:
        # Write canonical anyhow if the canonical flag is set.
        if args.canonical:
            sequence.write_canonical(outdir=write_dir)
        return fates.skipped_low
    #
    # write the Tier 1 and Tier 2 results into fasta file
    #
    if len(sequence.slice1_aa) > 0 and len(sequence.slice2_aa) > 0:
        # Tier 1: both translated without stop codon, no frameshift
        if not sequence.frameshift:
            # Do a function like this to extend with fasta, and then write if necessary.
            # TODO: instead of using Uniprot we should get the canonical exons from the GTF directly
            for slice_ in [1, 2]:
                sequence.stitch_to_canonical_aa(slice_to_stitch=slice_,
                                                slice_has_ptc=False)
            sequence.write_slices(
                outdir=write_dir,
                suffix='T1',
            )
            return fates.tier1.format(sequence.j.phase,
                                      sequence.translated_phase,
                                      )
        #
        # Tier 2: both translated without stop codon, but with one frameshift
        #
        elif sequence.frameshift:
            for slice_ in [1, 2]:
                sequence.stitch_to_canonical_aa(slice_to_stitch=slice_,
                                                slice_has_ptc=False)
                # 2020-07-30 if slice runs into a frame shift,
                # allows the opportunity to stitch N-terminus only
                if [sequence.slice1_stitched, sequence.slice2_stitched][slice_-1] is None:
                    sequence.stitch_to_canonical_aa(slice_to_stitch=slice_,
                                                    slice_has_ptc=True)
            sequence.write_slices(
                outdir=write_dir,
                suffix='T2',
            )
            return fates.tier2.format(sequence.j.phase,
                                      sequence.translated_phase,
                                      )
    #
    # Tier 3 - retrieved phase is different from PTC-free frame.
    #
    else:
        sequence.translate(use_phase=False)
        # after tier 3 translation, check if both slices are good
        if len(sequence.slice1_aa) > 0 and len(sequence.slice2_aa) > 0:
            for slice_ in [1, 2]:
                sequence.stitch_to_canonical_aa(slice_to_stitch=slice_,
                                                slice_has_ptc=False)
            sequence.write_slices(outdir=write_dir,
                                  suffix='T3',
                                  )
            return fates.tier3.format(sequence.j.phase,
                                      sequence.translated_phase,
                                      )
    #
    # Tier 4: if sequence is still not good, do Tier 4: One of the two slices hits stop codon.
    # write out the slice if it is at least a certain proportion (params.ptc_threshold) as long as the long slice.
    #
    # translate again after tier 3 to reset to tier 1/2 translation state (using retrieved phase)
    sequence.translate(use_phase=True,
                       log=False,
                       )
    # TODO: we should avoid translating twice.
    # force-translate through slice 2 if slice 2 hits PTC:
    if len(sequence.slice1_aa) > 0 and len(sequence.slice2_aa) == 0:
        forced_slice = 2
        sequence.stitch_to_canonical_aa(slice_to_stitch=1)
        sequence.translate_forced(slice_to_translate=forced_slice)
        if len(sequence.slice2_aa) / len(sequence.slice1_aa) >= params.ptc_threshold:
            sequence.stitch_to_canonical_aa(slice_to_stitch=2,
                                            slice_has_ptc=True)
            sequence.write_slices(outdir=write_dir,
                                  suffix='T4',
                                  )
            return fates.tier4.format(forced_slice)
    # force-translate through slice 1 if slice 1 hits PTC:
    elif len(sequence.slice2_aa) > 0 and len(sequence.slice1_aa) == 0:
        forced_slice = 1
        sequence.stitch_to_canonical_aa(slice_to_stitch=2)
        sequence.translate_forced(slice_to_translate=1)
        if len(sequence.slice1_aa) / len(sequence.slice2_aa) >= params.ptc_threshold:
            sequence.stitch_to_canonical_aa(slice_to_stitch=1,
                                            slice_has_ptc=True)
            sequence.write_slices(outdir=write_dir,
                                  suffix='T4',
                                  )
            return fates.tier4.format(forced_slice)
    #
    # if nothing works, write FAILURE fate
    #
    else:
        #
        # salvage the canonical sequence in the long slice if it matches Sp exactly.
        # note that this means if we identify a gene in RNA-seq, we will append the canonical
        # Sp to the gene_canonical output even if none of the transcript slices are stitchable
        # back to the canonical protein. This is to avoid not having any protein level representation
        # of a gene potentially in the proteome.
        #
        if args.canonical:
            sequence.write_canonical(outdir=write_dir)
    return fates.fail
def main():
""" running main with parsed arguments from command line """
import argparse
import sys
parser = argparse.ArgumentParser(description='jcast retrieves transcript splice junctions'
'and translates them into amino acid sequences')
parser.add_argument('rmats_folder',
help='path to folder storing rMATS output',
)
parser.add_argument('gtf_file',
help='path to Ensembl gtf file',
)
parser.add_argument('genome',
help='path to genome file',
)
# parser.add_argument('-n', '--num_threads', help='number of threads for concurrency [default: 6]',
# default=6,
# type=int)
parser.add_argument('-o', '--out',
help='name of the output files [default: psq_out]',
default='out')
parser.add_argument('-r', '--read',
help='the lowest skipped junction read count for a junction to be translated [default: 1]',
default=1,
type=int,
)
parser.add_argument('-m', '--model',
help='models junction read count cutoff using a Gaussian mixture model [default: False]',
action='store_true',
default=False,
#type=bool,
)
parser.add_argument('-c', '--canonical', help='write out canonical protein sequence even if transcript'
'slices are untranslatable [default: False]',
default=False,
action='store_true',
# type=bool,
)
parser.add_argument('-q', '--qvalue',
help='take junctions with rMATS fdr within this threshold [default: 0 1]',
metavar=('q_lo', 'q_hi'),
nargs=2,
default=[0, 1],
type=float)
parser.set_defaults(func=runjcast)
# print help message if no arguments are given
if len(sys.argv[1:]) == 0:
parser.print_help()
parser.exit()
# parse all the arguments
args = parser.parse_args()
| |
graph.merged_outputs_map[fetch.name]
return fetch
elif isinstance(fetch, ops.Operation):
graph = Graph.get()
op_replicas = graph.get_local_replicas(graph.get_operation_by_name(fetch.name))
replicas.extend([item.primitive_obj for item in op_replicas])
return fetch
elif isinstance(fetch, (six.string_types, SparseTensor)):
# TODO(wangang.wa): String and SparseTensor should be supported.
return fetch
else:
raise ValueError("Type of fetches is not supported for epl now. "
"Fetch type: %s." % type(fetch))
def _init_local_resources(self, fn):
    """Try to init local resources if needed.

    Runs once per process: when the cluster exists and local resources have
    not yet been initialized, builds (optional) variable-broadcast ops,
    registers communication resources, and runs the resource and
    local-variable initializers through the original session run `fn`.

    Args:
        self: the session instance the hooked run() is bound to.
        fn: the original (unhooked) Session.run used to execute init ops.

    Returns:
        The broadcast assign ops when parallelism is needed, else None.
    """
    Graph.get().set_model_phase(ModelPhase.SESSION_RUN_PHASE)
    assign_ops = None
    if not Graph.get().is_local_resources_ready and Env.get().cluster:
        if Graph.get().need_parallel:
            # Ops that copy the first replica's variable values to the others;
            # returned to the caller to run after its first real step.
            assign_ops = broadcast_variables()
        local_resources = resources.local_resources()
        Env.get().parallel_information[constant.ALL_COMM_RESOURCES].extend(local_resources)
        # Initialize local_resources for gradients aggregation and
        # variables broadcasting.
        # Flag is set before running the init ops so re-entrant calls of the
        # hooked run() do not recurse into this block.
        Graph.get().is_local_resources_ready = True
        local_resources_init_op = control_flow_ops.group(resources.initialize_resources(local_resources))
        fn(self, local_resources_init_op)
        # Initialize local variables.
        local_variables_init_op = tf_variables.local_variables_initializer()
        fn(self, local_variables_init_op)
    return assign_ops
def replace_logging_tensor_hook(logging_hooks):
    """Replace tensor in logging_hooks with merged tensor if any.

    Each hook is a (key, {log_key: tensor}) pair; any tensor that has a
    merged counterpart in the graph's merged_outputs_map is swapped in place.
    """
    for hook in logging_hooks:
        tensors_by_key = hook[1]
        replacements = {}
        for log_key, logged_tensor in tensors_by_key.items():
            merged = Graph.get().merged_outputs_map.get(logged_tensor.name)
            if merged is not None:
                replacements[log_key] = merged
        tensors_by_key.update(replacements)
def base_session_run(fn):
    """Initialize local resource and broadcast variables after variables init."""
    def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
        """Base session run.

        Wraps the original run(): initializes local resources on first use,
        substitutes merged logging tensors, appends replicated fetches when
        the graph is parallelized, and unwraps the result to the caller's
        original fetch shape.
        """
        # We do not need to check replicas, initialize local resources, or
        # merge outputs for internal calls such as running init ops inside
        # saver restore; just run the fetches and return. Only fetches run
        # by users need to go through the epl run hooks.
        if Env.get().parallel_information and Env.get().parallel_information.get(constant.INFO_KEY_PREVENT_RUN_HOOK):
            return fn(self, fetches, feed_dict, options, run_metadata)
        # Temporarily unfinalize the default graph: the init/replica
        # machinery below may add ops to it.
        ops.get_default_graph()._finalized = False
        assign_ops = _init_local_resources(self, fn)
        if isinstance(fetches, dict) and "caller" not in fetches:
            tf_logging.warn("This is not a MonitoredSession for no key caller in dict fetches: ", fetches)
            return fn(self, fetches, feed_dict, options, run_metadata)
        # Wrap non-dict fetches under "caller" so replicas can ride along.
        actual_fetches = fetches if isinstance(fetches, dict) else {"caller": fetches}
        actual_fetches["replicas"] = []
        if Graph.get().need_parallel:
            logging_hooks = [(k, hooks) for k, hooks in actual_fetches.items() if k.__class__.__name__ == 'LoggingTensorHook']
            replace_logging_tensor_hook(logging_hooks)
            actual_fetches["caller"] = _append_replicated_fetches(actual_fetches["caller"], actual_fetches["replicas"])
        outputs = fn(self, actual_fetches, feed_dict, options, run_metadata)
        if assign_ops:
            # First run after resource init: broadcast variables to replicas.
            fn(self, assign_ops)
        ops.get_default_graph()._finalized = True
        return outputs if isinstance(fetches, dict) else outputs["caller"]
    return run
def base_session_makecallable(fn):
    """Hook session.make_callable."""
    def make_callable(self, fetches, feed_list=None, accept_options=False):
        """Basic session make_callable: wraps fetches so replicated fetches
        are appended before delegating to the original implementation."""
        is_dict_fetch = isinstance(fetches, dict)
        if is_dict_fetch and "caller" not in fetches:
            raise ValueError("This is not a MonitoredSession/Monitored"
                             "TrainingSession because of no key 'caller' "
                             "in dict fetches.")
        wrapped = fetches if is_dict_fetch else {"caller": fetches}
        wrapped["replicas"] = []
        wrapped["caller"] = _append_replicated_fetches(wrapped["caller"],
                                                       wrapped["replicas"])
        return fn(self, wrapped, feed_list, accept_options)
    return make_callable
def function_add_to_graph(fn):
    """Hook add_to_graph function of tensorflow function.

    Records the function's name on the epl Graph while the function body is
    being added, so ops created inside are attributed to it.
    """
    def add_to_graph(self, *args, **kwargs):
        pre_function_name = Graph.get().current_function_name
        Graph.get().current_function_name = self._func_name
        try:
            with ModelPhase(ModelPhase.ADD_FUNCTION):
                fn(self, *args, **kwargs)
        finally:
            # Restore even if fn raises, so later graph construction is not
            # attributed to the wrong function.
            Graph.get().current_function_name = pre_function_name
    return add_to_graph
def _is_func_dataset_related():
    """Check if the function is created by dataset.

    Returns True when any frame on the current call stack comes from one of
    the dataset-related source files listed in constant.dataset_related_files.
    """
    return any(
        dataset_file in frame.filename
        for frame in inspect.stack()
        for dataset_file in constant.dataset_related_files)
def func_graph_create_op(fn):
    """Hook create_op function of FuncGraph to get tensorflow function."""
    def create_op(self, *args, **kwargs):
        """Create FuncGraph op. Put dataset-related ops to CPU."""
        pre_function_name = Graph.get().current_function_name
        try:
            if _is_func_dataset_related():
                # Pin ops created by dataset functions to the worker's CPU.
                cpu_device = Env.get().cluster.current_worker_cpu()
                with ModelPhase(ModelPhase.ADD_FUNCTION), ops.device(cpu_device):
                    res = fn(self, *args, **kwargs)
            else:
                with ModelPhase(ModelPhase.ADD_FUNCTION):
                    res = fn(self, *args, **kwargs)
        finally:
            # Restore even if fn raises, keeping the recorded function name
            # consistent for subsequent graph construction.
            Graph.get().current_function_name = pre_function_name
        return res
    return create_op
def saver_init(fn):
    """Hook saver init to select out save_and_restore_operations."""
    def init(self, *args, **kwargs):
        """Tensorflow saver init function, run under SAVE_AND_RESTORE phase."""
        with ModelPhase(ModelPhase.SAVE_AND_RESTORE):
            return fn(self, *args, **kwargs)
    return init
def saver_save(fn):
    """Only the first constructing worker is allowed to save a checkpoint."""
    def save(self, *args, **kwargs):
        """Tensorflow saver save function."""
        # TODO(wangang.wa): This code will be removed after merging
        # variables for split strategy.
        graph = Graph.get()
        has_split_strategy = any(
            taskgraph.strategy_context.split_strategy
            for taskgraph in graph.taskgraphs)
        if has_split_strategy:
            with ModelPhase(ModelPhase.SAVE_AND_RESTORE):
                return fn(self, *args, **kwargs)
        if graph.first_constructor_rank != Env.get().cluster.worker_index:
            return None
        with ModelPhase(ModelPhase.SAVE_AND_RESTORE):
            return fn(self, *args, **kwargs)
    return save
def saver_restore(fn):
    """Hook tensorflow saver restore function. Only first constructing worker
    allowed to restore checkpoint."""
    def restore(self, sess, save_path):
        """Tensorflow saver restore function.

        Runs the graph's primitive init op (and its local replicas) first so
        replicated variables exist, then delegates the actual checkpoint
        restore to the original fn, but only on the first constructing worker
        or when any taskgraph uses a split strategy.
        """
        # Initialize replicated variables on current worker.
        # Prevent the hooked session.run from re-entering the epl run hooks
        # while these internal init ops execute.
        Env.get().parallel_information[constant.INFO_KEY_PREVENT_RUN_HOOK] = True
        graph = Graph.get()
        sess.run(graph.primitive_init_op)
        if Graph.get().need_parallel:
            init_replicas = graph.get_local_replicas(
                graph.get_operation_by_name(graph.primitive_init_op.name))
            for init in init_replicas:
                sess.run(init.primitive_obj)
        Env.get().parallel_information[constant.INFO_KEY_PREVENT_RUN_HOOK] = False
        ret = None
        # TODO(wangang.wa): This code will be removed after merging
        # variables for split strategy.
        if Graph.get().first_constructor_rank == Env.get().cluster.worker_index or \
            any(taskgraph.strategy_context.split_strategy is not None for taskgraph in Graph.get().taskgraphs):
            with ModelPhase(ModelPhase.SAVE_AND_RESTORE):
                ret = fn(self, sess, save_path)
        return ret
    return restore
def summary_scalar(fn):
    """Hook tf.summary.scalar to record the tag-to-tensor mapping."""
    def scalar(name, tensor, collections=None, family=None):
        summary_op = fn(name, tensor, collections, family)
        info = SummaryInfo(name, tensor.name, constant.SUMMARY_SCALAR_TYPE)
        Graph.get().summary_map[summary_op.name] = info
        return summary_op
    return scalar
def summary_image(fn):
    """Hook tf.summary.image to record the tag-to-tensor mapping."""
    def image(name, tensor, max_outputs=3, collections=None, family=None):
        summary_op = fn(name, tensor, max_outputs, collections, family)
        info = SummaryInfo(name, tensor.name, constant.SUMMARY_IMAGE_TYPE)
        Graph.get().summary_map[summary_op.name] = info
        return summary_op
    return image
def summary_histogram(fn):
    """Hook tf.summary.histogram to record the tag-to-tensor mapping."""
    def histogram(name, values, collections=None, family=None):
        summary_op = fn(name, values, collections, family)
        info = SummaryInfo(name, values.name, constant.SUMMARY_HISTOGRAM_TYPE)
        Graph.get().summary_map[summary_op.name] = info
        return summary_op
    return histogram
def summary_audio(fn):
    """Hook tf.summary.audio to record the tag-to-tensor mapping."""
    def audio(name,
              tensor,
              sample_rate,
              max_outputs=3,
              collections=None,
              family=None):
        """audio func."""
        summary_op = fn(name, tensor, sample_rate, max_outputs, collections,
                        family)
        info = SummaryInfo(name, tensor.name, constant.SUMMARY_AUDIO_TYPE)
        Graph.get().summary_map[summary_op.name] = info
        return summary_op
    return audio
def summary_text(fn):
  """Wrap tf.summary.text so each emitted summary op is recorded
  in the graph's summary_map."""
  def text(name, tensor, collections=None):
    summary_op = fn(name, tensor, collections)
    info = SummaryInfo(name, tensor.name, constant.SUMMARY_TEXT_TYPE)
    Graph.get().summary_map[summary_op.name] = info
    return summary_op
  return text
def summary_tensor(fn):
  """Wrap tf.summary.tensor_summary so each emitted summary op is
  recorded in the graph's summary_map."""
  def tensor_summary(name, tensor, summary_description=None, collections=None,
                     summary_metadata=None, family=None, display_name=None):
    """Forward to the original tensor summary, then record the relation
    between the summary tag and its source tensor."""
    summary_op = fn(name, tensor, summary_description, collections,
                    summary_metadata, family, display_name)
    Graph.get().summary_map[summary_op.name] = SummaryInfo(
        name, tensor.name, constant.SUMMARY_TENSOR_TYPE)
    return summary_op
  return tensor_summary
def distributed_add_weight(fn):
  """Replace add_weight with distributed_add_weight for Split context."""
  def add_weight(self,
                 name=None,
                 shape=None,
                 dtype=None,
                 initializer=None,
                 regularizer=None,
                 trainable=None,
                 constraint=None,
                 partitioner=None,
                 use_resource=None,
                 synchronization=tf_variables.VariableSynchronization.AUTO,
                 aggregation=tf_variables.VariableAggregation.NONE,
                 **kwargs):
    """Re-implementation of add_weight.

    When an active split strategy is present (and a concrete shape was
    given), the variable's leading dimension is reduced to this worker's
    shard before delegating to the wrapped add_weight; otherwise the call
    falls through unchanged.
    """
    strategy = Env.get().strategy_context.split_strategy
    if strategy and shape:
      devices = strategy.devices
      # Compute fan_in/fan_out from the FULL (unsharded) shape so the
      # initializer's statistics match the logical variable rather than
      # the local shard.
      fan_in, fan_out = init_ops._compute_fans(shape)
      initializer = initializers.get(initializer, fan_in=fan_in, fan_out=fan_out)
      shape = list(shape)
      num_devices = len(devices)
      # Shrink dim 0 to this worker's share — presumably
      # dispatch_across_consumers splits shape[0] across num_devices and
      # returns the slice size for worker_index (TODO confirm).
      shape[0] = dispatch_across_consumers(shape[0], num_devices, Env.get().cluster.worker_index)
      shape = tuple(shape)
    res = fn(self,
             name=name,
             shape=shape,
             dtype=dtype,
             initializer=initializer,
             regularizer=regularizer,
             trainable=trainable,
             constraint=constraint,
             partitioner=partitioner,
             use_resource=use_resource,
             synchronization=synchronization,
             aggregation=aggregation,
             **kwargs)
    return res
  return add_weight
def distributed_dense_layer(fn):
  """Replace dense layer to distributed dense layer for Split context."""
  def dense(*args, **kwargs):
    """Route to distributed_dense inside a non-nested Split context;
    otherwise fall back to the wrapped implementation."""
    strategy = Env.get().strategy_context.split_strategy
    if not strategy or strategy.is_nested:
      return fn(*args, **kwargs)
    strategy.is_nested = True
    result = distributed_dense(*args, **kwargs)
    strategy.is_nested = False
    return result
  return dense
def distributed_sparse_softmax_cross_entropy(fn):
  """Replace sparse softmax cross entropy loss function for Split context."""
  def sparse_softmax_cross_entropy(*args, **kwargs):
    """Route to the distributed loss inside a non-nested Split context;
    otherwise fall back to the wrapped implementation."""
    strategy = Env.get().strategy_context.split_strategy
    if not strategy or strategy.is_nested:
      return fn(*args, **kwargs)
    strategy.is_nested = True
    result = distributed_sparse_softmax_cross_entropy_with_logits(*args, **kwargs)
    strategy.is_nested = False
    return result
  return sparse_softmax_cross_entropy
def distributed_argmax(fn):
  """Replace argmax function for Split context."""
  def argmax(*args, **kwargs):
    """Route to distributed_ops.distributed_argmax inside a non-nested
    Split context; otherwise fall back to the wrapped implementation."""
    strategy = Env.get().strategy_context.split_strategy
    if not strategy or strategy.is_nested:
      return fn(*args, **kwargs)
    strategy.is_nested = True
    result = distributed_ops.distributed_argmax(*args, **kwargs)
    strategy.is_nested = False
    return result
  return argmax
def distributed_einsum(fn):
  """Replace einsum with distributed_einsum for Split context."""
  def einsum(equation, *inputs, **kwargs):
    """Re-implemention of einsum.

    Inside a split strategy with more than one device, einsum calls are
    paired into a dispatch/combine pattern (MoE style): the first call's
    first input is exchanged across devices (all-to-all) before the op,
    and every NUM_EINSUM_IN_SPLIT_FOR_MOE-th call's second input is
    exchanged back afterwards. A counter in parallel_information tracks
    the position within the pair across successive einsum calls.
    """
    strategy = Env.get().strategy_context.split_strategy
    if strategy:
      # TODO(jiangle.jl): Remove when epl supports split nested with replica
      assert len(Env.get().cluster.virtual_devices) == 1
      inputs = list(inputs)
      devices = strategy.devices
      num_devices = len(devices)
      if num_devices > 1:
        current_device = common.get_device_string(task=Env.get().cluster.worker_index)
        if constant.INFO_EINSUM_INDEX not in Env.get().parallel_information:
          # Dispatch phase: reuse (or lazily create) the communicator that
          # is shared between dispatch and combine.
          if constant.SHARED_COMMUNICATOR_FOR_DISPATCH_AND_COMBINE in Graph.get().collective_communicator:
            comm = Graph.get().collective_communicator[constant.SHARED_COMMUNICATOR_FOR_DISPATCH_AND_COMBINE]
          else:
            comm = create_simple_communicator(constant.SHARED_COMMUNICATOR_FOR_DISPATCH_AND_COMBINE, devices)
            Graph.get().collective_communicator[constant.SHARED_COMMUNICATOR_FOR_DISPATCH_AND_COMBINE] = comm
          # TODO(jiangle.jl): Refactor when epl supports auto split
          Env.get().parallel_information[constant.INFO_EINSUM_INDEX] = 1
          inputs[0] = alltoall(comm, inputs[0], current_device)
          # Regroup exchanged shards: split along axis 0, re-concat on axis 2.
          inputs[0] = array_ops.concat(array_ops.split(inputs[0], num_devices, 0), axis=2)
        else:
          # TODO(jiangle.jl): Refactor when epl supports auto split
          Env.get().parallel_information[constant.INFO_EINSUM_INDEX] += 1
          if not Env.get().parallel_information[constant.INFO_EINSUM_INDEX] % constant.NUM_EINSUM_IN_SPLIT_FOR_MOE:
            assert constant.SHARED_COMMUNICATOR_FOR_DISPATCH_AND_COMBINE in Graph.get().collective_communicator, \
                "Combine tensor should use the same communicator as dispatching"
            comm = Graph.get().collective_communicator[constant.SHARED_COMMUNICATOR_FOR_DISPATCH_AND_COMBINE]
            # Pair complete: drop the counter so the next einsum dispatches.
            del Env.get().parallel_information[constant.INFO_EINSUM_INDEX]
            # Inverse of the dispatch regrouping, then combine via all-to-all.
            inputs[1] = array_ops.concat(array_ops.split(inputs[1], num_devices, 2), axis=0)
            inputs[1] = alltoall(comm, inputs[1], current_device)
    return fn(equation, *inputs, **kwargs)
  return einsum
def distributed_equal(fn):
  """Replace equal function for Split context."""
  def equal(x, y, name=None):
    """Route to distributed_ops.distributed_equal inside a non-nested
    Split context; otherwise fall back to the wrapped implementation."""
    strategy = Env.get().strategy_context.split_strategy
    if not strategy or strategy.is_nested:
      return fn(x, y, name=name)
    strategy.is_nested = True
    result = distributed_ops.distributed_equal(x, y, name=name)
    strategy.is_nested = False
    return result
  return equal
def add_layer_hooks():
"""Add hooks of layers to replace function for Split context
in in-place mode."""
# TODO(wangang.wa): Hook __init__ function and apply function of Dense
# class is a better way which could support user denfine Dense object
# first and then execute its call function.
layers.dense = distributed_dense_layer(layers.dense)
losses.sparse_softmax_cross_entropy = distributed_sparse_softmax_cross_entropy(losses.sparse_softmax_cross_entropy)
gen_math_ops.arg_max = distributed_argmax(gen_math_ops.arg_max)
gen_math_ops.equal = distributed_equal(gen_math_ops.equal)
math.equal = distributed_equal(math.equal)
math_ops.equal = distributed_equal(math_ops.equal)
tensorflow.equal = | |
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Tests for salt.utils.jinja
"""
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import ast
import copy
import datetime
import os
import pprint
import re
import tempfile
# Import Salt libs
import salt.config
import salt.loader
# dateutils is needed so that the strftime jinja filter is loaded
import salt.utils.dateutils # pylint: disable=unused-import
import salt.utils.files
import salt.utils.json
import salt.utils.stringutils
import salt.utils.yaml
from jinja2 import DictLoader, Environment, exceptions
from salt.exceptions import SaltRenderError
from salt.ext import six
from salt.ext.six.moves import builtins
from salt.utils.decorators.jinja import JinjaFilter
from salt.utils.jinja import (
SaltCacheLoader,
SerializerExtension,
ensure_sequence_filter,
tojson,
)
from salt.utils.odict import OrderedDict
from salt.utils.templates import JINJA, render_jinja_tmpl
from tests.support.case import ModuleCase
from tests.support.helpers import flaky
from tests.support.mock import MagicMock, Mock, patch
# Import Salt Testing libs
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import TestCase, skipIf
# Import 3rd party libs
try:
import timelib # pylint: disable=W0611
HAS_TIMELIB = True
except ImportError:
HAS_TIMELIB = False
BLINESEP = salt.utils.stringutils.to_bytes(os.linesep)
class JinjaTestCase(TestCase):
    def test_tojson(self):
        """
        Test the tojson filter for those using Jinja < 2.9. Non-ascii unicode
        content should be dumped with ensure_ascii=True.
        """
        payload = {"Non-ascii words": ["süß", "спам", "яйца"]}
        expected = (
            '{"Non-ascii words": ["s\\u00fc\\u00df", '
            '"\\u0441\\u043f\\u0430\\u043c", '
            '"\\u044f\\u0439\\u0446\\u0430"]}'
        )
        actual = tojson(payload)
        self.assertEqual(actual, expected)
class MockFileClient(object):
    """
    Stand-in file client: instead of downloading files it records every
    request so tests can inspect what was asked for.
    """

    def __init__(self, loader=None):
        self.requests = []
        if loader:
            loader._file_client = self

    def get_file(self, template, dest="", makedirs=False, saltenv="base"):
        request = {
            "path": template,
            "dest": dest,
            "makedirs": makedirs,
            "saltenv": saltenv,
        }
        self.requests.append(request)
def _setup_test_dir(src_dir, test_dir):
    """Copy the fixture templates into *test_dir* and create the extra
    fixture files the tests expect (non_ascii, hello_simple, hello_import)."""
    os.makedirs(test_dir)
    salt.utils.files.recursive_copy(src_dir, test_dir)

    def _write(basename, payload):
        # Small helper: write raw bytes to a fixture file in test_dir.
        with salt.utils.files.fopen(os.path.join(test_dir, basename), "wb") as fp:
            fp.write(payload)

    _write("non_ascii", b"Assun\xc3\xa7\xc3\xa3o" + BLINESEP)
    _write("hello_simple", b"world" + BLINESEP)
    import_lines = [
        r"{% from 'macro' import mymacro -%}",
        r"{% from 'macro' import mymacro -%}",
        r"{{ mymacro('Hey') ~ mymacro(a|default('a'), b|default('b')) }}",
    ]
    with salt.utils.files.fopen(os.path.join(test_dir, "hello_import"), "wb") as fp:
        for line in import_lines:
            fp.write(line.encode("utf-8") + BLINESEP)
class TestSaltCacheLoader(TestCase):
    """Tests for salt.utils.jinja.SaltCacheLoader, backed by MockFileClient
    so no master connection is required."""
    def setUp(self):
        # Isolated template tree under a fresh tempdir, torn down per test.
        self.tempdir = tempfile.mkdtemp()
        self.template_dir = os.path.join(self.tempdir, "files", "test")
        _setup_test_dir(
            os.path.join(RUNTIME_VARS.BASE_FILES, "templates"), self.template_dir
        )
        self.opts = {
            "file_buffer_size": 1048576,
            "cachedir": self.tempdir,
            "file_roots": {"test": [self.template_dir]},
            "pillar_roots": {"test": [self.template_dir]},
            "extension_modules": os.path.join(
                os.path.dirname(os.path.abspath(__file__)), "extmods"
            ),
        }
        super(TestSaltCacheLoader, self).setUp()
    def tearDown(self):
        # Remove the per-test template tree.
        salt.utils.files.rm_rf(self.tempdir)
    def test_searchpath(self):
        """
        The searchpath is based on the cachedir option and the saltenv parameter
        """
        tmp = tempfile.gettempdir()
        opts = copy.deepcopy(self.opts)
        opts.update({"cachedir": tmp})
        loader = self.get_loader(opts=opts, saltenv="test")
        assert loader.searchpath == [os.path.join(tmp, "files", "test")]
    def test_mockclient(self):
        """
        A MockFileClient is used that records all file requests normally sent
        to the master.
        """
        loader = self.get_loader(opts=self.opts, saltenv="test")
        # get_source returns (contents, filepath, uptodate-callable).
        res = loader.get_source(None, "hello_simple")
        assert len(res) == 3
        # res[0] on Windows is unicode and use os.linesep so it works cross OS
        self.assertEqual(six.text_type(res[0]), "world" + os.linesep)
        tmpl_dir = os.path.join(self.template_dir, "hello_simple")
        self.assertEqual(res[1], tmpl_dir)
        assert res[2](), "Template up to date?"
        assert loader._file_client.requests
        self.assertEqual(loader._file_client.requests[0]["path"], "salt://hello_simple")
    def get_loader(self, opts=None, saltenv="base"):
        """
        Now that we instantiate the client in the __init__, we need to mock it
        """
        if opts is None:
            opts = self.opts
        with patch.object(SaltCacheLoader, "file_client", Mock()):
            loader = SaltCacheLoader(opts, saltenv)
        # Reset the class-level cached client after the test finishes.
        self.addCleanup(setattr, SaltCacheLoader, "_cached_client", None)
        # Create a mock file client and attach it to the loader
        MockFileClient(loader)
        return loader
    def get_test_saltenv(self):
        """
        Setup a simple jinja test environment
        """
        loader = self.get_loader(saltenv="test")
        jinja = Environment(loader=loader)
        return loader._file_client, jinja
    def test_import(self):
        """
        You can import and use macros from other files
        """
        fc, jinja = self.get_test_saltenv()
        result = jinja.get_template("hello_import").render()
        self.assertEqual(result, "Hey world !a b !")
        assert len(fc.requests) == 2
        self.assertEqual(fc.requests[0]["path"], "salt://hello_import")
        self.assertEqual(fc.requests[1]["path"], "salt://macro")
    def test_relative_import(self):
        """
        You can import using relative paths
        issue-13889
        """
        fc, jinja = self.get_test_saltenv()
        tmpl = jinja.get_template(os.path.join("relative", "rhello"))
        result = tmpl.render()
        self.assertEqual(result, "Hey world !a b !")
        assert len(fc.requests) == 3
        self.assertEqual(
            fc.requests[0]["path"], os.path.join("salt://relative", "rhello")
        )
        self.assertEqual(
            fc.requests[1]["path"], os.path.join("salt://relative", "rmacro")
        )
        self.assertEqual(fc.requests[2]["path"], "salt://macro")
        # This must fail when rendered: attempts to import from outside file root
        template = jinja.get_template("relative/rescape")
        self.assertRaises(exceptions.TemplateNotFound, template.render)
    def test_include(self):
        """
        You can also include a template that imports and uses macros
        """
        fc, jinja = self.get_test_saltenv()
        result = jinja.get_template("hello_include").render()
        self.assertEqual(result, "Hey world !a b !")
        assert len(fc.requests) == 3
        self.assertEqual(fc.requests[0]["path"], "salt://hello_include")
        self.assertEqual(fc.requests[1]["path"], "salt://hello_import")
        self.assertEqual(fc.requests[2]["path"], "salt://macro")
    def test_include_context(self):
        """
        Context variables are passes to the included template by default.
        """
        _, jinja = self.get_test_saltenv()
        result = jinja.get_template("hello_include").render(a="Hi", b="Salt")
        self.assertEqual(result, "Hey world !Hi Salt !")
    def test_cached_file_client(self):
        """
        Multiple instantiations of SaltCacheLoader use the cached file client
        """
        with patch("salt.transport.client.ReqChannel.factory", Mock()):
            loader_a = SaltCacheLoader(self.opts)
            loader_b = SaltCacheLoader(self.opts)
        assert loader_a._file_client is loader_b._file_client
    def test_file_client_kwarg(self):
        """
        A file client can be passed to SaltCacheLoader overriding the any
        cached file client
        """
        mfc = MockFileClient()
        loader = SaltCacheLoader(self.opts, _file_client=mfc)
        assert loader._file_client is mfc
    def test_cache_loader_shutdown(self):
        """
        The shudown method can be called without raising an exception when the
        file_client does not have a destroy method
        """
        mfc = MockFileClient()
        assert not hasattr(mfc, "destroy")
        loader = SaltCacheLoader(self.opts, _file_client=mfc)
        assert loader._file_client is mfc
        # Shutdown method should not raise any exceptions
        loader.shutdown()
class TestGetTemplate(TestCase):
    def setUp(self):
        """Create an isolated template tree plus minimal local-client opts."""
        self.tempdir = tempfile.mkdtemp()
        self.template_dir = os.path.join(self.tempdir, "files", "test")
        _setup_test_dir(
            os.path.join(RUNTIME_VARS.BASE_FILES, "templates"), self.template_dir
        )
        self.local_opts = {
            "file_buffer_size": 1048576,
            "cachedir": self.tempdir,
            "file_client": "local",
            "file_ignore_regex": None,
            "file_ignore_glob": None,
            "file_roots": {"test": [self.template_dir]},
            "pillar_roots": {"test": [self.template_dir]},
            "fileserver_backend": ["roots"],
            "hash_type": "md5",
            "extension_modules": os.path.join(
                os.path.dirname(os.path.abspath(__file__)), "extmods"
            ),
        }
        self.local_salt = {}
        super(TestGetTemplate, self).setUp()
    def tearDown(self):
        """Remove the per-test template tree."""
        salt.utils.files.rm_rf(self.tempdir)
    def test_fallback(self):
        """
        A Template with a filesystem loader is returned as fallback
        if the file is not contained in the searchpath
        """
        fn_ = os.path.join(self.template_dir, "hello_simple")
        # Render the template source directly; no remote client involved.
        with salt.utils.files.fopen(fn_) as fp_:
            out = render_jinja_tmpl(
                salt.utils.stringutils.to_unicode(fp_.read()),
                dict(opts=self.local_opts, saltenv="test", salt=self.local_salt),
            )
        self.assertEqual(out, "world" + os.linesep)
    def test_fallback_noloader(self):
        """
        A Template with a filesystem loader is returned as fallback
        if the file is not contained in the searchpath
        """
        filename = os.path.join(self.template_dir, "hello_import")
        # hello_import pulls in the macro fixture; defaults a/b apply.
        with salt.utils.files.fopen(filename) as fp_:
            out = render_jinja_tmpl(
                salt.utils.stringutils.to_unicode(fp_.read()),
                dict(opts=self.local_opts, saltenv="test", salt=self.local_salt),
            )
        self.assertEqual(out, "Hey world !a b !" + os.linesep)
    def test_saltenv(self):
        """
        If the template is within the searchpath it can
        import, include and extend other templates.
        The initial template is expected to be already cached
        get_template does not request it from the master again.
        """
        fc = MockFileClient()
        with patch.object(SaltCacheLoader, "file_client", MagicMock(return_value=fc)):
            filename = os.path.join(self.template_dir, "hello_import")
            with salt.utils.files.fopen(filename) as fp_:
                out = render_jinja_tmpl(
                    salt.utils.stringutils.to_unicode(fp_.read()),
                    dict(
                        opts={
                            "cachedir": self.tempdir,
                            "file_client": "remote",
                            "file_roots": self.local_opts["file_roots"],
                            "pillar_roots": self.local_opts["pillar_roots"],
                        },
                        a="Hi",
                        b="Salt",
                        saltenv="test",
                        salt=self.local_salt,
                    ),
                )
            self.assertEqual(out, "Hey world !Hi Salt !" + os.linesep)
            # Only the imported macro is fetched; the entry template is cached.
            self.assertEqual(fc.requests[0]["path"], "salt://macro")
    def test_macro_additional_log_for_generalexc(self):
        """
        If we failed in a macro because of e.g. a TypeError, get
        more output from trace.
        """
        # Expected SaltRenderError message: includes the failing macro source
        # with a marker on the offending line.
        expected = r"""Jinja error:.*division.*
.*macrogeneral\(2\):
---
\{% macro mymacro\(\) -%\}
\{\{ 1/0 \}\} <======================
\{%- endmacro %\}
---.*"""
        filename = os.path.join(self.template_dir, "hello_import_generalerror")
        fc = MockFileClient()
        with patch.object(SaltCacheLoader, "file_client", MagicMock(return_value=fc)):
            with salt.utils.files.fopen(filename) as fp_:
                self.assertRaisesRegex(
                    SaltRenderError,
                    expected,
                    render_jinja_tmpl,
                    salt.utils.stringutils.to_unicode(fp_.read()),
                    dict(opts=self.local_opts, saltenv="test", salt=self.local_salt),
                )
    def test_macro_additional_log_for_undefined(self):
        """
        If we failed in a macro because of undefined variables, get
        more output from trace.
        """
        # Expected SaltRenderError message: shows the macro source and flags
        # the line referencing the undefined variable.
        expected = r"""Jinja variable 'b' is undefined
.*macroundefined\(2\):
---
\{% macro mymacro\(\) -%\}
\{\{b.greetee\}\} <-- error is here <======================
\{%- endmacro %\}
---"""
        filename = os.path.join(self.template_dir, "hello_import_undefined")
        fc = MockFileClient()
        with patch.object(SaltCacheLoader, "file_client", MagicMock(return_value=fc)):
            with salt.utils.files.fopen(filename) as fp_:
                self.assertRaisesRegex(
                    SaltRenderError,
                    expected,
                    render_jinja_tmpl,
                    salt.utils.stringutils.to_unicode(fp_.read()),
                    dict(opts=self.local_opts, saltenv="test", salt=self.local_salt),
                )
    def test_macro_additional_log_syntaxerror(self):
        """
        If we failed in a macro, get more output from trace.
        """
        # Expected SaltRenderError message: shows the macro source and flags
        # the syntactically broken line.
        expected = r"""Jinja syntax error: expected token .*end.*got '-'.*
.*macroerror\(2\):
---
# macro
\{% macro mymacro\(greeting, greetee='world'\) -\} <-- error is here <======================
\{\{ greeting ~ ' ' ~ greetee \}\} !
\{%- endmacro %\}
---.*"""
        filename = os.path.join(self.template_dir, "hello_import_error")
        fc = MockFileClient()
        with patch.object(SaltCacheLoader, "file_client", MagicMock(return_value=fc)):
            with salt.utils.files.fopen(filename) as fp_:
                self.assertRaisesRegex(
                    SaltRenderError,
                    expected,
                    render_jinja_tmpl,
                    salt.utils.stringutils.to_unicode(fp_.read()),
                    dict(opts=self.local_opts, saltenv="test", salt=self.local_salt),
                )
    def test_non_ascii_encoding(self):
        """Templates and context values containing non-ascii characters render
        correctly, both through macro imports and from a raw utf-8 file."""
        fc = MockFileClient()
        with patch.object(SaltCacheLoader, "file_client", MagicMock(return_value=fc)):
            filename = os.path.join(self.template_dir, "hello_import")
            # Non-ascii context value passed through a macro import.
            with salt.utils.files.fopen(filename) as fp_:
                out = render_jinja_tmpl(
                    salt.utils.stringutils.to_unicode(fp_.read()),
                    dict(
                        opts={
                            "cachedir": self.tempdir,
                            "file_client": "remote",
                            "file_roots": self.local_opts["file_roots"],
                            "pillar_roots": self.local_opts["pillar_roots"],
                        },
                        a="Hi",
                        b="Sàlt",
                        saltenv="test",
                        salt=self.local_salt,
                    ),
                )
            self.assertEqual(
                out,
                salt.utils.stringutils.to_unicode("Hey world !Hi Sàlt !" + os.linesep),
            )
            self.assertEqual(fc.requests[0]["path"], "salt://macro")
            filename = os.path.join(self.template_dir, "non_ascii")
            # Non-ascii template body read as raw bytes then decoded as utf-8.
            with salt.utils.files.fopen(filename, "rb") as fp_:
                out = render_jinja_tmpl(
                    salt.utils.stringutils.to_unicode(fp_.read(), "utf-8"),
                    dict(
                        opts={
                            "cachedir": self.tempdir,
                            "file_client": "remote",
                            "file_roots": self.local_opts["file_roots"],
                            "pillar_roots": self.local_opts["pillar_roots"],
                        },
                        a="Hi",
                        b="Sàlt",
                        saltenv="test",
                        salt=self.local_salt,
                    ),
                )
            self.assertEqual("Assunção" + os.linesep, out)
            self.assertEqual(fc.requests[0]["path"], "salt://macro")
@skipIf(HAS_TIMELIB is False, "The `timelib` library is not installed.")
def test_strftime(self):
response = render_jinja_tmpl(
'{{ "2002/12/25"|strftime }}',
dict(opts=self.local_opts, saltenv="test", salt=self.local_salt),
)
self.assertEqual(response, "2002-12-25")
objects = (
datetime.datetime(2002, 12, 25, 12, 00, 00, 00),
"2002/12/25",
| |
^ ( 1 / 2.4 ) ) - 0.055
# else, data[x, y, c] = data[x, y, c] * 12.92
data[mask] **= 0.4167
data[mask] *= 1.055
data[mask] -= 0.055
data[np.invert(mask)] *= 12.92
# rescale
return np.clip(data * clip_range[1], clip_range[0], clip_range[1])
def degamma_adobe_rgb_1998(self, clip_range=[0, 65535]):
# bring data in range 0 to 1
data = np.clip(self.data, clip_range[0], clip_range[1])
data = np.divide(data, clip_range[1])
data = np.power(data, 2.2) # originally raised to 2.19921875
# rescale
return np.clip(data * clip_range[1], clip_range[0], clip_range[1])
def gamma_adobe_rgb_1998(self, clip_range=[0, 65535]):
# bring data in range 0 to 1
data = np.clip(self.data, clip_range[0], clip_range[1])
data = np.divide(data, clip_range[1])
data = np.power(data, 0.4545)
# rescale
return np.clip(data * clip_range[1], clip_range[0], clip_range[1])
def get_xyz_reference(self, cie_version="1931", illuminant="d65"):
if (cie_version == "1931"):
xyz_reference_dictionary = {"A" : [109.850, 100.0, 35.585],\
"B" : [99.0927, 100.0, 85.313],\
"C" : [98.074, 100.0, 118.232],\
"d50" : [96.422, 100.0, 82.521],\
"d55" : [95.682, 100.0, 92.149],\
"d65" : [95.047, 100.0, 108.883],\
"d75" : [94.972, 100.0, 122.638],\
"E" : [100.0, 100.0, 100.0],\
"F1" : [92.834, 100.0, 103.665],\
"F2" : [99.187, 100.0, 67.395],\
"F3" : [103.754, 100.0, 49.861],\
"F4" : [109.147, 100.0, 38.813],\
"F5" : [90.872, 100.0, 98.723],\
"F6" : [97.309, 100.0, 60.191],\
"F7" : [95.044, 100.0, 108.755],\
"F8" : [96.413, 100.0, 82.333],\
"F9" : [100.365, 100.0, 67.868],\
"F10" : [96.174, 100.0, 81.712],\
"F11" : [100.966, 100.0, 64.370],\
"F12" : [108.046, 100.0, 39.228]}
elif (cie_version == "1964"):
xyz_reference_dictionary = {"A" : [111.144, 100.0, 35.200],\
"B" : [99.178, 100.0, 84.3493],\
"C" : [97.285, 100.0, 116.145],\
"D50" : [96.720, 100.0, 81.427],\
"D55" : [95.799, 100.0, 90.926],\
"D65" : [94.811, 100.0, 107.304],\
"D75" : [94.416, 100.0, 120.641],\
"E" : [100.0, 100.0, 100.0],\
"F1" : [94.791, 100.0, 103.191],\
"F2" : [103.280, 100.0, 69.026],\
"F3" : [108.968, 100.0, 51.965],\
"F4" : [114.961, 100.0, 40.963],\
"F5" : [93.369, 100.0, 98.636],\
"F6" : [102.148, 100.0, 62.074],\
"F7" : [95.792, 100.0, 107.687],\
"F8" : [97.115, 100.0, 81.135],\
"F9" : [102.116, 100.0, 67.826],\
"F10" : [99.001, 100.0, 83.134],\
"F11" : [103.866, 100.0, 65.627],\
"F12" : [111.428, 100.0, 40.353]}
else:
print("Warning! cie_version must be 1931 or 1964.")
return
return np.divide(xyz_reference_dictionary[illuminant], 100.0)
def sobel_prewitt_direction_label(self, gradient_magnitude, theta, threshold=0):
direction_label = np.zeros(np.shape(gradient_magnitude), dtype=np.float32)
theta = np.asarray(theta)
# vertical
mask = ((theta >= -22.5) & (theta <= 22.5))
direction_label[mask] = 3.
# +45 degree
mask = ((theta > 22.5) & (theta <= 67.5))
direction_label[mask] = 2.
# -45 degree
mask = ((theta < -22.5) & (theta >= -67.5))
direction_label[mask] = 4.
# horizontal
mask = ((theta > 67.5) & (theta <= 90.)) | ((theta < -67.5) & (theta >= -90.))
direction_label[mask] = 1.
gradient_magnitude = np.asarray(gradient_magnitude)
mask = gradient_magnitude < threshold
direction_label[mask] = 0.
return direction_label
    def edge_wise_median(self, kernel_size, edge_location):
        """Median-filter only the pixels flagged as edges.

        Pixels where edge_location == 1 are replaced by the median of their
        kernel_size x kernel_size neighborhood; pixels flagged 0 are copied
        through unchanged. Borders are reflect-padded so the kernel always
        has full support.
        """
        # pad two pixels at the border
        no_of_pixel_pad = math.floor(kernel_size / 2) # number of pixels to pad
        data = self.data
        data = np.pad(data, \
                      (no_of_pixel_pad, no_of_pixel_pad),\
                      'reflect') # reflect would not repeat the border value
        edge_location = np.pad(edge_location,\
                               (no_of_pixel_pad, no_of_pixel_pad),\
                               'reflect') # reflect would not repeat the border value
        width, height = self.get_width_height()
        output = np.empty((height, width), dtype=np.float32)
        # Indices run over the padded arrays; subtract the pad when writing
        # into the unpadded output.
        for i in range(no_of_pixel_pad, height + no_of_pixel_pad):
            for j in range(no_of_pixel_pad, width + no_of_pixel_pad):
                if (edge_location[i, j] == 1):
                    # Median over the full (2*pad+1)^2 window centered here.
                    output[i - no_of_pixel_pad, j - no_of_pixel_pad] = \
                        np.median(data[i - no_of_pixel_pad : i + no_of_pixel_pad + 1,\
                                       j - no_of_pixel_pad : j + no_of_pixel_pad + 1])
                elif (edge_location[i, j] == 0):
                    output[i - no_of_pixel_pad, j - no_of_pixel_pad] = data[i, j]
        return output
    def nonuniform_quantization(self):
        """Quantize self.data into four levels (0..3) with non-uniform
        thresholds at 1/2, 3/4 and 7/8 of the data span (finer steps near
        the top of the range)."""
        output = np.zeros(np.shape(self.data), dtype=np.float32)
        min_val = np.min(self.data)
        max_val = np.max(self.data)
        # NOTE(review): raw values are compared against fractions of
        # (max - min) without adding min_val back, which is only correct
        # when min_val == 0 — confirm intended behavior for data whose
        # minimum is non-zero.
        mask = (self.data > (7./8.) * (max_val - min_val))
        output[mask] = 3.
        mask = (self.data > (3./4.) * (max_val - min_val)) & (self.data <= (7./8.) * (max_val - min_val))
        output[mask] = 2.
        mask = (self.data > (1./2.) * (max_val - min_val)) & (self.data <= (3./4.) * (max_val - min_val))
        output[mask] = 1.
        # Values at or below half the span keep the default level 0.
        return output
    def __str__(self):
        # Human-readable identity: the name given at construction.
        return self.name
# =============================================================
# function: distance_euclid
# returns Euclidean distance between two points
# =============================================================
def distance_euclid(point1, point2):
    """Return the Euclidean distance between two 2-D points.

    Args:
        point1: sequence of two numbers (x, y).
        point2: sequence of two numbers (x, y).

    Returns:
        float: the straight-line distance between the points.
    """
    # math.hypot is the idiomatic (and overflow/underflow-safe) form of
    # sqrt(dx**2 + dy**2).
    return math.hypot(point1[0] - point2[0], point1[1] - point2[1])
# =============================================================
# class: special_functions
# pass input through special functions
# =============================================================
class special_function:
    def __init__(self, data, name="special function"):
        # data is stored as float32 so downstream math runs in floating point.
        self.data = np.float32(data)
        # name used for display/debugging.
        self.name = name
def soft_coring(self, slope, tau_threshold, gamma_speed):
# Usage: Used in the unsharp masking sharpening Process
# Input:
# slope: controls the boost.
# the amount of sharpening, higher slope
# means more aggresssive sharpening
#
# tau_threshold: controls the amount of coring.
# threshold value till which the image is
# not sharpened. The lower the value of
# tau_threshold the more frequencies
# goes through the sharpening process
#
# gamma_speed: controls the speed of convergence to the slope
# smaller value gives a little bit more
# sharpened image, this may be a fine tuner
return slope * self.data * ( 1. - np.exp(-((np.abs(self.data / tau_threshold))**gamma_speed)))
def distortion_function(self, correction_type="barrel-1", strength=0.1):
if (correction_type == "pincushion-1"):
return np.divide(self.data, 1. + strength * self.data)
elif (correction_type == "pincushion-2"):
return np.divide(self.data, 1. + strength * np.power(self.data, 2))
elif (correction_type == "barrel-1"):
return np.multiply(self.data, 1. + strength * self.data)
elif (correction_type == "barrel-2"):
return np.multiply(self.data, 1. + strength * np.power(self.data, 2))
else:
print("Warning! Unknown correction_type.")
return
def bilateral_filter(self, edge):
# bilateral filter based upon the work of
# <NAME>, <NAME>, and <NAME>, 2007 work
# note: if edge data is not provided, image is served as edge
# this is called normal bilateral filter
# if edge data is provided, then it is called cross or joint
# bilateral filter
# get width and height of the image
width, height = helpers(self.data).get_width_height()
# sigma_spatial
sigma_spatial = min(height, width) / 16.
# calculate edge_delta
edge_min = np.min(edge)
edge_max = np.max(edge)
edge_delta = edge_max - edge_min
# sigma_range and sampling_range
sigma_range = 0.1 * edge_delta
sampling_range = sigma_range
sampling_spatial = sigma_spatial
# derived_sigma_spatial and derived_sigma_range
derived_sigma_spatial = sigma_spatial / sampling_spatial
derived_sigma_range = sigma_range / sampling_range
# paddings
padding_xy = np.floor(2. * derived_sigma_spatial) + 1.
padding_z = np.floor(2. * derived_sigma_range) + 1.
# downsamples
downsample_width = np.uint16(np.floor((width - 1.) / sampling_spatial) + 1. + 2. * padding_xy)
downsample_height = np.uint16(np.floor((height - 1.) / sampling_spatial) + 1. + 2. * padding_xy)
downsample_depth = np.uint16(np.floor(edge_delta / sampling_range) + 1. + 2. * padding_z)
grid_data = np.zeros((downsample_height, downsample_width, downsample_depth))
grid_weight = np.zeros((downsample_height, downsample_width, downsample_depth))
jj, ii = np.meshgrid(np.arange(0, width, 1),\
np.arange(0, height, 1))
di = np.uint16(np.round( ii / sampling_spatial ) + padding_xy + 1.)
dj = np.uint16(np.round( jj / sampling_spatial ) + padding_xy + 1.)
dz = np.uint16(np.round( (edge - edge_min) / sampling_range ) + padding_z + 1.)
for i in range(0, height):
for j in range(0, width):
data_z = self.data[i, j]
if not np.isnan(data_z):
dik = di[i, j]
djk = dj[i, j]
dzk = dz[i, j]
grid_data[dik, djk, dzk] = grid_data[dik, djk, dzk] + data_z
grid_weight[dik, djk, dzk] = grid_weight[dik, djk, dzk] + 1.
kernel_width = 2. * derived_sigma_spatial + 1.
kernel_height = kernel_width
kernel_depth = 2. * derived_sigma_range + 1.
half_kernel_width = np.floor(kernel_width / 2.)
half_kernel_height = np.floor(kernel_height / 2.)
half_kernel_depth = np.floor(kernel_depth / 2.)
grid_x, grid_y, grid_z = np.meshgrid(np.arange(0, kernel_width, 1),\
np.arange(0, kernel_height, 1),\
np.arange(0, kernel_depth, 1))
grid_x = grid_x - half_kernel_width
grid_y = grid_y - half_kernel_height
grid_z = grid_z - half_kernel_depth
grid_r_squared = ( ( np.multiply(grid_x, grid_x) + \
np.multiply(grid_y, grid_y) ) / np.multiply(derived_sigma_spatial, derived_sigma_spatial) ) + \
( np.multiply(grid_z, grid_z) / np.multiply(derived_sigma_range, derived_sigma_range) )
kernel = np.exp(-0.5 * grid_r_squared)
blurred_grid_data = ndimage.convolve(grid_data, kernel, mode='reflect')
blurred_grid_weight = ndimage.convolve(grid_weight, kernel, mode='reflect')
# divide
blurred_grid_weight = np.asarray(blurred_grid_weight)
mask = blurred_grid_weight == 0
blurred_grid_weight[mask] = -2.
normalized_blurred_grid = np.divide(blurred_grid_data, blurred_grid_weight)
mask = blurred_grid_weight < -1
normalized_blurred_grid[mask] = 0.
blurred_grid_weight[mask] = 0.
# upsample
jj, ii = np.meshgrid(np.arange(0, width, 1),\
np.arange(0, height, 1))
di = (ii / sampling_spatial) + padding_xy + 1.
dj = (jj / sampling_spatial) + padding_xy + 1.
dz = (edge - edge_min) / sampling_range + padding_z + 1.
# arrange the input points
n_i, n_j, n_z = np.shape(normalized_blurred_grid)
points = (np.arange(0, n_i, 1), | |
Args:
timeout(float): The maximum amount of time to wait in seconds.
"""
_funcs.nx_wait(self._handle, constants.Condition.INTF_COMMUNICATING, 0, timeout)
    def wait_for_intf_remote_wakeup(self, timeout=10):
        # type: (float) -> None
        """Wait for interface remote wakeup.
        Wait for the interface to wakeup due to activity by a remote node on the
        network. This wait is used for CAN, when you set the 'can_tcvr_state'
        property to 'constants.CanTcvrState.SLEEP'. Although the interface
        itself is ready to communicate, this places the transceiver into a sleep
        state. When a remote CAN node transmits a frame, the transceiver wakes
        up, and communication is restored. This wait detects that remote wakeup.
        This wait is used for LIN when you set 'lin_sleep' property to
        'constants.LinSleep.REMOTE_SLEEP' or 'constants.LinSleep.LOCAL_SLEEP'.
        When asleep, if a remote LIN ECU transmits the wakeup pattern (break),
        the XNET LIN interface detects this transmission and wakes up. This wait
        detects that remote wakeup.
        Args:
            timeout(float): The maximum amount of time to wait in seconds.
        """
        # Delegates to the driver's nxWait with the INTF_REMOTE_WAKEUP
        # condition; the third argument (param value) is unused here.
        # NOTE(review): on timeout the driver layer presumably raises — the
        # exact error surfaced depends on the _funcs wrapper; confirm.
        _funcs.nx_wait(self._handle, constants.Condition.INTF_REMOTE_WAKEUP, 0, timeout)
    def connect_terminals(self, source, destination):
        # type: (typing.Text, typing.Text) -> None
        """Connect terminals on the XNET interface.
        This function connects a source terminal to a destination terminal on
        the interface hardware. The XNET terminal represents an external or
        internal hardware connection point on a National Instruments XNET
        hardware product. External terminals include PXI Trigger lines for a PXI
        card, RTSI terminals for a PCI card, or the single external terminal for
        a C Series module. Internal terminals include timebases (clocks) and
        logical entities such as a start trigger.
        The terminal inputs use the Terminal I/O names. Typically, one of the
        pair is an internal and the other an external.
        Args:
            source(str): Connection source name.
            destination(str): Connection destination name.
        """
        # Thin wrapper over the driver's nxConnectTerminals for this session's
        # interface handle.
        _funcs.nx_connect_terminals(self._handle, source, destination)
def disconnect_terminals(self, source, destination):
    # type: (typing.Text, typing.Text) -> None
    """Disconnect a pair of terminals on the XNET interface.

    Disconnects a specific source/destination pair previously connected
    with :any:`nixnet._session.base.SessionBase.connect_terminals`.

    When the final session for a given interface is cleared, NI-XNET
    automatically disconnects all terminal connections for that interface,
    so 'disconnect_terminals' is not required for most applications. It is
    typically used to change terminal connections dynamically while an
    application is running: first stop the interface using
    :any:`nixnet._session.base.SessionBase.stop` with the Interface Only
    scope, then call 'disconnect_terminals' and
    :any:`nixnet._session.base.SessionBase.connect_terminals` to adjust
    connections, and finally call
    :any:`nixnet._session.base.SessionBase.start` with the Interface Only
    scope to restart the interface.

    Only a previously connected terminal may be disconnected; attempting
    to disconnect a nonconnected terminal results in an error.

    Args:
        source(str): Connection source name.
        destination(str): Connection destination name.
    """
    handle = self._handle
    _funcs.nx_disconnect_terminals(handle, source, destination)
def change_lin_schedule(self, sched_index):
    # type: (int) -> None
    """Request that the LIN interface change its running schedule.

    According to the LIN protocol, only the master executes schedules,
    not slaves. If the :any:`nixnet._session.intf.Interface.lin_master`
    property is false (slave), this write implicitly sets that property
    to true (master). If the interface currently is running as a slave,
    this write returns an error, because it cannot change to master
    while running.

    Args:
        sched_index(int): Index to the schedule table that the LIN master
            executes. The schedule tables are sorted the way they are
            returned from the database with the
            `nixnet.database._cluster.Cluster.lin_schedules` property.
    """
    index_value = _ctypedefs.u32(sched_index)
    _funcs.nx_write_state(
        self._handle,
        constants.WriteState.LIN_SCHEDULE_CHANGE,
        index_value)
def change_lin_diagnostic_schedule(self, schedule):
    # type: (constants.LinDiagnosticSchedule) -> None
    """Request that the LIN interface change its diagnostic schedule.

    Args:
        schedule(:any:`nixnet._enums.LinDiagnosticSchedule`): Diagnostic
            schedule that the LIN master executes.
    """
    schedule_value = _ctypedefs.u32(schedule.value)
    _funcs.nx_write_state(
        self._handle,
        constants.WriteState.LIN_DIAGNOSTIC_SCHEDULE_CHANGE,
        schedule_value)
@property
def time_current(self):
    # type: () -> int
    """int: Current interface time."""
    timestamp = _ctypedefs.nxTimestamp_t()
    _funcs.nx_read_state(
        self._handle,
        constants.ReadState.TIME_CURRENT,
        ctypes.sizeof(timestamp),
        ctypes.pointer(timestamp))
    return timestamp.value
@property
def time_start(self):
    # type: () -> int
    """int: Time the interface was started."""
    timestamp = _ctypedefs.nxTimestamp_t()
    _funcs.nx_read_state(
        self._handle,
        constants.ReadState.TIME_START,
        ctypes.sizeof(timestamp),
        ctypes.pointer(timestamp))
    start_time = timestamp.value
    if not start_time:
        # A zero timestamp means the interface is not communicating;
        # surface that as a session-not-started error.
        _errors.check_for_error(constants.Err.SESSION_NOT_STARTED.value)
    return start_time
@property
def time_communicating(self):
    # type: () -> int
    """int: Time the interface started communicating.

    The time is usually later than ``time_start`` because the interface
    must undergo a communication startup procedure.
    """
    timestamp = _ctypedefs.nxTimestamp_t()
    _funcs.nx_read_state(
        self._handle,
        constants.ReadState.TIME_COMMUNICATING,
        ctypes.sizeof(timestamp),
        ctypes.pointer(timestamp))
    comm_time = timestamp.value
    if not comm_time:
        # A zero timestamp means the interface is not communicating;
        # surface that as a session-not-started error.
        _errors.check_for_error(constants.Err.SESSION_NOT_STARTED.value)
    return comm_time
@property
def state(self):
    # type: () -> constants.SessionInfoState
    """:any:`nixnet._enums.SessionInfoState`: Session running state."""
    raw_state = _ctypedefs.u32()
    _funcs.nx_read_state(
        self._handle,
        constants.ReadState.SESSION_INFO,
        ctypes.sizeof(raw_state),
        ctypes.pointer(raw_state))
    return constants.SessionInfoState(raw_state.value)
@property
def can_comm(self):
    # type: () -> types.CanComm
    """:any:`nixnet.types.CanComm`: CAN Communication state"""
    raw_bitfield = _ctypedefs.u32()
    _funcs.nx_read_state(
        self._handle,
        constants.ReadState.CAN_COMM,
        ctypes.sizeof(raw_bitfield),
        ctypes.pointer(raw_bitfield))
    # Decode the packed communication-state bitfield into a CanComm tuple.
    return _utils.parse_can_comm_bitfield(raw_bitfield.value)
@property
def lin_comm(self):
    # type: () -> types.LinComm
    """:any:`nixnet.types.LinComm`: LIN Communication state"""
    # The driver reports LIN communication state as two u32 words.
    raw_words = (_ctypedefs.u32 * 2)()  # type: ignore
    _funcs.nx_read_state(
        self._handle,
        constants.ReadState.LIN_COMM,
        ctypes.sizeof(raw_words),
        ctypes.pointer(raw_words))
    # u32 subclasses a ctypes fundamental type, so indexing yields the
    # wrapper instance and .value is needed to get the integer.
    return _utils.parse_lin_comm_bitfield(
        raw_words[0].value, raw_words[1].value)
def check_fault(self):
    # type: () -> None
    """Check for an asynchronous fault.

    A fault is an error that occurs asynchronously to the NI-XNET
    application calls. The fault cause may be related to network
    communication, but it also can be related to XNET hardware, such as a
    fault in the onboard processor. Although faults are extremely rare,
    nxReadState provides a detection method distinct from the status of
    NI-XNET function calls, yet easy to use alongside the common practice
    of checking the communication state.
    """
    info_value = _ctypedefs.u32()
    # The status returned by nx_read_state for SESSION_INFO carries the
    # fault code; hand it to the common error checker.
    fault = _funcs.nx_read_state(
        self._handle,
        constants.ReadState.SESSION_INFO,
        ctypes.sizeof(info_value),
        ctypes.pointer(info_value))
    _errors.check_for_error(fault)
@property
def intf(self):
    # type: () -> session_intf.Interface
    """:any:`nixnet._session.intf.Interface`: Interface configuration object for the session."""
    return self._intf
@property
def j1939(self):
    # type: () -> session_j1939.J1939
    """:any:`nixnet._session.j1939.J1939`: J1939 configuration object for the session."""
    return self._j1939
@property
def application_protocol(self):
    # type: () -> constants.AppProtocol
    """:any:`nixnet._enums.AppProtocol`: Application protocol that the session uses.

    The database used with the session determines the application protocol.
    """
    raw_protocol = _props.get_session_application_protocol(self._handle)
    return constants.AppProtocol(raw_protocol)
@property
def auto_start(self):
    # type: () -> bool
    """bool: Automatically start the output session on the first write.

    For input sessions, start always is performed within the first call to
    the appropriate read function (if not already started using
    :any:`nixnet._session.base.SessionBase.start`), because there is no
    known use case for reading a stopped input session.

    For output sessions, as long as the first call to the appropriate
    write function contains valid data, you can leave this property at its
    default value of true. If you need to call the appropriate write
    function multiple times prior to starting the session, or if you are
    starting multiple sessions simultaneously, you can set this property
    to false. After calling the appropriate write function as desired,
    you can call :any:`nixnet._session.base.SessionBase.start` to start
    the session(s).

    When automatic start is performed, it is equivalent to
    :any:`nixnet._session.base.SessionBase.start` with scope set to
    Normal: this starts the session itself, and if the interface is not
    already started, it starts the interface also.
    """
    return _props.get_session_auto_start(self._handle)

@auto_start.setter
def auto_start(self, value):
    # type: (bool) -> None
    _props.set_session_auto_start(self._handle, value)
@property
def cluster_name(self):
    # type: () -> typing.Text
    """str: Cluster (network) name used with the session."""
    return _props.get_session_cluster_name(self._handle)
@property
def database_name(self):
    # type: () -> typing.Text
    """str: Database name used with the session."""
    return _props.get_session_database_name(self._handle)
@property
def mode(self):
    # type: () -> constants.CreateSessionMode
    """:any:`nixnet._enums.CreateSessionMode`: Mode associated with the session.

    For more information, refer to :any:`nixnet._enums.CreateSessionMode`.
    """
    raw_mode = _props.get_session_mode(self._handle)
    return constants.CreateSessionMode(raw_mode)
@property
def num_pend(self):
# type: () -> int
"""int: This property returns the number of values (frames or signals) pending for the session.
For input sessions, this is the number of frame/signal values available
to the appropriate read function. If you call the appropriate read
function with number to read of this number and timeout of 0.0, the
| |
* uk_61
+ 1085386600 * uk_62
+ 705501290 * uk_63
+ 2442119850 * uk_64
+ 2094796138 * uk_65
+ 1889232031 * uk_66
+ 978876700 * uk_67
+ 636269855 * uk_68
+ 2202472575 * uk_69
+ 225 * uk_7
+ 1889232031 * uk_70
+ 507190000 * uk_71
+ 329673500 * uk_72
+ 1141177500 * uk_73
+ 978876700 * uk_74
+ 214287775 * uk_75
+ 741765375 * uk_76
+ 636269855 * uk_77
+ 2567649375 * uk_78
+ 2202472575 * uk_79
+ 193 * uk_8
+ 1889232031 * uk_80
+ 166375 * uk_81
+ 647350 * uk_82
+ 583825 * uk_83
+ 302500 * uk_84
+ 196625 * uk_85
+ 680625 * uk_86
+ 583825 * uk_87
+ 2518780 * uk_88
+ 2271610 * uk_89
+ 2572416961 * uk_9
+ 1177000 * uk_90
+ 765050 * uk_91
+ 2648250 * uk_92
+ 2271610 * uk_93
+ 2048695 * uk_94
+ 1061500 * uk_95
+ 689975 * uk_96
+ 2388375 * uk_97
+ 2048695 * uk_98
+ 550000 * uk_99,
uk_0
+ 50719 * uk_1
+ 2789545 * uk_10
+ 339020 * uk_100
+ 1138500 * uk_101
+ 1082840 * uk_102
+ 246895 * uk_103
+ 829125 * uk_104
+ 788590 * uk_105
+ 2784375 * uk_106
+ 2648250 * uk_107
+ 2518780 * uk_108
+ 8120601 * uk_109
+ 10194519 * uk_11
+ 8645814 * uk_110
+ 3716892 * uk_111
+ 2706867 * uk_112
+ 9090225 * uk_113
+ 8645814 * uk_114
+ 9204996 * uk_115
+ 3957288 * uk_116
+ 2881938 * uk_117
+ 9678150 * uk_118
+ 9204996 * uk_119
+ 10853866 * uk_12
+ 1701264 * uk_120
+ 1238964 * uk_121
+ 4160700 * uk_122
+ 3957288 * uk_123
+ 902289 * uk_124
+ 3030075 * uk_125
+ 2881938 * uk_126
+ 10175625 * uk_127
+ 9678150 * uk_128
+ 9204996 * uk_129
+ 4666148 * uk_13
+ 9800344 * uk_130
+ 4213232 * uk_131
+ 3068332 * uk_132
+ 10304100 * uk_133
+ 9800344 * uk_134
+ 1811296 * uk_135
+ 1319096 * uk_136
+ 4429800 * uk_137
+ 4213232 * uk_138
+ 960646 * uk_139
+ 3398173 * uk_14
+ 3226050 * uk_140
+ 3068332 * uk_141
+ 10833750 * uk_142
+ 10304100 * uk_143
+ 9800344 * uk_144
+ 778688 * uk_145
+ 567088 * uk_146
+ 1904400 * uk_147
+ 1811296 * uk_148
+ 412988 * uk_149
+ 11411775 * uk_15
+ 1386900 * uk_150
+ 1319096 * uk_151
+ 4657500 * uk_152
+ 4429800 * uk_153
+ 4213232 * uk_154
+ 300763 * uk_155
+ 1010025 * uk_156
+ 960646 * uk_157
+ 3391875 * uk_158
+ 3226050 * uk_159
+ 10853866 * uk_16
+ 3068332 * uk_160
+ 11390625 * uk_161
+ 10833750 * uk_162
+ 10304100 * uk_163
+ 9800344 * uk_164
+ 3025 * uk_17
+ 11055 * uk_18
+ 11770 * uk_19
+ 55 * uk_2
+ 5060 * uk_20
+ 3685 * uk_21
+ 12375 * uk_22
+ 11770 * uk_23
+ 40401 * uk_24
+ 43014 * uk_25
+ 18492 * uk_26
+ 13467 * uk_27
+ 45225 * uk_28
+ 43014 * uk_29
+ 201 * uk_3
+ 45796 * uk_30
+ 19688 * uk_31
+ 14338 * uk_32
+ 48150 * uk_33
+ 45796 * uk_34
+ 8464 * uk_35
+ 6164 * uk_36
+ 20700 * uk_37
+ 19688 * uk_38
+ 4489 * uk_39
+ 214 * uk_4
+ 15075 * uk_40
+ 14338 * uk_41
+ 50625 * uk_42
+ 48150 * uk_43
+ 45796 * uk_44
+ 130470415844959 * uk_45
+ 141482932855 * uk_46
+ 517055809161 * uk_47
+ 550497229654 * uk_48
+ 236662360412 * uk_49
+ 92 * uk_5
+ 172351936387 * uk_50
+ 578793816225 * uk_51
+ 550497229654 * uk_52
+ 153424975 * uk_53
+ 560698545 * uk_54
+ 596962630 * uk_55
+ 256638140 * uk_56
+ 186899515 * uk_57
+ 627647625 * uk_58
+ 596962630 * uk_59
+ 67 * uk_6
+ 2049098319 * uk_60
+ 2181627066 * uk_61
+ 937895748 * uk_62
+ 683032773 * uk_63
+ 2293766775 * uk_64
+ 2181627066 * uk_65
+ 2322727324 * uk_66
+ 998555672 * uk_67
+ 727209022 * uk_68
+ 2442119850 * uk_69
+ 225 * uk_7
+ 2322727324 * uk_70
+ 429285616 * uk_71
+ 312631916 * uk_72
+ 1049883300 * uk_73
+ 998555672 * uk_74
+ 227677591 * uk_75
+ 764588925 * uk_76
+ 727209022 * uk_77
+ 2567649375 * uk_78
+ 2442119850 * uk_79
+ 214 * uk_8
+ 2322727324 * uk_80
+ 166375 * uk_81
+ 608025 * uk_82
+ 647350 * uk_83
+ 278300 * uk_84
+ 202675 * uk_85
+ 680625 * uk_86
+ 647350 * uk_87
+ 2222055 * uk_88
+ 2365770 * uk_89
+ 2572416961 * uk_9
+ 1017060 * uk_90
+ 740685 * uk_91
+ 2487375 * uk_92
+ 2365770 * uk_93
+ 2518780 * uk_94
+ 1082840 * uk_95
+ 788590 * uk_96
+ 2648250 * uk_97
+ 2518780 * uk_98
+ 465520 * uk_99,
uk_0
+ 50719 * uk_1
+ 2789545 * uk_10
+ 318780 * uk_100
+ 1039500 * uk_101
+ 928620 * uk_102
+ 261855 * uk_103
+ 853875 * uk_104
+ 762795 * uk_105
+ 2784375 * uk_106
+ 2487375 * uk_107
+ 2222055 * uk_108
+ 2863288 * uk_109
+ 7202098 * uk_11
+ 4052964 * uk_110
+ 1693776 * uk_111
+ 1391316 * uk_112
+ 4536900 * uk_113
+ 4052964 * uk_114
+ 5736942 * uk_115
+ 2397528 * uk_116
+ 1969398 * uk_117
+ 6421950 * uk_118
+ 5736942 * uk_119
+ 10194519 * uk_12
+ 1001952 * uk_120
+ 823032 * uk_121
+ 2683800 * uk_122
+ 2397528 * uk_123
+ 676062 * uk_124
+ 2204550 * uk_125
+ 1969398 * uk_126
+ 7188750 * uk_127
+ 6421950 * uk_128
+ 5736942 * uk_129
+ 4260396 * uk_13
+ 8120601 * uk_130
+ 3393684 * uk_131
+ 2787669 * uk_132
+ 9090225 * uk_133
+ 8120601 * uk_134
+ 1418256 * uk_135
+ 1164996 * uk_136
+ 3798900 * uk_137
+ 3393684 * uk_138
+ 956961 * uk_139
+ 3499611 * uk_14
+ 3120525 * uk_140
+ 2787669 * uk_141
+ 10175625 * uk_142
+ 9090225 * uk_143
+ 8120601 * uk_144
+ 592704 * uk_145
+ 486864 * uk_146
+ 1587600 * uk_147
+ 1418256 * uk_148
+ 399924 * uk_149
+ 11411775 * uk_15
+ 1304100 * uk_150
+ 1164996 * uk_151
+ 4252500 * uk_152
+ 3798900 * uk_153
+ 3393684 * uk_154
+ 328509 * uk_155
+ 1071225 * uk_156
+ 956961 * uk_157
+ 3493125 * uk_158
+ 3120525 * uk_159
+ 10194519 * uk_16
+ 2787669 * uk_160
+ 11390625 * uk_161
+ 10175625 * uk_162
+ 9090225 * uk_163
+ 8120601 * uk_164
+ 3025 * uk_17
+ 7810 * uk_18
+ 11055 * uk_19
+ 55 * uk_2
+ 4620 * uk_20
+ 3795 * uk_21
+ 12375 * uk_22
+ 11055 * uk_23
+ 20164 * uk_24
+ 28542 * uk_25
+ 11928 * uk_26
+ 9798 * uk_27
+ 31950 * uk_28
+ 28542 * uk_29
+ 142 * uk_3
+ 40401 * uk_30
+ 16884 * uk_31
+ 13869 * uk_32
+ 45225 * uk_33
+ 40401 * uk_34
+ 7056 * uk_35
+ 5796 * uk_36
+ 18900 * uk_37
+ 16884 * uk_38
+ 4761 * uk_39
+ 201 * uk_4
+ 15525 * uk_40
+ 13869 * uk_41
+ 50625 * uk_42
+ 45225 * uk_43
+ 40401 * uk_44
+ 130470415844959 * uk_45
+ 141482932855 * uk_46
+ 365283208462 * uk_47
+ 517055809161 * uk_48
+ 216083024724 * uk_49
+ 84 * uk_5
+ 177496770309 * uk_50
+ 578793816225 * uk_51
+ 517055809161 * uk_52
+ 153424975 * uk_53
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.