**Schema**

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
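The `qsc_*_quality_signal` columns are surface statistics computed over `content`, and most names are self-describing. As an illustration only, here is a minimal sketch of how a few such statistics could be computed; the tokenizer and normalization used by the pipeline that produced this table are not documented here, so the formulas below are assumptions, not the reference implementation.

```python
import re


def approx_quality_signals(content: str) -> dict:
    """Approximate a few qsc_code_* statistics for a source file.

    These formulas are assumptions for illustration; the dataset's actual
    pipeline may tokenize and normalize differently.
    """
    words = re.findall(r"\S+", content)  # whitespace-delimited tokens
    lines = content.splitlines()
    n_chars = len(content)
    return {
        "num_words": len(words),
        "num_chars": n_chars,
        "mean_word_length": sum(map(len, words)) / len(words) if words else 0.0,
        "frac_words_unique": len(set(words)) / len(words) if words else 0.0,
        "frac_chars_digital": sum(c.isdigit() for c in content) / n_chars if n_chars else 0.0,
        "frac_chars_whitespace": sum(c.isspace() for c in content) / n_chars if n_chars else 0.0,
        "num_lines": len(lines),
        "num_chars_line_max": max(map(len, lines), default=0),
        "num_chars_line_mean": sum(map(len, lines)) / len(lines) if lines else 0.0,
    }
```

Consistent with character-count signals over the full file, `qsc_code_num_chars_quality_signal` and `qsc_code_size_file_byte_quality_signal` both equal `size` in every row below.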
**Row 1**

| field | value |
|---|---|
| hexsha | f30d8c8e6d43d445337f8cecd9b94c9435753996 |
| size | 448 |
| ext | py |
| lang | Python |
| max_stars_repo_path | tests/test_environment.py |
| max_stars_repo_name | FabienZa/tuxml |
| max_stars_repo_head_hexsha | 35fdd8c2d2b5cd3d46bed18619c8f840842f2614 |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count | 3 |
| max_stars_repo_stars_event_min_datetime | 2020-09-09T14:19:21.000Z |
| max_stars_repo_stars_event_max_datetime | 2020-09-30T13:53:53.000Z |
| max_issues_repo_path | tests/test_environment.py |
| max_issues_repo_name | FabienZa/tuxml |
| max_issues_repo_head_hexsha | 35fdd8c2d2b5cd3d46bed18619c8f840842f2614 |
| max_issues_repo_licenses | ["Apache-2.0"] |
| max_issues_count | 42 |
| max_issues_repo_issues_event_min_datetime | 2020-06-30T16:53:36.000Z |
| max_issues_repo_issues_event_max_datetime | 2022-02-20T14:28:53.000Z |
| max_forks_repo_path | tests/test_environment.py |
| max_forks_repo_name | FabienZa/tuxml |
| max_forks_repo_head_hexsha | 35fdd8c2d2b5cd3d46bed18619c8f840842f2614 |
| max_forks_repo_licenses | ["Apache-2.0"] |
| max_forks_count | 2 |
| max_forks_repo_forks_event_min_datetime | 2020-09-09T15:46:17.000Z |
| max_forks_repo_forks_event_max_datetime | 2021-02-10T15:24:12.000Z |

content:
```python
from pytest import raises
from unittest import TestCase #Usefull when testing classes

import compilation.environment as environment


def test_get_environment_details():
    # We test if we have any throw, which should never happen
    environment.get_environment_details()


def test_print_environment_details():
    # This should always pass, but just in case...
    environment.print_environment_details(environment.get_environment_details())
```
| field | value |
|---|---|
| avg_line_length | 29.866667 |
| max_line_length | 80 |
| alphanum_fraction | 0.801339 |

| signal | *_quality_signal | raw |
|---|---|---|
| qsc_code_num_words | 58 | 0 |
| qsc_code_num_chars | 448 | 0 |
| qsc_code_mean_word_length | 5.982759 | 0 |
| qsc_code_frac_words_unique | 0.586207 | null |
| qsc_code_frac_chars_top_2grams | 0.259366 | 1 |
| qsc_code_frac_chars_top_3grams | 0.181556 | 1 |
| qsc_code_frac_chars_top_4grams | 0.184438 | 1 |
| qsc_code_frac_chars_dupe_5grams | 0 | 0 |
| qsc_code_frac_chars_dupe_6grams | 0 | 0 |
| qsc_code_frac_chars_dupe_7grams | 0 | 0 |
| qsc_code_frac_chars_dupe_8grams | 0 | 0 |
| qsc_code_frac_chars_dupe_9grams | 0 | 0 |
| qsc_code_frac_chars_dupe_10grams | 0 | 0 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0 | 0 |
| qsc_code_frac_chars_whitespace | 0.145089 | 0 |
| qsc_code_size_file_byte | 448 | 0 |
| qsc_code_num_lines | 14 | 0 |
| qsc_code_num_chars_line_max | 81 | 0 |
| qsc_code_num_chars_line_mean | 32 | 0 |
| qsc_code_frac_chars_alphabet | 0.906005 | 0 |
| qsc_code_frac_chars_comments | 0.287946 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0.285714 | 1 |
| qsc_codepython_cate_var_zero | true | 1 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 0.428571 | 1 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0.714286 | 0 |
| qsc_codepython_frac_lines_print | 0.285714 | 0 |

| field | value |
|---|---|
| effective | 0 |
| hits | 6 |
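Each measured `qsc_*_quality_signal` column is paired with a raw `qsc_*` column that holds 0, 1, or null. In the complete rows shown here, `hits` equals the number of raw `qsc_*` flags set to 1 (6 in every case), which suggests the raw columns record which quality rules fired, with `effective` holding the overall outcome ("0" in all three rows). This reading is inferred from the preview itself, not from documentation; a hypothetical sketch under that assumption:

```python
# Hypothetical reading of the paired columns: each raw qsc_* column flags
# whether the corresponding *_quality_signal value tripped a filter rule,
# and hits counts the fired flags. Inferred from the preview rows only.
def count_hits(row, flag_columns):
    return sum(1 for col in flag_columns if row.get(col) == 1)


def keep(row, flag_columns):
    # Keep a record only if no quality rule fired.
    return count_hits(row, flag_columns) == 0
```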
**Row 2**

| field | value |
|---|---|
| hexsha | b823809bb940f7303f9b0233d58601b237b251da |
| size | 149 |
| ext | py |
| lang | Python |
| max_stars_repo_path | src/gt4sd/frameworks/enzeptional/__init__.py |
| max_stars_repo_name | hhhsu0825/gt4sd-core |
| max_stars_repo_head_hexsha | 4a1fe9da58d2f33bba2fba64604427e037ad7a46 |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | 1 |
| max_stars_repo_stars_event_min_datetime | 2022-02-22T02:06:10.000Z |
| max_stars_repo_stars_event_max_datetime | 2022-02-22T02:06:10.000Z |
| max_issues_repo_path | src/gt4sd/frameworks/enzeptional/__init__.py |
| max_issues_repo_name | hhhsu0825/gt4sd-core |
| max_issues_repo_head_hexsha | 4a1fe9da58d2f33bba2fba64604427e037ad7a46 |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | 12 |
| max_issues_repo_issues_event_min_datetime | 2022-02-21T12:59:24.000Z |
| max_issues_repo_issues_event_max_datetime | 2022-02-22T12:25:49.000Z |
| max_forks_repo_path | src/gt4sd/frameworks/enzeptional/__init__.py |
| max_forks_repo_name | hhhsu0825/gt4sd-core |
| max_forks_repo_head_hexsha | 4a1fe9da58d2f33bba2fba64604427e037ad7a46 |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content:

```python
"""enzeptional - ENZymE OPTImizatiON for biocatALysis.

Module for enzyme optimization.
"""

from .optimization import EnzymeOptimizer # noqa: F401
```
| field | value |
|---|---|
| avg_line_length | 21.285714 |
| max_line_length | 55 |
| alphanum_fraction | 0.778523 |

| signal | *_quality_signal | raw |
|---|---|---|
| qsc_code_num_words | 15 | 1 |
| qsc_code_num_chars | 149 | 0 |
| qsc_code_mean_word_length | 7.733333 | 0 |
| qsc_code_frac_words_unique | 0.733333 | null |
| qsc_code_frac_chars_top_2grams | 0.310345 | 1 |
| qsc_code_frac_chars_top_3grams | 0 | 0 |
| qsc_code_frac_chars_top_4grams | 0 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0 | 0 |
| qsc_code_frac_chars_dupe_6grams | 0 | 0 |
| qsc_code_frac_chars_dupe_7grams | 0 | 0 |
| qsc_code_frac_chars_dupe_8grams | 0 | 0 |
| qsc_code_frac_chars_dupe_9grams | 0 | 0 |
| qsc_code_frac_chars_dupe_10grams | 0 | 0 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.023438 | 0 |
| qsc_code_frac_chars_whitespace | 0.14094 | 0 |
| qsc_code_size_file_byte | 149 | 0 |
| qsc_code_num_lines | 6 | 1 |
| qsc_code_num_chars_line_max | 56 | 0 |
| qsc_code_num_chars_line_mean | 24.833333 | 0 |
| qsc_code_frac_chars_alphabet | 0.882813 | 0 |
| qsc_code_frac_chars_comments | 0.644295 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0 | 0 |
| qsc_codepython_cate_var_zero | true | 1 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 1 | 1 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 1 | 1 |
| qsc_codepython_frac_lines_print | 0 | 0 |

| field | value |
|---|---|
| effective | 0 |
| hits | 6 |
**Row 3**

| field | value |
|---|---|
| hexsha | b88c6b9a3e86b07134f862458c7720b015c0a171 |
| size | 4610 |
| ext | py |
| lang | Python |
| max_stars_repo_path | tests/stepsizes_test.py |
| max_stars_repo_name | plopd/plop-msc-thesis |
| max_stars_repo_head_hexsha | c61fcf53c670b288ac8593790f9cc3f3abd50989 |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | 3 |
| max_stars_repo_stars_event_min_datetime | 2022-01-14T19:56:30.000Z |
| max_stars_repo_stars_event_max_datetime | 2022-02-04T21:46:29.000Z |
| max_issues_repo_path | tests/stepsizes_test.py |
| max_issues_repo_name | plopd/plop-msc-thesis |
| max_issues_repo_head_hexsha | c61fcf53c670b288ac8593790f9cc3f3abd50989 |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | 3 |
| max_issues_repo_issues_event_min_datetime | 2021-03-31T20:23:09.000Z |
| max_issues_repo_issues_event_max_datetime | 2021-12-13T20:51:15.000Z |
| max_forks_repo_path | tests/stepsizes_test.py |
| max_forks_repo_name | plopd/plop-msc-thesis |
| max_forks_repo_head_hexsha | c61fcf53c670b288ac8593790f9cc3f3abd50989 |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content:
```python
import numpy as np

from agents.agents import get_agent


def test_td_step_size():
    agent_info = {"step_size": 0.5, "representations": "TA", "num_states": 5}
    td = get_agent("TD")()
    td.agent_init(agent_info)

    assert td.step_size == agent_info.get("step_size")


def test_td_step_size_tile_coding():
    agent_info = {
        "step_size": 0.5,
        "representations": "TC",
        "tiles_per_dim": "10,10",
        "min_x": "0,0",
        "max_x": "1,1",
        "tilings": 5,
    }
    td = get_agent("TDTileCoding")()
    td.agent_init(agent_info)

    assert td.step_size == agent_info.get("step_size") / agent_info.get("tilings")


def test_etd_step_size_undiscounted():
    agent_info = {
        "step_size": 0.5,
        "representations": "TA",
        "num_states": 5,
        "discount_rate": 1.0,
        "trace_decay": 0.95,
        "interest": 1,
    }
    etd = get_agent("ETD")()
    etd.agent_init(agent_info)

    assert etd.step_size == agent_info.get("step_size")


def test_etd_step_size():
    agent_info = {
        "step_size": 0.5,
        "representations": "TA",
        "num_states": 5,
        "discount_rate": 0.25,
        "trace_decay": 0.95,
        "interest": 1,
    }
    etd = get_agent("ETD")()
    etd.agent_init(agent_info)

    assert etd.step_size == agent_info.get("step_size") / (
        (
            agent_info.get("interest")
            - agent_info.get("interest")
            * agent_info.get("trace_decay")
            * agent_info.get("discount_rate")
        )
        / (1 - agent_info.get("discount_rate"))
    )


def test_etd_step_size_tile_coding():
    agent_info = {
        "step_size": 0.5,
        "representations": "TC",
        "tiles_per_dim": "10,10",
        "min_x": "0,0",
        "max_x": "1,1",
        "tilings": 5,
        "discount_rate": 0.25,
        "trace_decay": 0.95,
        "interest": 1,
    }
    etd = get_agent("ETDTileCoding")()
    etd.agent_init(agent_info)

    M = (
        agent_info.get("interest")
        - agent_info.get("interest")
        * agent_info.get("trace_decay")
        * agent_info.get("discount_rate")
    ) / (1 - agent_info.get("discount_rate"))

    assert etd.step_size == agent_info.get("step_size") / agent_info.get("tilings") / M


def test_td_step_size_fourier():
    agent_info = {"step_size": 0.5, "representations": "F", "num_dims": 2, "order": 2}
    td = get_agent("TD")()
    td.agent_init(agent_info)

    C = td.FR.C
    num_features = td.FR.num_features
    step_sizes = np.full(td.FR.num_features, fill_value=agent_info.get("step_size"))
    for i in range(1, num_features):
        step_sizes[i] /= np.sqrt(np.sum(np.square(C[i])))

    assert np.array_equal(td.step_size, step_sizes)


def test_td_step_size_random_binary():
    agent_info = {
        "step_size": 0.5,
        "representations": "RB",
        "num_states": 5,
        "num_features": 3,
        "num_ones": 2,
        "seed": 0,
    }
    td = get_agent("TD")()
    td.agent_init(agent_info)

    num_ones = td.FR.num_ones

    td.step_size = agent_info.get("step_size") / num_ones


def test_etd_step_size_random_binary():
    agent_info = {
        "step_size": 0.5,
        "representations": "RB",
        "num_states": 5,
        "num_features": 3,
        "num_ones": 2,
        "seed": 0,
        "discount_rate": 0.25,
        "trace_decay": 0.95,
        "interest": 1,
    }
    etd = get_agent("ETD")()
    M = (
        agent_info.get("interest")
        - agent_info.get("interest")
        * agent_info.get("trace_decay")
        * agent_info.get("discount_rate")
    ) / (1 - agent_info.get("discount_rate"))
    etd.agent_init(agent_info)

    num_ones = etd.FR.num_ones

    etd.step_size = agent_info.get("step_size") / num_ones / M


def test_etd_step_size_fourier():
    agent_info = {
        "step_size": 0.5,
        "representations": "F",
        "num_dims": 2,
        "order": 2,
        "discount_rate": 0.25,
        "trace_decay": 0.95,
        "interest": 1,
    }
    etd = get_agent("ETD")()
    M = (
        agent_info.get("interest")
        - agent_info.get("interest")
        * agent_info.get("trace_decay")
        * agent_info.get("discount_rate")
    ) / (1 - agent_info.get("discount_rate"))
    etd.agent_init(agent_info)

    C = etd.FR.C
    num_features = etd.FR.num_features
    step_sizes = np.full(etd.FR.num_features, fill_value=agent_info.get("step_size"))
    for i in range(1, num_features):
        step_sizes[i] /= np.sqrt(np.sum(np.square(C[i])))
    step_sizes /= M

    assert np.array_equal(etd.step_size, step_sizes)
```
| field | value |
|---|---|
| avg_line_length | 24.263158 |
| max_line_length | 87 |
| alphanum_fraction | 0.57679 |

| signal | *_quality_signal | raw |
|---|---|---|
| qsc_code_num_words | 633 | 0 |
| qsc_code_num_chars | 4610 | 0 |
| qsc_code_mean_word_length | 3.886256 | 0 |
| qsc_code_frac_words_unique | 0.115324 | null |
| qsc_code_frac_chars_top_2grams | 0.179268 | 0 |
| qsc_code_frac_chars_top_3grams | 0.15122 | 0 |
| qsc_code_frac_chars_top_4grams | 0.082927 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0.899594 | 1 |
| qsc_code_frac_chars_dupe_6grams | 0.857724 | 1 |
| qsc_code_frac_chars_dupe_7grams | 0.852033 | 1 |
| qsc_code_frac_chars_dupe_8grams | 0.829268 | 1 |
| qsc_code_frac_chars_dupe_9grams | 0.827236 | 1 |
| qsc_code_frac_chars_dupe_10grams | 0.762195 | 1 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.026867 | 0 |
| qsc_code_frac_chars_whitespace | 0.265293 | 0 |
| qsc_code_size_file_byte | 4610 | 0 |
| qsc_code_num_lines | 189 | 0 |
| qsc_code_num_chars_line_max | 88 | 0 |
| qsc_code_num_chars_line_mean | 24.391534 | 0 |
| qsc_code_frac_chars_alphabet | 0.699439 | 0 |
| qsc_code_frac_chars_comments | 0 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0.661972 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0.20564 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0.049296 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0.06338 | 0 |
| qsc_codepython_cate_var_zero | false | 0 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 0.014085 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0.077465 | 0 |
| qsc_codepython_frac_lines_print | 0 | 0 |

| field | value |
|---|---|
| effective | 0 |
| hits | 6 |
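The ETD tests in row 3 repeatedly divide the step size by the scaling factor M = (interest - interest * trace_decay * discount_rate) / (1 - discount_rate). For the parameter values those tests use, the arithmetic works out as follows; this simply re-evaluates the quoted expression and adds nothing beyond it:

```python
interest, trace_decay, discount_rate = 1, 0.95, 0.25
M = (interest - interest * trace_decay * discount_rate) / (1 - discount_rate)
# numerator: 1 - 0.2375 = 0.7625; denominator: 0.75
print(M)  # ≈ 1.0167
```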
**Row 4**

| field | value |
|---|---|
| hexsha | b8bbd448b56b5d7abcdb56f80d170c1bffce7ad8 |
| size | 52463 |
| ext | py |
| lang | Python |
| max_stars_repo_path | pybind/slxos/v17r_1_01a/bridge_domain_state/bridge_domain_list/__init__.py |
| max_stars_repo_name | extremenetworks/pybind |
| max_stars_repo_head_hexsha | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | pybind/slxos/v17r_1_01a/bridge_domain_state/bridge_domain_list/__init__.py |
| max_issues_repo_name | extremenetworks/pybind |
| max_issues_repo_head_hexsha | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb |
| max_issues_repo_licenses | ["Apache-2.0"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | pybind/slxos/v17r_1_01a/bridge_domain_state/bridge_domain_list/__init__.py |
| max_forks_repo_name | extremenetworks/pybind |
| max_forks_repo_head_hexsha | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb |
| max_forks_repo_licenses | ["Apache-2.0"] |
| max_forks_count | 1 |
| max_forks_repo_forks_event_min_datetime | 2021-11-05T22:15:42.000Z |
| max_forks_repo_forks_event_max_datetime | 2021-11-05T22:15:42.000Z |

content:
```python
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import outer_vlan_list


class bridge_domain_list(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module brocade-nsm-operational - based on the path /bridge-domain-state/bridge-domain-list. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.

  YANG Description: bridge domain node
  """
  __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__bd_id','__vc_id','__active_ac_lif_count','__config_ac_lif_count','__active_vfi_lif_count','__config_vfi_lif_count','__local_switching','__block_bpdu','__bd_type','__ve_ifindex','__pw_profile','__mac_limit','__statistics','__mac_addr_withdrawal','__mct_enabled','__description','__outer_vlan_list',)

  _yang_name = 'bridge-domain-list'
  _rest_name = 'bridge-domain-list'

  _pybind_generated_by = 'container'

  def __init__(self, *args, **kwargs):
    path_helper_ = kwargs.pop("path_helper", None)
    if path_helper_ is False:
      self._path_helper = False
    elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
      self._path_helper = path_helper_
    elif hasattr(self, "_parent"):
      path_helper_ = getattr(self._parent, "_path_helper", False)
      self._path_helper = path_helper_
    else:
      self._path_helper = False

    extmethods = kwargs.pop("extmethods", None)
    if extmethods is False:
      self._extmethods = False
    elif extmethods is not None and isinstance(extmethods, dict):
      self._extmethods = extmethods
    elif hasattr(self, "_parent"):
      extmethods = getattr(self._parent, "_extmethods", None)
      self._extmethods = extmethods
    else:
      self._extmethods = False

    self.__description = YANGDynClass(base=unicode, is_leaf=True, yang_name="description", rest_name="description", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='string', is_config=False)
    self.__pw_profile = YANGDynClass(base=unicode, is_leaf=True, yang_name="pw-profile", rest_name="pw-profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='string', is_config=False)
    self.__mac_limit = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="mac-limit", rest_name="mac-limit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)
    self.__bd_type = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="bd-type", rest_name="bd-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)
    self.__mac_addr_withdrawal = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="mac-addr-withdrawal", rest_name="mac-addr-withdrawal", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)
    self.__bd_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="bd-id", rest_name="bd-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint32', is_config=False)
    self.__config_ac_lif_count = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="config-ac-lif-count", rest_name="config-ac-lif-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)
    self.__block_bpdu = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="block-bpdu", rest_name="block-bpdu", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)
    self.__active_ac_lif_count = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="active-ac-lif-count", rest_name="active-ac-lif-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)
    self.__mct_enabled = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="mct-enabled", rest_name="mct-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)
    self.__statistics = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="statistics", rest_name="statistics", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)
    self.__vc_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="vc-id", rest_name="vc-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint32', is_config=False)
    self.__outer_vlan_list = YANGDynClass(base=YANGListType("outer_vlan",outer_vlan_list.outer_vlan_list, yang_name="outer-vlan-list", rest_name="outer-vlan-list", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='outer-vlan', extensions={u'tailf-common': {u'callpoint': u'nsm-bd-vlan-tag-info', u'cli-suppress-show-path': None}}), is_container='list', yang_name="outer-vlan-list", rest_name="outer-vlan-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'nsm-bd-vlan-tag-info', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='list', is_config=False)
    self.__config_vfi_lif_count = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="config-vfi-lif-count", rest_name="config-vfi-lif-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)
    self.__ve_ifindex = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ve-ifindex", rest_name="ve-ifindex", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint32', is_config=False)
    self.__active_vfi_lif_count = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="active-vfi-lif-count", rest_name="active-vfi-lif-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)
    self.__local_switching = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="local-switching", rest_name="local-switching", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)

    load = kwargs.pop("load", None)
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)

  def _path(self):
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return [u'bridge-domain-state', u'bridge-domain-list']

  def _rest_path(self):
    if hasattr(self, "_parent"):
      if self._rest_name:
        return self._parent._rest_path()+[self._rest_name]
      else:
        return self._parent._rest_path()
    else:
      return [u'bridge-domain-state', u'bridge-domain-list']

  def _get_bd_id(self):
    """
    Getter method for bd_id, mapped from YANG variable /bridge_domain_state/bridge_domain_list/bd_id (uint32)

    YANG Description: BD id
    """
    return self.__bd_id

  def _set_bd_id(self, v, load=False):
    """
    Setter method for bd_id, mapped from YANG variable /bridge_domain_state/bridge_domain_list/bd_id (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_bd_id is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_bd_id() directly.

    YANG Description: BD id
    """
    parent = getattr(self, "_parent", None)
    if parent is not None and load is False:
      raise AttributeError("Cannot set keys directly when" +
                           " within an instantiated list")
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="bd-id", rest_name="bd-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
        'error-string': """bd_id must be of a type compatible with uint32""",
        'defined-type': "uint32",
        'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="bd-id", rest_name="bd-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint32', is_config=False)""",
      })
    self.__bd_id = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_bd_id(self):
    self.__bd_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="bd-id", rest_name="bd-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint32', is_config=False)

  def _get_vc_id(self):
    """
    Getter method for vc_id, mapped from YANG variable /bridge_domain_state/bridge_domain_list/vc_id (uint32)

    YANG Description: vc id
    """
    return self.__vc_id

  def _set_vc_id(self, v, load=False):
    """
    Setter method for vc_id, mapped from YANG variable /bridge_domain_state/bridge_domain_list/vc_id (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_vc_id is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_vc_id() directly.

    YANG Description: vc id
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="vc-id", rest_name="vc-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
        'error-string': """vc_id must be of a type compatible with uint32""",
        'defined-type': "uint32",
        'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="vc-id", rest_name="vc-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint32', is_config=False)""",
      })
    self.__vc_id = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_vc_id(self):
    self.__vc_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="vc-id", rest_name="vc-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint32', is_config=False)

  def _get_active_ac_lif_count(self):
    """
    Getter method for active_ac_lif_count, mapped from YANG variable /bridge_domain_state/bridge_domain_list/active_ac_lif_count (uint16)

    YANG Description: active ac lif count
    """
    return self.__active_ac_lif_count

  def _set_active_ac_lif_count(self, v, load=False):
    """
    Setter method for active_ac_lif_count, mapped from YANG variable /bridge_domain_state/bridge_domain_list/active_ac_lif_count (uint16)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_active_ac_lif_count is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_active_ac_lif_count() directly.

    YANG Description: active ac lif count
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="active-ac-lif-count", rest_name="active-ac-lif-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
        'error-string': """active_ac_lif_count must be of a type compatible with uint16""",
        'defined-type': "uint16",
        'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="active-ac-lif-count", rest_name="active-ac-lif-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)""",
      })
    self.__active_ac_lif_count = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_active_ac_lif_count(self):
    self.__active_ac_lif_count = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="active-ac-lif-count", rest_name="active-ac-lif-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)

  def _get_config_ac_lif_count(self):
    """
    Getter method for config_ac_lif_count, mapped from YANG variable /bridge_domain_state/bridge_domain_list/config_ac_lif_count (uint16)

    YANG Description: config ac lif count
    """
    return self.__config_ac_lif_count

  def _set_config_ac_lif_count(self, v, load=False):
    """
    Setter method for config_ac_lif_count, mapped from YANG variable /bridge_domain_state/bridge_domain_list/config_ac_lif_count (uint16)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_config_ac_lif_count is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_config_ac_lif_count() directly.

    YANG Description: config ac lif count
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="config-ac-lif-count", rest_name="config-ac-lif-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
        'error-string': """config_ac_lif_count must be of a type compatible with uint16""",
        'defined-type': "uint16",
        'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="config-ac-lif-count", rest_name="config-ac-lif-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)""",
      })
    self.__config_ac_lif_count = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_config_ac_lif_count(self):
    self.__config_ac_lif_count = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="config-ac-lif-count", rest_name="config-ac-lif-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)

  def _get_active_vfi_lif_count(self):
    """
    Getter method for active_vfi_lif_count, mapped from YANG variable /bridge_domain_state/bridge_domain_list/active_vfi_lif_count (uint16)

    YANG Description: active vfi lif count
    """
    return self.__active_vfi_lif_count

  def _set_active_vfi_lif_count(self, v, load=False):
    """
    Setter method for active_vfi_lif_count, mapped from YANG variable /bridge_domain_state/bridge_domain_list/active_vfi_lif_count (uint16)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_active_vfi_lif_count is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_active_vfi_lif_count() directly.

    YANG Description: active vfi lif count
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="active-vfi-lif-count", rest_name="active-vfi-lif-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
        'error-string': """active_vfi_lif_count must be of a type compatible with uint16""",
        'defined-type': "uint16",
        'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="active-vfi-lif-count", rest_name="active-vfi-lif-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)""",
      })
    self.__active_vfi_lif_count = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_active_vfi_lif_count(self):
    self.__active_vfi_lif_count = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="active-vfi-lif-count", rest_name="active-vfi-lif-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)

  def _get_config_vfi_lif_count(self):
    """
    Getter method for config_vfi_lif_count, mapped from YANG variable /bridge_domain_state/bridge_domain_list/config_vfi_lif_count (uint16)

    YANG Description: config vfi lif count
    """
    return self.__config_vfi_lif_count

  def _set_config_vfi_lif_count(self, v, load=False):
    """
    Setter method for config_vfi_lif_count, mapped from YANG variable /bridge_domain_state/bridge_domain_list/config_vfi_lif_count (uint16)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_config_vfi_lif_count is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_config_vfi_lif_count() directly.

    YANG Description: config vfi lif count
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="config-vfi-lif-count", rest_name="config-vfi-lif-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
        'error-string': """config_vfi_lif_count must be of a type compatible with uint16""",
        'defined-type': "uint16",
        'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="config-vfi-lif-count", rest_name="config-vfi-lif-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)""",
      })
    self.__config_vfi_lif_count = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_config_vfi_lif_count(self):
    self.__config_vfi_lif_count = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="config-vfi-lif-count", rest_name="config-vfi-lif-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)

  def _get_local_switching(self):
    """
    Getter method for local_switching, mapped from YANG variable /bridge_domain_state/bridge_domain_list/local_switching (boolean)

    YANG Description: local switching
    """
    return self.__local_switching

  def _set_local_switching(self, v, load=False):
    """
    Setter method for local_switching, mapped from YANG variable /bridge_domain_state/bridge_domain_list/local_switching (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_local_switching is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_local_switching() directly.

    YANG Description: local switching
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="local-switching", rest_name="local-switching", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
        'error-string': """local_switching must be of a type compatible with boolean""",
        'defined-type': "boolean",
        'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="local-switching", rest_name="local-switching", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)""",
      })
    self.__local_switching = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_local_switching(self):
    self.__local_switching = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="local-switching", rest_name="local-switching", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)

  def _get_block_bpdu(self):
    """
    Getter method for block_bpdu, mapped from YANG variable /bridge_domain_state/bridge_domain_list/block_bpdu (boolean)

    YANG Description: block bpdu
    """
    return self.__block_bpdu

  def _set_block_bpdu(self, v, load=False):
    """
    Setter method for block_bpdu, mapped from YANG variable /bridge_domain_state/bridge_domain_list/block_bpdu (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_block_bpdu is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_block_bpdu() directly.

    YANG Description: block bpdu
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="block-bpdu", rest_name="block-bpdu", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
        'error-string': """block_bpdu must be of a type compatible with boolean""",
        'defined-type': "boolean",
        'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="block-bpdu", rest_name="block-bpdu", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)""",
      })
    self.__block_bpdu = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_block_bpdu(self):
    self.__block_bpdu = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="block-bpdu", rest_name="block-bpdu", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)

  def _get_bd_type(self):
    """
    Getter method for bd_type, mapped from YANG variable /bridge_domain_state/bridge_domain_list/bd_type (uint16)

    YANG Description: bd type
    """
    return self.__bd_type

  def _set_bd_type(self, v, load=False):
    """
    Setter method for bd_type, mapped from YANG variable /bridge_domain_state/bridge_domain_list/bd_type (uint16)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_bd_type is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_bd_type() directly.

    YANG Description: bd type
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="bd-type", rest_name="bd-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
        'error-string': """bd_type must be of a type compatible with uint16""",
        'defined-type': "uint16",
        'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="bd-type", rest_name="bd-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)""",
      })
    self.__bd_type = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_bd_type(self):
    self.__bd_type = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="bd-type", rest_name="bd-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)

  def _get_ve_ifindex(self):
    """
    Getter method for ve_ifindex, mapped from YANG variable /bridge_domain_state/bridge_domain_list/ve_ifindex (uint32)

    YANG Description: ve_ifindex
    """
    return self.__ve_ifindex

  def _set_ve_ifindex(self, v, load=False):
    """
    Setter method for ve_ifindex, mapped from YANG variable /bridge_domain_state/bridge_domain_list/ve_ifindex (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_ve_ifindex is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_ve_ifindex() directly.

    YANG Description: ve_ifindex
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ve-ifindex", rest_name="ve-ifindex", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
        'error-string': """ve_ifindex must be of a type compatible with uint32""",
        'defined-type': "uint32",
        'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ve-ifindex", rest_name="ve-ifindex", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint32', is_config=False)""",
      })
    self.__ve_ifindex = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_ve_ifindex(self):
    self.__ve_ifindex = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ve-ifindex", rest_name="ve-ifindex", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint32', is_config=False)

  def _get_pw_profile(self):
    """
    Getter method for pw_profile, mapped from YANG variable /bridge_domain_state/bridge_domain_list/pw_profile (string)

    YANG Description: pw_profile
    """
    return self.__pw_profile

  def _set_pw_profile(self, v, load=False):
    """
    Setter method for pw_profile, mapped from YANG variable /bridge_domain_state/bridge_domain_list/pw_profile (string)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_pw_profile is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_pw_profile() directly.

    YANG Description: pw_profile
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="pw-profile", rest_name="pw-profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='string', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
        'error-string': """pw_profile must be of a type compatible with string""",
        'defined-type': "string",
        'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="pw-profile", rest_name="pw-profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='string', is_config=False)""",
      })
    self.__pw_profile = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_pw_profile(self):
    self.__pw_profile = YANGDynClass(base=unicode, is_leaf=True, yang_name="pw-profile", rest_name="pw-profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='string', is_config=False)

  def _get_mac_limit(self):
    """
    Getter method for mac_limit, mapped from YANG variable /bridge_domain_state/bridge_domain_list/mac_limit (uint16)

    YANG Description: mac_limit
    """
    return self.__mac_limit

  def _set_mac_limit(self, v, load=False):
    """
    Setter method for mac_limit, mapped from YANG variable /bridge_domain_state/bridge_domain_list/mac_limit (uint16)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_mac_limit is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_mac_limit() directly.

    YANG Description: mac_limit
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="mac-limit", rest_name="mac-limit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
        'error-string': """mac_limit must be of a type compatible with uint16""",
        'defined-type': "uint16",
        'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="mac-limit", rest_name="mac-limit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)""",
      })
    self.__mac_limit = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_mac_limit(self):
    self.__mac_limit = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="mac-limit", rest_name="mac-limit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='uint16', is_config=False)

  def _get_statistics(self):
    """
    Getter method for statistics, mapped from YANG variable /bridge_domain_state/bridge_domain_list/statistics (boolean)

    YANG Description: statistics
    """
    return self.__statistics

  def _set_statistics(self, v, load=False):
    """
    Setter method for statistics, mapped from YANG variable /bridge_domain_state/bridge_domain_list/statistics (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_statistics is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_statistics() directly.

    YANG Description: statistics
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="statistics", rest_name="statistics", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
        'error-string': """statistics must be of a type compatible with boolean""",
        'defined-type': "boolean",
        'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="statistics", rest_name="statistics", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)""",
      })
    self.__statistics = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_statistics(self):
    self.__statistics = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="statistics", rest_name="statistics", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)

  def _get_mac_addr_withdrawal(self):
    """
    Getter method for mac_addr_withdrawal, mapped from YANG variable /bridge_domain_state/bridge_domain_list/mac_addr_withdrawal (boolean)

    YANG Description: mac address withdrawal
    """
    return self.__mac_addr_withdrawal

  def _set_mac_addr_withdrawal(self, v, load=False):
    """
    Setter method for mac_addr_withdrawal, mapped from YANG variable /bridge_domain_state/bridge_domain_list/mac_addr_withdrawal (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_mac_addr_withdrawal is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_mac_addr_withdrawal() directly.

    YANG Description: mac address withdrawal
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="mac-addr-withdrawal", rest_name="mac-addr-withdrawal", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
        'error-string': """mac_addr_withdrawal must be of a type compatible with boolean""",
        'defined-type': "boolean",
        'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="mac-addr-withdrawal", rest_name="mac-addr-withdrawal", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)""",
      })
    self.__mac_addr_withdrawal = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_mac_addr_withdrawal(self):
    self.__mac_addr_withdrawal = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="mac-addr-withdrawal", rest_name="mac-addr-withdrawal", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)

  def _get_mct_enabled(self):
    """
    Getter method for mct_enabled, mapped from YANG variable /bridge_domain_state/bridge_domain_list/mct_enabled (boolean)

    YANG Description: mct enabled
    """
    return self.__mct_enabled

  def _set_mct_enabled(self, v, load=False):
    """
    Setter method for mct_enabled, mapped from YANG variable /bridge_domain_state/bridge_domain_list/mct_enabled (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_mct_enabled is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_mct_enabled() directly.

    YANG Description: mct enabled
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="mct-enabled", rest_name="mct-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
        'error-string': """mct_enabled must be of a type compatible with boolean""",
        'defined-type': "boolean",
        'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="mct-enabled", rest_name="mct-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)""",
      })
    self.__mct_enabled = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_mct_enabled(self):
    self.__mct_enabled = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="mct-enabled", rest_name="mct-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='boolean', is_config=False)

  def _get_description(self):
    """
    Getter method for description, mapped from YANG variable /bridge_domain_state/bridge_domain_list/description (string)

    YANG Description: bridge domain specific description
    """
    return self.__description

  def _set_description(self, v, load=False):
    """
    Setter method for description, mapped from YANG variable /bridge_domain_state/bridge_domain_list/description (string)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_description is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_description() directly.

    YANG Description: bridge domain specific description
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="description", rest_name="description", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='string', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
        'error-string': """description must be of a type compatible with string""",
        'defined-type': "string",
        'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="description", rest_name="description", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='string', is_config=False)""",
      })
    self.__description = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_description(self):
    self.__description = YANGDynClass(base=unicode, is_leaf=True, yang_name="description", rest_name="description", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='string', is_config=False)

  def _get_outer_vlan_list(self):
    """
    Getter method for outer_vlan_list, mapped from YANG variable /bridge_domain_state/bridge_domain_list/outer_vlan_list (list)

    YANG Description: bd_vlan_tag_info
    """
    return self.__outer_vlan_list

  def _set_outer_vlan_list(self, v, load=False):
    """
    Setter method for outer_vlan_list, mapped from YANG variable /bridge_domain_state/bridge_domain_list/outer_vlan_list (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_outer_vlan_list is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_outer_vlan_list() directly.

    YANG Description: bd_vlan_tag_info
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGListType("outer_vlan",outer_vlan_list.outer_vlan_list, yang_name="outer-vlan-list", rest_name="outer-vlan-list", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='outer-vlan', extensions={u'tailf-common': {u'callpoint': u'nsm-bd-vlan-tag-info', u'cli-suppress-show-path': None}}), is_container='list', yang_name="outer-vlan-list", rest_name="outer-vlan-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'nsm-bd-vlan-tag-info', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='list', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
        'error-string': """outer_vlan_list must be of a type compatible with list""",
        'defined-type': "list",
        'generated-type': """YANGDynClass(base=YANGListType("outer_vlan",outer_vlan_list.outer_vlan_list, yang_name="outer-vlan-list", rest_name="outer-vlan-list", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='outer-vlan', extensions={u'tailf-common': {u'callpoint': u'nsm-bd-vlan-tag-info', u'cli-suppress-show-path': None}}), is_container='list', yang_name="outer-vlan-list", rest_name="outer-vlan-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'nsm-bd-vlan-tag-info', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='list', is_config=False)""",
      })
    self.__outer_vlan_list = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_outer_vlan_list(self):
    self.__outer_vlan_list = YANGDynClass(base=YANGListType("outer_vlan",outer_vlan_list.outer_vlan_list, yang_name="outer-vlan-list", rest_name="outer-vlan-list", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='outer-vlan', extensions={u'tailf-common': {u'callpoint': u'nsm-bd-vlan-tag-info', u'cli-suppress-show-path': None}}), is_container='list', yang_name="outer-vlan-list", rest_name="outer-vlan-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'nsm-bd-vlan-tag-info', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='list', is_config=False)
```
bd_id = __builtin__.property(_get_bd_id)
vc_id = __builtin__.property(_get_vc_id)
active_ac_lif_count = __builtin__.property(_get_active_ac_lif_count)
config_ac_lif_count = __builtin__.property(_get_config_ac_lif_count)
active_vfi_lif_count = __builtin__.property(_get_active_vfi_lif_count)
config_vfi_lif_count = __builtin__.property(_get_config_vfi_lif_count)
local_switching = __builtin__.property(_get_local_switching)
block_bpdu = __builtin__.property(_get_block_bpdu)
bd_type = __builtin__.property(_get_bd_type)
ve_ifindex = __builtin__.property(_get_ve_ifindex)
pw_profile = __builtin__.property(_get_pw_profile)
mac_limit = __builtin__.property(_get_mac_limit)
statistics = __builtin__.property(_get_statistics)
mac_addr_withdrawal = __builtin__.property(_get_mac_addr_withdrawal)
mct_enabled = __builtin__.property(_get_mct_enabled)
description = __builtin__.property(_get_description)
outer_vlan_list = __builtin__.property(_get_outer_vlan_list)
_pyangbind_elements = {'bd_id': bd_id, 'vc_id': vc_id, 'active_ac_lif_count': active_ac_lif_count, 'config_ac_lif_count': config_ac_lif_count, 'active_vfi_lif_count': active_vfi_lif_count, 'config_vfi_lif_count': config_vfi_lif_count, 'local_switching': local_switching, 'block_bpdu': block_bpdu, 'bd_type': bd_type, 've_ifindex': ve_ifindex, 'pw_profile': pw_profile, 'mac_limit': mac_limit, 'statistics': statistics, 'mac_addr_withdrawal': mac_addr_withdrawal, 'mct_enabled': mct_enabled, 'description': description, 'outer_vlan_list': outer_vlan_list, }
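# Behavior sketch (an assumption; `entry` and its enclosing class are hypothetical):
# pyangbind setters coerce assigned values through YANGDynClass and raise
# ValueError when a value is incompatible with the leaf's YANG type, e.g.
# entry._set_description(u"lab bridge domain")  # coerced to a string leaf
# entry._set_mct_enabled(object())              # raises ValueError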
avg_line_length: 69.121212 | max_line_length: 796 | alphanum_fraction: 0.748718 | num_words: 7,307 | num_lines: 758
hexsha: b242b7cf995a83c2656416d12e372d93e8e3461b | size: 2,970 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: tutorial/resource.py | queryfish/jobcrawler | f0cf70e6ca909648e5a0af37dcc5fb3a548a4cfa | ["MIT"] | counts: null | event datetimes: null
# -*- coding: utf-8 -*-
USER_AGENT_LIST = [
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
    "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
    "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.132 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:41.0) Gecko/20100101 Firefox/41.0"
]
# PROXIES = [
# '83.219.1.201:41380',]
PROXIES = [
# '193.112.128.212:8118',
'202.183.32.182:80',
'183.129.244.16:11161',
'60.13.42.34:9999',
'119.254.94.71:39053',
'175.44.158.15:9000',
'112.111.98.176:9000',
'27.203.142.151:8060',
'27.188.65.244:8060',
'183.129.207.80:12608',
'114.234.83.79:9000',
'117.87.178.88:9000',
'117.90.137.65:9999',
'117.90.252.143:9000',
'183.129.207.86:13974',
'121.232.194.251:9000',
# '1.85.220.195:8118',
# '60.255.186.169:8888',
# '118.187.58.34:53281',
# '116.224.191.141:8118',
# '120.27.5.62:9090',
# '119.132.250.156:53281',
# '139.129.166.68:3128'
]
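# A minimal rotation sketch (an assumption, not part of the original file):
# pick a random user agent and proxy per request. `requests` is used purely
# for illustration; the crawler's own HTTP client may differ.
import random
import requests

def fetch(url):
    headers = {'User-Agent': random.choice(USER_AGENT_LIST)}
    proxy = random.choice(PROXIES)
    return requests.get(url, headers=headers, timeout=10,
                        proxies={'http': 'http://' + proxy, 'https': 'http://' + proxy})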
avg_line_length: 55 | max_line_length: 125 | alphanum_fraction: 0.643434 | num_words: 557 | num_lines: 53
hexsha: b24eda160b2f1440087de366d41fa1bc28771cbb | size: 796 | ext: py | lang: Python
max_stars_repo: python/ngsi_v2/test/test_query_pattern.py | orchestracities/sdk | 9dd1e618d6c013ab916f3880df84c7882f6beec6 | ["Apache-2.0"] | stars: 2 | 2019-12-22T01:01:34.000Z to 2021-07-03T20:30:03.000Z
max_issues_repo: same path/repo/licenses | issues: 2 | 2019-06-06T05:45:45.000Z to 2019-06-06T09:03:10.000Z
max_forks_repo: same path/repo/licenses | forks: 2 | 2021-07-03T20:30:06.000Z to 2021-11-30T21:55:02.000Z
# coding: utf-8
"""
ngsi_v2
NGSI V2 API RC-2018.07 # noqa: E501
The version of the OpenAPI document: 0.2.2
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import ngsi_v2
from ngsi_v2.models.query_pattern import QueryPattern # noqa: E501
from ngsi_v2.rest import ApiException
class TestQueryPattern(unittest.TestCase):
"""QueryPattern unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testQueryPattern(self):
"""Test QueryPattern"""
# FIXME: construct object with mandatory attributes with example values
# model = ngsi_v2.models.query_pattern.QueryPattern() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
avg_line_length: 19.9 | max_line_length: 79 | alphanum_fraction: 0.682161 | num_words: 99 | num_lines: 39
hexsha: b2691fd430b1ea673b48132b8036417005de10a3 | size: 107 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: weideshop/tests/factories/products/products.py | michaelgichia/weideshop | 01a408b358b9ad7d52747b42c36dc16206b4b915 | ["BSD-2-Clause"] | counts: null | event datetimes: null
from weideshop.products.models import Product
import factory
class ProductFactory(factory.Factory):
pass
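# Note (an assumption, not in the original file): the factory is an empty
# stub; a concrete factory_boy factory would bind the imported Product model
# via an inner Meta class, e.g.:
# class ProductFactory(factory.Factory):
#     class Meta:
#         model = Product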
avg_line_length: 21.4 | max_line_length: 46 | alphanum_fraction: 0.841121 | num_words: 13 | num_lines: 5
hexsha: b2ab5f0f7c7b1f4d9909d63570960007b08c4883 | size: 120 | ext: py | lang: Python
max_stars_repo: ctnas/core/metric/__init__.py | AlbertiPot/CTNAS | 7689dc85e4445d087a672847ac22aca1acd0ac8b | ["BSD-3-Clause"] | stars: 35 | 2021-03-10T08:03:08.000Z to 2022-03-30T03:53:54.000Z
max_issues_repo: ctnas/core/metric/__init__.py | ShunLu91/CTNAS | ecb22ea66b7ba075c48ca4c4db28f68b777f45db | ["BSD-3-Clause"] | issues: 5 | 2021-06-30T02:50:09.000Z to 2021-08-30T01:43:07.000Z
max_forks_repo: ctnas/core/metric/__init__.py | ShunLu91/CTNAS | ecb22ea66b7ba075c48ca4c4db28f68b777f45db | ["BSD-3-Clause"] | forks: 3 | 2021-08-14T14:59:12.000Z to 2021-11-22T03:41:44.000Z
from .accuracy import AccuracyMetric
from .average import AverageMetric
from .moving_average import MovingAverageMetric
avg_line_length: 30 | max_line_length: 47 | alphanum_fraction: 0.875 | num_words: 13 | num_lines: 3
hexsha: a234ac87d847f001a6b5c48ab4e5ba8a41ce5ab6 | size: 27 | ext: py | lang: Python
max_stars_repo: hamsclient/__main__.py | bfritscher/hamsclient | 233b3237f681cbbab2d7f75d0858db00ccbacfa3 | ["MIT"] | stars: 2 | 2021-08-23T17:12:50.000Z to 2022-03-22T07:07:31.000Z
max_issues_repo: same path/repo/licenses | issues: 2 | 2021-08-23T17:18:37.000Z to 2022-03-29T14:37:43.000Z
max_forks_repo: same path/repo/licenses | forks: 2 | 2020-11-24T07:46:15.000Z to 2022-03-21T19:24:15.000Z
import requests
import re
avg_line_length: 6.75 | max_line_length: 15 | alphanum_fraction: 0.814815 | num_words: 4 | num_lines: 3
hexsha: a2405c7c649c539c37b4b0e292ad2a196d2981bd | size: 209 | ext: py | lang: Python
max_stars_repo: bagua/torch_api/algorithms/__init__.py | lheimabch/bagua | af3a0bdc8547885ad6b2420367a79aa838d6c9a8 | ["MIT"] | stars: 1 | 2021-12-20T03:14:39.000Z to 2021-12-20T03:14:39.000Z
max_issues_repo / max_forks_repo: same path/repo/licenses | counts: null | event datetimes: null
#!/usr/bin/env python3
from .base import Algorithm, AlgorithmImpl # noqa: F401
from . import bytegrad, decentralized, gradient_allreduce # noqa: F401
from . import q_adam, async_model_average # noqa: F401
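# The `# noqa: F401` markers above are deliberate: the imports exist solely
# to re-export the algorithm submodules at package level, and the marker
# silences the unused-import lint warning.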
avg_line_length: 34.833333 | max_line_length: 71 | alphanum_fraction: 0.76555 | num_words: 28 | num_lines: 5
hexsha: a254225eb22e79dd26fc312586510e66c6ad1700 | size: 124 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: number.py | alexhong121/py_pt | 78f11f2a2fed54154017371a97de563a8fffcd81 | ["MIT"] | counts: null | event datetimes: null
# data type conversion: boolean
# non-zero numbers evaluate to True
print(bool(True)) # True
print(bool(1)) # True
# zero evaluates to False
print(bool(0)) # False
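# A small extension of the same idea (not in the original file): empty
# strings and containers are also falsy, while any non-empty string is truthy.
print(bool(''))   # False
print(bool([]))   # False
print(bool('0'))  # True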
avg_line_length: 17.714286 | max_line_length: 25 | alphanum_fraction: 0.701613 | num_words: 20 | num_lines: 7
hexsha: a27de1fcaca15fce684563a6e104e457ecdcc96c | size: 97 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: aegis/utils/ssh/ssh_handler.py | Yijie-Wu/Aegis | f8082b66d55be135a5e2bec7ac15f860f99f7df7 | ["MIT"] | counts: null | event datetimes: null
# -*- encoding:utf-8 -*-
"""
Author: Yijie.Wu
Email: 1694517106@qq.com
Date: 2020/5/16 15:17
"""
avg_line_length: 13.857143 | max_line_length: 24 | alphanum_fraction: 0.618557 | num_words: 16 | num_lines: 6
hexsha: 0c4a85ed9be564572f22f94aefa3df67e54491cd | size: 38 | ext: py | lang: Python
max_stars_repo: server_beta_app/models/documents/source_income/__init__.py | dalmarcogd/test_django_elasticsearch | 9c97857a7f225a87554637fcae405e8c1a03d0f7 | ["Apache-2.0"] | stars: null
max_issues_repo: same path/repo/licenses | issues: 13 | 2020-06-05T18:26:43.000Z to 2021-06-10T20:36:13.000Z
max_forks_repo: backend/server_beta/server_beta_app/models/documents/source_income/__init__.py | dalmarcogd/challenge_ms | 761f0a588b4c309cf6e226d306df3609c1179b4c | ["MIT"] | forks: 1 | 2019-04-07T23:42:22.000Z to 2019-04-07T23:42:22.000Z
from .source_income_document import *
avg_line_length: 19 | max_line_length: 37 | alphanum_fraction: 0.842105 | num_words: 5 | num_lines: 1
hexsha: a762c9b815af9576194a4e3c6bf92bd872ce3bb4 | size: 47 | ext: py | lang: Python
max_stars_repo: myproject/__init__.py | finsberg/sphinx-tutorial | 8a6bd88c2bde51d79570c34d6ec42f95e71a998b | ["MIT"] | stars: 4 | 2020-10-14T04:09:38.000Z to 2021-03-16T13:43:49.000Z
max_issues_repo: same path/repo/licenses | issues: null
max_forks_repo: same path/repo/licenses | forks: 3 | 2021-02-01T21:02:13.000Z to 2021-03-14T16:07:34.000Z
from . import mymodule
from .mymodule import *
avg_line_length: 15.666667 | max_line_length: 23 | alphanum_fraction: 0.765957 | num_words: 6 | num_lines: 2
hexsha: a7639d8fc3f77fc037a8fe8b8edeec844a5a9ae2 | size: 14,758 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: tests/test_rental_pyhamcrest.py | pauljackals/shop-with-tests | 4682f80bf3c54167a01a0c1bd4f03e67cce4a9ce | ["MIT"] | counts: null | event datetimes: null
import unittest
from hamcrest import *
from src.rental.rental import Rental
import uuid
import json
import copy
import datetime
class TestRentalPyHamcrest(unittest.TestCase):
def setUp(self):
with open('data/database_for_testing.json') as file:
database = json.loads(file.read())
self.database_for_checking = copy.deepcopy(database)
self.rental = Rental(database, datetime.datetime(year=2020, month=12, day=2, hour=14, minute=17))
def test_load_database(self):
assert_that(self.rental.load_database('data/database_for_testing.json'), equal_to(True))
def test_load_database_no_file(self):
assert_that(calling(self.rental.load_database).with_args('test'), raises(FileNotFoundError, "^Database doesn't exist$"))
def test_load_database_wrong_type(self):
assert_that(calling(self.rental.load_database).with_args(23), raises(TypeError, "^Database file name must be a string$"))
def test_load_database_empty_name(self):
assert_that(calling(self.rental.load_database).with_args(''), raises(ValueError), "^Database file name must not be empty$")
def test_save_database(self):
assert_that(self.rental.save_database(), equal_to(True))
def test_save_database_file(self):
self.rental.save_database()
with open('src/rental/database_copy.json') as file:
database_copy = json.loads(file.read())
assert_that(self.database_for_checking, equal_to(database_copy))
def test_get_user_reservations(self):
reservations = [
{
"id": "4248797f-9a3e-4a52-b3f7-bb72eef51755",
"user": "2fe45694-eb13-4283-824e-cd6fb179bfcf",
"game": 1,
"from": "2020-12-15 13:00",
"to": "2020-12-19 14:30"
}
]
assert_that(self.rental.get_user_reservations('2fe45694-eb13-4283-824e-cd6fb179bfcf'), contains_inanyorder(*reservations))
def test_get_user_reservations_wrong_type(self):
assert_that(calling(self.rental.get_user_reservations).with_args(123), raises(TypeError, '^User ID must be a string$'))
def test_get_user_reservations_empty(self):
assert_that(calling(self.rental.get_user_reservations).with_args(''), raises(ValueError, '^User ID must not be empty$'))
def test_get_user_reservations_no_user(self):
assert_that(calling(self.rental.get_user_reservations).with_args('test'), raises(LookupError, '^No such user$'))
def test_create_reservation(self):
assert_that(
uuid.UUID(self.rental.create_reservation(
'8a85f066-bd8d-43df-b471-a6e708471c4c',
1,
'2020-12-19 14:30',
'2020-12-21 13:00'
), version=4),
instance_of(uuid.UUID)
)
def test_create_reservation_wrong_date_from_non_digit(self):
assert_that(
calling(self.rental.create_reservation).with_args(
'8a85f066-bd8d-43df-b471-a6e708471c4c',
1,
'20d0-12-19 14:30',
'2020-12-21 13:00'
),
raises(ValueError, '^Wrong date syntax$')
)
def test_create_reservation_wrong_date_to_non_digit(self):
assert_that(
calling(self.rental.create_reservation).with_args(
'8a85f066-bd8d-43df-b471-a6e708471c4c',
1,
'2020-12-19 14:30',
'20d0-12-21 13:00'
),
raises(ValueError, '^Wrong date syntax$')
)
def test_create_reservation_wrong_date_from_wrong_day_in_month(self):
assert_that(
calling(self.rental.create_reservation).with_args(
'8a85f066-bd8d-43df-b471-a6e708471c4c',
1,
'2020-11-31 14:30',
'2020-12-21 13:00'
),
raises(ValueError, '^No such day in provided month$')
)
def test_create_reservation_wrong_date_to_wrong_day_in_month(self):
assert_that(
calling(self.rental.create_reservation).with_args(
'8a85f066-bd8d-43df-b471-a6e708471c4c',
1,
'2021-04-21 14:30',
'2021-04-31 13:00'
),
raises(ValueError, '^No such day in provided month$')
)
def test_create_reservation_wrong_date_from_wrong_day_in_month_february_non_leap(self):
assert_that(
calling(self.rental.create_reservation).with_args(
'8a85f066-bd8d-43df-b471-a6e708471c4c',
1,
'2021-02-29 14:30',
'2021-12-21 13:00'
),
raises(ValueError, '^No such day in provided month$')
)
def test_create_reservation_wrong_date_to_wrong_day_in_month_february_non_leap(self):
assert_that(
calling(self.rental.create_reservation).with_args(
'8a85f066-bd8d-43df-b471-a6e708471c4c',
1,
'2021-12-21 14:30',
'2021-02-29 13:00'
),
raises(ValueError, '^No such day in provided month$')
)
def test_create_reservation_from_day_in_month_february_leap(self):
assert_that(
uuid.UUID(self.rental.create_reservation(
'8a85f066-bd8d-43df-b471-a6e708471c4c',
1,
'2024-02-29 14:30',
'2024-12-21 13:00'
), version=4),
instance_of(uuid.UUID)
)
def test_create_reservation_to_day_in_month_february_leap(self):
assert_that(
uuid.UUID(self.rental.create_reservation(
'8a85f066-bd8d-43df-b471-a6e708471c4c',
1,
'2024-02-21 14:30',
'2024-02-29 13:00'
), version=4),
instance_of(uuid.UUID)
)
def test_create_reservation_error_date_from_empty(self):
assert_that(
calling(self.rental.create_reservation).with_args(
'8a85f066-bd8d-43df-b471-a6e708471c4c',
1,
'',
'2020-12-21 13:00'
),
raises(ValueError, '^Wrong date syntax$')
)
def test_create_reservation_error_date_to_empty(self):
assert_that(
calling(self.rental.create_reservation).with_args(
'8a85f066-bd8d-43df-b471-a6e708471c4c',
1,
'2020-12-19 14:30',
''
),
raises(ValueError, '^Wrong date syntax$')
)
def test_create_reservation_wrong_user_type(self):
assert_that(
calling(self.rental.create_reservation).with_args(
34,
1,
'2020-12-19 14:30',
'2020-12-21 13:00'
),
raises(TypeError, '^User ID must be a string$')
)
def test_create_reservation_no_user(self):
assert_that(
calling(self.rental.create_reservation).with_args(
'test',
1,
'2020-12-19 14:30',
'2020-12-21 13:00'
),
raises(LookupError, '^No such user$')
)
def test_create_reservation_error_wrong_game_type(self):
assert_that(
calling(self.rental.create_reservation).with_args(
'8a85f066-bd8d-43df-b471-a6e708471c4c',
'1',
'2020-12-19 14:30',
'2020-12-21 13:00'
),
raises(TypeError, '^Game ID must be an integer$')
)
def test_create_reservation_error_empty_user(self):
assert_that(
calling(self.rental.create_reservation).with_args(
'',
1,
'2020-12-19 14:30',
'2020-12-21 13:00'
),
raises(ValueError, '^User ID must not be empty$')
)
def test_create_reservation_error_no_game(self):
assert_that(
calling(self.rental.create_reservation).with_args(
'8a85f066-bd8d-43df-b471-a6e708471c4c',
999,
'2020-12-19 14:30',
'2020-12-21 13:00'
),
raises(LookupError, '^No such game$')
)
def test_create_reservation_minute_error_date_from(self):
assert_that(
calling(self.rental.create_reservation).with_args(
'8a85f066-bd8d-43df-b471-a6e708471c4c',
1,
'2020-12-19 14:29',
'2020-12-21 13:00'
),
raises(ValueError, '^Both dates must be rounded to full hours or half \\(:00/:30\\)$')
)
def test_create_reservation_minute_error_date_to(self):
assert_that(
calling(self.rental.create_reservation).with_args(
'8a85f066-bd8d-43df-b471-a6e708471c4c',
1,
'2020-12-19 14:30',
'2020-12-21 13:01'
),
raises(ValueError, '^Both dates must be rounded to full hours or half \\(:00/:30\\)$')
)
def test_create_reservation_error_dates_switched(self):
assert_that(
calling(self.rental.create_reservation).with_args(
'8a85f066-bd8d-43df-b471-a6e708471c4c',
1,
'2020-12-21 13:00',
'2020-12-19 14:30'
),
raises(ValueError, '^End date must be later than start date$')
)
def test_create_reservation_error_date_from_closed_day(self):
assert_that(
calling(self.rental.create_reservation).with_args(
'8a85f066-bd8d-43df-b471-a6e708471c4c',
1,
'2020-12-20 14:30',
'2020-12-22 13:00'
),
raises(ValueError, '^Rental shop is closed during this time$')
)
def test_create_reservation_error_date_to_closed_day(self):
assert_that(
calling(self.rental.create_reservation).with_args(
'8a85f066-bd8d-43df-b471-a6e708471c4c',
1,
'2020-12-19 14:30',
'2020-12-20 13:00'
),
raises(ValueError, '^Rental shop is closed during this time$')
)
def test_create_reservation_error_date_from_open_hours_before(self):
assert_that(
calling(self.rental.create_reservation).with_args(
'8a85f066-bd8d-43df-b471-a6e708471c4c',
1,
'2020-12-18 08:30',
'2020-12-19 13:00'
),
raises(ValueError, '^Rental shop is closed during this time$')
)
def test_create_reservation_error_date_from_open_hours_after(self):
assert_that(
calling(self.rental.create_reservation).with_args(
'8a85f066-bd8d-43df-b471-a6e708471c4c',
1,
'2020-12-18 21:00',
'2020-12-19 13:00'
),
raises(ValueError, '^Rental shop is closed during this time$')
)
def test_create_reservation_error_date_to_open_hours_before(self):
assert_that(
calling(self.rental.create_reservation).with_args(
'8a85f066-bd8d-43df-b471-a6e708471c4c',
1,
'2020-12-18 14:30',
'2020-12-19 09:00'
),
raises(ValueError, '^Rental shop is closed during this time$')
)
def test_create_reservation_error_date_to_open_hours_after(self):
assert_that(
calling(self.rental.create_reservation).with_args(
'8a85f066-bd8d-43df-b471-a6e708471c4c',
1,
'2020-12-18 14:00',
'2020-12-19 16:00'
),
raises(ValueError, '^Rental shop is closed during this time$')
)
def test_create_reservation_error_date_from_already_taken(self):
assert_that(
calling(self.rental.create_reservation).with_args(
'8a85f066-bd8d-43df-b471-a6e708471c4c',
1,
'2020-12-15 13:30',
'2020-12-21 15:00'
),
raises(ValueError, '^Game is already reserved during this time$')
)
def test_create_reservation_error_date_to_already_taken(self):
assert_that(
calling(self.rental.create_reservation).with_args(
'8a85f066-bd8d-43df-b471-a6e708471c4c',
1,
'2020-12-14 13:30',
'2020-12-16 15:00'
),
raises(ValueError, '^Game is already reserved during this time$')
)
def test_create_reservation_wrong_date_before_now(self):
assert_that(
calling(self.rental.create_reservation).with_args(
'8a85f066-bd8d-43df-b471-a6e708471c4c',
1,
'2020-11-28 14:30',
'2020-12-01 13:00'
),
raises(ValueError, '^Both dates must not be in the past$')
)
def test_add_user(self):
assert_that(
uuid.UUID(self.rental.add_user(
'Test',
'Testington',
'something@example.com'
), version=4),
instance_of(uuid.UUID)
)
def test_add_user_error_empty_name(self):
assert_that(
calling(self.rental.add_user).with_args(
'Test',
'',
'something@example.com'
),
raises(ValueError, '^Names must not be empty$')
)
def test_add_user_error_wrong_name_type(self):
assert_that(
calling(self.rental.add_user).with_args(
1,
'Testington',
'something@example.com'
),
raises(TypeError, '^Names must be strings$')
)
def test_add_user_error_wrong_email_type(self):
assert_that(
calling(self.rental.add_user).with_args(
'Test',
'Testington',
None
),
raises(TypeError, '^Email must be a string$')
)
def test_add_user_error_email_invalid(self):
assert_that(
calling(self.rental.add_user).with_args(
'Test',
'Testington',
'somethingexample.com'
),
raises(ValueError, '^Email is not valid$')
)
def tearDown(self):
self.rental = None
self.database_for_checking = None
if __name__ == '__main__':
unittest.main()
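# Note (an assumption, not in the original file): setUp() opens
# 'data/database_for_testing.json' relative to the working directory, so the
# suite is typically run from the project root, e.g.
#   python -m unittest tests.test_rental_pyhamcrest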
avg_line_length: 34.806604 | max_line_length: 131 | alphanum_fraction: 0.557054 | num_words: 1,657 | num_lines: 423
hexsha: a7b9013274be91445cd3cc0a9de552238a61bd8e | size: 134 | ext: py | lang: Python
max_stars_repo: retools/tests/jobs.py | szaydel/retools | 4e7ee27dd3c1b969d9cf63b29dc70e451aa20b43 | ["MIT"] | stars: 52 | 2015-01-20T05:43:25.000Z to 2021-12-18T08:45:45.000Z
max_issues_repo: same path/repo/licenses | issues: 2 | 2020-01-23T23:26:01.000Z to 2021-01-04T17:02:26.000Z
max_forks_repo: same path/repo/licenses | forks: 15 | 2015-05-15T10:45:39.000Z to 2021-05-12T16:39:37.000Z
def echo_default(default='hello'): # pragma: nocover
return default
def echo_back(): # pragma: nocover
return 'howdy all'
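# The `# pragma: nocover` comments mark these stub job functions to be
# excluded from coverage.py's coverage measurement.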
avg_line_length: 19.142857 | max_line_length: 53 | alphanum_fraction: 0.686567 | num_words: 17 | num_lines: 6
hexsha: 38ef4b299ec0c92f30c40d7061e098f98cb30e80 | size: 2,689 | ext: py | lang: Python
max_stars_repo: colour/algebra/__init__.py | aurelienpierre/colour | 3ac45c12fbc0493e49ba4d4b2cb253df9fe14c47 | ["BSD-3-Clause"] | stars: 1 | 2022-02-12T06:28:15.000Z to 2022-02-12T06:28:15.000Z
max_issues_repo / max_forks_repo: same path/repo/licenses | counts: null | event datetimes: null
from .coordinates import * # noqa
from . import coordinates
from .common import (
is_spow_enabled,
set_spow_enable,
spow_enable,
spow,
normalise_maximum,
vector_dot,
matrix_dot,
linear_conversion,
linstep_function,
lerp,
smoothstep_function,
smooth,
is_identity,
)
from .geometry import (
normalise_vector,
euclidean_distance,
manhattan_distance,
extend_line_segment,
LineSegmentsIntersections_Specification,
intersect_line_segments,
ellipse_coefficients_general_form,
ellipse_coefficients_canonical_form,
point_at_angle_on_ellipse,
ellipse_fitting_Halir1998,
ELLIPSE_FITTING_METHODS,
ellipse_fitting,
)
from .interpolation import (
kernel_nearest_neighbour,
kernel_linear,
kernel_sinc,
kernel_lanczos,
kernel_cardinal_spline,
KernelInterpolator,
NearestNeighbourInterpolator,
LinearInterpolator,
SpragueInterpolator,
CubicSplineInterpolator,
PchipInterpolator,
NullInterpolator,
lagrange_coefficients,
table_interpolation_trilinear,
table_interpolation_tetrahedral,
TABLE_INTERPOLATION_METHODS,
table_interpolation,
)
from .extrapolation import Extrapolator
from .random import random_triplet_generator
from .regression import least_square_mapping_MoorePenrose
__all__ = []
__all__ += coordinates.__all__
__all__ += [
"is_spow_enabled",
"set_spow_enable",
"spow_enable",
"spow",
"normalise_maximum",
"vector_dot",
"matrix_dot",
"linear_conversion",
"linstep_function",
"lerp",
"smoothstep_function",
"smooth",
"is_identity",
]
__all__ += [
"normalise_vector",
"euclidean_distance",
"manhattan_distance",
"extend_line_segment",
"LineSegmentsIntersections_Specification",
"intersect_line_segments",
"ellipse_coefficients_general_form",
"ellipse_coefficients_canonical_form",
"point_at_angle_on_ellipse",
"ellipse_fitting_Halir1998",
"ELLIPSE_FITTING_METHODS",
"ellipse_fitting",
]
__all__ += [
"kernel_nearest_neighbour",
"kernel_linear",
"kernel_sinc",
"kernel_lanczos",
"kernel_cardinal_spline",
"KernelInterpolator",
"NearestNeighbourInterpolator",
"LinearInterpolator",
"SpragueInterpolator",
"CubicSplineInterpolator",
"PchipInterpolator",
"NullInterpolator",
"lagrange_coefficients",
"table_interpolation_trilinear",
"table_interpolation_tetrahedral",
"TABLE_INTERPOLATION_METHODS",
"table_interpolation",
]
__all__ += [
"Extrapolator",
]
__all__ += [
"random_triplet_generator",
]
__all__ += [
"least_square_mapping_MoorePenrose",
]
avg_line_length: 23.587719 | max_line_length: 57 | alphanum_fraction: 0.729639 | num_words: 235 | num_lines: 113
hexsha: ac63d387528cda8632354b0e60ff99788a96fd4e | size: 933 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: hy-data-analysis-with-python-spring-2020/part03-e08_almost_meeting_lines/src/almost_meeting_lines.py | Melimet/DAP2020 | 0854fe4ce8ace6abf6dc0bbcf71984595ff6d42a | ["MIT"] | counts: null | event datetimes: null
#!/usr/bin/python3
import numpy as np
def almost_meeting_lines(a1, b1, a2, b2):
    # The original stub returned [], which crashes the tuple unpacking in
    # main(). Filled in with a least-squares sketch (an assumption about the
    # intended solution): solve a1*x - y = -b1 and a2*x - y = -b2; for
    # parallel lines np.linalg.lstsq yields the closest point instead.
    A = np.array([[a1, -1.0], [a2, -1.0]])
    b = np.array([-b1, -b2])
    solution = np.linalg.lstsq(A, b, rcond=None)[0]
    exact = bool(np.allclose(A @ solution, b))
    return (solution[0], solution[1]), exact
def main():
a1=1
b1=2
a2=-1
b2=0
(x, y), exact = almost_meeting_lines(a1, b1, a2, b2)
if exact:
print(f"Lines meet at x={x} and y={y}")
a1=a2=1
b1=2
b2=-2
(x, y), exact = almost_meeting_lines(a1, b1, a1, b2)
if exact:
print(f"Lines meet at x={x} and y={y}")
else:
print(f"Closest point at x={x} and y={y}")
a1=1
b1=2
(x, y), exact = almost_meeting_lines(a1, b1, a1, b1)
if exact:
print(f"Lines meet at x={x} and y={y}")
else:
print(f"Closest point at x={x} and y={y}")
a1=1
b1=2
a2=1
b2=1
(x, y), exact = almost_meeting_lines(a1, b1, a2, b2)
if exact:
print(f"Lines meet at x={x} and y={y}")
else:
print(f"Closest point at x={x} and y={y}")
if __name__ == "__main__":
main()
avg_line_length: 19.851064 | max_line_length: 56 | alphanum_fraction: 0.527331 | num_words: 170 | num_lines: 46
hexsha: 3bab475ec5fb42cf4a76d33881d6312737bc20a9 | size: 3,229 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: musmanim/note_shapes.py | mscuthbert/musmanim | 3ce0502d44b715e08f6ddca375c3a84d0a4078cf | ["BSD-3-Clause"] | counts: null | event datetimes: null
import manim as m
import numpy as np
class Maxima(m.Polygon):
def __init__(self, **kwargs):
super().__init__(
[-0.8, 0.5, 0],
[ 0.8, 0.5, 0],
[ 0.8, -1.5, 0],
[ 0.7, -1.5, 0],
[ 0.7, -0.5, 0],
[-0.8, -0.5, 0],
color=kwargs.get('color', m.PURPLE_C),
fill_color=kwargs.get('fill_color', m.PURPLE_C),
fill_opacity=kwargs.get('fill_opacity', 0.9),
**kwargs)
self.corner_radius = 0.05
self.round_corners(self.corner_radius)
def get_critical_point(self, direction):
if np.array_equal(direction, [0, 0, 0]):
return [0, 0, 0]
return super().get_critical_point(direction)
class Longa(m.Polygon):
def __init__(self, rounded=True, color=m.PURPLE_C, fill_color=None, fill_opacity=0.9, **kwargs):
if fill_color is None:
fill_color = color
super().__init__(
[-0.5, 1.5, 0],
[ 0.5, 1.5, 0],
[ 0.5, -0.5, 0],
[ 0.4, -0.5, 0],
[ 0.4, 0.5, 0],
[-0.5, 0.5, 0],
color=color,
fill_color=fill_color,
fill_opacity=fill_opacity,
**kwargs)
self.shift(m.DOWN)
if rounded:
self.corner_radius = 0.05
self.round_corners(self.corner_radius)
else:
self.corner_radius = 0.0
self.shift(self.get_center())
def get_critical_point(self, direction):
if np.array_equal(direction, [0, 0, 0]):
return np.array((0.0, 0.0, 0.0))
return super().get_critical_point(direction)
class Breve(m.Square):
def __init__(self,
color=m.PURPLE_C,
fill_color=None,
fill_opacity=0.9,
side_length=1.0,
**kwargs):
if fill_color is None:
fill_color = color
super().__init__(side_length=side_length,
color=color,
fill_color=fill_color,
fill_opacity=fill_opacity,
**kwargs)
self.corner_radius = side_length * 0.05
self.round_corners(self.corner_radius)
class Semibreve(Breve):
def __init__(self, side_length=0.707, **kwargs):
super().__init__(side_length=side_length, **kwargs)
self.rotate(m.PI / 4)
class Minim(m.Polygon):
def __init__(self, **kwargs):
super().__init__(
[-0.5, 0.0, 0],
[-0.05, 0.5, 0],
[-0.05, 1.5, 0],
[ 0.05, 1.5, 0],
[ 0.05, 0.5, 0],
[ 0.5, 0.0, 0],
[ 0.0, -0.5, 0],
color=kwargs.get('color', m.PURPLE_C),
fill_color=kwargs.get('fill_color', m.PURPLE_C),
fill_opacity=kwargs.get('fill_opacity', 0.9),
**kwargs)
self.corner_radius = 0.05
self.round_corners(self.corner_radius)
self.shift(self.get_center())
def get_critical_point(self, direction):
if np.array_equal(direction, [0, 0, 0]):
return np.array((0.0, 0.0, 0.0))
return super().get_critical_point(direction)
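# A usage sketch (an assumption, not part of the original file): arranging
# the note shapes in a row inside a scene, using the community manim API.
class NoteShapesDemo(m.Scene):
    def construct(self):
        shapes = m.VGroup(Maxima(), Longa(), Breve(), Semibreve(), Minim())
        shapes.arrange(m.RIGHT, buff=1)
        self.play(m.Create(shapes))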
avg_line_length: 31.349515 | max_line_length: 100 | alphanum_fraction: 0.507587 | num_words: 436 | num_lines: 102
hexsha: 3bb711f0ba8e1ee7b367cb513c4ea5c1eb0d4ffb | size: 84 | ext: py | lang: Python
max_stars_repo: patterns/singleton/smodule/__init__.py | ceb10n/design-patterns-with-python | 287e0717d19dd21088b8e44f6df9a97b590f2804 | ["MIT"] | stars: 1 | 2019-01-22T08:19:31.000Z to 2019-01-22T08:19:31.000Z
max_issues_repo / max_forks_repo: same path/repo/licenses | counts: null | event datetimes: null
def this_is_like_a_singleton():
print('Hello from a singleton as a func module')
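# Usage sketch (an assumption, not part of the original file): Python caches
# modules in sys.modules, so every import returns the same module object and
# the module itself behaves as the singleton.
# >>> from patterns.singleton import smodule
# >>> smodule.this_is_like_a_singleton()
# Hello from a singleton as a func module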
avg_line_length: 42 | max_line_length: 52 | alphanum_fraction: 0.761905 | num_words: 15 | num_lines: 2
hexsha: ce3c3770684bcd0c9815c214dbbcf5ec40a6cc4b | size: 126 | ext: py | lang: Python
max_stars_repo: alphabot/utils/exceptions.py | SirMammingtonham/alphastone | 06e633b4b750c002d2d488334aa75b292482651d | ["Unlicense"] | stars: 21 | 2018-08-31T06:11:17.000Z to 2022-01-12T09:12:27.000Z
max_issues_repo: remote/utils/exceptions.py | djdookie/alphastone | 2963bb5538d42aeb7789b124496b3ad0c507e2ff | ["Unlicense"] | issues: 4 | 2018-08-19T23:13:46.000Z to 2019-07-21T06:04:14.000Z
max_forks_repo: remote/utils/exceptions.py | djdookie/alphastone | 2963bb5538d42aeb7789b124496b3ad0c507e2ff | ["Unlicense"] | forks: 7 | 2018-12-17T23:36:20.000Z to 2021-11-02T13:54:55.000Z
class GetStateError(Exception):
pass
class UnhandledAction(Exception):
pass
class GameTreeFailure(Exception):
pass
avg_line_length: 21 | max_line_length: 33 | alphanum_fraction: 0.769841 | num_words: 12 | num_lines: 6
hexsha: 0209f39db41b5d2aa37c705698063b4882d6abd6 | size: 12,068 | ext: py | lang: Python
max_stars_repo: mpikat/meerkat/test/antennas.py | TeepChairin/mpikat | 464d76113c92e0e8a3106ccc05ef551a1427e582 | ["MIT"] | stars: 2 | 2018-11-12T12:17:27.000Z to 2019-02-08T15:44:14.000Z
max_issues_repo: same path/repo/licenses | issues: 3 | 2018-08-03T12:05:20.000Z to 2018-08-03T12:13:53.000Z
max_forks_repo: same path/repo/licenses | forks: 4 | 2019-01-21T16:31:34.000Z to 2019-12-03T09:27:15.000Z
ANTENNAS = {'m000': 'm000, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, -8.254 -207.2925 1.209 5875.794 5877.025, -0:00:39.7 0 -0:04:04.4 -0:04:53.0 0:00:57.8 -0:00:13.9 0:13:45.2 0:00:59.8, 1.22',
'm001': 'm001, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 1.1275 -171.7635 1.0565 5869.964 5870.974, -0:42:08.0 0 0:01:44.0 0:01:11.9 -0:00:14.0 -0:00:21.0 -0:36:13.1 0:01:36.2, 1.22',
'm002': 'm002, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, -32.1045 -224.2375 1.2545 5871.47 5872.221, 0:40:20.2 0 -0:02:41.9 -0:03:46.8 0:00:09.4 -0:00:01.1 0:03:04.7, 1.22',
'm003': 'm003, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, -66.5125 -202.2765 0.8885 5872.781 5874.412, 0:16:25.4 0 0:00:53.5 -0:02:40.6 0:00:00.9 0:00:05.7 0:34:05.6 0:02:12.3, 1.22',
'm004': 'm004, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, -123.618 -252.946 1.1085 5872.525 5874.394, 1:02:09.3 0 -0:00:39.3 -0:01:27.7 0:00:23.3 -0:00:11.8 -0:09:00.3 0:02:24.4, 1.22',
'm005': 'm005, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, -102.088 -283.12 1.475 5877.82 5878.919, -0:07:42.6 0 -0:00:21.0 -0:04:59.5 0:00:14.7 0:00:09.6 -0:02:18.8, 1.22',
'm006': 'm006, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, -18.223 -295.428 1.793 5880.202 5880.993, 1:54:58.0 0 -0:02:15.7 -0:02:25.3 0:00:15.1 -0:00:03.8 -0:07:50.2 0:01:28.9, 1.22',
'm007': 'm007, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, -89.582 -402.73 2.3725 5888.631 5889.379, 0:19:04.3 0 -0:02:06.7 -0:00:12.7 0:00:32.2 -0:00:12.0 -0:24:09.4 0:00:11.7, 1.22',
'm008': 'm008, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, -93.521 -535.026 3.0485 5874.908 5875.799, -0:04:50.4 0 -0:00:57.8 -0:01:49.6 0:00:10.7 0:00:21.2 -0:12:35.8 0:01:37.8, 1.22',
'm009': 'm009, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 32.3635 -371.056 2.742 5851.372 5852.133, 1:37:04.7 0 -0:04:08.8 -0:10:26.4 0:00:36.9 -0:00:05.0 -0:04:00.9 0:01:20.3, 1.22',
'm010': 'm010, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 88.1025 -511.872 3.778 5881.442 5882.956, 0:24:20.0 0 0:00:22.1 -0:02:55.9 0:00:26.8 0:00:08.9 -0:02:56.9 0:01:32.6, 1.22',
'm011': 'm011, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 84.02 -352.08 2.758 5882.336 5883.219, -1:59:21.8 0 -0:00:48.4 0:01:06.5 0:00:22.3 0:00:01.6 -0:07:28.0 0:02:54.6, 1.22',
'm012': 'm012, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 140.0255 -368.2685 3.0515 5863.332 5864.346, -0:17:21.2 0 -0:01:49.7 -0:00:38.2 0:00:15.2 -0:00:08.5 -0:01:11.4 0:01:57.3, 1.22',
'm013': 'm013, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 236.802 -393.463 3.72 5863.478 5864.225, 0:40:27.9 0 -0:02:35.2 -0:04:58.5 0:00:13.0 0:00:19.2 -0:05:55.6 0:01:09.3, 1.22',
'm014': 'm014, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 280.6775 -285.792 3.145 5868.66 5870.258, 0:51:40.6 0 -0:01:27.5 0:00:58.0 0:00:11.9 0:00:03.8 -0:02:31.1 0:01:55.5, 1.22',
'm015': 'm015, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 210.6565 -219.143 2.342 5916.984 5918.597, -0:13:13.4 0 -0:00:32.5 -0:03:32.9 -0:00:02.1 0:00:12.9 -0:09:30.0 -0:00:42.4, 1.22',
'm016': 'm016, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 288.1625 -185.881 2.435 5815.574 5816.717, 0:13:42.0 0 -0:02:17.7 0:00:02.0 0:00:04.9 -0:00:12.4 -0:08:00.4 0:02:07.9, 1.22',
'm017': 'm017, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 199.633 -112.264 1.5625 5875.203 5875.953, 0:53:59.5 0 -0:00:30.1 -0:01:02.3 0:00:47.8 -0:00:31.4 0:08:18.7 0:01:11.2, 1.22',
'm018': 'm018, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 105.736 -245.87 2.1305 5867.229 5868.115, 0:31:37.6 0 -0:01:49.2 -0:02:44.9 0:00:25.2 0:00:29.4 -0:02:20.3 0:00:29.1, 1.22',
'm019': 'm019, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 170.796 -285.225 2.677 5866.327 5867.445, 0:29:10.7 0 0:01:58.4 -0:00:33.4 0:00:18.1 -0:00:11.9 -0:16:29.3 0:01:48.8, 1.22',
'm020': 'm020, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 97.0175 -299.638 2.479 5837.131 5838.274, -1:42:52.9 0 -0:00:37.2 -0:01:00.1 0:00:25.5 0:00:06.0 0:03:40.9 0:02:12.6, 1.22',
'm021': 'm021, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, -295.961 -327.237 0.715 5889.079 5890.081, -0:25:55.3 0 -0:02:11.8 -0:05:30.5 0:00:19.8 0:00:20.9 -0:01:43.8 -0:01:11.0, 1.22',
'm022': 'm022, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, -372.995 0.548 -1.747 5874.934 5875.936, 1:06:28.5 0 -0:00:25.3 -0:02:26.8 -0:00:06.2 0:00:38.6 -0:05:52.4 0:01:54.6, 1.22',
'm023': 'm023, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, -322.301 -142.1845 -0.573 5872.105 5873.709, -0:02:52.8 0 -0:00:33.2 -0:05:34.5 0:00:07.6 0:00:20.8 -0:07:17.4 0:01:02.4, 1.22',
'm024': 'm024, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, -351.0375 150.089 -2.5575 5871.178 5872.254, -0:02:52.8 0 -0:00:33.2 -0:05:34.5 0:00:07.6 0:00:20.8 -0:07:17.4 0:01:02.4, 1.22',
'm025': 'm025, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, -181.97 225.617 -2.335 5873.897 5874.035, -0:00:25.8 0 -0:01:43.0 -0:01:02.8 0 -0:00:40.2 0:02:19.5 -0:00:11.3, 1.22',
'm026': 'm026, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, -98.9955 17.044 -0.591 5869.451 5871.315, -0:08:18.3 0 -0:03:41.1 -0:07:22.6 0:00:06.6 -0:00:12.7 -0:05:41.0 0:01:02.9, 1.22',
'm027': 'm027, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 40.478 -23.1155 0.2815 5865.756 5866.846, -0:02:40.8 0 0:13:36.2 0:12:16.4 -0:00:02.6 -0:00:17.1 -0:05:28.3 -0:00:57.8, 1.22',
'm028': 'm028, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, -51.1715 -87.1695 0.229 5870.798 5872.664, 0:12:29.1 0 -0:01:23.1 -0:01:05.0 -0:00:05.8 -0:00:08.0 -0:00:53.8 0:01:27.9, 1.22',
'm029': 'm029, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, -88.755 -124.1115 0.3085 5874.599 5876.192, -0:08:46.4 0 -0:01:26.6 -0:00:32.5 -0:00:11.8 0:00:18.7 0:00:36.5 0:01:05.1, 1.22',
'm030': 'm030, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 171.289 113.947 -0.1105 5866.179 5867.178, 1:03:21.2 0 -0:02:51.7 -0:11:51.4 -0:00:04.3 -0:00:19.6 -0:11:18.2 0:02:16.8, 1.22',
'm031': 'm031, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 246.573 93.7565 0.065 5868.193 5869.797, -0:41:56.1 0 0:04:12.6 0:13:03.8 -0:00:07.6 0:00:19.0 -0:19:06.9 0:01:29.0, 1.22',
'm032': 'm032, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 461.2865 175.5055 -0.0355 5864.861 5865.577, 0:35:45.5 0 -0:01:58.2 0:02:20.6 0:00:07.4 -0:00:36.1 -0:03:30.0 0:02:12.2, 1.22',
'm033': 'm033, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 580.7005 863.958 -3.809 5850.667 5852.17, 0:42:33.1 0 -0:01:10.6 -0:08:38.0 0:00:12.9 -0:00:46.4 -0:21:32.8 0:01:33.7, 1.22',
'm034': 'm034, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 357.8205 -28.309 1.573 5859.104 5860.723, 0:09:22.0 0 -0:02:15.0 -0:07:20.8 0:00:27.0 0:00:48.0 -0:11:43.9 0:00:44.8, 1.22',
'm035': 'm035, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 386.1655 -180.8965 2.888 5855.376 5856.245, 1:41:45.5 0 -0:00:14.0 -0:00:12.3 0:00:46.3 -0:00:36.2 -0:21:47.5 0:01:37.8, 1.22',
'm036': 'm036, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 388.265 -290.762 3.4135 5856.404 5857.4, 0:23:07.1 0 0:07:14.1 0:04:09.2 -0:00:23.9 -0:01:05.7 -0:11:04.8 0:01:25.9, 1.22',
'm037': 'm037, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 380.2895 -459.3075 4.78 5863.721 5864.597, 0:25:52.7 0 0:00:37.4 -0:03:29.9 0:00:25.9 -0:00:22.1 -0:08:15.7 0:01:24.7, 1.22',
'm038': 'm038, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 213.313 -569.0825 4.542 5871.115 5872.117, 0:06:17.5 0 -0:00:47.1 0:00:15.0 0:00:03.0 -0:00:07.3 0:04:40.4 0:03:23.1, 1.22',
'm039': 'm039, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 253.749 -592.1475 5.0315 5872.333 5873.763, 1:00:37.7 0 0:01:47.6 0:03:57.1 0:00:42.0 -0:00:38.6 -0:02:03.4 0:01:49.2, 1.22',
'm040': 'm040, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, -26.8515 -712.219 4.4355 5875.653 5876.424, 0:32:01.1 0 -0:00:17.6 -0:00:55.4 0:00:08.8 -0:00:17.5 -0:03:39.7 -0:00:03.3, 1.22',
'm041': 'm041, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, -287.5405 -661.6775 2.5565 5888.778 5889.533, 0:08:05.8 0 -0:00:09.6 0:03:44.9 0:00:22.3 -0:00:01.6 0:08:53.8 0:01:56.9, 1.22',
'm042': 'm042, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, -361.709 -460.317 1.0945 5868.148 5870.029, -0:08:57.6 0 -0:00:54.1 -0:02:02.4 0:00:16.2 0:00:02.0 -0:01:39.1 0:01:29.0, 1.22',
'm043': 'm043, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, -629.8435 -128.325 -2.1295 5856.073 5857.703, 0:22:45.1 0 0:00:47.5 0:01:15.8 0:00:42.3 -0:00:06.0 -0:10:24.5 0:02:10.9, 1.22',
'm044': 'm044, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, -896.15 600.504 -8.035 5867.43 5868.189, -0:10:12.5 0 0:00:12.0 -0:00:03.5 0:00:20.2 -0:00:02.3 -0:03:00.2 0:01:26.3, 1.22',
'm045': 'm045, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, -1832.849 266.753 -7.2595 5873.565 5874.716, -0:18:03.4 0 -0:03:41.9 -0:04:25.4 -0:00:08.3 0:00:25.7 -0:17:41.1 -0:00:06.8, 1.22',
'm046': 'm046, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, -1467.3265 1751.927 -14.4575 5861.403 5861.772, 0:02:35.2 0 0:00:13.4 -0:01:11.2 0:00:12.4 -0:00:09.7 0:05:34.2 0:01:40.6, 1.22',
'm047': 'm047, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, -578.287 -517.293 0.229 5867.784 5868.561, 0:47:36.0 0 -0:01:28.3 -0:02:55.2 0:00:11.0 -0:00:00.9 0:00:01.1 0:01:39.9, 1.22',
'm048': 'm048, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, -2805.627 2686.877 -17.1145 5868.983 5869.731, 1:22:26.1 0 0:00:00.4 0:03:11.3 -0:01:45.4 0:01:30.1 -0:00:03.2 0:00:59.6, 1.22',
'm049': 'm049, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, -3605.9465 436.482 -4.654 5903.491 5903.763, -0:03:20.0 0 -0:09:04.3 -0:02:24.3 -0:00:27.3 0:02:13.9 -0:08:09.6 0:01:14.3, 1.22',
'm050': 'm050, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, -2052.3315 -843.701 -2.033 5885.963 5887.567, 0:21:39.5 0 0:03:40.3 0:02:10.8 0:01:07.1 0:00:42.9 -0:11:07.6 0:01:11.9, 1.22',
'm051': 'm051, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, -850.254 -769.3555 0.216 5870.33 5871.214, -0:26:02.0 0 -0:01:20.2 -0:00:49.2 0:01:06.9 -0:00:02.9 -0:14:31.1 0:02:37.6, 1.22',
'm052': 'm052, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, -593.1855 -1148.65 3.1635 5862.139 5862.887, 1:19:56.2 0 0:04:10.4 0:08:36.9 0:01:01.3 0:00:14.6 0:00:43.1 0:00:59.4, 1.22',
'm053': 'm053, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 9.3665 -1304.461 7.6165 5898.297 5879.68, 0:04:09.5 0 -0:00:28.0 0:02:14.6 -0:00:14.6 0:01:08.9 0:08:59.8 0:00:45.0, 1.22',
'm054': 'm054, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 871.9815 -499.814 5.943 5880.501 5881.515, 0:08:22.2 0 -0:00:24.9 0:03:29.8 -0:00:02.9 -0:00:48.3 0:06:55.9 0:01:56.6, 1.22',
'm055': 'm055, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 1201.794 96.488 2.6205 5851.065 5852.054, 0:11:16.5 0 0:06:25.3 0:03:51.0 0:00:50.8 -0:01:25.5 0:06:01.1 0:00:39.9, 1.22',
'm056': 'm056, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 1598.411 466.662 -0.4145 5853.659 5854.17, -0:22:03.5 0 0:02:48.2 0:10:43.2 0:00:05.8 -0:01:17.5 -0:03:40.7 -0:00:29.6, 1.22',
'm057': 'm057, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 294.693 3259.9135 -18.063 5855.239 5856.731, -0:08:42.8 0 -0:02:55.2 -0:02:08.5 -0:02:04.1 -0:00:24.9 -0:40:17.1 0:00:18.4, 1.22',
'm058': 'm058, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 2805.7905 2686.8635 -11.086 5870.582 5872.173, 0:04:40.5 0 -0:01:56.8 -0:04:59.2 -0:01:48.8 -0:01:42.0 -0:02:40.6 0:00:01.0, 1.22',
'm059': 'm059, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 3686.448 758.865 4.3625 5863.443 5865.037, 0:46:33.6 0 0:00:51.0 -0:00:39.9 -0:00:33.6 -0:02:00.8 -0:03:45.2 -0:01:25.5, 1.22',
'm060': 'm060, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, 3419.684 -1840.505 16.2585 5875.233 5876.1, -0:36:15.0 0 0:00:49.2 0:02:42.1 0:01:29.6 -0:02:00.3 -0:10:22.3 0:00:40.2, 1.22',
'm061': 'm061, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, -16.4025 -2323.78 13.906 5876.273 5876.553, 0:38:27.9 0 -0:01:56.6 -0:04:45.6 0:01:16.1 0:00:15.0 0:08:40.8 0:01:46.9, 1.22',
'm062': 'm062, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, -1440.6315 -2503.767 14.277 5933.954 5935.741, -0:15:23.0 0 0:00:04.6 -0:03:30.4 0:01:12.2 0:00:37.5 0:00:15.6 0:01:11.8, 1.22',
'm063': 'm063, -30:42:39.8, 21:26:38.0, 1035.0, 13.5, -3419.574 -1840.462 9.022 5685.012 5686.161, -0:59:43.2 0 0:01:58.6 0:01:49.8 0:01:23.3 0:02:04.6 -0:08:15.7 0:03:47.1, 1.22'}
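# A minimal parsing sketch (an assumption, not part of the original file).
# Each value reads like a katpoint-style antenna description:
# "name, latitude, longitude, altitude_m, diameter_m, ENU offsets, pointing model, beamwidth".
def parse_antenna(description):
    fields = [field.strip() for field in description.split(',')]
    name, latitude, longitude, altitude, diameter = fields[:5]
    return {'name': name, 'latitude': latitude, 'longitude': longitude,
            'altitude_m': float(altitude), 'diameter_m': float(diameter)}

# e.g. parse_antenna(ANTENNAS['m000'])['diameter_m'] -> 13.5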
avg_line_length: 182.848485 | max_line_length: 198 | alphanum_fraction: 0.56596 | num_words: 3,510 | num_lines: 65
hexsha: 02333d2b936a7f0bbd50f05c302c9b510f2d8af4 | size: 80 | ext: py | lang: Python
max_stars_repo: tests/__init__.py | svetlyak40wt/python-processor | 9126a021d603030899897803ab9973250e5b16f6 | ["BSD-2-Clause"] | stars: 40 | 2015-03-18T09:27:13.000Z to 2021-12-31T06:25:48.000Z
max_issues_repo: same path/repo/licenses | issues: 2 | 2015-03-19T18:31:22.000Z to 2016-08-19T13:49:31.000Z
max_forks_repo: same path/repo/licenses | forks: 7 | 2015-03-19T17:59:24.000Z to 2019-09-05T15:16:19.000Z
import hy
from .pipeline import *
from .sources import *
from .outputs import *
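# Note (an assumption about this package's layout): `import hy` installs the
# Hy import hook first, which is what allows .hy source modules behind
# .pipeline, .sources and .outputs to be imported as regular Python modules.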
avg_line_length: 16 | max_line_length: 23 | alphanum_fraction: 0.75 | num_words: 11 | num_lines: 4
hexsha: 024f48e3ebfe4d8883dd9b41b47262a1480a80f7 | size: 4,961 | ext: py | lang: Python
max_stars_repo: mmdet/models/losses/smooth_l1_loss_augmix.py | WoojuLee24/mmdetection | ee27d22aadcff19bb36725604d24ddb4b681e471 | ["Apache-2.0"] | stars: 1 | 2022-02-28T06:23:07.000Z to 2022-02-28T06:23:07.000Z
max_issues_repo / max_forks_repo: same path/repo/licenses | counts: null | event datetimes: null
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
import torch.nn as nn
from ..builder import LOSSES
from .utils import weighted_loss, weighted_loss2
import torch.nn.functional as F
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss2
def smooth_l1_loss_augmix(pred, target, beta=1.0):
"""Smooth L1 loss.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
beta (float, optional): The threshold in the piecewise function.
Defaults to 1.0.
Returns:
torch.Tensor: Calculated loss
"""
pred_orig, _, _ = torch.chunk(pred, 3)
target, _, _ = torch.chunk(target, 3)
assert beta > 0
if target.numel() == 0:
return pred_orig.sum() * 0
assert pred_orig.size() == target.size()
diff = torch.abs(pred_orig - target)
loss_orig = torch.where(diff < beta, 0.5 * diff * diff / beta,
diff - 0.5 * beta)
return loss_orig
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss2
def l1_loss_augmix(pred, target):
"""L1 loss.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
Returns:
torch.Tensor: Calculated loss
"""
pred_orig, _, _ = torch.chunk(pred, 3)
target, _, _ = torch.chunk(target, 3)
if target.numel() == 0:
return pred_orig.sum() * 0
assert pred_orig.size() == target.size()
loss_orig = torch.abs(pred_orig - target)
return loss_orig
@LOSSES.register_module()
class SmoothL1LossAugMix(nn.Module):
"""Smooth L1 loss.
Args:
beta (float, optional): The threshold in the piecewise function.
Defaults to 1.0.
reduction (str, optional): The method to reduce the loss.
Options are "none", "mean" and "sum". Defaults to "mean".
loss_weight (float, optional): The weight of loss.
"""
def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):
super(SmoothL1LossAugMix, self).__init__()
self.beta = beta
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * smooth_l1_loss_augmix(
pred,
target,
weight,
beta=self.beta,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss_bbox
@LOSSES.register_module()
class L1LossAugMix(nn.Module):
"""L1 loss.
Args:
reduction (str, optional): The method to reduce the loss.
Options are "none", "mean" and "sum".
loss_weight (float, optional): The weight of loss.
"""
def __init__(self, reduction='mean', loss_weight=1.0):
super(L1LossAugMix, self).__init__()
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * l1_loss_augmix(
pred, target, weight, reduction=reduction, avg_factor=avg_factor)
return loss_bbox
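A usage sketch for the classes above, following the convention implied by `torch.chunk(pred, 3)`: predictions and targets arrive as three equal stacks (the original view plus two AugMix views), and only the first chunk enters the loss. Shapes are illustrative; weighting and reduction are handled by the `weighted_loss2` decorator from `.utils`, which is defined elsewhere in the package, so both are left at their defaults here:

import torch

loss_fn = SmoothL1LossAugMix(beta=1.0, loss_weight=1.0)

n = 4                             # boxes per view (illustrative)
pred = torch.randn(3 * n, 4)      # [original; augmix1; augmix2] stacked on dim 0
target = torch.randn(3 * n, 4)

loss = loss_fn(pred, target)      # reduced by the weighted_loss2 wrapper
print(loss)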
| 31.801282
| 78
| 0.600282
| 582
| 4,961
| 4.972509
| 0.16323
| 0.045612
| 0.038701
| 0.022115
| 0.817554
| 0.794748
| 0.73877
| 0.718037
| 0.718037
| 0.685556
| 0
| 0.011645
| 0.307599
| 4,961
| 155
| 79
| 32.006452
| 0.830859
| 0.393872
| 0
| 0.575342
| 0
| 0
| 0.011152
| 0
| 0
| 0
| 0
| 0
| 0.068493
| 1
| 0.082192
| false
| 0
| 0.082192
| 0
| 0.273973
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
65f4d3a13b1839f1d57c0f397c4566768e6987a2
| 4,402
|
py
|
Python
|
shenfun/optimization/numba/pdma.py
|
jaisw7/shenfun
|
7482beb5b35580bc45f72704b69343cc6fc1d773
|
[
"BSD-2-Clause"
] | 1
|
2021-03-06T09:29:39.000Z
|
2021-03-06T09:29:39.000Z
|
shenfun/optimization/numba/pdma.py
|
jaisw7/shenfun
|
7482beb5b35580bc45f72704b69343cc6fc1d773
|
[
"BSD-2-Clause"
] | null | null | null |
shenfun/optimization/numba/pdma.py
|
jaisw7/shenfun
|
7482beb5b35580bc45f72704b69343cc6fc1d773
|
[
"BSD-2-Clause"
] | null | null | null |
import numba as nb
__all__ = ['PDMA_SymLU', 'PDMA_SymLU_VC', 'PDMA_SymSolve', 'PDMA_SymLU2D',
'PDMA_SymLU3D', 'PDMA_SymSolve_VC']
def PDMA_SymLU_VC(d, a, l, axis=0):
n = d.ndim
if n == 1:
PDMA_SymLU(d, a, l)
elif n == 2:
PDMA_SymLU2D(d, a, l, axis)
elif n == 3:
PDMA_SymLU3D(d, a, l, axis)
@nb.jit(nopython=True, fastmath=True, cache=True)
def PDMA_SymLU(d, e, f):
n = d.shape[0]
m = e.shape[0]
k = n - m
for i in range(n-2*k):
lam = e[i]/d[i]
d[i+k] -= lam*e[i]
e[i+k] -= lam*f[i]
e[i] = lam
lam = f[i]/d[i]
d[i+2*k] -= lam*f[i]
f[i] = lam
lam = e[n-4]/d[n-4]
d[n-2] -= lam*e[n-4]
e[n-4] = lam
lam = e[n-3]/d[n-3]
d[n-1] -= lam*e[n-3]
e[n-3] = lam
@nb.jit(nopython=True, fastmath=True, cache=True)
def PDMA_SymLU2D(d, e, f, axis):
if axis == 0:
for j in range(d.shape[1]):
PDMA_SymLU(d[:-4, j], e[:-6, j], f[:-8, j])
elif axis == 1:
for i in range(d.shape[0]):
PDMA_SymLU(d[i, :-4], e[i, :-6], f[i, :-8])
@nb.jit(nopython=True, fastmath=True, cache=True)
def PDMA_SymLU3D(d, e, f, axis):
if axis == 0:
for j in range(d.shape[1]):
for k in range(d.shape[2]):
PDMA_SymLU(d[:-4, j, k], e[:-6, j, k], f[:-8, j, k])
elif axis == 1:
for i in range(d.shape[0]):
for k in range(d.shape[2]):
PDMA_SymLU(d[i, :-4, k], e[i, :-6, k], f[i, :-8, k])
elif axis == 2:
for i in range(d.shape[0]):
for j in range(d.shape[1]):
PDMA_SymLU(d[i, j, :-4], e[i, j, :-6], f[i, j, :-8])
def PDMA_SymSolve(d, a, l, x, axis=0):
n = x.ndim
if n == 1:
PDMA_SymSolve1D(d, a, l, x)
elif n == 2:
PDMA_SymSolve2D(d, a, l, x, axis)
elif n == 3:
PDMA_SymSolve3D(d, a, l, x, axis)
def PDMA_SymSolve_VC(d, a, l, x, axis=0):
n = x.ndim
if n == 1:
PDMA_SymSolve1D(d, a, l, x)
elif n == 2:
PDMA_SymSolve2D_VC(d, a, l, x, axis)
elif n == 3:
PDMA_SymSolve3D_VC(d, a, l, x, axis)
@nb.jit(nopython=True, fastmath=True, cache=True)
def PDMA_SymSolve1D(d, e, f, b):
n = d.shape[0]
b[2] -= e[0]*b[0]
b[3] -= e[1]*b[1]
for k in range(4, n):
b[k] -= (e[k-2]*b[k-2] + f[k-4]*b[k-4])
b[n-1] /= d[n-1]
b[n-2] /= d[n-2]
b[n-3] /= d[n-3]
b[n-3] -= e[n-3]*b[n-1]
b[n-4] /= d[n-4]
b[n-4] -= e[n-4]*b[n-2]
for k in range(n-5, -1, -1):
b[k] /= d[k]
b[k] -= (e[k]*b[k+2] + f[k]*b[k+4])
@nb.jit(nopython=True, fastmath=True, cache=True)
def PDMA_SymSolve2D(d, e, f, b, axis):
if axis == 0:
for j in range(b.shape[1]):
PDMA_SymSolve1D(d, e, f, b[:, j])
elif axis == 1:
for i in range(b.shape[0]):
PDMA_SymSolve1D(d, e, f, b[i])
@nb.jit(nopython=True, fastmath=True, cache=True)
def PDMA_SymSolve3D(d, e, f, b, axis):
if axis == 0:
for j in range(b.shape[1]):
for k in range(b.shape[2]):
PDMA_SymSolve1D(d, e, f, b[:, j, k])
elif axis == 1:
for i in range(b.shape[0]):
for k in range(b.shape[2]):
PDMA_SymSolve1D(d, e, f, b[i, :, k])
elif axis == 2:
for i in range(b.shape[0]):
for j in range(b.shape[1]):
PDMA_SymSolve1D(d, e, f, b[i, j])
@nb.jit(nopython=True, fastmath=True, cache=True)
def PDMA_SymSolve3D_VC(d, e, f, x, axis):
if axis == 0:
for j in range(d.shape[1]):
for k in range(d.shape[2]):
PDMA_SymSolve1D(d[:-4, j, k], e[:-6, j, k], f[:-8, j, k], x[:, j, k])
elif axis == 1:
for i in range(d.shape[0]):
for k in range(d.shape[2]):
PDMA_SymSolve1D(d[i, :-4, k], e[i, :-6, k], f[i, :-8, k], x[i, :, k])
elif axis == 2:
for i in range(d.shape[0]):
for j in range(d.shape[1]):
PDMA_SymSolve1D(d[i, j, :-4], e[i, j, :-6], f[i, j, :-8], x[i, j, :])
@nb.jit(nopython=True, fastmath=True, cache=True)
def PDMA_SymSolve2D_VC(d, e, f, x, axis):
if axis == 0:
for j in range(d.shape[1]):
PDMA_SymSolve1D(d[:-4, j], e[:-6, j], f[:-8, j], x[:, j])
elif axis == 1:
for i in range(d.shape[0]):
PDMA_SymSolve1D(d[i, :-4], e[i, :-6], f[i, :-8], x[i, :])
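A worked 1-D solve for the routines above, assuming the storage convention read off the slicing (`d[:-4]`, `e[:-6]`, `f[:-8]`): `d` holds the main diagonal, `e` the symmetric off-diagonal at offset 2, and `f` the one at offset 4. `PDMA_SymLU` factors the diagonals in place, and `PDMA_SymSolve1D` overwrites the right-hand side with the solution:

import numpy as np

n = 12
d = np.full(n, 4.0)          # main diagonal
e = np.full(n - 2, 1.0)      # offset +/-2 diagonal
f = np.full(n - 4, 0.5)      # offset +/-4 diagonal
A = (np.diag(d) + np.diag(e, 2) + np.diag(e, -2)
     + np.diag(f, 4) + np.diag(f, -4))

b = np.arange(1.0, n + 1)
x = b.copy()
PDMA_SymLU(d, e, f)          # in-place factorization of the diagonals
PDMA_SymSolve1D(d, e, f, x)  # x now holds the solution
print(np.max(np.abs(A @ x - b)))   # ~1e-15 for this well-conditioned system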
| 30.569444
| 85
| 0.472058
| 850
| 4,402
| 2.382353
| 0.057647
| 0.093333
| 0.06321
| 0.102716
| 0.81037
| 0.750617
| 0.738765
| 0.729383
| 0.725926
| 0.697284
| 0
| 0.050495
| 0.311677
| 4,402
| 143
| 86
| 30.783217
| 0.617822
| 0
| 0
| 0.484375
| 0
| 0
| 0.017265
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085938
| false
| 0
| 0.007813
| 0
| 0.09375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5a246451f90cefe194bc6cc2ee80e19addf4c6cf
| 11,297
|
py
|
Python
|
vosi/tests/tests.py
|
bruot/django-vosi
|
06bad7e2c3e4d80b93d9f19e3473f5fbaf51f1b3
|
[
"Apache-2.0"
] | null | null | null |
vosi/tests/tests.py
|
bruot/django-vosi
|
06bad7e2c3e4d80b93d9f19e3473f5fbaf51f1b3
|
[
"Apache-2.0"
] | null | null | null |
vosi/tests/tests.py
|
bruot/django-vosi
|
06bad7e2c3e4d80b93d9f19e3473f5fbaf51f1b3
|
[
"Apache-2.0"
] | 1
|
2021-06-23T13:25:36.000Z
|
2021-06-23T13:25:36.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test import Client
from django.test.utils import setup_test_environment
from vosi.models import Availability, AvailabilityOption
from vosi.models import VOResource_Capability, VOResource_Interface, VOResource_AccessURL
from vosi.renderers import VosiAvailabilityRenderer, VosiCapabilityRenderer
def remove_comment(content):
content = re.sub(r'<!--.*\n.*\n.*\n.*-->\n', '', content)
return content
class VosiAvailabilityRenderer_TestCase(TestCase):
def test_availability_render(self):
data = {'available': 'true', 'note': 'Service is available'}
response = VosiAvailabilityRenderer().render(data)
response = remove_comment(response)
expected = \
u"""<?xml version="1.0" encoding="utf-8"?>
<vosi:availability version="1.1" xmlns:vosi="http://www.ivoa.net/xml/VOSIAvailability/v1.0"><vosi:available>true</vosi:available><vosi:note>Service is available</vosi:note></vosi:availability>"""
self.maxDiff = None
self.assertEqual(response, expected)
def test_availability_render_pretty(self):
data = {'available': 'true', 'note': 'Service is available'}
response = VosiAvailabilityRenderer().render(data, prettyprint=True)
expected = \
u"""<vosi:availability xmlns:vosi="http://www.ivoa.net/xml/VOSIAvailability/v1.0" version="1.1">
<vosi:available>true</vosi:available>
<vosi:note>Service is available</vosi:note>
</vosi:availability>
"""
self.assertEqual(response, expected)
class VosiCapabilityRenderer_TestCase(TestCase):
def setUp(self):
cap = VOResource_Capability.objects.create(
id="1",
standardID='ivo://ivoa.net/std/ExampleDM#DAL',
description='Example model',
appname="example1")
cap.save()
iface = VOResource_Interface.objects.create(
id="2",
type="vs:ParamHTTP",
capability=cap,
version="1.0",
role='std'
)
iface.save()
aurl = VOResource_AccessURL.objects.create(
interface=iface,
url="http://www.example.com/mydalinterface/",
use="full"
)
aurl.save()
data = VOResource_Capability.objects.all()
def test_capabilities_render(self):
data = VOResource_Capability.objects.filter(appname='example1').order_by('id')
response = VosiCapabilityRenderer().render(data)
response = remove_comment(response)
expected = \
u"""<?xml version="1.0" encoding="utf-8"?>
<vosi:capabilities xmlns:vr="http://www.ivoa.net/xml/VOResource/v1.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" version="1.1" xmlns:vs="http://www.ivoa.net/xml/VODataService/v1.1" xmlns:vosi="http://www.ivoa.net/xml/VOSICapabilities/v1.0"><capability standardID="ivo://ivoa.net/std/ExampleDM#DAL"><interface xsi:type="vs:ParamHTTP"><accessURL use="full">http://www.example.com/mydalinterface/</accessURL></interface></capability></vosi:capabilities>"""
self.maxDiff = None
self.assertEqual(response, expected)
def test_capabilities_render_pretty(self):
data = VOResource_Capability.objects.filter(appname='example1').order_by('id')
response = VosiCapabilityRenderer().render(data, prettyprint=True)
response = remove_comment(response)
expected = \
"""<vosi:capabilities xmlns:vr="http://www.ivoa.net/xml/VOResource/v1.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:vs="http://www.ivoa.net/xml/VODataService/v1.1" xmlns:vosi="http://www.ivoa.net/xml/VOSICapabilities/v1.0" version="1.1">
<capability standardID="ivo://ivoa.net/std/ExampleDM#DAL">
<interface xsi:type="vs:ParamHTTP">
<accessURL use="full">http://www.example.com/mydalinterface/</accessURL>
</interface>
</capability>
</vosi:capabilities>
"""
self.maxDiff = None
self.assertEqual(response, expected)
class Vosi_TestCase(TestCase):
def setUp(self):
ao_up = AvailabilityOption.objects.create(id="1", available=True, note="service is up", appname="example1")
ao_up.save()
ao_down = AvailabilityOption.objects.create(id="2", available=False, note="service is down", appname="example1")
ao_down.save()
a = Availability.objects.create(enabled=ao_up, appname="example1")
a.save()
ao_up = AvailabilityOption.objects.create(id="3", available=True, note="This service is up", appname="example2")
ao_up.save()
ao_down = AvailabilityOption.objects.create(id="4", available=False, note="This service is down", appname="example2")
ao_down.save()
a = Availability.objects.create(enabled=ao_down, appname="example2")
a.save()
cap = VOResource_Capability.objects.create(
id="1",
standardID='ivo://ivoa.net/std/ExampleDM#DAL',
description='Example model',
appname="example1")
cap.save()
iface = VOResource_Interface.objects.create(
id="2",
type="vs:ParamHTTP",
capability=cap,
version="1.0",
role='std'
)
iface.save()
aurl = VOResource_AccessURL.objects.create(
interface=iface,
url="http://www.example.com/mydalinterface/",
use="full"
)
aurl.save()
cap = VOResource_Capability.objects.create(
id="2",
standardID='ivo://ivoa.net/std/Example2DM#DAL',
description='Example2 model',
appname="example2")
cap.save()
iface = VOResource_Interface.objects.create(
id="3",
type="vs:ParamHTTP",
capability=cap,
version="2.0",
role='std'
)
iface.save()
aurl = VOResource_AccessURL.objects.create(
interface=iface,
url="http://www.example2.com/mydalinterface/",
use="full"
)
aurl.save()
def test_get_availability(self):
client = Client()
response = client.get(reverse('vosi:availability'))
self.assertEqual(response.status_code, 200)
content = response.content
# remove comment from content
content = re.sub(r'<!--.*\n.*\n.*\n.*-->\n', '', content)
expected = \
u"""<?xml version="1.0" encoding="utf-8"?>
<vosi:availability version="1.1" xmlns:vosi="http://www.ivoa.net/xml/VOSIAvailability/v1.0"><vosi:available>true</vosi:available><vosi:note>Service is ready for requests</vosi:note></vosi:availability>"""
self.maxDiff = None
self.assertEqual(content, expected)
def test_get_availability_example1(self):
client = Client()
#how to set: request.resolver_match.app_name = 'example_app'??
response = client.get(reverse('example1:availability'))#, app_name = 'example_app'))
self.assertEqual(response.status_code, 200)
content = response.content
# remove comment from content
content = re.sub(r'<!--.*\n.*\n.*\n.*-->\n', '', content)
expected = \
u"""<?xml version="1.0" encoding="utf-8"?>
<vosi:availability version="1.1" xmlns:vosi="http://www.ivoa.net/xml/VOSIAvailability/v1.0"><vosi:available>true</vosi:available><vosi:note>service is up</vosi:note></vosi:availability>"""
self.maxDiff = None
self.assertEqual(content, expected)
def test_get_availability_example2(self):
client = Client()
#how to set: request.resolver_match.app_name = 'example_app'??
response = client.get(reverse('example2:availability'))#, app_name = 'example_app'))
self.assertEqual(response.status_code, 200)
content = response.content
# remove comment from content
content = re.sub(r'<!--.*\n.*\n.*\n.*-->\n', '', content)
expected = \
u"""<?xml version="1.0" encoding="utf-8"?>
<vosi:availability version="1.1" xmlns:vosi="http://www.ivoa.net/xml/VOSIAvailability/v1.0"><vosi:available>false</vosi:available><vosi:note>This service is down</vosi:note></vosi:availability>"""
self.maxDiff = None
self.assertEqual(content, expected)
def test_get_capabilities(self):
client = Client()
response = client.get(reverse('vosi:capabilities'))
self.assertEqual(response.status_code, 200)
content = response.content
# remove comment from content
content = re.sub(r'<!--.*\n.*\n.*\n.*-->\n', '', content)
expected = \
u"""<?xml version="1.0" encoding="utf-8"?>
<vosi:capabilities xmlns:vr="http://www.ivoa.net/xml/VOResource/v1.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" version="1.1" xmlns:vs="http://www.ivoa.net/xml/VODataService/v1.1" xmlns:vosi="http://www.ivoa.net/xml/VOSICapabilities/v1.0"><capability standardID="ivo://ivoa.net/std/ExampleDM#DAL"><interface xsi:type="vs:ParamHTTP"><accessURL use="full">http://www.example.com/mydalinterface/</accessURL></interface></capability><capability standardID="ivo://ivoa.net/std/Example2DM#DAL"><interface xsi:type="vs:ParamHTTP"><accessURL use="full">http://www.example2.com/mydalinterface/</accessURL></interface></capability></vosi:capabilities>"""
self.maxDiff = None
self.assertEqual(content, expected)
def test_get_capabilities_example1(self):
client = Client()
response = client.get(reverse('example1:capabilities'))
self.assertEqual(response.status_code, 200)
content = response.content
# remove comment from content
content = re.sub(r'<!--.*\n.*\n.*\n.*-->\n', '', content)
expected = \
u"""<?xml version="1.0" encoding="utf-8"?>
<vosi:capabilities xmlns:vr="http://www.ivoa.net/xml/VOResource/v1.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" version="1.1" xmlns:vs="http://www.ivoa.net/xml/VODataService/v1.1" xmlns:vosi="http://www.ivoa.net/xml/VOSICapabilities/v1.0"><capability standardID="ivo://ivoa.net/std/ExampleDM#DAL"><interface xsi:type="vs:ParamHTTP"><accessURL use="full">http://www.example.com/mydalinterface/</accessURL></interface></capability></vosi:capabilities>"""
self.maxDiff = None
self.assertEqual(content, expected)
def test_get_capabilities_example2(self):
client = Client()
response = client.get(reverse('example2:capabilities'))
self.assertEqual(response.status_code, 200)
content = response.content
# remove comment from content
content = re.sub(r'<!--.*\n.*\n.*\n.*-->\n', '', content)
expected = \
u"""<?xml version="1.0" encoding="utf-8"?>
<vosi:capabilities xmlns:vr="http://www.ivoa.net/xml/VOResource/v1.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" version="1.1" xmlns:vs="http://www.ivoa.net/xml/VODataService/v1.1" xmlns:vosi="http://www.ivoa.net/xml/VOSICapabilities/v1.0"><capability standardID="ivo://ivoa.net/std/Example2DM#DAL"><interface xsi:type="vs:ParamHTTP"><accessURL use="full">http://www.example2.com/mydalinterface/</accessURL></interface></capability></vosi:capabilities>"""
self.maxDiff = None
self.assertEqual(content, expected)
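For reference, the `remove_comment` helper above targets exactly one four-line XML comment, the stamp the VOSI renderers prepend to their output; a minimal check of that behaviour:

xml = ('<!-- comment line 1\nline 2\nline 3\n-->\n'
       '<vosi:availability/>')
print(remove_comment(xml))   # -> '<vosi:availability/>'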
| 45.552419
| 656
| 0.654067
| 1,355
| 11,297
| 5.391882
| 0.098893
| 0.032576
| 0.030112
| 0.038325
| 0.864221
| 0.827539
| 0.80824
| 0.786614
| 0.768273
| 0.735423
| 0
| 0.018404
| 0.182349
| 11,297
| 247
| 657
| 45.736842
| 0.772545
| 0.032487
| 0
| 0.653631
| 0
| 0
| 0.11899
| 0.048504
| 0
| 0
| 0
| 0
| 0.089385
| 1
| 0.072626
| false
| 0
| 0.050279
| 0
| 0.145251
| 0.011173
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5a47364e373769a7b931220039dc3d3ea459e2bd
| 23,348
|
py
|
Python
|
tests/test_converters.py
|
YVautrin/xmlschema
|
c0363bc56b1371ba4904ad5aeb1c3c3dee227350
|
[
"MIT"
] | 176
|
2019-07-08T00:15:03.000Z
|
2022-03-24T14:17:42.000Z
|
tests/test_converters.py
|
YVautrin/xmlschema
|
c0363bc56b1371ba4904ad5aeb1c3c3dee227350
|
[
"MIT"
] | 168
|
2019-07-01T14:49:03.000Z
|
2022-03-28T10:55:38.000Z
|
tests/test_converters.py
|
YVautrin/xmlschema
|
c0363bc56b1371ba4904ad5aeb1c3c3dee227350
|
[
"MIT"
] | 44
|
2019-08-21T22:59:02.000Z
|
2022-02-28T08:50:13.000Z
|
#!/usr/bin/env python
#
# Copyright (c), 2018-2020, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <brunato@sissa.it>
#
import unittest
import xml.etree.ElementTree as ElementTree
from pathlib import Path
try:
import lxml.etree as lxml_etree
except ImportError:
lxml_etree = None
from xmlschema import XMLSchema, XMLSchemaValidationError, fetch_namespaces
from xmlschema.etree import etree_element
from xmlschema.dataobjects import DataElement
from xmlschema.testing import etree_elements_assert_equal
from xmlschema.converters import XMLSchemaConverter, UnorderedConverter, \
ParkerConverter, BadgerFishConverter, AbderaConverter, JsonMLConverter, \
ColumnarConverter
from xmlschema.dataobjects import DataElementConverter
class TestConverters(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.col_xsd_filename = cls.casepath('examples/collection/collection.xsd')
cls.col_xml_filename = cls.casepath('examples/collection/collection.xml')
cls.col_xml_root = ElementTree.parse(cls.col_xml_filename).getroot()
cls.col_nsmap = fetch_namespaces(cls.col_xml_filename)
cls.col_namespace = cls.col_nsmap['col']
if lxml_etree is not None:
cls.col_lxml_root = lxml_etree.parse(cls.col_xml_filename).getroot()
else:
cls.col_lxml_root = None
@classmethod
def casepath(cls, relative_path):
return str(Path(__file__).parent.joinpath('test_cases', relative_path))
def test_element_class_argument(self):
converter = XMLSchemaConverter()
self.assertIs(converter.etree_element_class, etree_element)
converter = XMLSchemaConverter(etree_element_class=etree_element)
self.assertIs(converter.etree_element_class, etree_element)
if lxml_etree is not None:
converter = XMLSchemaConverter(etree_element_class=lxml_etree.Element)
self.assertIs(converter.etree_element_class, lxml_etree.Element)
def test_prefix_arguments(self):
converter = XMLSchemaConverter(cdata_prefix='#')
self.assertEqual(converter.cdata_prefix, '#')
converter = XMLSchemaConverter(attr_prefix='%')
self.assertEqual(converter.attr_prefix, '%')
converter = XMLSchemaConverter(attr_prefix='_')
self.assertEqual(converter.attr_prefix, '_')
converter = XMLSchemaConverter(attr_prefix='attribute__')
self.assertEqual(converter.attr_prefix, 'attribute__')
converter = XMLSchemaConverter(text_key='text__')
self.assertEqual(converter.text_key, 'text__')
def test_strip_namespace_argument(self):
# Test for issue #161
converter = XMLSchemaConverter(strip_namespaces=True)
col_xsd_filename = self.casepath('examples/collection/collection.xsd')
col_xml_filename = self.casepath('examples/collection/collection.xml')
col_schema = XMLSchema(col_xsd_filename, converter=converter)
self.assertIn('@xmlns:', str(col_schema.decode(col_xml_filename, strip_namespaces=False)))
self.assertNotIn('@xmlns:', str(col_schema.decode(col_xml_filename)))
def test_lossy_property(self):
self.assertTrue(XMLSchemaConverter().lossy)
self.assertFalse(XMLSchemaConverter(cdata_prefix='#').lossy)
def test_cdata_mapping(self):
schema = XMLSchema("""
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="root">
<xs:complexType mixed="true">
<xs:sequence>
<xs:element name="node" type="xs:string" maxOccurs="unbounded"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:schema>
""")
self.assertEqual(
schema.decode('<root>1<node/>2<node/>3</root>'), {'node': [None, None]}
)
self.assertEqual(
schema.decode('<root>1<node/>2<node/>3</root>', cdata_prefix='#'),
{'#1': '1', 'node': [None, None], '#2': '2', '#3': '3'}
)
def test_preserve_root__issue_215(self):
schema = XMLSchema("""
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns="http://xmlschema.test/ns"
targetNamespace="http://xmlschema.test/ns">
<xs:element name="a">
<xs:complexType>
<xs:sequence>
<xs:element name="b1" type="xs:string" maxOccurs="unbounded"/>
<xs:element name="b2" type="xs:string" maxOccurs="unbounded"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:schema>
""")
xml_data = """<tns:a xmlns:tns="http://xmlschema.test/ns"><b1/><b2/></tns:a>"""
obj = schema.decode(xml_data)
self.assertListEqual(list(obj), ['@xmlns:tns', 'b1', 'b2'])
self.assertEqual(schema.encode(obj).tag, '{http://xmlschema.test/ns}a')
obj = schema.decode(xml_data, preserve_root=True)
self.assertListEqual(list(obj), ['tns:a'])
root = schema.encode(obj, preserve_root=True, path='tns:a',
namespaces={'tns': 'http://xmlschema.test/ns'})
self.assertEqual(root.tag, '{http://xmlschema.test/ns}a')
root = schema.encode(obj, preserve_root=True, path='{http://xmlschema.test/ns}a')
self.assertEqual(root.tag, '{http://xmlschema.test/ns}a')
root = schema.encode(obj, preserve_root=True)
self.assertEqual(root.tag, '{http://xmlschema.test/ns}a')
def test_etree_element_method(self):
converter = XMLSchemaConverter()
elem = converter.etree_element('A')
self.assertIsNone(etree_elements_assert_equal(elem, etree_element('A')))
elem = converter.etree_element('A', attrib={})
self.assertIsNone(etree_elements_assert_equal(elem, etree_element('A')))
def test_columnar_converter(self):
col_schema = XMLSchema(self.col_xsd_filename, converter=ColumnarConverter)
obj = col_schema.decode(self.col_xml_filename)
self.assertIn("'authorid'", str(obj))
self.assertNotIn("'author_id'", str(obj))
self.assertNotIn("'author__id'", str(obj))
obj = col_schema.decode(self.col_xml_filename, attr_prefix='_')
self.assertNotIn("'authorid'", str(obj))
self.assertIn("'author_id'", str(obj))
self.assertNotIn("'author__id'", str(obj))
obj = col_schema.decode(self.col_xml_filename, attr_prefix='__')
self.assertNotIn("'authorid'", str(obj))
self.assertNotIn("'author_id'", str(obj))
self.assertIn("'author__id'", str(obj))
col_schema = XMLSchema(self.col_xsd_filename)
obj = col_schema.decode(self.col_xml_filename, converter=ColumnarConverter,
attr_prefix='__')
self.assertNotIn("'authorid'", str(obj))
self.assertNotIn("'author_id'", str(obj))
self.assertIn("'author__id'", str(obj))
def test_data_element_converter(self):
col_schema = XMLSchema(self.col_xsd_filename, converter=DataElementConverter)
obj = col_schema.decode(self.col_xml_filename)
self.assertIsInstance(obj, DataElement)
self.assertEqual(obj.tag, self.col_xml_root.tag)
self.assertEqual(obj.nsmap, self.col_nsmap)
def test_decode_encode_default_converter(self):
col_schema = XMLSchema(self.col_xsd_filename)
# Decode from XML file
obj1 = col_schema.decode(self.col_xml_filename)
self.assertIn("'@xmlns:col'", repr(obj1))
root = col_schema.encode(obj1, path='./col:collection', namespaces=self.col_nsmap)
self.assertIsNone(etree_elements_assert_equal(self.col_xml_root, root, strict=False))
root = col_schema.encode(obj1)
self.assertIsNone(etree_elements_assert_equal(self.col_xml_root, root, strict=False))
# Decode from lxml.etree.Element tree
if self.col_lxml_root is not None:
obj2 = col_schema.decode(self.col_lxml_root)
self.assertIn("'@xmlns:col'", repr(obj2))
self.assertEqual(obj1, obj2)
# Decode from ElementTree.Element tree providing namespaces
obj2 = col_schema.decode(self.col_xml_root, namespaces=self.col_nsmap)
self.assertIn("'@xmlns:col'", repr(obj2))
self.assertEqual(obj1, obj2)
# Decode from ElementTree.Element tree without namespaces
obj2 = col_schema.decode(self.col_xml_root)
self.assertNotIn("'@xmlns:col'", repr(obj2))
self.assertNotEqual(obj1, obj2)
root = col_schema.encode(obj2, path='./col:collection', namespaces=self.col_nsmap)
self.assertIsNone(etree_elements_assert_equal(self.col_xml_root, root, strict=False))
root = col_schema.encode(obj2) # No namespace unmap is required
self.assertIsNone(etree_elements_assert_equal(self.col_xml_root, root, strict=False))
def test_decode_encode_default_converter_with_preserve_root(self):
col_schema = XMLSchema(self.col_xsd_filename)
# Decode from XML file
obj1 = col_schema.decode(self.col_xml_filename, preserve_root=True)
self.assertIn("'col:collection'", repr(obj1))
self.assertIn("'@xmlns:col'", repr(obj1))
root = col_schema.encode(obj1, path='./col:collection', namespaces=self.col_nsmap,
preserve_root=True)
self.assertIsNone(etree_elements_assert_equal(self.col_xml_root, root, strict=False))
root = col_schema.encode(obj1, preserve_root=True)
self.assertIsNone(etree_elements_assert_equal(self.col_xml_root, root, strict=False))
# Decode from lxml.etree.Element tree
if self.col_lxml_root is not None:
obj2 = col_schema.decode(self.col_lxml_root, preserve_root=True)
self.assertIn("'col:collection'", repr(obj2))
self.assertIn("'@xmlns:col'", repr(obj2))
self.assertEqual(obj1, obj2)
# Decode from ElementTree.Element tree providing namespaces
obj2 = col_schema.decode(self.col_xml_root, namespaces=self.col_nsmap, preserve_root=True)
self.assertIn("'col:collection'", repr(obj2))
self.assertIn("'@xmlns:col'", repr(obj2))
self.assertEqual(obj1, obj2)
# Decode from ElementTree.Element tree without namespaces
obj2 = col_schema.decode(self.col_xml_root, preserve_root=True)
self.assertNotIn("'col:collection'", repr(obj2))
self.assertNotIn("'@xmlns:col'", repr(obj2))
self.assertNotEqual(obj1, obj2)
root = col_schema.encode(obj2, path='./col:collection',
namespaces=self.col_nsmap, preserve_root=True)
self.assertIsNone(etree_elements_assert_equal(self.col_xml_root, root, strict=False))
root = col_schema.encode(obj2, preserve_root=True) # No namespace unmap is required
self.assertIsNone(etree_elements_assert_equal(self.col_xml_root, root, strict=False))
def test_decode_encode_unordered_converter(self):
col_schema = XMLSchema(self.col_xsd_filename, converter=UnorderedConverter)
# Decode from XML file
obj1 = col_schema.decode(self.col_xml_filename)
self.assertIn("'@xmlns:col'", repr(obj1))
root = col_schema.encode(obj1, path='./col:collection', namespaces=self.col_nsmap)
self.assertIsNone(etree_elements_assert_equal(self.col_xml_root, root, strict=False))
root = col_schema.encode(obj1)
self.assertIsNone(etree_elements_assert_equal(self.col_xml_root, root, strict=False))
# Decode from lxml.etree.Element tree
if self.col_lxml_root is not None:
obj2 = col_schema.decode(self.col_lxml_root)
self.assertIn("'@xmlns:col'", repr(obj2))
self.assertEqual(obj1, obj2)
# Decode from ElementTree.Element tree providing namespaces
obj2 = col_schema.decode(self.col_xml_root, namespaces=self.col_nsmap)
self.assertIn("'@xmlns:col'", repr(obj2))
self.assertEqual(obj1, obj2)
# Decode from ElementTree.Element tree without namespaces
obj2 = col_schema.decode(self.col_xml_root)
self.assertNotIn("'@xmlns:col'", repr(obj2))
self.assertNotEqual(obj1, obj2)
root = col_schema.encode(obj2, path='./col:collection', namespaces=self.col_nsmap)
self.assertIsNone(etree_elements_assert_equal(self.col_xml_root, root, strict=False))
root = col_schema.encode(obj2) # No namespace unmap is required
self.assertIsNone(etree_elements_assert_equal(self.col_xml_root, root, strict=False))
def test_decode_encode_unordered_converter_with_preserve_root(self):
col_schema = XMLSchema(self.col_xsd_filename, converter=UnorderedConverter)
# Decode from XML file
obj1 = col_schema.decode(self.col_xml_filename, preserve_root=True)
self.assertIn("'col:collection'", repr(obj1))
self.assertIn("'@xmlns:col'", repr(obj1))
root = col_schema.encode(obj1, path='./col:collection', namespaces=self.col_nsmap,
preserve_root=True)
self.assertIsNone(etree_elements_assert_equal(self.col_xml_root, root, strict=False))
root = col_schema.encode(obj1, preserve_root=True)
self.assertIsNone(etree_elements_assert_equal(self.col_xml_root, root, strict=False))
# Decode from lxml.etree.Element tree
if self.col_lxml_root is not None:
obj2 = col_schema.decode(self.col_lxml_root, preserve_root=True)
self.assertIn("'col:collection'", repr(obj2))
self.assertIn("'@xmlns:col'", repr(obj2))
self.assertEqual(obj1, obj2)
# Decode from ElementTree.Element tree providing namespaces
obj2 = col_schema.decode(self.col_xml_root, namespaces=self.col_nsmap, preserve_root=True)
self.assertIn("'col:collection'", repr(obj2))
self.assertIn("'@xmlns:col'", repr(obj2))
self.assertEqual(obj1, obj2)
# Decode from ElementTree.Element tree without namespaces
obj2 = col_schema.decode(self.col_xml_root, preserve_root=True)
self.assertNotIn("'col:collection'", repr(obj2))
self.assertNotIn("'@xmlns:col'", repr(obj2))
self.assertNotEqual(obj1, obj2)
root = col_schema.encode(obj2, path='./col:collection',
namespaces=self.col_nsmap, preserve_root=True)
self.assertIsNone(etree_elements_assert_equal(self.col_xml_root, root, strict=False))
root = col_schema.encode(obj2, preserve_root=True) # No namespace unmap is required
self.assertIsNone(etree_elements_assert_equal(self.col_xml_root, root, strict=False))
def test_decode_encode_parker_converter(self):
col_schema = XMLSchema(self.col_xsd_filename, converter=ParkerConverter)
obj1 = col_schema.decode(self.col_xml_filename)
with self.assertRaises(XMLSchemaValidationError) as ec:
col_schema.encode(obj1, path='./col:collection', namespaces=self.col_nsmap)
self.assertIn("missing required attribute 'id'", str(ec.exception))
def test_decode_encode_badger_fish_converter(self):
col_schema = XMLSchema(self.col_xsd_filename, converter=BadgerFishConverter)
obj1 = col_schema.decode(self.col_xml_filename)
self.assertIn("'@xmlns'", repr(obj1))
root = col_schema.encode(obj1, path='./col:collection', namespaces=self.col_nsmap)
self.assertIsNone(etree_elements_assert_equal(self.col_xml_root, root, strict=False))
root = col_schema.encode(obj1)
self.assertIsNone(etree_elements_assert_equal(self.col_xml_root, root, strict=False))
# With ElementTree namespaces are not mapped
obj2 = col_schema.decode(self.col_xml_root)
self.assertNotIn("'@xmlns'", repr(obj2))
self.assertNotEqual(obj1, obj2)
self.assertEqual(obj1, col_schema.decode(self.col_xml_root, namespaces=self.col_nsmap))
# With lxml.etree namespaces are mapped
if self.col_lxml_root is not None:
self.assertEqual(obj1, col_schema.decode(self.col_lxml_root))
root = col_schema.encode(obj2, path='./col:collection', namespaces=self.col_nsmap)
self.assertIsNone(etree_elements_assert_equal(self.col_xml_root, root, strict=False))
root = col_schema.encode(obj2) # No namespace unmap is required
self.assertIsNone(etree_elements_assert_equal(self.col_xml_root, root, strict=False))
def test_decode_encode_abdera_converter(self):
col_schema = XMLSchema(self.col_xsd_filename, converter=AbderaConverter)
obj1 = col_schema.decode(self.col_xml_filename)
root = col_schema.encode(obj1, path='./col:collection', namespaces=self.col_nsmap)
self.assertIsNone(etree_elements_assert_equal(self.col_xml_root, root, strict=False))
# Namespace mapping is required
with self.assertRaises(XMLSchemaValidationError) as ec:
col_schema.encode(obj1, path='./{%s}collection' % self.col_namespace)
self.assertIn("'xsi:schemaLocation' attribute not allowed", str(ec.exception))
# With ElementTree namespaces are not mapped
obj2 = col_schema.decode(self.col_xml_root)
self.assertNotEqual(obj1, obj2)
self.assertEqual(obj1, col_schema.decode(self.col_xml_root, namespaces=self.col_nsmap))
# With lxml.etree namespaces are mapped
if self.col_lxml_root is not None:
self.assertEqual(obj1, col_schema.decode(self.col_lxml_root))
root = col_schema.encode(obj2, path='./col:collection', namespaces=self.col_nsmap)
self.assertIsNone(etree_elements_assert_equal(self.col_xml_root, root, strict=False))
root = col_schema.encode(obj2) # No namespace unmap is required
self.assertIsNone(etree_elements_assert_equal(self.col_xml_root, root, strict=False))
def test_decode_encode_jsonml_converter(self):
col_schema = XMLSchema(self.col_xsd_filename, converter=JsonMLConverter)
obj1 = col_schema.decode(self.col_xml_filename)
self.assertIn('col:collection', repr(obj1))
self.assertIn('xmlns:col', repr(obj1))
root = col_schema.encode(obj1, path='./col:collection', namespaces=self.col_nsmap)
self.assertIsNone(etree_elements_assert_equal(self.col_xml_root, root, strict=False))
root = col_schema.encode(obj1, path='./{%s}collection' % self.col_namespace)
self.assertIsNone(etree_elements_assert_equal(self.col_xml_root, root, strict=False))
root = col_schema.encode(obj1)
self.assertIsNone(etree_elements_assert_equal(self.col_xml_root, root, strict=False))
# With ElementTree namespaces are not mapped
obj2 = col_schema.decode(self.col_xml_root)
self.assertNotIn('col:collection', repr(obj2))
self.assertNotEqual(obj1, obj2)
self.assertEqual(obj1, col_schema.decode(self.col_xml_root, namespaces=self.col_nsmap))
# With lxml.etree namespaces are mapped
if self.col_lxml_root is not None:
self.assertEqual(obj1, col_schema.decode(self.col_lxml_root))
root = col_schema.encode(obj2, path='./col:collection', namespaces=self.col_nsmap)
self.assertIsNone(etree_elements_assert_equal(self.col_xml_root, root, strict=False))
root = col_schema.encode(obj2) # No namespace unmap is required
self.assertIsNone(etree_elements_assert_equal(self.col_xml_root, root, strict=False))
def test_decode_encode_columnar_converter(self):
col_schema = XMLSchema(self.col_xsd_filename, converter=ColumnarConverter)
obj1 = col_schema.decode(self.col_xml_filename)
root = col_schema.encode(obj1, path='./col:collection', namespaces=self.col_nsmap)
self.assertIsNone(etree_elements_assert_equal(self.col_xml_root, root, strict=False))
# Namespace mapping is required
with self.assertRaises(XMLSchemaValidationError) as ec:
col_schema.encode(obj1, path='./{%s}collection' % self.col_namespace)
self.assertIn("'xsi:schemaLocation' attribute not allowed", str(ec.exception))
# With ElementTree namespaces are not mapped
obj2 = col_schema.decode(self.col_xml_root)
self.assertNotEqual(obj1, obj2)
self.assertEqual(obj1, col_schema.decode(self.col_xml_root, namespaces=self.col_nsmap))
# With lxml.etree namespaces are mapped
if self.col_lxml_root is not None:
self.assertEqual(obj1, col_schema.decode(self.col_lxml_root))
root = col_schema.encode(obj2, path='./col:collection', namespaces=self.col_nsmap)
self.assertIsNone(etree_elements_assert_equal(self.col_xml_root, root, strict=False))
root = col_schema.encode(obj2) # No namespace unmap is required
self.assertIsNone(etree_elements_assert_equal(self.col_xml_root, root, strict=False))
def test_decode_encode_data_element_converter(self):
col_schema = XMLSchema(self.col_xsd_filename, converter=DataElementConverter)
obj1 = col_schema.decode(self.col_xml_filename)
# self.assertIn('col:collection', repr(obj1))
self.assertIn('col', obj1.nsmap)
root = col_schema.encode(obj1, path='./col:collection', namespaces=self.col_nsmap)
self.assertIsNone(etree_elements_assert_equal(self.col_xml_root, root, strict=False))
root = col_schema.encode(obj1, path='./{%s}collection' % self.col_namespace)
self.assertIsNone(etree_elements_assert_equal(self.col_xml_root, root, strict=False))
root = col_schema.encode(obj1)
self.assertIsNone(etree_elements_assert_equal(self.col_xml_root, root, strict=False))
# With ElementTree namespaces are not mapped
obj2 = col_schema.decode(self.col_xml_root)
# Equivalent if compared as Element trees (tag, text, attrib, tail)
self.assertIsNone(etree_elements_assert_equal(obj1, obj2))
self.assertIsNone(etree_elements_assert_equal(
obj1, col_schema.decode(self.col_xml_root, namespaces=self.col_nsmap)
))
# With lxml.etree namespaces are mapped
if self.col_lxml_root is not None:
self.assertIsNone(etree_elements_assert_equal(
obj1, col_schema.decode(self.col_lxml_root)
))
root = col_schema.encode(obj2, path='./col:collection', namespaces=self.col_nsmap)
self.assertIsNone(etree_elements_assert_equal(self.col_xml_root, root, strict=False))
root = col_schema.encode(obj2) # No namespace unmap is required
self.assertIsNone(etree_elements_assert_equal(self.col_xml_root, root, strict=False))
if __name__ == '__main__':
import platform
header_template = "Test xmlschema converters with Python {} on {}"
header = header_template.format(platform.python_version(), platform.platform())
print('{0}\n{1}\n{0}'.format("*" * len(header), header))
unittest.main()
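A standalone sketch of the cdata mapping exercised in `test_cdata_mapping` above, runnable outside the test class (schema text copied from the test):

from xmlschema import XMLSchema

schema = XMLSchema("""
    <xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
      <xs:element name="root">
        <xs:complexType mixed="true">
          <xs:sequence>
            <xs:element name="node" type="xs:string" maxOccurs="unbounded"/>
          </xs:sequence>
        </xs:complexType>
      </xs:element>
    </xs:schema>""")

print(schema.decode('<root>1<node/>2<node/>3</root>'))
# {'node': [None, None]} -- interleaved text is dropped by default
print(schema.decode('<root>1<node/>2<node/>3</root>', cdata_prefix='#'))
# {'#1': '1', 'node': [None, None], '#2': '2', '#3': '3'}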
| 45.512671
| 98
| 0.686654
| 2,885
| 23,348
| 5.32409
| 0.073484
| 0.066536
| 0.045573
| 0.05013
| 0.830534
| 0.810482
| 0.780143
| 0.774089
| 0.753451
| 0.741536
| 0
| 0.009953
| 0.199632
| 23,348
| 512
| 99
| 45.601563
| 0.812008
| 0.080992
| 0
| 0.638728
| 0
| 0.00289
| 0.120247
| 0.01238
| 0
| 0
| 0
| 0
| 0.395954
| 1
| 0.060694
| false
| 0
| 0.034682
| 0.00289
| 0.101156
| 0.00289
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
cebc3a4ad50ba50393c8031955dc3ec66cb8b2dd
| 37
|
py
|
Python
|
marslab/tests/test_mertools.py
|
AndrewAnnex/marslab
|
dde6bcd627ff85d9125d4abfe06432fe241f4ca1
|
[
"BSD-3-Clause"
] | null | null | null |
marslab/tests/test_mertools.py
|
AndrewAnnex/marslab
|
dde6bcd627ff85d9125d4abfe06432fe241f4ca1
|
[
"BSD-3-Clause"
] | null | null | null |
marslab/tests/test_mertools.py
|
AndrewAnnex/marslab
|
dde6bcd627ff85d9125d4abfe06432fe241f4ca1
|
[
"BSD-3-Clause"
] | null | null | null |
from marslab.compat import mertools
| 12.333333
| 35
| 0.837838
| 5
| 37
| 6.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135135
| 37
| 2
| 36
| 18.5
| 0.96875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0c9015cd339048f9d2284b25d6889c7b52bcd4dd
| 36,470
|
py
|
Python
|
arch/ae.py
|
archishman/KiU-Net-pytorch
|
8c74fd8836e95834f8d247153059d2fe0c42bb20
|
[
"MIT"
] | null | null | null |
arch/ae.py
|
archishman/KiU-Net-pytorch
|
8c74fd8836e95834f8d247153059d2fe0c42bb20
|
[
"MIT"
] | null | null | null |
arch/ae.py
|
archishman/KiU-Net-pytorch
|
8c74fd8836e95834f8d247153059d2fe0c42bb20
|
[
"MIT"
] | null | null | null |
# Code for KiU-Net
# Author: Jeya Maria Jose
import torch
import torchvision
from torch import nn
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import save_image
from torchvision.datasets import MNIST
import torch.nn.functional as F
import os
import matplotlib.pyplot as plt
class autoencoder(nn.Module):
def __init__(self):
super(autoencoder, self).__init__()
self.encoder1 = nn.Conv2d(3, 64, 3, stride=1, padding=1) # b, 16, 10, 10
self.encoder2= nn.Conv2d(64, 128, 3, stride=1, padding=1) # b, 8, 3, 3
self.encoder3= nn.Conv2d(128, 256, 3, stride=1, padding=1)
self.encoder4= nn.Conv2d(256, 512, 3, stride=1, padding=1)
self.encoder5= nn.Conv2d(512, 1024, 3, stride=1, padding=1)
self.decoder1 = nn.Conv2d(1024, 512, 3, stride=1,padding=2) # b, 16, 5, 5
self.decoder2 = nn.Conv2d(512, 256, 3, stride=1, padding=2) # b, 8, 15, 1
self.decoder3 = nn.Conv2d(256, 128, 3, stride=1, padding=1) # b, 1, 28, 28
self.decoder4 = nn.Conv2d(128, 64, 3, stride=1, padding=1)
self.decoder5 = nn.Conv2d(64, 2, 3, stride=1, padding=1)
self.soft = nn.Softmax(dim =1)
def forward(self, x):
out = F.relu(F.max_pool2d(self.encoder1(x),2,2))
out = F.relu(F.max_pool2d(self.encoder2(out),2,2))
out = F.relu(F.max_pool2d(self.encoder3(out),2,2))
out = F.relu(F.interpolate(self.decoder3(out),scale_factor=(2,2),mode ='bilinear'))
out = F.relu(F.interpolate(self.decoder4(out),scale_factor=(2,2),mode ='bilinear'))
out = F.relu(F.interpolate(self.decoder5(out),scale_factor=(2,2),mode ='bilinear'))
# print(out.shape)
# out = self.soft(out)
return out
class unet(nn.Module):
def __init__(self):
super(unet, self).__init__()
self.encoder1 = nn.Conv2d(3, 32, 3, stride=1, padding=1) # b, 16, 10, 10
self.encoder2= nn.Conv2d(32, 64, 3, stride=1, padding=1) # b, 8, 3, 3
self.encoder3= nn.Conv2d(64, 128, 3, stride=1, padding=1)
self.encoder4= nn.Conv2d(128, 256, 3, stride=1, padding=1)
self.encoder5= nn.Conv2d(256, 512, 3, stride=1, padding=1)
self.decoder1 = nn.Conv2d(512, 256, 3, stride=1,padding=1) # b, 16, 5, 5
self.decoder2 = nn.Conv2d(256, 128, 3, stride=1, padding=1) # b, 8, 15, 1
self.decoder3 = nn.Conv2d(128, 64, 3, stride=1, padding=1) # b, 1, 28, 28
self.decoder4 = nn.Conv2d(64, 32, 3, stride=1, padding=1)
self.decoder5 = nn.Conv2d(32, 2, 3, stride=1, padding=1)
self.soft = nn.Softmax(dim =1)
def forward(self, x):
out = F.relu(F.max_pool2d(self.encoder1(x),2,2))
t1 = out
out = F.relu(F.max_pool2d(self.encoder2(out),2,2))
t2 = out
out = F.relu(F.max_pool2d(self.encoder3(out),2,2))
t3 = out
out = F.relu(F.max_pool2d(self.encoder4(out),2,2))
t4 = out
out = F.relu(F.max_pool2d(self.encoder5(out),2,2))
# t2 = out
out = F.relu(F.interpolate(self.decoder1(out),scale_factor=(2,2),mode ='bilinear'))
# print(out.shape,t4.shape)
out = torch.add(out,t4)
out = F.relu(F.interpolate(self.decoder2(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,t3)
out = F.relu(F.interpolate(self.decoder3(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,t2)
out = F.relu(F.interpolate(self.decoder4(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,t1)
out = F.relu(F.interpolate(self.decoder5(out),scale_factor=(2,2),mode ='bilinear'))
# print(out.shape)
# out = self.soft(out)
return out
class kinetwithsk(nn.Module):
def __init__(self):
super(kinetwithsk, self).__init__()
self.encoder1 = nn.Conv2d(1, 32, 3, stride=1, padding=1) # b, 16, 10, 10
self.encoder2= nn.Conv2d(32, 64, 3, stride=1, padding=1) # b, 8, 3, 3
self.encoder3= nn.Conv2d(64, 128, 3, stride=1, padding=1)
# self.encoder4= nn.Conv2d(128, 256, 3, stride=1, padding=1)
# self.encoder5= nn.Conv2d(256, 512, 3, stride=1, padding=1)
# self.decoder1 = nn.Conv2d(512, 256, 3, stride=1,padding=2) # b, 16, 5, 5
# self.decoder2 = nn.Conv2d(256, 128, 3, stride=1, padding=2) # b, 8, 15, 1
self.decoder3 = nn.Conv2d(128, 64, 3, stride=1, padding=1) # b, 1, 28, 28
self.decoder4 = nn.Conv2d(64, 32, 3, stride=1, padding=1)
self.decoder5 = nn.Conv2d(32, 2, 3, stride=1, padding=1)
# self.decoderf1 = nn.Conv2d(128, 64, 3, stride=1, padding=1)
# self.decoderf2= nn.Conv2d(64, 32, 3, stride=1, padding=1)
# self.decoderf3 = nn.Conv2d(32, 2, 3, stride=1, padding=1)
# self.encoderf1 = nn.Conv2d(16, 32, 3, stride=1, padding=1)
# self.encoderf2= nn.Conv2d(32, 64, 3, stride=1, padding=1)
# self.encoderf3 = nn.Conv2d(64, 128, 3, stride=1, padding=1)
self.soft = nn.Softmax(dim =1)
def forward(self, x):
out = F.relu(F.interpolate(self.encoder1(x),scale_factor=(2,2),mode ='bilinear'))
t1 = out
out = F.relu(F.interpolate(self.encoder2(out),scale_factor=(2,2),mode ='bilinear'))
t2 = out
out = F.relu(F.interpolate(self.encoder3(out),scale_factor=(2,2),mode ='bilinear'))
# print(out.shape)
out = F.relu(F.max_pool2d(self.decoder3(out),2,2))
out = torch.add(out,t2)
out = F.relu(F.max_pool2d(self.decoder4(out),2,2))
out = torch.add(out,t1)
out = F.relu(F.max_pool2d(self.decoder5(out),2,2))
# out = self.soft(out)
return out
class kitenet(nn.Module):
def __init__(self):
super(kitenet, self).__init__()
self.encoder1 = nn.Conv2d(1, 32, 3, stride=1, padding=1) # b, 16, 10, 10
self.encoder2= nn.Conv2d(32, 64, 3, stride=1, padding=1) # b, 8, 3, 3
self.encoder3= nn.Conv2d(64, 128, 3, stride=1, padding=1)
# self.encoder4= nn.Conv2d(128, 256, 3, stride=1, padding=1)
# self.encoder5= nn.Conv2d(256, 512, 3, stride=1, padding=1)
# self.decoder1 = nn.Conv2d(512, 256, 3, stride=1,padding=2) # b, 16, 5, 5
# self.decoder2 = nn.Conv2d(256, 128, 3, stride=1, padding=2) # b, 8, 15, 1
self.decoder3 = nn.Conv2d(128, 64, 3, stride=1, padding=1) # b, 1, 28, 28
self.decoder4 = nn.Conv2d(64, 32, 3, stride=1, padding=1)
self.decoder5 = nn.Conv2d(32, 2, 3, stride=1, padding=1)
self.soft = nn.Softmax(dim =1)
def forward(self, x):
out = F.relu(F.interpolate(self.encoder1(x),scale_factor=(2,2),mode ='bilinear'))
out = F.relu(F.interpolate(self.encoder2(out),scale_factor=(2,2),mode ='bilinear'))
out = F.relu(F.interpolate(self.encoder3(out),scale_factor=(2,2),mode ='bilinear'))
out = F.relu(F.max_pool2d(self.decoder3(out),2,2))
out = F.relu(F.max_pool2d(self.decoder4(out),2,2))
out = F.relu(F.max_pool2d(self.decoder5(out),2,2))
# out = self.soft(out)
return out
class kiunet(nn.Module):
def __init__(self, size=8):
super(kiunet, self).__init__()
assert size % 2 == 0, 'size must be even'
size //= 2
self.encoder1 = nn.Conv2d(3, size * 2, 3, stride=1, padding=1) # first layer; in_channels=3 for RGB input (use 1 for grayscale)
self.en1_bn = nn.BatchNorm2d(size * 2)
self.encoder2= nn.Conv2d(size * 2, size * 4, 3, stride=1, padding=1)
self.en2_bn = nn.BatchNorm2d(size * 4)
self.encoder3= nn.Conv2d(size * 4, size * 8, 3, stride=1, padding=1)
self.en3_bn = nn.BatchNorm2d(size * 8)
self.decoder1 = nn.Conv2d(size * 8, size * 4, 3, stride=1, padding=1)
self.de1_bn = nn.BatchNorm2d(size * 4)
self.decoder2 = nn.Conv2d(size * 4,size * 2, 3, stride=1, padding=1)
self.de2_bn = nn.BatchNorm2d(size * 2)
self.decoder3 = nn.Conv2d(size * 2, size, 3, stride=1, padding=1)
self.de3_bn = nn.BatchNorm2d(size)
self.decoderf1 = nn.Conv2d(size * 8, size * 4, 3, stride=1, padding=1)
self.def1_bn = nn.BatchNorm2d(size * 4)
self.decoderf2= nn.Conv2d(size * 4, size * 2, 3, stride=1, padding=1)
self.def2_bn = nn.BatchNorm2d(size * 2)
self.decoderf3 = nn.Conv2d(size * 2, size, 3, stride=1, padding=1)
self.def3_bn = nn.BatchNorm2d(size)
self.encoderf1 = nn.Conv2d(3, size * 2, 3, stride=1, padding=1) # Ki-Net branch first layer; in_channels=3 for RGB input (use 1 for grayscale)
self.enf1_bn = nn.BatchNorm2d(size * 2)
self.encoderf2= nn.Conv2d(size * 2, size * 4, 3, stride=1, padding=1)
self.enf2_bn = nn.BatchNorm2d(size * 4)
self.encoderf3 = nn.Conv2d(size * 4, size * 8, 3, stride=1, padding=1)
self.enf3_bn = nn.BatchNorm2d(size * 8)
self.intere1_1 = nn.Conv2d(size * 2,size * 2,3, stride=1, padding=1)
self.inte1_1bn = nn.BatchNorm2d(size * 2)
self.intere2_1 = nn.Conv2d(size * 4,size * 4,3, stride=1, padding=1)
self.inte2_1bn = nn.BatchNorm2d(size * 4)
self.intere3_1 = nn.Conv2d(size * 8,size * 8,3, stride=1, padding=1)
self.inte3_1bn = nn.BatchNorm2d(size * 8)
self.intere1_2 = nn.Conv2d(size * 2,size * 2,3, stride=1, padding=1)
self.inte1_2bn = nn.BatchNorm2d(size * 2)
self.intere2_2 = nn.Conv2d(size * 4,size * 4,3, stride=1, padding=1)
self.inte2_2bn = nn.BatchNorm2d(size * 4)
self.intere3_2 = nn.Conv2d(size * 8,size * 8,3, stride=1, padding=1)
self.inte3_2bn = nn.BatchNorm2d(size * 8)
self.interd1_1 = nn.Conv2d(size * 4,size * 4,3, stride=1, padding=1)
self.intd1_1bn = nn.BatchNorm2d(size * 4)
self.interd2_1 = nn.Conv2d(size * 2,size * 2,3, stride=1, padding=1)
self.intd2_1bn = nn.BatchNorm2d(size * 2)
self.interd3_1 = nn.Conv2d(size * 8,size * 8,3, stride=1, padding=1)
self.intd3_1bn = nn.BatchNorm2d(size * 8)
self.interd1_2 = nn.Conv2d(size * 4,size * 4,3, stride=1, padding=1)
self.intd1_2bn = nn.BatchNorm2d(size * 4)
self.interd2_2 = nn.Conv2d(size * 2,size * 2,3, stride=1, padding=1)
self.intd2_2bn = nn.BatchNorm2d(size * 2)
self.interd3_2 = nn.Conv2d(size * 8,size * 8,3, stride=1, padding=1)
self.intd3_2bn = nn.BatchNorm2d(size * 8)
self.final = nn.Conv2d(size,2,1,stride=1,padding=0)
self.soft = nn.Softmax(dim =1)
def forward(self, x):
out = F.relu(self.en1_bn(F.max_pool2d(self.encoder1(x),2,2))) #U-Net branch
out1 = F.relu(self.enf1_bn(F.interpolate(self.encoderf1(x),scale_factor=(2.,2.),mode ='bilinear'))) #Ki-Net branch
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.inte1_1bn(self.intere1_1(out1))),scale_factor=(0.25,0.25),mode ='bilinear')) #CRFB
out1 = torch.add(out1,F.interpolate(F.relu(self.inte1_2bn(self.intere1_2(tmp))),scale_factor=(4.0,4.0),mode ='bilinear')) #CRFB
u1 = out #skip conn
o1 = out1 #skip conn
out = F.relu(self.en2_bn(F.max_pool2d(self.encoder2(out),2,2)))
out1 = F.relu(self.enf2_bn(F.interpolate(self.encoderf2(out1),scale_factor=(2.0,2.0),mode ='bilinear')))
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.inte2_1bn(self.intere2_1(out1))),scale_factor=(0.0625,0.0625),mode ='bilinear'))
out1 = torch.add(out1,F.interpolate(F.relu(self.inte2_2bn(self.intere2_2(tmp))),scale_factor=(16.,16.),mode ='bilinear'))
u2 = out
o2 = out1
out = F.relu(self.en3_bn(F.max_pool2d(self.encoder3(out),2,2)))
out1 = F.relu(self.enf3_bn(F.interpolate(self.encoderf3(out1),scale_factor=(2.,2.),mode ='bilinear')))
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.inte3_1bn(self.intere3_1(out1))),scale_factor=(0.015625,0.015625),mode ='bilinear'))
out1 = torch.add(out1,F.interpolate(F.relu(self.inte3_2bn(self.intere3_2(tmp))),scale_factor=(64.,64.),mode ='bilinear'))
### End of encoder block
### Start Decoder
out = F.relu(self.de1_bn(F.interpolate(self.decoder1(out),scale_factor=(2.,2.),mode ='bilinear'))) #U-NET
out1 = F.relu(self.def1_bn(F.max_pool2d(self.decoderf1(out1),2,2))) #Ki-NET
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.intd1_1bn(self.interd1_1(out1))),scale_factor=(0.0625,0.0625),mode ='bilinear'))
out1 = torch.add(out1,F.interpolate(F.relu(self.intd1_2bn(self.interd1_2(tmp))),scale_factor=(16.,16.),mode ='bilinear'))
out = torch.add(out,u2) #skip conn
out1 = torch.add(out1,o2) #skip conn
out = F.relu(self.de2_bn(F.interpolate(self.decoder2(out),scale_factor=(2.,2.),mode ='bilinear')))
out1 = F.relu(self.def2_bn(F.max_pool2d(self.decoderf2(out1),2,2)))
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.intd2_1bn(self.interd2_1(out1))),scale_factor=(0.25,0.25),mode ='bilinear'))
out1 = torch.add(out1,F.interpolate(F.relu(self.intd2_2bn(self.interd2_2(tmp))),scale_factor=(4.,4.),mode ='bilinear'))
out = torch.add(out,u1)
out1 = torch.add(out1,o1)
out = F.relu(self.de3_bn(F.interpolate(self.decoder3(out),scale_factor=(2.,2.),mode ='bilinear')))
out1 = F.relu(self.def3_bn(F.max_pool2d(self.decoderf3(out1),2,2)))
out = torch.add(out,out1) # fusion of both branches
out = F.relu(self.final(out)) #1*1 conv
#out = self.soft(out)
return out
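# Usage sketch for kiunet: the paired max_pool2d / interpolate factors above
# assume a 3-channel input whose spatial size is divisible by 8, so that the
# downsampled U-Net branch and the upsampled Ki-Net branch stay aligned, e.g.
#   net = kiunet(size=8)
#   x = torch.randn(1, 3, 64, 64)
#   y = net(x)   # -> shape (1, 2, 64, 64)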
class reskiunet(nn.Module):
def __init__(self):
super(reskiunet, self).__init__()
self.encoder1 = nn.Conv2d(3, 16, 3, stride=1, padding=1)
self.en1 = nn.Conv2d(3, 16, 1, stride=1, padding=0) # b, 16, 10, 10
self.en1_bn = nn.BatchNorm2d(16)
self.encoder2= nn.Conv2d(16, 32, 3, stride=1, padding=1) # b, 8, 3, 3
self.en2= nn.Conv2d(16, 32, 1, stride=1, padding=0)
self.en2_bn = nn.BatchNorm2d(32)
self.encoder3= nn.Conv2d(32, 64, 3, stride=1, padding=1)
self.en3= nn.Conv2d(32, 64, 1, stride=1, padding=0)
self.en3_bn = nn.BatchNorm2d(64)
self.decoder1 = nn.Conv2d(64, 32, 3, stride=1, padding=1) # b, 1, 28, 28
self.de1 = nn.Conv2d(64, 32, 1, stride=1, padding=0)
self.de1_bn = nn.BatchNorm2d(32)
self.decoder2 = nn.Conv2d(32,16, 3, stride=1, padding=1)
self.de2 = nn.Conv2d(32,16, 1, stride=1, padding=0)
self.de2_bn = nn.BatchNorm2d(16)
self.decoder3 = nn.Conv2d(16, 8, 3, stride=1, padding=1)
self.de3 = nn.Conv2d(16, 8, 1, stride=1, padding=0)
self.de3_bn = nn.BatchNorm2d(8)
self.decoderf1 = nn.Conv2d(64, 32, 3, stride=1, padding=1)
self.def1 = nn.Conv2d(64, 32, 1, stride=1, padding=0)
self.def1_bn = nn.BatchNorm2d(32)
self.decoderf2= nn.Conv2d(32, 16, 3, stride=1, padding=1)
self.def2= nn.Conv2d(32, 16, 1, stride=1, padding=0)
self.def2_bn = nn.BatchNorm2d(16)
self.decoderf3 = nn.Conv2d(16, 8, 3, stride=1, padding=1)
self.def3 = nn.Conv2d(16, 8, 1, stride=1, padding=0)
self.def3_bn = nn.BatchNorm2d(8)
self.encoderf1 = nn.Conv2d(3, 16, 3, stride=1, padding=1)
self.enf1 = nn.Conv2d(3, 16, 1, stride=1, padding=0)
self.enf1_bn = nn.BatchNorm2d(16)
self.encoderf2= nn.Conv2d(16, 32, 3, stride=1, padding=1)
self.enf2= nn.Conv2d(16, 32, 1, stride=1, padding=0)
self.enf2_bn = nn.BatchNorm2d(32)
self.encoderf3 = nn.Conv2d(32, 64, 3, stride=1, padding=1)
self.enf3 = nn.Conv2d(32, 64, 1, stride=1, padding=0)
self.enf3_bn = nn.BatchNorm2d(64)
self.intere1_1 = nn.Conv2d(16,16,3, stride=1, padding=1)
self.inte1_1bn = nn.BatchNorm2d(16)
self.intere2_1 = nn.Conv2d(32,32,3, stride=1, padding=1)
self.inte2_1bn = nn.BatchNorm2d(32)
self.intere3_1 = nn.Conv2d(64,64,3, stride=1, padding=1)
self.inte3_1bn = nn.BatchNorm2d(64)
self.intere1_2 = nn.Conv2d(16,16,3, stride=1, padding=1)
self.inte1_2bn = nn.BatchNorm2d(16)
self.intere2_2 = nn.Conv2d(32,32,3, stride=1, padding=1)
self.inte2_2bn = nn.BatchNorm2d(32)
self.intere3_2 = nn.Conv2d(64,64,3, stride=1, padding=1)
self.inte3_2bn = nn.BatchNorm2d(64)
self.interd1_1 = nn.Conv2d(32,32,3, stride=1, padding=1)
self.intd1_1bn = nn.BatchNorm2d(32)
self.interd2_1 = nn.Conv2d(16,16,3, stride=1, padding=1)
self.intd2_1bn = nn.BatchNorm2d(16)
self.interd3_1 = nn.Conv2d(64,64,3, stride=1, padding=1)
self.intd3_1bn = nn.BatchNorm2d(64)
self.interd1_2 = nn.Conv2d(32,32,3, stride=1, padding=1)
self.intd1_2bn = nn.BatchNorm2d(32)
self.interd2_2 = nn.Conv2d(16,16,3, stride=1, padding=1)
self.intd2_2bn = nn.BatchNorm2d(16)
self.interd3_2 = nn.Conv2d(64,64,3, stride=1, padding=1)
self.intd3_2bn = nn.BatchNorm2d(64)
self.final = nn.Conv2d(8,2,1,stride=1,padding=0)
self.soft = nn.Softmax(dim =1)
def forward(self, x):
out = torch.add(self.en1(x),self.encoder1(x)) #init
out = F.relu(self.en1_bn(F.max_pool2d(out,2,2))) # U-Net
out1 = torch.add(self.enf1(x),self.encoderf1(x)) #res init on the Ki-Net branch
out1 = F.relu(self.enf1_bn(F.interpolate(out1,scale_factor=(2,2),mode ='bilinear'))) # ki-net
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.inte1_1bn(self.intere1_1(out1))),scale_factor=(0.25,0.25),mode ='bilinear'))
out1 = torch.add(out1,F.interpolate(F.relu(self.inte1_2bn(self.intere1_2(tmp))),scale_factor=(4,4),mode ='bilinear'))
u1 = out
o1 = out1
out = torch.add(self.en2(out),self.encoder2(out)) #res
out1 = torch.add(self.enf2(out1),self.encoderf2(out1)) #res
out = F.relu(self.en2_bn(F.max_pool2d(out,2,2)))
out1 = F.relu(self.enf2_bn(F.interpolate(out1,scale_factor=(2,2),mode ='bilinear')))
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.inte2_1bn(self.intere2_1(out1))),scale_factor=(0.0625,0.0625),mode ='bilinear'))
out1 = torch.add(out1,F.interpolate(F.relu(self.inte2_2bn(self.intere2_2(tmp))),scale_factor=(16,16),mode ='bilinear'))
u2 = out
o2 = out1
out = torch.add(self.en3(out),self.encoder3(out)) #res
out1 = torch.add(self.enf3(out1),self.encoderf3(out1)) #res
out = F.relu(self.en3_bn(F.max_pool2d(out,2,2)))
out1 = F.relu(self.enf3_bn(F.interpolate(out1,scale_factor=(2,2),mode ='bilinear')))
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.inte3_1bn(self.intere3_1(out1))),scale_factor=(0.015625,0.015625),mode ='bilinear'))
out1 = torch.add(out1,F.interpolate(F.relu(self.inte3_2bn(self.intere3_2(tmp))),scale_factor=(64,64),mode ='bilinear'))
### End of encoder block
# print(out.shape,out1.shape)
out = torch.add(self.de1(out),self.decoder1(out)) #res
out1 = torch.add(self.def1(out1),self.decoderf1(out1)) #res
out = F.relu(self.de1_bn(F.interpolate(out,scale_factor=(2,2),mode ='bilinear')))
out1 = F.relu(self.def1_bn(F.max_pool2d(out1,2,2)))
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.intd1_1bn(self.interd1_1(out1))),scale_factor=(0.0625,0.0625),mode ='bilinear'))
out1 = torch.add(out1,F.interpolate(F.relu(self.intd1_2bn(self.interd1_2(tmp))),scale_factor=(16,16),mode ='bilinear'))
out = torch.add(out,u2)
out1 = torch.add(out1,o2)
out = torch.add(self.de2(out),self.decoder2(out)) #res
out1 = torch.add(self.def2(out1),self.decoderf2(out1)) #res
out = F.relu(self.de2_bn(F.interpolate(out,scale_factor=(2,2),mode ='bilinear')))
out1 = F.relu(self.def2_bn(F.max_pool2d(out1,2,2)))
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.intd2_1bn(self.interd2_1(out1))),scale_factor=(0.25,0.25),mode ='bilinear'))
out1 = torch.add(out1,F.interpolate(F.relu(self.intd2_2bn(self.interd2_2(tmp))),scale_factor=(4,4),mode ='bilinear'))
out = torch.add(out,u1)
out1 = torch.add(out1,o1)
out = torch.add(self.de3(out),self.decoder3(out)) #res
out1 = torch.add(self.def3(out1),self.decoderf3(out1)) #res
out = F.relu(self.de3_bn(F.interpolate(out,scale_factor=(2,2),mode ='bilinear')))
out1 = F.relu(self.def3_bn(F.max_pool2d(out1,2,2)))
out = torch.add(out,out1)
out = F.relu(self.final(out))
# out = self.soft(out)
# print(out.shape)
return out
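# Sketch (editor's note, not in the original file): a quick shape check for
# reskiunet. A small power-of-two input keeps every pool/interpolate pair and
# CRFB scale factor consistent; the head is an 8->2 channel 1x1 conv, so the
# expected output is (1, 2, 32, 32).
if __name__ == '__main__':
    _model = reskiunet().eval()
    with torch.no_grad():
        print(_model(torch.randn(1, 3, 32, 32)).shape)  # torch.Size([1, 2, 32, 32])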
class DenseBlock(nn.Module):
def __init__(self, in_planes):
super(DenseBlock, self).__init__()
# print(int(in_planes/4))
self.c1 = nn.Conv2d(in_planes,in_planes,1,stride=1, padding=0)
self.c2 = nn.Conv2d(in_planes,int(in_planes/4),3,stride=1, padding=1)
self.b1 = nn.BatchNorm2d(in_planes)
self.b2 = nn.BatchNorm2d(int(in_planes/4))
self.c3 = nn.Conv2d(in_planes+int(in_planes/4),in_planes,1,stride=1, padding=0)
self.c4 = nn.Conv2d(in_planes,int(in_planes/4),3,stride=1, padding=1)
self.c5 = nn.Conv2d(in_planes+int(in_planes/2),in_planes,1,stride=1, padding=0)
self.c6 = nn.Conv2d(in_planes,int(in_planes/4),3,stride=1, padding=1)
self.c7 = nn.Conv2d(in_planes+3*int(in_planes/4),in_planes,1,stride=1, padding=0)
self.c8 = nn.Conv2d(in_planes,int(in_planes/4),3,stride=1, padding=1)
def forward(self, x):
org = x
# print(x.shape)
x= F.relu(self.b1(self.c1(x)))
# print(x.shape)
x= F.relu(self.b2(self.c2(x)))
d1 = x
# print(x.shape)
x = torch.cat((org,d1),1)
x= F.relu(self.b1(self.c3(x)))
x= F.relu(self.b2(self.c4(x)))
d2= x
x = torch.cat((org,d1,d2),1)
x= F.relu(self.b1(self.c5(x)))
x= F.relu(self.b2(self.c6(x)))
d3= x
x = torch.cat((org,d1,d2,d3),1)
x= F.relu(self.b1(self.c7(x)))
x= F.relu(self.b2(self.c8(x)))
d4= x
x = torch.cat((d1,d2,d3,d4),1)
x = torch.add(org,x)
return x
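# Sketch (editor's note, not in the original file): DenseBlock preserves its
# channel count -- each 3x3 stage emits in_planes//4 channels, the final concat
# of (d1, d2, d3, d4) restores in_planes, and the residual add requires it.
# Note that b1/b2 are shared across all four stages, and in_planes must be
# divisible by 4 for the concat widths and the residual add to line up.
if __name__ == '__main__':
    _blk = DenseBlock(in_planes=16)
    print(_blk(torch.randn(2, 16, 24, 24)).shape)  # torch.Size([2, 16, 24, 24])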
class densekiunet(nn.Module):
def __init__(self):
super(densekiunet, self).__init__()
self.encoder1 = nn.Conv2d(3, 16, 3, stride=1, padding=1)
self.en1 = DenseBlock(in_planes = 16) # b, 16, 10, 10
self.en1_bn = nn.BatchNorm2d(16)
self.encoder2= nn.Conv2d(16, 32, 3, stride=1, padding=1) # b, 8, 3, 3
self.en2= DenseBlock(in_planes = 32)
self.en2_bn = nn.BatchNorm2d(32)
self.encoder3= nn.Conv2d(32, 64, 3, stride=1, padding=1)
self.en3= DenseBlock(in_planes = 64)
self.en3_bn = nn.BatchNorm2d(64)
self.decoder1 = nn.Conv2d(64, 32, 3, stride=1, padding=1) # b, 1, 28, 28
self.de1 = DenseBlock(in_planes = 32)
self.de1_bn = nn.BatchNorm2d(32)
self.decoder2 = nn.Conv2d(32,16, 3, stride=1, padding=1)
self.de2 = DenseBlock(in_planes = 16)
self.de2_bn = nn.BatchNorm2d(16)
self.decoder3 = nn.Conv2d(16, 8, 3, stride=1, padding=1)
self.de3 = DenseBlock(in_planes = 8)
self.de3_bn = nn.BatchNorm2d(8)
self.decoderf1 = nn.Conv2d(64, 32, 3, stride=1, padding=1)
self.def1 = DenseBlock(in_planes = 32)
self.def1_bn = nn.BatchNorm2d(32)
self.decoderf2= nn.Conv2d(32, 16, 3, stride=1, padding=1)
self.def2= DenseBlock(in_planes = 16)
self.def2_bn = nn.BatchNorm2d(16)
self.decoderf3 = nn.Conv2d(16, 8, 3, stride=1, padding=1)
self.def3 = DenseBlock(in_planes = 8)
self.def3_bn = nn.BatchNorm2d(8)
self.encoderf1 = nn.Conv2d(3, 16, 3, stride=1, padding=1)
self.enf1 = DenseBlock(in_planes = 16)
self.enf1_bn = nn.BatchNorm2d(16)
self.encoderf2= nn.Conv2d(16, 32, 3, stride=1, padding=1)
self.enf2= DenseBlock(in_planes = 32)
self.enf2_bn = nn.BatchNorm2d(32)
self.encoderf3 = nn.Conv2d(32, 64, 3, stride=1, padding=1)
self.enf3 = DenseBlock(in_planes = 64)
self.enf3_bn = nn.BatchNorm2d(64)
self.intere1_1 = nn.Conv2d(16,16,3, stride=1, padding=1)
self.inte1_1bn = nn.BatchNorm2d(16)
self.intere2_1 = nn.Conv2d(32,32,3, stride=1, padding=1)
self.inte2_1bn = nn.BatchNorm2d(32)
self.intere3_1 = nn.Conv2d(64,64,3, stride=1, padding=1)
self.inte3_1bn = nn.BatchNorm2d(64)
self.intere1_2 = nn.Conv2d(16,16,3, stride=1, padding=1)
self.inte1_2bn = nn.BatchNorm2d(16)
self.intere2_2 = nn.Conv2d(32,32,3, stride=1, padding=1)
self.inte2_2bn = nn.BatchNorm2d(32)
self.intere3_2 = nn.Conv2d(64,64,3, stride=1, padding=1)
self.inte3_2bn = nn.BatchNorm2d(64)
self.interd1_1 = nn.Conv2d(32,32,3, stride=1, padding=1)
self.intd1_1bn = nn.BatchNorm2d(32)
self.interd2_1 = nn.Conv2d(16,16,3, stride=1, padding=1)
self.intd2_1bn = nn.BatchNorm2d(16)
self.interd3_1 = nn.Conv2d(64,64,3, stride=1, padding=1)
self.intd3_1bn = nn.BatchNorm2d(64)
self.interd1_2 = nn.Conv2d(32,32,3, stride=1, padding=1)
self.intd1_2bn = nn.BatchNorm2d(32)
self.interd2_2 = nn.Conv2d(16,16,3, stride=1, padding=1)
self.intd2_2bn = nn.BatchNorm2d(16)
self.interd3_2 = nn.Conv2d(64,64,3, stride=1, padding=1)
self.intd3_2bn = nn.BatchNorm2d(64)
self.final = nn.Conv2d(8,2,1,stride=1,padding=0)
self.soft = nn.Softmax(dim =1)
def forward(self, x):
out = F.relu(self.en1_bn(F.max_pool2d(self.en1(self.encoder1(x)),2,2)))
out1 = F.relu(self.enf1_bn(F.interpolate(self.enf1(self.encoderf1(x)),scale_factor=(2,2),mode ='bilinear')))
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.inte1_1bn(self.intere1_1(out1))),scale_factor=(0.25,0.25),mode ='bilinear'))
out1 = torch.add(out1,F.interpolate(F.relu(self.inte1_2bn(self.intere1_2(tmp))),scale_factor=(4,4),mode ='bilinear'))
u1 = out
o1 = out1
out = F.relu(self.en2_bn(F.max_pool2d(self.en2(self.encoder2(out)),2,2)))
out1 = F.relu(self.enf2_bn(F.interpolate(self.enf2(self.encoderf2(out1)),scale_factor=(2,2),mode ='bilinear')))
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.inte2_1bn(self.intere2_1(out1))),scale_factor=(0.0625,0.0625),mode ='bilinear'))
out1 = torch.add(out1,F.interpolate(F.relu(self.inte2_2bn(self.intere2_2(tmp))),scale_factor=(16,16),mode ='bilinear'))
u2 = out
o2 = out1
out = F.relu(self.en3_bn(F.max_pool2d(self.en3(self.encoder3(out)),2,2)))
out1 = F.relu(self.enf3_bn(F.interpolate(self.enf3(self.encoderf3(out1)),scale_factor=(2,2),mode ='bilinear')))
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.inte3_1bn(self.intere3_1(out1))),scale_factor=(0.015625,0.015625),mode ='bilinear'))
out1 = torch.add(out1,F.interpolate(F.relu(self.inte3_2bn(self.intere3_2(tmp))),scale_factor=(64,64),mode ='bilinear'))
### End of encoder block
# print(out.shape,out1.shape)
out = F.relu(self.de1_bn(F.interpolate(self.de1(self.decoder1(out)),scale_factor=(2,2),mode ='bilinear')))
out1 = F.relu(self.def1_bn(F.max_pool2d(self.def1(self.decoderf1(out1)),2,2)))
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.intd1_1bn(self.interd1_1(out1))),scale_factor=(0.0625,0.0625),mode ='bilinear'))
out1 = torch.add(out1,F.interpolate(F.relu(self.intd1_2bn(self.interd1_2(tmp))),scale_factor=(16,16),mode ='bilinear'))
out = torch.add(out,u2)
out1 = torch.add(out1,o2)
out = F.relu(self.de2_bn(F.interpolate(self.de2(self.decoder2(out)),scale_factor=(2,2),mode ='bilinear')))
out1 = F.relu(self.def2_bn(F.max_pool2d(self.def2(self.decoderf2(out1)),2,2)))
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.intd2_1bn(self.interd2_1(out1))),scale_factor=(0.25,0.25),mode ='bilinear'))
out1 = torch.add(out1,F.interpolate(F.relu(self.intd2_2bn(self.interd2_2(tmp))),scale_factor=(4,4),mode ='bilinear'))
out = torch.add(out,u1)
out1 = torch.add(out1,o1)
out = F.relu(self.de3_bn(F.interpolate(self.de3(self.decoder3(out)),scale_factor=(2,2),mode ='bilinear')))
out1 = F.relu(self.def3_bn(F.max_pool2d(self.def3(self.decoderf3(out1)),2,2)))
out = torch.add(out,out1)
out = F.relu(self.final(out))
# out = self.soft(out)
# print(out.shape)
return out
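# Sketch (editor's note, not in the original file): densekiunet has the same
# two-branch layout as reskiunet, with DenseBlocks in place of the 1x1 residual
# convs; the same shape check applies (DenseBlock needs channel counts divisible
# by 4, which 8/16/32/64 all are).
if __name__ == '__main__':
    _model = densekiunet().eval()
    with torch.no_grad():
        print(_model(torch.randn(1, 3, 32, 32)).shape)  # torch.Size([1, 2, 32, 32])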
class kiunet3d(nn.Module): #
def __init__(self, c=4,n=1,channels=128,groups = 16,norm='bn', num_classes=5):
super(kiunet3d, self).__init__()
# Entry flow
self.encoder1 = nn.Conv3d( c, n, kernel_size=3, padding=1, stride=1, bias=False)# H//2
self.encoder2 = nn.Conv3d( n, 2*n, kernel_size=3, padding=1, stride=1, bias=False)
self.encoder3 = nn.Conv3d( 2*n, 4*n, kernel_size=3, padding=1, stride=1, bias=False)
self.kencoder1 = nn.Conv3d( c, n, kernel_size=3, padding=1, stride=1, bias=False)
self.kencoder2 = nn.Conv3d( n, 2*n, kernel_size=3, padding=1, stride=1, bias=False)
self.kencoder3 = nn.Conv3d( 2*n, 2*n, kernel_size=3, padding=1, stride=1, bias=False)
self.downsample1 = nn.MaxPool3d(2, stride=2)
self.downsample2 = nn.MaxPool3d(2, stride=2)
self.downsample3 = nn.MaxPool3d(2, stride=2)
self.kdownsample1 = nn.MaxPool3d(2, stride=2)
self.kdownsample2 = nn.MaxPool3d(2, stride=2)
self.kdownsample3 = nn.MaxPool3d(2, stride=2)
self.upsample1 = nn.Upsample(scale_factor=2, mode='trilinear', align_corners=False) # H//8
self.upsample2 = nn.Upsample(scale_factor=2, mode='trilinear', align_corners=False) # H//4
self.upsample3 = nn.Upsample(scale_factor=2, mode='trilinear', align_corners=False) # H//2
self.kupsample1 = nn.Upsample(scale_factor=2, mode='trilinear', align_corners=False) # H//8
self.kupsample2 = nn.Upsample(scale_factor=2, mode='trilinear', align_corners=False) # H//4
self.kupsample3 = nn.Upsample(scale_factor=2, mode='trilinear', align_corners=False) # H//2
self.decoder1 = nn.Conv3d( 4*n, 2*n, kernel_size=3, padding=1, stride=1, bias=False)
self.decoder2 = nn.Conv3d( 2*n, 2*n, kernel_size=3, padding=1, stride=1, bias=False)
self.decoder3 = nn.Conv3d( 2*n, c, kernel_size=3, padding=1, stride=1, bias=False)
self.kdecoder1 = nn.Conv3d( 2*n, 2*n, kernel_size=3, padding=1, stride=1, bias=False)
self.kdecoder2 = nn.Conv3d( 2*n, 2*n, kernel_size=3, padding=1, stride=1, bias=False)
self.kdecoder3 = nn.Conv3d( 2*n, c, kernel_size=3, padding=1, stride=1, bias=False)
self.intere1_1 = nn.Conv3d(n,n,3, stride=1, padding=1)
# self.inte1_1bn = nn.BatchNorm2d(16)
self.intere2_1 = nn.Conv3d(2*n,2*n,3, stride=1, padding=1)
# self.inte2_1bn = nn.BatchNorm2d(32)
self.intere3_1 = nn.Conv3d(2*n,4*n,3, stride=1, padding=1)
# self.inte3_1bn = nn.BatchNorm2d(64)
self.intere1_2 = nn.Conv3d(n,n,3, stride=1, padding=1)
# self.inte1_2bn = nn.BatchNorm2d(16)
self.intere2_2 = nn.Conv3d(2*n,2*n,3, stride=1, padding=1)
# self.inte2_2bn = nn.BatchNorm2d(32)
self.intere3_2 = nn.Conv3d(4*n,2*n,3, stride=1, padding=1)
# self.inte3_2bn = nn.BatchNorm2d(64)
self.interd1_1 = nn.Conv3d(2*n,2*n,3, stride=1, padding=1)
# self.intd1_1bn = nn.BatchNorm2d(32)
self.interd2_1 = nn.Conv3d(2*n,2*n,3, stride=1, padding=1)
# self.intd2_1bn = nn.BatchNorm2d(16)
self.interd3_1 = nn.Conv3d(n,n,3, stride=1, padding=1)
# self.intd3_1bn = nn.BatchNorm2d(64)
self.interd1_2 = nn.Conv3d(2*n,2*n,3, stride=1, padding=1)
# self.intd1_2bn = nn.BatchNorm2d(32)
self.interd2_2 = nn.Conv3d(2*n,2*n,3, stride=1, padding=1)
# self.intd2_2bn = nn.BatchNorm2d(16)
self.interd3_2 = nn.Conv3d(n,n,3, stride=1, padding=1)
# self.intd3_2bn = nn.BatchNorm2d(64)
self.seg = nn.Conv3d(c, num_classes, kernel_size=1, padding=0,stride=1,bias=False)
self.softmax = nn.Softmax(dim=1)
# Initialization
for m in self.modules():
if isinstance(m, nn.Conv3d):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm3d) or isinstance(m, nn.GroupNorm):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
# Encoder
out = F.relu(F.max_pool3d(self.encoder1(x),2,2)) #U-Net branch
out1 = F.relu(F.interpolate(self.kencoder1(x),scale_factor=2,mode ='trilinear')) #Ki-Net branch
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.intere1_1(out1)),scale_factor=0.25,mode ='trilinear')) #CRFB
out1 = torch.add(out1,F.interpolate(F.relu(self.intere1_2(tmp)),scale_factor=4,mode ='trilinear')) #CRFB
u1 = out #skip conn
o1 = out1 #skip conn
out = F.relu(F.max_pool3d(self.encoder2(out),2,2))
out1 = F.relu(F.interpolate(self.kencoder2(out1),scale_factor=2,mode ='trilinear'))
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.intere2_1(out1)),scale_factor=0.0625,mode ='trilinear'))
out1 = torch.add(out1,F.interpolate(F.relu(self.intere2_2(tmp)),scale_factor=16,mode ='trilinear'))
u2 = out
o2 = out1
out = F.relu(F.max_pool3d(self.encoder3(out),2,2))
out1 = F.relu(F.interpolate(self.kencoder3(out1),scale_factor=2,mode ='trilinear'))
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.intere3_1(out1)),scale_factor=0.015625,mode ='trilinear'))
out1 = torch.add(out1,F.interpolate(F.relu(self.intere3_2(tmp)),scale_factor=64,mode ='trilinear'))
### End of encoder block
### Start Decoder
out = F.relu(F.interpolate(self.decoder1(out),scale_factor=2,mode ='trilinear')) #U-NET
out1 = F.relu(F.max_pool3d(self.kdecoder1(out1),2,2)) #Ki-NET
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.interd1_1(out1)),scale_factor=0.0625,mode ='trilinear'))
out1 = torch.add(out1,F.interpolate(F.relu(self.interd1_2(tmp)),scale_factor=16,mode ='trilinear'))
out = torch.add(out,u2) #skip conn
out1 = torch.add(out1,o2) #skip conn
out = F.relu(F.interpolate(self.decoder2(out),scale_factor=2,mode ='trilinear'))
out1 = F.relu(F.max_pool3d(self.kdecoder2(out1),2,2))
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.interd2_1(out1)),scale_factor=0.25,mode ='trilinear'))
out1 = torch.add(out1,F.interpolate(F.relu(self.interd2_2(tmp)),scale_factor=4,mode ='trilinear'))
out = torch.add(out,u1)
out1 = torch.add(out1,o1)
out = F.relu(F.interpolate(self.decoder3(out),scale_factor=2,mode ='trilinear'))
out1 = F.relu(F.max_pool3d(self.kdecoder3(out1),2,2))
out = torch.add(out,out1) # fusion of both branches
out = F.relu(self.seg(out)) # 1x1 conv
# out = self.softmax(out)
return out
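# Sketch (editor's note, not in the original file): a 3D smoke test. With the
# defaults c=4, n=1 the skip additions mix 2*n-channel decoder maps with
# n-channel encoder maps, which only lines up through broadcasting when n == 1,
# so keep n=1 here. Expected output: (1, num_classes, 16, 16, 16).
if __name__ == '__main__':
    _model = kiunet3d().eval()
    with torch.no_grad():
        print(_model(torch.randn(1, 4, 16, 16, 16)).shape)  # torch.Size([1, 5, 16, 16, 16])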
| 46.223067 | 152 | 0.603236 | 5,754 | 36,470 | 3.737748 | 0.04171 | 0.054029 | 0.099595 | 0.093458 | 0.910773 | 0.883852 | 0.838053 | 0.814061 | 0.797647 | 0.774957 | 0 | 0.104158 | 0.233151 | 36,470 | 788 | 153 | 46.281726 | 0.664855 | 0.073787 | 0 | 0.48244 | 0 | 0 | 0.020695 | 0 | 0 | 0 | 0 | 0 | 0.001848 | 1 | 0.033272 | false | 0 | 0.020333 | 0 | 0.086876 | 0.001848 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
0cb984758be09b6024069bb1b6a1d313d0158584 | 41 | py | Python | src/sdgen/svg/__init__.py | PP-TSD/sdgen | 58a3a46f7f612c8d7774dd43a4ab55df4f33ab20 | ["MIT"] | 1 | 2015-02-18T17:59:05.000Z | 2015-02-18T17:59:05.000Z | src/sdgen/svg/__init__.py | PP-TSD/sdgen | 58a3a46f7f612c8d7774dd43a4ab55df4f33ab20 | ["MIT"] | null | null | null | src/sdgen/svg/__init__.py | PP-TSD/sdgen | 58a3a46f7f612c8d7774dd43a4ab55df4f33ab20 | ["MIT"] | null | null | null |
from sdgen.svg.svg import to_svg, to_png
| 20.5 | 40 | 0.804878 | 9 | 41 | 3.444444 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.121951 | 41 | 1 | 41 | 41 | 0.861111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
0ccd99c73c9bad06a621f3514d17c5ca07cd6270 | 154 | py | Python | efc/rpn_builder/lexer/__init__.py | yoptar/excel-formulas-calculator | a14017a21956600383cb282673d3c7693b383ee3 | ["MIT"] | 11 | 2020-03-04T10:27:43.000Z | 2022-03-13T13:40:42.000Z | efc/rpn_builder/lexer/__init__.py | yoptar/excel-formulas-calculator | a14017a21956600383cb282673d3c7693b383ee3 | ["MIT"] | 2 | 2021-04-17T17:36:31.000Z | 2021-11-16T13:34:50.000Z | efc/rpn_builder/lexer/__init__.py | yoptar/excel-formulas-calculator | a14017a21956600383cb282673d3c7693b383ee3 | ["MIT"] | 5 | 2020-03-04T10:27:46.000Z | 2022-03-12T01:42:07.000Z |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from efc.rpn_builder.lexer.lexer import Lexer
| 30.8 | 82 | 0.792208 | 21 | 154 | 5.428571 | 0.761905 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007299 | 0.11039 | 154 | 4 | 83 | 38.5 | 0.824818 | 0.136364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0.5 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | 6 |
0cec18ba9582ece4c91eec7cd781c2eea1f25313 | 279 | py | Python | utils/StringUtils.py | MadMax2506/pdf-tools | a9e9517920c7114ec2ffab8189870b02768a7ea4 | ["Apache-2.0"] | null | null | null | utils/StringUtils.py | MadMax2506/pdf-tools | a9e9517920c7114ec2ffab8189870b02768a7ea4 | ["Apache-2.0"] | null | null | null | utils/StringUtils.py | MadMax2506/pdf-tools | a9e9517920c7114ec2ffab8189870b02768a7ea4 | ["Apache-2.0"] | null | null | null |
def rreplace(string, old_str_part, new_str_part, occurrence):
"""Replace the last `occurrence` matches of old_str_part with new_str_part."""
split_str = string.rsplit(old_str_part, occurrence)
return new_str_part.join(split_str)
def rremove(string, str_part, occurrence):
"""Return the part of string before its last `occurrence` matches of str_part."""
split_str = string.rsplit(str_part, occurrence)
return split_str[0]
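# Sketch (editor's note, not in the original file): usage examples for the
# helpers above.
if __name__ == '__main__':
    print(rreplace('a-b-c', '-', '_', 1))       # 'a-b_c'  (only the last '-')
    print(rremove('report.final.pdf', '.', 1))  # 'report.final'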
| 31 | 61 | 0.767025 | 42 | 279 | 4.761905 | 0.333333 | 0.21 | 0.34 | 0.22 | 0.37 | 0.37 | 0.37 | 0 | 0 | 0 | 0 | 0.004167 | 0.139785 | 279 | 8 | 62 | 34.875 | 0.829167 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0 | 0 | 0.666667 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 6 |
0b33fd7981377f0519da0432bc4ae55410f261f8 | 11,468 | py | Python | validation/random_field.py | navidcy/trackeddy | 43e884d42c3fcf3eccb17bcd2d1e00ee0d478242 | ["MIT"] | 36 | 2019-01-30T23:47:55.000Z | 2022-03-08T06:08:41.000Z | validation/random_field.py | powerwordlearner/trackeddy | afe85694fcff62df75f8f598f8decf2ec3f28f8d | ["MIT"] | 14 | 2019-02-25T21:46:11.000Z | 2022-03-08T08:58:36.000Z | validation/random_field.py | powerwordlearner/trackeddy | afe85694fcff62df75f8f598f8decf2ec3f28f8d | ["MIT"] | 14 | 2019-03-04T03:19:13.000Z | 2022-03-08T06:08:42.000Z |
import time
tic=time.time()
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams.update({'font.size': 32})
import trackeddy
import trackeddy.tracking as ttrack
from trackeddy.geometryfunc import *
from pylab import *
import random
import pdb
import cmocean as cm
import matplotlib.gridspec as gridspec
import trackeddy.utils.field_generator as fg
import importlib
importlib.reload(ttrack)
import sys
from scipy import stats  # needed by stats.theilslopes in the plotting section
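# Editor's note (sketch, not in the original script): plt.savefig below writes
# into 'plots_n/', so create it up front for headless runs.
import os
os.makedirs('plots_n', exist_ok=True)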
t = 10
n = 20
xx = linspace(10,12,200)
yy = linspace(10,12,200)
#print("Generate field")
#gf=fg.Generate_field(0.1,0.1,n,xx,yy,'nrand')
#data = gf.assemble_field(t,'Nint')
x = linspace(10,12,300)
y = linspace(10,12,300)
data = zeros((t,300,300))
for tt in range(t):
gf=fg.Generate_field(0.05,0.05,randint(5, n),xx,yy,'int')
data[tt,:,:] = gf.assemble_field(1)
##
################################################################################
################################################################################
#################################### FLAT ######################################
################################################################################
################################################################################
print('No-Int')
preferences={'ellipse':0.85,'eccentricity':0.85,'gaussian':0.8}
eddytd={}
eddytdn={}
t0 = 0
levels = {'max':data.max(),'min':0.05,'step':0.05}
eddytd = trackeddy.tracking.analyseddyzt(data,x,y,t0,t,1,levels,preferences=preferences,areamap='',mask='',maskopt='forcefit'\
,destdir='',physics='',diagnostics=False,plotdata=False,pprint=False,debug=False)
####
levels = {'max':data.min(),'min':-0.05,'step':-0.05}
eddytdn = trackeddy.tracking.analyseddyzt(data,x,y,t0,t,1,levels,preferences=preferences,areamap='',mask='',maskopt='forcefit'\
,destdir='',physics='',diagnostics=False,plotdata=False,pprint=False,debug=False)
pos_f = reconstruct_syntetic(shape(data),x,y,eddytd)
neg_f = reconstruct_syntetic(shape(data),x,y,eddytdn)
f_field = pos_f+neg_f
for tt in range(t0,t):
f = plt.figure(dpi=300,figsize=(10,5))
gs = gridspec.GridSpec(1, 2)
ax1 = plt.subplot(gs[0])
ax1.pcolormesh(x,y,data[tt,:,:],vmin=-1,vmax=1,cmap=cm.cm.balance)
ax1.yaxis.set_major_locator(plt.NullLocator())
ax1.xaxis.set_major_formatter(plt.NullFormatter())
ax2 = plt.subplot(gs[1])
ax2.pcolormesh(f_field[tt,:,:],vmin=-1,vmax=1,cmap=cm.cm.balance)
#ax2.contour(f_field[tt,:,:])
ax2.yaxis.set_major_locator(plt.NullLocator())
ax2.xaxis.set_major_formatter(plt.NullFormatter())
#ax1.set_title('Assemble: %03d' % tt)
plt.show()
#plt.savefig('plots_n/time_%03d.png' %tt)
m_ke_c = []
m_ke_f = []
m_ke_w = []
m_ke_j = []
for tt in range(shape(data)[0]):
u_c,v_c = geovelfield( data[tt,:,:] ,x,y)
u_f,v_f = geovelfield(f_field[tt,:,:],x,y)
#u_w,v_w = geovelfield(w_field[tt,:,:],x,y)
#u_j,v_j = geovelfield(j_field[tt,:,:],x,y)
ke_c = KE(u_c,v_c)
ke_f = KE(u_f,v_f)
#ke_w = KE(u_w,v_w)
#ke_j = KE(u_j,v_j)
m_ke_c.append(mean(ke_c))
m_ke_f.append(mean(ke_f))
#m_ke_w.append(mean(ke_w))
#m_ke_j.append(mean(ke_j))
import seaborn as sns
import pandas as pd
figure(dpi=300)
data=np.vstack([m_ke_c,m_ke_f]).T
df = pd.DataFrame(data, columns=[r"$KE_c$", r"$KE_r$"])
sys.exit()  # NOTE: the script stops here; the pickle call below and the wave/jet sections never run
df.to_pickle('./ke_validation_f_n')
sys.exit()
################################################################################
################################################################################
#################################### WAVE ######################################
################################################################################
################################################################################
print('Waves')
amplitude = 1
frequency = 20
phase = 1
waves = zeros(shape(data))
X,Y = meshgrid(x,y)
for tt in range(0,t):  # use tt so the frame count t is not shadowed
r = X+y/10
waves[tt,:,:] = 0.3*sin(r*frequency-tt + phase)
wave_data = waves+data
levels = {'max':wave_data.max(),'min':0.05,'step':0.05}
eddytd=ttrack.analyseddyzt(wave_data,x,y,0,t,1,levels,preferences=preferences,areamap='',mask='',maskopt='forcefit'\
,destdir='',physics='',diagnostics=False,plotdata=False,pprint=False)
levels = {'max':wave_data.min(),'min':-0.05,'step':-0.05}
eddytdn=ttrack.analyseddyzt(wave_data,x,y,0,t,1,levels,preferences=preferences,areamap='',mask='',maskopt='forcefit'\
,destdir='',physics='',diagnostics=False,plotdata=False,pprint=False)
pos_w = reconstruct_syntetic(shape(wave_data),x,y,eddytd)
neg_w = reconstruct_syntetic(shape(wave_data),x,y,eddytdn)
w_field = pos_w+neg_w
for tt in range(t0,t):
f = plt.figure(dpi=300,figsize=(10,5))
gs = gridspec.GridSpec(1, 2)
ax1 = plt.subplot(gs[0])
ax1.pcolormesh(x,y,wave_data[tt,:,:],vmin=-1,vmax=1,cmap=cm.cm.balance)
ax1.yaxis.set_major_locator(plt.NullLocator())
ax1.xaxis.set_major_formatter(plt.NullFormatter())
ax2 = plt.subplot(gs[1])
ax2.pcolormesh(w_field[tt,:,:],vmin=-1,vmax=1,cmap=cm.cm.balance)
#ax2.contour(w_field[tt,:,:])
ax2.yaxis.set_major_locator(plt.NullLocator())
ax2.xaxis.set_major_formatter(plt.NullFormatter())
#ax1.set_title('Assemble: %03d' % tt)
plt.savefig('plots_n/time_w_%03d.png' %tt)
################################################################################
################################################################################
#################################### JETS ######################################
################################################################################
################################################################################
print('Jets')
k_y = 3
phase = 1
k_x = 2
jets = zeros(shape(data))
for tt in range(0,t):  # use tt so the frame count t is not shadowed
r = Y
k_y=random.uniform(2, 3)
phase=random.uniform(0, 1)
k_x=random.uniform(1, 2)
amp=0.3
jets[tt,:,:] = amp*cos((k_y*(k_y*Y+phase+sin(k_x*X-tt))))
jet_data = jets+data
levels = {'max':jet_data.max(),'min':0.05,'step':0.05}
eddytd=ttrack.analyseddyzt(jet_data,x,y,0,t,1,levels,preferences=preferences,areamap='',mask='',maskopt='forcefit'\
,destdir='',physics='',diagnostics=False,plotdata=False,pprint=False)
levels = {'max':jet_data.min(),'min':-0.05,'step':-0.05}
eddytdn=ttrack.analyseddyzt(jet_data,x,y,0,t,1,levels,preferences=preferences,areamap='',mask='',maskopt='forcefit'\
,destdir='',physics='',diagnostics=False,plotdata=False,pprint=False)
pos_f = reconstruct_syntetic(shape(jet_data),x,y,eddytd)
neg_f = reconstruct_syntetic(shape(jet_data),x,y,eddytdn)
j_field = pos_f+neg_f
for tt in range(t0,t):
f = plt.figure(dpi=300,figsize=(10,5))
gs = gridspec.GridSpec(1, 2)
ax1 = plt.subplot(gs[0])
ax1.pcolormesh(x,y,jet_data[tt,:,:],vmin=-1,vmax=1,cmap=cm.cm.balance)
ax1.yaxis.set_major_locator(plt.NullLocator())
ax1.xaxis.set_major_formatter(plt.NullFormatter())
ax2 = plt.subplot(gs[1])
ax2.pcolormesh(j_field[tt,:,:],vmin=-1,vmax=1,cmap=cm.cm.balance)
#ax2.contour(w_field[tt,:,:])
ax2.yaxis.set_major_locator(plt.NullLocator())
ax2.xaxis.set_major_formatter(plt.NullFormatter())
#ax1.set_title('Assemble: %03d' % tt)
plt.savefig('plots_n/time_j_%03d.png' %tt)
################################################################################
################################################################################
##################################### KE #######################################
################################################################################
################################################################################
m_ke_c = []
m_ke_f = []
m_ke_w = []
m_ke_j = []
for tt in range(shape(data)[0]):
u_c,v_c = geovelfield( data[tt,:,:] ,x,y)
u_f,v_f = geovelfield(f_field[tt,:,:],x,y)
#u_w,v_w = geovelfield(w_field[tt,:,:],x,y)
#u_j,v_j = geovelfield(j_field[tt,:,:],x,y)
ke_c = KE(u_c,v_c)
ke_f = KE(u_f,v_f)
#ke_w = KE(u_w,v_w)
#ke_j = KE(u_j,v_j)
m_ke_c.append(mean(ke_c))
m_ke_f.append(mean(ke_f))
#m_ke_w.append(mean(ke_w))
#m_ke_j.append(mean(ke_j))
################################################################################
################################################################################
#################################### PLOT ######################################
################################################################################
################################################################################
import seaborn as sns
import pandas as pd
figure(dpi=300)
data=np.vstack([m_ke_c,m_ke_f]).T
df = pd.DataFrame(data, columns=[r"$KE_c$", r"$KE_r$"])
df.to_pickle('./ke_validation_f_n')
g1 = sns.jointplot(x=r"$KE_c$", y=r"$KE_r$", data=df, kind="kde",cmap='Blues',joint_kws={'shade_lowest':False})  # font size already set via rcParams above
lims = [100, 0]
g1.ax_joint.plot(lims, lims, '--k')
res = stats.theilslopes(df[r"$KE_r$"].values,df[r"$KE_c$"].values, 0.95)
lnr2=res[1] + res[2]*arange(100)
lnr3=res[1] + res[3]*arange(100)
g1.ax_joint.fill_between(range(100),lnr2, lnr3, facecolor='b',alpha=0.5)
r=res[0]
x0=0
y0=res[1] + res[0]*x0
x1=100
y1=res[1] + res[0]*x1
g1.ax_joint.plot([x0,x1], [y0,y1], '-.b')
g1.ax_joint.text(60,20,r'R = %.2f' % r, color='b')
g1.ax_marg_x.set_xlim(0,100)
g1.ax_marg_y.set_ylim(0,100)
print('estimate flat: ',mean([abs(y0/100),abs(1-y1/100)]))
g1.ax_joint.legend_.remove()
plt.savefig('e_vs_e_n.png')
figure(dpi=300)
data=np.vstack([m_ke_c,m_ke_w]).T
df = pd.DataFrame(data, columns=[r"$KE_c$", r"$KE_r$"])
g1 = sns.jointplot(x=r"$KE_c$", y=r"$KE_r$", data=df, kind="kde",cmap='Blues',joint_kws={'shade_lowest':False})
lims = [100, 0]
g1.ax_joint.plot(lims, lims, '--k')
res = stats.theilslopes(df[r"$KE_r$"].values,df[r"$KE_c$"].values, 0.95)
lnr2=res[1] + res[2]*arange(100)
lnr3=res[1] + res[3]*arange(100)
g1.ax_joint.fill_between(range(100),lnr2, lnr3, facecolor='b',alpha=0.5)
r=res[0]
x0=0
y0=res[1] + res[0]*x0
x1=100
y1=res[1] + res[0]*x1
g1.ax_joint.plot([x0,x1], [y0,y1], '-.b')
g1.ax_joint.text(60,20,r'R = %.2f' % r, color='b')
g1.ax_marg_x.set_xlim(0,100)
g1.ax_marg_y.set_ylim(0,100)
print('estimate sin: ',mean([abs(y0/100),abs(1-y1/100)]))
g1.ax_joint.legend_.remove()
plt.savefig('w_vs_e_n.png')
#df.to_pickle('./ke_validation_w_n')
figure(dpi=300)
data=np.vstack([m_ke_c,m_ke_j]).T
df = pd.DataFrame(data, columns=[r"$KE_c$", r"$KE_r$"])
g1 = sns.jointplot(x=r"$KE_c$", y=r"$KE_r$", data=df, kind="kde",cmap='Blues',joint_kws={'shade_lowest':False})
lims = [100, 0]
g1.ax_joint.plot(lims, lims, '--k')
res = stats.theilslopes(df[r"$KE_r$"].values,df[r"$KE_c$"].values, 0.95)
lnr2=res[1] + res[2]*arange(100)
lnr3=res[1] + res[3]*arange(100)
g1.ax_joint.fill_between(range(100),lnr2, lnr3, facecolor='b',alpha=0.5)
r=res[0]
x0=0
y0=res[1] + res[0]*x0
x1=100
y1=res[1] + res[0]*x1
g1.ax_joint.plot([x0,x1], [y0,y1], '-.b')
g1.ax_joint.text(60,20,r'R = %.2f' % r, color='b')
g1.ax_marg_x.set_xlim(0,100)
g1.ax_marg_y.set_ylim(0,100)
print('estimate jet: ',mean([abs(y0/100),abs(1-y1/100)]))
g1.ax_joint.legend_.remove()
plt.savefig('j_vs_e_n.png')
#df.to_pickle('./ke_validation_j_n')
# for ii in range(0,30):
# plt.figure()
# plt.pcolormesh(af[ii])
# plt.savefig('%03d.png' %ii)
# plt.show()
toc=time.time()
print("######## ELAPSED TIME: ###########")
print("######## %2f s ###########" % (toc-tic))
| 32.859599 | 127 | 0.545169 | 1,734 | 11,468 | 3.442907 | 0.121107 | 0.00871 | 0.022613 | 0.00804 | 0.806365 | 0.795477 | 0.794975 | 0.777052 | 0.759129 | 0.727136 | 0 | 0.045578 | 0.102721 | 11,468 | 348 | 128 | 32.954023 | 0.534597 | 0.077694 | 0 | 0.581818 | 0 | 0 | 0.071887 | 0.005368 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.081818 | 0 | 0.081818 | 0.063636 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
0b55ea9c0832a40234a62282328e8c185cbd1c88 | 6,187 | py | Python | fypy/pricing/analytical/black_scholes.py | jkirkby3/fypy | 28654800c91683685aee559aac13a17e3f4583b8 | ["MIT"] | 16 | 2021-04-24T18:51:00.000Z | 2022-03-31T16:17:21.000Z | fypy/pricing/analytical/black_scholes.py | jkirkby3/fypy | 28654800c91683685aee559aac13a17e3f4583b8 | ["MIT"] | null | null | null | fypy/pricing/analytical/black_scholes.py | jkirkby3/fypy | 28654800c91683685aee559aac13a17e3f4583b8 | ["MIT"] | 6 | 2021-04-28T12:19:25.000Z | 2022-03-31T16:19:36.000Z |
"""
About: contains pricing/Greeks formulas for Black-Scholes and Black-76
"""
import numpy as np
from scipy.stats import norm
from typing import Union
def black76_price(F: float,
K: Union[float, np.ndarray],
is_call: bool,
vol: Union[float, np.ndarray],
disc: float,
T: float) -> Union[float, np.ndarray]:
"""
Price strikes of a common parity (i.e. only calls or only puts). Use black76_price_strikes to price a mix of calls/puts
:param F: float, forward price
:param K: float or array, the Strike(s)
:param is_call: bool, determines if ALL strikes are call or all are put
:param vol: float or array, the Volatility(ies) ... if float, all strikes get same vol, else a vol smile
:param disc: float, the discount factor, e.g. 0.99
:param T: float, time to maturity of option
:return: float or np.ndarray, same shape as strikes
"""
vol_st = vol * np.sqrt(T)
d_1 = (np.log(F / K) + (0.5 * vol ** 2) * T) / vol_st
d_2 = d_1 - vol_st
if is_call:
return disc * (F * norm.cdf(d_1) - norm.cdf(d_2) * K)
return disc * (norm.cdf(-d_2) * K - F * norm.cdf(-d_1))
def black76_price_strikes(F: float,
K: np.ndarray,
is_calls: np.ndarray,
vol: Union[float, np.ndarray],
disc: float,
T: float) -> np.ndarray:
"""
Price strikes with possibly a mix of calls and puts
:param F: float, forward price
:param K: float or array, the Strike(s)
:param is_calls: array of bools, for each strike its true for call or false for put
:param vol: float or array, the Volatility(ies) ... if float, all strikes get same vol, else a vol smile
:param disc: float, the discount factor, e.g. 0.99
:param T: float, time to maturity of option
:return: float or np.ndarray, same shape as strikes
"""
prices = np.zeros(len(is_calls))
if isinstance(vol, np.ndarray):
prices[is_calls] = black76_price(F=F, K=K[is_calls], is_call=True, vol=vol[is_calls], disc=disc, T=T)
prices[~is_calls] = black76_price(F=F, K=K[~is_calls], is_call=False, vol=vol[~is_calls], disc=disc, T=T)
else:
prices[is_calls] = black76_price(F=F, K=K[is_calls], is_call=True, vol=vol, disc=disc, T=T)
prices[~is_calls] = black76_price(F=F, K=K[~is_calls], is_call=False, vol=vol, disc=disc, T=T)
return prices
def black76_vega(F: float,
K: Union[float, np.ndarray],
vol: Union[float, np.ndarray],
disc: float,
T: float) -> Union[float, np.ndarray]:
"""
Vega(s) for strike(s)
:param F: float, forward price
:param K: float or array, the Strike(s)
:param vol: float or array, the Volatility(ies) ... if float, all strikes get same vol, else a vol smile
:param disc: float, the discount factor, e.g. 0.99
:param T: float, time to maturity of option
:return: float or np.ndarray, same shape as strikes
"""
vol_st = vol * np.sqrt(T)
d_1 = (np.log(F / K) + 0.5 * vol_st ** 2) / vol_st
return disc * F * norm.pdf(d_1) * np.sqrt(T)
def black76_delta(F: float,
K: Union[float, np.ndarray],
is_call: bool,
vol: Union[float, np.ndarray],
disc: float,
T: float) -> Union[float, np.ndarray]:
"""
Delta for strikes of a common parity (i.e. only calls or only puts).
:param F: float, forward price
:param K: float or array, the Strike(s)
:param is_call: bool, determines if ALL strikes are call or all are put
:param vol: float or array, the Volatility(ies) ... if float, all strikes get same vol, else a vol smile
:param disc: float, the discount factor, e.g. 0.99
:param T: float, time to maturity of option
:return: float or np.ndarray, same shape as strikes
"""
vol_st = vol * np.sqrt(T)
d_1 = (np.log(F / K) + 0.5 * vol_st ** 2) / vol_st
delta = norm.cdf(d_1)
if not is_call:
delta -= 1.0
return disc * delta
def black_scholes_price(S: float,
K: Union[float, np.ndarray],
is_call: bool,
vol: Union[float, np.ndarray],
disc: float,
T: float,
div_disc: float = 1.0):
"""
Price strikes of a common parity (i.e. only calls or only puts). Use black_scholes_price_strikes to price a mix of calls/puts
:param S: float, spot price
:param K: float or array, the Strike(s)
:param is_call: bool, determines if ALL strikes are call or all are put
:param vol: float or array, the Volatility(ies) ... if float, all strikes get same vol, else a vol smile
:param disc: float, the discount factor, e.g. 0.99
:param T: float, time to maturity of option
:param div_disc: float, the dividend discount factor
:return: float or np.ndarray, same shape as strikes
"""
return black76_price(S * div_disc / disc, K, is_call, vol, disc, T)
def black_scholes_price_strikes(S: float,
K: np.ndarray,
is_calls: np.ndarray,
vol: Union[float, np.ndarray],
disc: float,
T: float,
div_disc: float = 1.0) -> np.ndarray:
"""
Price strikes with possibly a mix of calls and puts
:param S: float, spot price
:param K: float or array, the Strike(s)
:param is_calls: array of bools, for each strike its true for call or false for put
:param vol: float or array, the Volatility(ies) ... if float, all strikes get same vol, else a vol smile
:param disc: float, the discount factor, e.g. 0.99
:param T: float, time to maturity of option
:param div_disc: float, the dividend discount factor
:return: float or np.ndarray, same shape as strikes
"""
return black76_price_strikes(S * div_disc / disc, K, is_calls=is_calls, vol=vol, disc=disc, T=T)
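# Sketch (editor's note, not in the original module): a quick put-call parity
# check, C - P = div_disc*S - disc*K (here div_disc=1), using made-up inputs.
if __name__ == '__main__':
    S, K, vol, T = 100.0, np.array([90.0, 100.0, 110.0]), 0.2, 1.0
    disc = np.exp(-0.05 * T)  # risk-free discount factor to maturity
    calls = black_scholes_price(S, K, True, vol, disc, T)
    puts = black_scholes_price(S, K, False, vol, disc, T)
    print(np.allclose(calls - puts, S - disc * K))  # True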
| 42.376712 | 120 | 0.588007 | 955 | 6,187 | 3.727749 | 0.103665 | 0.060674 | 0.055056 | 0.069382 | 0.86264 | 0.851404 | 0.835674 | 0.830899 | 0.824157 | 0.824157 | 0 | 0.015855 | 0.306772 | 6,187 | 145 | 121 | 42.668966 | 0.814176 | 0.470179 | 0 | 0.507692 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.092308 | false | 0 | 0.046154 | 0 | 0.246154 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
0ba698794e6ee860b423b987023157b410b6d91c | 84,101 | py | Python | alerter/test/data_transformers/contracts/test_chainlink.py | SimplyVC/panic | 2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d | ["Apache-2.0"] | 41 | 2019-08-23T12:40:42.000Z | 2022-03-28T11:06:02.000Z | alerter/test/data_transformers/contracts/test_chainlink.py | SimplyVC/panic | 2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d | ["Apache-2.0"] | 147 | 2019-08-30T22:09:48.000Z | 2022-03-30T08:46:26.000Z | alerter/test/data_transformers/contracts/test_chainlink.py | SimplyVC/panic | 2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d | ["Apache-2.0"] | 3 | 2019-09-03T21:12:28.000Z | 2021-08-18T14:27:56.000Z |
import copy
import json
import logging
import unittest
from datetime import datetime
from datetime import timedelta
from queue import Queue
from unittest import mock
import pika
import pika.exceptions
from freezegun import freeze_time
from parameterized import parameterized
from src.data_store.redis import RedisApi
from src.data_transformers.contracts.chainlink import (
ChainlinkContractsDataTransformer
)
from src.message_broker.rabbitmq import RabbitMQApi
from src.monitorables.contracts.chainlink.v3 import V3ChainlinkContract
from src.monitorables.contracts.chainlink.v4 import V4ChainlinkContract
from src.utils import env
from src.utils.constants.rabbitmq import (
HEALTH_CHECK_EXCHANGE, RAW_DATA_EXCHANGE, STORE_EXCHANGE, ALERT_EXCHANGE,
CL_CONTRACTS_DT_INPUT_QUEUE_NAME, CHAINLINK_CONTRACTS_RAW_DATA_ROUTING_KEY,
HEARTBEAT_OUTPUT_WORKER_ROUTING_KEY,
CL_CONTRACT_TRANSFORMED_DATA_ROUTING_KEY)
from src.utils.exceptions import (
PANICException, ReceivedUnexpectedDataException,
MessageWasNotDeliveredException)
from test.utils.utils import (
connect_to_rabbit, delete_queue_if_exists, disconnect_from_rabbit,
delete_exchange_if_exists, save_chainlink_contract_to_redis)
class TestChainlinkContractsDataTransformer(unittest.TestCase):
def setUp(self) -> None:
# Dummy data and objects
self.dummy_logger = logging.getLogger('Dummy')
self.dummy_logger.disabled = True
self.connection_check_time_interval = timedelta(seconds=0)
self.test_last_monitored = datetime(2012, 1, 1).timestamp()
self.test_heartbeat = {
'component_name': 'Test Component',
'is_alive': True,
'timestamp': self.test_last_monitored,
}
self.test_exception = PANICException('test_exception', 1)
self.test_rabbit_queue_name = 'Test Queue'
self.max_queue_size = 1000
self.test_data_str = 'test_data'
self.test_publishing_queue = Queue(self.max_queue_size)
self.transformer_name = 'test_chainlink_contracts_data_transformer'
# Rabbit instance
self.rabbit_ip = env.RABBIT_IP
self.rabbitmq = RabbitMQApi(
self.dummy_logger, self.rabbit_ip,
connection_check_time_interval=self.connection_check_time_interval)
# Redis instance
self.redis_db = env.REDIS_DB
self.redis_host = env.REDIS_IP
self.redis_port = env.REDIS_PORT
self.redis_namespace = env.UNIQUE_ALERTER_IDENTIFIER
self.redis = RedisApi(
self.dummy_logger, self.redis_db, self.redis_host, self.redis_port,
'', self.redis_namespace, self.connection_check_time_interval)
# Test meta_data credentials
self.test_monitor_name = 'test_monitor'
self.test_node_id_1 = 'node_id_1'
self.test_parent_id_1 = 'parent_id_1'
self.test_node_name_1 = 'node_name_1'
# Test contract credentials
self.test_proxy_address_1 = 'test_proxy_address_1'
self.test_aggregator_address_1 = 'test_aggregator_address_1'
self.test_latest_round_1 = 40
self.test_latest_answer_1 = 34534534563464
self.test_latest_timestamp_1 = self.test_last_monitored + 30
self.test_answered_in_round_1 = 40
self.test_withdrawable_payment_1 = 3458347534235
self.test_owed_payment_1 = 34
self.test_historical_rounds_1 = [
{
'roundId': 38,
'roundAnswer': 10,
'roundTimestamp': int(self.test_last_monitored + 10),
'answeredInRound': 38,
'nodeSubmission': 5
},
{
'roundId': 39,
'roundAnswer': 5,
'roundTimestamp': int(self.test_last_monitored + 20),
'answeredInRound': 39,
'nodeSubmission': 10
}
]
self.test_historical_rounds_1_transformed = copy.deepcopy(
self.test_historical_rounds_1)
self.test_historical_rounds_1_transformed[0]['deviation'] = 50.0
self.test_historical_rounds_1_transformed[1]['deviation'] = 100.0
self.test_proxy_address_2 = 'test_proxy_address_2'
self.test_aggregator_address_2 = 'test_aggregator_address_2'
self.test_latest_round_2 = 50
self.test_latest_answer_2 = 3453453456
self.test_latest_timestamp_2 = self.test_last_monitored + 30
self.test_answered_in_round_2 = 40
self.test_withdrawable_payment_2 = 3458347
self.test_owed_payment_2 = 35
self.test_historical_rounds_2 = [
{
'roundId': 48,
'roundAnswer': 10,
'roundTimestamp': int(self.test_last_monitored + 10),
'answeredInRound': 48,
'nodeSubmission': 5
},
{
'roundId': 49,
'roundAnswer': 5,
'roundTimestamp': int(self.test_last_monitored + 20),
'answeredInRound': 49,
'nodeSubmission': 10
}
]
self.test_historical_rounds_2_transformed = copy.deepcopy(
self.test_historical_rounds_2)
self.test_historical_rounds_2_transformed[0]['deviation'] = 50.0
self.test_historical_rounds_2_transformed[1]['deviation'] = 100.0
self.test_historical_rounds_3 = [
{
'roundId': 28,
'roundAnswer': 10,
'roundTimestamp': int(self.test_last_monitored + 10),
'answeredInRound': 48,
'nodeSubmission': 5,
'noOfObservations': 4,
'noOfTransmitters': 14,
},
{
'roundId': 29,
'roundAnswer': 5,
'roundTimestamp': int(self.test_last_monitored + 20),
'answeredInRound': 49,
'nodeSubmission': 10,
'noOfObservations': 5,
'noOfTransmitters': 16,
}
]
self.test_historical_rounds_3_transformed = copy.deepcopy(
self.test_historical_rounds_3)
self.test_historical_rounds_3_transformed[0]['deviation'] = 50.0
self.test_historical_rounds_3_transformed[1]['deviation'] = 100.0
self.test_historical_rounds_4 = [
{
'roundId': 38,
'roundAnswer': 10,
'roundTimestamp': int(self.test_last_monitored + 10),
'answeredInRound': 38,
'nodeSubmission': 5,
'noOfObservations': 6,
'noOfTransmitters': 17,
},
{
'roundId': 39,
'roundAnswer': 5,
'roundTimestamp': int(self.test_last_monitored + 20),
'answeredInRound': 39,
'nodeSubmission': 10,
'noOfObservations': 7,
'noOfTransmitters': 18,
}
]
self.test_historical_rounds_4_transformed = copy.deepcopy(
self.test_historical_rounds_4)
self.test_historical_rounds_4_transformed[0]['deviation'] = 50.0
self.test_historical_rounds_4_transformed[1]['deviation'] = 100.0
# Some raw data examples
self.raw_data_example_result_v3 = {
'result': {
'meta_data': {
'monitor_name': self.test_monitor_name,
'node_name': self.test_node_name_1,
'node_id': self.test_node_id_1,
'node_parent_id': self.test_parent_id_1,
'time': self.test_last_monitored + 60
},
'data': {
self.test_proxy_address_1: {
'contractVersion': 3,
'aggregatorAddress': self.test_aggregator_address_1,
'latestRound': self.test_latest_round_1,
'latestAnswer': self.test_latest_answer_1,
'latestTimestamp': self.test_latest_timestamp_1,
'answeredInRound': self.test_answered_in_round_1,
'withdrawablePayment': self.test_withdrawable_payment_1,
'historicalRounds': self.test_historical_rounds_1,
},
self.test_proxy_address_2: {
'contractVersion': 3,
'aggregatorAddress': self.test_aggregator_address_2,
'latestRound': self.test_latest_round_2,
'latestAnswer': self.test_latest_answer_2,
'latestTimestamp': self.test_latest_timestamp_2,
'answeredInRound': self.test_answered_in_round_2,
'withdrawablePayment': self.test_withdrawable_payment_2,
'historicalRounds': self.test_historical_rounds_2,
},
},
}
}
self.raw_data_example_result_v4 = {
'result': {
'meta_data': {
'monitor_name': self.test_monitor_name,
'node_name': self.test_node_name_1,
'node_id': self.test_node_id_1,
'node_parent_id': self.test_parent_id_1,
'time': self.test_last_monitored + 60
},
'data': {
self.test_proxy_address_1: {
'contractVersion': 4,
'aggregatorAddress': self.test_aggregator_address_1,
'latestRound': self.test_latest_round_1,
'latestAnswer': self.test_latest_answer_1,
'latestTimestamp': self.test_latest_timestamp_1,
'answeredInRound': self.test_answered_in_round_1,
'owedPayment': self.test_owed_payment_1,
'historicalRounds': self.test_historical_rounds_3,
},
self.test_proxy_address_2: {
'contractVersion': 4,
'aggregatorAddress': self.test_aggregator_address_2,
'latestRound': self.test_latest_round_2,
'latestAnswer': self.test_latest_answer_2,
'latestTimestamp': self.test_latest_timestamp_2,
'answeredInRound': self.test_answered_in_round_2,
'owedPayment': self.test_owed_payment_2,
'historicalRounds': self.test_historical_rounds_4,
},
},
}
}
self.raw_data_example_error = {
'error': {
'meta_data': {
'monitor_name': self.test_monitor_name,
'node_parent_id': self.test_parent_id_1,
'time': self.test_last_monitored + 60
},
'message': self.test_exception.message,
'code': self.test_exception.code,
}
}
# Transformed data example
self.transformed_data_example_result_v3 = {
'result': {
'meta_data': {
'node_name': self.test_node_name_1,
'node_id': self.test_node_id_1,
'node_parent_id': self.test_parent_id_1,
'last_monitored': self.test_last_monitored + 60
},
'data': {
self.test_proxy_address_1: {
'contractVersion': 3,
'aggregatorAddress': self.test_aggregator_address_1,
'latestRound': self.test_latest_round_1,
'latestAnswer': self.test_latest_answer_1,
'latestTimestamp': self.test_latest_timestamp_1,
'answeredInRound': self.test_answered_in_round_1,
'withdrawablePayment': self.test_withdrawable_payment_1,
'historicalRounds':
self.test_historical_rounds_1_transformed
},
self.test_proxy_address_2: {
'contractVersion': 3,
'aggregatorAddress': self.test_aggregator_address_2,
'latestRound': self.test_latest_round_2,
'latestAnswer': self.test_latest_answer_2,
'latestTimestamp': self.test_latest_timestamp_2,
'answeredInRound': self.test_answered_in_round_2,
'withdrawablePayment': self.test_withdrawable_payment_2,
'historicalRounds':
self.test_historical_rounds_2_transformed
},
},
}
}
self.transformed_data_example_result_v3_last_round_obs = \
copy.deepcopy(self.transformed_data_example_result_v3)
self.transformed_data_example_result_v3_last_round_obs['result'][
'data'][self.test_proxy_address_1]['lastRoundObserved'] = \
self.test_latest_round_1 - 1
self.transformed_data_example_result_v3_last_round_obs['result'][
'data'][self.test_proxy_address_2]['lastRoundObserved'] = \
self.test_latest_round_2 - 1
self.transformed_data_example_result_v4 = {
'result': {
'meta_data': {
'node_name': self.test_node_name_1,
'node_id': self.test_node_id_1,
'node_parent_id': self.test_parent_id_1,
'last_monitored': self.test_last_monitored + 60
},
'data': {
self.test_proxy_address_1: {
'contractVersion': 4,
'aggregatorAddress': self.test_aggregator_address_1,
'latestRound': self.test_latest_round_1,
'latestAnswer': self.test_latest_answer_1,
'latestTimestamp': self.test_latest_timestamp_1,
'answeredInRound': self.test_answered_in_round_1,
'owedPayment': self.test_owed_payment_1,
'historicalRounds':
self.test_historical_rounds_3_transformed
},
self.test_proxy_address_2: {
'contractVersion': 4,
'aggregatorAddress': self.test_aggregator_address_2,
'latestRound': self.test_latest_round_2,
'latestAnswer': self.test_latest_answer_2,
'latestTimestamp': self.test_latest_timestamp_2,
'answeredInRound': self.test_answered_in_round_2,
'owedPayment': self.test_owed_payment_2,
'historicalRounds':
self.test_historical_rounds_4_transformed
},
},
}
}
self.transformed_data_example_result_v4_last_round_obs = \
copy.deepcopy(self.transformed_data_example_result_v4)
self.transformed_data_example_result_v4_last_round_obs['result'][
'data'][self.test_proxy_address_1]['lastRoundObserved'] = 29
self.transformed_data_example_result_v4_last_round_obs['result'][
'data'][self.test_proxy_address_2]['lastRoundObserved'] = 39
self.transformed_data_example_error = {
'error': {
'meta_data': {
'node_parent_id': self.test_parent_id_1,
'time': self.test_last_monitored + 60
},
'message': self.test_exception.message,
'code': self.test_exception.code,
}
}
self.invalid_transformed_data = {'bad_key': 'bad_value'}
# Chainlink contracts with received state
self.test_cl_contract_1_new_metrics = V3ChainlinkContract(
self.test_proxy_address_1, self.test_aggregator_address_1,
self.test_parent_id_1, self.test_node_id_1)
self.test_cl_contract_2_new_metrics = V3ChainlinkContract(
self.test_proxy_address_2, self.test_aggregator_address_2,
self.test_parent_id_1, self.test_node_id_1)
self.test_cl_contract_3_new_metrics = V4ChainlinkContract(
self.test_proxy_address_1, self.test_aggregator_address_1,
self.test_parent_id_1, self.test_node_id_1)
self.test_cl_contract_4_new_metrics = V4ChainlinkContract(
self.test_proxy_address_2, self.test_aggregator_address_2,
self.test_parent_id_1, self.test_node_id_1)
# Test state before receiving new metrics
self.test_state_v3 = {
self.test_node_id_1: {
self.test_proxy_address_1: copy.deepcopy(
self.test_cl_contract_1_new_metrics),
self.test_proxy_address_2: copy.deepcopy(
self.test_cl_contract_2_new_metrics),
},
}
self.test_state_v4 = {
self.test_node_id_1: {
self.test_proxy_address_1: copy.deepcopy(
self.test_cl_contract_3_new_metrics),
self.test_proxy_address_2: copy.deepcopy(
self.test_cl_contract_4_new_metrics),
},
}
# Update the states with received metrics
self.test_cl_contract_1_new_metrics.set_latest_round(
self.test_latest_round_1)
self.test_cl_contract_1_new_metrics.set_latest_answer(
self.test_latest_answer_1)
self.test_cl_contract_1_new_metrics.set_latest_timestamp(
self.test_latest_timestamp_1)
self.test_cl_contract_1_new_metrics.set_answered_in_round(
self.test_answered_in_round_1)
self.test_cl_contract_1_new_metrics.set_withdrawable_payment(
self.test_withdrawable_payment_1)
self.test_cl_contract_1_new_metrics.set_historical_rounds(
self.test_historical_rounds_1_transformed)
self.test_cl_contract_1_new_metrics.set_last_monitored(
self.test_last_monitored + 60)
self.test_cl_contract_1_new_metrics.set_last_round_observed(39)
self.test_cl_contract_2_new_metrics.set_latest_round(
self.test_latest_round_2)
self.test_cl_contract_2_new_metrics.set_latest_answer(
self.test_latest_answer_2)
self.test_cl_contract_2_new_metrics.set_latest_timestamp(
self.test_latest_timestamp_2)
self.test_cl_contract_2_new_metrics.set_answered_in_round(
self.test_answered_in_round_2)
self.test_cl_contract_2_new_metrics.set_withdrawable_payment(
self.test_withdrawable_payment_2)
self.test_cl_contract_2_new_metrics.set_historical_rounds(
self.test_historical_rounds_2_transformed)
self.test_cl_contract_2_new_metrics.set_last_monitored(
self.test_last_monitored + 60)
self.test_cl_contract_2_new_metrics.set_last_round_observed(49)
self.test_cl_contract_3_new_metrics.set_latest_round(
self.test_latest_round_1)
self.test_cl_contract_3_new_metrics.set_latest_answer(
self.test_latest_answer_1)
self.test_cl_contract_3_new_metrics.set_latest_timestamp(
self.test_latest_timestamp_1)
self.test_cl_contract_3_new_metrics.set_answered_in_round(
self.test_answered_in_round_1)
self.test_cl_contract_3_new_metrics.set_owed_payment(
self.test_owed_payment_1)
self.test_cl_contract_3_new_metrics.set_historical_rounds(
self.test_historical_rounds_3_transformed)
self.test_cl_contract_3_new_metrics.set_last_monitored(
self.test_last_monitored + 60)
self.test_cl_contract_3_new_metrics.set_last_round_observed(29)
self.test_cl_contract_4_new_metrics.set_latest_round(
self.test_latest_round_2)
self.test_cl_contract_4_new_metrics.set_latest_answer(
self.test_latest_answer_2)
self.test_cl_contract_4_new_metrics.set_latest_timestamp(
self.test_latest_timestamp_2)
self.test_cl_contract_4_new_metrics.set_answered_in_round(
self.test_answered_in_round_2)
self.test_cl_contract_4_new_metrics.set_owed_payment(
self.test_owed_payment_2)
self.test_cl_contract_4_new_metrics.set_historical_rounds(
self.test_historical_rounds_4_transformed)
self.test_cl_contract_4_new_metrics.set_last_monitored(
self.test_last_monitored + 60)
self.test_cl_contract_4_new_metrics.set_last_round_observed(39)
# Test state after receiving new metrics
self.test_state_v3_updated = {
self.test_node_id_1: {
self.test_proxy_address_1: self.test_cl_contract_1_new_metrics,
self.test_proxy_address_2: self.test_cl_contract_2_new_metrics,
},
}
self.test_state_v4_updated = {
self.test_node_id_1: {
self.test_proxy_address_1: self.test_cl_contract_3_new_metrics,
self.test_proxy_address_2: self.test_cl_contract_4_new_metrics,
},
}
meta_data_for_alerting_result_v3 = \
self.transformed_data_example_result_v3['result']['meta_data']
self.test_data_for_alerting_result_v3 = {
'result': {
'meta_data': meta_data_for_alerting_result_v3,
'data': {
self.test_proxy_address_1: {
'latestRound': {
'current': self.test_latest_round_1,
'previous': None,
},
'latestAnswer': {
'current': self.test_latest_answer_1,
'previous': None,
},
'latestTimestamp': {
'current': self.test_latest_timestamp_1,
'previous': None,
},
'answeredInRound': {
'current': self.test_answered_in_round_1,
'previous': None,
},
'withdrawablePayment': {
'current': self.test_withdrawable_payment_1,
'previous': None,
},
'historicalRounds': {
'current':
self.test_historical_rounds_1_transformed,
'previous': [],
},
'lastRoundObserved': {
'current': 39,
'previous': None
},
'contractVersion': 3,
'aggregatorAddress': self.test_aggregator_address_1,
},
self.test_proxy_address_2: {
'latestRound': {
'current': self.test_latest_round_2,
'previous': None,
},
'latestAnswer': {
'current': self.test_latest_answer_2,
'previous': None,
},
'latestTimestamp': {
'current': self.test_latest_timestamp_2,
'previous': None,
},
'answeredInRound': {
'current': self.test_answered_in_round_2,
'previous': None,
},
'withdrawablePayment': {
'current': self.test_withdrawable_payment_2,
'previous': None,
},
'historicalRounds': {
'current':
self.test_historical_rounds_2_transformed,
'previous': [],
},
'lastRoundObserved': {
'current': 49,
'previous': None
},
'contractVersion': 3,
'aggregatorAddress': self.test_aggregator_address_2,
},
}
}
}
meta_data_for_alerting_result_v4 = \
self.transformed_data_example_result_v4['result']['meta_data']
self.test_data_for_alerting_result_v4 = {
'result': {
'meta_data': meta_data_for_alerting_result_v4,
'data': {
self.test_proxy_address_1: {
'latestRound': {
'current': self.test_latest_round_1,
'previous': None,
},
'latestAnswer': {
'current': self.test_latest_answer_1,
'previous': None,
},
'latestTimestamp': {
'current': self.test_latest_timestamp_1,
'previous': None,
},
'answeredInRound': {
'current': self.test_answered_in_round_1,
'previous': None,
},
'owedPayment': {
'current': self.test_owed_payment_1,
'previous': None,
},
'historicalRounds': {
'current':
self.test_historical_rounds_3_transformed,
'previous': [],
},
'lastRoundObserved': {
'current': 29,
'previous': None
},
'contractVersion': 4,
'aggregatorAddress': self.test_aggregator_address_1,
},
self.test_proxy_address_2: {
'latestRound': {
'current': self.test_latest_round_2,
'previous': None,
},
'latestAnswer': {
'current': self.test_latest_answer_2,
'previous': None,
},
'latestTimestamp': {
'current': self.test_latest_timestamp_2,
'previous': None,
},
'answeredInRound': {
'current': self.test_answered_in_round_2,
'previous': None,
},
'owedPayment': {
'current': self.test_owed_payment_2,
'previous': None,
},
'historicalRounds': {
'current':
self.test_historical_rounds_4_transformed,
'previous': [],
},
'lastRoundObserved': {
'current': 39,
'previous': None
},
'contractVersion': 4,
'aggregatorAddress': self.test_aggregator_address_2,
},
}
}
}
self.test_data_transformer = ChainlinkContractsDataTransformer(
self.transformer_name, self.dummy_logger, self.redis, self.rabbitmq,
self.max_queue_size)
def tearDown(self) -> None:
# Delete any queues and exchanges which are common across many tests
connect_to_rabbit(self.test_data_transformer.rabbitmq)
delete_queue_if_exists(self.test_data_transformer.rabbitmq,
self.test_rabbit_queue_name)
delete_queue_if_exists(self.test_data_transformer.rabbitmq,
CL_CONTRACTS_DT_INPUT_QUEUE_NAME)
delete_exchange_if_exists(self.test_data_transformer.rabbitmq,
HEALTH_CHECK_EXCHANGE)
delete_exchange_if_exists(self.test_data_transformer.rabbitmq,
RAW_DATA_EXCHANGE)
delete_exchange_if_exists(self.test_data_transformer.rabbitmq,
STORE_EXCHANGE)
delete_exchange_if_exists(self.test_data_transformer.rabbitmq,
ALERT_EXCHANGE)
disconnect_from_rabbit(self.test_data_transformer.rabbitmq)
self.dummy_logger = None
self.connection_check_time_interval = None
self.rabbitmq = None
self.test_exception = None
self.redis = None
self.test_publishing_queue = None
self.test_data_transformer = None
self.test_cl_contract_1_new_metrics = None
self.test_cl_contract_2_new_metrics = None
self.test_cl_contract_3_new_metrics = None
self.test_cl_contract_4_new_metrics = None
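    # For orientation in the tests below: the transformer's state is a nested
    # dict keyed first by node id and then by proxy address, e.g. (a hedged
    # sketch with placeholder keys, not the actual fixture values):
    #
    #     state = {
    #         '<node_id>': {
    #             '<proxy_address_1>': <V3ChainlinkContract instance>,
    #             '<proxy_address_2>': <V4ChainlinkContract instance>,
    #         },
    #     }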
def test_str_returns_transformer_name(self) -> None:
self.assertEqual(self.transformer_name,
str(self.test_data_transformer))
def test_transformer_name_returns_transformer_name(self) -> None:
self.assertEqual(self.transformer_name,
self.test_data_transformer.transformer_name)
def test_redis_returns_transformer_redis_instance(self) -> None:
self.assertEqual(self.redis, self.test_data_transformer.redis)
def test_state_returns_the_nodes_state(self) -> None:
self.test_data_transformer._state = self.test_data_str
self.assertEqual(self.test_data_str, self.test_data_transformer.state)
def test_publishing_queue_returns_publishing_queue(self) -> None:
self.test_data_transformer._publishing_queue = \
self.test_publishing_queue
self.assertEqual(self.test_publishing_queue,
self.test_data_transformer.publishing_queue)
def test_publishing_queue_has_the_correct_max_size(self) -> None:
self.assertEqual(self.max_queue_size,
self.test_data_transformer.publishing_queue.maxsize)
@mock.patch.object(RabbitMQApi, "start_consuming")
def test_listen_for_data_calls_start_consuming(
self, mock_start_consuming) -> None:
mock_start_consuming.return_value = None
self.test_data_transformer._listen_for_data()
mock_start_consuming.assert_called_once()
@mock.patch.object(RabbitMQApi, "basic_consume")
@mock.patch.object(RabbitMQApi, "basic_qos")
def test_initialise_rabbit_initializes_everything_as_expected(
self, mock_basic_qos, mock_basic_consume) -> None:
mock_basic_consume.return_value = None
# To make sure that there is no connection/channel already established
self.assertIsNone(self.rabbitmq.connection)
self.assertIsNone(self.rabbitmq.channel)
# To make sure that the exchanges and queues have not already been
# declared
self.rabbitmq.connect()
self.test_data_transformer.rabbitmq.queue_delete(
CL_CONTRACTS_DT_INPUT_QUEUE_NAME)
self.test_data_transformer.rabbitmq.exchange_delete(
HEALTH_CHECK_EXCHANGE)
self.test_data_transformer.rabbitmq.exchange_delete(RAW_DATA_EXCHANGE)
self.test_data_transformer.rabbitmq.exchange_delete(STORE_EXCHANGE)
self.test_data_transformer.rabbitmq.exchange_delete(ALERT_EXCHANGE)
self.rabbitmq.disconnect()
self.test_data_transformer._initialise_rabbitmq()
        # Check that the connection has been opened and marked as open, that
        # the delivery confirmation variable is set, and that basic_qos was
        # called successfully.
self.assertTrue(self.test_data_transformer.rabbitmq.is_connected)
self.assertTrue(
self.test_data_transformer.rabbitmq.connection.is_open)
self.assertTrue(
self.test_data_transformer.rabbitmq
.channel._delivery_confirmation)
mock_basic_qos.assert_called_once_with(prefetch_count=round(
self.max_queue_size / 5))
# Check whether the producing exchanges have been created by
# using passive=True. If this check fails an exception is raised
# automatically.
self.test_data_transformer.rabbitmq.exchange_declare(
STORE_EXCHANGE, passive=True)
self.test_data_transformer.rabbitmq.exchange_declare(
ALERT_EXCHANGE, passive=True)
self.test_data_transformer.rabbitmq.exchange_declare(
HEALTH_CHECK_EXCHANGE, passive=True)
        # Check whether the consuming exchanges and queues have been created
        # by sending messages with the same routing keys as for the bindings.
self.test_data_transformer.rabbitmq.basic_publish_confirm(
exchange=RAW_DATA_EXCHANGE,
routing_key=CHAINLINK_CONTRACTS_RAW_DATA_ROUTING_KEY,
body=self.test_data_str, is_body_dict=False,
properties=pika.BasicProperties(delivery_mode=2), mandatory=True)
# Re-declare queue to get the number of messages, and check that the
# message received is the message sent
res = self.test_data_transformer.rabbitmq.queue_declare(
CL_CONTRACTS_DT_INPUT_QUEUE_NAME, False, True, False, False)
self.assertEqual(1, res.method.message_count)
_, _, body = self.test_data_transformer.rabbitmq.basic_get(
CL_CONTRACTS_DT_INPUT_QUEUE_NAME)
self.assertEqual(self.test_data_str, body.decode())
mock_basic_consume.assert_called_once()
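    # Two RabbitMQ verification tricks used above recur throughout this suite:
    # declaring an exchange with passive=True, which raises if the exchange
    # does not exist and is a no-op otherwise, and publishing on an
    # exchange/routing-key pair and reading the message back to prove the
    # queue binding. A minimal sketch of the latter (illustrative names only):
    #
    #     rabbitmq.basic_publish_confirm(
    #         exchange=SOME_EXCHANGE, routing_key=SOME_ROUTING_KEY,
    #         body=payload, is_body_dict=False, mandatory=True)
    #     _, _, body = rabbitmq.basic_get(BOUND_QUEUE_NAME)
    #     assert payload == body.decode()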
def test_send_heartbeat_sends_a_heartbeat_correctly(self) -> None:
# This test creates a queue which receives messages with the same
# routing key as the ones set by send_heartbeat, and checks that the
# heartbeat is received
self.test_data_transformer._initialise_rabbitmq()
        # Delete the queue beforehand to avoid leftover messages from
        # previous errors.
self.test_data_transformer.rabbitmq.queue_delete(
self.test_rabbit_queue_name)
res = self.test_data_transformer.rabbitmq.queue_declare(
queue=self.test_rabbit_queue_name, durable=True, exclusive=False,
auto_delete=False, passive=False
)
self.assertEqual(0, res.method.message_count)
self.test_data_transformer.rabbitmq.queue_bind(
queue=self.test_rabbit_queue_name, exchange=HEALTH_CHECK_EXCHANGE,
routing_key=HEARTBEAT_OUTPUT_WORKER_ROUTING_KEY)
self.test_data_transformer._send_heartbeat(self.test_heartbeat)
        # By re-declaring the queue we can get the number of messages
        # currently in it.
res = self.test_data_transformer.rabbitmq.queue_declare(
queue=self.test_rabbit_queue_name, durable=True, exclusive=False,
auto_delete=False, passive=True
)
self.assertEqual(1, res.method.message_count)
# Check that the message received is actually the HB
_, _, body = self.test_data_transformer.rabbitmq.basic_get(
self.test_rabbit_queue_name)
self.assertEqual(self.test_heartbeat, json.loads(body))
def test_load_state_successful_if_cl_contract_in_redis_and_redis_online(
self) -> None:
"""
We will perform this test for both V3 and V4 type contracts
"""
# Clean test db
self.redis.delete_all()
# Save state to Redis first
save_chainlink_contract_to_redis(self.redis,
self.test_cl_contract_1_new_metrics)
save_chainlink_contract_to_redis(self.redis,
self.test_cl_contract_4_new_metrics)
# Reset Chainlink contract to default values
self.test_cl_contract_1_new_metrics.reset()
self.test_cl_contract_4_new_metrics.reset()
# Load state
loaded_cl_contract_v3 = self.test_data_transformer.load_state(
self.test_cl_contract_1_new_metrics)
loaded_cl_contract_v4 = self.test_data_transformer.load_state(
self.test_cl_contract_4_new_metrics)
self.assertEqual(self.test_latest_round_1,
loaded_cl_contract_v3.latest_round)
self.assertEqual(self.test_latest_answer_1,
loaded_cl_contract_v3.latest_answer)
self.assertEqual(self.test_latest_timestamp_1,
loaded_cl_contract_v3.latest_timestamp)
self.assertEqual(self.test_answered_in_round_1,
loaded_cl_contract_v3.answered_in_round)
self.assertEqual(self.test_historical_rounds_1_transformed,
loaded_cl_contract_v3.historical_rounds)
self.assertEqual(self.test_withdrawable_payment_1,
loaded_cl_contract_v3.withdrawable_payment)
self.assertEqual(self.test_last_monitored + 60,
loaded_cl_contract_v3.last_monitored)
self.assertEqual(self.test_latest_round_2,
loaded_cl_contract_v4.latest_round)
self.assertEqual(self.test_latest_answer_2,
loaded_cl_contract_v4.latest_answer)
self.assertEqual(self.test_latest_timestamp_2,
loaded_cl_contract_v4.latest_timestamp)
self.assertEqual(self.test_answered_in_round_2,
loaded_cl_contract_v4.answered_in_round)
self.assertEqual(self.test_historical_rounds_4_transformed,
loaded_cl_contract_v4.historical_rounds)
self.assertEqual(self.test_owed_payment_2,
loaded_cl_contract_v4.owed_payment)
self.assertEqual(self.test_last_monitored + 60,
loaded_cl_contract_v4.last_monitored)
# Clean test db
self.redis.delete_all()
def test_load_state_keeps_same_state_if_cl_contract_in_redis_and_redis_off(
self) -> None:
"""
We will perform this test for both V3 and V4 type contracts
"""
# Clean test db
self.redis.delete_all()
# Save state to Redis first
save_chainlink_contract_to_redis(self.redis,
self.test_cl_contract_1_new_metrics)
save_chainlink_contract_to_redis(self.redis,
self.test_cl_contract_4_new_metrics)
# Reset Chainlink contract to default values
self.test_cl_contract_1_new_metrics.reset()
self.test_cl_contract_4_new_metrics.reset()
# Set the _do_not_use_if_recently_went_down function to return True
# as if redis is down
self.test_data_transformer.redis._do_not_use_if_recently_went_down = \
lambda: True
# Load state
loaded_cl_contract_v3 = self.test_data_transformer.load_state(
self.test_cl_contract_1_new_metrics)
loaded_cl_contract_v4 = self.test_data_transformer.load_state(
self.test_cl_contract_4_new_metrics)
self.assertEqual(None, loaded_cl_contract_v3.latest_round)
self.assertEqual(None, loaded_cl_contract_v3.latest_answer)
self.assertEqual(None, loaded_cl_contract_v3.latest_timestamp)
self.assertEqual(None, loaded_cl_contract_v3.answered_in_round)
self.assertEqual([], loaded_cl_contract_v3.historical_rounds)
self.assertEqual(None, loaded_cl_contract_v3.withdrawable_payment)
self.assertEqual(None, loaded_cl_contract_v3.last_monitored)
self.assertEqual(None, loaded_cl_contract_v4.latest_round)
self.assertEqual(None, loaded_cl_contract_v4.latest_answer)
self.assertEqual(None, loaded_cl_contract_v4.latest_timestamp)
self.assertEqual(None, loaded_cl_contract_v4.answered_in_round)
self.assertEqual([], loaded_cl_contract_v4.historical_rounds)
self.assertEqual(None, loaded_cl_contract_v4.owed_payment)
self.assertEqual(None, loaded_cl_contract_v4.last_monitored)
# Clean test db
self.redis.delete_all()
def test_load_state_keeps_same_state_if_contract_not_in_redis_and_redis_on(
self) -> None:
"""
We will perform this test for both V3 and V4 type contracts
"""
# Clean test db
self.redis.delete_all()
# Load state
loaded_cl_contract_v3 = self.test_data_transformer.load_state(
self.test_cl_contract_1_new_metrics)
loaded_cl_contract_v4 = self.test_data_transformer.load_state(
self.test_cl_contract_4_new_metrics)
self.assertEqual(self.test_latest_round_1,
loaded_cl_contract_v3.latest_round)
self.assertEqual(self.test_latest_answer_1,
loaded_cl_contract_v3.latest_answer)
self.assertEqual(self.test_latest_timestamp_1,
loaded_cl_contract_v3.latest_timestamp)
self.assertEqual(self.test_answered_in_round_1,
loaded_cl_contract_v3.answered_in_round)
self.assertEqual(self.test_historical_rounds_1_transformed,
loaded_cl_contract_v3.historical_rounds)
self.assertEqual(self.test_withdrawable_payment_1,
loaded_cl_contract_v3.withdrawable_payment)
self.assertEqual(self.test_last_monitored + 60,
loaded_cl_contract_v3.last_monitored)
self.assertEqual(self.test_latest_round_2,
loaded_cl_contract_v4.latest_round)
self.assertEqual(self.test_latest_answer_2,
loaded_cl_contract_v4.latest_answer)
self.assertEqual(self.test_latest_timestamp_2,
loaded_cl_contract_v4.latest_timestamp)
self.assertEqual(self.test_answered_in_round_2,
loaded_cl_contract_v4.answered_in_round)
self.assertEqual(self.test_historical_rounds_4_transformed,
loaded_cl_contract_v4.historical_rounds)
self.assertEqual(self.test_owed_payment_2,
loaded_cl_contract_v4.owed_payment)
self.assertEqual(self.test_last_monitored + 60,
loaded_cl_contract_v4.last_monitored)
# Clean test db
self.redis.delete_all()
def test_load_state_keeps_same_state_if_contract_not_in_redis_and_redis_off(
self) -> None:
# Clean test db
self.redis.delete_all()
# Set the _do_not_use_if_recently_went_down function to return True
# as if redis is down
self.test_data_transformer.redis._do_not_use_if_recently_went_down = \
lambda: True
# Load state
loaded_cl_contract_v3 = self.test_data_transformer.load_state(
self.test_cl_contract_1_new_metrics)
loaded_cl_contract_v4 = self.test_data_transformer.load_state(
self.test_cl_contract_4_new_metrics)
self.assertEqual(self.test_latest_round_1,
loaded_cl_contract_v3.latest_round)
self.assertEqual(self.test_latest_answer_1,
loaded_cl_contract_v3.latest_answer)
self.assertEqual(self.test_latest_timestamp_1,
loaded_cl_contract_v3.latest_timestamp)
self.assertEqual(self.test_answered_in_round_1,
loaded_cl_contract_v3.answered_in_round)
self.assertEqual(self.test_historical_rounds_1_transformed,
loaded_cl_contract_v3.historical_rounds)
self.assertEqual(self.test_withdrawable_payment_1,
loaded_cl_contract_v3.withdrawable_payment)
self.assertEqual(self.test_last_monitored + 60,
loaded_cl_contract_v3.last_monitored)
self.assertEqual(self.test_latest_round_2,
loaded_cl_contract_v4.latest_round)
self.assertEqual(self.test_latest_answer_2,
loaded_cl_contract_v4.latest_answer)
self.assertEqual(self.test_latest_timestamp_2,
loaded_cl_contract_v4.latest_timestamp)
self.assertEqual(self.test_answered_in_round_2,
loaded_cl_contract_v4.answered_in_round)
self.assertEqual(self.test_historical_rounds_4_transformed,
loaded_cl_contract_v4.historical_rounds)
self.assertEqual(self.test_owed_payment_2,
loaded_cl_contract_v4.owed_payment)
self.assertEqual(self.test_last_monitored + 60,
loaded_cl_contract_v4.last_monitored)
# Clean test db
self.redis.delete_all()
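    # Taken together, the four load_state tests above pin down its contract:
    # the passed contract's metrics are overwritten only when Redis is
    # reachable *and* holds saved state for that contract; in every other case
    # the contract is returned exactly as it was passed in. A hedged sketch of
    # the expected behaviour (illustrative names, not the real implementation):
    #
    #     def load_state(contract):
    #         if redis_is_up() and redis_has(contract):
    #             contract.update_from(redis_fetch(contract))
    #         return contract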
def test_update_state_raises_except_and_keeps_state_if_no_result_or_err(
self) -> None:
self.test_data_transformer._state = copy.deepcopy(self.test_state_v3)
expected_state = copy.deepcopy(self.test_state_v3)
# First confirm that an exception is raised
self.assertRaises(ReceivedUnexpectedDataException,
self.test_data_transformer._update_state,
self.invalid_transformed_data)
# Check that the state was not modified
self.assertEqual(expected_state, self.test_data_transformer.state)
@parameterized.expand([
('self.transformed_data_example_result_v3', 'self.test_state_v3',
'self.test_state_v3_updated'),
('self.transformed_data_example_result_v4', 'self.test_state_v4',
'self.test_state_v4_updated'),
('self.transformed_data_example_error', 'self.test_state_v3',
'self.test_state_v3'),
])
def test_update_state_updates_state_correctly(
self, transformed_data, initial_state, expected_state) -> None:
self.test_data_transformer._state = copy.deepcopy(eval(initial_state))
self.test_data_transformer._state['dummy_id'] = self.test_data_str
self.test_data_transformer._update_state(eval(transformed_data))
evaluated_expected_state = eval(expected_state)
evaluated_expected_state['dummy_id'] = self.test_data_str
self.assertEqual(self.test_data_transformer.state,
evaluated_expected_state)
@parameterized.expand([
('self.transformed_data_example_result_v3',
'self.transformed_data_example_result_v3_last_round_obs'),
('self.transformed_data_example_result_v4',
'self.transformed_data_example_result_v4_last_round_obs'),
('self.transformed_data_example_error',
'self.transformed_data_example_error'),
])
def test_process_transformed_data_for_saving_returns_expected_data(
self, transformed_data: str, expected_processed_data: str) -> None:
processed_data = \
self.test_data_transformer._process_transformed_data_for_saving(
eval(transformed_data))
self.assertDictEqual(eval(expected_processed_data), processed_data)
def test_proc_trans_data_for_saving_raises_unexp_data_except_on_unexp_data(
self) -> None:
self.assertRaises(
ReceivedUnexpectedDataException,
self.test_data_transformer._process_transformed_data_for_saving,
self.invalid_transformed_data)
@parameterized.expand([
('self.transformed_data_example_result_v3', 'self.test_state_v3',
'self.test_data_for_alerting_result_v3'),
('self.transformed_data_example_result_v4', 'self.test_state_v4',
'self.test_data_for_alerting_result_v4'),
('self.transformed_data_example_error', 'self.test_state_v3',
'self.transformed_data_example_error'),
])
def test_process_transformed_data_for_alerting_returns_expected_data(
self, transformed_data, initial_state,
expected_processed_data) -> None:
self.test_data_transformer._state = copy.deepcopy(eval(initial_state))
actual_data = \
self.test_data_transformer._process_transformed_data_for_alerting(
eval(transformed_data))
self.assertEqual(eval(expected_processed_data), actual_data)
def test_proc_trans_data_for_alerting_raise_unex_data_except_on_unex_data(
self) -> None:
self.assertRaises(
ReceivedUnexpectedDataException,
self.test_data_transformer._process_transformed_data_for_alerting,
self.invalid_transformed_data)
@parameterized.expand([
('self.raw_data_example_result_v3', 'self.test_state_v3',
'self.transformed_data_example_result_v3'),
('self.raw_data_example_result_v4', 'self.test_state_v4',
'self.transformed_data_example_result_v4'),
('self.raw_data_example_error', 'self.test_state_v3',
'self.transformed_data_example_error'),
])
@mock.patch.object(ChainlinkContractsDataTransformer,
"_process_transformed_data_for_alerting")
@mock.patch.object(ChainlinkContractsDataTransformer,
"_process_transformed_data_for_saving")
def test_transform_data_returns_expected_data_if_result(
self, raw_data, init_state, expected_processed_data,
mock_process_for_saving, mock_process_for_alerting) -> None:
self.test_data_transformer._state = copy.deepcopy(eval(init_state))
mock_process_for_saving.return_value = {'key_1': 'val1'}
mock_process_for_alerting.return_value = {'key_2': 'val2'}
trans_data, data_for_alerting, data_for_saving = \
self.test_data_transformer._transform_data(eval(raw_data))
expected_trans_data = copy.deepcopy(eval(expected_processed_data))
self.assertEqual(expected_trans_data, trans_data)
self.assertEqual({'key_2': 'val2'}, data_for_alerting)
self.assertEqual({'key_1': 'val1'}, data_for_saving)
def test_transform_data_raises_unexpected_data_exception_on_unexpected_data(
self) -> None:
self.assertRaises(ReceivedUnexpectedDataException,
self.test_data_transformer._transform_data,
self.invalid_transformed_data)
def test_place_latest_data_on_queue_places_the_correct_data_on_queue(
self) -> None:
self.test_data_transformer._place_latest_data_on_queue(
self.test_data_for_alerting_result_v3,
self.transformed_data_example_result_v3
)
expected_data_for_alerting = {
'exchange': ALERT_EXCHANGE,
'routing_key': CL_CONTRACT_TRANSFORMED_DATA_ROUTING_KEY,
'data': self.test_data_for_alerting_result_v3,
'properties': pika.BasicProperties(delivery_mode=2),
'mandatory': True
}
expected_data_for_saving = {
'exchange': STORE_EXCHANGE,
'routing_key': CL_CONTRACT_TRANSFORMED_DATA_ROUTING_KEY,
'data': self.transformed_data_example_result_v3,
'properties': pika.BasicProperties(delivery_mode=2),
'mandatory': True
}
self.assertEqual(
2, self.test_data_transformer.publishing_queue.qsize())
self.assertDictEqual(
expected_data_for_alerting,
self.test_data_transformer.publishing_queue.queue[0])
self.assertDictEqual(
expected_data_for_saving,
self.test_data_transformer.publishing_queue.queue[1])
@parameterized.expand(
[(V3ChainlinkContract, 3,), (V4ChainlinkContract, 4,), ])
def test_create_state_entry_creates_new_entry_if_no_entry_for_contract(
self, contract_class, version) -> None:
"""
In this test we will check that a new state entry will be created for a
node's contract state if there is no entry for that node or contract
yet. This test will be performed for both v3 and v4 contracts
"""
# Add some dummy state to confirm that the state is updated correctly
self.test_data_transformer._state['dummy_id'] = self.test_data_str
# Test for when no entry has been added yet for both the contract and
# the node
state_created = self.test_data_transformer._create_state_entry(
self.test_node_id_1, self.test_proxy_address_1,
self.test_parent_id_1, version, self.test_aggregator_address_1)
expected_state = {
'dummy_id': self.test_data_str,
self.test_node_id_1: {
self.test_proxy_address_1: contract_class(
self.test_proxy_address_1, self.test_aggregator_address_1,
self.test_parent_id_1, self.test_node_id_1)
}
}
self.assertEqual(expected_state, self.test_data_transformer.state)
self.assertTrue(state_created)
# Test for when an entry has already been created for the node
state_created = self.test_data_transformer._create_state_entry(
self.test_node_id_1, self.test_proxy_address_2,
self.test_parent_id_1, version, self.test_aggregator_address_2)
expected_state[self.test_node_id_1][
self.test_proxy_address_2] = contract_class(
self.test_proxy_address_2, self.test_aggregator_address_2,
self.test_parent_id_1, self.test_node_id_1)
self.assertEqual(expected_state, self.test_data_transformer.state)
self.assertTrue(state_created)
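    # A hedged sketch of the behaviour pinned down by the _create_state_entry
    # tests in this group (illustrative pseudo-logic, not the actual
    # implementation):
    #
    #     if node_id not in state or proxy not in state[node_id] \
    #             or version_of(state[node_id][proxy]) != version:
    #         state[node_id][proxy] = contract_class_for(version)(
    #             proxy, aggregator, parent_id, node_id)
    #         return True
    #     return False
    #
    # where version_of and contract_class_for are hypothetical helpers mapping
    # a contract to its version and a version (3/4) to V3ChainlinkContract or
    # V4ChainlinkContract respectively.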
@parameterized.expand([
(3, 'self.test_state_v3_updated',),
(4, 'self.test_state_v4_updated',),
])
def test_create_state_entry_no_new_contract_entry_if_already_created_with_same_version(
self, version, init_state) -> None:
"""
In this test we will check that no new entry will be created for a
node's contract state if there is already one with the same version.
This test will be performed for both v3 and v4 contracts
"""
self.test_data_transformer._state = copy.deepcopy(eval(init_state))
self.test_data_transformer._state['dummy_id'] = self.test_data_str
state_created = self.test_data_transformer._create_state_entry(
self.test_node_id_1, self.test_proxy_address_1,
self.test_parent_id_1, version, self.test_aggregator_address_1)
# We expect an unchanged state
expected_state = copy.deepcopy(eval(init_state))
expected_state['dummy_id'] = self.test_data_str
self.assertEqual(expected_state, self.test_data_transformer.state)
self.assertFalse(state_created)
@parameterized.expand([
('self.test_state_v3_updated', V4ChainlinkContract, 4,),
('self.test_state_v4_updated', V3ChainlinkContract, 3,),
])
def test_create_state_entry_creates_new_entry_if_contract_entry_has_a_different_version(
self, init_state, new_contract_class, new_version) -> None:
"""
In this test we will check that a new state entry will be created for a
node's contract state if there is an entry with a different version for
that node and contract
"""
self.test_data_transformer._state = copy.deepcopy(eval(init_state))
self.test_data_transformer._state['dummy_id'] = self.test_data_str
state_created = self.test_data_transformer._create_state_entry(
self.test_node_id_1, self.test_proxy_address_1,
self.test_parent_id_1, new_version, self.test_aggregator_address_1)
expected_state = {
'dummy_id': self.test_data_str,
self.test_node_id_1: {
self.test_proxy_address_1: new_contract_class(
self.test_proxy_address_1, self.test_aggregator_address_1,
self.test_parent_id_1, self.test_node_id_1),
self.test_proxy_address_2:
eval(init_state)[self.test_node_id_1][
self.test_proxy_address_2]
}
}
self.assertEqual(expected_state, self.test_data_transformer.state)
self.assertTrue(state_created)
@parameterized.expand([({}, False,), ('self.test_state_v3', True), ])
@mock.patch.object(ChainlinkContractsDataTransformer, "_transform_data")
@mock.patch.object(RabbitMQApi, "basic_ack")
def test_process_raw_data_transforms_data_if_data_valid(
self, state, state_is_str, mock_ack, mock_trans_data) -> None:
"""
        We will check that the data is transformed by checking that
        `_transform_data` is called correctly; the actual transformations are
        already tested. Note that we test for both result and error, and for
        when the node and contracts are both in the state and not in it.
"""
mock_ack.return_value = None
mock_trans_data.return_value = (None, None, None)
        # We must initialise rabbit to create the environment and parameters
        # needed by `_process_raw_data`
self.test_data_transformer._initialise_rabbitmq()
blocking_channel = self.test_data_transformer.rabbitmq.channel
method = pika.spec.Basic.Deliver(
routing_key=CHAINLINK_CONTRACTS_RAW_DATA_ROUTING_KEY)
body_result = json.dumps(self.raw_data_example_result_v3)
body_error = json.dumps(self.raw_data_example_error)
properties = pika.spec.BasicProperties()
if state_is_str:
self.test_data_transformer._state = copy.deepcopy(eval(state))
else:
self.test_data_transformer._state = copy.deepcopy(state)
# Send raw data
self.test_data_transformer._process_raw_data(blocking_channel,
method, properties,
body_result)
mock_trans_data.assert_called_once_with(
self.raw_data_example_result_v3)
mock_trans_data.reset_mock()
        # Reset the state as if the node had not been added yet
if state_is_str:
self.test_data_transformer._state = copy.deepcopy(eval(state))
else:
self.test_data_transformer._state = copy.deepcopy(state)
self.test_data_transformer._process_raw_data(blocking_channel,
method, properties,
body_error)
mock_trans_data.assert_called_once_with(self.raw_data_example_error)
# Make sure that the message has been acknowledged. This must be done
# in all test cases to cover every possible case, and avoid doing a
# very large amount of tests around this.
self.assertEqual(2, mock_ack.call_count)
@parameterized.expand([
({},), (None,), ("test",), ({'bad_key': 'bad_value'},)
])
@mock.patch.object(ChainlinkContractsDataTransformer, "_transform_data")
@mock.patch.object(RabbitMQApi, "basic_ack")
def test_process_raw_data_does_not_call_trans_data_if_err_res_not_in_data(
self, invalid_data, mock_ack, mock_trans_data) -> None:
mock_ack.return_value = None
        # We must initialise rabbit to create the environment and parameters
        # needed by `_process_raw_data`
self.test_data_transformer._initialise_rabbitmq()
blocking_channel = self.test_data_transformer.rabbitmq.channel
method = pika.spec.Basic.Deliver(
routing_key=CHAINLINK_CONTRACTS_RAW_DATA_ROUTING_KEY)
body = json.dumps(invalid_data)
properties = pika.spec.BasicProperties()
# Send raw data
self.test_data_transformer._process_raw_data(blocking_channel,
method, properties,
body)
mock_trans_data.assert_not_called()
# Make sure that the message has been acknowledged. This must be done
# in all test cases to cover every possible case, and avoid doing a
# very large amount of tests around this.
mock_ack.assert_called_once()
@mock.patch.object(RabbitMQApi, "basic_ack")
def test_process_raw_data_updates_state_if_no_processing_errors(
self, mock_ack) -> None:
        # Make sure there is no state in redis, as the state will be compared
        # later. We will check that the state has been updated correctly.
self.redis.delete_all()
mock_ack.return_value = None
        # We must initialise rabbit to create the environment and parameters
        # needed by `_process_raw_data`
self.test_data_transformer._initialise_rabbitmq()
blocking_channel = self.test_data_transformer.rabbitmq.channel
method = pika.spec.Basic.Deliver(
routing_key=CHAINLINK_CONTRACTS_RAW_DATA_ROUTING_KEY)
body = json.dumps(self.raw_data_example_result_v3)
properties = pika.spec.BasicProperties()
# Make the state non-empty to check that the update does not modify
# nodes not in question
self.test_data_transformer._state['node2'] = self.test_data_str
# Send raw data
self.test_data_transformer._process_raw_data(
blocking_channel, method, properties, body)
        # Check that there are 2 nodes in the state: one which was not
        # modified, and another having 2 contracts whose metrics match the
        # newly given data.
self.assertEqual(2, len(self.test_data_transformer._state.keys()))
self.assertEqual(2, len(self.test_data_transformer._state[
self.test_node_id_1].keys()))
contract_1_expected_data = copy.deepcopy(
self.test_cl_contract_1_new_metrics)
contract_2_expected_data = copy.deepcopy(
self.test_cl_contract_2_new_metrics)
self.assertEqual(self.test_data_str,
self.test_data_transformer._state['node2'])
self.assertEqual(
contract_1_expected_data,
self.test_data_transformer._state[self.test_node_id_1][
self.test_proxy_address_1])
self.assertEqual(
contract_2_expected_data,
self.test_data_transformer._state[self.test_node_id_1][
self.test_proxy_address_2])
# Make sure that the message has been acknowledged. This must be done
# in all test cases to cover every possible case, and avoid doing a
# very large amount of tests around this.
self.assertEqual(1, mock_ack.call_count)
@mock.patch.object(ChainlinkContractsDataTransformer, "_transform_data")
@mock.patch.object(RabbitMQApi, "basic_ack")
def test_process_raw_data_does_not_update_state_if_processing_fails(
self, mock_ack, mock_transform_data) -> None:
"""
We will automate processing failure by generating an exception from the
self._transform_data function.
"""
mock_ack.return_value = None
mock_transform_data.side_effect = self.test_exception
        # We must initialise rabbit to create the environment and parameters
        # needed by `_process_raw_data`
self.test_data_transformer._initialise_rabbitmq()
blocking_channel = self.test_data_transformer.rabbitmq.channel
method = pika.spec.Basic.Deliver(
routing_key=CHAINLINK_CONTRACTS_RAW_DATA_ROUTING_KEY)
body = json.dumps(self.raw_data_example_result_v3)
properties = pika.spec.BasicProperties()
# Make the state non-empty and save it to redis. This will be used to
# check that the state is not updated with new metrics if processing
# fails
self.test_data_transformer._state = copy.deepcopy(self.test_state_v3)
new_contract_1 = V3ChainlinkContract(
self.test_proxy_address_1, self.test_aggregator_address_1,
self.test_parent_id_1, self.test_node_id_1)
new_contract_2 = V3ChainlinkContract(
self.test_proxy_address_2, self.test_aggregator_address_2,
self.test_parent_id_1, self.test_node_id_1)
save_chainlink_contract_to_redis(self.redis, new_contract_1)
save_chainlink_contract_to_redis(self.redis, new_contract_2)
# Send raw data
self.test_data_transformer._process_raw_data(
blocking_channel, method, properties, body)
# Check that there is 1 node and 2 contracts in the state with
# unmodified data.
expected_data_contract_1 = copy.deepcopy(new_contract_1)
expected_data_contract_2 = copy.deepcopy(new_contract_2)
self.assertEqual(1, len(self.test_data_transformer._state.keys()))
self.assertEqual(2, len(self.test_data_transformer._state[
self.test_node_id_1].keys()))
self.assertEqual(
expected_data_contract_1,
self.test_data_transformer._state[self.test_node_id_1][
self.test_proxy_address_1])
self.assertEqual(
expected_data_contract_2,
self.test_data_transformer._state[self.test_node_id_1][
self.test_proxy_address_2])
# Make sure that the message has been acknowledged. This must be done
# in all test cases to cover every possible case, and avoid doing a
# very large amount of tests around this.
self.assertEqual(1, mock_ack.call_count)
@mock.patch.object(ChainlinkContractsDataTransformer, "_transform_data")
@mock.patch.object(ChainlinkContractsDataTransformer,
"_place_latest_data_on_queue")
@mock.patch.object(RabbitMQApi, "basic_ack")
def test_process_raw_data_places_data_on_queue_if_no_processing_errors(
self, mock_ack, mock_place_on_queue, mock_trans_data) -> None:
mock_ack.return_value = None
mock_trans_data.return_value = (
self.transformed_data_example_result_v3,
self.test_data_for_alerting_result_v3,
self.transformed_data_example_result_v3
)
        # We must initialise rabbit to create the environment and parameters
        # needed by `_process_raw_data`
self.test_data_transformer._initialise_rabbitmq()
blocking_channel = self.test_data_transformer.rabbitmq.channel
method = pika.spec.Basic.Deliver(
routing_key=CHAINLINK_CONTRACTS_RAW_DATA_ROUTING_KEY)
body = json.dumps(self.raw_data_example_result_v3)
properties = pika.spec.BasicProperties()
# Send raw data
self.test_data_transformer._process_raw_data(
blocking_channel, method, properties, body)
args, _ = mock_place_on_queue.call_args
self.assertDictEqual(self.test_data_for_alerting_result_v3, args[0])
self.assertDictEqual(self.transformed_data_example_result_v3, args[1])
self.assertEqual(2, len(args))
# Make sure that the message has been acknowledged. This must be done
# in all test cases to cover every possible case, and avoid doing a
# very large amount of tests around this.
self.assertEqual(1, mock_ack.call_count)
@parameterized.expand([
({},), (None,), ("test",), ({'bad_key': 'bad_value'},)
])
@mock.patch.object(ChainlinkContractsDataTransformer,
"_place_latest_data_on_queue")
@mock.patch.object(RabbitMQApi, "basic_ack")
def test_process_raw_data_no_data_on_queue_if_processing_error(
self, invalid_data, mock_ack, mock_place_on_queue) -> None:
mock_ack.return_value = None
        # We must initialise rabbit to create the environment and parameters
        # needed by `_process_raw_data`
self.test_data_transformer._initialise_rabbitmq()
blocking_channel = self.test_data_transformer.rabbitmq.channel
method = pika.spec.Basic.Deliver(
routing_key=CHAINLINK_CONTRACTS_RAW_DATA_ROUTING_KEY)
body = json.dumps(invalid_data)
properties = pika.spec.BasicProperties()
# Send raw data
self.test_data_transformer._process_raw_data(
blocking_channel, method, properties, body)
# Check that place_on_queue was not called
mock_place_on_queue.assert_not_called()
# Make sure that the message has been acknowledged. This must be done
# in all test cases to cover every possible case, and avoid doing a
# very large amount of tests around this.
mock_ack.assert_called_once()
@mock.patch.object(ChainlinkContractsDataTransformer, "_send_data")
@mock.patch.object(RabbitMQApi, "basic_ack")
def test_process_raw_data_sends_data_waiting_on_queue_if_no_process_errors(
self, mock_ack, mock_send_data) -> None:
mock_ack.return_value = None
mock_send_data.return_value = None
# Load the state to avoid loading from redis.
self.test_data_transformer._state = copy.deepcopy(self.test_state_v3)
        # We must initialise rabbit to create the environment and parameters
        # needed by `_process_raw_data`
self.test_data_transformer._initialise_rabbitmq()
blocking_channel = self.test_data_transformer.rabbitmq.channel
method = pika.spec.Basic.Deliver(
routing_key=CHAINLINK_CONTRACTS_RAW_DATA_ROUTING_KEY)
body = json.dumps(self.raw_data_example_result_v3)
properties = pika.spec.BasicProperties()
# Send raw data
self.test_data_transformer._process_raw_data(
blocking_channel, method, properties, body)
# Check that send_data was called
self.assertEqual(1, mock_send_data.call_count)
# Make sure that the message has been acknowledged. This must be done
# in all test cases to cover every possible case, and avoid doing a
# very large amount of tests around this.
self.assertEqual(1, mock_ack.call_count)
@mock.patch.object(ChainlinkContractsDataTransformer, "_transform_data")
@mock.patch.object(ChainlinkContractsDataTransformer, "_send_data")
@mock.patch.object(RabbitMQApi, "basic_ack")
def test_process_raw_data_sends_data_waiting_on_queue_if_process_errors(
self, mock_ack, mock_send_data, mock_transform_data) -> None:
"""
We will automate processing errors by making self._transform_data
generate an exception.
"""
mock_ack.return_value = None
mock_send_data.return_value = None
mock_transform_data.side_effect = self.test_exception
        # We must initialise rabbit to create the environment and parameters
        # needed by `_process_raw_data`
self.test_data_transformer._initialise_rabbitmq()
blocking_channel = self.test_data_transformer.rabbitmq.channel
method = pika.spec.Basic.Deliver(
routing_key=CHAINLINK_CONTRACTS_RAW_DATA_ROUTING_KEY)
body = json.dumps(self.raw_data_example_result_v3)
properties = pika.spec.BasicProperties()
# Send raw data
self.test_data_transformer._process_raw_data(
blocking_channel, method, properties, body)
# Check that send_data was called
self.assertEqual(1, mock_send_data.call_count)
# Make sure that the message has been acknowledged. This must be done
# in all test cases to cover every possible case, and avoid doing a
# very large amount of tests around this.
self.assertEqual(1, mock_ack.call_count)
@freeze_time("2012-01-01")
@mock.patch.object(ChainlinkContractsDataTransformer, "_send_heartbeat")
@mock.patch.object(ChainlinkContractsDataTransformer, "_send_data")
@mock.patch.object(RabbitMQApi, "basic_ack")
def test_process_raw_data_sends_hb_if_no_proc_errors_and_send_data_success(
self, mock_ack, mock_send_data, mock_send_hb) -> None:
mock_ack.return_value = None
mock_send_data.return_value = None
mock_send_hb.return_value = None
test_hb = {
'component_name': self.test_data_transformer.transformer_name,
'is_alive': True,
'timestamp': datetime.now().timestamp(),
}
# Load the state to avoid loading data from redis.
self.test_data_transformer._state = copy.deepcopy(self.test_state_v3)
        # We must initialise rabbit to create the environment and parameters
        # needed by `_process_raw_data`
self.test_data_transformer._initialise_rabbitmq()
blocking_channel = self.test_data_transformer.rabbitmq.channel
method = pika.spec.Basic.Deliver(
routing_key=CHAINLINK_CONTRACTS_RAW_DATA_ROUTING_KEY)
body = json.dumps(self.raw_data_example_result_v3)
properties = pika.spec.BasicProperties()
# Send raw data
self.test_data_transformer._process_raw_data(
blocking_channel, method, properties, body)
mock_send_hb.assert_called_once_with(test_hb)
# Make sure that the message has been acknowledged. This must be done
# in all test cases to cover every possible case, and avoid doing a
# very large amount of tests around this.
self.assertEqual(1, mock_ack.call_count)
@mock.patch.object(ChainlinkContractsDataTransformer, "_update_state")
@mock.patch.object(ChainlinkContractsDataTransformer, "_send_heartbeat")
@mock.patch.object(RabbitMQApi, "basic_ack")
def test_process_raw_data_does_not_send_hb_if_proc_errors(
self, mock_ack, mock_send_hb, mock_update_state) -> None:
mock_ack.return_value = None
mock_send_hb.return_value = None
mock_update_state.side_effect = self.test_exception
# Load the state to avoid loading data from redis.
self.test_data_transformer._state = copy.deepcopy(self.test_state_v3)
        # We must initialise rabbit to create the environment and parameters
        # needed by `_process_raw_data`
self.test_data_transformer._initialise_rabbitmq()
blocking_channel = self.test_data_transformer.rabbitmq.channel
method = pika.spec.Basic.Deliver(
routing_key=CHAINLINK_CONTRACTS_RAW_DATA_ROUTING_KEY)
body = json.dumps(self.raw_data_example_result_v3)
properties = pika.spec.BasicProperties()
# Send raw data
self.test_data_transformer._process_raw_data(
blocking_channel, method, properties, body)
# Check that send_heartbeat was not called
mock_send_hb.assert_not_called()
# Make sure that the message has been acknowledged. This must be done
# in all test cases to cover every possible case, and avoid doing a
# very large amount of tests around this.
self.assertEqual(1, mock_ack.call_count)
@mock.patch.object(ChainlinkContractsDataTransformer, "_send_data")
@mock.patch.object(ChainlinkContractsDataTransformer, "_send_heartbeat")
@mock.patch.object(RabbitMQApi, "basic_ack")
def test_process_raw_data_does_not_send_hb_if_send_data_fails(
self, mock_ack, mock_send_hb, mock_send_data) -> None:
mock_ack.return_value = None
mock_send_hb.return_value = None
mock_send_data.side_effect = MessageWasNotDeliveredException(
'test err')
# Load the state to avoid loading data from redis.
self.test_data_transformer._state = copy.deepcopy(self.test_state_v3)
        # We must initialise rabbit to create the environment and parameters
        # needed by `_process_raw_data`
self.test_data_transformer._initialise_rabbitmq()
blocking_channel = self.test_data_transformer.rabbitmq.channel
method = pika.spec.Basic.Deliver(
routing_key=CHAINLINK_CONTRACTS_RAW_DATA_ROUTING_KEY)
body = json.dumps(self.raw_data_example_result_v3)
properties = pika.spec.BasicProperties()
# Send raw data
self.test_data_transformer._process_raw_data(
blocking_channel, method, properties, body)
# Check that send_heartbeat was not called
mock_send_hb.assert_not_called()
# Make sure that the message has been acknowledged. This must be done
# in all test cases to cover every possible case, and avoid doing a
# very large amount of tests around this.
self.assertEqual(1, mock_ack.call_count)
@parameterized.expand([
(pika.exceptions.AMQPConnectionError,
pika.exceptions.AMQPConnectionError('test err'),),
(pika.exceptions.AMQPChannelError,
pika.exceptions.AMQPChannelError('test err'),),
(Exception, Exception('test'),)
])
@mock.patch.object(ChainlinkContractsDataTransformer, "_send_data")
@mock.patch.object(RabbitMQApi, "basic_ack")
def test_process_raw_data_raises_err_if_raised_by_send_data(
self, exception_type, exception_instance, mock_ack,
mock_send_data) -> None:
"""
        We will perform this test only for errors that we know can be raised
"""
mock_ack.return_value = None
mock_send_data.side_effect = exception_instance
# Load the state to avoid having to load data from redis.
self.test_data_transformer._state = copy.deepcopy(self.test_state_v3)
        # We must initialise rabbit to create the environment and parameters
        # needed by `_process_raw_data`
self.test_data_transformer._initialise_rabbitmq()
blocking_channel = self.test_data_transformer.rabbitmq.channel
method = pika.spec.Basic.Deliver(
routing_key=CHAINLINK_CONTRACTS_RAW_DATA_ROUTING_KEY)
body = json.dumps(self.raw_data_example_result_v3)
properties = pika.spec.BasicProperties()
# Send raw data and assert exception
self.assertRaises(
exception_type, self.test_data_transformer._process_raw_data,
blocking_channel, method, properties, body
)
# Make sure that the message has been acknowledged. This must be done
# in all test cases to cover every possible case, and avoid doing a
# very large amount of tests around this.
self.assertEqual(1, mock_ack.call_count)
@parameterized.expand([
(pika.exceptions.AMQPConnectionError,
pika.exceptions.AMQPConnectionError('test err'),),
(pika.exceptions.AMQPChannelError,
pika.exceptions.AMQPChannelError('test err'),),
(Exception, Exception('test'),)
])
@mock.patch.object(ChainlinkContractsDataTransformer, "_send_heartbeat")
@mock.patch.object(ChainlinkContractsDataTransformer, "_send_data")
@mock.patch.object(RabbitMQApi, "basic_ack")
def test_process_raw_data_raises_err_if_raised_by_send_hb(
self, exception_type, exception_instance, mock_ack,
mock_send_data, mock_send_hb) -> None:
"""
        We will perform this test only for errors that we know can be raised
"""
mock_ack.return_value = None
mock_send_data.return_value = None
mock_send_hb.side_effect = exception_instance
# Load the state to avoid having to load data from redis.
self.test_data_transformer._state = copy.deepcopy(self.test_state_v3)
        # We must initialise rabbit to create the environment and parameters
        # needed by `_process_raw_data`
self.test_data_transformer._initialise_rabbitmq()
blocking_channel = self.test_data_transformer.rabbitmq.channel
method = pika.spec.Basic.Deliver(
routing_key=CHAINLINK_CONTRACTS_RAW_DATA_ROUTING_KEY)
body = json.dumps(self.raw_data_example_result_v3)
properties = pika.spec.BasicProperties()
# Send raw data and assert exception
self.assertRaises(
exception_type, self.test_data_transformer._process_raw_data,
blocking_channel, method, properties, body
)
# Make sure that the message has been acknowledged. This must be done
# in all test cases to cover every possible case, and avoid doing a
# very large amount of tests around this.
self.assertEqual(1, mock_ack.call_count)
@mock.patch.object(ChainlinkContractsDataTransformer, "_send_data")
@mock.patch.object(RabbitMQApi, "basic_ack")
def test_process_raw_data_no_msg_not_del_exception_if_raised_by_send_data(
self, mock_ack, mock_send_data) -> None:
mock_ack.return_value = None
mock_send_data.side_effect = MessageWasNotDeliveredException(
'test err')
# Load the state to avoid having to load data from redis.
self.test_data_transformer._state = copy.deepcopy(self.test_state_v3)
        # We must initialise rabbit to create the environment and parameters
        # needed by `_process_raw_data`
self.test_data_transformer._initialise_rabbitmq()
blocking_channel = self.test_data_transformer.rabbitmq.channel
method = pika.spec.Basic.Deliver(
routing_key=CHAINLINK_CONTRACTS_RAW_DATA_ROUTING_KEY)
body = json.dumps(self.raw_data_example_result_v3)
properties = pika.spec.BasicProperties()
        # Send raw data. The test would fail if an exception were raised
self.test_data_transformer._process_raw_data(
blocking_channel, method, properties, body
)
# Make sure that the message has been acknowledged. This must be done
# in all test cases to cover every possible case, and avoid doing a
# very large amount of tests around this.
self.assertEqual(1, mock_ack.call_count)
@mock.patch.object(ChainlinkContractsDataTransformer, "_send_heartbeat")
@mock.patch.object(ChainlinkContractsDataTransformer, "_send_data")
@mock.patch.object(RabbitMQApi, "basic_ack")
def test_process_raw_data_no_msg_not_del_exception_if_raised_by_send_hb(
self, mock_ack, mock_send_data, mock_send_hb) -> None:
mock_ack.return_value = None
mock_send_data.return_value = None
mock_send_hb.side_effect = MessageWasNotDeliveredException('test err')
# Load the state to avoid having to load data from redis.
self.test_data_transformer._state = copy.deepcopy(self.test_state_v3)
        # We must initialise rabbit to create the environment and parameters
        # needed by `_process_raw_data`
self.test_data_transformer._initialise_rabbitmq()
blocking_channel = self.test_data_transformer.rabbitmq.channel
method = pika.spec.Basic.Deliver(
routing_key=CHAINLINK_CONTRACTS_RAW_DATA_ROUTING_KEY)
body = json.dumps(self.raw_data_example_result_v3)
properties = pika.spec.BasicProperties()
        # Send raw data. The test would fail if an exception were raised
self.test_data_transformer._process_raw_data(
blocking_channel, method, properties, body
)
# Make sure that the message has been acknowledged. This must be done
# in all test cases to cover every possible case, and avoid doing a
# very large amount of tests around this.
self.assertEqual(1, mock_ack.call_count)
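# Note: this suite exercises live RabbitMQ and Redis instances (real queues
# and exchanges are declared and deleted in setUp/tearDown), so both services
# must be reachable before running it, e.g. with a hypothetical invocation
# such as `pytest test_chainlink_contracts_data_transformer.py`.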
example_1/calculation/helper2.py | KrashLeviathan/se329_project_2 | b5f36b56802885d6c0a7f86fc89ef3d8ec9ae897 | MIT
#!/usr/bin/env python
import hashlib
def do_something_2(c, d):
return int(hashlib.md5(c.encode()).hexdigest(), 16) / (d * 1e+20)
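# A quick, hedged usage sketch (the argument values are made up): the md5
# digest of the string is read as a 128-bit integer and divided by d * 1e+20,
# so each (c, d) pair maps to a large but deterministic float.
if __name__ == '__main__':
    print(do_something_2('example', 2.0))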
cryptowatch/__init__.py | nlnsaoadc/py-cryptowatch | 33ebfb20d1b9dee13dc1b169cd03c1138a43317f | MIT
from .cryptowatch import Cryptowatch
apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/l10n_br/__init__.py | gtfarng/Odoo_migrade | 9cc28fae4c379e407645248a29d22139925eafe7 | Apache-2.0 | 1 star
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Copyright (C) 2009 Renato Lima - Akretion
import models
xmoai/problems/xMOAIProblem.py | wmonteiro92/xmoai | 032602a4f6a33f2cc798ff7f7afe5aefcc9b30e7 | MIT | 2 stars
import numpy as np
from pymoo.model.problem import Problem
from xmoai.problems.objectives import *
from xmoai.problems.restrictions import *
#https://pymoo.org/problems/index.html
#https://pymoo.org/problems/custom.html
#https://pymoo.org/misc/constraint_handling.html
class xMOAIProblem(Problem):
"""Defines the multiobjective problem to be solved by xMOAI in order
to generate counterfactuals. The problem may be a regression or a
classification problem. This class must not be called directly.
Instead, please use the methods provided in the "configure.py" file.
"""
def __init__(self, X_current, y_desired, upper_bounds, lower_bounds, \
max_changed_vars, y_acceptable_range, categorical_columns, \
integer_columns, trained_model, method_name, parallelization):
"""Class constructor.
        :param X_current: the original individual
        :type X_current: numpy.array
        :param y_desired: the desired value to be predicted
        :type y_desired: Object
        :param upper_bounds: the maximum values allowed per attribute. It must
        have the same length as X_current. Its values must be different from the
        values informed in lower_bounds. For the categorical columns ordinally
        encoded it represents the category with the maximum value.
        :type upper_bounds: numpy.array
        :param lower_bounds: the minimum values allowed per attribute. It must
        have the same length as X_current. Its values must be different from the
        values informed in upper_bounds. For the categorical columns ordinally
        encoded it represents the category with the minimum value.
        :type lower_bounds: numpy.array
:param y_acceptable_range: the lower (first value) and upper
(second value) limits allowed for the output. A counterfactual
is considered as being "valid" if it has its output within this range. For
regression problems it is understood as the predicted value where y_desired
is inside this range. For classification problems it is understood as
the probability of being within the expected class shown in y_desired.
:param categorical_columns: dictionary containing the categorical columns
and their allowed values. The keys are the i-th position of the indexes
and the values are the allowed categories. The minimum and maximum categories
must respect the values in lower_bounds and upper_bounds since this variable
is called after it in code.
:type categorical_columns: dict
        :param integer_columns: lists the columns that allow only integer values.
It is used by xMOAI in rounding operations.
:type integer_columns: numpy.array
:param trained_model: the machine learning trained model to be used to
evaluate the counterfactuals.
:type trained_model: object
:param method_name: the method used by the machine learning model to obtain
its predictions (e.g. `predict`, `predict_proba`).
:type method_name: str
:param parallelization: parallelization options used by pymoo.
:type parallelization: Object
"""
self.X_current = X_current.flatten()
n_var = self.X_current.shape[0]
self.model = trained_model
self.y_desired = y_desired
self.y_acceptable_range = y_acceptable_range
self.max_changed_vars = max_changed_vars
self.categorical_columns = categorical_columns
self.categorical_indexes = np.array(list(categorical_columns.keys()))
self.integer_columns = integer_columns
        if self.categorical_indexes.shape[0] > 0:
            # np.setdiff1d keeps only the non-categorical indexes; an
            # element-wise != comparison would not broadcast correctly here.
            self.numerical_indexes = np.setdiff1d(np.arange(n_var),
                                                  self.categorical_indexes)
        else:
            self.numerical_indexes = np.arange(n_var)
self.method_name = method_name
super().__init__(n_var=n_var, n_obj=num_objectives, n_constr=3, \
xl=lower_bounds, xu=upper_bounds, \
parallelization=parallelization)
class RegressionProblem(xMOAIProblem):
"""Defines a multiobjective problem to be solved by xMOAI for regression
problems. This class must not be called directly. Instead, please use
the methods provided in the "configure.py" file.
"""
def __init__(self, X_current, y_desired, upper_bounds, lower_bounds, \
max_changed_vars, y_acceptable_range, categorical_columns, \
integer_columns, trained_model, method_name, parallelization):
"""Class constructor.
        :param X_current: the original individual
        :type X_current: numpy.array
        :param y_desired: the desired value to be predicted
        :type y_desired: Object
        :param upper_bounds: the maximum values allowed per attribute. It must
        have the same length as X_current. Its values must be different from the
        values informed in lower_bounds. For the categorical columns ordinally
        encoded it represents the category with the maximum value.
        :type upper_bounds: numpy.array
        :param lower_bounds: the minimum values allowed per attribute. It must
        have the same length as X_current. Its values must be different from the
        values informed in upper_bounds. For the categorical columns ordinally
        encoded it represents the category with the minimum value.
        :type lower_bounds: numpy.array
:param y_acceptable_range: the lower (first value) and upper
(second value) limits allowed for the output. A counterfactual
is considered as being "valid" if it has its output within this range. For
regression problems it is understood as the predicted value where y_desired
is inside this range. For classification problems it is understood as
the probability of being within the expected class shown in y_desired.
:param categorical_columns: dictionary containing the categorical columns
and their allowed values. The keys are the i-th position of the indexes
and the values are the allowed categories. The minimum and maximum categories
must respect the values in lower_bounds and upper_bounds since this variable
is called after it in code.
:type categorical_columns: dict
        :param integer_columns: lists the columns that allow only integer values.
It is used by xMOAI in rounding operations.
:type integer_columns: numpy.array
:param trained_model: the machine learning trained model to be used to
evaluate the counterfactuals.
:type trained_model: object
:param method_name: the method used by the machine learning model to obtain
its predictions (e.g. `predict`, `predict_proba`).
:type method_name: str
:param parallelization: parallelization options used by pymoo.
:type parallelization: Object
"""
super().__init__(X_current, y_desired, upper_bounds, lower_bounds, \
max_changed_vars, y_acceptable_range, \
categorical_columns, integer_columns, \
trained_model, method_name, parallelization)
def _evaluate(self, x, out, *args, **kwargs):
"""Evaluates an individual.
:param x: the individual (or individuals) to be evaluated
:type x: numpy.array
:param out: the evaluation output.
:type out: dict
"""
f1, prediction = get_difference_target_regression(self.model, x, \
self.y_desired, self.method_name)
f2 = get_difference_attributes(x, self.X_current, self.categorical_columns)
f3 = get_modified_attributes(x, self.X_current)
g1 = get_changed_vars_threshold(f3, self.max_changed_vars)
g2, g3 = is_prediction_in_threshold_regression(self.y_acceptable_range, \
prediction)
out["F"] = np.column_stack([f1, f2, f3])
out["G"] = np.column_stack([g1, g2, g3])
class ClassificationProblemProbability(xMOAIProblem):
"""Defines a multiobjective problem to be solved by xMOAI for
classification problems where the trained model exposes the probability
of the classes. This class must not be called directly. Instead, please use
the methods provided in the "configure.py" file.
"""
def __init__(self, X_current, class_column, upper_bounds, lower_bounds, \
max_changed_vars, y_acceptable_range, categorical_columns, \
integer_columns, trained_model, method_name, parallelization):
"""Class constructor.
        :param X_current: the original individual
        :type X_current: numpy.array
        :param class_column: the desired class, used to index the class
        probabilities returned by the model
        :type class_column: Object
        :param upper_bounds: the maximum values allowed per attribute. It must
        have the same length as X_current. Its values must be greater than the
        corresponding values in lower_bounds. For the ordinally encoded
        categorical columns it represents the category with the maximum value.
        :type upper_bounds: numpy.array
        :param lower_bounds: the minimum values allowed per attribute. It must
        have the same length as X_current. Its values must be smaller than the
        corresponding values in upper_bounds. For the ordinally encoded
        categorical columns it represents the category with the minimum value.
        :type lower_bounds: numpy.array
        :param y_acceptable_range: the lower (first value) and upper
        (second value) limits allowed for the output. A counterfactual is
        considered "valid" if its output falls within this range. For this
        class the range applies to the predicted probability of the desired
        class given in class_column.
        :type y_acceptable_range: numpy.array
        :param categorical_columns: dictionary containing the categorical columns
        and their allowed values. The keys are the column indexes and the values
        are the allowed categories. The minimum and maximum categories must be
        consistent with the values given in lower_bounds and upper_bounds, since
        the bounds are applied before this argument in the code.
        :type categorical_columns: dict
        :param integer_columns: lists the columns that allow only integer values.
        It is used by xMOAI in rounding operations.
        :type integer_columns: numpy.array
:param trained_model: the machine learning trained model to be used to
evaluate the counterfactuals.
:type trained_model: object
:param method_name: the method used by the machine learning model to obtain
its predictions (e.g. `predict`, `predict_proba`).
:type method_name: str
:param parallelization: parallelization options used by pymoo.
:type parallelization: Object
"""
super().__init__(X_current, class_column, upper_bounds, lower_bounds, \
max_changed_vars, y_acceptable_range, \
categorical_columns, integer_columns, \
trained_model, method_name, parallelization)
def _evaluate(self, x, out, *args, **kwargs):
"""Evaluates an individual.
:param x: the individual (or individuals) to be evaluated
:type x: numpy.array
:param out: the evaluation output.
:type out: dict
"""
f1, prediction = get_difference_target_classification_proba(self.model, x, \
self.y_desired, self.method_name)
f2 = get_difference_attributes(x, self.X_current, self.categorical_columns)
f3 = get_modified_attributes(x, self.X_current)
g1 = get_changed_vars_threshold(f3, self.max_changed_vars)
g2, g3 = is_prediction_in_threshold_classification_proba(self.y_acceptable_range, \
prediction, self.y_desired)
out["F"] = np.column_stack([f1, f2, f3])
out["G"] = np.column_stack([g1, g2, g3])
class ClassificationProblemSimple(xMOAIProblem):
"""Defines a multiobjective problem to be solved by xMOAI for
classification problems where the trained model does not expose the probability
of the classes. This class must not be called directly. Instead, please use
the methods provided in the "configure.py" file.
"""
def __init__(self, X_current, class_column, upper_bounds, lower_bounds, \
max_changed_vars, categorical_columns, \
integer_columns, trained_model, method_name, parallelization):
"""Class constructor.
        :param X_current: the original individual
        :type X_current: numpy.array
        :param class_column: the desired class to be predicted
        :type class_column: Object
        (This class takes no y_acceptable_range: the model exposes no class
        probabilities, so the base class receives None for it.)
        :param upper_bounds: the maximum values allowed per attribute. It must
        have the same length as X_current. Its values must be greater than the
        corresponding values in lower_bounds. For the ordinally encoded
        categorical columns it represents the category with the maximum value.
        :type upper_bounds: numpy.array
        :param lower_bounds: the minimum values allowed per attribute. It must
        have the same length as X_current. Its values must be smaller than the
        corresponding values in upper_bounds. For the ordinally encoded
        categorical columns it represents the category with the minimum value.
        :type lower_bounds: numpy.array
        :param categorical_columns: dictionary containing the categorical columns
        and their allowed values. The keys are the column indexes and the values
        are the allowed categories. The minimum and maximum categories must be
        consistent with the values given in lower_bounds and upper_bounds, since
        the bounds are applied before this argument in the code.
        :type categorical_columns: dict
        :param integer_columns: lists the columns that allow only integer values.
        It is used by xMOAI in rounding operations.
        :type integer_columns: numpy.array
:param trained_model: the machine learning trained model to be used to
evaluate the counterfactuals.
:type trained_model: object
:param method_name: the method used by the machine learning model to obtain
its predictions (e.g. `predict`, `predict_proba`).
:type method_name: str
:param parallelization: parallelization options used by pymoo.
:type parallelization: Object
"""
super().__init__(X_current, class_column, upper_bounds, lower_bounds, \
max_changed_vars, None, categorical_columns, \
integer_columns, trained_model, method_name, parallelization)
def _evaluate(self, x, out, *args, **kwargs):
"""Evaluates an individual.
:param x: the individual (or individuals) to be evaluated
:type x: numpy.array
:param out: the evaluation output.
:type out: dict
"""
f1, prediction = get_difference_target_classification_simple(self.model, x, \
self.y_desired, self.method_name)
f2 = get_difference_attributes(x, self.X_current, self.categorical_columns)
f3 = get_modified_attributes(x, self.X_current)
g1 = get_changed_vars_threshold(f3, self.max_changed_vars)
        g2 = is_prediction_in_threshold_classification_simple(prediction, self.y_desired)
        out["F"] = np.column_stack([f1, f2, f3])
        # The base class declares n_constr=3, so a third, always-satisfied
        # constraint is padded in (g <= 0 means feasible in pymoo).
        out["G"] = np.column_stack([g1, g2, np.zeros_like(g1)])
[stats for the file above: avg_line_length 55.73 | max_line_length 98 | alphanum_fraction 0.654 | remaining quality-signal columns omitted]
[next record: hexsha b2d81cfc40b29bae1c9bcb333b248c15054c2172 | size 31 | py/Python | src/__init__.py | repo smithara/IAGA_SummerSchool2019 (issues/forks under MagneticEarth/IAGA_SummerSchool2019) @ e4a3ee5e8948b591986764ba06282e1da608f190 | MIT | stars 5 (2019-05-27 .. 2019-10-04) | forks 2 (2020-04-22 .. 2021-01-07)]
from . import mag_lib, sha_lib
[stats for the file above: avg_line_length 15.5 | max_line_length 30 | alphanum_fraction 0.774 | remaining quality-signal columns omitted]
[next record: hexsha 6541fe69aeae3ae5b0056d7b538ff13e9ece798d | size 6,688 | py/Python | simfempy/meshes/testmeshes.py | repo anairabeze/simfempy @ 144362956263cb9b81f4bade15664d9cc640f93a | MIT | stars/issues/forks: null]
import pygmsh
import numpy as np
import simfempy
__pygmsh6__ = hasattr(pygmsh, "built_in")  # pygmsh <= 6.x exposes the built_in module
# ------------------------------------- #
def unitline(h):
if __pygmsh6__:
geom = pygmsh.built_in.Geometry()
p0 = geom.add_point([0, 0, 0], lcar=h)
p1 = geom.add_point([1, 0, 0], lcar=h)
p = geom.add_line(p0, p1)
geom.add_physical(p0, label=10000)
geom.add_physical(p1, label=10001)
geom.add_physical(p, label=1000)
mesh = pygmsh.generate_mesh(geom, verbose=False)
else:
with pygmsh.geo.Geometry() as geom:
p0 = geom.add_point([0, 0, 0], mesh_size=h)
p1 = geom.add_point([1, 0, 0], mesh_size=h)
p = geom.add_line(p0, p1)
geom.add_physical(p0, label="10000")
geom.add_physical(p1, label="10001")
geom.add_physical(p, label="1000")
mesh = geom.generate_mesh()
return simfempy.meshes.simplexmesh.SimplexMesh(mesh=mesh)
# ------------------------------------- #
def unitsquare(h):
a=1
if __pygmsh6__:
geom = pygmsh.built_in.Geometry()
p = geom.add_rectangle(xmin=-a, xmax=a, ymin=-a, ymax=a, z=0, lcar=h)
geom.add_physical(p.surface, label=100)
# geom.add_physical(p.points[0], label=11111)
for i in range(4): geom.add_physical(p.line_loop.lines[i], label=1000 + i)
mesh = pygmsh.generate_mesh(geom, verbose=False)
else:
with pygmsh.geo.Geometry() as geom:
p = geom.add_rectangle(xmin=-a, xmax=a, ymin=-a, ymax=a, z=0, mesh_size=h)
geom.add_physical(p.surface, label="100")
# geom.add_physical(p.points[0], label="11111")
for i in range(len(p.lines)): geom.add_physical(p.lines[i], label=f"{1000 + i}")
mesh = geom.generate_mesh()
# print(f"{mesh=}")
# print(f"{mesh.cell_data=}")
# print(f"{mesh.cell_sets=}")
# print("{mesh=}")
return simfempy.meshes.simplexmesh.SimplexMesh(mesh=mesh)
# ------------------------------------- #
def unitcube(h):
if __pygmsh6__:
geom = pygmsh.built_in.Geometry()
x, y, z = [-1, 1], [-1, 1], [-1, 1]
p = geom.add_rectangle(xmin=x[0], xmax=x[1], ymin=y[0], ymax=y[1], z=z[0], lcar=h)
geom.add_physical(p.surface, label=100)
axis = [0, 0, z[1] - z[0]]
top, vol, ext = geom.extrude(p.surface, axis)
geom.add_physical(top, label=105)
geom.add_physical(ext[0], label=101)
geom.add_physical(ext[1], label=102)
geom.add_physical(ext[2], label=103)
geom.add_physical(ext[3], label=104)
geom.add_physical(vol, label=10)
mesh = pygmsh.generate_mesh(geom, verbose=False)
else:
with pygmsh.geo.Geometry() as geom:
x, y, z = [-1, 1], [-1, 1], [-1, 1]
p = geom.add_rectangle(xmin=x[0], xmax=x[1], ymin=y[0], ymax=y[1], z=z[0], mesh_size=h)
geom.add_physical(p.surface, label="100")
axis = [0, 0, z[1] - z[0]]
top, vol, ext = geom.extrude(p.surface, axis)
geom.add_physical(top, label="105")
geom.add_physical(ext[0], label="101")
geom.add_physical(ext[1], label="102")
geom.add_physical(ext[2], label="103")
geom.add_physical(ext[3], label="104")
geom.add_physical(vol, label="10")
mesh = geom.generate_mesh()
return simfempy.meshes.simplexmesh.SimplexMesh(mesh=mesh)
# ------------------------------------- #
def backwardfacingstep(h=1.):
if __pygmsh6__:
geom = pygmsh.built_in.Geometry()
X = []
X.append([-1.0, 1.0])
X.append([-1.0, 0.0])
X.append([ 0.0, 0.0])
X.append([ 0.0, -1.0])
X.append([ 3.0, -1.0])
X.append([ 3.0, 1.0])
p = geom.add_polygon(X=np.insert(np.array(X), 2, 0, axis=1), lcar=h)
geom.add_physical(p.surface, label=100)
ll = p.line_loop
for i in range(len(ll.lines)): geom.add_physical(ll.lines[i], label=1000+i)
mesh = pygmsh.generate_mesh(geom, verbose=False)
else:
with pygmsh.geo.Geometry() as geom:
X = []
X.append([-1.0, 1.0])
X.append([-1.0, 0.0])
X.append([0.0, 0.0])
X.append([0.0, -1.0])
X.append([3.0, -1.0])
X.append([3.0, 1.0])
p = geom.add_polygon(points=np.insert(np.array(X), 2, 0, axis=1), mesh_size=h)
geom.add_physical(p.surface, label="100")
for i in range(len(p.lines)): geom.add_physical(p.lines[i], label=f"{1000 + i}")
mesh = geom.generate_mesh()
return simfempy.meshes.simplexmesh.SimplexMesh(mesh=mesh)
# ------------------------------------- #
def backwardfacingstep3d(h):
X = []
X.append([-1.0, 1.0])
X.append([-1.0, 0.0])
X.append([0.0, 0.0])
X.append([0.0, -1.0])
X.append([3.0, -1.0])
X.append([3.0, 1.0])
if __pygmsh6__:
geom = pygmsh.built_in.Geometry()
p = geom.add_polygon(X=np.insert(np.array(X), 2, -1.0, axis=1), lcar=h)
geom.add_physical(p.surface, label=100)
axis = [0, 0, 2]
top, vol, ext = geom.extrude(p.surface, axis)
        n_ext = len(ext)  # avoid shadowing the builtin next()
        geom.add_physical(top, label=101+n_ext)
        for i in range(n_ext):
            geom.add_physical(ext[i], label=101+i)
        geom.add_physical(vol, label=10)
return simfempy.meshes.simplexmesh.SimplexMesh(mesh=pygmsh.generate_mesh(geom, verbose=False))
else:
with pygmsh.geo.Geometry() as geom:
p = geom.add_polygon(points=np.insert(np.array(X), 2, -1.0, axis=1), mesh_size=h)
geom.add_physical(p.surface, label="100")
axis = [0, 0, 2]
top, vol, ext = geom.extrude(p.surface, axis)
            n_ext = len(ext)  # avoid shadowing the builtin next()
            geom.add_physical(top, label=f"{101 + n_ext}")
            for i in range(n_ext):
                geom.add_physical(ext[i], label=f"{101 + i}")
geom.add_physical(vol, label="10")
mesh = geom.generate_mesh()
return simfempy.meshes.simplexmesh.SimplexMesh(mesh=mesh)
# ------------------------------------- #
def equilateral(h):
geom = pygmsh.built_in.Geometry()
a = 1.0
X = []
X.append([-0.5*a, 0, 0])
X.append([0, -0.5*np.sqrt(3)*a, 0])
X.append([0.5*a, 0, 0])
X.append([0, 0.5*np.sqrt(3)*a, 0])
p = geom.add_polygon(X=X, lcar = h)
geom.add_physical(p.surface, label=100)
for i in range(4): geom.add_physical(p.line_loop.lines[i], label=1000 + i)
return simfempy.meshes.simplexmesh.SimplexMesh(mesh=pygmsh.generate_mesh(geom, verbose=False))
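# A small usage sketch (illustrative mesh sizes; SimplexMesh wraps the mesh
# produced by pygmsh):
if __name__ == "__main__":
    m1 = unitline(h=0.1)
    m2 = unitsquare(h=0.2)
    m3 = unitcube(h=0.5)
    print(m1, m2, m3)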
[stats for the file above: avg_line_length 40.05 | max_line_length 102 | alphanum_fraction 0.548 | remaining quality-signal columns omitted]
[next record: hexsha e8ed5c9988585d3400ecfba9d8bb98397afdb7b1 | size 58 | py/Python | tds/protocol/__init__.py | repo by46/geek @ 04b08d0dff80c524bd471ead3fe524423eebf123 | MIT | stars/issues/forks: null]
from .login import login
from .pre_login import pre_login
[stats for the file above: avg_line_length 29.0 | max_line_length 32 | alphanum_fraction 0.828 | remaining quality-signal columns omitted]
[next record: hexsha 33110d9c5000a805b0918e14b8190effbbef31cd | size 133 | py/Python | courses/dl2/cgan/data/base_data_loader.py | repo royalbhati/fastai @ 745ddabcf9301b0078a16ac6333cd41684df149b | Apache-2.0 | stars 67 (2019-05-29 .. 2022-03-14) | issues 17 (2020-08-25 .. 2022-03-27) | forks 89 (2020-08-17 .. 2022-03-27)]
class BaseDataLoader():
    def __init__(self): pass
    def load_data(self): return None  # was missing self, making it uncallable on instances
    def initialize(self, opt): self.opt = opt
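# Typical use is to subclass the stub above and override the hooks; a
# hypothetical minimal loader (not part of the original repo) might look like:
class ListDataLoader(BaseDataLoader):
    def initialize(self, opt):
        BaseDataLoader.initialize(self, opt)
        self.data = list(range(10))
    def load_data(self):
        return self.data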
[stats for the file above: avg_line_length 22.17 | max_line_length 45 | alphanum_fraction 0.677 | remaining quality-signal columns omitted]
[next record: hexsha 332b86f9b51ac8c1c4d177296715010000058e16 | size 58 | py/Python | LAMMPyS/__init__.py | repo permissionx/LAMMPyS @ 2423980ff0e0b535df661859f15698e9ef15bd2f | MIT | stars 2 (2019-04-22 .. 2020-10-29)]
from .dump import *
from .sv import *
from .group import *
[stats for the file above: avg_line_length 19.33 | max_line_length 20 | alphanum_fraction 0.707 | remaining quality-signal columns omitted]
[next record: hexsha 332e2e7236c7c9624b57d2693cc9ecb15a71c068 | size 68 | py/Python | CodeAnalysis/SourceMeter_Interface/SourceMeter-8.2.0-x64-linux/Python/Tools/python/astroid/tests/testdata/python3/data/__init__.py | repo ishtjot/susereumutep @ 56e20c1777e0c938ac42bd8056f84af9e0b76e46 | Apache-2.0 | stars 463 (2015-01-15 .. 2022-03-28) | issues 61 (2017-06-03 .. 2022-03-27) | forks 249 (2015-01-07 .. 2022-03-18)]
__revision__="$Id: __init__.py,v 1.1 2005-06-13 20:55:20 syt Exp $"
[stats for the file above: avg_line_length 34.0 | max_line_length 67 | alphanum_fraction 0.691 | remaining quality-signal columns omitted]
[next record: hexsha 3337472f4c57772c390fcf8fe9a77212226247c9 | size 159 | py/Python | mywebapp/items/views.py | repo diegodiego9/py-django-webapp @ e4bd267032a31b8e4116311f047905a2535a2f38 | MIT | stars/issues/forks: null]
from django.http import HttpResponse
# simplest view possible
def home(request):
return HttpResponse("Hello, world. You're at the Items-app homepage.")
[stats for the file above: avg_line_length 19.88 | max_line_length 74 | alphanum_fraction 0.755 | remaining quality-signal columns omitted]
[next record: hexsha 6839a72946449d8a0dfbf230f23b64abc88c644c | size 38 | py/Python | AutoPypline/__init__.py | repo Sumukha21/AutoPipeline (issues/forks under Sumukha21/AutoPypline) @ 5d335ea63400a546983b2a1f0c46b3915b25cd94 | MIT | stars 2 (2021-05-19 .. 2021-07-02)]
from AutoPypline import auto_pipeline
[stats for the file above: avg_line_length 19.0 | max_line_length 37 | alphanum_fraction 0.895 | remaining quality-signal columns omitted]
[next record: hexsha 68ac37f1996ac3f7e18b595c52311e7aa7d582dc | size 1,660 | py/Python | function/python_3_9/test/test_utils.py | repo aws-samples/amazon-s3-object-lambda-default-configuration @ 3908515d48d5e42fd9bb6dadc1dc9fe5132a1425 | MIT-0 | stars 13 (2021-11-23 .. 2022-03-08) | issues 1 (2022-01-13)]
import json
from src.request.utils import *
def test_get_part_number():
user_request = {
'url': 'https://s3.amazonaws.com?partNumber=1',
'headers': {
'h1': 'v1'
}
}
assert get_part_number(user_request) == '1'
def test_get_part_number_case_insensitive():
user_request = {
'url': 'https://s3.amazonaws.com?hello=world&PARTnumber=1',
'headers': {
'h1': 'v1'
}
}
assert get_part_number(user_request) == '1'
def test_get_part_number_not_exist():
user_request = {
'url': 'https://s3.amazonaws.com?hello=world&Range=1',
'headers': {
'h1': 'v1'
}
}
assert get_part_number(user_request) is None
def test_get_range_from_query_param():
user_request = {
'url': 'https://s3.amazonaws.com?range=bytes=1',
'headers': {
'h1': 'v1'
}
}
assert get_range(user_request) == 'bytes=1'
def test_get_range_from_query_param_case_insensitive():
user_request = {
'url': 'https://s3.amazonaws.com?raNGe=bytes=1',
'headers': {
'h1': 'v1'
}
}
assert get_range(user_request) == 'bytes=1'
def test_get_range_from_header():
user_request = {
'url': 'https://s3.amazonaws.com',
'headers': {
'Range': 'bytes=3-'
}
}
assert get_range(user_request) == 'bytes=3-'
def test_get_range_from_header_case_insensitive():
user_request = {
'url': 'https://s3.amazonaws.com',
'headers': {
'RANge': 'bytes=3-'
}
}
assert get_range(user_request) == 'bytes=3-'
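# The utilities exercised above are not included in this dump. The assertions
# imply case-insensitive query-parameter lookup with a header fallback for
# Range; a hypothetical implementation (underscore-prefixed here so it does
# not shadow the real functions imported above) might look like:
from urllib.parse import urlparse, parse_qs

def _get_query_param(user_request, name):
    # Compare query keys case-insensitively.
    query = parse_qs(urlparse(user_request['url']).query)
    for key, values in query.items():
        if key.lower() == name.lower():
            return values[0]
    return None

def _get_part_number(user_request):
    return _get_query_param(user_request, 'partnumber')

def _get_range(user_request):
    # Prefer the query parameter; fall back to a Range header.
    value = _get_query_param(user_request, 'range')
    if value is not None:
        return value
    for key, header_value in user_request.get('headers', {}).items():
        if key.lower() == 'range':
            return header_value
    return None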
[stats for the file above: avg_line_length 22.74 | max_line_length 67 | alphanum_fraction 0.560 | remaining quality-signal columns omitted]
[next record: hexsha d7ac1d2e792dbcf74e6cfdd9e446ac5d4bf4d539 | size 117 | py/Python | local_run.py | repo augustand/PyLinden @ b3e818409af5a4bbff354e081ca3c88f9e898a6b | MIT | stars/issues/forks: null]
#-*- coding:utf-8 -*-
from __future__ import unicode_literals, print_function
import pylinden
pylinden.pylinden()
[stats for the file above: avg_line_length 14.63 | max_line_length 55 | alphanum_fraction 0.769 | remaining quality-signal columns omitted]
[next record: hexsha d7fd450e94964b12003b6107e444abf0b6a7cf7d | size 207 | py/Python | epymetheus/exceptions/exceptions.py | repo shishaboy/epymetheus @ d8916b20c6b79e86e5aadb39c7c01a582659f03b | BSD-3-Clause | stars/issues/forks: null]
class NoTradeError(RuntimeError):
"""
Exception class to raise if no trades are yielded.
"""
class NotRunError(ValueError):
"""
Exception class to raise if strategy is not run.
"""
[stats for the file above: avg_line_length 18.82 | max_line_length 54 | alphanum_fraction 0.652 | remaining quality-signal columns omitted]
[next record: hexsha cc0c5850af5d063044d2b37a6701ead2875962bc | size 8,165 | py/Python | WebServer/microservices/db/unittest/user_test.py | repo AnneEjsing/TrafficDataAnonymisation (forks under AnneEjsing/Traffic-Data-Anonymisation-Web) @ 6ee5b4a46d53a656299d6a53896175b78008228a | MIT | stars 1 (2020-03-12) | issues 7 (2020-04-02 .. 2022-03-02)]
import sys
import os
sys.path.append(os.getcwd() + '/..')
import dbresolver
import user
import unittest2
import psycopg2
import testing.postgresql
import aiounittest
import json
# mock request to resemble aiohttp request
class request:
dic = {}
    def __init__(self, dic):  # parameter renamed to avoid shadowing builtin dict
        self.dic = dic
async def json(self):
return self.dic
def initialise_database(postgresql):
with psycopg2.connect(**postgresql.dsn()) as conn:
with conn.cursor() as cursor:
with open("test_data.sql","r") as f:
cursor.execute(f.read())
conn.commit()
os.environ['POSTGRES_DB'] = postgresql.dsn()['database']
os.environ['POSTGRES_USER'] = postgresql.dsn()['user']
os.environ['POSTGRES_HOST'] = postgresql.dsn()['host']
os.environ["POSTGRES_PASSWORD"] = ""
os.environ["POSTGRES_PORT"] = str(postgresql.dsn()['port'])
class UserGetUpdateTests(aiounittest.AsyncTestCase):
@classmethod
def setUpClass(cls):
cls.postgresql = testing.postgresql.Postgresql(port=7654)
        initialise_database(cls.postgresql)
@classmethod
def tearDownClass(cls):
cls.postgresql.stop()
## GET USER BY ID
async def test_user_get_by_id_pass(self):
req = request({'id':'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380b11'})
expect = {"user_id": "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380b11", "email": "notadmin@notadmin.no", "role": "user"}
res = await user.user_get_id(req)
res = json.loads(res.body.decode('utf-8'))
self.assertDictContainsSubset(expect, res)
async def test_user_get_by_id_fail_wrong_input(self):
req = request({'id':'a0eebc99-9c0b-4ef8-XXXX-6bb9bd380b11'})
res = await user.user_get_id(req)
self.assertEqual(res.status,500)
async def test_user_get_by_id_fail_missing_input(self):
req = request({})
res = await user.user_get_id(req)
self.assertEqual(res.status,500)
## GET USER BY EMAIL
async def test_user_get_by_email_pass(self):
req = request({"email": "notadmin@notadmin.no"})
expect = {"user_id": "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380b11", "email": "notadmin@notadmin.no", "role": "user"}
res = await user.user_get_email(req)
res = json.loads(res.body.decode('utf-8'))
self.assertDictContainsSubset(expect, res)
async def test_user_get_by_email_fail_wrong_input(self):
req = request({"email": "notadmin@notadmin.dk"})
res = await user.user_get_email(req)
self.assertEqual(res.status,404)
async def test_user_get_by_email_fail_missing_input(self):
req = request({})
res = await user.user_get_email(req)
self.assertEqual(res.status,500)
async def test_user_get_by_email_fail_wrong_param_type(self):
req = request({"email": 1})
res = await user.user_get_email(req)
self.assertEqual(res.status,500)
## Login
async def test_user_login(self):
req = request({"email":"notadmin@notadmin.no","password":"passpass"})
expect = {"user_id": "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380b11", "email": "notadmin@notadmin.no", "role": "user"}
res = await user.user_login(req)
res = json.loads(res.body.decode('utf-8'))
self.assertDictContainsSubset(expect,res)
async def test_user_login_wrong_input_name(self):
req = request({"emil":"notadmin@notadmin.no","password":"passpass"})
expect = 500
res = await user.user_login(req)
self.assertEqual(expect,res.status)
async def test_user_login_wrong_email(self):
req = request({"email":"notadmin@notadmi.no","password":"passpass"})
expect = 401
res = await user.user_login(req)
self.assertEqual(expect,res.status)
async def test_user_login_wrong_password(self):
req = request({"email":"notadmin@notadmin.no","password":"notpasspass"})
expect = 401
res = await user.user_login(req)
self.assertEqual(expect,res.status)
async def test_user_login_wrong_format(self):
req = request({"email":1,"password":2})
expect = 500
res = await user.user_login(req)
self.assertEqual(expect,res.status)
## GET ALL
def test_user_get_all_pass(self):
expect = [{"user_id": "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380b11", "email": "notadmin@notadmin.no", "role": "user"}, {"user_id": "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380b12", "email": "admin@admin.no", "role": "admin"}]
res = user.user_list(request({}))
res = json.loads(res.body.decode('utf-8'))
self.assertDictContainsSubset(expect[0],res[0])
self.assertDictContainsSubset(expect[1],res[1])
## User update
async def test_user_update(self):
req = request({"email":"notadmin@notadmin.no","password":"passpass","rights":"admin","id":"a0eebc99-9c0b-4ef8-bb6d-6bb9bd380b11"})
expect = {"user_id": "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380b11", "email": "notadmin@notadmin.no", "role": "admin"}
res = await user.user_update(req)
res = json.loads(res.body.decode('utf-8'))
self.assertDictContainsSubset(expect,res)
async def test_user_update_wrong_input_name(self):
req = request({"emil":"notadmin@notadmin.no","password":"passpass"})
expect = 500
res = await user.user_update(req)
self.assertEqual(expect,res.status)
async def test_user_update_incorrect_id(self):
req = request({"email":"notadmin@notadmin.no","password":"passpass","rights":"admin","id":"a0eebc99-9c0b-4ef8-bbd-6bb9bd380b11"})
expect = 500
res = await user.user_update(req)
self.assertEqual(expect,res.status)
async def test_user_update_nonexisting_id(self):
req = request({"email":"notadmin@notadmin.no","password":"passpass","rights":"admin","id":"a0eebc99-9c0b-4ef8-bb7d-6bb9bd380b11"})
expect = 404
res = await user.user_update(req)
self.assertEqual(expect,res.status)
class UserCreateTests(aiounittest.AsyncTestCase):
@classmethod
def setUpClass(cls):
cls.postgresql = testing.postgresql.Postgresql(port=7654)
        initialise_database(cls.postgresql)
@classmethod
def tearDownClass(cls):
cls.postgresql.stop()
async def test_user_signup(self):
req = request({"email":"notadmin@notadmin.yes","password":"passpass","rights":"admin"})
expect = {"email": "notadmin@notadmin.yes", "role": "admin"}
res = await user.user_signup(req)
res = json.loads(res.body.decode('utf-8'))
self.assertDictContainsSubset(expect,res)
async def test_user_signup_wrong_input_names(self):
req = request({"emaail":"notadmin@notadmin.yes","password":"passpass","rights":"admin"})
expect = 500
res = await user.user_signup(req)
self.assertEqual(expect,res.status)
async def test_user_signup_same_email(self):
req = request({"email":"notadmin@notadmin.no","password":"passpass","rights":"admin"})
expect = 500
res = await user.user_signup(req)
self.assertEqual(expect,res.status)
class UserDeleteTests(aiounittest.AsyncTestCase):
@classmethod
def setUpClass(cls):
cls.postgresql = testing.postgresql.Postgresql(port=7654)
        initialise_database(cls.postgresql)
@classmethod
def tearDownClass(cls):
cls.postgresql.stop()
async def test_user_delete_pass(self):
req = request({'id':'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380b11'})
expect = {"user_id": "a0eebc99-9c0b-4ef8-bb6d-6bb9bd380b11"}
res = await user.user_delete(req)
res = json.loads(res.body.decode('utf-8'))
self.assertDictContainsSubset(expect, res)
async def test_user_delete_fail_wrong_input(self):
req = request({'id':'a0eebc99-9c0b-4ef8-XXXX-6bb9bd380b11'})
res = await user.user_delete(req)
self.assertEqual(res.status,500)
async def test_user_delete_fail_missing_input(self):
req = request({})
res = await user.user_delete(req)
self.assertEqual(res.status,500)
if __name__ == '__main__':
unittest2.main()
[stats for the file above: avg_line_length 38.88 | max_line_length 216 | alphanum_fraction 0.662 | remaining quality-signal columns omitted]
[next record: hexsha 0bdf19ed9810ccbdfba36fe3aa3efbf48ead5a08 | size 192 | py/Python | project/cms_post/admin.py | repo cborao/Django-cms-post @ 44486c3f2d231ac0e3d7958dd0c9d0085dac30fc | MIT | stars/issues/forks: null]
from django.contrib import admin
from .models import Content, Comment
# Register your models here.
admin.site.register(Content)
admin.site.register(Comment)
[stats for the file above: avg_line_length 21.33 | max_line_length 36 | alphanum_fraction 0.813 | remaining quality-signal columns omitted]
[next record: hexsha 0423351d6f5cc18fb910646a151c5964252b1027 | size 37 | py/Python | algsel/scoring/__init__.py | repo janvanrijn/openml-algsel @ eb30b63c4ad926b4f180c2e910fbf0ffeabcdfd8 | BSD-3-Clause | stars/issues/forks: null]
from .oasc_validator import Validator
[stats for the file above: avg_line_length 37.0 | max_line_length 37 | alphanum_fraction 0.892 | remaining quality-signal columns omitted]
[next record: hexsha f08cd84104d99a341a27d9e9516a830f6a3d0157 | size 37 | py/Python | roglick/game/__init__.py | repo Kromey/roglick @ b76202af71df0c30be0bd5f06a3428c990476e0e | MIT | stars 6 (2015-05-05 .. 2019-04-14)]
from .game_master import GameMaster
[stats for the file above: avg_line_length 12.33 | max_line_length 35 | alphanum_fraction 0.838 | remaining quality-signal columns omitted]
[next record: hexsha f09a3a5a9ea5431d78fc1f8b1b596cf7c47472b9 | size 5,839 | py/Python | spotmask.py | repo CheerfulUser/tessffi @ 576c6baed6e2a5762da2a97e12f2e5a8e233b74c | MIT | stars/issues/forks: null]
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.io import fits
from astropy.nddata import Cutout2D
from astropy.wcs import WCS
from scipy.signal import fftconvolve
import argparse
# turn off runtime warnings (lots from logic on nans)
import warnings
warnings.filterwarnings("ignore", category=RuntimeWarning)
def size_limit(x,y,image):
yy,xx = image.shape
ind = ((y > 0) & (y < yy-1) & (x > 0) & (x < xx-1))
return ind
def region_cut(table,wcs):
ra = table.ra.values
dec = table.dec.values
foot = wcs.calc_footprint()
minra = min(foot[:,0])
maxra = max(foot[:,0])
mindec = min(foot[:,1])
maxdec = max(foot[:,1])
inddec = (dec < maxdec) & (dec> mindec)
indra = (ra < maxra) & (ra> minra)
ind = indra * inddec
tab = table.iloc[ind]
return tab
def circle_app(rad):
"""
makes a kinda circular aperture, probably not worth using.
"""
mask = np.zeros((int(rad*2+.5)+1,int(rad*2+.5)+1))
c = rad
x,y =np.where(mask==0)
dist = np.sqrt((x-c)**2 + (y-c)**2)
ind = (dist) < rad + .2
mask[y[ind],x[ind]]= 1
return mask
def check_table_format(table):
try:
temp = table.x
temp = table.y
temp = table.ra
temp = table.dec
temp = table.radius
temp = table.mag
temp = table.mjd_start
temp = table.mjd_end
    except AttributeError:
        message = ("mask_table must be a csv with the following columns:\nx\ny\nra\ndec\nradius\nmag\nmjd_start\nmjd_end\n"
                   + "Only a position (x,y) or (ra,dec) and size (radius) or (mag) is needed to run.")
        raise ValueError(message)
def Spot_mask(fits_file,mask_table,ext=0):
table = pd.read_csv(mask_table)
check_table_format(table)
hdu = fits.open(fits_file)[ext]
# uses the file name to set the time, not versitile
t = float(fits_file.split('/')[-1].split('_')[1])
image = hdu.data
wcs = WCS(hdu.header)
spotmask = np.zeros_like(image,dtype=float)
for i in range(len(table)):
row = table.iloc[i]
start = row.mjd_start
end = row.mjd_end
cont = True
if np.isfinite(start):
if t < start:
cont = False
if np.isfinite(end):
if t > end:
cont = False
if cont:
if np.isfinite(row.x) & np.isfinite(row.y):
x = int(row.x + 0.5)
y = int(row.y + 0.5)
elif np.isfinite(row.ra) & np.isfinite(row.dec):
                x, y = wcs.all_world2pix(row.ra, row.dec, 0)
                x, y = int(x + 0.5), int(y + 0.5)  # pixel indexes must be ints
if size_limit(x,y,image):
pass
else:
x = np.nan
y = np.nan
                    print('coordinates ra={}, dec={} not in range'.format(np.round(row.ra,2),np.round(row.dec,2)))
pass
else:
print('no position provided')
# make aperture time
rad = row.radius
mag = row.mag
if np.isfinite(rad):
ap = circle_app(rad)
temp = np.zeros_like(image)
temp[y,x] = 1
conv = fftconvolve(temp, ap,mode='same')#.astype(int)
temp = (conv > 0.9) * 1.
spotmask += conv
elif np.isfinite(mag):
mags = np.array([18,17,16,15,14,13.5,12,10,9,8,7])
size = (np.array([3,4,5,6,7,8,10,14,16,18])).astype(int)
diff = mag - mags
ind = np.where(diff < 0)[0][-1]
ap = circle_app(size[ind])
temp = np.zeros_like(image)
temp[y,x] = 1
conv = fftconvolve(temp, ap,mode='same')#.astype(int)
temp = (conv > 0.5) * 1
spotmask += conv
else:
print('no radius or magnitude provided')
spotmask = (spotmask >= .5).astype(int) * 64
return spotmask
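# A minimal usage sketch (assumed file names; note that Spot_mask derives the
# observation time from the second '_'-separated token of the FITS file name):
if __name__ == '__main__':
    mask = Spot_mask('tess_58816.5_ffi.fits', 'spot_table.csv', ext=0)
    np.save('spotmask.npy', mask)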
[stats for the file above: avg_line_length 28.76 | max_line_length 123 | alphanum_fraction 0.554 | remaining quality-signal columns omitted]
[next record: hexsha f0a29734a0480e9b9bf236f26f373d8688cd45bb | size 144 | py/Python | src/wai/annotations/domain/audio/speech/__init__.py | repo waikato-ufdl/wai-annotations-core @ bac3429e9488efb456972c74f9d462f951c4af3d | Apache-2.0 | issues 3 (2021-06-30 .. 2022-03-01)]
from ._SpeechDomainSpecifier import SpeechDomainSpecifier
from ._SpeechInstance import SpeechInstance
from ._Transcription import Transcription
[stats for the file above: avg_line_length 36.0 | max_line_length 57 | alphanum_fraction 0.896 | remaining quality-signal columns omitted]
[next record: hexsha f0a55eda49c2b18dd9867dcf75154b423378fd7f | size 6,041 | py/Python | amorf/metrics.py | repo DSAAR/amorf @ 5cc5e346e6d4f918d588ff527aaa45136f036851 | MIT | stars 13 (2020-03-24 .. 2022-03-25) | forks 2 (2020-08-14 .. 2022-03-10)]
import numpy as np
from numpy import mean, sqrt
import torch as torch
def average_correlation_coefficient(y_pred, y_true):
"""Calculate Average Correlation Coefficient
Args:
y_true (array-like): np.ndarray or torch.Tensor of dimension N x d with actual values
y_pred (array-like): np.ndarray or torch.Tensor of dimension N x d with predicted values
Returns:
        float: Average Correlation Coefficient
Raises:
ValueError : If Parameters are not both of type np.ndarray or torch.Tensor
"""
if isinstance(y_true, np.ndarray) and isinstance(y_pred, np.ndarray):
top = np.sum((y_true - mean(y_true, axis=0)) *
(y_pred - mean(y_pred, axis=0)), axis=0)
bottom = np.sqrt(np.sum((y_true - mean(y_true, axis=0))**2, axis=0)
* np.sum((y_pred - mean(y_pred, axis=0))**2, axis=0))
return np.sum(top / bottom) / len(y_true[0])
elif isinstance(y_true, torch.Tensor) and isinstance(y_pred, torch.Tensor):
top = torch.sum((y_true - torch.mean(y_true, dim=0))
* (y_pred - torch.mean(y_pred, dim=0)), dim=0)
bottom = torch.sqrt(torch.sum((y_true - torch.mean(y_true, dim=0))**2, dim=0) *
torch.sum((y_pred - torch.mean(y_pred, dim=0))**2, dim=0))
return torch.sum(top / bottom) / len(y_true[0])
else:
raise ValueError(
'y_true and y_pred must be both of type numpy.ndarray or torch.Tensor')
def average_relative_error(y_pred, y_true):
"""Calculate Average Relative Error
Args:
y_true (array-like): np.ndarray or torch.Tensor of dimension N x d with actual values
y_pred (array-like): np.ndarray or torch.Tensor of dimension N x d with predicted values
Returns:
        float: Average Relative Error
Raises:
ValueError : If Parameters are not both of type np.ndarray or torch.Tensor
"""
if isinstance(y_true, np.ndarray) and isinstance(y_pred, np.ndarray):
return sum(sum(abs(y_true - y_pred) / y_true) /
len(y_true)) / len(y_true[0, :])
elif isinstance(y_true, torch.Tensor) and isinstance(y_pred, torch.Tensor):
return torch.sum(torch.sum(torch.abs(y_true - y_pred) / y_true, dim=0) /
len(y_true)) / len(y_true[0, :])
else:
raise ValueError(
'y_true and y_pred must be both of type numpy.ndarray or torch.Tensor')
def average_relative_root_mean_squared_error(y_pred, y_true):
"""Calculate Average Relative Root Mean Squared Error (aRRMSE)
Args:
y_true (array-like): np.ndarray or torch.Tensor of dimension N x d with actual values
y_pred (array-like): np.ndarray or torch.Tensor of dimension N x d with predicted values
Returns:
float : Average Relative Root Mean Squared Error
Raises:
ValueError : If Parameters are not both of type np.ndarray or torch.Tensor
"""
if isinstance(y_true, np.ndarray) and isinstance(y_pred, np.ndarray):
return sum(sqrt(sum((y_true - y_pred)**2) /
sum((y_true - mean(y_true, axis=0))**2))) / len(y_pred[0, :])
elif isinstance(y_true, torch.Tensor) and isinstance(y_pred, torch.Tensor):
return torch.sum(torch.sqrt(torch.sum((y_true - y_pred)**2, dim=0) /
torch.sum(((y_true - torch.mean(y_true, dim=0))**2), dim=0))) / len(y_pred[0, :])
else:
raise ValueError(
'y_true and y_pred must be both of type numpy.ndarray or torch.Tensor')
def mean_squared_error(y_pred, y_true):
"""Calculate Mean Squared Error (MSE)
Args:
y_true (array-like): np.ndarray or torch.Tensor of dimension N x d with actual values
y_pred (array-like): np.ndarray or torch.Tensor of dimension N x d with predicted values
Returns:
float : Mean Squared Error
Raises:
ValueError : If Parameters are not both of type np.ndarray or torch.Tensor
"""
if isinstance(y_true, np.ndarray) and isinstance(y_pred, np.ndarray):
return sum((sum((y_true - y_pred)**2) /
len(y_true)))
elif isinstance(y_true, torch.Tensor) and isinstance(y_pred, torch.Tensor):
return torch.sum(torch.sum((y_true - y_pred)**2) /
len(y_true))
else:
raise ValueError(
'y_true and y_pred must be both of type numpy.ndarray or torch.Tensor')
def average_root_mean_squared_error(y_pred, y_true):
"""Calculate Average Root Mean Squared Error (aRMSE)
Args:
y_true (array-like): np.ndarray or torch.Tensor of dimension N x d with actual values
y_pred (array-like): np.ndarray or torch.Tensor of dimension N x d with predicted values
Returns:
float : Average Root Mean Squared Error
Raises:
ValueError : If Parameters are not both of type np.ndarray or torch.Tensor
"""
if isinstance(y_true, np.ndarray) and isinstance(y_pred, np.ndarray):
return sum(sqrt((sum((y_true - y_pred)**2) /
len(y_true))))/len(y_true[0, :])
elif isinstance(y_true, torch.Tensor) and isinstance(y_pred, torch.Tensor):
return torch.sum(torch.sqrt(torch.sum((y_true - y_pred)**2, dim=0)
/ len(y_true) ))/len(y_true[0])
else:
raise ValueError(
'y_true and y_pred must be both of type numpy.ndarray or torch.Tensor')
def __validate_dimensions(y_pred, y_true):
"""Validates dimensions of the two input parameters
Args:
y_true (array-like): np.ndarray or torch.Tensor of dimension N x d with actual values
y_pred (array-like): np.ndarray or torch.Tensor of dimension N x d with predicted values
Raises:
ValueError: If dimensions are not identical
"""
    if len(y_true) != len(y_pred) or len(y_true[0]) != len(y_pred[0]):
        raise ValueError('Dimensions of y_true and y_pred do not match.')
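# A quick numeric sanity check (illustrative data, not from the library docs):
if __name__ == "__main__":
    y_true = np.array([[1.0, 2.0], [3.0, 4.0]])
    y_pred = np.array([[1.1, 1.9], [2.9, 4.2]])
    # aRMSE averages the per-target RMSEs:
    #   target 0: sqrt((0.1**2 + 0.1**2) / 2) = 0.1
    #   target 1: sqrt((0.1**2 + 0.2**2) / 2) ~= 0.1581
    print(average_root_mean_squared_error(y_pred, y_true))  # ~0.129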
[stats for the file above: avg_line_length 42.24 | max_line_length 117 | alphanum_fraction 0.636 | remaining quality-signal columns omitted]
[next record: hexsha f0df6210c870e5566baf5e0516d40a1d49690972 | size 44 | py/Python | data_process/__init__.py | repo Annelise2019/DeepLearning_Project @ f63dcc266a5d9c33c118cabe8145f46f8e35945b | MIT | stars 4 (2021-05-04 .. 2021-10-22)]
from .skeleton_feeder import SkeletonFeeder
[stats for the file above: avg_line_length 22.0 | max_line_length 43 | alphanum_fraction 0.886 | remaining quality-signal columns omitted]
[next record: hexsha 500404fe645fd8c986b7294b3b03e9ebc92a46f7 | size 103,246 | py/Python | src/azure-cli/azure/cli/command_modules/acs/tests/latest/test_agentpool_decorator.py | repo ZengTaoxu/azure-cli @ 6be96de450da5ac9f07aafb22dd69880bea04792 | MIT | stars/issues/forks: null]
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import importlib
import unittest
from unittest.mock import Mock, patch
from azure.cli.command_modules.acs._consts import (
CONST_AVAILABILITY_SET,
CONST_DEFAULT_NODE_OS_TYPE,
CONST_DEFAULT_NODE_VM_SIZE,
CONST_DEFAULT_WINDOWS_NODE_VM_SIZE,
CONST_NODEPOOL_MODE_SYSTEM,
CONST_NODEPOOL_MODE_USER,
CONST_SCALE_DOWN_MODE_DEALLOCATE,
CONST_SCALE_DOWN_MODE_DELETE,
CONST_SCALE_SET_PRIORITY_REGULAR,
CONST_SCALE_SET_PRIORITY_SPOT,
CONST_SPOT_EVICTION_POLICY_DEALLOCATE,
CONST_SPOT_EVICTION_POLICY_DELETE,
CONST_VIRTUAL_MACHINE_SCALE_SETS,
AgentPoolDecoratorMode,
DecoratorEarlyExitException,
DecoratorMode,
)
from azure.cli.command_modules.acs.agentpool_decorator import (
AKSAgentPoolAddDecorator,
AKSAgentPoolContext,
AKSAgentPoolModels,
AKSAgentPoolParamDict,
AKSAgentPoolUpdateDecorator,
)
from azure.cli.command_modules.acs.tests.latest.mocks import MockCLI, MockClient, MockCmd
from azure.cli.command_modules.acs.tests.latest.utils import get_test_data_file_path
from azure.cli.core.azclierror import (
ArgumentUsageError,
CLIInternalError,
InvalidArgumentValueError,
MutuallyExclusiveArgumentError,
RequiredArgumentMissingError,
)
from azure.cli.core.profiles import ResourceType
from azure.cli.core.util import get_file_json
class AKSAgentPoolModelsTestCase(unittest.TestCase):
def setUp(self):
self.cli_ctx = MockCLI()
self.cmd = MockCmd(self.cli_ctx)
self.resource_type = ResourceType.MGMT_CONTAINERSERVICE
def test__init__(self):
# load models directly (instead of through the `get_sdk` method provided by the cli component)
from azure.cli.core.profiles._shared import AZURE_API_PROFILES
sdk_profile = AZURE_API_PROFILES["latest"][self.resource_type]
api_version = sdk_profile.default_api_version
module_name = "azure.mgmt.containerservice.v{}.models".format(api_version.replace("-", "_"))
module = importlib.import_module(module_name)
standalone_models = AKSAgentPoolModels(self.cmd, self.resource_type, AgentPoolDecoratorMode.STANDALONE)
self.assertEqual(standalone_models.UnifiedAgentPoolModel, getattr(module, "AgentPool"))
managedcluster_models = AKSAgentPoolModels(self.cmd, self.resource_type, AgentPoolDecoratorMode.MANAGED_CLUSTER)
self.assertEqual(managedcluster_models.UnifiedAgentPoolModel, getattr(module, "ManagedClusterAgentPoolProfile"))
class AKSAgentPoolContextCommonTestCase(unittest.TestCase):
def _remove_defaults_in_agentpool(self, agentpool):
self.defaults_in_agentpool = {}
for attr_name, attr_value in vars(agentpool).items():
if not attr_name.startswith("_") and attr_name != "name" and attr_value is not None:
self.defaults_in_agentpool[attr_name] = attr_value
setattr(agentpool, attr_name, None)
return agentpool
def _restore_defaults_in_agentpool(self, agentpool):
for key, value in self.defaults_in_agentpool.items():
if getattr(agentpool, key, None) is None:
setattr(agentpool, key, value)
return agentpool
def create_initialized_agentpool_instance(
self, nodepool_name="nodepool1", remove_defaults=True, restore_defaults=True, **kwargs
):
"""Helper function to create a properly initialized agentpool instance.
:return: the AgentPool object
"""
if self.agentpool_decorator_mode == AgentPoolDecoratorMode.MANAGED_CLUSTER:
agentpool = self.models.UnifiedAgentPoolModel(name=nodepool_name)
else:
agentpool = self.models.UnifiedAgentPoolModel()
agentpool.name = nodepool_name
# remove defaults
if remove_defaults:
self._remove_defaults_in_agentpool(agentpool)
# set properties
for key, value in kwargs.items():
setattr(agentpool, key, value)
        # restore defaults
if restore_defaults:
self._restore_defaults_in_agentpool(agentpool)
return agentpool
def common__init__(self):
# fail on not passing dictionary-like parameters
with self.assertRaises(CLIInternalError):
AKSAgentPoolContext(self.cmd, [], self.models, DecoratorMode.CREATE, self.agentpool_decorator_mode)
        # fail on passing a decorator_mode that is not of Enum type DecoratorMode
with self.assertRaises(CLIInternalError):
AKSAgentPoolContext(self.cmd, AKSAgentPoolParamDict({}), self.models, 1, self.agentpool_decorator_mode)
def common_attach_agentpool(self):
ctx_1 = AKSAgentPoolContext(
self.cmd, AKSAgentPoolParamDict({}), self.models, DecoratorMode.CREATE, self.agentpool_decorator_mode
)
agentpool = self.create_initialized_agentpool_instance()
ctx_1.attach_agentpool(agentpool)
self.assertEqual(ctx_1.agentpool, agentpool)
# fail on attach again
with self.assertRaises(CLIInternalError):
ctx_1.attach_agentpool(agentpool)
self.assertEqual(ctx_1.existing_agentpool, None)
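    # In DecoratorMode.UPDATE, attach_agentpool stores the given instance as
    # existing_agentpool (verified by the assertion below), so a direct second call
    # to attach_existing_agentpool raises CLIInternalError.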
def common_attach_existing_agentpool(self):
ctx_1 = AKSAgentPoolContext(
self.cmd, AKSAgentPoolParamDict({}), self.models, DecoratorMode.UPDATE, self.agentpool_decorator_mode
)
agentpool = self.create_initialized_agentpool_instance()
ctx_1.attach_agentpool(agentpool)
self.assertEqual(ctx_1.existing_agentpool, agentpool)
# fail on attach again
with self.assertRaises(CLIInternalError):
ctx_1.attach_existing_agentpool(agentpool)
def common_attach_agentpools(self):
ctx_1 = AKSAgentPoolContext(
self.cmd, AKSAgentPoolParamDict({}), self.models, DecoratorMode.CREATE, self.agentpool_decorator_mode
)
agentpool_1 = self.create_initialized_agentpool_instance()
agentpool_2 = self.create_initialized_agentpool_instance()
agentpools = [agentpool_1, agentpool_2]
ctx_1.attach_agentpools(agentpools)
self.assertEqual(ctx_1._agentpools, agentpools)
# fail on attach again
with self.assertRaises(CLIInternalError):
ctx_1.attach_agentpools(agentpools)
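    # The calls below reach the private __validate_counts_in_autoscaler helper through
    # its name-mangled form _AKSAgentPoolContext__validate_counts_in_autoscaler
    # (standard Python name mangling for double-underscore attributes).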
def common_validate_counts_in_autoscaler(self):
ctx = AKSAgentPoolContext(
self.cmd, AKSAgentPoolParamDict({}), self.models, DecoratorMode.CREATE, self.agentpool_decorator_mode
)
# default
ctx._AKSAgentPoolContext__validate_counts_in_autoscaler(3, False, None, None, CONST_NODEPOOL_MODE_SYSTEM, DecoratorMode.CREATE)
# custom value
ctx._AKSAgentPoolContext__validate_counts_in_autoscaler(5, True, 1, 10, CONST_NODEPOOL_MODE_SYSTEM, DecoratorMode.CREATE)
# fail on min_count/max_count not specified
with self.assertRaises(RequiredArgumentMissingError):
ctx._AKSAgentPoolContext__validate_counts_in_autoscaler(5, True, None, None, CONST_NODEPOOL_MODE_SYSTEM, DecoratorMode.CREATE)
# fail on min_count > max_count
with self.assertRaises(InvalidArgumentValueError):
ctx._AKSAgentPoolContext__validate_counts_in_autoscaler(5, True, 3, 1, CONST_NODEPOOL_MODE_SYSTEM, DecoratorMode.CREATE)
# fail on node_count < min_count in create mode
with self.assertRaises(InvalidArgumentValueError):
ctx._AKSAgentPoolContext__validate_counts_in_autoscaler(5, True, 7, 10, CONST_NODEPOOL_MODE_SYSTEM, DecoratorMode.CREATE)
# skip node_count check in update mode
ctx._AKSAgentPoolContext__validate_counts_in_autoscaler(5, True, 7, 10, CONST_NODEPOOL_MODE_SYSTEM, DecoratorMode.UPDATE)
ctx._AKSAgentPoolContext__validate_counts_in_autoscaler(None, True, 7, 10, CONST_NODEPOOL_MODE_SYSTEM, DecoratorMode.UPDATE)
# fail on enable_cluster_autoscaler not specified
with self.assertRaises(RequiredArgumentMissingError):
ctx._AKSAgentPoolContext__validate_counts_in_autoscaler(5, False, 3, None, CONST_NODEPOOL_MODE_SYSTEM, DecoratorMode.UPDATE)
# min_count set to 0 for user node pools
ctx._AKSAgentPoolContext__validate_counts_in_autoscaler(0, True, 0, 1, CONST_NODEPOOL_MODE_USER, DecoratorMode.CREATE)
# fail on min_count < 1 for system node pools
with self.assertRaises(InvalidArgumentValueError):
ctx._AKSAgentPoolContext__validate_counts_in_autoscaler(1, True, 0, 1, CONST_NODEPOOL_MODE_SYSTEM, DecoratorMode.CREATE)
def common_get_resource_group_name(self):
# default
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"resource_group_name": "test_rg_name"}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_1.get_resource_group_name(), "test_rg_name")
def common_get_cluster_name(self):
# default
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"cluster_name": "test_cluster_name", "name": "test_name"}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
if self.agentpool_decorator_mode == AgentPoolDecoratorMode.MANAGED_CLUSTER:
self.assertEqual(ctx_1.get_cluster_name(), "test_name")
else:
self.assertEqual(ctx_1.get_cluster_name(), "test_cluster_name")
def common_get_snapshot_id(self):
# default
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict(
{
"snapshot_id": None,
}
),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_1.get_snapshot_id(), None)
creation_data = self.models.CreationData(source_resource_id="test_source_resource_id")
agentpool = self.create_initialized_agentpool_instance(creation_data=creation_data)
ctx_1.attach_agentpool(agentpool)
self.assertEqual(ctx_1.get_snapshot_id(), "test_source_resource_id")
def common_get_snapshot(self):
# custom value
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict(
{
"snapshot_id": "test_source_resource_id",
}
),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
mock_snapshot = Mock()
with patch(
"azure.cli.command_modules.acs.agentpool_decorator.get_snapshot_by_snapshot_id",
return_value=mock_snapshot,
):
self.assertEqual(ctx_1.get_snapshot(), mock_snapshot)
# test cache
self.assertEqual(ctx_1.get_snapshot(), mock_snapshot)
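    # The snapshot-aware getters exercised below (kubernetes_version, node_vm_size,
    # os_type, os_sku) share one precedence chain: a value already set on the attached
    # agentpool wins over the raw parameter, which wins over the value read from the
    # referenced snapshot, which wins over the hard-coded default.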
def common_get_kubernetes_version(self):
# default
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"kubernetes_version": ""}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_1.get_kubernetes_version(), "")
agentpool = self.create_initialized_agentpool_instance(orchestrator_version="test_kubernetes_version")
ctx_1.attach_agentpool(agentpool)
self.assertEqual(ctx_1.get_kubernetes_version(), "test_kubernetes_version")
# custom value
ctx_2 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"kubernetes_version": "", "snapshot_id": "test_snapshot_id"}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
mock_snapshot = Mock(kubernetes_version="test_kubernetes_version")
with patch(
"azure.cli.command_modules.acs.agentpool_decorator.get_snapshot_by_snapshot_id",
return_value=mock_snapshot,
):
self.assertEqual(ctx_2.get_kubernetes_version(), "test_kubernetes_version")
# custom value
ctx_3 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict(
{
"kubernetes_version": "custom_kubernetes_version",
"snapshot_id": "test_snapshot_id",
}
),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
mock_snapshot = Mock(kubernetes_version="test_kubernetes_version")
with patch(
"azure.cli.command_modules.acs.agentpool_decorator.get_snapshot_by_snapshot_id",
return_value=mock_snapshot,
):
self.assertEqual(ctx_3.get_kubernetes_version(), "custom_kubernetes_version")
def common_get_node_vm_size(self):
# default
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"node_vm_size": None}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_1.get_node_vm_size(), CONST_DEFAULT_NODE_VM_SIZE)
agentpool = self.create_initialized_agentpool_instance(vm_size="Standard_ABCD_v2")
ctx_1.attach_agentpool(agentpool)
self.assertEqual(ctx_1.get_node_vm_size(), "Standard_ABCD_v2")
# custom value
ctx_2 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"node_vm_size": None, "snapshot_id": "test_snapshot_id"}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
mock_snapshot = Mock(vm_size="test_vm_size")
with patch(
"azure.cli.command_modules.acs.agentpool_decorator.get_snapshot_by_snapshot_id",
return_value=mock_snapshot,
):
self.assertEqual(ctx_2.get_node_vm_size(), "test_vm_size")
# custom value
ctx_3 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict(
{
"node_vm_size": "custom_node_vm_size",
"snapshot_id": "test_snapshot_id",
}
),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
mock_snapshot = Mock(vm_size="test_vm_size")
with patch(
"azure.cli.command_modules.acs.agentpool_decorator.get_snapshot_by_snapshot_id",
return_value=mock_snapshot,
):
self.assertEqual(ctx_3.get_node_vm_size(), "custom_node_vm_size")
# custom value
ctx_4 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict(
{
"node_vm_size": None,
"os_type": "WINDOWS",
}
),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
if self.agentpool_decorator_mode == AgentPoolDecoratorMode.MANAGED_CLUSTER:
# fail on windows os type for ManagedCluster mode (aks create)
with self.assertRaises(InvalidArgumentValueError):
ctx_4.get_node_vm_size()
else:
self.assertEqual(ctx_4.get_node_vm_size(), CONST_DEFAULT_WINDOWS_NODE_VM_SIZE)
def common_get_os_type(self):
# default
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"os_type": None}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_1.get_os_type(), CONST_DEFAULT_NODE_OS_TYPE)
agentpool = self.create_initialized_agentpool_instance(os_type="test_os_type")
ctx_1.attach_agentpool(agentpool)
self.assertEqual(ctx_1.get_os_type(), "test_os_type")
# custom value
ctx_2 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"os_type": None, "snapshot_id": "test_snapshot_id"}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
mock_snapshot = Mock(os_type="test_os_type")
with patch(
"azure.cli.command_modules.acs.agentpool_decorator.get_snapshot_by_snapshot_id",
return_value=mock_snapshot,
):
self.assertEqual(ctx_2.get_os_type(), "test_os_type")
# custom value
ctx_3 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict(
{
"os_type": "custom_os_type",
"snapshot_id": "test_snapshot_id",
}
),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
mock_snapshot = Mock(os_type="test_os_type")
with patch(
"azure.cli.command_modules.acs.agentpool_decorator.get_snapshot_by_snapshot_id",
return_value=mock_snapshot,
):
self.assertEqual(ctx_3.get_os_type(), "custom_os_type")
# custom value
ctx_4 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict(
{
"os_type": "windows",
}
),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
if self.agentpool_decorator_mode == AgentPoolDecoratorMode.MANAGED_CLUSTER:
# fail on windows os type for ManagedCluster mode (aks create)
with self.assertRaises(InvalidArgumentValueError):
ctx_4.get_os_type()
else:
self.assertEqual(ctx_4.get_os_type(), "windows")
def common_get_os_sku(self):
# default
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"os_sku": None}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_1.get_os_sku(), None)
agentpool = self.create_initialized_agentpool_instance(os_sku="test_os_sku")
ctx_1.attach_agentpool(agentpool)
self.assertEqual(ctx_1.get_os_sku(), "test_os_sku")
# custom value
ctx_2 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"os_sku": None, "snapshot_id": "test_snapshot_id"}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
mock_snapshot = Mock(os_sku="test_os_sku")
with patch(
"azure.cli.command_modules.acs.agentpool_decorator.get_snapshot_by_snapshot_id",
return_value=mock_snapshot,
):
self.assertEqual(ctx_2.get_os_sku(), "test_os_sku")
# custom value
ctx_3 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict(
{
"os_sku": "custom_os_sku",
"snapshot_id": "test_snapshot_id",
}
),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
mock_snapshot = Mock(os_sku="test_os_sku")
with patch(
"azure.cli.command_modules.acs.agentpool_decorator.get_snapshot_by_snapshot_id",
return_value=mock_snapshot,
):
self.assertEqual(ctx_3.get_os_sku(), "custom_os_sku")
def common_get_vnet_subnet_id(self):
# default
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"vnet_subnet_id": None}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_1.get_vnet_subnet_id(), None)
agentpool = self.create_initialized_agentpool_instance(vnet_subnet_id="test_vnet_subnet_id")
ctx_1.attach_agentpool(agentpool)
self.assertEqual(ctx_1.get_vnet_subnet_id(), "test_vnet_subnet_id")
def common_get_pod_subnet_id(self):
# default
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"pod_subnet_id": None}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_1.get_pod_subnet_id(), None)
agentpool = self.create_initialized_agentpool_instance(pod_subnet_id="test_pod_subnet_id")
ctx_1.attach_agentpool(agentpool)
self.assertEqual(ctx_1.get_pod_subnet_id(), "test_pod_subnet_id")
def common_get_enable_node_public_ip(self):
# default
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"enable_node_public_ip": False}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_1.get_enable_node_public_ip(), False)
agentpool = self.create_initialized_agentpool_instance(enable_node_public_ip=True)
ctx_1.attach_agentpool(agentpool)
self.assertEqual(ctx_1.get_enable_node_public_ip(), True)
def common_get_node_public_ip_prefix_id(self):
# default
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"node_public_ip_prefix_id": None}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_1.get_node_public_ip_prefix_id(), None)
agentpool = self.create_initialized_agentpool_instance(node_public_ip_prefix_id="test_node_public_ip_prefix_id")
ctx_1.attach_agentpool(agentpool)
self.assertEqual(ctx_1.get_node_public_ip_prefix_id(), "test_node_public_ip_prefix_id")
def common_get_node_count_and_enable_cluster_autoscaler_min_max_count(
self,
):
# default
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict(
{
"node_count": 3,
"enable_cluster_autoscaler": False,
"min_count": None,
"max_count": None,
}
),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
self.assertEqual(
ctx_1.get_node_count_and_enable_cluster_autoscaler_min_max_count(),
(3, False, None, None),
)
agentpool = self.create_initialized_agentpool_instance(
count=5,
enable_auto_scaling=True,
min_count=1,
max_count=10,
)
ctx_1.attach_agentpool(agentpool)
self.assertEqual(
ctx_1.get_node_count_and_enable_cluster_autoscaler_min_max_count(),
(5, True, 1, 10),
)
def common_get_update_enable_disable_cluster_autoscaler_and_min_max_count(
self,
):
# default
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict(
{
"update_cluster_autoscaler": False,
"enable_cluster_autoscaler": False,
"disable_cluster_autoscaler": False,
"min_count": None,
"max_count": None,
}
),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
agentpool_1 = self.create_initialized_agentpool_instance(count=3)
ctx_1.attach_agentpool(agentpool_1)
self.assertEqual(
ctx_1.get_update_enable_disable_cluster_autoscaler_and_min_max_count(),
(False, False, False, None, None),
)
# custom value
ctx_2 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict(
{
"update_cluster_autoscaler": True,
"enable_cluster_autoscaler": False,
"disable_cluster_autoscaler": False,
"min_count": None,
"max_count": None,
}
),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
agentpool_2 = self.create_initialized_agentpool_instance(count=3)
ctx_2.attach_agentpool(agentpool_2)
ctx_2._agentpools = [agentpool_2, agentpool_2]
if self.agentpool_decorator_mode == AgentPoolDecoratorMode.MANAGED_CLUSTER:
            # fail when multiple agent pools exist (managed cluster mode)
with self.assertRaises(ArgumentUsageError):
ctx_2.get_update_enable_disable_cluster_autoscaler_and_min_max_count()
else:
            # fail on min_count and max_count not specified
with self.assertRaises(RequiredArgumentMissingError):
ctx_2.get_update_enable_disable_cluster_autoscaler_and_min_max_count()
# custom value
ctx_3 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict(
{
"update_cluster_autoscaler": False,
"enable_cluster_autoscaler": True,
"disable_cluster_autoscaler": True,
"min_count": None,
"max_count": None,
}
),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
agentpool_3 = self.create_initialized_agentpool_instance(count=3)
ctx_3.attach_agentpool(agentpool_3)
# fail on mutually exclusive update_cluster_autoscaler, enable_cluster_autoscaler and disable_cluster_autoscaler
with self.assertRaises(MutuallyExclusiveArgumentError):
ctx_3.get_update_enable_disable_cluster_autoscaler_and_min_max_count()
# custom value
ctx_4 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict(
{
"update_cluster_autoscaler": False,
"enable_cluster_autoscaler": True,
"disable_cluster_autoscaler": False,
"min_count": 1,
"max_count": 5,
}
),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
agentpool_4 = self.create_initialized_agentpool_instance(count=3, enable_auto_scaling=True)
ctx_4.attach_agentpool(agentpool_4)
# fail on cluster autoscaler already enabled
with self.assertRaises(DecoratorEarlyExitException):
ctx_4.get_update_enable_disable_cluster_autoscaler_and_min_max_count()
# custom value
ctx_5 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict(
{
"update_cluster_autoscaler": True,
"enable_cluster_autoscaler": False,
"disable_cluster_autoscaler": False,
"min_count": 1,
"max_count": 5,
}
),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
agentpool_5 = self.create_initialized_agentpool_instance(count=3, enable_auto_scaling=False)
ctx_5.attach_agentpool(agentpool_5)
# fail on cluster autoscaler not enabled
with self.assertRaises(InvalidArgumentValueError):
ctx_5.get_update_enable_disable_cluster_autoscaler_and_min_max_count()
# custom value
ctx_6 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict(
{
"update_cluster_autoscaler": False,
"enable_cluster_autoscaler": False,
"disable_cluster_autoscaler": True,
"min_count": None,
"max_count": None,
}
),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
agentpool_6 = self.create_initialized_agentpool_instance(count=3, enable_auto_scaling=False)
ctx_6.attach_agentpool(agentpool_6)
# fail on cluster autoscaler already disabled
with self.assertRaises(DecoratorEarlyExitException):
ctx_6.get_update_enable_disable_cluster_autoscaler_and_min_max_count()
def common_get_priority(self):
# default
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"priority": None}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_1.get_priority(), CONST_SCALE_SET_PRIORITY_REGULAR)
agentpool = self.create_initialized_agentpool_instance(scale_set_priority="test_priority")
ctx_1.attach_agentpool(agentpool)
self.assertEqual(ctx_1.get_priority(), "test_priority")
def common_get_eviction_policy(self):
# default
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"eviction_policy": None}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_1.get_eviction_policy(), CONST_SPOT_EVICTION_POLICY_DELETE)
agentpool = self.create_initialized_agentpool_instance(scale_set_eviction_policy="test_eviction_policy")
ctx_1.attach_agentpool(agentpool)
self.assertEqual(ctx_1.get_eviction_policy(), "test_eviction_policy")
def common_get_spot_max_price(self):
# default
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"spot_max_price": None}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_1.get_spot_max_price(), -1)
agentpool = self.create_initialized_agentpool_instance(spot_max_price=1.2345)
ctx_1.attach_agentpool(agentpool)
self.assertEqual(ctx_1.get_spot_max_price(), 1.2345)
def common_get_nodepool_labels(self):
# default
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"nodepool_labels": "test_nodepool_labels", "labels": "test_labels"}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
if self.agentpool_decorator_mode == AgentPoolDecoratorMode.MANAGED_CLUSTER:
self.assertEqual(ctx_1.get_nodepool_labels(), "test_nodepool_labels")
else:
self.assertEqual(ctx_1.get_nodepool_labels(), "test_labels")
agentpool = self.create_initialized_agentpool_instance(node_labels={"key1": "value1", "key2": "value2"})
ctx_1.attach_agentpool(agentpool)
self.assertEqual(ctx_1.get_nodepool_labels(), {"key1": "value1", "key2": "value2"})
# custom
ctx_2 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"nodepool_labels": "test_nodepool_labels", "labels": "test_labels"}),
self.models,
DecoratorMode.UPDATE,
self.agentpool_decorator_mode,
)
if self.agentpool_decorator_mode == AgentPoolDecoratorMode.MANAGED_CLUSTER:
self.assertEqual(ctx_2.get_nodepool_labels(), "test_nodepool_labels")
else:
self.assertEqual(ctx_2.get_nodepool_labels(), "test_labels")
agentpool_2 = self.create_initialized_agentpool_instance(node_labels={"key1": "value1", "key2": "value2"})
ctx_2.attach_agentpool(agentpool_2)
if self.agentpool_decorator_mode == AgentPoolDecoratorMode.MANAGED_CLUSTER:
self.assertEqual(ctx_2.get_nodepool_labels(), "test_nodepool_labels")
else:
self.assertEqual(ctx_2.get_nodepool_labels(), "test_labels")
def common_get_nodepool_tags(self):
# default
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"nodepool_tags": "test_nodepool_tags", "tags": "test_tags"}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
if self.agentpool_decorator_mode == AgentPoolDecoratorMode.MANAGED_CLUSTER:
self.assertEqual(ctx_1.get_nodepool_tags(), "test_nodepool_tags")
else:
self.assertEqual(ctx_1.get_nodepool_tags(), "test_tags")
agentpool = self.create_initialized_agentpool_instance(tags={})
ctx_1.attach_agentpool(agentpool)
self.assertEqual(ctx_1.get_nodepool_tags(), {})
# custom
ctx_2 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"nodepool_tags": "test_nodepool_tags", "tags": "test_tags"}),
self.models,
DecoratorMode.UPDATE,
self.agentpool_decorator_mode,
)
if self.agentpool_decorator_mode == AgentPoolDecoratorMode.MANAGED_CLUSTER:
self.assertEqual(ctx_2.get_nodepool_tags(), "test_nodepool_tags")
else:
self.assertEqual(ctx_2.get_nodepool_tags(), "test_tags")
agentpool_2 = self.create_initialized_agentpool_instance(tags={})
ctx_2.attach_agentpool(agentpool_2)
if self.agentpool_decorator_mode == AgentPoolDecoratorMode.MANAGED_CLUSTER:
self.assertEqual(ctx_2.get_nodepool_tags(), "test_nodepool_tags")
else:
self.assertEqual(ctx_2.get_nodepool_tags(), "test_tags")
def common_get_node_taints(self):
# default
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"node_taints": "abc=xyz:123,123=456:abc"}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_1.get_node_taints(), ["abc=xyz:123", "123=456:abc"])
agentpool = self.create_initialized_agentpool_instance(node_taints=[])
ctx_1.attach_agentpool(agentpool)
self.assertEqual(ctx_1.get_node_taints(), [])
# custom
ctx_2 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"node_taints": ""}),
self.models,
DecoratorMode.UPDATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_2.get_node_taints(), [])
agentpool_2 = self.create_initialized_agentpool_instance(node_taints=["abc=xyz:123", "123=456:abc"])
ctx_2.attach_agentpool(agentpool_2)
self.assertEqual(ctx_2.get_node_taints(), [])
def common_get_node_osdisk_size(self):
# default
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"node_osdisk_size": 0}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_1.get_node_osdisk_size(), 0)
agentpool = self.create_initialized_agentpool_instance(os_disk_size_gb=10)
ctx_1.attach_agentpool(agentpool)
self.assertEqual(ctx_1.get_node_osdisk_size(), 10)
def common_get_node_osdisk_type(self):
# default
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"node_osdisk_type": None}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_1.get_node_osdisk_type(), None)
agentpool = self.create_initialized_agentpool_instance(os_disk_type="test_node_osdisk_type")
ctx_1.attach_agentpool(agentpool)
self.assertEqual(ctx_1.get_node_osdisk_type(), "test_node_osdisk_type")
def common_get_max_surge(self):
# default
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict(
{
"max_surge": None,
}
),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_1.get_max_surge(), None)
upgrade_settings_1 = self.models.AgentPoolUpgradeSettings(max_surge="test_max_surge")
agentpool_1 = self.create_initialized_agentpool_instance(upgrade_settings=upgrade_settings_1)
ctx_1.attach_agentpool(agentpool_1)
self.assertEqual(ctx_1.get_max_surge(), "test_max_surge")
# custom
ctx_2 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"max_surge": "test_max_surge"}),
self.models,
DecoratorMode.UPDATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_2.get_max_surge(), "test_max_surge")
upgrade_settings_2 = self.models.AgentPoolUpgradeSettings(max_surge="test_ap_max_surge")
agentpool_2 = self.create_initialized_agentpool_instance(upgrade_settings=upgrade_settings_2)
ctx_2.attach_agentpool(agentpool_2)
self.assertEqual(ctx_2.get_max_surge(), "test_max_surge")
def common_get_vm_set_type(self):
# default
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"vm_set_type": None}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_1.get_vm_set_type(), CONST_VIRTUAL_MACHINE_SCALE_SETS)
agentpool = self.create_initialized_agentpool_instance(
type=CONST_AVAILABILITY_SET, type_properties_type=CONST_AVAILABILITY_SET
)
ctx_1.attach_agentpool(agentpool)
self.assertEqual(ctx_1.get_vm_set_type(), CONST_AVAILABILITY_SET)
# custom
ctx_2 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"vm_set_type": "test_vm_set_type"}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
# fail on invalid vm_set_type
with self.assertRaises(InvalidArgumentValueError):
ctx_2.get_vm_set_type()
def common_get_ppg(self):
# default
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"ppg": None}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_1.get_ppg(), None)
agentpool = self.create_initialized_agentpool_instance(
proximity_placement_group_id="test_proximity_placement_group_id"
)
ctx_1.attach_agentpool(agentpool)
self.assertEqual(ctx_1.get_ppg(), "test_proximity_placement_group_id")
def common_get_enable_encryption_at_host(self):
# default
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"enable_encryption_at_host": False}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_1.get_enable_encryption_at_host(), False)
agentpool = self.create_initialized_agentpool_instance(enable_encryption_at_host=True)
ctx_1.attach_agentpool(agentpool)
self.assertEqual(ctx_1.get_enable_encryption_at_host(), True)
def common_get_enable_ultra_ssd(self):
# default
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"enable_ultra_ssd": False}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_1.get_enable_ultra_ssd(), False)
agentpool = self.create_initialized_agentpool_instance(enable_ultra_ssd=True)
ctx_1.attach_agentpool(agentpool)
self.assertEqual(ctx_1.get_enable_ultra_ssd(), True)
def common_get_enable_fips_image(self):
# default
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"enable_fips_image": False}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_1.get_enable_fips_image(), False)
agentpool = self.create_initialized_agentpool_instance(enable_fips=True)
ctx_1.attach_agentpool(agentpool)
self.assertEqual(ctx_1.get_enable_fips_image(), True)
def common_get_zones(self):
# default
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"zones": None}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_1.get_zones(), None)
agentpool = self.create_initialized_agentpool_instance(availability_zones=[1, 2, 3])
ctx_1.attach_agentpool(agentpool)
self.assertEqual(ctx_1.get_zones(), [1, 2, 3])
def common_get_max_pods(self):
# default
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"max_pods": 0}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_1.get_max_pods(), None)
agentpool = self.create_initialized_agentpool_instance(max_pods=110)
ctx_1.attach_agentpool(agentpool)
self.assertEqual(ctx_1.get_max_pods(), 110)
def common_get_mode(self):
# default
if self.agentpool_decorator_mode == AgentPoolDecoratorMode.MANAGED_CLUSTER:
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"mode": None}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_1.get_mode(), CONST_NODEPOOL_MODE_SYSTEM)
else:
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"mode": CONST_NODEPOOL_MODE_USER}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_1.get_mode(), CONST_NODEPOOL_MODE_USER)
agentpool = self.create_initialized_agentpool_instance(mode="test_mode")
ctx_1.attach_agentpool(agentpool)
self.assertEqual(ctx_1.get_mode(), "test_mode")
# custom
ctx_2 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"mode": "test_mode"}),
self.models,
DecoratorMode.UPDATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_2.get_mode(), "test_mode")
agentpool_2 = self.create_initialized_agentpool_instance(mode="test_ap_mode")
ctx_2.attach_agentpool(agentpool_2)
self.assertEqual(ctx_2.get_mode(), "test_mode")
def common_get_scale_down_mode(self):
# default
if self.agentpool_decorator_mode == AgentPoolDecoratorMode.MANAGED_CLUSTER:
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"scale_down_mode": None}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_1.get_scale_down_mode(), None)
else:
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"scale_down_mode": CONST_SCALE_DOWN_MODE_DELETE}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_1.get_scale_down_mode(), CONST_SCALE_DOWN_MODE_DELETE)
agentpool = self.create_initialized_agentpool_instance(scale_down_mode=CONST_SCALE_DOWN_MODE_DEALLOCATE)
ctx_1.attach_agentpool(agentpool)
self.assertEqual(ctx_1.get_scale_down_mode(), CONST_SCALE_DOWN_MODE_DEALLOCATE)
# custom
ctx_2 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"scale_down_mode": "test_scale_down_mode"}),
self.models,
DecoratorMode.UPDATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_2.get_scale_down_mode(), "test_scale_down_mode")
agentpool_2 = self.create_initialized_agentpool_instance(scale_down_mode="test_ap_scale_down_mode")
ctx_2.attach_agentpool(agentpool_2)
self.assertEqual(ctx_2.get_scale_down_mode(), "test_scale_down_mode")
def common_get_kubelet_config(self):
# default
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"kubelet_config": None}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_1.get_kubelet_config(), None)
agentpool_1 = self.create_initialized_agentpool_instance(
kubelet_config=self.models.KubeletConfig(pod_max_pids=100)
)
ctx_1.attach_agentpool(agentpool_1)
self.assertEqual(
ctx_1.get_kubelet_config(),
self.models.KubeletConfig(pod_max_pids=100),
)
# custom value
ctx_2 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"kubelet_config": "fake-path"}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
# fail on invalid file path
with self.assertRaises(InvalidArgumentValueError):
ctx_2.get_kubelet_config()
# custom value
ctx_3 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"kubelet_config": get_test_data_file_path("invalidconfig.json")}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
# fail on invalid file content
with self.assertRaises(InvalidArgumentValueError):
ctx_3.get_kubelet_config()
def common_get_linux_os_config(self):
# default
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"linux_os_config": None}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_1.get_linux_os_config(), None)
agentpool_1 = self.create_initialized_agentpool_instance(
linux_os_config=self.models.LinuxOSConfig(swap_file_size_mb=200)
)
ctx_1.attach_agentpool(agentpool_1)
self.assertEqual(
ctx_1.get_linux_os_config(),
self.models.LinuxOSConfig(swap_file_size_mb=200),
)
# custom value
ctx_2 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"linux_os_config": "fake-path"}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
# fail on invalid file path
with self.assertRaises(InvalidArgumentValueError):
ctx_2.get_linux_os_config()
# custom value
ctx_3 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"linux_os_config": get_test_data_file_path("invalidconfig.json")}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
# fail on invalid file content
with self.assertRaises(InvalidArgumentValueError):
ctx_3.get_linux_os_config()
def common_get_aks_custom_headers(self):
# default
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict(
{
"aks_custom_headers": None,
}
),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_1.get_aks_custom_headers(), {})
# custom value
ctx_2 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict(
{
"aks_custom_headers": "abc=def,xyz=123",
}
),
self.models,
DecoratorMode.UPDATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_2.get_aks_custom_headers(), {"abc": "def", "xyz": "123"})
def common_get_no_wait(self):
# default
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"no_wait": False}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_1.get_no_wait(), False)
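# Each mode-specific subclass below binds setUp to one AgentPoolDecoratorMode and
# re-exposes the shared common_* helpers as test_* methods, so unittest discovery
# runs the whole suite once per mode.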
class AKSAgentPoolContextStandaloneModeTestCase(AKSAgentPoolContextCommonTestCase):
def setUp(self):
self.cli_ctx = MockCLI()
self.cmd = MockCmd(self.cli_ctx)
self.resource_type = ResourceType.MGMT_CONTAINERSERVICE
self.agentpool_decorator_mode = AgentPoolDecoratorMode.STANDALONE
self.models = AKSAgentPoolModels(self.cmd, self.resource_type, self.agentpool_decorator_mode)
def test__init__(self):
self.common__init__()
def test_attach_agentpool(self):
self.common_attach_agentpool()
def test_attach_existing_agentpool(self):
self.common_attach_existing_agentpool()
def test_attach_agentpools(self):
self.common_attach_agentpools()
def test_validate_counts_in_autoscaler(self):
self.common_validate_counts_in_autoscaler()
def test_get_resource_group_name(self):
self.common_get_resource_group_name()
def test_get_cluster_name(self):
self.common_get_cluster_name()
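    # In standalone mode, get_nodepool_name checks the requested name against the agent
    # pools already present in the cluster, so cf_agent_pools is patched with a client
    # whose list() returns a controlled set of existing pools.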
def test_get_nodepool_name(self):
# default
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"nodepool_name": "test_nodepool_name"}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
with patch(
"azure.cli.command_modules.acs.agentpool_decorator.cf_agent_pools",
return_value=Mock(list=Mock(return_value=[])),
):
self.assertEqual(ctx_1.get_nodepool_name(), "test_nodepool_name")
agentpool_1 = self.create_initialized_agentpool_instance("test_ap_name")
ctx_1.attach_agentpool(agentpool_1)
with patch(
"azure.cli.command_modules.acs.agentpool_decorator.cf_agent_pools",
return_value=Mock(list=Mock(return_value=[])),
):
self.assertEqual(ctx_1.get_nodepool_name(), "test_ap_name")
# custom
ctx_2 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"nodepool_name": "test_nodepool_name"}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
mock_agentpool_instance_1 = Mock()
mock_agentpool_instance_1.name = "test_nodepool_name"
mock_agentpool_operations = Mock(list=Mock(return_value=[mock_agentpool_instance_1]))
# fail on existing nodepool name
with patch(
"azure.cli.command_modules.acs.agentpool_decorator.cf_agent_pools",
return_value=mock_agentpool_operations,
), self.assertRaises(InvalidArgumentValueError):
ctx_2.get_nodepool_name()
def test_get_snapshot_id(self):
self.common_get_snapshot_id()
def test_get_snapshot(self):
self.common_get_snapshot()
def test_get_kubernetes_version(self):
self.common_get_kubernetes_version()
def test_get_node_vm_size(self):
self.common_get_node_vm_size()
def test_get_os_type(self):
self.common_get_os_type()
def test_get_os_sku(self):
self.common_get_os_sku()
def test_get_vnet_subnet_id(self):
self.common_get_vnet_subnet_id()
def test_get_pod_subnet_id(self):
self.common_get_pod_subnet_id()
def test_get_enable_node_public_ip(self):
self.common_get_enable_node_public_ip()
def test_get_node_public_ip_prefix_id(self):
self.common_get_node_public_ip_prefix_id()
def test_get_node_count_and_enable_cluster_autoscaler_min_max_count(
self,
):
self.common_get_node_count_and_enable_cluster_autoscaler_min_max_count()
def test_get_update_enable_disable_cluster_autoscaler_and_min_max_count(self):
self.common_get_update_enable_disable_cluster_autoscaler_and_min_max_count()
def test_get_priority(self):
self.common_get_priority()
def test_get_eviction_policy(self):
self.common_get_eviction_policy()
def test_get_spot_max_price(self):
self.common_get_spot_max_price()
def test_get_nodepool_labels(self):
self.common_get_nodepool_labels()
def test_get_nodepool_tags(self):
self.common_get_nodepool_tags()
def test_get_node_taints(self):
self.common_get_node_taints()
def test_get_node_osdisk_size(self):
self.common_get_node_osdisk_size()
def test_get_node_osdisk_type(self):
self.common_get_node_osdisk_type()
def test_get_max_surge(self):
self.common_get_max_surge()
def test_get_vm_set_type(self):
self.common_get_vm_set_type()
def test_get_ppg(self):
self.common_get_ppg()
def test_get_enable_encryption_at_host(self):
self.common_get_enable_encryption_at_host()
def test_get_enable_ultra_ssd(self):
self.common_get_enable_ultra_ssd()
def test_get_enable_fips_image(self):
self.common_get_enable_fips_image()
def test_get_zones(self):
self.common_get_zones()
def test_get_max_pods(self):
self.common_get_max_pods()
def test_get_mode(self):
self.common_get_mode()
def test_get_scale_down_mode(self):
self.common_get_scale_down_mode()
def test_get_kubelet_config(self):
self.common_get_kubelet_config()
def test_get_linux_os_config(self):
self.common_get_linux_os_config()
def test_get_aks_custom_headers(self):
self.common_get_aks_custom_headers()
def test_get_no_wait(self):
self.common_get_no_wait()
class AKSAgentPoolContextManagedClusterModeTestCase(AKSAgentPoolContextCommonTestCase):
def setUp(self):
self.cli_ctx = MockCLI()
self.cmd = MockCmd(self.cli_ctx)
self.resource_type = ResourceType.MGMT_CONTAINERSERVICE
self.agentpool_decorator_mode = AgentPoolDecoratorMode.MANAGED_CLUSTER
self.models = AKSAgentPoolModels(self.cmd, self.resource_type, self.agentpool_decorator_mode)
def test__init__(self):
self.common__init__()
def test_attach_agentpool(self):
self.common_attach_agentpool()
def test_attach_existing_agentpool(self):
self.common_attach_existing_agentpool()
def test_attach_agentpools(self):
self.common_attach_agentpools()
def test_validate_counts_in_autoscaler(self):
self.common_validate_counts_in_autoscaler()
def test_get_resource_group_name(self):
self.common_get_resource_group_name()
def test_get_cluster_name(self):
self.common_get_cluster_name()
def test_get_nodepool_name(self):
# default
ctx_1 = AKSAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_1.get_nodepool_name(), "nodepool1")
agentpool_1 = self.create_initialized_agentpool_instance("test_ap_name")
ctx_1.attach_agentpool(agentpool_1)
self.assertEqual(ctx_1.get_nodepool_name(), "test_ap_name")
def test_get_snapshot_id(self):
self.common_get_snapshot_id()
def test_get_snapshot(self):
self.common_get_snapshot()
def test_get_kubernetes_version(self):
self.common_get_kubernetes_version()
def test_get_node_vm_size(self):
self.common_get_node_vm_size()
def test_get_os_type(self):
self.common_get_os_type()
def test_get_os_sku(self):
self.common_get_os_sku()
def test_get_vnet_subnet_id(self):
self.common_get_vnet_subnet_id()
def test_get_pod_subnet_id(self):
self.common_get_pod_subnet_id()
def test_get_enable_node_public_ip(self):
self.common_get_enable_node_public_ip()
def test_get_node_public_ip_prefix_id(self):
self.common_get_node_public_ip_prefix_id()
def test_get_node_count_and_enable_cluster_autoscaler_min_max_count(
self,
):
self.common_get_node_count_and_enable_cluster_autoscaler_min_max_count()
def test_get_update_enable_disable_cluster_autoscaler_and_min_max_count(self):
self.common_get_update_enable_disable_cluster_autoscaler_and_min_max_count()
def test_get_priority(self):
self.common_get_priority()
def test_get_eviction_policy(self):
self.common_get_eviction_policy()
def test_get_spot_max_price(self):
self.common_get_spot_max_price()
def test_get_nodepool_labels(self):
self.common_get_nodepool_labels()
def test_get_nodepool_tags(self):
self.common_get_nodepool_tags()
def test_get_node_taints(self):
self.common_get_node_taints()
def test_get_node_osdisk_size(self):
self.common_get_node_osdisk_size()
def test_get_node_osdisk_type(self):
self.common_get_node_osdisk_type()
def test_get_max_surge(self):
self.common_get_max_surge()
def test_get_vm_set_type(self):
self.common_get_vm_set_type()
def test_get_ppg(self):
self.common_get_ppg()
def test_get_enable_encryption_at_host(self):
self.common_get_enable_encryption_at_host()
def test_get_enable_ultra_ssd(self):
self.common_get_enable_ultra_ssd()
def test_get_enable_fips_image(self):
self.common_get_enable_fips_image()
def test_get_zones(self):
self.common_get_zones()
def test_get_max_pods(self):
self.common_get_max_pods()
def test_get_mode(self):
self.common_get_mode()
def test_get_scale_down_mode(self):
self.common_get_scale_down_mode()
def test_get_kubelet_config(self):
self.common_get_kubelet_config()
def test_get_linux_os_config(self):
self.common_get_linux_os_config()
def test_get_aks_custom_headers(self):
self.common_get_aks_custom_headers()
def test_get_no_wait(self):
self.common_get_no_wait()
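# The add-decorator tests reuse the same default-stripping helpers as the context
# tests: each set_up_* step receives an agentpool with defaults removed, and its
# output, with the stashed defaults restored, is compared against a ground-truth
# instance that sets only the expected fields.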
class AKSAgentPoolAddDecoratorCommonTestCase(unittest.TestCase):
def _remove_defaults_in_agentpool(self, agentpool):
self.defaults_in_agentpool = {}
for attr_name, attr_value in vars(agentpool).items():
if not attr_name.startswith("_") and attr_name != "name" and attr_value is not None:
self.defaults_in_agentpool[attr_name] = attr_value
setattr(agentpool, attr_name, None)
return agentpool
def _restore_defaults_in_agentpool(self, agentpool):
for key, value in self.defaults_in_agentpool.items():
if getattr(agentpool, key, None) is None:
setattr(agentpool, key, value)
return agentpool
def create_initialized_agentpool_instance(
self, nodepool_name="nodepool1", remove_defaults=True, restore_defaults=True, **kwargs
):
"""Helper function to create a properly initialized agentpool instance.
:return: the AgentPool object
"""
if self.agentpool_decorator_mode == AgentPoolDecoratorMode.MANAGED_CLUSTER:
agentpool = self.models.UnifiedAgentPoolModel(name=nodepool_name)
else:
agentpool = self.models.UnifiedAgentPoolModel()
agentpool.name = nodepool_name
# remove defaults
if remove_defaults:
self._remove_defaults_in_agentpool(agentpool)
# set properties
for key, value in kwargs.items():
setattr(agentpool, key, value)
        # restore defaults
if restore_defaults:
self._restore_defaults_in_agentpool(agentpool)
return agentpool
def common_ensure_agentpool(self):
dec_1 = AKSAgentPoolAddDecorator(
self.cmd,
self.client,
{},
self.resource_type,
self.agentpool_decorator_mode,
)
# fail on passing the wrong agentpool object
with self.assertRaises(CLIInternalError):
dec_1._ensure_agentpool(None)
agentpool_1 = self.create_initialized_agentpool_instance()
        # fail on an agentpool object inconsistent with the one attached to the internal context
with self.assertRaises(CLIInternalError):
dec_1._ensure_agentpool(agentpool_1)
def common_remove_restore_defaults_in_agentpool(self):
dec_1 = AKSAgentPoolAddDecorator(
self.cmd,
self.client,
{},
self.resource_type,
self.agentpool_decorator_mode,
)
# fail on passing the wrong agentpool object
with self.assertRaises(CLIInternalError):
dec_1._remove_defaults_in_agentpool(None)
# fail on passing the wrong agentpool object
with self.assertRaises(CLIInternalError):
dec_1._restore_defaults_in_agentpool(None)
agentpool_1 = self.create_initialized_agentpool_instance(remove_defaults=False, restore_defaults=False)
dec_1.context.attach_agentpool(agentpool_1)
dec_agentpool_1 = dec_1._remove_defaults_in_agentpool(agentpool_1)
ground_truth_agentpool_1 = self.create_initialized_agentpool_instance(restore_defaults=False)
self.assertEqual(dec_agentpool_1, ground_truth_agentpool_1)
self.assertEqual(dec_1.context.get_intermediate("defaults_in_agentpool"), self.defaults_in_agentpool)
dec_agentpool_2 = dec_1._restore_defaults_in_agentpool(dec_agentpool_1)
ground_truth_agentpool_2 = self.create_initialized_agentpool_instance()
self.assertEqual(dec_agentpool_2, ground_truth_agentpool_2)
def common_init_agentpool(self):
dec_1 = AKSAgentPoolAddDecorator(
self.cmd,
self.client,
{"nodepool_name": "test_nodepool_name"},
self.resource_type,
self.agentpool_decorator_mode,
)
with patch(
"azure.cli.command_modules.acs.agentpool_decorator.cf_agent_pools",
return_value=Mock(list=Mock(return_value=[])),
):
dec_agentpool_1 = dec_1.init_agentpool()
ground_truth_agentpool_1 = self.create_initialized_agentpool_instance(
"test_nodepool_name", remove_defaults=False, restore_defaults=False
)
self.assertEqual(dec_agentpool_1, ground_truth_agentpool_1)
self.assertEqual(dec_agentpool_1, dec_1.context.agentpool)
def common_set_up_snapshot_properties(self):
dec_1 = AKSAgentPoolAddDecorator(
self.cmd,
self.client,
{"kubernetes_version": "test_kubernetes_version", "os_type": None, "os_sku": None, "node_vm_size": None},
self.resource_type,
self.agentpool_decorator_mode,
)
# fail on passing the wrong agentpool object
with self.assertRaises(CLIInternalError):
dec_1.set_up_snapshot_properties(None)
agentpool_1 = self.create_initialized_agentpool_instance(restore_defaults=False)
dec_1.context.attach_agentpool(agentpool_1)
dec_agentpool_1 = dec_1.set_up_snapshot_properties(agentpool_1)
dec_agentpool_1 = self._restore_defaults_in_agentpool(dec_agentpool_1)
ground_truth_agentpool_1 = self.create_initialized_agentpool_instance(
orchestrator_version="test_kubernetes_version",
vm_size=CONST_DEFAULT_NODE_VM_SIZE,
os_type=CONST_DEFAULT_NODE_OS_TYPE,
os_sku=None,
)
self.assertEqual(dec_agentpool_1, ground_truth_agentpool_1)
dec_2 = AKSAgentPoolAddDecorator(
self.cmd,
self.client,
{
"kubernetes_version": "",
"os_type": None,
"os_sku": None,
"node_vm_size": None,
"snapshot_id": "test_snapshot_id",
},
self.resource_type,
self.agentpool_decorator_mode,
)
# fail on passing the wrong agentpool object
with self.assertRaises(CLIInternalError):
dec_2.set_up_snapshot_properties(None)
agentpool_2 = self.create_initialized_agentpool_instance(restore_defaults=False)
dec_2.context.attach_agentpool(agentpool_2)
mock_snapshot_2 = Mock(
kubernetes_version="test_kubernetes_version",
os_type="test_os_type",
os_sku="test_os_sku",
vm_size="test_vm_size",
)
with patch(
"azure.cli.command_modules.acs.agentpool_decorator.get_snapshot_by_snapshot_id",
return_value=mock_snapshot_2,
):
dec_agentpool_2 = dec_2.set_up_snapshot_properties(agentpool_2)
dec_agentpool_2 = self._restore_defaults_in_agentpool(dec_agentpool_2)
ground_truth_creation_data_2 = dec_2.models.CreationData(source_resource_id="test_snapshot_id")
ground_truth_agentpool_2 = self.create_initialized_agentpool_instance(
orchestrator_version="test_kubernetes_version",
vm_size="test_vm_size",
os_type="test_os_type",
os_sku="test_os_sku",
creation_data=ground_truth_creation_data_2,
)
self.assertEqual(dec_agentpool_2, ground_truth_agentpool_2)
def common_set_up_node_network_properties(self):
dec_1 = AKSAgentPoolAddDecorator(
self.cmd,
self.client,
{
"vnet_subnet_id": "test_vnet_subnet_id",
"pod_subnet_id": "test_pod_subnet_id",
"enable_node_public_ip": True,
"node_public_ip_prefix_id": "test_node_public_ip_prefix_id",
},
self.resource_type,
self.agentpool_decorator_mode,
)
# fail on passing the wrong agentpool object
with self.assertRaises(CLIInternalError):
            dec_1.set_up_node_network_properties(None)
agentpool_1 = self.create_initialized_agentpool_instance(restore_defaults=False)
dec_1.context.attach_agentpool(agentpool_1)
dec_agentpool_1 = dec_1.set_up_node_network_properties(agentpool_1)
dec_agentpool_1 = self._restore_defaults_in_agentpool(dec_agentpool_1)
ground_truth_agentpool_1 = self.create_initialized_agentpool_instance(
vnet_subnet_id="test_vnet_subnet_id",
pod_subnet_id="test_pod_subnet_id",
enable_node_public_ip=True,
node_public_ip_prefix_id="test_node_public_ip_prefix_id",
)
self.assertEqual(dec_agentpool_1, ground_truth_agentpool_1)
def common_set_up_auto_scaler_properties(self):
dec_1 = AKSAgentPoolAddDecorator(
self.cmd,
self.client,
{
"node_count": 3,
"enable_cluster_autoscaler": True,
"min_count": 1,
"max_count": 5,
},
self.resource_type,
self.agentpool_decorator_mode,
)
# fail on passing the wrong agentpool object
with self.assertRaises(CLIInternalError):
dec_1.set_up_auto_scaler_properties(None)
agentpool_1 = self.create_initialized_agentpool_instance(restore_defaults=False)
dec_1.context.attach_agentpool(agentpool_1)
dec_agentpool_1 = dec_1.set_up_auto_scaler_properties(agentpool_1)
dec_agentpool_1 = self._restore_defaults_in_agentpool(dec_agentpool_1)
ground_truth_agentpool_1 = self.create_initialized_agentpool_instance(
count=3, enable_auto_scaling=True, min_count=1, max_count=5
)
self.assertEqual(dec_agentpool_1, ground_truth_agentpool_1)
def common_set_up_priority_properties(self):
dec_1 = AKSAgentPoolAddDecorator(
self.cmd,
self.client,
{
"priority": CONST_SCALE_SET_PRIORITY_SPOT,
"eviction_policy": CONST_SPOT_EVICTION_POLICY_DEALLOCATE,
"spot_max_price": float(1.2345),
},
self.resource_type,
self.agentpool_decorator_mode,
)
# fail on passing the wrong agentpool object
with self.assertRaises(CLIInternalError):
            dec_1.set_up_priority_properties(None)
agentpool_1 = self.create_initialized_agentpool_instance(restore_defaults=False)
dec_1.context.attach_agentpool(agentpool_1)
dec_agentpool_1 = dec_1.set_up_priority_properties(agentpool_1)
dec_agentpool_1 = self._restore_defaults_in_agentpool(dec_agentpool_1)
ground_truth_agentpool_1 = self.create_initialized_agentpool_instance(
scale_set_priority=CONST_SCALE_SET_PRIORITY_SPOT,
scale_set_eviction_policy=CONST_SPOT_EVICTION_POLICY_DEALLOCATE,
spot_max_price=float(1.2345),
)
self.assertEqual(dec_agentpool_1, ground_truth_agentpool_1)
def common_set_up_label_tag_taint(self):
dec_1 = AKSAgentPoolAddDecorator(
self.cmd,
self.client,
{
"nodepool_labels": "test_nodepool_labels",
"labels": "test_labels",
"nodepool_tags": "test_nodepool_tags",
"tags": "test_tags",
"node_taints": "abc=xyz:123,123=456:abc",
},
self.resource_type,
self.agentpool_decorator_mode,
)
# fail on passing the wrong agentpool object
with self.assertRaises(CLIInternalError):
dec_1.set_up_label_tag_taint(None)
agentpool_1 = self.create_initialized_agentpool_instance(restore_defaults=False)
dec_1.context.attach_agentpool(agentpool_1)
dec_agentpool_1 = dec_1.set_up_label_tag_taint(agentpool_1)
dec_agentpool_1 = self._restore_defaults_in_agentpool(dec_agentpool_1)
if self.agentpool_decorator_mode == AgentPoolDecoratorMode.MANAGED_CLUSTER:
ground_truth_mc_agentpool_1 = self.create_initialized_agentpool_instance(
node_labels="test_nodepool_labels",
tags="test_nodepool_tags",
node_taints=["abc=xyz:123", "123=456:abc"],
)
self.assertEqual(dec_agentpool_1, ground_truth_mc_agentpool_1)
else:
ground_truth_sd_agentpool_1 = self.create_initialized_agentpool_instance(
node_labels="test_labels", tags="test_tags", node_taints=["abc=xyz:123", "123=456:abc"]
)
self.assertEqual(dec_agentpool_1, ground_truth_sd_agentpool_1)
def common_set_up_osdisk_properties(self):
dec_1 = AKSAgentPoolAddDecorator(
self.cmd,
self.client,
{
"node_osdisk_size": 123,
"node_osdisk_type": "test_node_osdisk_type",
},
self.resource_type,
self.agentpool_decorator_mode,
)
# fail on passing the wrong agentpool object
with self.assertRaises(CLIInternalError):
dec_1.set_up_osdisk_properties(None)
agentpool_1 = self.create_initialized_agentpool_instance(restore_defaults=False)
dec_1.context.attach_agentpool(agentpool_1)
dec_agentpool_1 = dec_1.set_up_osdisk_properties(agentpool_1)
dec_agentpool_1 = self._restore_defaults_in_agentpool(dec_agentpool_1)
ground_truth_agentpool_1 = self.create_initialized_agentpool_instance(
os_disk_size_gb=123, os_disk_type="test_node_osdisk_type"
)
self.assertEqual(dec_agentpool_1, ground_truth_agentpool_1)
def common_set_up_upgrade_settings(self):
dec_1 = AKSAgentPoolAddDecorator(
self.cmd,
self.client,
{"max_surge": "test_max_surge"},
self.resource_type,
self.agentpool_decorator_mode,
)
# fail on passing the wrong agentpool object
with self.assertRaises(CLIInternalError):
dec_1.set_up_upgrade_settings(None)
agentpool_1 = self.create_initialized_agentpool_instance(restore_defaults=False)
dec_1.context.attach_agentpool(agentpool_1)
dec_agentpool_1 = dec_1.set_up_upgrade_settings(agentpool_1)
dec_agentpool_1 = self._restore_defaults_in_agentpool(dec_agentpool_1)
ground_truth_upgrade_settings_1 = self.models.AgentPoolUpgradeSettings(max_surge="test_max_surge")
ground_truth_agentpool_1 = self.create_initialized_agentpool_instance(
upgrade_settings=ground_truth_upgrade_settings_1
)
self.assertEqual(dec_agentpool_1, ground_truth_agentpool_1)
def common_set_up_vm_properties(self):
dec_1 = AKSAgentPoolAddDecorator(
self.cmd,
self.client,
{
"vm_set_type": CONST_VIRTUAL_MACHINE_SCALE_SETS.lower(),
"ppg": "test_ppg",
"enable_encryption_at_host": True,
"enable_ultra_ssd": True,
"enable_fips_image": True,
"zones": [1, 2, 3],
"max_pods": 110,
"mode": "test_mode",
"scale_down_mode": "test_scale_down_mode",
},
self.resource_type,
self.agentpool_decorator_mode,
)
# fail on passing the wrong agentpool object
with self.assertRaises(CLIInternalError):
            dec_1.set_up_vm_properties(None)
agentpool_1 = self.create_initialized_agentpool_instance(restore_defaults=False)
dec_1.context.attach_agentpool(agentpool_1)
dec_agentpool_1 = dec_1.set_up_vm_properties(agentpool_1)
dec_agentpool_1 = self._restore_defaults_in_agentpool(dec_agentpool_1)
ground_truth_agentpool_1 = self.create_initialized_agentpool_instance(
proximity_placement_group_id="test_ppg",
enable_encryption_at_host=True,
enable_ultra_ssd=True,
enable_fips=True,
availability_zones=[1, 2, 3],
max_pods=110,
mode="test_mode",
scale_down_mode="test_scale_down_mode",
)
if self.agentpool_decorator_mode == AgentPoolDecoratorMode.MANAGED_CLUSTER:
ground_truth_agentpool_1.type = CONST_VIRTUAL_MACHINE_SCALE_SETS
else:
ground_truth_agentpool_1.type_properties_type = CONST_VIRTUAL_MACHINE_SCALE_SETS
self.assertEqual(dec_agentpool_1, ground_truth_agentpool_1)
def common_set_up_custom_node_config(self):
dec_1 = AKSAgentPoolAddDecorator(
self.cmd,
self.client,
{
"kubelet_config": get_test_data_file_path("kubeletconfig.json"),
"linux_os_config": get_test_data_file_path("linuxosconfig.json"),
},
self.resource_type,
self.agentpool_decorator_mode,
)
# fail on passing the wrong agentpool object
with self.assertRaises(CLIInternalError):
            dec_1.set_up_custom_node_config(None)
agentpool_1 = self.create_initialized_agentpool_instance(restore_defaults=False)
dec_1.context.attach_agentpool(agentpool_1)
dec_agentpool_1 = dec_1.set_up_custom_node_config(agentpool_1)
dec_agentpool_1 = self._restore_defaults_in_agentpool(dec_agentpool_1)
ground_truth_kubelet_config_1 = get_file_json(get_test_data_file_path("kubeletconfig.json"))
ground_truth_linux_os_config_1 = get_file_json(get_test_data_file_path("linuxosconfig.json"))
ground_truth_agentpool_1 = self.create_initialized_agentpool_instance(
kubelet_config=ground_truth_kubelet_config_1,
linux_os_config=ground_truth_linux_os_config_1,
)
self.assertEqual(dec_agentpool_1, ground_truth_agentpool_1)
class AKSAgentPoolAddDecoratorStandaloneModeTestCase(AKSAgentPoolAddDecoratorCommonTestCase):
def setUp(self):
self.cli_ctx = MockCLI()
self.cmd = MockCmd(self.cli_ctx)
self.resource_type = ResourceType.MGMT_CONTAINERSERVICE
self.agentpool_decorator_mode = AgentPoolDecoratorMode.STANDALONE
self.models = AKSAgentPoolModels(self.cmd, self.resource_type, self.agentpool_decorator_mode)
self.client = MockClient()
def test_ensure_agentpool(self):
self.common_ensure_agentpool()
    def test_remove_restore_defaults_in_agentpool(self):
self.common_remove_restore_defaults_in_agentpool()
def test_init_agentpool(self):
self.common_init_agentpool()
def test_set_up_snapshot_properties(self):
self.common_set_up_snapshot_properties()
def test_set_up_node_network_properties(self):
self.common_set_up_node_network_properties()
def test_set_up_auto_scaler_properties(self):
self.common_set_up_auto_scaler_properties()
def test_set_up_priority_properties(self):
self.common_set_up_priority_properties()
def test_set_up_label_tag_taint(self):
self.common_set_up_label_tag_taint()
def test_set_up_osdisk_properties(self):
self.common_set_up_osdisk_properties()
def test_set_up_upgrade_settings(self):
self.common_set_up_upgrade_settings()
def test_set_up_vm_properties(self):
self.common_set_up_vm_properties()
def test_set_up_custom_node_config(self):
self.common_set_up_custom_node_config()
def test_construct_agentpool_profile_default(self):
import inspect
from azure.cli.command_modules.acs.custom import aks_agentpool_add
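        # collect the command's default parameter values via inspect.signature,
        # so the test stays in sync with the parameters declared on the command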
optional_params = {}
positional_params = []
for _, v in inspect.signature(aks_agentpool_add).parameters.items():
if v.default != v.empty:
optional_params[v.name] = v.default
else:
positional_params.append(v.name)
ground_truth_positional_params = [
"cmd",
"client",
"resource_group_name",
"cluster_name",
"nodepool_name",
]
self.assertEqual(positional_params, ground_truth_positional_params)
# prepare a dictionary of default parameters
raw_param_dict = {
"resource_group_name": "test_rg_name",
"cluster_name": "test_cluster_name",
"nodepool_name": "test_nodepool_name",
}
raw_param_dict.update(optional_params)
        # default values in `aks_agentpool_add`
dec_1 = AKSAgentPoolAddDecorator(
self.cmd,
self.client,
raw_param_dict,
self.resource_type,
self.agentpool_decorator_mode,
)
with patch(
"azure.cli.command_modules.acs.agentpool_decorator.cf_agent_pools",
return_value=Mock(list=Mock(return_value=[])),
):
dec_agentpool_1 = dec_1.construct_agentpool_profile_default()
ground_truth_upgrade_settings_1 = self.models.AgentPoolUpgradeSettings()
ground_truth_agentpool_1 = self.create_initialized_agentpool_instance(
nodepool_name="test_nodepool_name",
os_type=CONST_DEFAULT_NODE_OS_TYPE,
vm_size=CONST_DEFAULT_NODE_VM_SIZE,
enable_node_public_ip=False,
enable_auto_scaling=False,
count=3,
node_taints=[],
os_disk_size_gb=0,
upgrade_settings=ground_truth_upgrade_settings_1,
type_properties_type=CONST_VIRTUAL_MACHINE_SCALE_SETS,
enable_encryption_at_host=False,
enable_ultra_ssd=False,
enable_fips=False,
mode=CONST_NODEPOOL_MODE_USER,
scale_down_mode=CONST_SCALE_DOWN_MODE_DELETE,
)
self.assertEqual(dec_agentpool_1, ground_truth_agentpool_1)
dec_1.context.raw_param.print_usage_statistics()
def test_add_agentpool(self):
dec_1 = AKSAgentPoolAddDecorator(
self.cmd,
self.client,
{
"resource_group_name": "test_resource_group_name",
"cluster_name": "test_cluster_name",
"nodepool_name": "test_nodepool_name",
},
self.resource_type,
self.agentpool_decorator_mode,
)
# fail on passing the wrong agentpool object
with self.assertRaises(CLIInternalError):
dec_1.add_agentpool(None)
agentpool_1 = self.create_initialized_agentpool_instance(nodepool_name="test_nodepool_name")
dec_1.context.attach_agentpool(agentpool_1)
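        # patch sdk_no_wait so no real request is issued, then verify the
        # arguments that would be forwarded to begin_create_or_update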
with patch("azure.cli.command_modules.acs.agentpool_decorator.sdk_no_wait") as put_agentpool:
dec_1.add_agentpool(agentpool_1)
put_agentpool.assert_called_once_with(
False,
self.client.begin_create_or_update,
"test_resource_group_name",
"test_cluster_name",
"test_nodepool_name",
agentpool_1,
headers={},
)
class AKSAgentPoolAddDecoratorManagedClusterModeTestCase(AKSAgentPoolAddDecoratorCommonTestCase):
def setUp(self):
self.cli_ctx = MockCLI()
self.cmd = MockCmd(self.cli_ctx)
self.resource_type = ResourceType.MGMT_CONTAINERSERVICE
self.agentpool_decorator_mode = AgentPoolDecoratorMode.MANAGED_CLUSTER
self.models = AKSAgentPoolModels(self.cmd, self.resource_type, self.agentpool_decorator_mode)
self.client = MockClient()
def test_ensure_agentpool(self):
self.common_ensure_agentpool()
    def test_remove_restore_defaults_in_agentpool(self):
self.common_remove_restore_defaults_in_agentpool()
def test_init_agentpool(self):
self.common_init_agentpool()
def test_set_up_snapshot_properties(self):
self.common_set_up_snapshot_properties()
def test_set_up_node_network_properties(self):
self.common_set_up_node_network_properties()
def test_set_up_auto_scaler_properties(self):
self.common_set_up_auto_scaler_properties()
def test_set_up_priority_properties(self):
self.common_set_up_priority_properties()
def test_set_up_label_tag_taint(self):
self.common_set_up_label_tag_taint()
def test_set_up_osdisk_properties(self):
self.common_set_up_osdisk_properties()
def test_set_up_upgrade_settings(self):
self.common_set_up_upgrade_settings()
def test_set_up_vm_properties(self):
self.common_set_up_vm_properties()
def test_set_up_custom_node_config(self):
self.common_set_up_custom_node_config()
def test_construct_agentpool_profile_default(self):
import inspect
from azure.cli.command_modules.acs.custom import aks_create
optional_params = {}
positional_params = []
for _, v in inspect.signature(aks_create).parameters.items():
if v.default != v.empty:
optional_params[v.name] = v.default
else:
positional_params.append(v.name)
ground_truth_positional_params = [
"cmd",
"client",
"resource_group_name",
"name",
"ssh_key_value",
]
self.assertEqual(positional_params, ground_truth_positional_params)
# prepare a dictionary of default parameters
raw_param_dict = {
"resource_group_name": "test_rg_name",
"name": "test_cluster_name",
"ssh_key_value": None,
}
raw_param_dict.update(optional_params)
        # default values in `aks_create`
dec_1 = AKSAgentPoolAddDecorator(
self.cmd,
self.client,
raw_param_dict,
self.resource_type,
self.agentpool_decorator_mode,
)
with patch(
"azure.cli.command_modules.acs.agentpool_decorator.cf_agent_pools",
return_value=Mock(list=Mock(return_value=[])),
):
dec_agentpool_1 = dec_1.construct_agentpool_profile_default()
upgrade_settings_1 = self.models.AgentPoolUpgradeSettings()
ground_truth_agentpool_1 = self.create_initialized_agentpool_instance(
nodepool_name="nodepool1",
orchestrator_version="",
vm_size=CONST_DEFAULT_NODE_VM_SIZE,
os_type=CONST_DEFAULT_NODE_OS_TYPE,
enable_node_public_ip=False,
enable_auto_scaling=False,
count=3,
node_taints=[],
os_disk_size_gb=0,
upgrade_settings=upgrade_settings_1,
type=CONST_VIRTUAL_MACHINE_SCALE_SETS,
enable_encryption_at_host=False,
enable_ultra_ssd=False,
enable_fips=False,
mode=CONST_NODEPOOL_MODE_SYSTEM,
)
self.assertEqual(dec_agentpool_1, ground_truth_agentpool_1)
dec_1.context.raw_param.print_usage_statistics()
class AKSAgentPoolUpdateDecoratorCommonTestCase(unittest.TestCase):
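    # Stash every non-None attribute (except the name) and null it out, so
    # equality checks only compare fields that a test sets explicitly.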
def _remove_defaults_in_agentpool(self, agentpool):
self.defaults_in_agentpool = {}
for attr_name, attr_value in vars(agentpool).items():
if not attr_name.startswith("_") and attr_name != "name" and attr_value is not None:
self.defaults_in_agentpool[attr_name] = attr_value
setattr(agentpool, attr_name, None)
return agentpool
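    # Put the stashed defaults back onto any attribute that is still None.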
def _restore_defaults_in_agentpool(self, agentpool):
for key, value in self.defaults_in_agentpool.items():
if getattr(agentpool, key, None) is None:
setattr(agentpool, key, value)
return agentpool
def create_initialized_agentpool_instance(
self, nodepool_name="nodepool1", remove_defaults=True, restore_defaults=True, **kwargs
):
"""Helper function to create a properly initialized agentpool instance.
:return: the AgentPool object
"""
if self.agentpool_decorator_mode == AgentPoolDecoratorMode.MANAGED_CLUSTER:
agentpool = self.models.UnifiedAgentPoolModel(name=nodepool_name)
else:
agentpool = self.models.UnifiedAgentPoolModel()
agentpool.name = nodepool_name
# remove defaults
if remove_defaults:
self._remove_defaults_in_agentpool(agentpool)
# set properties
for key, value in kwargs.items():
setattr(agentpool, key, value)
        # restore defaults
if restore_defaults:
self._restore_defaults_in_agentpool(agentpool)
return agentpool
def common_ensure_agentpool(self):
dec_1 = AKSAgentPoolUpdateDecorator(
self.cmd,
self.client,
{},
self.resource_type,
self.agentpool_decorator_mode,
)
# fail on passing the wrong agentpool object
with self.assertRaises(CLIInternalError):
dec_1._ensure_agentpool(None)
agentpool_1 = self.create_initialized_agentpool_instance()
# fail on inconsistent agentpool with internal context
with self.assertRaises(CLIInternalError):
dec_1._ensure_agentpool(agentpool_1)
def common_update_auto_scaler_properties(self):
dec_1 = AKSAgentPoolUpdateDecorator(
self.cmd,
self.client,
{
"enable_cluster_autoscaler": False,
"disable_cluster_autoscaler": False,
"update_cluster_autoscaler": True,
"min_count": 1,
"max_count": 5,
},
self.resource_type,
self.agentpool_decorator_mode,
)
# fail on passing the wrong agentpool object
with self.assertRaises(CLIInternalError):
dec_1.update_auto_scaler_properties(None)
agentpool_1 = self.create_initialized_agentpool_instance(
enable_auto_scaling=True, node_count=3, min_count=2, max_count=4
)
dec_1.context.attach_agentpool(agentpool_1)
dec_agentpool_1 = dec_1.update_auto_scaler_properties(agentpool_1)
        ground_truth_agentpool_1 = self.create_initialized_agentpool_instance(
            enable_auto_scaling=True, node_count=3, min_count=1, max_count=5
        )
        self.assertEqual(dec_agentpool_1, ground_truth_agentpool_1)
dec_2 = AKSAgentPoolUpdateDecorator(
self.cmd,
self.client,
{
"enable_cluster_autoscaler": False,
"disable_cluster_autoscaler": True,
"update_cluster_autoscaler": False,
"min_count": None,
"max_count": None,
},
self.resource_type,
self.agentpool_decorator_mode,
)
# fail on passing the wrong agentpool object
with self.assertRaises(CLIInternalError):
dec_2.update_auto_scaler_properties(None)
agentpool_2 = self.create_initialized_agentpool_instance(
enable_auto_scaling=True, node_count=3, min_count=2, max_count=4
)
dec_2.context.attach_agentpool(agentpool_2)
dec_agentpool_2 = dec_2.update_auto_scaler_properties(agentpool_2)
        ground_truth_agentpool_2 = self.create_initialized_agentpool_instance(
            enable_auto_scaling=False, node_count=3, min_count=None, max_count=None
        )
        self.assertEqual(dec_agentpool_2, ground_truth_agentpool_2)
def common_update_label_tag_taint(self):
dec_1 = AKSAgentPoolUpdateDecorator(
self.cmd,
self.client,
{
"nodepool_labels": "test_nodepool_labels",
"nodepool_tags": "test_nodepool_tags",
"labels": "test_labels",
"tags": "test_tags",
"node_taints": "",
},
self.resource_type,
self.agentpool_decorator_mode,
)
# fail on passing the wrong agentpool object
with self.assertRaises(CLIInternalError):
dec_1.update_label_tag_taint(None)
agentpool_1 = self.create_initialized_agentpool_instance(
node_labels={"abc": "xyz"}, tags={"123": "456"}, node_taints=["test_node_taints"]
)
dec_1.context.attach_agentpool(agentpool_1)
dec_agentpool_1 = dec_1.update_label_tag_taint(agentpool_1)
if self.agentpool_decorator_mode == AgentPoolDecoratorMode.MANAGED_CLUSTER:
            ground_truth_agentpool_1 = self.create_initialized_agentpool_instance(
                node_labels="test_nodepool_labels", tags="test_nodepool_tags", node_taints=[]
            )
        else:
            ground_truth_agentpool_1 = self.create_initialized_agentpool_instance(
                node_labels="test_labels", tags="test_tags", node_taints=[]
            )
        self.assertEqual(dec_agentpool_1, ground_truth_agentpool_1)
def common_update_upgrade_settings(self):
dec_1 = AKSAgentPoolUpdateDecorator(
self.cmd,
self.client,
{
"max_surge": "test_max_surge",
},
self.resource_type,
self.agentpool_decorator_mode,
)
# fail on passing the wrong agentpool object
with self.assertRaises(CLIInternalError):
dec_1.update_upgrade_settings(None)
upgrade_settings_1 = self.models.AgentPoolUpgradeSettings(max_surge="test_ap_max_surge")
agentpool_1 = self.create_initialized_agentpool_instance(upgrade_settings=upgrade_settings_1)
dec_1.context.attach_agentpool(agentpool_1)
dec_agentpool_1 = dec_1.update_upgrade_settings(agentpool_1)
ground_truth_upgrade_settings_1 = self.models.AgentPoolUpgradeSettings(max_surge="test_max_surge")
        ground_truth_agentpool_1 = self.create_initialized_agentpool_instance(
            upgrade_settings=ground_truth_upgrade_settings_1
        )
        self.assertEqual(dec_agentpool_1, ground_truth_agentpool_1)
dec_2 = AKSAgentPoolUpdateDecorator(
self.cmd,
self.client,
{
"max_surge": "test_max_surge",
},
self.resource_type,
self.agentpool_decorator_mode,
)
agentpool_2 = self.create_initialized_agentpool_instance()
dec_2.context.attach_agentpool(agentpool_2)
dec_agentpool_2 = dec_2.update_upgrade_settings(agentpool_2)
ground_truth_upgrade_settings_2 = self.models.AgentPoolUpgradeSettings(max_surge="test_max_surge")
        ground_truth_agentpool_2 = self.create_initialized_agentpool_instance(
            upgrade_settings=ground_truth_upgrade_settings_2
        )
        self.assertEqual(dec_agentpool_2, ground_truth_agentpool_2)
def common_update_vm_properties(self):
dec_1 = AKSAgentPoolUpdateDecorator(
self.cmd,
self.client,
{
"mode": "test_mode",
"scale_down_mode": "test_scale_down_mode",
},
self.resource_type,
self.agentpool_decorator_mode,
)
# fail on passing the wrong agentpool object
with self.assertRaises(CLIInternalError):
dec_1.update_vm_properties(None)
agentpool_1 = self.create_initialized_agentpool_instance(
mode="test_ap_mode", scale_down_mode="test_ap_scale_down_mode"
)
dec_1.context.attach_agentpool(agentpool_1)
dec_agentpool_1 = dec_1.update_vm_properties(agentpool_1)
        ground_truth_agentpool_1 = self.create_initialized_agentpool_instance(
            mode="test_mode", scale_down_mode="test_scale_down_mode"
        )
        self.assertEqual(dec_agentpool_1, ground_truth_agentpool_1)
class AKSAgentPoolUpdateDecoratorStandaloneModeTestCase(AKSAgentPoolUpdateDecoratorCommonTestCase):
def setUp(self):
self.cli_ctx = MockCLI()
self.cmd = MockCmd(self.cli_ctx)
self.resource_type = ResourceType.MGMT_CONTAINERSERVICE
self.agentpool_decorator_mode = AgentPoolDecoratorMode.STANDALONE
self.models = AKSAgentPoolModels(self.cmd, self.resource_type, self.agentpool_decorator_mode)
self.client = MockClient()
def test_ensure_agentpool(self):
self.common_ensure_agentpool()
def test_fetch_agentpool(self):
dec_1 = AKSAgentPoolUpdateDecorator(
self.cmd,
self.client,
{
"resource_group_name": "test_resource_group_name",
"cluster_name": "test_cluster_name",
"nodepool_name": "test_nodepool_name",
},
self.resource_type,
self.agentpool_decorator_mode,
)
self.client.get = Mock(return_value=self.create_initialized_agentpool_instance())
with patch(
"azure.cli.command_modules.acs.agentpool_decorator.cf_agent_pools",
return_value=Mock(list=Mock(return_value=[])),
):
dec_agentpool_1 = dec_1.fetch_agentpool()
ground_truth_agentpool_1 = self.create_initialized_agentpool_instance()
self.assertEqual(dec_agentpool_1, ground_truth_agentpool_1)
self.assertEqual(dec_agentpool_1, dec_1.context.agentpool)
self.client.get.assert_called_once_with("test_resource_group_name", "test_cluster_name", "test_nodepool_name")
def test_update_auto_scaler_properties(self):
self.common_update_auto_scaler_properties()
def test_update_label_tag_taint(self):
self.common_update_label_tag_taint()
def test_update_upgrade_settings(self):
self.common_update_upgrade_settings()
def test_update_vm_properties(self):
self.common_update_vm_properties()
def test_update_agentpool_profile_default(self):
import inspect
from azure.cli.command_modules.acs.custom import aks_agentpool_update
optional_params = {}
positional_params = []
for _, v in inspect.signature(aks_agentpool_update).parameters.items():
if v.default != v.empty:
optional_params[v.name] = v.default
else:
positional_params.append(v.name)
ground_truth_positional_params = [
"cmd",
"client",
"resource_group_name",
"cluster_name",
"nodepool_name",
]
self.assertEqual(positional_params, ground_truth_positional_params)
# prepare a dictionary of default parameters
raw_param_dict = {
"resource_group_name": "test_rg_name",
"cluster_name": "test_cluster_name",
"nodepool_name": "test_nodepool_name",
}
raw_param_dict.update(optional_params)
        # default values in `aks_agentpool_update`
dec_1 = AKSAgentPoolUpdateDecorator(
self.cmd,
self.client,
raw_param_dict,
self.resource_type,
self.agentpool_decorator_mode,
)
self.client.get = Mock(
return_value=self.create_initialized_agentpool_instance(nodepool_name="test_nodepool_name")
)
dec_agentpool_1 = dec_1.update_agentpool_profile_default()
ground_truth_agentpool_1 = self.create_initialized_agentpool_instance(
nodepool_name="test_nodepool_name",
)
self.assertEqual(dec_agentpool_1, ground_truth_agentpool_1)
dec_1.context.raw_param.print_usage_statistics()
def test_update_agentpool(self):
dec_1 = AKSAgentPoolUpdateDecorator(
self.cmd,
self.client,
{
"resource_group_name": "test_resource_group_name",
"cluster_name": "test_cluster_name",
"nodepool_name": "test_nodepool_name",
},
self.resource_type,
self.agentpool_decorator_mode,
)
# fail on passing the wrong agentpool object
with self.assertRaises(CLIInternalError):
dec_1.update_agentpool(None)
agentpool_1 = self.create_initialized_agentpool_instance(nodepool_name="test_nodepool_name")
dec_1.context.attach_agentpool(agentpool_1)
with patch("azure.cli.command_modules.acs.agentpool_decorator.sdk_no_wait") as put_agentpool:
dec_1.update_agentpool(agentpool_1)
put_agentpool.assert_called_once_with(
False,
self.client.begin_create_or_update,
"test_resource_group_name",
"test_cluster_name",
"test_nodepool_name",
agentpool_1,
headers={},
)
class AKSAgentPoolUpdateDecoratorManagedClusterModeTestCase(AKSAgentPoolUpdateDecoratorCommonTestCase):
def setUp(self):
self.cli_ctx = MockCLI()
self.cmd = MockCmd(self.cli_ctx)
self.resource_type = ResourceType.MGMT_CONTAINERSERVICE
self.agentpool_decorator_mode = AgentPoolDecoratorMode.MANAGED_CLUSTER
self.models = AKSAgentPoolModels(self.cmd, self.resource_type, self.agentpool_decorator_mode)
self.client = MockClient()
def test_ensure_agentpool(self):
self.common_ensure_agentpool()
def test_fetch_agentpool(self):
dec_1 = AKSAgentPoolUpdateDecorator(
self.cmd,
self.client,
{
"resource_group_name": "test_resource_group_name",
"name": "test_cluster_name",
"nodepool_name": "test_nodepool_name",
},
self.resource_type,
self.agentpool_decorator_mode,
)
agentpools = [
self.create_initialized_agentpool_instance(nodepool_name="test_nodepool_1"),
self.create_initialized_agentpool_instance(nodepool_name="test_nodepool_2"),
]
dec_agentpool_1 = dec_1.fetch_agentpool(agentpools)
ground_truth_agentpool_1 = self.create_initialized_agentpool_instance(nodepool_name="test_nodepool_1")
self.assertEqual(dec_agentpool_1, ground_truth_agentpool_1)
self.assertEqual(dec_agentpool_1, dec_1.context.agentpool)
def test_update_auto_scaler_properties(self):
self.common_update_auto_scaler_properties()
def test_update_label_tag_taint(self):
self.common_update_label_tag_taint()
def test_update_upgrade_settings(self):
self.common_update_upgrade_settings()
def test_update_agentpool_profile_default(self):
import inspect
from azure.cli.command_modules.acs.custom import aks_update
optional_params = {}
positional_params = []
for _, v in inspect.signature(aks_update).parameters.items():
if v.default != v.empty:
optional_params[v.name] = v.default
else:
positional_params.append(v.name)
ground_truth_positional_params = [
"cmd",
"client",
"resource_group_name",
"name",
]
self.assertEqual(positional_params, ground_truth_positional_params)
# prepare a dictionary of default parameters
raw_param_dict = {
"resource_group_name": "test_rg_name",
"name": "test_cluster_name",
}
raw_param_dict.update(optional_params)
        # default values in `aks_update`
dec_1 = AKSAgentPoolUpdateDecorator(
self.cmd,
self.client,
raw_param_dict,
self.resource_type,
self.agentpool_decorator_mode,
)
agentpools = [
self.create_initialized_agentpool_instance(nodepool_name="test_nodepool_1"),
self.create_initialized_agentpool_instance(nodepool_name="test_nodepool_2"),
]
dec_agentpool_1 = dec_1.update_agentpool_profile_default(agentpools)
ground_truth_agentpool_1 = self.create_initialized_agentpool_instance(
nodepool_name="test_nodepool_1",
)
self.assertEqual(dec_agentpool_1, ground_truth_agentpool_1)
dec_1.context.raw_param.print_usage_statistics()
def test_update_vm_properties(self):
self.common_update_vm_properties()
if __name__ == "__main__":
unittest.main()
avg_line_length: 39.049168 | max_line_length: 138 | alphanum_fraction: 0.655192 | num_words: 11,185 | num_chars: 103,246 | mean_word_length: 5.607331 | frac_chars_dupe_5grams: 0.937721 | size_file_byte: 103,246 | num_lines: 2,643 | num_chars_line_max: 139 | num_chars_line_mean: 39.063942 | frac_chars_alphabet: 0.817134 | frac_lines_dupe_lines: 0.668493 | remaining quality-signal columns: 0/1/null flags
hexsha: 500ff000cbe607c82319059361e31b442a3bc781 | size: 8,624 | ext: py | lang: Python
max_stars/max_issues/max_forks repo_path: river/signals.py | repo_name: manzerw/django-river | head_hexsha: 58fa53ce9e1d790201afa8d044e85cb8ae00c55f | licenses: ["BSD-3-Clause"] | star/issue/fork counts and event datetimes: null
import logging
from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
from django.dispatch import Signal
from river.models import Workflow
from river.models.hook import BEFORE, AFTER
from river.models.on_approved_hook import OnApprovedHook
from river.models.on_complete_hook import OnCompleteHook
from river.models.on_transit_hook import OnTransitHook
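# Module-level Django signals that callers can connect to; the context
# managers below execute the registered hook models directly.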
pre_on_complete = Signal()
post_on_complete = Signal()
pre_transition = Signal()
post_transition = Signal()
pre_approve = Signal()
post_approve = Signal()
LOGGER = logging.getLogger(__name__)
class TransitionSignal(object):
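    """Context manager around a state transition: matching OnTransitHook
    objects with hook_type BEFORE run on __enter__ and those with AFTER run
    on __exit__, but only when ``status`` is truthy.
    """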
def __init__(self, status, workflow_object, field_name, transition_approval):
self.status = status
self.workflow_object = workflow_object
self.field_name = field_name
self.transition_approval = transition_approval
self.content_type = ContentType.objects.get_for_model(
self.workflow_object.__class__
)
self.workflow = Workflow.objects.get(
content_type=self.content_type,
field_name=self.field_name,
)
def __enter__(self):
if self.status:
for hook in OnTransitHook.objects.filter(
(
Q(object_id__isnull=True)
| Q(
object_id=self.workflow_object.pk,
content_type=self.content_type,
)
)
& (
Q(transition__isnull=True)
| Q(transition=self.transition_approval.transition)
)
& Q(
workflow__field_name=self.field_name,
transition_meta=self.transition_approval.transition.meta,
hook_type=BEFORE,
)
):
hook.execute(self._get_context(BEFORE))
LOGGER.debug(
"The signal that is fired right before the transition "
f"( {self.transition_approval.transition} ) happened "
f"for {self.workflow_object}"
)
def __exit__(self, type, value, traceback):
if self.status:
for hook in OnTransitHook.objects.filter(
(
Q(object_id__isnull=True)
| Q(
object_id=self.workflow_object.pk,
content_type=self.content_type,
)
)
& (
Q(transition__isnull=True)
| Q(transition=self.transition_approval.transition)
)
& Q(
workflow=self.workflow,
transition_meta=self.transition_approval.transition.meta,
hook_type=AFTER,
)
):
hook.execute(self._get_context(AFTER))
LOGGER.debug(
"The signal that is fired right after the transition "
f"( {self.transition_approval.transition} ) happened "
f"for {self.workflow_object}"
)
def _get_context(self, when):
return {
"hook": {
"type": "on-transit",
"when": when,
"payload": {
"workflow": self.workflow,
"workflow_object": self.workflow_object,
"transition_approval": self.transition_approval,
},
},
}
class ApproveSignal(object):
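    """Context manager around a transition approval: matching OnApprovedHook
    objects with hook_type BEFORE run on __enter__ and those with AFTER run
    on __exit__, filtered to either all objects or this workflow object.
    """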
def __init__(self, workflow_object, field_name, transition_approval):
self.workflow_object = workflow_object
self.field_name = field_name
self.transition_approval = transition_approval
self.content_type = ContentType.objects.get_for_model(
self.workflow_object.__class__
)
self.workflow = Workflow.objects.get(
content_type=self.content_type,
field_name=self.field_name,
)
def __enter__(self):
for hook in OnApprovedHook.objects.filter(
(
Q(object_id__isnull=True)
| Q(object_id=self.workflow_object.pk, content_type=self.content_type)
)
& (
Q(transition_approval__isnull=True)
| Q(transition_approval=self.transition_approval)
)
& Q(
workflow__field_name=self.field_name,
transition_approval_meta=self.transition_approval.meta,
hook_type=BEFORE,
)
):
hook.execute(self._get_context(BEFORE))
LOGGER.debug(
"The signal that is fired right before a transition approval is "
f"approved for {self.workflow_object} due to transition "
f"{self.transition_approval.transition.source_state.label} "
f"-> {self.transition_approval.transition.destination_state.label}"
)
def __exit__(self, type, value, traceback):
for hook in OnApprovedHook.objects.filter(
(
Q(object_id__isnull=True)
| Q(object_id=self.workflow_object.pk, content_type=self.content_type)
)
& (
Q(transition_approval__isnull=True)
| Q(transition_approval=self.transition_approval)
)
& Q(
workflow__field_name=self.field_name,
transition_approval_meta=self.transition_approval.meta,
hook_type=AFTER,
)
):
hook.execute(self._get_context(AFTER))
LOGGER.debug(
"The signal that is fired right after a transition approval is "
f"approved for {self.workflow_object} due to transition "
f"{self.transition_approval.transition.source_state.label} "
f"-> {self.transition_approval.transition.destination_state.label}"
)
def _get_context(self, when):
return {
"hook": {
"type": "on-approved",
"when": when,
"payload": {
"workflow": self.workflow,
"workflow_object": self.workflow_object,
"transition_approval": self.transition_approval,
},
},
}
class OnCompleteSignal(object):
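    """Context manager that runs OnCompleteHook objects when the workflow
    object has reached a final state (``status`` is captured from
    ``on_final_state`` at construction time).
    """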
def __init__(self, workflow_object, field_name):
self.workflow_object = workflow_object
self.field_name = field_name
self.workflow = getattr(self.workflow_object.river, self.field_name)
self.status = self.workflow.on_final_state
self.content_type = ContentType.objects.get_for_model(
self.workflow_object.__class__
)
self.workflow = Workflow.objects.get(
content_type=self.content_type, field_name=self.field_name
)
def __enter__(self):
if self.status:
for hook in OnCompleteHook.objects.filter(
(
Q(object_id__isnull=True)
| Q(
object_id=self.workflow_object.pk,
content_type=self.content_type,
)
)
& Q(workflow__field_name=self.field_name, hook_type=BEFORE)
):
hook.execute(self._get_context(BEFORE))
LOGGER.debug(
"The signal that is fired right before the workflow "
f"of {self.workflow_object} is complete"
)
def __exit__(self, type, value, traceback):
if self.status:
for hook in OnCompleteHook.objects.filter(
(
Q(object_id__isnull=True)
| Q(
object_id=self.workflow_object.pk,
content_type=self.content_type,
)
)
& Q(workflow__field_name=self.field_name, hook_type=AFTER)
):
hook.execute(self._get_context(AFTER))
LOGGER.debug(
"The signal that is fired right after the workflow "
f"of {self.workflow_object} is complete"
)
def _get_context(self, when):
return {
"hook": {
"type": "on-complete",
"when": when,
"payload": {
"workflow": self.workflow,
"workflow_object": self.workflow_object,
},
},
}
avg_line_length: 35.344262 | max_line_length: 86 | alphanum_fraction: 0.544759 | num_words: 825 | num_chars: 8,624 | mean_word_length: 5.384242 | frac_chars_dupe_5grams: 0.848717 | size_file_byte: 8,624 | num_lines: 243 | num_chars_line_max: 87 | num_chars_line_mean: 35.489712 | frac_chars_alphabet: 0.824119 | frac_lines_dupe_lines: 0.622727 | remaining quality-signal columns: 0/1/null flags
hexsha: 5051ec0ca5accc390de2a0b5b403a6ab90703a3d | size: 49,908 | ext: py | lang: Python
max_stars/max_issues/max_forks repo_path: jobs/ui/icons_rc.py | repo_name: botacatalin/job-scraping | head_hexsha: 7f01dec6139ce484449440f71df5004be4d4f4d4 | licenses: ["BSD-2-Clause"] | star/issue/fork counts and event datetimes: null
# -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt4 (Qt v4.8.7)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
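# qt_resource_data embeds the compiled icon images (PNG files) as one raw
# byte string; the escaped hex below is binary image data, not code.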
qt_resource_data = b"\
\x00\x00\x03\xba\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x40\x00\x00\x00\x40\x08\x04\x00\x00\x00\x00\x60\xb9\x55\
\x00\x00\x00\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\
\x00\x00\x00\x20\x63\x48\x52\x4d\x00\x00\x7a\x26\x00\x00\x80\x84\
\x00\x00\xfa\x00\x00\x00\x80\xe8\x00\x00\x75\x30\x00\x00\xea\x60\
\x00\x00\x3a\x98\x00\x00\x17\x70\x9c\xba\x51\x3c\x00\x00\x00\x02\
\x62\x4b\x47\x44\x00\x00\xaa\x8d\x23\x32\x00\x00\x00\x09\x70\x48\
\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\x01\x42\x28\x9b\x78\x00\
\x00\x00\x07\x74\x49\x4d\x45\x07\xe0\x06\x16\x16\x1f\x07\x6b\x37\
\x08\x3e\x00\x00\x02\x88\x49\x44\x41\x54\x68\xde\xed\x59\x31\x4f\
\xe3\x30\x18\x7d\x49\x07\xa4\x8a\x13\x0b\x0c\x6d\x74\x8a\xd4\xbf\
\x80\x28\x0b\xd2\x49\x9d\x2b\x06\x16\x32\x57\x1d\x2a\x66\x76\x7e\
\xc1\xf1\x1f\x18\xd9\x10\x03\x63\x54\x04\xd3\x55\x0c\x0c\x5d\x2b\
\xd0\x29\x1d\xb8\x9b\xee\x2a\x44\x18\x78\x0c\xc5\x4d\x9a\x44\x25\
\x76\x9d\x3a\x03\xcf\x93\xd5\xda\xef\xf9\xf3\xf7\xc5\xf6\xf7\x59\
\x84\x24\xd6\xd1\x42\x03\xf5\x59\x03\xc6\xb3\x36\x82\x8f\x89\xe4\
\x7c\xcc\xdf\x1c\xf6\x78\xc5\x17\x2e\xc2\x0b\xaf\xd8\xa3\x93\x7f\
\xd6\x7c\x7f\xab\xf2\x98\x03\xbe\x31\x2f\xde\x38\xe0\x31\xab\x7a\
\x04\x54\xd8\x65\x90\x9b\x3a\x8e\x80\x5d\x56\x96\x15\xd0\xe6\x50\
\x89\x5c\x60\xc8\xb6\xba\x00\x97\xfd\xa5\xc8\x05\xfa\x74\x55\x04\
\xec\xf1\x49\x0b\x3d\x49\x3e\x71\x4f\x56\x40\x87\xa1\x36\x7a\x92\
\x0c\xd9\xc9\x2f\xa0\xc2\x53\xad\xe4\x02\xa7\x59\x2e\x99\x45\x7f\
\x51\x08\x3d\x49\x5e\xa4\x25\xa4\x05\x14\xb3\xfa\xc8\x0a\x9f\x08\
\xe8\x14\x4a\x4f\x32\xe9\x0b\x49\xcf\xd7\xeb\x7a\x59\x08\xe7\x23\
\xc2\x8a\x1d\x46\x2e\x06\xd8\x92\x3d\x9b\x14\xf0\x07\x3b\x78\x14\
\x1d\x3b\xf6\xc3\xd9\x4a\xe8\x81\x2d\x9c\x65\x9d\x86\xed\xc2\x8d\
\x1f\x47\x3b\xe9\x03\x15\x85\x6f\xfe\x03\x3d\xd6\x58\xa3\xc7\x07\
\xe9\xb1\x43\x11\x90\x42\x40\x57\x81\x7e\x63\x66\xbd\x0d\x05\x09\
\xdd\xb8\x80\xaa\xc2\x81\xeb\xcd\xc5\x8f\x27\x3d\x3e\x98\xde\x17\
\xa6\x4e\x78\x84\xba\xb4\x2b\x5d\x2f\xe8\xe5\x41\x1d\x47\x51\x14\
\x1c\xae\xc4\xfb\x93\x38\x14\x02\x1c\x6c\x2b\x0c\xff\xb1\xa0\x97\
\x0f\xdb\x70\x00\x10\xec\x49\xef\x9f\x0e\x27\x24\xc9\xde\xd4\x07\
\xf6\x95\x0c\xe8\xe2\x1e\x1e\x6a\xa8\xc1\xc3\x3d\x5c\xa5\x39\xf6\
\x01\x8b\xeb\xf8\x8b\x35\xa5\xe1\xcb\x23\xc4\xa6\x8d\x96\x31\x7a\
\x60\x0d\x2d\x1b\x0d\x63\xf4\x00\xd0\xb0\x15\xbe\x00\x3a\x51\x97\
\x17\x70\x8b\x5d\x58\x89\xb6\x8b\x5b\x55\x01\xa0\x2f\x19\x3a\xcd\
\xcc\xdb\x6d\x53\x29\x0c\x49\x5f\xd7\x16\xfc\x5a\xdd\x16\xfc\x44\
\x53\x8b\xe4\x0f\x01\x16\xff\xe1\x9b\xd2\x50\x2b\xd1\x97\x4e\x34\
\x00\x00\xfe\xdb\x18\x6b\x5c\x8f\x3c\xc6\x5f\x02\xbe\x04\xd8\x18\
\x19\x15\x30\x52\x3f\x8e\x75\x84\x61\x88\x4d\x1b\x13\xf8\xc6\xd6\
\xef\x63\x62\x03\xb8\x34\x26\xe0\x12\xb0\x08\x38\xf8\x9d\x32\xe8\
\xe7\x58\x7e\x0b\x88\xef\x08\x6c\x00\x01\xee\x8c\xac\xff\x0e\x81\
\x78\x17\x9c\x4b\x0c\x13\xf7\x81\xb4\x45\x64\xef\x05\xe7\x1f\x76\
\x90\x7c\x9a\x65\xdf\x07\xe4\xef\x05\x73\x4f\xb3\x67\x9c\xac\x7c\
\x03\x4e\xf0\x1c\x59\x40\xe6\x79\x7e\xb3\xc0\x06\x4d\xde\xe4\x9c\
\x25\xf5\x3c\x37\x9e\xa0\x00\xa1\x29\x33\x9c\x07\xfd\x92\x26\xa9\
\x1e\x71\x80\xd7\xc2\xe9\x5f\x71\x10\xd1\xa3\x6c\x89\xca\x12\xa4\
\x6a\x4b\x90\xac\x36\x9e\xae\x2f\x41\xc1\x42\x24\xae\x8d\x96\x6c\
\x40\xe3\x45\x2b\xf1\x81\x36\x58\xb6\x13\x2e\x69\xb4\x70\x29\xee\
\x0b\x06\x4b\xb7\x51\x2b\xa4\x78\x6d\x99\x2e\xdf\xbf\x03\x80\xb0\
\xdc\x7b\x4b\x87\x62\xbe\x00\x00\x00\x25\x74\x45\x58\x74\x64\x61\
\x74\x65\x3a\x63\x72\x65\x61\x74\x65\x00\x32\x30\x31\x36\x2d\x30\
\x36\x2d\x32\x32\x54\x32\x32\x3a\x33\x31\x3a\x30\x37\x2b\x30\x32\
\x3a\x30\x30\x5e\x74\x31\x24\x00\x00\x00\x25\x74\x45\x58\x74\x64\
\x61\x74\x65\x3a\x6d\x6f\x64\x69\x66\x79\x00\x32\x30\x31\x36\x2d\
\x30\x36\x2d\x32\x32\x54\x32\x32\x3a\x33\x31\x3a\x30\x37\x2b\x30\
\x32\x3a\x30\x30\x2f\x29\x89\x98\x00\x00\x00\x19\x74\x45\x58\x74\
\x53\x6f\x66\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x00\
\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x04\xe7\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x40\x00\x00\x00\x40\x08\x04\x00\x00\x00\x00\x60\xb9\x55\
\x00\x00\x00\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\
\x00\x00\x00\x20\x63\x48\x52\x4d\x00\x00\x7a\x26\x00\x00\x80\x84\
\x00\x00\xfa\x00\x00\x00\x80\xe8\x00\x00\x75\x30\x00\x00\xea\x60\
\x00\x00\x3a\x98\x00\x00\x17\x70\x9c\xba\x51\x3c\x00\x00\x00\x02\
\x62\x4b\x47\x44\x00\x00\xaa\x8d\x23\x32\x00\x00\x00\x09\x70\x48\
\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\x01\x42\x28\x9b\x78\x00\
\x00\x00\x07\x74\x49\x4d\x45\x07\xe0\x07\x03\x0b\x1f\x19\xdf\x61\
\x8b\x63\x00\x00\x03\xb5\x49\x44\x41\x54\x68\xde\xcd\x99\xcf\x6b\
\x53\x59\x14\xc7\x3f\x79\x29\x19\xa1\x2e\x8d\x25\x99\x8a\x95\x42\
\x75\xaf\x30\xa4\xb8\xb1\x8b\x2e\x4b\x96\x63\x41\x0a\x82\x60\x77\
\x0e\x82\xab\x71\x27\x64\xa3\x7f\x85\xe0\xcc\x46\x68\x8b\xe9\xa6\
\x12\x61\x02\x61\xa0\xa0\xa5\x2b\x11\xea\x2c\x06\x13\x08\xae\x84\
\xd2\x30\x92\x7e\x67\xf1\xee\xbb\x7d\xf9\xe1\x4b\x6e\x7e\xbc\xd7\
\x73\x21\xef\x71\x73\xcf\x39\xdf\x7b\xcf\x79\xf7\x9c\x7b\x4f\x4a\
\x38\xd2\x2c\x2b\x2c\x92\xb7\x0d\xea\xb6\x1d\x51\xe1\xd8\x51\x9e\
\x86\x6f\x59\xdd\xd7\x8e\x4e\x14\x45\x27\xda\xd1\x7d\x65\x87\x97\
\x3a\xdc\xb0\x8c\x36\x55\x55\x5b\xc3\x52\x5b\x7f\x69\x53\x99\x61\
\x64\xa7\x06\x9a\x20\xc5\x5d\x9e\x71\xad\xa7\xbf\x4d\x93\x06\x0d\
\x20\x47\x8e\xcb\xa4\x7b\x46\xfc\xc3\xef\xfc\xc1\x20\x05\x03\x10\
\xae\xea\x7d\xd7\xdc\x6a\x7a\xa2\x5b\xca\x29\xdd\x31\x2e\xad\x9c\
\x6e\xe9\x89\x6a\x5d\xeb\xf4\x5e\xab\xa3\x9b\x60\x5e\x7b\x21\x51\
\xdf\x55\xd6\x03\xcd\x0d\x5c\xd4\x39\x3d\x50\x59\xdf\x43\x9c\x7b\
\x9a\x1f\x05\x40\x41\x8d\x90\x90\xd7\x5a\x72\x70\x57\xb4\xa4\xd7\
\x21\xee\x86\x0a\xae\x00\x36\xd4\xb2\xec\xd5\x1f\xb3\x47\xb6\x82\
\xaa\x56\x46\x4b\x1b\xc3\x03\x48\xeb\xb9\x65\x6c\xaa\x38\x92\xf2\
\xa0\x15\xd5\xb4\xb2\x9e\x77\xf9\xcd\x0f\x00\xa4\xb5\x65\x59\x0e\
\xb5\x30\x96\x7a\x84\x16\x74\x68\xe5\x6d\xf5\x42\xe8\x65\x38\x9b\
\xfd\xb6\x2e\x8e\xad\x1e\xa1\x8b\xda\x0e\xad\xc2\x00\x00\x1b\x76\
\x68\x49\xde\x44\xd4\x23\xe4\xa9\x64\xe5\x6e\x44\x01\x28\x58\xd7\
\x2b\x4d\x4c\x79\xd0\x02\x08\xad\x4e\x97\x0e\x0f\x99\xb7\x1f\xde\
\xf6\x04\x67\x7f\xb6\x0a\x81\x21\x1a\xe1\x7d\x21\x3c\x64\xcf\xba\
\xde\x64\x6c\xdf\xeb\x0b\x81\x3b\xee\xf5\x03\xb0\x6a\x3f\xbc\x85\
\xa9\xa8\x47\x68\xc1\x7e\x94\xab\xdd\x00\x52\x76\xcf\x2f\x4e\x4d\
\x3d\x42\x45\x1b\x23\x52\x9d\x00\xd6\xed\xae\xe7\x26\x70\x59\xcb\
\x8e\x1c\xc1\xee\xb8\x1e\x06\x90\xd1\x67\xd3\xed\xb6\xe9\x3e\x96\
\x24\x3d\x72\xe2\x29\x18\x4d\x9f\xfd\x7c\xc1\xef\xdc\xb4\x21\xc7\
\x6d\x36\x07\x92\xa4\xbf\x1d\xb9\x82\x30\xb5\x79\x06\xa0\x6a\x02\
\xae\x5b\xc4\x43\x1f\x25\x49\x07\x8e\x5c\x4b\x26\x58\x57\x03\x00\
\x59\x93\x44\x94\x9d\x5d\x6a\x34\x00\x68\xd7\x24\x37\x59\xe1\x01\
\x6b\x78\x00\x6c\xb9\x26\xc8\x23\xd3\x0e\x00\x1e\x6b\xe0\x01\x45\
\x00\x4e\x4d\x77\x3c\x00\xfc\x4c\xb1\x08\x68\xd6\x24\xda\x35\xe7\
\x85\x1c\xdd\x04\x68\x5f\x92\x74\xa2\x59\x8f\x15\x2e\xc4\x6c\x80\
\x33\x23\x5c\x60\xc5\x63\xd1\x74\x55\x62\x05\xf0\xce\x3c\x17\x3d\
\xf2\xe6\xf5\x4b\xac\x00\xfe\x35\xcf\x7c\x00\xa0\x4d\x33\x56\x00\
\x0d\xe3\x86\x16\x40\x93\x76\xac\x00\xfe\xe3\xab\x0f\x60\xc6\x00\
\x68\x44\x0e\x7f\xcc\x3d\xe3\xaa\x9d\xe4\x1f\xd8\x6e\xf0\xb1\xcf\
\x7f\x2d\x5e\xf2\x22\x42\xe6\x17\xb2\x40\x1e\x7d\x93\x24\xbd\x89\
\xf8\x64\x96\x35\x2a\x45\x45\xca\xb2\x24\xe9\x9b\x17\xeb\xc2\xf7\
\xa1\x19\xea\x5c\x07\x72\x11\x63\x6a\xfc\xc6\xaf\x7d\x4d\x70\x83\
\x9f\x80\x13\x3e\xf5\x35\xc1\x9f\xd4\x22\xa4\xfa\xa6\xaf\xa3\x8a\
\x24\xa9\x3e\xc2\x6e\x36\xce\x4e\x88\x49\xce\x2a\x1e\x75\x80\xbe\
\xe7\xfb\x69\x52\x86\x4b\xfe\x0a\x04\x00\xd2\x5c\x8e\x15\x40\x8e\
\x54\x27\x00\xf8\x39\x56\x00\x57\xcc\xb3\xee\x71\x64\x5e\x57\x62\
\x05\x70\xc7\x3c\x8f\x3c\x2a\xb4\x80\x20\x2b\x88\x8b\xd6\x00\x68\
\x51\xf1\x38\xe6\x2d\x00\xbf\x30\x17\x9b\xfa\x3c\x37\x01\x78\xcb\
\xb1\x47\x90\x09\x78\x06\x55\x3c\xf3\xf7\x5d\x70\xcb\x4f\xc9\x76\
\x38\x05\xe2\x34\x82\x3f\x55\x3f\x09\x4c\x3a\x2d\xf7\x63\xc1\x2b\
\x00\x66\x28\x39\xce\xa4\x15\xfa\x1d\x9e\x4a\xcc\x84\xb4\x0a\x9d\
\x83\xa3\x59\xe2\x87\xd3\x73\x70\x3c\x4f\xfc\x82\x02\x25\x7e\x45\
\x73\x0e\x2e\xa9\x12\xbf\xa6\x43\x89\x5f\x54\xa2\xc4\xaf\x6a\xcf\
\xc1\x65\x75\xe2\xd7\xf5\x81\x2f\x24\x58\xb0\x08\xd8\x13\x2d\xd9\
\xa0\xde\xa2\xd5\xae\x1e\x2a\x3f\x50\x75\x5e\x0f\xb5\x3b\x89\xa2\
\x95\xdf\xba\xcb\x76\xa7\xda\xd7\x53\xdd\xd6\xd5\xae\xb2\x64\x46\
\x57\x75\x5b\x4f\xb5\xaf\xd3\x8e\xf1\x63\x95\xed\xfc\x96\xd2\xba\
\x0d\xd6\x9d\x50\x9a\xfa\xa0\xb2\xca\xfa\xa0\x66\x97\xda\x20\xe0\
\xae\x07\x21\x67\x1c\x00\xfe\xfc\xa6\x56\xba\x75\x71\xac\xa9\x14\
\xaf\x07\xd7\x8e\xbb\x69\xc2\xe5\xfb\xff\x01\xdc\x14\xf8\xa0\x0a\
\x86\x6a\x7e\x00\x00\x00\x25\x74\x45\x58\x74\x64\x61\x74\x65\x3a\
\x63\x72\x65\x61\x74\x65\x00\x32\x30\x31\x36\x2d\x30\x37\x2d\x30\
\x33\x54\x31\x31\x3a\x33\x31\x3a\x32\x35\x2b\x30\x32\x3a\x30\x30\
\xe7\xd3\xef\x6b\x00\x00\x00\x25\x74\x45\x58\x74\x64\x61\x74\x65\
\x3a\x6d\x6f\x64\x69\x66\x79\x00\x32\x30\x31\x36\x2d\x30\x37\x2d\
\x30\x33\x54\x31\x31\x3a\x33\x31\x3a\x32\x35\x2b\x30\x32\x3a\x30\
\x30\x96\x8e\x57\xd7\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x00\x00\x49\x45\
\x4e\x44\xae\x42\x60\x82\
\x00\x00\x05\x62\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x40\x00\x00\x00\x40\x08\x04\x00\x00\x00\x00\x60\xb9\x55\
\x00\x00\x00\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\
\x00\x00\x00\x20\x63\x48\x52\x4d\x00\x00\x7a\x26\x00\x00\x80\x84\
\x00\x00\xfa\x00\x00\x00\x80\xe8\x00\x00\x75\x30\x00\x00\xea\x60\
\x00\x00\x3a\x98\x00\x00\x17\x70\x9c\xba\x51\x3c\x00\x00\x00\x02\
\x62\x4b\x47\x44\x00\x00\xaa\x8d\x23\x32\x00\x00\x00\x09\x70\x48\
\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\x01\x42\x28\x9b\x78\x00\
\x00\x00\x07\x74\x49\x4d\x45\x07\xe0\x06\x16\x16\x1d\x2a\x1c\xde\
\x36\xc9\x00\x00\x04\x30\x49\x44\x41\x54\x68\xde\xed\x99\x3f\x4c\
\x14\x41\x14\xc6\xbf\x13\x4e\xc8\x89\x85\x5c\x02\x4a\x72\xc4\xc2\
\x0a\x63\xa3\x48\x42\x34\x04\x0a\x03\x85\x26\x86\x10\x88\x89\x8d\
\x10\x2b\x0a\x0b\xa9\xae\x13\x28\xec\x08\x89\x76\x10\x02\x09\x95\
\x9d\xc6\x10\x4d\xa4\xb2\x22\x14\x16\xc6\x46\x13\x30\xe2\x85\x53\
\x08\x18\x02\x39\xee\x0f\x9f\x85\xb7\xec\xbf\x37\xb3\xb3\xcb\x6a\
\xe5\x37\x15\x3b\xef\x7d\xef\xb7\x33\x7b\xb3\x3b\x03\x88\xc8\xad\
\x83\x2f\x99\x63\x8e\x2f\xd9\x11\xdd\x25\x7a\xf9\x51\x96\x68\xa9\
\xc4\xd1\x7f\x0d\x70\x95\x45\x3a\x55\xe4\xd5\x68\x4e\xa7\x10\x4d\
\x63\x48\xba\xfe\x4e\x62\x2c\x9a\x51\x54\x80\x6b\x06\x57\xfe\x2a\
\x40\xb3\xc1\x95\xbf\x0a\x10\x9b\xfe\x03\x98\x00\xa4\x22\xbb\x1b\
\x64\xea\x01\x3a\xf1\x06\x15\xec\x63\x1d\x4f\x51\x17\xaa\x74\x1d\
\x9e\x62\x1d\xfb\xa8\xe0\x0d\x3a\xb5\x91\x9a\x45\xe2\x21\x2b\x8e\
\xa5\xe6\x1d\x53\x8e\xbe\x5d\x7a\xb5\xeb\xe8\x4d\xf1\x9d\xa3\xa7\
\xc2\x87\x51\x56\xc2\x31\x5f\x89\xac\x31\x40\xd6\xd7\x3b\x16\x16\
\x60\x92\x7e\x6d\x31\x61\x04\x90\xe0\x96\x90\x3d\x69\x0e\x90\xe0\
\x33\xca\x3a\x63\x04\x70\x46\x91\xfd\xcc\x71\x03\x5a\x80\x19\x85\
\x41\x85\x67\x8d\x00\xce\xba\x9e\x1d\xa7\x66\x4c\x00\x06\xa8\xd2\
\xb2\xf1\x33\xb0\xac\xf4\x18\x08\x02\x68\xe4\xa6\x22\xb5\xcc\x3e\
\x63\x80\x3e\x96\x15\x2e\x9b\x6c\xd4\x03\xcc\x2b\x12\x8f\x38\xe2\
\x8a\xd3\x03\x80\x23\x3c\x52\x38\xcd\xeb\x00\x7a\x95\xb3\xff\xc0\
\x03\x1a\x04\x00\x3e\x50\x3e\x09\xbd\x2a\x80\x06\x7e\x15\x13\x8a\
\x1c\xf4\x3d\x29\xc1\x00\xe0\xa0\xe7\xab\xc9\xd2\x57\x36\xc8\x00\
\x4f\xc4\xf0\x02\xef\x08\xbf\x14\x13\x00\xf0\x0e\x0b\xa2\xe7\x13\
\x09\x20\xc1\x35\x31\x58\x5e\xc3\xcc\x00\xa4\xf5\x94\x24\xd7\xec\
\x15\xc1\x0e\xed\x12\x43\x57\x58\x73\x22\x80\x1a\xae\x88\xbe\x5d\
\x56\x84\xfd\x36\xbc\x2f\xbc\xa9\x8a\x18\x46\x25\xf2\xcb\x18\x00\
\x2a\x18\x46\x51\xb8\x6e\x57\xab\x92\xd4\x71\x47\xe0\x1c\x17\xef\
\x2a\xcc\x08\x80\xe0\xb8\xe0\xbc\xc3\x3a\xf7\x14\x48\xeb\x5f\x89\
\xcd\xb1\x00\x34\x3b\xb6\x30\xb6\x06\xdc\x53\x70\x4f\x18\xa6\xb7\
\xc8\x9f\x68\xf8\x2d\xe5\xf1\x56\xb8\x5a\xad\x68\x01\x5c\x16\x42\
\x16\x62\x29\xaf\x72\xb2\x2a\x56\x87\x69\x5f\x18\xd2\x7a\xe5\xa0\
\x82\x3f\x7c\xf1\x3f\x34\xd1\xf5\xc2\x94\xed\x3b\xa7\x20\x2d\x7c\
\x3e\x7e\x40\x41\x73\x4f\x1f\x0d\xae\xd8\x2a\xe0\x83\xef\x5a\x0a\
\x69\x7b\x0a\x32\x42\xd2\x37\xed\xa0\x2e\x19\x5c\x09\x72\xcb\xd8\
\x00\xad\x42\xf7\x86\xd6\x70\x0a\xab\xae\xbf\x57\x31\xa5\x8d\x97\
\xdc\x5a\xf5\x23\xa0\x07\x28\xa3\x0f\x2f\x8e\xff\x7a\x81\x3e\x94\
\x43\x03\x64\x00\xa0\x16\x00\xd0\x24\x74\xff\x84\x5e\xdb\x18\x44\
\x1b\xda\x01\xac\xe2\x13\x82\x24\xb9\x35\xd9\x00\x5b\x42\x77\x3a\
\xd0\x14\xf8\x64\x50\x5a\xed\xb6\x05\x58\x53\x90\x13\xba\xcf\x1b\
\x5a\x9b\x49\x72\xcb\xe9\x01\x2e\xc4\x0a\x20\xb9\x05\x00\x5c\x8a\
\x15\x40\x72\xcb\x01\x40\x82\x00\x70\x1a\x05\x24\x3c\xdd\x25\x34\
\x63\x27\xa6\xf2\xe7\x90\xf7\x9c\x29\x01\x44\x3d\x8a\xd6\x08\x14\
\xb1\xee\x4b\x4a\xe2\x76\x6c\xf7\x7f\xdb\x57\x1e\x58\xff\xf3\x9d\
\x60\xbd\x8c\x5e\x09\x69\xfd\xb1\x01\x48\x4e\x56\xc5\xea\xeb\xa2\
\x47\x78\x63\x17\xd8\xaa\x79\xc1\x98\xb7\x56\xf1\xd3\xb4\xc7\xfd\
\x41\x52\xcb\x6d\x21\x68\x2e\x16\x80\x39\xc1\x79\x9b\xb5\xde\x8f\
\xd2\x05\x21\xac\xc2\xb6\x13\x97\x6f\x13\x37\x28\x0b\xfe\xaf\xe2\
\x7e\x4a\x5a\x92\xb6\xd4\x21\x5a\x82\x4b\xa2\x6f\xbf\x1f\x20\xc9\
\x2f\x62\xe8\xc4\x89\x00\x26\x44\xcf\x2f\x4c\xfa\x01\xc0\x41\xca\
\x1a\x8a\x5c\x7e\x48\xe1\xe8\xd8\xea\xb9\x87\x4b\xde\x44\x1c\xb8\
\xb7\x93\xc6\xad\x97\x07\xa2\xdf\x8a\x73\x5a\xdd\x29\xdd\x0a\xe2\
\x32\x1f\x87\x2e\xff\x58\x79\x46\xd0\xed\x8c\xf3\xa6\xbd\xa6\x4a\
\xf3\xce\x3d\x6d\x40\x6b\x50\x9e\x33\x90\xaf\xdd\xb1\xde\xd4\x0c\
\xf3\xca\xd4\x3c\x47\xed\x87\x47\xd9\x92\x1c\xd5\x7a\x64\xf4\x00\
\xe0\x0d\x1e\x52\xad\xcf\x1c\xf6\x1e\xb2\x38\x5a\x23\x87\xf9\x59\
\x93\x7d\xc8\x1b\xde\x1c\xc9\x66\x84\x7a\x95\xb8\xcc\x47\xbc\xce\
\x96\xea\xce\xb9\x86\x2d\xbc\xce\x47\x5c\x16\xb7\x60\x4e\x8d\xf8\
\xab\xc9\x77\x32\x4d\x33\x95\xf9\x9d\xdf\x95\x0f\x9b\x57\xd3\x52\
\x2d\x19\xa0\x86\x8b\x86\xa6\xe6\x5a\x94\x4f\x1a\xd4\x0f\x53\x56\
\x79\xce\x15\x5e\x47\xae\x73\x66\x43\x00\xf0\x2e\xf7\x62\x29\xbf\
\xc7\xbb\xea\x2a\xfa\x9f\xd4\x15\xc5\xb9\x51\x18\xad\xf1\x8a\xae\
\x46\xd0\xaf\x3a\xc5\x2c\x7f\x45\x2e\xfe\x8b\x59\xd7\x7f\x19\x22\
\x00\x80\x60\x9a\x53\xda\xb5\x41\xd6\x21\xa7\x98\x0e\x76\x37\x01\
\x00\xc1\x8b\x9c\x15\xf6\xf8\x2a\xed\x72\x96\x17\xcd\x9c\x4d\x01\
\x40\x30\xc9\x5b\x7c\xce\x0d\x6d\xe9\x0d\x3e\xe7\x2d\x83\x05\xfb\
\xb8\xfd\xd9\x17\x84\x51\x02\xed\xb8\x89\x96\xe3\x06\xe4\x8e\xdb\
\x7b\xac\x22\xa4\xe1\x6f\x12\x54\xad\xe6\xc1\x27\x5c\xd7\x00\x00\
\x00\x25\x74\x45\x58\x74\x64\x61\x74\x65\x3a\x63\x72\x65\x61\x74\
\x65\x00\x32\x30\x31\x36\x2d\x30\x36\x2d\x32\x32\x54\x32\x32\x3a\
\x32\x39\x3a\x34\x32\x2b\x30\x32\x3a\x30\x30\x5a\x5f\x8f\x4d\x00\
\x00\x00\x25\x74\x45\x58\x74\x64\x61\x74\x65\x3a\x6d\x6f\x64\x69\
\x66\x79\x00\x32\x30\x31\x36\x2d\x30\x36\x2d\x32\x32\x54\x32\x32\
\x3a\x32\x39\x3a\x34\x32\x2b\x30\x32\x3a\x30\x30\x2b\x02\x37\xf1\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\x70\x65\x2e\x6f\x72\
\x67\x9b\xee\x3c\x1a\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\
\x82\
\x00\x00\x04\x3c\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x40\x00\x00\x00\x40\x08\x04\x00\x00\x00\x00\x60\xb9\x55\
\x00\x00\x00\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\
\x00\x00\x00\x20\x63\x48\x52\x4d\x00\x00\x7a\x26\x00\x00\x80\x84\
\x00\x00\xfa\x00\x00\x00\x80\xe8\x00\x00\x75\x30\x00\x00\xea\x60\
\x00\x00\x3a\x98\x00\x00\x17\x70\x9c\xba\x51\x3c\x00\x00\x00\x02\
\x62\x4b\x47\x44\x00\x00\xaa\x8d\x23\x32\x00\x00\x00\x09\x70\x48\
\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\x01\x42\x28\x9b\x78\x00\
\x00\x00\x07\x74\x49\x4d\x45\x07\xe0\x07\x03\x0b\x20\x14\xf9\x0e\
\xdd\xe2\x00\x00\x03\x0a\x49\x44\x41\x54\x68\xde\xcd\x59\xbf\x4b\
\x23\x41\x14\xfe\xb2\xf1\xb6\x11\x96\xc0\x91\xc2\x10\xb0\xc8\xc1\
\xd5\x57\x89\xe0\x5d\x61\x21\x21\x20\xa4\x0b\xa4\x49\xd0\x2a\xff\
\xc1\xfd\x09\x07\x5a\xd8\xda\x5c\x6b\x2b\x16\x82\x17\x39\xe1\xb6\
\x4a\xe3\x41\x6a\x0d\x88\x90\x54\x16\x06\xc4\x1f\x87\x7c\x57\xec\
\xec\xec\x24\x26\xfb\xcb\xcd\xce\xbe\xd7\x0c\xbb\x33\xef\xfb\x66\
\xe6\xcd\xbc\x37\x33\x39\x22\xa2\x2c\x63\x13\x15\x94\xa4\x02\x43\
\xa9\xd7\xf8\x8d\x87\x88\xf6\x18\x5e\x8b\xdc\xe1\x09\x1f\xe9\x27\
\x8f\x3c\xe1\x0e\x8b\xe1\xad\x86\xab\x66\xb2\x43\x9b\xaf\x0c\x2b\
\xaf\xfc\xc3\x0e\xcd\x64\x08\xe4\xd8\xe4\x20\x34\xb4\x2a\x03\x36\
\x99\x0b\xb2\x9f\x0b\xf0\x81\x2d\xfc\xc0\x97\xa9\x6f\x2f\xe8\xe1\
\x06\x23\x31\xef\x10\xbe\xb0\x82\x55\xac\xc1\x9c\xaa\xfb\x17\xdf\
\xf1\x2b\xae\x0f\x94\xd9\x9d\xea\xd3\x3d\x8f\xd8\xa0\x35\xb7\x85\
\xc5\x06\x8f\x78\x3f\xd5\xaa\xcb\x72\x9c\x29\x58\xe7\x68\xc2\xcc\
\x05\xab\xe1\x66\x95\x26\xab\xbc\x98\x68\x3b\xe2\x7a\x54\x02\x2d\
\x3e\x29\x06\xfa\xac\x45\x58\x2d\x8e\xd6\xd8\x57\x2c\x3c\xb1\x15\
\x9e\x40\x9e\xfb\x4a\xd3\x5b\xb6\x69\x44\x86\x07\x41\x83\x6d\xde\
\x2a\x96\xf6\x99\x0f\x43\x20\xcf\x63\xa5\xd1\x19\x0b\xb1\xc0\x5d\
\x2d\xf0\x4c\xb1\x76\xfc\x96\xc2\xdb\x26\x6a\xef\x0f\x66\x71\x8e\
\xa8\x79\x1e\x4c\x8c\x42\x00\x81\x96\xac\xfa\xcc\xdd\x77\x83\xbb\
\xba\xcb\x67\x69\xb7\xe5\x47\x60\x5d\xba\xde\x33\x37\x13\x83\x07\
\xc1\x4d\x49\xe1\x69\x72\x45\xa8\x95\xca\xca\xc2\x4b\xae\xf7\xde\
\x28\x78\x8b\xb2\x3c\x9b\x40\x57\x99\xfb\xa4\xe1\x41\x28\xbe\xd0\
\x9d\x45\x60\x4b\xf1\xfc\xf7\xbb\xde\x6c\x77\xf4\x56\xc4\xd6\x34\
\x81\x1c\x2f\xe5\xba\x2f\x2c\x04\x1e\x04\x0b\x72\x5f\xb8\x74\xc3\
\x94\xfb\xab\x29\xb9\xb5\x17\x06\x0f\x82\x6d\x89\xd3\x54\x09\x98\
\x32\xe0\xf6\x63\xee\x7a\x61\xd5\x90\x1b\xf4\xc0\x89\x2c\xce\xe7\
\x8e\xe4\x15\x7d\xcf\x8f\xaa\x35\x89\xd5\xf1\x08\xd8\x32\xe2\x2d\
\x1a\x1e\x84\x8c\x94\xb6\x4b\xa0\x28\x93\xad\x6a\x2a\x04\xaa\x02\
\xed\x95\x45\x87\x80\xbb\x45\xdc\x87\x8c\xf7\xef\x55\x53\xa6\x2c\
\xbb\x84\x01\xa0\x2e\x92\xa3\x53\xbc\x44\xcd\xd1\x63\xc9\x0b\x4e\
\x45\xa9\x0e\x80\xcb\x32\xd1\x6e\xa4\xd2\x7f\x10\x6c\x08\xc4\x47\
\x2e\x83\xdb\x32\xfc\x58\xa9\x11\xb0\x64\x68\xda\x36\x50\x11\xc3\
\xd1\xc3\x38\x95\x09\x00\x80\x31\x7a\xa2\x54\x31\x50\x12\xc5\x9b\
\xd4\xe0\x55\xb4\x92\x47\x60\x94\x2a\x01\x17\xad\xb4\x24\x09\x0c\
\x7d\x1b\x7c\xc4\x87\x88\x10\xff\x70\xe7\xf3\xd7\x45\x0b\x45\xe0\
\x13\x7e\xe2\x6b\x8c\x5e\xda\xd8\xc1\x55\x10\x01\x70\x2c\xfc\x71\
\x63\xae\xcf\x1e\x32\xae\x1c\xce\xb5\xb9\x21\x6a\x8c\x8d\x18\x3d\
\x4b\x54\x0c\x65\x30\xe6\xc9\x1e\xec\x58\xb6\x6d\xec\xcd\xfd\x27\
\x27\x7e\x09\x43\x7c\x0e\x20\x70\x85\x6f\x89\x3b\xe1\x04\x81\xa0\
\x11\x00\xe0\x6b\x2c\x8e\x48\x02\xde\x14\xac\x24\x0c\xe1\x2f\x2e\
\x9a\x42\x60\x35\x55\x02\x2e\xda\xd0\xc0\xb5\x28\xae\xc1\x4a\x0d\
\xde\xc2\x9a\x28\x5d\x6b\x0f\xc7\x06\x1e\x70\x2e\xd8\xd4\x53\x1b\
\x01\x17\xe9\x1c\x0f\xda\x53\x32\x30\x03\x49\xa9\xf6\xb4\x3c\x03\
\x07\x13\xed\x47\x33\xed\x87\xd3\x0c\x1c\xcf\xb5\x5f\x50\x80\xda\
\xaf\x68\x32\x70\x49\xa5\xfd\x9a\x0e\xd4\x7e\x51\x09\x6a\xbf\xaa\
\xcd\xc0\x65\xb5\xf6\xeb\x7a\xd7\x17\x34\x3e\x58\x38\xaa\xf9\xc9\
\x06\x4c\xe9\xd1\x2a\xd3\xcf\x76\x8e\x2e\xf8\xe1\x32\x9c\x43\x69\
\x7d\xba\xf5\x74\x21\x8f\xd7\x41\x3e\xf0\x56\x12\x7e\xbe\xff\x0f\
\x0e\x99\x34\x8b\x18\x98\x66\xb1\x00\x00\x00\x25\x74\x45\x58\x74\
\x64\x61\x74\x65\x3a\x63\x72\x65\x61\x74\x65\x00\x32\x30\x31\x36\
\x2d\x30\x37\x2d\x30\x33\x54\x31\x31\x3a\x33\x32\x3a\x32\x30\x2b\
\x30\x32\x3a\x30\x30\x5e\xdc\x7b\xcf\x00\x00\x00\x25\x74\x45\x58\
\x74\x64\x61\x74\x65\x3a\x6d\x6f\x64\x69\x66\x79\x00\x32\x30\x31\
\x36\x2d\x30\x37\x2d\x30\x33\x54\x31\x31\x3a\x33\x32\x3a\x32\x30\
\x2b\x30\x32\x3a\x30\x30\x2f\x81\xc3\x73\x00\x00\x00\x19\x74\x45\
\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\
\x6e\x6b\x73\x63\x61\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\
\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x04\x4b\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x40\x00\x00\x00\x40\x08\x04\x00\x00\x00\x00\x60\xb9\x55\
\x00\x00\x00\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\
\x00\x00\x00\x20\x63\x48\x52\x4d\x00\x00\x7a\x26\x00\x00\x80\x84\
\x00\x00\xfa\x00\x00\x00\x80\xe8\x00\x00\x75\x30\x00\x00\xea\x60\
\x00\x00\x3a\x98\x00\x00\x17\x70\x9c\xba\x51\x3c\x00\x00\x00\x02\
\x62\x4b\x47\x44\x00\x00\xaa\x8d\x23\x32\x00\x00\x00\x09\x70\x48\
\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\x01\x42\x28\x9b\x78\x00\
\x00\x00\x07\x74\x49\x4d\x45\x07\xe0\x07\x03\x0b\x20\x31\xb2\x0a\
\x09\xa5\x00\x00\x03\x19\x49\x44\x41\x54\x68\xde\xed\x99\x4d\x4b\
\x1b\x51\x14\x86\xdf\x99\x94\x96\x86\x40\x37\x55\xa9\xa1\x08\x82\
\xd4\x44\xed\xc2\x52\xaa\xd5\xb6\xda\x4d\x11\x82\x05\xe9\x2f\x90\
\x2c\xf2\x03\xb2\x77\xdd\x8d\xd0\x9f\x20\xb8\x71\x27\x2e\xb2\x1c\
\xd3\xd6\x45\x21\xb8\x14\x84\x82\x20\x25\x51\xd3\x2e\xda\x18\x25\
\x71\xe1\xdb\xc5\x38\x99\xd1\x44\xef\x87\x33\xbd\x59\xf4\xdc\x4d\
\x3e\xce\xdc\xf7\x99\x3b\x67\xee\xc7\x39\x16\xa1\x68\x09\xbc\xc5\
\x20\xfa\x5b\x0d\xa8\xb4\xda\x1e\x1c\xd4\x15\xfb\xa3\x7c\x4b\x32\
\xc7\x02\x1b\xbc\xc9\x1a\x2c\x30\xc7\xa4\x7c\xaf\x72\x6e\x71\xe6\
\x59\xe2\x39\x65\xed\x9c\x25\xe6\x19\x0f\x07\x20\xc6\x2c\xcb\xd2\
\xd2\x41\x2b\x33\xcb\xd8\x6d\x01\x32\xdc\xd1\x12\xf7\x6c\x87\x19\
\x7d\x80\x01\x16\x6f\x25\xee\x59\x91\x03\x3a\x00\xd3\xac\x86\x22\
\x4f\x92\x55\x4e\xab\x02\x2c\xb2\x19\x9a\x3c\x49\x36\xb9\x28\x0f\
\x10\xe3\x72\xa8\xe2\x9e\x2d\x77\x0a\xc9\x4e\xf2\xeb\x91\xc8\x93\
\xe4\x7a\x3b\x42\x3b\x40\x34\x77\xef\x8f\x82\x00\x60\x31\x52\x79\
\x92\x57\x63\xe1\x6a\xe4\x87\x1b\x7a\x9d\xac\x79\xf9\x8d\xb0\x02\
\x8b\xd1\x00\x4a\xe8\x51\x5d\x9b\x34\xec\x27\x9e\x63\xdf\xfb\x62\
\x07\xfe\x58\xf9\x27\xf2\x40\x0f\x56\x3a\xad\x86\x99\xc8\x07\x3f\
\x68\x99\xab\x31\x10\xd3\x9a\xf3\x8f\xf9\x9e\x1f\x78\xa2\x71\xe5\
\x8e\xf7\x42\x7a\x00\x59\x2d\x79\x37\x9c\x66\xb5\x10\xb2\x41\x80\
\xb8\xc6\x82\x7b\xcc\x29\x82\x63\x1c\xd3\x44\x28\xbb\xfb\x05\x17\
\x20\xaf\x7c\x79\x8d\x53\x04\x47\x59\x65\x95\xa3\x04\x67\x34\x10\
\xf2\x3e\x40\x49\x59\xfe\xe5\x85\x3c\x49\x6d\x84\x92\x07\x90\x54\
\xd8\x6c\xf9\xf2\x23\x81\xe5\xba\xca\x11\x82\x33\xac\x2b\xf5\x73\
\xce\xa4\x0b\x90\xd3\x92\x3f\xba\xf4\xeb\x11\x47\x08\xbe\x51\x44\
\xc8\xb9\x00\x05\x25\xf9\xc9\x0e\xf2\xba\x08\x05\x02\x4c\x08\x36\
\xda\x41\xfb\xc3\x49\x82\xe9\x0e\xf2\x2e\x42\x9a\xe0\x6b\x05\x84\
\x06\x13\xe0\xbc\x82\xfc\xc4\x0d\xf2\x7a\x08\xf3\x36\x06\x25\x67\
\xf0\x1a\xde\xe1\x1b\xd2\x70\xd0\x7b\xad\x4f\x2f\x1c\xa4\xf1\x05\
\x73\x38\x91\xec\x75\xd0\x46\xbf\x82\x7c\x0a\x0e\xfa\x6e\xf4\xeb\
\x83\x83\x14\xbe\x62\x4e\xf2\x88\xd6\x0f\xae\x4a\x0d\xfe\x0b\x82\
\x29\x1e\x4a\x0d\xeb\x21\x53\x04\x5f\xf1\x58\xc2\x77\x15\x74\x84\
\x4e\xbf\x95\xe4\x7d\x84\x69\x09\x04\x07\xdc\x15\xb8\x6c\x71\x88\
\xe0\x30\x0f\xa4\xe5\x49\xf2\x80\xc3\x04\x9f\xf0\xb3\xc0\x6f\x17\
\xac\x09\x5c\x46\x35\xe4\x7d\x84\x61\x81\x57\xed\x8e\x30\x4c\x1e\
\x00\xd8\xc5\xa3\x8b\xdd\x8b\x9c\x59\xad\x4f\x77\x45\xae\x36\x2a\
\x02\x8f\x8f\x98\x90\x94\x6d\xb7\x71\x7c\x12\x78\x54\x2c\x3a\x98\
\x95\xea\xec\x3e\x1a\x4a\x23\xd0\xc0\x3d\x09\xcf\x4d\xf1\x08\x44\
\x6b\x95\xff\x00\x36\xf6\x8c\x02\xec\x59\x4c\xe0\x97\x54\xb8\x44\
\x11\x84\x4d\x3c\xb4\x51\x87\x63\xec\xfe\x1d\xd4\x6d\x00\x1b\xc6\
\x00\x36\x00\x8b\x40\x12\x3f\x02\x73\xd7\x75\x16\xfe\x23\x20\x1e\
\xa3\x6c\x03\x28\x63\xdb\xc8\xfd\x6f\xa3\xec\x9d\x8e\xd7\x8c\x00\
\xac\x01\xb8\xc8\x0f\xc4\xf1\x5d\xb8\x33\x0a\xfb\x11\x54\x30\x84\
\x53\x6f\x04\x4e\xb1\x24\xec\xf4\x19\x00\x4b\xb2\x01\x4f\x85\x11\
\xb0\x84\x53\x00\x0a\xc7\xf3\x2d\x4e\x88\xf2\xbe\xad\x36\xce\x4d\
\x41\x6f\x6d\xc7\x73\xe3\x09\x0a\x10\x21\x65\x86\x65\xac\xd8\xa5\
\x49\xaa\x7d\x2c\xe0\x2c\x72\xf9\x33\x2c\xf8\xf2\xe8\xb6\x44\x65\
\x17\xa4\x6a\xbb\x20\x59\x6d\x3c\x5d\xdf\x05\x05\x0b\x2f\x71\x6d\
\xb4\x64\x03\x1a\x2f\x5a\x79\x13\xb4\xc1\xb2\x9d\x17\x92\x46\x0b\
\x97\x6e\x33\x5a\xba\xf5\x5b\x24\xc5\x6b\xcb\x74\xf9\xfe\x2f\x21\
\x1f\xb7\x29\xf7\x15\xb5\x2c\x00\x00\x00\x25\x74\x45\x58\x74\x64\
\x61\x74\x65\x3a\x63\x72\x65\x61\x74\x65\x00\x32\x30\x31\x36\x2d\
\x30\x37\x2d\x30\x33\x54\x31\x31\x3a\x33\x32\x3a\x34\x39\x2b\x30\
\x32\x3a\x30\x30\x0d\x2b\x37\x9b\x00\x00\x00\x25\x74\x45\x58\x74\
\x64\x61\x74\x65\x3a\x6d\x6f\x64\x69\x66\x79\x00\x32\x30\x31\x36\
\x2d\x30\x37\x2d\x30\x33\x54\x31\x31\x3a\x33\x32\x3a\x34\x39\x2b\
\x30\x32\x3a\x30\x30\x7c\x76\x8f\x27\x00\x00\x00\x19\x74\x45\x58\
\x74\x53\x6f\x66\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\
\x6b\x73\x63\x61\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\
\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x17\x22\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x02\x00\x00\x00\x02\x00\x08\x03\x00\x00\x00\xc3\xa6\x24\xc8\
\x00\x00\x00\x03\x73\x42\x49\x54\x08\x08\x08\xdb\xe1\x4f\xe0\x00\
\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\x01\
\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\
\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\x70\
\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x02\xb2\x50\x4c\x54\
\x45\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x9b\x1a\x28\xb4\x00\x00\x00\xe5\x74\x52\x4e\x53\x00\
\x01\x02\x03\x04\x05\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\
\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1f\x20\x21\x22\
\x23\x24\x25\x26\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f\x30\x31\x33\
\x34\x35\x36\x37\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f\x40\x41\x42\x43\
\x44\x46\x47\x48\x49\x4a\x4b\x4c\x4d\x4e\x51\x52\x53\x54\x55\x56\
\x57\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f\x60\x61\x62\x63\x64\x65\x67\
\x69\x6a\x6b\x6c\x6e\x6f\x70\x72\x73\x75\x77\x78\x79\x7a\x7b\x7c\
\x7d\x7e\x7f\x80\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\
\x8e\x8f\x90\x91\x93\x94\x95\x97\x98\x99\x9b\x9c\x9d\x9e\x9f\xa0\
\xa1\xa2\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\
\xb3\xb4\xb6\xb8\xb9\xba\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\
\xc7\xc8\xca\xcb\xcc\xce\xcf\xd0\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\
\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xea\
\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\
\xfb\xfc\xfd\xfe\x52\x4b\xf7\x73\x00\x00\x12\xf1\x49\x44\x41\x54\
\x78\xda\xed\x9d\xfd\x7f\x15\xd5\x9d\xc7\xcf\xcd\x33\x41\xe8\x42\
\x0c\x66\x61\x93\xa0\x74\x2d\xa0\x11\x0a\x2e\x29\xd9\x44\x2b\xdd\
\x10\x31\x26\x86\x4a\x8b\xd4\x06\x5b\xed\x12\x71\x89\x2b\x96\x86\
\x44\xb3\x66\x5b\x21\xed\x6e\xd2\xd4\x96\xb6\x71\x8b\xf2\xb0\x0d\
\x96\xdd\x26\xb6\x98\x36\xf6\x4a\x23\x4f\x49\x29\x2d\x5d\x4c\x14\
\x0a\x0d\x60\x82\x77\xfe\x8f\xfd\xa1\xdb\xd7\x16\x7a\xcf\x99\xb9\
\xb9\x67\x9c\x33\x67\xde\xef\xdf\x9d\x7c\xf8\x7e\xde\xde\x7b\xe6\
\xcc\xc3\x15\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x22\xbb\
\xb4\x6c\x75\x75\x43\x63\xd3\xf6\x1d\x91\x65\x7b\x53\x63\x43\xf5\
\xea\xb2\xd2\xec\x88\x55\x5f\x58\xb1\xb9\xe3\xe0\x89\x29\x07\xfe\
\x8f\xa9\x13\x07\x3b\x36\x57\x14\x46\xa2\xfc\xa2\x0d\xdd\xa3\x34\
\x9e\x9c\xd1\xee\x0d\x45\x56\x97\x9f\x57\xdb\x39\x4c\xcd\x6a\x86\
\x3b\x6b\xf3\xec\x6c\x3f\x56\xd9\x73\x91\x7e\xbd\x70\xb1\xa7\x32\
\x66\x5d\xfd\x8b\x5a\x4e\xd3\xac\x77\x4e\xb7\x2c\xb2\xaa\xfe\xea\
\x7e\x3a\x4d\x95\xfe\x6a\x5b\xda\xcf\xa8\x1b\xa2\xce\xe9\x30\x54\
\x97\x61\x41\xfd\x59\x1b\x59\xf7\x4d\x7f\x45\xb8\x31\x2b\xec\xfd\
\xaf\x1b\xa1\xc6\x74\x18\x59\x17\xea\xfa\x8b\x0f\x50\x61\xba\x1c\
\x28\x0e\xef\x5e\x6f\xf3\x04\xfd\xa5\xcf\x44\x73\x48\x77\x8a\xab\
\xf8\xf2\xd7\xb5\x14\xa8\x0a\x61\xfd\x99\x6d\x09\x9a\xd3\x45\xa2\
\x2d\x33\x6c\xfd\x2f\x18\xa0\x36\x9d\x0c\x2c\x08\x57\xff\x6b\xc7\
\xe8\x4c\x2f\x63\x6b\xc3\x74\xee\xdf\xce\xc7\xbf\xfe\xaf\x81\xf6\
\xd0\xec\x09\xe4\x1f\xa2\x2e\x3f\x38\x94\x1f\x8e\xfe\x0b\x06\xe9\
\xca\x1f\x06\x0b\x42\xb1\xf9\x73\x8c\xa6\xfc\xe2\x58\x08\x36\x85\
\x96\x9e\xa5\x27\xff\x38\xbb\xd4\xf4\xfe\x57\x9d\xa7\x25\x3f\x39\
\xbf\xca\xf0\xff\xff\xe9\xdf\x6f\x03\x8c\xfe\x0c\x28\xe6\xf3\xdf\
\xff\x6f\x01\x83\xd7\x01\x05\xac\xff\x3e\x8c\x95\xa0\xb1\xe7\x02\
\xf9\xa9\x9f\xff\x8d\xc7\xfb\x7a\x5a\xa3\xfb\x60\x48\x6b\x4f\x5f\
\x7c\x3c\xf5\xb3\x41\x43\xf7\x03\xb2\x52\xd9\xff\xb9\xb4\x77\x4b\
\x7d\x79\x49\x8e\x00\x91\x53\x52\x5e\xbf\x65\xef\xa5\x54\x76\x84\
\xcc\xdc\x13\x6c\xf7\xfc\x0f\x38\xd7\x55\x43\xf5\x37\x6a\x50\xd3\
\x75\xce\xf3\x00\xdb\x8d\xbc\xfe\xe3\x71\xff\xff\x78\x7b\x79\x06\
\x7d\x27\x23\xa3\xbc\xfd\xb8\xc7\xeb\x02\x06\x5e\x19\x5a\xe0\xe9\
\xfa\x5f\xa2\x77\x09\x45\xab\x58\xd2\xeb\xe9\xff\xa3\x31\xe3\xae\
\x0e\x67\x7a\xba\xfe\x7f\x78\x05\x15\xbb\xb1\xe2\xb0\xa7\xfb\x03\
\x4c\xbb\x43\xa4\xcd\x43\xe8\x78\x35\xf5\x7a\xa1\x3a\xee\x61\x98\
\x6d\x66\x65\xae\x72\xff\xe0\x3a\xb3\x89\xaf\x7e\xaf\x8b\x81\x4d\
\x67\xdc\xbf\x4c\x8d\xba\x4f\x30\xdb\xf5\xfe\xcf\xc9\xe6\x5c\x8a\
\xf5\x4e\x6e\xf3\xa4\xdb\x44\x87\x4d\xba\x57\xb8\xd9\x75\xd1\x52\
\x49\xa9\xa9\x51\xe9\xba\xa8\x6e\x36\x27\x6c\xb1\xdb\xfd\xff\xf1\
\x52\x1a\x4d\x95\x52\xb7\x95\xc0\x84\x39\x17\x05\xdc\x9e\xff\xd9\
\x37\x93\x3e\x53\x67\xe6\x7e\x97\xb1\x1e\x30\x25\xe9\x3a\x97\xa0\
\x2d\x31\xda\x9c\x0e\xb1\x16\x97\xc1\x1a\xf2\xdc\x60\x96\xfa\xf9\
\xcf\xcb\xeb\xa9\x72\xba\xac\xbf\xac\x1c\xed\x88\x19\xd7\x04\x36\
\xaa\xfb\xbf\x9b\x1e\xa7\xcf\xdd\x6a\x03\x36\x1a\x71\xd2\xaa\x3e\
\x05\xe4\xff\xff\xf4\x3e\x03\xd4\xa7\x82\x26\x6c\xad\xd4\x29\x23\
\xee\xa4\xc3\xf4\x50\xaf\x03\xea\x0c\x48\xa8\x7c\xff\xcb\x3e\xd6\
\x7f\xe9\xae\x04\x95\xe7\x02\x43\xc1\x07\xac\x56\x9e\xff\x73\xfe\
\x97\xfe\xd9\xa0\x72\x3f\x20\xf8\xcb\x2b\xaa\xf7\x7f\x8d\xb1\xff\
\xa3\x63\x47\x48\xb5\x27\xd8\x1f\x74\xba\x45\xaa\xfd\x7f\xf6\x7f\
\xb5\x50\xa9\xba\x2e\x10\xf4\xfb\x04\x5b\xc2\xb1\x59\x1d\x6e\x54\
\x97\x5a\x5a\x02\x5e\xa2\x28\xde\xff\x79\x86\xeb\x7f\x9a\xc8\x55\
\x5c\x1d\x3e\x1d\xec\x32\xbb\x52\xe1\xe6\x26\x9a\xd3\xc5\x26\xc5\
\x98\x83\xfd\x9e\xed\x51\x9c\x01\x70\xff\x87\x36\x32\x14\x67\x02\
\x3d\x41\x06\xcb\xbb\x68\xf2\x09\x8a\x45\x28\x4e\xb6\x2f\x06\xf9\
\x76\xf9\x5a\xc5\xfd\x9f\xb4\xa6\x13\xc5\x9d\xa2\xb5\x01\xc6\xea\
\x94\xdf\xb2\xc6\xfd\xbf\x5a\x59\x21\xbf\xe9\xb2\x33\xc0\x58\xf2\
\xeb\x40\xbd\x74\xa6\x97\x5e\xf9\x15\xa1\xe0\x42\x15\xc9\x3f\x97\
\x78\xfe\x43\x33\x4b\xe4\xb3\x0e\xee\x77\x86\x36\xc8\x9f\xff\xa2\
\x31\xdd\xc8\x9f\x1a\xdb\x10\x58\xa6\xee\x70\x3d\xbd\x18\x6e\xe4\
\xcf\xde\x76\x07\x96\x69\x54\x9a\xa9\x9c\xc2\x74\x53\x2e\x1d\xf6\
\x68\x50\x91\x0a\xe5\xcf\x7f\xb3\x09\xa4\x7f\x33\x48\xfe\xf4\x78\
\x50\xbf\x38\x59\x21\x4d\xd4\x45\x5f\xfa\xe9\x92\x8e\xbb\x22\xa0\
\x44\x9b\xa5\x89\x6a\xa8\x4b\x3f\x35\xd2\x71\x6f\x0e\x28\x51\x87\
\xf4\xfd\x2f\xbc\xff\xc3\x07\x72\xa4\x6f\x91\xe9\x08\x28\xd1\x41\
\x59\xa0\xbd\xb4\xe5\x07\x7b\x65\xf3\x3e\x18\x50\xa0\x13\xb2\x40\
\x5b\x28\xcb\x0f\xb6\xc8\xe6\x7d\x22\x98\x3c\xd9\xd2\xdf\x7f\xaf\
\xa7\x2c\x3f\xa8\x97\xcd\x7b\x2a\x98\x27\xc5\x4b\xd9\x05\x30\x65\
\x27\x20\x98\x9b\x6f\xcb\xa4\x79\x4a\x28\xcb\x0f\x4a\xa4\x03\x2f\
\x0b\x24\xcf\x6a\x69\x1e\x4e\x02\xfc\x39\x0d\x90\x0e\x7c\x75\x20\
\x79\xa4\x77\xa9\x8c\xd3\x95\x3f\x8c\x9b\x75\xf7\x55\x83\xf4\x6e\
\x40\xaa\xf2\x07\xe9\x9d\x81\x0d\x81\xc4\x69\x94\xc5\xe9\xa3\x2a\
\x7f\xe8\x93\x4d\xbc\x31\x90\x38\x4d\x46\xde\xa7\x6a\x33\xd2\x7b\
\xb0\x9b\x02\x89\xb3\x5d\x16\xa7\x95\xaa\xfc\xa1\x55\x36\xf1\xed\
\x81\xc4\xd9\x21\x8b\xb3\x83\xaa\x22\x31\x71\x04\x40\x00\x04\x40\
\x00\x04\x40\x00\x04\x40\x00\x04\x40\x00\x04\x40\x80\x30\x09\x50\
\x58\x51\xfb\xc8\x13\xff\xb4\x73\xd7\xae\x9d\xdb\x9e\x78\xa4\xb6\
\xa2\x10\x01\x22\x23\xc0\xec\xfb\x9b\x5f\x3a\xf2\x17\x3f\x71\x7b\
\xfe\x8d\x97\x9a\xef\x9f\x8d\x00\x96\x0b\x90\xbf\xe6\xf9\xc1\x6b\
\xf2\xa7\xec\xae\x0d\x3e\xbf\x26\x1f\x01\x6c\x15\x20\xff\xb3\xaf\
\xb9\xfe\x0e\x87\xe3\x4c\xbe\xf6\xd9\x7c\x04\xb0\x4f\x80\x8c\x4f\
\x7e\xdb\xf3\x8f\x73\x5e\xfa\xf6\x27\x33\x10\xc0\x2a\x01\xf2\xb7\
\x9e\x71\x52\xe2\xcc\xd6\x7c\x04\xb0\x46\x80\xd9\xcf\xa4\xfe\x0b\
\xcd\xce\xf8\x33\xb3\x11\xc0\x0a\x01\xe6\xec\xbc\x30\xbd\xdf\x69\
\xbf\xb0\x73\x0e\x02\x84\x5e\x80\x58\xe3\xb8\x33\x6d\xc6\x1b\x63\
\x08\x10\x6e\x01\xca\x7e\xea\xa4\xc5\x4f\xcb\x10\x20\xc4\x02\xdc\
\xf4\xe2\x35\x27\x4d\xae\xbd\x78\x13\x02\x84\x55\x80\x65\x27\x1d\
\x0d\x9c\x5c\x86\x00\xe1\x14\xe0\xf1\xab\x8e\x16\xae\x3e\x8e\x00\
\x21\x14\x60\xd6\x0f\x1c\x6d\xfc\x60\x16\x02\x84\x4d\x80\xdb\x4f\
\x39\x1a\x39\x75\x3b\x02\x84\x4b\x80\x95\xe3\x8e\x56\xc6\x57\x22\
\x40\x98\x04\x58\xf3\x07\x47\x33\x7f\x58\x83\x00\xe1\x11\xe0\xd3\
\x93\x8e\x76\x26\x3f\x8d\x00\x61\x11\xa0\x31\xe1\xf8\x40\xa2\x11\
\x01\xc2\x21\xc0\x83\xd7\x1c\x5f\xb8\xf6\x20\x02\x84\x41\x80\xaa\
\xab\x8e\x4f\x5c\xad\x42\x00\xf3\x05\x58\xf6\x7b\xc7\x37\x7e\xbf\
\x0c\x01\x4c\x17\x60\xe1\x39\xc7\x47\xce\x2d\x44\x00\xb3\x05\xc8\
\x19\x72\x7c\x65\x28\x07\x01\x8c\x16\xe0\xeb\x8e\xcf\x7c\x1d\x01\
\x4c\x16\xe0\xa1\x14\xaa\xbc\x32\x3a\xf0\xf2\xd7\x9e\x7a\xea\x6b\
\x2f\x0f\x8c\x5e\x49\xe1\x3f\x7b\x08\x01\xcc\x15\xe0\x56\xaf\x0b\
\xc0\xa3\xad\xab\xae\xbb\xdd\x6b\xce\xaa\xd6\xa3\x5e\x17\x82\xb7\
\x22\x80\xa9\x02\x64\xfe\xcc\xd3\xe9\x7c\xff\x93\xb7\x25\xfb\xaf\
\x6f\x7b\xb2\xdf\xd3\x06\xc2\xcf\x32\x11\xc0\x50\x01\xbe\xe4\xe5\
\x4e\xcf\x6d\x05\xf2\x03\x14\x6c\xf3\x72\xff\xe8\x97\x10\xc0\x4c\
\x01\xe6\xb9\xb7\xf7\xfe\x57\xe7\xaa\x8f\x31\xf7\xab\xef\xbb\x3b\
\x34\x0f\x01\x8c\x14\xe0\x3b\xae\xbb\xf9\x7b\x3c\xbc\xc5\xb8\x64\
\x8f\xeb\x95\x84\xef\x20\x80\x89\x02\x54\xb8\xf5\xf6\x86\xc7\x7d\
\xbc\x65\x6f\xb8\x1d\xa9\x02\x01\xcc\x8b\x13\x8b\xbb\xb4\xf6\x2d\
\xcf\x7b\x38\x39\xdf\x72\x39\x54\x3c\xc6\xc4\x8d\x8b\x53\xaf\xee\
\xec\x83\xad\xa9\x1c\x6c\xeb\x07\xea\xa3\xd5\x33\x71\xe3\xe2\xbc\
\xa9\x3e\x7b\x4f\xf1\x85\xda\xd5\xea\x1d\x85\x37\x99\xb8\x69\x71\
\xee\x57\xdf\xdb\xff\xb1\x54\x8f\xf7\x31\xf5\x33\x05\xf7\x33\x71\
\xc3\xe2\x28\x17\x6e\xbf\xbd\x25\xf5\x03\xde\xf2\x5b\xe5\x82\x92\
\x89\x9b\x15\xe7\x3e\x55\x5b\x97\x97\x4f\xe7\x90\xcb\x2f\xab\x8e\
\x79\x1f\x02\x18\x15\xe7\x15\x55\x59\xeb\xa7\x77\xcc\xf5\xaa\x63\
\xbe\x82\x00\x26\xc5\x99\xa3\xda\xbf\xfb\xca\x74\x8f\xfa\x15\xd5\
\x9e\xe2\x1c\x04\x30\x28\xce\x17\x15\x55\xbd\x3a\xed\x93\xf6\xd8\
\xab\x8a\xc3\x7e\x11\x01\x0c\x8a\x73\x44\x5e\xd4\x48\x1a\x2f\xfb\
\xc9\x1f\x91\x1f\xf7\x08\x02\x98\x13\x67\x91\xfe\x05\x80\xfb\x32\
\x60\x11\x02\x18\x13\xe7\x59\xc5\xd5\xfb\xb4\x76\x6d\x63\x8a\x3b\
\x0c\x9e\x45\x00\x63\xe2\x0c\xc8\x6b\xba\x27\xbd\x23\xdf\x23\x3f\
\xf2\x00\x02\x98\x12\x67\x86\xfc\x51\xc0\x1f\xa6\x7b\xec\x1f\xca\
\x1f\x16\x9c\x81\x00\x86\xc4\x59\x23\xbf\x04\x74\x67\xba\xc7\xbe\
\x53\x7e\x59\x68\x0d\x02\x18\x12\xe7\x39\x69\x47\xdf\x4b\xff\xe0\
\xdf\x93\x1e\xfc\x39\x04\x30\x24\x8e\xfc\x3a\xc0\x03\xe9\x1f\xfc\
\x01\x73\xae\x07\x20\x40\x72\x72\xa5\x37\xf3\x4e\xe4\xa5\x7f\xf4\
\xbc\x09\xe9\xed\xc5\xb9\x08\x60\x44\x9c\x3b\xa4\xff\x8f\xee\xd7\
\x71\xf8\xfd\xd2\xc3\xdf\x81\x00\x46\xc4\x91\x3f\x0e\xf4\x79\x1d\
\x87\xff\xbc\x31\x0f\x09\x21\x40\x72\x9e\x91\x9e\x03\x68\xf9\x25\
\xa0\x42\xe9\x79\xc0\x33\x08\x60\x44\x9c\x3d\xb2\x20\xaf\xeb\x39\
\xfe\xeb\xb2\xe3\xef\x41\x00\x23\xe2\xfc\xdc\xe7\x20\xd2\x7f\xe8\
\xcf\x11\xc0\x88\x38\xef\xf9\xba\x04\x50\x2c\x02\xde\x43\x00\x23\
\xe2\x48\xbf\xa3\x3f\xa5\xe7\xf8\x9f\x92\xae\x31\x10\xc0\x84\x38\
\x33\xfc\x3e\x4d\x93\x9f\x66\xce\x40\x00\x03\xe2\xcc\x93\xf6\x33\
\x57\xcf\x1f\x98\x2b\xfd\x03\xf3\x10\xc0\x80\x38\xb7\x49\xdf\xea\
\xa6\xeb\x2f\x48\xdf\x3b\x77\x1b\x02\x18\x10\x67\x99\x2c\xc7\xa8\
\xae\xbf\x30\x2a\xfb\x0b\xcb\x10\xc0\x80\x38\x9f\xf0\x79\x1b\x40\
\x71\xc7\xe1\x27\x10\xc0\x80\x38\x8b\x65\x39\xfe\x43\xd7\x5f\x78\
\x4b\xf6\x17\x96\x22\x80\x01\x71\xfe\x4a\x96\x63\x97\xae\xbf\xf0\
\x8e\xec\x2f\xdc\x8c\x00\x26\xc4\xf9\x9d\x24\xc7\x63\x9a\x8e\x9f\
\x29\xdb\x68\x98\x8a\x21\x80\x09\x71\xfe\x4d\xb2\x4d\xa3\xeb\x24\
\xad\x48\xf6\x0f\xfd\x1d\x1b\x41\x46\xc4\xa9\x4a\x1e\xa3\x4f\xd7\
\xf1\xeb\x4c\x79\x4b\x00\x02\x48\x78\x29\xe9\xdd\x40\xda\x4e\xd2\
\xbf\x29\xfb\x87\xf6\x21\x80\x19\x71\x66\xff\x26\x49\x8a\x7f\xd4\
\x75\xf4\x98\x6c\x8d\xe1\xbc\x84\x00\x86\xc4\x49\x72\x5f\xf8\x8f\
\xb5\x2d\xd0\x3e\x2e\xdd\x09\x6e\x47\x00\x53\xe2\xec\xb8\xf1\xdd\
\x7e\x27\x17\x6a\x3b\x76\xaf\x54\x80\xcd\x08\x60\x4c\x9c\xca\xeb\
\xb6\x6b\x13\x9d\xfa\xae\xd3\x2d\x96\x3f\x19\x52\x82\x00\xe6\xc4\
\x99\xf9\x67\x27\x83\xa7\xef\xd5\x78\x60\xf9\x07\xc0\x09\x81\x00\
\x26\xbd\x29\x74\xf9\xf3\x47\x27\x1d\xc7\x79\x77\xdf\xa3\x3a\x7f\
\xf0\xfd\xe3\xf2\x0f\x80\xdd\x08\x60\xd6\xbb\x82\x85\x88\xdd\x5c\
\xa2\xf9\x61\x8d\x19\xc7\xe4\x8f\x07\xaf\x43\x00\xd3\x04\xd0\xcf\
\x37\xe4\xfd\x4f\xce\x44\x00\xeb\x05\x50\xbd\x7e\xf2\xc7\x02\x01\
\x6c\x17\x60\xd9\x79\x85\x00\xff\x8c\x00\xb6\x0b\xb0\x78\x4c\xd1\
\xff\x54\x09\x02\x58\x2e\xc0\xad\x67\x55\x6f\x8a\xdc\x23\x10\xc0\
\x6e\x01\xfe\xe6\xb4\xaa\xff\xc4\x1d\x08\x60\xb7\x00\xf3\x8e\x2b\
\x5f\x17\xfe\x9f\x4c\xdc\x6e\x01\xe6\xb8\xfc\x00\xc9\x6a\x26\x6e\
\xb5\x00\xb3\x06\xd5\xfd\xbf\xce\xc4\xad\x16\xe0\xe6\x23\xea\xfe\
\xf9\xc1\x08\xbb\x05\xb8\x7d\xc4\xa5\xff\xd7\x98\xb8\xcd\x02\xdc\
\x73\xde\xa5\xff\x4b\xc5\x4c\xdc\x62\x01\x1e\x9d\x74\xe9\xdf\x79\
\x5c\x30\x71\x6b\x05\x88\xb5\xba\xd5\xef\xfc\x77\x0c\x01\xac\x15\
\x20\xb7\xd7\xb5\xff\x89\xa0\x7e\x40\x1e\x01\x3e\x84\xe5\xff\x4f\
\x5c\xfb\x77\x9a\x04\x02\xd8\x2a\x80\xeb\xf2\xdf\x71\x9c\xd7\x32\
\x10\xc0\x56\x01\xee\x79\xcf\xbd\xff\xb7\x3f\x22\x10\xc0\x52\x01\
\xdc\x97\xff\x8e\x33\x7e\xab\x40\x00\x3b\x05\x88\xb5\xb9\xd7\xef\
\x4c\x56\x0a\x04\xb0\x53\x80\xdc\x97\x3d\xf4\xaf\xeb\xd5\x83\x08\
\x60\x1c\x85\x47\xbc\xf4\xff\x2f\x02\x01\xec\x14\x60\xf1\xaf\xbc\
\xf4\xdf\x1d\x43\x00\x3b\x05\xb8\xef\x82\x97\xfe\xff\x3d\xd8\xfe\
\x11\xc0\x37\x1e\x9b\xf2\xd2\xff\x37\x02\xee\x1f\x01\x3e\xf4\x7f\
\xc9\x75\x74\xc6\x04\x02\x58\x29\x40\x9b\xa7\xfe\x77\x19\x2c\x2a\
\x02\xa4\xc3\x0b\x9e\xfa\x6f\x11\x08\x60\xa7\x00\xbb\xbc\xd4\x3f\
\xf9\xa8\x40\x00\x3b\x05\xe8\xf4\xd2\xff\x85\x7b\x05\x02\xd8\x29\
\xc0\xd3\x5e\xfa\xff\xd5\x62\x81\x00\x76\x0a\xf0\x70\xc2\x43\xff\
\x83\xf3\x04\x02\xd8\x29\xc0\xdf\x5f\xf5\xd0\xff\xab\x79\x02\x01\
\xec\x14\xe0\x6f\xdf\xf5\xd0\xff\xbf\x66\x08\x04\xb0\x53\x80\x9b\
\x4e\xb9\xd7\x7f\xed\x71\x83\x02\x23\x80\x5e\xbe\xe9\xde\xff\xa5\
\xb5\x02\x01\x6c\x15\xe0\x01\xf7\xfe\x7f\x73\x97\x40\x00\x5b\x05\
\x28\x38\xe7\xbe\xfc\x2f\x12\x08\x60\xad\x00\xaf\xb8\xf6\xdf\x9b\
\x27\x10\xc0\x5a\x01\x1a\x5c\xfb\xdf\x11\x13\x08\x60\xad\x00\x19\
\x6f\xbb\xd4\x7f\x65\xbd\x79\xa1\x11\x40\x1f\xeb\x5d\xfa\xff\x9f\
\x95\x02\x01\x2c\x16\x20\xf6\x96\xba\xff\x53\xc5\x02\x01\x6c\x16\
\xa0\x4e\xdd\xff\x2f\x8b\x04\x02\xd8\x2c\x40\xec\x17\xca\xfe\x87\
\x0a\x04\x02\x58\x2d\xc0\xdf\x29\xfb\x3f\xf2\x11\x81\x00\x76\x0b\
\xb0\x53\xd5\xff\x7f\xcd\x14\x08\x60\xb9\x00\x6f\xaa\x5e\x02\x9e\
\x27\x10\xc0\x72\x01\xe6\x2b\xfa\x1f\xfb\x6b\x81\x00\xb6\x0b\xf0\
\x98\xe2\x15\xc0\x6b\x05\x02\x58\x2f\xc0\x7e\xb9\x00\x1d\x02\x01\
\xec\x17\x40\x7e\x1d\x70\x30\x1b\x01\xec\x17\x40\xfa\x73\xf0\xce\
\xc4\x42\x81\x00\xf6\x0b\x70\x8b\xfc\xf1\x5f\x81\x00\x11\x10\xe0\
\x2e\xa9\x00\x65\x08\x10\x05\x01\xfe\x41\x16\xfc\x98\x40\x80\x28\
\x08\xf0\x39\x59\xf0\x03\x08\x10\x09\x01\xb6\x85\xf2\x1c\x10\x01\
\x08\x8e\x00\x04\x47\x00\x82\x23\x00\xc1\x11\x80\xe0\x08\x40\x70\
\x04\x20\x38\x02\x10\x1c\x01\x08\x8e\x00\x04\x27\x0e\xc1\x99\x23\
\xc1\x99\x23\xc1\x99\x23\xc1\x99\x23\xc1\x99\x23\xc1\x99\x23\xc1\
\x99\x23\xc1\x99\x23\xc1\x99\x23\xc1\x99\x23\xc1\x99\x23\xc1\x99\
\x23\xc1\x99\x23\xc1\x99\x23\xc1\x99\x23\xc1\x99\x23\xc1\x99\x23\
\xc1\x99\x23\xc1\x99\x23\xc1\x99\x23\xc1\x99\x23\xc1\x99\x23\xc1\
\x99\x23\xc1\x99\x23\xc1\x99\x23\xc1\x99\x23\xc1\x99\x23\xc1\x99\
\x23\xc1\xed\x9c\xe3\x97\x65\xc1\xbf\x8c\x00\x91\x10\xa0\x49\x16\
\xbc\x09\x01\x22\x21\x80\xf4\x57\x43\xd7\x23\x40\x24\x04\x58\x29\
\x0b\xbe\x12\x01\x22\x21\x40\xc6\x3b\xc9\x73\xbf\x93\x81\x00\x91\
\x10\x40\x74\x25\xcf\xdd\x25\x10\x20\x1a\x02\xcc\x3f\x9f\x2c\xf6\
\xf9\xf9\x08\x10\x11\x01\xc4\x23\xc9\x62\x3f\x22\x10\x20\x2a\x02\
\x24\xfb\xe1\xb8\xfd\x02\x01\xa2\x23\xc0\xbc\xb3\x37\x86\x3e\x3b\
\x0f\x01\x22\x24\x80\x98\xdb\x7b\x7d\xe6\xde\xb9\x02\x01\xa2\x24\
\x80\x10\x0d\xe3\xff\x9f\x78\xbc\x21\x14\x91\x11\x40\xef\xd7\x40\
\xc7\xc9\x3f\xe6\x3d\xd9\x31\x4f\x20\x40\xf4\x04\x10\x42\x2c\xbc\
\xf7\x33\x9f\xb9\x77\x61\x68\xe2\x22\x40\xc4\x41\x00\x04\x40\x00\
\x04\x40\x00\x04\x40\x00\x04\x40\x00\x04\x40\x00\x04\x40\x00\x04\
\x40\x00\x04\x40\x00\x04\x20\x0e\x02\x20\x00\x02\x20\x00\x02\x20\
\x00\x02\x20\x00\x02\x20\x00\x02\x20\x00\x02\x20\x80\x0b\x73\x57\
\x3d\xf8\xc4\x13\x0f\xae\x9a\x8b\x00\x51\x14\x60\xfe\xd6\xfe\x6b\
\x7f\xcc\x7b\xad\x7f\xeb\x7c\x04\x88\x98\x00\xb3\x9e\xbb\xf2\xe7\
\x91\xaf\x3c\x37\x0b\x01\xa2\x24\x40\xcd\xd8\x8d\xa1\xc7\x6a\x10\
\x20\x3a\x02\x6c\xfb\xe0\x2f\x53\x7f\xb0\x0d\x01\xa2\x22\xc0\x8b\
\xc9\x73\xbf\x88\x00\xd1\x10\xa0\x51\x16\xbc\x11\x01\xa2\x20\xc0\
\x8a\xf7\x65\xc1\xdf\x5f\x81\x00\x11\x10\xe0\x88\x23\xe5\x08\x02\
\xd8\x2f\xc0\x43\x8e\x82\x87\x10\xc0\x7a\x01\xde\x52\x09\xf0\x16\
\x02\xd8\x2e\xc0\x47\x1d\x25\x1f\x45\x00\xcb\x05\x78\x4a\x2d\xc0\
\x53\x08\x60\xb9\x00\x7d\x6a\x01\xfa\x10\xc0\x72\x01\x8e\xaa\x05\
\x38\x8a\x00\x96\x0b\xf0\xae\x5a\x80\x77\x11\xc0\x6e\x01\x32\x1c\
\x17\x32\x10\xc0\xee\x4f\x80\x0b\xea\xfe\x2f\xf0\x09\x60\xb9\x00\
\x6f\xab\x05\x78\x1b\x01\x2c\x17\xe0\x47\x6a\x01\x7e\x84\x00\x96\
\x0b\xf0\xac\x5a\x80\x67\x11\xc0\x72\x01\xee\x54\x0b\x70\x27\x02\
\x58\x2e\x80\x38\xa5\xea\xff\x94\x40\x00\xdb\x05\x68\x54\x09\xd0\
\x88\x00\xd6\x0b\x90\xa9\xd8\x0b\x3c\x9a\x89\x00\xd6\x0b\x20\xd6\
\x24\x64\xc1\x13\x6b\x04\x02\xd8\x2f\x80\x78\x5a\x16\xfc\x69\x81\
\x00\x51\x10\x40\x7c\x37\x79\xee\xef\x0a\x04\x88\x86\x00\x99\xbb\
\x93\xc5\xde\x9d\x89\x00\x11\x11\x40\x88\x2f\x5c\xbd\x31\xf4\xd5\
\x2f\x98\x9f\x1a\x01\xf4\x51\xfa\xfd\xeb\x96\x82\x89\xef\x97\x0a\
\x04\x88\x92\x00\x42\x2c\xdf\xfd\xeb\x3f\x05\xfe\xf5\xee\xe5\xa1\
\x88\x8c\x00\x9a\xb9\xeb\xe1\x27\x5f\x78\xe1\xc9\x87\xef\x0a\x4b\
\x5e\x04\x88\x38\x08\x80\x00\x08\x80\x00\x08\x80\x00\x08\x80\x00\
\x08\x80\x00\x08\x80\x00\x81\x4e\x7c\xbb\x2c\x4e\x2b\x55\xf9\x43\
\xab\x6c\xe2\xdb\x03\x89\xd3\x24\x8b\xd3\x43\x55\xfe\xd0\x23\x9b\
\x78\x53\x20\x71\x1a\x43\xf9\x80\x65\x98\xe9\x33\xeb\x4e\xb6\x06\
\x59\x9c\x38\x55\xf9\x43\x5c\x36\xf1\x86\x40\xe2\x54\xcb\xe2\x8c\
\x53\x95\x3f\x8c\xcb\x26\x5e\x1d\x48\x9c\xd5\xd2\x7b\x2b\x73\xe8\
\xca\x0f\x72\xa4\x03\x5f\x1d\x48\x9e\x32\x69\x9e\x12\xca\xf2\x83\
\x12\xe9\xc0\xcb\x02\xc9\x53\x2a\xcd\x53\x4e\x59\x7e\x50\x2e\x1d\
\x78\x30\x77\xb3\x64\x4f\xc9\xf2\xd4\x53\x96\x1f\xd4\xcb\xe6\x3d\
\x95\x1d\x4c\xa0\x13\xb2\x40\x5b\x28\xcb\x0f\xb6\xc8\xe6\x7d\x22\
\xa0\x40\x07\x65\x81\xf6\x52\x96\x1f\xec\x95\xcd\xfb\x60\x40\x81\
\x3a\x64\x81\x2e\x71\x1a\xe0\xc7\x49\xc0\x25\xd9\xbc\x3b\x02\x4a\
\xb4\x59\xba\x28\xa9\xa1\x2e\xfd\xd4\x48\xc7\xbd\x39\xa0\x44\x15\
\xd2\x44\x5d\xd4\xa5\x9f\x2e\xe9\xb8\x2b\x02\x4a\x54\x28\x4d\x74\
\x2e\x83\xbe\x74\x93\x71\x4e\x3a\xee\xc2\xa0\x32\x8d\xb2\x13\x60\
\xc2\x2e\xc0\x68\x60\x99\xba\xa5\x99\xda\x29\x4c\x37\xed\xd2\x61\
\x77\x07\x96\x69\x83\x34\xd3\x71\x0a\xd3\xcd\x71\xe9\xb0\x37\x04\
\x96\xa9\x48\xfe\xae\x8d\x25\x34\xa6\x97\x25\xf2\x59\x17\x05\x97\
\x6a\x58\x1a\xaa\x97\xca\xf4\xd2\x2b\x1d\xf5\x70\x80\xa9\x3a\xa5\
\xa9\x12\x2b\xe8\x4c\x27\x2b\xa4\xef\xb5\x71\x3a\x03\x8c\x55\x2b\
\xff\x5c\x3a\x4c\x69\x3a\x39\x2c\x9f\x74\x6d\x80\xb1\xf2\x2e\xca\
\x73\x55\xd3\x9a\x3e\xaa\xe5\x73\xbe\x98\x17\x64\xb0\x1e\x79\xb0\
\x38\x9b\x41\xfa\x36\x81\xe2\xf2\x39\x07\x7b\x0f\x76\xa5\xe2\xa5\
\x8b\x9b\x28\x4e\x17\x9b\x14\x63\xae\x0c\x34\x59\xec\xb4\x3c\xd9\
\x99\x5c\x9a\xd3\x43\xee\x19\xf9\x94\x4f\xc7\x82\xcd\xd6\xa2\x70\
\xb3\x99\xea\xf4\xd0\xac\x18\x72\x4b\xc0\xd9\x16\x29\xb2\x4d\x56\
\xd2\x9d\x96\xef\xd9\x49\xc5\x90\x17\x05\x9d\xae\x5f\x11\x6e\xac\
\x94\xf6\xd2\xa7\x74\x4c\x31\xe2\x7e\x93\x4f\x50\x1c\x27\x3e\x93\
\xfe\xd2\x65\x66\x5c\x35\x61\x03\x4e\xb6\x87\x54\xf9\xf6\xc5\x68\
\x30\xcd\x65\xf6\x7e\xd5\x7c\x87\x0c\x48\x58\xa7\xfc\x05\x8e\x9d\
\x54\xe8\xdf\x2a\xdb\x71\xea\x4c\xd8\xa4\x18\x56\x46\x5c\x4f\x87\
\xe9\xb0\x5e\x39\xdc\x61\x23\x36\xdb\x36\x2a\x33\x5e\xbe\x9b\x16\
\xa7\xcf\xdd\x97\x95\xc3\xdd\x68\x44\xc8\xac\x11\xb5\x01\x7c\x06\
\x4c\xff\xff\x7f\x75\xff\x23\x59\x66\xc4\x5c\xe7\xf2\x6b\xac\x2d\
\xac\x04\xa7\xb7\xfe\x6b\x71\x19\xec\x3a\x53\x92\x1e\x70\x09\xba\
\x8f\xb3\xc1\xe9\x9c\xff\xed\x77\x19\xeb\x01\x63\xa2\x16\x4f\xb8\
\x44\x8d\xb3\x23\x94\xfa\xfe\x4f\xdc\x65\xa8\x13\xc5\xe6\x84\x6d\
\x76\xfb\x4d\xee\x31\x76\x85\x53\xa4\x72\xcc\x6d\xa6\x26\x5d\x6a\
\xc9\x1e\x76\x4b\x3b\xd9\xcc\xb5\xc1\x14\xc8\x6d\x9e\x74\x9b\xe8\
\x70\xb6\x49\x81\xab\x12\x6e\x79\x9d\x33\x9b\xb8\x43\xc4\xeb\xd6\
\xca\xa6\x33\xae\xe3\x4c\x54\x99\x95\xb9\xcd\x71\x27\xce\x5d\x62\
\x9e\xa8\x8e\x7b\x18\x66\x9b\x61\xa1\x33\x07\x3c\x84\x76\x0e\x73\
\xaf\xb0\x2b\x2b\x0e\x7b\x99\xe4\x80\x71\x3f\x71\xb6\x60\xcc\x4b\
\xee\x44\x2f\x4f\x8c\x28\x59\xd2\x9b\xf0\x32\xc7\xb1\x05\xe6\x45\
\x5f\xeb\x29\xb9\xe3\x1c\x6f\x2f\x67\x31\x90\xfc\xab\xbf\xbc\xfd\
\xb8\xb7\x19\x26\xd6\x9a\x98\xbf\xdd\xf1\xca\xb9\xae\x1a\xde\x21\
\x72\x03\x39\x35\x5d\xe7\x3c\x0f\xd0\xcc\x67\x6f\xb3\x0e\x39\xde\
\xb9\xb4\x77\x4b\x7d\x79\x09\x1a\x08\x21\x72\x4a\xca\xeb\xb7\xec\
\xbd\x94\xc2\xf0\x0e\x65\x99\xf9\x2f\xc9\x1f\x74\x52\x65\x3c\xde\
\xd7\xd3\xba\x23\xb2\xb4\xf6\xf4\xc5\xc7\x53\x1e\xda\x60\xbe\xa9\
\x2e\x17\x1c\x73\xc0\x77\x8e\x15\x98\xfb\x69\x56\x7c\x96\x7e\xfc\
\xe6\x6c\xb1\xc9\xdf\x67\x4b\xcf\xd3\x90\xbf\x9c\x5f\x6a\xf6\x8a\
\x66\x15\x06\xf8\xdb\xff\x2a\xd3\xd7\xb4\x4b\xf9\x16\xf0\xf3\xf3\
\x7f\xa9\x30\x9e\x62\x56\x82\xfe\xad\xff\x8a\x45\x08\x28\x18\xa4\
\x29\x7f\x18\x2c\x10\xa1\x20\xff\x10\x5d\xf9\xc1\xa1\x7c\x11\x12\
\xb2\xda\x13\xd4\xa5\x9b\x44\x7b\x96\x08\x0f\x6b\xc7\x68\x4c\x2f\
\x63\x6b\x45\xa8\x58\x30\x40\x67\x3a\x19\x58\x20\x42\x46\x66\x1b\
\x5f\x03\xfa\x3e\xfe\xdb\x32\x45\xf8\xa8\x1a\xa6\x39\x3d\x0c\x57\
\x89\x50\x92\xdd\x3c\x41\x79\xe9\x33\xd1\x9c\x2d\xc2\x4a\xf1\x01\
\xfa\x4b\x97\x03\xc5\x22\xcc\xac\x1b\xa1\xc2\x74\x18\x59\x27\x42\
\x4e\xd6\x46\x96\x02\xd3\xff\xf2\xdf\x98\x25\xc2\x4f\x46\xdd\x10\
\x55\x4e\x87\xa1\x3a\x6b\xee\x9e\xad\xee\xa7\xce\x54\xe9\xb7\xeb\
\x31\x9a\x45\x2d\xa7\xe9\xd4\x3b\xa7\x5b\x16\x09\xdb\x88\x55\xf6\
\x5c\xa4\x59\x2f\x5c\xec\xa9\xb4\xf4\x95\x1a\x79\xb5\x9d\xac\x08\
\xdd\xd6\x7d\x9d\xb5\x79\xc2\x66\x8a\x36\x74\x8f\x52\x73\x72\x46\
\xbb\x37\x14\x89\x28\x50\x58\xb1\xb9\xe3\xe0\x89\x29\x1a\xff\x13\
\x53\x27\x0e\x76\x6c\xae\x28\x14\xd1\x22\xbb\xb4\x6c\x75\x75\x43\
\x63\xd3\xf6\xe8\x3e\x18\xb2\xbd\xa9\xb1\xa1\x7a\x75\x59\x69\xb6\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xab\xf8\x5f\x4d\xde\xae\
\xd1\x51\x7b\x2e\x42\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\
\x82\
"
qt_resource_name = b"\
\x00\x08\
\x04\xd2\x59\x47\
\x00\x69\
\x00\x6e\x00\x66\x00\x6f\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x07\
\x07\xa7\x57\x87\
\x00\x61\
\x00\x64\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x08\
\x0c\x07\x58\x47\
\x00\x71\
\x00\x75\x00\x69\x00\x74\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0a\
\x06\xcb\x4f\xc7\
\x00\x72\
\x00\x65\x00\x6d\x00\x6f\x00\x76\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x08\
\x06\x38\x5a\xa7\
\x00\x68\
\x00\x6f\x00\x6d\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0c\
\x03\x76\xc2\x07\
\x00\x71\
\x00\x75\x00\x65\x00\x73\x00\x74\x00\x69\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x06\x00\x00\x00\x01\
\x00\x00\x00\x70\x00\x00\x00\x00\x00\x01\x00\x00\x16\x9e\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\x5a\x00\x00\x00\x00\x00\x01\x00\x00\x12\x4f\
\x00\x00\x00\x40\x00\x00\x00\x00\x00\x01\x00\x00\x0e\x0f\
\x00\x00\x00\x16\x00\x00\x00\x00\x00\x01\x00\x00\x03\xbe\
\x00\x00\x00\x2a\x00\x00\x00\x00\x00\x01\x00\x00\x08\xa9\
"
def qInitResources():
    QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)

def qCleanupResources():
    QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)

qInitResources()
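# Usage sketch (illustrative assumption, assumes a PyQt5-style binding):
# importing this module has already run qInitResources(), so the files listed
# in qt_resource_name resolve under Qt's ":/" scheme.
if __name__ == '__main__':
    from PyQt5.QtGui import QIcon
    icon = QIcon(":/info.png")  # likewise add/quit/remove/home/question.png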
| 62.074627
| 96
| 0.726697
| 12,028
| 49,908
| 3.013801
| 0.024526
| 0.181076
| 0.228662
| 0.273434
| 0.231062
| 0.228993
| 0.226179
| 0.221628
| 0.216083
| 0.213848
| 0
| 0.340584
| 0.017111
| 49,908
| 803
| 97
| 62.15193
| 0.398398
| 0.003026
| 0
| 0.13308
| 0
| 0.941698
| 0
| 0
| 0
| 1
| 0.000161
| 0
| 0
| 1
| 0.002535
| false
| 0
| 0.001267
| 0
| 0.003802
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
505805dbf791a59fd47436c59f3aa78a5e801d9e
| 83
|
py
|
Python
|
docs/test.py
|
tcgvn/dash-draggable
|
feb9702ee8f248c5405007d2262a988e8da7ef24
|
[
"MIT"
] | 21
|
2021-01-07T07:58:11.000Z
|
2022-02-21T02:08:24.000Z
|
docs/test.py
|
tcgvn/dash-draggable
|
feb9702ee8f248c5405007d2262a988e8da7ef24
|
[
"MIT"
] | 6
|
2021-03-25T07:45:32.000Z
|
2022-01-26T19:21:33.000Z
|
docs/test.py
|
tcgvn/dash-draggable
|
feb9702ee8f248c5405007d2262a988e8da7ef24
|
[
"MIT"
] | 7
|
2021-06-19T08:08:24.000Z
|
2022-01-27T21:40:35.000Z
|
import os

def ok(file=__file__):
    print(os.path.dirname(os.path.abspath(file)))
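# Quick demo (runs only when executed directly): prints the absolute
# directory containing this file; ok(file='/tmp/pkg/mod.py') would
# print '/tmp/pkg'.
if __name__ == '__main__':
    ok()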
| 20.75
| 49
| 0.722892
| 14
| 83
| 4
| 0.642857
| 0.214286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108434
| 83
| 4
| 49
| 20.75
| 0.756757
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0.333333
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
aca419d8218f1233116f930fecff190065ae42ad
| 733
|
py
|
Python
|
notebooks/utils_plot.py
|
pilyugin620/CalGal
|
5a8f64bcfdf14a28cf940816abf462c452a6ac4e
|
[
"MIT"
] | 1
|
2022-02-19T15:38:13.000Z
|
2022-02-19T15:38:13.000Z
|
notebooks/utils_plot.py
|
pilyugin620/HII_regions_catalog
|
5a8f64bcfdf14a28cf940816abf462c452a6ac4e
|
[
"MIT"
] | null | null | null |
notebooks/utils_plot.py
|
pilyugin620/HII_regions_catalog
|
5a8f64bcfdf14a28cf940816abf462c452a6ac4e
|
[
"MIT"
] | 1
|
2022-02-19T11:45:20.000Z
|
2022-02-19T11:45:20.000Z
|
from matplotlib.offsetbox import AnchoredText

def textonly(ax, txt, fontsize=14, loc=3, fontweight='bold', *args, **kwargs):
    at = AnchoredText(txt,
                      prop=dict(size=fontsize, fontweight=fontweight),
                      frameon=True,
                      loc=loc)
    at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
    ax.add_artist(at)
    return at

def textonly2(ax, txt, fontsize=14, loc=3, fontweight='bold', *args, **kwargs):
    at = AnchoredText(txt,
                      prop=dict(size=fontsize, fontweight=fontweight),
                      frameon=False,
                      loc=loc)
    # at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
    ax.add_artist(at)
    return at
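# Usage sketch (illustrative; assumes a standard matplotlib install). loc
# follows the legend location codes, e.g. 1 = upper right, 2 = upper left.
if __name__ == '__main__':
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    textonly(ax, "panel (a)", fontsize=12, loc=2)    # framed, rounded box
    textonly2(ax, "no frame here", loc=1)            # frameless variant
    plt.show()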
| 34.904762
| 79
| 0.585266
| 89
| 733
| 4.752809
| 0.404494
| 0.023641
| 0.061466
| 0.070922
| 0.827423
| 0.827423
| 0.827423
| 0.827423
| 0.827423
| 0.827423
| 0
| 0.024857
| 0.286494
| 733
| 20
| 80
| 36.65
| 0.783939
| 0.075034
| 0
| 0.625
| 0
| 0
| 0.056213
| 0.044379
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.0625
| 0
| 0.3125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c587f5b8b90b4a64f6469443450118c5a977b4ba
| 28
|
py
|
Python
|
redpanda/__init__.py
|
B3AU/redpanda
|
5a7be30dbc65968930b61154b84cf18fb874cc0e
|
[
"MIT"
] | null | null | null |
redpanda/__init__.py
|
B3AU/redpanda
|
5a7be30dbc65968930b61154b84cf18fb874cc0e
|
[
"MIT"
] | null | null | null |
redpanda/__init__.py
|
B3AU/redpanda
|
5a7be30dbc65968930b61154b84cf18fb874cc0e
|
[
"MIT"
] | null | null | null |
from redpanda.core import *
| 14
| 27
| 0.785714
| 4
| 28
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 28
| 1
| 28
| 28
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c5d21dca7387cc631d230dc2aff75139b81db29c
| 78
|
py
|
Python
|
ltr/client/__init__.py
|
tanjie123/hello-ltr
|
fe1ad1989e1bb17dfc8d1c09931480becf59766e
|
[
"Apache-2.0"
] | 109
|
2019-04-18T01:24:29.000Z
|
2022-03-12T17:37:30.000Z
|
ltr/client/__init__.py
|
tanjie123/hello-ltr
|
fe1ad1989e1bb17dfc8d1c09931480becf59766e
|
[
"Apache-2.0"
] | 63
|
2019-04-14T01:01:24.000Z
|
2022-03-03T20:48:41.000Z
|
ltr/client/__init__.py
|
tanjie123/hello-ltr
|
fe1ad1989e1bb17dfc8d1c09931480becf59766e
|
[
"Apache-2.0"
] | 41
|
2019-04-22T15:22:41.000Z
|
2022-02-26T00:03:02.000Z
|
from .elastic_client import ElasticClient
from .solr_client import SolrClient
| 26
| 41
| 0.871795
| 10
| 78
| 6.6
| 0.7
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 78
| 2
| 42
| 39
| 0.942857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
68006d4eecaa427b15b0daf87a02f00a7ba2b83e
| 275
|
py
|
Python
|
cakechat/utils/offense_detector/config.py
|
jacswork/cakechat
|
d46c3ef05be8adfeac5d48ff1cfcefb87ac1eb2e
|
[
"Apache-2.0"
] | 1
|
2018-12-30T07:52:37.000Z
|
2018-12-30T07:52:37.000Z
|
cakechat/utils/offense_detector/config.py
|
jacswork/cakechat
|
d46c3ef05be8adfeac5d48ff1cfcefb87ac1eb2e
|
[
"Apache-2.0"
] | 1
|
2020-04-03T19:25:17.000Z
|
2020-04-03T19:25:17.000Z
|
cakechat/utils/offense_detector/config.py
|
Spark3757/chatbot
|
4e8eae70af2d5b68564d86b7ea0dbec956ae676f
|
[
"Apache-2.0"
] | 1
|
2020-12-04T15:25:45.000Z
|
2020-12-04T15:25:45.000Z
|
import os
import pkg_resources
import cakechat.utils.offense_detector

OFFENSIVE_PHRASES_PATH = pkg_resources.resource_filename(cakechat.utils.offense_detector.__name__,
                                                         os.path.join('data', 'offensive_phrases.csv'))
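# Usage sketch (assumes the packaged CSV ships with the module, as the
# resource path above implies):
if __name__ == '__main__':
    with open(OFFENSIVE_PHRASES_PATH) as f:
        offensive_phrases = [line.strip() for line in f if line.strip()]
    print(len(offensive_phrases), 'phrases loaded')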
| 34.375
| 103
| 0.672727
| 29
| 275
| 5.965517
| 0.586207
| 0.138728
| 0.231214
| 0.323699
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.254545
| 275
| 7
| 104
| 39.285714
| 0.843902
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 0.076364
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
680f048546445a4c185b2411c27beea91abc96b5
| 7,181
|
py
|
Python
|
tests/potential/EamPotential/test__EamPotential__init.py
|
eragasa/pypospack
|
21cdecaf3b05c87acc532d992be2c04d85bfbc22
|
[
"MIT"
] | 4
|
2018-01-18T19:59:56.000Z
|
2020-08-25T11:56:52.000Z
|
tests/potential/RoseEosEmbeddingFunction/test__EamPotential__init.py
|
eragasa/pypospack
|
21cdecaf3b05c87acc532d992be2c04d85bfbc22
|
[
"MIT"
] | 1
|
2018-04-22T23:02:13.000Z
|
2018-04-22T23:02:13.000Z
|
tests/potential/EamPotential/test__EamPotential__init.py
|
eragasa/pypospack
|
21cdecaf3b05c87acc532d992be2c04d85bfbc22
|
[
"MIT"
] | 1
|
2019-09-14T07:04:42.000Z
|
2019-09-14T07:04:42.000Z
|
import pytest
from collections import OrderedDict
# The testing functions provided below are coarse-grained tests to demonstrate
# that functionality works. More robust unit-testing of arguments generally
# isn't developed except to catch bugs in development.
#
# Imports of classes and functions should be done explicitly and within the
# scope of each testing function. This serves two purposes: (1) provide an
# idiomatic example of how to use the class/functions. (2) provide explicitly
# the steps required to get expected behavior.
#
# Methods of classes and functions should test return values and ensure that
# arguments which are passed in are not mutated.
# Methods of classes should not only test return values and attributes mutated
# by the method, but also ensure that objects, including lists and dictionaries,
# are not mutated by the method (unless the method explicitly does so).
def test__import1():
    from pypospack.potential import EamPotential

def test_1sym____init____morse_exponential_universal():
    #<--- variables unique for the test ---------------------------------------
    symbols = ['Ni']
    func_pair = 'morse'
    func_density = 'eam_dens_exp'
    func_embedding = 'eam_embed_universal'

    #<--- setup of the code to conduct the test -------------------------------
    from pypospack.potential import EamPotential

    #<--- code being tested ---------------------------------------------------
    eam = EamPotential(
        symbols=symbols,
        func_pair=func_pair,
        func_density=func_density,
        func_embedding=func_embedding)

    #<--- setup testing -------------------------------------------------------
    # it isn't necessary to explicitly define the expected values of the
    # pair potentials, density functions, or embedding functions encapsulated
    # within EamEmbeddingFunction. Those classes should have their own suites
    # of tests developed.
    from pypospack.potential import MorsePotential
    from pypospack.potential import ExponentialDensityFunction
    from pypospack.potential import UniversalEmbeddingFunction

    pair = MorsePotential(symbols=symbols)
    dens = ExponentialDensityFunction(symbols=symbols)
    embed = UniversalEmbeddingFunction(symbols=symbols)
    p_names = ["p_{}".format(p) for p in pair.parameter_names]
    d_names = ["d_{}".format(p) for p in dens.parameter_names]
    e_names = ["e_{}".format(p) for p in embed.parameter_names]
    parameter_names = p_names + d_names + e_names

    #<--- setup testing attributes --------------------------------------------
    # All public attributes and properties should be tested for expected
    # behavior. This includes all attributes and properties which are
    # initialized to None after the class constructor is called.

    #<------ testing eam.obj_pair is inherited from the correct base class
    from pypospack.potential import PairPotential
    assert isinstance(eam.obj_pair, PairPotential)

    #<------ testing eam.obj_density is inherited from the correct base class
    from pypospack.potential import EamDensityFunction
    assert isinstance(eam.obj_density, EamDensityFunction)

    #<------ testing eam.obj_embedding is inherited from the correct base class
    from pypospack.potential import EamEmbeddingFunction
    assert isinstance(eam.obj_embedding, EamEmbeddingFunction)

    #<------ testing eam.symbols
    assert type(eam.symbols) is list
    assert eam.symbols == symbols

    #<------ testing eam.parameter_names
    assert type(eam.parameter_names) is list
    assert len(eam.parameter_names) == len(parameter_names)
    for pn in parameter_names:
        assert pn in eam.parameter_names

    #<------ testing eam.parameters
    assert type(eam.parameters) is OrderedDict
    assert len(eam.parameters) == len(eam.parameter_names)
    for pn in eam.parameter_names:
        assert pn in eam.parameters
    for pn, pv in eam.parameters.items():
        assert pv is None

    #<------ testing attributes which should still be set to None
    assert eam.pair is None
    assert eam.density is None
    assert eam.embedding is None
def test_2sym____init____morse_exponential_universal():
    #<--- variables unique for the test ---------------------------------------
    symbols = ['Ni', 'Al']
    func_pair = 'morse'
    func_density = 'eam_dens_exp'
    func_embedding = 'eam_embed_universal'

    #<--- setup of the code to conduct the test -------------------------------
    from pypospack.potential import EamPotential

    #<--- code being tested ---------------------------------------------------
    eam = EamPotential(
        symbols=symbols,
        func_pair=func_pair,
        func_density=func_density,
        func_embedding=func_embedding)
    eam = EamPotential(
        symbols=symbols,
        func_pair='morse',
        func_density='eam_dens_exp',
        func_embedding='eam_embed_universal')

    #<--- setup testing
    from pypospack.potential import MorsePotential
    from pypospack.potential import ExponentialDensityFunction
    from pypospack.potential import UniversalEmbeddingFunction

    pair = MorsePotential(symbols=symbols)
    dens = ExponentialDensityFunction(symbols=symbols)
    embed = UniversalEmbeddingFunction(symbols=symbols)
    p_names = ["p_{}".format(p) for p in pair.parameter_names]
    d_names = ["d_{}".format(p) for p in dens.parameter_names]
    e_names = ["e_{}".format(p) for p in embed.parameter_names]
    parameter_names = p_names + d_names + e_names

    #<--- testing attributes
    # All public attributes and properties should be tested for expected
    # behavior. This includes all attributes and properties which are
    # initialized to None after the class constructor is called.

    #<------ testing eam.obj_pair is inherited from the correct base class
    from pypospack.potential import PairPotential
    assert isinstance(eam.obj_pair, PairPotential)

    #<------ testing eam.obj_density is inherited from the correct base class
    from pypospack.potential import EamDensityFunction
    assert isinstance(eam.obj_density, EamDensityFunction)

    #<------ testing eam.obj_embedding is inherited from the correct base class
    from pypospack.potential import EamEmbeddingFunction
    assert isinstance(eam.obj_embedding, EamEmbeddingFunction)

    #<------ testing eam.symbols
    assert type(eam.symbols) is list
    assert eam.symbols == symbols

    #<------ testing eam.parameter_names
    assert type(eam.parameter_names) is list
    assert len(eam.parameter_names) == len(parameter_names)
    for pn in parameter_names:
        assert pn in eam.parameter_names

    #<------ testing eam.parameters
    assert type(eam.parameters) is OrderedDict
    assert len(eam.parameters) == len(eam.parameter_names)
    for pn in eam.parameter_names:
        assert pn in eam.parameters
    for pn, pv in eam.parameters.items():
        assert pv is None

    #<------ testing attributes which should still be set to None
    assert eam.pair is None
    assert eam.density is None
    assert eam.embedding is None
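# These checks are intended for pytest collection, e.g.:
#     pytest tests/potential/EamPotential/test__EamPotential__init.py -v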
| 42.241176
| 82
| 0.684306
| 851
| 7,181
| 5.643948
| 0.191539
| 0.069956
| 0.073288
| 0.093275
| 0.792421
| 0.775765
| 0.769727
| 0.769727
| 0.769727
| 0.769727
| 0
| 0.000864
| 0.193845
| 7,181
| 169
| 83
| 42.491124
| 0.828813
| 0.388943
| 0
| 0.893617
| 0
| 0
| 0.031812
| 0
| 0
| 0
| 0
| 0
| 0.297872
| 1
| 0.031915
| false
| 0
| 0.202128
| 0
| 0.234043
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a8913cb395508ed6b342b3a60f497db4acde3180
| 33
|
py
|
Python
|
lib/__init__.py
|
HA247RethinkIT/haproxy-galera-etcd
|
90f781aeaec5c652496f958775c9382481b942e1
|
[
"MIT"
] | 5
|
2016-02-20T20:19:39.000Z
|
2021-02-23T15:34:59.000Z
|
import_main.py
|
marskar/main
|
b374fa739557b8b571050223b7784b621522cb38
|
[
"MIT"
] | 1
|
2019-10-09T08:22:25.000Z
|
2019-10-09T08:22:35.000Z
|
import_main.py
|
marskar/main
|
b374fa739557b8b571050223b7784b621522cb38
|
[
"MIT"
] | 1
|
2019-09-27T14:33:31.000Z
|
2019-09-27T14:33:31.000Z
|
import __main__
__main__.main()
| 8.25
| 15
| 0.787879
| 4
| 33
| 4.5
| 0.5
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 3
| 16
| 11
| 0.62069
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
a8a3be9c9a187add7dc07b40a09fe76936c18b13
| 1,348
|
py
|
Python
|
fewshot_re_kit/sentence_encoder.py
|
gaotianyu1350/new_fewrel_bertpair
|
27184050d476fc93576948fb26680d508a2824bb
|
[
"MIT"
] | 180
|
2018-11-23T12:01:40.000Z
|
2022-03-21T07:26:25.000Z
|
fewshot_re_kit/sentence_encoder.py
|
readzw/HATT-Proto
|
8630f048ecc52714dda45e3d731ec68156439b4f
|
[
"MIT"
] | 17
|
2019-05-15T08:33:50.000Z
|
2021-01-06T03:08:29.000Z
|
fewshot_re_kit/sentence_encoder.py
|
readzw/HATT-Proto
|
8630f048ecc52714dda45e3d731ec68156439b4f
|
[
"MIT"
] | 42
|
2019-01-31T08:40:57.000Z
|
2021-12-09T05:34:32.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from torch import optim
from . import network
class CNNSentenceEncoder(nn.Module):
    def __init__(self, word_vec_mat, max_length, word_embedding_dim=50, pos_embedding_dim=5, hidden_size=230):
        nn.Module.__init__(self)
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.embedding = network.embedding.Embedding(word_vec_mat, max_length, word_embedding_dim, pos_embedding_dim)
        self.encoder = network.encoder.Encoder(max_length, word_embedding_dim, pos_embedding_dim, hidden_size)

    def forward(self, inputs):
        x = self.embedding(inputs)
        x = self.encoder(x)
        return x

class PCNNSentenceEncoder(nn.Module):
    def __init__(self, word_vec_mat, max_length, word_embedding_dim=50, pos_embedding_dim=5, hidden_size=230):
        nn.Module.__init__(self)
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.embedding = network.embedding.Embedding(word_vec_mat, max_length, word_embedding_dim, pos_embedding_dim)
        self.encoder = network.encoder.Encoder(max_length, word_embedding_dim, pos_embedding_dim, hidden_size)

    def forward(self, inputs):
        x = self.embedding(inputs)
        x = self.encoder.pcnn(x, inputs['mask'])
        return x
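# Instantiation sketch (shapes and the `inputs` layout are assumptions; the
# real tensor dict is produced by the accompanying `network` module):
if __name__ == '__main__':
    import numpy as np

    word_vec_mat = np.random.randn(40000, 50).astype('float32')  # vocab x dim
    encoder = CNNSentenceEncoder(word_vec_mat, max_length=40)
    # a real `inputs` dict of token/position id tensors would come from the
    # framework's data loader; PCNNSentenceEncoder also expects inputs['mask'].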
| 37.444444
| 117
| 0.728487
| 189
| 1,348
| 4.846561
| 0.195767
| 0.157205
| 0.085153
| 0.144105
| 0.805677
| 0.805677
| 0.805677
| 0.805677
| 0.805677
| 0.805677
| 0
| 0.010929
| 0.18546
| 1,348
| 35
| 118
| 38.514286
| 0.823315
| 0
| 0
| 0.642857
| 0
| 0
| 0.00297
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.214286
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a8ae8fbdc3fa987240ed18bc1871da0a116a6737
| 170
|
py
|
Python
|
sympy/deprecated/tests/test_class_registry.py
|
ethankward/sympy
|
44664d9f625a1c68bc492006cfe1012cb0b49ee4
|
[
"BSD-3-Clause"
] | 2
|
2021-01-09T23:11:25.000Z
|
2021-01-11T15:04:22.000Z
|
sympy/deprecated/tests/test_class_registry.py
|
ethankward/sympy
|
44664d9f625a1c68bc492006cfe1012cb0b49ee4
|
[
"BSD-3-Clause"
] | 3
|
2021-02-28T03:58:40.000Z
|
2021-03-07T06:12:47.000Z
|
sympy/deprecated/tests/test_class_registry.py
|
ethankward/sympy
|
44664d9f625a1c68bc492006cfe1012cb0b49ee4
|
[
"BSD-3-Clause"
] | 3
|
2019-05-18T21:32:31.000Z
|
2019-07-26T11:05:46.000Z
|
from sympy.testing.pytest import warns_deprecated_sympy

def test_C():
    from sympy.deprecated.class_registry import C
    with warns_deprecated_sympy():
        C.Add
| 24.285714
| 55
| 0.758824
| 24
| 170
| 5.125
| 0.583333
| 0.146341
| 0.325203
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.176471
| 170
| 6
| 56
| 28.333333
| 0.878571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
763fa0892c69669a077155be65a22fa2e8e426e3
| 4,549
|
py
|
Python
|
biencoder/beir/custommodels/sentence_bert_asym.py
|
dumpmemory/sgpt
|
18023c6f66aae0c69545ff7f0de614d11dd590eb
|
[
"MIT"
] | 91
|
2022-02-11T07:48:42.000Z
|
2022-03-25T10:07:18.000Z
|
biencoder/beir/custommodels/sentence_bert_asym.py
|
dumpmemory/sgpt
|
18023c6f66aae0c69545ff7f0de614d11dd590eb
|
[
"MIT"
] | 1
|
2022-03-12T22:26:24.000Z
|
2022-03-13T06:29:55.000Z
|
biencoder/beir/custommodels/sentence_bert_asym.py
|
dumpmemory/sgpt
|
18023c6f66aae0c69545ff7f0de614d11dd590eb
|
[
"MIT"
] | 9
|
2022-02-14T09:53:21.000Z
|
2022-03-16T13:35:41.000Z
|
### Simple wrappers for using ST models for BEIR - Mostly based on beir.retrieval.models.SentenceBERT ###
from sentence_transformers import SentenceTransformer, models
from torch import Tensor
from typing import List, Dict, Union, Tuple
import numpy as np
class SentenceBERTAsym:
    def __init__(self, model_path: Union[str, Tuple] = None, sep: str = " ", **kwargs):
        self.sep = sep
        self.model = SentenceTransformer(model_path, **kwargs)

    def encode_queries(self, queries: List[str], batch_size: int = 16, **kwargs) -> Union[List[Tensor], np.ndarray, Tensor]:
        queries = [{'QRY': q} for q in queries]
        return self.model.encode(queries, batch_size=batch_size, **kwargs)

    def encode_corpus(self, corpus: List[Dict[str, str]], batch_size: int = 8, **kwargs) -> Union[List[Tensor], np.ndarray, Tensor]:
        sentences = [{'DOCPOS': (doc["title"] + self.sep + doc["text"]).strip()} if "title" in doc else doc["text"].strip() for doc in corpus]
        return self.model.encode(sentences, batch_size=batch_size, **kwargs)

class SentenceBERTBOSEOS:
    def __init__(self,
                 model_path: Union[str, Tuple] = None,
                 sep: str = " ",
                 speca=False,
                 specb=False,
                 **kwargs):
        self.sep = sep
        self.model = SentenceTransformer(model_path, **kwargs)
        word_embedding_model = self.model._first_module()
        assert isinstance(word_embedding_model, models.Transformer)
        self.speca = speca
        self.specb = specb
        if self.specb:
            tokens = ["[SOS]", "{SOS}"]
            word_embedding_model.tokenizer.add_tokens(tokens, special_tokens=True)
            word_embedding_model.auto_model.resize_token_embeddings(len(word_embedding_model.tokenizer))
            # Will be replaced with the rep ones
            word_embedding_model.bos_spec_token_q = word_embedding_model.tokenizer.encode("[SOS]", add_special_tokens=False)[0]
            word_embedding_model.bos_spec_token_d = word_embedding_model.tokenizer.encode("{SOS}", add_special_tokens=False)[0]
            word_embedding_model.bos_spec_token_q_rep = word_embedding_model.tokenizer.encode("[", add_special_tokens=False)[0]
            word_embedding_model.eos_spec_token_q = word_embedding_model.tokenizer.encode("]", add_special_tokens=False)[0]
            word_embedding_model.bos_spec_token_d_rep = word_embedding_model.tokenizer.encode("{", add_special_tokens=False)[0]
            word_embedding_model.eos_spec_token_d = word_embedding_model.tokenizer.encode("}", add_special_tokens=False)[0]
            word_embedding_model.replace_bos = True
        elif self.speca:
            tokens = ["[SOS]", "[EOS]", "{SOS}", "{EOS}"]
            word_embedding_model.tokenizer.add_tokens(tokens, special_tokens=True)
            word_embedding_model.auto_model.resize_token_embeddings(len(word_embedding_model.tokenizer))
            word_embedding_model.bos_spec_token_q = word_embedding_model.tokenizer.encode("[SOS]", add_special_tokens=False)[0]
            word_embedding_model.eos_spec_token_q = word_embedding_model.tokenizer.encode("[EOS]", add_special_tokens=False)[0]
            word_embedding_model.bos_spec_token_d = word_embedding_model.tokenizer.encode("{SOS}", add_special_tokens=False)[0]
            word_embedding_model.eos_spec_token_d = word_embedding_model.tokenizer.encode("{EOS}", add_special_tokens=False)[0]

    def encode_queries(self, queries: List[str], batch_size: int = 16, **kwargs) -> Union[List[Tensor], np.ndarray, Tensor]:
        if self.speca or self.specb:
            # Will be replaced with [ in the models tokenization
            # If we would put [ here, there is a risk of it getting chained with a different token when encoding
            queries = ["[SOS]" + q for q in queries]
        return self.model.encode(queries, batch_size=batch_size, **kwargs)

    def encode_corpus(self, corpus: List[Dict[str, str]], batch_size: int = 8, **kwargs) -> Union[List[Tensor], np.ndarray, Tensor]:
        if self.speca or self.specb:
            # Will be replaced with { in the models tokenization
            # If we would put { here, there is a risk of it getting chained with a different token when encoding
            sentences = [("{SOS}" + doc["title"] + self.sep + doc["text"]).strip() if "title" in doc else "{SOS}" + doc["text"].strip() for doc in corpus]
        return self.model.encode(sentences, batch_size=batch_size, **kwargs)
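# Usage sketch (the checkpoint path is a placeholder; the {'QRY'}/{'DOCPOS'}
# dict inputs only make sense for a checkpoint whose first module is a
# sentence_transformers models.Asym that routes those keys):
if __name__ == '__main__':
    model = SentenceBERTAsym("path/to/asym-checkpoint")
    q_emb = model.encode_queries(["what is dense retrieval?"], batch_size=4)
    d_emb = model.encode_corpus(
        [{"title": "DPR", "text": "Dense passage retrieval learns ..."}], batch_size=4)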
| 56.8625
| 154
| 0.674214
| 596
| 4,549
| 4.89094
| 0.187919
| 0.129331
| 0.179074
| 0.129674
| 0.817153
| 0.817153
| 0.817153
| 0.81681
| 0.81681
| 0.81681
| 0
| 0.004459
| 0.211255
| 4,549
| 79
| 155
| 57.582278
| 0.807971
| 0.095186
| 0
| 0.407407
| 0
| 0
| 0.030702
| 0
| 0
| 0
| 0
| 0
| 0.018519
| 1
| 0.111111
| false
| 0
| 0.074074
| 0
| 0.296296
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
765caaf8c0631ce179698ec3b41a4fb87d3fad1f
| 6,800
|
py
|
Python
|
openprocurement/auctions/geb/tests/blanks/chronograph.py
|
oleksiyVeretiuk/openprocurement.auctions.geb
|
2965b52bf8826b9a8f8870c9a4d2052f945f5799
|
[
"Apache-2.0"
] | null | null | null |
openprocurement/auctions/geb/tests/blanks/chronograph.py
|
oleksiyVeretiuk/openprocurement.auctions.geb
|
2965b52bf8826b9a8f8870c9a4d2052f945f5799
|
[
"Apache-2.0"
] | null | null | null |
openprocurement/auctions/geb/tests/blanks/chronograph.py
|
oleksiyVeretiuk/openprocurement.auctions.geb
|
2965b52bf8826b9a8f8870c9a4d2052f945f5799
|
[
"Apache-2.0"
] | null | null | null |
from openprocurement.auctions.geb.tests.fixtures.active_tendering import (
    END_ACTIVE_TENDERING_AUCTION_DEFAULT_FIXTURE_WITH_ONE_BID,
    END_ACTIVE_TENDERING_AUCTION_DEFAULT_FIXTURE_WITH_TWO_BIDS,
    END_ACTIVE_TENDERING_AUCTION_DEFAULT_FIXTURE_WITH_TWO_BIDS_AND_ONE_DRAFT
)
from openprocurement.auctions.geb.tests.fixtures.active_enquiry import (
    END_ACTIVE_ENQUIRY_UNSUCCESSFUL_NO_ACTIVE_BIDS,
    END_ACTIVE_ENQUIRY_AUCTION_DEFAULT_FIXTURE,
    END_ACTIVE_ENQUIRY_AUCTION_QUALIFICATION
)

def check_rectification_period_end(test_case):
    request_data = {'data': {'id': test_case.auction['data']['id']}}
    response = test_case.app.patch_json(test_case.ENTRYPOINTS['auction'], request_data)
    response = test_case.app.get(test_case.ENTRYPOINTS['auction'])
    test_case.assertEqual(response.status, '200 OK')
    test_case.assertEqual(response.json['data']["status"], 'active.tendering')

def check_tender_period_end_no_active_bids(test_case):
    context = test_case.procedure.snapshot()
    auction = context['auction']
    request_data = {'data': {'id': auction['data']['id']}}
    entrypoint = '/auctions/{}'.format(auction['data']['id'])
    response = test_case.app.patch_json(entrypoint, request_data)
    response = test_case.app.get(entrypoint)
    test_case.assertEqual(response.status, '200 OK')
    test_case.assertEqual(response.json['data']["status"], 'unsuccessful')

def check_tender_period_end_no_minNumberOfQualifiedBids(test_case):
    context = test_case.procedure.snapshot(fixture=END_ACTIVE_TENDERING_AUCTION_DEFAULT_FIXTURE_WITH_ONE_BID)
    auction = context['auction']
    request_data = {'data': {'id': auction['data']['id']}}
    entrypoint = '/auctions/{}'.format(auction['data']['id'])
    response = test_case.app.patch_json(entrypoint, request_data)
    response = test_case.app.get(entrypoint)
    test_case.assertEqual(response.status, '200 OK')
    test_case.assertEqual(response.json['data']["status"], 'unsuccessful')

def check_tender_period_end_successful(test_case):
    context = test_case.procedure.snapshot(fixture=END_ACTIVE_TENDERING_AUCTION_DEFAULT_FIXTURE_WITH_TWO_BIDS)
    auction = context['auction']
    request_data = {'data': {'id': auction['data']['id']}}
    entrypoint = '/auctions/{}'.format(auction['data']['id'])
    response = test_case.app.patch_json(entrypoint, request_data)
    response = test_case.app.get(entrypoint)
    test_case.assertEqual(response.status, '200 OK')
    test_case.assertEqual(response.json['data']["status"], 'active.enquiry')

def check_enquiry_period_end_unsuccessful(test_case):
    context = test_case.procedure.snapshot(fixture=END_ACTIVE_ENQUIRY_UNSUCCESSFUL_NO_ACTIVE_BIDS)
    auction = context['auction']
    request_data = {'data': {'id': auction['data']['id']}}
    entrypoint = '/auctions/{}'.format(auction['data']['id'])
    response = test_case.app.patch_json(entrypoint, request_data)
    response = test_case.app.get(entrypoint)
    test_case.assertEqual(response.status, '200 OK')
    test_case.assertEqual(response.json['data']["status"], 'unsuccessful')

def check_enquiry_period_end_active_qualification(test_case):
    context = test_case.procedure.snapshot(fixture=END_ACTIVE_ENQUIRY_AUCTION_QUALIFICATION)
    auction = context['auction']
    request_data = {'data': {'id': auction['data']['id']}}
    entrypoint = '/auctions/{}'.format(auction['data']['id'])
    response = test_case.app.patch_json(entrypoint, request_data)
    response = test_case.app.get(entrypoint)
    test_case.assertEqual(response.status, '200 OK')
    test_case.assertEqual(response.json['data']["status"], 'active.qualification')

def check_enquiry_period_end_active_auction(test_case):
    context = test_case.procedure.snapshot(fixture=END_ACTIVE_ENQUIRY_AUCTION_DEFAULT_FIXTURE)
    auction = context['auction']
    request_data = {'data': {'id': auction['data']['id']}}
    entrypoint = '/auctions/{}'.format(auction['data']['id'])
    response = test_case.app.patch_json(entrypoint, request_data)
    response = test_case.app.get(entrypoint)
    test_case.assertEqual(response.status, '200 OK')
    test_case.assertEqual(response.json['data']["status"], 'active.auction')

def check_enquiry_period_end_set_unsuccessful_bids(test_case):
    context = test_case.procedure.snapshot(fixture=END_ACTIVE_ENQUIRY_UNSUCCESSFUL_NO_ACTIVE_BIDS)
    auction = context['auction']
    bids = context['bids']
    request_data = {'data': {'id': auction['data']['id']}}
    entrypoint = '/auctions/{}'.format(auction['data']['id'])
    response = test_case.app.patch_json(entrypoint, request_data)
    bid_url_pattern = '/auctions/{auction}/bids/{bid}?acc_token={token}'
    for bid in bids:
        bid_url = bid_url_pattern.format(auction=auction['data']['id'],
                                         bid=bid['data']['id'],
                                         token=bid['access']['token'])
        response = test_case.app.get(bid_url)
        test_case.assertEqual(response.status, '200 OK')
        test_case.assertEqual(response.json['data']["status"], 'unsuccessful')

def chronograph(test_case, auction):
    auth = test_case.app.authorization
    test_case.app.authorization = ('Basic', ('chronograph', ''))
    request_data = {'data': {'id': auction['id']}}
    entrypoint = '/auctions/{}'.format(auction['id'])
    test_case.app.patch_json(entrypoint, request_data)
    test_case.app.authorization = auth

def check_tender_period_end_delete_draft_bids(test_case):
    context = test_case.procedure.snapshot(fixture=END_ACTIVE_TENDERING_AUCTION_DEFAULT_FIXTURE_WITH_TWO_BIDS_AND_ONE_DRAFT)
    auction = context['auction']
    bids = context['bids']
    draft_bid = [bid for bid in bids if bid['data']['status'] == 'draft'][0]
    bid_url_pattern = '/auctions/{auction}/bids/{bid}?acc_token={token}'
    bid_url = bid_url_pattern.format(auction=auction['data']['id'],
                                     bid=draft_bid['data']['id'],
                                     token=draft_bid['access']['token'])
    auth = test_case.app.authorization
    test_case.app.authorization = ('Basic', ('{}'.format(draft_bid['access']['owner']), ''))
    test_case.app.get(bid_url)
    test_case.app.authorization = auth
    request_data = {'data': {'id': auction['data']['id']}}
    entrypoint = '/auctions/{}'.format(auction['data']['id'])
    response = test_case.app.patch_json(entrypoint, request_data)
    response = test_case.app.get(entrypoint)
    test_case.assertEqual(response.status, '200 OK')
    test_case.assertEqual(response.json['data']["status"], 'active.enquiry')
    test_case.app.authorization = ('Basic', ('{}'.format(draft_bid['access']['owner']), ''))
    test_case.app.get(bid_url, status=404)
    test_case.app.authorization = auth
| 39.766082
| 124
| 0.71
| 837
| 6,800
| 5.464755
| 0.081243
| 0.118933
| 0.069742
| 0.07477
| 0.903804
| 0.857455
| 0.807171
| 0.769786
| 0.745081
| 0.713161
| 0
| 0.005303
| 0.140294
| 6,800
| 170
| 125
| 40
| 0.777113
| 0
| 0
| 0.605263
| 0
| 0
| 0.122224
| 0.01412
| 0
| 0
| 0
| 0
| 0.157895
| 1
| 0.087719
| false
| 0
| 0.017544
| 0
| 0.105263
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4f2c88601db729d1bef8719704a1a5d90b627f57
| 28
|
py
|
Python
|
dedupe_trees/__init__.py
|
davidmreed/dedupe_trees.py
|
d6b0f9feb7514a77f4562513762f4d1aa4b98510
|
[
"MIT"
] | 1
|
2017-12-28T01:48:13.000Z
|
2017-12-28T01:48:13.000Z
|
dedupe_trees/__init__.py
|
davidmreed/dedupe.py
|
d6b0f9feb7514a77f4562513762f4d1aa4b98510
|
[
"MIT"
] | null | null | null |
dedupe_trees/__init__.py
|
davidmreed/dedupe.py
|
d6b0f9feb7514a77f4562513762f4d1aa4b98510
|
[
"MIT"
] | null | null | null |
from .dedupe_trees import *
| 14
| 27
| 0.785714
| 4
| 28
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 28
| 1
| 28
| 28
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4f41fee85d0c3ee836dc59a05d671376cf8f62da
| 22
|
py
|
Python
|
__init__.py
|
vwrobel/cvtools
|
a9f41186bff71a7c528b889cce015b099149daf4
|
[
"MIT"
] | 1
|
2021-03-18T00:28:09.000Z
|
2021-03-18T00:28:09.000Z
|
__init__.py
|
vwrobel/cvtools
|
a9f41186bff71a7c528b889cce015b099149daf4
|
[
"MIT"
] | null | null | null |
__init__.py
|
vwrobel/cvtools
|
a9f41186bff71a7c528b889cce015b099149daf4
|
[
"MIT"
] | null | null | null |
from .cvtools import *
| 22
| 22
| 0.772727
| 3
| 22
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 22
| 1
| 22
| 22
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4f88cc7fb9c0d539687293479033e563d2887f8a
| 151
|
py
|
Python
|
tests/b.py
|
tuanky/lambdata_DS9
|
01bbc86e2da210c1e6349a2cc6853d1c1c5d282a
|
[
"MIT"
] | null | null | null |
tests/b.py
|
tuanky/lambdata_DS9
|
01bbc86e2da210c1e6349a2cc6853d1c1c5d282a
|
[
"MIT"
] | 3
|
2020-03-24T18:25:17.000Z
|
2021-02-02T22:34:24.000Z
|
tests/b.py
|
tuanky/lambdata_DS9
|
01bbc86e2da210c1e6349a2cc6853d1c1c5d282a
|
[
"MIT"
] | null | null | null |
print("Hello World from %s!" % __name__)
if __name__ == '__main__':
print("Hello World again from %s!" % __name__)
#python a.py
#python b.py
| 25.166667
| 56
| 0.642384
| 22
| 151
| 3.681818
| 0.590909
| 0.246914
| 0.37037
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.205298
| 151
| 6
| 57
| 25.166667
| 0.675
| 0.145695
| 0
| 0
| 0
| 0
| 0.425197
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.666667
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
96d19fe2e664c0b5dbebb243a38c3638c2e01fdf
| 7,914
|
py
|
Python
|
tools/c7n_gcp/tests/test_appengine.py
|
anastasiia-zolochevska/cloud-custodian
|
f25315a01bec808c16ab0e2d433d6151cf5769e4
|
[
"Apache-2.0"
] | 8
|
2021-05-18T02:22:03.000Z
|
2021-09-11T02:49:04.000Z
|
tools/c7n_gcp/tests/test_appengine.py
|
anastasiia-zolochevska/cloud-custodian
|
f25315a01bec808c16ab0e2d433d6151cf5769e4
|
[
"Apache-2.0"
] | 79
|
2019-03-20T12:27:06.000Z
|
2019-08-14T14:07:04.000Z
|
tools/c7n_gcp/tests/test_appengine.py
|
anastasiia-zolochevska/cloud-custodian
|
f25315a01bec808c16ab0e2d433d6151cf5769e4
|
[
"Apache-2.0"
] | 3
|
2017-09-21T13:36:46.000Z
|
2021-09-20T16:38:29.000Z
|
# Copyright 2019 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gcp_common import BaseTest
class AppEngineAppTest(BaseTest):

    def test_app_query(self):
        project_id = 'cloud-custodian'
        app_name = 'apps/{}'.format(project_id)
        session_factory = self.replay_flight_data(
            'app-engine-query', project_id=project_id)
        policy = self.load_policy(
            {'name': 'gcp-app-engine-dryrun',
             'resource': 'gcp.app-engine'},
            session_factory=session_factory)
        resources = policy.run()
        self.assertEqual(resources[0]['name'], app_name)

    def test_app_get(self):
        project_id = 'cloud-custodian'
        app_name = 'apps/' + project_id
        session_factory = self.replay_flight_data(
            'app-engine-get', project_id=project_id)
        policy = self.load_policy(
            {'name': 'gcp-app-engine-dryrun',
             'resource': 'gcp.app-engine'},
            session_factory=session_factory)
        resource = policy.resource_manager.get_resource(
            {'resourceName': app_name})
        self.assertEqual(resource['name'], app_name)

class AppEngineCertificateTest(BaseTest):

    def test_certificate_query(self):
        project_id = 'cloud-custodian'
        app_name = 'apps/{}'.format(project_id)
        certificate_id = '12277184'
        certificate_name = '{}/authorizedCertificates/{}'.format(app_name, certificate_id)
        session_factory = self.replay_flight_data(
            'app-engine-certificate-query', project_id=project_id)
        policy = self.load_policy(
            {'name': 'gcp-app-engine-certificate-dryrun',
             'resource': 'gcp.app-engine-certificate'},
            session_factory=session_factory)
        parent_annotation_key = policy.resource_manager.resource_type.get_parent_annotation_key()
        resources = policy.run()
        self.assertEqual(resources[0]['name'], certificate_name)
        self.assertEqual(resources[0][parent_annotation_key]['name'], app_name)

    def test_certificate_get(self):
        project_id = 'cloud-custodian'
        app_name = 'apps/' + project_id
        certificate_id = '12277184'
        certificate_name = '{}/authorizedCertificates/{}'.format(app_name, certificate_id)
        session_factory = self.replay_flight_data(
            'app-engine-certificate-get', project_id=project_id)
        policy = self.load_policy(
            {'name': 'gcp-app-engine-certificate-dryrun',
             'resource': 'gcp.app-engine-certificate'},
            session_factory=session_factory)
        parent_annotation_key = policy.resource_manager.resource_type.get_parent_annotation_key()
        resource = policy.resource_manager.get_resource(
            {'resourceName': certificate_name})
        self.assertEqual(resource['name'], certificate_name)
        self.assertEqual(resource[parent_annotation_key]['name'], app_name)

class AppEngineDomainTest(BaseTest):

    def test_domain_query(self):
        project_id = 'cloud-custodian'
        app_name = 'apps/{}'.format(project_id)
        domain_id = 'gcp-li.ga'
        domain_name = '{}/authorizedDomains/{}'.format(app_name, domain_id)
        session_factory = self.replay_flight_data(
            'app-engine-domain-query', project_id=project_id)
        policy = self.load_policy(
            {'name': 'gcp-app-engine-domain-dryrun',
             'resource': 'gcp.app-engine-domain'},
            session_factory=session_factory)
        parent_annotation_key = policy.resource_manager.resource_type.get_parent_annotation_key()
        resources = policy.run()
        self.assertEqual(resources[0]['name'], domain_name)
        self.assertEqual(resources[0][parent_annotation_key]['name'], app_name)

class AppEngineDomainMappingTest(BaseTest):

    def test_domain_mapping_query(self):
        project_id = 'cloud-custodian'
        app_name = 'apps/{}'.format(project_id)
        domain_mapping_id = 'alex.gcp-li.ga'
        domain_mapping_name = '{}/domainMappings/{}'.format(app_name, domain_mapping_id)
        session_factory = self.replay_flight_data(
            'app-engine-domain-mapping-query', project_id=project_id)
        policy = self.load_policy(
            {'name': 'gcp-app-engine-domain-mapping-dryrun',
             'resource': 'gcp.app-engine-domain-mapping'},
            session_factory=session_factory)
        parent_annotation_key = policy.resource_manager.resource_type.get_parent_annotation_key()
        resources = policy.run()
        self.assertEqual(resources[0]['name'], domain_mapping_name)
        self.assertEqual(resources[0][parent_annotation_key]['name'], app_name)

    def test_domain_mapping_get(self):
        project_id = 'cloud-custodian'
        app_name = 'apps/' + project_id
        domain_mapping_id = 'alex.gcp-li.ga'
        domain_mapping_name = '{}/domainMappings/{}'.format(app_name, domain_mapping_id)
        session_factory = self.replay_flight_data(
            'app-engine-domain-mapping-get', project_id=project_id)
        policy = self.load_policy(
            {'name': 'gcp-app-engine-domain-mapping-dryrun',
             'resource': 'gcp.app-engine-domain-mapping'},
            session_factory=session_factory)
        parent_annotation_key = policy.resource_manager.resource_type.get_parent_annotation_key()
        resource = policy.resource_manager.get_resource(
            {'resourceName': domain_mapping_name})
        self.assertEqual(resource['name'], domain_mapping_name)
        self.assertEqual(resource[parent_annotation_key]['name'], app_name)

class AppEngineFirewallIngressRuleTest(BaseTest):

    def test_firewall_ingress_rule_query(self):
        project_id = 'cloud-custodian'
        app_name = 'apps/{}'.format(project_id)
        rule_priority = 2147483647
        session_factory = self.replay_flight_data(
            'app-engine-firewall-ingress-rule-query', project_id=project_id)
        policy = self.load_policy(
            {'name': 'gcp-app-engine-firewall-ingress-rule-dryrun',
             'resource': 'gcp.app-engine-firewall-ingress-rule'},
            session_factory=session_factory)
        parent_annotation_key = policy.resource_manager.resource_type.get_parent_annotation_key()
        resources = policy.run()
        self.assertEqual(resources[0]['priority'], rule_priority)
        self.assertEqual(resources[0][parent_annotation_key]['name'], app_name)
def test_firewall_ingress_rule_get(self):
project_id = 'cloud-custodian'
app_name = 'apps/{}'.format(project_id)
rule_priority = 2147483647
rule_priority_full = '{}/firewall/ingressRules/{}'.format(app_name, rule_priority)
session_factory = self.replay_flight_data(
'app-engine-firewall-ingress-rule-get', project_id=project_id)
policy = self.load_policy(
{'name': 'gcp-app-engine-firewall-ingress-rule-dryrun',
'resource': 'gcp.app-engine-firewall-ingress-rule'},
session_factory=session_factory)
parent_annotation_key = policy.resource_manager.resource_type.get_parent_annotation_key()
resource = policy.resource_manager.get_resource(
{'resourceName': rule_priority_full})
self.assertEqual(resource['priority'], rule_priority)
self.assertEqual(resource[parent_annotation_key]['name'], app_name)
| 42.095745
| 97
| 0.678292
| 909
| 7,914
| 5.629263
| 0.137514
| 0.063318
| 0.077975
| 0.031659
| 0.824702
| 0.786789
| 0.768028
| 0.756889
| 0.747704
| 0.73676
| 0
| 0.008461
| 0.208491
| 7,914
| 187
| 98
| 42.320856
| 0.808429
| 0.070508
| 0
| 0.691176
| 0
| 0
| 0.188641
| 0.110869
| 0
| 0
| 0
| 0
| 0.117647
| 1
| 0.066176
| false
| 0
| 0.007353
| 0
| 0.110294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
96d1b3c0cf69b111fd709d4db90fa1070c60c860
| 1,847
|
py
|
Python
|
apps/site/models/__init__.py
|
LocalGround/localground
|
aa5a956afe7a84a7763a3b23d62a9fd925831cd7
|
[
"Apache-2.0"
] | 9
|
2015-05-29T22:22:20.000Z
|
2022-02-01T20:39:00.000Z
|
apps/site/models/__init__.py
|
LocalGround/localground
|
aa5a956afe7a84a7763a3b23d62a9fd925831cd7
|
[
"Apache-2.0"
] | 143
|
2015-01-22T15:03:40.000Z
|
2020-06-27T01:55:29.000Z
|
apps/site/models/__init__.py
|
LocalGround/localground
|
aa5a956afe7a84a7763a3b23d62a9fd925831cd7
|
[
"Apache-2.0"
] | 5
|
2015-03-16T20:51:49.000Z
|
2017-02-07T20:48:49.000Z
|
# abstract
from localground.apps.site.models.abstract.base import \
Base, BaseAudit, BaseUploadedMedia
from localground.apps.site.models.abstract.mixins import ExtentsMixin, \
PointMixin, ExtrasMixin, ProjectMixin, GenericRelationMixin, \
MediaMixin, NamedMixin, ObjectPermissionsMixin
# layers
from localground.apps.site.models.layer import Layer
from localground.apps.site.models.symbol import Symbol
from localground.apps.site.models.icon import Icon
# lookups
from localground.apps.site.models.lookups import StatusCode, UploadSource, \
UploadType, ErrorCode, ObjectTypes
# overlays
from localground.apps.site.models.record import Record
from localground.apps.site.models.tileset import OverlaySource, \
OverlayType, TileSet
# accounts
# from localground.apps.site.models.base import Base
from localground.apps.site.models.project import Project
from localground.apps.site.models.userprofile import UserProfile
from localground.apps.site.models.permissions import \
ObjectUserPermissions, UserAuthorityObject, \
UserAuthority, ObjectAuthority, ProjectUser
from localground.apps.site.models.genericassociation import GenericAssociation
# prints
from localground.apps.site.models.datatype import DataType
from localground.apps.site.models.field import Field
from localground.apps.site.models.dataset import Dataset
from localground.apps.site.models.layout import Layout
from localground.apps.site.models.prints import Print
# uploads
from localground.apps.site.models.mapimage import MapImage, ImageOpts
from localground.apps.site.models.photo import Photo
from localground.apps.site.models.audio import Audio
from localground.apps.site.models.video import Video
# styled map
from localground.apps.site.models.styledmap import StyledMap
# document
from localground.apps.site.models.document import Document
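# This aggregator module lets callers import any model from one place, e.g.
# (a usage sketch): from localground.apps.site.models import Layer, Photo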
| 36.215686
| 78
| 0.828912
| 224
| 1,847
| 6.834821
| 0.267857
| 0.23514
| 0.297845
| 0.360549
| 0.465056
| 0.048334
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097997
| 1,847
| 50
| 79
| 36.94
| 0.918968
| 0.06876
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.766667
| 0
| 0.766667
| 0.033333
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
96f0528a775f4832c7d1e8c3684c40931c403717
| 18,578
|
py
|
Python
|
xsugar/test/test_master_data.py
|
edmundsj/xsugar
|
08596f500b043661b9fc807803319ee8f5d6de54
|
[
"MIT"
] | null | null | null |
xsugar/test/test_master_data.py
|
edmundsj/xsugar
|
08596f500b043661b9fc807803319ee8f5d6de54
|
[
"MIT"
] | 40
|
2021-03-21T16:08:34.000Z
|
2021-05-28T01:52:07.000Z
|
xsugar/test/test_master_data.py
|
edmundsj/xsugar
|
08596f500b043661b9fc807803319ee8f5d6de54
|
[
"MIT"
] | null | null | null |
import pytest
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
import os
from shutil import rmtree
from numpy.testing import assert_equal, assert_allclose
from xsugar import Experiment, ureg
from ast import literal_eval
from itertools import zip_longest
from spectralpy import power_spectrum
from sciparse import assertDataDictEqual
@pytest.fixture
def exp(path_data):
wavelengths = np.array([1, 2, 3])
temperatures = np.array([25, 50])
frequency = 8500
exp = Experiment(name='TEST1', kind='test',
frequency=frequency,
wavelengths=wavelengths,
temperatures=temperatures)
yield exp
rmtree(path_data['data_base_path'], ignore_errors=True)
rmtree(path_data['figures_base_path'], ignore_errors=True)
rmtree(path_data['designs_base_path'], ignore_errors=True)
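# Everything after `yield` runs as teardown: any data, figures and designs
# directories created for the Experiment are removed after each test.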
def testGenerateMasterData1Var(exp, exp_data):
js, ns = exp_data['major_separator'], exp_data['minor_separator']
name1 = 'TEST1' + js + 'wavelength' + ns + '1' + js + \
'temperature' + ns + '25'
name2 = 'TEST1' + js + 'wavelength' + ns + '1' + js + \
'temperature' + ns + '35'
scalar_data = {
name1: 1,
name2: 2,
}
desired_data = pd.DataFrame({
'temperature': [25, 35],
'Value': [1, 2]})
actual_data = exp.master_data(data_dict=scalar_data)
assert_frame_equal(actual_data, desired_data)
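# For reference, assuming the default separators are '~' (major) and '='
# (minor), as the convert_name tests below use, name1 above reads
# 'TEST1~wavelength=1~temperature=25'.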
def test_master_data_units(exp_units, convert_name):
name = convert_name('TEST1~wavelength=25nm~temperature=305K')
data_dict = {name: ureg.mV * 1.5}
desired_master_data = pd.DataFrame({
'wavelength (nm)': [25],
'temperature (K)': [305],
'voltage (mV)': [1.5]})
actual_master_data = exp_units.master_data(data_dict)
assert_frame_equal(actual_master_data, desired_master_data)
def test_master_data_nodrop(exp_units, convert_name):
"""
Checks that we do not drop the last column.
"""
name1 = convert_name('TEST1~wavelength=25nm~temperature=305K')
name2 = convert_name('TEST1~wavelength=35nm~temperature=306K')
data_dict = {
name1: ureg.mV * 1.5,
name2: ureg.mV * 1.5,
}
desired_master_data = pd.DataFrame({
'wavelength (nm)': [25, 35],
'temperature (K)': [305, 306],
'voltage (mV)': [1.5, 1.5]})
actual_master_data = exp_units.master_data(data_dict)
assert_frame_equal(actual_master_data, desired_master_data)
def testGenerateMasterData2Var(exp, exp_data):
js, ns = exp_data['major_separator'], exp_data['minor_separator']
name1 = 'TEST1' + js + 'wavelength' + ns + '1' + js + \
'temperature' + ns + '25'
name2 = 'TEST1' + js + 'wavelength' + ns + '1' + js + \
'temperature' + ns + '35'
name3 = 'TEST1' + js + 'wavelength' + ns + '2' + js + \
'temperature' + ns + '25'
name4 = 'TEST1' + js + 'wavelength' + ns + '2' + js + \
'temperature' + ns + '35'
scalar_data = {
name1: 1,
name2: 2,
name3: 3,
name4: 4}
desired_data = pd.DataFrame({
'wavelength': [1, 1, 2, 2],
'temperature': [25, 35, 25, 35],
'Value': [1, 2, 3, 4]})
actual_data = exp.master_data(data_dict=scalar_data)
assert_frame_equal(actual_data, desired_data)
def test_data_from_master(exp, exp_data):
js, ns = exp_data['major_separator'], exp_data['minor_separator']
master_data = pd.DataFrame({
'wavelength': [1, 2],
'Value': [3, 4]})
name1 = 'TEST1' + js + 'wavelength' + ns + '1'
name2 = 'TEST1' + js + 'wavelength' + ns + '2'
desired_data = {
name1: 3,
name2: 4
}
actual_data = exp.data_from_master(master_data)
assertDataDictEqual(actual_data, desired_data)
def test_data_from_master_units(exp_units, convert_name):
desired_name = convert_name('TEST1~temperature=305K~wavelength=25nm')
master_data = pd.DataFrame({
'temperature (K)': [305],
'wavelength (nm)': [25],
'voltage (mV)': [1.5]})
desired_data = {desired_name: 1.5 * ureg.mV}
actual_data = exp_units.data_from_master(master_data)
assertDataDictEqual(actual_data, desired_data)
def test_data_from_master_2var(exp, exp_data, convert_name):
master_data = pd.DataFrame({
'temperature': [25.0, 25.0, 25.0, 35.0, 35.0, 35.0],
'wavelength': [0, 1, 2, 0, 1, 2],
'Value': [0, 1, 2, 3, 4, 5]})
names = [
convert_name('TEST1~temperature=25.0~wavelength=0'),
convert_name('TEST1~temperature=25.0~wavelength=1'),
convert_name('TEST1~temperature=25.0~wavelength=2'),
convert_name('TEST1~temperature=35.0~wavelength=0'),
convert_name('TEST1~temperature=35.0~wavelength=1'),
convert_name('TEST1~temperature=35.0~wavelength=2'),
]
desired_data_dict = {name: i for i, name in enumerate(names)}
actual_data_dict = exp.data_from_master(master_data)
assertDataDictEqual(actual_data_dict, desired_data_dict)
def testGenerateMasterDataDict1Var(exp, exp_data):
js, ns = exp_data['major_separator'], exp_data['minor_separator']
name1 = 'TEST1' + js + 'wavelength' + ns + '1'
name2 = 'TEST1' + js + 'wavelength' + ns + '2'
name_all = 'TEST1' + js + 'wavelength' + ns + 'all'
data_dict = {
name1: 3.0,
name2: 4.0}
desired_data = {
name_all: pd.DataFrame({
'wavelength': [1, 2],
'Value': [3.0, 4.0]})}
actual_data = exp.master_data_dict(data_dict)
assertDataDictEqual(actual_data, desired_data)
def test_master_data_dict_1var_units(exp_units, convert_name):
name1 = convert_name('TEST1~wavelength=1nm')
name2 = convert_name('TEST1~wavelength=2nm')
name_all = convert_name('TEST1~wavelength=all')
data_dict = {
name1: 3.0 * ureg.nA,
name2: 4.0 * ureg.nA}
desired_data = {
name_all: pd.DataFrame({
'wavelength (nm)': [1, 2],
'current (nA)': [3.0, 4.0]})}
actual_data = exp_units.master_data_dict(data_dict)
assertDataDictEqual(actual_data, desired_data)
def test_master_data_dict_2var(exp, exp_data, convert_name):
js, ns = exp_data['major_separator'], exp_data['minor_separator']
master_data = pd.DataFrame({
'wavelength': [0, 1, 2, 0, 1, 2],
'temperature': [25.0, 25.0, 25.0, 35.0, 35.0, 35.0],
'Value': [0, 1, 2, 3, 4, 5]})
names = [
convert_name('TEST1~temperature=25.0~wavelength=0'),
convert_name('TEST1~temperature=25.0~wavelength=1'),
convert_name('TEST1~temperature=25.0~wavelength=2'),
convert_name('TEST1~temperature=35.0~wavelength=0'),
convert_name('TEST1~temperature=35.0~wavelength=1'),
convert_name('TEST1~temperature=35.0~wavelength=2'),
]
data_dict = {name: i for i, name in enumerate(names)}
desired_data = {
convert_name('TEST1~temperature=x~wavelength=c'):
{
convert_name('TEST1~temperature=x~wavelength=0'):
pd.DataFrame({
'temperature': [25.0, 35.0],
'Value': [0, 3]}),
convert_name('TEST1~temperature=x~wavelength=1'):
pd.DataFrame({
'temperature': [25.0, 35.0],
'Value': [1, 4] }),
convert_name('TEST1~temperature=x~wavelength=2'):
pd.DataFrame({
'temperature': [25.0, 35.0],
'Value': [2, 5] })
},
convert_name('TEST1~temperature=c~wavelength=x'):
{
convert_name('TEST1~temperature=25.0~wavelength=x'):
pd.DataFrame({
'wavelength': [0, 1, 2],
'Value': [0, 1, 2]}),
convert_name('TEST1~temperature=35.0~wavelength=x'):
pd.DataFrame({
'wavelength': [0,1,2],
'Value': [3, 4, 5]})
},
}
actual_data = exp.master_data_dict(data_dict)
assertDataDictEqual(actual_data, desired_data)
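# Key convention in the expected output above: '=x' marks the variable swept
# along the x-axis and '=c' the variable held constant per curve, so each
# outer key is one plot family and each inner key one curve within it.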
def test_master_data_dict_include_x(exp, exp_data, convert_name):
names = [convert_name(name) for name in \
[
'TEST1~temperature=25.0~wavelength=1',
'TEST1~temperature=25.0~wavelength=2',
'TEST1~temperature=35.0~wavelength=1',
'TEST1~temperature=35.0~wavelength=2',
]]
data_dict = {
names[0]: 1.0,
names[1]: 2.0,
names[2]: 3.0,
names[3]: 4.0,
}
desired_data = {
convert_name('TEST1~temperature=c~wavelength=x'): {
convert_name('TEST1~temperature=25.0~wavelength=x'):
pd.DataFrame({
'wavelength': [1, 2],
'Value': [1.0, 2.0]}),
convert_name('TEST1~temperature=35.0~wavelength=x'):
pd.DataFrame({
'wavelength': [1, 2],
'Value': [3.0, 4.0]}),
},
}
actual_data = exp.master_data_dict(
data_dict, x_axis_include=['wavelength'])
assertDataDictEqual(actual_data, desired_data)
def test_master_data_dict_exclude_x(exp, exp_data, convert_name):
names = [convert_name(name) for name in \
[
'TEST1~temperature=25.0~wavelength=1',
'TEST1~temperature=25.0~wavelength=2',
'TEST1~temperature=35.0~wavelength=1',
'TEST1~temperature=35.0~wavelength=2',
]]
data_dict = {
names[0]: 1.0,
names[1]: 2.0,
names[2]: 3.0,
names[3]: 4.0,
}
desired_data = {
convert_name('TEST1~temperature=x~wavelength=c'): {
convert_name('TEST1~temperature=x~wavelength=1'):
pd.DataFrame({
'temperature': [25.0, 35.0],
'Value': [1.0, 3.0]}),
convert_name('TEST1~temperature=x~wavelength=2'):
pd.DataFrame({
'temperature': [25.0, 35.0],
'Value': [2.0, 4.0]}),
},
}
actual_data = exp.master_data_dict(
data_dict, x_axis_exclude=['wavelength'])
assertDataDictEqual(actual_data, desired_data)
def test_master_data_dict_include_c(exp, exp_data, convert_name):
names = [convert_name(name) for name in \
[
'TEST1~temperature=25.0~wavelength=1',
'TEST1~temperature=25.0~wavelength=2',
'TEST1~temperature=35.0~wavelength=1',
'TEST1~temperature=35.0~wavelength=2',
]]
data_dict = {
names[0]: 1.0,
names[1]: 2.0,
names[2]: 3.0,
names[3]: 4.0,
}
desired_data = {
convert_name('TEST1~temperature=c~wavelength=x'): {
convert_name('TEST1~temperature=25.0~wavelength=x'):
pd.DataFrame({
'wavelength': [1, 2],
'Value': [1.0, 2.0]}),
convert_name('TEST1~temperature=35.0~wavelength=x'):
pd.DataFrame({
'wavelength': [1, 2],
'Value': [3.0, 4.0]}),
},
}
actual_data = exp.master_data_dict(
data_dict, c_axis_include=['temperature'])
assertDataDictEqual(actual_data, desired_data)
def test_master_data_dict_exclude_c(exp, exp_data, convert_name):
names = [convert_name(name) for name in \
[
'TEST1~temperature=25.0~wavelength=1',
'TEST1~temperature=25.0~wavelength=2',
'TEST1~temperature=35.0~wavelength=1',
'TEST1~temperature=35.0~wavelength=2',
]]
data_dict = {
names[0]: 1.0,
names[1]: 2.0,
names[2]: 3.0,
names[3]: 4.0,
}
desired_data = {
convert_name('TEST1~temperature=c~wavelength=x'): {
convert_name('TEST1~temperature=25.0~wavelength=x'):
pd.DataFrame({
'wavelength': [1, 2],
'Value': [1.0, 2.0]}),
convert_name('TEST1~temperature=35.0~wavelength=x'):
pd.DataFrame({
'wavelength': [1, 2],
'Value': [3.0, 4.0]}),
},
}
actual_data = exp.master_data_dict(
data_dict, c_axis_exclude=['wavelength'])
assertDataDictEqual(actual_data, desired_data)
def test_master_data_dict_3var(exp, exp_data, convert_name):
master_data = pd.DataFrame({
'wavelength': [0, 0, 0, 0, 1, 1, 1, 1],
'temperature': [25.0, 25.0, 35.0, 35.0, 25.0, 25.0, 35.0, 35.0],
'material': ['Au', 'Al', 'Au', 'Al', 'Au', 'Al', 'Au', 'Al'],
'Value': [0, 1, 2, 3, 4, 5, 6, 7]})
names = [
convert_name('TEST1~material=Au~temperature=25.0~wavelength=0'),
convert_name('TEST1~material=Al~temperature=25.0~wavelength=0'),
convert_name('TEST1~material=Au~temperature=35.0~wavelength=0'),
convert_name('TEST1~material=Al~temperature=35.0~wavelength=0'),
convert_name('TEST1~material=Au~temperature=25.0~wavelength=1'),
convert_name('TEST1~material=Al~temperature=25.0~wavelength=1'),
convert_name('TEST1~material=Au~temperature=35.0~wavelength=1'),
convert_name('TEST1~material=Al~temperature=35.0~wavelength=1'),
]
data_dict = {name: i for i, name in enumerate(names)}
desired_data = \
{
'TEST1~material=x~temperature=c~wavelength=0':
{'TEST1~material=x~temperature=25.0~wavelength=0':
pd.DataFrame({
'material': ['Al', 'Au'],
'Value': [1, 0]}),
'TEST1~material=x~temperature=35.0~wavelength=0':
pd.DataFrame({
'material': ['Al', 'Au'],
'Value': [3, 2]}),
},
'TEST1~material=x~temperature=c~wavelength=1':
{'TEST1~material=x~temperature=25.0~wavelength=1':
pd.DataFrame({
'material': ['Al', 'Au'],
'Value': [5, 4]}),
'TEST1~material=x~temperature=35.0~wavelength=1':
pd.DataFrame({
'material': ['Al', 'Au'],
'Value': [7, 6]}),
},
'TEST1~material=x~temperature=25.0~wavelength=c':
{'TEST1~material=x~temperature=25.0~wavelength=0':
pd.DataFrame({
'material': ['Al', 'Au'],
'Value': [1, 0]}),
'TEST1~material=x~temperature=25.0~wavelength=1':
pd.DataFrame({
'material': ['Al', 'Au'],
'Value': [5, 4]}),
},
'TEST1~material=x~temperature=35.0~wavelength=c':
{'TEST1~material=x~temperature=35.0~wavelength=0':
pd.DataFrame({
'material': ['Al', 'Au'],
'Value': [3, 2]}),
'TEST1~material=x~temperature=35.0~wavelength=1':
pd.DataFrame({
'material': ['Al', 'Au'],
'Value': [7, 6]}),
},
'TEST1~material=c~temperature=x~wavelength=0':
{'TEST1~material=Au~temperature=x~wavelength=0':
pd.DataFrame({
'temperature': [25.0, 35.0],
'Value': [0, 2]
}),
'TEST1~material=Al~temperature=x~wavelength=0':
pd.DataFrame({
'temperature': [25.0, 35.0],
'Value': [1, 3]})
},
'TEST1~material=c~temperature=x~wavelength=1':
{'TEST1~material=Au~temperature=x~wavelength=1':
pd.DataFrame({
'temperature': [25.0, 35.0],
'Value': [4, 6]}),
'TEST1~material=Al~temperature=x~wavelength=1':
pd.DataFrame({
'temperature': [25.0, 35.0],
'Value': [5, 7]}),
},
'TEST1~material=Au~temperature=x~wavelength=c':
{'TEST1~material=Au~temperature=x~wavelength=0':
pd.DataFrame({
'temperature': [25.0, 35.0],
'Value': [0, 2]
}),
'TEST1~material=Au~temperature=x~wavelength=1':
pd.DataFrame({
'temperature': [25.0, 35.0],
'Value': [4, 6]})
},
'TEST1~material=Al~temperature=x~wavelength=c':
{'TEST1~material=Al~temperature=x~wavelength=0':
pd.DataFrame({
'temperature': [25.0, 35.0],
'Value': [1, 3]}),
'TEST1~material=Al~temperature=x~wavelength=1':
pd.DataFrame({
'temperature': [25.0, 35.0],
'Value': [5, 7]}),
},
'TEST1~material=c~temperature=25.0~wavelength=x':
{'TEST1~material=Au~temperature=25.0~wavelength=x':
pd.DataFrame({
'wavelength': [0, 1],
'Value': [0, 4]}),
'TEST1~material=Al~temperature=25.0~wavelength=x':
pd.DataFrame({
'wavelength': [0, 1],
'Value': [1, 5]}),
},
'TEST1~material=c~temperature=35.0~wavelength=x':
{'TEST1~material=Au~temperature=35.0~wavelength=x':
pd.DataFrame({
'wavelength': [0, 1],
'Value': [2, 6]
}),
'TEST1~material=Al~temperature=35.0~wavelength=x':
pd.DataFrame({
'wavelength': [0, 1],
'Value': [3, 7]
}),
},
'TEST1~material=Au~temperature=c~wavelength=x':
{'TEST1~material=Au~temperature=25.0~wavelength=x':
pd.DataFrame({
'wavelength': [0, 1],
'Value': [0, 4]}),
'TEST1~material=Au~temperature=35.0~wavelength=x':
pd.DataFrame({
'wavelength': [0, 1],
'Value': [2, 6]}),
},
'TEST1~material=Al~temperature=c~wavelength=x':
{'TEST1~material=Al~temperature=25.0~wavelength=x':
pd.DataFrame({
'wavelength': [0, 1],
'Value': [1, 5]}),
'TEST1~material=Al~temperature=35.0~wavelength=x':
pd.DataFrame({
'wavelength': [0, 1],
'Value': [3, 7]}),
}
}
actual_data = exp.master_data_dict(data_dict)
assertDataDictEqual(actual_data, desired_data)
| 38.463768
| 73
| 0.538163
| 2,161
| 18,578
| 4.476631
| 0.058769
| 0.073909
| 0.069465
| 0.089312
| 0.873165
| 0.834298
| 0.810006
| 0.784474
| 0.768555
| 0.70891
| 0
| 0.069488
| 0.306707
| 18,578
| 482
| 74
| 38.543568
| 0.681599
| 0.002315
| 0
| 0.602198
| 0
| 0
| 0.283115
| 0.203467
| 0
| 0
| 0
| 0
| 0.03956
| 1
| 0.035165
| false
| 0
| 0.026374
| 0
| 0.061538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
96f8448a489cff4729fa90a963e20a2fa37c06da
| 123
|
py
|
Python
|
src/utils.py
|
catalyst-team/detector
|
383c17ba7701d960ca92be0aafbff05207f2de3a
|
[
"Apache-2.0"
] | 15
|
2019-05-15T13:42:51.000Z
|
2020-11-09T23:13:06.000Z
|
src/utils.py
|
catalyst-team/detector
|
383c17ba7701d960ca92be0aafbff05207f2de3a
|
[
"Apache-2.0"
] | 1
|
2020-01-09T08:53:49.000Z
|
2020-01-16T19:41:16.000Z
|
src/utils.py
|
catalyst-team/detection
|
383c17ba7701d960ca92be0aafbff05207f2de3a
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import torch
def detach(tensor: torch.Tensor) -> np.ndarray:
return tensor.detach().cpu().numpy()
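# Usage sketch: detach(torch.ones(2, 3)) returns a (2, 3) np.ndarray; the
# detach()/cpu() chain also handles GPU tensors and tensors requiring grad.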
| 17.571429
| 47
| 0.707317
| 18
| 123
| 4.833333
| 0.611111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.154472
| 123
| 6
| 48
| 20.5
| 0.836538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
8c412d91d6c2fe86f901ba866fdb88c37f6845d2
| 42
|
py
|
Python
|
tests/molecular/molecules/molecule/fixtures/cage/two_plus_five/__init__.py
|
stevenbennett96/stk
|
6e5af87625b83e0bfc7243bc42d8c7a860cbeb76
|
[
"MIT"
] | 21
|
2018-04-12T16:25:24.000Z
|
2022-02-14T23:05:43.000Z
|
tests/molecular/molecules/molecule/fixtures/cage/two_plus_five/__init__.py
|
stevenbennett96/stk
|
6e5af87625b83e0bfc7243bc42d8c7a860cbeb76
|
[
"MIT"
] | 8
|
2019-03-19T12:36:36.000Z
|
2020-11-11T12:46:00.000Z
|
tests/molecular/molecules/molecule/fixtures/cage/two_plus_five/__init__.py
|
stevenbennett96/stk
|
6e5af87625b83e0bfc7243bc42d8c7a860cbeb76
|
[
"MIT"
] | 5
|
2018-08-07T13:00:16.000Z
|
2021-11-01T00:55:10.000Z
|
from .twelve_plus_thirty import * # noqa
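# The `noqa` marker silences flake8 on this line (notably the star-import
# checks F401/F403); re-exporting the fixtures is the whole point here.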
| 21
| 41
| 0.761905
| 6
| 42
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 42
| 1
| 42
| 42
| 0.857143
| 0.095238
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8b33f4012d020db75e04406be7e09c8a12934b8d
| 17,468
|
py
|
Python
|
cogs/events.py
|
bradybellini/discord-cogs
|
872c4734b47815f1891c550aa0a3d0829c95cb39
|
[
"MIT"
] | null | null | null |
cogs/events.py
|
bradybellini/discord-cogs
|
872c4734b47815f1891c550aa0a3d0829c95cb39
|
[
"MIT"
] | null | null | null |
cogs/events.py
|
bradybellini/discord-cogs
|
872c4734b47815f1891c550aa0a3d0829c95cb39
|
[
"MIT"
] | null | null | null |
import discord
import datetime
import sqlite3
from discord.ext import commands
class Events(commands.Cog, name='Event System Cog'):
def __init__(self, client):
self.client = client
@commands.group(invoke_without_command=True)
async def event(self, ctx):
embed = discord.Embed(colour=0xffa2ce)
embed.set_author(name="Help Module" ,icon_url=f'{self.client.user.avatar_url}')
embed.set_footer(text="Made by brady#5078")
embed.add_field(name="__Events__ Module", value="This module allows users to manage events in the database. \n`<>` = input optional \n`[]` = input required")
embed.add_field(name="Available Commands", value='1. `m.event new [event date] <event description>` \n2. `m.event update [event id] [updated event date] <updated event description>` \n3. `m.event status <event id> [new event status]` \n4. `m.event delete [event id] \n5. `m.event search [query]` \n`m.event upcoming`')
embed.add_field(name="Command Descriptions", value="""1. Adds a new event to the database. If no description is givent, it will be 'None'.
2. Updates an event in the database. If an event id does not match or does not exist, a new event will be created with the given inputs.
3. Check and events status or update an events status
4. Delete and existing event. Note: This cannot be undone. If an event was accidentaly deleted, just make a new event with the same details.
5. Searches for events with the provided search query. Note: This searches all columns EXCEPT date added. You are able to search for the event id, status, date, description and who added the event.
6. Gets the next 3 upcoming events, if any exists.""")
embed.add_field(name="Command Examples", value="1. `m.event new 04/23/2020 Come celebrate Brady's b-day at 3pm pst!` \n2. `m.event update 34 05/24/2021 Brady's new birthday` \n3. `m.event status 34 Event is closed` or `m.event status 34` \n4. `m.event delete 34` \n5. `m.event search 12/25/2019` \n6. `m.event upcoming`")
embed.timestamp = datetime.datetime.utcnow()
await ctx.send(embed=embed)
    #@TODO Add error handling for new and update
    @event.command()
    async def new(self, ctx, event_date, *, event=None):
        date_added = datetime.datetime.now()
        status = 'upcoming'
        main = sqlite3.connect('main.sqlite')
        cursor = main.cursor()
        sql = ("INSERT INTO events(created_by, event_date, event, date_added, status) VALUES(?,?,?,?,?)")
        val = (str(ctx.message.author), event_date, event, date_added, status)
        cursor.execute(sql, val)
        main.commit()
        # lastrowid is the id of the row just inserted.
        event_id = cursor.lastrowid
        embed = discord.Embed(colour=0xff005b, description=f"{event}")
        embed.set_author(name=f"Event id - {event_id}")
        embed.set_footer(text="Bot", icon_url=f'{self.client.user.avatar_url}')
        embed.timestamp = datetime.datetime.utcnow()
        embed.add_field(name="Event added by", value=f"`{ctx.message.author}`", inline=True)
        embed.add_field(name="Date event was added", value=f"`{datetime.datetime.now().date()}`", inline=True)
        embed.add_field(name="Date of Event", value=f"`{event_date}`", inline=True)
        await ctx.send(embed=embed)
        cursor.close()
        main.close()
    @event.command()
    async def update(self, ctx, id, event_date, *, event=None):
        date_added = datetime.datetime.now()
        status = 'upcoming'
        main = sqlite3.connect('main.sqlite')
        cursor = main.cursor()
        cursor.execute('SELECT id FROM events WHERE id = ?', (id,))
        result = cursor.fetchone()
        if result is None:
            # Unknown id: insert a new event, then report the id the
            # database actually assigned via lastrowid.
            sql = ("INSERT INTO events(created_by, event_date, event, date_added, status) VALUES(?,?,?,?,?)")
            val = (str(ctx.message.author), event_date, event, date_added, status)
            cursor.execute(sql, val)
            main.commit()
            event_id = cursor.lastrowid
            embed = discord.Embed(colour=0xff005b, description=f"{event}")
            embed.set_author(name=f"Event id - {event_id}")
        else:
            sql = ("UPDATE events SET created_by = ?, event_date = ?, event = ? WHERE id = ?")
            val = (str(ctx.message.author), event_date, event, id)
            cursor.execute(sql, val)
            main.commit()
            embed = discord.Embed(colour=0xff005b, description=f"{event}")
            embed.set_author(name=f"Event - {id} - Updated")
        embed.set_footer(text="Bot", icon_url=f'{self.client.user.avatar_url}')
        embed.timestamp = datetime.datetime.utcnow()
        embed.add_field(name="Event added by", value=f"`{ctx.message.author}`", inline=True)
        embed.add_field(name="Date event was added", value=f"`{date_added}`", inline=True)
        embed.add_field(name="Date of Event", value=f"`{event_date}`", inline=True)
        await ctx.send(embed=embed)
        cursor.close()
        main.close()
    @event.command()
    async def status(self, ctx, id=None, *, status=None):
        if id is None:
            embed = discord.Embed(colour=0xffffff, description="No event id provided")
            embed.timestamp = datetime.datetime.utcnow()
            embed.set_author(name="Something is not right...")
            embed.set_footer(text="Bot", icon_url=f'{self.client.user.avatar_url}')
            await ctx.send(embed=embed)
        else:
            main = sqlite3.connect('main.sqlite')
            cursor = main.cursor()
            # Parameterized lookup; a malformed id simply returns no row.
            cursor.execute('SELECT status FROM events WHERE id = ?', (id,))
            result = cursor.fetchone()
            if result is None:
                embed = discord.Embed(colour=0xffffff, description="No event exists with that id")
                embed.timestamp = datetime.datetime.utcnow()
                embed.set_author(name="Something is not right...")
                embed.set_footer(text="Bot", icon_url=f'{self.client.user.avatar_url}')
                await ctx.send(embed=embed)
            elif status is None:
                old_status = result[0]
                embed = discord.Embed(colour=0xff9cdd)
                embed.set_author(name=f"Event - {id} - Status")
                embed.set_footer(text="Bot", icon_url=f'{self.client.user.avatar_url}')
                embed.timestamp = datetime.datetime.utcnow()
                embed.add_field(name="Status", value=f"{old_status}", inline=True)
                await ctx.send(embed=embed)
            else:
                old_status = result[0]
                cursor.execute("UPDATE events SET status = ? WHERE id = ?", (status, id))
                main.commit()
                embed = discord.Embed(colour=0xff9cdd)
                embed.set_author(name=f"Event - {id} - Status Updated")
                embed.set_footer(text="Bot", icon_url=f'{self.client.user.avatar_url}')
                embed.timestamp = datetime.datetime.utcnow()
                embed.add_field(name="Old Status", value=f"{old_status}", inline=True)
                embed.add_field(name="New Status", value=f"{status}", inline=True)
                await ctx.send(embed=embed)
            cursor.close()
            main.close()
@event.command()
async def delete(self, ctx, id=None):
if id is None:
embed = discord.Embed(colour=0xffffff, description="No event id provided")
embed.timestamp = datetime.datetime.utcnow()
embed.set_author(name="Something is not right...")
embed.set_footer(text="Bot", icon_url=f'{self.client.user.avatar_url}')
await ctx.send(embed=embed)
else:
main = sqlite3.connect('main.sqlite')
cursor = main.cursor()
            cursor.execute('SELECT id FROM events WHERE id = ?', (id,))
            result = cursor.fetchone()
            if result is None:
                embed = discord.Embed(colour=0xffffff, description="No event exists with that id")
embed.timestamp = datetime.datetime.utcnow()
embed.set_author(name="Something is not right...")
embed.set_footer(text="Bot", icon_url=f'{self.client.user.avatar_url}')
await ctx.send(embed=embed)
elif result is not None:
sql = ("DELETE FROM events WHERE id = ?")
embed = discord.Embed(colour=0xff9cdd)
embed.set_author(name=f"Event - {id} - Deleted")
embed.set_footer(text="Bot", icon_url=f'{self.client.user.avatar_url}')
embed.timestamp = datetime.datetime.utcnow()
await ctx.send(embed=embed)
cursor.execute(sql, (id,))
main.commit()
cursor.close()
main.close()
    @event.command()
    async def search(self, ctx, *, query=None):
        if query is None:
            embed = discord.Embed(colour=0xffffff, description="No search query provided")
            embed.timestamp = datetime.datetime.utcnow()
            embed.set_author(name="Something is not right...")
            embed.set_footer(text="Bot", icon_url=f'{self.client.user.avatar_url}')
            await ctx.send(embed=embed)
        else:
            main = sqlite3.connect('main.sqlite')
            cursor = main.cursor()
            # Parameterized LIKE search over every column except date added,
            # which avoids SQL injection through the user-supplied query.
            like = '%{}%'.format(query)
            cursor.execute('''SELECT * FROM events WHERE id LIKE ? OR event_date LIKE ? OR event LIKE ?
                           OR created_by LIKE ? OR status LIKE ? ORDER BY event_date''',
                           (like, like, like, like, like))
            result = cursor.fetchmany(size=3)
            if not result:
                embed = discord.Embed(colour=0xffffff, description="No event found with provided search query")
                embed.timestamp = datetime.datetime.utcnow()
                embed.set_author(name="Something is not right...")
                embed.set_footer(text="Bot", icon_url=f'{self.client.user.avatar_url}')
                await ctx.send(embed=embed)
            else:
                # One embed per match (at most three rows are fetched).
                for row in result:
                    embed = discord.Embed(colour=0xff005b, description=f"{row[2]}")
                    embed.set_author(name=f"Event id - {row[0]}")
                    embed.set_footer(text="Bot", icon_url=f'{self.client.user.avatar_url}')
                    embed.timestamp = datetime.datetime.utcnow()
                    embed.add_field(name="Event added by", value=f"`{row[3]}`", inline=True)
                    embed.add_field(name="Date event was added", value=f"`{row[4]}`", inline=True)
                    embed.add_field(name="Date of Event", value=f"`{row[1]}`", inline=True)
                    embed.add_field(name="Event Status", value=f"`{row[5]}`", inline=True)
                    await ctx.send(embed=embed)
            cursor.close()
            main.close()
    @event.command()
    async def upcoming(self, ctx):
        main = sqlite3.connect('main.sqlite')
        cursor = main.cursor()
        cursor.execute("SELECT * FROM events WHERE status = 'upcoming' ORDER BY event_date ASC LIMIT 3")
        result = cursor.fetchmany(size=3)
        if not result:
            embed = discord.Embed(colour=0xffffff, description="No upcoming events")
            embed.timestamp = datetime.datetime.utcnow()
            embed.set_author(name="Something is not right...")
            embed.set_footer(text="Bot", icon_url=f'{self.client.user.avatar_url}')
            await ctx.send(embed=embed)
        else:
            # One embed per upcoming event (the query is capped at three rows).
            for row in result:
                embed = discord.Embed(colour=0xff005b, description=f"{row[2]}")
                embed.set_author(name=f"Event id - {row[0]}")
                embed.set_footer(text="Bot", icon_url=f'{self.client.user.avatar_url}')
                embed.timestamp = datetime.datetime.utcnow()
                embed.add_field(name="Event added by", value=f"`{row[3]}`", inline=True)
                embed.add_field(name="Date event was added", value=f"`{row[4]}`", inline=True)
                embed.add_field(name="Date of Event", value=f"`{row[1]}`", inline=True)
                embed.add_field(name="Event Status", value=f"`{row[5]}`", inline=True)
                await ctx.send(embed=embed)
        cursor.close()
        main.close()
def setup(client):
client.add_cog(Events(client))
print('Events Cog loaded')
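# Loading sketch (assuming a standard discord.py bot): calling
# client.load_extension('cogs.events') imports this module and runs setup().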
| 60.864111
| 329
| 0.573849
| 2,189
| 17,468
| 4.503883
| 0.098218
| 0.032458
| 0.052744
| 0.068973
| 0.794097
| 0.780505
| 0.775129
| 0.770464
| 0.761943
| 0.75322
| 0
| 0.015215
| 0.288871
| 17,468
| 287
| 330
| 60.864112
| 0.778458
| 0.010992
| 0
| 0.80292
| 0
| 0.029197
| 0.306472
| 0.041334
| 0
| 0
| 0.009262
| 0.003484
| 0
| 1
| 0.007299
| false
| 0.014599
| 0.014599
| 0
| 0.025547
| 0.00365
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8c95195f9f393e9b4e6aeccb01e0db9606b3d7b9
| 80,418
|
py
|
Python
|
pmutt/empirical/nasa.py
|
wittregr/pMuTT
|
1678fd3d3a10d8ef5389c02970a7ebaa92fc7344
|
[
"MIT"
] | 28
|
2018-10-29T17:44:30.000Z
|
2022-03-23T14:20:16.000Z
|
pmutt/empirical/nasa.py
|
wittregr/pMuTT
|
1678fd3d3a10d8ef5389c02970a7ebaa92fc7344
|
[
"MIT"
] | 101
|
2018-10-18T19:49:30.000Z
|
2022-01-19T10:59:57.000Z
|
pmutt/empirical/nasa.py
|
wittregr/pMuTT
|
1678fd3d3a10d8ef5389c02970a7ebaa92fc7344
|
[
"MIT"
] | 16
|
2018-12-15T17:01:21.000Z
|
2022-01-03T17:42:23.000Z
|
# -*- coding: utf-8 -*-
"""
pmutt.empirical.nasa
Operations related to Nasa polynomials
"""
import inspect
from copy import copy
from warnings import warn
import numpy as np
from scipy.optimize import Bounds, LinearConstraint, minimize, minimize_scalar
from pmutt import (_apply_numpy_operation, _get_R_adj, _is_iterable,
_pass_expected_arguments)
from pmutt import constants as c
from pmutt.empirical import EmpiricalBase
from pmutt.io.cantera import obj_to_cti
from pmutt.io.json import json_to_pmutt, remove_class
from pmutt.mixture import _get_mix_quantity
class Nasa(EmpiricalBase):
"""Stores the NASA polynomial coefficients for species. Inherits from
:class:`~pmutt.empirical.EmpiricalBase`
The thermodynamic properties are calculated using the following form:
:math:`\\frac {Cp} {R} = a_{1} + a_{2} T + a_{3} T^{2} + a_{4} T^{3}
+ a_{5} T^{4}`
:math:`\\frac {H} {RT} = a_{1} + a_{2} \\frac {T} {2} + a_{3}
\\frac {T^{2}} {3} + a_{4} \\frac {T^{3}} {4} + a_{5}
\\frac {T^{4}} {5} + a_{6} \\frac {1} {T}`
:math:`\\frac {S} {R} = a_{1} \\ln {T} + a_{2} T + a_{3}
\\frac {T^{2}} {2} + a_{4} \\frac {T^{3}} {3} + a_{5}
\\frac {T^{4}} {4} + a_{7}`
Attributes
----------
T_low : float
Lower temperature bound (in K)
T_mid : float
Middle temperature bound (in K)
T_high : float
High temperature bound (in K)
a_low : (7,) `numpy.ndarray`_
NASA polynomial to use between T_low and T_mid
a_high : (7,) `numpy.ndarray`_
NASA polynomial to use between T_mid and T_high
cat_site : :class:`~pmutt.chemkin.CatSite` object, optional
Catalyst site for adsorption. Used only for Chemkin input/output.
Default is None
n_sites : int, optional
Number of catalyst sites occupied by species. If cat_site is not
assigned, then n_sites is None. If cat_site is specified, the
default is 1
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
def __init__(self,
name,
T_low,
T_mid,
T_high,
a_low,
a_high,
cat_site=None,
n_sites=None,
**kwargs):
super().__init__(name=name, **kwargs)
self.T_low = T_low
self.T_mid = T_mid
self.T_high = T_high
self.a_low = np.array(a_low)
self.a_high = np.array(a_high)
if inspect.isclass(cat_site):
self.cat_site = _pass_expected_arguments(cat_site, **kwargs)
else:
self.cat_site = cat_site
if self.cat_site is not None and n_sites is None:
n_sites = 1
self.n_sites = n_sites
def get_a(self, T):
"""Returns the correct polynomial range based on T_low, T_mid and
T_high
Parameters
----------
T : float
Temperature in K
Returns
-------
a : (7,) `numpy.ndarray`_
NASA polynomial coefficients
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
if type(self.T_mid) is list:
self.T_mid = self.T_mid[0]
if T < self.T_mid:
if T < self.T_low:
                warn_msg = ('Requested temperature ({} K), below T_low '
                            '({} K) for Nasa object, {}'
                            ''.format(T, self.T_low, self.name))
warn(warn_msg, RuntimeWarning)
return self.a_low
else:
if T > self.T_high:
                warn_msg = ('Requested temperature ({} K), above T_high '
                            '({} K) for Nasa object, {}'
                            ''.format(T, self.T_high, self.name))
warn(warn_msg, RuntimeWarning)
return self.a_high
def get_CpoR(self, T, raise_error=True, raise_warning=True, **kwargs):
"""Calculate the dimensionless heat capacity
Parameters
----------
T : float or (N,) `numpy.ndarray`_
Temperature(s) in K
raise_error : bool, optional
If True, raises an error if any of the modes do not have the
quantity of interest. Default is True
raise_warning : bool, optional
Only relevant if raise_error is False. Raises a warning if any
of the modes do not have the quantity of interest. Default is
True
kwargs : key-word arguments
Arguments to calculate mixture model properties, if any
Returns
-------
CpoR : float or (N,) `numpy.ndarray`_
Dimensionless heat capacity
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
if _is_iterable(T):
CpoR = np.zeros(len(T))
for i, T_i in enumerate(T):
a = self.get_a(T_i)
CpoR[i] = get_nasa_CpoR(a=a, T=T_i) \
+ np.sum(_get_mix_quantity(self.misc_models,
method_name='get_CpoR',
raise_error=raise_error,
raise_warning=raise_warning,
default_value=0.,
T=T_i, **kwargs))
else:
a = self.get_a(T=T)
CpoR = get_nasa_CpoR(a=a, T=T) \
+ np.sum(_get_mix_quantity(self.misc_models,
method_name='get_CpoR',
raise_error=raise_error,
raise_warning=raise_warning,
default_value=0.,
T=T, **kwargs))
return CpoR
def get_Cp(self, T, units, raise_error=True, raise_warning=True, **kwargs):
"""Calculate the heat capacity
Parameters
----------
T : float or (N,) `numpy.ndarray`_
Temperature(s) in K
units : str
Units as string. See :func:`~pmutt.constants.R` for accepted
units.
raise_error : bool, optional
If True, raises an error if any of the modes do not have the
quantity of interest. Default is True
raise_warning : bool, optional
Only relevant if raise_error is False. Raises a warning if any
of the modes do not have the quantity of interest. Default is
True
kwargs : key-word arguments
Arguments to calculate mixture model properties, if any
Returns
-------
Cp : float or (N,) `numpy.ndarray`_
Heat capacity
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
R_adj = _get_R_adj(units=units, elements=self.elements)
        return self.get_CpoR(T=T,
                             raise_error=raise_error,
                             raise_warning=raise_warning,
                             **kwargs) * R_adj
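        # Usage sketch: get_Cp(T=298.15, units='J/mol/K') returns CpoR scaled
        # by R in J/mol/K (the units string here is an assumed example).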
def get_HoRT(self, T, raise_error=True, raise_warning=True, **kwargs):
"""Calculate the dimensionless enthalpy
Parameters
----------
T : float or (N,) `numpy.ndarray`_
Temperature(s) in K
raise_error : bool, optional
If True, raises an error if any of the modes do not have the
quantity of interest. Default is True
raise_warning : bool, optional
Only relevant if raise_error is False. Raises a warning if any
of the modes do not have the quantity of interest. Default is
True
kwargs : key-word arguments
Arguments to calculate mixture model properties, if any
Returns
-------
HoRT : float or (N,) `numpy.ndarray`_
Dimensionless enthalpy
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
if _is_iterable(T):
HoRT = np.zeros_like(a=T, dtype=np.double)
for i, T_i in enumerate(T):
a = self.get_a(T=T_i)
HoRT[i] = get_nasa_HoRT(a=a, T=T_i) \
+ np.sum(_get_mix_quantity(misc_models=self.misc_models,
method_name='get_HoRT',
raise_error=raise_error,
raise_warning=raise_warning,
default_value=0.,
T=T_i, **kwargs))
else:
a = self.get_a(T=T)
HoRT = get_nasa_HoRT(a=a, T=T) \
+ np.sum(_get_mix_quantity(misc_models=self.misc_models,
method_name='get_HoRT',
raise_error=raise_error,
raise_warning=raise_warning,
default_value=0.,
T=T, **kwargs))
return HoRT
def get_H(self, T, units, raise_error=True, raise_warning=True, **kwargs):
"""Calculate the enthalpy
Parameters
----------
T : float or (N,) `numpy.ndarray`_
Temperature(s) in K
units : str
Units as string. See :func:`~pmutt.constants.R` for accepted
units but omit the '/K' (e.g. J/mol).
raise_error : bool, optional
If True, raises an error if any of the modes do not have the
quantity of interest. Default is True
raise_warning : bool, optional
Only relevant if raise_error is False. Raises a warning if any
of the modes do not have the quantity of interest. Default is
True
kwargs : key-word arguments
Arguments to calculate mixture model properties, if any
Returns
-------
H : float or (N,) `numpy.ndarray`_
Enthalpy
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
units = '{}/K'.format(units)
R_adj = _get_R_adj(units=units, elements=self.elements)
return self.get_HoRT(T=T,
raise_error=raise_error,
raise_warning=raise_warning,
**kwargs) * T * R_adj
def get_SoR(self, T, raise_error=True, raise_warning=True, **kwargs):
"""Calculate the dimensionless entropy
Parameters
----------
T : float or (N,) `numpy.ndarray`_
Temperature(s) in K
raise_error : bool, optional
If True, raises an error if any of the modes do not have the
quantity of interest. Default is True
raise_warning : bool, optional
Only relevant if raise_error is False. Raises a warning if any
of the modes do not have the quantity of interest. Default is
True
kwargs : key-word arguments
Arguments to calculate mixture model properties, if any
Returns
-------
SoR : float or (N,) `numpy.ndarray`_
Dimensionless entropy
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
if _is_iterable(T):
SoR = np.zeros_like(a=T, dtype=np.double)
for i, T_i in enumerate(T):
a = self.get_a(T=T_i)
SoR[i] = get_nasa_SoR(a=a, T=T_i) \
+ np.sum(_get_mix_quantity(misc_models=self.misc_models,
method_name='get_SoR',
raise_error=raise_error,
raise_warning=raise_warning,
default_value=0.,
T=T_i, **kwargs))
else:
a = self.get_a(T=T)
SoR = get_nasa_SoR(a=a, T=T) \
+ np.sum(_get_mix_quantity(misc_models=self.misc_models,
method_name='get_SoR',
raise_error=raise_error,
raise_warning=raise_warning,
default_value=0.,
T=T, **kwargs))
return SoR
def get_S(self, T, units, raise_error=True, raise_warning=True, **kwargs):
"""Calculate the entropy
Parameters
----------
T : float or (N,) `numpy.ndarray`_
Temperature(s) in K
units : str
Units as string. See :func:`~pmutt.constants.R` for accepted
units.
raise_error : bool, optional
If True, raises an error if any of the modes do not have the
quantity of interest. Default is True
raise_warning : bool, optional
Only relevant if raise_error is False. Raises a warning if any
of the modes do not have the quantity of interest. Default is
True
kwargs : key-word arguments
Arguments to calculate mixture model properties, if any
Returns
-------
S : float or (N,) `numpy.ndarray`_
Entropy
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
R_adj = _get_R_adj(units=units, elements=self.elements)
return self.get_SoR(T=T,
raise_error=raise_error,
raise_warning=raise_warning,
**kwargs) * R_adj
def get_GoRT(self, T, raise_error=True, raise_warning=True, **kwargs):
"""Calculate the dimensionless Gibbs free energy
Parameters
----------
T : float or (N,) `numpy.ndarray`_
Temperature(s) in K
raise_error : bool, optional
If True, raises an error if any of the modes do not have the
quantity of interest. Default is True
raise_warning : bool, optional
Only relevant if raise_error is False. Raises a warning if any
of the modes do not have the quantity of interest. Default is
True
kwargs : key-word arguments
Arguments to calculate mixture model properties, if any
Returns
-------
GoRT : float or (N,) `numpy.ndarray`_
Dimensionless Gibbs free energy
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
GoRT = self.get_HoRT(T, raise_error=raise_error,
raise_warning=raise_warning, **kwargs) \
- self.get_SoR(T, raise_error=raise_error,
raise_warning=raise_warning, **kwargs)
return GoRT
def get_G(self, T, units, raise_error=True, raise_warning=True, **kwargs):
"""Calculate the Gibbs energy
Parameters
----------
T : float or (N,) `numpy.ndarray`_
Temperature(s) in K
units : str
Units as string. See :func:`~pmutt.constants.R` for accepted
units but omit the '/K' (e.g. J/mol).
raise_error : bool, optional
If True, raises an error if any of the modes do not have the
quantity of interest. Default is True
raise_warning : bool, optional
Only relevant if raise_error is False. Raises a warning if any
of the modes do not have the quantity of interest. Default is
True
kwargs : key-word arguments
Arguments to calculate mixture model properties, if any
Returns
-------
G : float or (N,) `numpy.ndarray`_
Gibbs energy
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
units = '{}/K'.format(units)
R_adj = _get_R_adj(units=units, elements=self.elements)
return self.get_GoRT(T=T,
raise_error=raise_error,
raise_warning=raise_warning,
**kwargs) * T * R_adj
@classmethod
def from_data(cls,
name,
T,
CpoR,
T_ref,
HoRT_ref,
SoR_ref,
elements=None,
T_mid=None,
**kwargs):
"""Calculates the NASA polynomials using thermodynamic data
Parameters
----------
name : str
Name of the species
T : (N,) `numpy.ndarray`_
Temperatures in K used for fitting CpoR.
CpoR : (N,) `numpy.ndarray`_
Dimensionless heat capacity corresponding to T.
T_ref : float
            Reference temperature in K used for fitting the empirical coefficients.
HoRT_ref : float
Dimensionless reference enthalpy that corresponds to T_ref.
SoR_ref : float
Dimensionless entropy that corresponds to T_ref.
elements : dict
Composition of the species.
Keys of dictionary are elements, values are stoichiometric
values in a formula unit.
e.g. CH3OH can be represented as:
{'C': 1, 'H': 4, 'O': 1,}.
T_mid : float or iterable of float, optional
Guess for T_mid. If float, only uses that value for T_mid. If
list, finds the best fit for each element in the list. If None,
a range of T_mid values are screened between the 6th lowest
and 6th highest value of T.
Returns
-------
Nasa : Nasa object
Nasa object with polynomial terms fitted to data.
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
T_low = min(T)
T_high = max(T)
# Find midpoint temperature, and a[0] through a[4] parameters
a_low, a_high, T_mid_out = _fit_CpoR(T=T, CpoR=CpoR, T_mid=T_mid)
# Fit a[5] parameter using reference enthalpy
a_low[5], a_high[5] = _fit_HoRT(T_ref=T_ref,
HoRT_ref=HoRT_ref,
a_low=a_low,
a_high=a_high,
T_mid=T_mid_out)
# Fit a[6] parameter using reference entropy
a_low[6], a_high[6] = _fit_SoR(T_ref=T_ref,
SoR_ref=SoR_ref,
a_low=a_low,
a_high=a_high,
T_mid=T_mid_out)
return cls(name=name,
T_low=T_low,
T_high=T_high,
T_mid=T_mid_out,
a_low=a_low,
a_high=a_high,
elements=elements,
**kwargs)
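    # Usage sketch (variable names hypothetical):
    #   nasa = Nasa.from_data(name='H2O', T=T_grid, CpoR=CpoR_grid,
    #                         T_ref=298.15, HoRT_ref=HoRT_ref, SoR_ref=SoR_ref)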
@classmethod
def from_statmech(cls,
name,
statmech_model,
T_low,
T_high,
T_mid=None,
references=None,
elements=None,
**kwargs):
"""Calculates the NASA polynomials using statistical mechanic models.
Deprecated as of Version 1.2.13. Please use ``from_model`` instead.
Parameters
----------
name : str
Name of the species
statmech_model : `pmutt.statmech.StatMech` object or class
Statistical Mechanics model to generate data
T_low : float
            Lower limit temperature in K
T_high : float
Higher limit temperature in K
T_mid : float or iterable of float, optional
Guess for T_mid. If float, only uses that value for T_mid. If
list, finds the best fit for each element in the list. If None,
a range of T_mid values are screened between the 6th lowest
and 6th highest value of T.
references : `pmutt.empirical.references.References` object
Reference to adjust enthalpy
elements : dict
Composition of the species.
Keys of dictionary are elements, values are stoichiometric
values in a formula unit.
e.g. CH3OH can be represented as:
{'C': 1, 'H': 4, 'O': 1,}.
kwargs : keyword arguments
            Used to initialize ``statmech_model`` or ``EmpiricalBase``
attributes to be stored.
Returns
-------
Nasa : Nasa object
Nasa object with polynomial terms fitted to data.
"""
err_msg = ('Nasa.from_statmech is deprecated as of Version 1.2.13. '
'Please use the more generic function, Nasa.from_model.')
raise RuntimeError(err_msg)
@classmethod
def from_model(cls,
model,
name=None,
T_low=None,
T_high=None,
T_mid=None,
elements=None,
n_T=50,
**kwargs):
"""Calculates the NASA polynomials using the model passed
Parameters
----------
model : Model object or class
Model to generate data. Must contain the methods `get_CpoR`,
`get_HoRT` and `get_SoR`
name : str, optional
Name of the species. If not passed, `model.name` will be used.
T_low : float, optional
            Lower limit temperature in K. If not passed, `model.T_low` will
be used.
T_high : float, optional
Higher limit temperature in K. If not passed, `model.T_high`
will be used.
T_mid : float or iterable of float, optional
Guess for T_mid. If float, only uses that value for T_mid. If
list, finds the best fit for each element in the list. If None,
a range of T_mid values are screened between the 6th lowest
and 6th highest value of T.
elements : dict, optional
Composition of the species. If not passed, `model.elements`
will be used. Keys of dictionary are elements, values are
stoichiometric values in a formula unit.
e.g. CH3OH can be represented as:
{'C': 1, 'H': 4, 'O': 1,}.
n_T : int, optional
Number of data points between `T_low` and `T_high` for fitting
heat capacity. Default is 50.
kwargs : keyword arguments
            Used to initialize model if a class is passed.
Returns
-------
Nasa : Nasa object
Nasa object with polynomial terms fitted to data.
"""
# Initialize the model object
if inspect.isclass(model):
model = model(name=name, elements=elements, **kwargs)
if name is None:
try:
name = model.name
except AttributeError:
err_msg = ('Name must either be passed to from_model directly '
'or be an attribute of model.')
raise AttributeError(err_msg)
if T_low is None:
try:
T_low = model.T_low
except AttributeError:
err_msg = ('T_low must either be passed to from_model '
'directly or be an attribute of model.')
raise AttributeError(err_msg)
if T_high is None:
try:
T_high = model.T_high
except AttributeError:
err_msg = ('T_high must either be passed to from_model '
'directly or be an attribute of model.')
raise AttributeError(err_msg)
if elements is None:
try:
elements = model.elements
except AttributeError:
pass
# Check if inputted T_low and T_high are outside model's T_low and
# T_high range
try:
if T_low < model.T_low:
warn_msg = ('Inputted T_low is lower than model T_low. Fitted '
'empirical object may not be valid.')
warn(warn_msg, UserWarning)
except AttributeError:
pass
try:
if T_high > model.T_high:
warn_msg = ('Inputted T_high is higher than model T_high. '
'Fitted empirical object may not be valid.')
warn(warn_msg, UserWarning)
except AttributeError:
pass
# Generate heat capacity data
T = np.linspace(T_low, T_high, n_T)
try:
CpoR = model.get_CpoR(T=T)
except ValueError:
CpoR = np.array([model.get_CpoR(T=T_i) for T_i in T])
else:
if not _is_iterable(CpoR) or len(CpoR) != len(T):
CpoR = np.array([model.get_CpoR(T=T_i) for T_i in T])
# Generate enthalpy and entropy data
T_mean = (T_low + T_high) / 2.
HoRT_ref = model.get_HoRT(T=T_mean)
SoR_ref = model.get_SoR(T=T_mean)
return cls.from_data(name=name,
T=T,
CpoR=CpoR,
T_ref=T_mean,
HoRT_ref=HoRT_ref,
SoR_ref=SoR_ref,
T_mid=T_mid,
model=model,
elements=elements,
**kwargs)
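    # Illustrative sketch (hypothetical model object; not from the source):
    # any object exposing get_CpoR, get_HoRT and get_SoR (plus name, T_low
    # and T_high attributes when those arguments are omitted) can be
    # converted:
    #   >>> nasa = Nasa.from_model(model=some_statmech_model,
    #   ...                        T_low=300., T_high=1000.)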
def to_cti(self):
"""Writes the object in Cantera's CTI format.
Returns
-------
CTI_str : str
Object represented as a CTI string.
"""
elements = {key: int(val) for key, val in self.elements.items()}
if self.n_sites is None:
size_str = ''
else:
size_str = ' size={},'.format(self.n_sites)
cti_str = ('species(name="{}", atoms={},{}\n'
' thermo=(NASA([{}, {}],\n'
' [{: 2.8E}, {: 2.8E}, {: 2.8E},\n'
' {: 2.8E}, {: 2.8E}, {: 2.8E},\n'
' {: 2.8E}]),\n'
' NASA([{}, {}], \n'
' [{: 2.8E}, {: 2.8E}, {: 2.8E},\n'
' {: 2.8E}, {: 2.8E}, {: 2.8E},\n'
' {: 2.8E}])))\n').format(
self.name, obj_to_cti(elements), size_str, self.T_low,
self.T_mid, self.a_low[0], self.a_low[1], self.a_low[2],
self.a_low[3], self.a_low[4], self.a_low[5],
self.a_low[6], self.T_mid, self.T_high, self.a_high[0],
self.a_high[1], self.a_high[2], self.a_high[3],
self.a_high[4], self.a_high[5], self.a_high[6])
return cti_str
def to_dict(self):
"""Represents object as dictionary with JSON-accepted datatypes
Returns
-------
obj_dict : dict
"""
obj_dict = super().to_dict()
obj_dict['class'] = str(self.__class__)
obj_dict['type'] = 'nasa'
obj_dict['a_low'] = self.a_low.tolist()
obj_dict['a_high'] = self.a_high.tolist()
obj_dict['T_low'] = self.T_low
obj_dict['T_mid'] = self.T_mid
obj_dict['T_high'] = self.T_high
try:
obj_dict['cat_site'] = self.cat_site.to_dict()
except AttributeError:
obj_dict['cat_site'] = None
obj_dict['n_sites'] = self.n_sites
return obj_dict
def to_omkm_yaml(self):
"""Returns a dictionary compatible with Cantera's YAML format
Returns
-------
yaml_dict : dict
Dictionary compatible with Cantera's YAML format
"""
yaml_dict = {
'name': self.name,
'composition': self.elements,
'thermo': {'model': 'NASA7',
'temperature-ranges': [float(self.T_low),
float(self.T_mid),
float(self.T_high)],
'data': [self.a_low.tolist(),
self.a_high.tolist()]}
}
if self.n_sites is not None:
yaml_dict['sites'] = self.n_sites
return yaml_dict
@classmethod
def from_dict(cls, json_obj):
"""Recreate an object from the JSON representation.
Parameters
----------
json_obj : dict
JSON representation
Returns
-------
Nasa : Nasa object
"""
json_obj = remove_class(json_obj)
# Reconstruct statmech model
json_obj['model'] = json_to_pmutt(json_obj['model'])
json_obj['cat_site'] = json_to_pmutt(json_obj['cat_site'])
json_obj['misc_models'] = json_to_pmutt(json_obj['misc_models'])
return cls(**json_obj)
class Nasa9(EmpiricalBase):
"""Stores the NASA9 polynomials for species.
Inherits from :class:`~pmutt.empirical.EmpiricalBase`
:math:`\\frac {Cp} {R} = a_{1} T^{-2} + a_{2} T^{-1} + a_{3} + a_{4} T
+ a_{5} T^{2} + a_{6} T^{3} + a_{7} T^{4}`
:math:`\\frac {H} {RT} = -a_{1} \\frac {T^{-2}} {2} +
a_{2} \\frac {ln {T}} {T} + a_{3} + a_{4} \\frac {T} {2} + a_{5}
\\frac {T^{2}} {3} + a_{6} \\frac {T^{3}} {4} + a_{7} \\frac {T^{4}} {5} +
a_{8} \\frac {1} {T}`
:math:`\\frac {S} {R} = -a_{1}\\frac {T^{-2}} {2} - a_2 \\frac {1} {T} +
a_{3} \\ln {T} + a_{4} T + a_{5} \\frac {T^{2}} {2} + a_{6}
\\frac {T^{3}} {3} + a_{7}\\frac {T^{4}} {4} + a_{9}`
Attributes
----------
nasas : list of :class:`~pmutt.empirical.nasa.SingleNasa9`
NASA9 polynomials for each temperature interval
T_low : float
Lower temperature bound (in K). Determined from inputted `nasas`
T_high : float
High temperature bound (in K). Determined from inputted `nasas`
"""
def __init__(self, name, nasas, n_sites=1, **kwargs):
super().__init__(name=name, **kwargs)
self.n_sites = n_sites
self.nasas = nasas
def __iter__(self):
for nasa in self.nasas:
yield nasa
def __getitem__(self, key):
return self.nasas[key]
def __len__(self):
return len(self.nasas)
@property
def nasas(self):
return self._nasas
@nasas.setter
def nasas(self, val):
self._nasas = copy(val)
@property
def T_low(self):
T_lows = [nasa.T_low for nasa in self.nasas]
return np.min(T_lows)
@property
def T_high(self):
        T_highs = [nasa.T_high for nasa in self.nasas]
        return np.max(T_highs)
def _get_nasa(self, T):
"""Gets the relevant :class:`~pmutt.empirical.nasa.SingleNasa9` object
given a temperature
        Parameters
----------
T : float
            Temperature in K
Returns
-------
nasa : :class:`~pmutt.empirical.nasa.SingleNasa9` object
Relevant NASA9 polynomial for T
Raises
------
ValueError:
Raised if no valid :class:`~pmutt.empirical.nasa.SingleNasa9`
exists for T
"""
for nasa in self.nasas:
if T <= nasa.T_high and T >= nasa.T_low:
return nasa
else:
err_msg = ('Requested T ({} K) has no valid SingleNasa9 object for '
'species, {}. The global T_low is {} K and global '
'T_high is {} K.'
''.format(T, self.name, self.T_low, self.T_high))
raise ValueError(err_msg)
def get_CpoR(self, T, raise_error=True, raise_warning=True, **kwargs):
"""Calculate the dimensionless heat capacity
Parameters
----------
T : float or (N,) `numpy.ndarray`_
Temperature(s) in K
raise_error : bool, optional
If True, raises an error if any of the modes do not have the
quantity of interest. Default is True
raise_warning : bool, optional
Only relevant if raise_error is False. Raises a warning if any
of the modes do not have the quantity of interest. Default is
True
kwargs : key-word arguments
Arguments to calculate mixture model properties, if any
Returns
-------
CpoR : float or (N,) `numpy.ndarray`_
Dimensionless heat capacity
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
if _is_iterable(T):
CpoR = np.zeros(len(T))
for i, T_i in enumerate(T):
nasa = self._get_nasa(T_i)
CpoR[i] = nasa.get_CpoR(T=T_i) \
+ np.sum(_get_mix_quantity(self.misc_models,
method_name='get_CpoR',
raise_error=raise_error,
raise_warning=raise_warning,
default_value=0.,
T=T_i, **kwargs))
else:
nasa = self._get_nasa(T=T)
CpoR = nasa.get_CpoR(T=T) \
+ np.sum(_get_mix_quantity(self.misc_models,
method_name='get_CpoR',
raise_error=raise_error,
raise_warning=raise_warning,
default_value=0.,
T=T, **kwargs))
if len(CpoR) == 1:
CpoR = CpoR.item(0)
return CpoR
def get_Cp(self, T, units, raise_error=True, raise_warning=True, **kwargs):
"""Calculate the heat capacity
Parameters
----------
T : float or (N,) `numpy.ndarray`_
Temperature(s) in K
units : str
Units as string. See :func:`~pmutt.constants.R` for accepted
units.
raise_error : bool, optional
If True, raises an error if any of the modes do not have the
quantity of interest. Default is True
raise_warning : bool, optional
Only relevant if raise_error is False. Raises a warning if any
of the modes do not have the quantity of interest. Default is
True
kwargs : key-word arguments
Arguments to calculate mixture model properties, if any
Returns
-------
Cp : float or (N,) `numpy.ndarray`_
Heat capacity
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
R_adj = _get_R_adj(units=units, elements=self.elements)
        return self.get_CpoR(T=T,
                             raise_error=raise_error,
                             raise_warning=raise_warning,
                             **kwargs) * R_adj
def get_HoRT(self, T, raise_error=True, raise_warning=True, **kwargs):
"""Calculate the dimensionless enthalpy
Parameters
----------
T : float or (N,) `numpy.ndarray`_
Temperature(s) in K
raise_error : bool, optional
If True, raises an error if any of the modes do not have the
quantity of interest. Default is True
raise_warning : bool, optional
Only relevant if raise_error is False. Raises a warning if any
of the modes do not have the quantity of interest. Default is
True
kwargs : key-word arguments
Arguments to calculate mixture model properties, if any
Returns
-------
HoRT : float or (N,) `numpy.ndarray`_
Dimensionless enthalpy
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
if _is_iterable(T):
HoRT = np.zeros_like(a=T, dtype=np.double)
for i, T_i in enumerate(T):
nasa = self._get_nasa(T=T_i)
HoRT[i] = nasa.get_HoRT(T=T_i) \
+ np.sum(_get_mix_quantity(
misc_models=self.misc_models,
method_name='get_HoRT',
raise_error=raise_error,
raise_warning=raise_warning,
default_value=0.,
T=T_i, **kwargs))
else:
nasa = self._get_nasa(T=T)
HoRT = nasa.get_HoRT(T=T) \
+ np.sum(_get_mix_quantity(misc_models=self.misc_models,
method_name='get_HoRT',
raise_error=raise_error,
raise_warning=raise_warning,
default_value=0.,
T=T, **kwargs))
if len(HoRT) == 1:
HoRT = HoRT.item(0)
return HoRT
def get_H(self, T, units, raise_error=True, raise_warning=True, **kwargs):
"""Calculate the enthalpy
Parameters
----------
T : float or (N,) `numpy.ndarray`_
Temperature(s) in K
units : str
Units as string. See :func:`~pmutt.constants.R` for accepted
units but omit the '/K' (e.g. J/mol).
raise_error : bool, optional
If True, raises an error if any of the modes do not have the
quantity of interest. Default is True
raise_warning : bool, optional
Only relevant if raise_error is False. Raises a warning if any
of the modes do not have the quantity of interest. Default is
True
kwargs : key-word arguments
Arguments to calculate mixture model properties, if any
Returns
-------
H : float or (N,) `numpy.ndarray`_
Enthalpy
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
units = '{}/K'.format(units)
R_adj = _get_R_adj(units=units, elements=self.elements)
return self.get_HoRT(T=T,
raise_error=raise_error,
raise_warning=raise_warning,
**kwargs) * T * R_adj
def get_SoR(self, T, raise_error=True, raise_warning=True, **kwargs):
"""Calculate the dimensionless entropy
Parameters
----------
T : float or (N,) `numpy.ndarray`_
Temperature(s) in K
raise_error : bool, optional
If True, raises an error if any of the modes do not have the
quantity of interest. Default is True
raise_warning : bool, optional
Only relevant if raise_error is False. Raises a warning if any
of the modes do not have the quantity of interest. Default is
True
kwargs : key-word arguments
Arguments to calculate mixture model properties, if any
Returns
-------
SoR : float or (N,) `numpy.ndarray`_
Dimensionless entropy
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
if _is_iterable(T):
SoR = np.zeros_like(a=T, dtype=np.double)
for i, T_i in enumerate(T):
nasa = self._get_nasa(T=T_i)
SoR[i] = nasa.get_SoR(T=T_i) \
+ np.sum(_get_mix_quantity(
misc_models=self.misc_models,
method_name='get_SoR',
raise_error=raise_error,
raise_warning=raise_warning,
default_value=0.,
T=T_i, **kwargs))
else:
nasa = self._get_nasa(T=T)
SoR = nasa.get_SoR(T=T) \
+ np.sum(_get_mix_quantity(misc_models=self.misc_models,
method_name='get_SoR',
raise_error=raise_error,
raise_warning=raise_warning,
default_value=0.,
T=T, **kwargs))
if len(SoR) == 1:
SoR = SoR.item(0)
return SoR
def get_S(self, T, units, raise_error=True, raise_warning=True, **kwargs):
"""Calculate the entropy
Parameters
----------
T : float or (N,) `numpy.ndarray`_
Temperature(s) in K
units : str
Units as string. See :func:`~pmutt.constants.R` for accepted
units.
raise_error : bool, optional
If True, raises an error if any of the modes do not have the
quantity of interest. Default is True
raise_warning : bool, optional
Only relevant if raise_error is False. Raises a warning if any
of the modes do not have the quantity of interest. Default is
True
kwargs : key-word arguments
Arguments to calculate mixture model properties, if any
Returns
-------
S : float or (N,) `numpy.ndarray`_
Entropy
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
R_adj = _get_R_adj(units=units, elements=self.elements)
return self.get_SoR(T=T,
raise_error=raise_error,
raise_warning=raise_warning,
**kwargs) * R_adj
def get_GoRT(self, T, raise_error=True, raise_warning=True, **kwargs):
"""Calculate the dimensionless Gibbs free energy
Parameters
----------
T : float or (N,) `numpy.ndarray`_
Temperature(s) in K
raise_error : bool, optional
If True, raises an error if any of the modes do not have the
quantity of interest. Default is True
raise_warning : bool, optional
Only relevant if raise_error is False. Raises a warning if any
of the modes do not have the quantity of interest. Default is
True
kwargs : key-word arguments
Arguments to calculate mixture model properties, if any
Returns
-------
GoRT : float or (N,) `numpy.ndarray`_
Dimensionless Gibbs free energy
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
GoRT = self.get_HoRT(T, raise_error=raise_error,
raise_warning=raise_warning, **kwargs) \
- self.get_SoR(T, raise_error=raise_error,
raise_warning=raise_warning, **kwargs)
return GoRT
def get_G(self, T, units, raise_error=True, raise_warning=True, **kwargs):
"""Calculate the Gibbs energy
Parameters
----------
T : float or (N,) `numpy.ndarray`_
Temperature(s) in K
units : str
Units as string. See :func:`~pmutt.constants.R` for accepted
units but omit the '/K' (e.g. J/mol).
raise_error : bool, optional
If True, raises an error if any of the modes do not have the
quantity of interest. Default is True
raise_warning : bool, optional
Only relevant if raise_error is False. Raises a warning if any
of the modes do not have the quantity of interest. Default is
True
kwargs : key-word arguments
Arguments to calculate mixture model properties, if any
Returns
-------
G : float or (N,) `numpy.ndarray`_
Gibbs energy
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
units = '{}/K'.format(units)
R_adj = _get_R_adj(units=units, elements=self.elements)
return self.get_GoRT(T=T,
raise_error=raise_error,
raise_warning=raise_warning,
**kwargs) * T * R_adj
@classmethod
def from_data(cls,
name,
T,
CpoR,
T_ref,
HoRT_ref,
SoR_ref,
elements=None,
T_mid=None,
fit_T_mid=True,
**kwargs):
"""Calculates the NASA polynomials using thermodynamic data
Parameters
----------
name : str
Name of the species
T : (N,) `numpy.ndarray`_
Temperatures in K used for fitting CpoR.
CpoR : (N,) `numpy.ndarray`_
Dimensionless heat capacity corresponding to T.
T_ref : float
            Reference temperature in K used for fitting empirical coefficients.
HoRT_ref : float
Dimensionless reference enthalpy that corresponds to T_ref.
SoR_ref : float
Dimensionless entropy that corresponds to T_ref.
elements : dict
Composition of the species.
Keys of dictionary are elements, values are stoichiometric
values in a formula unit.
e.g. CH3OH can be represented as:
{'C': 1, 'H': 4, 'O': 1,}.
        T_mid : iterable of float
            Temperatures (in K) separating the polynomial intervals
        fit_T_mid : bool, optional
            Not used directly by this method; accepted so it is not passed
            on to the returned object. Default is True
Returns
-------
        Nasa9 : Nasa9 object
            Nasa9 object with polynomial terms fitted to data.
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
T_low = min(T)
T_high = max(T)
        # Fit a[0] through a[6] parameters to the heat capacity data
a = _fit_CpoR9(T=T, CpoR=CpoR, T_low=T_low, T_high=T_high, T_mid=T_mid)
# Fit a[7] parameter using reference enthalpy
a = _fit_HoRT9(T_ref=T_ref, HoRT_ref=HoRT_ref, a=a, T_mid=T_mid)
# Fit a[8] parameter using reference entropy
a = _fit_SoR9(T_ref=T_ref, SoR_ref=SoR_ref, a=a, T_mid=T_mid)
nasas = []
T_interval = np.concatenate([[T_low], T_mid, [T_high]])
for a_row, T_low, T_high in zip(a, T_interval, T_interval[1:]):
nasas.append(SingleNasa9(T_low=T_low, T_high=T_high, a=a_row))
return cls(name=name, nasas=nasas, elements=elements, **kwargs)
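    # Illustrative usage (hypothetical values; not from the source): fit two
    # intervals split at 900 K
    #   >>> T = np.linspace(300., 1500., 100)
    #   >>> CpoR = 3.5 + 1e-3 * T  # synthetic heat capacity data
    #   >>> sp = Nasa9.from_data(name='X', T=T, CpoR=CpoR, T_ref=298.15,
    #   ...                      HoRT_ref=-40., SoR_ref=25., T_mid=[900.])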
@classmethod
def from_model(cls,
name,
model,
T_low,
T_high,
elements=None,
T_mid=None,
n_interval=2,
n_T=50,
fit_T_mid=True,
**kwargs):
"""Calculates the NASA polynomials using the model passed
Parameters
----------
name : str
Name of the species
model : Model object or class
Model to generate data. Must contain the methods `get_CpoR`,
`get_HoRT` and `get_SoR`
T_low : float
            Lower limit temperature in K
T_high : float
Higher limit temperature in K
elements : dict
Composition of the species.
Keys of dictionary are elements, values are stoichiometric
values in a formula unit.
e.g. CH3OH can be represented as:
{'C': 1, 'H': 4, 'O': 1,}.
        T_mid : (n_interval-1,) numpy.ndarray, optional
            Temperatures (in K) separating the intervals. See `fit_T_mid` for
            behavior.
n_interval : int, optional
Number of NASA9 polynomials to create. Default is 2
n_T : int, optional
Number of temperature values to evaluate between each interval.
            Larger values result in a better fit but take longer to run.
Default is 50.
        fit_T_mid : bool, optional
            If True, T_mid values are initial guesses that can be optimized.
            If False, T_mid values are not changed
kwargs : keyword arguments
            Used to initialize model if a class is passed.
Returns
-------
Nasa9 : Nasa9 object
            Nasa9 object with polynomial terms fitted to data.
"""
# Initialize the model object
if inspect.isclass(model):
model = model(name=name, elements=elements, **kwargs)
# Optimize T_mids
if fit_T_mid:
# If guesses not specified, use even spacing
if T_mid is None:
T_mid0 = np.linspace(T_low, T_high, n_interval + 1)[1:-1]
else:
T_mid0 = T_mid
res = minimize(method='Nelder-Mead',
x0=T_mid0,
fun=_calc_T_mid_mse_nasa9,
args=(T_low, T_high, model, n_T))
T_mid = res.x
# Generate heat capacity data for from_data
T_interval = np.concatenate([[T_low], T_mid, [T_high]])
for i, (T1, T2) in enumerate(zip(T_interval, T_interval[1:])):
if i == 0:
T = np.linspace(T1, T2, n_T)
else:
T = np.concatenate([T, np.linspace(T1, T2, n_T)])
# Calculate heat capacity
try:
CpoR = model.get_CpoR(T=T)
except ValueError:
CpoR = np.array([model.get_CpoR(T=T_i) for T_i in T])
else:
if not _is_iterable(CpoR) or len(CpoR) != len(T):
CpoR = np.array([model.get_CpoR(T=T_i) for T_i in T])
# Generate enthalpy and entropy data
HoRT_ref = model.get_HoRT(T=T_low)
SoR_ref = model.get_SoR(T=T_low)
return cls.from_data(name=name,
T=T,
CpoR=CpoR,
T_ref=T_low,
HoRT_ref=HoRT_ref,
SoR_ref=SoR_ref,
T_mid=T_mid,
model=model,
elements=elements,
fit_T_mid=False,
**kwargs)
def to_dict(self):
"""Represents object as dictionary with JSON-accepted datatypes
Returns
-------
obj_dict : dict
"""
obj_dict = super().to_dict()
obj_dict['class'] = str(self.__class__)
obj_dict['type'] = 'nasa9'
obj_dict['nasa'] = [nasa.to_dict() for nasa in self.nasas]
obj_dict['n_sites'] = self.n_sites
return obj_dict
def to_omkm_yaml(self):
"""Returns a dictionary compatible with Cantera's YAML format
Returns
-------
yaml_dict : dict
Dictionary compatible with Cantera's YAML format
"""
yaml_dict = {
'name': self.name,
'composition': self.elements,
'thermo': {'model': 'NASA9',
'reference-pressure': '1 bar'},
}
if self.n_sites is not None:
yaml_dict['sites'] = self.n_sites
# Ensure that sorted NASAs are consistent whether using T_low or T_high
nasas_sorted_T_low = sorted(self.nasas, key=lambda nasa: nasa.T_low)
nasas_sorted_T_high = sorted(self.nasas, key=lambda nasa: nasa.T_high)
assert nasas_sorted_T_low == nasas_sorted_T_high
# Add temperature ranges and polynomials
yaml_dict['thermo']['temperature-ranges'] = []
yaml_dict['thermo']['data'] = []
for nasa in nasas_sorted_T_low:
yaml_dict['thermo']['temperature-ranges'].append(float(nasa.T_low))
yaml_dict['thermo']['data'].append(nasa.a.tolist())
yaml_dict['thermo']['temperature-ranges'].append(float(nasa.T_high))
return yaml_dict
@classmethod
def from_dict(cls, json_obj):
"""Recreate an object from the JSON representation.
Parameters
----------
json_obj : dict
JSON representation
Returns
-------
        Nasa9 : Nasa9 object
"""
json_obj = remove_class(json_obj)
# Reconstruct statmech model
json_obj['nasas'] = [json_to_pmutt(nasa) for nasa in json_obj['nasas']]
json_obj['model'] = json_to_pmutt(json_obj['model'])
json_obj['misc_models'] = json_to_pmutt(json_obj['misc_models'])
return cls(**json_obj)
def to_cti(self):
"""Writes the object in Cantera's CTI format.
Returns
-------
CTI_str : str
Object represented as a CTI string.
"""
elements = {key: int(val) for key, val in self.elements.items()}
if self.n_sites is None:
size_str = ''
else:
size_str = ' size={},'.format(self.n_sites)
cti_str = ('species(name="{}", atoms={},{}\n'
' thermo=('
''.format(self.name, obj_to_cti(elements), size_str))
for i, nasa in enumerate(self.nasas):
line_indent = (i != 0)
cti_str += '{},\n'.format(nasa.to_cti(line_indent=line_indent))
cti_str = '{})\n'.format(cti_str[:-2])
return cti_str
class SingleNasa9(EmpiricalBase):
"""Stores the NASA9 polynomial for a defined interval.
Inherits from :class:`~pmutt.empirical.EmpiricalBase`
Attributes
----------
T_low : float
Lower temperature bound (in K)
T_high : float
High temperature bound (in K)
a : (9,) `numpy.ndarray`_
NASA9 polynomial to use between T_low and T_high
"""
def __init__(self, T_low, T_high, a):
self.T_low = T_low
self.T_high = T_high
self.a = a
def get_CpoR(self, T):
"""Calculate the dimensionless heat capacity
Parameters
----------
T : float or (N,) `numpy.ndarray`_
Temperature(s) in K
Returns
-------
CpoR : float or (N,) `numpy.ndarray`_
Dimensionless heat capacity
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
# Convert T to 1D numpy format
if not _is_iterable(T):
T = [T]
T = np.array(T)
CpoR = get_nasa9_CpoR(a=self.a, T=T)
return CpoR
def get_HoRT(self, T):
"""Calculate the dimensionless enthalpy
Parameters
----------
T : float or (N,) `numpy.ndarray`_
Temperature(s) in K
Returns
-------
HoRT : float or (N,) `numpy.ndarray`_
Dimensionless enthalpy
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
# Convert T to 1D numpy format
if not _is_iterable(T):
T = [T]
T = np.array(T)
HoRT = get_nasa9_HoRT(a=self.a, T=T)
return HoRT
def get_SoR(self, T):
"""Calculate the dimensionless heat capacity
Parameters
----------
T : float or (N,) `numpy.ndarray`_
Temperature(s) in K
Returns
-------
        SoR : float or (N,) `numpy.ndarray`_
            Dimensionless entropy
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
# Convert T to 1D numpy format
if not _is_iterable(T):
T = [T]
T = np.array(T)
SoR = get_nasa9_SoR(a=self.a, T=T)
return SoR
def to_dict(self):
"""Represents object as dictionary with JSON-accepted datatypes
Returns
-------
obj_dict : dict
"""
obj_dict = {
'class': str(self.__class__),
'type': 'singlenasa9',
'T_low': self.T_low,
'T_high': self.T_high,
'a': self.a.tolist()
}
return obj_dict
@classmethod
def from_dict(cls, json_obj):
"""Recreate an object from the JSON representation.
Parameters
----------
json_obj : dict
JSON representation
Returns
-------
        SingleNasa9 : SingleNasa9 object
"""
        json_obj = remove_class(json_obj)
        return cls(**json_obj)
def to_cti(self, line_indent=False):
"""Writes the object in Cantera's CTI format.
Parameters
----------
line_indent : bool, optional
If True, the first line is indented by 16 spaces. Default is
False
Returns
-------
CTI_str : str
Object represented as a CTI string.
"""
if line_indent:
line_adj = ' '
else:
line_adj = ''
cti_str = ('{}NASA([{}, {}],\n'
' [{: 2.8E}, {: 2.8E}, {: 2.8E},\n'
' {: 2.8E}, {: 2.8E}, {: 2.8E},\n'
' {: 2.8E}, {: 2.8E}, {: 2.8E}])'
''.format(line_adj, self.T_low, self.T_high, self.a[0],
self.a[1], self.a[2], self.a[3], self.a[4],
self.a[5], self.a[6], self.a[7], self.a[8]))
return cti_str
def _fit_CpoR(T, CpoR, T_mid=None):
"""Fit a[0]-a[4] coefficients in a_low and a_high attributes given the
dimensionless heat capacity data
Parameters
----------
T : (N,) `numpy.ndarray`_
Temperatures in K
CpoR : (N,) `numpy.ndarray`_
Dimensionless heat capacity
T_mid : float or iterable of float, optional
Guess for T_mid. If float, only uses that value for T_mid. If
list, finds the best fit for each element in the list. If None,
        a range of T_mid values are screened between the 6th lowest
        and 6th highest value of T.
Returns
-------
a_low : (7,) `numpy.ndarray`_
Lower coefficients of NASA polynomial
a_high : (7,) `numpy.ndarray`_
Higher coefficients of NASA polynomial
T_mid : float
Temperature in K used to split the CpoR data
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
# If the Cp/R does not vary with temperature (occurs when no
# vibrational frequencies are listed), return default values
if all([np.isclose(x, 0.) for x in CpoR]) \
or any([np.isnan(x) for x in CpoR]):
T_mid = T[int(len(T) / 2)]
a_low = np.zeros(7)
a_high = np.zeros(7)
return a_low, a_high, T_mid
# If T_mid not specified, generate range between 6th smallest data point
# and 6th largest data point
if T_mid is None:
T_mid = T[5:-5]
# If a single value for T_mid is chosen, convert to a tuple
if not _is_iterable(T_mid):
T_mid = (T_mid, )
# Initialize parameters for T_mid optimization
mse_list = []
prev_mse = np.inf
all_a_low = []
all_a_high = []
for T_m in T_mid:
# Generate temperature data
(mse, a_low, a_high) = _get_CpoR_MSE(T=T, CpoR=CpoR, T_mid=T_m)
mse_list.append(mse)
all_a_low.append(a_low)
all_a_high.append(a_high)
# Check if the optimum T_mid has been found by determining if the
# fit MSE value for the current T_mid is higher than the previous
# indicating that subsequent guesses will not improve the fit
if mse > prev_mse:
break
prev_mse = mse
    # Select the optimum T_mid based on the lowest fit MSE value
min_mse = min(mse_list)
min_i = np.where(min_mse == mse_list)[0][0]
T_mid_out = T_mid[min_i]
a_low_rev = all_a_low[min_i]
a_high_rev = all_a_high[min_i]
# Reverse array and append two zeros to end
empty_arr = np.zeros(2)
a_low_out = np.concatenate((a_low_rev[::-1], empty_arr))
a_high_out = np.concatenate((a_high_rev[::-1], empty_arr))
return a_low_out, a_high_out, T_mid_out
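# Illustrative sketch (not part of the original module): exercising the
# T_mid screen in _fit_CpoR on synthetic heat-capacity data. The values are
# hypothetical; relies on this module's numpy import and the get_nasa_CpoR
# helper defined below.
def _demo_fit_CpoR():
    T = np.linspace(300., 1000., 50)
    # Smooth, monotonically increasing synthetic Cp/R curve
    CpoR = 3.5 + 1.5 * (1. - np.exp(-(T - 300.) / 400.))
    a_low, a_high, T_mid = _fit_CpoR(T=T, CpoR=CpoR)
    # The low and high polynomials should nearly agree at the chosen T_mid
    assert np.isclose(get_nasa_CpoR(a=a_low, T=T_mid),
                      get_nasa_CpoR(a=a_high, T=T_mid), rtol=1e-2)
    return a_low, a_high, T_mid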
def _get_CpoR_MSE(T, CpoR, T_mid):
"""Calculates the mean squared error of polynomial fit.
Parameters
----------
T : (N,) `numpy.ndarray`_
Temperatures (K) to fit the polynomial
CpoR : (N,) `numpy.ndarray`_
Dimensionless heat capacities that correspond to T array
    T_mid : float
        Temperature (in K) that splits T and CpoR arrays into a lower
        and higher range
Returns
-------
mse : float
Mean squared error resulting from NASA polynomial fit to T and CpoR
p_low : (5,) `numpy.ndarray`_
Polynomial corresponding to lower range of data
p_high : (5,) `numpy.ndarray`_
Polynomial corresponding to high range of data
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
low_condition = (T <= T_mid)
high_condition = (T > T_mid)
T_low = np.extract(condition=low_condition, arr=T)
T_high = np.extract(condition=high_condition, arr=T)
CpoR_low = np.extract(condition=low_condition, arr=CpoR)
CpoR_high = np.extract(condition=high_condition, arr=CpoR)
if len(T_low) < 5:
warn_msg = ('Small set of CpoR data between T_low and T_mid. Fit may '
'not be desirable.')
warn(warn_msg, RuntimeWarning)
if len(T_high) < 5:
warn_msg = ('Small set of CpoR data between T_mid and T_high. Fit may '
'not be desirable.')
warn(warn_msg, RuntimeWarning)
# Fit the polynomials
p_low = np.polyfit(x=T_low, y=CpoR_low, deg=4)
p_high = np.polyfit(x=T_high, y=CpoR_high, deg=4)
    # Calculate MSE
CpoR_low_fit = np.polyval(p_low, T_low)
CpoR_high_fit = np.polyval(p_high, T_high)
CpoR_fit = np.concatenate((CpoR_low_fit, CpoR_high_fit))
mse = np.mean((CpoR_fit - CpoR)**2)
return (mse, p_low, p_high)
def _fit_HoRT(T_ref, HoRT_ref, a_low, a_high, T_mid):
"""Fit a[5] coefficient in a_low and a_high attributes given the
dimensionless enthalpy
Parameters
----------
T_ref : float
Reference temperature in K
    HoRT_ref : float
        Reference dimensionless enthalpy
    a_low : (7,) numpy.ndarray
        Lower coefficients of the NASA polynomial
    a_high : (7,) numpy.ndarray
        Higher coefficients of the NASA polynomial
    T_mid : float
        Temperature (in K) at which to match the low and high polynomials
Returns
-------
a6_low_out : float
Lower a6 value for NASA polynomial
a6_high_out : float
Higher a6 value for NASA polynomial
"""
a6_low_out = (HoRT_ref - get_nasa_HoRT(a=a_low, T=T_ref)) * T_ref
a6_high = (HoRT_ref - get_nasa_HoRT(a=a_high, T=T_ref)) * T_ref
# Correcting for offset
H_low_last_T = get_nasa_HoRT(a=a_low, T=T_mid) + a6_low_out / T_mid
H_high_first_T = get_nasa_HoRT(a=a_high, T=T_mid) + a6_high / T_mid
H_offset = H_low_last_T - H_high_first_T
a6_high_out = T_mid * (a6_high / T_mid + H_offset)
return a6_low_out, a6_high_out
def _fit_SoR(T_ref, SoR_ref, a_low, a_high, T_mid):
"""Fit a[6] coefficient in a_low and a_high attributes given the
dimensionless entropy
Parameters
----------
T_ref : float
Reference temperature in K
    SoR_ref : float
        Reference dimensionless entropy
    a_low : (7,) numpy.ndarray
        Lower coefficients of the NASA polynomial
    a_high : (7,) numpy.ndarray
        Higher coefficients of the NASA polynomial
    T_mid : float
        Temperature (in K) at which to match the low and high polynomials
Returns
-------
a7_low_out : float
Lower a7 value for NASA polynomial
a7_high_out : float
Higher a7 value for NASA polynomial
"""
a7_low_out = SoR_ref - get_nasa_SoR(a=a_low, T=T_ref)
a7_high = SoR_ref - get_nasa_SoR(a=a_high, T=T_ref)
# Correcting for offset
S_low_last_T = get_nasa_SoR(a=a_low, T=T_mid) + a7_low_out
S_high_first_T = get_nasa_SoR(a=a_high, T=T_mid) + a7_high
S_offset = S_low_last_T - S_high_first_T
a7_high_out = a7_high + S_offset
return a7_low_out, a7_high_out
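# Illustrative sketch (not part of the original module): the two fitters
# above pick the a[5]/a[6] offsets so that H/RT matches the reference at
# T_ref and both H/RT and S/R are continuous across T_mid. The coefficients
# below are hypothetical.
def _demo_offset_continuity():
    a_low = np.array([3.5, 1.0e-3, 0., 0., 0., 0., 0.])
    a_high = np.array([3.6, 9.0e-4, 0., 0., 0., 0., 0.])
    T_ref, T_mid = 298.15, 600.
    a_low[5], a_high[5] = _fit_HoRT(T_ref=T_ref, HoRT_ref=-10.,
                                    a_low=a_low, a_high=a_high, T_mid=T_mid)
    a_low[6], a_high[6] = _fit_SoR(T_ref=T_ref, SoR_ref=25.,
                                   a_low=a_low, a_high=a_high, T_mid=T_mid)
    # Enthalpy and entropy are continuous at the interval boundary
    assert np.isclose(get_nasa_HoRT(a=a_low, T=T_mid),
                      get_nasa_HoRT(a=a_high, T=T_mid))
    assert np.isclose(get_nasa_SoR(a=a_low, T=T_mid),
                      get_nasa_SoR(a=a_high, T=T_mid))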
def _calc_T_mid_mse_nasa9(T_mid, T_low, T_high, model, n_T=50):
"""Calculates the mean squared error associated with temperature intervals
for NASA9 polynomials
Parameters
----------
    T_mid : (N,) numpy.ndarray
        Interval boundary temperatures (in K) being evaluated
T_low : float
Lower temperature bound
T_high : float
Higher temperature bound
model : Species object
Object that can provide heat capacity at any temperature
n_T : int
Number of temperature values to evaluate between each interval
Returns
-------
mse : float
Total mean squared error
"""
# T_mid should be between T_low and T_high
if np.any(T_mid <= T_low) or np.any(T_mid >= T_high):
return np.inf
mse = 0.
# Calculate MSE for each interval
T_interval = np.concatenate([[T_low], T_mid, [T_high]])
for T1, T2 in zip(T_interval, T_interval[1:]):
T = np.linspace(T1, T2, n_T)
# Generate heat capacity data
CpoR = np.array([model.get_CpoR(T=T_i) for T_i in T])
# Optimize NASA9 coefficients
res = minimize(method='BFGS',
args=(T, CpoR),
fun=_get_nasa9_mse,
jac=_get_nasa9_mse_jacob,
x0=np.zeros(9))
mse += res.fun
return mse
def _calc_T_mid_mse_nasa(T_mid, T_low, T_high, model, n_T=50):
"""Calculates the mean squared error associated with temperature intervals
    for NASA polynomials
Parameters
----------
    T_mid : (1,) numpy.ndarray
        Middle temperature bound in K being tested (passed as an array by
        the optimizer)
T_low : float
Lower temperature bound in K
T_high : float
Higher temperature bound in K
model : Species object
Object that can provide heat capacity at any temperature
n_T : int
Number of temperature values to evaluate between each interval
Returns
-------
mse : float
Total mean squared error
"""
# T_mid should be between T_low and T_high
if np.any(T_mid <= T_low) or np.any(T_mid >= T_high):
return np.inf
mse = 0.
# Calculate MSE for each interval
T_interval = np.array([T_low, T_mid[0], T_high])
for T1, T2 in zip(T_interval, T_interval[1:]):
T = np.linspace(T1, T2, n_T)
# Generate heat capacity data
try:
CpoR = model.get_CpoR(T=T)
except ValueError:
CpoR = np.array([model.get_CpoR(T=T_i) for T_i in T])
        # Optimize NASA coefficients
res = minimize(method='BFGS',
args=(T, CpoR),
fun=_get_nasa_mse,
jac=_get_nasa_mse_jacob,
x0=np.zeros(7))
mse += res.fun
return mse
def _get_nasa_mse(a, T, CpoR):
"""Calculates the mean squared error associated with NASA coefficients
Parameters
----------
    a : (7,) numpy.ndarray
        Coefficients of NASA polynomial
    T : (N,) numpy.ndarray
        Temperatures (in K) at which to evaluate the NASA polynomial
    CpoR : (N,) numpy.ndarray
        Accurate dimensionless heat capacities corresponding to T
Returns
-------
mse : float
Total mean squared error
"""
CpoR_fit = get_nasa_CpoR(a, T)
mse = np.mean((CpoR_fit - CpoR)**2)
return mse
def _get_nasa_mse_jacob(a, T, CpoR):
"""Calculates the Jacobian associated with NASA coefficients
Parameters
----------
    a : (7,) numpy.ndarray
        Coefficients of NASA polynomial
    T : (N,) numpy.ndarray
        Temperatures (in K) at which to evaluate the NASA polynomial
    CpoR : (N,) numpy.ndarray
        Accurate dimensionless heat capacities corresponding to T
Returns
-------
    jac : (7,) numpy.ndarray
        Jacobian corresponding to a
"""
CpoR_fit = get_nasa_CpoR(a, T)
error = CpoR_fit - CpoR
    # d(mse)/d(a_i) = 2/N * sum(error * T**i); Cp/R does not depend on
    # a[5] and a[6], so those components are zero
    jac = 2. / float(len(T)) * np.array([
        np.sum(error),
        np.sum(error * T),
        np.sum(error * (T**2)),
        np.sum(error * (T**3)),
        np.sum(error * (T**4)), 0., 0.
    ])
return jac
def _get_nasa9_mse(a, T, CpoR):
"""Calculates the mean squared error associated with NASA9 coefficients
Parameters
----------
    a : (9,) numpy.ndarray
        Coefficients of NASA9 polynomial
    T : (N,) numpy.ndarray
        Temperatures (in K) at which to evaluate the NASA9 polynomial
    CpoR : (N,) numpy.ndarray
        Accurate dimensionless heat capacities corresponding to T
Returns
-------
mse : float
Total mean squared error
"""
CpoR_fit = get_nasa9_CpoR(a, T)
mse = np.mean((CpoR_fit - CpoR)**2)
return mse
def _get_nasa9_mse_jacob(a, T, CpoR):
"""Calculates the Jacobian associated with NASA9 coefficients
Parameters
----------
    a : (9,) numpy.ndarray
        Coefficients of NASA9 polynomial
    T : (N,) numpy.ndarray
        Temperatures (in K) at which to evaluate the NASA9 polynomial
    CpoR : (N,) numpy.ndarray
        Accurate dimensionless heat capacities corresponding to T
Returns
-------
    jac : (9,) numpy.ndarray
        Jacobian corresponding to a
"""
CpoR_fit = get_nasa9_CpoR(a, T)
error = CpoR_fit - CpoR
    # d(mse)/d(a_i) = 2/N * sum(error * T**(i-2)); Cp/R does not depend on
    # a[7] and a[8], so those components are zero
    jac = 2. / float(len(T)) * np.array([
        np.sum(error * (T**-2)),
        np.sum(error * (T**-1)),
        np.sum(error),
        np.sum(error * T),
        np.sum(error * (T**2)),
        np.sum(error * (T**3)),
        np.sum(error * (T**4)), 0., 0.
    ])
return jac
def _fit_CpoR9(T, CpoR, T_low, T_high, T_mid):
"""Fit a[0]-a[6] coefficients in a_low and a_high attributes given the
dimensionless heat capacity data
Parameters
----------
T : (N,) `numpy.ndarray`_
Temperatures in K
CpoR : (N,) `numpy.ndarray`_
Dimensionless heat capacity
    T_low : float
        Lower temperature bound (in K)
    T_high : float
        Higher temperature bound (in K)
    T_mid : iterable of float
        Temperatures (in K) separating the polynomial intervals
    Returns
    -------
    a : list of (9,) `numpy.ndarray`_
        Polynomial coefficients for each temperature interval
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
# If the Cp/R does not vary with temperature (occurs when no
# vibrational frequencies are listed), return default values
if all([np.isclose(x, 0.) for x in CpoR]) \
or any([np.isnan(x) for x in CpoR]):
return [np.zeros(9)] * (len(T_mid) + 1)
a = []
T_interval = np.concatenate([[T_low], T_mid, [T_high]])
for T1, T2 in zip(T_interval, T_interval[1:]):
# Find T and CpoR in interval
condition = (T > T1) & (T <= T2)
T_cond = np.extract(condition=condition, arr=T)
CpoR_cond = np.extract(condition=condition, arr=CpoR)
res = minimize(method='BFGS',
args=(T_cond, CpoR_cond),
fun=_get_nasa9_mse,
jac=_get_nasa9_mse_jacob,
x0=np.zeros(9))
a.append(res.x)
return a
def _fit_HoRT9(T_ref, HoRT_ref, a, T_mid):
"""Fit a[7] coefficient in a_low and a_high attributes given the
dimensionless enthalpy
Parameters
----------
T_ref : float
Reference temperature in K
HoRT_ref : float
Reference dimensionless enthalpy
    a : list of (9,) numpy.ndarray
        NASA9 polynomial coefficients for each interval
    T_mid : iterable of float
        Temperatures (in K) separating the polynomial intervals
Returns
-------
    a : list of (9,) numpy.ndarray
        NASA9 polynomials with the a[7] position corrected for HoRT_ref
"""
a[0][7] = (HoRT_ref - get_nasa9_HoRT(a=a[0], T=T_ref)) * T_ref
for i, row_a in enumerate(a[1:], start=1):
a8_low = (HoRT_ref - get_nasa9_HoRT(a=a[i - 1], T=T_ref)) * T_ref
a8_high = (HoRT_ref - get_nasa9_HoRT(a=a[i], T=T_ref)) * T_ref
HoRT_low = get_nasa9_HoRT(a=a[i - 1],
T=T_mid[i - 1]) + a8_low / T_mid[i - 1]
HoRT_high = get_nasa9_HoRT(a=a[i],
T=T_mid[i - 1]) + a8_high / T_mid[i - 1]
HoRT_offset = HoRT_low - HoRT_high
a[i][7] = T_mid[i - 1] * (a8_high / T_mid[i - 1] + HoRT_offset)
HoRT_ref = HoRT_low
T_ref = T_mid[i - 1]
return a
def _fit_SoR9(T_ref, SoR_ref, a, T_mid):
"""Fit a[8] coefficient in a_low and a_high attributes given the
dimensionless entropy
Parameters
----------
T_ref : float
Reference temperature in K
SoR_ref : float
Reference dimensionless entropy
    a : list of (9,) numpy.ndarray
        NASA9 polynomial coefficients for each interval
    T_mid : iterable of float
        Temperatures (in K) separating the polynomial intervals
Returns
-------
    a : list of (9,) numpy.ndarray
        NASA9 polynomials with the a[8] position corrected for SoR_ref
"""
a[0][8] = SoR_ref - get_nasa9_SoR(a=a[0], T=T_ref)
for i, row_a in enumerate(a[1:], start=1):
a9_low = SoR_ref - get_nasa9_SoR(a=a[i - 1], T=T_ref)
a9_high = SoR_ref - get_nasa9_SoR(a=a[i], T=T_ref)
SoR_low = get_nasa9_SoR(a=a[i - 1], T=T_mid[i - 1]) + a9_low
SoR_high = get_nasa9_SoR(a=a[i], T=T_mid[i - 1]) + a9_high
SoR_offset = SoR_low - SoR_high
a[i][8] = a9_high + SoR_offset
SoR_ref = SoR_low
T_ref = T_mid[i - 1]
return a
def get_nasa_CpoR(a, T):
"""Calculates the dimensionless heat capacity using NASA polynomial form
Parameters
----------
a : (7,) `numpy.ndarray`_
Coefficients of NASA polynomial
T : float
Temperature in K
Returns
-------
CpoR: float
Dimensionless heat capacity
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
T_arr = np.array([1., T, T**2, T**3, T**4, 0., 0.])
return np.dot(a, T_arr)
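# Illustrative check (not from the source): the NASA-7 Cp/R form above is a
# plain quartic in T; the a[5] and a[6] entries do not contribute. The
# coefficients below are hypothetical.
def _demo_nasa_CpoR():
    a = np.array([3.5, 1e-3, -1e-7, 0., 0., -1000., 5.])
    T = 500.
    expected = 3.5 + 1e-3 * T - 1e-7 * T**2
    assert np.isclose(get_nasa_CpoR(a=a, T=T), expected)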
def get_nasa_HoRT(a, T):
"""Calculates the dimensionless enthalpy using NASA polynomial form
Parameters
----------
a : (7,) `numpy.ndarray`_
Coefficients of NASA polynomial
T : float
Temperature in K
Returns
-------
HoRT : float
Dimensionless enthalpy
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
T_arr = np.array(
[1., T / 2., (T**2) / 3., (T**3) / 4., (T**4) / 5., 1. / T, 0.])
return np.dot(a, T_arr)
def get_nasa_SoR(a, T):
"""Calculates the dimensionless entropy using NASA polynomial form
Parameters
----------
a : (7,) `numpy.ndarray`_
Coefficients of NASA polynomial
T : float
Temperature in K
Returns
-------
SoR : float
Dimensionless entropy
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
T_arr = np.array(
[np.log(T), T, (T**2) / 2., (T**3) / 3., (T**4) / 4., 0., 1.])
return np.dot(a, T_arr)
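# Sketch (not from the source): thermodynamic consistency of the forms
# above, d(T * H/RT)/dT = Cp/R, checked with a central difference. The
# coefficients are hypothetical.
def _demo_nasa_consistency():
    a = np.array([2.5, 2e-3, -5e-7, 0., 0., -1000., 5.])
    T, dT = 800., 1e-3
    dHdT = ((T + dT) * get_nasa_HoRT(a=a, T=T + dT)
            - (T - dT) * get_nasa_HoRT(a=a, T=T - dT)) / (2. * dT)
    assert np.isclose(dHdT, get_nasa_CpoR(a=a, T=T))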
def get_nasa9_CpoR(a, T):
"""Calculates the dimensionless heat capacity using NASA polynomial form
Parameters
----------
a : (9,) `numpy.ndarray`_
Coefficients of NASA polynomial
T : float
Temperature in K
Returns
-------
CpoR: float
Dimensionless heat capacity
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
T_arr = np.array([T**-2, T**-1, 1., T, T**2, T**3, T**4, 0., 0.])
return np.dot(a, T_arr)
def get_nasa9_HoRT(a, T):
"""Calculates the dimensionless enthalpy using NASA polynomial form
Parameters
----------
a : (9,) `numpy.ndarray`_
Coefficients of NASA polynomial
T : float
Temperature in K
Returns
-------
HoRT : float
Dimensionless enthalpy
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
T_arr = np.array([
-(T**-2),
np.log(T) / T, 1., T / 2., (T**2) / 3., (T**3) / 4., (T**4) / 5.,
1. / T, 0.
])
return np.dot(a, T_arr)
def get_nasa9_SoR(a, T):
"""Calculates the dimensionless entropy using NASA polynomial form
Parameters
----------
a : (9,) `numpy.ndarray`_
Coefficients of NASA polynomial
T : float
Temperature in K
Returns
-------
SoR : float
Dimensionless entropy
.. _`numpy.ndarray`: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
"""
T_arr = np.array([
-(T**-2) / 2., -(T**-1),
np.log(T), T, (T**2) / 2., (T**3) / 3., (T**4) / 4., 0., 1.
])
return np.dot(a, T_arr)
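# Sketch (not from the source): with the leading T**-2 and T**-1
# coefficients zeroed, the NASA9 forms above reduce to the 7-coefficient
# NASA forms. The coefficients are hypothetical.
def _demo_nasa9_reduces_to_nasa7():
    a7 = np.array([3.0, 1e-3, 2e-7, 0., 0., 0., 0.])
    a9 = np.array([0., 0., 3.0, 1e-3, 2e-7, 0., 0., 0., 0.])
    T = 650.
    assert np.isclose(get_nasa_CpoR(a=a7, T=T), get_nasa9_CpoR(a=a9, T=T))
    assert np.isclose(get_nasa_HoRT(a=a7, T=T), get_nasa9_HoRT(a=a9, T=T))
    assert np.isclose(get_nasa_SoR(a=a7, T=T), get_nasa9_SoR(a=a9, T=T))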
| 37.403721
| 100
| 0.520555
| 9,901
| 80,418
| 4.049591
| 0.048278
| 0.014765
| 0.015563
| 0.012321
| 0.828856
| 0.791445
| 0.761666
| 0.738895
| 0.718793
| 0.690211
| 0
| 0.010849
| 0.381034
| 80,418
| 2,149
| 101
| 37.421126
| 0.79466
| 0.451255
| 0
| 0.599045
| 0
| 0
| 0.057692
| 0
| 0
| 0
| 0
| 0
| 0.001193
| 1
| 0.078759
| false
| 0.009547
| 0.013126
| 0.00358
| 0.173031
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8ce677b6ea7ca2f761c8b2110bec500af6095388
| 12,642
|
py
|
Python
|
tests/test_closed_streams.py
|
standy66/ngh2
|
ab4e59d2594598a8cc203fbd2ce2837350c1fa5f
|
[
"Apache-2.0"
] | null | null | null |
tests/test_closed_streams.py
|
standy66/ngh2
|
ab4e59d2594598a8cc203fbd2ce2837350c1fa5f
|
[
"Apache-2.0"
] | null | null | null |
tests/test_closed_streams.py
|
standy66/ngh2
|
ab4e59d2594598a8cc203fbd2ce2837350c1fa5f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
test_closed_streams
~~~~~~~~~~~~~~~~~~~
Tests that we handle closed streams correctly.
"""
import pytest
import h2.config
import h2.connection
import h2.errors
import h2.events
import h2.exceptions
class TestClosedStreams(object):
example_request_headers = [
(':authority', 'example.com'),
(':path', '/'),
(':scheme', 'https'),
(':method', 'GET'),
]
example_response_headers = [
(':status', '200'),
('server', 'fake-serv/0.1.0')
]
server_config = h2.config.H2Configuration(client_side=False)
def test_can_receive_multiple_rst_stream_frames(self, frame_factory):
"""
Multiple RST_STREAM frames can be received, either at once or well
after one another. Only the first fires an event.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
c.receive_data(frame_factory.server_preamble())
c.send_headers(1, self.example_request_headers, end_stream=True)
f = frame_factory.build_rst_stream_frame(stream_id=1)
events = c.receive_data(f.serialize() * 3)
# Force an iteration over all the streams to remove them.
# c.open_outbound_streams
# Receive more data.
events += c.receive_data(f.serialize() * 3)
print(events)
assert len(events) == 6
event = events[0]
assert isinstance(event, h2.events.StreamReset)
@pytest.mark.skip("nghttp2")
def test_receiving_low_stream_id_causes_goaway(self, frame_factory):
"""
The remote peer creating a stream with a lower ID than one we've seen
causes a GOAWAY frame.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.client_preamble())
c.initiate_connection()
f = frame_factory.build_headers_frame(
self.example_request_headers,
stream_id=3,
)
c.receive_data(f.serialize())
c.clear_outbound_data_buffer()
f = frame_factory.build_headers_frame(
self.example_request_headers,
stream_id=1,
)
with pytest.raises(h2.exceptions.StreamIDTooLowError) as e:
c.receive_data(f.serialize())
assert e.value.stream_id == 1
assert e.value.max_stream_id == 3
f = frame_factory.build_goaway_frame(
last_stream_id=3,
error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR,
)
assert c.data_to_send() == f.serialize()
@pytest.mark.skip("nghttp2")
def test_closed_stream_not_present_in_streams_dict(self, frame_factory):
"""
When streams have been closed, they get removed from the streams
dictionary the next time we count the open streams.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.client_preamble())
c.initiate_connection()
f = frame_factory.build_headers_frame(self.example_request_headers)
c.receive_data(f.serialize())
c.push_stream(1, 2, self.example_request_headers)
c.reset_stream(1)
c.clear_outbound_data_buffer()
f = frame_factory.build_rst_stream_frame(stream_id=2)
c.receive_data(f.serialize())
# Force a count of the streams.
assert not c.open_outbound_streams
# The streams dictionary should be empty.
assert not c.streams
class TestStreamsClosedByEndStream(object):
example_request_headers = [
(':authority', 'example.com'),
(':path', '/'),
(':scheme', 'https'),
(':method', 'GET'),
]
example_response_headers = [
(':status', '200'),
('server', 'fake-serv/0.1.0')
]
server_config = h2.config.H2Configuration(client_side=False)
@pytest.mark.parametrize(
"frame",
[
# data somehow works on nghttp2
# lambda self, ff: ff.build_data_frame(b'hello'),
lambda self, ff: ff.build_headers_frame(
self.example_request_headers, flags=['END_STREAM']),
lambda self, ff: ff.build_headers_frame(
self.example_request_headers),
]
)
def test_frames_after_recv_end_will_error(self,
frame_factory,
frame):
"""
A stream that is closed by receiving END_STREAM raises
ProtocolError when it receives an unexpected frame.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.client_preamble())
c.initiate_connection()
f = frame_factory.build_headers_frame(
self.example_request_headers, flags=['END_STREAM']
)
c.receive_data(f.serialize())
c.send_headers(
stream_id=1,
headers=self.example_response_headers,
end_stream=True
)
c.clear_outbound_data_buffer()
f = frame(self, frame_factory)
with pytest.raises(h2.exceptions.ProtocolError):
c.receive_data(f.serialize())
f = frame_factory.build_goaway_frame(
last_stream_id=1,
error_code=h2.errors.ErrorCodes.STREAM_CLOSED,
)
assert b"HEADERS: stream closed" in c.data_to_send()
@pytest.mark.skip("nghttp2")
@pytest.mark.parametrize(
"frame",
[
lambda self, ff: ff.build_data_frame(b'hello'),
lambda self, ff: ff.build_headers_frame(
self.example_response_headers, flags=['END_STREAM']),
lambda self, ff: ff.build_headers_frame(
self.example_response_headers),
]
)
def test_frames_after_send_end_will_error(self,
frame_factory,
frame):
"""
A stream that is closed by sending END_STREAM raises
ProtocolError when it receives an unexpected frame.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
c.receive_data(frame_factory.server_preamble())
c.send_headers(stream_id=1, headers=self.example_request_headers,
end_stream=True)
f = frame_factory.build_headers_frame(
self.example_response_headers, flags=['END_STREAM']
)
c.receive_data(f.serialize())
c.clear_outbound_data_buffer()
f = frame(self, frame_factory)
with pytest.raises(h2.exceptions.ProtocolError):
print(c.receive_data(f.serialize()))
f = frame_factory.build_goaway_frame(
last_stream_id=0,
error_code=h2.errors.ErrorCodes.STREAM_CLOSED,
)
assert c.data_to_send() == f.serialize()
@pytest.mark.parametrize(
"frame",
[
lambda self, ff: ff.build_window_update_frame(1, 1),
# lambda self, ff: ff.build_rst_stream_frame(1)
]
)
def test_frames_after_send_end_will_be_ignored(self,
frame_factory,
frame):
"""
        A stream that is closed by sending END_STREAM ignores certain frames
        (e.g. WINDOW_UPDATE) received afterwards instead of raising
        ProtocolError.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.client_preamble())
c.initiate_connection()
f = frame_factory.build_headers_frame(
self.example_request_headers, flags=['END_STREAM']
)
c.receive_data(f.serialize())
c.send_headers(
stream_id=1,
headers=self.example_response_headers,
end_stream=True
)
c.clear_outbound_data_buffer()
f = frame(self, frame_factory)
events = c.receive_data(f.serialize())
assert not events
class TestStreamsClosedByRstStream(object):
example_request_headers = [
(':authority', 'example.com'),
(':path', '/'),
(':scheme', 'https'),
(':method', 'GET'),
]
example_response_headers = [
(':status', '200'),
('server', 'fake-serv/0.1.0')
]
server_config = h2.config.H2Configuration(client_side=False)
@pytest.mark.skip("nghttp2")
@pytest.mark.parametrize(
"frame",
[
lambda self, ff: ff.build_headers_frame(
self.example_request_headers),
lambda self, ff: ff.build_headers_frame(
self.example_request_headers, flags=['END_STREAM']),
lambda self, ff: ff.build_data_frame(b'hello'),
lambda self, ff: ff.build_window_update_frame(1, 1),
]
)
def test_resets_further_frames_after_recv_reset(self,
frame_factory,
frame):
"""
        A stream that is closed by receiving RST_STREAM can receive further
        frames: the connection simply sends RST_STREAM for each of them.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.client_preamble())
c.initiate_connection()
header_frame = frame_factory.build_headers_frame(
self.example_request_headers, flags=['END_STREAM']
)
c.receive_data(header_frame.serialize())
c.send_headers(
stream_id=1,
headers=self.example_response_headers,
end_stream=False
)
rst_frame = frame_factory.build_rst_stream_frame(
1, h2.errors.ErrorCodes.STREAM_CLOSED
)
c.receive_data(rst_frame.serialize())
c.clear_outbound_data_buffer()
f = frame(self, frame_factory)
events = c.receive_data(f.serialize())
rst_frame = frame_factory.build_rst_stream_frame(
1, h2.errors.ErrorCodes.STREAM_CLOSED
)
assert not events
assert c.data_to_send() == rst_frame.serialize()
events = c.receive_data(f.serialize() * 3)
assert not events
assert c.data_to_send() == rst_frame.serialize() * 3
events = c.receive_data(f.serialize() * 3)
assert not events
assert c.data_to_send() == rst_frame.serialize() * 3
@pytest.mark.skip("nghttp2")
@pytest.mark.parametrize(
"frame",
[
lambda self, ff: ff.build_headers_frame(
self.example_request_headers),
lambda self, ff: ff.build_headers_frame(
self.example_request_headers, flags=['END_STREAM']),
lambda self, ff: ff.build_data_frame(b'hello'),
lambda self, ff: ff.build_window_update_frame(1, 1),
]
)
def test_resets_further_frames_after_send_reset(self,
frame_factory,
frame):
"""
        A stream that is closed by sending RST_STREAM can receive further
        frames: the connection simply sends RST_STREAM for each of them.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.client_preamble())
c.initiate_connection()
header_frame = frame_factory.build_headers_frame(
self.example_request_headers, flags=['END_STREAM']
)
c.receive_data(header_frame.serialize())
c.send_headers(
stream_id=1,
headers=self.example_response_headers,
end_stream=False
)
c.reset_stream(1, h2.errors.ErrorCodes.INTERNAL_ERROR)
rst_frame = frame_factory.build_rst_stream_frame(
1, h2.errors.ErrorCodes.STREAM_CLOSED
)
c.clear_outbound_data_buffer()
f = frame(self, frame_factory)
events = c.receive_data(f.serialize())
rst_frame = frame_factory.build_rst_stream_frame(
1, h2.errors.ErrorCodes.STREAM_CLOSED
)
assert not events
assert c.data_to_send() == rst_frame.serialize()
events = c.receive_data(f.serialize() * 3)
assert not events
assert c.data_to_send() == rst_frame.serialize() * 3
# Iterate over the streams to make sure it's gone, then confirm the
# behaviour is unchanged.
c.open_outbound_streams
events = c.receive_data(f.serialize() * 3)
assert not events
assert c.data_to_send() == rst_frame.serialize() * 3
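class TestLocallyResetStreams(object):
    """Illustrative sketch (not part of the original suite): sending on a
    stream we reset ourselves raises ProtocolError. Uses only the public
    h2 API, so no frame_factory fixture is needed.
    """
    example_request_headers = [
        (':authority', 'example.com'),
        (':path', '/'),
        (':scheme', 'https'),
        (':method', 'GET'),
    ]

    def test_send_data_after_local_reset_errors(self):
        c = h2.connection.H2Connection()
        c.initiate_connection()
        c.send_headers(1, self.example_request_headers)
        c.reset_stream(1)
        # The stream is closed on our side; further sends must be rejected.
        with pytest.raises(h2.exceptions.ProtocolError):
            c.send_data(1, b'hello')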
| 33.094241
| 78
| 0.596978
| 1,451
| 12,642
| 4.92419
| 0.130255
| 0.063821
| 0.048705
| 0.03275
| 0.812456
| 0.790763
| 0.776347
| 0.761092
| 0.749335
| 0.716725
| 0
| 0.012058
| 0.30462
| 12,642
| 381
| 79
| 33.181102
| 0.800705
| 0.106787
| 0
| 0.681319
| 0
| 0
| 0.038727
| 0
| 0
| 0
| 0
| 0
| 0.080586
| 1
| 0.029304
| false
| 0
| 0.021978
| 0
| 0.095238
| 0.007326
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
508f6aba63dfccf40babe11f1d6d81b118c8674f
| 25,415
|
py
|
Python
|
finall.py
|
Vraajj/Hand-Cricket-Using-Webcam
|
8b10ebb091966140be5a25a11c6c8beff44c7ec9
|
[
"MIT"
] | null | null | null |
finall.py
|
Vraajj/Hand-Cricket-Using-Webcam
|
8b10ebb091966140be5a25a11c6c8beff44c7ec9
|
[
"MIT"
] | null | null | null |
finall.py
|
Vraajj/Hand-Cricket-Using-Webcam
|
8b10ebb091966140be5a25a11c6c8beff44c7ec9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 27 19:39:37 2018
@author: Admin
"""
import random
import cv2
import numpy as np
import math
def num():
cap = cv2.VideoCapture(1)
while (cap.isOpened()):
# read image
ret, img = cap.read()
# get hand data from the rectangle sub window on the screen
cv2.rectangle(img, (300, 300), (100, 100), (0, 255, 0), 0)
crop_img = img[100:300, 100:300]
# convert to grayscale
grey = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)
# applying gaussian blur
value = (35, 35)
blurred = cv2.GaussianBlur(grey, value, 0)
        # thresholding: Otsu's binarization method
_, thresh1 = cv2.threshold(blurred, 127, 255,
cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
# show thresholded image
cv2.imshow('Thresholded', thresh1)
        # check OpenCV version to avoid unpacking error: OpenCV 3 returns
        # three values from findContours; OpenCV 2 and 4 return two
        version = cv2.__version__.split('.')[0]
        if version == '3':
            image, contours, hierarchy = cv2.findContours(thresh1.copy(), \
                cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
        else:
            contours, hierarchy = cv2.findContours(thresh1.copy(), \
                cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
# find contour with max area
cnt = max(contours, key=lambda x: cv2.contourArea(x))
# create bounding rectangle around the contour (can skip below two lines)
x, y, w, h = cv2.boundingRect(cnt)
cv2.rectangle(crop_img, (x, y), (x + w, y + h), (0, 0, 255), 0)
# finding convex hull
hull = cv2.convexHull(cnt)
# drawing contours
drawing = np.zeros(crop_img.shape, np.uint8)
cv2.drawContours(drawing, [cnt], 0, (0, 255, 0), 0)
cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 0)
# finding convex hull
hull = cv2.convexHull(cnt, returnPoints=False)
# finding convexity defects
defects = cv2.convexityDefects(cnt, hull)
count_defects = 0
cv2.drawContours(thresh1, contours, -1, (0, 255, 0), 3)
# applying Cosine Rule to find angle for all defects (between fingers)
# with angle > 90 degrees and ignore defects
for i in range(defects.shape[0]):
s, e, f, d = defects[i, 0]
start = tuple(cnt[s][0])
end = tuple(cnt[e][0])
far = tuple(cnt[f][0])
# find length of all sides of triangle
a = math.sqrt((end[0] - start[0]) ** 2 + (end[1] - start[1]) ** 2)
b = math.sqrt((far[0] - start[0]) ** 2 + (far[1] - start[1]) ** 2)
c = math.sqrt((end[0] - far[0]) ** 2 + (end[1] - far[1]) ** 2)
# apply cosine rule here
angle = math.acos((b ** 2 + c ** 2 - a ** 2) / (2 * b * c)) * 57
# ignore angles > 90 and highlight rest with red dots
if angle <= 90:
count_defects += 1
cv2.circle(crop_img, far, 1, [0, 0, 255], -1)
# dist = cv2.pointPolygonTest(cnt,far,True)
# draw a line from start to end i.e. the convex points (finger tips)
# (can skip this part)
cv2.line(crop_img, start, end, [0, 255, 0], 2)
# cv2.circle(crop_img,far,5,[0,0,255],-1)
# define actions required
if count_defects == 1:
cv2.putText(img, "2", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
        elif count_defects == 2:
            cv2.putText(img, "3", (5, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, 2)
elif count_defects == 3:
cv2.putText(img, "4", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
elif count_defects == 4:
cv2.putText(img, "5", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
else:
cv2.putText(img, "1", (50, 50), \
cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
# show appropriate images in windows
cv2.imshow('Gesture', img)
all_img = np.hstack((drawing, crop_img))
cv2.imshow('Contours', all_img)
if cv2.waitKey(1) == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
return count_defects + 1
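# Illustrative sketch (not part of the game): the core hand-cricket rule the
# loops below implement. The batter keeps scoring until both players show
# the same number, which means "out".
def _score_innings_sketch(batter_inputs, bowler_inputs):
    score = 0
    for bat, bowl in zip(batter_inputs, bowler_inputs):
        if bat == bowl:
            break  # matching numbers: the batter is out
        score += bat
    return score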
user_score = 0
computer_score = 0
overs = 0
balls = 1
user_duck = True
computer_duck = True
user_out = False
computer_out = False
toss = ['1', '2']
bat_or_bowl = ["Bat", "Bowl"]
print("Toss time!")
choice = input("Head/Tails?\n1.Heads\n2.Tails\nEnter your choice: ")
if choice == '1' or choice == '2':
who = random.choice(toss)
# User won the toss!
if who == choice:
print("Hurray you won the toss!")
option = input("What do you wish to do?\n1.Bat\n2.Bowl\nEnter the option: ")
if option == '1' or option == '2':
            # User chooses to bat
if option == '1':
print("You chose to bat!")
print("Let's begin!")
user_input = num()
computer_input = random.randint(1, 5)
print("Your input is: ", user_input, " and computer input is: ", computer_input)
if user_input == computer_input:
computer_out = True
elif user_input != computer_input:
user_score += user_input
print("Current score:", user_score)
if user_input <= 0 or user_input > 5:
print("Invalid input!")
user_score = 0
balls = 0
print()
print("Your input is: ", user_input, " and computer input is: ", computer_input)
print()
print("Current score:", user_score)
print("Overs:", overs, ".", balls)
print()
print()
while user_input != computer_input:
user_duck = False
user_input = num()
if user_input <= 0 or user_input > 5:
print("Invalid input!")
continue
computer_input = random.randint(1, 5)
if computer_input == user_input:
break
user_score += user_input
balls += 1
if balls % 6 == 0:
overs += 1
if balls == 6:
balls = 0
print()
print("Your input is", user_input, " and computer input is ", computer_input)
print()
print("Current score:", user_score)
print("Overs:", overs, ".", balls)
print()
print("Out!!!")
if user_duck:
user_score = 0
if user_score == 0:
print("Duck!!!")
else:
if overs <= 1:
print("You scored ", user_score, " from ", overs, " over and", balls, " balls.")
else:
print("You scored ", user_score, " from ", overs, " overs and", balls, " balls.")
# User's turn to bowl
balls = 1
overs = 0
print("Now your turn to bowl\n\nNever allow the opponent to cross your score\nAll the best!")
user_input = num()
computer_input = random.randint(1, 5)
print("Your input is: ", user_input, " and computer input is: ", computer_input)
if user_input != computer_input:
computer_score += computer_input
computer_duck = False
else:
computer_score = 0
if user_input <= 0 or user_input > 5:
print("Invalid input!")
computer_score = 0
balls = 0
print("Your input is: ", user_input, " and computer input is: ", computer_input)
print("Current score:", computer_score)
print("Overs:", overs, ".", balls)
while computer_score < user_score:
user_input = num()
if user_input <= 0 or user_input > 5:
print("Invalid input!")
continue
computer_input = random.randint(1, 5)
if user_input == computer_input:
break
computer_score += computer_input
balls += 1
if balls % 6 == 0:
overs += 1
if balls == 6:
balls = 0
print()
print("Your input is", user_input, " and computer input is ", computer_input)
print("Current score:", computer_score)
print("Overs:", overs, ".", balls)
print()
if computer_out:
print("Out!!!")
if computer_duck:
computer_score = 0
if overs <= 0:
print("Computer scored ", computer_score, " from ", overs, " over and", balls + 1, " balls")
else:
print("Computer scored ", computer_score, " from ", overs, " overs and", balls + 1, " balls")
if user_score > computer_score:
print("You won!")
print("Scores:\nYou:", user_score, "\nComputer:", computer_score)
else:
print("Computer won!!!\nBetter luck next time")
print("Scores:\nComputer:", computer_score, "\nYou:", user_score)
# User chooses to bowl
else:
print("You chose to bowl!")
user_input = num()
computer_input = random.randint(1, 5)
print("Your input is: ", user_input, " and computer input is: ", computer_input)
if user_input != computer_input:
computer_score += computer_input
computer_duck = False
if user_input <= 0 or user_input > 5:
print("Invalid input!")
computer_score = 0
balls = 0
print()
print("Your input is: ", user_input, " and computer input is: ", computer_input)
print("Current score:", computer_score)
print("Overs:", overs, ".", balls)
print()
while computer_input != user_input:
user_input = num()
if user_input <= 0 or user_input > 5:
print("Invalid input!")
continue
computer_input = random.randint(1, 5)
if computer_input == user_input:
break
computer_score += computer_input
balls += 1
if balls % 6 == 0:
overs += 1
if balls == 6:
balls = 0
print()
print("Your input is", user_input, " and computer input is ", computer_input)
print("Current score:", computer_score)
print("Overs:", overs, ".", balls)
print()
print("Out!!!")
if computer_duck:
computer_score = 0
if computer_duck:
print("Duck!!!")
else:
if overs <= 0:
print("Computer scored ", computer_score, " from ", overs, " over and", balls + 1, " balls")
else:
print("Computer scored ", computer_score, " from ", overs, " overs and", balls + 1, " balls")
# User's turn to bat
overs = 0
balls = 1
print("Now it's your turn to bat\nTry to defeat the oppponent\nAll the best!")
print("Let's begin!")
user_input = num()
computer_input = random.randint(1, 5)
print("Your input is: ", user_input, " and computer input is: ", computer_input)
if user_input != computer_input:
user_score += user_input
if user_input <= 0 or user_input > 5:
print("Invalid input!")
user_score = 0
balls = 0
print()
print("Your input is: ", user_input, " and computer input is: ", computer_input)
print()
print("Current score:", user_score)
print("Overs:", overs, ".", balls)
print()
print()
while user_score < computer_score:
user_duck = False
user_input = num()
if user_input <= 0 or user_input > 5:
print("Invalid input!")
continue
computer_input = random.randint(1, 5)
if user_input == computer_input:
user_out = True
break
user_score += user_input
balls += 1
if balls % 6 == 0:
overs += 1
if balls == 6:
balls = 0
print()
print("Your input is", user_input, " and computer input is ", computer_input)
print()
print("Current score:", user_score)
print("Overs:", overs, ".", balls)
print()
if user_out:
print("Out!!!")
if user_duck:
user_score = 0
if overs <= 0:
print("You scored ", user_score, " from ", overs, " over and", balls + 1, " balls")
else:
print("You scored ", user_score, " from ", overs, " overs and", balls + 1, " balls")
if user_score > computer_score:
print("You won!")
print("Scores:\nYou:", user_score, "\nComputer:", computer_score)
elif computer_score > user_score:
print("Computer won!!!\nBetter luck next time")
print("Scores:\nComputer:", computer_score, "\nYou:", user_score)
else:
print("Tie!!!")
print("No one gives up!")
else:
print("Invalid option begin given!")
# Computer won the toss!
else:
computer_option = random.choice(bat_or_bowl)
print("Bad luck! computer won the toss and chose to ", computer_option)
print("Let the battle begin!!!")
# If computer chooses to bat
if computer_option == "Bat":
print("Computer bats!")
user_input = num()
computer_input = random.randint(1, 5)
print("Your input is: ", user_input, " and computer input is: ", computer_input)
computer_score += computer_input
if computer_input != user_input:
computer_duck = False
if user_input <= 0 or user_input > 5:
print("Invalid input!")
computer_score = 0
balls = 0
print()
print("Your input is: ", user_input, " and computer input is: ", computer_input)
print("Current score:", computer_score)
print("Overs:", overs, ".", balls)
print()
while computer_input != user_input:
user_input = num()
if user_input <= 0 or user_input > 5:
print("Invalid input!")
continue
computer_input = random.randint(1, 5)
if computer_input == user_input:
break
computer_score += computer_input
balls += 1
if balls % 6 == 0:
overs += 1
if balls == 6:
balls = 0
print()
print("Your input is", user_input, " and computer input is ", computer_input)
print("Current score:", computer_score)
print("Overs:", overs, ".", balls)
print()
print("Out!!!")
if computer_duck:
print("Duck!!!")
computer_score = 0
else:
if overs <= 0:
print("Computer scored ", computer_score, " from ", overs, " over and", balls + 1, " balls")
else:
print("Computer scored ", computer_score, " from ", overs, " overs and", balls + 1, " balls")
# Computer's turn to bowl
overs = 0
balls = 1
print("Computer bowls!")
user_input = num()
computer_input = random.randint(1, 5)
print("Your input is: ", user_input, " and computer input is: ", computer_input)
if user_input != computer_input:
user_score += user_input
if user_input <= 0 or user_input > 5:
print("Invalid input!")
user_score = 0
balls = 0
print()
print("Your input is: ", user_input, " and computer input is: ", computer_input)
print()
print("Current score:", user_score)
print("Overs:", overs, ".", balls)
print()
print()
while user_score <= computer_score:
user_duck = False
user_input = num()
if user_input <= 0 or user_input > 5:
print("Invalid input!")
continue
computer_input = random.randint(1, 5)
if user_input == computer_input:
user_out = True
break
user_score += user_input
balls += 1
if balls % 6 == 0:
overs += 1
if balls == 6:
balls = 0
print()
print("Your input is", user_input, " and computer input is ", computer_input)
print()
print("Current score:", user_score)
print("Overs:", overs, ".", balls)
print()
if user_out:
print("Out!!!")
if user_duck:
user_score = 0
if overs <= 0:
print("You scored ", user_score, " from ", overs, " over and", balls + 1, " balls")
else:
print("You scored ", user_score, " from ", overs, " overs and", balls + 1, " balls")
# Announcing the winner
if user_score > computer_score:
print("You won!")
print("Scores:\nYou:", user_score, "\nComputer:", computer_score)
elif computer_score > user_score:
print("Computer won!!!\nBetter luck next time")
print("Scores:\nComputer:", computer_score, "\nYou:", user_score)
else:
print("Tie!!!")
# If computer chooses to bowl
else:
print("Computer bowls!")
user_input = num()
computer_input = random.randint(1, 5)
print("Your input is: ", user_input, " and computer input is: ", computer_input)
if user_input != computer_input:
user_score += user_input
if user_input <= 0 or user_input > 5:
print("Invalid input!")
user_score = 0
balls = 0
print()
print("Your input is: ", user_input, " and computer input is: ", computer_input)
print()
print("Current score:", user_score)
print("Overs:", overs, ".", balls)
print()
print()
while user_input != computer_input:
user_duck = False
user_input = num()
if user_input <= 0 or user_input > 5:
print("Invalid input!")
continue
computer_input = random.randint(1, 5)
if user_input == computer_input:
user_out = True
break
user_score += user_input
balls += 1
if balls % 6 == 0:
overs += 1
if balls == 6:
balls = 0
print()
print("Your input is", user_input, " and computer input is ", computer_input)
print()
print("Current score:", user_score)
print("Overs:", overs, ".", balls)
print()
if user_out:
print("Out!!!")
if user_duck:
user_score = 0
if overs <= 0:
print("You scored ", user_score, " from ", overs, " over and", balls + 1, " balls")
else:
print("You scored ", user_score, " from ", overs, " overs and", balls + 1, " balls")
# Computer's turn to bat
overs = 0
balls = 1
print("Computer bats!")
user_input = num()
computer_input = random.randint(1, 5)
print("Your input is: ", user_input, " and computer input is: ", computer_input)
if computer_input != user_input:
computer_score += computer_input
computer_duck = False
else:
computer_score = 0
if user_input <= 0 or user_input > 5:
print("Invalid input!")
computer_score = 0
balls = 0
print()
print("Your input is: ", user_input, " and computer input is: ", computer_input)
print("Current score:", computer_score)
print("Overs:", overs, ".", balls)
print()
while computer_score <= user_score:
computer_duck = False
user_input = num()
if user_input <= 0 or user_input > 5:
print("Invalid input!")
continue
computer_input = random.randint(1, 5)
if computer_input == user_input:
computer_out = True
break
computer_score += computer_input
balls += 1
if balls % 6 == 0:
overs += 1
if balls == 6:
balls = 0
print()
print("Your input is", user_input, " and computer input is ", computer_input)
print("Current score:", computer_score)
print("Overs:", overs, ".", balls)
print()
if computer_out:
print("Out!!!")
if computer_duck:
print("Duck!!!")
computer_score = 0
else:
if overs <= 0:
print("Computer scored ", computer_score, " from ", overs, " over and", balls + 1, " balls")
else:
print("Computer scored ", computer_score, " from ", overs, " overs and", balls + 1, " balls")
# Announcing the winner
if user_score > computer_score:
print("You won!")
print("Scores:\nYou:", user_score, "\nComputer:", computer_score)
elif computer_score > user_score:
print("Computer won!!!\nBetter luck next time")
print("Scores:\nComputer:", computer_score, "\nYou:", user_score)
else:
print("Tie!!!")
else:
print("Invalid option given!")
| 40.023622
| 118
| 0.441708
| 2,499
| 25,415
| 4.347739
| 0.111245
| 0.083663
| 0.026323
| 0.035343
| 0.752416
| 0.731615
| 0.728578
| 0.728578
| 0.715416
| 0.706489
| 0
| 0.030268
| 0.459217
| 25,415
| 634
| 119
| 40.086751
| 0.760259
| 0.047059
| 0
| 0.804305
| 0
| 0.001957
| 0.132291
| 0.002591
| 0
| 0
| 0
| 0
| 0
| 1
| 0.001957
| false
| 0
| 0.007828
| 0
| 0.011742
| 0.350294
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
509e51be7939ef2c07d1479e015f51ed54ca91ca
| 135
|
py
|
Python
|
jobs/rolloff_mask_generation/v0/producer_rolloff_mask.py
|
lsst-camera-dh/ts3-analysis
|
bf3400f286876c5ed4368e2dafe730a8598d0bf7
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
jobs/rolloff_mask_generation/v0/producer_rolloff_mask.py
|
lsst-camera-dh/ts3-analysis
|
bf3400f286876c5ed4368e2dafe730a8598d0bf7
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
jobs/rolloff_mask_generation/v0/producer_rolloff_mask.py
|
lsst-camera-dh/ts3-analysis
|
bf3400f286876c5ed4368e2dafe730a8598d0bf7
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
#!/usr/bin/env python
import shutil
from lsst.eotest.sensor.rolloff_mask import rolloff_mask
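# presumably generates the sensor's edge-rolloff pixel mask and writes it to the FITS file named below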
rolloff_mask('ccd250_defects_mask.fits')
| 22.5
| 56
| 0.82963
| 21
| 135
| 5.095238
| 0.714286
| 0.308411
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024
| 0.074074
| 135
| 5
| 57
| 27
| 0.832
| 0.148148
| 0
| 0
| 0
| 0
| 0.210526
| 0.210526
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
50f1fb3a75d7dd4190773a520d83b030af88a3f8
| 36
|
py
|
Python
|
__init__.py
|
gthb/apache-log-reader
|
aa6b9f327ca18b3be20aaafb7758aba3f3eba4ad
|
[
"CC0-1.0"
] | 4
|
2015-02-02T18:26:30.000Z
|
2019-11-05T08:52:45.000Z
|
__init__.py
|
pklaus/apache-log-reader
|
aa6b9f327ca18b3be20aaafb7758aba3f3eba4ad
|
[
"CC0-1.0"
] | null | null | null |
__init__.py
|
pklaus/apache-log-reader
|
aa6b9f327ca18b3be20aaafb7758aba3f3eba4ad
|
[
"CC0-1.0"
] | 1
|
2016-03-06T07:39:56.000Z
|
2016-03-06T07:39:56.000Z
|
from log_reader import ApacheReader
| 18
| 35
| 0.888889
| 5
| 36
| 6.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 36
| 1
| 36
| 36
| 0.96875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0fba077d4dbe9d46e1200f2c9be845daae29122b
| 9,288
|
py
|
Python
|
tests/test_bucketfs_utils.py
|
exasol/bucketfs-utils-python
|
6eb2aed97ad9f4aaece47bdac5bc8a44fb42d921
|
[
"MIT"
] | 1
|
2021-06-25T19:53:03.000Z
|
2021-06-25T19:53:03.000Z
|
tests/test_bucketfs_utils.py
|
exasol/bucketfs-utils-python
|
6eb2aed97ad9f4aaece47bdac5bc8a44fb42d921
|
[
"MIT"
] | 42
|
2020-11-18T12:58:47.000Z
|
2022-03-30T13:02:15.000Z
|
tests/test_bucketfs_utils.py
|
exasol/bucketfs-utils-python
|
6eb2aed97ad9f4aaece47bdac5bc8a44fb42d921
|
[
"MIT"
] | null | null | null |
import pytest
from exasol_bucketfs_utils_python import bucketfs_utils
from exasol_bucketfs_utils_python.bucketfs_config import BucketFSConfig
from exasol_bucketfs_utils_python.bucketfs_connection_config import BucketFSConnectionConfig
from exasol_bucketfs_utils_python.bucket_config import BucketConfig
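# every test below builds the same default bucket config and checks the generated UDF path or HTTP URL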
def test_generate_bucket_udf_path_non_archive_file():
connection_config = BucketFSConnectionConfig(host="localhost", port=6666, user="w", pwd="write", is_https=False)
bucketfs_config = BucketFSConfig(connection_config=connection_config,
bucketfs_name="bfsdefault")
bucket_config = BucketConfig(bucket_name="default", bucketfs_config=bucketfs_config)
udf_path = bucketfs_utils.generate_bucket_udf_path(
bucket_config=bucket_config,
path_in_bucket="path/in/bucket/test_file.txt"
)
assert str(udf_path) == "/buckets/bfsdefault/default/path/in/bucket/test_file.txt"
def test_generate_bucket_udf_path_trailing_slash():
connection_config = BucketFSConnectionConfig(host="localhost", port=6666, user="w", pwd="write", is_https=False)
bucketfs_config = BucketFSConfig(connection_config=connection_config,
bucketfs_name="bfsdefault")
bucket_config = BucketConfig(bucket_name="default", bucketfs_config=bucketfs_config)
udf_path = bucketfs_utils.generate_bucket_udf_path(
bucket_config=bucket_config,
path_in_bucket="/path/in/bucket/test_file.txt"
)
assert str(udf_path) == "/buckets/bfsdefault/default/path/in/bucket/test_file.txt"
@pytest.mark.parametrize("extension", ["tar.gz", "zip", "tgz", "tar"])
def test_generate_bucket_udf_path_archive(extension):
connection_config = BucketFSConnectionConfig(host="localhost", port=6666, user="w", pwd="write", is_https=False)
bucketfs_config = BucketFSConfig(connection_config=connection_config,
bucketfs_name="bfsdefault")
bucket_config = BucketConfig(bucket_name="default", bucketfs_config=bucketfs_config)
udf_path = bucketfs_utils.generate_bucket_udf_path(
bucket_config=bucket_config,
path_in_bucket=f"path/in/bucket/test_file.{extension}"
)
assert str(udf_path) == "/buckets/bfsdefault/default/path/in/bucket/test_file"
def test_generate_bucket_url_file_write_access():
connection_config = BucketFSConnectionConfig(host="localhost", port=6666, user="w", pwd="write", is_https=False)
bucketfs_config = BucketFSConfig(connection_config=connection_config,
bucketfs_name="bfsdefault")
bucket_config = BucketConfig(bucket_name="default", bucketfs_config=bucketfs_config)
udf_path = bucketfs_utils.generate_bucket_http_url(
bucket_config=bucket_config,
path_in_bucket="path/in/bucket/test_file.txt"
)
assert udf_path.geturl() == "http://localhost:6666/default/path/in/bucket/test_file.txt"
def test_generate_bucket_url_file_trailing_slash():
connection_config = BucketFSConnectionConfig(host="localhost", port=6666, user="w", pwd="write", is_https=False)
bucketfs_config = BucketFSConfig(connection_config=connection_config,
bucketfs_name="bfsdefault")
bucket_config = BucketConfig(bucket_name="default", bucketfs_config=bucketfs_config)
udf_path = bucketfs_utils.generate_bucket_http_url(
bucket_config=bucket_config,
path_in_bucket="/path/in/bucket/test_file.txt"
)
assert udf_path.geturl() == "http://localhost:6666/default/path/in/bucket/test_file.txt"
def test_generate_bucket_url_file_with_credentials():
connection_config = BucketFSConnectionConfig(host="localhost", port=6666, user="w", pwd="write", is_https=False)
bucketfs_config = BucketFSConfig(connection_config=connection_config,
bucketfs_name="bfsdefault")
bucket_config = BucketConfig(bucket_name="default", bucketfs_config=bucketfs_config)
udf_path = bucketfs_utils.generate_bucket_http_url(
bucket_config=bucket_config,
path_in_bucket="path/in/bucket/test_file.txt",
with_credentials=True
)
assert udf_path.geturl() == "http://w:write@localhost:6666/default/path/in/bucket/test_file.txt"
def test_generate_bucket_url_file_with_ip():
connection_config = BucketFSConnectionConfig(host="127.0.0.1", port=6666, user="w", pwd="write", is_https=False)
bucketfs_config = BucketFSConfig(connection_config=connection_config,
bucketfs_name="bfsdefault")
bucket_config = BucketConfig(bucket_name="default", bucketfs_config=bucketfs_config)
udf_path = bucketfs_utils.generate_bucket_http_url(
bucket_config=bucket_config,
path_in_bucket="path/in/bucket/test_file.txt",
with_credentials=True
)
assert udf_path.geturl() == "http://w:write@127.0.0.1:6666/default/path/in/bucket/test_file.txt"
def test_generate_bucket_url_file_with_whitespace_in_host():
connection_config = BucketFSConnectionConfig(host="local host", port=6666, user="w", pwd="write", is_https=False)
bucketfs_config = BucketFSConfig(connection_config=connection_config,
bucketfs_name="bfsdefault")
bucket_config = BucketConfig(bucket_name="default", bucketfs_config=bucketfs_config)
udf_path = bucketfs_utils.generate_bucket_http_url(
bucket_config=bucket_config,
path_in_bucket="path/in/bucket/test_file.txt",
with_credentials=True
)
assert udf_path.geturl() == "http://w:write@local%20host:6666/default/path/in/bucket/test_file.txt"
def test_generate_bucket_url_file_with_whitespace_in_password():
connection_config = BucketFSConnectionConfig(host="localhost", port=6666, user="w", pwd="write write",
is_https=False)
bucketfs_config = BucketFSConfig(connection_config=connection_config,
bucketfs_name="bfsdefault")
bucket_config = BucketConfig(bucket_name="default", bucketfs_config=bucketfs_config)
udf_path = bucketfs_utils.generate_bucket_http_url(
bucket_config=bucket_config,
path_in_bucket="path/in/bucket/test_file.txt",
with_credentials=True
)
assert udf_path.geturl() == "http://w:write%20write@localhost:6666/default/path/in/bucket/test_file.txt"
def test_generate_bucket_url_file_with_whitespace_in_bucket_name():
connection_config = BucketFSConnectionConfig(host="localhost", port=6666, user="w", pwd="write",
is_https=False)
bucketfs_config = BucketFSConfig(connection_config=connection_config,
bucketfs_name="bfsdefault")
bucket_config = BucketConfig(bucket_name="default default", bucketfs_config=bucketfs_config)
udf_path = bucketfs_utils.generate_bucket_http_url(
bucket_config=bucket_config,
path_in_bucket="path/in/bucket/test_file.txt",
with_credentials=True
)
assert udf_path.geturl() == "http://w:write@localhost:6666/default%20default/path/in/bucket/test_file.txt"
def test_generate_bucket_url_file_with_whitespace_in_path_in_bucket():
connection_config = BucketFSConnectionConfig(host="localhost", port=6666, user="w", pwd="write",
is_https=False)
bucketfs_config = BucketFSConfig(connection_config=connection_config,
bucketfs_name="bfsdefault")
bucket_config = BucketConfig(bucket_name="default", bucketfs_config=bucketfs_config)
udf_path = bucketfs_utils.generate_bucket_http_url(
bucket_config=bucket_config,
path_in_bucket="path/in/bucket/test file.txt",
with_credentials=True
)
assert udf_path.geturl() == "http://w:write@localhost:6666/default/path/in/bucket/test%20file.txt"
def test_generate_bucket_url_file_read_only_access():
connection_config = BucketFSConnectionConfig(host="localhost", port=6666, user="r", pwd="read", is_https=False)
bucketfs_config = BucketFSConfig(connection_config=connection_config,
bucketfs_name="bfsdefault")
bucket_config = BucketConfig(bucket_name="default", bucketfs_config=bucketfs_config)
udf_path = bucketfs_utils.generate_bucket_http_url(
bucket_config=bucket_config,
path_in_bucket="path/in/bucket/test_file.txt",
with_credentials=True
)
assert udf_path.geturl() == "http://r:read@localhost:6666/default/path/in/bucket/test_file.txt"
def test_generate_bucket_url_file_https():
connection_config = BucketFSConnectionConfig(host="localhost", port=6666, user="r", pwd="read", is_https=True)
bucketfs_config = BucketFSConfig(connection_config=connection_config,
bucketfs_name="bfsdefault")
bucket_config = BucketConfig(bucket_name="default", bucketfs_config=bucketfs_config)
udf_path = bucketfs_utils.generate_bucket_http_url(
bucket_config=bucket_config,
path_in_bucket="path/in/bucket/test_file.txt",
with_credentials=True
)
assert udf_path.geturl() == "https://r:read@localhost:6666/default/path/in/bucket/test_file.txt"
| 53.37931
| 117
| 0.724806
| 1,109
| 9,288
| 5.711452
| 0.064022
| 0.051784
| 0.075782
| 0.065677
| 0.931639
| 0.919324
| 0.893274
| 0.88838
| 0.88838
| 0.886486
| 0
| 0.014614
| 0.174849
| 9,288
| 173
| 118
| 53.687861
| 0.811848
| 0
| 0
| 0.673469
| 0
| 0.054422
| 0.178402
| 0.05491
| 0
| 0
| 0
| 0
| 0.088435
| 1
| 0.088435
| false
| 0.006803
| 0.034014
| 0
| 0.122449
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0fd5abc29af413741171d75359a1f23f496935e7
| 113
|
py
|
Python
|
bitmovin_api_sdk/encoding/encodings/muxings/mp4/drm/clearkey/customdata/__init__.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 11
|
2019-07-03T10:41:16.000Z
|
2022-02-25T21:48:06.000Z
|
bitmovin_api_sdk/encoding/encodings/muxings/mp4/drm/clearkey/customdata/__init__.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 8
|
2019-11-23T00:01:25.000Z
|
2021-04-29T12:30:31.000Z
|
bitmovin_api_sdk/encoding/encodings/muxings/mp4/drm/clearkey/customdata/__init__.py
|
jaythecaesarean/bitmovin-api-sdk-python
|
48166511fcb9082041c552ace55a9b66cc59b794
|
[
"MIT"
] | 13
|
2020-01-02T14:58:18.000Z
|
2022-03-26T12:10:30.000Z
|
from bitmovin_api_sdk.encoding.encodings.muxings.mp4.drm.clearkey.customdata.customdata_api import CustomdataApi
| 56.5
| 112
| 0.893805
| 15
| 113
| 6.533333
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009174
| 0.035398
| 113
| 1
| 113
| 113
| 0.889908
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0fef440938b700c66262731a57adf0065a91fcc4
| 137
|
py
|
Python
|
parser/fase2/team09/liteal.py
|
alerod620/tytus
|
361c9500dfcd2a1c759a9079bd069fd1c3f1f92b
|
[
"MIT"
] | null | null | null |
parser/fase2/team09/liteal.py
|
alerod620/tytus
|
361c9500dfcd2a1c759a9079bd069fd1c3f1f92b
|
[
"MIT"
] | null | null | null |
parser/fase2/team09/liteal.py
|
alerod620/tytus
|
361c9500dfcd2a1c759a9079bd069fd1c3f1f92b
|
[
"MIT"
] | 1
|
2021-01-05T18:31:17.000Z
|
2021-01-05T18:31:17.000Z
|
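# AST leaf node for the parser: "optimizing" a literal simply returns the literal unchanged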
class Literal():
def __init__(self, literal):
self.literal = literal
def optimizacion(self):
return self.literal
| 22.833333
| 32
| 0.642336
| 15
| 137
| 5.6
| 0.466667
| 0.392857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.262774
| 137
| 6
| 33
| 22.833333
| 0.831683
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
e8cf80fe8825c1716fba339d5d6f9bfff68b16d5
| 86
|
py
|
Python
|
fastreid/modeling/backbones/regnet/__init__.py
|
tenghehan/reid_without_id
|
d1d0ff273b1ef19fc6da8cbbf210527779b37455
|
[
"MIT"
] | 2,194
|
2020-04-06T01:37:56.000Z
|
2022-03-30T22:17:28.000Z
|
fastreid/modeling/backbones/regnet/__init__.py
|
tenghehan/reid_without_id
|
d1d0ff273b1ef19fc6da8cbbf210527779b37455
|
[
"MIT"
] | 542
|
2020-04-14T08:00:05.000Z
|
2022-03-29T07:39:40.000Z
|
fastreid/modeling/backbones/regnet/__init__.py
|
tenghehan/reid_without_id
|
d1d0ff273b1ef19fc6da8cbbf210527779b37455
|
[
"MIT"
] | 667
|
2020-04-08T02:06:03.000Z
|
2022-03-29T00:57:32.000Z
|
from .regnet import build_regnet_backbone
from .effnet import build_effnet_backbone
| 17.2
| 41
| 0.860465
| 12
| 86
| 5.833333
| 0.5
| 0.314286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116279
| 86
| 4
| 42
| 21.5
| 0.921053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2ccfba797d20e0b374bec4b08c5c9f251b17cd97
| 112
|
py
|
Python
|
cogdl/wrappers/model_wrapper/clustering/__init__.py
|
li-ziang/cogdl
|
60022d3334e3abae2d2a505e6e049a26acf10f39
|
[
"MIT"
] | 6
|
2020-07-09T02:48:41.000Z
|
2021-06-16T09:04:14.000Z
|
cogdl/wrappers/model_wrapper/clustering/__init__.py
|
li-ziang/cogdl
|
60022d3334e3abae2d2a505e6e049a26acf10f39
|
[
"MIT"
] | null | null | null |
cogdl/wrappers/model_wrapper/clustering/__init__.py
|
li-ziang/cogdl
|
60022d3334e3abae2d2a505e6e049a26acf10f39
|
[
"MIT"
] | 1
|
2020-05-19T11:45:45.000Z
|
2020-05-19T11:45:45.000Z
|
from .agc_mw import AGCModelWrapper
from .daegc_mw import DAEGCModelWrapper
from .gae_mw import GAEModelWrapper
| 28
| 39
| 0.866071
| 15
| 112
| 6.266667
| 0.6
| 0.255319
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107143
| 112
| 3
| 40
| 37.333333
| 0.94
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fa3ff9cdfb397fba37d4a8c45c694cc9c152b74b
| 263
|
py
|
Python
|
closed/Intel/code/common/baseBackend.py
|
ctuning/inference_results_v1.1
|
d9176eca28fcf6d7a05ccb97994362a76a1eb5ab
|
[
"Apache-2.0"
] | 12
|
2021-09-23T08:05:57.000Z
|
2022-03-21T03:52:11.000Z
|
closed/Intel/code/common/baseBackend.py
|
ctuning/inference_results_v1.1
|
d9176eca28fcf6d7a05ccb97994362a76a1eb5ab
|
[
"Apache-2.0"
] | 11
|
2021-09-23T20:34:06.000Z
|
2022-01-22T07:58:02.000Z
|
closed/Intel/code/common/baseBackend.py
|
ctuning/inference_results_v1.1
|
d9176eca28fcf6d7a05ccb97994362a76a1eb5ab
|
[
"Apache-2.0"
] | 16
|
2021-09-23T20:26:38.000Z
|
2022-03-09T12:59:56.000Z
|
"""
abstract baseBackend class
"""
class baseBackend():
def __init__(self):
pass
def load_model(self):
raise NotImplementedError("baseBackend:load")
def predict(self):
raise NotImplementedError("baseBackend:predict")
| 23.909091
| 56
| 0.65019
| 24
| 263
| 6.916667
| 0.5
| 0.108434
| 0.337349
| 0.46988
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.243346
| 263
| 11
| 56
| 23.909091
| 0.834171
| 0.098859
| 0
| 0
| 0
| 0
| 0.159091
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0.142857
| 0
| 0
| 0.571429
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
fa4238a81332fb5edb2c387754428d027368debe
| 184
|
py
|
Python
|
services/director-v2/src/simcore_service_director_v2/api/dependencies/rabbitmq.py
|
elisabettai/osparc-simcore
|
ad7b6e05111b50fe95e49306a992170490a7247f
|
[
"MIT"
] | null | null | null |
services/director-v2/src/simcore_service_director_v2/api/dependencies/rabbitmq.py
|
elisabettai/osparc-simcore
|
ad7b6e05111b50fe95e49306a992170490a7247f
|
[
"MIT"
] | 1
|
2021-11-29T13:38:09.000Z
|
2021-11-29T13:38:09.000Z
|
services/director-v2/src/simcore_service_director_v2/api/dependencies/rabbitmq.py
|
mrnicegyu11/osparc-simcore
|
b6fa6c245dbfbc18cc74a387111a52de9b05d1f4
|
[
"MIT"
] | null | null | null |
from fastapi import Request
from ...modules.rabbitmq import RabbitMQClient
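# FastAPI dependency: returns the shared RabbitMQ client stored on the app state at startup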
def get_rabbitmq_client(request: Request) -> RabbitMQClient:
return request.app.state.rabbitmq_client
| 23
| 60
| 0.809783
| 22
| 184
| 6.636364
| 0.590909
| 0.191781
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119565
| 184
| 7
| 61
| 26.285714
| 0.901235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
fa564df5f2f55108b1b64b5e19a2e3915585784a
| 7,711
|
py
|
Python
|
tests/test_health_service.py
|
vmagamedov/asyncgrpc
|
50742efdf2c2a14ba66c736dd6d1a9cc4bf6f467
|
[
"BSD-3-Clause"
] | 6
|
2017-01-25T13:54:17.000Z
|
2017-07-06T16:23:01.000Z
|
tests/test_health_service.py
|
vmagamedov/asyncgrpc
|
50742efdf2c2a14ba66c736dd6d1a9cc4bf6f467
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_health_service.py
|
vmagamedov/asyncgrpc
|
50742efdf2c2a14ba66c736dd6d1a9cc4bf6f467
|
[
"BSD-3-Clause"
] | null | null | null |
import asyncio
import pytest
import async_timeout
from grpclib.const import Status
from grpclib.testing import ChannelFor
from grpclib.exceptions import GRPCError
from grpclib.health.check import ServiceCheck, ServiceStatus
from grpclib.health.service import Health
from grpclib.health.v1.health_pb2 import HealthCheckRequest, HealthCheckResponse
from grpclib.health.v1.health_grpc import HealthStub
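# minimal health probe: tests flip __current_status__ to drive the reported status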
class Check:
__current_status__ = None
async def __call__(self):
return self.__current_status__
SERVICE_NAME = 'namespace.ServiceName'
class Service:
async def Foo(self, stream):
raise NotImplementedError
def __mapping__(self):
return {'/{}/Foo'.format(SERVICE_NAME): self.Foo}
@pytest.mark.asyncio
async def test_check_unknown_service():
svc = Service()
health = Health({svc: []})
async with ChannelFor([svc, health]) as channel:
stub = HealthStub(channel)
with pytest.raises(GRPCError) as err:
await stub.Check(HealthCheckRequest(service='Unknown'))
assert err.value.status == Status.NOT_FOUND
@pytest.mark.asyncio
async def test_check_zero_checks():
svc = Service()
health = Health({svc: []})
async with ChannelFor([svc, health]) as channel:
stub = HealthStub(channel)
response = await stub.Check(HealthCheckRequest(service=SERVICE_NAME))
assert response == HealthCheckResponse(
status=HealthCheckResponse.SERVING,
)
@pytest.mark.asyncio
@pytest.mark.parametrize('v1, v2, status', [
(None, None, HealthCheckResponse.UNKNOWN),
(True, False, HealthCheckResponse.NOT_SERVING),
(False, True, HealthCheckResponse.NOT_SERVING),
(True, True, HealthCheckResponse.SERVING)
])
async def test_check_service_check(loop, v1, v2, status):
svc = Service()
c1 = Check()
c2 = Check()
health = Health({svc: [
ServiceCheck(c1, check_ttl=0),
ServiceCheck(c2, check_ttl=0),
]})
async with ChannelFor([svc, health]) as channel:
stub = HealthStub(channel)
c1.__current_status__ = v1
c2.__current_status__ = v2
response = await stub.Check(HealthCheckRequest(service=SERVICE_NAME))
assert response == HealthCheckResponse(status=status)
@pytest.mark.asyncio
@pytest.mark.parametrize('v1, v2, status', [
(None, None, HealthCheckResponse.UNKNOWN),
(True, False, HealthCheckResponse.NOT_SERVING),
(False, True, HealthCheckResponse.NOT_SERVING),
(True, True, HealthCheckResponse.SERVING)
])
async def test_check_service_status(v1, v2, status):
svc = Service()
s1 = ServiceStatus()
s2 = ServiceStatus()
health = Health({svc: [s1, s2]})
async with ChannelFor([svc, health]) as channel:
stub = HealthStub(channel)
s1.set(v1)
s2.set(v2)
response = await stub.Check(HealthCheckRequest(service=SERVICE_NAME))
assert response == HealthCheckResponse(status=status)
@pytest.mark.asyncio
async def test_watch_unknown_service():
svc = Service()
health = Health({svc: []})
async with ChannelFor([svc, health]) as channel:
stub = HealthStub(channel)
async with stub.Watch.open() as stream:
await stream.send_message(HealthCheckRequest(service='Unknown'),
end=True)
assert await stream.recv_message() == HealthCheckResponse(
status=HealthCheckResponse.SERVICE_UNKNOWN,
)
try:
async with async_timeout.timeout(0.01):
assert not await stream.recv_message()
except asyncio.TimeoutError:
pass
await stream.cancel()
@pytest.mark.asyncio
async def test_watch_zero_checks():
svc = Service()
health = Health({svc: []})
async with ChannelFor([svc, health]) as channel:
stub = HealthStub(channel)
async with stub.Watch.open() as stream:
await stream.send_message(HealthCheckRequest(service=SERVICE_NAME),
end=True)
response = await stream.recv_message()
assert response == HealthCheckResponse(
status=HealthCheckResponse.SERVING,
)
try:
async with async_timeout.timeout(0.01):
assert not await stream.recv_message()
except asyncio.TimeoutError:
pass
await stream.cancel()
@pytest.mark.asyncio
async def test_watch_service_check():
svc = Service()
c1 = Check()
c2 = Check()
health = Health({svc: [
ServiceCheck(c1, check_ttl=0.001),
ServiceCheck(c2, check_ttl=0.001),
]})
async with ChannelFor([svc, health]) as channel:
stub = HealthStub(channel)
async with stub.Watch.open() as stream:
await stream.send_message(HealthCheckRequest(service=SERVICE_NAME),
end=True)
assert await stream.recv_message() == HealthCheckResponse(
status=HealthCheckResponse.UNKNOWN,
)
# check that there are no unnecessary messages
try:
async with async_timeout.timeout(0.01):
assert not await stream.recv_message()
except asyncio.TimeoutError:
pass
c1.__current_status__ = True
assert await stream.recv_message() == HealthCheckResponse(
status=HealthCheckResponse.NOT_SERVING,
)
c2.__current_status__ = True
assert await stream.recv_message() == HealthCheckResponse(
status=HealthCheckResponse.SERVING,
)
c1.__current_status__ = False
assert await stream.recv_message() == HealthCheckResponse(
status=HealthCheckResponse.NOT_SERVING,
)
c1.__current_status__ = True
assert await stream.recv_message() == HealthCheckResponse(
status=HealthCheckResponse.SERVING,
)
await stream.cancel()
@pytest.mark.asyncio
async def test_watch_service_status():
svc = Service()
s1 = ServiceStatus()
s2 = ServiceStatus()
health = Health({svc: [s1, s2]})
async with ChannelFor([svc, health]) as channel:
stub = HealthStub(channel)
async with stub.Watch.open() as stream:
await stream.send_message(HealthCheckRequest(service=SERVICE_NAME),
end=True)
assert await stream.recv_message() == HealthCheckResponse(
status=HealthCheckResponse.UNKNOWN,
)
s1.set(True)
assert await stream.recv_message() == HealthCheckResponse(
status=HealthCheckResponse.NOT_SERVING,
)
s2.set(True)
assert await stream.recv_message() == HealthCheckResponse(
status=HealthCheckResponse.SERVING,
)
s1.set(False)
assert await stream.recv_message() == HealthCheckResponse(
status=HealthCheckResponse.NOT_SERVING,
)
s1.set(True)
assert await stream.recv_message() == HealthCheckResponse(
status=HealthCheckResponse.SERVING,
)
# check that there are no unnecessary messages if status isn't
# changed
s1.set(True)
try:
async with async_timeout.timeout(0.01):
assert not await stream.recv_message()
except asyncio.TimeoutError:
pass
await stream.cancel()
| 34.119469
| 80
| 0.619634
| 766
| 7,711
| 6.079634
| 0.122715
| 0.056689
| 0.051535
| 0.075585
| 0.840885
| 0.80889
| 0.789349
| 0.754778
| 0.754778
| 0.754778
| 0
| 0.012048
| 0.289586
| 7,711
| 225
| 81
| 34.271111
| 0.83808
| 0.014654
| 0
| 0.691099
| 0
| 0
| 0.009218
| 0.002765
| 0
| 0
| 0
| 0
| 0.104712
| 1
| 0.005236
| false
| 0.020942
| 0.052356
| 0.005236
| 0.08377
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d715e0eee1626fc28d030a932d58ec7fa0bbe1bd
| 24
|
py
|
Python
|
TempFolder/__init__.py
|
erv4gen/Tools-DataProcessing
|
12d956b9757bfcde4a24e453779671b8daa7e74a
|
[
"MIT"
] | null | null | null |
TempFolder/__init__.py
|
erv4gen/Tools-DataProcessing
|
12d956b9757bfcde4a24e453779671b8daa7e74a
|
[
"MIT"
] | null | null | null |
TempFolder/__init__.py
|
erv4gen/Tools-DataProcessing
|
12d956b9757bfcde4a24e453779671b8daa7e74a
|
[
"MIT"
] | null | null | null |
from . import TempFolder
| 24
| 24
| 0.833333
| 3
| 24
| 6.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 24
| 1
| 24
| 24
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ad12d3b2890dec0a5a87b5bbc93aefaf13af066f
| 4,907
|
py
|
Python
|
tests/test_maintenance_delete.py
|
StackStorm-Exchange/zabbix
|
8a613dad10808cc5cd2f32e278e09d189b067cdf
|
[
"Apache-2.0"
] | 10
|
2018-03-07T06:12:13.000Z
|
2022-01-23T20:44:20.000Z
|
tests/test_maintenance_delete.py
|
StackStorm-Exchange/zabbix
|
8a613dad10808cc5cd2f32e278e09d189b067cdf
|
[
"Apache-2.0"
] | 36
|
2017-10-28T07:23:57.000Z
|
2021-08-18T14:38:47.000Z
|
tests/test_maintenance_delete.py
|
StackStorm-Exchange/zabbix
|
8a613dad10808cc5cd2f32e278e09d189b067cdf
|
[
"Apache-2.0"
] | 21
|
2017-10-31T01:06:42.000Z
|
2022-02-08T14:59:36.000Z
|
import mock
from zabbix_base_action_test_case import ZabbixBaseActionTestCase
from maintenance_delete import MaintenanceDelete
from six.moves.urllib.error import URLError
from pyzabbix.api import ZabbixAPIException
class MaintenanceDeleteTestCase(ZabbixBaseActionTestCase):
__test__ = True
action_cls = MaintenanceDelete
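# each test patches the ZabbixAPI client and the base connect() so no live Zabbix server is required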
@mock.patch('lib.actions.ZabbixBaseAction.connect')
def test_run_connection_error(self, mock_connect):
action = self.get_action_instance(self.full_config)
mock_connect.side_effect = URLError('connection error')
test_dict = {'maintenance_window_name': None, 'maintenance_id': '1'}
with self.assertRaises(URLError):
action.run(**test_dict)
@mock.patch('lib.actions.ZabbixAPI')
@mock.patch('lib.actions.ZabbixBaseAction.connect')
def test_run_by_id(self, mock_connect, mock_client):
action = self.get_action_instance(self.full_config)
mock_connect.return_value = "connect return"
test_dict = {'maintenance_window_name': None, 'maintenance_id': '1'}
action.connect = mock_connect
mock_client.maintenance.delete.return_value = "delete return"
action.client = mock_client
result = action.run(**test_dict)
mock_client.maintenance.delete.assert_called_with(test_dict['maintenance_id'])
self.assertEqual(result, True)
@mock.patch('lib.actions.ZabbixAPI')
@mock.patch('lib.actions.ZabbixBaseAction.connect')
def test_run_by_name(self, mock_connect, mock_client):
action = self.get_action_instance(self.full_config)
mock_connect.return_value = "connect return"
test_dict = {'maintenance_window_name': "test", 'maintenance_id': None}
maintenance_dict = {'name': "test", 'maintenanceid': 1}
action.connect = mock_connect
action.maintenance_get = mock.MagicMock(return_value=[maintenance_dict])
mock_client.maintenance.delete.return_value = "delete return"
action.client = mock_client
result = action.run(**test_dict)
mock_client.maintenance.delete.assert_called_with(maintenance_dict['maintenanceid'])
self.assertEqual(result, True)
@mock.patch('lib.actions.ZabbixAPI')
@mock.patch('lib.actions.ZabbixBaseAction.connect')
def test_run_by_name_no_return_error(self, mock_connect, mock_client):
action = self.get_action_instance(self.full_config)
mock_connect.return_value = "connect return"
test_dict = {'maintenance_window_name': "test", 'maintenance_id': None}
action.connect = mock_connect
action.maintenance_get = mock.MagicMock(return_value=[])
mock_client.maintenance.delete.return_value = "delete return"
action.client = mock_client
with self.assertRaises(ValueError):
action.run(**test_dict)
@mock.patch('lib.actions.ZabbixAPI')
@mock.patch('lib.actions.ZabbixBaseAction.connect')
def test_run_by_name_to_many_return_error(self, mock_connect, mock_client):
action = self.get_action_instance(self.full_config)
mock_connect.return_value = "connect return"
test_dict = {'maintenance_window_name': "test", 'maintenance_id': None}
maintenance_dict = [{'name': "test", 'maintenanceid': 1},
{'name': "test", 'maintenanceid': 2}]
action.connect = mock_connect
action.maintenance_get = mock.MagicMock(return_value=maintenance_dict)
mock_client.maintenance.delete.return_value = "delete return"
action.client = mock_client
with self.assertRaises(ValueError):
action.run(**test_dict)
@mock.patch('lib.actions.ZabbixAPI')
@mock.patch('lib.actions.ZabbixBaseAction.connect')
def test_run_value_error(self, mock_connect, mock_client):
action = self.get_action_instance(self.full_config)
mock_connect.return_value = "connect return"
test_dict = {'maintenance_window_name': None, 'maintenance_id': None}
action.connect = mock_connect
mock_client.maintenance.delete.return_value = "delete return"
action.client = mock_client
with self.assertRaises(ValueError):
action.run(**test_dict)
@mock.patch('lib.actions.ZabbixAPI')
@mock.patch('lib.actions.ZabbixBaseAction.connect')
def test_run_delete_error(self, mock_connect, mock_client):
action = self.get_action_instance(self.full_config)
mock_connect.return_value = "connect return"
test_dict = {'maintenance_window_name': None, 'maintenance_id': '1'}
action.connect = mock_connect
mock_client.maintenance.delete.side_effect = ZabbixAPIException('maintenance error')
mock_client.maintenance.delete.return_value = "delete return"
action.client = mock_client
with self.assertRaises(ZabbixAPIException):
action.run(**test_dict)
| 45.018349
| 92
| 0.709191
| 569
| 4,907
| 5.824253
| 0.112478
| 0.063368
| 0.047073
| 0.074532
| 0.837658
| 0.837658
| 0.837658
| 0.836451
| 0.836451
| 0.806578
| 0
| 0.001503
| 0.186468
| 4,907
| 108
| 93
| 45.435185
| 0.828657
| 0
| 0
| 0.688889
| 0
| 0
| 0.190952
| 0.109843
| 0
| 0
| 0
| 0
| 0.1
| 1
| 0.077778
| false
| 0
| 0.055556
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ad5236d5bc70d9375cc60b2b1b2f237a75bec25f
| 3,672
|
py
|
Python
|
yt/frontends/swift/tests/test_outputs.py
|
cevans216/yt
|
c19c3c615b996c8a6e418362ffea9041a616d673
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
yt/frontends/swift/tests/test_outputs.py
|
cevans216/yt
|
c19c3c615b996c8a6e418362ffea9041a616d673
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
yt/frontends/swift/tests/test_outputs.py
|
cevans216/yt
|
c19c3c615b996c8a6e418362ffea9041a616d673
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
import numpy as np
from yt import load
from yt.frontends.swift.api import SwiftDataset
from yt.testing import ParticleSelectionComparison, assert_almost_equal, requires_file
from yt.utilities.on_demand_imports import _h5py as h5py
keplerian_ring = "KeplerianRing/keplerian_ring_0020.hdf5"
EAGLE_6 = "EAGLE_6/eagle_0005.hdf5"
# Combined the tests for loading a file and ensuring the units have been
# implemented correctly to save time on re-loading a dataset
@requires_file(keplerian_ring)
def test_non_cosmo_dataset():
ds = load(keplerian_ring)
assert type(ds) is SwiftDataset
field = ("gas", "density")
ad = ds.all_data()
yt_density = ad[field]
yt_coords = ad[(field[0], "position")]
# load some data the old fashioned way
fh = h5py.File(ds.parameter_filename, "r")
part_data = fh["PartType0"]
# set up a conversion factor by loading the unit mass and unit length in cm,
# and then converting to proper coordinates
units = fh["Units"]
units = dict(units.attrs)
density_factor = float(units["Unit mass in cgs (U_M)"])
density_factor /= float(units["Unit length in cgs (U_L)"]) ** 3
# now load the raw density and coordinates
raw_density = part_data["Density"][:].astype("float64") * density_factor
raw_coords = part_data["Coordinates"][:].astype("float64")
fh.close()
# sort by the positions - yt often loads in a different order
ind_raw = np.lexsort((raw_coords[:, 2], raw_coords[:, 1], raw_coords[:, 0]))
ind_yt = np.lexsort((yt_coords[:, 2], yt_coords[:, 1], yt_coords[:, 0]))
raw_density = raw_density[ind_raw]
yt_density = yt_density[ind_yt]
# make sure we are comparing fair units
assert str(yt_density.units) == "g/cm**3"
# make sure the actual values are the same
assert_almost_equal(yt_density.d, raw_density)
@requires_file(keplerian_ring)
def test_non_cosmo_dataset_selection():
ds = load(keplerian_ring)
psc = ParticleSelectionComparison(ds)
psc.run_defaults()
@requires_file(EAGLE_6)
def test_cosmo_dataset():
ds = load(EAGLE_6)
assert type(ds) is SwiftDataset
field = ("gas", "density")
ad = ds.all_data()
yt_density = ad[field]
yt_coords = ad[(field[0], "position")]
# load some data the old fashioned way
fh = h5py.File(ds.parameter_filename, "r")
part_data = fh["PartType0"]
# set up a conversion factor by loading the unit mass and unit length in cm,
# and then converting to proper coordinates
units = fh["Units"]
units = dict(units.attrs)
density_factor = float(units["Unit mass in cgs (U_M)"])
density_factor /= float(units["Unit length in cgs (U_L)"]) ** 3
# add the redshift factor
header = fh["Header"]
header = dict(header.attrs)
density_factor *= (1.0 + float(header["Redshift"])) ** 3
# now load the raw density and coordinates
raw_density = part_data["Density"][:].astype("float64") * density_factor
raw_coords = part_data["Coordinates"][:].astype("float64")
fh.close()
# sort by the positions - yt often loads in a different order
ind_raw = np.lexsort((raw_coords[:, 2], raw_coords[:, 1], raw_coords[:, 0]))
ind_yt = np.lexsort((yt_coords[:, 2], yt_coords[:, 1], yt_coords[:, 0]))
raw_density = raw_density[ind_raw]
yt_density = yt_density[ind_yt]
# make sure we are comparing fair units
assert str(yt_density.units) == "g/cm**3"
# make sure the actual values are the same
assert_almost_equal(yt_density.d, raw_density)
@requires_file(EAGLE_6)
def test_cosmo_dataset_selection():
ds = load(EAGLE_6)
psc = ParticleSelectionComparison(ds)
psc.run_defaults()
| 34
| 86
| 0.691993
| 542
| 3,672
| 4.494465
| 0.243542
| 0.036946
| 0.019704
| 0.037767
| 0.779557
| 0.767241
| 0.729475
| 0.729475
| 0.704023
| 0.670361
| 0
| 0.017149
| 0.190087
| 3,672
| 107
| 87
| 34.317757
| 0.80195
| 0.223039
| 0
| 0.757576
| 0
| 0
| 0.1097
| 0.021517
| 0
| 0
| 0
| 0
| 0.106061
| 1
| 0.060606
| false
| 0
| 0.075758
| 0
| 0.136364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ad6bd6b4c832a2c22e49e9edb56a670552bfea0a
| 84
|
py
|
Python
|
models/__init__.py
|
rwightman/pytorch-countception-sealion
|
a938b77d4629a48a9d0b01c9dc41e4f9f188ccd7
|
[
"Apache-2.0"
] | 9
|
2017-11-09T09:01:11.000Z
|
2019-11-19T03:07:26.000Z
|
models/__init__.py
|
rwightman/pytorch-countception-sealion
|
a938b77d4629a48a9d0b01c9dc41e4f9f188ccd7
|
[
"Apache-2.0"
] | null | null | null |
models/__init__.py
|
rwightman/pytorch-countception-sealion
|
a938b77d4629a48a9d0b01c9dc41e4f9f188ccd7
|
[
"Apache-2.0"
] | 1
|
2020-03-30T09:04:44.000Z
|
2020-03-30T09:04:44.000Z
|
from .model_cnet import ModelCnet
from .model_countception import ModelCountception
| 28
| 49
| 0.880952
| 10
| 84
| 7.2
| 0.7
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 84
| 2
| 50
| 42
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ad7036972304b002ce0a9fd805648e7dfa62247a
| 21
|
py
|
Python
|
src/imports/__init__.py
|
anthonyblanchettepotvin/colorium
|
8b4927b1f615981c8a3c46f973339415695761d4
|
[
"MIT"
] | null | null | null |
src/imports/__init__.py
|
anthonyblanchettepotvin/colorium
|
8b4927b1f615981c8a3c46f973339415695761d4
|
[
"MIT"
] | null | null | null |
src/imports/__init__.py
|
anthonyblanchettepotvin/colorium
|
8b4927b1f615981c8a3c46f973339415695761d4
|
[
"MIT"
] | null | null | null |
from imports import *
| 21
| 21
| 0.809524
| 3
| 21
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 21
| 1
| 21
| 21
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ad7b46618a07f7e517487755c13aca50ffd7f622
| 5,956
|
py
|
Python
|
magenta/models/basic_rnn/basic_rnn_encoder_decoder_test.py
|
Sprog-gle/Magenta
|
55bfd53f8112cf34952e67efc646b98523837f8f
|
[
"Apache-2.0"
] | null | null | null |
magenta/models/basic_rnn/basic_rnn_encoder_decoder_test.py
|
Sprog-gle/Magenta
|
55bfd53f8112cf34952e67efc646b98523837f8f
|
[
"Apache-2.0"
] | null | null | null |
magenta/models/basic_rnn/basic_rnn_encoder_decoder_test.py
|
Sprog-gle/Magenta
|
55bfd53f8112cf34952e67efc646b98523837f8f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for basic_rnn_encoder_decoder."""
# internal imports
import tensorflow as tf
from magenta.models.basic_rnn import basic_rnn_encoder_decoder
from magenta.music import melodies_lib
NOTE_OFF = melodies_lib.MELODY_NOTE_OFF
NO_EVENT = melodies_lib.MELODY_NO_EVENT
class BasicRnnEncoderDecoderTest(tf.test.TestCase):
def testDefaultRange(self):
basic_rnn_encoder_decoder.MIN_NOTE = 48
basic_rnn_encoder_decoder.MAX_NOTE = 84
self.assertEqual(basic_rnn_encoder_decoder.TRANSPOSE_TO_KEY, 0)
melody_encoder_decoder = basic_rnn_encoder_decoder.MelodyEncoderDecoder()
self.assertEqual(melody_encoder_decoder.input_size, 38)
self.assertEqual(melody_encoder_decoder.num_classes, 38)
melody_events = [48, NO_EVENT, 49, 83, NOTE_OFF]
melody = melodies_lib.Melody(melody_events)
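# expected one-hot inputs: class 0 = NO_EVENT, 1 = NOTE_OFF, 2+ = notes starting at MIN_NOTE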
expected_inputs = [
[0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
]
expected_labels = [2, 0, 3, 37, 1]
for i in range(len(melody_events)):
self.assertListEqual(melody_encoder_decoder.events_to_input(melody, i),
expected_inputs[i])
self.assertEqual(melody_encoder_decoder.events_to_label(melody, i),
expected_labels[i])
self.assertEqual(
melody_encoder_decoder.class_index_to_event(expected_labels[i], None),
melody_events[i])
partial_melody = melodies_lib.Melody(melody_events[:i])
softmax = [[[0.0] * melody_encoder_decoder.num_classes]]
softmax[0][0][expected_labels[i]] = 1.0
melody_encoder_decoder.extend_event_sequences([partial_melody], softmax)
self.assertEqual(list(partial_melody)[-1], melody_events[i])
melodies = [melody, melody]
expected_full_length_inputs_batch = [expected_inputs, expected_inputs]
expected_last_event_inputs_batch = [expected_inputs[-1:],
expected_inputs[-1:]]
self.assertListEqual(
expected_full_length_inputs_batch,
melody_encoder_decoder.get_inputs_batch(melodies, True))
self.assertListEqual(
expected_last_event_inputs_batch,
melody_encoder_decoder.get_inputs_batch(melodies))
def testCustomRange(self):
basic_rnn_encoder_decoder.MIN_NOTE = 24
basic_rnn_encoder_decoder.MAX_NOTE = 36
melody_encoder_decoder = basic_rnn_encoder_decoder.MelodyEncoderDecoder()
self.assertEqual(melody_encoder_decoder.input_size, 14)
self.assertEqual(melody_encoder_decoder.num_classes, 14)
melody_events = [24, NO_EVENT, 25, 35, NOTE_OFF]
melody = melodies_lib.Melody(melody_events)
expected_inputs = [
[0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
]
expected_labels = [2, 0, 3, 13, 1]
for i in range(len(melody_events)):
self.assertListEqual(melody_encoder_decoder.events_to_input(melody, i),
expected_inputs[i])
self.assertEqual(melody_encoder_decoder.events_to_label(melody, i),
expected_labels[i])
self.assertEqual(
melody_encoder_decoder.class_index_to_event(expected_labels[i], None),
melody_events[i])
partial_melody = melodies_lib.Melody(melody_events[:i])
softmax = [[[0.0] * melody_encoder_decoder.num_classes]]
softmax[0][0][expected_labels[i]] = 1.0
melody_encoder_decoder.extend_event_sequences([partial_melody], softmax)
self.assertEqual(list(partial_melody)[-1], melody_events[i])
melodies = [melody, melody]
expected_full_length_inputs_batch = [expected_inputs, expected_inputs]
expected_last_event_inputs_batch = [expected_inputs[-1:],
expected_inputs[-1:]]
self.assertListEqual(
expected_full_length_inputs_batch,
melody_encoder_decoder.get_inputs_batch(melodies, True))
self.assertListEqual(
expected_last_event_inputs_batch,
melody_encoder_decoder.get_inputs_batch(melodies))
if __name__ == '__main__':
tf.test.main()
| 45.465649
| 80
| 0.631464
| 1,092
| 5,956
| 3.260073
| 0.131868
| 0.282022
| 0.409551
| 0.532584
| 0.749438
| 0.749438
| 0.733146
| 0.689326
| 0.689326
| 0.689326
| 0
| 0.125213
| 0.212895
| 5,956
| 130
| 81
| 45.815385
| 0.634172
| 0.1046
| 0
| 0.618557
| 0
| 0
| 0.001505
| 0
| 0
| 0
| 0
| 0
| 0.175258
| 1
| 0.020619
| false
| 0
| 0.030928
| 0
| 0.061856
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|