hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e67e6ee435117298b96e0824cdefb562480a1f89
| 1,602
|
py
|
Python
|
8 - Largest Product in a Series/largest_product.py
|
jamtot/PyProjectEuler
|
98c2cf5dbfa3d38a2727f2ad204e41f01b624dda
|
[
"MIT"
] | null | null | null |
8 - Largest Product in a Series/largest_product.py
|
jamtot/PyProjectEuler
|
98c2cf5dbfa3d38a2727f2ad204e41f01b624dda
|
[
"MIT"
] | null | null | null |
8 - Largest Product in a Series/largest_product.py
|
jamtot/PyProjectEuler
|
98c2cf5dbfa3d38a2727f2ad204e41f01b624dda
|
[
"MIT"
] | null | null | null |
input = """73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450"""
def get_num_list(input):
numers = []
for line in input:
numers+=[int(x) for x in line.split()]
return numers
def highest_prod(numlist, adjacents):
checklist = [int(x) for x in xrange(adjacents)]
largest = 0
prod = 1
for i in xrange(len(numlist)-adjacents):
for j in xrange(adjacents):
prod*=numlist[i+checklist[j]]
largest = max(prod, largest)
prod = 1
return largest
numebrs = get_num_list(input)
print highest_prod(numebrs, 4) # 5832)
print highest_prod(numebrs, 13) # 23514624000
| 38.142857
| 61
| 0.850811
| 100
| 1,602
| 13.56
| 0.52
| 0.024336
| 0.014749
| 0.022124
| 0.014749
| 0
| 0
| 0
| 0
| 0
| 0
| 0.712989
| 0.106117
| 1,602
| 41
| 62
| 39.073171
| 0.233939
| 0.010612
| 0
| 0.052632
| 0
| 0
| 0.644121
| 0.632111
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.052632
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e69970ccf1359c0ea3d314dde3f3182c65f809ad
| 121,848
|
py
|
Python
|
tests/unit/modules/test_vsphere.py
|
Noah-Huppert/salt
|
998c382f5f2c3b4cbf7d96aa6913ada6993909b3
|
[
"Apache-2.0"
] | 19
|
2016-01-29T14:37:52.000Z
|
2022-03-30T18:08:01.000Z
|
tests/unit/modules/test_vsphere.py
|
Noah-Huppert/salt
|
998c382f5f2c3b4cbf7d96aa6913ada6993909b3
|
[
"Apache-2.0"
] | 223
|
2016-03-02T16:39:41.000Z
|
2022-03-03T12:26:35.000Z
|
tests/unit/modules/test_vsphere.py
|
Noah-Huppert/salt
|
998c382f5f2c3b4cbf7d96aa6913ada6993909b3
|
[
"Apache-2.0"
] | 64
|
2016-02-04T19:45:26.000Z
|
2021-12-15T02:02:31.000Z
|
"""
:codeauthor: Nicole Thomas <nicole@saltstack.com>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Tests for functions in salt.modules.vsphere
"""
import salt.modules.vsphere as vsphere
import salt.utils.args
import salt.utils.vmware
from salt.exceptions import (
ArgumentValueError,
CommandExecutionError,
VMwareObjectRetrievalError,
VMwareSaltError,
)
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, Mock, call, patch
from tests.support.unit import TestCase, skipIf
try:
from pyVmomi import vim, vmodl # pylint: disable=unused-import,no-name-in-module
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
from com.vmware.vapi.std_client import DynamicID # pylint: disable=unused-import
HAS_VSPHERE_SDK = True
except ImportError:
HAS_VSPHERE_SDK = False
# Globals
HOST = "1.2.3.4"
USER = "root"
PASSWORD = "SuperSecret!"
ERROR = "Some Testing Error Message"
class VsphereTestCase(TestCase, LoaderModuleMockMixin):
"""
Unit TestCase for the salt.modules.vsphere module.
"""
    def setup_loader_modules(self):
        """Map the module under test to an empty loader context."""
        return {vsphere: {}}
# Tests for get_coredump_network_config function
def test_get_coredump_network_config_esxi_hosts_not_list(self):
"""
Tests CommandExecutionError is raised when esxi_hosts is provided,
but is not a list.
"""
self.assertRaises(
CommandExecutionError,
vsphere.get_coredump_network_config,
HOST,
USER,
PASSWORD,
esxi_hosts="foo",
)
def test_get_coredump_network_config_host_list_bad_retcode(self):
"""
Tests error message returned with list of esxi_hosts.
"""
with patch(
"salt.utils.vmware.esxcli",
MagicMock(return_value={"retcode": 1, "stdout": ERROR}),
):
host_1 = "host_1.foo.com"
self.assertEqual(
{host_1: {"Error": ERROR}},
vsphere.get_coredump_network_config(
HOST, USER, PASSWORD, esxi_hosts=[host_1]
),
)
def test_get_coredump_network_config_host_list_success(self):
"""
Tests successful function return when an esxi_host is provided.
"""
with patch(
"salt.utils.vmware.esxcli",
MagicMock(return_value={"retcode": 0, "stdout": ""}),
):
with patch(
"salt.modules.vsphere._format_coredump_stdout",
MagicMock(return_value={}),
):
host_1 = "host_1.foo.com"
self.assertEqual(
{host_1: {"Coredump Config": {}}},
vsphere.get_coredump_network_config(
HOST, USER, PASSWORD, esxi_hosts=[host_1]
),
)
def test_get_coredump_network_config_bad_retcode(self):
"""
Tests error message given for a single ESXi host.
"""
with patch(
"salt.utils.vmware.esxcli",
MagicMock(return_value={"retcode": 1, "stdout": ERROR}),
):
self.assertEqual(
{HOST: {"Error": ERROR}},
vsphere.get_coredump_network_config(HOST, USER, PASSWORD),
)
def test_get_coredump_network_config_success(self):
"""
Tests successful function return for a single ESXi host.
"""
with patch(
"salt.utils.vmware.esxcli",
MagicMock(return_value={"retcode": 0, "stdout": ""}),
):
with patch(
"salt.modules.vsphere._format_coredump_stdout",
MagicMock(return_value={}),
):
self.assertEqual(
{HOST: {"Coredump Config": {}}},
vsphere.get_coredump_network_config(HOST, USER, PASSWORD),
)
# Tests for coredump_network_enable function
def test_coredump_network_enable_esxi_hosts_not_list(self):
"""
Tests CommandExecutionError is raised when esxi_hosts is provided,
but is not a list.
"""
self.assertRaises(
CommandExecutionError,
vsphere.coredump_network_enable,
HOST,
USER,
PASSWORD,
True,
esxi_hosts="foo",
)
def test_coredump_network_enable_host_list_bad_retcode(self):
"""
Tests error message returned with list of esxi_hosts.
"""
with patch(
"salt.utils.vmware.esxcli",
MagicMock(return_value={"retcode": 1, "stdout": ERROR}),
):
host_1 = "host_1.foo.com"
self.assertEqual(
{host_1: {"Error": ERROR}},
vsphere.coredump_network_enable(
HOST, USER, PASSWORD, True, esxi_hosts=[host_1]
),
)
def test_coredump_network_enable_host_list_success(self):
"""
Tests successful function return when an esxi_host is provided.
"""
with patch(
"salt.utils.vmware.esxcli",
MagicMock(return_value={"retcode": 0, "stdout": ""}),
):
with patch(
"salt.modules.vsphere._format_coredump_stdout",
MagicMock(return_value={}),
):
enabled = True
host_1 = "host_1.foo.com"
self.assertEqual(
{host_1: {"Coredump Enabled": enabled}},
vsphere.coredump_network_enable(
HOST, USER, PASSWORD, enabled, esxi_hosts=[host_1]
),
)
def test_coredump_network_enable_bad_retcode(self):
"""
Tests error message given for a single ESXi host.
"""
with patch(
"salt.utils.vmware.esxcli",
MagicMock(return_value={"retcode": 1, "stdout": ERROR}),
):
self.assertEqual(
{HOST: {"Error": ERROR}},
vsphere.coredump_network_enable(HOST, USER, PASSWORD, True),
)
def test_coredump_network_enable_success(self):
"""
Tests successful function return for a single ESXi host.
"""
with patch(
"salt.utils.vmware.esxcli",
MagicMock(return_value={"retcode": 0, "stdout": ""}),
):
with patch(
"salt.modules.vsphere._format_coredump_stdout",
MagicMock(return_value={}),
):
enabled = True
self.assertEqual(
{HOST: {"Coredump Enabled": enabled}},
vsphere.coredump_network_enable(HOST, USER, PASSWORD, enabled),
)
# Tests for set_coredump_network_config function
def test_set_coredump_network_config_esxi_hosts_not_list(self):
"""
Tests CommandExecutionError is raised when esxi_hosts is provided,
but is not a list.
"""
self.assertRaises(
CommandExecutionError,
vsphere.set_coredump_network_config,
HOST,
USER,
PASSWORD,
"loghost",
"foo",
esxi_hosts="bar",
)
def test_set_coredump_network_config_host_list_bad_retcode(self):
"""
Tests error message returned with list of esxi_hosts.
"""
with patch("salt.utils.vmware.esxcli", MagicMock(return_value={"retcode": 1})):
host_1 = "host_1.foo.com"
self.assertEqual(
{host_1: {"retcode": 1, "success": False}},
vsphere.set_coredump_network_config(
HOST, USER, PASSWORD, "dump-ip.test.com", esxi_hosts=[host_1]
),
)
def test_set_coredump_network_config_host_list_success(self):
"""
Tests successful function return when an esxi_host is provided.
"""
with patch("salt.utils.vmware.esxcli", MagicMock(return_value={"retcode": 0})):
host_1 = "host_1.foo.com"
self.assertEqual(
{host_1: {"retcode": 0, "success": True}},
vsphere.set_coredump_network_config(
HOST, USER, PASSWORD, "dump-ip.test.com", esxi_hosts=[host_1]
),
)
def test_set_coredump_network_config_bad_retcode(self):
"""
Tests error message given for a single ESXi host.
"""
with patch("salt.utils.vmware.esxcli", MagicMock(return_value={"retcode": 1})):
self.assertEqual(
{HOST: {"retcode": 1, "success": False}},
vsphere.set_coredump_network_config(
HOST, USER, PASSWORD, "dump-ip.test.com"
),
)
def test_set_coredump_network_config_success(self):
"""
Tests successful function return for a single ESXi host.
"""
with patch("salt.utils.vmware.esxcli", MagicMock(return_value={"retcode": 0})):
self.assertEqual(
{HOST: {"retcode": 0, "success": True}},
vsphere.set_coredump_network_config(
HOST, USER, PASSWORD, "dump-ip.test.com"
),
)
# Tests for get_firewall_status function
def test_get_firewall_status_esxi_hosts_not_list(self):
"""
Tests CommandExecutionError is raised when esxi_hosts is provided,
but is not a list.
"""
self.assertRaises(
CommandExecutionError,
vsphere.get_firewall_status,
HOST,
USER,
PASSWORD,
esxi_hosts="foo",
)
def test_get_firewall_status_host_list_bad_retcode(self):
"""
Tests error message returned with list of esxi_hosts.
"""
with patch(
"salt.utils.vmware.esxcli",
MagicMock(return_value={"retcode": 1, "stdout": ERROR}),
):
host_1 = "host_1.foo.com"
self.assertEqual(
{host_1: {"success": False, "Error": ERROR, "rulesets": None}},
vsphere.get_firewall_status(HOST, USER, PASSWORD, esxi_hosts=[host_1]),
)
def test_get_firewall_status_host_list_success(self):
"""
Tests successful function return when an esxi_host is provided.
"""
with patch(
"salt.utils.vmware.esxcli",
MagicMock(return_value={"retcode": 0, "stdout": ""}),
):
host_1 = "host_1.foo.com"
self.assertEqual(
{host_1: {"rulesets": {}, "success": True}},
vsphere.get_firewall_status(HOST, USER, PASSWORD, esxi_hosts=[host_1]),
)
def test_get_firewall_status_bad_retcode(self):
"""
Tests error message given for a single ESXi host.
"""
with patch(
"salt.utils.vmware.esxcli",
MagicMock(return_value={"retcode": 1, "stdout": ERROR}),
):
self.assertEqual(
{HOST: {"success": False, "Error": ERROR, "rulesets": None}},
vsphere.get_firewall_status(HOST, USER, PASSWORD),
)
def test_get_firewall_status_success(self):
"""
Tests successful function return for a single ESXi host.
"""
with patch(
"salt.utils.vmware.esxcli",
MagicMock(return_value={"retcode": 0, "stdout": ""}),
):
self.assertEqual(
{HOST: {"rulesets": {}, "success": True}},
vsphere.get_firewall_status(HOST, USER, PASSWORD),
)
# Tests for enable_firewall_ruleset function
def test_enable_firewall_ruleset_esxi_hosts_not_list(self):
"""
Tests CommandExecutionError is raised when esxi_hosts is provided,
but is not a list.
"""
self.assertRaises(
CommandExecutionError,
vsphere.enable_firewall_ruleset,
HOST,
USER,
PASSWORD,
"foo",
"bar",
esxi_hosts="baz",
)
# Tests for syslog_service_reload function
def test_syslog_service_reload_esxi_hosts_not_list(self):
"""
Tests CommandExecutionError is raised when esxi_hosts is provided,
but is not a list.
"""
self.assertRaises(
CommandExecutionError,
vsphere.syslog_service_reload,
HOST,
USER,
PASSWORD,
esxi_hosts="foo",
)
# Tests for set_syslog_config function.
# These tests only test the firewall=True and syslog_config == 'loghost' if block.
# The rest of the function is tested in the _set_syslog_config_helper tests below.
def test_set_syslog_config_esxi_hosts_not_list(self):
"""
Tests CommandExecutionError is raised when esxi_hosts is provided,
but is not a list, but we don't enter the 'loghost'/firewall loop.
"""
self.assertRaises(
CommandExecutionError,
vsphere.set_syslog_config,
HOST,
USER,
PASSWORD,
"foo",
"bar",
esxi_hosts="baz",
)
def test_set_syslog_config_esxi_hosts_not_list_firewall(self):
"""
Tests CommandExecutionError is raised when esxi_hosts is provided,
but is not a list, and we enter the 'loghost'/firewall loop.
"""
self.assertRaises(
CommandExecutionError,
vsphere.set_syslog_config,
HOST,
USER,
PASSWORD,
"loghost",
"foo",
firewall=True,
esxi_hosts="bar",
)
def test_set_syslog_config_host_list_firewall_bad_retcode(self):
"""
Tests error message returned with list of esxi_hosts with 'loghost' as syslog_config.
"""
with patch(
"salt.modules.vsphere.enable_firewall_ruleset",
MagicMock(return_value={"host_1.foo.com": {"retcode": 1, "stdout": ERROR}}),
):
with patch(
"salt.modules.vsphere._set_syslog_config_helper",
MagicMock(return_value={}),
):
host_1 = "host_1.foo.com"
self.assertEqual(
{host_1: {"enable_firewall": {"message": ERROR, "success": False}}},
vsphere.set_syslog_config(
HOST,
USER,
PASSWORD,
"loghost",
"foo",
firewall=True,
esxi_hosts=[host_1],
),
)
def test_set_syslog_config_host_list_firewall_success(self):
"""
Tests successful function return with list of esxi_hosts with 'loghost' as syslog_config.
"""
with patch(
"salt.modules.vsphere.enable_firewall_ruleset",
MagicMock(return_value={"host_1.foo.com": {"retcode": 0}}),
):
with patch(
"salt.modules.vsphere._set_syslog_config_helper",
MagicMock(return_value={}),
):
host_1 = "host_1.foo.com"
self.assertEqual(
{host_1: {"enable_firewall": {"success": True}}},
vsphere.set_syslog_config(
HOST,
USER,
PASSWORD,
"loghost",
"foo",
firewall=True,
esxi_hosts=[host_1],
),
)
def test_set_syslog_config_firewall_bad_retcode(self):
"""
Tests error message given for a single ESXi host with 'loghost' as syslog_config.
"""
with patch(
"salt.modules.vsphere.enable_firewall_ruleset",
MagicMock(return_value={HOST: {"retcode": 1, "stdout": ERROR}}),
):
with patch(
"salt.modules.vsphere._set_syslog_config_helper",
MagicMock(return_value={}),
):
self.assertEqual(
{HOST: {"enable_firewall": {"message": ERROR, "success": False}}},
vsphere.set_syslog_config(
HOST, USER, PASSWORD, "loghost", "foo", firewall=True
),
)
def test_set_syslog_config_firewall_success(self):
"""
Tests successful function return for a single ESXi host with 'loghost' as syslog_config.
"""
with patch(
"salt.modules.vsphere.enable_firewall_ruleset",
MagicMock(return_value={HOST: {"retcode": 0}}),
):
with patch(
"salt.modules.vsphere._set_syslog_config_helper",
MagicMock(return_value={}),
):
self.assertEqual(
{HOST: {"enable_firewall": {"success": True}}},
vsphere.set_syslog_config(
HOST, USER, PASSWORD, "loghost", "foo", firewall=True
),
)
# Tests for get_syslog_config function
def test_get_syslog_config_esxi_hosts_not_list(self):
"""
Tests CommandExecutionError is raised when esxi_hosts is provided,
but is not a list.
"""
self.assertRaises(
CommandExecutionError,
vsphere.get_syslog_config,
HOST,
USER,
PASSWORD,
esxi_hosts="foo",
)
def test_get_syslog_config_host_list_bad_retcode(self):
"""
Tests error message returned with list of esxi_hosts.
"""
with patch(
"salt.utils.vmware.esxcli",
MagicMock(return_value={"retcode": 1, "stdout": ERROR}),
):
host_1 = "host_1.foo.com"
self.assertEqual(
{host_1: {"message": ERROR, "success": False}},
vsphere.get_syslog_config(HOST, USER, PASSWORD, esxi_hosts=[host_1]),
)
def test_get_syslog_config_host_list_success(self):
"""
Tests successful function return when an esxi_host is provided.
"""
with patch(
"salt.utils.vmware.esxcli",
MagicMock(return_value={"retcode": 0, "stdout": ""}),
):
host_1 = "host_1.foo.com"
self.assertEqual(
{host_1: {"success": True}},
vsphere.get_syslog_config(HOST, USER, PASSWORD, esxi_hosts=[host_1]),
)
def test_get_syslog_config_bad_retcode(self):
"""
Tests error message given for a single ESXi host.
"""
with patch(
"salt.utils.vmware.esxcli",
MagicMock(return_value={"retcode": 1, "stdout": ERROR}),
):
self.assertEqual(
{HOST: {"message": ERROR, "success": False}},
vsphere.get_syslog_config(HOST, USER, PASSWORD),
)
def test_get_syslog_config_success(self):
"""
Tests successful function return for a single ESXi host.
"""
with patch(
"salt.utils.vmware.esxcli",
MagicMock(return_value={"retcode": 0, "stdout": ""}),
):
self.assertEqual(
{HOST: {"success": True}},
vsphere.get_syslog_config(HOST, USER, PASSWORD),
)
# Tests for reset_syslog_config function
def test_reset_syslog_config_no_syslog_config(self):
"""
Tests CommandExecutionError is raised when a syslog_config parameter is missing.
"""
self.assertRaises(
CommandExecutionError, vsphere.reset_syslog_config, HOST, USER, PASSWORD
)
def test_reset_syslog_config_esxi_hosts_not_list(self):
"""
Tests CommandExecutionError is raised when esxi_hosts is provided,
but is not a list.
"""
self.assertRaises(
CommandExecutionError,
vsphere.reset_syslog_config,
HOST,
USER,
PASSWORD,
syslog_config="test",
esxi_hosts="foo",
)
def test_reset_syslog_config_invalid_config_param(self):
"""
Tests error message returned when an invalid syslog_config parameter is provided.
"""
with patch("salt.utils.vmware.esxcli", MagicMock(return_value={})):
error = "Invalid syslog configuration parameter"
self.assertEqual(
{
HOST: {
"success": False,
"test": {"message": error, "success": False},
}
},
vsphere.reset_syslog_config(HOST, USER, PASSWORD, syslog_config="test"),
)
def test_reset_syslog_config_host_list_bad_retcode(self):
"""
Tests error message returned with list of esxi_hosts.
"""
with patch(
"salt.utils.vmware.esxcli",
MagicMock(return_value={"retcode": 1, "stdout": ERROR}),
):
host_1 = "host_1.foo.com"
self.assertEqual(
{
host_1: {
"success": False,
"logdir": {"message": ERROR, "success": False},
}
},
vsphere.reset_syslog_config(
HOST, USER, PASSWORD, syslog_config="logdir", esxi_hosts=[host_1]
),
)
def test_reset_syslog_config_host_list_success(self):
"""
Tests successful function return when an esxi_host is provided.
"""
with patch(
"salt.utils.vmware.esxcli",
MagicMock(return_value={"retcode": 0, "stdout": ""}),
):
host_1 = "host_1.foo.com"
self.assertEqual(
{host_1: {"success": True, "loghost": {"success": True}}},
vsphere.reset_syslog_config(
HOST, USER, PASSWORD, syslog_config="loghost", esxi_hosts=[host_1]
),
)
def test_reset_syslog_config_bad_retcode(self):
"""
Tests error message given for a single ESXi host.
"""
with patch(
"salt.utils.vmware.esxcli",
MagicMock(return_value={"retcode": 1, "stdout": ERROR}),
):
self.assertEqual(
{
HOST: {
"success": False,
"logdir-unique": {"message": ERROR, "success": False},
}
},
vsphere.reset_syslog_config(
HOST, USER, PASSWORD, syslog_config="logdir-unique"
),
)
def test_reset_syslog_config_success(self):
"""
Tests successful function return for a single ESXi host.
"""
with patch(
"salt.utils.vmware.esxcli",
MagicMock(return_value={"retcode": 0, "stdout": ""}),
):
self.assertEqual(
{HOST: {"success": True, "default-rotate": {"success": True}}},
vsphere.reset_syslog_config(
HOST, USER, PASSWORD, syslog_config="default-rotate"
),
)
def test_reset_syslog_config_success_multiple_configs(self):
"""
Tests successful function return for a single ESXi host when passing in multiple syslog_config values.
"""
with patch(
"salt.utils.vmware.esxcli",
MagicMock(return_value={"retcode": 0, "stdout": ""}),
):
self.assertEqual(
{
HOST: {
"success": True,
"default-size": {"success": True},
"default-timeout": {"success": True},
}
},
vsphere.reset_syslog_config(
HOST, USER, PASSWORD, syslog_config="default-size,default-timeout"
),
)
def test_reset_syslog_config_success_all_configs(self):
"""
Tests successful function return for a single ESXi host when passing in multiple syslog_config values.
"""
with patch(
"salt.utils.vmware.esxcli",
MagicMock(return_value={"retcode": 0, "stdout": ""}),
):
self.assertEqual(
{
HOST: {
"success": True,
"logdir": {"success": True},
"loghost": {"success": True},
"default-rotate": {"success": True},
"default-size": {"success": True},
"default-timeout": {"success": True},
"logdir-unique": {"success": True},
}
},
vsphere.reset_syslog_config(HOST, USER, PASSWORD, syslog_config="all"),
)
# Tests for _reset_syslog_config_params function
def test_reset_syslog_config_params_no_valid_reset(self):
"""
Tests function returns False when an invalid syslog config is passed.
"""
valid_resets = ["hello", "world"]
config = "foo"
ret = {
"success": False,
config: {
"success": False,
"message": "Invalid syslog configuration parameter",
},
}
self.assertEqual(
ret,
vsphere._reset_syslog_config_params(
HOST, USER, PASSWORD, "cmd", config, valid_resets
),
)
    def test_reset_syslog_config_params_error(self):
        """
        Tests function returns False when the esxcli function returns an
        unsuccessful retcode.
        """
        with patch(
            "salt.utils.vmware.esxcli",
            MagicMock(return_value={"retcode": 1, "stdout": ERROR}),
        ):
            valid_resets = ["hello", "world"]
            # Every reset in valid_resets is expected to carry the same
            # mocked failure message.
            error_dict = {"success": False, "message": ERROR}
            ret = {"success": False, "hello": error_dict, "world": error_dict}
            self.assertDictEqual(
                ret,
                vsphere._reset_syslog_config_params(
                    HOST, USER, PASSWORD, "cmd", valid_resets, valid_resets
                ),
            )
    def test_reset_syslog_config_params_success(self):
        """
        Tests function returns True when the esxcli function returns a
        successful retcode.
        """
        with patch("salt.utils.vmware.esxcli", MagicMock(return_value={"retcode": 0})):
            valid_resets = ["hello", "world"]
            # A zero retcode marks the overall call and every individual
            # reset as successful.
            ret = {
                "success": True,
                "hello": {"success": True},
                "world": {"success": True},
            }
            self.assertDictEqual(
                ret,
                vsphere._reset_syslog_config_params(
                    HOST, USER, PASSWORD, "cmd", valid_resets, valid_resets
                ),
            )
# Tests for _set_syslog_config_helper function
def test_set_syslog_config_helper_no_valid_reset(self):
"""
Tests function returns False when an invalid syslog config is passed.
"""
config = "foo"
ret = {
"success": False,
"message": "'{}' is not a valid config variable.".format(config),
}
self.assertEqual(
ret, vsphere._set_syslog_config_helper(HOST, USER, PASSWORD, config, "bar")
)
def test_set_syslog_config_helper_bad_retcode(self):
"""
Tests function returns False when the esxcli function returns an unsuccessful retcode.
"""
with patch(
"salt.utils.vmware.esxcli",
MagicMock(return_value={"retcode": 1, "stdout": ERROR}),
):
config = "default-rotate"
self.assertEqual(
{config: {"success": False, "message": ERROR}},
vsphere._set_syslog_config_helper(HOST, USER, PASSWORD, config, "foo"),
)
def test_set_syslog_config_helper_success(self):
"""
Tests successful function return.
"""
with patch("salt.utils.vmware.esxcli", MagicMock(return_value={"retcode": 0})):
config = "logdir"
self.assertEqual(
{config: {"success": True}},
vsphere._set_syslog_config_helper(HOST, USER, PASSWORD, config, "foo"),
)
class GetProxyTypeTestCase(TestCase, LoaderModuleMockMixin):
    """
    Tests for salt.modules.vsphere.get_proxy_type
    """

    def setup_loader_modules(self):
        """Map the module under test to an empty loader context."""
        return {vsphere: {}}

    def test_output(self):
        """get_proxy_type reflects the pillar's proxy:proxytype value."""
        fake_pillar = {"proxy": {"proxytype": "fake_proxy_type"}}
        with patch.dict(vsphere.__pillar__, fake_pillar):
            self.assertEqual(vsphere.get_proxy_type(), "fake_proxy_type")
class SupportsProxiesTestCase(TestCase, LoaderModuleMockMixin):
    """
    Tests for the salt.modules.vsphere.supports_proxies decorator.
    """

    def setup_loader_modules(self):
        """Map the module under test to an empty loader context."""
        return {vsphere: {}}

    def test_supported_proxy(self):
        """A matching proxy type lets the wrapped function run normally."""
        @vsphere.supports_proxies("supported")
        def mock_function():
            return "fake_function"

        proxy_mock = MagicMock(return_value="supported")
        with patch("salt.modules.vsphere.get_proxy_type", proxy_mock):
            self.assertEqual(mock_function(), "fake_function")

    def test_unsupported_proxy(self):
        """A non-matching proxy type raises CommandExecutionError."""
        @vsphere.supports_proxies("supported")
        def mock_function():
            return "fake_function"

        proxy_mock = MagicMock(return_value="unsupported")
        with patch("salt.modules.vsphere.get_proxy_type", proxy_mock):
            with self.assertRaises(CommandExecutionError) as excinfo:
                mock_function()
            self.assertEqual(
                excinfo.exception.strerror,
                "'unsupported' proxy is not supported by function mock_function",
            )
class _GetProxyConnectionDetailsTestCase(TestCase, LoaderModuleMockMixin):
"""
Tests for salt.modules.vsphere._get_proxy_connection_details
"""
def setup_loader_modules(self):
return {vsphere: {}}
def setUp(self):
self.esxi_host_details = {
"host": "fake_host",
"username": "fake_username",
"password": "fake_password",
"protocol": "fake_protocol",
"port": "fake_port",
"mechanism": "fake_mechanism",
"principal": "fake_principal",
"domain": "fake_domain",
}
self.esxi_vcenter_details = {
"vcenter": "fake_vcenter",
"username": "fake_username",
"password": "fake_password",
"protocol": "fake_protocol",
"port": "fake_port",
"mechanism": "fake_mechanism",
"principal": "fake_principal",
"domain": "fake_domain",
}
self.esxdatacenter_details = {
"vcenter": "fake_vcenter",
"datacenter": "fake_dc",
"username": "fake_username",
"password": "fake_password",
"protocol": "fake_protocol",
"port": "fake_port",
"mechanism": "fake_mechanism",
"principal": "fake_principal",
"domain": "fake_domain",
}
self.esxcluster_details = {
"vcenter": "fake_vcenter",
"datacenter": "fake_dc",
"cluster": "fake_cluster",
"username": "fake_username",
"password": "fake_password",
"protocol": "fake_protocol",
"port": "fake_port",
"mechanism": "fake_mechanism",
"principal": "fake_principal",
"domain": "fake_domain",
}
self.vcenter_details = {
"vcenter": "fake_vcenter",
"username": "fake_username",
"password": "fake_password",
"protocol": "fake_protocol",
"port": "fake_port",
"mechanism": "fake_mechanism",
"principal": "fake_principal",
"domain": "fake_domain",
}
def tearDown(self):
for attrname in (
"esxi_host_details",
"esxi_vcenter_details",
"esxdatacenter_details",
"esxcluster_details",
):
try:
delattr(self, attrname)
except AttributeError:
continue
def test_esxi_proxy_host_details(self):
with patch(
"salt.modules.vsphere.get_proxy_type", MagicMock(return_value="esxi")
):
with patch.dict(
vsphere.__salt__,
{"esxi.get_details": MagicMock(return_value=self.esxi_host_details)},
):
ret = vsphere._get_proxy_connection_details()
self.assertEqual(
(
"fake_host",
"fake_username",
"fake_password",
"fake_protocol",
"fake_port",
"fake_mechanism",
"fake_principal",
"fake_domain",
),
ret,
)
def test_esxdatacenter_proxy_details(self):
with patch(
"salt.modules.vsphere.get_proxy_type",
MagicMock(return_value="esxdatacenter"),
):
with patch.dict(
vsphere.__salt__,
{
"esxdatacenter.get_details": MagicMock(
return_value=self.esxdatacenter_details
)
},
):
ret = vsphere._get_proxy_connection_details()
self.assertEqual(
(
"fake_vcenter",
"fake_username",
"fake_password",
"fake_protocol",
"fake_port",
"fake_mechanism",
"fake_principal",
"fake_domain",
),
ret,
)
def test_esxcluster_proxy_details(self):
with patch(
"salt.modules.vsphere.get_proxy_type", MagicMock(return_value="esxcluster")
):
with patch.dict(
vsphere.__salt__,
{
"esxcluster.get_details": MagicMock(
return_value=self.esxcluster_details
)
},
):
ret = vsphere._get_proxy_connection_details()
self.assertEqual(
(
"fake_vcenter",
"fake_username",
"fake_password",
"fake_protocol",
"fake_port",
"fake_mechanism",
"fake_principal",
"fake_domain",
),
ret,
)
def test_esxi_proxy_vcenter_details(self):
with patch(
"salt.modules.vsphere.get_proxy_type", MagicMock(return_value="esxi")
):
with patch.dict(
vsphere.__salt__,
{"esxi.get_details": MagicMock(return_value=self.esxi_vcenter_details)},
):
ret = vsphere._get_proxy_connection_details()
self.assertEqual(
(
"fake_vcenter",
"fake_username",
"fake_password",
"fake_protocol",
"fake_port",
"fake_mechanism",
"fake_principal",
"fake_domain",
),
ret,
)
def test_vcenter_proxy_details(self):
with patch(
"salt.modules.vsphere.get_proxy_type", MagicMock(return_value="vcenter")
):
with patch.dict(
vsphere.__salt__,
{"vcenter.get_details": MagicMock(return_value=self.vcenter_details)},
):
ret = vsphere._get_proxy_connection_details()
self.assertEqual(
(
"fake_vcenter",
"fake_username",
"fake_password",
"fake_protocol",
"fake_port",
"fake_mechanism",
"fake_principal",
"fake_domain",
),
ret,
)
def test_unsupported_proxy_details(self):
with patch(
"salt.modules.vsphere.get_proxy_type", MagicMock(return_value="unsupported")
):
with self.assertRaises(CommandExecutionError) as excinfo:
ret = vsphere._get_proxy_connection_details()
self.assertEqual(
"'unsupported' proxy is not supported", excinfo.exception.strerror
)
class GetsServiceInstanceViaProxyTestCase(TestCase, LoaderModuleMockMixin):
    """
    Tests for salt.modules.vsphere.gets_service_instance_via_proxy
    decorator
    """

    def setup_loader_modules(self):
        """Patch connection setup/teardown and stub the proxy details helper."""
        for target in (
            "salt.utils.vmware.get_service_instance",
            "salt.utils.vmware.disconnect",
        ):
            patcher = patch(target, MagicMock())
            patcher.start()
            self.addCleanup(patcher.stop)
        return {vsphere: {"_get_proxy_connection_details": MagicMock()}}

    def setUp(self):
        """Create a fake service instance and two opaque connection details."""
        self.mock_si = MagicMock()
        self.mock_details1 = MagicMock()
        self.mock_details2 = MagicMock()

    def tearDown(self):
        """Drop the mocks created in setUp."""
        for attrname in ("mock_si", "mock_details1", "mock_details2"):
            if hasattr(self, attrname):
                delattr(self, attrname)

    def _call_with_patched_proxy(
        self, func, mock_get_si, mock_disconnect, *args, **kwargs
    ):
        """Run *func* with the proxy/connection helpers replaced by mocks."""
        with patch(
            "salt.modules.vsphere._get_proxy_connection_details",
            MagicMock(return_value=(self.mock_details1, self.mock_details2)),
        ):
            with patch("salt.utils.vmware.get_service_instance", mock_get_si):
                with patch("salt.utils.vmware.disconnect", mock_disconnect):
                    return func(*args, **kwargs)

    def test_no_service_instance_or_kwargs_parameters(self):
        """Functions without 'service_instance' or **kwargs are rejected."""

        @vsphere.gets_service_instance_via_proxy
        def mock_function():
            return "fake_function"

        with self.assertRaises(CommandExecutionError) as excinfo:
            mock_function()
        self.assertEqual(
            "Function mock_function must have either a "
            "'service_instance', or a '**kwargs' type "
            "parameter",
            excinfo.exception.strerror,
        )

    def test___get_proxy_connection_details_call(self):
        """The decorator queries the proxy connection details exactly once."""
        mock_details_fn = MagicMock()

        @vsphere.gets_service_instance_via_proxy
        def mock_function(service_instance=None):
            return service_instance

        with patch(
            "salt.modules.vsphere._get_proxy_connection_details", mock_details_fn
        ):
            mock_function()
        mock_details_fn.assert_called_once_with()

    def test_service_instance_named_parameter_no_value(self):
        """A default instance is created, injected and disconnected again."""
        mock_get_si = MagicMock(return_value=self.mock_si)
        mock_disconnect = MagicMock()

        @vsphere.gets_service_instance_via_proxy
        def mock_function(service_instance=None):
            return service_instance

        ret = self._call_with_patched_proxy(
            mock_function, mock_get_si, mock_disconnect
        )
        mock_get_si.assert_called_once_with(self.mock_details1, self.mock_details2)
        mock_disconnect.assert_called_once_with(self.mock_si)
        self.assertEqual(ret, self.mock_si)

    def test_service_instance_kwargs_parameter_no_value(self):
        """With **kwargs and no instance given, one is created and injected."""
        mock_get_si = MagicMock(return_value=self.mock_si)
        mock_disconnect = MagicMock()

        @vsphere.gets_service_instance_via_proxy
        def mock_function(**kwargs):
            return kwargs["service_instance"]

        ret = self._call_with_patched_proxy(
            mock_function, mock_get_si, mock_disconnect
        )
        mock_get_si.assert_called_once_with(self.mock_details1, self.mock_details2)
        mock_disconnect.assert_called_once_with(self.mock_si)
        self.assertEqual(ret, self.mock_si)

    def test_service_instance_positional_parameter_no_default_value(self):
        """A positionally passed instance is used as-is and kept connected."""
        mock_get_si = MagicMock()
        mock_disconnect = MagicMock()

        @vsphere.gets_service_instance_via_proxy
        def mock_function(service_instance):
            return service_instance

        ret = self._call_with_patched_proxy(
            mock_function, mock_get_si, mock_disconnect, self.mock_si
        )
        self.assertEqual(mock_get_si.call_count, 0)
        self.assertEqual(mock_disconnect.call_count, 0)
        self.assertEqual(ret, self.mock_si)

    def test_service_instance_positional_parameter_with_default_value(self):
        """A positional instance overrides the default and is kept connected."""
        mock_get_si = MagicMock()
        mock_disconnect = MagicMock()

        @vsphere.gets_service_instance_via_proxy
        def mock_function(service_instance=None):
            return service_instance

        ret = self._call_with_patched_proxy(
            mock_function, mock_get_si, mock_disconnect, self.mock_si
        )
        self.assertEqual(mock_get_si.call_count, 0)
        self.assertEqual(mock_disconnect.call_count, 0)
        self.assertEqual(ret, self.mock_si)

    def test_service_instance_named_parameter_with_default_value(self):
        """A keyword-passed instance is used as-is and kept connected."""
        mock_get_si = MagicMock()
        mock_disconnect = MagicMock()

        @vsphere.gets_service_instance_via_proxy
        def mock_function(service_instance=None):
            return service_instance

        ret = self._call_with_patched_proxy(
            mock_function, mock_get_si, mock_disconnect, service_instance=self.mock_si
        )
        self.assertEqual(mock_get_si.call_count, 0)
        self.assertEqual(mock_disconnect.call_count, 0)
        self.assertEqual(ret, self.mock_si)

    def test_service_instance_kwargs_parameter_passthrough(self):
        """An instance passed via **kwargs is used as-is and kept connected."""
        mock_get_si = MagicMock()
        mock_disconnect = MagicMock()

        @vsphere.gets_service_instance_via_proxy
        def mock_function(**kwargs):
            return kwargs["service_instance"]

        ret = self._call_with_patched_proxy(
            mock_function, mock_get_si, mock_disconnect, service_instance=self.mock_si
        )
        self.assertEqual(mock_get_si.call_count, 0)
        self.assertEqual(mock_disconnect.call_count, 0)
        self.assertEqual(ret, self.mock_si)
class GetServiceInstanceViaProxyTestCase(TestCase, LoaderModuleMockMixin):
    """
    Tests for salt.modules.vsphere.get_service_instance_via_proxy
    """

    def setup_loader_modules(self):
        """Patch salt.utils.vmware and stub the proxy helpers."""
        patcher = patch("salt.utils.vmware.get_service_instance", MagicMock())
        patcher.start()
        self.addCleanup(patcher.stop)
        return {
            vsphere: {
                "get_proxy_type": MagicMock(return_value="esxi"),
                "_get_proxy_connection_details": MagicMock(),
            }
        }

    def test_supported_proxies(self):
        """Every supported proxy type is accepted without raising."""
        for proxy_type in ("esxi", "esxcluster", "esxdatacenter", "vcenter", "esxvm"):
            with patch(
                "salt.modules.vsphere.get_proxy_type",
                MagicMock(return_value=proxy_type),
            ):
                vsphere.get_service_instance_via_proxy()

    def test_get_service_instance_call(self):
        """The proxy connection details are forwarded to get_service_instance."""
        conn_details = [MagicMock(), MagicMock(), MagicMock()]
        mock_get_si = MagicMock()
        with patch(
            "salt.modules.vsphere._get_proxy_connection_details",
            MagicMock(return_value=conn_details),
        ), patch("salt.utils.vmware.get_service_instance", mock_get_si):
            vsphere.get_service_instance_via_proxy()
        mock_get_si.assert_called_once_with(*conn_details)

    def test_output(self):
        """The service instance created by salt.utils.vmware is returned."""
        expected_si = MagicMock()
        with patch(
            "salt.utils.vmware.get_service_instance",
            MagicMock(return_value=expected_si),
        ):
            res = vsphere.get_service_instance_via_proxy()
        self.assertEqual(res, expected_si)
class DisconnectTestCase(TestCase, LoaderModuleMockMixin):
    """
    Tests for salt.modules.vsphere.disconnect
    """

    def setup_loader_modules(self):
        """Create a shared mock service instance and patch vmware.disconnect."""
        self.mock_si = MagicMock()
        self.addCleanup(delattr, self, "mock_si")
        patcher = patch("salt.utils.vmware.disconnect", MagicMock())
        patcher.start()
        self.addCleanup(patcher.stop)
        return {
            vsphere: {
                "_get_proxy_connection_details": MagicMock(),
                "get_proxy_type": MagicMock(return_value="esxi"),
            }
        }

    def test_supported_proxies(self):
        """disconnect accepts every supported proxy type."""
        for proxy_type in ("esxi", "esxcluster", "esxdatacenter", "vcenter", "esxvm"):
            with patch(
                "salt.modules.vsphere.get_proxy_type",
                MagicMock(return_value=proxy_type),
            ):
                vsphere.disconnect(self.mock_si)

    def test_disconnect_call(self):
        """The service instance is handed to salt.utils.vmware.disconnect."""
        mock_vmware_disconnect = MagicMock()
        with patch("salt.utils.vmware.disconnect", mock_vmware_disconnect):
            vsphere.disconnect(self.mock_si)
        mock_vmware_disconnect.assert_called_once_with(self.mock_si)

    def test_output(self):
        """disconnect returns exactly True on success."""
        res = vsphere.disconnect(self.mock_si)
        self.assertEqual(res, True)
class TestVcenterConnectionTestCase(TestCase, LoaderModuleMockMixin):
    """
    Tests for salt.modules.vsphere.test_vcenter_connection
    """

    def setup_loader_modules(self):
        """Patch the vmware connection helpers around a shared mock instance."""
        self.mock_si = MagicMock()
        self.addCleanup(delattr, self, "mock_si")
        patcher = patch(
            "salt.utils.vmware.get_service_instance",
            MagicMock(return_value=self.mock_si),
        )
        patcher.start()
        self.addCleanup(patcher.stop)
        patcher = patch("salt.utils.vmware.disconnect", MagicMock())
        patcher.start()
        self.addCleanup(patcher.stop)
        patcher = patch("salt.utils.vmware.is_connection_to_a_vcenter", MagicMock())
        patcher.start()
        self.addCleanup(patcher.stop)
        return {
            vsphere: {
                "_get_proxy_connection_details": MagicMock(),
                "get_proxy_type": MagicMock(return_value="esxi"),
            }
        }

    def test_supported_proxies(self):
        """test_vcenter_connection runs under every supported proxy type."""
        supported_proxies = ["esxi", "esxcluster", "esxdatacenter", "vcenter", "esxvm"]
        for proxy_type in supported_proxies:
            with patch(
                "salt.modules.vsphere.get_proxy_type",
                MagicMock(return_value=proxy_type),
            ):
                vsphere.test_vcenter_connection()

    def test_is_connection_to_a_vcenter_call_default_service_instance(self):
        """Without an explicit instance, the proxy's instance is checked."""
        mock_is_connection_to_a_vcenter = MagicMock()
        with patch(
            "salt.utils.vmware.is_connection_to_a_vcenter",
            mock_is_connection_to_a_vcenter,
        ):
            vsphere.test_vcenter_connection()
        mock_is_connection_to_a_vcenter.assert_called_once_with(self.mock_si)

    def test_is_connection_to_a_vcenter_call_explicit_service_instance(self):
        """An explicitly passed service instance is the one checked."""
        expl_mock_si = MagicMock()
        mock_is_connection_to_a_vcenter = MagicMock()
        with patch(
            "salt.utils.vmware.is_connection_to_a_vcenter",
            mock_is_connection_to_a_vcenter,
        ):
            vsphere.test_vcenter_connection(expl_mock_si)
        mock_is_connection_to_a_vcenter.assert_called_once_with(expl_mock_si)

    def test_is_connection_to_a_vcenter_raises_vmware_salt_error(self):
        """A VMwareSaltError from the check is swallowed; False is returned."""
        exc = VMwareSaltError("VMwareSaltError")
        with patch(
            "salt.utils.vmware.is_connection_to_a_vcenter", MagicMock(side_effect=exc)
        ):
            res = vsphere.test_vcenter_connection()
        self.assertEqual(res, False)

    def test_is_connection_to_a_vcenter_raises_non_vmware_salt_error(self):
        """Non-VMware exceptions propagate to the caller."""
        exc = Exception("NonVMwareSaltError")
        with patch(
            "salt.utils.vmware.is_connection_to_a_vcenter", MagicMock(side_effect=exc)
        ):
            with self.assertRaises(Exception) as excinfo:
                # Fixed: the call is expected to raise, so the previously
                # unused ``res =`` binding was dead code and is removed.
                vsphere.test_vcenter_connection()
        self.assertEqual("NonVMwareSaltError", str(excinfo.exception))

    def test_output_true(self):
        """True is returned when the connection is to a vCenter."""
        with patch(
            "salt.utils.vmware.is_connection_to_a_vcenter", MagicMock(return_value=True)
        ):
            res = vsphere.test_vcenter_connection()
        self.assertEqual(res, True)

    def test_output_false(self):
        """False is returned when the connection is not to a vCenter."""
        with patch(
            "salt.utils.vmware.is_connection_to_a_vcenter",
            MagicMock(return_value=False),
        ):
            res = vsphere.test_vcenter_connection()
        self.assertEqual(res, False)
@skipIf(not HAS_PYVMOMI, "The 'pyvmomi' library is missing")
class ListDatacentersViaProxyTestCase(TestCase, LoaderModuleMockMixin):
    """
    Tests for salt.modules.vsphere.list_datacenters_via_proxy
    """

    def setup_loader_modules(self):
        """Patch the vmware helpers; the proxy type defaults to esxdatacenter."""
        self.mock_si = MagicMock()
        self.addCleanup(delattr, self, "mock_si")
        for target, mock_obj in (
            (
                "salt.utils.vmware.get_service_instance",
                MagicMock(return_value=self.mock_si),
            ),
            ("salt.utils.vmware.get_datacenters", MagicMock()),
            ("salt.utils.vmware.get_managed_object_name", MagicMock()),
        ):
            patcher = patch(target, mock_obj)
            patcher.start()
            self.addCleanup(patcher.stop)
        return {
            vsphere: {
                "_get_proxy_connection_details": MagicMock(),
                "get_proxy_type": MagicMock(return_value="esxdatacenter"),
            }
        }

    def test_supported_proxies(self):
        """The function runs under every supported proxy type."""
        for proxy_type in ("esxcluster", "esxdatacenter", "vcenter", "esxvm"):
            with patch(
                "salt.modules.vsphere.get_proxy_type",
                MagicMock(return_value=proxy_type),
            ):
                vsphere.list_datacenters_via_proxy()

    def test_default_params(self):
        """With no names given, all datacenters are requested."""
        mock_get_datacenters = MagicMock()
        with patch("salt.utils.vmware.get_datacenters", mock_get_datacenters):
            vsphere.list_datacenters_via_proxy()
        mock_get_datacenters.assert_called_once_with(
            self.mock_si, get_all_datacenters=True
        )

    def test_defined_service_instance(self):
        """An explicitly passed service instance is used for the lookup."""
        explicit_si = MagicMock()
        mock_get_datacenters = MagicMock()
        with patch("salt.utils.vmware.get_datacenters", mock_get_datacenters):
            vsphere.list_datacenters_via_proxy(service_instance=explicit_si)
        mock_get_datacenters.assert_called_once_with(
            explicit_si, get_all_datacenters=True
        )

    def test_defined_datacenter_names(self):
        """Given datacenter names are forwarded to get_datacenters."""
        mock_names = MagicMock()
        mock_get_datacenters = MagicMock()
        with patch("salt.utils.vmware.get_datacenters", mock_get_datacenters):
            vsphere.list_datacenters_via_proxy(mock_names)
        mock_get_datacenters.assert_called_once_with(self.mock_si, mock_names)

    def test_get_managed_object_name_calls(self):
        """Each returned datacenter has its name resolved."""
        mock_get_name = MagicMock()
        mock_dcs = [MagicMock(), MagicMock()]
        with patch(
            "salt.utils.vmware.get_datacenters", MagicMock(return_value=mock_dcs)
        ), patch("salt.utils.vmware.get_managed_object_name", mock_get_name):
            vsphere.list_datacenters_via_proxy()
        mock_get_name.assert_has_calls([call(mock_dcs[0]), call(mock_dcs[1])])

    def test_returned_array(self):
        """The result is one {'name': ...} dict per retrieved datacenter."""
        with patch(
            "salt.utils.vmware.get_datacenters",
            MagicMock(return_value=[MagicMock(), MagicMock()]),  # 2 datacenters
        ), patch(
            "salt.utils.vmware.get_managed_object_name",
            # 3 possible names; only as many as there are datacenters are used
            MagicMock(side_effect=["fake_dc1", "fake_dc2", "fake_dc3"]),
        ):
            res = vsphere.list_datacenters_via_proxy()
        # Just the first two names are in the result
        self.assertEqual(res, [{"name": "fake_dc1"}, {"name": "fake_dc2"}])
@skipIf(not HAS_PYVMOMI, "The 'pyvmomi' library is missing")
class CreateDatacenterTestCase(TestCase, LoaderModuleMockMixin):
    """
    Tests for salt.modules.vsphere.create_datacenter
    """

    def setup_loader_modules(self):
        """Patch the vmware helpers; the proxy type defaults to esxdatacenter."""
        self.mock_si = MagicMock()
        self.addCleanup(delattr, self, "mock_si")
        for target, mock_obj in (
            (
                "salt.utils.vmware.get_service_instance",
                MagicMock(return_value=self.mock_si),
            ),
            ("salt.utils.vmware.create_datacenter", MagicMock()),
        ):
            patcher = patch(target, mock_obj)
            patcher.start()
            self.addCleanup(patcher.stop)
        return {
            vsphere: {
                "_get_proxy_connection_details": MagicMock(),
                "get_proxy_type": MagicMock(return_value="esxdatacenter"),
            }
        }

    def test_supported_proxies(self):
        """create_datacenter runs under every supported proxy type."""
        for proxy_type in ("esxdatacenter", "vcenter"):
            with patch(
                "salt.modules.vsphere.get_proxy_type",
                MagicMock(return_value=proxy_type),
            ):
                vsphere.create_datacenter("fake_dc1")

    def test_default_service_instance(self):
        """Without an explicit instance, the proxy's instance is used."""
        mock_create_dc = MagicMock()
        with patch("salt.utils.vmware.create_datacenter", mock_create_dc):
            vsphere.create_datacenter("fake_dc1")
        mock_create_dc.assert_called_once_with(self.mock_si, "fake_dc1")

    def test_defined_service_instance(self):
        """An explicitly passed service instance is used for creation."""
        explicit_si = MagicMock()
        mock_create_dc = MagicMock()
        with patch("salt.utils.vmware.create_datacenter", mock_create_dc):
            vsphere.create_datacenter("fake_dc1", service_instance=explicit_si)
        mock_create_dc.assert_called_once_with(explicit_si, "fake_dc1")

    def test_returned_value(self):
        """A success marker dict is returned."""
        res = vsphere.create_datacenter("fake_dc1")
        self.assertEqual(res, {"create_datacenter": True})
@skipIf(not HAS_PYVMOMI, "The 'pyvmomi' library is missing")
class EraseDiskPartitionsTestCase(TestCase, LoaderModuleMockMixin):
    """
    Tests for salt.modules.vsphere.erase_disk_partitions
    """

    def setup_loader_modules(self):
        """Stub the proxy details so the module sees an ESXi host 'fake_host'."""
        return {
            vsphere: {
                "_get_proxy_connection_details": MagicMock(),
                "__proxy__": {
                    "esxi.get_details": MagicMock(
                        return_value={"esxi_host": "fake_host"}
                    )
                },
            }
        }

    def setUp(self):
        """Create shared mocks and patch the helpers used by the module."""
        self.mock_si = MagicMock()
        self.mock_host = MagicMock()
        self.mock_proxy_target = MagicMock(return_value=self.mock_host)
        self.mock_erase_disk_partitions = MagicMock()
        for attr in (
            "mock_si",
            "mock_host",
            "mock_proxy_target",
            "mock_erase_disk_partitions",
        ):
            self.addCleanup(delattr, self, attr)
        for target, mock_obj in (
            (
                "salt.utils.vmware.get_service_instance",
                MagicMock(return_value=self.mock_si),
            ),
            ("salt.modules.vsphere.get_proxy_type", MagicMock(return_value="esxi")),
            (
                "salt.modules.vsphere._get_proxy_target",
                MagicMock(return_value=self.mock_host),
            ),
            (
                "salt.utils.vmware.erase_disk_partitions",
                self.mock_erase_disk_partitions,
            ),
        ):
            patcher = patch(target, mock_obj)
            patcher.start()
            self.addCleanup(patcher.stop)

    def test_supported_proxies(self):
        """erase_disk_partitions runs under the esxi proxy."""
        for proxy_type in ("esxi",):
            with patch(
                "salt.modules.vsphere.get_proxy_type",
                MagicMock(return_value=proxy_type),
            ):
                vsphere.erase_disk_partitions(disk_id="fake_disk")

    def test_no_disk_id_or_scsi_address(self):
        """Omitting both disk_id and scsi_address raises ArgumentValueError."""
        with self.assertRaises(ArgumentValueError) as excinfo:
            vsphere.erase_disk_partitions()
        self.assertEqual(
            "Either 'disk_id' or 'scsi_address' needs to be specified",
            excinfo.exception.strerror,
        )

    def test_get_proxy_target(self):
        """The target host is resolved from the service instance."""
        mock_proxy_target_fn = MagicMock()
        with patch("salt.modules.vsphere._get_proxy_target", mock_proxy_target_fn):
            vsphere.erase_disk_partitions(disk_id="fake_disk")
        mock_proxy_target_fn.assert_called_once_with(self.mock_si)

    def test_scsi_address_not_found(self):
        """An unknown SCSI address raises VMwareObjectRetrievalError."""
        mock_lun_map = MagicMock(return_value={"bad_scsi_address": "bad_disk_id"})
        with patch("salt.utils.vmware.get_scsi_address_to_lun_map", mock_lun_map):
            with self.assertRaises(VMwareObjectRetrievalError) as excinfo:
                vsphere.erase_disk_partitions(scsi_address="fake_scsi_address")
        self.assertEqual(
            "Scsi lun with address 'fake_scsi_address' was "
            "not found on host 'fake_host'",
            excinfo.exception.strerror,
        )

    def test_scsi_address_to_disk_id_map(self):
        """A known SCSI address is translated to its canonical disk id."""
        mock_lun = MagicMock(canonicalName="fake_scsi_disk_id")
        mock_lun_map = MagicMock(return_value={"fake_scsi_address": mock_lun})
        with patch("salt.utils.vmware.get_scsi_address_to_lun_map", mock_lun_map):
            vsphere.erase_disk_partitions(scsi_address="fake_scsi_address")
        mock_lun_map.assert_called_once_with(self.mock_host)
        self.mock_erase_disk_partitions.assert_called_once_with(
            self.mock_si, self.mock_host, "fake_scsi_disk_id", hostname="fake_host"
        )

    def test_erase_disk_partitions(self):
        """A disk id is passed straight through to the vmware helper."""
        vsphere.erase_disk_partitions(disk_id="fake_disk_id")
        self.mock_erase_disk_partitions.assert_called_once_with(
            self.mock_si, self.mock_host, "fake_disk_id", hostname="fake_host"
        )
@skipIf(not HAS_PYVMOMI, "The 'pyvmomi' library is missing")
class RemoveDatastoreTestCase(TestCase, LoaderModuleMockMixin):
    """
    Tests for salt.modules.vsphere.remove_datastore
    """

    def setup_loader_modules(self):
        """Stub the proxy helpers; the proxy type defaults to esxdatacenter."""
        return {
            vsphere: {
                "_get_proxy_connection_details": MagicMock(),
                "get_proxy_type": MagicMock(return_value="esxdatacenter"),
            }
        }

    def setUp(self):
        """Create shared mocks and patch the helpers used by the module."""
        self.mock_si = MagicMock()
        self.mock_target = MagicMock()
        self.mock_ds = MagicMock()
        for attr in ("mock_si", "mock_target", "mock_ds"):
            self.addCleanup(delattr, self, attr)
        for target, mock_obj in (
            (
                "salt.utils.vmware.get_service_instance",
                MagicMock(return_value=self.mock_si),
            ),
            (
                "salt.modules.vsphere.get_proxy_type",
                MagicMock(return_value="esxdatacenter"),
            ),
            (
                "salt.modules.vsphere._get_proxy_target",
                MagicMock(return_value=self.mock_target),
            ),
            (
                "salt.utils.vmware.get_datastores",
                MagicMock(return_value=[self.mock_ds]),
            ),
            ("salt.utils.vmware.remove_datastore", MagicMock()),
        ):
            patcher = patch(target, mock_obj)
            patcher.start()
            self.addCleanup(patcher.stop)

    def test_supported_proxes(self):
        """remove_datastore runs under every supported proxy type."""
        # NOTE(review): 'proxes' is a typo for 'proxies'; renaming would
        # change the test id, so it is left unchanged and only flagged here.
        for proxy_type in ("esxi", "esxcluster", "esxdatacenter"):
            with patch(
                "salt.modules.vsphere.get_proxy_type",
                MagicMock(return_value=proxy_type),
            ):
                vsphere.remove_datastore(datastore="fake_ds_name")

    def test__get_proxy_target_call(self):
        """The proxy target is resolved from the service instance."""
        mock_target_fn = MagicMock(return_value=self.mock_target)
        with patch("salt.modules.vsphere._get_proxy_target", mock_target_fn):
            vsphere.remove_datastore(datastore="fake_ds_name")
        mock_target_fn.assert_called_once_with(self.mock_si)

    def test_get_datastores_call(self):
        """Datastores are looked up by name against the proxy target."""
        mock_get_datastores = MagicMock()
        with patch("salt.utils.vmware.get_datastores", mock_get_datastores):
            vsphere.remove_datastore(datastore="fake_ds")
        mock_get_datastores.assert_called_once_with(
            self.mock_si, reference=self.mock_target, datastore_names=["fake_ds"]
        )

    def test_datastore_not_found(self):
        """An unknown datastore raises VMwareObjectRetrievalError."""
        with patch("salt.utils.vmware.get_datastores", MagicMock(return_value=[])):
            with self.assertRaises(VMwareObjectRetrievalError) as excinfo:
                vsphere.remove_datastore(datastore="fake_ds")
        self.assertEqual(
            "Datastore 'fake_ds' was not found", excinfo.exception.strerror
        )

    def test_multiple_datastores_found(self):
        """An ambiguous (multi-match) datastore name raises an error."""
        with patch(
            "salt.utils.vmware.get_datastores",
            MagicMock(return_value=[MagicMock(), MagicMock()]),
        ):
            with self.assertRaises(VMwareObjectRetrievalError) as excinfo:
                vsphere.remove_datastore(datastore="fake_ds")
        self.assertEqual(
            "Multiple datastores 'fake_ds' were found", excinfo.exception.strerror
        )

    def test_remove_datastore_call(self):
        """The matched datastore is handed to the vmware removal helper."""
        mock_remove_datastore = MagicMock()
        with patch("salt.utils.vmware.remove_datastore", mock_remove_datastore):
            vsphere.remove_datastore(datastore="fake_ds")
        mock_remove_datastore.assert_called_once_with(self.mock_si, self.mock_ds)

    def test_success_output(self):
        """True is returned on successful removal."""
        self.assertTrue(vsphere.remove_datastore(datastore="fake_ds"))
@skipIf(not HAS_PYVMOMI, "The 'pyvmomi' library is missing")
class RemoveDiskgroupTestCase(TestCase, LoaderModuleMockMixin):
    """
    Tests for salt.modules.vsphere.remove_diskgroup
    """

    def setup_loader_modules(self):
        """Stub the proxy details so the module sees an ESXi host 'fake_host'."""
        return {
            vsphere: {
                "_get_proxy_connection_details": MagicMock(),
                "__proxy__": {
                    "esxi.get_details": MagicMock(
                        return_value={"esxi_host": "fake_host"}
                    )
                },
            }
        }

    def setUp(self):
        """Create shared mocks and patch the helpers used by the module."""
        self.mock_si = MagicMock()
        self.mock_host = MagicMock()
        self.mock_diskgroup = MagicMock()
        for attr in ("mock_si", "mock_host", "mock_diskgroup"):
            self.addCleanup(delattr, self, attr)
        for target, mock_obj in (
            (
                "salt.utils.vmware.get_service_instance",
                MagicMock(return_value=self.mock_si),
            ),
            ("salt.modules.vsphere.get_proxy_type", MagicMock(return_value="esxi")),
            (
                "salt.modules.vsphere._get_proxy_target",
                MagicMock(return_value=self.mock_host),
            ),
            (
                "salt.utils.vmware.get_diskgroups",
                MagicMock(return_value=[self.mock_diskgroup]),
            ),
            ("salt.utils.vsan.remove_diskgroup", MagicMock()),
        ):
            patcher = patch(target, mock_obj)
            patcher.start()
            self.addCleanup(patcher.stop)

    def test_supported_proxes(self):
        """remove_diskgroup runs under the esxi proxy."""
        # NOTE(review): 'proxes' is a typo for 'proxies'; renaming would
        # change the test id, so it is left unchanged and only flagged here.
        for proxy_type in ("esxi",):
            with patch(
                "salt.modules.vsphere.get_proxy_type",
                MagicMock(return_value=proxy_type),
            ):
                vsphere.remove_diskgroup(cache_disk_id="fake_disk_id")

    def test__get_proxy_target_call(self):
        """The target host is resolved from the service instance."""
        mock_target_fn = MagicMock(return_value=self.mock_host)
        with patch("salt.modules.vsphere._get_proxy_target", mock_target_fn):
            vsphere.remove_diskgroup(cache_disk_id="fake_disk_id")
        mock_target_fn.assert_called_once_with(self.mock_si)

    def test_get_disk_groups(self):
        """Diskgroups are looked up by the given cache disk id."""
        mock_get_diskgroups = MagicMock(return_value=[self.mock_diskgroup])
        with patch("salt.utils.vmware.get_diskgroups", mock_get_diskgroups):
            vsphere.remove_diskgroup(cache_disk_id="fake_disk_id")
        mock_get_diskgroups.assert_called_once_with(
            self.mock_host, cache_disk_ids=["fake_disk_id"]
        )

    def test_disk_group_not_found_safety_checks_set(self):
        """A missing diskgroup raises VMwareObjectRetrievalError."""
        with patch("salt.utils.vmware.get_diskgroups", MagicMock(return_value=[])):
            with self.assertRaises(VMwareObjectRetrievalError) as excinfo:
                vsphere.remove_diskgroup(cache_disk_id="fake_disk_id")
        self.assertEqual(
            "No diskgroup with cache disk id 'fake_disk_id' was found "
            "in ESXi host 'fake_host'",
            excinfo.exception.strerror,
        )

    def test_remove_disk_group(self):
        """By default the diskgroup is removed with data accessibility kept."""
        mock_remove = MagicMock(return_value=None)
        with patch("salt.utils.vsan.remove_diskgroup", mock_remove):
            vsphere.remove_diskgroup(cache_disk_id="fake_disk_id")
        mock_remove.assert_called_once_with(
            self.mock_si, self.mock_host, self.mock_diskgroup, data_accessibility=True
        )

    def test_remove_disk_group_data_accessibility_false(self):
        """data_accessibility=False is forwarded to the vsan helper."""
        mock_remove = MagicMock(return_value=None)
        with patch("salt.utils.vsan.remove_diskgroup", mock_remove):
            vsphere.remove_diskgroup(
                cache_disk_id="fake_disk_id", data_accessibility=False
            )
        mock_remove.assert_called_once_with(
            self.mock_si, self.mock_host, self.mock_diskgroup, data_accessibility=False
        )

    def test_success_output(self):
        """True is returned on success."""
        self.assertTrue(vsphere.remove_diskgroup(cache_disk_id="fake_disk_id"))
@skipIf(not HAS_PYVMOMI, "The 'pyvmomi' library is missing")
@skipIf(not vsphere.HAS_JSONSCHEMA, "The 'jsonschema' library is missing")
class RemoveCapacityFromDiskgroupTestCase(TestCase, LoaderModuleMockMixin):
"""
Tests for salt.modules.vsphere.remove_capacity_from_diskgroup
"""
def setup_loader_modules(self):
return {
vsphere: {
"_get_proxy_connection_details": MagicMock(),
"__proxy__": {
"esxi.get_details": MagicMock(
return_value={"esxi_host": "fake_host"}
)
},
}
}
def setUp(self):
attrs = (
("mock_si", MagicMock()),
("mock_schema", MagicMock()),
("mock_host", MagicMock()),
("mock_disk1", MagicMock(canonicalName="fake_disk1")),
("mock_disk2", MagicMock(canonicalName="fake_disk2")),
("mock_disk3", MagicMock(canonicalName="fake_disk3")),
("mock_diskgroup", MagicMock()),
)
for attr, mock_obj in attrs:
setattr(self, attr, mock_obj)
self.addCleanup(delattr, self, attr)
patches = (
(
"salt.utils.vmware.get_service_instance",
MagicMock(return_value=self.mock_si),
),
(
"salt.modules.vsphere.DiskGroupsDiskIdSchema.serialize",
MagicMock(return_value=self.mock_schema),
),
("salt.modules.vsphere.jsonschema.validate", MagicMock()),
("salt.modules.vsphere.get_proxy_type", MagicMock(return_value="esxi")),
(
"salt.modules.vsphere._get_proxy_target",
MagicMock(return_value=self.mock_host),
),
(
"salt.utils.vmware.get_disks",
MagicMock(
return_value=[self.mock_disk1, self.mock_disk2, self.mock_disk3]
),
),
(
"salt.utils.vmware.get_diskgroups",
MagicMock(return_value=[self.mock_diskgroup]),
),
("salt.utils.vsan.remove_capacity_from_diskgroup", MagicMock()),
)
for module, mock_obj in patches:
patcher = patch(module, mock_obj)
patcher.start()
self.addCleanup(patcher.stop)
def test_validate(self):
mock_schema_validate = MagicMock()
with patch("salt.modules.vsphere.jsonschema.validate", mock_schema_validate):
vsphere.remove_capacity_from_diskgroup(
cache_disk_id="fake_cache_disk_id",
capacity_disk_ids=["fake_disk1", "fake_disk2"],
)
mock_schema_validate.assert_called_once_with(
{
"diskgroups": [
{
"cache_id": "fake_cache_disk_id",
"capacity_ids": ["fake_disk1", "fake_disk2"],
}
]
},
self.mock_schema,
)
def test_invalid_schema_validation(self):
mock_schema_validate = MagicMock(
side_effect=vsphere.jsonschema.exceptions.ValidationError("err")
)
with patch("salt.modules.vsphere.jsonschema.validate", mock_schema_validate):
with self.assertRaises(ArgumentValueError) as excinfo:
vsphere.remove_capacity_from_diskgroup(
cache_disk_id="fake_cache_disk_id",
capacity_disk_ids=["fake_disk1", "fake_disk2"],
)
self.assertEqual("err", excinfo.exception.strerror)
def test_supported_proxes(self):
supported_proxies = ["esxi"]
for proxy_type in supported_proxies:
with patch(
"salt.modules.vsphere.get_proxy_type",
MagicMock(return_value=proxy_type),
):
vsphere.remove_capacity_from_diskgroup(
cache_disk_id="fake_cache_disk_id",
capacity_disk_ids=["fake_disk1", "fake_disk2"],
)
def test__get_proxy_target_call(self):
mock__get_proxy_target = MagicMock(return_value=self.mock_host)
with patch("salt.modules.vsphere._get_proxy_target", mock__get_proxy_target):
vsphere.remove_capacity_from_diskgroup(
cache_disk_id="fake_cache_disk_id",
capacity_disk_ids=["fake_disk1", "fake_disk2"],
)
mock__get_proxy_target.assert_called_once_with(self.mock_si)
def test_get_disks(self):
mock_get_disks = MagicMock(
return_value=[self.mock_disk1, self.mock_disk2, self.mock_disk3]
)
with patch("salt.utils.vmware.get_disks", mock_get_disks):
vsphere.remove_capacity_from_diskgroup(
cache_disk_id="fake_cache_disk_id",
capacity_disk_ids=["fake_disk1", "fake_disk2"],
)
mock_get_disks.assert_called_once_with(
self.mock_host, disk_ids=["fake_disk1", "fake_disk2"]
)
def test_disk_not_found_safety_checks_set(self):
mock_get_disks = MagicMock(
return_value=[self.mock_disk1, self.mock_disk2, self.mock_disk3]
)
with patch("salt.utils.vmware.get_disks", mock_get_disks):
with self.assertRaises(VMwareObjectRetrievalError) as excinfo:
vsphere.remove_capacity_from_diskgroup(
cache_disk_id="fake_cache_disk_id",
capacity_disk_ids=["fake_disk1", "fake_disk4"],
safety_checks=True,
)
self.assertEqual(
"No disk with id 'fake_disk4' was found " "in ESXi host 'fake_host'",
excinfo.exception.strerror,
)
def test_get_diskgroups(self):
mock_get_diskgroups = MagicMock(return_value=[self.mock_diskgroup])
with patch("salt.utils.vmware.get_diskgroups", mock_get_diskgroups):
vsphere.remove_capacity_from_diskgroup(
cache_disk_id="fake_cache_disk_id",
capacity_disk_ids=["fake_disk1", "fake_disk2"],
)
mock_get_diskgroups.assert_called_once_with(
self.mock_host, cache_disk_ids=["fake_cache_disk_id"]
)
def test_diskgroup_not_found(self):
with patch("salt.utils.vmware.get_diskgroups", MagicMock(return_value=[])):
with self.assertRaises(VMwareObjectRetrievalError) as excinfo:
vsphere.remove_capacity_from_diskgroup(
cache_disk_id="fake_cache_disk_id",
capacity_disk_ids=["fake_disk1", "fake_disk2"],
)
self.assertEqual(
"No diskgroup with cache disk id "
"'fake_cache_disk_id' was found in ESXi host "
"'fake_host'",
excinfo.exception.strerror,
)
def test_remove_capacity_from_diskgroup(self):
    """By default the vsan helper is invoked with data evacuation enabled."""
    vsan_remove = MagicMock()
    with patch("salt.utils.vsan.remove_capacity_from_diskgroup", vsan_remove):
        vsphere.remove_capacity_from_diskgroup(
            cache_disk_id="fake_cache_disk_id",
            capacity_disk_ids=["fake_disk1", "fake_disk2"],
        )
    vsan_remove.assert_called_once_with(
        self.mock_si,
        self.mock_host,
        self.mock_diskgroup,
        capacity_disks=[self.mock_disk1, self.mock_disk2],
        data_evacuation=True,
    )
def test_remove_capacity_from_diskgroup_data_evacuation_false(self):
    """An explicit data_evacuation=False is forwarded to the vsan helper."""
    vsan_remove = MagicMock()
    with patch("salt.utils.vsan.remove_capacity_from_diskgroup", vsan_remove):
        vsphere.remove_capacity_from_diskgroup(
            cache_disk_id="fake_cache_disk_id",
            capacity_disk_ids=["fake_disk1", "fake_disk2"],
            data_evacuation=False,
        )
    vsan_remove.assert_called_once_with(
        self.mock_si,
        self.mock_host,
        self.mock_diskgroup,
        capacity_disks=[self.mock_disk1, self.mock_disk2],
        data_evacuation=False,
    )
def test_success_output(self):
    """A successful removal returns a truthy result."""
    self.assertTrue(
        vsphere.remove_capacity_from_diskgroup(
            cache_disk_id="fake_cache_disk_id",
            capacity_disk_ids=["fake_disk1", "fake_disk2"],
        )
    )
@skipIf(not HAS_PYVMOMI, "The 'pyvmomi' library is missing")
class ListClusterTestCase(TestCase, LoaderModuleMockMixin):
    """
    Tests for salt.modules.vsphere.list_cluster
    """

    def setup_loader_modules(self):
        # Minimal loader context: connection details are mocked out and
        # __salt__ is filled in per-test during setUp().
        return {vsphere: {"_get_proxy_connection_details": MagicMock(), "__salt__": {}}}

    def setUp(self):
        # Stand-in mocks for the service instance, datacenter and cluster;
        # each attribute is removed again via addCleanup for test isolation.
        attrs = (
            ("mock_si", MagicMock()),
            ("mock_dc", MagicMock()),
            ("mock_cl", MagicMock()),
            ("mock__get_cluster_dict", MagicMock()),
        )
        for attr, mock_obj in attrs:
            setattr(self, attr, mock_obj)
            self.addCleanup(delattr, self, attr)
        # Second pass: this mock references self.mock_cl created above.
        attrs = (("mock_get_cluster", MagicMock(return_value=self.mock_cl)),)
        for attr, mock_obj in attrs:
            setattr(self, attr, mock_obj)
            self.addCleanup(delattr, self, attr)
        # Default wiring: an esxcluster proxy whose target resolves to
        # self.mock_cl; individual tests re-patch get_proxy_type as needed.
        patches = (
            (
                "salt.utils.vmware.get_service_instance",
                MagicMock(return_value=self.mock_si),
            ),
            (
                "salt.modules.vsphere.get_proxy_type",
                MagicMock(return_value="esxcluster"),
            ),
            (
                "salt.modules.vsphere._get_proxy_target",
                MagicMock(return_value=self.mock_cl),
            ),
            ("salt.utils.vmware.get_cluster", self.mock_get_cluster),
            ("salt.modules.vsphere._get_cluster_dict", self.mock__get_cluster_dict),
        )
        for module, mock_obj in patches:
            patcher = patch(module, mock_obj)
            patcher.start()
            self.addCleanup(patcher.stop)
        # Patch __salt__ dunder: the esxcluster proxy details expose the
        # cluster name consumed by list_cluster.
        patcher = patch.dict(
            vsphere.__salt__,
            {"esxcluster.get_details": MagicMock(return_value={"cluster": "cl"})},
        )
        patcher.start()
        self.addCleanup(patcher.stop)

    def test_supported_proxies(self):
        # Both proxy types must be accepted without raising.
        supported_proxies = ["esxcluster", "esxdatacenter"]
        for proxy_type in supported_proxies:
            with patch(
                "salt.modules.vsphere.get_proxy_type",
                MagicMock(return_value=proxy_type),
            ):
                vsphere.list_cluster(cluster="cl")

    def test_default_service_instance(self):
        # Without an explicit service_instance the patched default is used.
        mock__get_proxy_target = MagicMock()
        with patch("salt.modules.vsphere._get_proxy_target", mock__get_proxy_target):
            vsphere.list_cluster()
        mock__get_proxy_target.assert_called_once_with(self.mock_si)

    def test_defined_service_instance(self):
        # An explicitly passed service_instance takes precedence.
        mock_si = MagicMock()
        mock__get_proxy_target = MagicMock()
        with patch("salt.modules.vsphere._get_proxy_target", mock__get_proxy_target):
            vsphere.list_cluster(service_instance=mock_si)
        mock__get_proxy_target.assert_called_once_with(mock_si)

    def test_no_cluster_raises_argument_value_error(self):
        # On an esxdatacenter proxy the cluster name must be given explicitly.
        with patch(
            "salt.modules.vsphere.get_proxy_type",
            MagicMock(return_value="esxdatacenter"),
        ):
            with patch("salt.modules.vsphere._get_proxy_target", MagicMock()):
                with self.assertRaises(ArgumentValueError) as excinfo:
                    vsphere.list_cluster()
        self.assertEqual(excinfo.exception.strerror, "'cluster' needs to be specified")

    def test_get_cluster_call(self):
        # On an esxdatacenter proxy the cluster is resolved from the dc target.
        with patch(
            "salt.modules.vsphere.get_proxy_type",
            MagicMock(return_value="esxdatacenter"),
        ):
            with patch(
                "salt.modules.vsphere._get_proxy_target",
                MagicMock(return_value=self.mock_dc),
            ):
                vsphere.list_cluster(cluster="cl")
        self.mock_get_cluster.assert_called_once_with(self.mock_dc, "cl")

    def test__get_cluster_dict_call(self):
        # The resolved cluster object is rendered through _get_cluster_dict.
        vsphere.list_cluster()
        self.mock__get_cluster_dict.assert_called_once_with("cl", self.mock_cl)
@skipIf(not HAS_PYVMOMI, "The 'pyvmomi' library is missing")
class RenameDatastoreTestCase(TestCase, LoaderModuleMockMixin):
    """
    Tests for salt.modules.vsphere.rename_datastore
    """

    def setup_loader_modules(self):
        return {
            vsphere: {
                "_get_proxy_connection_details": MagicMock(),
                "get_proxy_type": MagicMock(return_value="esxdatacenter"),
            }
        }

    def setUp(self):
        # Stand-ins for the service instance, proxy target and datastore ref.
        # Cleanup is registered via addCleanup (instead of a manual tearDown)
        # so it also runs when setUp fails part-way, matching the style of
        # the other test cases in this module.
        self.mock_si = MagicMock()
        self.mock_target = MagicMock()
        self.mock_ds_ref = MagicMock()
        self.mock_get_datastores = MagicMock(return_value=[self.mock_ds_ref])
        self.mock_rename_datastore = MagicMock()
        for attr in (
            "mock_si",
            "mock_target",
            "mock_ds_ref",
            "mock_get_datastores",
            "mock_rename_datastore",
        ):
            self.addCleanup(delattr, self, attr)
        patches = (
            (
                "salt.utils.vmware.get_service_instance",
                MagicMock(return_value=self.mock_si),
            ),
            (
                "salt.modules.vsphere._get_proxy_target",
                MagicMock(return_value=self.mock_target),
            ),
            ("salt.utils.vmware.get_datastores", self.mock_get_datastores),
            ("salt.utils.vmware.rename_datastore", self.mock_rename_datastore),
        )
        for mod, mock in patches:
            patcher = patch(mod, mock)
            patcher.start()
            self.addCleanup(patcher.stop)

    # NOTE(review): method name keeps its historical typo ("proxes") so the
    # externally visible test id stays stable.
    def test_supported_proxes(self):
        # All three proxy types must be accepted without raising.
        supported_proxies = ["esxi", "esxcluster", "esxdatacenter"]
        for proxy_type in supported_proxies:
            with patch(
                "salt.modules.vsphere.get_proxy_type",
                MagicMock(return_value=proxy_type),
            ):
                vsphere.rename_datastore("current_ds_name", "new_ds_name")

    def test_default_service_instance(self):
        # Without an explicit service_instance the patched default is used.
        mock__get_proxy_target = MagicMock()
        with patch("salt.modules.vsphere._get_proxy_target", mock__get_proxy_target):
            vsphere.rename_datastore("current_ds_name", "new_ds_name")
        mock__get_proxy_target.assert_called_once_with(self.mock_si)

    def test_defined_service_instance(self):
        # An explicitly passed service_instance takes precedence.
        mock_si = MagicMock()
        mock__get_proxy_target = MagicMock()
        with patch("salt.modules.vsphere._get_proxy_target", mock__get_proxy_target):
            vsphere.rename_datastore(
                "current_ds_name", "new_ds_name", service_instance=mock_si
            )
        mock__get_proxy_target.assert_called_once_with(mock_si)

    def test_get_datastore_call(self):
        # The datastore is looked up by its current name on the proxy target.
        vsphere.rename_datastore("current_ds_name", "new_ds_name")
        self.mock_get_datastores.assert_called_once_with(
            self.mock_si, self.mock_target, datastore_names=["current_ds_name"]
        )

    def test_get_no_datastores(self):
        # A missing datastore raises VMwareObjectRetrievalError.
        with patch("salt.utils.vmware.get_datastores", MagicMock(return_value=[])):
            with self.assertRaises(VMwareObjectRetrievalError) as excinfo:
                vsphere.rename_datastore("current_ds_name", "new_ds_name")
        self.assertEqual(
            excinfo.exception.strerror, "Datastore 'current_ds_name' was not found"
        )

    def test_rename_datastore_call(self):
        # The rename helper receives the resolved ref and the new name.
        vsphere.rename_datastore("current_ds_name", "new_ds_name")
        self.mock_rename_datastore.assert_called_once_with(
            self.mock_ds_ref, "new_ds_name"
        )
class _GetProxyTargetTestCase(TestCase, LoaderModuleMockMixin):
    """
    Tests for salt.modules.vsphere._get_proxy_target
    """

    # NOTE(review): unlike the sibling test cases this class is not guarded
    # with @skipIf(not HAS_PYVMOMI, ...) — confirm whether that is intended.

    def setup_loader_modules(self):
        return {
            vsphere: {
                "_get_proxy_connection_details": MagicMock(),
                "get_proxy_type": MagicMock(return_value="esxdatacenter"),
            }
        }

    def setUp(self):
        # Stand-in objects for the service instance, datacenter, cluster and
        # root folder that _get_proxy_target may return.
        attrs = (
            ("mock_si", MagicMock()),
            ("mock_dc", MagicMock()),
            ("mock_cl", MagicMock()),
            ("mock_root", MagicMock()),
        )
        for attr, mock_obj in attrs:
            setattr(self, attr, mock_obj)
            self.addCleanup(delattr, self, attr)
        # Second pass: these lookup mocks return the objects created above.
        attrs = (
            ("mock_get_datacenter", MagicMock(return_value=self.mock_dc)),
            ("mock_get_cluster", MagicMock(return_value=self.mock_cl)),
            ("mock_get_root_folder", MagicMock(return_value=self.mock_root)),
        )
        for attr, mock_obj in attrs:
            setattr(self, attr, mock_obj)
            self.addCleanup(delattr, self, attr)
        # Default wiring: an esxcluster proxy connected to a vCenter.  The
        # proxy-details helpers return positional tuples whose trailing
        # entries are the datacenter (and cluster) names; only those trailing
        # entries are meaningful here, the None padding fills the unused
        # positions.
        patches = (
            (
                "salt.modules.vsphere.get_proxy_type",
                MagicMock(return_value="esxcluster"),
            ),
            (
                "salt.utils.vmware.is_connection_to_a_vcenter",
                MagicMock(return_value=True),
            ),
            (
                "salt.modules.vsphere._get_esxcluster_proxy_details",
                MagicMock(
                    return_value=(
                        None,
                        None,
                        None,
                        None,
                        None,
                        None,
                        None,
                        None,
                        "datacenter",
                        "cluster",
                    )
                ),
            ),
            (
                "salt.modules.vsphere._get_esxdatacenter_proxy_details",
                MagicMock(
                    return_value=(
                        None,
                        None,
                        None,
                        None,
                        None,
                        None,
                        None,
                        None,
                        "datacenter",
                    )
                ),
            ),
            ("salt.utils.vmware.get_datacenter", self.mock_get_datacenter),
            ("salt.utils.vmware.get_cluster", self.mock_get_cluster),
            ("salt.utils.vmware.get_root_folder", self.mock_get_root_folder),
        )
        for module, mock_obj in patches:
            patcher = patch(module, mock_obj)
            patcher.start()
            self.addCleanup(patcher.stop)

    def test_supported_proxies(self):
        # Both proxy types must be accepted without raising.
        supported_proxies = ["esxcluster", "esxdatacenter"]
        for proxy_type in supported_proxies:
            with patch(
                "salt.modules.vsphere.get_proxy_type",
                MagicMock(return_value=proxy_type),
            ):
                vsphere._get_proxy_target(self.mock_si)

    def test_connected_to_esxi(self):
        # A direct ESXi connection is rejected with CommandExecutionError.
        with patch(
            "salt.utils.vmware.is_connection_to_a_vcenter",
            MagicMock(return_value=False),
        ):
            with self.assertRaises(CommandExecutionError) as excinfo:
                vsphere._get_proxy_target(self.mock_si)
            self.assertEqual(
                excinfo.exception.strerror,
                "'_get_proxy_target' not supported when " "connected via the ESXi host",
            )

    def test_get_cluster_call(self):
        # esxcluster proxies resolve the datacenter first, then the cluster.
        vsphere._get_proxy_target(self.mock_si)
        self.mock_get_datacenter.assert_called_once_with(self.mock_si, "datacenter")
        self.mock_get_cluster.assert_called_once_with(self.mock_dc, "cluster")

    def test_esxcluster_proxy_return(self):
        # An esxcluster proxy yields the cluster object.
        with patch(
            "salt.modules.vsphere.get_proxy_type", MagicMock(return_value="esxcluster")
        ):
            ret = vsphere._get_proxy_target(self.mock_si)
        self.assertEqual(ret, self.mock_cl)

    def test_get_datacenter_call(self):
        # An esxdatacenter proxy resolves only the datacenter, no cluster.
        with patch(
            "salt.modules.vsphere.get_proxy_type",
            MagicMock(return_value="esxdatacenter"),
        ):
            vsphere._get_proxy_target(self.mock_si)
        self.mock_get_datacenter.assert_called_once_with(self.mock_si, "datacenter")
        self.assertEqual(self.mock_get_cluster.call_count, 0)

    def test_esxdatacenter_proxy_return(self):
        # An esxdatacenter proxy yields the datacenter object.
        with patch(
            "salt.modules.vsphere.get_proxy_type",
            MagicMock(return_value="esxdatacenter"),
        ):
            ret = vsphere._get_proxy_target(self.mock_si)
        self.assertEqual(ret, self.mock_dc)

    def test_vcenter_proxy_return(self):
        # A vcenter proxy yields the root folder of the inventory.
        with patch(
            "salt.modules.vsphere.get_proxy_type", MagicMock(return_value="vcenter")
        ):
            ret = vsphere._get_proxy_target(self.mock_si)
        self.mock_get_root_folder.assert_called_once_with(self.mock_si)
        self.assertEqual(ret, self.mock_root)
@skipIf(not HAS_VSPHERE_SDK, "The 'vsphere-automation-sdk' library is missing")
class TestVSphereTagging(TestCase, LoaderModuleMockMixin):
    """
    Tests for:
        - salt.modules.vsphere.create_tag_category
        - salt.modules.vsphere.create_tag
        - salt.modules.vsphere.delete_tag_category
        - salt.modules.vsphere.delete_tag
        - salt.modules.vsphere.list_tag_categories
        - salt.modules.vsphere.list_tags
        - salt.modules.vsphere.attach_tags
        - salt.modules.vsphere.list_attached_tags

    Every test previously repeated the same five-level nested ``with patch``
    pyramid; that wiring now lives in :meth:`_run_with_client` and the shared
    call-count assertions in :meth:`_assert_chain_called`.
    """

    def setup_loader_modules(self):
        return {
            vsphere: {
                "_get_proxy_connection_details": MagicMock(),
                "get_proxy_type": MagicMock(return_value="vcenter"),
            }
        }

    # Grains for expected __salt__ calls
    details = {key: None for key in ["vcenter", "username", "password"]}

    # Function attributes
    func_attrs = {
        key: None
        for key in [
            "category_id",
            "object_id",
            "tag_id",
            "name",
            "description",
            "cardinality",
        ]
    }

    # Expected returns
    create_tag_category = {
        "Category created": "urn:vmomi:InventoryServiceTag:"
        "bb0350b4-85db-46b0-a726-e7c5989fc857:GLOBAL"
    }

    create_tag = {
        "Tag created": "urn:vmomi:InventoryServiceTag:"
        "bb0350b4-85db-46b0-a726-e7c5989fc857:GLOBAL"
    }

    delete_tag_category = {
        "Category deleted": "urn:vmomi:InventoryServiceTag:"
        "bb0350b4-85db-46b0-a726-e7c5989fc857:GLOBAL"
    }

    delete_tag = {
        "Tag deleted": "urn:vmomi:InventoryServiceTag:"
        "bb0350b4-85db-46b0-a726-e7c5989fc857:GLOBAL"
    }

    list_tag_categories_return = [
        "urn:vmomi:InventoryServiceCategory:"
        "b13f4959-a3f3-48d0-8080-15bb586b4355:GLOBAL",
        "urn:vmomi:InventoryServiceCategory:"
        "f4d41f02-c317-422d-9013-dcbebfcd54ad:GLOBAL",
        "urn:vmomi:InventoryServiceCategory:"
        "2db5b00b-f211-4bba-ba42-e2658ebbb283:GLOBAL",
        "urn:vmomi:InventoryServiceCategory:"
        "cd847c3c-687c-4bd9-8e5a-0eb536f0a01d:GLOBAL",
        "urn:vmomi:InventoryServiceCategory:"
        "d51c24f9-cffb-4ce0-af56-7f18b6e649af:GLOBAL",
    ]

    list_tags_return = [
        "urn:vmomi:InventoryServiceTag:a584a83b-3015-45ad-8057-a3630613052f:GLOBAL",
        "urn:vmomi:InventoryServiceTag:db08019c-15de-4bbf-be46-d81aaf8d25c0:GLOBAL",
        "urn:vmomi:InventoryServiceTag:b55ecc77-f4a5-49f8-ab52-38865467cfbe:GLOBAL",
        "urn:vmomi:InventoryServiceTag:f009ab1b-e1b5-4c40-b8f7-951d9d716b39:GLOBAL",
        "urn:vmomi:InventoryServiceTag:102bb4c5-9b76-4d6c-882a-76a91ee3edcc:GLOBAL",
        "urn:vmomi:InventoryServiceTag:bb0350b4-85db-46b0-a726-e7c5989fc857:GLOBAL",
        "urn:vmomi:InventoryServiceTag:71d30f2d-bb23-48e1-995f-630adfb0dc89:GLOBAL",
    ]

    list_attached_tags_return = [
        "urn:vmomi:InventoryServiceTag:b55ecc77-f4a5-49f8-ab52-38865467cfbe:GLOBAL",
        "urn:vmomi:InventoryServiceTag:bb0350b4-85db-46b0-a726-e7c5989fc857:GLOBAL",
    ]

    list_create_category_return = [
        "urn:vmomi:InventoryServiceCategory:"
        "0af54c2d-e8cd-4248-931e-2f5807d8c477:GLOBAL"
    ]

    list_create_tag_return = [
        "urn:vmomi:InventoryServiceCategory:"
        "0af54c2d-e8cd-4248-931e-2f5807d8c477:GLOBAL"
    ]

    attach_tags_return = {
        "Tag attached": "urn:vmomi:InventoryServiceTag:"
        "bb0350b4-85db-46b0-a726-e7c5989fc857:GLOBAL"
    }

    def _run_with_client(self, mock_client, func, *args, **kwargs):
        """Call ``func(*args, **kwargs)`` with the vcenter chain mocked out.

        Patches the proxy type, the proxy connection details, the service
        instance, the ``vcenter.get_details`` __salt__ call and
        ``salt.utils.vmware.get_vsphere_client`` (which returns
        ``mock_client``).  ``vsphere.DynamicID`` is patched as well so
        object-id construction never touches the SDK.

        Returns ``(ret, mocks)`` where ``mocks`` maps each connection step
        to the MagicMock that recorded it, for call assertions.
        """
        get_details = MagicMock(return_value=self.details)
        with patch.object(
            vsphere, "get_proxy_type", return_value="vcenter"
        ) as get_proxy_type, patch.object(
            vsphere, "_get_proxy_connection_details", return_value=[]
        ) as get_proxy_connection, patch.object(
            salt.utils.vmware, "get_service_instance", return_value=None
        ) as get_service_instance, patch.dict(
            vsphere.__salt__,
            {"vcenter.get_details": get_details},
            clear=True,
        ), patch.object(
            salt.utils.vmware, "get_vsphere_client", return_value=mock_client
        ) as get_vsphere_client, patch.object(
            vsphere, "DynamicID"
        ):
            ret = func(*args, **kwargs)
        return ret, {
            "get_proxy_type": get_proxy_type,
            "get_proxy_connection": get_proxy_connection,
            "get_service_instance": get_service_instance,
            "get_vsphere_client": get_vsphere_client,
        }

    def _assert_chain_called(self, mocks):
        # Every step of the mocked connection chain must run exactly once.
        for name in (
            "get_proxy_type",
            "get_proxy_connection",
            "get_service_instance",
            "get_vsphere_client",
        ):
            mocks[name].assert_called_once()

    def test_create_tag_category_client_none(self):
        # No client available -> no category is created.
        ret, mocks = self._run_with_client(
            None,
            vsphere.create_tag_category,
            self.func_attrs["name"],
            self.func_attrs["description"],
            self.func_attrs["cardinality"],
        )
        self._assert_chain_called(mocks)
        self.assertEqual(ret, {"Category created": None})

    def test_create_tag_category_client(self):
        # A working client returns the new category urn.
        mock_client = Mock(
            tagging=Mock(
                Category=Mock(
                    CreateSpec=Mock(return_value=Mock()),
                    create=Mock(
                        return_value=self.create_tag_category["Category created"]
                    ),
                )
            )
        )
        ret, mocks = self._run_with_client(
            mock_client,
            vsphere.create_tag_category,
            self.func_attrs["name"],
            self.func_attrs["description"],
            self.func_attrs["cardinality"],
        )
        self._assert_chain_called(mocks)
        self.assertEqual(ret, self.create_tag_category)

    def test_create_tag_client_none(self):
        ret, mocks = self._run_with_client(
            None,
            vsphere.create_tag,
            self.func_attrs["name"],
            self.func_attrs["description"],
            self.func_attrs["cardinality"],
        )
        self._assert_chain_called(mocks)
        self.assertEqual(ret, {"Tag created": None})

    def test_create_tag_client(self):
        mock_client = Mock(
            tagging=Mock(
                Tag=Mock(
                    CreateSpec=Mock(return_value=Mock()),
                    create=Mock(return_value=self.create_tag["Tag created"]),
                )
            )
        )
        ret, mocks = self._run_with_client(
            mock_client,
            vsphere.create_tag,
            self.func_attrs["name"],
            self.func_attrs["description"],
            self.func_attrs["cardinality"],
        )
        self._assert_chain_called(mocks)
        self.assertEqual(ret, self.create_tag)

    def test_delete_tag_category_client_none(self):
        ret, mocks = self._run_with_client(
            None, vsphere.delete_tag_category, self.func_attrs["category_id"]
        )
        self._assert_chain_called(mocks)
        self.assertEqual(ret, {"Category deleted": None})

    def test_delete_tag_category_client(self):
        mock_client = Mock(
            tagging=Mock(
                Category=Mock(
                    delete=Mock(
                        return_value=self.delete_tag_category["Category deleted"]
                    )
                )
            )
        )
        ret, mocks = self._run_with_client(
            mock_client, vsphere.delete_tag_category, self.func_attrs["category_id"]
        )
        self._assert_chain_called(mocks)
        self.assertEqual(ret, self.delete_tag_category)

    def test_delete_tag_client_none(self):
        ret, mocks = self._run_with_client(
            None, vsphere.delete_tag, self.func_attrs["tag_id"]
        )
        self._assert_chain_called(mocks)
        self.assertEqual(ret, {"Tag deleted": None})

    def test_delete_tag_client(self):
        mock_client = Mock(
            tagging=Mock(
                Tag=Mock(delete=Mock(return_value=self.delete_tag["Tag deleted"]))
            )
        )
        ret, mocks = self._run_with_client(
            mock_client, vsphere.delete_tag, self.func_attrs["tag_id"]
        )
        self._assert_chain_called(mocks)
        self.assertEqual(ret, self.delete_tag)

    def test_list_tag_categories_client_none(self):
        ret, mocks = self._run_with_client(None, vsphere.list_tag_categories)
        self._assert_chain_called(mocks)
        self.assertEqual(ret, {"Categories": None})

    def test_list_tag_categories_client(self):
        mock_client = Mock(
            tagging=Mock(
                Category=Mock(list=Mock(return_value=self.list_tag_categories_return))
            )
        )
        ret, mocks = self._run_with_client(mock_client, vsphere.list_tag_categories)
        self._assert_chain_called(mocks)
        self.assertEqual(ret, {"Categories": self.list_tag_categories_return})

    def test_list_tags_client_none(self):
        ret, mocks = self._run_with_client(None, vsphere.list_tags)
        self._assert_chain_called(mocks)
        self.assertEqual(ret, {"Tags": None})

    def test_list_tags_client(self):
        mock_client = Mock(
            tagging=Mock(Tag=Mock(list=Mock(return_value=self.list_tags_return)))
        )
        ret, mocks = self._run_with_client(mock_client, vsphere.list_tags)
        self._assert_chain_called(mocks)
        self.assertEqual(ret, {"Tags": self.list_tags_return})

    def test_list_attached_tags_client_none(self):
        ret, mocks = self._run_with_client(
            None, vsphere.list_attached_tags, "object_id"
        )
        self._assert_chain_called(mocks)
        self.assertEqual(ret, {"Attached tags": None})

    def test_list_attached_tags_client(self):
        mock_client = Mock(
            tagging=Mock(
                TagAssociation=Mock(
                    list_attached_tags=Mock(
                        return_value=self.list_attached_tags_return
                    )
                )
            )
        )
        ret, mocks = self._run_with_client(
            mock_client, vsphere.list_attached_tags, self.func_attrs["object_id"]
        )
        self._assert_chain_called(mocks)
        self.assertEqual(ret, {"Attached tags": self.list_attached_tags_return})

    def test_attach_tags_client_none(self):
        ret, mocks = self._run_with_client(
            None,
            vsphere.attach_tag,
            object_id=self.func_attrs["object_id"],
            tag_id=self.func_attrs["tag_id"],
        )
        self._assert_chain_called(mocks)
        self.assertEqual(ret, {"Tag attached": None})

    def test_attach_tags_client(self):
        # NOTE: mirrors the original fixture, which wires ``attach`` to
        # ``list_attached_tags_return`` rather than ``attach_tags_return``.
        mock_client = Mock(
            tagging=Mock(
                TagAssociation=Mock(
                    attach=Mock(return_value=self.list_attached_tags_return)
                )
            )
        )
        ret, mocks = self._run_with_client(
            mock_client,
            vsphere.attach_tag,
            object_id=self.func_attrs["object_id"],
            tag_id=self.func_attrs["tag_id"],
        )
        self._assert_chain_called(mocks)
        self.assertEqual(ret, {"Tag attached": self.list_attached_tags_return})
| 38.535104
| 110
| 0.565795
| 11,949
| 121,848
| 5.425893
| 0.037158
| 0.041398
| 0.052442
| 0.026838
| 0.887204
| 0.85176
| 0.825261
| 0.797421
| 0.773375
| 0.745057
| 0
| 0.007599
| 0.344437
| 121,848
| 3,161
| 111
| 38.547295
| 0.804054
| 0.061724
| 0
| 0.702755
| 0
| 0
| 0.160062
| 0.082018
| 0
| 0
| 0
| 0
| 0.091967
| 1
| 0.075281
| false
| 0.023671
| 0.004269
| 0.008537
| 0.102057
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e6c3ec4ca59fb2b5bce24dc29ee26259f2af0b09
| 44,097
|
py
|
Python
|
src/ralph/cmdb/migrations/0028_auto__add_civalueboolean__add_field_ciattributevalue_value_boolean__de.py
|
vi4m/ralph
|
2af767ee23d89be9e6cec0a537350a1ce8840bd1
|
[
"Apache-2.0"
] | 1
|
2018-09-01T14:14:08.000Z
|
2018-09-01T14:14:08.000Z
|
src/ralph/cmdb/migrations/0028_auto__add_civalueboolean__add_field_ciattributevalue_value_boolean__de.py
|
srikanth4372/sample
|
127b5742ae464d42909a14d71e3c10c241ec3a23
|
[
"Apache-2.0"
] | 1
|
2019-08-14T10:03:45.000Z
|
2019-08-14T10:03:45.000Z
|
src/ralph/cmdb/migrations/0028_auto__add_civalueboolean__add_field_ciattributevalue_value_boolean__de.py
|
srikanth4372/sample
|
127b5742ae464d42909a14d71e3c10c241ec3a23
|
[
"Apache-2.0"
] | 1
|
2019-08-14T09:59:42.000Z
|
2019-08-14T09:59:42.000Z
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration.

        Creates the ``CIValueBoolean`` model, attaches it to
        ``CIAttributeValue`` via a nullable FK, drops the owner fields that
        are being moved off ``CIOwner``, and makes ``CIOwner.profile``
        a required one-to-one link.
        """
        # Adding model 'CIValueBoolean' -- mirrors the other CIValue* tables:
        # id + created/modified timestamps + cache_version + the value itself.
        db.create_table('cmdb_civalueboolean', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
            ('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
            ('cache_version', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
            # NullBooleanField so "no value" is representable alongside True/False.
            ('value', self.gf('django.db.models.fields.NullBooleanField')(default=False, null=True, blank=True)),
        ))
        db.send_create_signal('cmdb', ['CIValueBoolean'])
        # Adding field 'CIAttributeValue.value_boolean' -- nullable FK so
        # existing rows (and non-boolean attributes) remain valid.
        db.add_column('cmdb_ciattributevalue', 'value_boolean',
                      self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cmdb.CIValueBoolean'], null=True, blank=True),
                      keep_default=False)
        # Deleting field 'CIOwner.last_name'
        db.delete_column('cmdb_ciowner', 'last_name')
        # Deleting field 'CIOwner.first_name'
        db.delete_column('cmdb_ciowner', 'first_name')
        # Deleting field 'CIOwner.sAMAccountName'
        db.delete_column('cmdb_ciowner', 'sAMAccountName')
        # Deleting field 'CIOwner.email'
        db.delete_column('cmdb_ciowner', 'email')
        # Changing field 'CIOwner.profile' -- now a required (non-null) link;
        # default=-1 is South's placeholder for pre-existing rows.
        # NOTE(review): any row actually backfilled with -1 would point at a
        # nonexistent Profile -- assumes no such rows exist; confirm on deploy.
        db.alter_column('cmdb_ciowner', 'profile_id', self.gf('django.db.models.fields.related.OneToOneField')(default=-1, to=orm['account.Profile'], unique=True))
    def backwards(self, orm):
        """Revert the migration.

        Drops the ``CIValueBoolean`` table and its FK column on
        ``CIAttributeValue``, restores the four dropped ``CIOwner`` fields,
        and makes ``CIOwner.profile`` nullable again.

        NOTE(review): the restored columns get empty-string / NULL defaults,
        so original field data is NOT recovered on rollback.
        """
        # Deleting model 'CIValueBoolean'
        db.delete_table('cmdb_civalueboolean')
        # Deleting field 'CIAttributeValue.value_boolean' -- the DB column is
        # the FK's '<name>_id' form.
        db.delete_column('cmdb_ciattributevalue', 'value_boolean_id')
        # Adding field 'CIOwner.last_name'
        db.add_column('cmdb_ciowner', 'last_name',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=100),
                      keep_default=False)
        # Adding field 'CIOwner.first_name'
        db.add_column('cmdb_ciowner', 'first_name',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=50),
                      keep_default=False)
        # Adding field 'CIOwner.sAMAccountName'
        db.add_column('cmdb_ciowner', 'sAMAccountName',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=256, blank=True),
                      keep_default=False)
        # Adding field 'CIOwner.email' -- unique but nullable, matching the
        # pre-migration schema.
        db.add_column('cmdb_ciowner', 'email',
                      self.gf('django.db.models.fields.EmailField')(unique=True, max_length=75, null=True),
                      keep_default=False)
        # Changing field 'CIOwner.profile' -- back to nullable one-to-one.
        db.alter_column('cmdb_ciowner', 'profile_id', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['account.Profile'], unique=True, null=True))
models = {
'account.profile': {
'Meta': {'object_name': 'Profile'},
'activation_token': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '40', 'blank': 'True'}),
'birth_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'company': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'cost_center': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'country': ('django.db.models.fields.PositiveIntegerField', [], {'default': '153'}),
'department': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'employee_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'gender': ('django.db.models.fields.PositiveIntegerField', [], {'default': '2'}),
'home_page': (u'dj.choices.fields.ChoiceField', [], {'unique': 'False', 'primary_key': 'False', 'db_column': 'None', 'blank': 'False', u'default': '1', 'null': 'False', '_in_south': 'True', 'db_index': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'manager': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'nick': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '30', 'blank': 'True'}),
'profit_center': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'time_zone': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cmdb.archivedcichange': {
'Meta': {'unique_together': "((u'content_type', u'object_id'),)", 'object_name': 'ArchivedCIChange'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'ci': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CI']", 'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_key': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'priority': ('django.db.models.fields.IntegerField', [], {'max_length': '11'}),
'registration_type': ('django.db.models.fields.IntegerField', [], {'default': '4', 'max_length': '11'}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'type': ('django.db.models.fields.IntegerField', [], {'max_length': '11', 'db_index': 'True'})
},
'cmdb.archivedcichangecmdbhistory': {
'Meta': {'object_name': 'ArchivedCIChangeCMDBHistory'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'ci': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CI']"}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'field_name': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'new_value': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}),
'old_value': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'})
},
'cmdb.archivedcichangegit': {
'Meta': {'object_name': 'ArchivedCIChangeGit'},
'author': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'changeset': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80', 'db_index': 'True'}),
'ci': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CI']", 'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'file_paths': ('django.db.models.fields.CharField', [], {'max_length': '3000'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'cmdb.archivedcichangepuppet': {
'Meta': {'object_name': 'ArchivedCIChangePuppet'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'ci': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CI']", 'null': 'True', 'blank': 'True'}),
'configuration_version': ('django.db.models.fields.CharField', [], {'max_length': '30', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'host': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'})
},
'cmdb.archivedcichangezabbixtrigger': {
'Meta': {'object_name': 'ArchivedCIChangeZabbixTrigger'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'ci': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CI']", 'null': 'True', 'blank': 'True'}),
'comments': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'host': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'host_id': ('django.db.models.fields.BigIntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lastchange': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'priority': ('django.db.models.fields.IntegerField', [], {}),
'status': ('django.db.models.fields.IntegerField', [], {}),
'trigger_id': ('django.db.models.fields.BigIntegerField', [], {})
},
'cmdb.archivedpuppetlog': {
'Meta': {'object_name': 'ArchivedPuppetLog'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'cichange': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.ArchivedCIChangePuppet']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tags': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'time': ('django.db.models.fields.DateTimeField', [], {})
},
'cmdb.ci': {
'Meta': {'unique_together': "((u'content_type', u'object_id'),)", 'object_name': 'CI'},
'added_manually': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'barcode': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'business_service': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cmdb.CILayer']", 'symmetrical': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'owners': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cmdb.CIOwner']", 'through': "orm['cmdb.CIOwnership']", 'symmetrical': 'False'}),
'pci_scope': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'relations': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cmdb.CI']", 'through': "orm['cmdb.CIRelation']", 'symmetrical': 'False'}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '2', 'max_length': '11'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2', 'max_length': '11'}),
'technical_service': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CIType']"}),
'uid': ('django.db.models.fields.CharField', [], {'max_length': '100', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'zabbix_id': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'})
},
'cmdb.ciattribute': {
'Meta': {'object_name': 'CIAttribute'},
'attribute_type': ('django.db.models.fields.IntegerField', [], {'max_length': '11'}),
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'choices': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'ci_types': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cmdb.CIType']", 'symmetrical': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'cmdb.ciattributevalue': {
'Meta': {'object_name': 'CIAttributeValue'},
'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CIAttribute']"}),
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'ci': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CI']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value_boolean': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CIValueBoolean']", 'null': 'True', 'blank': 'True'}),
'value_choice': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CIValueChoice']", 'null': 'True', 'blank': 'True'}),
'value_date': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CIValueDate']", 'null': 'True', 'blank': 'True'}),
'value_float': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CIValueFloat']", 'null': 'True', 'blank': 'True'}),
'value_integer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CIValueInteger']", 'null': 'True', 'blank': 'True'}),
'value_string': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CIValueString']", 'null': 'True', 'blank': 'True'})
},
'cmdb.cichange': {
'Meta': {'unique_together': "((u'content_type', u'object_id'),)", 'object_name': 'CIChange'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'ci': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CI']", 'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_key': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'priority': ('django.db.models.fields.IntegerField', [], {'max_length': '11'}),
'registration_type': ('django.db.models.fields.IntegerField', [], {'default': '4', 'max_length': '11'}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'type': ('django.db.models.fields.IntegerField', [], {'max_length': '11', 'db_index': 'True'})
},
'cmdb.cichangecmdbhistory': {
'Meta': {'object_name': 'CIChangeCMDBHistory'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'ci': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CI']"}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'field_name': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'new_value': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}),
'old_value': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'})
},
'cmdb.cichangegit': {
'Meta': {'object_name': 'CIChangeGit'},
'author': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'changeset': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80', 'db_index': 'True'}),
'ci': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CI']", 'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'file_paths': ('django.db.models.fields.CharField', [], {'max_length': '3000'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'cmdb.cichangepuppet': {
'Meta': {'object_name': 'CIChangePuppet'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'ci': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CI']", 'null': 'True', 'blank': 'True'}),
'configuration_version': ('django.db.models.fields.CharField', [], {'max_length': '30', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'host': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'})
},
'cmdb.cichangezabbixtrigger': {
'Meta': {'object_name': 'CIChangeZabbixTrigger'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'ci': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CI']", 'null': 'True', 'blank': 'True'}),
'comments': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'host': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'host_id': ('django.db.models.fields.BigIntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lastchange': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'priority': ('django.db.models.fields.IntegerField', [], {}),
'status': ('django.db.models.fields.IntegerField', [], {}),
'trigger_id': ('django.db.models.fields.BigIntegerField', [], {})
},
'cmdb.cicontenttypeprefix': {
'Meta': {'object_name': 'CIContentTypePrefix'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'content_type_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'primary_key': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'prefix': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
'cmdb.ciincident': {
'Meta': {'object_name': 'CIIncident'},
'analysis': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'assignee': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'cis': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'ciincident'", 'symmetrical': 'False', 'to': "orm['cmdb.CI']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue_type': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'jira_id': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'planned_end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'planned_start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'priority': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'problems': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'resolvet_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'cmdb.cilayer': {
'Meta': {'ordering': "(u'name',)", 'object_name': 'CILayer'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'connected_types': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cmdb.CIType']", 'symmetrical': 'False', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'icon': (u'dj.choices.fields.ChoiceField', [], {'unique': 'False', 'primary_key': 'False', 'db_column': 'None', 'blank': 'True', u'default': 'None', 'null': 'True', '_in_south': 'True', 'db_index': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cmdb.ciowner': {
'Meta': {'object_name': 'CIOwner'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'profile': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['account.Profile']", 'unique': 'True'})
},
'cmdb.ciownership': {
'Meta': {'object_name': 'CIOwnership'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'ci': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CI']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CIOwner']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'cmdb.ciproblem': {
'Meta': {'object_name': 'CIProblem'},
'analysis': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'assignee': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'cis': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'ciproblem'", 'symmetrical': 'False', 'to': "orm['cmdb.CI']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue_type': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'jira_id': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'planned_end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'planned_start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'priority': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'problems': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'resolvet_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'cmdb.cirelation': {
'Meta': {'unique_together': "((u'parent', u'child', u'type'),)", 'object_name': 'CIRelation'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'child'", 'to': "orm['cmdb.CI']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'parent'", 'to': "orm['cmdb.CI']"}),
'readonly': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.IntegerField', [], {'max_length': '11'})
},
'cmdb.citype': {
'Meta': {'object_name': 'CIType'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'icon_class': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
'cmdb.civalueboolean': {
'Meta': {'object_name': 'CIValueBoolean'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'})
},
'cmdb.civaluechoice': {
'Meta': {'object_name': 'CIValueChoice'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'cmdb.civaluedate': {
'Meta': {'object_name': 'CIValueDate'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'cmdb.civaluefloat': {
'Meta': {'object_name': 'CIValueFloat'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'cmdb.civalueinteger': {
'Meta': {'object_name': 'CIValueInteger'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'cmdb.civaluestring': {
'Meta': {'object_name': 'CIValueString'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'})
},
'cmdb.gitpathmapping': {
'Meta': {'object_name': 'GitPathMapping'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'ci': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CI']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_regex': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
},
'cmdb.jirachanges': {
'Meta': {'object_name': 'JiraChanges'},
'analysis': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'assignee': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'cis': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'jirachanges'", 'symmetrical': 'False', 'to': "orm['cmdb.CI']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue_type': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'jira_id': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'planned_end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'planned_start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'priority': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'problems': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'resolvet_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'update_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'cmdb.puppetlog': {
'Meta': {'object_name': 'PuppetLog'},
'cache_version': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'cichange': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmdb.CIChangePuppet']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tags': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'time': ('django.db.models.fields.DateTimeField', [], {})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['cmdb']
| 82.733583
| 224
| 0.570311
| 4,353
| 44,097
| 5.682748
| 0.058121
| 0.116101
| 0.202611
| 0.289445
| 0.873833
| 0.848446
| 0.825807
| 0.80701
| 0.789869
| 0.732991
| 0
| 0.011044
| 0.186883
| 44,097
| 532
| 225
| 82.889098
| 0.678854
| 0.01177
| 0
| 0.537223
| 0
| 0
| 0.595244
| 0.361393
| 0
| 0
| 0
| 0
| 0
| 1
| 0.004024
| false
| 0.002012
| 0.008048
| 0
| 0.018109
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
e6e955725917b50b50da91674e681cdda5752ce7
| 402
|
py
|
Python
|
mmpose/core/__init__.py
|
pallgeuer/mmpose
|
d3c17d5e6bdb9dbaca19f3bf53aa2802105355fd
|
[
"Apache-2.0"
] | null | null | null |
mmpose/core/__init__.py
|
pallgeuer/mmpose
|
d3c17d5e6bdb9dbaca19f3bf53aa2802105355fd
|
[
"Apache-2.0"
] | null | null | null |
mmpose/core/__init__.py
|
pallgeuer/mmpose
|
d3c17d5e6bdb9dbaca19f3bf53aa2802105355fd
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) OpenMMLab. All rights reserved.
from .bbox import * # noqa: F401, F403
from .camera import * # noqa: F401, F403
from .evaluation import * # noqa: F401, F403
from .fp16 import * # noqa: F401, F403
from .optimizer import * # noqa: F401, F403
from .post_processing import * # noqa: F401, F403
from .utils import * # noqa: F401, F403
from .visualization import * # noqa: F401, F403
| 40.2
| 50
| 0.691542
| 55
| 402
| 5.036364
| 0.363636
| 0.288809
| 0.404332
| 0.519856
| 0.555957
| 0
| 0
| 0
| 0
| 0
| 0
| 0.154799
| 0.196517
| 402
| 9
| 51
| 44.666667
| 0.702786
| 0.450249
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
fc089a584039257e009647476e94deeda5696ef1
| 5,147
|
py
|
Python
|
LCR/corrections.py
|
MariaPoliti/LCR
|
8991a4395b945951d468fe33593dee94d7f5668a
|
[
"MIT"
] | 2
|
2020-09-06T00:05:54.000Z
|
2020-09-23T17:37:57.000Z
|
LCR/corrections.py
|
MariaPoliti/LCR
|
8991a4395b945951d468fe33593dee94d7f5668a
|
[
"MIT"
] | null | null | null |
LCR/corrections.py
|
MariaPoliti/LCR
|
8991a4395b945951d468fe33593dee94d7f5668a
|
[
"MIT"
] | 1
|
2021-11-08T20:08:48.000Z
|
2021-11-08T20:08:48.000Z
|
import time
def cable_length(my_instrument, length=1):
    """
    Activate the cable length correction on the instrument.

    Unless turned off, the correction will be applied to all the
    measurements collected after.

    Parameters
    ----------
    my_instrument: pyvisa.resources.usb.USBInstrument
        Instrument instance obtained through the PyVISA package
    length: int
        Length of the cable to correct for. Options are 0 to 4 m.

    Returns
    -------
    None. Prints a statement confirming the task was completed.
    """
    # Clear previous states and reset the instrument settings
    my_instrument.write('*RST; *CLS')
    # Enable the display to update itself when a change is made
    my_instrument.write(':DISP:ENAB')
    time.sleep(3)
    # Configure the instrument to automatically perform continuous
    # measurements
    my_instrument.write(':INIT:CONT')
    # Switch the instrument triggering function to 'EXT'
    # in order to control the measurements remotely
    my_instrument.write(':TRIG:SOUR EXT')
    time.sleep(5)
    # BUG FIX: the length value must be part of the SCPI command string.
    # The original call write(':CORR:LENG ', length) passed `length` as
    # PyVISA's second positional parameter (the message termination),
    # so the cable length was never actually sent to the instrument.
    my_instrument.write(':CORR:LENG {}'.format(length))
    return print('the correction for the cable length was set to {}'
                 .format(length))
def open(my_instrument, mode='ON', collect=False):
    """
    Activate and measure the open circuit potential (OCP) correction.

    Once this function is run, the new OCP will be stored in the
    instrument and, unless turned off, the correction will be applied
    to all the measurements collected after.

    NOTE(review): this function shadows the builtin ``open``; renaming
    would break existing callers, so the name is kept — but avoid
    ``from corrections import *``.

    Parameters
    ----------
    my_instrument: pyvisa.resources.usb.USBInstrument
        Instrument instance obtained through the PyVISA package
    mode: str
        Options are ON or OFF. Any value other than 'ON' disables the
        correction.
    collect: bool
        Option to collect a new measurement for the open correction.
        If collect is set to False, the last measured open correction
        will be used.

    Returns
    -------
    None. Prints a statement confirming the task was completed.
    """
    # Clear previous states and reset the instrument settings
    my_instrument.write('*RST; *CLS')
    # Enable the display to update itself when a change is made
    my_instrument.write(':DISP:ENAB')
    time.sleep(3)
    # Configure the instrument to automatically perform continuous
    # measurements
    my_instrument.write(':INIT:CONT')
    # Switch the instrument triggering function to 'EXT'
    # in order to control the measurements remotely
    my_instrument.write(':TRIG:SOUR EXT')
    time.sleep(5)
    if mode == 'ON':
        my_instrument.write(':CORR:OPEN:STAT ON')
        # Only trigger a fresh open-correction measurement when asked;
        # otherwise the instrument keeps using the last stored one.
        if collect:
            my_instrument.write(':CORR:OPEN:EXEC')
        return print('the correction for the Open Circuit Potential' +
                     ' was successfully collected')
    else:
        my_instrument.write(':CORR:OPEN:STAT OFF')
        return print('the correction for the Open Circuit Potential' +
                     ' was disactivated')
def short(my_instrument, mode='ON', collect=False):
    """
    Activate and measure a short circuit correction for the system.

    Once this function is run, the short correction will be stored in
    the instrument and, unless turned off, the correction will be
    applied to all the measurements collected after.

    Parameters
    ----------
    my_instrument: pyvisa.resources.usb.USBInstrument
        Instrument instance obtained through the PyVISA package
    mode: str
        Options are ON or OFF. Any value other than 'ON' disables the
        correction.
    collect: bool
        Option to collect a new measurement for the short correction.
        If collect is set to False, the last measured short correction
        will be used.

    Returns
    -------
    None. Prints a statement confirming the task was completed.
    """
    # Clear previous states and reset the instrument settings
    my_instrument.write('*RST; *CLS')
    # Enable the display to update itself when a change is made
    my_instrument.write(':DISP:ENAB')
    time.sleep(3)
    # Configure the instrument to automatically perform continuous
    # measurements
    my_instrument.write(':INIT:CONT')
    # Switch the instrument triggering function to 'EXT'
    # in order to control the measurements remotely
    my_instrument.write(':TRIG:SOUR EXT')
    time.sleep(5)
    if mode == 'ON':
        my_instrument.write(':CORR:SHOR:STAT ON')
        # Only trigger a fresh short-correction measurement when asked;
        # otherwise the instrument keeps using the last stored one.
        if collect:
            my_instrument.write(':CORR:SHOR:EXEC')
        # BUG FIX: added the missing leading space in the second string
        # fragment — the original printed "...correctionwas successfully
        # collected" (and "...correctionwas disactivated" below).
        return print('the correction for the short correction' +
                     ' was successfully collected')
    else:
        my_instrument.write(':CORR:SHOR:STAT OFF')
        return print('the correction for the short correction' +
                     ' was disactivated')
| 32.371069
| 79
| 0.657665
| 650
| 5,147
| 5.167692
| 0.195385
| 0.089312
| 0.09616
| 0.043763
| 0.905031
| 0.90265
| 0.888955
| 0.888955
| 0.835963
| 0.787437
| 0
| 0.00239
| 0.268506
| 5,147
| 158
| 80
| 32.575949
| 0.889774
| 0.571595
| 0
| 0.734694
| 0
| 0
| 0.287926
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061224
| false
| 0.040816
| 0.020408
| 0
| 0.183673
| 0.102041
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5d7881e6e4752c9d504384cc677c2cc78ecc99b2
| 80,086
|
py
|
Python
|
mistral/tests/unit/engine/test_subworkflows_pause_resume.py
|
shubhamdang/mistral
|
3c83837f6ce1e4ab74fb519a63e82eaae70f9d2d
|
[
"Apache-2.0"
] | 205
|
2015-06-21T11:51:47.000Z
|
2022-03-05T04:00:04.000Z
|
mistral/tests/unit/engine/test_subworkflows_pause_resume.py
|
shubhamdang/mistral
|
3c83837f6ce1e4ab74fb519a63e82eaae70f9d2d
|
[
"Apache-2.0"
] | 8
|
2015-06-23T14:47:58.000Z
|
2021-01-28T06:06:44.000Z
|
mistral/tests/unit/engine/test_subworkflows_pause_resume.py
|
shubhamdang/mistral
|
3c83837f6ce1e4ab74fb519a63e82eaae70f9d2d
|
[
"Apache-2.0"
] | 110
|
2015-06-14T03:34:38.000Z
|
2021-11-11T12:12:56.000Z
|
# Copyright 2015 - StackStorm, Inc.
# Copyright 2016 - Brocade Communications Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mistral.db.v2 import api as db_api
from mistral.services import workbooks as wb_service
from mistral.tests.unit.engine import base
from mistral.workflow import states
from mistral_lib import actions as ml_actions
class SubworkflowPauseResumeTest(base.EngineTestCase):
def test_pause_resume_cascade_down_to_subworkflow(self):
    """Pausing/resuming a parent workflow must cascade down to subworkflows.

    wf1 runs wf2 and wf3 as subworkflow tasks. Pausing wf1 should pause
    both subworkflow executions; resuming wf1 should resume them; and
    completing the subworkflows' async actions should drive every
    execution (parent and children) to SUCCESS.

    NOTE(review): indentation of this block was reconstructed from a
    whitespace-mangled source — statement order and tokens are original.
    """
    wb_text = """
    version: '2.0'

    name: wb

    workflows:
      wf1:
        tasks:
          task1:
            workflow: wf2
            on-success: task3
          task2:
            workflow: wf3
            on-success: task3
          task3:
            join: all

      wf2:
        tasks:
          task1:
            action: std.async_noop
            on-success: task2
          task2:
            action: std.noop

      wf3:
        tasks:
          task1:
            action: std.async_noop
            on-success: task2
          task2:
            action: std.noop
    """

    wb_service.create_workbook_v2(wb_text)

    # Start workflow execution.
    wf_1_ex = self.engine.start_workflow('wb.wf1')

    self.await_workflow_state(wf_1_ex.id, states.RUNNING)

    # Load all executions inside one transaction so lazy relationships
    # (task_executions, executions) are materialized for the asserts.
    with db_api.transaction():
        wf_execs = db_api.get_workflow_executions()

        # Get objects for the parent workflow execution.
        wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1')
        wf_1_task_execs = wf_1_ex.task_executions

        wf_1_task_1_ex = self._assert_single_item(
            wf_1_ex.task_executions,
            name='task1'
        )

        wf_1_task_1_action_exs = wf_1_task_1_ex.executions

        wf_1_task_2_ex = self._assert_single_item(
            wf_1_ex.task_executions,
            name='task2'
        )

        wf_1_task_2_action_exs = wf_1_task_2_ex.executions

        # Get objects for the subworkflow executions.
        wf_2_ex = self._assert_single_item(wf_execs, name='wb.wf2')
        wf_2_task_execs = wf_2_ex.task_executions

        wf_2_task_1_ex = self._assert_single_item(
            wf_2_ex.task_executions,
            name='task1'
        )

        wf_2_task_1_action_exs = db_api.get_action_executions(
            task_execution_id=wf_2_task_1_ex.id
        )

        wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3')
        wf_3_task_execs = wf_3_ex.task_executions

        wf_3_task_1_ex = self._assert_single_item(
            wf_3_ex.task_executions,
            name='task1'
        )

        wf_3_task_1_action_exs = db_api.get_action_executions(
            task_execution_id=wf_3_task_1_ex.id
        )

    # Initial state: everything is RUNNING and each subworkflow task's
    # action execution is the subworkflow execution itself.
    self.assertEqual(states.RUNNING, wf_1_ex.state)
    self.assertEqual(2, len(wf_1_task_execs))
    self.assertEqual(states.RUNNING, wf_1_task_1_ex.state)
    self.assertEqual(states.RUNNING, wf_1_task_2_ex.state)
    self.assertEqual(1, len(wf_1_task_1_action_exs))
    self.assertEqual(states.RUNNING, wf_1_task_1_action_exs[0].state)
    self.assertEqual(wf_1_task_1_action_exs[0].id, wf_2_ex.id)
    self.assertEqual(1, len(wf_1_task_2_action_exs))
    self.assertEqual(states.RUNNING, wf_1_task_2_action_exs[0].state)
    self.assertEqual(wf_1_task_2_action_exs[0].id, wf_3_ex.id)
    self.assertEqual(states.RUNNING, wf_2_ex.state)
    self.assertEqual(1, len(wf_2_task_execs))
    self.assertEqual(states.RUNNING, wf_2_task_1_ex.state)
    self.assertEqual(1, len(wf_2_task_1_action_exs))
    self.assertEqual(states.RUNNING, wf_2_task_1_action_exs[0].state)
    self.assertEqual(states.RUNNING, wf_3_ex.state)
    self.assertEqual(1, len(wf_3_task_execs))
    self.assertEqual(states.RUNNING, wf_3_task_1_ex.state)
    self.assertEqual(1, len(wf_3_task_1_action_exs))
    self.assertEqual(states.RUNNING, wf_3_task_1_action_exs[0].state)

    # Pause the main workflow.
    self.engine.pause_workflow(wf_1_ex.id)

    self.await_workflow_paused(wf_1_ex.id)
    self.await_workflow_paused(wf_2_ex.id)
    self.await_workflow_paused(wf_3_ex.id)

    with db_api.transaction():
        wf_execs = db_api.get_workflow_executions()

        # Get objects for the parent workflow execution.
        wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1')

        wf_1_task_1_ex = self._assert_single_item(
            wf_1_ex.task_executions,
            name='task1'
        )

        wf_1_task_1_action_exs = wf_1_task_1_ex.executions

        wf_1_task_2_ex = self._assert_single_item(
            wf_1_ex.task_executions,
            name='task2'
        )

        wf_1_task_2_action_exs = wf_1_task_2_ex.executions

        # Get objects for the subworkflow executions.
        wf_2_ex = self._assert_single_item(wf_execs, name='wb.wf2')

        wf_2_task_1_ex = self._assert_single_item(
            wf_2_ex.task_executions,
            name='task1'
        )

        wf_2_task_1_action_exs = db_api.get_action_executions(
            task_execution_id=wf_2_task_1_ex.id
        )

        wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3')

        wf_3_task_1_ex = self._assert_single_item(
            wf_3_ex.task_executions,
            name='task1'
        )

        wf_3_task_1_action_exs = db_api.get_action_executions(
            task_execution_id=wf_3_task_1_ex.id
        )

    # After pausing the parent: workflow executions are PAUSED, but the
    # already-running std.async_noop actions stay RUNNING.
    self.assertEqual(states.PAUSED, wf_2_ex.state)
    self.assertEqual(states.RUNNING, wf_2_task_1_ex.state)
    self.assertEqual(states.RUNNING, wf_2_task_1_action_exs[0].state)
    self.assertEqual(states.PAUSED, wf_3_ex.state)
    self.assertEqual(states.RUNNING, wf_3_task_1_ex.state)
    self.assertEqual(states.RUNNING, wf_3_task_1_action_exs[0].state)
    self.assertEqual(states.PAUSED, wf_1_task_1_action_exs[0].state)
    self.assertEqual(states.PAUSED, wf_1_task_1_ex.state)
    self.assertEqual(states.PAUSED, wf_1_task_2_action_exs[0].state)
    self.assertEqual(states.PAUSED, wf_1_task_2_ex.state)
    self.assertEqual(states.PAUSED, wf_1_ex.state)

    # Resume the main workflow.
    self.engine.resume_workflow(wf_1_ex.id)

    self.await_workflow_running(wf_1_ex.id)
    self.await_workflow_running(wf_2_ex.id)
    self.await_workflow_running(wf_3_ex.id)

    with db_api.transaction():
        wf_execs = db_api.get_workflow_executions()

        # Get objects for the parent workflow execution.
        wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1')

        wf_1_task_1_ex = self._assert_single_item(
            wf_1_ex.task_executions,
            name='task1'
        )

        wf_1_task_1_action_exs = wf_1_task_1_ex.executions

        wf_1_task_2_ex = self._assert_single_item(
            wf_1_ex.task_executions,
            name='task2'
        )

        wf_1_task_2_action_exs = wf_1_task_2_ex.executions

        # Get objects for the subworkflow executions.
        wf_2_ex = self._assert_single_item(wf_execs, name='wb.wf2')

        wf_2_task_1_ex = self._assert_single_item(
            wf_2_ex.task_executions,
            name='task1'
        )

        wf_2_task_1_action_exs = db_api.get_action_executions(
            task_execution_id=wf_2_task_1_ex.id
        )

        wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3')

        wf_3_task_1_ex = self._assert_single_item(
            wf_3_ex.task_executions,
            name='task1'
        )

        wf_3_task_1_action_exs = db_api.get_action_executions(
            task_execution_id=wf_3_task_1_ex.id
        )

    # After resuming the parent: everything is RUNNING again.
    self.assertEqual(states.RUNNING, wf_2_ex.state)
    self.assertEqual(states.RUNNING, wf_2_task_1_ex.state)
    self.assertEqual(states.RUNNING, wf_2_task_1_action_exs[0].state)
    self.assertEqual(states.RUNNING, wf_3_ex.state)
    self.assertEqual(states.RUNNING, wf_3_task_1_ex.state)
    self.assertEqual(states.RUNNING, wf_3_task_1_action_exs[0].state)
    self.assertEqual(states.RUNNING, wf_1_task_1_action_exs[0].state)
    self.assertEqual(states.RUNNING, wf_1_task_1_ex.state)
    self.assertEqual(states.RUNNING, wf_1_task_2_action_exs[0].state)
    self.assertEqual(states.RUNNING, wf_1_task_2_ex.state)
    self.assertEqual(states.RUNNING, wf_1_ex.state)

    # Complete action executions of the subworkflows.
    self.engine.on_action_complete(
        wf_2_task_1_action_exs[0].id,
        ml_actions.Result(data={'result': 'foobar'})
    )

    self.engine.on_action_complete(
        wf_3_task_1_action_exs[0].id,
        ml_actions.Result(data={'result': 'foobar'})
    )

    self.await_workflow_success(wf_2_ex.id)
    self.await_workflow_success(wf_3_ex.id)
    self.await_workflow_success(wf_1_ex.id)

    with db_api.transaction():
        wf_execs = db_api.get_workflow_executions()

        # Get objects for the parent workflow execution.
        wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1')
        wf_1_task_execs = wf_1_ex.task_executions

        wf_1_task_1_ex = self._assert_single_item(
            wf_1_ex.task_executions,
            name='task1'
        )

        wf_1_task_1_action_exs = wf_1_task_1_ex.executions

        wf_1_task_2_ex = self._assert_single_item(
            wf_1_ex.task_executions,
            name='task2'
        )

        wf_1_task_2_action_exs = wf_1_task_2_ex.executions

        wf_1_task_3_ex = self._assert_single_item(
            wf_1_ex.task_executions,
            name='task3'
        )

        # Get objects for the subworkflow executions.
        wf_2_ex = self._assert_single_item(wf_execs, name='wb.wf2')
        wf_2_task_execs = wf_2_ex.task_executions

        wf_2_task_1_ex = self._assert_single_item(
            wf_2_ex.task_executions,
            name='task1'
        )

        wf_2_task_2_ex = self._assert_single_item(
            wf_2_ex.task_executions,
            name='task2'
        )

        wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3')
        wf_3_task_execs = wf_3_ex.task_executions

        wf_3_task_1_ex = self._assert_single_item(
            wf_3_ex.task_executions,
            name='task1'
        )

        wf_3_task_2_ex = self._assert_single_item(
            wf_3_ex.task_executions,
            name='task2'
        )

    # Final state: every workflow, task and action execution succeeded,
    # and the join task (task3) ran in the parent.
    self.assertEqual(states.SUCCESS, wf_1_ex.state)
    self.assertEqual(3, len(wf_1_task_execs))
    self.assertEqual(states.SUCCESS, wf_1_task_1_ex.state)
    self.assertEqual(states.SUCCESS, wf_1_task_2_ex.state)
    self.assertEqual(states.SUCCESS, wf_1_task_3_ex.state)
    self.assertEqual(states.SUCCESS, wf_1_task_1_action_exs[0].state)
    self.assertEqual(states.SUCCESS, wf_1_task_2_action_exs[0].state)
    self.assertEqual(states.SUCCESS, wf_2_ex.state)
    self.assertEqual(2, len(wf_2_task_execs))
    self.assertEqual(states.SUCCESS, wf_2_task_1_ex.state)
    self.assertEqual(states.SUCCESS, wf_2_task_2_ex.state)
    self.assertEqual(states.SUCCESS, wf_3_ex.state)
    self.assertEqual(2, len(wf_3_task_execs))
    self.assertEqual(states.SUCCESS, wf_3_task_1_ex.state)
    self.assertEqual(states.SUCCESS, wf_3_task_2_ex.state)
def test_pause_resume_cascade_up_from_subworkflow(self):
    """Pausing a subworkflow must cascade up to the parent and siblings.

    Pausing wf2 (a subworkflow of wf1) should pause wf1 and the sibling
    subworkflow wf3 as well. Each subworkflow is then resumed and
    completed individually; the parent only returns to RUNNING once the
    last paused subworkflow (wf3) is resumed, and everything ends in
    SUCCESS.

    NOTE(review): indentation of this block was reconstructed from a
    whitespace-mangled source — statement order and tokens are original.
    """
    wb_text = """
    version: '2.0'

    name: wb

    workflows:
      wf1:
        tasks:
          task1:
            workflow: wf2
            on-success: task3
          task2:
            workflow: wf3
            on-success: task3
          task3:
            join: all

      wf2:
        tasks:
          task1:
            action: std.async_noop
            on-success: task2
          task2:
            action: std.noop

      wf3:
        tasks:
          task1:
            action: std.async_noop
            on-success: task2
          task2:
            action: std.noop
    """

    wb_service.create_workbook_v2(wb_text)

    # Start workflow execution.
    wf_1_ex = self.engine.start_workflow('wb.wf1')

    self.await_workflow_state(wf_1_ex.id, states.RUNNING)

    # Load all executions inside one transaction so lazy relationships
    # (task_executions, executions) are materialized for the asserts.
    with db_api.transaction():
        wf_execs = db_api.get_workflow_executions()

        # Get objects for the parent workflow execution.
        wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1')
        wf_1_task_execs = wf_1_ex.task_executions

        wf_1_task_1_ex = self._assert_single_item(
            wf_1_ex.task_executions,
            name='task1'
        )

        wf_1_task_1_action_exs = wf_1_task_1_ex.executions

        wf_1_task_2_ex = self._assert_single_item(
            wf_1_ex.task_executions,
            name='task2'
        )

        wf_1_task_2_action_exs = wf_1_task_2_ex.executions

        # Get objects for the subworkflow executions.
        wf_2_ex = self._assert_single_item(wf_execs, name='wb.wf2')
        wf_2_task_execs = wf_2_ex.task_executions

        wf_2_task_1_ex = self._assert_single_item(
            wf_2_ex.task_executions,
            name='task1'
        )

        wf_2_task_1_action_exs = db_api.get_action_executions(
            task_execution_id=wf_2_task_1_ex.id
        )

        wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3')
        wf_3_task_execs = wf_3_ex.task_executions

        wf_3_task_1_ex = self._assert_single_item(
            wf_3_ex.task_executions,
            name='task1'
        )

        wf_3_task_1_action_exs = db_api.get_action_executions(
            task_execution_id=wf_3_task_1_ex.id
        )

    # Initial state: everything is RUNNING and each subworkflow task's
    # action execution is the subworkflow execution itself.
    self.assertEqual(states.RUNNING, wf_1_ex.state)
    self.assertEqual(2, len(wf_1_task_execs))
    self.assertEqual(states.RUNNING, wf_1_task_1_ex.state)
    self.assertEqual(states.RUNNING, wf_1_task_2_ex.state)
    self.assertEqual(1, len(wf_1_task_1_action_exs))
    self.assertEqual(states.RUNNING, wf_1_task_1_action_exs[0].state)
    self.assertEqual(wf_1_task_1_action_exs[0].id, wf_2_ex.id)
    self.assertEqual(1, len(wf_1_task_2_action_exs))
    self.assertEqual(states.RUNNING, wf_1_task_2_action_exs[0].state)
    self.assertEqual(wf_1_task_2_action_exs[0].id, wf_3_ex.id)
    self.assertEqual(states.RUNNING, wf_2_ex.state)
    self.assertEqual(1, len(wf_2_task_execs))
    self.assertEqual(states.RUNNING, wf_2_task_1_ex.state)
    self.assertEqual(1, len(wf_2_task_1_action_exs))
    self.assertEqual(states.RUNNING, wf_2_task_1_action_exs[0].state)
    self.assertEqual(states.RUNNING, wf_3_ex.state)
    self.assertEqual(1, len(wf_3_task_execs))
    self.assertEqual(states.RUNNING, wf_3_task_1_ex.state)
    self.assertEqual(1, len(wf_3_task_1_action_exs))
    self.assertEqual(states.RUNNING, wf_3_task_1_action_exs[0].state)

    # Pause the subworkflow.
    self.engine.pause_workflow(wf_2_ex.id)

    self.await_workflow_paused(wf_2_ex.id)

    with db_api.transaction():
        wf_execs = db_api.get_workflow_executions()

        # Get objects for the parent workflow execution.
        wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1')

        wf_1_task_1_ex = self._assert_single_item(
            wf_1_ex.task_executions,
            name='task1'
        )

        wf_1_task_1_action_exs = wf_1_task_1_ex.executions

        wf_1_task_2_ex = self._assert_single_item(
            wf_1_ex.task_executions,
            name='task2'
        )

        wf_1_task_2_action_exs = wf_1_task_2_ex.executions

        # Get objects for the subworkflow executions.
        wf_2_ex = self._assert_single_item(wf_execs, name='wb.wf2')

        wf_2_task_1_ex = self._assert_single_item(
            wf_2_ex.task_executions,
            name='task1'
        )

        wf_2_task_1_action_exs = db_api.get_action_executions(
            task_execution_id=wf_2_task_1_ex.id
        )

        wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3')

        wf_3_task_1_ex = self._assert_single_item(
            wf_3_ex.task_executions,
            name='task1'
        )

        wf_3_task_1_action_exs = db_api.get_action_executions(
            task_execution_id=wf_3_task_1_ex.id
        )

    # Pausing wf2 cascaded up to the parent (wf1) and across to the
    # sibling subworkflow (wf3); running async actions stay RUNNING.
    self.assertEqual(states.PAUSED, wf_2_ex.state)
    self.assertEqual(states.RUNNING, wf_2_task_1_ex.state)
    self.assertEqual(states.RUNNING, wf_2_task_1_action_exs[0].state)
    self.assertEqual(states.PAUSED, wf_3_ex.state)
    self.assertEqual(states.RUNNING, wf_3_task_1_ex.state)
    self.assertEqual(states.RUNNING, wf_3_task_1_action_exs[0].state)
    self.assertEqual(states.PAUSED, wf_1_task_1_action_exs[0].state)
    self.assertEqual(states.PAUSED, wf_1_task_1_ex.state)
    self.assertEqual(states.PAUSED, wf_1_task_2_action_exs[0].state)
    self.assertEqual(states.PAUSED, wf_1_task_2_ex.state)
    self.assertEqual(states.PAUSED, wf_1_ex.state)

    # Resume the 1st subworkflow.
    self.engine.resume_workflow(wf_2_ex.id)

    self.await_workflow_running(wf_2_ex.id)

    with db_api.transaction():
        wf_execs = db_api.get_workflow_executions()

        # Get objects for the parent workflow execution.
        wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1')

        wf_1_task_1_ex = self._assert_single_item(
            wf_1_ex.task_executions,
            name='task1'
        )

        wf_1_task_1_action_exs = wf_1_task_1_ex.executions

        wf_1_task_2_ex = self._assert_single_item(
            wf_1_ex.task_executions,
            name='task2'
        )

        wf_1_task_2_action_exs = wf_1_task_2_ex.executions

        # Get objects for the subworkflow executions.
        wf_2_ex = self._assert_single_item(wf_execs, name='wb.wf2')

        wf_2_task_1_ex = self._assert_single_item(
            wf_2_ex.task_executions,
            name='task1'
        )

        wf_2_task_1_action_exs = db_api.get_action_executions(
            task_execution_id=wf_2_task_1_ex.id
        )

        wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3')

        wf_3_task_1_ex = self._assert_single_item(
            wf_3_ex.task_executions,
            name='task1'
        )

        wf_3_task_1_action_exs = db_api.get_action_executions(
            task_execution_id=wf_3_task_1_ex.id
        )

    # Only wf2 (and its task in wf1) resumed; wf3 and wf1 stay PAUSED.
    self.assertEqual(states.RUNNING, wf_2_ex.state)
    self.assertEqual(states.RUNNING, wf_2_task_1_ex.state)
    self.assertEqual(states.RUNNING, wf_2_task_1_action_exs[0].state)
    self.assertEqual(states.PAUSED, wf_3_ex.state)
    self.assertEqual(states.RUNNING, wf_3_task_1_ex.state)
    self.assertEqual(states.RUNNING, wf_3_task_1_action_exs[0].state)
    self.assertEqual(states.RUNNING, wf_1_task_1_action_exs[0].state)
    self.assertEqual(states.RUNNING, wf_1_task_1_ex.state)
    self.assertEqual(states.PAUSED, wf_1_task_2_action_exs[0].state)
    self.assertEqual(states.PAUSED, wf_1_task_2_ex.state)
    self.assertEqual(states.PAUSED, wf_1_ex.state)

    # Complete action execution of 1st subworkflow.
    self.engine.on_action_complete(
        wf_2_task_1_action_exs[0].id,
        ml_actions.Result(data={'result': 'foobar'})
    )

    self.await_workflow_success(wf_2_ex.id)
    self.await_task_success(wf_1_task_1_ex.id)

    with db_api.transaction():
        wf_execs = db_api.get_workflow_executions()

        # Get objects for the parent workflow execution.
        wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1')

        wf_1_task_1_ex = self._assert_single_item(
            wf_1_ex.task_executions,
            name='task1'
        )

        wf_1_task_1_action_exs = wf_1_task_1_ex.executions

        wf_1_task_2_ex = self._assert_single_item(
            wf_1_ex.task_executions,
            name='task2'
        )

        wf_1_task_2_action_exs = wf_1_task_2_ex.executions

        # Get objects for the subworkflow executions.
        wf_2_ex = self._assert_single_item(wf_execs, name='wb.wf2')

        wf_2_task_1_ex = self._assert_single_item(
            wf_2_ex.task_executions,
            name='task1'
        )

        wf_2_task_1_action_exs = db_api.get_action_executions(
            task_execution_id=wf_2_task_1_ex.id
        )

        wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3')

        wf_3_task_1_ex = self._assert_single_item(
            wf_3_ex.task_executions,
            name='task1'
        )

        wf_3_task_1_action_exs = db_api.get_action_executions(
            task_execution_id=wf_3_task_1_ex.id
        )

    # wf2 finished while wf3 is still paused, so wf1 remains PAUSED.
    self.assertEqual(states.SUCCESS, wf_2_ex.state)
    self.assertEqual(states.SUCCESS, wf_2_task_1_ex.state)
    self.assertEqual(states.SUCCESS, wf_2_task_1_action_exs[0].state)
    self.assertEqual(states.PAUSED, wf_3_ex.state)
    self.assertEqual(states.RUNNING, wf_3_task_1_ex.state)
    self.assertEqual(states.RUNNING, wf_3_task_1_action_exs[0].state)
    self.assertEqual(states.SUCCESS, wf_1_task_1_action_exs[0].state)
    self.assertEqual(states.SUCCESS, wf_1_task_1_ex.state)
    self.assertEqual(states.PAUSED, wf_1_task_2_action_exs[0].state)
    self.assertEqual(states.PAUSED, wf_1_task_2_ex.state)
    self.assertEqual(states.PAUSED, wf_1_ex.state)

    # Resume the 2nd subworkflow.
    self.engine.resume_workflow(wf_3_ex.id)

    self.await_workflow_running(wf_3_ex.id)

    with db_api.transaction():
        wf_execs = db_api.get_workflow_executions()

        # Get objects for the parent workflow execution.
        wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1')

        wf_1_task_1_ex = self._assert_single_item(
            wf_1_ex.task_executions,
            name='task1'
        )

        wf_1_task_1_action_exs = wf_1_task_1_ex.executions

        wf_1_task_2_ex = self._assert_single_item(
            wf_1_ex.task_executions,
            name='task2'
        )

        wf_1_task_2_action_exs = wf_1_task_2_ex.executions

        # Get objects for the subworkflow executions.
        wf_2_ex = self._assert_single_item(wf_execs, name='wb.wf2')

        wf_2_task_1_ex = self._assert_single_item(
            wf_2_ex.task_executions,
            name='task1'
        )

        wf_2_task_1_action_exs = db_api.get_action_executions(
            task_execution_id=wf_2_task_1_ex.id
        )

        wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3')

        wf_3_task_1_ex = self._assert_single_item(
            wf_3_ex.task_executions,
            name='task1'
        )

        wf_3_task_1_action_exs = db_api.get_action_executions(
            task_execution_id=wf_3_task_1_ex.id
        )

    # Resuming the last paused subworkflow brings the parent back to
    # RUNNING as well.
    self.assertEqual(states.SUCCESS, wf_2_ex.state)
    self.assertEqual(states.SUCCESS, wf_2_task_1_ex.state)
    self.assertEqual(states.SUCCESS, wf_2_task_1_action_exs[0].state)
    self.assertEqual(states.RUNNING, wf_3_ex.state)
    self.assertEqual(states.RUNNING, wf_3_task_1_ex.state)
    self.assertEqual(states.RUNNING, wf_3_task_1_action_exs[0].state)
    self.assertEqual(states.SUCCESS, wf_1_task_1_action_exs[0].state)
    self.assertEqual(states.SUCCESS, wf_1_task_1_ex.state)
    self.assertEqual(states.RUNNING, wf_1_task_2_action_exs[0].state)
    self.assertEqual(states.RUNNING, wf_1_task_2_ex.state)
    self.assertEqual(states.RUNNING, wf_1_ex.state)

    # Complete action execution of 2nd subworkflow.
    self.engine.on_action_complete(
        wf_3_task_1_action_exs[0].id,
        ml_actions.Result(data={'result': 'foobar'})
    )

    self.await_workflow_success(wf_3_ex.id)
    self.await_workflow_success(wf_1_ex.id)

    with db_api.transaction():
        wf_execs = db_api.get_workflow_executions()

        # Get objects for the parent workflow execution.
        wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1')
        wf_1_task_execs = wf_1_ex.task_executions

        wf_1_task_1_ex = self._assert_single_item(
            wf_1_ex.task_executions,
            name='task1'
        )

        wf_1_task_1_action_exs = wf_1_task_1_ex.executions

        wf_1_task_2_ex = self._assert_single_item(
            wf_1_ex.task_executions,
            name='task2'
        )

        wf_1_task_2_action_exs = wf_1_task_2_ex.executions

        wf_1_task_3_ex = self._assert_single_item(
            wf_1_ex.task_executions,
            name='task3'
        )

        # Get objects for the subworkflow executions.
        wf_2_ex = self._assert_single_item(wf_execs, name='wb.wf2')
        wf_2_task_execs = wf_2_ex.task_executions

        wf_2_task_1_ex = self._assert_single_item(
            wf_2_ex.task_executions,
            name='task1'
        )

        wf_2_task_2_ex = self._assert_single_item(
            wf_2_ex.task_executions,
            name='task2'
        )

        wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3')
        wf_3_task_execs = wf_3_ex.task_executions

        wf_3_task_1_ex = self._assert_single_item(
            wf_3_ex.task_executions,
            name='task1'
        )

        wf_3_task_2_ex = self._assert_single_item(
            wf_3_ex.task_executions,
            name='task2'
        )

    # Final state: every workflow, task and action execution succeeded,
    # and the join task (task3) ran in the parent.
    self.assertEqual(states.SUCCESS, wf_1_ex.state)
    self.assertEqual(3, len(wf_1_task_execs))
    self.assertEqual(states.SUCCESS, wf_1_task_1_ex.state)
    self.assertEqual(states.SUCCESS, wf_1_task_2_ex.state)
    self.assertEqual(states.SUCCESS, wf_1_task_3_ex.state)
    self.assertEqual(states.SUCCESS, wf_1_task_1_action_exs[0].state)
    self.assertEqual(states.SUCCESS, wf_1_task_2_action_exs[0].state)
    self.assertEqual(states.SUCCESS, wf_2_ex.state)
    self.assertEqual(2, len(wf_2_task_execs))
    self.assertEqual(states.SUCCESS, wf_2_task_1_ex.state)
    self.assertEqual(states.SUCCESS, wf_2_task_2_ex.state)
    self.assertEqual(states.SUCCESS, wf_3_ex.state)
    self.assertEqual(2, len(wf_3_task_execs))
    self.assertEqual(states.SUCCESS, wf_3_task_1_ex.state)
    self.assertEqual(states.SUCCESS, wf_3_task_2_ex.state)
    def test_pause_resume_cascade_down_to_with_items_subworkflows(self):
        """Pausing the parent workflow cascades down into all subworkflows.

        Scenario: wf1 runs wf2 three times via a with-items task (task1)
        and wf3 once (task2). Pausing wf1 must transition every subworkflow
        execution (all three wf2 instances plus wf3) and both parent tasks
        to PAUSED, while the already-dispatched async actions inside the
        subworkflows stay RUNNING. Resuming wf1 must cascade back down to
        RUNNING, after which completing the async actions drives the whole
        execution tree to SUCCESS.
        """
        wb_text = """
        version: '2.0'
        name: wb
        workflows:
          wf1:
            tasks:
              task1:
                with-items: i in <% range(3) %>
                workflow: wf2
                on-success: task3
              task2:
                workflow: wf3
                on-success: task3
              task3:
                join: all
          wf2:
            tasks:
              task1:
                action: std.async_noop
                on-success: task2
              task2:
                action: std.noop
          wf3:
            tasks:
              task1:
                action: std.async_noop
                on-success: task2
              task2:
                action: std.noop
        """

        wb_service.create_workbook_v2(wb_text)

        # Start workflow execution.
        wf_1_ex = self.engine.start_workflow('wb.wf1')

        self.await_workflow_state(wf_1_ex.id, states.RUNNING)

        # All object graph traversal happens inside a transaction so that
        # lazy-loaded relationships (task_executions, executions) resolve.
        with db_api.transaction():
            wf_execs = db_api.get_workflow_executions()

            # Get objects for the parent workflow execution.
            wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1')

            wf_1_task_execs = wf_1_ex.task_executions

            wf_1_task_1_ex = self._assert_single_item(
                wf_1_ex.task_executions,
                name='task1'
            )

            # Sort the with-items action executions by item index so the
            # positional checks below are deterministic.
            wf_1_task_1_action_exs = sorted(
                wf_1_task_1_ex.executions,
                key=lambda x: x['runtime_context']['index']
            )

            wf_1_task_2_ex = self._assert_single_item(
                wf_1_ex.task_executions,
                name='task2'
            )

            wf_1_task_2_action_exs = wf_1_task_2_ex.executions

            # Get objects for the with-items subworkflow executions.
            # Each with-items action execution id doubles as the id of the
            # corresponding subworkflow execution.
            wf_2_ex_1 = db_api.get_workflow_execution(
                wf_1_task_1_action_exs[0].id
            )

            wf_2_ex_1_task_execs = wf_2_ex_1.task_executions

            wf_2_ex_1_task_1_ex = self._assert_single_item(
                wf_2_ex_1.task_executions,
                name='task1'
            )

            wf_2_ex_1_task_1_action_exs = db_api.get_action_executions(
                task_execution_id=wf_2_ex_1_task_1_ex.id
            )

            wf_2_ex_2 = db_api.get_workflow_execution(
                wf_1_task_1_action_exs[1].id
            )

            wf_2_ex_2_task_execs = wf_2_ex_2.task_executions

            wf_2_ex_2_task_1_ex = self._assert_single_item(
                wf_2_ex_2.task_executions,
                name='task1'
            )

            wf_2_ex_2_task_1_action_exs = db_api.get_action_executions(
                task_execution_id=wf_2_ex_2_task_1_ex.id
            )

            wf_2_ex_3 = db_api.get_workflow_execution(
                wf_1_task_1_action_exs[2].id
            )

            wf_2_ex_3_task_execs = wf_2_ex_3.task_executions

            wf_2_ex_3_task_1_ex = self._assert_single_item(
                wf_2_ex_3.task_executions,
                name='task1'
            )

            wf_2_ex_3_task_1_action_exs = db_api.get_action_executions(
                task_execution_id=wf_2_ex_3_task_1_ex.id
            )

            # Get objects for the wf3 subworkflow execution.
            wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3')

            wf_3_task_execs = wf_3_ex.task_executions

            wf_3_task_1_ex = self._assert_single_item(
                wf_3_ex.task_executions,
                name='task1'
            )

            wf_3_task_1_action_exs = db_api.get_action_executions(
                task_execution_id=wf_3_task_1_ex.id
            )

            # Check state of parent workflow execution.
            self.assertEqual(states.RUNNING, wf_1_ex.state)
            self.assertEqual(2, len(wf_1_task_execs))
            self.assertEqual(states.RUNNING, wf_1_task_1_ex.state)
            self.assertEqual(states.RUNNING, wf_1_task_2_ex.state)
            self.assertEqual(3, len(wf_1_task_1_action_exs))

            # Check state of wf2 (1) subworkflow execution.
            self.assertEqual(states.RUNNING, wf_1_task_1_action_exs[0].state)
            self.assertEqual(wf_1_task_1_action_exs[0].id, wf_2_ex_1.id)
            self.assertEqual(states.RUNNING, wf_2_ex_1.state)
            self.assertEqual(1, len(wf_2_ex_1_task_execs))
            self.assertEqual(states.RUNNING, wf_2_ex_1_task_1_ex.state)
            self.assertEqual(1, len(wf_2_ex_1_task_1_action_exs))
            self.assertEqual(states.RUNNING, wf_2_ex_1_task_1_action_exs[0].state)

            # Check state of wf2 (2) subworkflow execution.
            self.assertEqual(states.RUNNING, wf_1_task_1_action_exs[1].state)
            self.assertEqual(wf_1_task_1_action_exs[1].id, wf_2_ex_2.id)
            self.assertEqual(states.RUNNING, wf_2_ex_2.state)
            self.assertEqual(1, len(wf_2_ex_2_task_execs))
            self.assertEqual(states.RUNNING, wf_2_ex_2_task_1_ex.state)
            self.assertEqual(1, len(wf_2_ex_2_task_1_action_exs))
            self.assertEqual(states.RUNNING, wf_2_ex_2_task_1_action_exs[0].state)

            # Check state of wf2 (3) subworkflow execution.
            self.assertEqual(states.RUNNING, wf_1_task_1_action_exs[2].state)
            self.assertEqual(wf_1_task_1_action_exs[2].id, wf_2_ex_3.id)
            self.assertEqual(states.RUNNING, wf_2_ex_3.state)
            self.assertEqual(1, len(wf_2_ex_3_task_execs))
            self.assertEqual(states.RUNNING, wf_2_ex_3_task_1_ex.state)
            self.assertEqual(1, len(wf_2_ex_3_task_1_action_exs))
            self.assertEqual(states.RUNNING, wf_2_ex_3_task_1_action_exs[0].state)

            # Check state of wf3 subworkflow execution.
            self.assertEqual(1, len(wf_1_task_2_action_exs))
            self.assertEqual(states.RUNNING, wf_1_task_2_action_exs[0].state)
            self.assertEqual(wf_1_task_2_action_exs[0].id, wf_3_ex.id)
            self.assertEqual(states.RUNNING, wf_3_ex.state)
            self.assertEqual(1, len(wf_3_task_execs))
            self.assertEqual(states.RUNNING, wf_3_task_1_ex.state)
            self.assertEqual(1, len(wf_3_task_1_action_exs))
            self.assertEqual(states.RUNNING, wf_3_task_1_action_exs[0].state)

        # Pause the main workflow. The pause is expected to cascade down
        # into every subworkflow execution.
        self.engine.pause_workflow(wf_1_ex.id)

        self.await_workflow_paused(wf_2_ex_1.id)
        self.await_workflow_paused(wf_2_ex_2.id)
        self.await_workflow_paused(wf_2_ex_3.id)
        self.await_workflow_paused(wf_3_ex.id)
        self.await_task_paused(wf_1_task_1_ex.id)
        self.await_task_paused(wf_1_task_2_ex.id)
        self.await_workflow_paused(wf_1_ex.id)

        with db_api.transaction():
            wf_execs = db_api.get_workflow_executions()

            # Get objects for the parent workflow execution.
            wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1')

            wf_1_task_1_ex = self._assert_single_item(
                wf_1_ex.task_executions,
                name='task1'
            )

            wf_1_task_1_action_exs = sorted(
                wf_1_task_1_ex.executions,
                key=lambda x: x['runtime_context']['index']
            )

            wf_1_task_2_ex = self._assert_single_item(
                wf_1_ex.task_executions,
                name='task2'
            )

            wf_1_task_2_action_exs = wf_1_task_2_ex.executions

            # Get objects for the with-items subworkflow executions.
            wf_2_ex_1 = db_api.get_workflow_execution(
                wf_1_task_1_action_exs[0].id
            )

            wf_2_ex_1_task_1_ex = self._assert_single_item(
                wf_2_ex_1.task_executions,
                name='task1'
            )

            wf_2_ex_1_task_1_action_exs = db_api.get_action_executions(
                task_execution_id=wf_2_ex_1_task_1_ex.id
            )

            wf_2_ex_2 = db_api.get_workflow_execution(
                wf_1_task_1_action_exs[1].id
            )

            wf_2_ex_2_task_1_ex = self._assert_single_item(
                wf_2_ex_2.task_executions,
                name='task1'
            )

            wf_2_ex_2_task_1_action_exs = db_api.get_action_executions(
                task_execution_id=wf_2_ex_2_task_1_ex.id
            )

            wf_2_ex_3 = db_api.get_workflow_execution(
                wf_1_task_1_action_exs[2].id
            )

            wf_2_ex_3_task_1_ex = self._assert_single_item(
                wf_2_ex_3.task_executions,
                name='task1'
            )

            wf_2_ex_3_task_1_action_exs = db_api.get_action_executions(
                task_execution_id=wf_2_ex_3_task_1_ex.id
            )

            # Get objects for the wf3 subworkflow execution.
            wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3')

            wf_3_task_1_ex = self._assert_single_item(
                wf_3_ex.task_executions,
                name='task1'
            )

            wf_3_task_1_action_exs = db_api.get_action_executions(
                task_execution_id=wf_3_task_1_ex.id
            )

            # Check state of parent workflow execution.
            self.assertEqual(states.PAUSED, wf_1_ex.state)
            self.assertEqual(states.PAUSED, wf_1_task_1_ex.state)
            self.assertEqual(states.PAUSED, wf_1_task_2_ex.state)

            # Check state of wf2 (1) subworkflow execution.
            # The workflow execution is paused but the already-running
            # async action inside it remains RUNNING.
            self.assertEqual(states.PAUSED, wf_1_task_1_action_exs[0].state)
            self.assertEqual(states.PAUSED, wf_2_ex_1.state)
            self.assertEqual(states.RUNNING, wf_2_ex_1_task_1_ex.state)
            self.assertEqual(states.RUNNING, wf_2_ex_1_task_1_action_exs[0].state)

            # Check state of wf2 (2) subworkflow execution.
            self.assertEqual(states.PAUSED, wf_1_task_1_action_exs[1].state)
            self.assertEqual(states.PAUSED, wf_2_ex_2.state)
            self.assertEqual(states.RUNNING, wf_2_ex_2_task_1_ex.state)
            self.assertEqual(states.RUNNING, wf_2_ex_2_task_1_action_exs[0].state)

            # Check state of wf2 (3) subworkflow execution.
            self.assertEqual(states.PAUSED, wf_1_task_1_action_exs[2].state)
            self.assertEqual(states.PAUSED, wf_2_ex_3.state)
            self.assertEqual(states.RUNNING, wf_2_ex_3_task_1_ex.state)
            self.assertEqual(states.RUNNING, wf_2_ex_3_task_1_action_exs[0].state)

            # Check state of wf3 subworkflow execution.
            self.assertEqual(states.PAUSED, wf_1_task_2_action_exs[0].state)
            self.assertEqual(states.PAUSED, wf_3_ex.state)
            self.assertEqual(states.RUNNING, wf_3_task_1_ex.state)
            self.assertEqual(states.RUNNING, wf_3_task_1_action_exs[0].state)

        # Resume the main workflow. The resume must cascade back down to
        # all subworkflow executions.
        self.engine.resume_workflow(wf_1_ex.id)

        self.await_workflow_running(wf_2_ex_1.id)
        self.await_workflow_running(wf_2_ex_2.id)
        self.await_workflow_running(wf_2_ex_3.id)
        self.await_workflow_running(wf_3_ex.id)
        self.await_task_running(wf_1_task_1_ex.id)
        self.await_task_running(wf_1_task_2_ex.id)
        self.await_workflow_running(wf_1_ex.id)

        with db_api.transaction():
            wf_execs = db_api.get_workflow_executions()

            # Get objects for the parent workflow execution.
            wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1')

            wf_1_task_1_ex = self._assert_single_item(
                wf_1_ex.task_executions,
                name='task1'
            )

            wf_1_task_1_action_exs = sorted(
                wf_1_task_1_ex.executions,
                key=lambda x: x['runtime_context']['index']
            )

            wf_1_task_2_ex = self._assert_single_item(
                wf_1_ex.task_executions,
                name='task2'
            )

            wf_1_task_2_action_exs = wf_1_task_2_ex.executions

            # Get objects for the with-items subworkflow executions.
            wf_2_ex_1 = db_api.get_workflow_execution(
                wf_1_task_1_action_exs[0].id
            )

            wf_2_ex_1_task_1_ex = self._assert_single_item(
                wf_2_ex_1.task_executions,
                name='task1'
            )

            wf_2_ex_1_task_1_action_exs = db_api.get_action_executions(
                task_execution_id=wf_2_ex_1_task_1_ex.id
            )

            wf_2_ex_2 = db_api.get_workflow_execution(
                wf_1_task_1_action_exs[1].id
            )

            wf_2_ex_2_task_1_ex = self._assert_single_item(
                wf_2_ex_2.task_executions,
                name='task1'
            )

            wf_2_ex_2_task_1_action_exs = db_api.get_action_executions(
                task_execution_id=wf_2_ex_2_task_1_ex.id
            )

            wf_2_ex_3 = db_api.get_workflow_execution(
                wf_1_task_1_action_exs[2].id
            )

            wf_2_ex_3_task_1_ex = self._assert_single_item(
                wf_2_ex_3.task_executions,
                name='task1'
            )

            wf_2_ex_3_task_1_action_exs = db_api.get_action_executions(
                task_execution_id=wf_2_ex_3_task_1_ex.id
            )

            # Get objects for the wf3 subworkflow execution.
            wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3')

            wf_3_task_1_ex = self._assert_single_item(
                wf_3_ex.task_executions,
                name='task1'
            )

            wf_3_task_1_action_exs = db_api.get_action_executions(
                task_execution_id=wf_3_task_1_ex.id
            )

            # Check state of parent workflow execution.
            self.assertEqual(states.RUNNING, wf_1_ex.state)
            self.assertEqual(states.RUNNING, wf_1_task_1_ex.state)
            self.assertEqual(states.RUNNING, wf_1_task_2_ex.state)

            # Check state of wf2 (1) subworkflow execution.
            self.assertEqual(states.RUNNING, wf_1_task_1_action_exs[0].state)
            self.assertEqual(states.RUNNING, wf_2_ex_1.state)
            self.assertEqual(states.RUNNING, wf_2_ex_1_task_1_ex.state)
            self.assertEqual(states.RUNNING, wf_2_ex_1_task_1_action_exs[0].state)

            # Check state of wf2 (2) subworkflow execution.
            self.assertEqual(states.RUNNING, wf_1_task_1_action_exs[1].state)
            self.assertEqual(states.RUNNING, wf_2_ex_2.state)
            self.assertEqual(states.RUNNING, wf_2_ex_2_task_1_ex.state)
            self.assertEqual(states.RUNNING, wf_2_ex_2_task_1_action_exs[0].state)

            # Check state of wf2 (3) subworkflow execution.
            self.assertEqual(states.RUNNING, wf_1_task_1_action_exs[2].state)
            self.assertEqual(states.RUNNING, wf_2_ex_3.state)
            self.assertEqual(states.RUNNING, wf_2_ex_3_task_1_ex.state)
            self.assertEqual(states.RUNNING, wf_2_ex_3_task_1_action_exs[0].state)

            # Check state of wf3 subworkflow execution.
            self.assertEqual(states.RUNNING, wf_1_task_2_action_exs[0].state)
            self.assertEqual(states.RUNNING, wf_3_ex.state)
            self.assertEqual(states.RUNNING, wf_3_task_1_ex.state)
            self.assertEqual(states.RUNNING, wf_3_task_1_action_exs[0].state)

        # Complete action execution of subworkflows. Completing each
        # std.async_noop lets the corresponding subworkflow finish.
        self.engine.on_action_complete(
            wf_2_ex_1_task_1_action_exs[0].id,
            ml_actions.Result(data={'result': 'foobar'})
        )

        self.engine.on_action_complete(
            wf_2_ex_2_task_1_action_exs[0].id,
            ml_actions.Result(data={'result': 'foobar'})
        )

        self.engine.on_action_complete(
            wf_2_ex_3_task_1_action_exs[0].id,
            ml_actions.Result(data={'result': 'foobar'})
        )

        self.engine.on_action_complete(
            wf_3_task_1_action_exs[0].id,
            ml_actions.Result(data={'result': 'foobar'})
        )

        self.await_workflow_success(wf_2_ex_1.id)
        self.await_workflow_success(wf_2_ex_2.id)
        self.await_workflow_success(wf_2_ex_3.id)
        self.await_workflow_success(wf_3_ex.id)
        self.await_task_success(wf_1_task_1_ex.id)
        self.await_task_success(wf_1_task_2_ex.id)
        self.await_workflow_success(wf_1_ex.id)

        with db_api.transaction():
            wf_execs = db_api.get_workflow_executions()

            # Get objects for the parent workflow execution.
            wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1')

            wf_1_task_1_ex = self._assert_single_item(
                wf_1_ex.task_executions,
                name='task1'
            )

            wf_1_task_1_action_exs = sorted(
                wf_1_task_1_ex.executions,
                key=lambda x: x['runtime_context']['index']
            )

            wf_1_task_2_ex = self._assert_single_item(
                wf_1_ex.task_executions,
                name='task2'
            )

            wf_1_task_2_action_exs = wf_1_task_2_ex.executions

            # Get objects for the with-items subworkflow executions.
            wf_2_ex_1 = db_api.get_workflow_execution(
                wf_1_task_1_action_exs[0].id
            )

            wf_2_ex_1_task_1_ex = self._assert_single_item(
                wf_2_ex_1.task_executions,
                name='task1'
            )

            wf_2_ex_1_task_1_action_exs = db_api.get_action_executions(
                task_execution_id=wf_2_ex_1_task_1_ex.id
            )

            wf_2_ex_2 = db_api.get_workflow_execution(
                wf_1_task_1_action_exs[1].id
            )

            wf_2_ex_2_task_1_ex = self._assert_single_item(
                wf_2_ex_2.task_executions,
                name='task1'
            )

            wf_2_ex_2_task_1_action_exs = db_api.get_action_executions(
                task_execution_id=wf_2_ex_2_task_1_ex.id
            )

            wf_2_ex_3 = db_api.get_workflow_execution(
                wf_1_task_1_action_exs[2].id
            )

            wf_2_ex_3_task_1_ex = self._assert_single_item(
                wf_2_ex_3.task_executions,
                name='task1'
            )

            wf_2_ex_3_task_1_action_exs = db_api.get_action_executions(
                task_execution_id=wf_2_ex_3_task_1_ex.id
            )

            # Get objects for the wf3 subworkflow execution.
            wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3')

            wf_3_task_1_ex = self._assert_single_item(
                wf_3_ex.task_executions,
                name='task1'
            )

            wf_3_task_1_action_exs = db_api.get_action_executions(
                task_execution_id=wf_3_task_1_ex.id
            )

            # Check state of parent workflow execution.
            self.assertEqual(states.SUCCESS, wf_1_ex.state)
            self.assertEqual(states.SUCCESS, wf_1_task_1_ex.state)
            self.assertEqual(states.SUCCESS, wf_1_task_2_ex.state)

            # Check state of wf2 (1) subworkflow execution.
            self.assertEqual(states.SUCCESS, wf_1_task_1_action_exs[0].state)
            self.assertEqual(states.SUCCESS, wf_2_ex_1.state)
            self.assertEqual(states.SUCCESS, wf_2_ex_1_task_1_ex.state)
            self.assertEqual(states.SUCCESS, wf_2_ex_1_task_1_action_exs[0].state)

            # Check state of wf2 (2) subworkflow execution.
            self.assertEqual(states.SUCCESS, wf_1_task_1_action_exs[1].state)
            self.assertEqual(states.SUCCESS, wf_2_ex_2.state)
            self.assertEqual(states.SUCCESS, wf_2_ex_2_task_1_ex.state)
            self.assertEqual(states.SUCCESS, wf_2_ex_2_task_1_action_exs[0].state)

            # Check state of wf2 (3) subworkflow execution.
            self.assertEqual(states.SUCCESS, wf_1_task_1_action_exs[2].state)
            self.assertEqual(states.SUCCESS, wf_2_ex_3.state)
            self.assertEqual(states.SUCCESS, wf_2_ex_3_task_1_ex.state)
            self.assertEqual(states.SUCCESS, wf_2_ex_3_task_1_action_exs[0].state)

            # Check state of wf3 subworkflow execution.
            self.assertEqual(states.SUCCESS, wf_1_task_2_action_exs[0].state)
            self.assertEqual(states.SUCCESS, wf_3_ex.state)
            self.assertEqual(states.SUCCESS, wf_3_task_1_ex.state)
            self.assertEqual(states.SUCCESS, wf_3_task_1_action_exs[0].state)
    def test_pause_resume_cascade_up_from_with_items_subworkflow(self):
        """Pausing one with-items subworkflow cascades up and across.

        Scenario: wf1 runs wf2 three times via a with-items task (task1)
        and wf3 once (task2). Pausing a single wf2 instance is expected to
        cascade UP to the parent wf1 and, through it, to the sibling
        subworkflow executions (the other wf2 instances and wf3). Resuming
        only that one wf2 instance lets it complete while everything else
        stays PAUSED; the parent only goes back to RUNNING once the
        remaining subworkflows are explicitly resumed.
        """
        wb_text = """
        version: '2.0'
        name: wb
        workflows:
          wf1:
            tasks:
              task1:
                with-items: i in <% range(3) %>
                workflow: wf2
                on-success: task3
              task2:
                workflow: wf3
                on-success: task3
              task3:
                join: all
          wf2:
            tasks:
              task1:
                action: std.async_noop
                on-success: task2
              task2:
                action: std.noop
          wf3:
            tasks:
              task1:
                action: std.async_noop
                on-success: task2
              task2:
                action: std.noop
        """

        wb_service.create_workbook_v2(wb_text)

        # Start workflow execution.
        wf_1_ex = self.engine.start_workflow('wb.wf1')

        self.await_workflow_state(wf_1_ex.id, states.RUNNING)

        # All object graph traversal happens inside a transaction so that
        # lazy-loaded relationships (task_executions, executions) resolve.
        with db_api.transaction():
            wf_execs = db_api.get_workflow_executions()

            # Get objects for the parent workflow execution.
            wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1')

            wf_1_task_execs = wf_1_ex.task_executions

            wf_1_task_1_ex = self._assert_single_item(
                wf_1_ex.task_executions,
                name='task1'
            )

            # Sort the with-items action executions by item index so the
            # positional checks below are deterministic.
            wf_1_task_1_action_exs = sorted(
                wf_1_task_1_ex.executions,
                key=lambda x: x['runtime_context']['index']
            )

            wf_1_task_2_ex = self._assert_single_item(
                wf_1_ex.task_executions,
                name='task2'
            )

            wf_1_task_2_action_exs = wf_1_task_2_ex.executions

            # Get objects for the with-items subworkflow executions.
            # Each with-items action execution id doubles as the id of the
            # corresponding subworkflow execution.
            wf_2_ex_1 = db_api.get_workflow_execution(
                wf_1_task_1_action_exs[0].id
            )

            wf_2_ex_1_task_execs = wf_2_ex_1.task_executions

            wf_2_ex_1_task_1_ex = self._assert_single_item(
                wf_2_ex_1.task_executions,
                name='task1'
            )

            wf_2_ex_1_task_1_action_exs = db_api.get_action_executions(
                task_execution_id=wf_2_ex_1_task_1_ex.id
            )

            wf_2_ex_2 = db_api.get_workflow_execution(
                wf_1_task_1_action_exs[1].id
            )

            wf_2_ex_2_task_execs = wf_2_ex_2.task_executions

            wf_2_ex_2_task_1_ex = self._assert_single_item(
                wf_2_ex_2.task_executions,
                name='task1'
            )

            wf_2_ex_2_task_1_action_exs = db_api.get_action_executions(
                task_execution_id=wf_2_ex_2_task_1_ex.id
            )

            wf_2_ex_3 = db_api.get_workflow_execution(
                wf_1_task_1_action_exs[2].id
            )

            wf_2_ex_3_task_execs = wf_2_ex_3.task_executions

            wf_2_ex_3_task_1_ex = self._assert_single_item(
                wf_2_ex_3.task_executions,
                name='task1'
            )

            wf_2_ex_3_task_1_action_exs = db_api.get_action_executions(
                task_execution_id=wf_2_ex_3_task_1_ex.id
            )

            # Get objects for the wf3 subworkflow execution.
            wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3')

            wf_3_task_execs = wf_3_ex.task_executions

            wf_3_task_1_ex = self._assert_single_item(
                wf_3_ex.task_executions,
                name='task1'
            )

            wf_3_task_1_action_exs = db_api.get_action_executions(
                task_execution_id=wf_3_task_1_ex.id
            )

            # Check state of parent workflow execution.
            self.assertEqual(states.RUNNING, wf_1_ex.state)
            self.assertEqual(2, len(wf_1_task_execs))
            self.assertEqual(states.RUNNING, wf_1_task_1_ex.state)
            self.assertEqual(states.RUNNING, wf_1_task_2_ex.state)
            self.assertEqual(3, len(wf_1_task_1_action_exs))

            # Check state of wf2 (1) subworkflow execution.
            self.assertEqual(states.RUNNING, wf_1_task_1_action_exs[0].state)
            self.assertEqual(wf_1_task_1_action_exs[0].id, wf_2_ex_1.id)
            self.assertEqual(states.RUNNING, wf_2_ex_1.state)
            self.assertEqual(1, len(wf_2_ex_1_task_execs))
            self.assertEqual(states.RUNNING, wf_2_ex_1_task_1_ex.state)
            self.assertEqual(1, len(wf_2_ex_1_task_1_action_exs))
            self.assertEqual(states.RUNNING, wf_2_ex_1_task_1_action_exs[0].state)

            # Check state of wf2 (2) subworkflow execution.
            self.assertEqual(states.RUNNING, wf_1_task_1_action_exs[1].state)
            self.assertEqual(wf_1_task_1_action_exs[1].id, wf_2_ex_2.id)
            self.assertEqual(states.RUNNING, wf_2_ex_2.state)
            self.assertEqual(1, len(wf_2_ex_2_task_execs))
            self.assertEqual(states.RUNNING, wf_2_ex_2_task_1_ex.state)
            self.assertEqual(1, len(wf_2_ex_2_task_1_action_exs))
            self.assertEqual(states.RUNNING, wf_2_ex_2_task_1_action_exs[0].state)

            # Check state of wf2 (3) subworkflow execution.
            self.assertEqual(states.RUNNING, wf_1_task_1_action_exs[2].state)
            self.assertEqual(wf_1_task_1_action_exs[2].id, wf_2_ex_3.id)
            self.assertEqual(states.RUNNING, wf_2_ex_3.state)
            self.assertEqual(1, len(wf_2_ex_3_task_execs))
            self.assertEqual(states.RUNNING, wf_2_ex_3_task_1_ex.state)
            self.assertEqual(1, len(wf_2_ex_3_task_1_action_exs))
            self.assertEqual(states.RUNNING, wf_2_ex_3_task_1_action_exs[0].state)

            # Check state of wf3 subworkflow execution.
            self.assertEqual(1, len(wf_1_task_2_action_exs))
            self.assertEqual(states.RUNNING, wf_1_task_2_action_exs[0].state)
            self.assertEqual(wf_1_task_2_action_exs[0].id, wf_3_ex.id)
            self.assertEqual(states.RUNNING, wf_3_ex.state)
            self.assertEqual(1, len(wf_3_task_execs))
            self.assertEqual(states.RUNNING, wf_3_task_1_ex.state)
            self.assertEqual(1, len(wf_3_task_1_action_exs))
            self.assertEqual(states.RUNNING, wf_3_task_1_action_exs[0].state)

        # Pause one of the subworkflows in the with-items task. The pause
        # is expected to cascade up to the parent workflow and then down
        # to the sibling subworkflows.
        self.engine.pause_workflow(wf_2_ex_1.id)

        self.await_workflow_paused(wf_2_ex_1.id)
        self.await_workflow_paused(wf_2_ex_2.id)
        self.await_workflow_paused(wf_2_ex_3.id)
        self.await_workflow_paused(wf_3_ex.id)
        self.await_task_paused(wf_1_task_1_ex.id)
        self.await_task_paused(wf_1_task_2_ex.id)
        self.await_workflow_paused(wf_1_ex.id)

        with db_api.transaction():
            wf_execs = db_api.get_workflow_executions()

            # Get objects for the parent workflow execution.
            wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1')

            wf_1_task_1_ex = self._assert_single_item(
                wf_1_ex.task_executions,
                name='task1'
            )

            wf_1_task_1_action_exs = sorted(
                wf_1_task_1_ex.executions,
                key=lambda x: x['runtime_context']['index']
            )

            wf_1_task_2_ex = self._assert_single_item(
                wf_1_ex.task_executions,
                name='task2'
            )

            wf_1_task_2_action_exs = wf_1_task_2_ex.executions

            # Get objects for the with-items subworkflow executions.
            wf_2_ex_1 = db_api.get_workflow_execution(
                wf_1_task_1_action_exs[0].id
            )

            wf_2_ex_1_task_1_ex = self._assert_single_item(
                wf_2_ex_1.task_executions,
                name='task1'
            )

            wf_2_ex_1_task_1_action_exs = db_api.get_action_executions(
                task_execution_id=wf_2_ex_1_task_1_ex.id
            )

            wf_2_ex_2 = db_api.get_workflow_execution(
                wf_1_task_1_action_exs[1].id
            )

            wf_2_ex_2_task_1_ex = self._assert_single_item(
                wf_2_ex_2.task_executions,
                name='task1'
            )

            wf_2_ex_2_task_1_action_exs = db_api.get_action_executions(
                task_execution_id=wf_2_ex_2_task_1_ex.id
            )

            wf_2_ex_3 = db_api.get_workflow_execution(
                wf_1_task_1_action_exs[2].id
            )

            wf_2_ex_3_task_1_ex = self._assert_single_item(
                wf_2_ex_3.task_executions,
                name='task1'
            )

            wf_2_ex_3_task_1_action_exs = db_api.get_action_executions(
                task_execution_id=wf_2_ex_3_task_1_ex.id
            )

            # Get objects for the wf3 subworkflow execution.
            wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3')

            wf_3_task_1_ex = self._assert_single_item(
                wf_3_ex.task_executions,
                name='task1'
            )

            wf_3_task_1_action_exs = db_api.get_action_executions(
                task_execution_id=wf_3_task_1_ex.id
            )

            # Check state of parent workflow execution.
            self.assertEqual(states.PAUSED, wf_1_ex.state)
            self.assertEqual(states.PAUSED, wf_1_task_1_ex.state)
            self.assertEqual(states.PAUSED, wf_1_task_2_ex.state)

            # Check state of wf2 (1) subworkflow execution.
            # The workflow execution is paused but the already-running
            # async action inside it remains RUNNING.
            self.assertEqual(states.PAUSED, wf_1_task_1_action_exs[0].state)
            self.assertEqual(states.PAUSED, wf_2_ex_1.state)
            self.assertEqual(states.RUNNING, wf_2_ex_1_task_1_ex.state)
            self.assertEqual(states.RUNNING, wf_2_ex_1_task_1_action_exs[0].state)

            # Check state of wf2 (2) subworkflow execution.
            self.assertEqual(states.PAUSED, wf_1_task_1_action_exs[1].state)
            self.assertEqual(states.PAUSED, wf_2_ex_2.state)
            self.assertEqual(states.RUNNING, wf_2_ex_2_task_1_ex.state)
            self.assertEqual(states.RUNNING, wf_2_ex_2_task_1_action_exs[0].state)

            # Check state of wf2 (3) subworkflow execution.
            self.assertEqual(states.PAUSED, wf_1_task_1_action_exs[2].state)
            self.assertEqual(states.PAUSED, wf_2_ex_3.state)
            self.assertEqual(states.RUNNING, wf_2_ex_3_task_1_ex.state)
            self.assertEqual(states.RUNNING, wf_2_ex_3_task_1_action_exs[0].state)

            # Check state of wf3 subworkflow execution.
            self.assertEqual(states.PAUSED, wf_1_task_2_action_exs[0].state)
            self.assertEqual(states.PAUSED, wf_3_ex.state)
            self.assertEqual(states.RUNNING, wf_3_task_1_ex.state)
            self.assertEqual(states.RUNNING, wf_3_task_1_action_exs[0].state)

        # NOTE(rakhmerov): Since cascade pausing is not atomic we need
        # to make sure that all internal operations related to pausing
        # one of workflow executions 'wb.wf2' are completed. So we have
        # to look if any "_on_action_update" calls are scheduled.
        def _predicate() -> bool:
            # True once no "_on_action_update" delayed calls remain queued.
            return all(
                [
                    '_on_action_update' not in c.target_method_name
                    for c in db_api.get_delayed_calls()
                ]
            )

        self._await(_predicate)

        # Resume one of the subworkflows in the with-items task. Only that
        # one instance goes back to RUNNING; the siblings and the parent
        # remain PAUSED.
        self.engine.resume_workflow(wf_2_ex_1.id)

        self.await_workflow_running(wf_2_ex_1.id)
        self.await_workflow_paused(wf_2_ex_2.id)
        self.await_workflow_paused(wf_2_ex_3.id)
        self.await_workflow_paused(wf_3_ex.id)
        self.await_task_paused(wf_1_task_1_ex.id)
        self.await_task_paused(wf_1_task_2_ex.id)
        self.await_workflow_paused(wf_1_ex.id)

        # Complete action execution of the subworkflow that is resumed.
        self.engine.on_action_complete(
            wf_2_ex_1_task_1_action_exs[0].id,
            ml_actions.Result(data={'result': 'foobar'})
        )

        self.await_workflow_success(wf_2_ex_1.id)
        self.await_workflow_paused(wf_2_ex_2.id)
        self.await_workflow_paused(wf_2_ex_3.id)
        self.await_workflow_paused(wf_3_ex.id)
        self.await_task_paused(wf_1_task_1_ex.id)
        self.await_task_paused(wf_1_task_2_ex.id)
        self.await_workflow_paused(wf_1_ex.id)

        with db_api.transaction():
            wf_execs = db_api.get_workflow_executions()

            # Get objects for the parent workflow execution.
            wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1')

            wf_1_task_1_ex = self._assert_single_item(
                wf_1_ex.task_executions,
                name='task1'
            )

            wf_1_task_1_action_exs = sorted(
                wf_1_task_1_ex.executions,
                key=lambda x: x['runtime_context']['index']
            )

            wf_1_task_2_ex = self._assert_single_item(
                wf_1_ex.task_executions,
                name='task2'
            )

            wf_1_task_2_action_exs = wf_1_task_2_ex.executions

            # Get objects for the with-items subworkflow executions.
            wf_2_ex_1 = db_api.get_workflow_execution(
                wf_1_task_1_action_exs[0].id
            )

            wf_2_ex_1_task_1_ex = self._assert_single_item(
                wf_2_ex_1.task_executions,
                name='task1'
            )

            wf_2_ex_1_task_1_action_exs = db_api.get_action_executions(
                task_execution_id=wf_2_ex_1_task_1_ex.id
            )

            wf_2_ex_2 = db_api.get_workflow_execution(
                wf_1_task_1_action_exs[1].id
            )

            wf_2_ex_2_task_1_ex = self._assert_single_item(
                wf_2_ex_2.task_executions,
                name='task1'
            )

            wf_2_ex_2_task_1_action_exs = db_api.get_action_executions(
                task_execution_id=wf_2_ex_2_task_1_ex.id
            )

            wf_2_ex_3 = db_api.get_workflow_execution(
                wf_1_task_1_action_exs[2].id
            )

            wf_2_ex_3_task_1_ex = self._assert_single_item(
                wf_2_ex_3.task_executions,
                name='task1'
            )

            wf_2_ex_3_task_1_action_exs = db_api.get_action_executions(
                task_execution_id=wf_2_ex_3_task_1_ex.id
            )

            # Get objects for the wf3 subworkflow execution.
            wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3')

            wf_3_task_1_ex = self._assert_single_item(
                wf_3_ex.task_executions,
                name='task1'
            )

            wf_3_task_1_action_exs = db_api.get_action_executions(
                task_execution_id=wf_3_task_1_ex.id
            )

            # Check state of parent workflow execution.
            # The parent stays PAUSED even though one subworkflow finished.
            self.assertEqual(states.PAUSED, wf_1_ex.state)
            self.assertEqual(states.PAUSED, wf_1_task_1_ex.state)
            self.assertEqual(states.PAUSED, wf_1_task_2_ex.state)

            # Check state of wf2 (1) subworkflow execution.
            self.assertEqual(states.SUCCESS, wf_1_task_1_action_exs[0].state)
            self.assertEqual(states.SUCCESS, wf_2_ex_1.state)
            self.assertEqual(states.SUCCESS, wf_2_ex_1_task_1_ex.state)
            self.assertEqual(states.SUCCESS, wf_2_ex_1_task_1_action_exs[0].state)

            # Check state of wf2 (2) subworkflow execution.
            self.assertEqual(states.PAUSED, wf_1_task_1_action_exs[1].state)
            self.assertEqual(states.PAUSED, wf_2_ex_2.state)
            self.assertEqual(states.RUNNING, wf_2_ex_2_task_1_ex.state)
            self.assertEqual(states.RUNNING, wf_2_ex_2_task_1_action_exs[0].state)

            # Check state of wf2 (3) subworkflow execution.
            self.assertEqual(states.PAUSED, wf_1_task_1_action_exs[2].state)
            self.assertEqual(states.PAUSED, wf_2_ex_3.state)
            self.assertEqual(states.RUNNING, wf_2_ex_3_task_1_ex.state)
            self.assertEqual(states.RUNNING, wf_2_ex_3_task_1_action_exs[0].state)

            # Check state of wf3 subworkflow execution.
            self.assertEqual(states.PAUSED, wf_1_task_2_action_exs[0].state)
            self.assertEqual(states.PAUSED, wf_3_ex.state)
            self.assertEqual(states.RUNNING, wf_3_task_1_ex.state)
            self.assertEqual(states.RUNNING, wf_3_task_1_action_exs[0].state)

        # Resume one of the remaining subworkflows.
        self.engine.resume_workflow(wf_2_ex_2.id)
        self.engine.resume_workflow(wf_2_ex_3.id)
        self.engine.resume_workflow(wf_3_ex.id)

        self.await_workflow_running(wf_2_ex_2.id)
        self.await_workflow_running(wf_2_ex_3.id)
        self.await_workflow_running(wf_3_ex.id)
        self.await_task_running(wf_1_task_1_ex.id)
        self.await_task_running(wf_1_task_2_ex.id)
        self.await_workflow_running(wf_1_ex.id)

        # Complete action executions of the remaining subworkflows.
        self.engine.on_action_complete(
            wf_2_ex_2_task_1_action_exs[0].id,
            ml_actions.Result(data={'result': 'foobar'})
        )

        self.engine.on_action_complete(
            wf_2_ex_3_task_1_action_exs[0].id,
            ml_actions.Result(data={'result': 'foobar'})
        )

        self.engine.on_action_complete(
            wf_3_task_1_action_exs[0].id,
            ml_actions.Result(data={'result': 'foobar'})
        )

        self.await_workflow_success(wf_2_ex_1.id)
        self.await_workflow_success(wf_2_ex_2.id)
        self.await_workflow_success(wf_2_ex_3.id)
        self.await_workflow_success(wf_3_ex.id)
        self.await_task_success(wf_1_task_1_ex.id)
        self.await_task_success(wf_1_task_2_ex.id)
        self.await_workflow_success(wf_1_ex.id)

        with db_api.transaction():
            wf_execs = db_api.get_workflow_executions()

            # Get objects for the parent workflow execution.
            wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1')

            wf_1_task_1_ex = self._assert_single_item(
                wf_1_ex.task_executions,
                name='task1'
            )

            wf_1_task_1_action_exs = sorted(
                wf_1_task_1_ex.executions,
                key=lambda x: x['runtime_context']['index']
            )

            wf_1_task_2_ex = self._assert_single_item(
                wf_1_ex.task_executions,
                name='task2'
            )

            wf_1_task_2_action_exs = wf_1_task_2_ex.executions

            # Get objects for the with-items subworkflow executions.
            wf_2_ex_1 = db_api.get_workflow_execution(
                wf_1_task_1_action_exs[0].id
            )

            wf_2_ex_1_task_1_ex = self._assert_single_item(
                wf_2_ex_1.task_executions,
                name='task1'
            )

            wf_2_ex_1_task_1_action_exs = db_api.get_action_executions(
                task_execution_id=wf_2_ex_1_task_1_ex.id
            )

            wf_2_ex_2 = db_api.get_workflow_execution(
                wf_1_task_1_action_exs[1].id
            )

            wf_2_ex_2_task_1_ex = self._assert_single_item(
                wf_2_ex_2.task_executions,
                name='task1'
            )

            wf_2_ex_2_task_1_action_exs = db_api.get_action_executions(
                task_execution_id=wf_2_ex_2_task_1_ex.id
            )

            wf_2_ex_3 = db_api.get_workflow_execution(
                wf_1_task_1_action_exs[2].id
            )

            wf_2_ex_3_task_1_ex = self._assert_single_item(
                wf_2_ex_3.task_executions,
                name='task1'
            )

            wf_2_ex_3_task_1_action_exs = db_api.get_action_executions(
                task_execution_id=wf_2_ex_3_task_1_ex.id
            )

            # Get objects for the wf3 subworkflow execution.
            wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3')

            wf_3_task_1_ex = self._assert_single_item(
                wf_3_ex.task_executions,
                name='task1'
            )

            wf_3_task_1_action_exs = db_api.get_action_executions(
                task_execution_id=wf_3_task_1_ex.id
            )

            # Check state of parent workflow execution.
            self.assertEqual(states.SUCCESS, wf_1_ex.state)
            self.assertEqual(states.SUCCESS, wf_1_task_1_ex.state)
            self.assertEqual(states.SUCCESS, wf_1_task_2_ex.state)

            # Check state of wf2 (1) subworkflow execution.
            self.assertEqual(states.SUCCESS, wf_1_task_1_action_exs[0].state)
            self.assertEqual(states.SUCCESS, wf_2_ex_1.state)
            self.assertEqual(states.SUCCESS, wf_2_ex_1_task_1_ex.state)
            self.assertEqual(states.SUCCESS, wf_2_ex_1_task_1_action_exs[0].state)

            # Check state of wf2 (2) subworkflow execution.
            self.assertEqual(states.SUCCESS, wf_1_task_1_action_exs[1].state)
            self.assertEqual(states.SUCCESS, wf_2_ex_2.state)
            self.assertEqual(states.SUCCESS, wf_2_ex_2_task_1_ex.state)
            self.assertEqual(states.SUCCESS, wf_2_ex_2_task_1_action_exs[0].state)

            # Check state of wf2 (3) subworkflow execution.
            self.assertEqual(states.SUCCESS, wf_1_task_1_action_exs[2].state)
            self.assertEqual(states.SUCCESS, wf_2_ex_3.state)
            self.assertEqual(states.SUCCESS, wf_2_ex_3_task_1_ex.state)
            self.assertEqual(states.SUCCESS, wf_2_ex_3_task_1_action_exs[0].state)

            # Check state of wf3 subworkflow execution.
            self.assertEqual(states.SUCCESS, wf_1_task_2_action_exs[0].state)
            self.assertEqual(states.SUCCESS, wf_3_ex.state)
            self.assertEqual(states.SUCCESS, wf_3_task_1_ex.state)
            self.assertEqual(states.SUCCESS, wf_3_task_1_action_exs[0].state)
def test_pause_resume_cascade_up_from_subworkflow_pause_before(self):
"""Verify pause/resume cascading through a workflow tree.

wf1 launches subworkflows wf2 and wf3 in parallel. wf2's task2 declares
'pause-before', so reaching it must pause wf2 and — cascading upward —
the parent wf1 and the sibling wf3 as well. Resuming wf1 must cascade
RUNNING back down, after which completing the outstanding async actions
drives the whole tree to SUCCESS.

NOTE(review): the indentation of this block appears flattened in this
dump; the original test body is indented normally.
"""
wb_text = """
version: '2.0'
name: wb
workflows:
wf1:
tasks:
task1:
workflow: wf2
on-success: task3
task2:
workflow: wf3
on-success: task3
task3:
join: all
wf2:
tasks:
task1:
action: std.noop
on-success: task2
task2:
pause-before: true
action: std.async_noop
wf3:
tasks:
task1:
action: std.async_noop
on-success: task2
task2:
action: std.noop
"""
wb_service.create_workbook_v2(wb_text)
# Start workflow execution.
wf_1_ex = self.engine.start_workflow('wb.wf1')
# wf2.task2's pause-before should settle the whole tree in PAUSED.
self.await_workflow_state(wf_1_ex.id, states.PAUSED)
# Phase 1: inspect DB state while everything is paused.
with db_api.transaction():
wf_execs = db_api.get_workflow_executions()
# Get objects for the parent workflow execution.
wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1')
wf_1_task_1_ex = self._assert_single_item(
wf_1_ex.task_executions,
name='task1'
)
wf_1_task_1_action_exs = wf_1_task_1_ex.executions
wf_1_task_2_ex = self._assert_single_item(
wf_1_ex.task_executions,
name='task2'
)
wf_1_task_2_action_exs = wf_1_task_2_ex.executions
# Get objects for the subworkflow executions.
wf_2_ex = self._assert_single_item(wf_execs, name='wb.wf2')
wf_2_task_1_ex = self._assert_single_item(
wf_2_ex.task_executions,
name='task1'
)
wf_2_task_1_action_exs = db_api.get_action_executions(
task_execution_id=wf_2_task_1_ex.id
)
wf_2_task_2_ex = self._assert_single_item(
wf_2_ex.task_executions,
name='task2'
)
wf_2_task_2_action_exs = db_api.get_action_executions(
task_execution_id=wf_2_task_2_ex.id
)
wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3')
wf_3_task_1_ex = self._assert_single_item(
wf_3_ex.task_executions,
name='task1'
)
wf_3_task_1_action_exs = db_api.get_action_executions(
task_execution_id=wf_3_task_1_ex.id
)
# wf2 paused before its task2 ran: task2 is IDLE with no actions yet.
self.assertEqual(states.PAUSED, wf_2_ex.state)
self.assertEqual(states.SUCCESS, wf_2_task_1_ex.state)
self.assertEqual(states.SUCCESS, wf_2_task_1_action_exs[0].state)
self.assertEqual(states.IDLE, wf_2_task_2_ex.state)
self.assertEqual(0, len(wf_2_task_2_action_exs))
# Sibling wf3 is paused too, though its async action is still running.
self.assertEqual(states.PAUSED, wf_3_ex.state)
self.assertEqual(states.RUNNING, wf_3_task_1_ex.state)
self.assertEqual(states.RUNNING, wf_3_task_1_action_exs[0].state)
# The pause cascaded up to the parent's tasks and the parent itself.
self.assertEqual(states.PAUSED, wf_1_task_1_action_exs[0].state)
self.assertEqual(states.PAUSED, wf_1_task_1_ex.state)
self.assertEqual(states.PAUSED, wf_1_task_2_action_exs[0].state)
self.assertEqual(states.PAUSED, wf_1_task_2_ex.state)
self.assertEqual(states.PAUSED, wf_1_ex.state)
# Resume the main workflow.
self.engine.resume_workflow(wf_1_ex.id)
self.await_workflow_running(wf_1_ex.id)
self.await_workflow_running(wf_2_ex.id)
self.await_workflow_running(wf_3_ex.id)
# Phase 2: re-read the tree; RUNNING must have cascaded back down.
with db_api.transaction():
wf_execs = db_api.get_workflow_executions()
# Get objects for the parent workflow execution.
wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1')
wf_1_task_1_ex = self._assert_single_item(
wf_1_ex.task_executions,
name='task1'
)
wf_1_task_1_action_exs = wf_1_task_1_ex.executions
wf_1_task_2_ex = self._assert_single_item(
wf_1_ex.task_executions,
name='task2'
)
wf_1_task_2_action_exs = wf_1_task_2_ex.executions
# Get objects for the subworkflow executions.
wf_2_ex = self._assert_single_item(wf_execs, name='wb.wf2')
wf_2_task_1_ex = self._assert_single_item(
wf_2_ex.task_executions,
name='task1'
)
wf_2_task_1_action_exs = db_api.get_action_executions(
task_execution_id=wf_2_task_1_ex.id
)
wf_2_task_2_ex = self._assert_single_item(
wf_2_ex.task_executions,
name='task2'
)
wf_2_task_2_action_exs = db_api.get_action_executions(
task_execution_id=wf_2_task_2_ex.id
)
wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3')
wf_3_task_1_ex = self._assert_single_item(
wf_3_ex.task_executions,
name='task1'
)
wf_3_task_1_action_exs = db_api.get_action_executions(
task_execution_id=wf_3_task_1_ex.id
)
self.assertEqual(states.RUNNING, wf_2_ex.state)
self.assertEqual(states.SUCCESS, wf_2_task_1_ex.state)
self.assertEqual(states.SUCCESS, wf_2_task_1_action_exs[0].state)
self.assertEqual(states.RUNNING, wf_2_task_2_ex.state)
self.assertEqual(states.RUNNING, wf_2_task_2_action_exs[0].state)
self.assertEqual(states.RUNNING, wf_3_ex.state)
self.assertEqual(states.RUNNING, wf_3_task_1_ex.state)
self.assertEqual(states.RUNNING, wf_3_task_1_action_exs[0].state)
self.assertEqual(states.RUNNING, wf_1_task_1_action_exs[0].state)
self.assertEqual(states.RUNNING, wf_1_task_1_ex.state)
self.assertEqual(states.RUNNING, wf_1_task_2_action_exs[0].state)
self.assertEqual(states.RUNNING, wf_1_task_2_ex.state)
self.assertEqual(states.RUNNING, wf_1_ex.state)
# Complete action executions of the subworkflows.
self.engine.on_action_complete(
wf_2_task_2_action_exs[0].id,
ml_actions.Result(data={'result': 'foobar'})
)
self.engine.on_action_complete(
wf_3_task_1_action_exs[0].id,
ml_actions.Result(data={'result': 'foobar'})
)
self.await_workflow_success(wf_2_ex.id)
self.await_workflow_success(wf_3_ex.id)
self.await_workflow_success(wf_1_ex.id)
# Phase 3: final read; every execution in the tree must be SUCCESS.
with db_api.transaction():
wf_execs = db_api.get_workflow_executions()
# Get objects for the parent workflow execution.
wf_1_ex = self._assert_single_item(wf_execs, name='wb.wf1')
wf_1_task_execs = wf_1_ex.task_executions
wf_1_task_1_ex = self._assert_single_item(
wf_1_ex.task_executions,
name='task1'
)
wf_1_task_1_action_exs = wf_1_task_1_ex.executions
wf_1_task_2_ex = self._assert_single_item(
wf_1_ex.task_executions,
name='task2'
)
wf_1_task_2_action_exs = wf_1_task_2_ex.executions
wf_1_task_3_ex = self._assert_single_item(
wf_1_ex.task_executions,
name='task3'
)
# Get objects for the subworkflow executions.
wf_2_ex = self._assert_single_item(wf_execs, name='wb.wf2')
wf_2_task_execs = wf_2_ex.task_executions
wf_2_task_1_ex = self._assert_single_item(
wf_2_ex.task_executions,
name='task1'
)
wf_2_task_1_action_exs = db_api.get_action_executions(
task_execution_id=wf_2_task_1_ex.id
)
wf_2_task_2_ex = self._assert_single_item(
wf_2_ex.task_executions,
name='task2'
)
wf_2_task_2_action_exs = db_api.get_action_executions(
task_execution_id=wf_2_task_2_ex.id
)
wf_3_ex = self._assert_single_item(wf_execs, name='wb.wf3')
wf_3_task_execs = wf_3_ex.task_executions
wf_3_task_1_ex = self._assert_single_item(
wf_3_ex.task_executions,
name='task1'
)
wf_3_task_1_action_exs = db_api.get_action_executions(
task_execution_id=wf_3_task_1_ex.id
)
wf_3_task_2_ex = self._assert_single_item(
wf_3_ex.task_executions,
name='task2'
)
wf_3_task_2_action_exs = db_api.get_action_executions(
task_execution_id=wf_3_task_2_ex.id
)
self.assertEqual(states.SUCCESS, wf_1_ex.state)
self.assertEqual(3, len(wf_1_task_execs))
self.assertEqual(states.SUCCESS, wf_1_task_1_ex.state)
self.assertEqual(states.SUCCESS, wf_1_task_2_ex.state)
self.assertEqual(states.SUCCESS, wf_1_task_3_ex.state)
self.assertEqual(states.SUCCESS, wf_1_task_1_action_exs[0].state)
self.assertEqual(states.SUCCESS, wf_1_task_2_action_exs[0].state)
self.assertEqual(states.SUCCESS, wf_2_ex.state)
self.assertEqual(2, len(wf_2_task_execs))
self.assertEqual(states.SUCCESS, wf_2_task_1_ex.state)
self.assertEqual(states.SUCCESS, wf_2_task_2_ex.state)
self.assertEqual(states.SUCCESS, wf_2_task_1_action_exs[0].state)
self.assertEqual(states.SUCCESS, wf_2_task_2_action_exs[0].state)
self.assertEqual(states.SUCCESS, wf_3_ex.state)
self.assertEqual(2, len(wf_3_task_execs))
self.assertEqual(states.SUCCESS, wf_3_task_1_ex.state)
self.assertEqual(states.SUCCESS, wf_3_task_2_ex.state)
self.assertEqual(states.SUCCESS, wf_3_task_1_action_exs[0].state)
self.assertEqual(states.SUCCESS, wf_3_task_2_action_exs[0].state)
| 36.485649
| 78
| 0.627987
| 11,512
| 80,086
| 3.883165
| 0.016852
| 0.052457
| 0.035344
| 0.071091
| 0.974029
| 0.972731
| 0.970159
| 0.96611
| 0.964275
| 0.96215
| 0
| 0.044343
| 0.294608
| 80,086
| 2,194
| 79
| 36.502279
| 0.746973
| 0.073209
| 0
| 0.840025
| 0
| 0
| 0.065406
| 0
| 0
| 0
| 0
| 0
| 0.337157
| 1
| 0.003824
| false
| 0
| 0.003187
| 0.000637
| 0.008286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5d86766633c6b1ee04d8d9426b4edf7e879f38ea
| 5,853
|
py
|
Python
|
userbot/plugins/phonecontrol.py
|
azizkziba/FridayUserbot
|
f77a692f74d1c2da2b6e7ee47f5eee7a9dd9e138
|
[
"MIT"
] | 55
|
2019-07-13T15:57:54.000Z
|
2021-09-20T16:50:42.000Z
|
userbot/plugins/phonecontrol.py
|
azizkziba/FridayUserbot
|
f77a692f74d1c2da2b6e7ee47f5eee7a9dd9e138
|
[
"MIT"
] | 4
|
2020-11-07T07:39:51.000Z
|
2020-11-10T03:46:41.000Z
|
userbot/plugins/phonecontrol.py
|
azizkziba/FridayUserbot
|
f77a692f74d1c2da2b6e7ee47f5eee7a9dd9e138
|
[
"MIT"
] | 450
|
2019-07-12T13:18:41.000Z
|
2022-03-29T18:47:42.000Z
|
# New Quote module by @r4v4n4 😉
import datetime
import asyncio
from telethon import events
from telethon.errors.rpcerrorlist import YouBlockedUserError
from telethon.tl.functions.account import UpdateNotifySettingsRequest
from uniborg.util import admin_cmd
@borg.on(admin_cmd(pattern="battery ?(.*)", allow_sudo=True))
async def _(event):
    """Relay the replied-to user's battery level via @batterylevelbot.

    Must be used as a reply to a plain-text message from a real
    (non-bot) user; the user's message is forwarded to the bot, the
    "🔋 Battery" command is issued, and the bot's answer is posted back
    into the originating chat.
    """
    if event.fwd_from:
        return
    if not event.reply_to_msg_id:
        await event.reply("```Reply to any user message.```")
        return
    reply_message = await event.get_reply_message()
    if not reply_message.text:
        await event.reply("```Reply to text message```")
        return
    chat = "@batterylevelbot"
    sender = reply_message.sender
    # Guard a None sender (e.g. channel posts) — the original crashed
    # with AttributeError on `.bot` — and reject bot senders.
    if sender is None or sender.bot:
        await event.reply("```Reply to actual users message.```")
        return
    async with event.client.conversation(chat) as conv:
        try:
            # Register the listener before forwarding so the bot's
            # reply cannot be missed.
            response = conv.wait_event(
                events.NewMessage(incoming=True, from_users=830109936)
            )
            message = await event.client.forward_messages(chat, reply_message)
            await message.reply("🔋 Battery")
            await asyncio.sleep(4)
            response = await response
        except YouBlockedUserError:
            # Reworded: the original error message contained a slur.
            await event.reply("```Please unblock @batterylevelbot and try again```")
            return
    if response.text.startswith("Hello"):
        await event.reply("```Can you kindly disable your forward privacy settings for good?```")
    else:
        await event.delete()
        await event.client.send_message(
            event.chat_id,
            response.message,
            reply_to=event.message.reply_to_msg_id
        )
@borg.on(admin_cmd(pattern="pmute ?(.*)", allow_sudo=True))
async def _(event):
    """Ask @batterylevelbot to set the replied-to user's ring mode to
    silent ("/ring_mode silent") and relay the bot's answer.

    Must be used as a reply to a plain-text message from a real
    (non-bot) user.
    """
    if event.fwd_from:
        return
    if not event.reply_to_msg_id:
        await event.reply("```Reply to any user message.```")
        return
    reply_message = await event.get_reply_message()
    if not reply_message.text:
        await event.reply("```Reply to text message```")
        return
    chat = "@batterylevelbot"
    sender = reply_message.sender
    # Guard a None sender (e.g. channel posts) — the original crashed
    # with AttributeError on `.bot` — and reject bot senders.
    if sender is None or sender.bot:
        await event.reply("```Reply to actual users message.```")
        return
    async with event.client.conversation(chat) as conv:
        try:
            # Register the listener before forwarding so the bot's
            # reply cannot be missed.
            response = conv.wait_event(
                events.NewMessage(incoming=True, from_users=830109936)
            )
            message = await event.client.forward_messages(chat, reply_message)
            await message.reply("/ring_mode silent")
            await asyncio.sleep(4)
            response = await response
        except YouBlockedUserError:
            # Reworded: the original error message contained a slur.
            await event.reply("```Please unblock @batterylevelbot and try again```")
            return
    if response.text.startswith("Hello"):
        await event.reply("```Can you kindly disable your forward privacy settings for good?```")
    else:
        await event.delete()
        await event.client.send_message(
            event.chat_id,
            response.message,
            reply_to=event.message.reply_to_msg_id
        )
@borg.on(admin_cmd(pattern="pring ?(.*)", allow_sudo=True))
async def _(event):
    """Ask @batterylevelbot to set the replied-to user's ring mode to
    normal ("/ring_mode normal") and relay the bot's answer.

    Must be used as a reply to a plain-text message from a real
    (non-bot) user.
    """
    if event.fwd_from:
        return
    if not event.reply_to_msg_id:
        await event.reply("```Reply to any user message.```")
        return
    reply_message = await event.get_reply_message()
    if not reply_message.text:
        await event.reply("```Reply to text message```")
        return
    chat = "@batterylevelbot"
    sender = reply_message.sender
    # Guard a None sender (e.g. channel posts) — the original crashed
    # with AttributeError on `.bot` — and reject bot senders.
    if sender is None or sender.bot:
        await event.reply("```Reply to actual users message.```")
        return
    async with event.client.conversation(chat) as conv:
        try:
            # Register the listener before forwarding so the bot's
            # reply cannot be missed.
            response = conv.wait_event(
                events.NewMessage(incoming=True, from_users=830109936)
            )
            message = await event.client.forward_messages(chat, reply_message)
            await message.reply("/ring_mode normal")
            await asyncio.sleep(4)
            response = await response
        except YouBlockedUserError:
            # Reworded: the original error message contained a slur.
            await event.reply("```Please unblock @batterylevelbot and try again```")
            return
    if response.text.startswith("Hello"):
        await event.reply("```Can you kindly disable your forward privacy settings for good?```")
    else:
        await event.delete()
        await event.client.send_message(
            event.chat_id,
            response.message,
            reply_to=event.message.reply_to_msg_id
        )
@borg.on(admin_cmd(pattern="pvibrate ?(.*)", allow_sudo=True))
async def _(event):
    """Ask @batterylevelbot to set the replied-to user's ring mode to
    vibrate ("/ring_mode vibrate") and relay the bot's answer.

    Must be used as a reply to a plain-text message from a real
    (non-bot) user.
    """
    if event.fwd_from:
        return
    if not event.reply_to_msg_id:
        await event.reply("```Reply to any user message.```")
        return
    reply_message = await event.get_reply_message()
    if not reply_message.text:
        await event.reply("```Reply to text message```")
        return
    chat = "@batterylevelbot"
    sender = reply_message.sender
    # Guard a None sender (e.g. channel posts) — the original crashed
    # with AttributeError on `.bot` — and reject bot senders.
    if sender is None or sender.bot:
        await event.reply("```Reply to actual users message.```")
        return
    async with event.client.conversation(chat) as conv:
        try:
            # Register the listener before forwarding so the bot's
            # reply cannot be missed.
            response = conv.wait_event(
                events.NewMessage(incoming=True, from_users=830109936)
            )
            message = await event.client.forward_messages(chat, reply_message)
            await message.reply("/ring_mode vibrate")
            await asyncio.sleep(4)
            response = await response
        except YouBlockedUserError:
            # Reworded: the original error message contained a slur.
            await event.reply("```Please unblock @batterylevelbot and try again```")
            return
    if response.text.startswith("Hello"):
        await event.reply("```Can you kindly disable your forward privacy settings for good?```")
    else:
        await event.delete()
        await event.client.send_message(
            event.chat_id,
            response.message,
            reply_to=event.message.reply_to_msg_id
        )
| 35.047904
| 117
| 0.63335
| 696
| 5,853
| 5.191092
| 0.139368
| 0.09964
| 0.083033
| 0.066427
| 0.92444
| 0.918627
| 0.918627
| 0.918627
| 0.918627
| 0.918627
| 0
| 0.009954
| 0.261917
| 5,853
| 166
| 118
| 35.259036
| 0.825926
| 0.004955
| 0
| 0.892308
| 0
| 0
| 0.166609
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.046154
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5daa7bf73ff2f9d3fc6260f0ef2a334609c90922
| 4,334
|
py
|
Python
|
labour_welfare/labour_welfare/labour_welfare_api.py
|
sumitkamboj2/labour_welfare
|
5e049021f2cbbc9a0ed2dac7319eb076e1db4abf
|
[
"MIT"
] | null | null | null |
labour_welfare/labour_welfare/labour_welfare_api.py
|
sumitkamboj2/labour_welfare
|
5e049021f2cbbc9a0ed2dac7319eb076e1db4abf
|
[
"MIT"
] | null | null | null |
labour_welfare/labour_welfare/labour_welfare_api.py
|
sumitkamboj2/labour_welfare
|
5e049021f2cbbc9a0ed2dac7319eb076e1db4abf
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
import frappe
import time
from frappe.utils import flt, now_datetime, cstr, add_to_date, date_diff, nowdate, add_days, getdate
from frappe import _
import requests
import json
import tempfile
@frappe.whitelist()
def labour_welfare(data=None):
    """Create, update, or look up a 'Labour Welfare Board' record.

    Dispatch on the JSON payload *data*:
      * ``data["name"]`` present -> return the stored record(s) for that name;
      * the registration number already exists -> update it as a renewal;
      * otherwise -> create a new registration.

    :param data: JSON string of form fields submitted by the client.
    :return: list of dicts for the lookup branch, otherwise ``None``.
    """
    data = json.loads(data)
    if data.get("name"):
        # Parameterized query: the original interpolated user input into
        # the SQL string with .format(), which allowed SQL injection.
        return frappe.db.sql(
            """SELECT * from `tabLabour Welfare Board` where name=%s""",
            (data.get("name"),),
            as_dict=1,
        )
    if frappe.db.get_value("Labour Welfare Board", data.get("registration_number")):
        # Renewal: update the existing record in place.
        doc = frappe.get_doc("Labour Welfare Board", data.get("registration_number"))
        doc.new_form = False
        doc.renewal = True
        _set_welfare_fields(doc, data)
        # Only the renewal branch carried this field in the original code.
        doc.registration_district = data.get("registration_district")
    else:
        # First-time registration.
        doc = frappe.new_doc("Labour Welfare Board")
        # NOTE: "registation_no" spelling matches the DocType's field name.
        doc.registation_no = data.get("registration_number")
        doc.new_form = True
        doc.renewal = False
        _set_welfare_fields(doc, data)
    doc.save()
    frappe.db.commit()


def _set_welfare_fields(doc, data):
    """Copy the request fields shared by new registrations and renewals."""
    doc.registration_number_search = data.get("registration_number")
    doc.registration_date = getdate(data.get("registration_date"))
    doc.full_name_of_domestic_work = data.get("full_name_worker")
    doc.birthdate_of_beneficiany = getdate(data.get("birthdate_of_baneficiary"))
    doc.age_of_beneficiary = data.get("age")
    doc.mobile_number = data.get("mobile_number")
    if data.get("sex") == "male":
        doc.sex = "Male / पुरूष"
    elif data.get("sex") == "female":
        doc.sex = "Female / महिला"
    else:
        doc.sex = "Other / इतर"
    doc.adhar_card_number = data.get("adhar_card_number")
    doc.residence_address = data.get("residence_address")
    doc.district = data.get("district")
    doc.village_city = data.get("village_city")
    doc.pincode = data.get("pincode")
    doc.owner_name = data.get("owner_name")
    doc.owner_mobile_number = data.get("owner_mobile_number")
    doc.owner_address = data.get("owner_address")
    doc.beneficiary_bank_name = data.get("bank_name")
    doc.beneficiary_bank_account_number = data.get("bank_account_number")
    doc.ifsc_code = data.get("ifsc_code")
    doc.beneficiaries_total_number_of_offspring = data.get("child_qty")
    doc.name_of_nominee = data.get("name_of_nominee")
    doc.relationship_to_nominee = data.get("relationship_name")
    doc.receipt_image_file_name = data.get("receipt_file_name")
    doc.identity_proof_file_name = data.get("adhar_card_copy")
    doc.bank_copy_file_name = data.get("bank_pass_copy")
    doc.address_proof_file_name = data.get("residential_proof")
    doc.please_choose_any_one_identity_proof = data.get("photo_id")
    doc.please_choose_any_one_address_proof = data.get("address_proof")
    doc.please_select_an_option = data.get("bank_copy")
    doc.welfare_id = data.get("welfare_id")
| 43.34
| 129
| 0.772958
| 674
| 4,334
| 4.664688
| 0.179525
| 0.146947
| 0.041985
| 0.038168
| 0.833015
| 0.833015
| 0.833015
| 0.833015
| 0.812659
| 0.812659
| 0
| 0.000506
| 0.08791
| 4,334
| 99
| 130
| 43.777778
| 0.792816
| 0
| 0
| 0.760417
| 0
| 0
| 0.244634
| 0.015924
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010417
| false
| 0.020833
| 0.083333
| 0
| 0.104167
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5dae31c4c1525f0c11466f08cfb3eef482176b4e
| 1,165
|
py
|
Python
|
Curso de Cisco/Actividades/Act 3 - Modulo 2.py
|
tomasfriz/Curso-de-Cisco
|
a50ee5fa96bd86d468403e29ccdc3565a181af60
|
[
"MIT"
] | null | null | null |
Curso de Cisco/Actividades/Act 3 - Modulo 2.py
|
tomasfriz/Curso-de-Cisco
|
a50ee5fa96bd86d468403e29ccdc3565a181af60
|
[
"MIT"
] | null | null | null |
Curso de Cisco/Actividades/Act 3 - Modulo 2.py
|
tomasfriz/Curso-de-Cisco
|
a50ee5fa96bd86d468403e29ccdc3565a181af60
|
[
"MIT"
] | null | null | null |
# Course exercise: print the same star figure several different ways.
# Section labels are Spanish: "version original" = original version,
# "con menos invocaciones de 'print()'" = with fewer print() calls,
# "mas alto" = taller, "doble" = doubled side by side.
# NOTE(review): runs of spaces inside the string literals appear
# collapsed in this copy of the file — verify against the original.
print("version original:")
print(" *")
print(" * *")
print(" * *")
print(" * *")
print("*** ***")
print(" * *")
print(" * *")
print(" *****")
# Same figure with embedded "\n" to cut down the number of print() calls.
print("con menos invocaciones de 'print()': ")
print(" *\n * *\n * *\n * *\n*** ***")
print(" * *\n * *\n *****")
# Taller variant of the figure.
print("mas alto:")
print(" *")
print(" * *")
print(" * *")
print(" * *")
print(" * *")
print(" * *")
print(" * *")
print(" * *")
print("****** ******")
print(" * *")
print(" * *")
print(" * *")
print(" * *")
print(" * *")
print(" * *")
print(" *******")
# Two copies side by side via string repetition ("*2").
print("doble:")
print(" * "*2)
print(" * * "*2)
print(" * * "*2)
print(" * * "*2)
print(" * * "*2)
print(" * * "*2)
print(" * * "*2)
print(" * * "*2)
print("****** ******"*2)
print(" * * "*2)
print(" * * "*2)
print(" * * "*2)
print(" * * "*2)
print(" * * "*2)
print(" * * "*2)
print(" ******* "*2)
| 21.981132
| 52
| 0.262661
| 79
| 1,165
| 3.886076
| 0.164557
| 0.814332
| 1.074919
| 1.302932
| 0.736156
| 0.736156
| 0.736156
| 0.736156
| 0.736156
| 0.589577
| 0
| 0.024353
| 0.436052
| 1,165
| 53
| 53
| 21.981132
| 0.4414
| 0
| 0
| 0.717391
| 0
| 0
| 0.571674
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 1
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 11
|
5dc79c21e3135d742a354458e1e14dc27ce4753c
| 63
|
py
|
Python
|
tests/tests_rash.py
|
gabrielmontagne/rash
|
ce9acd3d2e616797e2a43a5ee500cb3161dc9b2c
|
[
"MIT"
] | null | null | null |
tests/tests_rash.py
|
gabrielmontagne/rash
|
ce9acd3d2e616797e2a43a5ee500cb3161dc9b2c
|
[
"MIT"
] | null | null | null |
tests/tests_rash.py
|
gabrielmontagne/rash
|
ce9acd3d2e616797e2a43a5ee500cb3161dc9b2c
|
[
"MIT"
] | null | null | null |
from context import rash
def test_fail(): assert False, 'x_x'
| 15.75
| 36
| 0.746032
| 11
| 63
| 4.090909
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15873
| 63
| 3
| 37
| 21
| 0.849057
| 0
| 0
| 0
| 0
| 0
| 0.047619
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.5
| true
| 0
| 0.5
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
5df4ce47ae6a5b513e87300d1ce51bb17d68d140
| 153
|
py
|
Python
|
src/dnn/__init__.py
|
iki-taichi/tf-keras-transformer
|
613122705583c0274b0c9be0993f3bbeb240932d
|
[
"MIT"
] | 5
|
2019-08-03T07:56:30.000Z
|
2020-07-04T09:00:23.000Z
|
src/dnn/__init__.py
|
iki-taichi/tf-keras-transformer
|
613122705583c0274b0c9be0993f3bbeb240932d
|
[
"MIT"
] | 1
|
2019-10-15T16:50:11.000Z
|
2019-10-15T16:50:11.000Z
|
src/dnn/__init__.py
|
iki-taichi/tf-keras-transformer
|
613122705583c0274b0c9be0993f3bbeb240932d
|
[
"MIT"
] | 4
|
2019-06-15T03:13:47.000Z
|
2020-08-03T09:04:14.000Z
|
# coding:utf-8
from .transformer_model import get_transformer_model
from .transformer_model import get_custom_objects as get_transformer_custom_objects
| 30.6
| 83
| 0.882353
| 22
| 153
| 5.727273
| 0.5
| 0.380952
| 0.31746
| 0.412698
| 0.460317
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007143
| 0.084967
| 153
| 4
| 84
| 38.25
| 0.892857
| 0.078431
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
f8ece79b244194919e0864e7c2324334358da35b
| 88
|
py
|
Python
|
uniflex_module_simple/__init__.py
|
tkn-tub/module_simple
|
79a79042046f5cf6ff0e27fde3136dae2cfb73d7
|
[
"MIT"
] | null | null | null |
uniflex_module_simple/__init__.py
|
tkn-tub/module_simple
|
79a79042046f5cf6ff0e27fde3136dae2cfb73d7
|
[
"MIT"
] | null | null | null |
uniflex_module_simple/__init__.py
|
tkn-tub/module_simple
|
79a79042046f5cf6ff0e27fde3136dae2cfb73d7
|
[
"MIT"
] | 1
|
2019-11-02T20:31:51.000Z
|
2019-11-02T20:31:51.000Z
|
from .module_simple import *
from .module_simple2 import *
from .module_simple3 import *
| 29.333333
| 29
| 0.806818
| 12
| 88
| 5.666667
| 0.5
| 0.441176
| 0.470588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025974
| 0.125
| 88
| 3
| 30
| 29.333333
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
5d3b1c919418e12d1019e13bad2516b484b70abb
| 5,276
|
py
|
Python
|
tests/misc/test_report_tools.py
|
wiheto/fmridenoise
|
cc544264806418618861f0ee93fff71a0fa83eca
|
[
"Apache-2.0"
] | null | null | null |
tests/misc/test_report_tools.py
|
wiheto/fmridenoise
|
cc544264806418618861f0ee93fff71a0fa83eca
|
[
"Apache-2.0"
] | null | null | null |
tests/misc/test_report_tools.py
|
wiheto/fmridenoise
|
cc544264806418618861f0ee93fff71a0fa83eca
|
[
"Apache-2.0"
] | null | null | null |
import unittest as ut
from fmridenoise.pipelines import get_pipeline_path
from fmridenoise.utils.utils import load_pipeline_from_json
from fmridenoise.utils.report import get_pipeline_summary, YES, NO, NA
class TestPipelineSummary(ut.TestCase):
    """Checks that get_pipeline_summary() reports the expected Raw /
    temporal-derivative / quadratic-term flags for each confound of the
    bundled denoising pipelines."""

    def _check_pipeline(self, pipeline_name, expected):
        """Load *pipeline_name* and assert every confound row in its
        summary matches *expected*.

        :param pipeline_name: bundled pipeline identifier passed to
            get_pipeline_path().
        :param expected: dict mapping confound name -> (raw, deriv, quad)
            expected flag values.
        :raises ValueError: when the summary contains a confound name not
            listed in *expected* (same behavior as the original tests).
        """
        pipeline = load_pipeline_from_json(get_pipeline_path(pipeline_name))
        summary = get_pipeline_summary(pipeline)
        for confound in summary:
            name = confound['Confound']
            if name not in expected:
                raise ValueError(f'Unknown confound {confound}')
            raw, deriv, quad = expected[name]
            self.assertEqual(confound['Raw'], raw)
            self.assertEqual(confound["Temp. deriv."], deriv)
            self.assertEqual(confound["Quadr. terms"], quad)

    def test_pipeline_1(self):
        self._check_pipeline('pipeline-24HMP_8Phys_SpikeReg_4GS', {
            'WM': (YES, YES, YES),
            'CSF': (YES, YES, YES),
            'GS': (YES, YES, YES),
            'aCompCor': (NO, NA, NA),
            'ICA-AROMA': (NO, NA, NA),
            'Spikes': (YES, NA, NA),
        })

    def test_pipeline_2(self):
        self._check_pipeline('pipeline-ICA-AROMA_8Phys', {
            'WM': (YES, YES, YES),
            'CSF': (YES, YES, YES),
            'GS': (NO, NO, NO),
            'aCompCor': (NO, NA, NA),
            'ICA-AROMA': (YES, NA, NA),
            'Spikes': (NO, NA, NA),
        })

    def test_pipeline_3(self):
        self._check_pipeline('pipeline-24HMP_aCompCor_SpikeReg', {
            'WM': (NO, NO, NO),
            'CSF': (NO, NO, NO),
            'GS': (NO, NO, NO),
            'aCompCor': (YES, NA, NA),
            'ICA-AROMA': (NO, NA, NA),
            'Spikes': (YES, NA, NA),
        })
| 53.292929
| 98
| 0.569939
| 516
| 5,276
| 5.75
| 0.104651
| 0.273003
| 0.418605
| 0.157735
| 0.909673
| 0.909673
| 0.909673
| 0.909673
| 0.908662
| 0.908662
| 0
| 0.002676
| 0.291698
| 5,276
| 99
| 99
| 53.292929
| 0.791276
| 0
| 0
| 0.884211
| 0
| 0
| 0.168656
| 0.016866
| 0
| 0
| 0
| 0
| 0.568421
| 1
| 0.031579
| false
| 0
| 0.042105
| 0
| 0.084211
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
5d4585f7a0af953934272c0731a2290cdc5e0ebe
| 82
|
py
|
Python
|
src/dao/__init__.py
|
devs-7/bible-projector-python
|
a62b95bdc0c655ba04adf3b47197c562e09468b9
|
[
"Apache-2.0"
] | null | null | null |
src/dao/__init__.py
|
devs-7/bible-projector-python
|
a62b95bdc0c655ba04adf3b47197c562e09468b9
|
[
"Apache-2.0"
] | null | null | null |
src/dao/__init__.py
|
devs-7/bible-projector-python
|
a62b95bdc0c655ba04adf3b47197c562e09468b9
|
[
"Apache-2.0"
] | null | null | null |
from src.dao.verse_dao import VerseDAO
from src.dao.version_dao import VersionDAO
| 27.333333
| 42
| 0.853659
| 14
| 82
| 4.857143
| 0.571429
| 0.205882
| 0.294118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097561
| 82
| 2
| 43
| 41
| 0.918919
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
538cf9e9a61e31a69a2f84a248196241d27b18e8
| 1,715
|
py
|
Python
|
testpro1/website1/models.py
|
dongkakika/OXS
|
95166365fb5e35155af3b8de6859ec87f3d9ca78
|
[
"MIT"
] | 4
|
2020-04-22T08:42:01.000Z
|
2021-07-31T19:28:51.000Z
|
testpro1/website1/models.py
|
dongkakika/OXS
|
95166365fb5e35155af3b8de6859ec87f3d9ca78
|
[
"MIT"
] | null | null | null |
testpro1/website1/models.py
|
dongkakika/OXS
|
95166365fb5e35155af3b8de6859ec87f3d9ca78
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.utils import timezone
# Create your models here.
# Define the desired data shape as a class (single-record shape).
class UserInfo(models.Model):
# User account record.
# NOTE(review): userpw appears to store the password in plaintext with
# no hashing visible here — confirm and consider Django's auth hashing.
username=models.CharField(max_length=20)
userid=models.CharField(max_length=20)
userpw=models.CharField(max_length=20)
class sw_info(models.Model):
# Board-post listing record: Name, Date, View and Href (link) fields.
# Presumably one scraped notice-board category — verify against callers.
Name = models.CharField(max_length=50)
Date = models.CharField(max_length=15)
View = models.CharField(max_length=30)
Href = models.CharField(max_length=300)
class com_info(models.Model):
# Board-post listing record with the same Name/Date/View/Href fields
# as the other *_info models in this module.
Name = models.CharField(max_length=50)
Date = models.CharField(max_length=15)
View = models.CharField(max_length=30)
Href = models.CharField(max_length=300)
class jj_info(models.Model):
# Board-post listing record with the same Name/Date/View/Href fields
# as the other *_info models in this module.
Name = models.CharField(max_length=50)
Date = models.CharField(max_length=15)
View = models.CharField(max_length=30)
Href = models.CharField(max_length=300)
class cbnu_info(models.Model):
# Board-post listing record with the same Name/Date/View/Href fields
# as the other *_info models in this module.
Name = models.CharField(max_length=50)
Date = models.CharField(max_length=15)
View = models.CharField(max_length=30)
Href = models.CharField(max_length=300)
class jt_info(models.Model):
# Board-post listing record with the same Name/Date/View/Href fields
# as the other *_info models in this module.
Name = models.CharField(max_length=50)
Date = models.CharField(max_length=15)
View = models.CharField(max_length=30)
Href = models.CharField(max_length=300)
class jk_info(models.Model):
# Board-post listing record with the same Name/Date/View/Href fields
# as the other *_info models in this module.
Name = models.CharField(max_length=50)
Date = models.CharField(max_length=15)
View = models.CharField(max_length=30)
Href = models.CharField(max_length=300)
class jjd_info(models.Model):
Name = models.CharField(max_length=50)
Date = models.CharField(max_length=15)
View = models.CharField(max_length=30)
Href = models.CharField(max_length=300)
| 32.980769
| 44
| 0.737026
| 248
| 1,715
| 4.943548
| 0.177419
| 0.379282
| 0.455139
| 0.606852
| 0.8646
| 0.800979
| 0.800979
| 0.800979
| 0.800979
| 0.800979
| 0
| 0.047325
| 0.149854
| 1,715
| 52
| 45
| 32.980769
| 0.793553
| 0.034985
| 0
| 0.682927
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.04878
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 10
|
53c27e50b5fbe6291fe52e1a7f1c9dff6faf4d24
| 115
|
py
|
Python
|
spikeforest/spikeforestwidgets/featurespacewidget/__init__.py
|
mhhennig/spikeforest
|
5b4507ead724af3de0be5d48a3b23aaedb0be170
|
[
"Apache-2.0"
] | 1
|
2021-09-23T01:07:19.000Z
|
2021-09-23T01:07:19.000Z
|
spikeforest/spikeforestwidgets/featurespacewidget/__init__.py
|
mhhennig/spikeforest
|
5b4507ead724af3de0be5d48a3b23aaedb0be170
|
[
"Apache-2.0"
] | null | null | null |
spikeforest/spikeforestwidgets/featurespacewidget/__init__.py
|
mhhennig/spikeforest
|
5b4507ead724af3de0be5d48a3b23aaedb0be170
|
[
"Apache-2.0"
] | 1
|
2021-09-23T01:07:21.000Z
|
2021-09-23T01:07:21.000Z
|
from .featurespacewidget import FeatureSpaceWidget
from .featurespacewidget_plotly import FeatureSpaceWidgetPlotly
| 38.333333
| 63
| 0.913043
| 9
| 115
| 11.555556
| 0.555556
| 0.423077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.069565
| 115
| 2
| 64
| 57.5
| 0.971963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
53dca1aae52b14118f6e338e001590730da7a786
| 335
|
py
|
Python
|
jlf_stats/exceptions.py
|
worldofchris/jlf
|
9f80a4e17ffa9fab33bd2bd0ee68d0e260e9c68c
|
[
"BSD-2-Clause"
] | 14
|
2015-01-10T03:02:08.000Z
|
2019-01-09T10:58:44.000Z
|
jlf_stats/exceptions.py
|
worldofchris/jlf
|
9f80a4e17ffa9fab33bd2bd0ee68d0e260e9c68c
|
[
"BSD-2-Clause"
] | 1
|
2015-09-07T20:23:52.000Z
|
2015-11-01T21:32:35.000Z
|
jlf_stats/exceptions.py
|
worldofchris/jlf
|
9f80a4e17ffa9fab33bd2bd0ee68d0e260e9c68c
|
[
"BSD-2-Clause"
] | 5
|
2015-07-06T11:01:05.000Z
|
2020-01-16T12:01:23.000Z
|
class _ExprMessageError(Exception):
    """Base for jlf_stats errors carrying an expression context and a message.

    ``super().__init__(expr, msg)`` is called so ``args`` is ``(expr, msg)``,
    matching what ``BaseException.__new__`` recorded in the previous
    duplicated implementations — pickling and ``repr`` stay compatible.

    :param expr: Expression/context in which the error was detected.
    :param msg: Human-readable explanation; also the ``str()`` of the error.
    """

    def __init__(self, expr, msg):
        super().__init__(expr, msg)
        self.expr = expr
        self.msg = msg

    def __str__(self):
        # Show only the message, not the (expr, msg) args tuple.
        return self.msg


class MissingState(_ExprMessageError):
    """Raised when an expected workflow state is missing."""


class MissingConfigItem(_ExprMessageError):
    """Raised when a required configuration item is missing."""
| 15.952381
| 35
| 0.59403
| 40
| 335
| 4.575
| 0.275
| 0.174863
| 0.174863
| 0.218579
| 0.786885
| 0.786885
| 0.786885
| 0.786885
| 0.786885
| 0.786885
| 0
| 0
| 0.310448
| 335
| 20
| 36
| 16.75
| 0.792208
| 0
| 0
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.166667
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 10
|
53e91bae8daa0af1c1f71bce3b416346576007b3
| 184,856
|
py
|
Python
|
sdk/deviceupdate/azure-iot-deviceupdate/azure/iot/deviceupdate/aio/operations/_operations.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | 1
|
2022-03-09T08:59:13.000Z
|
2022-03-09T08:59:13.000Z
|
sdk/deviceupdate/azure-iot-deviceupdate/azure/iot/deviceupdate/aio/operations/_operations.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | null | null | null |
sdk/deviceupdate/azure-iot-deviceupdate/azure/iot/deviceupdate/aio/operations/_operations.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | 1
|
2022-03-04T06:21:56.000Z
|
2022-03-04T06:21:56.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from json import loads as _loads
from typing import Any, AsyncIterable, Callable, Dict, Generic, List, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.polling.async_base_polling import AsyncLROBasePolling
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from ...operations._operations import build_device_management_collect_logs_request, build_device_management_create_or_update_deployment_request, build_device_management_create_or_update_group_request, build_device_management_delete_deployment_request, build_device_management_delete_group_request, build_device_management_get_deployment_request, build_device_management_get_deployment_status_request, build_device_management_get_device_class_request, build_device_management_get_device_module_request, build_device_management_get_device_request, build_device_management_get_device_tag_request, build_device_management_get_group_request, build_device_management_get_group_update_compliance_request, build_device_management_get_log_collection_operation_detailed_status_request, build_device_management_get_log_collection_operation_request, build_device_management_get_operation_request, build_device_management_get_update_compliance_request, build_device_management_import_devices_request_initial, build_device_management_list_best_updates_for_group_request, build_device_management_list_deployment_devices_request, build_device_management_list_deployments_for_group_request, build_device_management_list_device_classes_request, build_device_management_list_device_tags_request, build_device_management_list_devices_request, build_device_management_list_groups_request, build_device_management_list_installable_updates_for_device_class_request, build_device_management_list_log_collection_operations_request, build_device_management_list_operations_request, build_device_management_retry_deployment_request, build_device_management_stop_deployment_request, build_device_update_delete_update_request_initial, build_device_update_get_file_request, build_device_update_get_operation_request, build_device_update_get_update_request, build_device_update_import_update_request_initial, build_device_update_list_files_request, build_device_update_list_names_request, 
build_device_update_list_operations_request, build_device_update_list_providers_request, build_device_update_list_updates_request, build_device_update_list_versions_request
T = TypeVar('T')
# Generated operations exchange untyped JSON payloads; alias kept for readability.
JSONType = Any
# Optional user-supplied callback ('cls' kwarg) that post-processes
# (pipeline_response, deserialized_body, response_headers) into a custom result.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DeviceUpdateOperations:
"""DeviceUpdateOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer) -> None:
    """Store the shared pipeline client, config, and (de)serializers.

    Instantiated by the generated service client — not directly by users.
    """
    self._client = client            # pipeline client used to send requests
    self._serialize = serializer     # serializes URL/path parameters
    self._deserialize = deserializer # deserializes response headers/values
    self._config = config            # carries endpoint, instance_id, polling_interval
async def _import_update_initial(
    self,
    update_to_import: List[JSONType],
    *,
    action: str,
    **kwargs: Any
) -> JSONType:
    """Send the initial (non-polled) import request; returns the 202 body, if any.

    Internal helper for ``begin_import_update`` — the poller drives the rest
    of the long-running operation via the Operation-Location header.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[JSONType]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    # Caller-provided mappings override/extend the defaults.
    error_map.update(kwargs.pop('error_map', {}))

    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str
    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

    _json = update_to_import

    request = build_device_update_import_update_request_initial(
        instance_id=self._config.instance_id,
        api_version=api_version,
        content_type=content_type,
        action=action,
        json=_json,
    )
    # Substitute the account endpoint into the request URL template.
    path_format_arguments = {
        "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    }
    request.url = self._client.format_url(request.url, **path_format_arguments)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # The service accepts the import asynchronously; anything but 202 is an error.
    if response.status_code not in [202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    response_headers = {}
    # Operation-Location points at the status endpoint the LRO poller will hit.
    response_headers['Operation-Location']=self._deserialize('str', response.headers.get('Operation-Location'))

    if response.content:
        deserialized = response.json()
    else:
        deserialized = None

    if cls:
        return cls(pipeline_response, deserialized, response_headers)

    return deserialized

_import_update_initial.metadata = {'url': '/deviceupdate/{instanceId}/updates'}  # type: ignore
@distributed_trace_async
async def begin_import_update(
    self,
    update_to_import: List[JSONType],
    *,
    action: str,
    **kwargs: Any
) -> AsyncLROPoller[JSONType]:
    """Import new update version.

    :param update_to_import: The update to be imported.
    :type update_to_import: list[JSONType]
    :keyword action: Import update action. "import"
    :paramtype action: str
    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncLROBasePolling. Pass in False
     for this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns JSON object
    :rtype: ~azure.core.polling.AsyncLROPoller[JSONType]
    :raises: ~azure.core.exceptions.HttpResponseError

    Example:
        .. code-block:: python

            # JSON input template you can fill out and use as your body input.
            update_to_import = [
                {
                    "files": [
                        {
                            "filename": "str", # Required. Update file name as specified inside import manifest.
                            "url": "str" # Required. Azure Blob location from which the update file can be downloaded by Device Update for IoT Hub. This is typically a read-only SAS-protected blob URL with an expiration set to at least 4 hours.
                        }
                    ],
                    "friendlyName": "str", # Optional. Friendly update name.
                    "importManifest": {
                        "hashes": {
                            "str": "str" # Required. A JSON object containing the hash(es) of the file. At least SHA256 hash is required. This object can be thought of as a set of key-value pairs where the key is the hash algorithm, and the value is the hash of the file calculated using that algorithm.
                        },
                        "sizeInBytes": 0.0, # Required. File size in number of bytes.
                        "url": "str" # Required. Azure Blob location from which the import manifest can be downloaded by Device Update for IoT Hub. This is typically a read-only SAS-protected blob URL with an expiration set to at least 4 hours.
                    }
                }
            ]

            # response body for status code(s): 202
            response.json() == {
                "compatibility": [
                    {
                        "str": "str" # Required. List of update compatibility information.
                    }
                ],
                "createdDateTime": "2020-02-20 00:00:00", # Required. Date and time in UTC when the update was created.
                "description": "str", # Optional. Update description specified by creator.
                "etag": "str", # Optional. Update ETag.
                "friendlyName": "str", # Optional. Friendly update name specified by importer.
                "importedDateTime": "2020-02-20 00:00:00", # Required. Date and time in UTC when the update was imported.
                "installedCriteria": "str", # Optional. String interpreted by Device Update client to determine if the update is installed on the device. Deprecated in latest import manifest schema.
                "instructions": {
                    "steps": [
                        {
                            "description": "str", # Optional. Step description.
                            "files": [
                                "str" # Optional. Collection of file names to be passed to handler during execution. Required if step type is inline.
                            ],
                            "handler": "str", # Optional. Identity of handler that will execute this step. Required if step type is inline.
                            "handlerProperties": {}, # Optional. Parameters to be passed to handler during execution.
                            "type": "inline", # Optional. Default value is "inline". Step type. Possible values include: "Inline", "Reference". Default value: "inline".
                            "updateId": {
                                "name": "str", # Required. Update name.
                                "provider": "str", # Required. Update provider.
                                "version": "str" # Required. Update version.
                            }
                        }
                    ]
                },
                "isDeployable": True, # Optional. Default value is True. Whether the update can be deployed to a device on its own.
                "manifestVersion": "str", # Required. Schema version of manifest used to import the update.
                "referencedBy": [
                    {
                        "name": "str", # Required. Update name.
                        "provider": "str", # Required. Update provider.
                        "version": "str" # Required. Update version.
                    }
                ],
                "scanResult": "str", # Optional. Update aggregate scan result (calculated from payload file scan results).
                "updateId": {
                    "name": "str", # Required. Update name.
                    "provider": "str", # Required. Update provider.
                    "version": "str" # Required. Update version.
                },
                "updateType": "str" # Optional. Update type. Deprecated in latest import manifest schema.
            }
    """
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str
    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
    polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[JSONType]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: fire the initial request; cls=lambda keeps the raw
        # pipeline response so the poller can read Operation-Location itself.
        raw_result = await self._import_update_initial(
            update_to_import=update_to_import,
            action=action,
            api_version=api_version,
            content_type=content_type,
            cls=lambda x,y,z: x,
            **kwargs
        )
    kwargs.pop('error_map', None)

    def get_long_running_output(pipeline_response):
        # Invoked by the poller on the final response; mirrors the
        # deserialization done in the initial call.
        response_headers = {}
        response = pipeline_response.http_response
        response_headers['Operation-Location']=self._deserialize('str', response.headers.get('Operation-Location'))

        if response.content:
            deserialized = response.json()
        else:
            deserialized = None

        if cls:
            return cls(pipeline_response, deserialized, response_headers)
        return deserialized

    path_format_arguments = {
        "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    }

    # polling kwarg: True -> default LRO polling, False -> no polling,
    # otherwise a user-supplied AsyncPollingMethod.
    if polling is True: polling_method = AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously saved poller instead of starting a new operation.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_import_update.metadata = {'url': '/deviceupdate/{instanceId}/updates'}  # type: ignore
@distributed_trace
def list_updates(
    self,
    *,
    search: Optional[str] = None,
    filter: Optional[str] = None,
    **kwargs: Any
) -> AsyncIterable[JSONType]:
    """Get a list of all updates that have been imported to Device Update for IoT Hub.

    :keyword search: Request updates matching a free-text search expression.
    :paramtype search: str
    :keyword filter: Filter updates by its properties.
    :paramtype filter: str
    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :return: An iterator like instance of JSON object
    :rtype: ~azure.core.async_paging.AsyncItemPaged[JSONType]
    :raises: ~azure.core.exceptions.HttpResponseError

    Example:
        .. code-block:: python

            # response body for status code(s): 200
            response.json() == {
                "nextLink": "str", # Optional. The link to the next page of items.
                "value": [
                    {
                        "compatibility": [
                            {
                                "str": "str" # Required. List of update compatibility information.
                            }
                        ],
                        "createdDateTime": "2020-02-20 00:00:00", # Required. Date and time in UTC when the update was created.
                        "description": "str", # Optional. Update description specified by creator.
                        "etag": "str", # Optional. Update ETag.
                        "friendlyName": "str", # Optional. Friendly update name specified by importer.
                        "importedDateTime": "2020-02-20 00:00:00", # Required. Date and time in UTC when the update was imported.
                        "installedCriteria": "str", # Optional. String interpreted by Device Update client to determine if the update is installed on the device. Deprecated in latest import manifest schema.
                        "instructions": {
                            "steps": [
                                {
                                    "description": "str", # Optional. Step description.
                                    "files": [
                                        "str" # Optional. Collection of file names to be passed to handler during execution. Required if step type is inline.
                                    ],
                                    "handler": "str", # Optional. Identity of handler that will execute this step. Required if step type is inline.
                                    "handlerProperties": {}, # Optional. Parameters to be passed to handler during execution.
                                    "type": "inline", # Optional. Default value is "inline". Step type. Possible values include: "Inline", "Reference". Default value: "inline".
                                    "updateId": {
                                        "name": "str", # Required. Update name.
                                        "provider": "str", # Required. Update provider.
                                        "version": "str" # Required. Update version.
                                    }
                                }
                            ]
                        },
                        "isDeployable": True, # Optional. Default value is True. Whether the update can be deployed to a device on its own.
                        "manifestVersion": "str", # Required. Schema version of manifest used to import the update.
                        "referencedBy": [
                            {
                                "name": "str", # Required. Update name.
                                "provider": "str", # Required. Update provider.
                                "version": "str" # Required. Update version.
                            }
                        ],
                        "scanResult": "str", # Optional. Update aggregate scan result (calculated from payload file scan results).
                        "updateId": {
                            "name": "str", # Required. Update name.
                            "provider": "str", # Required. Update provider.
                            "version": "str" # Required. Update version.
                        },
                        "updateType": "str" # Optional. Update type. Deprecated in latest import manifest schema.
                    }
                ]
            }
    """
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[JSONType]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # Builds either the first-page request or a follow-up request from
        # the service-provided nextLink.
        if not next_link:
            request = build_device_update_list_updates_request(
                instance_id=self._config.instance_id,
                api_version=api_version,
                search=search,
                filter=filter,
            )
            path_format_arguments = {
                "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            }
            request.url = self._client.format_url(request.url, **path_format_arguments)
        else:
            # Generated paging boilerplate: the request is rebuilt only to get
            # headers; the URL is replaced wholesale by next_link, and the
            # method is forced back to GET.
            request = build_device_update_list_updates_request(
                instance_id=self._config.instance_id,
                api_version=api_version,
                search=search,
                filter=filter,
            )
            path_format_arguments = {
                "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            }
            request.url = self._client.format_url(next_link, **path_format_arguments)
            path_format_arguments = {
                "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            }
            request.method = "GET"
        return request

    async def extract_data(pipeline_response):
        # Parse one page: returns (next_link_or_None, async-iterable of items).
        deserialized = _loads(pipeline_response.http_response.body())
        list_of_elem = deserialized["value"]
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.get("nextLink", None), AsyncList(list_of_elem)

    async def get_next(next_link=None):
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        return pipeline_response

    # AsyncItemPaged lazily fetches pages via get_next/extract_data.
    return AsyncItemPaged(
        get_next, extract_data
    )

list_updates.metadata = {'url': '/deviceupdate/{instanceId}/updates'}  # type: ignore
@distributed_trace_async
async def get_update(
    self,
    provider: str,
    name: str,
    version: str,
    *,
    if_none_match: Optional[str] = None,
    **kwargs: Any
) -> Optional[JSONType]:
    """Get a specific update version.

    :param provider: Update provider.
    :type provider: str
    :param name: Update name.
    :type name: str
    :param version: Update version.
    :type version: str
    :keyword if_none_match: Defines the If-None-Match condition. The operation will be performed
     only if the ETag on the server does not match this value.
    :paramtype if_none_match: str
    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :return: JSON object
    :rtype: JSONType or None
    :raises: ~azure.core.exceptions.HttpResponseError

    Example:
        .. code-block:: python

            # response body for status code(s): 200
            response.json() == {
                "compatibility": [
                    {
                        "str": "str" # Required. List of update compatibility information.
                    }
                ],
                "createdDateTime": "2020-02-20 00:00:00", # Required. Date and time in UTC when the update was created.
                "description": "str", # Optional. Update description specified by creator.
                "etag": "str", # Optional. Update ETag.
                "friendlyName": "str", # Optional. Friendly update name specified by importer.
                "importedDateTime": "2020-02-20 00:00:00", # Required. Date and time in UTC when the update was imported.
                "installedCriteria": "str", # Optional. String interpreted by Device Update client to determine if the update is installed on the device. Deprecated in latest import manifest schema.
                "instructions": {
                    "steps": [
                        {
                            "description": "str", # Optional. Step description.
                            "files": [
                                "str" # Optional. Collection of file names to be passed to handler during execution. Required if step type is inline.
                            ],
                            "handler": "str", # Optional. Identity of handler that will execute this step. Required if step type is inline.
                            "handlerProperties": {}, # Optional. Parameters to be passed to handler during execution.
                            "type": "inline", # Optional. Default value is "inline". Step type. Possible values include: "Inline", "Reference". Default value: "inline".
                            "updateId": {
                                "name": "str", # Required. Update name.
                                "provider": "str", # Required. Update provider.
                                "version": "str" # Required. Update version.
                            }
                        }
                    ]
                },
                "isDeployable": True, # Optional. Default value is True. Whether the update can be deployed to a device on its own.
                "manifestVersion": "str", # Required. Schema version of manifest used to import the update.
                "referencedBy": [
                    {
                        "name": "str", # Required. Update name.
                        "provider": "str", # Required. Update provider.
                        "version": "str" # Required. Update version.
                    }
                ],
                "scanResult": "str", # Optional. Update aggregate scan result (calculated from payload file scan results).
                "updateId": {
                    "name": "str", # Required. Update name.
                    "provider": "str", # Required. Update provider.
                    "version": "str" # Required. Update version.
                },
                "updateType": "str" # Optional. Update type. Deprecated in latest import manifest schema.
            }
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional[JSONType]]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str

    request = build_device_update_get_update_request(
        instance_id=self._config.instance_id,
        provider=provider,
        name=name,
        version=version,
        api_version=api_version,
        if_none_match=if_none_match,
    )
    path_format_arguments = {
        "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    }
    request.url = self._client.format_url(request.url, **path_format_arguments)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 304 is a valid "not modified" outcome when If-None-Match matched the ETag.
    if response.status_code not in [200, 304]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    deserialized = None
    # Only a 200 carries a body; 304 yields None.
    if response.status_code == 200:
        if response.content:
            deserialized = response.json()
        else:
            deserialized = None

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized

get_update.metadata = {'url': '/deviceupdate/{instanceId}/updates/providers/{provider}/names/{name}/versions/{version}'}  # type: ignore
async def _delete_update_initial(
    self,
    provider: str,
    name: str,
    version: str,
    **kwargs: Any
) -> None:
    """Send the initial (non-polled) delete request for ``begin_delete_update``.

    Returns nothing; the LRO poller tracks completion via Operation-Location.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str

    request = build_device_update_delete_update_request_initial(
        instance_id=self._config.instance_id,
        provider=provider,
        name=name,
        version=version,
        api_version=api_version,
    )
    path_format_arguments = {
        "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    }
    request.url = self._client.format_url(request.url, **path_format_arguments)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Deletion is asynchronous; the service must answer 202 Accepted.
    if response.status_code not in [202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    response_headers = {}
    response_headers['Operation-Location']=self._deserialize('str', response.headers.get('Operation-Location'))

    if cls:
        return cls(pipeline_response, None, response_headers)

_delete_update_initial.metadata = {'url': '/deviceupdate/{instanceId}/updates/providers/{provider}/names/{name}/versions/{version}'}  # type: ignore
@distributed_trace_async
async def begin_delete_update(
    self,
    provider: str,
    name: str,
    version: str,
    **kwargs: Any
) -> AsyncLROPoller[None]:
    """Delete a specific update version.

    :param provider: Update provider.
    :type provider: str
    :param name: Update name.
    :type name: str
    :param version: Update version.
    :type version: str
    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncLROBasePolling. Pass in False
     for this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns None
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str
    polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: cls=lambda preserves the raw pipeline response for
        # the poller (it needs Operation-Location).
        raw_result = await self._delete_update_initial(
            provider=provider,
            name=name,
            version=version,
            api_version=api_version,
            cls=lambda x,y,z: x,
            **kwargs
        )
    kwargs.pop('error_map', None)

    def get_long_running_output(pipeline_response):
        # Delete has no body; only invoke the user's callback, if any.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    }

    # polling kwarg: True -> default LRO polling, False -> no polling,
    # otherwise a user-supplied AsyncPollingMethod.
    if polling is True: polling_method = AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_delete_update.metadata = {'url': '/deviceupdate/{instanceId}/updates/providers/{provider}/names/{name}/versions/{version}'}  # type: ignore
@distributed_trace
def list_providers(
    self,
    **kwargs: Any
) -> AsyncIterable[JSONType]:
    """Get a list of all update providers that have been imported to Device Update for IoT Hub.

    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :return: An iterator like instance of JSON object
    :rtype: ~azure.core.async_paging.AsyncItemPaged[JSONType]
    :raises: ~azure.core.exceptions.HttpResponseError

    Example:
        .. code-block:: python

            # response body for status code(s): 200
            response.json() == {
                "nextLink": "str", # Optional. The link to the next page of items.
                "value": [
                    "str" # Required. The collection of pageable items.
                ]
            }
    """
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[JSONType]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # First page uses the templated URL; later pages reuse next_link as-is.
        if not next_link:
            request = build_device_update_list_providers_request(
                instance_id=self._config.instance_id,
                api_version=api_version,
            )
            path_format_arguments = {
                "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            }
            request.url = self._client.format_url(request.url, **path_format_arguments)
        else:
            # Generated paging boilerplate: rebuild for headers, replace URL
            # with next_link, force method back to GET.
            request = build_device_update_list_providers_request(
                instance_id=self._config.instance_id,
                api_version=api_version,
            )
            path_format_arguments = {
                "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            }
            request.url = self._client.format_url(next_link, **path_format_arguments)
            path_format_arguments = {
                "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            }
            request.method = "GET"
        return request

    async def extract_data(pipeline_response):
        # Parse one page: returns (next_link_or_None, async-iterable of items).
        deserialized = _loads(pipeline_response.http_response.body())
        list_of_elem = deserialized["value"]
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.get("nextLink", None), AsyncList(list_of_elem)

    async def get_next(next_link=None):
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )

list_providers.metadata = {'url': '/deviceupdate/{instanceId}/updates/providers'}  # type: ignore
@distributed_trace
def list_names(
    self,
    provider: str,
    **kwargs: Any
) -> AsyncIterable[JSONType]:
    """Get a list of all update names that match the specified provider.

    :param provider: Update provider.
    :type provider: str
    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :return: An iterator like instance of JSON object
    :rtype: ~azure.core.async_paging.AsyncItemPaged[JSONType]
    :raises: ~azure.core.exceptions.HttpResponseError

    Example:
        .. code-block:: python

            # response body for status code(s): 200
            response.json() == {
                "nextLink": "str",  # Optional. The link to the next page of items.
                "value": [
                    "str"  # Required. The collection of pageable items.
                ]
            }
    """
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[JSONType]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # The builder produces the same request for every page; only the URL
        # differs: the first page uses the templated builder URL, later pages
        # use the service-provided next_link verbatim.
        request = build_device_update_list_names_request(
            instance_id=self._config.instance_id,
            provider=provider,
            api_version=api_version,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        if not next_link:
            request.url = self._client.format_url(request.url, **path_format_arguments)
        else:
            request.url = self._client.format_url(next_link, **path_format_arguments)
            # Continuation requests are always plain GETs.
            request.method = "GET"
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and return (continuation token, page of items).
        deserialized = _loads(pipeline_response.http_response.body())
        list_of_elem = deserialized["value"]
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.get("nextLink", None), AsyncList(list_of_elem)

    async def get_next(next_link=None):
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_names.metadata = {'url': '/deviceupdate/{instanceId}/updates/providers/{provider}/names'}  # type: ignore
@distributed_trace
def list_versions(
    self,
    provider: str,
    name: str,
    *,
    filter: Optional[str] = None,
    **kwargs: Any
) -> AsyncIterable[JSONType]:
    """Get a list of all update versions that match the specified provider and name.

    :param provider: Update provider.
    :type provider: str
    :param name: Update name.
    :type name: str
    :keyword filter: Filter updates by its properties.
    :paramtype filter: str
    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :return: An iterator like instance of JSON object
    :rtype: ~azure.core.async_paging.AsyncItemPaged[JSONType]
    :raises: ~azure.core.exceptions.HttpResponseError

    Example:
        .. code-block:: python

            # response body for status code(s): 200
            response.json() == {
                "nextLink": "str",  # Optional. The link to the next page of items.
                "value": [
                    "str"  # Required. The collection of pageable items.
                ]
            }
    """
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[JSONType]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # Identical request for every page; only the URL varies (templated
        # builder URL for the first page, service-provided next_link after).
        request = build_device_update_list_versions_request(
            instance_id=self._config.instance_id,
            provider=provider,
            name=name,
            api_version=api_version,
            filter=filter,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        if not next_link:
            request.url = self._client.format_url(request.url, **path_format_arguments)
        else:
            request.url = self._client.format_url(next_link, **path_format_arguments)
            # Continuation requests are always plain GETs.
            request.method = "GET"
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and return (continuation token, page of items).
        deserialized = _loads(pipeline_response.http_response.body())
        list_of_elem = deserialized["value"]
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.get("nextLink", None), AsyncList(list_of_elem)

    async def get_next(next_link=None):
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_versions.metadata = {'url': '/deviceupdate/{instanceId}/updates/providers/{provider}/names/{name}/versions'}  # type: ignore
@distributed_trace
def list_files(
    self,
    provider: str,
    name: str,
    version: str,
    **kwargs: Any
) -> AsyncIterable[JSONType]:
    """Get a list of all update file identifiers for the specified version.

    :param provider: Update provider.
    :type provider: str
    :param name: Update name.
    :type name: str
    :param version: Update version.
    :type version: str
    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :return: An iterator like instance of JSON object
    :rtype: ~azure.core.async_paging.AsyncItemPaged[JSONType]
    :raises: ~azure.core.exceptions.HttpResponseError

    Example:
        .. code-block:: python

            # response body for status code(s): 200
            response.json() == {
                "nextLink": "str",  # Optional. The link to the next page of items.
                "value": [
                    "str"  # Required. The collection of pageable items.
                ]
            }
    """
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[JSONType]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # Identical request for every page; only the URL varies (templated
        # builder URL for the first page, service-provided next_link after).
        request = build_device_update_list_files_request(
            instance_id=self._config.instance_id,
            provider=provider,
            name=name,
            version=version,
            api_version=api_version,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        if not next_link:
            request.url = self._client.format_url(request.url, **path_format_arguments)
        else:
            request.url = self._client.format_url(next_link, **path_format_arguments)
            # Continuation requests are always plain GETs.
            request.method = "GET"
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and return (continuation token, page of items).
        deserialized = _loads(pipeline_response.http_response.body())
        list_of_elem = deserialized["value"]
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.get("nextLink", None), AsyncList(list_of_elem)

    async def get_next(next_link=None):
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_files.metadata = {'url': '/deviceupdate/{instanceId}/updates/providers/{provider}/names/{name}/versions/{version}/files'}  # type: ignore
@distributed_trace_async
async def get_file(
    self,
    provider: str,
    name: str,
    version: str,
    file_id: str,
    *,
    if_none_match: Optional[str] = None,
    **kwargs: Any
) -> Optional[JSONType]:
    """Get a specific update file from the version.

    :param provider: Update provider.
    :type provider: str
    :param name: Update name.
    :type name: str
    :param version: Update version.
    :type version: str
    :param file_id: File identifier.
    :type file_id: str
    :keyword if_none_match: Defines the If-None-Match condition. The operation will be performed
     only if the ETag on the server does not match this value.
    :paramtype if_none_match: str
    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :return: JSON object
    :rtype: JSONType or None
    :raises: ~azure.core.exceptions.HttpResponseError

    Example:
        .. code-block:: python

            # response body for status code(s): 200
            response.json() == {
                "etag": "str",  # Optional. File ETag.
                "fileId": "str",  # Required. File identity, generated by server at import time.
                "fileName": "str",  # Required. File name.
                "hashes": {
                    "str": "str"  # Required. Mapping of hashing algorithm to base64 encoded hash values.
                },
                "mimeType": "str",  # Optional. File MIME type.
                "scanDetails": "str",  # Optional. Anti-malware scan details.
                "scanResult": "str",  # Optional. Anti-malware scan result.
                "sizeInBytes": 0.0  # Required. File size in number of bytes.
            }
    """
    # Extract SDK plumbing options from kwargs before running the pipeline.
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional[JSONType]]
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    request = build_device_update_get_file_request(
        instance_id=self._config.instance_id,
        provider=provider,
        name=name,
        version=version,
        file_id=file_id,
        api_version=api_version,
        if_none_match=if_none_match,
    )
    # Resolve the {endpoint} placeholder in the templated URL.
    request.url = self._client.format_url(
        request.url,
        endpoint=self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    )

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 304 (Not Modified) is an expected, non-error outcome of If-None-Match.
    if response.status_code not in (200, 304):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    deserialized = None
    if response.status_code == 200 and response.content:
        deserialized = response.json()

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
get_file.metadata = {'url': '/deviceupdate/{instanceId}/updates/providers/{provider}/names/{name}/versions/{version}/files/{fileId}'}  # type: ignore
@distributed_trace
def list_operations(
    self,
    *,
    filter: Optional[str] = None,
    top: Optional[int] = None,
    **kwargs: Any
) -> AsyncIterable[JSONType]:
    """Get a list of all import update operations. Completed operations are kept for 7 days before
    auto-deleted. Delete operations are not returned by this API version.

    :keyword filter: Restricts the set of operations returned. Only one specific filter is
     supported: "status eq 'NotStarted' or status eq 'Running'".
    :paramtype filter: str
    :keyword top: Specifies a non-negative integer n that limits the number of items returned from
     a collection. The service returns the number of available items up to but not greater than the
     specified value n.
    :paramtype top: int
    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :return: An iterator like instance of JSON object
    :rtype: ~azure.core.async_paging.AsyncItemPaged[JSONType]
    :raises: ~azure.core.exceptions.HttpResponseError

    Example:
        .. code-block:: python

            # response body for status code(s): 200
            response.json() == {
                "nextLink": "str",  # Optional. The link to the next page of items.
                "value": [
                    {
                        "createdDateTime": "2020-02-20 00:00:00",  # Required. Date and time in UTC when the operation was created.
                        "error": {
                            "code": "str",  # Required. Server defined error code.
                            "details": [
                                ...
                            ],
                            "innererror": {
                                "code": "str",  # Required. A more specific error code than what was provided by the containing error.
                                "errorDetail": "str",  # Optional. The internal error or exception message.
                                "innerError": ...,
                                "message": "str"  # Optional. A human-readable representation of the error.
                            },
                            "message": "str",  # Required. A human-readable representation of the error.
                            "occurredDateTime": "2020-02-20 00:00:00",  # Optional. Date and time in UTC when the error occurred.
                            "target": "str"  # Optional. The target of the error.
                        },
                        "etag": "str",  # Optional. Operation ETag.
                        "lastActionDateTime": "2020-02-20 00:00:00",  # Required. Date and time in UTC when the operation status was last updated.
                        "operationId": "str",  # Required. Operation Id.
                        "resourceLocation": "str",  # Optional. Location of the imported update when operation is successful.
                        "status": "str",  # Required. Operation status. Possible values include: "Undefined", "NotStarted", "Running", "Succeeded", "Failed".
                        "traceId": "str",  # Optional. Operation correlation identity that can used by Microsoft Support for troubleshooting.
                        "updateId": {
                            "name": "str",  # Required. Update name.
                            "provider": "str",  # Required. Update provider.
                            "version": "str"  # Required. Update version.
                        }
                    }
                ]
            }
    """
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[JSONType]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # Identical request for every page; only the URL varies (templated
        # builder URL for the first page, service-provided next_link after).
        request = build_device_update_list_operations_request(
            instance_id=self._config.instance_id,
            api_version=api_version,
            filter=filter,
            top=top,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        if not next_link:
            request.url = self._client.format_url(request.url, **path_format_arguments)
        else:
            request.url = self._client.format_url(next_link, **path_format_arguments)
            # Continuation requests are always plain GETs.
            request.method = "GET"
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and return (continuation token, page of items).
        deserialized = _loads(pipeline_response.http_response.body())
        list_of_elem = deserialized["value"]
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.get("nextLink", None), AsyncList(list_of_elem)

    async def get_next(next_link=None):
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_operations.metadata = {'url': '/deviceupdate/{instanceId}/updates/operations'}  # type: ignore
@distributed_trace_async
async def get_operation(
    self,
    operation_id: str,
    *,
    if_none_match: Optional[str] = None,
    **kwargs: Any
) -> Optional[JSONType]:
    """Retrieve operation status.

    :param operation_id: Operation identifier.
    :type operation_id: str
    :keyword if_none_match: Defines the If-None-Match condition. The operation will be performed
     only if the ETag on the server does not match this value.
    :paramtype if_none_match: str
    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :return: JSON object
    :rtype: JSONType or None
    :raises: ~azure.core.exceptions.HttpResponseError

    Example:
        .. code-block:: python

            # response body for status code(s): 200
            response.json() == {
                "createdDateTime": "2020-02-20 00:00:00",  # Required. Date and time in UTC when the operation was created.
                "error": {
                    "code": "str",  # Required. Server defined error code.
                    "details": [
                        ...
                    ],
                    "innererror": {
                        "code": "str",  # Required. A more specific error code than what was provided by the containing error.
                        "errorDetail": "str",  # Optional. The internal error or exception message.
                        "innerError": ...,
                        "message": "str"  # Optional. A human-readable representation of the error.
                    },
                    "message": "str",  # Required. A human-readable representation of the error.
                    "occurredDateTime": "2020-02-20 00:00:00",  # Optional. Date and time in UTC when the error occurred.
                    "target": "str"  # Optional. The target of the error.
                },
                "etag": "str",  # Optional. Operation ETag.
                "lastActionDateTime": "2020-02-20 00:00:00",  # Required. Date and time in UTC when the operation status was last updated.
                "operationId": "str",  # Required. Operation Id.
                "resourceLocation": "str",  # Optional. Location of the imported update when operation is successful.
                "status": "str",  # Required. Operation status. Possible values include: "Undefined", "NotStarted", "Running", "Succeeded", "Failed".
                "traceId": "str",  # Optional. Operation correlation identity that can used by Microsoft Support for troubleshooting.
                "updateId": {
                    "name": "str",  # Required. Update name.
                    "provider": "str",  # Required. Update provider.
                    "version": "str"  # Required. Update version.
                }
            }
    """
    # Extract SDK plumbing options from kwargs before running the pipeline.
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional[JSONType]]
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    request = build_device_update_get_operation_request(
        instance_id=self._config.instance_id,
        operation_id=operation_id,
        api_version=api_version,
        if_none_match=if_none_match,
    )
    # Resolve the {endpoint} placeholder in the templated URL.
    request.url = self._client.format_url(
        request.url,
        endpoint=self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    )

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 304 (Not Modified) is an expected, non-error outcome of If-None-Match.
    if response.status_code not in (200, 304):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    deserialized = None
    response_headers = {}
    if response.status_code == 200:
        # Surface the polling hint from the service alongside the body.
        response_headers['Retry-After'] = self._deserialize('str', response.headers.get('Retry-After'))
        if response.content:
            deserialized = response.json()

    if cls:
        return cls(pipeline_response, deserialized, response_headers)
    return deserialized
get_operation.metadata = {'url': '/deviceupdate/{instanceId}/updates/operations/{operationId}'}  # type: ignore
class DeviceManagementOperations:
"""DeviceManagementOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer) -> None:
    # Keep references to the shared machinery handed over by the service
    # client; this operations group never creates its own pipeline.
    self._client = client
    self._config = config
    self._serialize = serializer
    self._deserialize = deserializer
@distributed_trace
def list_device_classes(
    self,
    **kwargs: Any
) -> AsyncIterable[JSONType]:
    """Gets a list of all device classes (unique combinations of device manufacturer and model) for
    all devices connected to Device Update for IoT Hub.

    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :return: An iterator like instance of JSON object
    :rtype: ~azure.core.async_paging.AsyncItemPaged[JSONType]
    :raises: ~azure.core.exceptions.HttpResponseError

    Example:
        .. code-block:: python

            # response body for status code(s): 200
            response.json() == {
                "nextLink": "str",  # Optional. The link to the next page of items.
                "value": [
                    {
                        "bestCompatibleUpdateId": {
                            "name": "str",  # Required. Update name.
                            "provider": "str",  # Required. Update provider.
                            "version": "str"  # Required. Update version.
                        },
                        "compatProperties": {
                            "str": "str"  # Required. The compat properties of the device class. This object can be thought of as a set of key-value pairs where the key is the name of the compatibility property and the value is the value of the compatibility property. There will always be at least 1 compat property.
                        },
                        "deviceClassId": "str"  # Required. The device class identifier.
                    }
                ]
            }
    """
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[JSONType]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # Identical request for every page; only the URL varies (templated
        # builder URL for the first page, service-provided next_link after).
        request = build_device_management_list_device_classes_request(
            instance_id=self._config.instance_id,
            api_version=api_version,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        if not next_link:
            request.url = self._client.format_url(request.url, **path_format_arguments)
        else:
            request.url = self._client.format_url(next_link, **path_format_arguments)
            # Continuation requests are always plain GETs.
            request.method = "GET"
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and return (continuation token, page of items).
        deserialized = _loads(pipeline_response.http_response.body())
        list_of_elem = deserialized["value"]
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.get("nextLink", None), AsyncList(list_of_elem)

    async def get_next(next_link=None):
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_device_classes.metadata = {'url': '/deviceupdate/{instanceId}/management/deviceclasses'}  # type: ignore
@distributed_trace_async
async def get_device_class(
    self,
    device_class_id: str,
    **kwargs: Any
) -> JSONType:
    """Gets the properties of a device class.

    :param device_class_id: Device class identifier.
    :type device_class_id: str
    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :return: JSON object
    :rtype: JSONType
    :raises: ~azure.core.exceptions.HttpResponseError

    Example:
        .. code-block:: python

            # response body for status code(s): 200
            response.json() == {
                "bestCompatibleUpdateId": {
                    "name": "str",  # Required. Update name.
                    "provider": "str",  # Required. Update provider.
                    "version": "str"  # Required. Update version.
                },
                "compatProperties": {
                    "str": "str"  # Required. The compat properties of the device class. This object can be thought of as a set of key-value pairs where the key is the name of the compatibility property and the value is the value of the compatibility property. There will always be at least 1 compat property.
                },
                "deviceClassId": "str"  # Required. The device class identifier.
            }
    """
    # Extract SDK plumbing options from kwargs before running the pipeline.
    cls = kwargs.pop('cls', None)  # type: ClsType[JSONType]
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    request = build_device_management_get_device_class_request(
        instance_id=self._config.instance_id,
        device_class_id=device_class_id,
        api_version=api_version,
    )
    # Resolve the {endpoint} placeholder in the templated URL.
    request.url = self._client.format_url(
        request.url,
        endpoint=self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    )

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in (200,):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    deserialized = response.json() if response.content else None

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
get_device_class.metadata = {'url': '/deviceupdate/{instanceId}/management/deviceclasses/{deviceClassId}'}  # type: ignore
@distributed_trace
def list_installable_updates_for_device_class(
    self,
    device_class_id: str,
    **kwargs: Any
) -> AsyncIterable[JSONType]:
    """Gets a list of installable updates for a device class.

    :param device_class_id: Device class identifier.
    :type device_class_id: str
    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :return: An iterator like instance of JSON object
    :rtype: ~azure.core.async_paging.AsyncItemPaged[JSONType]
    :raises: ~azure.core.exceptions.HttpResponseError

    Example:
        .. code-block:: python

            # response body for status code(s): 200
            response.json() == {
                "nextLink": "str",  # Optional. The link to the next page of items.
                "value": [
                    {
                        "name": "str",  # Required. Update name.
                        "provider": "str",  # Required. Update provider.
                        "version": "str"  # Required. Update version.
                    }
                ]
            }
    """
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[JSONType]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # Identical request for every page; only the URL varies (templated
        # builder URL for the first page, service-provided next_link after).
        request = build_device_management_list_installable_updates_for_device_class_request(
            instance_id=self._config.instance_id,
            device_class_id=device_class_id,
            api_version=api_version,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        if not next_link:
            request.url = self._client.format_url(request.url, **path_format_arguments)
        else:
            request.url = self._client.format_url(next_link, **path_format_arguments)
            # Continuation requests are always plain GETs.
            request.method = "GET"
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and return (continuation token, page of items).
        deserialized = _loads(pipeline_response.http_response.body())
        list_of_elem = deserialized["value"]
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.get("nextLink", None), AsyncList(list_of_elem)

    async def get_next(next_link=None):
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_installable_updates_for_device_class.metadata = {'url': '/deviceupdate/{instanceId}/management/deviceclasses/{deviceClassId}/installableupdates'}  # type: ignore
@distributed_trace
def list_devices(
    self,
    *,
    filter: Optional[str] = None,
    **kwargs: Any
) -> AsyncIterable[JSONType]:
    """Gets a list of devices connected to Device Update for IoT Hub.

    :keyword filter: Restricts the set of devices returned. You can filter on device GroupId or
     DeviceClassId.
    :paramtype filter: str
    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :return: An iterator like instance of JSON object
    :rtype: ~azure.core.async_paging.AsyncItemPaged[JSONType]
    :raises: ~azure.core.exceptions.HttpResponseError

    Example:
        .. code-block:: python

            # response body for status code(s): 200
            response.json() == {
                "nextLink": "str",  # Optional. The link to the next page of items.
                "value": [
                    {
                        "deploymentStatus": "str",  # Optional. State of the device in its last deployment. Possible values include: "Succeeded", "InProgress", "Failed", "Canceled", "Incompatible".
                        "deviceClassId": "str",  # Required. Device class identity.
                        "deviceId": "str",  # Required. Device identity.
                        "groupId": "str",  # Optional. Device group identity.
                        "installedUpdateId": {
                            "name": "str",  # Required. Update name.
                            "provider": "str",  # Required. Update provider.
                            "version": "str"  # Required. Update version.
                        },
                        "lastAttemptedUpdateId": {
                            "name": "str",  # Required. Update name.
                            "provider": "str",  # Required. Update provider.
                            "version": "str"  # Required. Update version.
                        },
                        "lastDeploymentId": "str",  # Optional. The deployment identifier for the last deployment to the device.
                        "lastInstallResult": {
                            "extendedResultCode": 0,  # Required. Install extended result code.
                            "resultCode": 0,  # Required. Install result code.
                            "resultDetails": "str",  # Optional. A string containing further details about the install result.
                            "stepResults": [
                                {
                                    "description": "str",  # Optional. Step description. It might be null for update steps.
                                    "extendedResultCode": 0,  # Required. Install extended result code.
                                    "resultCode": 0,  # Required. Install result code.
                                    "resultDetails": "str",  # Optional. A string containing further details about the install result.
                                    "updateId": {
                                        "name": "str",  # Required. Update name.
                                        "provider": "str",  # Required. Update provider.
                                        "version": "str"  # Required. Update version.
                                    }
                                }
                            ]
                        },
                        "manufacturer": "str",  # Required. Device manufacturer.
                        "model": "str",  # Required. Device model.
                        "moduleId": "str",  # Optional. Device module identity.
                        "onLatestUpdate": bool  # Required. Boolean flag indicating whether the latest update is installed on the device.
                    }
                ]
            }
    """
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[JSONType]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # Identical request for every page; only the URL varies (templated
        # builder URL for the first page, service-provided next_link after).
        request = build_device_management_list_devices_request(
            instance_id=self._config.instance_id,
            api_version=api_version,
            filter=filter,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        if not next_link:
            request.url = self._client.format_url(request.url, **path_format_arguments)
        else:
            request.url = self._client.format_url(next_link, **path_format_arguments)
            # Continuation requests are always plain GETs.
            request.method = "GET"
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and return (continuation token, page of items).
        deserialized = _loads(pipeline_response.http_response.body())
        list_of_elem = deserialized["value"]
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.get("nextLink", None), AsyncList(list_of_elem)

    async def get_next(next_link=None):
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_devices.metadata = {'url': '/deviceupdate/{instanceId}/management/devices'}  # type: ignore
async def _import_devices_initial(
    self,
    import_type: str,
    *,
    action: str,
    **kwargs: Any
) -> None:
    """Issue the initial request of the import-devices long-running operation.

    :param import_type: The types of devices to import; sent as the JSON request body.
    :type import_type: str
    :keyword action: Devices action. "import"
    :paramtype action: str
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Map well-known HTTP failures onto azure-core exception types; callers may
    # extend or override the mapping via the 'error_map' keyword.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str
    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

    body_content = import_type
    request = build_device_management_import_devices_request_initial(
        instance_id=self._config.instance_id,
        api_version=api_version,
        content_type=content_type,
        action=action,
        json=body_content,
    )
    request.url = self._client.format_url(
        request.url,
        endpoint=self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    )

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # The service acknowledges the long-running operation with 202 Accepted only.
    if response.status_code not in [202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    response_headers = {}
    response_headers['Operation-Location'] = self._deserialize('str', response.headers.get('Operation-Location'))

    if cls:
        return cls(pipeline_response, None, response_headers)

_import_devices_initial.metadata = {'url': '/deviceupdate/{instanceId}/management/devices'}  # type: ignore
@distributed_trace_async
async def begin_import_devices(
    self,
    import_type: str,
    *,
    action: str,
    **kwargs: Any
) -> AsyncLROPoller[None]:
    """Import existing devices from IoT Hub.

    :param import_type: The types of devices to import. Possible values are: "Devices", "Modules",
     and "All".
    :type import_type: str
    :keyword action: Devices action. "import"
    :paramtype action: str
    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncLROBasePolling. Pass in False
     for this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns None
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str
    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
    polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved poller state: send the initial import request. The cls
        # callback returns the raw pipeline response, which seeds the poller.
        raw_result = await self._import_devices_initial(
            import_type=import_type,
            action=action,
            api_version=api_version,
            content_type=content_type,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # Discard any caller-supplied error_map so it is not forwarded to the
    # polling requests below.
    kwargs.pop('error_map', None)

    def get_long_running_output(pipeline_response):
        # Final deserialization step: this operation has no body, so only the
        # optional custom 'cls' callback can shape the result.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    }

    # polling=True -> default LRO polling; polling=False -> fire-and-forget;
    # anything else is treated as a caller-provided polling method.
    if polling is True: polling_method = AsyncLROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Rehydrate a poller from previously saved state.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_import_devices.metadata = {'url': '/deviceupdate/{instanceId}/management/devices'}  # type: ignore
@distributed_trace_async
async def get_device(
    self,
    device_id: str,
    **kwargs: Any
) -> JSONType:
    """Gets the device properties and latest deployment status for a device connected to Device
    Update for IoT Hub.

    :param device_id: Device identifier in Azure IoT Hub.
    :type device_id: str
    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :return: JSON object describing the device: identity ("deviceId", "deviceClassId", optional
     "moduleId"/"groupId"), "manufacturer", "model", installed/last-attempted update ids, optional
     "deploymentStatus"/"lastDeploymentId"/"lastInstallResult" (with per-step results), and the
     required "onLatestUpdate" flag.
    :rtype: JSONType
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[JSONType]

    # Translate well-known HTTP failures into azure-core exceptions; callers
    # may extend/override the mapping via the 'error_map' keyword.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    request = build_device_management_get_device_request(
        instance_id=self._config.instance_id,
        device_id=device_id,
        api_version=api_version,
    )
    request.url = self._client.format_url(
        request.url,
        endpoint=self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    )

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    # An empty body deserializes to None rather than raising.
    deserialized = response.json() if response.content else None

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized

get_device.metadata = {'url': '/deviceupdate/{instanceId}/management/devices/{deviceId}'}  # type: ignore
@distributed_trace_async
async def get_device_module(
    self,
    device_id: str,
    module_id: str,
    **kwargs: Any
) -> JSONType:
    """Gets the device module properties and latest deployment status for a device module
    connected to Device Update for IoT Hub.

    :param device_id: Device identifier in Azure IoT Hub.
    :type device_id: str
    :param module_id: Device module identifier in Azure IoT Hub.
    :type module_id: str
    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :return: JSON object with the same shape as :meth:`get_device`: device/module identity,
     manufacturer, model, installed/last-attempted update ids, optional deployment status and
     last install result, and the required "onLatestUpdate" flag.
    :rtype: JSONType
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[JSONType]

    # Well-known HTTP failures map to azure-core exceptions; the caller can
    # extend/override via the 'error_map' keyword.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    request = build_device_management_get_device_module_request(
        instance_id=self._config.instance_id,
        device_id=device_id,
        module_id=module_id,
        api_version=api_version,
    )
    request.url = self._client.format_url(
        request.url,
        endpoint=self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    )

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    # An empty body deserializes to None rather than raising.
    deserialized = response.json() if response.content else None

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized

get_device_module.metadata = {'url': '/deviceupdate/{instanceId}/management/devices/{deviceId}/modules/{moduleId}'}  # type: ignore
@distributed_trace_async
async def get_update_compliance(
    self,
    **kwargs: Any
) -> JSONType:
    """Gets the breakdown of how many devices are on their latest update, have new updates
    available, or are in progress receiving new updates.

    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :return: JSON object with required integer counts: "totalDeviceCount",
     "onLatestUpdateDeviceCount", "newUpdatesAvailableDeviceCount" and
     "updatesInProgressDeviceCount".
    :rtype: JSONType
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[JSONType]

    # Well-known HTTP failures map to azure-core exceptions; the caller can
    # extend/override via the 'error_map' keyword.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    request = build_device_management_get_update_compliance_request(
        instance_id=self._config.instance_id,
        api_version=api_version,
    )
    request.url = self._client.format_url(
        request.url,
        endpoint=self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    )

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    # An empty body deserializes to None rather than raising.
    deserialized = response.json() if response.content else None

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized

get_update_compliance.metadata = {'url': '/deviceupdate/{instanceId}/management/updatecompliance'}  # type: ignore
@distributed_trace
def list_device_tags(
    self,
    **kwargs: Any
) -> AsyncIterable[JSONType]:
    """Gets a list of available group device tags for all devices connected to Device Update for
    IoT Hub.

    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :return: An iterator like instance of JSON object. Each item is a mapping with the required
     keys "tagName" (str) and "deviceCount" (int).
    :rtype: ~azure.core.async_paging.AsyncItemPaged[JSONType]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[JSONType]
    # Well-known HTTP failures map to azure-core exceptions; the caller can
    # extend/override via the 'error_map' keyword.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # First-page and continuation requests are identical except for the
        # final URL: a continuation uses the service-provided nextLink
        # verbatim. (The original generated code built the request and the
        # endpoint format arguments once per branch and then rebuilt the
        # format arguments into an unused local; that dead duplication is
        # removed here.)
        request = build_device_management_list_device_tags_request(
            instance_id=self._config.instance_id,
            api_version=api_version,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        request.url = self._client.format_url(next_link or request.url, **path_format_arguments)
        request.method = "GET"
        return request

    async def extract_data(pipeline_response):
        # Decode the page payload; "value" holds the items, "nextLink" the
        # continuation URL (absent on the last page).
        deserialized = _loads(pipeline_response.http_response.body())
        list_of_elem = deserialized["value"]
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.get("nextLink", None), AsyncList(list_of_elem)

    async def get_next(next_link=None):
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )

list_device_tags.metadata = {'url': '/deviceupdate/{instanceId}/management/devicetags'}  # type: ignore
@distributed_trace_async
async def get_device_tag(
    self,
    tag_name: str,
    **kwargs: Any
) -> JSONType:
    """Gets a count of how many devices have a device tag.

    :param tag_name: Tag name.
    :type tag_name: str
    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :return: JSON object with required keys "tagName" (str) and "deviceCount" (int).
    :rtype: JSONType
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[JSONType]

    # Well-known HTTP failures map to azure-core exceptions; the caller can
    # extend/override via the 'error_map' keyword.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    request = build_device_management_get_device_tag_request(
        instance_id=self._config.instance_id,
        tag_name=tag_name,
        api_version=api_version,
    )
    request.url = self._client.format_url(
        request.url,
        endpoint=self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    )

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    # An empty body deserializes to None rather than raising.
    deserialized = response.json() if response.content else None

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized

get_device_tag.metadata = {'url': '/deviceupdate/{instanceId}/management/devicetags/{tagName}'}  # type: ignore
@distributed_trace
def list_groups(
    self,
    **kwargs: Any
) -> AsyncIterable[JSONType]:
    """Gets a list of all device groups.

    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :return: An iterator like instance of JSON object. Each item describes a group: required
     "groupId", "groupType" ("DeviceClassIdAndIoTHubTag", "InvalidDeviceClassIdAndIoTHubTag" or
     "DefaultDeviceClassId"), "createdDateTime" and "tags" (list of str), plus optional
     "deploymentId", "deviceClassId" and "deviceCount".
    :rtype: ~azure.core.async_paging.AsyncItemPaged[JSONType]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[JSONType]
    # Well-known HTTP failures map to azure-core exceptions; the caller can
    # extend/override via the 'error_map' keyword.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # First-page and continuation requests are identical except for the
        # final URL: a continuation uses the service-provided nextLink
        # verbatim. (The original generated code duplicated the builder call
        # per branch and rebuilt the endpoint format arguments into an unused
        # local; that dead duplication is removed here.)
        request = build_device_management_list_groups_request(
            instance_id=self._config.instance_id,
            api_version=api_version,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        request.url = self._client.format_url(next_link or request.url, **path_format_arguments)
        request.method = "GET"
        return request

    async def extract_data(pipeline_response):
        # Decode the page payload; "value" holds the items, "nextLink" the
        # continuation URL (absent on the last page).
        deserialized = _loads(pipeline_response.http_response.body())
        list_of_elem = deserialized["value"]
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.get("nextLink", None), AsyncList(list_of_elem)

    async def get_next(next_link=None):
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )

list_groups.metadata = {'url': '/deviceupdate/{instanceId}/management/groups'}  # type: ignore
@distributed_trace_async
async def get_group(
    self,
    group_id: str,
    **kwargs: Any
) -> JSONType:
    """Gets the properties of a group.

    :param group_id: Group identity.
    :type group_id: str
    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :return: JSON object describing the group: required "groupId", "groupType", "createdDateTime"
     and "tags", plus optional "deploymentId", "deviceClassId" and "deviceCount".
    :rtype: JSONType
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[JSONType]

    # Well-known HTTP failures map to azure-core exceptions; the caller can
    # extend/override via the 'error_map' keyword.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    request = build_device_management_get_group_request(
        instance_id=self._config.instance_id,
        group_id=group_id,
        api_version=api_version,
    )
    request.url = self._client.format_url(
        request.url,
        endpoint=self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    )

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    # An empty body deserializes to None rather than raising.
    deserialized = response.json() if response.content else None

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized

get_group.metadata = {'url': '/deviceupdate/{instanceId}/management/groups/{groupId}'}  # type: ignore
@distributed_trace_async
async def create_or_update_group(
    self,
    group_id: str,
    group: JSONType,
    **kwargs: Any
) -> JSONType:
    """Create or update a device group.

    :param group_id: Group identity.
    :type group_id: str
    :param group: The group properties: required "groupId", "groupType"
     ("DeviceClassIdAndIoTHubTag", "InvalidDeviceClassIdAndIoTHubTag" or "DefaultDeviceClassId"),
     "createdDateTime" and "tags", plus optional "deploymentId", "deviceClassId" and
     "deviceCount".
    :type group: JSONType
    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :return: JSON object with the same shape as the ``group`` input, as stored by the service.
    :rtype: JSONType
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str
    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
    cls = kwargs.pop('cls', None)  # type: ClsType[JSONType]

    # Well-known HTTP failures map to azure-core exceptions; the caller can
    # extend/override via the 'error_map' keyword.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    body_content = group
    request = build_device_management_create_or_update_group_request(
        instance_id=self._config.instance_id,
        group_id=group_id,
        api_version=api_version,
        content_type=content_type,
        json=body_content,
    )
    request.url = self._client.format_url(
        request.url,
        endpoint=self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    )

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    # An empty body deserializes to None rather than raising.
    deserialized = response.json() if response.content else None

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized

create_or_update_group.metadata = {'url': '/deviceupdate/{instanceId}/management/groups/{groupId}'}  # type: ignore
@distributed_trace_async
async def delete_group(
    self,
    group_id: str,
    **kwargs: Any
) -> None:
    """Deletes a device group.

    :param group_id: Group identity.
    :type group_id: str
    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[None]

    # Well-known HTTP failures map to azure-core exceptions; the caller can
    # extend/override via the 'error_map' keyword.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    request = build_device_management_delete_group_request(
        instance_id=self._config.instance_id,
        group_id=group_id,
        api_version=api_version,
    )
    request.url = self._client.format_url(
        request.url,
        endpoint=self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    )

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Successful deletion is reported with 204 No Content only.
    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    if cls:
        return cls(pipeline_response, None, {})

delete_group.metadata = {'url': '/deviceupdate/{instanceId}/management/groups/{groupId}'}  # type: ignore
@distributed_trace_async
async def get_group_update_compliance(
    self,
    group_id: str,
    **kwargs: Any
) -> JSONType:
    """Get group update compliance information such as how many devices are on their latest
    update, how many need new updates, and how many are in progress on receiving a new update.

    :param group_id: Group identity.
    :type group_id: str
    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :return: JSON object with required integer counts: "totalDeviceCount",
     "onLatestUpdateDeviceCount", "newUpdatesAvailableDeviceCount" and
     "updatesInProgressDeviceCount".
    :rtype: JSONType
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[JSONType]

    # Well-known HTTP failures map to azure-core exceptions; the caller can
    # extend/override via the 'error_map' keyword.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    request = build_device_management_get_group_update_compliance_request(
        instance_id=self._config.instance_id,
        group_id=group_id,
        api_version=api_version,
    )
    request.url = self._client.format_url(
        request.url,
        endpoint=self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    )

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    # An empty body deserializes to None rather than raising.
    deserialized = response.json() if response.content else None

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized

get_group_update_compliance.metadata = {'url': '/deviceupdate/{instanceId}/management/groups/{groupId}/updateCompliance'}  # type: ignore
@distributed_trace
def list_best_updates_for_group(
    self,
    group_id: str,
    *,
    filter: Optional[str] = None,
    **kwargs: Any
) -> AsyncIterable[JSONType]:
    """Get the best available updates for a group and a count of how many devices need each update.

    :param group_id: Group identity.
    :type group_id: str
    :keyword filter: Restricts the set of bestUpdates returned. You can filter on update Provider,
     Name and Version property.
    :paramtype filter: str
    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :return: An iterator like instance of JSON object. Each page body carries a "value" list and
     an optional "nextLink" continuation URL.
    :rtype: ~azure.core.async_paging.AsyncItemPaged[JSONType]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[JSONType]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # The builder produces the first-page URL, query parameters and headers.
        # For continuation pages the service-provided nextLink replaces the URL;
        # both cases otherwise share the same request construction.
        request = build_device_management_list_best_updates_for_group_request(
            instance_id=self._config.instance_id,
            group_id=group_id,
            api_version=api_version,
            filter=filter,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        request.url = self._client.format_url(next_link or request.url, **path_format_arguments)
        request.method = "GET"
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page: return (continuation token, items of this page).
        deserialized = _loads(pipeline_response.http_response.body())
        list_of_elem = deserialized["value"]
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.get("nextLink", None), AsyncList(list_of_elem)

    async def get_next(next_link=None):
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_best_updates_for_group.metadata = {'url': '/deviceupdate/{instanceId}/management/groups/{groupId}/bestUpdates'}  # type: ignore
@distributed_trace
def list_deployments_for_group(
    self,
    group_id: str,
    *,
    filter: Optional[str] = None,
    **kwargs: Any
) -> AsyncIterable[JSONType]:
    """Gets a list of deployments for a group.

    :param group_id: Group identity.
    :type group_id: str
    :keyword filter: Restricts the set of deployments returned. You can filter on update Provider,
     Name and Version property.
    :paramtype filter: str
    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :return: An iterator like instance of JSON object. Each page body carries a "value" list and
     an optional "nextLink" continuation URL.
    :rtype: ~azure.core.async_paging.AsyncItemPaged[JSONType]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[JSONType]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # The builder produces the first-page URL, query parameters and headers.
        # For continuation pages the service-provided nextLink replaces the URL;
        # both cases otherwise share the same request construction.
        request = build_device_management_list_deployments_for_group_request(
            instance_id=self._config.instance_id,
            group_id=group_id,
            api_version=api_version,
            filter=filter,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        request.url = self._client.format_url(next_link or request.url, **path_format_arguments)
        request.method = "GET"
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page: return (continuation token, items of this page).
        deserialized = _loads(pipeline_response.http_response.body())
        list_of_elem = deserialized["value"]
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.get("nextLink", None), AsyncList(list_of_elem)

    async def get_next(next_link=None):
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_deployments_for_group.metadata = {'url': '/deviceupdate/{instanceId}/management/groups/{groupId}/deployments'}  # type: ignore
@distributed_trace_async
async def get_deployment(
    self,
    group_id: str,
    deployment_id: str,
    **kwargs: Any
) -> JSONType:
    """Gets the properties of a deployment.

    :param group_id: Group identity.
    :type group_id: str
    :param deployment_id: Deployment identifier.
    :type deployment_id: str
    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :return: JSON object with the deployment properties, or None when the response has no body.
    :rtype: JSONType
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[JSONType]
    # Default status-code -> exception mapping; callers may override entries.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str

    request = build_device_management_get_deployment_request(
        instance_id=self._config.instance_id,
        group_id=group_id,
        deployment_id=deployment_id,
        api_version=api_version,
    )
    endpoint_args = {
        "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    }
    request.url = self._client.format_url(request.url, **endpoint_args)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    deserialized = response.json() if response.content else None

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
get_deployment.metadata = {'url': '/deviceupdate/{instanceId}/management/groups/{groupId}/deployments/{deploymentId}'}  # type: ignore
@distributed_trace_async
async def create_or_update_deployment(
    self,
    deployment_id: str,
    group_id: str,
    deployment: JSONType,
    **kwargs: Any
) -> JSONType:
    """Creates or updates a deployment.

    :param deployment_id: Deployment identifier.
    :type deployment_id: str
    :param group_id: Group identity.
    :type group_id: str
    :param deployment: The deployment properties (JSON body sent as-is to the service).
    :type deployment: JSONType
    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :return: JSON object with the stored deployment, or None when the response has no body.
    :rtype: JSONType
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[JSONType]
    # Default status-code -> exception mapping; callers may override entries.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str
    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

    # The caller-supplied JSON is forwarded untouched as the request body.
    request = build_device_management_create_or_update_deployment_request(
        instance_id=self._config.instance_id,
        deployment_id=deployment_id,
        group_id=group_id,
        api_version=api_version,
        content_type=content_type,
        json=deployment,
    )
    endpoint_args = {
        "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    }
    request.url = self._client.format_url(request.url, **endpoint_args)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    deserialized = response.json() if response.content else None

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
create_or_update_deployment.metadata = {'url': '/deviceupdate/{instanceId}/management/groups/{groupId}/deployments/{deploymentId}'}  # type: ignore
@distributed_trace_async
async def delete_deployment(
    self,
    group_id: str,
    deployment_id: str,
    **kwargs: Any
) -> None:
    """Deletes a deployment.

    :param group_id: Group identity.
    :type group_id: str
    :param deployment_id: Deployment identifier.
    :type deployment_id: str
    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :return: None
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Default status-code -> exception mapping; callers may override entries.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str

    request = build_device_management_delete_deployment_request(
        instance_id=self._config.instance_id,
        group_id=group_id,
        deployment_id=deployment_id,
        api_version=api_version,
    )
    endpoint_args = {
        "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    }
    request.url = self._client.format_url(request.url, **endpoint_args)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Successful delete returns 204 No Content.
    if response.status_code != 204:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    if cls:
        return cls(pipeline_response, None, {})
delete_deployment.metadata = {'url': '/deviceupdate/{instanceId}/management/groups/{groupId}/deployments/{deploymentId}'}  # type: ignore
@distributed_trace_async
async def get_deployment_status(
    self,
    group_id: str,
    deployment_id: str,
    **kwargs: Any
) -> JSONType:
    """Gets the status of a deployment including a breakdown of how many devices in the deployment
    are in progress, completed, or failed.

    :param group_id: Group identity.
    :type group_id: str
    :param deployment_id: Deployment identifier.
    :type deployment_id: str
    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :return: JSON object with deployment state and per-state device counts, or None when the
     response has no body.
    :rtype: JSONType
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[JSONType]
    # Default status-code -> exception mapping; callers may override entries.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str

    request = build_device_management_get_deployment_status_request(
        instance_id=self._config.instance_id,
        group_id=group_id,
        deployment_id=deployment_id,
        api_version=api_version,
    )
    endpoint_args = {
        "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    }
    request.url = self._client.format_url(request.url, **endpoint_args)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    deserialized = response.json() if response.content else None

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
get_deployment_status.metadata = {'url': '/deviceupdate/{instanceId}/management/groups/{groupId}/deployments/{deploymentId}/status'}  # type: ignore
@distributed_trace
def list_deployment_devices(
    self,
    group_id: str,
    deployment_id: str,
    *,
    filter: Optional[str] = None,
    **kwargs: Any
) -> AsyncIterable[JSONType]:
    """Gets a list of devices in a deployment along with their state. Useful for getting a list of
    failed devices.

    :param group_id: Group identity.
    :type group_id: str
    :param deployment_id: Deployment identifier.
    :type deployment_id: str
    :keyword filter: Restricts the set of deployment device states returned. You can filter on
     deviceId and moduleId and/or deviceState.
    :paramtype filter: str
    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :return: An iterator like instance of JSON object. Each page body carries a "value" list and
     an optional "nextLink" continuation URL.
    :rtype: ~azure.core.async_paging.AsyncItemPaged[JSONType]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[JSONType]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # The builder produces the first-page URL, query parameters and headers.
        # For continuation pages the service-provided nextLink replaces the URL;
        # both cases otherwise share the same request construction.
        request = build_device_management_list_deployment_devices_request(
            instance_id=self._config.instance_id,
            group_id=group_id,
            deployment_id=deployment_id,
            api_version=api_version,
            filter=filter,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        request.url = self._client.format_url(next_link or request.url, **path_format_arguments)
        request.method = "GET"
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page: return (continuation token, items of this page).
        deserialized = _loads(pipeline_response.http_response.body())
        list_of_elem = deserialized["value"]
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.get("nextLink", None), AsyncList(list_of_elem)

    async def get_next(next_link=None):
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_deployment_devices.metadata = {'url': '/deviceupdate/{instanceId}/management/groups/{groupId}/deployments/{deploymentId}/devicestates'}  # type: ignore
@distributed_trace_async
async def get_operation(
    self,
    operation_id: str,
    *,
    if_none_match: Optional[str] = None,
    **kwargs: Any
) -> Optional[JSONType]:
    """Retrieve operation status.

    :param operation_id: Operation identifier.
    :type operation_id: str
    :keyword if_none_match: Defines the If-None-Match condition. The operation will be performed
     only if the ETag on the server does not match this value.
    :paramtype if_none_match: str
    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :return: JSON object with the operation status, or None (e.g. on a 304 Not Modified).
    :rtype: JSONType or None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional[JSONType]]
    # Default status-code -> exception mapping; callers may override entries.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str

    request = build_device_management_get_operation_request(
        instance_id=self._config.instance_id,
        operation_id=operation_id,
        api_version=api_version,
        if_none_match=if_none_match,
    )
    endpoint_args = {
        "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    }
    request.url = self._client.format_url(request.url, **endpoint_args)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 304 (Not Modified) is a valid, non-error outcome when if_none_match matches.
    if response.status_code not in (200, 304):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    deserialized = None
    response_headers = {}
    if response.status_code == 200:
        response_headers['Retry-After'] = self._deserialize('str', response.headers.get('Retry-After'))
        if response.content:
            deserialized = response.json()

    if cls:
        return cls(pipeline_response, deserialized, response_headers)
    return deserialized
get_operation.metadata = {'url': '/deviceupdate/{instanceId}/management/operations/{operationId}'}  # type: ignore
@distributed_trace
def list_operations(
    self,
    *,
    filter: Optional[str] = None,
    top: Optional[int] = None,
    **kwargs: Any
) -> AsyncIterable[JSONType]:
    """Get a list of all device import operations. Completed operations are kept for 7 days before
    auto-deleted.

    :keyword filter: Restricts the set of operations returned. Only one specific filter is
     supported: "status eq 'NotStarted' or status eq 'Running'".
    :paramtype filter: str
    :keyword top: Specifies a non-negative integer n that limits the number of items returned from
     a collection. The service returns the number of available items up to but not greater than
     the specified value n.
    :paramtype top: int
    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :return: An iterator like instance of JSON object. Each page body carries a "value" list and
     an optional "nextLink" continuation URL.
    :rtype: ~azure.core.async_paging.AsyncItemPaged[JSONType]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[JSONType]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # The builder produces the first-page URL, query parameters and headers.
        # For continuation pages the service-provided nextLink replaces the URL;
        # both cases otherwise share the same request construction.
        request = build_device_management_list_operations_request(
            instance_id=self._config.instance_id,
            api_version=api_version,
            filter=filter,
            top=top,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        request.url = self._client.format_url(next_link or request.url, **path_format_arguments)
        request.method = "GET"
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page: return (continuation token, items of this page).
        deserialized = _loads(pipeline_response.http_response.body())
        list_of_elem = deserialized["value"]
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.get("nextLink", None), AsyncList(list_of_elem)

    async def get_next(next_link=None):
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_operations.metadata = {'url': '/deviceupdate/{instanceId}/management/operations'}  # type: ignore
@distributed_trace_async
async def collect_logs(
    self,
    operation_id: str,
    log_collection_request: JSONType,
    **kwargs: Any
) -> JSONType:
    """Start the device diagnostics log collection operation on specified devices.

    :param operation_id: Operation identifier.
    :type operation_id: str
    :param log_collection_request: The deployment properties (JSON body sent as-is to the
     service).
    :type log_collection_request: JSONType
    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :return: JSON object with the created diagnostics operation, or None when the response has no
     body.
    :rtype: JSONType
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[JSONType]
    # Default status-code -> exception mapping; callers may override entries.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str
    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

    # The caller-supplied JSON is forwarded untouched as the request body.
    request = build_device_management_collect_logs_request(
        instance_id=self._config.instance_id,
        operation_id=operation_id,
        api_version=api_version,
        content_type=content_type,
        json=log_collection_request,
    )
    endpoint_args = {
        "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
    }
    request.url = self._client.format_url(request.url, **endpoint_args)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Successful start of a log-collection operation returns 201 Created.
    if response.status_code != 201:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response)

    deserialized = response.json() if response.content else None

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
collect_logs.metadata = {'url': '/deviceupdate/{instanceId}/management/deviceDiagnostics/logCollections/{operationId}'}  # type: ignore
@distributed_trace_async
async def get_log_collection_operation(
    self,
    operation_id: str,
    **kwargs: Any
) -> JSONType:
    """Get the device diagnostics log collection operation.

    :param operation_id: Operation identifier.
    :type operation_id: str
    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :return: JSON object describing the operation (createdDateTime, description,
     deviceList, lastActionDateTime, operationId, status).
    :rtype: JSONType
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    custom_cls = kwargs.pop('cls', None)  # type: ClsType[JSONType]
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str
    # Well-known failure codes map to dedicated azure-core exception types;
    # callers may extend/override the mapping via the ``error_map`` keyword.
    errors = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    errors.update(kwargs.pop('error_map', {}))

    http_request = build_device_management_get_log_collection_operation_request(
        instance_id=self._config.instance_id,
        operation_id=operation_id,
        api_version=api_version,
    )
    # Substitute the account endpoint into the templated request URL.
    endpoint_arg = self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True)
    http_request.url = self._client.format_url(http_request.url, endpoint=endpoint_arg)

    pipeline_result = await self._client._pipeline.run(http_request, stream=False, **kwargs)
    raw_response = pipeline_result.http_response
    if raw_response.status_code != 200:
        map_error(status_code=raw_response.status_code, response=raw_response, error_map=errors)
        raise HttpResponseError(response=raw_response)

    parsed = raw_response.json() if raw_response.content else None
    return custom_cls(pipeline_result, parsed, {}) if custom_cls else parsed

get_log_collection_operation.metadata = {'url': '/deviceupdate/{instanceId}/management/deviceDiagnostics/logCollections/{operationId}'}  # type: ignore
@distributed_trace
def list_log_collection_operations(
    self,
    **kwargs: Any
) -> AsyncIterable[JSONType]:
    """Get all device diagnostics log collection operations.

    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :return: An iterator like instance of JSON object. Each page body carries a ``value``
     list of operations and an optional ``nextLink`` pointing at the following page.
    :rtype: ~azure.core.async_paging.AsyncItemPaged[JSONType]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[JSONType]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # Build the request once; only the URL differs between the first page
        # (templated builder URL) and subsequent pages (service-supplied nextLink).
        # This replaces the previous duplicated builder calls and removes a dead
        # trailing ``path_format_arguments`` assignment that was never used.
        request = build_device_management_list_log_collection_operations_request(
            instance_id=self._config.instance_id,
            api_version=api_version,
        )
        path_format_arguments = {
            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        request.url = self._client.format_url(next_link or request.url, **path_format_arguments)
        request.method = "GET"
        return request

    async def extract_data(pipeline_response):
        # NOTE(review): sibling operations parse via ``response.json()``; ``_loads``
        # on the raw body is kept here to avoid changing the pager's parse behavior.
        deserialized = _loads(pipeline_response.http_response.body())
        list_of_elem = deserialized["value"]
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.get("nextLink", None), AsyncList(list_of_elem)

    async def get_next(next_link=None):
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)
        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )

list_log_collection_operations.metadata = {'url': '/deviceupdate/{instanceId}/management/deviceDiagnostics/logCollections'}  # type: ignore
@distributed_trace_async
async def get_log_collection_operation_detailed_status(
    self,
    operation_id: str,
    **kwargs: Any
) -> JSONType:
    """Get device diagnostics log collection operation with detailed status.

    :param operation_id: Operation identifier.
    :type operation_id: str
    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :return: JSON object with the operation plus a per-device ``deviceStatus`` list
     (deviceId, extendedResultCode, logLocation, moduleId, resultCode, status).
    :rtype: JSONType
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    custom_cls = kwargs.pop('cls', None)  # type: ClsType[JSONType]
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str
    # Well-known failure codes -> azure-core exceptions; extendable via ``error_map``.
    errors = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    errors.update(kwargs.pop('error_map', {}))

    http_request = build_device_management_get_log_collection_operation_detailed_status_request(
        instance_id=self._config.instance_id,
        operation_id=operation_id,
        api_version=api_version,
    )
    # Substitute the account endpoint into the templated request URL.
    endpoint_arg = self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True)
    http_request.url = self._client.format_url(http_request.url, endpoint=endpoint_arg)

    pipeline_result = await self._client._pipeline.run(http_request, stream=False, **kwargs)
    raw_response = pipeline_result.http_response
    if raw_response.status_code != 200:
        map_error(status_code=raw_response.status_code, response=raw_response, error_map=errors)
        raise HttpResponseError(response=raw_response)

    parsed = raw_response.json() if raw_response.content else None
    return custom_cls(pipeline_result, parsed, {}) if custom_cls else parsed

get_log_collection_operation_detailed_status.metadata = {'url': '/deviceupdate/{instanceId}/management/deviceDiagnostics/logCollections/{operationId}/detailedStatus'}  # type: ignore
@distributed_trace_async
async def stop_deployment(
    self,
    group_id: str,
    deployment_id: str,
    *,
    action: str,
    **kwargs: Any
) -> JSONType:
    """Stops a deployment.

    :param group_id: Group identity.
    :type group_id: str
    :param deployment_id: Deployment identifier.
    :type deployment_id: str
    :keyword action: Cancel deployment action. "cancel"
    :paramtype action: str
    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :return: JSON object describing the deployment (deploymentId, groupId, isCanceled,
     isRetried, startDateTime, updateId).
    :rtype: JSONType
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    custom_cls = kwargs.pop('cls', None)  # type: ClsType[JSONType]
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str
    # Well-known failure codes -> azure-core exceptions; extendable via ``error_map``.
    errors = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    errors.update(kwargs.pop('error_map', {}))

    req = build_device_management_stop_deployment_request(
        instance_id=self._config.instance_id,
        group_id=group_id,
        deployment_id=deployment_id,
        api_version=api_version,
        action=action,
    )
    # Substitute the account endpoint into the templated request URL.
    endpoint_arg = self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True)
    req.url = self._client.format_url(req.url, endpoint=endpoint_arg)

    result = await self._client._pipeline.run(req, stream=False, **kwargs)
    raw = result.http_response
    if raw.status_code != 200:
        map_error(status_code=raw.status_code, response=raw, error_map=errors)
        raise HttpResponseError(response=raw)

    body = raw.json() if raw.content else None
    return custom_cls(result, body, {}) if custom_cls else body

stop_deployment.metadata = {'url': '/deviceupdate/{instanceId}/management/groups/{groupId}/deployments/{deploymentId}'}  # type: ignore
@distributed_trace_async
async def retry_deployment(
    self,
    group_id: str,
    deployment_id: str,
    *,
    action: str,
    **kwargs: Any
) -> JSONType:
    """Retries a deployment with failed devices.

    :param group_id: Group identity.
    :type group_id: str
    :param deployment_id: Deployment identifier.
    :type deployment_id: str
    :keyword action: Retry deployment action. "retry"
    :paramtype action: str
    :keyword api_version: Api Version. The default value is "2021-06-01-preview". Note that
     overriding this default value may result in unsupported behavior.
    :paramtype api_version: str
    :return: JSON object describing the deployment (deploymentId, groupId, isCanceled,
     isRetried, startDateTime, updateId).
    :rtype: JSONType
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    custom_cls = kwargs.pop('cls', None)  # type: ClsType[JSONType]
    api_version = kwargs.pop('api_version', "2021-06-01-preview")  # type: str
    # Well-known failure codes -> azure-core exceptions; extendable via ``error_map``.
    errors = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    errors.update(kwargs.pop('error_map', {}))

    req = build_device_management_retry_deployment_request(
        instance_id=self._config.instance_id,
        group_id=group_id,
        deployment_id=deployment_id,
        api_version=api_version,
        action=action,
    )
    # Substitute the account endpoint into the templated request URL.
    endpoint_arg = self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True)
    req.url = self._client.format_url(req.url, endpoint=endpoint_arg)

    result = await self._client._pipeline.run(req, stream=False, **kwargs)
    raw = result.http_response
    if raw.status_code != 200:
        map_error(status_code=raw.status_code, response=raw, error_map=errors)
        raise HttpResponseError(response=raw)

    body = raw.json() if raw.content else None
    return custom_cls(result, body, {}) if custom_cls else body

retry_deployment.metadata = {'url': '/deviceupdate/{instanceId}/management/groups/{groupId}/deployments/{deploymentId}'}  # type: ignore
| 45.904147
| 2,130
| 0.580906
| 18,308
| 184,856
| 5.677846
| 0.032008
| 0.031938
| 0.02632
| 0.019432
| 0.952881
| 0.945512
| 0.933747
| 0.919875
| 0.904993
| 0.886118
| 0
| 0.013646
| 0.329245
| 184,856
| 4,026
| 2,131
| 45.915549
| 0.824708
| 0.169797
| 0
| 0.826541
| 0
| 0.000497
| 0.081477
| 0.045582
| 0
| 0
| 0
| 0
| 0
| 1
| 0.01839
| false
| 0
| 0.016899
| 0
| 0.095924
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
990993aeb160cf81cc19fc259d35d391c6f0cb81
| 5,662
|
py
|
Python
|
ksteta3pi/PotentialBackgrounds/MC_12_13104411_MagUp.py.py
|
Williams224/davinci-scripts
|
730642d2ff13543eca4073a4ce0932631195de56
|
[
"MIT"
] | null | null | null |
ksteta3pi/PotentialBackgrounds/MC_12_13104411_MagUp.py.py
|
Williams224/davinci-scripts
|
730642d2ff13543eca4073a4ce0932631195de56
|
[
"MIT"
] | null | null | null |
ksteta3pi/PotentialBackgrounds/MC_12_13104411_MagUp.py.py
|
Williams224/davinci-scripts
|
730642d2ff13543eca4073a4ce0932631195de56
|
[
"MIT"
] | null | null | null |
#-- GAUDI jobOptions generated on Fri Jul 24 17:22:37 2015
#-- Contains event types :
#-- 13104411 - 34 files - 508996 events - 112.67 GBytes
#-- Extra information about the data processing phases:
#-- Processing Pass Step-124620
#-- StepId : 124620
#-- StepName : Digi13 with G4 dE/dx
#-- ApplicationName : Boole
#-- ApplicationVersion : v26r3
#-- OptionFiles : $APPCONFIGOPTS/Boole/Default.py;$APPCONFIGOPTS/Boole/DataType-2012.py;$APPCONFIGOPTS/Boole/Boole-SiG4EnergyDeposit.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
#-- Processing Pass Step-124594
#-- StepId : 124594
#-- StepName : Reco14 for MC
#-- ApplicationName : Brunel
#-- ApplicationVersion : v43r2p6
#-- OptionFiles : $APPCONFIGOPTS/Brunel/DataType-2012.py;$APPCONFIGOPTS/Brunel/MC-WithTruth.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
#-- Processing Pass Step-124632
#-- StepId : 124632
#-- StepName : TCK-0x409f0045 Flagged for Sim08 2012
#-- ApplicationName : Moore
#-- ApplicationVersion : v14r8p1
#-- OptionFiles : $APPCONFIGOPTS/Moore/MooreSimProductionWithL0Emulation.py;$APPCONFIGOPTS/Conditions/TCK-0x409f0045.py;$APPCONFIGOPTS/Moore/DataType-2012.py;$APPCONFIGOPTS/L0/L0TCK-0x0045.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
#-- Processing Pass Step-124630
#-- StepId : 124630
#-- StepName : Stripping20-NoPrescalingFlagged for Sim08
#-- ApplicationName : DaVinci
#-- ApplicationVersion : v32r2p1
#-- OptionFiles : $APPCONFIGOPTS/DaVinci/DV-Stripping20-Stripping-MC-NoPrescaling.py;$APPCONFIGOPTS/DaVinci/DataType-2012.py;$APPCONFIGOPTS/DaVinci/InputType-DST.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
#-- Processing Pass Step-124817
#-- StepId : 124817
#-- StepName : Sim08a - 2012 - MU - Pythia8
#-- ApplicationName : Gauss
#-- ApplicationVersion : v45r2
#-- OptionFiles : $APPCONFIGOPTS/Gauss/Sim08-Beam4000GeV-mu100-2012-nu2.5.py;$DECFILESROOT/options/@{eventType}.py;$LBPYTHIA8ROOT/options/Pythia8.py;$APPCONFIGOPTS/Gauss/G4PL_FTFP_BERT_EmNoCuts.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : Sim08-20130503-1
#-- CONDDB : Sim08-20130503-1-vc-mu100
#-- ExtraPackages : AppConfig.v3r169;DecFiles.v27r4
#-- Visible : Y
# Gaudi job options: register the ALLSTREAMS.DST input files for this MC sample
# (event type 13104411, MagUp — see the bookkeeping header above) with IOHelper.
from Gaudi.Configuration import *
from GaudiConf import IOHelper
# NOTE(review): file index 00000024 is absent (sequence jumps 23 -> 25) —
# presumably that subjob failed or was removed from the bookkeeping; confirm.
# clear=True replaces any previously configured inputs instead of appending.
IOHelper('ROOT').inputFiles(['LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00024258/0000/00024258_00000001_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00024258/0000/00024258_00000002_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00024258/0000/00024258_00000003_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00024258/0000/00024258_00000004_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00024258/0000/00024258_00000005_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00024258/0000/00024258_00000006_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00024258/0000/00024258_00000007_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00024258/0000/00024258_00000008_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00024258/0000/00024258_00000009_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00024258/0000/00024258_00000010_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00024258/0000/00024258_00000011_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00024258/0000/00024258_00000012_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00024258/0000/00024258_00000013_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00024258/0000/00024258_00000014_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00024258/0000/00024258_00000015_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00024258/0000/00024258_00000016_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00024258/0000/00024258_00000017_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00024258/0000/00024258_00000018_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00024258/0000/00024258_00000019_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00024258/0000/00024258_00000020_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00024258/0000/00024258_00000021_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00024258/0000/00024258_00000022_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00024258/0000/00024258_00000023_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00024258/0000/00024258_00000025_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00024258/0000/00024258_00000026_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00024258/0000/00024258_00000027_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00024258/0000/00024258_00000028_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00024258/0000/00024258_00000029_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00024258/0000/00024258_00000030_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00024258/0000/00024258_00000031_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00024258/0000/00024258_00000032_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00024258/0000/00024258_00000033_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00024258/0000/00024258_00000034_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00024258/0000/00024258_00000035_1.allstreams.dst'
], clear=True)
| 51.472727
| 247
| 0.775874
| 728
| 5,662
| 5.936813
| 0.222527
| 0.204535
| 0.070801
| 0.102267
| 0.599722
| 0.599722
| 0.599722
| 0.599722
| 0.588616
| 0.577973
| 0
| 0.26087
| 0.073826
| 5,662
| 109
| 248
| 51.944954
| 0.56331
| 0.447898
| 0
| 0
| 1
| 0.918919
| 0.898079
| 0.896776
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.054054
| 0
| 0.054054
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
075a55afd2bec6fbf875de3d14130a6ff24449e0
| 1,598
|
py
|
Python
|
src/transformers/models/ltp/ltp_model_output.py
|
kssteven418/LTP
|
f1d5ec88aba913de5e2b4aa502af9cf0ab7bb13f
|
[
"Apache-2.0"
] | 34
|
2021-07-05T02:44:31.000Z
|
2022-03-28T14:39:57.000Z
|
src/transformers/models/ltp/ltp_model_output.py
|
kssteven418/LTTP
|
f1d5ec88aba913de5e2b4aa502af9cf0ab7bb13f
|
[
"Apache-2.0"
] | 3
|
2021-07-22T15:49:44.000Z
|
2022-03-19T08:46:27.000Z
|
src/transformers/models/ltp/ltp_model_output.py
|
kssteven418/LTTP
|
f1d5ec88aba913de5e2b4aa502af9cf0ab7bb13f
|
[
"Apache-2.0"
] | 6
|
2021-07-05T02:44:32.000Z
|
2022-02-14T10:10:13.000Z
|
import torch
from dataclasses import dataclass
from transformers.file_utils import ModelOutput
from typing import Optional, Tuple, List
@dataclass
class LTPEncoderOutput(ModelOutput):
    """Encoder output container for the LTP model.

    Extends the usual encoder outputs with per-layer sentence-length
    bookkeeping fields. Field declaration order is significant for
    ``ModelOutput`` tuple-style access — do not reorder.
    """
    # Hidden states of the last encoder layer.
    last_hidden_state: torch.FloatTensor = None
    # Cached key/value tensors, one tuple per layer (when caching is enabled).
    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
    # Hidden states of every layer (when output_hidden_states is requested).
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    # Self-attention weights per layer (when output_attentions is requested).
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    # Cross-attention weights per layer, for encoder-decoder use.
    cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
    # NOTE(review): presumably the (pruned) sequence lengths recorded at the
    # attention sub-layer of each layer — confirm against the encoder code.
    attention_sentence_lengths: Optional[List[List[torch.FloatTensor]]] = None
    # NOTE(review): presumably the corresponding lengths at the FFN sub-layer.
    ffn_sentence_lengths: Optional[List[List[torch.FloatTensor]]] = None
@dataclass
class LTPSequenceClassifierOutput(ModelOutput):
    """Sequence-classification output for the LTP model.

    Standard classifier outputs plus per-layer sentence-length bookkeeping.
    Field declaration order is significant for ``ModelOutput`` — do not reorder.
    """
    # Classification loss (present when labels were supplied).
    loss: Optional[torch.FloatTensor] = None
    # Classification scores, pre-softmax.
    logits: torch.FloatTensor = None
    # Hidden states of every layer (when requested).
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    # Self-attention weights per layer (when requested).
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    # NOTE(review): presumably pruned sequence lengths at each layer's
    # attention sub-layer — confirm against the model code.
    attention_sentence_lengths: Optional[List[List[torch.FloatTensor]]] = None
    # NOTE(review): corresponding lengths at each layer's FFN sub-layer.
    ffn_sentence_lengths: Optional[List[List[torch.FloatTensor]]] = None
@dataclass
class LTPModelOutput(ModelOutput):
    """Base-model output for LTP, with pooled output.

    Standard base-model outputs plus per-layer sentence-length bookkeeping.
    Field declaration order is significant for ``ModelOutput`` — do not reorder.
    """
    # Hidden states of the last layer.
    last_hidden_state: torch.FloatTensor = None
    # Pooled representation (typically derived from the first token).
    pooler_output: torch.FloatTensor = None
    # Hidden states of every layer (when requested).
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    # Cached key/value tensors, one tuple per layer (when caching is enabled).
    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
    # Self-attention weights per layer (when requested).
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    # Cross-attention weights per layer, for encoder-decoder use.
    cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
    # NOTE(review): presumably pruned sequence lengths at each layer's
    # attention sub-layer — confirm against the encoder code.
    attention_sentence_lengths: Optional[List[List[torch.FloatTensor]]] = None
    # NOTE(review): corresponding lengths at each layer's FFN sub-layer.
    ffn_sentence_lengths: Optional[List[List[torch.FloatTensor]]] = None
| 42.052632
| 78
| 0.775344
| 181
| 1,598
| 6.696133
| 0.198895
| 0.277228
| 0.346535
| 0.206271
| 0.79538
| 0.79538
| 0.79538
| 0.735974
| 0.735974
| 0.735974
| 0
| 0
| 0.124531
| 1,598
| 37
| 79
| 43.189189
| 0.866333
| 0
| 0
| 0.677419
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.129032
| 0
| 0.903226
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 10
|
075d75f0a140c65f524f00264705e4813f70a3f6
| 38,942
|
py
|
Python
|
backend/Plan2Dance_backend/plan2dance/Plan2Dance/prepare/nao_actions_config.py
|
Dongbox/Plan2Dance_docker
|
0d2f6f03cd514aafbd9e2f877c4a2f53b89cdcf8
|
[
"Apache-2.0"
] | null | null | null |
backend/Plan2Dance_backend/plan2dance/Plan2Dance/prepare/nao_actions_config.py
|
Dongbox/Plan2Dance_docker
|
0d2f6f03cd514aafbd9e2f877c4a2f53b89cdcf8
|
[
"Apache-2.0"
] | 4
|
2021-03-30T13:33:37.000Z
|
2021-06-10T19:16:18.000Z
|
backend/Plan2Dance_backend/plan2dance/Plan2Dance/prepare/nao_actions_config.py
|
Dongbox/Plan2Dance_docker
|
0d2f6f03cd514aafbd9e2f877c4a2f53b89cdcf8
|
[
"Apache-2.0"
] | 2
|
2020-07-29T09:17:11.000Z
|
2020-07-30T01:28:56.000Z
|
# Discrete posture states referenced by the NAO action definitions below.
y_state = ['forward', 'centre', 'backward'] # leaning forward, upright, leaning backward
z_state = ['stand', 'half-squat', 'squat'] # standing, half squat, full squat
hand_state = ["chest", 'others'] # hands at the chest, any other position
NaoActionsDefine = {
'abandon': {
'hand_right_use': True, # 执行过程用到右手
'hand_left_use': True, # 执行过程用到左手
'leg_right_use': True, # 执行过程用到右腿
'leg_left_use': True, # 执行过程用到左腿
'start_space_y': y_state[1], # 机器人身体倾斜状态
'start_space_z': z_state[1], # 机器人是否下蹲
'start_hand_position': hand_state[0], # 手的位置
'end_space_y': y_state[1], # 机器人身体倾斜状态
'end_space_z': z_state[1], # 机器人是否下蹲
'end_hand_position': hand_state[0], # 手的位置
'time_change_allowed': True, # 允许通过速率调整动作持续时间
'coherent': [] # 后置固定动作组衔接
},
'akimbo': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': False,
'leg_left_use': False,
'start_space_y': y_state[1],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[1],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': True,
'coherent': ['akimboRaise']
},
'akimboDown': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': False,
'leg_left_use': False,
'start_space_y': y_state[1],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[1],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': True,
'coherent': ['akimboRaise']
},
'akimboRaise': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': False,
'leg_left_use': False,
'start_space_y': y_state[1],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[1],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': True,
'coherent': []
},
'allForMe': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[1],
'start_space_z': z_state[1],
'start_hand_position': hand_state[0],
'end_space_y': y_state[1],
'end_space_z': z_state[0],
'end_hand_position': hand_state[0],
'time_change_allowed': True,
'coherent': []
},
'armsUpward': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': False,
'leg_left_use': False,
'start_space_y': y_state[1],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[1],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': True,
'coherent': []
},
'beatUpAndDown': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': False,
'leg_left_use': False,
'start_space_y': y_state[1],
'start_space_z': z_state[0],
'start_hand_position': hand_state[0],
'end_space_y': y_state[1],
'end_space_z': z_state[0],
'end_hand_position': hand_state[0],
'time_change_allowed': True,
'coherent': []
},
# 'crossFingers': {
# 'hand_right_use': True,
# 'hand_left_use': True,
# 'leg_right_use': False,
# 'leg_left_use': False,
# 'start_space_y': y_state[1],
# 'start_space_z': z_state[0],
# 'start_hand_position': hand_state[1],
# 'end_space_y': y_state[1],
# 'end_space_z': z_state[0],
# 'end_hand_position': hand_state[0],
# 'time_change_allowed': False,
# 'coherent': []
# },
'dancingYouth': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[0],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[0],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': False,
'coherent': []
},
'drumRoll': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[0],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[0],
'end_space_z': z_state[0],
'end_hand_position': hand_state[0],
'time_change_allowed': False,
'coherent': []
},
'elbowTurn': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[2],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[1],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': False,
'coherent': []
},
# 'enlargeBosmHoodwink': {
# 'hand_right_use': True,
# 'hand_left_use': True,
# 'leg_right_use': True,
# 'leg_left_use': True,
# 'start_space_y': y_state[2],
# 'start_space_z': z_state[0],
# 'start_hand_position': hand_state[1],
# 'end_space_y': y_state[1],
# 'end_space_z': z_state[1],
# 'end_hand_position': hand_state[1],
# 'time_change_allowed': False,
# 'coherent': []
# },
'fastAdvance': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[1],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[1],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': False,
'coherent': []
},
'finesseSwing': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': False,
'leg_left_use': False,
'start_space_y': y_state[1],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[1],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': True,
'coherent': []
},
'handAppearance': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[1],
'start_space_z': z_state[0],
'start_hand_position': hand_state[0],
'end_space_y': y_state[1],
'end_space_z': z_state[0],
'end_hand_position': hand_state[0],
'time_change_allowed': False,
'coherent': []
},
'handFly': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[1],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[1],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': False,
'coherent': []
},
'handHeadFoot': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[0],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[0],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': False,
'coherent': []
},
'handsBendInward': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[1],
'start_space_z': z_state[0],
'start_hand_position': hand_state[0],
'end_space_y': y_state[1],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': False,
'coherent': []
},
'handsCircle': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[1],
'start_space_z': z_state[0],
'start_hand_position': hand_state[0],
'end_space_y': y_state[1],
'end_space_z': z_state[0],
'end_hand_position': hand_state[0],
'time_change_allowed': False,
'coherent': []
},
'HandsUpAndDown': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[1],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[1],
'end_space_z': z_state[0],
'end_hand_position': hand_state[0],
'time_change_allowed': False,
'coherent': []
},
'handsWaveDown': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': False,
'leg_left_use': False,
'start_space_y': y_state[0],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[1],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': True,
'coherent': []
},
'handsWaveUp': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': False,
'leg_left_use': False,
'start_space_y': y_state[1],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[1],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': True,
'coherent': []
},
'handUpAndDown': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[0],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[1],
'end_space_z': z_state[1],
'end_hand_position': hand_state[1],
'time_change_allowed': False,
'coherent': []
},
'heavyPendulum': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[0],
'start_space_z': z_state[0],
'start_hand_position': hand_state[0],
'end_space_y': y_state[0],
'end_space_z': z_state[1],
'end_hand_position': hand_state[0],
'time_change_allowed': False,
'coherent': []
},
'hitChest': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': False,
'leg_left_use': False,
'start_space_y': y_state[1],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[1],
'end_space_z': z_state[0],
'end_hand_position': hand_state[0],
'time_change_allowed': False,
'coherent': []
},
# 'kettle': {
# 'hand_right_use': True,
# 'hand_left_use': True,
# 'leg_right_use': True,
# 'leg_left_use': True,
# 'start_space_y': y_state[1],
# 'start_space_z': z_state[0],
# 'start_hand_position': hand_state[1],
# 'end_space_y': y_state[1],
# 'end_space_z': z_state[1],
# 'end_hand_position': hand_state[1],
# 'time_change_allowed': False,
# 'coherent': []
# },
'leftAttack': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[0],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[1],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': False,
'coherent': []
},
'leftFuels': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[1],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[1],
'end_space_z': z_state[0],
'end_hand_position': hand_state[0],
'time_change_allowed': True,
'coherent': []
},
'leftHandBend': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[0],
'start_space_z': z_state[1],
'start_hand_position': hand_state[0],
'end_space_y': y_state[1],
'end_space_z': z_state[0],
'end_hand_position': hand_state[0],
'time_change_allowed': False,
'coherent': []
},
'leftHandsObliqueParallel': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': False,
'leg_left_use': False,
'start_space_y': y_state[0],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[1],
'end_space_z': z_state[0],
'end_hand_position': hand_state[0],
'time_change_allowed': False,
'coherent': []
},
'leftHandWave': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': False,
'leg_left_use': False,
'start_space_y': y_state[0],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[1],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': True,
'coherent': []
},
'leftHook': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[0],
'start_space_z': z_state[1],
'start_hand_position': hand_state[1],
'end_space_y': y_state[0],
'end_space_z': z_state[1],
'end_hand_position': hand_state[1],
'time_change_allowed': False,
'coherent': []
},
'leftProtest': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[0],
'start_space_z': z_state[1],
'start_hand_position': hand_state[0],
'end_space_y': y_state[0],
'end_space_z': z_state[1],
'end_hand_position': hand_state[0],
'time_change_allowed': True,
'coherent': []
},
'leftPunch': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': False,
'leg_left_use': False,
'start_space_y': y_state[0],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[0],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': True,
'coherent': []
},
# 'leftRightOut': {
# 'hand_right_use': True,
# 'hand_left_use': True,
# 'leg_right_use': True,
# 'leg_left_use': True,
# 'start_space_y': y_state[1],
# 'start_space_z': z_state[0],
# 'start_hand_position': hand_state[1],
# 'end_space_y': y_state[0],
# 'end_space_z': z_state[1],
# 'end_hand_position': hand_state[1],
# 'time_change_allowed': False,
# 'coherent': []
# },
'leftRightRhythm': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[1],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[1],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': True,
'coherent': ['handswavedown', 'leftRightWave'] # 表示本动作可衔接在哪些动作后面
},
'leftRightWave': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[1],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[1],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': False,
'coherent': ['leftRightRhythm']
},
'leftSideKick': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[0],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[0],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': False,
'coherent': []
},
'LeftTurn': {
'hand_right_use': False,
'hand_left_use': False,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[1],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[1],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': False,
'coherent': []
},
'liftingArm': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[1],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[1],
'end_space_z': z_state[0],
'end_hand_position': hand_state[0],
'time_change_allowed': False,
'coherent': ['LeftTurn']
},
'liftTheChest': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[0],
'start_space_z': z_state[0],
'start_hand_position': hand_state[0],
'end_space_y': y_state[0],
'end_space_z': z_state[0],
'end_hand_position': hand_state[0],
'time_change_allowed': False,
'coherent': []
},
'mergeHand': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[0],
'start_space_z': z_state[1],
'start_hand_position': hand_state[0],
'end_space_y': y_state[0],
'end_space_z': z_state[0],
'end_hand_position': hand_state[0],
'time_change_allowed': False,
'coherent': ['liftTheChest']
},
'motionHands': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': False,
'leg_left_use': False,
'start_space_y': y_state[1],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[1],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': False,
'coherent': []
},
'openHeartLeft': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[1],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[2],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': False,
'coherent': []
},
'parallelHands': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[1],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[1],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': True,
'coherent': []
},
'parallelHandsAkimboErect': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': False,
'leg_left_use': False,
'start_space_y': y_state[1],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[1],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': True,
'coherent': []
},
'power': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': False,
'leg_left_use': False,
'start_space_y': y_state[0],
'start_space_z': z_state[0],
'start_hand_position': hand_state[0],
'end_space_y': y_state[0],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': False,
'coherent': ['raiseHandsTakeHear']
},
'putUpHandsdown': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[0],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[1],
'end_space_z': z_state[1],
'end_hand_position': hand_state[1],
'time_change_allowed': False,
'coherent': []
},
'raiseHandAndDropDown': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[1],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[0],
'end_space_z': z_state[1],
'end_hand_position': hand_state[0],
'time_change_allowed': False,
'coherent': []
},
'raiseHandsTakeHear': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': False,
'leg_left_use': False,
'start_space_y': y_state[1],
'start_space_z': z_state[0],
'start_hand_position': hand_state[0],
'end_space_y': y_state[0],
'end_space_z': z_state[0],
'end_hand_position': hand_state[0],
'time_change_allowed': False,
'coherent': []
},
'rightBlock': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[0],
'start_space_z': z_state[1],
'start_hand_position': hand_state[1],
'end_space_y': y_state[0],
'end_space_z': z_state[1],
'end_hand_position': hand_state[1],
'time_change_allowed': False,
'coherent': []
},
'rightHandFan': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': False,
'leg_left_use': False,
'start_space_y': y_state[0],
'start_space_z': z_state[0],
'start_hand_position': hand_state[0],
'end_space_y': y_state[0],
'end_space_z': z_state[0],
'end_hand_position': hand_state[0],
'time_change_allowed': True,
'coherent': []
},
# 'rightHandUp': {
# 'hand_right_use': True,
# 'hand_left_use': True,
# 'leg_right_use': True,
# 'leg_left_use': True,
# 'start_space_y': y_state[2],
# 'start_space_z': z_state[1],
# 'start_hand_position': hand_state[0],
# 'end_space_y': y_state[1],
# 'end_space_z': z_state[0],
# 'end_hand_position': hand_state[0],
# 'time_change_allowed': True,
# 'coherent': []
# },
# 'rightHook': {
# 'hand_right_use': True,
# 'hand_left_use': True,
# 'leg_right_use': True,
# 'leg_left_use': True,
# 'start_space_y': y_state[1],
# 'start_space_z': z_state[0],
# 'start_hand_position': hand_state[1],
# 'end_space_y': y_state[1],
# 'end_space_z': z_state[0],
# 'end_hand_position': hand_state[1],
# 'time_change_allowed': True,
# 'coherent': []
# },
# 'rightLean': {
# 'hand_right_use': True,
# 'hand_left_use': True,
# 'leg_right_use': True,
# 'leg_left_use': True,
# 'start_space_y': y_state[1],
# 'start_space_z': z_state[1],
# 'start_hand_position': hand_state[0],
# 'end_space_y': y_state[2],
# 'end_space_z': z_state[0],
# 'end_hand_position': hand_state[0],
# 'time_change_allowed': True,
# 'coherent': []
# },
# 'rightSwing': {
# 'hand_right_use': True,
# 'hand_left_use': True,
# 'leg_right_use': False,
# 'leg_left_use': False,
# 'start_space_y': y_state[2],
# 'start_space_z': z_state[0],
# 'start_hand_position': hand_state[0],
# 'end_space_y': y_state[2],
# 'end_space_z': z_state[1],
# 'end_hand_position': hand_state[0],
# 'time_change_allowed': False,
# 'coherent': ['rightLean']
# },
'rightUppercut': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[0],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[0],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': False,
'coherent': ['roar']
},
# 'roar': {
# 'hand_right_use': True,
# 'hand_left_use': True,
# 'leg_right_use': True,
# 'leg_left_use': True,
# 'start_space_y': y_state[1],
# 'start_space_z': z_state[0],
# 'start_hand_position': hand_state[1],
# 'end_space_y': y_state[0],
# 'end_space_z': z_state[1],
# 'end_hand_position': hand_state[1],
# 'time_change_allowed': False,
# 'coherent': []
# },
# 'rotatingRightHand': {
# 'hand_right_use': True,
# 'hand_left_use': True,
# 'leg_right_use': True,
# 'leg_left_use': True,
# 'start_space_y': y_state[2],
# 'start_space_z': z_state[0],
# 'start_hand_position': hand_state[1],
# 'end_space_y': y_state[2],
# 'end_space_z': z_state[0],
# 'end_hand_position': hand_state[1],
# 'time_change_allowed': False,
# 'coherent': []
# },
'runaround': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[0],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[0],
'end_space_z': z_state[1],
'end_hand_position': hand_state[1],
'time_change_allowed': False,
'coherent': []
},
'runningMan': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[0],
'start_space_z': z_state[1],
'start_hand_position': hand_state[1],
'end_space_y': y_state[0],
'end_space_z': z_state[1],
'end_hand_position': hand_state[1],
'time_change_allowed': False,
'coherent': ['runaround']
},
'soldierSalute': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[1],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[1],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': False,
'coherent': []
},
'saluteSwingArm': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[0],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[0],
'end_space_z': z_state[0],
'end_hand_position': hand_state[0],
'time_change_allowed': False,
'coherent': []
},
'scratchingHead': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': False,
'leg_left_use': False,
'start_space_y': y_state[1],
'start_space_z': z_state[1],
'start_hand_position': hand_state[1],
'end_space_y': y_state[2],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': True,
'coherent': []
},
'shake': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[0],
'start_space_z': z_state[0],
'start_hand_position': hand_state[0],
'end_space_y': y_state[0],
'end_space_z': z_state[1],
'end_hand_position': hand_state[0],
'time_change_allowed': False,
'coherent': []
},
'shakeElbow': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': False,
'leg_left_use': False,
'start_space_y': y_state[0],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[0],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': True,
'coherent': []
},
# 'shakeHands': {
# 'hand_right_use': True,
# 'hand_left_use': True,
# 'leg_right_use': False,
# 'leg_left_use': False,
# 'start_space_y': y_state[1],
# 'start_space_z': z_state[0],
# 'start_hand_position': hand_state[1],
# 'end_space_y': y_state[1],
# 'end_space_z': z_state[0],
# 'end_hand_position': hand_state[1],
# 'time_change_allowed': True,
# 'coherent': []
# },
'shockHands': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[1],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[0],
'end_space_z': z_state[1],
'end_hand_position': hand_state[0],
'time_change_allowed': False,
'coherent': ['shoulderRhythm']
},
'shockHandsToChest': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[0],
'start_space_z': z_state[1],
'start_hand_position': hand_state[0],
'end_space_y': y_state[0],
'end_space_z': z_state[1],
'end_hand_position': hand_state[0],
'time_change_allowed': False,
'coherent': []
},
'shoulderRhythm': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[1],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[1],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': False,
'coherent': []
},
'sideMovement': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[0],
'start_space_z': z_state[0],
'start_hand_position': hand_state[0],
'end_space_y': y_state[0],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': False,
'coherent': []
},
'sidestepLeft': {
'hand_right_use': False,
'hand_left_use': False,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[1],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[1],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': False,
'coherent': []
},
# 'splitLeap': {
# 'hand_right_use': True,
# 'hand_left_use': True,
# 'leg_right_use': True,
# 'leg_left_use': True,
# 'start_space_y': y_state[0],
# 'start_space_z': z_state[1],
# 'start_hand_position': hand_state[1],
# 'end_space_y': y_state[1],
# 'end_space_z': z_state[0],
# 'end_hand_position': hand_state[1],
# 'time_change_allowed': False,
# 'coherent': []
# },
'spreadWings': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[2],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[1],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': False,
'coherent': []
},
'squat': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[0],
'start_space_z': z_state[1],
'start_hand_position': hand_state[1],
'end_space_y': y_state[1],
'end_space_z': z_state[1],
'end_hand_position': hand_state[1],
'time_change_allowed': False,
'coherent': []
},
'storingForce': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[0],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[1],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': False,
'coherent': []
},
# 'swingHandsFootSide': {
# 'hand_right_use': True,
# 'hand_left_use': True,
# 'leg_right_use': True,
# 'leg_left_use': True,
# 'start_space_y': y_state[1],
# 'start_space_z': z_state[0],
# 'start_hand_position': hand_state[1],
# 'end_space_y': y_state[1],
# 'end_space_z': z_state[1],
# 'end_hand_position': hand_state[1],
# 'time_change_allowed': True,
# 'coherent': []
# },
'swingHandsPutUp': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[0],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[0],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': False,
'coherent': []
},
'swingShake': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[0],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[0],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': True,
'coherent': []
},
'takeTheLeft': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[0],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[1],
'end_space_z': z_state[0],
'end_hand_position': hand_state[0],
'time_change_allowed': False,
'coherent': []
},
'twistWaist': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[0],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[0],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': False,
'coherent': []
},
'upwardPunches': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[0],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[0],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': False,
'coherent': []
},
'verticalArm': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[0],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[0],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': False,
'coherent': []
},
'waveLeftAndRight': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[0],
'start_space_z': z_state[0],
'start_hand_position': hand_state[1],
'end_space_y': y_state[0],
'end_space_z': z_state[0],
'end_hand_position': hand_state[1],
'time_change_allowed': False,
'coherent': []
},
'wildDance': {
'hand_right_use': True,
'hand_left_use': True,
'leg_right_use': True,
'leg_left_use': True,
'start_space_y': y_state[0],
'start_space_z': z_state[0],
'start_hand_position': hand_state[0],
'end_space_y': y_state[0],
'end_space_z': z_state[0],
'end_hand_position': hand_state[0],
'time_change_allowed': False,
'coherent': []
},
}
| 32.532999
| 73
| 0.554543
| 4,979
| 38,942
| 3.858606
| 0.026511
| 0.106392
| 0.06194
| 0.106184
| 0.935509
| 0.935509
| 0.924474
| 0.924474
| 0.924474
| 0.924474
| 0
| 0.01851
| 0.292486
| 38,942
| 1,196
| 74
| 32.560201
| 0.678789
| 0.134919
| 0
| 0.845015
| 0
| 0
| 0.380841
| 0.001433
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4ae05f4fe808c698237ebf6e3960ccd6135c1bd0
| 7,544
|
py
|
Python
|
Boutique/DIOR.py
|
yangjun-ux/Boutique-crawling-Project
|
e880d9fde234c2ab336340f98430aa530c106661
|
[
"MIT"
] | 1
|
2022-03-09T03:34:51.000Z
|
2022-03-09T03:34:51.000Z
|
Boutique/DIOR.py
|
yangjun-ux/Boutique-crawling-Project
|
e880d9fde234c2ab336340f98430aa530c106661
|
[
"MIT"
] | null | null | null |
Boutique/DIOR.py
|
yangjun-ux/Boutique-crawling-Project
|
e880d9fde234c2ab336340f98430aa530c106661
|
[
"MIT"
] | null | null | null |
from bs4 import BeautifulSoup
from urllib.request import urlopen
from urllib.parse import urljoin
import urllib.request
import pandas as pd
# Dior clothing (men's section scraped first, then women's)
def dior_clothes():
    """Scrape Dior men's and women's ready-to-wear listings.

    Fetches the men's and women's clothing pages from dior.com (Korean
    locale), collects title/price/image/sex/brand for every product card,
    downloads each product image into ``dior mens clothes/`` and
    ``dior womens clothes/`` (folders must already exist), writes the
    combined rows to ``dior_clothes.csv`` and returns them.

    Returns:
        pandas.DataFrame: columns ``title``, ``price``, ``image``,
        ``sex``, ``brand`` for both men's and women's items.
    """
    url_base = 'https://www.dior.com/ko_kr'

    def _scrape(url_sub, sex_label):
        # Fetch one listing page and collect one row per product card.
        # Returns (DataFrame, list of image links).
        soup = BeautifulSoup(urlopen(url_base + url_sub), 'html.parser')
        data = {'title': [], 'price': [], 'image': [], 'sex': [], 'brand': []}
        for item in soup.find_all('div', 'product product-legend-bottom product--cdcbase'):
            data['brand'].append('Dior')
            data['title'].append(item.find(class_='multiline-text').get_text())
            # Drop the won sign so prices are bare numeric strings.
            data['price'].append(item.find(class_='price-line').get_text().replace('₩', ''))
            data['image'].append(item.find('img')['src'])
            data['sex'].append(sex_label)
        # NOTE: the original also collected product URLs via urljoin but
        # never used them (and the list shadowed the page URL variable);
        # that dead work is dropped here.
        return pd.DataFrame(data), data['image']

    def _download(links, folder, notice):
        # Save each image as <folder>/dior<idx>.jpg; folder must exist.
        print(notice)
        for idx, link in enumerate(links):
            urllib.request.urlretrieve(link, folder + '/dior' + str(idx) + '.jpg')

    # Men's ready-to-wear
    df_mens_clothes, img = _scrape(
        '/%EB%82%A8%EC%84%B1-%ED%8C%A8%EC%85%98/%EB%A0%88%EB%94%94-%ED%88%AC-%EC%9B%A8%EC%96%B4/%EC%A0%84%EC%B2%B4-%EB%A0%88%EB%94%94-%ED%88%AC-%EC%9B%A8%EC%96%B4',
        'Men')
    _download(img, 'dior mens clothes', '남성의류 사진 저장을 시작합니다')

    # Women's ready-to-wear
    df_womens_clothes, img = _scrape(
        '/%EC%97%AC%EC%84%B1-%ED%8C%A8%EC%85%98/%EC%97%AC%EC%84%B1-%EC%9D%98%EB%A5%98/%EC%A0%84%EC%B2%B4-%EB%A0%88%EB%94%94-%ED%88%AC-%EC%9B%A8%EC%96%B4',
        'Women')

    # Combine and persist before the women's image download (original order).
    dior_clothes_df = pd.concat([df_mens_clothes, df_womens_clothes]).reset_index(drop=True)
    dior_clothes_df.to_csv('dior_clothes.csv', index=False, encoding='utf-8')
    _download(img, 'dior womens clothes', '여성의류 사진 저장을 시작합니다')
    return dior_clothes_df
# Dior bags (men's section scraped first, then women's)
def dior_bags():
    """Scrape Dior men's and women's leather-goods/bag listings.

    Fetches both listing pages from dior.com (Korean locale), collects
    title/price/image/sex/brand for every product card, downloads each
    product image into ``dior mens bags/`` and ``dior womens bags/``
    (folders must already exist), writes the combined rows to
    ``dior_bags.csv`` and returns them.

    Returns:
        pandas.DataFrame: columns ``title``, ``price``, ``image``,
        ``sex``, ``brand`` for both men's and women's items.
    """
    url_base = 'https://www.dior.com/ko_kr'

    def _scrape(url_sub, sex_label):
        # Fetch one listing page and collect one row per product card.
        # Returns (DataFrame, list of image links).
        soup = BeautifulSoup(urlopen(url_base + url_sub), 'html.parser')
        data = {'title': [], 'price': [], 'image': [], 'sex': [], 'brand': []}
        for item in soup.find_all('div', 'product product-legend-bottom product--cdcbase'):
            data['brand'].append('Dior')
            data['title'].append(item.find(class_='multiline-text').get_text())
            # Drop the won sign so prices are bare numeric strings.
            data['price'].append(item.find(class_='price-line').get_text().replace('₩', ''))
            data['image'].append(item.find('img')['src'])
            data['sex'].append(sex_label)
        # NOTE: the original also collected product URLs via urljoin but
        # never used them; that dead work is dropped here.
        return pd.DataFrame(data), data['image']

    def _download(links, folder, notice):
        # Save each image as <folder>/dior<idx>.jpg; folder must exist.
        print(notice)
        for idx, link in enumerate(links):
            urllib.request.urlretrieve(link, folder + '/dior' + str(idx) + '.jpg')

    # Men's leather goods
    df_mens_bags, img = _scrape(
        '/%EB%82%A8%EC%84%B1-%ED%8C%A8%EC%85%98/%EA%B0%80%EC%A3%BD-%EC%A0%9C%ED%92%88/%EC%A0%84%EC%B2%B4-%EA%B0%80%EC%A3%BD-%EC%A0%9C%ED%92%88',
        'Men')
    _download(img, 'dior mens bags', '남성가방 사진 저장을 시작합니다')

    # Women's bags. BUGFIX: the original labeled these rows 'women'
    # (lowercase) while dior_clothes used 'Women' — normalized so the
    # combined dataset has a single consistent label.
    df_womens_bags, img = _scrape(
        '/%EC%97%AC%EC%84%B1-%ED%8C%A8%EC%85%98/%EC%97%AC%EC%84%B1-%EA%B0%80%EB%B0%A9/%EB%AA%A8%EB%93%A0-%EA%B0%80%EC%A3%BD%EC%A0%9C%ED%92%88',
        'Women')

    # Combine and persist before the women's image download (original order).
    dior_bags_df = pd.concat([df_mens_bags, df_womens_bags]).reset_index(drop=True)
    dior_bags_df.to_csv('dior_bags.csv', index=False, encoding='utf-8')
    _download(img, 'dior womens bags', '여성가방 사진 저장을 시작합니다')
    return dior_bags_df
# Dior shoes (men's section scraped first, then women's)
def dior_shoes():
    """Scrape Dior men's and women's shoe listings.

    Fetches both listing pages from dior.com (Korean locale), collects
    title/price/image/sex/brand for every product card, downloads each
    product image into ``dior mens shoes/`` and ``dior women shoes/``
    (folders must already exist; note the women's folder name has no
    trailing 's' on 'women', kept for backward compatibility), writes the
    combined rows to ``dior_shoes.csv`` and returns them.

    Returns:
        pandas.DataFrame: columns ``title``, ``price``, ``image``,
        ``sex``, ``brand`` for both men's and women's items.
    """
    url_base = 'https://www.dior.com/ko_kr'

    def _scrape(url_sub, sex_label):
        # Fetch one listing page and collect one row per product card.
        # Returns (DataFrame, list of image links).
        soup = BeautifulSoup(urlopen(url_base + url_sub), 'html.parser')
        data = {'title': [], 'price': [], 'image': [], 'sex': [], 'brand': []}
        for item in soup.find_all('div', 'product product-legend-bottom product--cdcbase'):
            data['brand'].append('Dior')
            data['title'].append(item.find(class_='multiline-text').get_text())
            # Drop the won sign so prices are bare numeric strings.
            data['price'].append(item.find(class_='price-line').get_text().replace('₩', ''))
            data['image'].append(item.find('img')['src'])
            data['sex'].append(sex_label)
        # NOTE: the original also collected product URLs via urljoin but
        # never used them; that dead work is dropped here.
        return pd.DataFrame(data), data['image']

    def _download(links, folder, notice):
        # Save each image as <folder>/dior<idx>.jpg; folder must exist.
        print(notice)
        for idx, link in enumerate(links):
            urllib.request.urlretrieve(link, folder + '/dior' + str(idx) + '.jpg')

    # Men's shoes
    df_mens_shoes, img = _scrape(
        '/%EB%82%A8%EC%84%B1-%ED%8C%A8%EC%85%98/%EC%8A%88%EC%A6%88/%EB%AA%A8%EB%93%A0-%EC%8A%88%EC%A6%88',
        'Men')
    _download(img, 'dior mens shoes', '남성신발 사진 저장을 시작합니다')

    # Women's shoes. BUGFIX: the original labeled these rows 'women'
    # (lowercase) while dior_clothes used 'Women' — normalized so the
    # combined dataset has a single consistent label.
    df_women_shoes, img = _scrape(
        '/%EC%97%AC%EC%84%B1-%ED%8C%A8%EC%85%98/%EC%97%AC%EC%84%B1-%EC%8A%88%EC%A6%88/%EB%AA%A8%EB%93%A0-%EC%8A%88%EC%A6%88',
        'Women')

    # Combine and persist before the women's image download (original order).
    dior_shoes_df = pd.concat([df_mens_shoes, df_women_shoes]).reset_index(drop=True)
    dior_shoes_df.to_csv('dior_shoes.csv', index=False, encoding='utf-8')
    _download(img, 'dior women shoes', '여성신발 사진 저장을 시작합니다')
    return dior_shoes_df
| 36.269231
| 169
| 0.606575
| 1,155
| 7,544
| 3.863203
| 0.116017
| 0.04303
| 0.056477
| 0.051098
| 0.902958
| 0.866876
| 0.796056
| 0.796056
| 0.796056
| 0.766472
| 0
| 0.036737
| 0.198966
| 7,544
| 208
| 170
| 36.269231
| 0.700645
| 0.043081
| 0
| 0.729032
| 0
| 0.03871
| 0.277716
| 0.124479
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019355
| false
| 0
| 0.032258
| 0
| 0.070968
| 0.03871
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4afa13f31671f7baafff71a85bb32ab192cd700f
| 209
|
py
|
Python
|
autotesting/benchmarks_ground_truth/conv_pool_softmax.py
|
ualberta-smr/SOAR
|
325a6ed2518088b9800299c81271db51b645816a
|
[
"BSD-3-Clause-Clear"
] | 8
|
2021-01-13T14:59:18.000Z
|
2021-06-29T17:01:37.000Z
|
autotesting/benchmarks_ground_truth/conv_pool_softmax.py
|
squaresLab/SOAR
|
72a35a4014d3e74548aab7d2a5cf1bdfaab149c1
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
autotesting/benchmarks_ground_truth/conv_pool_softmax.py
|
squaresLab/SOAR
|
72a35a4014d3e74548aab7d2a5cf1bdfaab149c1
|
[
"BSD-3-Clause-Clear"
] | 2
|
2021-01-16T00:09:54.000Z
|
2021-08-05T01:14:40.000Z
|
{'tf.keras.layers.Conv2D': ('torch.nn.Conv2d', 1), 'tf.keras.layers.MaxPool2D': ('torch.nn.MaxPool2d', 1), 'tf.keras.layers.Flatten': ('torch.flatten', 1), 'tf.keras.layers.Softmax': ('torch.nn.Softmax', 1)}
| 69.666667
| 207
| 0.669856
| 31
| 209
| 4.516129
| 0.322581
| 0.2
| 0.371429
| 0.3
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.040816
| 0.062201
| 209
| 2
| 208
| 104.5
| 0.673469
| 0
| 0
| 0
| 0
| 0
| 0.745192
| 0.447115
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ab02eefb3e853b7772dac4116a31f54be52a3626
| 12,762
|
py
|
Python
|
python/pycarbon/test/test_read_write_carbon.py
|
ydvpankaj99/carbondata
|
1613ed9f94a727fcb453320858e3b59b3d832995
|
[
"Apache-2.0"
] | 1
|
2020-02-14T17:21:39.000Z
|
2020-02-14T17:21:39.000Z
|
python/pycarbon/test/test_read_write_carbon.py
|
ydvpankaj99/carbondata
|
1613ed9f94a727fcb453320858e3b59b3d832995
|
[
"Apache-2.0"
] | null | null | null |
python/pycarbon/test/test_read_write_carbon.py
|
ydvpankaj99/carbondata
|
1613ed9f94a727fcb453320858e3b59b3d832995
|
[
"Apache-2.0"
] | 1
|
2020-09-15T10:22:10.000Z
|
2020-09-15T10:22:10.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Test-suite setup: pytest plus the pycarbon SDK wrappers under test.
import pytest
from pycarbon.sdk.CarbonReader import CarbonReader
from pycarbon.sdk.CarbonSchemaReader import CarbonSchemaReader
from pycarbon.sdk.CarbonWriter import CarbonWriter
import base64
import time
import shutil
import os
import jnius_config
# The JVM classpath must be configured before any `from jnius import autoclass`
# lookup runs in the tests below; it points at the carbondata SDK jar.
jnius_config.set_classpath("../../../store/sdk/target/carbondata-sdk.jar")
# Directory holding the image/text fixtures used by the binary-column tests.
IMAGE_DATA_PATH = "./resources"
def test_run_write_carbon():
  """Round-trip test: write rows to a carbon store, read them back, check schema.

  Writes the same 10 rows twice into the same folder and verifies both the
  positional and the keyword-argument forms of CarbonSchemaReader.readSchema.
  """
  jsonSchema = "[{stringField:string},{shortField:short},{intField:int}]"
  path = "/tmp/data/writeCarbon" + str(time.time())
  if os.path.exists(path):
    shutil.rmtree(path)
  # Hoisted out of the write loops: the jnius import and the JNI class lookup
  # are loop-invariant, so resolve them once.
  from jnius import autoclass
  arrayListClass = autoclass("java.util.ArrayList")

  def _write_rows(num_rows):
    # Build a writer for `path`, emit num_rows rows, then close it.
    writer = CarbonWriter() \
      .builder() \
      .outputPath(path) \
      .withCsvInput(jsonSchema) \
      .writtenBy("pycarbon") \
      .build()
    for i in range(num_rows):
      data_list = arrayListClass()
      data_list.add("pycarbon")
      data_list.add(str(i))
      data_list.add(str(i * 10))
      writer.write(data_list.toArray())
    writer.close()

  _write_rows(10)
  reader = CarbonReader() \
    .builder() \
    .withFolder(path) \
    .withBatch(1000) \
    .build()
  i = 0
  while reader.hasNext():
    rows = reader.readNextBatchRow()
    i += len(rows)
  assert 10 == i
  reader.close()
  carbonSchemaReader = CarbonSchemaReader()
  schema = carbonSchemaReader.readSchema(path)
  assert 3 == schema.getFieldsLength()
  # Write a second batch into the same folder, then re-read the schema using
  # the keyword-argument call form with validation enabled.
  _write_rows(10)
  carbonSchemaReader = CarbonSchemaReader()
  schema = carbonSchemaReader.readSchema(getAsBuffer=False, path=path, validateSchema=True)
  assert 3 == schema.getFieldsLength()
  shutil.rmtree(path)
def test_run_write_carbon_binary_base64_encode():
  """Write rows carrying a base64-encoded JPEG binary column and read them back.

  The first row's binary column (when larger than 1000 bytes) is decoded and
  dumped to disk as an image file to spot-check the round-trip.
  """
  jsonSchema = "[{stringField:string},{shortField:short},{intField:int},{binaryField:binary}]"
  path = "/tmp/data/writeCarbon" + str(time.time())
  if os.path.exists(path):
    shutil.rmtree(path)
  jpg_path = IMAGE_DATA_PATH + "/carbondatalogo.jpg"
  writer = CarbonWriter() \
    .builder() \
    .outputPath(path) \
    .withCsvInput(jsonSchema) \
    .writtenBy("pycarbon") \
    .build()
  # Fixture is only read, so open it read-only ('rb+' would require write access).
  with open(jpg_path, mode='rb') as file_object:
    content = file_object.read()
  # jnius import and JNI class lookup hoisted out of the loop (loop-invariant).
  from jnius import autoclass
  arrayListClass = autoclass("java.util.ArrayList")
  for i in range(10):
    data_list = arrayListClass()
    data_list.add("pycarbon")
    data_list.add(str(i))
    data_list.add(str(i * 10))
    data_list.add(base64.b64encode(content))
    writer.write(data_list.toArray())
  writer.close()
  reader = CarbonReader() \
    .builder() \
    .withFolder(path) \
    .withBatch(1000) \
    .build()
  i = 0
  from jnius.jnius import ByteArray  # hoisted out of the nested read loop
  while reader.hasNext():
    rows = reader.readNextBatchRow()
    for row in rows:
      i += 1
      for column in row:
        # Dump the first row's large binary column to verify it decodes.
        if 1 == i and isinstance(column, ByteArray) and len(column) > 1000:
          with open(path + "/image.jpg", 'wb+') as file_object:
            file_object.write(base64.b64decode(column.tostring()))
  assert 10 == i
  reader.close()
  shutil.rmtree(path)
# TODO: to be supported
@pytest.mark.skip("write binary to be supported")
def test_run_write_carbon_binary():
  """Write rows with a raw (non-base64) binary column; skipped until supported.

  Mirrors test_run_write_carbon_binary_base64_encode but writes and reads the
  JPEG bytes without any encoding step.
  """
  jsonSchema = "[{stringField:string},{shortField:short},{intField:int},{binaryField:binary}]"
  path = "/tmp/data/writeCarbon" + str(time.time())
  if os.path.exists(path):
    shutil.rmtree(path)
  jpg_path = IMAGE_DATA_PATH + "/carbondatalogo.jpg"
  writer = CarbonWriter() \
    .builder() \
    .outputPath(path) \
    .withCsvInput(jsonSchema) \
    .writtenBy("pycarbon") \
    .build()
  # Fixture is only read, so open it read-only.
  with open(jpg_path, mode='rb') as file_object:
    content = file_object.read()
  # jnius import and JNI class lookup hoisted out of the loop (loop-invariant).
  from jnius import autoclass
  arrayListClass = autoclass("java.util.ArrayList")
  for i in range(10):
    data_list = arrayListClass()
    data_list.add("pycarbon")
    data_list.add(str(i))
    data_list.add(str(i * 10))
    data_list.add(content)
    writer.write(data_list.toArray())
  writer.close()
  reader = CarbonReader() \
    .builder() \
    .withFolder(path) \
    .withBatch(1000) \
    .build()
  i = 0
  from jnius.jnius import ByteArray  # hoisted out of the nested read loop
  while reader.hasNext():
    rows = reader.readNextBatchRow()
    for row in rows:
      i += 1
      for column in row:
        # Dump the first row's large binary column as-is (no base64 decode).
        if 1 == i and isinstance(column, ByteArray) and len(column) > 1000:
          with open(path + "/image.jpg", 'wb+') as file_object:
            file_object.write(column.tostring())
  assert 10 == i
  reader.close()
  shutil.rmtree(path)
def test_run_write_carbon_binary_base64_encode_many_files():
  """Write one row per flower image (base64 binary + caption text), read back.

  Uses the Java-side SDKUtil to enumerate the .jpg fixtures; every decoded
  binary column larger than 1000 bytes is dumped back out as an image file.
  """
  jsonSchema = "[{stringField:string},{shortField:short},{intField:int},{binaryField:binary},{txtField:string}]"
  path = "/tmp/data/writeCarbon" + str(time.time())
  if os.path.exists(path):
    shutil.rmtree(path)
  jpg_path = IMAGE_DATA_PATH + "/flowers"
  from jnius import autoclass
  sdkUtilClass = autoclass("org.apache.carbondata.sdk.file.utils.SDKUtil")
  jpg_files = sdkUtilClass.listFiles(jpg_path, '.jpg')
  writer = CarbonWriter() \
    .builder() \
    .outputPath(path) \
    .withCsvInput(jsonSchema) \
    .writtenBy("pycarbon") \
    .build()
  # Loop-invariant JNI class lookup, resolved once instead of once per image.
  arrayListClass = autoclass("java.util.ArrayList")
  for i in range(jpg_files.size()):
    jpg_path = jpg_files.get(i)
    # Fixtures are read-only; 'rb'/'r' avoids requiring write permission.
    with open(jpg_path, mode='rb') as file_object:
      content = file_object.read()
    with open(str(jpg_path).replace('.jpg', '.txt'), mode='r') as file_object:
      txt = file_object.read()
    data_list = arrayListClass()
    data_list.add("pycarbon")
    data_list.add(str(i))
    data_list.add(str(i * 10))
    data_list.add(base64.b64encode(content))
    data_list.add(txt)
    writer.write(data_list.toArray())
  writer.close()
  reader = CarbonReader() \
    .builder() \
    .withFolder(path) \
    .build()
  i = 0
  from jnius.jnius import ByteArray  # hoisted out of the nested read loop
  while reader.hasNext():
    rows = reader.readNextBatchRow()
    for row in rows:
      i += 1
      for column in row:
        if isinstance(column, ByteArray) and len(column) > 1000:
          with open(path + "/image" + str(i) + ".jpg", 'wb+') as file_object:
            file_object.write(base64.b64decode(column.tostring()))
  assert 3 == i
  reader.close()
  shutil.rmtree(path)
def test_run_write_carbon_binary_base64_encode_voc():
  """Write one row per VOC image (base64 binary + XML annotation), read back.

  Same shape as the many_files test but pairs each .jpg with its .xml
  annotation and expects 5 fixture rows.
  """
  jsonSchema = "[{stringField:string},{shortField:short},{intField:int},{binaryField:binary},{txtField:string}]"
  path = "/tmp/data/writeCarbon" + str(time.time())
  if os.path.exists(path):
    shutil.rmtree(path)
  jpg_path = IMAGE_DATA_PATH + "/voc"
  from jnius import autoclass
  sdkUtilClass = autoclass("org.apache.carbondata.sdk.file.utils.SDKUtil")
  jpg_files = sdkUtilClass.listFiles(jpg_path, '.jpg')
  writer = CarbonWriter() \
    .builder() \
    .outputPath(path) \
    .withCsvInput(jsonSchema) \
    .writtenBy("pycarbon") \
    .build()
  # Loop-invariant JNI class lookup, resolved once instead of once per image.
  arrayListClass = autoclass("java.util.ArrayList")
  for i in range(jpg_files.size()):
    jpg_path = jpg_files.get(i)
    # Fixtures are read-only; 'rb'/'r' avoids requiring write permission.
    with open(jpg_path, mode='rb') as file_object:
      content = file_object.read()
    with open(str(jpg_path).replace('.jpg', '.xml'), mode='r') as file_object:
      txt = file_object.read()
    data_list = arrayListClass()
    data_list.add("pycarbon")
    data_list.add(str(i))
    data_list.add(str(i * 10))
    data_list.add(base64.b64encode(content))
    data_list.add(txt)
    writer.write(data_list.toArray())
  writer.close()
  reader = CarbonReader() \
    .builder() \
    .withFolder(path) \
    .withBatch(1000) \
    .build()
  i = 0
  from jnius.jnius import ByteArray  # hoisted out of the nested read loop
  while reader.hasNext():
    rows = reader.readNextBatchRow()
    for row in rows:
      i += 1
      for column in row:
        if isinstance(column, ByteArray) and len(column) > 1000:
          with open(path + "/image" + str(i) + ".jpg", 'wb+') as file_object:
            file_object.write(base64.b64decode(column.tostring()))
  assert 5 == i
  reader.close()
  shutil.rmtree(path)
def test_run_write_carbon_binary_base64_encode_vocForSegmentationClass():
  """Write rows with two base64 binary columns (JPEG + PNG mask), read back.

  Every large binary column is dumped to "/image<row>_<col>.jpg" so both the
  source image and its segmentation mask are verified per row.
  """
  jsonSchema = "[{stringField:string},{shortField:short},{intField:int},{binaryField:binary},{segField:binary}]"
  path = "/tmp/data/writeCarbon" + str(time.time())
  if os.path.exists(path):
    shutil.rmtree(path)
  jpg_path = IMAGE_DATA_PATH + "/vocForSegmentationClass"
  from jnius import autoclass
  sdkUtilClass = autoclass("org.apache.carbondata.sdk.file.utils.SDKUtil")
  jpg_files = sdkUtilClass.listFiles(jpg_path, '.jpg')
  writer = CarbonWriter() \
    .builder() \
    .outputPath(path) \
    .withCsvInput(jsonSchema) \
    .writtenBy("pycarbon") \
    .build()
  # Loop-invariant JNI class lookup, resolved once instead of once per image.
  arrayListClass = autoclass("java.util.ArrayList")
  for i in range(jpg_files.size()):
    jpg_path = jpg_files.get(i)
    # Fixtures are read-only; 'rb' avoids requiring write permission.
    with open(jpg_path, mode='rb') as file_object:
      content = file_object.read()
    with open(str(jpg_path).replace('.jpg', '.png'), mode='rb') as file_object:
      png_data = file_object.read()
    data_list = arrayListClass()
    data_list.add("pycarbon")
    data_list.add(str(i))
    data_list.add(str(i * 10))
    data_list.add(base64.b64encode(content))
    data_list.add(base64.b64encode(png_data))
    writer.write(data_list.toArray())
  writer.close()
  reader = CarbonReader() \
    .builder() \
    .withFolder(path) \
    .withBatch(1000) \
    .build()
  i = 0
  from jnius.jnius import ByteArray  # hoisted out of the nested read loop
  while reader.hasNext():
    rows = reader.readNextBatchRow()
    for row in rows:
      i += 1
      num = 0
      for column in row:
        num += 1
        if isinstance(column, ByteArray) and len(column) > 1000:
          with open(path + "/image" + str(i) + "_" + str(num) + ".jpg", 'wb+') as file_object:
            file_object.write(base64.b64decode(column.tostring()))
  assert 3 == i
  reader.close()
  shutil.rmtree(path)
def test_run_write_carbon_binary_base64_encode_decodeInJava_many_files():
  """Write base64 binaries with the Java-side "base64" load-option decoder.

  Because the store decodes the column in Java, the read-back bytes are
  written to disk directly (no Python-side b64decode).
  """
  jsonSchema = "[{stringField:string},{shortField:short},{intField:int},{binaryField:binary},{txtField:string}]"
  path = "/tmp/data/writeCarbon" + str(time.time())
  if os.path.exists(path):
    shutil.rmtree(path)
  jpg_path = IMAGE_DATA_PATH + "/flowers"
  from jnius import autoclass
  sdkUtilClass = autoclass("org.apache.carbondata.sdk.file.utils.SDKUtil")
  jpg_files = sdkUtilClass.listFiles(jpg_path, '.jpg')
  writer = CarbonWriter() \
    .builder() \
    .outputPath(path) \
    .withCsvInput(jsonSchema) \
    .writtenBy("pycarbon") \
    .withLoadOption("binary_decoder", "base64") \
    .withPageSizeInMb(1) \
    .build()
  # Loop-invariant JNI class lookup, resolved once instead of once per image.
  arrayListClass = autoclass("java.util.ArrayList")
  for i in range(jpg_files.size()):
    jpg_path = jpg_files.get(i)
    # Fixtures are read-only; 'rb'/'r' avoids requiring write permission.
    with open(jpg_path, mode='rb') as file_object:
      content = file_object.read()
    with open(str(jpg_path).replace('.jpg', '.txt'), mode='r') as file_object:
      txt = file_object.read()
    data_list = arrayListClass()
    data_list.add("pycarbon")
    data_list.add(str(i))
    data_list.add(str(i * 10))
    data_list.add(base64.b64encode(content))
    data_list.add(txt)
    writer.write(data_list.toArray())
  writer.close()
  reader = CarbonReader() \
    .builder() \
    .withFolder(path) \
    .withBatch(1000) \
    .build()
  i = 0
  from jnius.jnius import ByteArray  # hoisted out of the nested read loop
  while reader.hasNext():
    rows = reader.readNextBatchRow()
    for row in rows:
      i += 1
      for column in row:
        # Already decoded by the Java-side binary_decoder; write raw bytes.
        if isinstance(column, ByteArray) and len(column) > 1000 and i < 20:
          with open(path + "/image" + str(i) + ".jpg", 'wb+') as file_object:
            file_object.write(column.tostring())
  assert 3 == i
  reader.close()
  shutil.rmtree(path)
| 27.211087
| 112
| 0.665413
| 1,596
| 12,762
| 5.20614
| 0.125313
| 0.048141
| 0.045011
| 0.026959
| 0.850524
| 0.842219
| 0.821519
| 0.81514
| 0.81514
| 0.806716
| 0
| 0.016543
| 0.194797
| 12,762
| 468
| 113
| 27.269231
| 0.79204
| 0.06057
| 0
| 0.883721
| 0
| 0.011628
| 0.127955
| 0.081934
| 0
| 0
| 0
| 0.002137
| 0.026163
| 1
| 0.020349
| false
| 0
| 0.06686
| 0
| 0.087209
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ab034504d5fe58897e15783866c79a155a0730b3
| 58
|
py
|
Python
|
test/example/__init__.py
|
minhtuan221/cython-npm
|
ea6e2c9910800ffc12f8d8cfc5d39ad8198ef87c
|
[
"MIT"
] | 6
|
2018-03-01T04:25:41.000Z
|
2019-10-21T03:39:11.000Z
|
test/example/__init__.py
|
minhtuan221/cython-npm
|
ea6e2c9910800ffc12f8d8cfc5d39ad8198ef87c
|
[
"MIT"
] | null | null | null |
test/example/__init__.py
|
minhtuan221/cython-npm
|
ea6e2c9910800ffc12f8d8cfc5d39ad8198ef87c
|
[
"MIT"
] | 1
|
2019-04-26T10:43:49.000Z
|
2019-04-26T10:43:49.000Z
|
from example import hello
from example import secondapp
| 19.333333
| 30
| 0.827586
| 8
| 58
| 6
| 0.625
| 0.458333
| 0.708333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172414
| 58
| 2
| 31
| 29
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ab169e74d6c4f62985a713c88955e5789ac6e446
| 70
|
py
|
Python
|
models/sfnet_module/__init__.py
|
synml/face-parsing-pytorch
|
c0d8aad8ec0a72d65abf04a826b0a15ef1125a49
|
[
"MIT"
] | null | null | null |
models/sfnet_module/__init__.py
|
synml/face-parsing-pytorch
|
c0d8aad8ec0a72d65abf04a826b0a15ef1125a49
|
[
"MIT"
] | null | null | null |
models/sfnet_module/__init__.py
|
synml/face-parsing-pytorch
|
c0d8aad8ec0a72d65abf04a826b0a15ef1125a49
|
[
"MIT"
] | null | null | null |
import models.sfnet_module.module
import models.sfnet_module.resnet_d
| 23.333333
| 35
| 0.885714
| 11
| 70
| 5.363636
| 0.545455
| 0.40678
| 0.576271
| 0.779661
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.057143
| 70
| 2
| 36
| 35
| 0.893939
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
ab312c87312660984095bcfb102bd996d552ce09
| 56,511
|
py
|
Python
|
sdk/python/pulumi_oci/loganalytics/log_analytics_object_collection_rule.py
|
EladGabay/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 5
|
2021-08-17T11:14:46.000Z
|
2021-12-31T02:07:03.000Z
|
sdk/python/pulumi_oci/loganalytics/log_analytics_object_collection_rule.py
|
pulumi-oci/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-09-06T11:21:29.000Z
|
2021-09-06T11:21:29.000Z
|
sdk/python/pulumi_oci/loganalytics/log_analytics_object_collection_rule.py
|
pulumi-oci/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2021-08-24T23:31:30.000Z
|
2022-01-02T19:26:54.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
# Public API of this generated module: the args class and the resource class.
__all__ = ['LogAnalyticsObjectCollectionRuleArgs', 'LogAnalyticsObjectCollectionRule']
@pulumi.input_type
class LogAnalyticsObjectCollectionRuleArgs:
    """Constructor arguments for the LogAnalyticsObjectCollectionRule resource.

    Generated input type (see the tfgen warning at the top of this file):
    the first six fields are required; all others are stored only when a
    non-None value is supplied.
    """
    def __init__(__self__, *,
                 compartment_id: pulumi.Input[str],
                 log_group_id: pulumi.Input[str],
                 log_source_name: pulumi.Input[str],
                 namespace: pulumi.Input[str],
                 os_bucket_name: pulumi.Input[str],
                 os_namespace: pulumi.Input[str],
                 char_encoding: Optional[pulumi.Input[str]] = None,
                 collection_type: Optional[pulumi.Input[str]] = None,
                 defined_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 entity_id: Optional[pulumi.Input[str]] = None,
                 freeform_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 overrides: Optional[pulumi.Input[Sequence[pulumi.Input['LogAnalyticsObjectCollectionRuleOverrideArgs']]]] = None,
                 poll_since: Optional[pulumi.Input[str]] = None,
                 poll_till: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a LogAnalyticsObjectCollectionRule resource.
        :param pulumi.Input[str] compartment_id: (Updatable) The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment to which this rule belongs.
        :param pulumi.Input[str] log_group_id: (Updatable) Logging Analytics Log group OCID to associate the processed logs with.
        :param pulumi.Input[str] log_source_name: (Updatable) Name of the Logging Analytics Source to use for the processing.
        :param pulumi.Input[str] namespace: The Logging Analytics namespace used for the request.
        :param pulumi.Input[str] os_bucket_name: Name of the Object Storage bucket.
        :param pulumi.Input[str] os_namespace: Object Storage namespace.
        :param pulumi.Input[str] char_encoding: (Updatable) An optional character encoding to aid in detecting the character encoding of the contents of the objects while processing. It is recommended to set this value as ISO_8589_1 when configuring content of the objects having more numeric characters, and very few alphabets. For e.g. this applies when configuring VCN Flow Logs.
        :param pulumi.Input[str] collection_type: The type of collection. Supported collection types: LIVE, HISTORIC, HISTORIC_LIVE
        :param pulumi.Input[Mapping[str, Any]] defined_tags: (Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. Example: `{"foo-namespace.bar-key": "value"}`
        :param pulumi.Input[str] description: (Updatable) A string that describes the details of the rule. It does not have to be unique, and can be changed. Avoid entering confidential information.
        :param pulumi.Input[str] entity_id: (Updatable) Logging Analytics entity OCID. Associates the processed logs with the given entity (optional).
        :param pulumi.Input[Mapping[str, Any]] freeform_tags: (Updatable) Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: `{"bar-key": "value"}`
        :param pulumi.Input[str] name: A unique name given to the rule. The name must be unique within the tenancy, and cannot be modified.
        :param pulumi.Input[Sequence[pulumi.Input['LogAnalyticsObjectCollectionRuleOverrideArgs']]] overrides: (Updatable) The override is used to modify some important configuration properties for objects matching a specific pattern inside the bucket. Supported propeties for override are - logSourceName, charEncoding. Supported matchType for override are "contains".
        :param pulumi.Input[str] poll_since: The oldest time of the file in the bucket to consider for collection. Accepted values are: BEGINNING or CURRENT_TIME or RFC3339 formatted datetime string. When collectionType is LIVE, specifying pollSince value other than CURRENT_TIME will result in error.
        :param pulumi.Input[str] poll_till: The oldest time of the file in the bucket to consider for collection. Accepted values are: CURRENT_TIME or RFC3339 formatted datetime string. When collectionType is LIVE, specifying pollTill will result in error.
        """
        # Required fields are always stored in the pulumi input-type backing store.
        pulumi.set(__self__, "compartment_id", compartment_id)
        pulumi.set(__self__, "log_group_id", log_group_id)
        pulumi.set(__self__, "log_source_name", log_source_name)
        pulumi.set(__self__, "namespace", namespace)
        pulumi.set(__self__, "os_bucket_name", os_bucket_name)
        pulumi.set(__self__, "os_namespace", os_namespace)
        # Optional fields are only stored when explicitly provided, so that
        # unset values stay absent rather than becoming explicit Nones.
        if char_encoding is not None:
            pulumi.set(__self__, "char_encoding", char_encoding)
        if collection_type is not None:
            pulumi.set(__self__, "collection_type", collection_type)
        if defined_tags is not None:
            pulumi.set(__self__, "defined_tags", defined_tags)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if entity_id is not None:
            pulumi.set(__self__, "entity_id", entity_id)
        if freeform_tags is not None:
            pulumi.set(__self__, "freeform_tags", freeform_tags)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if overrides is not None:
            pulumi.set(__self__, "overrides", overrides)
        if poll_since is not None:
            pulumi.set(__self__, "poll_since", poll_since)
        if poll_till is not None:
            pulumi.set(__self__, "poll_till", poll_till)
    # Generated accessors: each getter/setter pair delegates to pulumi.get/set
    # under the snake_case key; @pulumi.getter(name=...) maps the camelCase
    # wire name where it differs.
    @property
    @pulumi.getter(name="compartmentId")
    def compartment_id(self) -> pulumi.Input[str]:
        """
        (Updatable) The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment to which this rule belongs.
        """
        return pulumi.get(self, "compartment_id")
    @compartment_id.setter
    def compartment_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "compartment_id", value)
    @property
    @pulumi.getter(name="logGroupId")
    def log_group_id(self) -> pulumi.Input[str]:
        """
        (Updatable) Logging Analytics Log group OCID to associate the processed logs with.
        """
        return pulumi.get(self, "log_group_id")
    @log_group_id.setter
    def log_group_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "log_group_id", value)
    @property
    @pulumi.getter(name="logSourceName")
    def log_source_name(self) -> pulumi.Input[str]:
        """
        (Updatable) Name of the Logging Analytics Source to use for the processing.
        """
        return pulumi.get(self, "log_source_name")
    @log_source_name.setter
    def log_source_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "log_source_name", value)
    @property
    @pulumi.getter
    def namespace(self) -> pulumi.Input[str]:
        """
        The Logging Analytics namespace used for the request.
        """
        return pulumi.get(self, "namespace")
    @namespace.setter
    def namespace(self, value: pulumi.Input[str]):
        pulumi.set(self, "namespace", value)
    @property
    @pulumi.getter(name="osBucketName")
    def os_bucket_name(self) -> pulumi.Input[str]:
        """
        Name of the Object Storage bucket.
        """
        return pulumi.get(self, "os_bucket_name")
    @os_bucket_name.setter
    def os_bucket_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "os_bucket_name", value)
    @property
    @pulumi.getter(name="osNamespace")
    def os_namespace(self) -> pulumi.Input[str]:
        """
        Object Storage namespace.
        """
        return pulumi.get(self, "os_namespace")
    @os_namespace.setter
    def os_namespace(self, value: pulumi.Input[str]):
        pulumi.set(self, "os_namespace", value)
    @property
    @pulumi.getter(name="charEncoding")
    def char_encoding(self) -> Optional[pulumi.Input[str]]:
        """
        (Updatable) An optional character encoding to aid in detecting the character encoding of the contents of the objects while processing. It is recommended to set this value as ISO_8589_1 when configuring content of the objects having more numeric characters, and very few alphabets. For e.g. this applies when configuring VCN Flow Logs.
        """
        return pulumi.get(self, "char_encoding")
    @char_encoding.setter
    def char_encoding(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "char_encoding", value)
    @property
    @pulumi.getter(name="collectionType")
    def collection_type(self) -> Optional[pulumi.Input[str]]:
        """
        The type of collection. Supported collection types: LIVE, HISTORIC, HISTORIC_LIVE
        """
        return pulumi.get(self, "collection_type")
    @collection_type.setter
    def collection_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "collection_type", value)
    @property
    @pulumi.getter(name="definedTags")
    def defined_tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        (Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. Example: `{"foo-namespace.bar-key": "value"}`
        """
        return pulumi.get(self, "defined_tags")
    @defined_tags.setter
    def defined_tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "defined_tags", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        (Updatable) A string that describes the details of the rule. It does not have to be unique, and can be changed. Avoid entering confidential information.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter(name="entityId")
    def entity_id(self) -> Optional[pulumi.Input[str]]:
        """
        (Updatable) Logging Analytics entity OCID. Associates the processed logs with the given entity (optional).
        """
        return pulumi.get(self, "entity_id")
    @entity_id.setter
    def entity_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "entity_id", value)
    @property
    @pulumi.getter(name="freeformTags")
    def freeform_tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        (Updatable) Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: `{"bar-key": "value"}`
        """
        return pulumi.get(self, "freeform_tags")
    @freeform_tags.setter
    def freeform_tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "freeform_tags", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        A unique name given to the rule. The name must be unique within the tenancy, and cannot be modified.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def overrides(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LogAnalyticsObjectCollectionRuleOverrideArgs']]]]:
        """
        (Updatable) The override is used to modify some important configuration properties for objects matching a specific pattern inside the bucket. Supported propeties for override are - logSourceName, charEncoding. Supported matchType for override are "contains".
        """
        return pulumi.get(self, "overrides")
    @overrides.setter
    def overrides(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['LogAnalyticsObjectCollectionRuleOverrideArgs']]]]):
        pulumi.set(self, "overrides", value)
    @property
    @pulumi.getter(name="pollSince")
    def poll_since(self) -> Optional[pulumi.Input[str]]:
        """
        The oldest time of the file in the bucket to consider for collection. Accepted values are: BEGINNING or CURRENT_TIME or RFC3339 formatted datetime string. When collectionType is LIVE, specifying pollSince value other than CURRENT_TIME will result in error.
        """
        return pulumi.get(self, "poll_since")
    @poll_since.setter
    def poll_since(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "poll_since", value)
    @property
    @pulumi.getter(name="pollTill")
    def poll_till(self) -> Optional[pulumi.Input[str]]:
        """
        The oldest time of the file in the bucket to consider for collection. Accepted values are: CURRENT_TIME or RFC3339 formatted datetime string. When collectionType is LIVE, specifying pollTill will result in error.
        """
        return pulumi.get(self, "poll_till")
    @poll_till.setter
    def poll_till(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "poll_till", value)
@pulumi.input_type
class _LogAnalyticsObjectCollectionRuleState:
def __init__(__self__, *,
char_encoding: Optional[pulumi.Input[str]] = None,
collection_type: Optional[pulumi.Input[str]] = None,
compartment_id: Optional[pulumi.Input[str]] = None,
defined_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
description: Optional[pulumi.Input[str]] = None,
entity_id: Optional[pulumi.Input[str]] = None,
freeform_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
lifecycle_details: Optional[pulumi.Input[str]] = None,
log_group_id: Optional[pulumi.Input[str]] = None,
log_source_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace: Optional[pulumi.Input[str]] = None,
os_bucket_name: Optional[pulumi.Input[str]] = None,
os_namespace: Optional[pulumi.Input[str]] = None,
overrides: Optional[pulumi.Input[Sequence[pulumi.Input['LogAnalyticsObjectCollectionRuleOverrideArgs']]]] = None,
poll_since: Optional[pulumi.Input[str]] = None,
poll_till: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None,
time_created: Optional[pulumi.Input[str]] = None,
time_updated: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering LogAnalyticsObjectCollectionRule resources.
:param pulumi.Input[str] char_encoding: (Updatable) An optional character encoding to aid in detecting the character encoding of the contents of the objects while processing. It is recommended to set this value as ISO_8589_1 when configuring content of the objects having more numeric characters, and very few alphabets. For e.g. this applies when configuring VCN Flow Logs.
:param pulumi.Input[str] collection_type: The type of collection. Supported collection types: LIVE, HISTORIC, HISTORIC_LIVE
:param pulumi.Input[str] compartment_id: (Updatable) The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment to which this rule belongs.
:param pulumi.Input[Mapping[str, Any]] defined_tags: (Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. Example: `{"foo-namespace.bar-key": "value"}`
:param pulumi.Input[str] description: (Updatable) A string that describes the details of the rule. It does not have to be unique, and can be changed. Avoid entering confidential information.
:param pulumi.Input[str] entity_id: (Updatable) Logging Analytics entity OCID. Associates the processed logs with the given entity (optional).
:param pulumi.Input[Mapping[str, Any]] freeform_tags: (Updatable) Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: `{"bar-key": "value"}`
:param pulumi.Input[str] lifecycle_details: A detailed status of the life cycle state.
:param pulumi.Input[str] log_group_id: (Updatable) Logging Analytics Log group OCID to associate the processed logs with.
:param pulumi.Input[str] log_source_name: (Updatable) Name of the Logging Analytics Source to use for the processing.
:param pulumi.Input[str] name: A unique name given to the rule. The name must be unique within the tenancy, and cannot be modified.
:param pulumi.Input[str] namespace: The Logging Analytics namespace used for the request.
:param pulumi.Input[str] os_bucket_name: Name of the Object Storage bucket.
:param pulumi.Input[str] os_namespace: Object Storage namespace.
:param pulumi.Input[Sequence[pulumi.Input['LogAnalyticsObjectCollectionRuleOverrideArgs']]] overrides: (Updatable) The override is used to modify some important configuration properties for objects matching a specific pattern inside the bucket. Supported propeties for override are - logSourceName, charEncoding. Supported matchType for override are "contains".
:param pulumi.Input[str] poll_since: The oldest time of the file in the bucket to consider for collection. Accepted values are: BEGINNING or CURRENT_TIME or RFC3339 formatted datetime string. When collectionType is LIVE, specifying pollSince value other than CURRENT_TIME will result in error.
:param pulumi.Input[str] poll_till: The oldest time of the file in the bucket to consider for collection. Accepted values are: CURRENT_TIME or RFC3339 formatted datetime string. When collectionType is LIVE, specifying pollTill will result in error.
:param pulumi.Input[str] state: The current state of the rule.
:param pulumi.Input[str] time_created: The time when this rule was created. An RFC3339 formatted datetime string.
:param pulumi.Input[str] time_updated: The time when this rule was last updated. An RFC3339 formatted datetime string.
"""
if char_encoding is not None:
pulumi.set(__self__, "char_encoding", char_encoding)
if collection_type is not None:
pulumi.set(__self__, "collection_type", collection_type)
if compartment_id is not None:
pulumi.set(__self__, "compartment_id", compartment_id)
if defined_tags is not None:
pulumi.set(__self__, "defined_tags", defined_tags)
if description is not None:
pulumi.set(__self__, "description", description)
if entity_id is not None:
pulumi.set(__self__, "entity_id", entity_id)
if freeform_tags is not None:
pulumi.set(__self__, "freeform_tags", freeform_tags)
if lifecycle_details is not None:
pulumi.set(__self__, "lifecycle_details", lifecycle_details)
if log_group_id is not None:
pulumi.set(__self__, "log_group_id", log_group_id)
if log_source_name is not None:
pulumi.set(__self__, "log_source_name", log_source_name)
if name is not None:
pulumi.set(__self__, "name", name)
if namespace is not None:
pulumi.set(__self__, "namespace", namespace)
if os_bucket_name is not None:
pulumi.set(__self__, "os_bucket_name", os_bucket_name)
if os_namespace is not None:
pulumi.set(__self__, "os_namespace", os_namespace)
if overrides is not None:
pulumi.set(__self__, "overrides", overrides)
if poll_since is not None:
pulumi.set(__self__, "poll_since", poll_since)
if poll_till is not None:
pulumi.set(__self__, "poll_till", poll_till)
if state is not None:
pulumi.set(__self__, "state", state)
if time_created is not None:
pulumi.set(__self__, "time_created", time_created)
if time_updated is not None:
pulumi.set(__self__, "time_updated", time_updated)
@property
@pulumi.getter(name="charEncoding")
def char_encoding(self) -> Optional[pulumi.Input[str]]:
"""
(Updatable) An optional character encoding to aid in detecting the character encoding of the contents of the objects while processing. It is recommended to set this value as ISO_8589_1 when configuring content of the objects having more numeric characters, and very few alphabets. For e.g. this applies when configuring VCN Flow Logs.
"""
return pulumi.get(self, "char_encoding")
@char_encoding.setter
def char_encoding(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "char_encoding", value)
@property
@pulumi.getter(name="collectionType")
def collection_type(self) -> Optional[pulumi.Input[str]]:
"""
The type of collection. Supported collection types: LIVE, HISTORIC, HISTORIC_LIVE
"""
return pulumi.get(self, "collection_type")
@collection_type.setter
def collection_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "collection_type", value)
@property
@pulumi.getter(name="compartmentId")
def compartment_id(self) -> Optional[pulumi.Input[str]]:
"""
(Updatable) The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment to which this rule belongs.
"""
return pulumi.get(self, "compartment_id")
@compartment_id.setter
def compartment_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "compartment_id", value)
@property
@pulumi.getter(name="definedTags")
def defined_tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
(Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. Example: `{"foo-namespace.bar-key": "value"}`
"""
return pulumi.get(self, "defined_tags")
@defined_tags.setter
def defined_tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "defined_tags", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
(Updatable) A string that describes the details of the rule. It does not have to be unique, and can be changed. Avoid entering confidential information.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="entityId")
def entity_id(self) -> Optional[pulumi.Input[str]]:
"""
(Updatable) Logging Analytics entity OCID. Associates the processed logs with the given entity (optional).
"""
return pulumi.get(self, "entity_id")
@entity_id.setter
def entity_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "entity_id", value)
@property
@pulumi.getter(name="freeformTags")
def freeform_tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
(Updatable) Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: `{"bar-key": "value"}`
"""
return pulumi.get(self, "freeform_tags")
@freeform_tags.setter
def freeform_tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "freeform_tags", value)
@property
@pulumi.getter(name="lifecycleDetails")
def lifecycle_details(self) -> Optional[pulumi.Input[str]]:
"""
A detailed status of the life cycle state.
"""
return pulumi.get(self, "lifecycle_details")
@lifecycle_details.setter
def lifecycle_details(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "lifecycle_details", value)
@property
@pulumi.getter(name="logGroupId")
def log_group_id(self) -> Optional[pulumi.Input[str]]:
"""
(Updatable) Logging Analytics Log group OCID to associate the processed logs with.
"""
return pulumi.get(self, "log_group_id")
@log_group_id.setter
def log_group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "log_group_id", value)
@property
@pulumi.getter(name="logSourceName")
def log_source_name(self) -> Optional[pulumi.Input[str]]:
"""
(Updatable) Name of the Logging Analytics Source to use for the processing.
"""
return pulumi.get(self, "log_source_name")
@log_source_name.setter
def log_source_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "log_source_name", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
A unique name given to the rule. The name must be unique within the tenancy, and cannot be modified.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def namespace(self) -> Optional[pulumi.Input[str]]:
"""
The Logging Analytics namespace used for the request.
"""
return pulumi.get(self, "namespace")
@namespace.setter
def namespace(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "namespace", value)
@property
@pulumi.getter(name="osBucketName")
def os_bucket_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the Object Storage bucket.
"""
return pulumi.get(self, "os_bucket_name")
@os_bucket_name.setter
def os_bucket_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "os_bucket_name", value)
@property
@pulumi.getter(name="osNamespace")
def os_namespace(self) -> Optional[pulumi.Input[str]]:
"""
Object Storage namespace.
"""
return pulumi.get(self, "os_namespace")
@os_namespace.setter
def os_namespace(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "os_namespace", value)
@property
@pulumi.getter
def overrides(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LogAnalyticsObjectCollectionRuleOverrideArgs']]]]:
"""
(Updatable) The override is used to modify some important configuration properties for objects matching a specific pattern inside the bucket. Supported propeties for override are - logSourceName, charEncoding. Supported matchType for override are "contains".
"""
return pulumi.get(self, "overrides")
@overrides.setter
def overrides(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['LogAnalyticsObjectCollectionRuleOverrideArgs']]]]):
pulumi.set(self, "overrides", value)
@property
@pulumi.getter(name="pollSince")
def poll_since(self) -> Optional[pulumi.Input[str]]:
"""
The oldest time of the file in the bucket to consider for collection. Accepted values are: BEGINNING or CURRENT_TIME or RFC3339 formatted datetime string. When collectionType is LIVE, specifying pollSince value other than CURRENT_TIME will result in error.
"""
return pulumi.get(self, "poll_since")
@poll_since.setter
def poll_since(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "poll_since", value)
@property
@pulumi.getter(name="pollTill")
def poll_till(self) -> Optional[pulumi.Input[str]]:
"""
The oldest time of the file in the bucket to consider for collection. Accepted values are: CURRENT_TIME or RFC3339 formatted datetime string. When collectionType is LIVE, specifying pollTill will result in error.
"""
return pulumi.get(self, "poll_till")
@poll_till.setter
def poll_till(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "poll_till", value)
@property
@pulumi.getter
def state(self) -> Optional[pulumi.Input[str]]:
"""
The current state of the rule.
"""
return pulumi.get(self, "state")
@state.setter
def state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "state", value)
@property
@pulumi.getter(name="timeCreated")
def time_created(self) -> Optional[pulumi.Input[str]]:
"""
The time when this rule was created. An RFC3339 formatted datetime string.
"""
return pulumi.get(self, "time_created")
@time_created.setter
def time_created(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "time_created", value)
@property
@pulumi.getter(name="timeUpdated")
def time_updated(self) -> Optional[pulumi.Input[str]]:
"""
The time when this rule was last updated. An RFC3339 formatted datetime string.
"""
return pulumi.get(self, "time_updated")
@time_updated.setter
def time_updated(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "time_updated", value)
class LogAnalyticsObjectCollectionRule(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
char_encoding: Optional[pulumi.Input[str]] = None,
collection_type: Optional[pulumi.Input[str]] = None,
compartment_id: Optional[pulumi.Input[str]] = None,
defined_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
description: Optional[pulumi.Input[str]] = None,
entity_id: Optional[pulumi.Input[str]] = None,
freeform_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
log_group_id: Optional[pulumi.Input[str]] = None,
log_source_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace: Optional[pulumi.Input[str]] = None,
os_bucket_name: Optional[pulumi.Input[str]] = None,
os_namespace: Optional[pulumi.Input[str]] = None,
overrides: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LogAnalyticsObjectCollectionRuleOverrideArgs']]]]] = None,
poll_since: Optional[pulumi.Input[str]] = None,
poll_till: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
This resource provides the Log Analytics Object Collection Rule resource in Oracle Cloud Infrastructure Log Analytics service.
Create a configuration to collect logs from object storage bucket.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_log_analytics_object_collection_rule = oci.loganalytics.LogAnalyticsObjectCollectionRule("testLogAnalyticsObjectCollectionRule",
compartment_id=var["compartment_id"],
log_group_id=oci_logging_log_group["test_log_group"]["id"],
log_source_name=var["log_analytics_object_collection_rule_log_source_name"],
namespace=var["log_analytics_object_collection_rule_namespace"],
os_bucket_name=oci_objectstorage_bucket["test_bucket"]["name"],
os_namespace=var["log_analytics_object_collection_rule_os_namespace"],
char_encoding=var["log_analytics_object_collection_rule_char_encoding"],
collection_type=var["log_analytics_object_collection_rule_collection_type"],
defined_tags={
"foo-namespace.bar-key": "value",
},
description=var["log_analytics_object_collection_rule_description"],
entity_id=oci_log_analytics_entity["test_entity"]["id"],
freeform_tags={
"bar-key": "value",
},
overrides=var["log_analytics_object_collection_rule_overrides"],
poll_since=var["log_analytics_object_collection_rule_poll_since"],
poll_till=var["log_analytics_object_collection_rule_poll_till"])
```
## Import
LogAnalyticsObjectCollectionRules can be imported using the `id`, e.g.
```sh
$ pulumi import oci:loganalytics/logAnalyticsObjectCollectionRule:LogAnalyticsObjectCollectionRule test_log_analytics_object_collection_rule "namespaces/{namespaceName}/logAnalyticsObjectCollectionRules/{logAnalyticsObjectCollectionRuleId}"
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] char_encoding: (Updatable) An optional character encoding to aid in detecting the character encoding of the contents of the objects while processing. It is recommended to set this value as ISO_8589_1 when configuring content of the objects having more numeric characters, and very few alphabets. For e.g. this applies when configuring VCN Flow Logs.
:param pulumi.Input[str] collection_type: The type of collection. Supported collection types: LIVE, HISTORIC, HISTORIC_LIVE
:param pulumi.Input[str] compartment_id: (Updatable) The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment to which this rule belongs.
:param pulumi.Input[Mapping[str, Any]] defined_tags: (Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. Example: `{"foo-namespace.bar-key": "value"}`
:param pulumi.Input[str] description: (Updatable) A string that describes the details of the rule. It does not have to be unique, and can be changed. Avoid entering confidential information.
:param pulumi.Input[str] entity_id: (Updatable) Logging Analytics entity OCID. Associates the processed logs with the given entity (optional).
:param pulumi.Input[Mapping[str, Any]] freeform_tags: (Updatable) Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: `{"bar-key": "value"}`
:param pulumi.Input[str] log_group_id: (Updatable) Logging Analytics Log group OCID to associate the processed logs with.
:param pulumi.Input[str] log_source_name: (Updatable) Name of the Logging Analytics Source to use for the processing.
:param pulumi.Input[str] name: A unique name given to the rule. The name must be unique within the tenancy, and cannot be modified.
:param pulumi.Input[str] namespace: The Logging Analytics namespace used for the request.
:param pulumi.Input[str] os_bucket_name: Name of the Object Storage bucket.
:param pulumi.Input[str] os_namespace: Object Storage namespace.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LogAnalyticsObjectCollectionRuleOverrideArgs']]]] overrides: (Updatable) The override is used to modify some important configuration properties for objects matching a specific pattern inside the bucket. Supported propeties for override are - logSourceName, charEncoding. Supported matchType for override are "contains".
:param pulumi.Input[str] poll_since: The oldest time of the file in the bucket to consider for collection. Accepted values are: BEGINNING or CURRENT_TIME or RFC3339 formatted datetime string. When collectionType is LIVE, specifying pollSince value other than CURRENT_TIME will result in error.
:param pulumi.Input[str] poll_till: The oldest time of the file in the bucket to consider for collection. Accepted values are: CURRENT_TIME or RFC3339 formatted datetime string. When collectionType is LIVE, specifying pollTill will result in error.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: LogAnalyticsObjectCollectionRuleArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
This resource provides the Log Analytics Object Collection Rule resource in Oracle Cloud Infrastructure Log Analytics service.
Create a configuration to collect logs from object storage bucket.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_log_analytics_object_collection_rule = oci.loganalytics.LogAnalyticsObjectCollectionRule("testLogAnalyticsObjectCollectionRule",
compartment_id=var["compartment_id"],
log_group_id=oci_logging_log_group["test_log_group"]["id"],
log_source_name=var["log_analytics_object_collection_rule_log_source_name"],
namespace=var["log_analytics_object_collection_rule_namespace"],
os_bucket_name=oci_objectstorage_bucket["test_bucket"]["name"],
os_namespace=var["log_analytics_object_collection_rule_os_namespace"],
char_encoding=var["log_analytics_object_collection_rule_char_encoding"],
collection_type=var["log_analytics_object_collection_rule_collection_type"],
defined_tags={
"foo-namespace.bar-key": "value",
},
description=var["log_analytics_object_collection_rule_description"],
entity_id=oci_log_analytics_entity["test_entity"]["id"],
freeform_tags={
"bar-key": "value",
},
overrides=var["log_analytics_object_collection_rule_overrides"],
poll_since=var["log_analytics_object_collection_rule_poll_since"],
poll_till=var["log_analytics_object_collection_rule_poll_till"])
```
## Import
LogAnalyticsObjectCollectionRules can be imported using the `id`, e.g.
```sh
$ pulumi import oci:loganalytics/logAnalyticsObjectCollectionRule:LogAnalyticsObjectCollectionRule test_log_analytics_object_collection_rule "namespaces/{namespaceName}/logAnalyticsObjectCollectionRules/{logAnalyticsObjectCollectionRuleId}"
```
:param str resource_name: The name of the resource.
:param LogAnalyticsObjectCollectionRuleArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(LogAnalyticsObjectCollectionRuleArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
char_encoding: Optional[pulumi.Input[str]] = None,
collection_type: Optional[pulumi.Input[str]] = None,
compartment_id: Optional[pulumi.Input[str]] = None,
defined_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
description: Optional[pulumi.Input[str]] = None,
entity_id: Optional[pulumi.Input[str]] = None,
freeform_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
log_group_id: Optional[pulumi.Input[str]] = None,
log_source_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace: Optional[pulumi.Input[str]] = None,
os_bucket_name: Optional[pulumi.Input[str]] = None,
os_namespace: Optional[pulumi.Input[str]] = None,
overrides: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LogAnalyticsObjectCollectionRuleOverrideArgs']]]]] = None,
poll_since: Optional[pulumi.Input[str]] = None,
poll_till: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = LogAnalyticsObjectCollectionRuleArgs.__new__(LogAnalyticsObjectCollectionRuleArgs)
__props__.__dict__["char_encoding"] = char_encoding
__props__.__dict__["collection_type"] = collection_type
if compartment_id is None and not opts.urn:
raise TypeError("Missing required property 'compartment_id'")
__props__.__dict__["compartment_id"] = compartment_id
__props__.__dict__["defined_tags"] = defined_tags
__props__.__dict__["description"] = description
__props__.__dict__["entity_id"] = entity_id
__props__.__dict__["freeform_tags"] = freeform_tags
if log_group_id is None and not opts.urn:
raise TypeError("Missing required property 'log_group_id'")
__props__.__dict__["log_group_id"] = log_group_id
if log_source_name is None and not opts.urn:
raise TypeError("Missing required property 'log_source_name'")
__props__.__dict__["log_source_name"] = log_source_name
__props__.__dict__["name"] = name
if namespace is None and not opts.urn:
raise TypeError("Missing required property 'namespace'")
__props__.__dict__["namespace"] = namespace
if os_bucket_name is None and not opts.urn:
raise TypeError("Missing required property 'os_bucket_name'")
__props__.__dict__["os_bucket_name"] = os_bucket_name
if os_namespace is None and not opts.urn:
raise TypeError("Missing required property 'os_namespace'")
__props__.__dict__["os_namespace"] = os_namespace
__props__.__dict__["overrides"] = overrides
__props__.__dict__["poll_since"] = poll_since
__props__.__dict__["poll_till"] = poll_till
__props__.__dict__["lifecycle_details"] = None
__props__.__dict__["state"] = None
__props__.__dict__["time_created"] = None
__props__.__dict__["time_updated"] = None
super(LogAnalyticsObjectCollectionRule, __self__).__init__(
'oci:loganalytics/logAnalyticsObjectCollectionRule:LogAnalyticsObjectCollectionRule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
char_encoding: Optional[pulumi.Input[str]] = None,
collection_type: Optional[pulumi.Input[str]] = None,
compartment_id: Optional[pulumi.Input[str]] = None,
defined_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
description: Optional[pulumi.Input[str]] = None,
entity_id: Optional[pulumi.Input[str]] = None,
freeform_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
lifecycle_details: Optional[pulumi.Input[str]] = None,
log_group_id: Optional[pulumi.Input[str]] = None,
log_source_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace: Optional[pulumi.Input[str]] = None,
os_bucket_name: Optional[pulumi.Input[str]] = None,
os_namespace: Optional[pulumi.Input[str]] = None,
overrides: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LogAnalyticsObjectCollectionRuleOverrideArgs']]]]] = None,
poll_since: Optional[pulumi.Input[str]] = None,
poll_till: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None,
time_created: Optional[pulumi.Input[str]] = None,
time_updated: Optional[pulumi.Input[str]] = None) -> 'LogAnalyticsObjectCollectionRule':
"""
Get an existing LogAnalyticsObjectCollectionRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] char_encoding: (Updatable) An optional character encoding to aid in detecting the character encoding of the contents of the objects while processing. It is recommended to set this value as ISO_8589_1 when configuring content of the objects having more numeric characters, and very few alphabets. For e.g. this applies when configuring VCN Flow Logs.
:param pulumi.Input[str] collection_type: The type of collection. Supported collection types: LIVE, HISTORIC, HISTORIC_LIVE
:param pulumi.Input[str] compartment_id: (Updatable) The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment to which this rule belongs.
:param pulumi.Input[Mapping[str, Any]] defined_tags: (Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. Example: `{"foo-namespace.bar-key": "value"}`
:param pulumi.Input[str] description: (Updatable) A string that describes the details of the rule. It does not have to be unique, and can be changed. Avoid entering confidential information.
:param pulumi.Input[str] entity_id: (Updatable) Logging Analytics entity OCID. Associates the processed logs with the given entity (optional).
:param pulumi.Input[Mapping[str, Any]] freeform_tags: (Updatable) Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: `{"bar-key": "value"}`
:param pulumi.Input[str] lifecycle_details: A detailed status of the life cycle state.
:param pulumi.Input[str] log_group_id: (Updatable) Logging Analytics Log group OCID to associate the processed logs with.
:param pulumi.Input[str] log_source_name: (Updatable) Name of the Logging Analytics Source to use for the processing.
:param pulumi.Input[str] name: A unique name given to the rule. The name must be unique within the tenancy, and cannot be modified.
:param pulumi.Input[str] namespace: The Logging Analytics namespace used for the request.
:param pulumi.Input[str] os_bucket_name: Name of the Object Storage bucket.
:param pulumi.Input[str] os_namespace: Object Storage namespace.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LogAnalyticsObjectCollectionRuleOverrideArgs']]]] overrides: (Updatable) The override is used to modify some important configuration properties for objects matching a specific pattern inside the bucket. Supported propeties for override are - logSourceName, charEncoding. Supported matchType for override are "contains".
:param pulumi.Input[str] poll_since: The oldest time of the file in the bucket to consider for collection. Accepted values are: BEGINNING or CURRENT_TIME or RFC3339 formatted datetime string. When collectionType is LIVE, specifying pollSince value other than CURRENT_TIME will result in error.
:param pulumi.Input[str] poll_till: The oldest time of the file in the bucket to consider for collection. Accepted values are: CURRENT_TIME or RFC3339 formatted datetime string. When collectionType is LIVE, specifying pollTill will result in error.
:param pulumi.Input[str] state: The current state of the rule.
:param pulumi.Input[str] time_created: The time when this rule was created. An RFC3339 formatted datetime string.
:param pulumi.Input[str] time_updated: The time when this rule was last updated. An RFC3339 formatted datetime string.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _LogAnalyticsObjectCollectionRuleState.__new__(_LogAnalyticsObjectCollectionRuleState)
__props__.__dict__["char_encoding"] = char_encoding
__props__.__dict__["collection_type"] = collection_type
__props__.__dict__["compartment_id"] = compartment_id
__props__.__dict__["defined_tags"] = defined_tags
__props__.__dict__["description"] = description
__props__.__dict__["entity_id"] = entity_id
__props__.__dict__["freeform_tags"] = freeform_tags
__props__.__dict__["lifecycle_details"] = lifecycle_details
__props__.__dict__["log_group_id"] = log_group_id
__props__.__dict__["log_source_name"] = log_source_name
__props__.__dict__["name"] = name
__props__.__dict__["namespace"] = namespace
__props__.__dict__["os_bucket_name"] = os_bucket_name
__props__.__dict__["os_namespace"] = os_namespace
__props__.__dict__["overrides"] = overrides
__props__.__dict__["poll_since"] = poll_since
__props__.__dict__["poll_till"] = poll_till
__props__.__dict__["state"] = state
__props__.__dict__["time_created"] = time_created
__props__.__dict__["time_updated"] = time_updated
return LogAnalyticsObjectCollectionRule(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="charEncoding")
def char_encoding(self) -> pulumi.Output[str]:
"""
(Updatable) An optional character encoding to aid in detecting the character encoding of the contents of the objects while processing. It is recommended to set this value as ISO_8589_1 when configuring content of the objects having more numeric characters, and very few alphabets. For e.g. this applies when configuring VCN Flow Logs.
"""
return pulumi.get(self, "char_encoding")
@property
@pulumi.getter(name="collectionType")
def collection_type(self) -> pulumi.Output[str]:
"""
The type of collection. Supported collection types: LIVE, HISTORIC, HISTORIC_LIVE
"""
return pulumi.get(self, "collection_type")
@property
@pulumi.getter(name="compartmentId")
def compartment_id(self) -> pulumi.Output[str]:
"""
(Updatable) The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment to which this rule belongs.
"""
return pulumi.get(self, "compartment_id")
@property
@pulumi.getter(name="definedTags")
def defined_tags(self) -> pulumi.Output[Mapping[str, Any]]:
"""
(Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. Example: `{"foo-namespace.bar-key": "value"}`
"""
return pulumi.get(self, "defined_tags")
@property
@pulumi.getter
def description(self) -> pulumi.Output[str]:
"""
(Updatable) A string that describes the details of the rule. It does not have to be unique, and can be changed. Avoid entering confidential information.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="entityId")
def entity_id(self) -> pulumi.Output[str]:
"""
(Updatable) Logging Analytics entity OCID. Associates the processed logs with the given entity (optional).
"""
return pulumi.get(self, "entity_id")
@property
@pulumi.getter(name="freeformTags")
def freeform_tags(self) -> pulumi.Output[Mapping[str, Any]]:
"""
(Updatable) Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: `{"bar-key": "value"}`
"""
return pulumi.get(self, "freeform_tags")
@property
@pulumi.getter(name="lifecycleDetails")
def lifecycle_details(self) -> pulumi.Output[str]:
"""
A detailed status of the life cycle state.
"""
return pulumi.get(self, "lifecycle_details")
@property
@pulumi.getter(name="logGroupId")
def log_group_id(self) -> pulumi.Output[str]:
"""
(Updatable) Logging Analytics Log group OCID to associate the processed logs with.
"""
return pulumi.get(self, "log_group_id")
@property
@pulumi.getter(name="logSourceName")
def log_source_name(self) -> pulumi.Output[str]:
"""
(Updatable) Name of the Logging Analytics Source to use for the processing.
"""
return pulumi.get(self, "log_source_name")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
A unique name given to the rule. The name must be unique within the tenancy, and cannot be modified.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def namespace(self) -> pulumi.Output[str]:
"""
The Logging Analytics namespace used for the request.
"""
return pulumi.get(self, "namespace")
@property
@pulumi.getter(name="osBucketName")
def os_bucket_name(self) -> pulumi.Output[str]:
"""
Name of the Object Storage bucket.
"""
return pulumi.get(self, "os_bucket_name")
@property
@pulumi.getter(name="osNamespace")
def os_namespace(self) -> pulumi.Output[str]:
"""
Object Storage namespace.
"""
return pulumi.get(self, "os_namespace")
@property
@pulumi.getter
def overrides(self) -> pulumi.Output[Sequence['outputs.LogAnalyticsObjectCollectionRuleOverride']]:
"""
(Updatable) The override is used to modify some important configuration properties for objects matching a specific pattern inside the bucket. Supported propeties for override are - logSourceName, charEncoding. Supported matchType for override are "contains".
"""
return pulumi.get(self, "overrides")
@property
@pulumi.getter(name="pollSince")
def poll_since(self) -> pulumi.Output[str]:
"""
The oldest time of the file in the bucket to consider for collection. Accepted values are: BEGINNING or CURRENT_TIME or RFC3339 formatted datetime string. When collectionType is LIVE, specifying pollSince value other than CURRENT_TIME will result in error.
"""
return pulumi.get(self, "poll_since")
@property
@pulumi.getter(name="pollTill")
def poll_till(self) -> pulumi.Output[str]:
"""
The oldest time of the file in the bucket to consider for collection. Accepted values are: CURRENT_TIME or RFC3339 formatted datetime string. When collectionType is LIVE, specifying pollTill will result in error.
"""
return pulumi.get(self, "poll_till")
@property
@pulumi.getter
def state(self) -> pulumi.Output[str]:
"""
The current state of the rule.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="timeCreated")
def time_created(self) -> pulumi.Output[str]:
"""
The time when this rule was created. An RFC3339 formatted datetime string.
"""
return pulumi.get(self, "time_created")
@property
@pulumi.getter(name="timeUpdated")
def time_updated(self) -> pulumi.Output[str]:
"""
The time when this rule was last updated. An RFC3339 formatted datetime string.
"""
return pulumi.get(self, "time_updated")
| 53.871306
| 387
| 0.682841
| 6,847
| 56,511
| 5.437418
| 0.044545
| 0.07357
| 0.073328
| 0.067956
| 0.931829
| 0.918587
| 0.9083
| 0.89769
| 0.889713
| 0.863873
| 0
| 0.002816
| 0.220807
| 56,511
| 1,048
| 388
| 53.92271
| 0.842686
| 0.429792
| 0
| 0.758794
| 1
| 0
| 0.114822
| 0.021097
| 0
| 0
| 0
| 0
| 0
| 1
| 0.165829
| false
| 0.001675
| 0.011725
| 0
| 0.278057
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
ab3bfe87ba29cd8bd568c0ed7a4c30de7506529a
| 229
|
py
|
Python
|
mdb_stream_recorder/__init__.py
|
wpeisker/streamrec
|
89342bb6f9ea62dcb467794f8f5b56fab909b42b
|
[
"MIT"
] | null | null | null |
mdb_stream_recorder/__init__.py
|
wpeisker/streamrec
|
89342bb6f9ea62dcb467794f8f5b56fab909b42b
|
[
"MIT"
] | null | null | null |
mdb_stream_recorder/__init__.py
|
wpeisker/streamrec
|
89342bb6f9ea62dcb467794f8f5b56fab909b42b
|
[
"MIT"
] | null | null | null |
# mdb_stream_recorder/__init__.py
from mdb_stream_recorder.recorder_modbus import *
from mdb_stream_recorder.recorder_csv import *
from mdb_stream_recorder.recorder_visualization import *
from mdb_stream_recorder.config import *
| 38.166667
| 56
| 0.873362
| 32
| 229
| 5.71875
| 0.34375
| 0.245902
| 0.464481
| 0.459016
| 0.688525
| 0.382514
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078603
| 229
| 6
| 57
| 38.166667
| 0.867299
| 0.135371
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
db6e4aac7df2b1f0adb6c984b4daa71d0683cf7a
| 139,056
|
py
|
Python
|
UFOs/Type-FL/2HDM_CPC_Type-FL_NLO_cba_UFO/decays.py
|
ycwu1030/2HDM_FR
|
599490fd785cb67e3e4ffad1fa7536906ac8bcd5
|
[
"MIT"
] | 1
|
2019-09-04T01:44:29.000Z
|
2019-09-04T01:44:29.000Z
|
UFOs/Type-FL/2HDM_CPC_Type-FL_NLO_cba_UFO/decays.py
|
ycwu1030/2HDM_FR
|
599490fd785cb67e3e4ffad1fa7536906ac8bcd5
|
[
"MIT"
] | null | null | null |
UFOs/Type-FL/2HDM_CPC_Type-FL_NLO_cba_UFO/decays.py
|
ycwu1030/2HDM_FR
|
599490fd785cb67e3e4ffad1fa7536906ac8bcd5
|
[
"MIT"
] | null | null | null |
# This file was automatically created by FeynRules 2.3.36
# Mathematica version: 11.3.0 for Mac OS X x86 (64-bit) (March 7, 2018)
# Date: Fri 13 Aug 2021 16:39:38
from object_library import all_decays, Decay
import particles as P
# Partial decay widths of the bottom quark in this 2HDM (Type-FL, CP-conserving).
# Auto-generated by FeynRules (see file header): each dict value is an analytic
# expression *string* that the UFO/matrix-element machinery evaluates later,
# not Python code executed here — do not edit the formulas by hand.
# Channels: b -> H- t (Yukawa couplings I3a33/I4a33, tan(beta) via sb/cb) and
# b -> W- t (gauge coupling ee/sw). Both carry the standard two-body
# phase-space factor cmath.sqrt(Kallen lambda)/(96*pi*|MB|^3).
# NOTE(review): these channels are only open for an off-shell/heavy "b" mass
# parameter MB > MT + MHp (resp. MT + MW) — presumably evaluated generically
# by the generator; confirm against the UFO width machinery.
Decay_b = Decay(name = 'Decay_b',
particle = P.b,
partial_widths = {(P.H__minus__,P.t):'((6*I4a33*MB*MT*complexconjugate(I3a33) + (3*cb**2*I3a33*MB**2*complexconjugate(I3a33))/sb**2 - (3*cb**2*I3a33*MHp**2*complexconjugate(I3a33))/sb**2 + (3*cb**2*I3a33*MT**2*complexconjugate(I3a33))/sb**2 + 6*I3a33*MB*MT*complexconjugate(I4a33) + (3*I4a33*MB**2*sb**2*complexconjugate(I4a33))/cb**2 - (3*I4a33*MHp**2*sb**2*complexconjugate(I4a33))/cb**2 + (3*I4a33*MT**2*sb**2*complexconjugate(I4a33))/cb**2)*cmath.sqrt(MB**4 - 2*MB**2*MHp**2 + MHp**4 - 2*MB**2*MT**2 - 2*MHp**2*MT**2 + MT**4))/(96.*cmath.pi*abs(MB)**3)',
(P.W__minus__,P.t):'(((3*ee**2*MB**2)/(2.*sw**2) + (3*ee**2*MT**2)/(2.*sw**2) + (3*ee**2*MB**4)/(2.*MW**2*sw**2) - (3*ee**2*MB**2*MT**2)/(MW**2*sw**2) + (3*ee**2*MT**4)/(2.*MW**2*sw**2) - (3*ee**2*MW**2)/sw**2)*cmath.sqrt(MB**4 - 2*MB**2*MT**2 + MT**4 - 2*MB**2*MW**2 - 2*MT**2*MW**2 + MW**4))/(96.*cmath.pi*abs(MB)**3)'})
# Partial decay widths of the CP-odd Higgs HA, auto-generated by FeynRules.
# Each dict value is an analytic expression string evaluated by the UFO
# machinery (not executed here); the formulas are machine output — do not
# hand-edit. Channels visible below:
#   HA -> b bbar / t tbar / tau+ tau-  : fermionic widths via Yukawas
#     (yb, yt, ytau) with Type-FL tan(beta) scaling through sb/cb ratios;
#   HA -> HH Z and HA -> HL Z          : scalar+Z, proportional to sba**2
#     (sin(beta-alpha)) resp. cba**2 (cos(beta-alpha));
#   HA -> H+ W- and HA -> H- W+        : charged-scalar + W, pure gauge (ee/sw).
# All widths carry the two-body phase-space root and the 1/(16*pi*|MHA|^3)
# (or 96 for colored fermion loops elsewhere) normalization.
Decay_HA = Decay(name = 'Decay_HA',
particle = P.HA,
partial_widths = {(P.b,P.b__tilde__):'(3*MHA**2*sb**2*yb**2*cmath.sqrt(-4*MB**2*MHA**2 + MHA**4))/(16.*cb**2*cmath.pi*abs(MHA)**3)',
(P.HH,P.Z):'((-(cb**4*ee**2*MHA**2*sba**2) - cb**4*ee**2*MHH**2*sba**2 + (cb**4*ee**2*MHA**4*sba**2)/(2.*MZ**2) - (cb**4*ee**2*MHA**2*MHH**2*sba**2)/MZ**2 + (cb**4*ee**2*MHH**4*sba**2)/(2.*MZ**2) + (cb**4*ee**2*MZ**2*sba**2)/2. - 2*cb**2*ee**2*MHA**2*sb**2*sba**2 - 2*cb**2*ee**2*MHH**2*sb**2*sba**2 + (cb**2*ee**2*MHA**4*sb**2*sba**2)/MZ**2 - (2*cb**2*ee**2*MHA**2*MHH**2*sb**2*sba**2)/MZ**2 + (cb**2*ee**2*MHH**4*sb**2*sba**2)/MZ**2 + cb**2*ee**2*MZ**2*sb**2*sba**2 - ee**2*MHA**2*sb**4*sba**2 - ee**2*MHH**2*sb**4*sba**2 + (ee**2*MHA**4*sb**4*sba**2)/(2.*MZ**2) - (ee**2*MHA**2*MHH**2*sb**4*sba**2)/MZ**2 + (ee**2*MHH**4*sb**4*sba**2)/(2.*MZ**2) + (ee**2*MZ**2*sb**4*sba**2)/2. - (cb**4*cw**2*ee**2*MHA**2*sba**2)/(2.*sw**2) - (cb**4*cw**2*ee**2*MHH**2*sba**2)/(2.*sw**2) + (cb**4*cw**2*ee**2*MHA**4*sba**2)/(4.*MZ**2*sw**2) - (cb**4*cw**2*ee**2*MHA**2*MHH**2*sba**2)/(2.*MZ**2*sw**2) + (cb**4*cw**2*ee**2*MHH**4*sba**2)/(4.*MZ**2*sw**2) + (cb**4*cw**2*ee**2*MZ**2*sba**2)/(4.*sw**2) - (cb**2*cw**2*ee**2*MHA**2*sb**2*sba**2)/sw**2 - (cb**2*cw**2*ee**2*MHH**2*sb**2*sba**2)/sw**2 + (cb**2*cw**2*ee**2*MHA**4*sb**2*sba**2)/(2.*MZ**2*sw**2) - (cb**2*cw**2*ee**2*MHA**2*MHH**2*sb**2*sba**2)/(MZ**2*sw**2) + (cb**2*cw**2*ee**2*MHH**4*sb**2*sba**2)/(2.*MZ**2*sw**2) + (cb**2*cw**2*ee**2*MZ**2*sb**2*sba**2)/(2.*sw**2) - (cw**2*ee**2*MHA**2*sb**4*sba**2)/(2.*sw**2) - (cw**2*ee**2*MHH**2*sb**4*sba**2)/(2.*sw**2) + (cw**2*ee**2*MHA**4*sb**4*sba**2)/(4.*MZ**2*sw**2) - (cw**2*ee**2*MHA**2*MHH**2*sb**4*sba**2)/(2.*MZ**2*sw**2) + (cw**2*ee**2*MHH**4*sb**4*sba**2)/(4.*MZ**2*sw**2) + (cw**2*ee**2*MZ**2*sb**4*sba**2)/(4.*sw**2) - (cb**4*ee**2*MHA**2*sba**2*sw**2)/(2.*cw**2) - (cb**4*ee**2*MHH**2*sba**2*sw**2)/(2.*cw**2) + (cb**4*ee**2*MHA**4*sba**2*sw**2)/(4.*cw**2*MZ**2) - (cb**4*ee**2*MHA**2*MHH**2*sba**2*sw**2)/(2.*cw**2*MZ**2) + (cb**4*ee**2*MHH**4*sba**2*sw**2)/(4.*cw**2*MZ**2) + (cb**4*ee**2*MZ**2*sba**2*sw**2)/(4.*cw**2) - (cb**2*ee**2*MHA**2*sb**2*sba**2*sw**2)/cw**2 - 
(cb**2*ee**2*MHH**2*sb**2*sba**2*sw**2)/cw**2 + (cb**2*ee**2*MHA**4*sb**2*sba**2*sw**2)/(2.*cw**2*MZ**2) - (cb**2*ee**2*MHA**2*MHH**2*sb**2*sba**2*sw**2)/(cw**2*MZ**2) + (cb**2*ee**2*MHH**4*sb**2*sba**2*sw**2)/(2.*cw**2*MZ**2) + (cb**2*ee**2*MZ**2*sb**2*sba**2*sw**2)/(2.*cw**2) - (ee**2*MHA**2*sb**4*sba**2*sw**2)/(2.*cw**2) - (ee**2*MHH**2*sb**4*sba**2*sw**2)/(2.*cw**2) + (ee**2*MHA**4*sb**4*sba**2*sw**2)/(4.*cw**2*MZ**2) - (ee**2*MHA**2*MHH**2*sb**4*sba**2*sw**2)/(2.*cw**2*MZ**2) + (ee**2*MHH**4*sb**4*sba**2*sw**2)/(4.*cw**2*MZ**2) + (ee**2*MZ**2*sb**4*sba**2*sw**2)/(4.*cw**2))*cmath.sqrt(MHA**4 - 2*MHA**2*MHH**2 + MHH**4 - 2*MHA**2*MZ**2 - 2*MHH**2*MZ**2 + MZ**4))/(16.*cmath.pi*abs(MHA)**3)',
(P.HL,P.Z):'((-(cb**4*cba**2*ee**2*MHA**2) - cb**4*cba**2*ee**2*MHL**2 + (cb**4*cba**2*ee**2*MHA**4)/(2.*MZ**2) - (cb**4*cba**2*ee**2*MHA**2*MHL**2)/MZ**2 + (cb**4*cba**2*ee**2*MHL**4)/(2.*MZ**2) + (cb**4*cba**2*ee**2*MZ**2)/2. - 2*cb**2*cba**2*ee**2*MHA**2*sb**2 - 2*cb**2*cba**2*ee**2*MHL**2*sb**2 + (cb**2*cba**2*ee**2*MHA**4*sb**2)/MZ**2 - (2*cb**2*cba**2*ee**2*MHA**2*MHL**2*sb**2)/MZ**2 + (cb**2*cba**2*ee**2*MHL**4*sb**2)/MZ**2 + cb**2*cba**2*ee**2*MZ**2*sb**2 - cba**2*ee**2*MHA**2*sb**4 - cba**2*ee**2*MHL**2*sb**4 + (cba**2*ee**2*MHA**4*sb**4)/(2.*MZ**2) - (cba**2*ee**2*MHA**2*MHL**2*sb**4)/MZ**2 + (cba**2*ee**2*MHL**4*sb**4)/(2.*MZ**2) + (cba**2*ee**2*MZ**2*sb**4)/2. - (cb**4*cba**2*cw**2*ee**2*MHA**2)/(2.*sw**2) - (cb**4*cba**2*cw**2*ee**2*MHL**2)/(2.*sw**2) + (cb**4*cba**2*cw**2*ee**2*MHA**4)/(4.*MZ**2*sw**2) - (cb**4*cba**2*cw**2*ee**2*MHA**2*MHL**2)/(2.*MZ**2*sw**2) + (cb**4*cba**2*cw**2*ee**2*MHL**4)/(4.*MZ**2*sw**2) + (cb**4*cba**2*cw**2*ee**2*MZ**2)/(4.*sw**2) - (cb**2*cba**2*cw**2*ee**2*MHA**2*sb**2)/sw**2 - (cb**2*cba**2*cw**2*ee**2*MHL**2*sb**2)/sw**2 + (cb**2*cba**2*cw**2*ee**2*MHA**4*sb**2)/(2.*MZ**2*sw**2) - (cb**2*cba**2*cw**2*ee**2*MHA**2*MHL**2*sb**2)/(MZ**2*sw**2) + (cb**2*cba**2*cw**2*ee**2*MHL**4*sb**2)/(2.*MZ**2*sw**2) + (cb**2*cba**2*cw**2*ee**2*MZ**2*sb**2)/(2.*sw**2) - (cba**2*cw**2*ee**2*MHA**2*sb**4)/(2.*sw**2) - (cba**2*cw**2*ee**2*MHL**2*sb**4)/(2.*sw**2) + (cba**2*cw**2*ee**2*MHA**4*sb**4)/(4.*MZ**2*sw**2) - (cba**2*cw**2*ee**2*MHA**2*MHL**2*sb**4)/(2.*MZ**2*sw**2) + (cba**2*cw**2*ee**2*MHL**4*sb**4)/(4.*MZ**2*sw**2) + (cba**2*cw**2*ee**2*MZ**2*sb**4)/(4.*sw**2) - (cb**4*cba**2*ee**2*MHA**2*sw**2)/(2.*cw**2) - (cb**4*cba**2*ee**2*MHL**2*sw**2)/(2.*cw**2) + (cb**4*cba**2*ee**2*MHA**4*sw**2)/(4.*cw**2*MZ**2) - (cb**4*cba**2*ee**2*MHA**2*MHL**2*sw**2)/(2.*cw**2*MZ**2) + (cb**4*cba**2*ee**2*MHL**4*sw**2)/(4.*cw**2*MZ**2) + (cb**4*cba**2*ee**2*MZ**2*sw**2)/(4.*cw**2) - (cb**2*cba**2*ee**2*MHA**2*sb**2*sw**2)/cw**2 - 
(cb**2*cba**2*ee**2*MHL**2*sb**2*sw**2)/cw**2 + (cb**2*cba**2*ee**2*MHA**4*sb**2*sw**2)/(2.*cw**2*MZ**2) - (cb**2*cba**2*ee**2*MHA**2*MHL**2*sb**2*sw**2)/(cw**2*MZ**2) + (cb**2*cba**2*ee**2*MHL**4*sb**2*sw**2)/(2.*cw**2*MZ**2) + (cb**2*cba**2*ee**2*MZ**2*sb**2*sw**2)/(2.*cw**2) - (cba**2*ee**2*MHA**2*sb**4*sw**2)/(2.*cw**2) - (cba**2*ee**2*MHL**2*sb**4*sw**2)/(2.*cw**2) + (cba**2*ee**2*MHA**4*sb**4*sw**2)/(4.*cw**2*MZ**2) - (cba**2*ee**2*MHA**2*MHL**2*sb**4*sw**2)/(2.*cw**2*MZ**2) + (cba**2*ee**2*MHL**4*sb**4*sw**2)/(4.*cw**2*MZ**2) + (cba**2*ee**2*MZ**2*sb**4*sw**2)/(4.*cw**2))*cmath.sqrt(MHA**4 - 2*MHA**2*MHL**2 + MHL**4 - 2*MHA**2*MZ**2 - 2*MHL**2*MZ**2 + MZ**4))/(16.*cmath.pi*abs(MHA)**3)',
(P.H__plus__,P.W__minus__):'((-(cb**4*ee**2*MHA**2)/(2.*sw**2) - (cb**4*ee**2*MHp**2)/(2.*sw**2) + (cb**4*ee**2*MHA**4)/(4.*MW**2*sw**2) - (cb**4*ee**2*MHA**2*MHp**2)/(2.*MW**2*sw**2) + (cb**4*ee**2*MHp**4)/(4.*MW**2*sw**2) + (cb**4*ee**2*MW**2)/(4.*sw**2) - (cb**2*ee**2*MHA**2*sb**2)/sw**2 - (cb**2*ee**2*MHp**2*sb**2)/sw**2 + (cb**2*ee**2*MHA**4*sb**2)/(2.*MW**2*sw**2) - (cb**2*ee**2*MHA**2*MHp**2*sb**2)/(MW**2*sw**2) + (cb**2*ee**2*MHp**4*sb**2)/(2.*MW**2*sw**2) + (cb**2*ee**2*MW**2*sb**2)/(2.*sw**2) - (ee**2*MHA**2*sb**4)/(2.*sw**2) - (ee**2*MHp**2*sb**4)/(2.*sw**2) + (ee**2*MHA**4*sb**4)/(4.*MW**2*sw**2) - (ee**2*MHA**2*MHp**2*sb**4)/(2.*MW**2*sw**2) + (ee**2*MHp**4*sb**4)/(4.*MW**2*sw**2) + (ee**2*MW**2*sb**4)/(4.*sw**2))*cmath.sqrt(MHA**4 - 2*MHA**2*MHp**2 + MHp**4 - 2*MHA**2*MW**2 - 2*MHp**2*MW**2 + MW**4))/(16.*cmath.pi*abs(MHA)**3)',
(P.H__minus__,P.W__plus__):'((-(cb**4*ee**2*MHA**2)/(2.*sw**2) - (cb**4*ee**2*MHp**2)/(2.*sw**2) + (cb**4*ee**2*MHA**4)/(4.*MW**2*sw**2) - (cb**4*ee**2*MHA**2*MHp**2)/(2.*MW**2*sw**2) + (cb**4*ee**2*MHp**4)/(4.*MW**2*sw**2) + (cb**4*ee**2*MW**2)/(4.*sw**2) - (cb**2*ee**2*MHA**2*sb**2)/sw**2 - (cb**2*ee**2*MHp**2*sb**2)/sw**2 + (cb**2*ee**2*MHA**4*sb**2)/(2.*MW**2*sw**2) - (cb**2*ee**2*MHA**2*MHp**2*sb**2)/(MW**2*sw**2) + (cb**2*ee**2*MHp**4*sb**2)/(2.*MW**2*sw**2) + (cb**2*ee**2*MW**2*sb**2)/(2.*sw**2) - (ee**2*MHA**2*sb**4)/(2.*sw**2) - (ee**2*MHp**2*sb**4)/(2.*sw**2) + (ee**2*MHA**4*sb**4)/(4.*MW**2*sw**2) - (ee**2*MHA**2*MHp**2*sb**4)/(2.*MW**2*sw**2) + (ee**2*MHp**4*sb**4)/(4.*MW**2*sw**2) + (ee**2*MW**2*sb**4)/(4.*sw**2))*cmath.sqrt(MHA**4 - 2*MHA**2*MHp**2 + MHp**4 - 2*MHA**2*MW**2 - 2*MHp**2*MW**2 + MW**4))/(16.*cmath.pi*abs(MHA)**3)',
(P.t,P.t__tilde__):'(3*cb**2*MHA**2*yt**2*cmath.sqrt(MHA**4 - 4*MHA**2*MT**2))/(16.*cmath.pi*sb**2*abs(MHA)**3)',
(P.ta__minus__,P.ta__plus__):'(cb**2*MHA**2*ytau**2*cmath.sqrt(MHA**4 - 4*MHA**2*MTA**2))/(16.*cmath.pi*sb**2*abs(MHA)**3)'})
Decay_HH = Decay(name = 'Decay_HH',
particle = P.HH,
partial_widths = {(P.b,P.b__tilde__):'((-12*cba**2*MB**2*yb**2 + 3*cba**2*MHH**2*yb**2 - (24*cba*MB**2*sb*sba*yb**2)/cb + (6*cba*MHH**2*sb*sba*yb**2)/cb - (12*MB**2*sb**2*sba**2*yb**2)/cb**2 + (3*MHH**2*sb**2*sba**2*yb**2)/cb**2)*cmath.sqrt(-4*MB**2*MHH**2 + MHH**4))/(16.*cmath.pi*abs(MHH)**3)',
(P.HA,P.HA):'(((16*cb**4*cba**2*m122**2)/vev**2 + (4*cb**8*cba**2*MHA**4)/vev**2 + (4*cb**8*cba**4*MHA**2*MHH**2)/vev**2 + (cb**8*cba**6*MHH**4)/vev**2 + (4*cb**6*cba**2*MHA**2*MHL**2)/vev**2 - (4*cb**8*cba**4*MHA**2*MHL**2)/vev**2 + (2*cb**6*cba**4*MHH**2*MHL**2)/vev**2 - (2*cb**8*cba**6*MHH**2*MHL**2)/vev**2 + (cb**4*cba**2*MHL**4)/vev**2 - (2*cb**6*cba**4*MHL**4)/vev**2 + (cb**8*cba**6*MHL**4)/vev**2 + (4*cb**6*cba**2*m122**2)/(sb**2*vev**2) - (8*cb**7*cba**2*m122*MHA**2)/(sb*vev**2) - (4*cb**7*cba**4*m122*MHH**2)/(sb*vev**2) - (4*cb**5*cba**2*m122*MHL**2)/(sb*vev**2) + (4*cb**7*cba**4*m122*MHL**2)/(sb*vev**2) - (32*cb**5*cba**2*m122*MHA**2*sb)/vev**2 - (4*cb**3*cba**2*m122*MHH**2*sb)/vev**2 - (12*cb**5*cba**4*m122*MHH**2*sb)/vev**2 - (8*cb**3*cba**2*m122*MHL**2*sb)/vev**2 + (12*cb**5*cba**4*m122*MHL**2*sb)/vev**2 + (24*cb**2*cba**2*m122**2*sb**2)/vev**2 + (16*cb**6*cba**2*MHA**4*sb**2)/vev**2 + (4*cb**4*cba**2*MHA**2*MHH**2*sb**2)/vev**2 + (12*cb**6*cba**4*MHA**2*MHH**2*sb**2)/vev**2 + (2*cb**4*cba**4*MHH**4*sb**2)/vev**2 + (2*cb**6*cba**6*MHH**4*sb**2)/vev**2 + (8*cb**4*cba**2*MHA**2*MHL**2*sb**2)/vev**2 - (12*cb**6*cba**4*MHA**2*MHL**2*sb**2)/vev**2 + (2*cb**2*cba**2*MHH**2*MHL**2*sb**2)/vev**2 - (4*cb**6*cba**6*MHH**2*MHL**2*sb**2)/vev**2 - (2*cb**4*cba**4*MHL**4*sb**2)/vev**2 + (2*cb**6*cba**6*MHL**4*sb**2)/vev**2 - (48*cb**3*cba**2*m122*MHA**2*sb**3)/vev**2 - (8*cb*cba**2*m122*MHH**2*sb**3)/vev**2 - (12*cb**3*cba**4*m122*MHH**2*sb**3)/vev**2 - (4*cb*cba**2*m122*MHL**2*sb**3)/vev**2 + (12*cb**3*cba**4*m122*MHL**2*sb**3)/vev**2 + (16*cba**2*m122**2*sb**4)/vev**2 + (24*cb**4*cba**2*MHA**4*sb**4)/vev**2 + (8*cb**2*cba**2*MHA**2*MHH**2*sb**4)/vev**2 + (12*cb**4*cba**4*MHA**2*MHH**2*sb**4)/vev**2 + (cba**2*MHH**4*sb**4)/vev**2 + (2*cb**2*cba**4*MHH**4*sb**4)/vev**2 + (cb**4*cba**6*MHH**4*sb**4)/vev**2 + (4*cb**2*cba**2*MHA**2*MHL**2*sb**4)/vev**2 - (12*cb**4*cba**4*MHA**2*MHL**2*sb**4)/vev**2 - (2*cb**2*cba**4*MHH**2*MHL**2*sb**4)/vev**2 - 
(2*cb**4*cba**6*MHH**2*MHL**2*sb**4)/vev**2 + (cb**4*cba**6*MHL**4*sb**4)/vev**2 - (32*cb*cba**2*m122*MHA**2*sb**5)/vev**2 - (4*cba**2*m122*MHH**2*sb**5)/(cb*vev**2) - (4*cb*cba**4*m122*MHH**2*sb**5)/vev**2 + (4*cb*cba**4*m122*MHL**2*sb**5)/vev**2 + (4*cba**2*m122**2*sb**6)/(cb**2*vev**2) + (16*cb**2*cba**2*MHA**4*sb**6)/vev**2 + (4*cba**2*MHA**2*MHH**2*sb**6)/vev**2 + (4*cb**2*cba**4*MHA**2*MHH**2*sb**6)/vev**2 - (4*cb**2*cba**4*MHA**2*MHL**2*sb**6)/vev**2 - (8*cba**2*m122*MHA**2*sb**7)/(cb*vev**2) + (4*cba**2*MHA**4*sb**8)/vev**2 + (12*cb**6*cba*m122*MHA**2*sba)/vev**2 + (2*cb**4*cba*m122*MHH**2*sba)/vev**2 + (16*cb**6*cba**3*m122*MHH**2*sba)/vev**2 + (10*cb**4*cba*m122*MHL**2*sba)/vev**2 - (16*cb**6*cba**3*m122*MHL**2*sba)/vev**2 - (4*cb**7*cba*m122**2*sba)/(sb**3*vev**2) + (4*cb**8*cba*m122*MHA**2*sba)/(sb**2*vev**2) + (6*cb**8*cba**3*m122*MHH**2*sba)/(sb**2*vev**2) + (6*cb**6*cba*m122*MHL**2*sba)/(sb**2*vev**2) - (6*cb**8*cba**3*m122*MHL**2*sba)/(sb**2*vev**2) - (12*cb**5*cba*m122**2*sba)/(sb*vev**2) - (4*cb**9*cba**3*MHA**2*MHH**2*sba)/(sb*vev**2) - (2*cb**9*cba**5*MHH**4*sba)/(sb*vev**2) - (4*cb**7*cba*MHA**2*MHL**2*sba)/(sb*vev**2) + (4*cb**9*cba**3*MHA**2*MHL**2*sba)/(sb*vev**2) - (4*cb**7*cba**3*MHH**2*MHL**2*sba)/(sb*vev**2) + (4*cb**9*cba**5*MHH**2*MHL**2*sba)/(sb*vev**2) - (2*cb**5*cba*MHL**4*sba)/(sb*vev**2) + (4*cb**7*cba**3*MHL**4*sba)/(sb*vev**2) - (2*cb**9*cba**5*MHL**4*sba)/(sb*vev**2) - (8*cb**3*cba*m122**2*sb*sba)/vev**2 - (12*cb**7*cba**3*MHA**2*MHH**2*sb*sba)/vev**2 - (2*cb**5*cba**3*MHH**4*sb*sba)/vev**2 - (4*cb**7*cba**5*MHH**4*sb*sba)/vev**2 - (8*cb**5*cba*MHA**2*MHL**2*sb*sba)/vev**2 + (12*cb**7*cba**3*MHA**2*MHL**2*sb*sba)/vev**2 - (2*cb**3*cba*MHH**2*MHL**2*sb*sba)/vev**2 - (2*cb**5*cba**3*MHH**2*MHL**2*sb*sba)/vev**2 + (8*cb**7*cba**5*MHH**2*MHL**2*sb*sba)/vev**2 + (4*cb**5*cba**3*MHL**4*sb*sba)/vev**2 - (4*cb**7*cba**5*MHL**4*sb*sba)/vev**2 + (8*cb**4*cba*m122*MHA**2*sb**2*sba)/vev**2 - (2*cb**2*cba*m122*MHH**2*sb**2*sba)/vev**2 + 
(12*cb**4*cba**3*m122*MHH**2*sb**2*sba)/vev**2 + (2*cb**2*cba*m122*MHL**2*sb**2*sba)/vev**2 - (12*cb**4*cba**3*m122*MHL**2*sb**2*sba)/vev**2 + (8*cb*cba*m122**2*sb**3*sba)/vev**2 + (4*cb**3*cba*MHA**2*MHH**2*sb**3*sba)/vev**2 - (12*cb**5*cba**3*MHA**2*MHH**2*sb**3*sba)/vev**2 - (2*cb**5*cba**5*MHH**4*sb**3*sba)/vev**2 - (4*cb**3*cba*MHA**2*MHL**2*sb**3*sba)/vev**2 + (12*cb**5*cba**3*MHA**2*MHL**2*sb**3*sba)/vev**2 + (2*cb*cba*MHH**2*MHL**2*sb**3*sba)/vev**2 + (4*cb**5*cba**5*MHH**2*MHL**2*sb**3*sba)/vev**2 - (2*cb**5*cba**5*MHL**4*sb**3*sba)/vev**2 - (8*cb**2*cba*m122*MHA**2*sb**4*sba)/vev**2 - (10*cba*m122*MHH**2*sb**4*sba)/vev**2 - (2*cba*m122*MHL**2*sb**4*sba)/vev**2 + (12*cba*m122**2*sb**5*sba)/(cb*vev**2) + (8*cb*cba*MHA**2*MHH**2*sb**5*sba)/vev**2 - (4*cb**3*cba**3*MHA**2*MHH**2*sb**5*sba)/vev**2 + (2*cba*MHH**4*sb**5*sba)/(cb*vev**2) + (2*cb*cba**3*MHH**4*sb**5*sba)/vev**2 + (4*cb**3*cba**3*MHA**2*MHL**2*sb**5*sba)/vev**2 - (2*cb*cba**3*MHH**2*MHL**2*sb**5*sba)/vev**2 - (12*cba*m122*MHA**2*sb**6*sba)/vev**2 - (6*cba*m122*MHH**2*sb**6*sba)/(cb**2*vev**2) - (2*cba**3*m122*MHH**2*sb**6*sba)/vev**2 + (2*cba**3*m122*MHL**2*sb**6*sba)/vev**2 + (4*cba*m122**2*sb**7*sba)/(cb**3*vev**2) + (4*cba*MHA**2*MHH**2*sb**7*sba)/(cb*vev**2) - (4*cba*m122*MHA**2*sb**8*sba)/(cb**2*vev**2) - (cb**4*m122**2*sba**2)/vev**2 + (4*cb**8*cba**2*MHA**2*MHH**2*sba**2)/vev**2 + (4*cb**8*cba**4*MHH**4*sba**2)/vev**2 - (4*cb**8*cba**2*MHA**2*MHL**2*sba**2)/vev**2 + (4*cb**6*cba**2*MHH**2*MHL**2*sba**2)/vev**2 - (8*cb**8*cba**4*MHH**2*MHL**2*sba**2)/vev**2 - (4*cb**6*cba**2*MHL**4*sba**2)/vev**2 + (4*cb**8*cba**4*MHL**4*sba**2)/vev**2 + (cb**8*m122**2*sba**2)/(sb**4*vev**2) - (2*cb**9*cba**2*m122*MHH**2*sba**2)/(sb**3*vev**2) - (2*cb**7*m122*MHL**2*sba**2)/(sb**3*vev**2) + (2*cb**9*cba**2*m122*MHL**2*sba**2)/(sb**3*vev**2) + (2*cb**6*m122**2*sba**2)/(sb**2*vev**2) + (cb**10*cba**4*MHH**4*sba**2)/(sb**2*vev**2) + (2*cb**8*cba**2*MHH**2*MHL**2*sba**2)/(sb**2*vev**2) - 
(2*cb**10*cba**4*MHH**2*MHL**2*sba**2)/(sb**2*vev**2) + (cb**6*MHL**4*sba**2)/(sb**2*vev**2) - (2*cb**8*cba**2*MHL**4*sba**2)/(sb**2*vev**2) + (cb**10*cba**4*MHL**4*sba**2)/(sb**2*vev**2) - (8*cb**7*cba**2*m122*MHH**2*sba**2)/(sb*vev**2) - (2*cb**5*m122*MHL**2*sba**2)/(sb*vev**2) + (8*cb**7*cba**2*m122*MHL**2*sba**2)/(sb*vev**2) + (2*cb**3*m122*MHH**2*sb*sba**2)/vev**2 - (12*cb**5*cba**2*m122*MHH**2*sb*sba**2)/vev**2 + (2*cb**3*m122*MHL**2*sb*sba**2)/vev**2 + (12*cb**5*cba**2*m122*MHL**2*sb*sba**2)/vev**2 - (4*cb**2*m122**2*sb**2*sba**2)/vev**2 + (12*cb**6*cba**2*MHA**2*MHH**2*sb**2*sba**2)/vev**2 + (5*cb**6*cba**4*MHH**4*sb**2*sba**2)/vev**2 - (12*cb**6*cba**2*MHA**2*MHL**2*sb**2*sba**2)/vev**2 - (2*cb**2*MHH**2*MHL**2*sb**2*sba**2)/vev**2 + (2*cb**4*cba**2*MHH**2*MHL**2*sb**2*sba**2)/vev**2 - (10*cb**6*cba**4*MHH**2*MHL**2*sb**2*sba**2)/vev**2 - (2*cb**4*cba**2*MHL**4*sb**2*sba**2)/vev**2 + (5*cb**6*cba**4*MHL**4*sb**2*sba**2)/vev**2 + (2*cb*m122*MHH**2*sb**3*sba**2)/vev**2 - (8*cb**3*cba**2*m122*MHH**2*sb**3*sba**2)/vev**2 + (2*cb*m122*MHL**2*sb**3*sba**2)/vev**2 + (8*cb**3*cba**2*m122*MHL**2*sb**3*sba**2)/vev**2 - (m122**2*sb**4*sba**2)/vev**2 + (12*cb**4*cba**2*MHA**2*MHH**2*sb**4*sba**2)/vev**2 + (2*cb**4*cba**4*MHH**4*sb**4*sba**2)/vev**2 - (12*cb**4*cba**2*MHA**2*MHL**2*sb**4*sba**2)/vev**2 - (4*cb**4*cba**4*MHH**2*MHL**2*sb**4*sba**2)/vev**2 + (2*cb**4*cba**4*MHL**4*sb**4*sba**2)/vev**2 - (2*m122*MHH**2*sb**5*sba**2)/(cb*vev**2) - (2*cb*cba**2*m122*MHH**2*sb**5*sba**2)/vev**2 + (2*cb*cba**2*m122*MHL**2*sb**5*sba**2)/vev**2 + (2*m122**2*sb**6*sba**2)/(cb**2*vev**2) + (4*cb**2*cba**2*MHA**2*MHH**2*sb**6*sba**2)/vev**2 + (MHH**4*sb**6*sba**2)/(cb**2*vev**2) - (4*cb**2*cba**2*MHA**2*MHL**2*sb**6*sba**2)/vev**2 - (2*m122*MHH**2*sb**7*sba**2)/(cb**3*vev**2) + (m122**2*sb**8*sba**2)/(cb**4*vev**2) + (16*cb**6*cba*m122*MHH**2*sba**3)/vev**2 - (16*cb**6*cba*m122*MHL**2*sba**3)/vev**2 + (6*cb**8*cba*m122*MHH**2*sba**3)/(sb**2*vev**2) - 
(6*cb**8*cba*m122*MHL**2*sba**3)/(sb**2*vev**2) - (4*cb**9*cba*MHA**2*MHH**2*sba**3)/(sb*vev**2) - (4*cb**9*cba**3*MHH**4*sba**3)/(sb*vev**2) + (4*cb**9*cba*MHA**2*MHL**2*sba**3)/(sb*vev**2) - (4*cb**7*cba*MHH**2*MHL**2*sba**3)/(sb*vev**2) + (8*cb**9*cba**3*MHH**2*MHL**2*sba**3)/(sb*vev**2) + (4*cb**7*cba*MHL**4*sba**3)/(sb*vev**2) - (4*cb**9*cba**3*MHL**4*sba**3)/(sb*vev**2) - (12*cb**7*cba*MHA**2*MHH**2*sb*sba**3)/vev**2 - (2*cb**5*cba*MHH**4*sb*sba**3)/vev**2 - (8*cb**7*cba**3*MHH**4*sb*sba**3)/vev**2 + (12*cb**7*cba*MHA**2*MHL**2*sb*sba**3)/vev**2 - (2*cb**5*cba*MHH**2*MHL**2*sb*sba**3)/vev**2 + (16*cb**7*cba**3*MHH**2*MHL**2*sb*sba**3)/vev**2 + (4*cb**5*cba*MHL**4*sb*sba**3)/vev**2 - (8*cb**7*cba**3*MHL**4*sb*sba**3)/vev**2 + (12*cb**4*cba*m122*MHH**2*sb**2*sba**3)/vev**2 - (12*cb**4*cba*m122*MHL**2*sb**2*sba**3)/vev**2 - (12*cb**5*cba*MHA**2*MHH**2*sb**3*sba**3)/vev**2 - (4*cb**5*cba**3*MHH**4*sb**3*sba**3)/vev**2 + (12*cb**5*cba*MHA**2*MHL**2*sb**3*sba**3)/vev**2 + (8*cb**5*cba**3*MHH**2*MHL**2*sb**3*sba**3)/vev**2 - (4*cb**5*cba**3*MHL**4*sb**3*sba**3)/vev**2 - (4*cb**3*cba*MHA**2*MHH**2*sb**5*sba**3)/vev**2 + (2*cb*cba*MHH**4*sb**5*sba**3)/vev**2 + (4*cb**3*cba*MHA**2*MHL**2*sb**5*sba**3)/vev**2 - (2*cb*cba*MHH**2*MHL**2*sb**5*sba**3)/vev**2 - (2*cba*m122*MHH**2*sb**6*sba**3)/vev**2 + (2*cba*m122*MHL**2*sb**6*sba**3)/vev**2 + (5*cb**8*cba**2*MHH**4*sba**4)/vev**2 + (2*cb**6*MHH**2*MHL**2*sba**4)/vev**2 - (10*cb**8*cba**2*MHH**2*MHL**2*sba**4)/vev**2 - (2*cb**6*MHL**4*sba**4)/vev**2 + (5*cb**8*cba**2*MHL**4*sba**4)/vev**2 - (2*cb**9*m122*MHH**2*sba**4)/(sb**3*vev**2) + (2*cb**9*m122*MHL**2*sba**4)/(sb**3*vev**2) + (2*cb**10*cba**2*MHH**4*sba**4)/(sb**2*vev**2) + (2*cb**8*MHH**2*MHL**2*sba**4)/(sb**2*vev**2) - (4*cb**10*cba**2*MHH**2*MHL**2*sba**4)/(sb**2*vev**2) - (2*cb**8*MHL**4*sba**4)/(sb**2*vev**2) + (2*cb**10*cba**2*MHL**4*sba**4)/(sb**2*vev**2) - (4*cb**7*m122*MHH**2*sba**4)/(sb*vev**2) + (4*cb**7*m122*MHL**2*sba**4)/(sb*vev**2) - 
(2*cb**4*MHH**4*sb**2*sba**4)/vev**2 + (4*cb**6*cba**2*MHH**4*sb**2*sba**4)/vev**2 + (2*cb**4*MHH**2*MHL**2*sb**2*sba**4)/vev**2 - (8*cb**6*cba**2*MHH**2*MHL**2*sb**2*sba**4)/vev**2 + (4*cb**6*cba**2*MHL**4*sb**2*sba**4)/vev**2 + (4*cb**3*m122*MHH**2*sb**3*sba**4)/vev**2 - (4*cb**3*m122*MHL**2*sb**3*sba**4)/vev**2 - (2*cb**2*MHH**4*sb**4*sba**4)/vev**2 + (cb**4*cba**2*MHH**4*sb**4*sba**4)/vev**2 + (2*cb**2*MHH**2*MHL**2*sb**4*sba**4)/vev**2 - (2*cb**4*cba**2*MHH**2*MHL**2*sb**4*sba**4)/vev**2 + (cb**4*cba**2*MHL**4*sb**4*sba**4)/vev**2 + (2*cb*m122*MHH**2*sb**5*sba**4)/vev**2 - (2*cb*m122*MHL**2*sb**5*sba**4)/vev**2 - (2*cb**9*cba*MHH**4*sba**5)/(sb*vev**2) + (4*cb**9*cba*MHH**2*MHL**2*sba**5)/(sb*vev**2) - (2*cb**9*cba*MHL**4*sba**5)/(sb*vev**2) - (4*cb**7*cba*MHH**4*sb*sba**5)/vev**2 + (8*cb**7*cba*MHH**2*MHL**2*sb*sba**5)/vev**2 - (4*cb**7*cba*MHL**4*sb*sba**5)/vev**2 - (2*cb**5*cba*MHH**4*sb**3*sba**5)/vev**2 + (4*cb**5*cba*MHH**2*MHL**2*sb**3*sba**5)/vev**2 - (2*cb**5*cba*MHL**4*sb**3*sba**5)/vev**2 + (2*cb**8*MHH**4*sba**6)/vev**2 - (4*cb**8*MHH**2*MHL**2*sba**6)/vev**2 + (2*cb**8*MHL**4*sba**6)/vev**2 + (cb**10*MHH**4*sba**6)/(sb**2*vev**2) - (2*cb**10*MHH**2*MHL**2*sba**6)/(sb**2*vev**2) + (cb**10*MHL**4*sba**6)/(sb**2*vev**2) + (cb**6*MHH**4*sb**2*sba**6)/vev**2 - (2*cb**6*MHH**2*MHL**2*sb**2*sba**6)/vev**2 + (cb**6*MHL**4*sb**2*sba**6)/vev**2)*cmath.sqrt(-4*MHA**2*MHH**2 + MHH**4))/(32.*cmath.pi*abs(MHH)**3)',
(P.HA,P.Z):'((-(cb**4*ee**2*MHA**2*sba**2) - cb**4*ee**2*MHH**2*sba**2 + (cb**4*ee**2*MHA**4*sba**2)/(2.*MZ**2) - (cb**4*ee**2*MHA**2*MHH**2*sba**2)/MZ**2 + (cb**4*ee**2*MHH**4*sba**2)/(2.*MZ**2) + (cb**4*ee**2*MZ**2*sba**2)/2. - 2*cb**2*ee**2*MHA**2*sb**2*sba**2 - 2*cb**2*ee**2*MHH**2*sb**2*sba**2 + (cb**2*ee**2*MHA**4*sb**2*sba**2)/MZ**2 - (2*cb**2*ee**2*MHA**2*MHH**2*sb**2*sba**2)/MZ**2 + (cb**2*ee**2*MHH**4*sb**2*sba**2)/MZ**2 + cb**2*ee**2*MZ**2*sb**2*sba**2 - ee**2*MHA**2*sb**4*sba**2 - ee**2*MHH**2*sb**4*sba**2 + (ee**2*MHA**4*sb**4*sba**2)/(2.*MZ**2) - (ee**2*MHA**2*MHH**2*sb**4*sba**2)/MZ**2 + (ee**2*MHH**4*sb**4*sba**2)/(2.*MZ**2) + (ee**2*MZ**2*sb**4*sba**2)/2. - (cb**4*cw**2*ee**2*MHA**2*sba**2)/(2.*sw**2) - (cb**4*cw**2*ee**2*MHH**2*sba**2)/(2.*sw**2) + (cb**4*cw**2*ee**2*MHA**4*sba**2)/(4.*MZ**2*sw**2) - (cb**4*cw**2*ee**2*MHA**2*MHH**2*sba**2)/(2.*MZ**2*sw**2) + (cb**4*cw**2*ee**2*MHH**4*sba**2)/(4.*MZ**2*sw**2) + (cb**4*cw**2*ee**2*MZ**2*sba**2)/(4.*sw**2) - (cb**2*cw**2*ee**2*MHA**2*sb**2*sba**2)/sw**2 - (cb**2*cw**2*ee**2*MHH**2*sb**2*sba**2)/sw**2 + (cb**2*cw**2*ee**2*MHA**4*sb**2*sba**2)/(2.*MZ**2*sw**2) - (cb**2*cw**2*ee**2*MHA**2*MHH**2*sb**2*sba**2)/(MZ**2*sw**2) + (cb**2*cw**2*ee**2*MHH**4*sb**2*sba**2)/(2.*MZ**2*sw**2) + (cb**2*cw**2*ee**2*MZ**2*sb**2*sba**2)/(2.*sw**2) - (cw**2*ee**2*MHA**2*sb**4*sba**2)/(2.*sw**2) - (cw**2*ee**2*MHH**2*sb**4*sba**2)/(2.*sw**2) + (cw**2*ee**2*MHA**4*sb**4*sba**2)/(4.*MZ**2*sw**2) - (cw**2*ee**2*MHA**2*MHH**2*sb**4*sba**2)/(2.*MZ**2*sw**2) + (cw**2*ee**2*MHH**4*sb**4*sba**2)/(4.*MZ**2*sw**2) + (cw**2*ee**2*MZ**2*sb**4*sba**2)/(4.*sw**2) - (cb**4*ee**2*MHA**2*sba**2*sw**2)/(2.*cw**2) - (cb**4*ee**2*MHH**2*sba**2*sw**2)/(2.*cw**2) + (cb**4*ee**2*MHA**4*sba**2*sw**2)/(4.*cw**2*MZ**2) - (cb**4*ee**2*MHA**2*MHH**2*sba**2*sw**2)/(2.*cw**2*MZ**2) + (cb**4*ee**2*MHH**4*sba**2*sw**2)/(4.*cw**2*MZ**2) + (cb**4*ee**2*MZ**2*sba**2*sw**2)/(4.*cw**2) - (cb**2*ee**2*MHA**2*sb**2*sba**2*sw**2)/cw**2 - 
(cb**2*ee**2*MHH**2*sb**2*sba**2*sw**2)/cw**2 + (cb**2*ee**2*MHA**4*sb**2*sba**2*sw**2)/(2.*cw**2*MZ**2) - (cb**2*ee**2*MHA**2*MHH**2*sb**2*sba**2*sw**2)/(cw**2*MZ**2) + (cb**2*ee**2*MHH**4*sb**2*sba**2*sw**2)/(2.*cw**2*MZ**2) + (cb**2*ee**2*MZ**2*sb**2*sba**2*sw**2)/(2.*cw**2) - (ee**2*MHA**2*sb**4*sba**2*sw**2)/(2.*cw**2) - (ee**2*MHH**2*sb**4*sba**2*sw**2)/(2.*cw**2) + (ee**2*MHA**4*sb**4*sba**2*sw**2)/(4.*cw**2*MZ**2) - (ee**2*MHA**2*MHH**2*sb**4*sba**2*sw**2)/(2.*cw**2*MZ**2) + (ee**2*MHH**4*sb**4*sba**2*sw**2)/(4.*cw**2*MZ**2) + (ee**2*MZ**2*sb**4*sba**2*sw**2)/(4.*cw**2))*cmath.sqrt(MHA**4 - 2*MHA**2*MHH**2 + MHH**4 - 2*MHA**2*MZ**2 - 2*MHH**2*MZ**2 + MZ**4))/(16.*cmath.pi*abs(MHH)**3)',
(P.HL,P.HL):'(((16*cb**4*cba**6*m122**2)/vev**2 + (cb**8*cba**10*MHH**4)/vev**2 + (6*cb**6*cba**8*MHH**2*MHL**2)/vev**2 - (2*cb**8*cba**10*MHH**2*MHL**2)/vev**2 + (9*cb**4*cba**6*MHL**4)/vev**2 - (6*cb**6*cba**8*MHL**4)/vev**2 + (cb**8*cba**10*MHL**4)/vev**2 + (4*cb**6*cba**6*m122**2)/(sb**2*vev**2) - (4*cb**7*cba**8*m122*MHH**2)/(sb*vev**2) - (12*cb**5*cba**6*m122*MHL**2)/(sb*vev**2) + (4*cb**7*cba**8*m122*MHL**2)/(sb*vev**2) - (12*cb**3*cba**6*m122*MHH**2*sb)/vev**2 - (4*cb**5*cba**8*m122*MHH**2*sb)/vev**2 - (24*cb**3*cba**6*m122*MHL**2*sb)/vev**2 + (4*cb**5*cba**8*m122*MHL**2*sb)/vev**2 + (24*cb**2*cba**6*m122**2*sb**2)/vev**2 + (6*cb**4*cba**8*MHH**4*sb**2)/vev**2 - (2*cb**6*cba**10*MHH**4*sb**2)/vev**2 + (18*cb**2*cba**6*MHH**2*MHL**2*sb**2)/vev**2 - (12*cb**4*cba**8*MHH**2*MHL**2*sb**2)/vev**2 + (4*cb**6*cba**10*MHH**2*MHL**2*sb**2)/vev**2 + (6*cb**4*cba**8*MHL**4*sb**2)/vev**2 - (2*cb**6*cba**10*MHL**4*sb**2)/vev**2 - (24*cb*cba**6*m122*MHH**2*sb**3)/vev**2 + (12*cb**3*cba**8*m122*MHH**2*sb**3)/vev**2 - (12*cb*cba**6*m122*MHL**2*sb**3)/vev**2 - (12*cb**3*cba**8*m122*MHL**2*sb**3)/vev**2 + (16*cba**6*m122**2*sb**4)/vev**2 + (9*cba**6*MHH**4*sb**4)/vev**2 - (6*cb**2*cba**8*MHH**4*sb**4)/vev**2 - (3*cb**4*cba**10*MHH**4*sb**4)/vev**2 - (6*cb**2*cba**8*MHH**2*MHL**2*sb**4)/vev**2 + (6*cb**4*cba**10*MHH**2*MHL**2*sb**4)/vev**2 + (12*cb**2*cba**8*MHL**4*sb**4)/vev**2 - (3*cb**4*cba**10*MHL**4*sb**4)/vev**2 - (12*cba**6*m122*MHH**2*sb**5)/(cb*vev**2) + (20*cb*cba**8*m122*MHH**2*sb**5)/vev**2 - (20*cb*cba**8*m122*MHL**2*sb**5)/vev**2 + (4*cba**6*m122**2*sb**6)/(cb**2*vev**2) - (12*cba**8*MHH**4*sb**6)/vev**2 + (4*cb**2*cba**10*MHH**4*sb**6)/vev**2 + (12*cba**8*MHH**2*MHL**2*sb**6)/vev**2 - (8*cb**2*cba**10*MHH**2*MHL**2*sb**6)/vev**2 + (4*cb**2*cba**10*MHL**4*sb**6)/vev**2 + (8*cba**8*m122*MHH**2*sb**7)/(cb*vev**2) - (8*cba**8*m122*MHL**2*sb**7)/(cb*vev**2) + (4*cba**10*MHH**4*sb**8)/vev**2 - (8*cba**10*MHH**2*MHL**2*sb**8)/vev**2 + (4*cba**10*MHL**4*sb**8)/vev**2 + 
(42*cb**4*cba**5*m122*MHH**2*sba)/vev**2 - (12*cb**6*cba**7*m122*MHH**2*sba)/vev**2 + (18*cb**4*cba**5*m122*MHL**2*sba)/vev**2 + (12*cb**6*cba**7*m122*MHL**2*sba)/vev**2 - (12*cb**7*cba**5*m122**2*sba)/(sb**3*vev**2) + (10*cb**8*cba**7*m122*MHH**2*sba)/(sb**2*vev**2) + (30*cb**6*cba**5*m122*MHL**2*sba)/(sb**2*vev**2) - (10*cb**8*cba**7*m122*MHL**2*sba)/(sb**2*vev**2) - (36*cb**5*cba**5*m122**2*sba)/(sb*vev**2) - (2*cb**9*cba**9*MHH**4*sba)/(sb*vev**2) - (12*cb**7*cba**7*MHH**2*MHL**2*sba)/(sb*vev**2) + (4*cb**9*cba**9*MHH**2*MHL**2*sba)/(sb*vev**2) - (18*cb**5*cba**5*MHL**4*sba)/(sb*vev**2) + (12*cb**7*cba**7*MHL**4*sba)/(sb*vev**2) - (2*cb**9*cba**9*MHL**4*sba)/(sb*vev**2) - (24*cb**3*cba**5*m122**2*sb*sba)/vev**2 - (18*cb**5*cba**7*MHH**4*sb*sba)/vev**2 + (12*cb**7*cba**9*MHH**4*sb*sba)/vev**2 - (54*cb**3*cba**5*MHH**2*MHL**2*sb*sba)/vev**2 + (66*cb**5*cba**7*MHH**2*MHL**2*sb*sba)/vev**2 - (24*cb**7*cba**9*MHH**2*MHL**2*sb*sba)/vev**2 + (36*cb**3*cba**5*MHL**4*sb*sba)/vev**2 - (48*cb**5*cba**7*MHL**4*sb*sba)/vev**2 + (12*cb**7*cba**9*MHL**4*sb*sba)/vev**2 + (54*cb**2*cba**5*m122*MHH**2*sb**2*sba)/vev**2 - (76*cb**4*cba**7*m122*MHH**2*sb**2*sba)/vev**2 - (54*cb**2*cba**5*m122*MHL**2*sb**2*sba)/vev**2 + (76*cb**4*cba**7*m122*MHL**2*sb**2*sba)/vev**2 + (24*cb*cba**5*m122**2*sb**3*sba)/vev**2 - (36*cb*cba**5*MHH**4*sb**3*sba)/vev**2 + (48*cb**3*cba**7*MHH**4*sb**3*sba)/vev**2 + (2*cb**5*cba**9*MHH**4*sb**3*sba)/vev**2 + (54*cb*cba**5*MHH**2*MHL**2*sb**3*sba)/vev**2 - (24*cb**3*cba**7*MHH**2*MHL**2*sb**3*sba)/vev**2 - (4*cb**5*cba**9*MHH**2*MHL**2*sb**3*sba)/vev**2 - (24*cb**3*cba**7*MHL**4*sb**3*sba)/vev**2 + (2*cb**5*cba**9*MHL**4*sb**3*sba)/vev**2 - (18*cba**5*m122*MHH**2*sb**4*sba)/vev**2 - (56*cb**2*cba**7*m122*MHH**2*sb**4*sba)/vev**2 - (42*cba**5*m122*MHL**2*sb**4*sba)/vev**2 + (56*cb**2*cba**7*m122*MHL**2*sb**4*sba)/vev**2 + (36*cba**5*m122**2*sb**5*sba)/(cb*vev**2) + (18*cba**5*MHH**4*sb**5*sba)/(cb*vev**2) + (42*cb*cba**7*MHH**4*sb**5*sba)/vev**2 - 
(32*cb**3*cba**9*MHH**4*sb**5*sba)/vev**2 - (78*cb*cba**7*MHH**2*MHL**2*sb**5*sba)/vev**2 + (64*cb**3*cba**9*MHH**2*MHL**2*sb**5*sba)/vev**2 + (36*cb*cba**7*MHL**4*sb**5*sba)/vev**2 - (32*cb**3*cba**9*MHL**4*sb**5*sba)/vev**2 - (30*cba**5*m122*MHH**2*sb**6*sba)/(cb**2*vev**2) + (18*cba**7*m122*MHH**2*sb**6*sba)/vev**2 - (18*cba**7*m122*MHL**2*sb**6*sba)/vev**2 + (12*cba**5*m122**2*sb**7*sba)/(cb**3*vev**2) - (24*cba**7*MHH**4*sb**7*sba)/(cb*vev**2) - (12*cb*cba**9*MHH**4*sb**7*sba)/vev**2 + (24*cba**7*MHH**2*MHL**2*sb**7*sba)/(cb*vev**2) + (24*cb*cba**9*MHH**2*MHL**2*sb**7*sba)/vev**2 - (12*cb*cba**9*MHL**4*sb**7*sba)/vev**2 + (20*cba**7*m122*MHH**2*sb**8*sba)/(cb**2*vev**2) - (20*cba**7*m122*MHL**2*sb**8*sba)/(cb**2*vev**2) + (8*cba**9*MHH**4*sb**9*sba)/(cb*vev**2) - (16*cba**9*MHH**2*MHL**2*sb**9*sba)/(cb*vev**2) + (8*cba**9*MHL**4*sb**9*sba)/(cb*vev**2) - (73*cb**4*cba**4*m122**2*sba**2)/vev**2 + (18*cb**6*cba**6*MHH**4*sba**2)/vev**2 - (16*cb**8*cba**8*MHH**4*sba**2)/vev**2 + (54*cb**4*cba**4*MHH**2*MHL**2*sba**2)/vev**2 - (90*cb**6*cba**6*MHH**2*MHL**2*sba**2)/vev**2 + (32*cb**8*cba**8*MHH**2*MHL**2*sba**2)/vev**2 - (72*cb**4*cba**4*MHL**4*sba**2)/vev**2 + (72*cb**6*cba**6*MHL**4*sba**2)/vev**2 - (16*cb**8*cba**8*MHL**4*sba**2)/vev**2 + (9*cb**8*cba**4*m122**2*sba**2)/(sb**4*vev**2) - (6*cb**9*cba**6*m122*MHH**2*sba**2)/(sb**3*vev**2) - (18*cb**7*cba**4*m122*MHL**2*sba**2)/(sb**3*vev**2) + (6*cb**9*cba**6*m122*MHL**2*sba**2)/(sb**3*vev**2) + (2*cb**6*cba**4*m122**2*sba**2)/(sb**2*vev**2) + (cb**10*cba**8*MHH**4*sba**2)/(sb**2*vev**2) + (6*cb**8*cba**6*MHH**2*MHL**2*sba**2)/(sb**2*vev**2) - (2*cb**10*cba**8*MHH**2*MHL**2*sba**2)/(sb**2*vev**2) + (9*cb**6*cba**4*MHL**4*sba**2)/(sb**2*vev**2) - (6*cb**8*cba**6*MHL**4*sba**2)/(sb**2*vev**2) + (cb**10*cba**8*MHL**4*sba**2)/(sb**2*vev**2) - (48*cb**5*cba**4*m122*MHH**2*sba**2)/(sb*vev**2) + (44*cb**7*cba**6*m122*MHH**2*sba**2)/(sb*vev**2) + (66*cb**5*cba**4*m122*MHL**2*sba**2)/(sb*vev**2) - 
(44*cb**7*cba**6*m122*MHL**2*sba**2)/(sb*vev**2) + (6*cb**3*cba**4*m122*MHH**2*sb*sba**2)/vev**2 + (92*cb**5*cba**6*m122*MHH**2*sb*sba**2)/vev**2 + (138*cb**3*cba**4*m122*MHL**2*sb*sba**2)/vev**2 - (92*cb**5*cba**6*m122*MHL**2*sb*sba**2)/vev**2 - (132*cb**2*cba**4*m122**2*sb**2*sba**2)/vev**2 + (54*cb**2*cba**4*MHH**4*sb**2*sba**2)/vev**2 - (102*cb**4*cba**6*MHH**4*sb**2*sba**2)/vev**2 + (23*cb**6*cba**8*MHH**4*sb**2*sba**2)/vev**2 - (162*cb**2*cba**4*MHH**2*MHL**2*sb**2*sba**2)/vev**2 + (156*cb**4*cba**6*MHH**2*MHL**2*sb**2*sba**2)/vev**2 - (46*cb**6*cba**8*MHH**2*MHL**2*sb**2*sba**2)/vev**2 + (54*cb**2*cba**4*MHL**4*sb**2*sba**2)/vev**2 - (54*cb**4*cba**6*MHL**4*sb**2*sba**2)/vev**2 + (23*cb**6*cba**8*MHL**4*sb**2*sba**2)/vev**2 + (138*cb*cba**4*m122*MHH**2*sb**3*sba**2)/vev**2 - (36*cb**3*cba**6*m122*MHH**2*sb**3*sba**2)/vev**2 + (6*cb*cba**4*m122*MHL**2*sb**3*sba**2)/vev**2 + (36*cb**3*cba**6*m122*MHL**2*sb**3*sba**2)/vev**2 - (73*cba**4*m122**2*sb**4*sba**2)/vev**2 - (72*cba**4*MHH**4*sb**4*sba**2)/vev**2 - (18*cb**2*cba**6*MHH**4*sb**4*sba**2)/vev**2 + (62*cb**4*cba**8*MHH**4*sb**4*sba**2)/vev**2 + (54*cba**4*MHH**2*MHL**2*sb**4*sba**2)/vev**2 + (114*cb**2*cba**6*MHH**2*MHL**2*sb**4*sba**2)/vev**2 - (124*cb**4*cba**8*MHH**2*MHL**2*sb**4*sba**2)/vev**2 - (96*cb**2*cba**6*MHL**4*sb**4*sba**2)/vev**2 + (62*cb**4*cba**8*MHL**4*sb**4*sba**2)/vev**2 + (66*cba**4*m122*MHH**2*sb**5*sba**2)/(cb*vev**2) - (130*cb*cba**6*m122*MHH**2*sb**5*sba**2)/vev**2 - (48*cba**4*m122*MHL**2*sb**5*sba**2)/(cb*vev**2) + (130*cb*cba**6*m122*MHL**2*sb**5*sba**2)/vev**2 + (2*cba**4*m122**2*sb**6*sba**2)/(cb**2*vev**2) + (9*cba**4*MHH**4*sb**6*sba**2)/(cb**2*vev**2) + (90*cba**6*MHH**4*sb**6*sba**2)/vev**2 - (10*cb**2*cba**8*MHH**4*sb**6*sba**2)/vev**2 - (126*cba**6*MHH**2*MHL**2*sb**6*sba**2)/vev**2 + (20*cb**2*cba**8*MHH**2*MHL**2*sb**6*sba**2)/vev**2 + (36*cba**6*MHL**4*sb**6*sba**2)/vev**2 - (10*cb**2*cba**8*MHL**4*sb**6*sba**2)/vev**2 - 
(18*cba**4*m122*MHH**2*sb**7*sba**2)/(cb**3*vev**2) - (40*cba**6*m122*MHH**2*sb**7*sba**2)/(cb*vev**2) + (40*cba**6*m122*MHL**2*sb**7*sba**2)/(cb*vev**2) + (9*cba**4*m122**2*sb**8*sba**2)/(cb**4*vev**2) - (12*cba**6*MHH**4*sb**8*sba**2)/(cb**2*vev**2) - (28*cba**8*MHH**4*sb**8*sba**2)/vev**2 + (12*cba**6*MHH**2*MHL**2*sb**8*sba**2)/(cb**2*vev**2) + (56*cba**8*MHH**2*MHL**2*sb**8*sba**2)/vev**2 - (28*cba**8*MHL**4*sb**8*sba**2)/vev**2 + (12*cba**6*m122*MHH**2*sb**9*sba**2)/(cb**3*vev**2) - (12*cba**6*m122*MHL**2*sb**9*sba**2)/(cb**3*vev**2) + (4*cba**8*MHH**4*sb**10*sba**2)/(cb**2*vev**2) - (8*cba**8*MHH**2*MHL**2*sb**10*sba**2)/(cb**2*vev**2) + (4*cba**8*MHL**4*sb**10*sba**2)/(cb**2*vev**2) - (78*cb**4*cba**3*m122*MHH**2*sba**3)/vev**2 + (6*cb**6*cba**5*m122*MHH**2*sba**3)/vev**2 - (6*cb**4*cba**3*m122*MHL**2*sba**3)/vev**2 - (6*cb**6*cba**5*m122*MHL**2*sba**3)/vev**2 + (24*cb**7*cba**3*m122**2*sba**3)/(sb**3*vev**2) + (18*cb**6*cba**3*m122*MHH**2*sba**3)/(sb**2*vev**2) - (22*cb**8*cba**5*m122*MHH**2*sba**3)/(sb**2*vev**2) - (60*cb**6*cba**3*m122*MHL**2*sba**3)/(sb**2*vev**2) + (22*cb**8*cba**5*m122*MHL**2*sba**3)/(sb**2*vev**2) + (72*cb**5*cba**3*m122**2*sba**3)/(sb*vev**2) - (6*cb**7*cba**5*MHH**4*sba**3)/(sb*vev**2) + (4*cb**9*cba**7*MHH**4*sba**3)/(sb*vev**2) - (18*cb**5*cba**3*MHH**2*MHL**2*sba**3)/(sb*vev**2) + (30*cb**7*cba**5*MHH**2*MHL**2*sba**3)/(sb*vev**2) - (8*cb**9*cba**7*MHH**2*MHL**2*sba**3)/(sb*vev**2) + (36*cb**5*cba**3*MHL**4*sba**3)/(sb*vev**2) - (24*cb**7*cba**5*MHL**4*sba**3)/(sb*vev**2) + (4*cb**9*cba**7*MHL**4*sba**3)/(sb*vev**2) + (48*cb**3*cba**3*m122**2*sb*sba**3)/vev**2 - (36*cb**3*cba**3*MHH**4*sb*sba**3)/vev**2 + (78*cb**5*cba**5*MHH**4*sb*sba**3)/vev**2 - (24*cb**7*cba**7*MHH**4*sb*sba**3)/vev**2 + (162*cb**3*cba**3*MHH**2*MHL**2*sb*sba**3)/vev**2 - (174*cb**5*cba**5*MHH**2*MHL**2*sb*sba**3)/vev**2 + (48*cb**7*cba**7*MHH**2*MHL**2*sb*sba**3)/vev**2 - (108*cb**3*cba**3*MHL**4*sb*sba**3)/vev**2 + (96*cb**5*cba**5*MHL**4*sb*sba**3)/vev**2 
- (24*cb**7*cba**7*MHL**4*sb*sba**3)/vev**2 - (150*cb**2*cba**3*m122*MHH**2*sb**2*sba**3)/vev**2 + (124*cb**4*cba**5*m122*MHH**2*sb**2*sba**3)/vev**2 + (150*cb**2*cba**3*m122*MHL**2*sb**2*sba**3)/vev**2 - (124*cb**4*cba**5*m122*MHL**2*sb**2*sba**3)/vev**2 - (48*cb*cba**3*m122**2*sb**3*sba**3)/vev**2 + (108*cb*cba**3*MHH**4*sb**3*sba**3)/vev**2 - (48*cb**3*cba**5*MHH**4*sb**3*sba**3)/vev**2 - (28*cb**5*cba**7*MHH**4*sb**3*sba**3)/vev**2 - (162*cb*cba**3*MHH**2*MHL**2*sb**3*sba**3)/vev**2 + (24*cb**3*cba**5*MHH**2*MHL**2*sb**3*sba**3)/vev**2 + (56*cb**5*cba**7*MHH**2*MHL**2*sb**3*sba**3)/vev**2 + (36*cb*cba**3*MHL**4*sb**3*sba**3)/vev**2 + (24*cb**3*cba**5*MHL**4*sb**3*sba**3)/vev**2 - (28*cb**5*cba**7*MHL**4*sb**3*sba**3)/vev**2 + (6*cba**3*m122*MHH**2*sb**4*sba**3)/vev**2 + (116*cb**2*cba**5*m122*MHH**2*sb**4*sba**3)/vev**2 + (78*cba**3*m122*MHL**2*sb**4*sba**3)/vev**2 - (116*cb**2*cba**5*m122*MHL**2*sb**4*sba**3)/vev**2 - (72*cba**3*m122**2*sb**5*sba**3)/(cb*vev**2) - (36*cba**3*MHH**4*sb**5*sba**3)/(cb*vev**2) - (102*cb*cba**5*MHH**4*sb**5*sba**3)/vev**2 + (28*cb**3*cba**7*MHH**4*sb**5*sba**3)/vev**2 + (18*cba**3*MHH**2*MHL**2*sb**5*sba**3)/(cb*vev**2) + (186*cb*cba**5*MHH**2*MHL**2*sb**5*sba**3)/vev**2 - (56*cb**3*cba**7*MHH**2*MHL**2*sb**5*sba**3)/vev**2 - (84*cb*cba**5*MHL**4*sb**5*sba**3)/vev**2 + (28*cb**3*cba**7*MHL**4*sb**5*sba**3)/vev**2 + (60*cba**3*m122*MHH**2*sb**6*sba**3)/(cb**2*vev**2) - (6*cba**5*m122*MHH**2*sb**6*sba**3)/vev**2 - (18*cba**3*m122*MHL**2*sb**6*sba**3)/(cb**2*vev**2) + (6*cba**5*m122*MHL**2*sb**6*sba**3)/vev**2 - (24*cba**3*m122**2*sb**7*sba**3)/(cb**3*vev**2) + (30*cba**5*MHH**4*sb**7*sba**3)/(cb*vev**2) + (24*cb*cba**7*MHH**4*sb**7*sba**3)/vev**2 - (42*cba**5*MHH**2*MHL**2*sb**7*sba**3)/(cb*vev**2) - (48*cb*cba**7*MHH**2*MHL**2*sb**7*sba**3)/vev**2 + (12*cba**5*MHL**4*sb**7*sba**3)/(cb*vev**2) + (24*cb*cba**7*MHL**4*sb**7*sba**3)/vev**2 - (26*cba**5*m122*MHH**2*sb**8*sba**3)/(cb**2*vev**2) + 
(26*cba**5*m122*MHL**2*sb**8*sba**3)/(cb**2*vev**2) - (4*cba**7*MHH**4*sb**9*sba**3)/(cb*vev**2) + (8*cba**7*MHH**2*MHL**2*sb**9*sba**3)/(cb*vev**2) - (4*cba**7*MHL**4*sb**9*sba**3)/(cb*vev**2) + (64*cb**4*cba**2*m122**2*sba**4)/vev**2 + (9*cb**4*cba**2*MHH**4*sba**4)/vev**2 - (12*cb**6*cba**4*MHH**4*sba**4)/vev**2 - (13*cb**8*cba**6*MHH**4*sba**4)/vev**2 - (54*cb**4*cba**2*MHH**2*MHL**2*sba**4)/vev**2 + (6*cb**6*cba**4*MHH**2*MHL**2*sba**4)/vev**2 + (26*cb**8*cba**6*MHH**2*MHL**2*sba**4)/vev**2 + (54*cb**4*cba**2*MHL**4*sba**4)/vev**2 + (6*cb**6*cba**4*MHL**4*sba**4)/vev**2 - (13*cb**8*cba**6*MHL**4*sba**4)/vev**2 - (6*cb**9*cba**4*m122*MHH**2*sba**4)/(sb**3*vev**2) + (6*cb**9*cba**4*m122*MHL**2*sba**4)/(sb**3*vev**2) + (16*cb**6*cba**2*m122**2*sba**4)/(sb**2*vev**2) + (2*cb**10*cba**6*MHH**4*sba**4)/(sb**2*vev**2) + (6*cb**8*cba**4*MHH**2*MHL**2*sba**4)/(sb**2*vev**2) - (4*cb**10*cba**6*MHH**2*MHL**2*sba**4)/(sb**2*vev**2) - (6*cb**8*cba**4*MHL**4*sba**4)/(sb**2*vev**2) + (2*cb**10*cba**6*MHL**4*sba**4)/(sb**2*vev**2) + (42*cb**5*cba**2*m122*MHH**2*sba**4)/(sb*vev**2) - (2*cb**7*cba**4*m122*MHH**2*sba**4)/(sb*vev**2) - (66*cb**5*cba**2*m122*MHL**2*sba**4)/(sb*vev**2) + (2*cb**7*cba**4*m122*MHL**2*sba**4)/(sb*vev**2) + (18*cb**3*cba**2*m122*MHH**2*sb*sba**4)/vev**2 + (4*cb**5*cba**4*m122*MHH**2*sb*sba**4)/vev**2 - (90*cb**3*cba**2*m122*MHL**2*sb*sba**4)/vev**2 - (4*cb**5*cba**4*m122*MHL**2*sb*sba**4)/vev**2 + (96*cb**2*cba**2*m122**2*sb**2*sba**4)/vev**2 - (72*cb**2*cba**2*MHH**4*sb**2*sba**4)/vev**2 + (12*cb**4*cba**4*MHH**4*sb**2*sba**4)/vev**2 + (20*cb**6*cba**6*MHH**4*sb**2*sba**4)/vev**2 + (162*cb**2*cba**2*MHH**2*MHL**2*sb**2*sba**4)/vev**2 - (36*cb**4*cba**4*MHH**2*MHL**2*sb**2*sba**4)/vev**2 - (40*cb**6*cba**6*MHH**2*MHL**2*sb**2*sba**4)/vev**2 - (72*cb**2*cba**2*MHL**4*sb**2*sba**4)/vev**2 + (24*cb**4*cba**4*MHL**4*sb**2*sba**4)/vev**2 + (20*cb**6*cba**6*MHL**4*sb**2*sba**4)/vev**2 - (90*cb*cba**2*m122*MHH**2*sb**3*sba**4)/vev**2 - 
(24*cb**3*cba**4*m122*MHH**2*sb**3*sba**4)/vev**2 + (18*cb*cba**2*m122*MHL**2*sb**3*sba**4)/vev**2 + (24*cb**3*cba**4*m122*MHL**2*sb**3*sba**4)/vev**2 + (64*cba**2*m122**2*sb**4*sba**4)/vev**2 + (54*cba**2*MHH**4*sb**4*sba**4)/vev**2 + (48*cb**2*cba**4*MHH**4*sb**4*sba**4)/vev**2 + (40*cb**4*cba**6*MHH**4*sb**4*sba**4)/vev**2 - (54*cba**2*MHH**2*MHL**2*sb**4*sba**4)/vev**2 - (54*cb**2*cba**4*MHH**2*MHL**2*sb**4*sba**4)/vev**2 - (80*cb**4*cba**6*MHH**2*MHL**2*sb**4*sba**4)/vev**2 + (9*cba**2*MHL**4*sb**4*sba**4)/vev**2 + (6*cb**2*cba**4*MHL**4*sb**4*sba**4)/vev**2 + (40*cb**4*cba**6*MHL**4*sb**4*sba**4)/vev**2 - (66*cba**2*m122*MHH**2*sb**5*sba**4)/(cb*vev**2) - (26*cb*cba**4*m122*MHH**2*sb**5*sba**4)/vev**2 + (42*cba**2*m122*MHL**2*sb**5*sba**4)/(cb*vev**2) + (26*cb*cba**4*m122*MHL**2*sb**5*sba**4)/vev**2 + (16*cba**2*m122**2*sb**6*sba**4)/(cb**2*vev**2) + (12*cba**4*MHH**4*sb**6*sba**4)/vev**2 - (34*cb**2*cba**6*MHH**4*sb**6*sba**4)/vev**2 - (6*cba**4*MHH**2*MHL**2*sb**6*sba**4)/vev**2 + (68*cb**2*cba**6*MHH**2*MHL**2*sb**6*sba**4)/vev**2 - (6*cba**4*MHL**4*sb**6*sba**4)/vev**2 - (34*cb**2*cba**6*MHL**4*sb**6*sba**4)/vev**2 + (10*cba**4*m122*MHH**2*sb**7*sba**4)/(cb*vev**2) - (10*cba**4*m122*MHL**2*sb**7*sba**4)/(cb*vev**2) - (12*cba**4*MHH**4*sb**8*sba**4)/(cb**2*vev**2) - (31*cba**6*MHH**4*sb**8*sba**4)/vev**2 + (12*cba**4*MHH**2*MHL**2*sb**8*sba**4)/(cb**2*vev**2) + (62*cba**6*MHH**2*MHL**2*sb**8*sba**4)/vev**2 - (31*cba**6*MHL**4*sb**8*sba**4)/vev**2 + (12*cba**4*m122*MHH**2*sb**9*sba**4)/(cb**3*vev**2) - (12*cba**4*m122*MHL**2*sb**9*sba**4)/(cb**3*vev**2) + (8*cba**6*MHH**4*sb**10*sba**4)/(cb**2*vev**2) - (16*cba**6*MHH**2*MHL**2*sb**10*sba**4)/(cb**2*vev**2) + (8*cba**6*MHL**4*sb**10*sba**4)/(cb**2*vev**2) + (24*cb**4*cba*m122*MHH**2*sba**5)/vev**2 - (6*cb**6*cba**3*m122*MHH**2*sba**5)/vev**2 - (24*cb**4*cba*m122*MHL**2*sba**5)/vev**2 + (6*cb**6*cba**3*m122*MHL**2*sba**5)/vev**2 - (32*cb**8*cba**3*m122*MHH**2*sba**5)/(sb**2*vev**2) + 
(32*cb**8*cba**3*m122*MHL**2*sba**5)/(sb**2*vev**2) - (6*cb**7*cba**3*MHH**4*sba**5)/(sb*vev**2) + (14*cb**9*cba**5*MHH**4*sba**5)/(sb*vev**2) + (42*cb**7*cba**3*MHH**2*MHL**2*sba**5)/(sb*vev**2) - (28*cb**9*cba**5*MHH**2*MHL**2*sba**5)/(sb*vev**2) - (36*cb**7*cba**3*MHL**4*sba**5)/(sb*vev**2) + (14*cb**9*cba**5*MHL**4*sba**5)/(sb*vev**2) + (18*cb**3*cba*MHH**4*sb*sba**5)/vev**2 + (54*cb**5*cba**3*MHH**4*sb*sba**5)/vev**2 - (60*cb**7*cba**5*MHH**4*sb*sba**5)/vev**2 - (54*cb**3*cba*MHH**2*MHL**2*sb*sba**5)/vev**2 - (138*cb**5*cba**3*MHH**2*MHL**2*sb*sba**5)/vev**2 + (120*cb**7*cba**5*MHH**2*MHL**2*sb*sba**5)/vev**2 + (36*cb**3*cba*MHL**4*sb*sba**5)/vev**2 + (84*cb**5*cba**3*MHL**4*sb*sba**5)/vev**2 - (60*cb**7*cba**5*MHL**4*sb*sba**5)/vev**2 + (48*cb**2*cba*m122*MHH**2*sb**2*sba**5)/vev**2 + (128*cb**4*cba**3*m122*MHH**2*sb**2*sba**5)/vev**2 - (48*cb**2*cba*m122*MHL**2*sb**2*sba**5)/vev**2 - (128*cb**4*cba**3*m122*MHL**2*sb**2*sba**5)/vev**2 - (36*cb*cba*MHH**4*sb**3*sba**5)/vev**2 - (72*cb**3*cba**3*MHH**4*sb**3*sba**5)/vev**2 - (44*cb**5*cba**5*MHH**4*sb**3*sba**5)/vev**2 + (54*cb*cba*MHH**2*MHL**2*sb**3*sba**5)/vev**2 + (36*cb**3*cba**3*MHH**2*MHL**2*sb**3*sba**5)/vev**2 + (88*cb**5*cba**5*MHH**2*MHL**2*sb**3*sba**5)/vev**2 - (18*cb*cba*MHL**4*sb**3*sba**5)/vev**2 + (36*cb**3*cba**3*MHL**4*sb**3*sba**5)/vev**2 - (44*cb**5*cba**5*MHL**4*sb**3*sba**5)/vev**2 + (24*cba*m122*MHH**2*sb**4*sba**5)/vev**2 + (100*cb**2*cba**3*m122*MHH**2*sb**4*sba**5)/vev**2 - (24*cba*m122*MHL**2*sb**4*sba**5)/vev**2 - (100*cb**2*cba**3*m122*MHL**2*sb**4*sba**5)/vev**2 - (78*cb*cba**3*MHH**4*sb**5*sba**5)/vev**2 + (116*cb**3*cba**5*MHH**4*sb**5*sba**5)/vev**2 + (150*cb*cba**3*MHH**2*MHL**2*sb**5*sba**5)/vev**2 - (232*cb**3*cba**5*MHH**2*MHL**2*sb**5*sba**5)/vev**2 - (72*cb*cba**3*MHL**4*sb**5*sba**5)/vev**2 + (116*cb**3*cba**5*MHL**4*sb**5*sba**5)/vev**2 - (48*cba**3*m122*MHH**2*sb**6*sba**5)/vev**2 + (48*cba**3*m122*MHL**2*sb**6*sba**5)/vev**2 + 
(54*cba**3*MHH**4*sb**7*sba**5)/(cb*vev**2) + (54*cb*cba**5*MHH**4*sb**7*sba**5)/vev**2 - (66*cba**3*MHH**2*MHL**2*sb**7*sba**5)/(cb*vev**2) - (108*cb*cba**5*MHH**2*MHL**2*sb**7*sba**5)/vev**2 + (12*cba**3*MHL**4*sb**7*sba**5)/(cb*vev**2) + (54*cb*cba**5*MHL**4*sb**7*sba**5)/vev**2 - (46*cba**3*m122*MHH**2*sb**8*sba**5)/(cb**2*vev**2) + (46*cba**3*m122*MHL**2*sb**8*sba**5)/(cb**2*vev**2) - (32*cba**5*MHH**4*sb**9*sba**5)/(cb*vev**2) + (64*cba**5*MHH**2*MHL**2*sb**9*sba**5)/(cb*vev**2) - (32*cba**5*MHL**4*sb**9*sba**5)/(cb*vev**2) - (30*cb**6*cba**2*MHH**4*sba**6)/vev**2 + (26*cb**8*cba**4*MHH**4*sba**6)/vev**2 + (102*cb**6*cba**2*MHH**2*MHL**2*sba**6)/vev**2 - (52*cb**8*cba**4*MHH**2*MHL**2*sba**6)/vev**2 - (72*cb**6*cba**2*MHL**4*sba**6)/vev**2 + (26*cb**8*cba**4*MHL**4*sba**6)/vev**2 + (cb**10*cba**4*MHH**4*sba**6)/(sb**2*vev**2) - (2*cb**10*cba**4*MHH**2*MHL**2*sba**6)/(sb**2*vev**2) + (cb**10*cba**4*MHL**4*sba**6)/(sb**2*vev**2) - (50*cb**7*cba**2*m122*MHH**2*sba**6)/(sb*vev**2) + (50*cb**7*cba**2*m122*MHL**2*sba**6)/(sb*vev**2) - (92*cb**5*cba**2*m122*MHH**2*sb*sba**6)/vev**2 + (92*cb**5*cba**2*m122*MHL**2*sb*sba**6)/vev**2 + (9*cb**2*MHH**4*sb**2*sba**6)/vev**2 + (102*cb**4*cba**2*MHH**4*sb**2*sba**6)/vev**2 - (28*cb**6*cba**4*MHH**4*sb**2*sba**6)/vev**2 - (18*cb**2*MHH**2*MHL**2*sb**2*sba**6)/vev**2 - (168*cb**4*cba**2*MHH**2*MHL**2*sb**2*sba**6)/vev**2 + (56*cb**6*cba**4*MHH**2*MHL**2*sb**2*sba**6)/vev**2 + (9*cb**2*MHL**4*sb**2*sba**6)/vev**2 + (66*cb**4*cba**2*MHL**4*sb**2*sba**6)/vev**2 - (28*cb**6*cba**4*MHL**4*sb**2*sba**6)/vev**2 + (24*cb**3*cba**2*m122*MHH**2*sb**3*sba**6)/vev**2 - (24*cb**3*cba**2*m122*MHL**2*sb**3*sba**6)/vev**2 + (42*cb**2*cba**2*MHH**4*sb**4*sba**6)/vev**2 - (100*cb**4*cba**4*MHH**4*sb**4*sba**6)/vev**2 - (138*cb**2*cba**2*MHH**2*MHL**2*sb**4*sba**6)/vev**2 + (200*cb**4*cba**4*MHH**2*MHL**2*sb**4*sba**6)/vev**2 + (96*cb**2*cba**2*MHL**4*sb**4*sba**6)/vev**2 - (100*cb**4*cba**4*MHL**4*sb**4*sba**6)/vev**2 + 
(124*cb*cba**2*m122*MHH**2*sb**5*sba**6)/vev**2 - (124*cb*cba**2*m122*MHL**2*sb**5*sba**6)/vev**2 - (90*cba**2*MHH**4*sb**6*sba**6)/vev**2 - (13*cb**2*cba**4*MHH**4*sb**6*sba**6)/vev**2 + (132*cba**2*MHH**2*MHL**2*sb**6*sba**6)/vev**2 + (26*cb**2*cba**4*MHH**2*MHL**2*sb**6*sba**6)/vev**2 - (42*cba**2*MHL**4*sb**6*sba**6)/vev**2 - (13*cb**2*cba**4*MHL**4*sb**6*sba**6)/vev**2 + (58*cba**2*m122*MHH**2*sb**7*sba**6)/(cb*vev**2) - (58*cba**2*m122*MHL**2*sb**7*sba**6)/(cb*vev**2) + (38*cba**4*MHH**4*sb**8*sba**6)/vev**2 - (76*cba**4*MHH**2*MHL**2*sb**8*sba**6)/vev**2 + (38*cba**4*MHL**4*sb**8*sba**6)/vev**2 + (4*cba**4*MHH**4*sb**10*sba**6)/(cb**2*vev**2) - (8*cba**4*MHH**2*MHL**2*sb**10*sba**6)/(cb**2*vev**2) + (4*cba**4*MHL**4*sb**10*sba**6)/(cb**2*vev**2) - (24*cb**6*cba*m122*MHH**2*sba**7)/vev**2 + (24*cb**6*cba*m122*MHL**2*sba**7)/vev**2 + (8*cb**9*cba**3*MHH**4*sba**7)/(sb*vev**2) - (16*cb**9*cba**3*MHH**2*MHL**2*sba**7)/(sb*vev**2) + (8*cb**9*cba**3*MHL**4*sba**7)/(sb*vev**2) - (42*cb**5*cba*MHH**4*sb*sba**7)/vev**2 + (102*cb**5*cba*MHH**2*MHL**2*sb*sba**7)/vev**2 - (60*cb**5*cba*MHL**4*sb*sba**7)/vev**2 - (72*cb**4*cba*m122*MHH**2*sb**2*sba**7)/vev**2 + (72*cb**4*cba*m122*MHL**2*sb**2*sba**7)/vev**2 + (24*cb**3*cba*MHH**4*sb**3*sba**7)/vev**2 + (4*cb**5*cba**3*MHH**4*sb**3*sba**7)/vev**2 - (12*cb**3*cba*MHH**2*MHL**2*sb**3*sba**7)/vev**2 - (8*cb**5*cba**3*MHH**2*MHL**2*sb**3*sba**7)/vev**2 - (12*cb**3*cba*MHL**4*sb**3*sba**7)/vev**2 + (4*cb**5*cba**3*MHL**4*sb**3*sba**7)/vev**2 - (72*cb**2*cba*m122*MHH**2*sb**4*sba**7)/vev**2 + (72*cb**2*cba*m122*MHL**2*sb**4*sba**7)/vev**2 + (66*cb*cba*MHH**4*sb**5*sba**7)/vev**2 + (20*cb**3*cba**3*MHH**4*sb**5*sba**7)/vev**2 - (114*cb*cba*MHH**2*MHL**2*sb**5*sba**7)/vev**2 - (40*cb**3*cba**3*MHH**2*MHL**2*sb**5*sba**7)/vev**2 + (48*cb*cba*MHL**4*sb**5*sba**7)/vev**2 + (20*cb**3*cba**3*MHL**4*sb**5*sba**7)/vev**2 - (24*cba*m122*MHH**2*sb**6*sba**7)/vev**2 + (24*cba*m122*MHL**2*sb**6*sba**7)/vev**2 - 
(12*cb*cba**3*MHH**4*sb**7*sba**7)/vev**2 + (24*cb*cba**3*MHH**2*MHL**2*sb**7*sba**7)/vev**2 - (12*cb*cba**3*MHL**4*sb**7*sba**7)/vev**2 - (20*cba**3*MHH**4*sb**9*sba**7)/(cb*vev**2) + (40*cba**3*MHH**2*MHL**2*sb**9*sba**7)/(cb*vev**2) - (20*cba**3*MHL**4*sb**9*sba**7)/(cb*vev**2) + (22*cb**8*cba**2*MHH**4*sba**8)/vev**2 - (44*cb**8*cba**2*MHH**2*MHL**2*sba**8)/vev**2 + (22*cb**8*cba**2*MHL**4*sba**8)/vev**2 - (18*cb**4*MHH**4*sb**2*sba**8)/vev**2 - (14*cb**6*cba**2*MHH**4*sb**2*sba**8)/vev**2 + (36*cb**4*MHH**2*MHL**2*sb**2*sba**8)/vev**2 + (28*cb**6*cba**2*MHH**2*MHL**2*sb**2*sba**8)/vev**2 - (18*cb**4*MHL**4*sb**2*sba**8)/vev**2 - (14*cb**6*cba**2*MHL**4*sb**2*sba**8)/vev**2 - (18*cb**2*MHH**4*sb**4*sba**8)/vev**2 - (57*cb**4*cba**2*MHH**4*sb**4*sba**8)/vev**2 + (36*cb**2*MHH**2*MHL**2*sb**4*sba**8)/vev**2 + (114*cb**4*cba**2*MHH**2*MHL**2*sb**4*sba**8)/vev**2 - (18*cb**2*MHL**4*sb**4*sba**8)/vev**2 - (57*cb**4*cba**2*MHL**4*sb**4*sba**8)/vev**2 + (16*cb**2*cba**2*MHH**4*sb**6*sba**8)/vev**2 - (32*cb**2*cba**2*MHH**2*MHL**2*sb**6*sba**8)/vev**2 + (16*cb**2*cba**2*MHL**4*sb**6*sba**8)/vev**2 + (37*cba**2*MHH**4*sb**8*sba**8)/vev**2 - (74*cba**2*MHH**2*MHL**2*sb**8*sba**8)/vev**2 + (37*cba**2*MHL**4*sb**8*sba**8)/vev**2 + (24*cb**7*cba*MHH**4*sb*sba**9)/vev**2 - (48*cb**7*cba*MHH**2*MHL**2*sb*sba**9)/vev**2 + (24*cb**7*cba*MHL**4*sb*sba**9)/vev**2 + (18*cb**5*cba*MHH**4*sb**3*sba**9)/vev**2 - (36*cb**5*cba*MHH**2*MHL**2*sb**3*sba**9)/vev**2 + (18*cb**5*cba*MHL**4*sb**3*sba**9)/vev**2 - (36*cb**3*cba*MHH**4*sb**5*sba**9)/vev**2 + (72*cb**3*cba*MHH**2*MHL**2*sb**5*sba**9)/vev**2 - (36*cb**3*cba*MHL**4*sb**5*sba**9)/vev**2 - (30*cb*cba*MHH**4*sb**7*sba**9)/vev**2 + (60*cb*cba*MHH**2*MHL**2*sb**7*sba**9)/vev**2 - (30*cb*cba*MHL**4*sb**7*sba**9)/vev**2 + (9*cb**6*MHH**4*sb**2*sba**10)/vev**2 - (18*cb**6*MHH**2*MHL**2*sb**2*sba**10)/vev**2 + (9*cb**6*MHL**4*sb**2*sba**10)/vev**2 + (18*cb**4*MHH**4*sb**4*sba**10)/vev**2 - (36*cb**4*MHH**2*MHL**2*sb**4*sba**10)/vev**2 + 
(18*cb**4*MHL**4*sb**4*sba**10)/vev**2 + (9*cb**2*MHH**4*sb**6*sba**10)/vev**2 - (18*cb**2*MHH**2*MHL**2*sb**6*sba**10)/vev**2 + (9*cb**2*MHL**4*sb**6*sba**10)/vev**2)*cmath.sqrt(MHH**4 - 4*MHH**2*MHL**2))/(32.*cmath.pi*abs(MHH)**3)',
(P.H__plus__,P.W__minus__):'((-(cb**4*ee**2*MHH**2*sba**2)/(2.*sw**2) - (cb**4*ee**2*MHp**2*sba**2)/(2.*sw**2) + (cb**4*ee**2*MHH**4*sba**2)/(4.*MW**2*sw**2) - (cb**4*ee**2*MHH**2*MHp**2*sba**2)/(2.*MW**2*sw**2) + (cb**4*ee**2*MHp**4*sba**2)/(4.*MW**2*sw**2) + (cb**4*ee**2*MW**2*sba**2)/(4.*sw**2) - (cb**2*ee**2*MHH**2*sb**2*sba**2)/sw**2 - (cb**2*ee**2*MHp**2*sb**2*sba**2)/sw**2 + (cb**2*ee**2*MHH**4*sb**2*sba**2)/(2.*MW**2*sw**2) - (cb**2*ee**2*MHH**2*MHp**2*sb**2*sba**2)/(MW**2*sw**2) + (cb**2*ee**2*MHp**4*sb**2*sba**2)/(2.*MW**2*sw**2) + (cb**2*ee**2*MW**2*sb**2*sba**2)/(2.*sw**2) - (ee**2*MHH**2*sb**4*sba**2)/(2.*sw**2) - (ee**2*MHp**2*sb**4*sba**2)/(2.*sw**2) + (ee**2*MHH**4*sb**4*sba**2)/(4.*MW**2*sw**2) - (ee**2*MHH**2*MHp**2*sb**4*sba**2)/(2.*MW**2*sw**2) + (ee**2*MHp**4*sb**4*sba**2)/(4.*MW**2*sw**2) + (ee**2*MW**2*sb**4*sba**2)/(4.*sw**2))*cmath.sqrt(MHH**4 - 2*MHH**2*MHp**2 + MHp**4 - 2*MHH**2*MW**2 - 2*MHp**2*MW**2 + MW**4))/(16.*cmath.pi*abs(MHH)**3)',
(P.H__minus__,P.H__plus__):'(((16*cb**4*cba**2*m122**2)/vev**2 + (cb**8*cba**6*MHH**4)/vev**2 + (2*cb**6*cba**4*MHH**2*MHL**2)/vev**2 - (2*cb**8*cba**6*MHH**2*MHL**2)/vev**2 + (cb**4*cba**2*MHL**4)/vev**2 - (2*cb**6*cba**4*MHL**4)/vev**2 + (cb**8*cba**6*MHL**4)/vev**2 + (4*cb**8*cba**4*MHH**2*MHp**2)/vev**2 + (4*cb**6*cba**2*MHL**2*MHp**2)/vev**2 - (4*cb**8*cba**4*MHL**2*MHp**2)/vev**2 + (4*cb**8*cba**2*MHp**4)/vev**2 + (4*cb**6*cba**2*m122**2)/(sb**2*vev**2) - (4*cb**7*cba**4*m122*MHH**2)/(sb*vev**2) - (4*cb**5*cba**2*m122*MHL**2)/(sb*vev**2) + (4*cb**7*cba**4*m122*MHL**2)/(sb*vev**2) - (8*cb**7*cba**2*m122*MHp**2)/(sb*vev**2) - (4*cb**3*cba**2*m122*MHH**2*sb)/vev**2 - (12*cb**5*cba**4*m122*MHH**2*sb)/vev**2 - (8*cb**3*cba**2*m122*MHL**2*sb)/vev**2 + (12*cb**5*cba**4*m122*MHL**2*sb)/vev**2 - (32*cb**5*cba**2*m122*MHp**2*sb)/vev**2 + (24*cb**2*cba**2*m122**2*sb**2)/vev**2 + (2*cb**4*cba**4*MHH**4*sb**2)/vev**2 + (2*cb**6*cba**6*MHH**4*sb**2)/vev**2 + (2*cb**2*cba**2*MHH**2*MHL**2*sb**2)/vev**2 - (4*cb**6*cba**6*MHH**2*MHL**2*sb**2)/vev**2 - (2*cb**4*cba**4*MHL**4*sb**2)/vev**2 + (2*cb**6*cba**6*MHL**4*sb**2)/vev**2 + (4*cb**4*cba**2*MHH**2*MHp**2*sb**2)/vev**2 + (12*cb**6*cba**4*MHH**2*MHp**2*sb**2)/vev**2 + (8*cb**4*cba**2*MHL**2*MHp**2*sb**2)/vev**2 - (12*cb**6*cba**4*MHL**2*MHp**2*sb**2)/vev**2 + (16*cb**6*cba**2*MHp**4*sb**2)/vev**2 - (8*cb*cba**2*m122*MHH**2*sb**3)/vev**2 - (12*cb**3*cba**4*m122*MHH**2*sb**3)/vev**2 - (4*cb*cba**2*m122*MHL**2*sb**3)/vev**2 + (12*cb**3*cba**4*m122*MHL**2*sb**3)/vev**2 - (48*cb**3*cba**2*m122*MHp**2*sb**3)/vev**2 + (16*cba**2*m122**2*sb**4)/vev**2 + (cba**2*MHH**4*sb**4)/vev**2 + (2*cb**2*cba**4*MHH**4*sb**4)/vev**2 + (cb**4*cba**6*MHH**4*sb**4)/vev**2 - (2*cb**2*cba**4*MHH**2*MHL**2*sb**4)/vev**2 - (2*cb**4*cba**6*MHH**2*MHL**2*sb**4)/vev**2 + (cb**4*cba**6*MHL**4*sb**4)/vev**2 + (8*cb**2*cba**2*MHH**2*MHp**2*sb**4)/vev**2 + (12*cb**4*cba**4*MHH**2*MHp**2*sb**4)/vev**2 + (4*cb**2*cba**2*MHL**2*MHp**2*sb**4)/vev**2 - 
(12*cb**4*cba**4*MHL**2*MHp**2*sb**4)/vev**2 + (24*cb**4*cba**2*MHp**4*sb**4)/vev**2 - (4*cba**2*m122*MHH**2*sb**5)/(cb*vev**2) - (4*cb*cba**4*m122*MHH**2*sb**5)/vev**2 + (4*cb*cba**4*m122*MHL**2*sb**5)/vev**2 - (32*cb*cba**2*m122*MHp**2*sb**5)/vev**2 + (4*cba**2*m122**2*sb**6)/(cb**2*vev**2) + (4*cba**2*MHH**2*MHp**2*sb**6)/vev**2 + (4*cb**2*cba**4*MHH**2*MHp**2*sb**6)/vev**2 - (4*cb**2*cba**4*MHL**2*MHp**2*sb**6)/vev**2 + (16*cb**2*cba**2*MHp**4*sb**6)/vev**2 - (8*cba**2*m122*MHp**2*sb**7)/(cb*vev**2) + (4*cba**2*MHp**4*sb**8)/vev**2 + (2*cb**4*cba*m122*MHH**2*sba)/vev**2 + (16*cb**6*cba**3*m122*MHH**2*sba)/vev**2 + (10*cb**4*cba*m122*MHL**2*sba)/vev**2 - (16*cb**6*cba**3*m122*MHL**2*sba)/vev**2 + (12*cb**6*cba*m122*MHp**2*sba)/vev**2 - (4*cb**7*cba*m122**2*sba)/(sb**3*vev**2) + (6*cb**8*cba**3*m122*MHH**2*sba)/(sb**2*vev**2) + (6*cb**6*cba*m122*MHL**2*sba)/(sb**2*vev**2) - (6*cb**8*cba**3*m122*MHL**2*sba)/(sb**2*vev**2) + (4*cb**8*cba*m122*MHp**2*sba)/(sb**2*vev**2) - (12*cb**5*cba*m122**2*sba)/(sb*vev**2) - (2*cb**9*cba**5*MHH**4*sba)/(sb*vev**2) - (4*cb**7*cba**3*MHH**2*MHL**2*sba)/(sb*vev**2) + (4*cb**9*cba**5*MHH**2*MHL**2*sba)/(sb*vev**2) - (2*cb**5*cba*MHL**4*sba)/(sb*vev**2) + (4*cb**7*cba**3*MHL**4*sba)/(sb*vev**2) - (2*cb**9*cba**5*MHL**4*sba)/(sb*vev**2) - (4*cb**9*cba**3*MHH**2*MHp**2*sba)/(sb*vev**2) - (4*cb**7*cba*MHL**2*MHp**2*sba)/(sb*vev**2) + (4*cb**9*cba**3*MHL**2*MHp**2*sba)/(sb*vev**2) - (8*cb**3*cba*m122**2*sb*sba)/vev**2 - (2*cb**5*cba**3*MHH**4*sb*sba)/vev**2 - (4*cb**7*cba**5*MHH**4*sb*sba)/vev**2 - (2*cb**3*cba*MHH**2*MHL**2*sb*sba)/vev**2 - (2*cb**5*cba**3*MHH**2*MHL**2*sb*sba)/vev**2 + (8*cb**7*cba**5*MHH**2*MHL**2*sb*sba)/vev**2 + (4*cb**5*cba**3*MHL**4*sb*sba)/vev**2 - (4*cb**7*cba**5*MHL**4*sb*sba)/vev**2 - (12*cb**7*cba**3*MHH**2*MHp**2*sb*sba)/vev**2 - (8*cb**5*cba*MHL**2*MHp**2*sb*sba)/vev**2 + (12*cb**7*cba**3*MHL**2*MHp**2*sb*sba)/vev**2 - (2*cb**2*cba*m122*MHH**2*sb**2*sba)/vev**2 + 
(12*cb**4*cba**3*m122*MHH**2*sb**2*sba)/vev**2 + (2*cb**2*cba*m122*MHL**2*sb**2*sba)/vev**2 - (12*cb**4*cba**3*m122*MHL**2*sb**2*sba)/vev**2 + (8*cb**4*cba*m122*MHp**2*sb**2*sba)/vev**2 + (8*cb*cba*m122**2*sb**3*sba)/vev**2 - (2*cb**5*cba**5*MHH**4*sb**3*sba)/vev**2 + (2*cb*cba*MHH**2*MHL**2*sb**3*sba)/vev**2 + (4*cb**5*cba**5*MHH**2*MHL**2*sb**3*sba)/vev**2 - (2*cb**5*cba**5*MHL**4*sb**3*sba)/vev**2 + (4*cb**3*cba*MHH**2*MHp**2*sb**3*sba)/vev**2 - (12*cb**5*cba**3*MHH**2*MHp**2*sb**3*sba)/vev**2 - (4*cb**3*cba*MHL**2*MHp**2*sb**3*sba)/vev**2 + (12*cb**5*cba**3*MHL**2*MHp**2*sb**3*sba)/vev**2 - (10*cba*m122*MHH**2*sb**4*sba)/vev**2 - (2*cba*m122*MHL**2*sb**4*sba)/vev**2 - (8*cb**2*cba*m122*MHp**2*sb**4*sba)/vev**2 + (12*cba*m122**2*sb**5*sba)/(cb*vev**2) + (2*cba*MHH**4*sb**5*sba)/(cb*vev**2) + (2*cb*cba**3*MHH**4*sb**5*sba)/vev**2 - (2*cb*cba**3*MHH**2*MHL**2*sb**5*sba)/vev**2 + (8*cb*cba*MHH**2*MHp**2*sb**5*sba)/vev**2 - (4*cb**3*cba**3*MHH**2*MHp**2*sb**5*sba)/vev**2 + (4*cb**3*cba**3*MHL**2*MHp**2*sb**5*sba)/vev**2 - (6*cba*m122*MHH**2*sb**6*sba)/(cb**2*vev**2) - (2*cba**3*m122*MHH**2*sb**6*sba)/vev**2 + (2*cba**3*m122*MHL**2*sb**6*sba)/vev**2 - (12*cba*m122*MHp**2*sb**6*sba)/vev**2 + (4*cba*m122**2*sb**7*sba)/(cb**3*vev**2) + (4*cba*MHH**2*MHp**2*sb**7*sba)/(cb*vev**2) - (4*cba*m122*MHp**2*sb**8*sba)/(cb**2*vev**2) - (cb**4*m122**2*sba**2)/vev**2 + (4*cb**8*cba**4*MHH**4*sba**2)/vev**2 + (4*cb**6*cba**2*MHH**2*MHL**2*sba**2)/vev**2 - (8*cb**8*cba**4*MHH**2*MHL**2*sba**2)/vev**2 - (4*cb**6*cba**2*MHL**4*sba**2)/vev**2 + (4*cb**8*cba**4*MHL**4*sba**2)/vev**2 + (4*cb**8*cba**2*MHH**2*MHp**2*sba**2)/vev**2 - (4*cb**8*cba**2*MHL**2*MHp**2*sba**2)/vev**2 + (cb**8*m122**2*sba**2)/(sb**4*vev**2) - (2*cb**9*cba**2*m122*MHH**2*sba**2)/(sb**3*vev**2) - (2*cb**7*m122*MHL**2*sba**2)/(sb**3*vev**2) + (2*cb**9*cba**2*m122*MHL**2*sba**2)/(sb**3*vev**2) + (2*cb**6*m122**2*sba**2)/(sb**2*vev**2) + (cb**10*cba**4*MHH**4*sba**2)/(sb**2*vev**2) + 
(2*cb**8*cba**2*MHH**2*MHL**2*sba**2)/(sb**2*vev**2) - (2*cb**10*cba**4*MHH**2*MHL**2*sba**2)/(sb**2*vev**2) + (cb**6*MHL**4*sba**2)/(sb**2*vev**2) - (2*cb**8*cba**2*MHL**4*sba**2)/(sb**2*vev**2) + (cb**10*cba**4*MHL**4*sba**2)/(sb**2*vev**2) - (8*cb**7*cba**2*m122*MHH**2*sba**2)/(sb*vev**2) - (2*cb**5*m122*MHL**2*sba**2)/(sb*vev**2) + (8*cb**7*cba**2*m122*MHL**2*sba**2)/(sb*vev**2) + (2*cb**3*m122*MHH**2*sb*sba**2)/vev**2 - (12*cb**5*cba**2*m122*MHH**2*sb*sba**2)/vev**2 + (2*cb**3*m122*MHL**2*sb*sba**2)/vev**2 + (12*cb**5*cba**2*m122*MHL**2*sb*sba**2)/vev**2 - (4*cb**2*m122**2*sb**2*sba**2)/vev**2 + (5*cb**6*cba**4*MHH**4*sb**2*sba**2)/vev**2 - (2*cb**2*MHH**2*MHL**2*sb**2*sba**2)/vev**2 + (2*cb**4*cba**2*MHH**2*MHL**2*sb**2*sba**2)/vev**2 - (10*cb**6*cba**4*MHH**2*MHL**2*sb**2*sba**2)/vev**2 - (2*cb**4*cba**2*MHL**4*sb**2*sba**2)/vev**2 + (5*cb**6*cba**4*MHL**4*sb**2*sba**2)/vev**2 + (12*cb**6*cba**2*MHH**2*MHp**2*sb**2*sba**2)/vev**2 - (12*cb**6*cba**2*MHL**2*MHp**2*sb**2*sba**2)/vev**2 + (2*cb*m122*MHH**2*sb**3*sba**2)/vev**2 - (8*cb**3*cba**2*m122*MHH**2*sb**3*sba**2)/vev**2 + (2*cb*m122*MHL**2*sb**3*sba**2)/vev**2 + (8*cb**3*cba**2*m122*MHL**2*sb**3*sba**2)/vev**2 - (m122**2*sb**4*sba**2)/vev**2 + (2*cb**4*cba**4*MHH**4*sb**4*sba**2)/vev**2 - (4*cb**4*cba**4*MHH**2*MHL**2*sb**4*sba**2)/vev**2 + (2*cb**4*cba**4*MHL**4*sb**4*sba**2)/vev**2 + (12*cb**4*cba**2*MHH**2*MHp**2*sb**4*sba**2)/vev**2 - (12*cb**4*cba**2*MHL**2*MHp**2*sb**4*sba**2)/vev**2 - (2*m122*MHH**2*sb**5*sba**2)/(cb*vev**2) - (2*cb*cba**2*m122*MHH**2*sb**5*sba**2)/vev**2 + (2*cb*cba**2*m122*MHL**2*sb**5*sba**2)/vev**2 + (2*m122**2*sb**6*sba**2)/(cb**2*vev**2) + (MHH**4*sb**6*sba**2)/(cb**2*vev**2) + (4*cb**2*cba**2*MHH**2*MHp**2*sb**6*sba**2)/vev**2 - (4*cb**2*cba**2*MHL**2*MHp**2*sb**6*sba**2)/vev**2 - (2*m122*MHH**2*sb**7*sba**2)/(cb**3*vev**2) + (m122**2*sb**8*sba**2)/(cb**4*vev**2) + (16*cb**6*cba*m122*MHH**2*sba**3)/vev**2 - (16*cb**6*cba*m122*MHL**2*sba**3)/vev**2 + 
(6*cb**8*cba*m122*MHH**2*sba**3)/(sb**2*vev**2) - (6*cb**8*cba*m122*MHL**2*sba**3)/(sb**2*vev**2) - (4*cb**9*cba**3*MHH**4*sba**3)/(sb*vev**2) - (4*cb**7*cba*MHH**2*MHL**2*sba**3)/(sb*vev**2) + (8*cb**9*cba**3*MHH**2*MHL**2*sba**3)/(sb*vev**2) + (4*cb**7*cba*MHL**4*sba**3)/(sb*vev**2) - (4*cb**9*cba**3*MHL**4*sba**3)/(sb*vev**2) - (4*cb**9*cba*MHH**2*MHp**2*sba**3)/(sb*vev**2) + (4*cb**9*cba*MHL**2*MHp**2*sba**3)/(sb*vev**2) - (2*cb**5*cba*MHH**4*sb*sba**3)/vev**2 - (8*cb**7*cba**3*MHH**4*sb*sba**3)/vev**2 - (2*cb**5*cba*MHH**2*MHL**2*sb*sba**3)/vev**2 + (16*cb**7*cba**3*MHH**2*MHL**2*sb*sba**3)/vev**2 + (4*cb**5*cba*MHL**4*sb*sba**3)/vev**2 - (8*cb**7*cba**3*MHL**4*sb*sba**3)/vev**2 - (12*cb**7*cba*MHH**2*MHp**2*sb*sba**3)/vev**2 + (12*cb**7*cba*MHL**2*MHp**2*sb*sba**3)/vev**2 + (12*cb**4*cba*m122*MHH**2*sb**2*sba**3)/vev**2 - (12*cb**4*cba*m122*MHL**2*sb**2*sba**3)/vev**2 - (4*cb**5*cba**3*MHH**4*sb**3*sba**3)/vev**2 + (8*cb**5*cba**3*MHH**2*MHL**2*sb**3*sba**3)/vev**2 - (4*cb**5*cba**3*MHL**4*sb**3*sba**3)/vev**2 - (12*cb**5*cba*MHH**2*MHp**2*sb**3*sba**3)/vev**2 + (12*cb**5*cba*MHL**2*MHp**2*sb**3*sba**3)/vev**2 + (2*cb*cba*MHH**4*sb**5*sba**3)/vev**2 - (2*cb*cba*MHH**2*MHL**2*sb**5*sba**3)/vev**2 - (4*cb**3*cba*MHH**2*MHp**2*sb**5*sba**3)/vev**2 + (4*cb**3*cba*MHL**2*MHp**2*sb**5*sba**3)/vev**2 - (2*cba*m122*MHH**2*sb**6*sba**3)/vev**2 + (2*cba*m122*MHL**2*sb**6*sba**3)/vev**2 + (5*cb**8*cba**2*MHH**4*sba**4)/vev**2 + (2*cb**6*MHH**2*MHL**2*sba**4)/vev**2 - (10*cb**8*cba**2*MHH**2*MHL**2*sba**4)/vev**2 - (2*cb**6*MHL**4*sba**4)/vev**2 + (5*cb**8*cba**2*MHL**4*sba**4)/vev**2 - (2*cb**9*m122*MHH**2*sba**4)/(sb**3*vev**2) + (2*cb**9*m122*MHL**2*sba**4)/(sb**3*vev**2) + (2*cb**10*cba**2*MHH**4*sba**4)/(sb**2*vev**2) + (2*cb**8*MHH**2*MHL**2*sba**4)/(sb**2*vev**2) - (4*cb**10*cba**2*MHH**2*MHL**2*sba**4)/(sb**2*vev**2) - (2*cb**8*MHL**4*sba**4)/(sb**2*vev**2) + (2*cb**10*cba**2*MHL**4*sba**4)/(sb**2*vev**2) - (4*cb**7*m122*MHH**2*sba**4)/(sb*vev**2) + 
(4*cb**7*m122*MHL**2*sba**4)/(sb*vev**2) - (2*cb**4*MHH**4*sb**2*sba**4)/vev**2 + (4*cb**6*cba**2*MHH**4*sb**2*sba**4)/vev**2 + (2*cb**4*MHH**2*MHL**2*sb**2*sba**4)/vev**2 - (8*cb**6*cba**2*MHH**2*MHL**2*sb**2*sba**4)/vev**2 + (4*cb**6*cba**2*MHL**4*sb**2*sba**4)/vev**2 + (4*cb**3*m122*MHH**2*sb**3*sba**4)/vev**2 - (4*cb**3*m122*MHL**2*sb**3*sba**4)/vev**2 - (2*cb**2*MHH**4*sb**4*sba**4)/vev**2 + (cb**4*cba**2*MHH**4*sb**4*sba**4)/vev**2 + (2*cb**2*MHH**2*MHL**2*sb**4*sba**4)/vev**2 - (2*cb**4*cba**2*MHH**2*MHL**2*sb**4*sba**4)/vev**2 + (cb**4*cba**2*MHL**4*sb**4*sba**4)/vev**2 + (2*cb*m122*MHH**2*sb**5*sba**4)/vev**2 - (2*cb*m122*MHL**2*sb**5*sba**4)/vev**2 - (2*cb**9*cba*MHH**4*sba**5)/(sb*vev**2) + (4*cb**9*cba*MHH**2*MHL**2*sba**5)/(sb*vev**2) - (2*cb**9*cba*MHL**4*sba**5)/(sb*vev**2) - (4*cb**7*cba*MHH**4*sb*sba**5)/vev**2 + (8*cb**7*cba*MHH**2*MHL**2*sb*sba**5)/vev**2 - (4*cb**7*cba*MHL**4*sb*sba**5)/vev**2 - (2*cb**5*cba*MHH**4*sb**3*sba**5)/vev**2 + (4*cb**5*cba*MHH**2*MHL**2*sb**3*sba**5)/vev**2 - (2*cb**5*cba*MHL**4*sb**3*sba**5)/vev**2 + (2*cb**8*MHH**4*sba**6)/vev**2 - (4*cb**8*MHH**2*MHL**2*sba**6)/vev**2 + (2*cb**8*MHL**4*sba**6)/vev**2 + (cb**10*MHH**4*sba**6)/(sb**2*vev**2) - (2*cb**10*MHH**2*MHL**2*sba**6)/(sb**2*vev**2) + (cb**10*MHL**4*sba**6)/(sb**2*vev**2) + (cb**6*MHH**4*sb**2*sba**6)/vev**2 - (2*cb**6*MHH**2*MHL**2*sb**2*sba**6)/vev**2 + (cb**6*MHL**4*sb**2*sba**6)/vev**2)*cmath.sqrt(MHH**4 - 4*MHH**2*MHp**2))/(16.*cmath.pi*abs(MHH)**3)',
(P.H__minus__,P.W__plus__):'((-(cb**4*ee**2*MHH**2*sba**2)/(2.*sw**2) - (cb**4*ee**2*MHp**2*sba**2)/(2.*sw**2) + (cb**4*ee**2*MHH**4*sba**2)/(4.*MW**2*sw**2) - (cb**4*ee**2*MHH**2*MHp**2*sba**2)/(2.*MW**2*sw**2) + (cb**4*ee**2*MHp**4*sba**2)/(4.*MW**2*sw**2) + (cb**4*ee**2*MW**2*sba**2)/(4.*sw**2) - (cb**2*ee**2*MHH**2*sb**2*sba**2)/sw**2 - (cb**2*ee**2*MHp**2*sb**2*sba**2)/sw**2 + (cb**2*ee**2*MHH**4*sb**2*sba**2)/(2.*MW**2*sw**2) - (cb**2*ee**2*MHH**2*MHp**2*sb**2*sba**2)/(MW**2*sw**2) + (cb**2*ee**2*MHp**4*sb**2*sba**2)/(2.*MW**2*sw**2) + (cb**2*ee**2*MW**2*sb**2*sba**2)/(2.*sw**2) - (ee**2*MHH**2*sb**4*sba**2)/(2.*sw**2) - (ee**2*MHp**2*sb**4*sba**2)/(2.*sw**2) + (ee**2*MHH**4*sb**4*sba**2)/(4.*MW**2*sw**2) - (ee**2*MHH**2*MHp**2*sb**4*sba**2)/(2.*MW**2*sw**2) + (ee**2*MHp**4*sb**4*sba**2)/(4.*MW**2*sw**2) + (ee**2*MW**2*sb**4*sba**2)/(4.*sw**2))*cmath.sqrt(MHH**4 - 2*MHH**2*MHp**2 + MHp**4 - 2*MHH**2*MW**2 - 2*MHp**2*MW**2 + MW**4))/(16.*cmath.pi*abs(MHH)**3)',
(P.t,P.t__tilde__):'((3*cba**2*MHH**2*yt**2 - 12*cba**2*MT**2*yt**2 - (6*cb*cba*MHH**2*sba*yt**2)/sb + (24*cb*cba*MT**2*sba*yt**2)/sb + (3*cb**2*MHH**2*sba**2*yt**2)/sb**2 - (12*cb**2*MT**2*sba**2*yt**2)/sb**2)*cmath.sqrt(MHH**4 - 4*MHH**2*MT**2))/(16.*cmath.pi*abs(MHH)**3)',
(P.ta__minus__,P.ta__plus__):'((cba**2*MHH**2*ytau**2 - 4*cba**2*MTA**2*ytau**2 - (2*cb*cba*MHH**2*sba*ytau**2)/sb + (8*cb*cba*MTA**2*sba*ytau**2)/sb + (cb**2*MHH**2*sba**2*ytau**2)/sb**2 - (4*cb**2*MTA**2*sba**2*ytau**2)/sb**2)*cmath.sqrt(MHH**4 - 4*MHH**2*MTA**2))/(16.*cmath.pi*abs(MHH)**3)',
(P.W__minus__,P.W__plus__):'(((3*cb**4*cba**2*ee**4*vev**2)/(4.*sw**4) + (cb**4*cba**2*ee**4*MHH**4*vev**2)/(16.*MW**4*sw**4) - (cb**4*cba**2*ee**4*MHH**2*vev**2)/(4.*MW**2*sw**4) + (3*cb**2*cba**2*ee**4*sb**2*vev**2)/(2.*sw**4) + (cb**2*cba**2*ee**4*MHH**4*sb**2*vev**2)/(8.*MW**4*sw**4) - (cb**2*cba**2*ee**4*MHH**2*sb**2*vev**2)/(2.*MW**2*sw**4) + (3*cba**2*ee**4*sb**4*vev**2)/(4.*sw**4) + (cba**2*ee**4*MHH**4*sb**4*vev**2)/(16.*MW**4*sw**4) - (cba**2*ee**4*MHH**2*sb**4*vev**2)/(4.*MW**2*sw**4))*cmath.sqrt(MHH**4 - 4*MHH**2*MW**2))/(16.*cmath.pi*abs(MHH)**3)',
(P.Z,P.Z):'(((9*cb**4*cba**2*ee**4*vev**2)/2. + (3*cb**4*cba**2*ee**4*MHH**4*vev**2)/(8.*MZ**4) - (3*cb**4*cba**2*ee**4*MHH**2*vev**2)/(2.*MZ**2) + 9*cb**2*cba**2*ee**4*sb**2*vev**2 + (3*cb**2*cba**2*ee**4*MHH**4*sb**2*vev**2)/(4.*MZ**4) - (3*cb**2*cba**2*ee**4*MHH**2*sb**2*vev**2)/MZ**2 + (9*cba**2*ee**4*sb**4*vev**2)/2. + (3*cba**2*ee**4*MHH**4*sb**4*vev**2)/(8.*MZ**4) - (3*cba**2*ee**4*MHH**2*sb**4*vev**2)/(2.*MZ**2) + (3*cb**4*cba**2*cw**4*ee**4*vev**2)/(4.*sw**4) + (cb**4*cba**2*cw**4*ee**4*MHH**4*vev**2)/(16.*MZ**4*sw**4) - (cb**4*cba**2*cw**4*ee**4*MHH**2*vev**2)/(4.*MZ**2*sw**4) + (3*cb**2*cba**2*cw**4*ee**4*sb**2*vev**2)/(2.*sw**4) + (cb**2*cba**2*cw**4*ee**4*MHH**4*sb**2*vev**2)/(8.*MZ**4*sw**4) - (cb**2*cba**2*cw**4*ee**4*MHH**2*sb**2*vev**2)/(2.*MZ**2*sw**4) + (3*cba**2*cw**4*ee**4*sb**4*vev**2)/(4.*sw**4) + (cba**2*cw**4*ee**4*MHH**4*sb**4*vev**2)/(16.*MZ**4*sw**4) - (cba**2*cw**4*ee**4*MHH**2*sb**4*vev**2)/(4.*MZ**2*sw**4) + (3*cb**4*cba**2*cw**2*ee**4*vev**2)/sw**2 + (cb**4*cba**2*cw**2*ee**4*MHH**4*vev**2)/(4.*MZ**4*sw**2) - (cb**4*cba**2*cw**2*ee**4*MHH**2*vev**2)/(MZ**2*sw**2) + (6*cb**2*cba**2*cw**2*ee**4*sb**2*vev**2)/sw**2 + (cb**2*cba**2*cw**2*ee**4*MHH**4*sb**2*vev**2)/(2.*MZ**4*sw**2) - (2*cb**2*cba**2*cw**2*ee**4*MHH**2*sb**2*vev**2)/(MZ**2*sw**2) + (3*cba**2*cw**2*ee**4*sb**4*vev**2)/sw**2 + (cba**2*cw**2*ee**4*MHH**4*sb**4*vev**2)/(4.*MZ**4*sw**2) - (cba**2*cw**2*ee**4*MHH**2*sb**4*vev**2)/(MZ**2*sw**2) + (3*cb**4*cba**2*ee**4*sw**2*vev**2)/cw**2 + (cb**4*cba**2*ee**4*MHH**4*sw**2*vev**2)/(4.*cw**2*MZ**4) - (cb**4*cba**2*ee**4*MHH**2*sw**2*vev**2)/(cw**2*MZ**2) + (6*cb**2*cba**2*ee**4*sb**2*sw**2*vev**2)/cw**2 + (cb**2*cba**2*ee**4*MHH**4*sb**2*sw**2*vev**2)/(2.*cw**2*MZ**4) - (2*cb**2*cba**2*ee**4*MHH**2*sb**2*sw**2*vev**2)/(cw**2*MZ**2) + (3*cba**2*ee**4*sb**4*sw**2*vev**2)/cw**2 + (cba**2*ee**4*MHH**4*sb**4*sw**2*vev**2)/(4.*cw**2*MZ**4) - (cba**2*ee**4*MHH**2*sb**4*sw**2*vev**2)/(cw**2*MZ**2) + 
(3*cb**4*cba**2*ee**4*sw**4*vev**2)/(4.*cw**4) + (cb**4*cba**2*ee**4*MHH**4*sw**4*vev**2)/(16.*cw**4*MZ**4) - (cb**4*cba**2*ee**4*MHH**2*sw**4*vev**2)/(4.*cw**4*MZ**2) + (3*cb**2*cba**2*ee**4*sb**2*sw**4*vev**2)/(2.*cw**4) + (cb**2*cba**2*ee**4*MHH**4*sb**2*sw**4*vev**2)/(8.*cw**4*MZ**4) - (cb**2*cba**2*ee**4*MHH**2*sb**2*sw**4*vev**2)/(2.*cw**4*MZ**2) + (3*cba**2*ee**4*sb**4*sw**4*vev**2)/(4.*cw**4) + (cba**2*ee**4*MHH**4*sb**4*sw**4*vev**2)/(16.*cw**4*MZ**4) - (cba**2*ee**4*MHH**2*sb**4*sw**4*vev**2)/(4.*cw**4*MZ**2))*cmath.sqrt(MHH**4 - 4*MHH**2*MZ**2))/(32.*cmath.pi*abs(MHH)**3)'})
Decay_HL = Decay(name = 'Decay_HL',
particle = P.HL,
partial_widths = {(P.b,P.b__tilde__):'(((-12*cba**2*MB**2*sb**2*yb**2)/cb**2 + (3*cba**2*MHL**2*sb**2*yb**2)/cb**2 + (24*cba*MB**2*sb*sba*yb**2)/cb - (6*cba*MHL**2*sb*sba*yb**2)/cb - 12*MB**2*sba**2*yb**2 + 3*MHL**2*sba**2*yb**2)*cmath.sqrt(-4*MB**2*MHL**2 + MHL**4))/(16.*cmath.pi*abs(MHL)**3)',
(P.HA,P.HA):'((-((cb**4*cba**2*m122**2)/vev**2) + (cb**8*cba**2*m122**2)/(sb**4*vev**2) - (2*cb**7*cba**2*m122*MHL**2)/(sb**3*vev**2) + (2*cb**6*cba**2*m122**2)/(sb**2*vev**2) + (cb**6*cba**2*MHL**4)/(sb**2*vev**2) - (2*cb**5*cba**2*m122*MHL**2)/(sb*vev**2) + (2*cb**3*cba**2*m122*MHH**2*sb)/vev**2 - (2*cb**5*cba**4*m122*MHH**2*sb)/vev**2 + (2*cb**3*cba**2*m122*MHL**2*sb)/vev**2 + (2*cb**5*cba**4*m122*MHL**2*sb)/vev**2 - (4*cb**2*cba**2*m122**2*sb**2)/vev**2 - (2*cb**2*cba**2*MHH**2*MHL**2*sb**2)/vev**2 + (2*cb**4*cba**4*MHH**2*MHL**2*sb**2)/vev**2 - (2*cb**4*cba**4*MHL**4*sb**2)/vev**2 + (2*cb*cba**2*m122*MHH**2*sb**3)/vev**2 - (4*cb**3*cba**4*m122*MHH**2*sb**3)/vev**2 + (2*cb*cba**2*m122*MHL**2*sb**3)/vev**2 + (4*cb**3*cba**4*m122*MHL**2*sb**3)/vev**2 - (cba**2*m122**2*sb**4)/vev**2 + (2*cb**2*cba**4*MHH**2*MHL**2*sb**4)/vev**2 - (2*cb**2*cba**4*MHL**4*sb**4)/vev**2 - (2*cba**2*m122*MHH**2*sb**5)/(cb*vev**2) + (2*cba**2*m122**2*sb**6)/(cb**2*vev**2) + (cba**2*MHH**4*sb**6)/(cb**2*vev**2) - (2*cba**4*MHH**4*sb**6)/vev**2 + (cb**2*cba**6*MHH**4*sb**6)/vev**2 + (2*cba**4*MHH**2*MHL**2*sb**6)/vev**2 - (2*cb**2*cba**6*MHH**2*MHL**2*sb**6)/vev**2 + (cb**2*cba**6*MHL**4*sb**6)/vev**2 - (2*cba**2*m122*MHH**2*sb**7)/(cb**3*vev**2) + (4*cba**4*m122*MHH**2*sb**7)/(cb*vev**2) - (4*cba**4*m122*MHL**2*sb**7)/(cb*vev**2) + (cba**2*m122**2*sb**8)/(cb**4*vev**2) - (2*cba**4*MHH**4*sb**8)/(cb**2*vev**2) + (2*cba**6*MHH**4*sb**8)/vev**2 + (2*cba**4*MHH**2*MHL**2*sb**8)/(cb**2*vev**2) - (4*cba**6*MHH**2*MHL**2*sb**8)/vev**2 + (2*cba**6*MHL**4*sb**8)/vev**2 + (2*cba**4*m122*MHH**2*sb**9)/(cb**3*vev**2) - (2*cba**4*m122*MHL**2*sb**9)/(cb**3*vev**2) + (cba**6*MHH**4*sb**10)/(cb**2*vev**2) - (2*cba**6*MHH**2*MHL**2*sb**10)/(cb**2*vev**2) + (cba**6*MHL**4*sb**10)/(cb**2*vev**2) - (12*cb**6*cba*m122*MHA**2*sba)/vev**2 - (2*cb**4*cba*m122*MHH**2*sba)/vev**2 + (2*cb**6*cba**3*m122*MHH**2*sba)/vev**2 - (10*cb**4*cba*m122*MHL**2*sba)/vev**2 - (2*cb**6*cba**3*m122*MHL**2*sba)/vev**2 + 
(4*cb**7*cba*m122**2*sba)/(sb**3*vev**2) - (4*cb**8*cba*m122*MHA**2*sba)/(sb**2*vev**2) - (6*cb**6*cba*m122*MHL**2*sba)/(sb**2*vev**2) + (12*cb**5*cba*m122**2*sba)/(sb*vev**2) + (4*cb**7*cba*MHA**2*MHL**2*sba)/(sb*vev**2) + (2*cb**5*cba*MHL**4*sba)/(sb*vev**2) + (8*cb**3*cba*m122**2*sb*sba)/vev**2 + (8*cb**5*cba*MHA**2*MHL**2*sb*sba)/vev**2 + (2*cb**3*cba*MHH**2*MHL**2*sb*sba)/vev**2 - (2*cb**5*cba**3*MHH**2*MHL**2*sb*sba)/vev**2 + (2*cb**5*cba**3*MHL**4*sb*sba)/vev**2 - (8*cb**4*cba*m122*MHA**2*sb**2*sba)/vev**2 + (2*cb**2*cba*m122*MHH**2*sb**2*sba)/vev**2 - (2*cb**2*cba*m122*MHL**2*sb**2*sba)/vev**2 - (8*cb*cba*m122**2*sb**3*sba)/vev**2 - (4*cb**3*cba*MHA**2*MHH**2*sb**3*sba)/vev**2 + (4*cb**5*cba**3*MHA**2*MHH**2*sb**3*sba)/vev**2 + (4*cb**3*cba*MHA**2*MHL**2*sb**3*sba)/vev**2 - (4*cb**5*cba**3*MHA**2*MHL**2*sb**3*sba)/vev**2 - (2*cb*cba*MHH**2*MHL**2*sb**3*sba)/vev**2 + (8*cb**2*cba*m122*MHA**2*sb**4*sba)/vev**2 + (10*cba*m122*MHH**2*sb**4*sba)/vev**2 - (12*cb**2*cba**3*m122*MHH**2*sb**4*sba)/vev**2 + (2*cba*m122*MHL**2*sb**4*sba)/vev**2 + (12*cb**2*cba**3*m122*MHL**2*sb**4*sba)/vev**2 - (12*cba*m122**2*sb**5*sba)/(cb*vev**2) - (8*cb*cba*MHA**2*MHH**2*sb**5*sba)/vev**2 + (12*cb**3*cba**3*MHA**2*MHH**2*sb**5*sba)/vev**2 - (2*cba*MHH**4*sb**5*sba)/(cb*vev**2) + (4*cb*cba**3*MHH**4*sb**5*sba)/vev**2 - (2*cb**3*cba**5*MHH**4*sb**5*sba)/vev**2 - (12*cb**3*cba**3*MHA**2*MHL**2*sb**5*sba)/vev**2 - (2*cb*cba**3*MHH**2*MHL**2*sb**5*sba)/vev**2 + (4*cb**3*cba**5*MHH**2*MHL**2*sb**5*sba)/vev**2 - (2*cb*cba**3*MHL**4*sb**5*sba)/vev**2 - (2*cb**3*cba**5*MHL**4*sb**5*sba)/vev**2 + (12*cba*m122*MHA**2*sb**6*sba)/vev**2 + (6*cba*m122*MHH**2*sb**6*sba)/(cb**2*vev**2) - (16*cba**3*m122*MHH**2*sb**6*sba)/vev**2 + (16*cba**3*m122*MHL**2*sb**6*sba)/vev**2 - (4*cba*m122**2*sb**7*sba)/(cb**3*vev**2) - (4*cba*MHA**2*MHH**2*sb**7*sba)/(cb*vev**2) + (12*cb*cba**3*MHA**2*MHH**2*sb**7*sba)/vev**2 + (4*cba**3*MHH**4*sb**7*sba)/(cb*vev**2) - (4*cb*cba**5*MHH**4*sb**7*sba)/vev**2 - 
(12*cb*cba**3*MHA**2*MHL**2*sb**7*sba)/vev**2 - (4*cba**3*MHH**2*MHL**2*sb**7*sba)/(cb*vev**2) + (8*cb*cba**5*MHH**2*MHL**2*sb**7*sba)/vev**2 - (4*cb*cba**5*MHL**4*sb**7*sba)/vev**2 + (4*cba*m122*MHA**2*sb**8*sba)/(cb**2*vev**2) - (6*cba**3*m122*MHH**2*sb**8*sba)/(cb**2*vev**2) + (6*cba**3*m122*MHL**2*sb**8*sba)/(cb**2*vev**2) + (4*cba**3*MHA**2*MHH**2*sb**9*sba)/(cb*vev**2) - (2*cba**5*MHH**4*sb**9*sba)/(cb*vev**2) - (4*cba**3*MHA**2*MHL**2*sb**9*sba)/(cb*vev**2) + (4*cba**5*MHH**2*MHL**2*sb**9*sba)/(cb*vev**2) - (2*cba**5*MHL**4*sb**9*sba)/(cb*vev**2) + (16*cb**4*m122**2*sba**2)/vev**2 + (4*cb**8*MHA**4*sba**2)/vev**2 + (4*cb**6*MHA**2*MHL**2*sba**2)/vev**2 + (cb**4*MHL**4*sba**2)/vev**2 + (4*cb**6*m122**2*sba**2)/(sb**2*vev**2) - (8*cb**7*m122*MHA**2*sba**2)/(sb*vev**2) - (4*cb**5*m122*MHL**2*sba**2)/(sb*vev**2) - (32*cb**5*m122*MHA**2*sb*sba**2)/vev**2 - (4*cb**3*m122*MHH**2*sb*sba**2)/vev**2 + (2*cb**5*cba**2*m122*MHH**2*sb*sba**2)/vev**2 - (8*cb**3*m122*MHL**2*sb*sba**2)/vev**2 - (2*cb**5*cba**2*m122*MHL**2*sb*sba**2)/vev**2 + (24*cb**2*m122**2*sb**2*sba**2)/vev**2 + (16*cb**6*MHA**4*sb**2*sba**2)/vev**2 + (4*cb**4*MHA**2*MHH**2*sb**2*sba**2)/vev**2 - (4*cb**6*cba**2*MHA**2*MHH**2*sb**2*sba**2)/vev**2 + (8*cb**4*MHA**2*MHL**2*sb**2*sba**2)/vev**2 + (4*cb**6*cba**2*MHA**2*MHL**2*sb**2*sba**2)/vev**2 + (2*cb**2*MHH**2*MHL**2*sb**2*sba**2)/vev**2 - (48*cb**3*m122*MHA**2*sb**3*sba**2)/vev**2 - (8*cb*m122*MHH**2*sb**3*sba**2)/vev**2 + (8*cb**3*cba**2*m122*MHH**2*sb**3*sba**2)/vev**2 - (4*cb*m122*MHL**2*sb**3*sba**2)/vev**2 - (8*cb**3*cba**2*m122*MHL**2*sb**3*sba**2)/vev**2 + (16*m122**2*sb**4*sba**2)/vev**2 + (24*cb**4*MHA**4*sb**4*sba**2)/vev**2 + (8*cb**2*MHA**2*MHH**2*sb**4*sba**2)/vev**2 - (12*cb**4*cba**2*MHA**2*MHH**2*sb**4*sba**2)/vev**2 + (MHH**4*sb**4*sba**2)/vev**2 - (2*cb**2*cba**2*MHH**4*sb**4*sba**2)/vev**2 + (cb**4*cba**4*MHH**4*sb**4*sba**2)/vev**2 + (4*cb**2*MHA**2*MHL**2*sb**4*sba**2)/vev**2 + (12*cb**4*cba**2*MHA**2*MHL**2*sb**4*sba**2)/vev**2 + 
(2*cb**2*cba**2*MHH**2*MHL**2*sb**4*sba**2)/vev**2 - (2*cb**4*cba**4*MHH**2*MHL**2*sb**4*sba**2)/vev**2 + (cb**4*cba**4*MHL**4*sb**4*sba**2)/vev**2 - (32*cb*m122*MHA**2*sb**5*sba**2)/vev**2 - (4*m122*MHH**2*sb**5*sba**2)/(cb*vev**2) + (12*cb*cba**2*m122*MHH**2*sb**5*sba**2)/vev**2 - (12*cb*cba**2*m122*MHL**2*sb**5*sba**2)/vev**2 + (4*m122**2*sb**6*sba**2)/(cb**2*vev**2) + (16*cb**2*MHA**4*sb**6*sba**2)/vev**2 + (4*MHA**2*MHH**2*sb**6*sba**2)/vev**2 - (12*cb**2*cba**2*MHA**2*MHH**2*sb**6*sba**2)/vev**2 - (4*cba**2*MHH**4*sb**6*sba**2)/vev**2 + (4*cb**2*cba**4*MHH**4*sb**6*sba**2)/vev**2 + (12*cb**2*cba**2*MHA**2*MHL**2*sb**6*sba**2)/vev**2 + (4*cba**2*MHH**2*MHL**2*sb**6*sba**2)/vev**2 - (8*cb**2*cba**4*MHH**2*MHL**2*sb**6*sba**2)/vev**2 + (4*cb**2*cba**4*MHL**4*sb**6*sba**2)/vev**2 - (8*m122*MHA**2*sb**7*sba**2)/(cb*vev**2) + (8*cba**2*m122*MHH**2*sb**7*sba**2)/(cb*vev**2) - (8*cba**2*m122*MHL**2*sb**7*sba**2)/(cb*vev**2) + (4*MHA**4*sb**8*sba**2)/vev**2 - (4*cba**2*MHA**2*MHH**2*sb**8*sba**2)/vev**2 - (2*cba**2*MHH**4*sb**8*sba**2)/(cb**2*vev**2) + (5*cba**4*MHH**4*sb**8*sba**2)/vev**2 + (4*cba**2*MHA**2*MHL**2*sb**8*sba**2)/vev**2 + (2*cba**2*MHH**2*MHL**2*sb**8*sba**2)/(cb**2*vev**2) - (10*cba**4*MHH**2*MHL**2*sb**8*sba**2)/vev**2 + (5*cba**4*MHL**4*sb**8*sba**2)/vev**2 + (2*cba**2*m122*MHH**2*sb**9*sba**2)/(cb**3*vev**2) - (2*cba**2*m122*MHL**2*sb**9*sba**2)/(cb**3*vev**2) + (2*cba**4*MHH**4*sb**10*sba**2)/(cb**2*vev**2) - (4*cba**4*MHH**2*MHL**2*sb**10*sba**2)/(cb**2*vev**2) + (2*cba**4*MHL**4*sb**10*sba**2)/(cb**2*vev**2) + (2*cb**6*cba*m122*MHH**2*sba**3)/vev**2 - (2*cb**6*cba*m122*MHL**2*sba**3)/vev**2 - (2*cb**5*cba*MHH**2*MHL**2*sb*sba**3)/vev**2 + (2*cb**5*cba*MHL**4*sb*sba**3)/vev**2 + (4*cb**5*cba*MHA**2*MHH**2*sb**3*sba**3)/vev**2 - (4*cb**5*cba*MHA**2*MHL**2*sb**3*sba**3)/vev**2 - (12*cb**2*cba*m122*MHH**2*sb**4*sba**3)/vev**2 + (12*cb**2*cba*m122*MHL**2*sb**4*sba**3)/vev**2 + (12*cb**3*cba*MHA**2*MHH**2*sb**5*sba**3)/vev**2 + 
(4*cb*cba*MHH**4*sb**5*sba**3)/vev**2 - (4*cb**3*cba**3*MHH**4*sb**5*sba**3)/vev**2 - (12*cb**3*cba*MHA**2*MHL**2*sb**5*sba**3)/vev**2 - (2*cb*cba*MHH**2*MHL**2*sb**5*sba**3)/vev**2 + (8*cb**3*cba**3*MHH**2*MHL**2*sb**5*sba**3)/vev**2 - (2*cb*cba*MHL**4*sb**5*sba**3)/vev**2 - (4*cb**3*cba**3*MHL**4*sb**5*sba**3)/vev**2 - (16*cba*m122*MHH**2*sb**6*sba**3)/vev**2 + (16*cba*m122*MHL**2*sb**6*sba**3)/vev**2 + (12*cb*cba*MHA**2*MHH**2*sb**7*sba**3)/vev**2 + (4*cba*MHH**4*sb**7*sba**3)/(cb*vev**2) - (8*cb*cba**3*MHH**4*sb**7*sba**3)/vev**2 - (12*cb*cba*MHA**2*MHL**2*sb**7*sba**3)/vev**2 - (4*cba*MHH**2*MHL**2*sb**7*sba**3)/(cb*vev**2) + (16*cb*cba**3*MHH**2*MHL**2*sb**7*sba**3)/vev**2 - (8*cb*cba**3*MHL**4*sb**7*sba**3)/vev**2 - (6*cba*m122*MHH**2*sb**8*sba**3)/(cb**2*vev**2) + (6*cba*m122*MHL**2*sb**8*sba**3)/(cb**2*vev**2) + (4*cba*MHA**2*MHH**2*sb**9*sba**3)/(cb*vev**2) - (4*cba**3*MHH**4*sb**9*sba**3)/(cb*vev**2) - (4*cba*MHA**2*MHL**2*sb**9*sba**3)/(cb*vev**2) + (8*cba**3*MHH**2*MHL**2*sb**9*sba**3)/(cb*vev**2) - (4*cba**3*MHL**4*sb**9*sba**3)/(cb*vev**2) + (4*cb**5*m122*MHH**2*sb*sba**4)/vev**2 - (4*cb**5*m122*MHL**2*sb*sba**4)/vev**2 - (4*cb**6*MHA**2*MHH**2*sb**2*sba**4)/vev**2 + (4*cb**6*MHA**2*MHL**2*sb**2*sba**4)/vev**2 - (2*cb**4*MHH**2*MHL**2*sb**2*sba**4)/vev**2 + (2*cb**4*MHL**4*sb**2*sba**4)/vev**2 + (12*cb**3*m122*MHH**2*sb**3*sba**4)/vev**2 - (12*cb**3*m122*MHL**2*sb**3*sba**4)/vev**2 - (12*cb**4*MHA**2*MHH**2*sb**4*sba**4)/vev**2 - (2*cb**2*MHH**4*sb**4*sba**4)/vev**2 + (2*cb**4*cba**2*MHH**4*sb**4*sba**4)/vev**2 + (12*cb**4*MHA**2*MHL**2*sb**4*sba**4)/vev**2 - (4*cb**4*cba**2*MHH**2*MHL**2*sb**4*sba**4)/vev**2 + (2*cb**2*MHL**4*sb**4*sba**4)/vev**2 + (2*cb**4*cba**2*MHL**4*sb**4*sba**4)/vev**2 + (12*cb*m122*MHH**2*sb**5*sba**4)/vev**2 - (12*cb*m122*MHL**2*sb**5*sba**4)/vev**2 - (12*cb**2*MHA**2*MHH**2*sb**6*sba**4)/vev**2 - (2*MHH**4*sb**6*sba**4)/vev**2 + (5*cb**2*cba**2*MHH**4*sb**6*sba**4)/vev**2 + (12*cb**2*MHA**2*MHL**2*sb**6*sba**4)/vev**2 + 
(2*MHH**2*MHL**2*sb**6*sba**4)/vev**2 - (10*cb**2*cba**2*MHH**2*MHL**2*sb**6*sba**4)/vev**2 + (5*cb**2*cba**2*MHL**4*sb**6*sba**4)/vev**2 + (4*m122*MHH**2*sb**7*sba**4)/(cb*vev**2) - (4*m122*MHL**2*sb**7*sba**4)/(cb*vev**2) - (4*MHA**2*MHH**2*sb**8*sba**4)/vev**2 + (4*cba**2*MHH**4*sb**8*sba**4)/vev**2 + (4*MHA**2*MHL**2*sb**8*sba**4)/vev**2 - (8*cba**2*MHH**2*MHL**2*sb**8*sba**4)/vev**2 + (4*cba**2*MHL**4*sb**8*sba**4)/vev**2 + (cba**2*MHH**4*sb**10*sba**4)/(cb**2*vev**2) - (2*cba**2*MHH**2*MHL**2*sb**10*sba**4)/(cb**2*vev**2) + (cba**2*MHL**4*sb**10*sba**4)/(cb**2*vev**2) - (2*cb**3*cba*MHH**4*sb**5*sba**5)/vev**2 + (4*cb**3*cba*MHH**2*MHL**2*sb**5*sba**5)/vev**2 - (2*cb**3*cba*MHL**4*sb**5*sba**5)/vev**2 - (4*cb*cba*MHH**4*sb**7*sba**5)/vev**2 + (8*cb*cba*MHH**2*MHL**2*sb**7*sba**5)/vev**2 - (4*cb*cba*MHL**4*sb**7*sba**5)/vev**2 - (2*cba*MHH**4*sb**9*sba**5)/(cb*vev**2) + (4*cba*MHH**2*MHL**2*sb**9*sba**5)/(cb*vev**2) - (2*cba*MHL**4*sb**9*sba**5)/(cb*vev**2) + (cb**4*MHH**4*sb**4*sba**6)/vev**2 - (2*cb**4*MHH**2*MHL**2*sb**4*sba**6)/vev**2 + (cb**4*MHL**4*sb**4*sba**6)/vev**2 + (2*cb**2*MHH**4*sb**6*sba**6)/vev**2 - (4*cb**2*MHH**2*MHL**2*sb**6*sba**6)/vev**2 + (2*cb**2*MHL**4*sb**6*sba**6)/vev**2 + (MHH**4*sb**8*sba**6)/vev**2 - (2*MHH**2*MHL**2*sb**8*sba**6)/vev**2 + (MHL**4*sb**8*sba**6)/vev**2)*cmath.sqrt(-4*MHA**2*MHL**2 + MHL**4))/(32.*cmath.pi*abs(MHL)**3)',
(P.HA,P.Z):'((-(cb**4*cba**2*ee**2*MHA**2) - cb**4*cba**2*ee**2*MHL**2 + (cb**4*cba**2*ee**2*MHA**4)/(2.*MZ**2) - (cb**4*cba**2*ee**2*MHA**2*MHL**2)/MZ**2 + (cb**4*cba**2*ee**2*MHL**4)/(2.*MZ**2) + (cb**4*cba**2*ee**2*MZ**2)/2. - 2*cb**2*cba**2*ee**2*MHA**2*sb**2 - 2*cb**2*cba**2*ee**2*MHL**2*sb**2 + (cb**2*cba**2*ee**2*MHA**4*sb**2)/MZ**2 - (2*cb**2*cba**2*ee**2*MHA**2*MHL**2*sb**2)/MZ**2 + (cb**2*cba**2*ee**2*MHL**4*sb**2)/MZ**2 + cb**2*cba**2*ee**2*MZ**2*sb**2 - cba**2*ee**2*MHA**2*sb**4 - cba**2*ee**2*MHL**2*sb**4 + (cba**2*ee**2*MHA**4*sb**4)/(2.*MZ**2) - (cba**2*ee**2*MHA**2*MHL**2*sb**4)/MZ**2 + (cba**2*ee**2*MHL**4*sb**4)/(2.*MZ**2) + (cba**2*ee**2*MZ**2*sb**4)/2. - (cb**4*cba**2*cw**2*ee**2*MHA**2)/(2.*sw**2) - (cb**4*cba**2*cw**2*ee**2*MHL**2)/(2.*sw**2) + (cb**4*cba**2*cw**2*ee**2*MHA**4)/(4.*MZ**2*sw**2) - (cb**4*cba**2*cw**2*ee**2*MHA**2*MHL**2)/(2.*MZ**2*sw**2) + (cb**4*cba**2*cw**2*ee**2*MHL**4)/(4.*MZ**2*sw**2) + (cb**4*cba**2*cw**2*ee**2*MZ**2)/(4.*sw**2) - (cb**2*cba**2*cw**2*ee**2*MHA**2*sb**2)/sw**2 - (cb**2*cba**2*cw**2*ee**2*MHL**2*sb**2)/sw**2 + (cb**2*cba**2*cw**2*ee**2*MHA**4*sb**2)/(2.*MZ**2*sw**2) - (cb**2*cba**2*cw**2*ee**2*MHA**2*MHL**2*sb**2)/(MZ**2*sw**2) + (cb**2*cba**2*cw**2*ee**2*MHL**4*sb**2)/(2.*MZ**2*sw**2) + (cb**2*cba**2*cw**2*ee**2*MZ**2*sb**2)/(2.*sw**2) - (cba**2*cw**2*ee**2*MHA**2*sb**4)/(2.*sw**2) - (cba**2*cw**2*ee**2*MHL**2*sb**4)/(2.*sw**2) + (cba**2*cw**2*ee**2*MHA**4*sb**4)/(4.*MZ**2*sw**2) - (cba**2*cw**2*ee**2*MHA**2*MHL**2*sb**4)/(2.*MZ**2*sw**2) + (cba**2*cw**2*ee**2*MHL**4*sb**4)/(4.*MZ**2*sw**2) + (cba**2*cw**2*ee**2*MZ**2*sb**4)/(4.*sw**2) - (cb**4*cba**2*ee**2*MHA**2*sw**2)/(2.*cw**2) - (cb**4*cba**2*ee**2*MHL**2*sw**2)/(2.*cw**2) + (cb**4*cba**2*ee**2*MHA**4*sw**2)/(4.*cw**2*MZ**2) - (cb**4*cba**2*ee**2*MHA**2*MHL**2*sw**2)/(2.*cw**2*MZ**2) + (cb**4*cba**2*ee**2*MHL**4*sw**2)/(4.*cw**2*MZ**2) + (cb**4*cba**2*ee**2*MZ**2*sw**2)/(4.*cw**2) - (cb**2*cba**2*ee**2*MHA**2*sb**2*sw**2)/cw**2 - 
(cb**2*cba**2*ee**2*MHL**2*sb**2*sw**2)/cw**2 + (cb**2*cba**2*ee**2*MHA**4*sb**2*sw**2)/(2.*cw**2*MZ**2) - (cb**2*cba**2*ee**2*MHA**2*MHL**2*sb**2*sw**2)/(cw**2*MZ**2) + (cb**2*cba**2*ee**2*MHL**4*sb**2*sw**2)/(2.*cw**2*MZ**2) + (cb**2*cba**2*ee**2*MZ**2*sb**2*sw**2)/(2.*cw**2) - (cba**2*ee**2*MHA**2*sb**4*sw**2)/(2.*cw**2) - (cba**2*ee**2*MHL**2*sb**4*sw**2)/(2.*cw**2) + (cba**2*ee**2*MHA**4*sb**4*sw**2)/(4.*cw**2*MZ**2) - (cba**2*ee**2*MHA**2*MHL**2*sb**4*sw**2)/(2.*cw**2*MZ**2) + (cba**2*ee**2*MHL**4*sb**4*sw**2)/(4.*cw**2*MZ**2) + (cba**2*ee**2*MZ**2*sb**4*sw**2)/(4.*cw**2))*cmath.sqrt(MHA**4 - 2*MHA**2*MHL**2 + MHL**4 - 2*MHA**2*MZ**2 - 2*MHL**2*MZ**2 + MZ**4))/(16.*cmath.pi*abs(MHL)**3)',
(P.HH,P.HH):'(((9*cb**2*cba**6*MHH**4*sb**2)/vev**2 - (18*cb**4*cba**8*MHH**4*sb**2)/vev**2 + (9*cb**6*cba**10*MHH**4*sb**2)/vev**2 - (18*cb**2*cba**6*MHH**2*MHL**2*sb**2)/vev**2 + (36*cb**4*cba**8*MHH**2*MHL**2*sb**2)/vev**2 - (18*cb**6*cba**10*MHH**2*MHL**2*sb**2)/vev**2 + (9*cb**2*cba**6*MHL**4*sb**2)/vev**2 - (18*cb**4*cba**8*MHL**4*sb**2)/vev**2 + (9*cb**6*cba**10*MHL**4*sb**2)/vev**2 - (18*cb**2*cba**8*MHH**4*sb**4)/vev**2 + (18*cb**4*cba**10*MHH**4*sb**4)/vev**2 + (36*cb**2*cba**8*MHH**2*MHL**2*sb**4)/vev**2 - (36*cb**4*cba**10*MHH**2*MHL**2*sb**4)/vev**2 - (18*cb**2*cba**8*MHL**4*sb**4)/vev**2 + (18*cb**4*cba**10*MHL**4*sb**4)/vev**2 + (9*cb**2*cba**10*MHH**4*sb**6)/vev**2 - (18*cb**2*cba**10*MHH**2*MHL**2*sb**6)/vev**2 + (9*cb**2*cba**10*MHL**4*sb**6)/vev**2 - (24*cb**4*cba**5*m122*MHH**2*sba)/vev**2 + (24*cb**6*cba**7*m122*MHH**2*sba)/vev**2 + (24*cb**4*cba**5*m122*MHL**2*sba)/vev**2 - (24*cb**6*cba**7*m122*MHL**2*sba)/vev**2 - (18*cb**3*cba**5*MHH**4*sb*sba)/vev**2 + (48*cb**5*cba**7*MHH**4*sb*sba)/vev**2 - (30*cb**7*cba**9*MHH**4*sb*sba)/vev**2 + (54*cb**3*cba**5*MHH**2*MHL**2*sb*sba)/vev**2 - (114*cb**5*cba**7*MHH**2*MHL**2*sb*sba)/vev**2 + (60*cb**7*cba**9*MHH**2*MHL**2*sb*sba)/vev**2 - (36*cb**3*cba**5*MHL**4*sb*sba)/vev**2 + (66*cb**5*cba**7*MHL**4*sb*sba)/vev**2 - (30*cb**7*cba**9*MHL**4*sb*sba)/vev**2 - (48*cb**2*cba**5*m122*MHH**2*sb**2*sba)/vev**2 + (72*cb**4*cba**7*m122*MHH**2*sb**2*sba)/vev**2 + (48*cb**2*cba**5*m122*MHL**2*sb**2*sba)/vev**2 - (72*cb**4*cba**7*m122*MHL**2*sb**2*sba)/vev**2 + (36*cb*cba**5*MHH**4*sb**3*sba)/vev**2 - (12*cb**3*cba**7*MHH**4*sb**3*sba)/vev**2 - (36*cb**5*cba**9*MHH**4*sb**3*sba)/vev**2 - (54*cb*cba**5*MHH**2*MHL**2*sb**3*sba)/vev**2 - (12*cb**3*cba**7*MHH**2*MHL**2*sb**3*sba)/vev**2 + (72*cb**5*cba**9*MHH**2*MHL**2*sb**3*sba)/vev**2 + (18*cb*cba**5*MHL**4*sb**3*sba)/vev**2 + (24*cb**3*cba**7*MHL**4*sb**3*sba)/vev**2 - (36*cb**5*cba**9*MHL**4*sb**3*sba)/vev**2 - (24*cba**5*m122*MHH**2*sb**4*sba)/vev**2 + 
(72*cb**2*cba**7*m122*MHH**2*sb**4*sba)/vev**2 + (24*cba**5*m122*MHL**2*sb**4*sba)/vev**2 - (72*cb**2*cba**7*m122*MHL**2*sb**4*sba)/vev**2 - (60*cb*cba**7*MHH**4*sb**5*sba)/vev**2 + (18*cb**3*cba**9*MHH**4*sb**5*sba)/vev**2 + (102*cb*cba**7*MHH**2*MHL**2*sb**5*sba)/vev**2 - (36*cb**3*cba**9*MHH**2*MHL**2*sb**5*sba)/vev**2 - (42*cb*cba**7*MHL**4*sb**5*sba)/vev**2 + (18*cb**3*cba**9*MHL**4*sb**5*sba)/vev**2 + (24*cba**7*m122*MHH**2*sb**6*sba)/vev**2 - (24*cba**7*m122*MHL**2*sb**6*sba)/vev**2 + (24*cb*cba**9*MHH**4*sb**7*sba)/vev**2 - (48*cb*cba**9*MHH**2*MHL**2*sb**7*sba)/vev**2 + (24*cb*cba**9*MHL**4*sb**7*sba)/vev**2 + (64*cb**4*cba**4*m122**2*sba**2)/vev**2 + (9*cb**4*cba**4*MHH**4*sba**2)/vev**2 - (42*cb**6*cba**6*MHH**4*sba**2)/vev**2 + (37*cb**8*cba**8*MHH**4*sba**2)/vev**2 - (54*cb**4*cba**4*MHH**2*MHL**2*sba**2)/vev**2 + (132*cb**6*cba**6*MHH**2*MHL**2*sba**2)/vev**2 - (74*cb**8*cba**8*MHH**2*MHL**2*sba**2)/vev**2 + (54*cb**4*cba**4*MHL**4*sba**2)/vev**2 - (90*cb**6*cba**6*MHL**4*sba**2)/vev**2 + (37*cb**8*cba**8*MHL**4*sba**2)/vev**2 + (16*cb**6*cba**4*m122**2*sba**2)/(sb**2*vev**2) + (42*cb**5*cba**4*m122*MHH**2*sba**2)/(sb*vev**2) - (58*cb**7*cba**6*m122*MHH**2*sba**2)/(sb*vev**2) - (66*cb**5*cba**4*m122*MHL**2*sba**2)/(sb*vev**2) + (58*cb**7*cba**6*m122*MHL**2*sba**2)/(sb*vev**2) + (18*cb**3*cba**4*m122*MHH**2*sb*sba**2)/vev**2 - (124*cb**5*cba**6*m122*MHH**2*sb*sba**2)/vev**2 - (90*cb**3*cba**4*m122*MHL**2*sb*sba**2)/vev**2 + (124*cb**5*cba**6*m122*MHL**2*sb*sba**2)/vev**2 + (96*cb**2*cba**4*m122**2*sb**2*sba**2)/vev**2 - (72*cb**2*cba**4*MHH**4*sb**2*sba**2)/vev**2 + (96*cb**4*cba**6*MHH**4*sb**2*sba**2)/vev**2 + (16*cb**6*cba**8*MHH**4*sb**2*sba**2)/vev**2 + (162*cb**2*cba**4*MHH**2*MHL**2*sb**2*sba**2)/vev**2 - (138*cb**4*cba**6*MHH**2*MHL**2*sb**2*sba**2)/vev**2 - (32*cb**6*cba**8*MHH**2*MHL**2*sb**2*sba**2)/vev**2 - (72*cb**2*cba**4*MHL**4*sb**2*sba**2)/vev**2 + (42*cb**4*cba**6*MHL**4*sb**2*sba**2)/vev**2 + 
(16*cb**6*cba**8*MHL**4*sb**2*sba**2)/vev**2 - (90*cb*cba**4*m122*MHH**2*sb**3*sba**2)/vev**2 - (24*cb**3*cba**6*m122*MHH**2*sb**3*sba**2)/vev**2 + (18*cb*cba**4*m122*MHL**2*sb**3*sba**2)/vev**2 + (24*cb**3*cba**6*m122*MHL**2*sb**3*sba**2)/vev**2 + (64*cba**4*m122**2*sb**4*sba**2)/vev**2 + (54*cba**4*MHH**4*sb**4*sba**2)/vev**2 + (66*cb**2*cba**6*MHH**4*sb**4*sba**2)/vev**2 - (57*cb**4*cba**8*MHH**4*sb**4*sba**2)/vev**2 - (54*cba**4*MHH**2*MHL**2*sb**4*sba**2)/vev**2 - (168*cb**2*cba**6*MHH**2*MHL**2*sb**4*sba**2)/vev**2 + (114*cb**4*cba**8*MHH**2*MHL**2*sb**4*sba**2)/vev**2 + (9*cba**4*MHL**4*sb**4*sba**2)/vev**2 + (102*cb**2*cba**6*MHL**4*sb**4*sba**2)/vev**2 - (57*cb**4*cba**8*MHL**4*sb**4*sba**2)/vev**2 - (66*cba**4*m122*MHH**2*sb**5*sba**2)/(cb*vev**2) + (92*cb*cba**6*m122*MHH**2*sb**5*sba**2)/vev**2 + (42*cba**4*m122*MHL**2*sb**5*sba**2)/(cb*vev**2) - (92*cb*cba**6*m122*MHL**2*sb**5*sba**2)/vev**2 + (16*cba**4*m122**2*sb**6*sba**2)/(cb**2*vev**2) - (72*cba**6*MHH**4*sb**6*sba**2)/vev**2 - (14*cb**2*cba**8*MHH**4*sb**6*sba**2)/vev**2 + (102*cba**6*MHH**2*MHL**2*sb**6*sba**2)/vev**2 + (28*cb**2*cba**8*MHH**2*MHL**2*sb**6*sba**2)/vev**2 - (30*cba**6*MHL**4*sb**6*sba**2)/vev**2 - (14*cb**2*cba**8*MHL**4*sb**6*sba**2)/vev**2 + (50*cba**6*m122*MHH**2*sb**7*sba**2)/(cb*vev**2) - (50*cba**6*m122*MHL**2*sb**7*sba**2)/(cb*vev**2) + (22*cba**8*MHH**4*sb**8*sba**2)/vev**2 - (44*cba**8*MHH**2*MHL**2*sb**8*sba**2)/vev**2 + (22*cba**8*MHL**4*sb**8*sba**2)/vev**2 + (78*cb**4*cba**3*m122*MHH**2*sba**3)/vev**2 + (48*cb**6*cba**5*m122*MHH**2*sba**3)/vev**2 + (6*cb**4*cba**3*m122*MHL**2*sba**3)/vev**2 - (48*cb**6*cba**5*m122*MHL**2*sba**3)/vev**2 - (24*cb**7*cba**3*m122**2*sba**3)/(sb**3*vev**2) - (18*cb**6*cba**3*m122*MHH**2*sba**3)/(sb**2*vev**2) + (46*cb**8*cba**5*m122*MHH**2*sba**3)/(sb**2*vev**2) + (60*cb**6*cba**3*m122*MHL**2*sba**3)/(sb**2*vev**2) - (46*cb**8*cba**5*m122*MHL**2*sba**3)/(sb**2*vev**2) - (72*cb**5*cba**3*m122**2*sba**3)/(sb*vev**2) + 
(12*cb**7*cba**5*MHH**4*sba**3)/(sb*vev**2) - (20*cb**9*cba**7*MHH**4*sba**3)/(sb*vev**2) + (18*cb**5*cba**3*MHH**2*MHL**2*sba**3)/(sb*vev**2) - (66*cb**7*cba**5*MHH**2*MHL**2*sba**3)/(sb*vev**2) + (40*cb**9*cba**7*MHH**2*MHL**2*sba**3)/(sb*vev**2) - (36*cb**5*cba**3*MHL**4*sba**3)/(sb*vev**2) + (54*cb**7*cba**5*MHL**4*sba**3)/(sb*vev**2) - (20*cb**9*cba**7*MHL**4*sba**3)/(sb*vev**2) - (48*cb**3*cba**3*m122**2*sb*sba**3)/vev**2 + (36*cb**3*cba**3*MHH**4*sb*sba**3)/vev**2 - (72*cb**5*cba**5*MHH**4*sb*sba**3)/vev**2 - (12*cb**7*cba**7*MHH**4*sb*sba**3)/vev**2 - (162*cb**3*cba**3*MHH**2*MHL**2*sb*sba**3)/vev**2 + (150*cb**5*cba**5*MHH**2*MHL**2*sb*sba**3)/vev**2 + (24*cb**7*cba**7*MHH**2*MHL**2*sb*sba**3)/vev**2 + (108*cb**3*cba**3*MHL**4*sb*sba**3)/vev**2 - (78*cb**5*cba**5*MHL**4*sb*sba**3)/vev**2 - (12*cb**7*cba**7*MHL**4*sb*sba**3)/vev**2 + (150*cb**2*cba**3*m122*MHH**2*sb**2*sba**3)/vev**2 - (100*cb**4*cba**5*m122*MHH**2*sb**2*sba**3)/vev**2 - (150*cb**2*cba**3*m122*MHL**2*sb**2*sba**3)/vev**2 + (100*cb**4*cba**5*m122*MHL**2*sb**2*sba**3)/vev**2 + (48*cb*cba**3*m122**2*sb**3*sba**3)/vev**2 - (108*cb*cba**3*MHH**4*sb**3*sba**3)/vev**2 + (36*cb**3*cba**5*MHH**4*sb**3*sba**3)/vev**2 + (20*cb**5*cba**7*MHH**4*sb**3*sba**3)/vev**2 + (162*cb*cba**3*MHH**2*MHL**2*sb**3*sba**3)/vev**2 + (36*cb**3*cba**5*MHH**2*MHL**2*sb**3*sba**3)/vev**2 - (40*cb**5*cba**7*MHH**2*MHL**2*sb**3*sba**3)/vev**2 - (36*cb*cba**3*MHL**4*sb**3*sba**3)/vev**2 - (72*cb**3*cba**5*MHL**4*sb**3*sba**3)/vev**2 + (20*cb**5*cba**7*MHL**4*sb**3*sba**3)/vev**2 - (6*cba**3*m122*MHH**2*sb**4*sba**3)/vev**2 - (128*cb**2*cba**5*m122*MHH**2*sb**4*sba**3)/vev**2 - (78*cba**3*m122*MHL**2*sb**4*sba**3)/vev**2 + (128*cb**2*cba**5*m122*MHL**2*sb**4*sba**3)/vev**2 + (72*cba**3*m122**2*sb**5*sba**3)/(cb*vev**2) + (36*cba**3*MHH**4*sb**5*sba**3)/(cb*vev**2) + (84*cb*cba**5*MHH**4*sb**5*sba**3)/vev**2 + (4*cb**3*cba**7*MHH**4*sb**5*sba**3)/vev**2 - (18*cba**3*MHH**2*MHL**2*sb**5*sba**3)/(cb*vev**2) - 
(138*cb*cba**5*MHH**2*MHL**2*sb**5*sba**3)/vev**2 - (8*cb**3*cba**7*MHH**2*MHL**2*sb**5*sba**3)/vev**2 + (54*cb*cba**5*MHL**4*sb**5*sba**3)/vev**2 + (4*cb**3*cba**7*MHL**4*sb**5*sba**3)/vev**2 - (60*cba**3*m122*MHH**2*sb**6*sba**3)/(cb**2*vev**2) + (6*cba**5*m122*MHH**2*sb**6*sba**3)/vev**2 + (18*cba**3*m122*MHL**2*sb**6*sba**3)/(cb**2*vev**2) - (6*cba**5*m122*MHL**2*sb**6*sba**3)/vev**2 + (24*cba**3*m122**2*sb**7*sba**3)/(cb**3*vev**2) - (36*cba**5*MHH**4*sb**7*sba**3)/(cb*vev**2) + (42*cba**5*MHH**2*MHL**2*sb**7*sba**3)/(cb*vev**2) - (6*cba**5*MHL**4*sb**7*sba**3)/(cb*vev**2) + (32*cba**5*m122*MHH**2*sb**8*sba**3)/(cb**2*vev**2) - (32*cba**5*m122*MHL**2*sb**8*sba**3)/(cb**2*vev**2) + (8*cba**7*MHH**4*sb**9*sba**3)/(cb*vev**2) - (16*cba**7*MHH**2*MHL**2*sb**9*sba**3)/(cb*vev**2) + (8*cba**7*MHL**4*sb**9*sba**3)/(cb*vev**2) - (73*cb**4*cba**2*m122**2*sba**4)/vev**2 - (6*cb**6*cba**4*MHH**4*sba**4)/vev**2 + (38*cb**8*cba**6*MHH**4*sba**4)/vev**2 + (54*cb**4*cba**2*MHH**2*MHL**2*sba**4)/vev**2 - (6*cb**6*cba**4*MHH**2*MHL**2*sba**4)/vev**2 - (76*cb**8*cba**6*MHH**2*MHL**2*sba**4)/vev**2 - (72*cb**4*cba**2*MHL**4*sba**4)/vev**2 + (12*cb**6*cba**4*MHL**4*sba**4)/vev**2 + (38*cb**8*cba**6*MHL**4*sba**4)/vev**2 + (9*cb**8*cba**2*m122**2*sba**4)/(sb**4*vev**2) - (12*cb**9*cba**4*m122*MHH**2*sba**4)/(sb**3*vev**2) - (18*cb**7*cba**2*m122*MHL**2*sba**4)/(sb**3*vev**2) + (12*cb**9*cba**4*m122*MHL**2*sba**4)/(sb**3*vev**2) + (2*cb**6*cba**2*m122**2*sba**4)/(sb**2*vev**2) + (4*cb**10*cba**6*MHH**4*sba**4)/(sb**2*vev**2) + (12*cb**8*cba**4*MHH**2*MHL**2*sba**4)/(sb**2*vev**2) - (8*cb**10*cba**6*MHH**2*MHL**2*sba**4)/(sb**2*vev**2) + (9*cb**6*cba**2*MHL**4*sba**4)/(sb**2*vev**2) - (12*cb**8*cba**4*MHL**4*sba**4)/(sb**2*vev**2) + (4*cb**10*cba**6*MHL**4*sba**4)/(sb**2*vev**2) - (48*cb**5*cba**2*m122*MHH**2*sba**4)/(sb*vev**2) - (10*cb**7*cba**4*m122*MHH**2*sba**4)/(sb*vev**2) + (66*cb**5*cba**2*m122*MHL**2*sba**4)/(sb*vev**2) + (10*cb**7*cba**4*m122*MHL**2*sba**4)/(sb*vev**2) + 
(6*cb**3*cba**2*m122*MHH**2*sb*sba**4)/vev**2 + (26*cb**5*cba**4*m122*MHH**2*sb*sba**4)/vev**2 + (138*cb**3*cba**2*m122*MHL**2*sb*sba**4)/vev**2 - (26*cb**5*cba**4*m122*MHL**2*sb*sba**4)/vev**2 - (132*cb**2*cba**2*m122**2*sb**2*sba**4)/vev**2 + (54*cb**2*cba**2*MHH**4*sb**2*sba**4)/vev**2 + (6*cb**4*cba**4*MHH**4*sb**2*sba**4)/vev**2 - (13*cb**6*cba**6*MHH**4*sb**2*sba**4)/vev**2 - (162*cb**2*cba**2*MHH**2*MHL**2*sb**2*sba**4)/vev**2 - (54*cb**4*cba**4*MHH**2*MHL**2*sb**2*sba**4)/vev**2 + (26*cb**6*cba**6*MHH**2*MHL**2*sb**2*sba**4)/vev**2 + (54*cb**2*cba**2*MHL**4*sb**2*sba**4)/vev**2 + (48*cb**4*cba**4*MHL**4*sb**2*sba**4)/vev**2 - (13*cb**6*cba**6*MHL**4*sb**2*sba**4)/vev**2 + (138*cb*cba**2*m122*MHH**2*sb**3*sba**4)/vev**2 + (24*cb**3*cba**4*m122*MHH**2*sb**3*sba**4)/vev**2 + (6*cb*cba**2*m122*MHL**2*sb**3*sba**4)/vev**2 - (24*cb**3*cba**4*m122*MHL**2*sb**3*sba**4)/vev**2 - (73*cba**2*m122**2*sb**4*sba**4)/vev**2 - (72*cba**2*MHH**4*sb**4*sba**4)/vev**2 + (24*cb**2*cba**4*MHH**4*sb**4*sba**4)/vev**2 - (100*cb**4*cba**6*MHH**4*sb**4*sba**4)/vev**2 + (54*cba**2*MHH**2*MHL**2*sb**4*sba**4)/vev**2 - (36*cb**2*cba**4*MHH**2*MHL**2*sb**4*sba**4)/vev**2 + (200*cb**4*cba**6*MHH**2*MHL**2*sb**4*sba**4)/vev**2 + (12*cb**2*cba**4*MHL**4*sb**4*sba**4)/vev**2 - (100*cb**4*cba**6*MHL**4*sb**4*sba**4)/vev**2 + (66*cba**2*m122*MHH**2*sb**5*sba**4)/(cb*vev**2) - (4*cb*cba**4*m122*MHH**2*sb**5*sba**4)/vev**2 - (48*cba**2*m122*MHL**2*sb**5*sba**4)/(cb*vev**2) + (4*cb*cba**4*m122*MHL**2*sb**5*sba**4)/vev**2 + (2*cba**2*m122**2*sb**6*sba**4)/(cb**2*vev**2) + (9*cba**2*MHH**4*sb**6*sba**4)/(cb**2*vev**2) + (6*cba**4*MHH**4*sb**6*sba**4)/vev**2 - (28*cb**2*cba**6*MHH**4*sb**6*sba**4)/vev**2 + (6*cba**4*MHH**2*MHL**2*sb**6*sba**4)/vev**2 + (56*cb**2*cba**6*MHH**2*MHL**2*sb**6*sba**4)/vev**2 - (12*cba**4*MHL**4*sb**6*sba**4)/vev**2 - (28*cb**2*cba**6*MHL**4*sb**6*sba**4)/vev**2 - (18*cba**2*m122*MHH**2*sb**7*sba**4)/(cb**3*vev**2) + (2*cba**4*m122*MHH**2*sb**7*sba**4)/(cb*vev**2) - 
(2*cba**4*m122*MHL**2*sb**7*sba**4)/(cb*vev**2) + (9*cba**2*m122**2*sb**8*sba**4)/(cb**4*vev**2) - (6*cba**4*MHH**4*sb**8*sba**4)/(cb**2*vev**2) + (26*cba**6*MHH**4*sb**8*sba**4)/vev**2 + (6*cba**4*MHH**2*MHL**2*sb**8*sba**4)/(cb**2*vev**2) - (52*cba**6*MHH**2*MHL**2*sb**8*sba**4)/vev**2 + (26*cba**6*MHL**4*sb**8*sba**4)/vev**2 + (6*cba**4*m122*MHH**2*sb**9*sba**4)/(cb**3*vev**2) - (6*cba**4*m122*MHL**2*sb**9*sba**4)/(cb**3*vev**2) + (cba**6*MHH**4*sb**10*sba**4)/(cb**2*vev**2) - (2*cba**6*MHH**2*MHL**2*sb**10*sba**4)/(cb**2*vev**2) + (cba**6*MHL**4*sb**10*sba**4)/(cb**2*vev**2) - (42*cb**4*cba*m122*MHH**2*sba**5)/vev**2 + (6*cb**6*cba**3*m122*MHH**2*sba**5)/vev**2 - (18*cb**4*cba*m122*MHL**2*sba**5)/vev**2 - (6*cb**6*cba**3*m122*MHL**2*sba**5)/vev**2 + (12*cb**7*cba*m122**2*sba**5)/(sb**3*vev**2) + (26*cb**8*cba**3*m122*MHH**2*sba**5)/(sb**2*vev**2) - (30*cb**6*cba*m122*MHL**2*sba**5)/(sb**2*vev**2) - (26*cb**8*cba**3*m122*MHL**2*sba**5)/(sb**2*vev**2) + (36*cb**5*cba*m122**2*sba**5)/(sb*vev**2) + (12*cb**7*cba**3*MHH**4*sba**5)/(sb*vev**2) - (32*cb**9*cba**5*MHH**4*sba**5)/(sb*vev**2) - (42*cb**7*cba**3*MHH**2*MHL**2*sba**5)/(sb*vev**2) + (64*cb**9*cba**5*MHH**2*MHL**2*sba**5)/(sb*vev**2) + (18*cb**5*cba*MHL**4*sba**5)/(sb*vev**2) + (30*cb**7*cba**3*MHL**4*sba**5)/(sb*vev**2) - (32*cb**9*cba**5*MHL**4*sba**5)/(sb*vev**2) + (24*cb**3*cba*m122**2*sb*sba**5)/vev**2 - (84*cb**5*cba**3*MHH**4*sb*sba**5)/vev**2 + (54*cb**7*cba**5*MHH**4*sb*sba**5)/vev**2 + (54*cb**3*cba*MHH**2*MHL**2*sb*sba**5)/vev**2 + (186*cb**5*cba**3*MHH**2*MHL**2*sb*sba**5)/vev**2 - (108*cb**7*cba**5*MHH**2*MHL**2*sb*sba**5)/vev**2 - (36*cb**3*cba*MHL**4*sb*sba**5)/vev**2 - (102*cb**5*cba**3*MHL**4*sb*sba**5)/vev**2 + (54*cb**7*cba**5*MHL**4*sb*sba**5)/vev**2 - (54*cb**2*cba*m122*MHH**2*sb**2*sba**5)/vev**2 - (116*cb**4*cba**3*m122*MHH**2*sb**2*sba**5)/vev**2 + (54*cb**2*cba*m122*MHL**2*sb**2*sba**5)/vev**2 + (116*cb**4*cba**3*m122*MHL**2*sb**2*sba**5)/vev**2 - 
(24*cb*cba*m122**2*sb**3*sba**5)/vev**2 + (36*cb*cba*MHH**4*sb**3*sba**5)/vev**2 + (24*cb**3*cba**3*MHH**4*sb**3*sba**5)/vev**2 + (116*cb**5*cba**5*MHH**4*sb**3*sba**5)/vev**2 - (54*cb*cba*MHH**2*MHL**2*sb**3*sba**5)/vev**2 + (24*cb**3*cba**3*MHH**2*MHL**2*sb**3*sba**5)/vev**2 - (232*cb**5*cba**5*MHH**2*MHL**2*sb**3*sba**5)/vev**2 - (48*cb**3*cba**3*MHL**4*sb**3*sba**5)/vev**2 + (116*cb**5*cba**5*MHL**4*sb**3*sba**5)/vev**2 + (18*cba*m122*MHH**2*sb**4*sba**5)/vev**2 - (124*cb**2*cba**3*m122*MHH**2*sb**4*sba**5)/vev**2 + (42*cba*m122*MHL**2*sb**4*sba**5)/vev**2 + (124*cb**2*cba**3*m122*MHL**2*sb**4*sba**5)/vev**2 - (36*cba*m122**2*sb**5*sba**5)/(cb*vev**2) - (18*cba*MHH**4*sb**5*sba**5)/(cb*vev**2) + (96*cb*cba**3*MHH**4*sb**5*sba**5)/vev**2 - (44*cb**3*cba**5*MHH**4*sb**5*sba**5)/vev**2 - (174*cb*cba**3*MHH**2*MHL**2*sb**5*sba**5)/vev**2 + (88*cb**3*cba**5*MHH**2*MHL**2*sb**5*sba**5)/vev**2 + (78*cb*cba**3*MHL**4*sb**5*sba**5)/vev**2 - (44*cb**3*cba**5*MHL**4*sb**5*sba**5)/vev**2 + (30*cba*m122*MHH**2*sb**6*sba**5)/(cb**2*vev**2) - (6*cba**3*m122*MHH**2*sb**6*sba**5)/vev**2 + (6*cba**3*m122*MHL**2*sb**6*sba**5)/vev**2 - (12*cba*m122**2*sb**7*sba**5)/(cb**3*vev**2) - (24*cba**3*MHH**4*sb**7*sba**5)/(cb*vev**2) - (60*cb*cba**5*MHH**4*sb**7*sba**5)/vev**2 + (30*cba**3*MHH**2*MHL**2*sb**7*sba**5)/(cb*vev**2) + (120*cb*cba**5*MHH**2*MHL**2*sb**7*sba**5)/vev**2 - (6*cba**3*MHL**4*sb**7*sba**5)/(cb*vev**2) - (60*cb*cba**5*MHL**4*sb**7*sba**5)/vev**2 + (22*cba**3*m122*MHH**2*sb**8*sba**5)/(cb**2*vev**2) - (22*cba**3*m122*MHL**2*sb**8*sba**5)/(cb**2*vev**2) + (14*cba**5*MHH**4*sb**9*sba**5)/(cb*vev**2) - (28*cba**5*MHH**2*MHL**2*sb**9*sba**5)/(cb*vev**2) + (14*cba**5*MHL**4*sb**9*sba**5)/(cb*vev**2) + (16*cb**4*m122**2*sba**6)/vev**2 + (36*cb**6*cba**2*MHH**4*sba**6)/vev**2 - (31*cb**8*cba**4*MHH**4*sba**6)/vev**2 - (126*cb**6*cba**2*MHH**2*MHL**2*sba**6)/vev**2 + (62*cb**8*cba**4*MHH**2*MHL**2*sba**6)/vev**2 + (9*cb**4*MHL**4*sba**6)/vev**2 + 
(90*cb**6*cba**2*MHL**4*sba**6)/vev**2 - (31*cb**8*cba**4*MHL**4*sba**6)/vev**2 - (12*cb**9*cba**2*m122*MHH**2*sba**6)/(sb**3*vev**2) + (12*cb**9*cba**2*m122*MHL**2*sba**6)/(sb**3*vev**2) + (4*cb**6*m122**2*sba**6)/(sb**2*vev**2) + (8*cb**10*cba**4*MHH**4*sba**6)/(sb**2*vev**2) + (12*cb**8*cba**2*MHH**2*MHL**2*sba**6)/(sb**2*vev**2) - (16*cb**10*cba**4*MHH**2*MHL**2*sba**6)/(sb**2*vev**2) - (12*cb**8*cba**2*MHL**4*sba**6)/(sb**2*vev**2) + (8*cb**10*cba**4*MHL**4*sba**6)/(sb**2*vev**2) + (40*cb**7*cba**2*m122*MHH**2*sba**6)/(sb*vev**2) - (12*cb**5*m122*MHL**2*sba**6)/(sb*vev**2) - (40*cb**7*cba**2*m122*MHL**2*sba**6)/(sb*vev**2) - (12*cb**3*m122*MHH**2*sb*sba**6)/vev**2 + (130*cb**5*cba**2*m122*MHH**2*sb*sba**6)/vev**2 - (24*cb**3*m122*MHL**2*sb*sba**6)/vev**2 - (130*cb**5*cba**2*m122*MHL**2*sb*sba**6)/vev**2 + (24*cb**2*m122**2*sb**2*sba**6)/vev**2 - (96*cb**4*cba**2*MHH**4*sb**2*sba**6)/vev**2 - (34*cb**6*cba**4*MHH**4*sb**2*sba**6)/vev**2 + (18*cb**2*MHH**2*MHL**2*sb**2*sba**6)/vev**2 + (114*cb**4*cba**2*MHH**2*MHL**2*sb**2*sba**6)/vev**2 + (68*cb**6*cba**4*MHH**2*MHL**2*sb**2*sba**6)/vev**2 - (18*cb**4*cba**2*MHL**4*sb**2*sba**6)/vev**2 - (34*cb**6*cba**4*MHL**4*sb**2*sba**6)/vev**2 - (24*cb*m122*MHH**2*sb**3*sba**6)/vev**2 + (36*cb**3*cba**2*m122*MHH**2*sb**3*sba**6)/vev**2 - (12*cb*m122*MHL**2*sb**3*sba**6)/vev**2 - (36*cb**3*cba**2*m122*MHL**2*sb**3*sba**6)/vev**2 + (16*m122**2*sb**4*sba**6)/vev**2 + (9*MHH**4*sb**4*sba**6)/vev**2 - (54*cb**2*cba**2*MHH**4*sb**4*sba**6)/vev**2 + (40*cb**4*cba**4*MHH**4*sb**4*sba**6)/vev**2 + (156*cb**2*cba**2*MHH**2*MHL**2*sb**4*sba**6)/vev**2 - (80*cb**4*cba**4*MHH**2*MHL**2*sb**4*sba**6)/vev**2 - (102*cb**2*cba**2*MHL**4*sb**4*sba**6)/vev**2 + (40*cb**4*cba**4*MHL**4*sb**4*sba**6)/vev**2 - (12*m122*MHH**2*sb**5*sba**6)/(cb*vev**2) - (92*cb*cba**2*m122*MHH**2*sb**5*sba**6)/vev**2 + (92*cb*cba**2*m122*MHL**2*sb**5*sba**6)/vev**2 + (4*m122**2*sb**6*sba**6)/(cb**2*vev**2) + (72*cba**2*MHH**4*sb**6*sba**6)/vev**2 + 
(20*cb**2*cba**4*MHH**4*sb**6*sba**6)/vev**2 - (90*cba**2*MHH**2*MHL**2*sb**6*sba**6)/vev**2 - (40*cb**2*cba**4*MHH**2*MHL**2*sb**6*sba**6)/vev**2 + (18*cba**2*MHL**4*sb**6*sba**6)/vev**2 + (20*cb**2*cba**4*MHL**4*sb**6*sba**6)/vev**2 - (44*cba**2*m122*MHH**2*sb**7*sba**6)/(cb*vev**2) + (44*cba**2*m122*MHL**2*sb**7*sba**6)/(cb*vev**2) - (6*cba**2*MHH**4*sb**8*sba**6)/(cb**2*vev**2) - (13*cba**4*MHH**4*sb**8*sba**6)/vev**2 + (6*cba**2*MHH**2*MHL**2*sb**8*sba**6)/(cb**2*vev**2) + (26*cba**4*MHH**2*MHL**2*sb**8*sba**6)/vev**2 - (13*cba**4*MHL**4*sb**8*sba**6)/vev**2 + (6*cba**2*m122*MHH**2*sb**9*sba**6)/(cb**3*vev**2) - (6*cba**2*m122*MHL**2*sb**9*sba**6)/(cb**3*vev**2) + (2*cba**4*MHH**4*sb**10*sba**6)/(cb**2*vev**2) - (4*cba**4*MHH**2*MHL**2*sb**10*sba**6)/(cb**2*vev**2) + (2*cba**4*MHL**4*sb**10*sba**6)/(cb**2*vev**2) - (18*cb**6*cba*m122*MHH**2*sba**7)/vev**2 + (18*cb**6*cba*m122*MHL**2*sba**7)/vev**2 - (20*cb**8*cba*m122*MHH**2*sba**7)/(sb**2*vev**2) + (20*cb**8*cba*m122*MHL**2*sba**7)/(sb**2*vev**2) - (4*cb**9*cba**3*MHH**4*sba**7)/(sb*vev**2) + (24*cb**7*cba*MHH**2*MHL**2*sba**7)/(sb*vev**2) + (8*cb**9*cba**3*MHH**2*MHL**2*sba**7)/(sb*vev**2) - (24*cb**7*cba*MHL**4*sba**7)/(sb*vev**2) - (4*cb**9*cba**3*MHL**4*sba**7)/(sb*vev**2) + (36*cb**5*cba*MHH**4*sb*sba**7)/vev**2 + (24*cb**7*cba**3*MHH**4*sb*sba**7)/vev**2 - (78*cb**5*cba*MHH**2*MHL**2*sb*sba**7)/vev**2 - (48*cb**7*cba**3*MHH**2*MHL**2*sb*sba**7)/vev**2 + (42*cb**5*cba*MHL**4*sb*sba**7)/vev**2 + (24*cb**7*cba**3*MHL**4*sb*sba**7)/vev**2 + (56*cb**4*cba*m122*MHH**2*sb**2*sba**7)/vev**2 - (56*cb**4*cba*m122*MHL**2*sb**2*sba**7)/vev**2 - (24*cb**3*cba*MHH**4*sb**3*sba**7)/vev**2 + (28*cb**5*cba**3*MHH**4*sb**3*sba**7)/vev**2 - (24*cb**3*cba*MHH**2*MHL**2*sb**3*sba**7)/vev**2 - (56*cb**5*cba**3*MHH**2*MHL**2*sb**3*sba**7)/vev**2 + (48*cb**3*cba*MHL**4*sb**3*sba**7)/vev**2 + (28*cb**5*cba**3*MHL**4*sb**3*sba**7)/vev**2 + (76*cb**2*cba*m122*MHH**2*sb**4*sba**7)/vev**2 - 
(76*cb**2*cba*m122*MHL**2*sb**4*sba**7)/vev**2 - (48*cb*cba*MHH**4*sb**5*sba**7)/vev**2 - (28*cb**3*cba**3*MHH**4*sb**5*sba**7)/vev**2 + (66*cb*cba*MHH**2*MHL**2*sb**5*sba**7)/vev**2 + (56*cb**3*cba**3*MHH**2*MHL**2*sb**5*sba**7)/vev**2 - (18*cb*cba*MHL**4*sb**5*sba**7)/vev**2 - (28*cb**3*cba**3*MHL**4*sb**5*sba**7)/vev**2 + (12*cba*m122*MHH**2*sb**6*sba**7)/vev**2 - (12*cba*m122*MHL**2*sb**6*sba**7)/vev**2 + (12*cba*MHH**4*sb**7*sba**7)/(cb*vev**2) - (24*cb*cba**3*MHH**4*sb**7*sba**7)/vev**2 - (12*cba*MHH**2*MHL**2*sb**7*sba**7)/(cb*vev**2) + (48*cb*cba**3*MHH**2*MHL**2*sb**7*sba**7)/vev**2 - (24*cb*cba**3*MHL**4*sb**7*sba**7)/vev**2 - (10*cba*m122*MHH**2*sb**8*sba**7)/(cb**2*vev**2) + (10*cba*m122*MHL**2*sb**8*sba**7)/(cb**2*vev**2) + (4*cba**3*MHH**4*sb**9*sba**7)/(cb*vev**2) - (8*cba**3*MHH**2*MHL**2*sb**9*sba**7)/(cb*vev**2) + (4*cba**3*MHL**4*sb**9*sba**7)/(cb*vev**2) - (28*cb**8*cba**2*MHH**4*sba**8)/vev**2 + (12*cb**6*MHH**2*MHL**2*sba**8)/vev**2 + (56*cb**8*cba**2*MHH**2*MHL**2*sba**8)/vev**2 - (12*cb**6*MHL**4*sba**8)/vev**2 - (28*cb**8*cba**2*MHL**4*sba**8)/vev**2 + (4*cb**10*cba**2*MHH**4*sba**8)/(sb**2*vev**2) - (8*cb**10*cba**2*MHH**2*MHL**2*sba**8)/(sb**2*vev**2) + (4*cb**10*cba**2*MHL**4*sba**8)/(sb**2*vev**2) - (8*cb**7*m122*MHH**2*sba**8)/(sb*vev**2) + (8*cb**7*m122*MHL**2*sba**8)/(sb*vev**2) - (20*cb**5*m122*MHH**2*sb*sba**8)/vev**2 + (20*cb**5*m122*MHL**2*sb*sba**8)/vev**2 + (12*cb**4*MHH**4*sb**2*sba**8)/vev**2 - (10*cb**6*cba**2*MHH**4*sb**2*sba**8)/vev**2 - (6*cb**4*MHH**2*MHL**2*sb**2*sba**8)/vev**2 + (20*cb**6*cba**2*MHH**2*MHL**2*sb**2*sba**8)/vev**2 - (6*cb**4*MHL**4*sb**2*sba**8)/vev**2 - (10*cb**6*cba**2*MHL**4*sb**2*sba**8)/vev**2 - (12*cb**3*m122*MHH**2*sb**3*sba**8)/vev**2 + (12*cb**3*m122*MHL**2*sb**3*sba**8)/vev**2 + (6*cb**2*MHH**4*sb**4*sba**8)/vev**2 + (62*cb**4*cba**2*MHH**4*sb**4*sba**8)/vev**2 - (12*cb**2*MHH**2*MHL**2*sb**4*sba**8)/vev**2 - (124*cb**4*cba**2*MHH**2*MHL**2*sb**4*sba**8)/vev**2 + 
(6*cb**2*MHL**4*sb**4*sba**8)/vev**2 + (62*cb**4*cba**2*MHL**4*sb**4*sba**8)/vev**2 + (4*cb*m122*MHH**2*sb**5*sba**8)/vev**2 - (4*cb*m122*MHL**2*sb**5*sba**8)/vev**2 - (6*MHH**4*sb**6*sba**8)/vev**2 + (23*cb**2*cba**2*MHH**4*sb**6*sba**8)/vev**2 + (6*MHH**2*MHL**2*sb**6*sba**8)/vev**2 - (46*cb**2*cba**2*MHH**2*MHL**2*sb**6*sba**8)/vev**2 + (23*cb**2*cba**2*MHL**4*sb**6*sba**8)/vev**2 + (4*m122*MHH**2*sb**7*sba**8)/(cb*vev**2) - (4*m122*MHL**2*sb**7*sba**8)/(cb*vev**2) - (16*cba**2*MHH**4*sb**8*sba**8)/vev**2 + (32*cba**2*MHH**2*MHL**2*sb**8*sba**8)/vev**2 - (16*cba**2*MHL**4*sb**8*sba**8)/vev**2 + (cba**2*MHH**4*sb**10*sba**8)/(cb**2*vev**2) - (2*cba**2*MHH**2*MHL**2*sb**10*sba**8)/(cb**2*vev**2) + (cba**2*MHL**4*sb**10*sba**8)/(cb**2*vev**2) + (8*cb**9*cba*MHH**4*sba**9)/(sb*vev**2) - (16*cb**9*cba*MHH**2*MHL**2*sba**9)/(sb*vev**2) + (8*cb**9*cba*MHL**4*sba**9)/(sb*vev**2) - (12*cb**7*cba*MHH**4*sb*sba**9)/vev**2 + (24*cb**7*cba*MHH**2*MHL**2*sb*sba**9)/vev**2 - (12*cb**7*cba*MHL**4*sb*sba**9)/vev**2 - (32*cb**5*cba*MHH**4*sb**3*sba**9)/vev**2 + (64*cb**5*cba*MHH**2*MHL**2*sb**3*sba**9)/vev**2 - (32*cb**5*cba*MHL**4*sb**3*sba**9)/vev**2 + (2*cb**3*cba*MHH**4*sb**5*sba**9)/vev**2 - (4*cb**3*cba*MHH**2*MHL**2*sb**5*sba**9)/vev**2 + (2*cb**3*cba*MHL**4*sb**5*sba**9)/vev**2 + (12*cb*cba*MHH**4*sb**7*sba**9)/vev**2 - (24*cb*cba*MHH**2*MHL**2*sb**7*sba**9)/vev**2 + (12*cb*cba*MHL**4*sb**7*sba**9)/vev**2 - (2*cba*MHH**4*sb**9*sba**9)/(cb*vev**2) + (4*cba*MHH**2*MHL**2*sb**9*sba**9)/(cb*vev**2) - (2*cba*MHL**4*sb**9*sba**9)/(cb*vev**2) + (4*cb**8*MHH**4*sba**10)/vev**2 - (8*cb**8*MHH**2*MHL**2*sba**10)/vev**2 + (4*cb**8*MHL**4*sba**10)/vev**2 + (4*cb**6*MHH**4*sb**2*sba**10)/vev**2 - (8*cb**6*MHH**2*MHL**2*sb**2*sba**10)/vev**2 + (4*cb**6*MHL**4*sb**2*sba**10)/vev**2 - (3*cb**4*MHH**4*sb**4*sba**10)/vev**2 + (6*cb**4*MHH**2*MHL**2*sb**4*sba**10)/vev**2 - (3*cb**4*MHL**4*sb**4*sba**10)/vev**2 - (2*cb**2*MHH**4*sb**6*sba**10)/vev**2 + 
(4*cb**2*MHH**2*MHL**2*sb**6*sba**10)/vev**2 - (2*cb**2*MHL**4*sb**6*sba**10)/vev**2 + (MHH**4*sb**8*sba**10)/vev**2 - (2*MHH**2*MHL**2*sb**8*sba**10)/vev**2 + (MHL**4*sb**8*sba**10)/vev**2)*cmath.sqrt(-4*MHH**2*MHL**2 + MHL**4))/(32.*cmath.pi*abs(MHL)**3)',
(P.H__plus__,P.W__minus__):'((-(cb**4*cba**2*ee**2*MHL**2)/(2.*sw**2) - (cb**4*cba**2*ee**2*MHp**2)/(2.*sw**2) + (cb**4*cba**2*ee**2*MHL**4)/(4.*MW**2*sw**2) - (cb**4*cba**2*ee**2*MHL**2*MHp**2)/(2.*MW**2*sw**2) + (cb**4*cba**2*ee**2*MHp**4)/(4.*MW**2*sw**2) + (cb**4*cba**2*ee**2*MW**2)/(4.*sw**2) - (cb**2*cba**2*ee**2*MHL**2*sb**2)/sw**2 - (cb**2*cba**2*ee**2*MHp**2*sb**2)/sw**2 + (cb**2*cba**2*ee**2*MHL**4*sb**2)/(2.*MW**2*sw**2) - (cb**2*cba**2*ee**2*MHL**2*MHp**2*sb**2)/(MW**2*sw**2) + (cb**2*cba**2*ee**2*MHp**4*sb**2)/(2.*MW**2*sw**2) + (cb**2*cba**2*ee**2*MW**2*sb**2)/(2.*sw**2) - (cba**2*ee**2*MHL**2*sb**4)/(2.*sw**2) - (cba**2*ee**2*MHp**2*sb**4)/(2.*sw**2) + (cba**2*ee**2*MHL**4*sb**4)/(4.*MW**2*sw**2) - (cba**2*ee**2*MHL**2*MHp**2*sb**4)/(2.*MW**2*sw**2) + (cba**2*ee**2*MHp**4*sb**4)/(4.*MW**2*sw**2) + (cba**2*ee**2*MW**2*sb**4)/(4.*sw**2))*cmath.sqrt(MHL**4 - 2*MHL**2*MHp**2 + MHp**4 - 2*MHL**2*MW**2 - 2*MHp**2*MW**2 + MW**4))/(16.*cmath.pi*abs(MHL)**3)',
(P.H__minus__,P.H__plus__):'((-((cb**4*cba**2*m122**2)/vev**2) + (cb**8*cba**2*m122**2)/(sb**4*vev**2) - (2*cb**7*cba**2*m122*MHL**2)/(sb**3*vev**2) + (2*cb**6*cba**2*m122**2)/(sb**2*vev**2) + (cb**6*cba**2*MHL**4)/(sb**2*vev**2) - (2*cb**5*cba**2*m122*MHL**2)/(sb*vev**2) + (2*cb**3*cba**2*m122*MHH**2*sb)/vev**2 - (2*cb**5*cba**4*m122*MHH**2*sb)/vev**2 + (2*cb**3*cba**2*m122*MHL**2*sb)/vev**2 + (2*cb**5*cba**4*m122*MHL**2*sb)/vev**2 - (4*cb**2*cba**2*m122**2*sb**2)/vev**2 - (2*cb**2*cba**2*MHH**2*MHL**2*sb**2)/vev**2 + (2*cb**4*cba**4*MHH**2*MHL**2*sb**2)/vev**2 - (2*cb**4*cba**4*MHL**4*sb**2)/vev**2 + (2*cb*cba**2*m122*MHH**2*sb**3)/vev**2 - (4*cb**3*cba**4*m122*MHH**2*sb**3)/vev**2 + (2*cb*cba**2*m122*MHL**2*sb**3)/vev**2 + (4*cb**3*cba**4*m122*MHL**2*sb**3)/vev**2 - (cba**2*m122**2*sb**4)/vev**2 + (2*cb**2*cba**4*MHH**2*MHL**2*sb**4)/vev**2 - (2*cb**2*cba**4*MHL**4*sb**4)/vev**2 - (2*cba**2*m122*MHH**2*sb**5)/(cb*vev**2) + (2*cba**2*m122**2*sb**6)/(cb**2*vev**2) + (cba**2*MHH**4*sb**6)/(cb**2*vev**2) - (2*cba**4*MHH**4*sb**6)/vev**2 + (cb**2*cba**6*MHH**4*sb**6)/vev**2 + (2*cba**4*MHH**2*MHL**2*sb**6)/vev**2 - (2*cb**2*cba**6*MHH**2*MHL**2*sb**6)/vev**2 + (cb**2*cba**6*MHL**4*sb**6)/vev**2 - (2*cba**2*m122*MHH**2*sb**7)/(cb**3*vev**2) + (4*cba**4*m122*MHH**2*sb**7)/(cb*vev**2) - (4*cba**4*m122*MHL**2*sb**7)/(cb*vev**2) + (cba**2*m122**2*sb**8)/(cb**4*vev**2) - (2*cba**4*MHH**4*sb**8)/(cb**2*vev**2) + (2*cba**6*MHH**4*sb**8)/vev**2 + (2*cba**4*MHH**2*MHL**2*sb**8)/(cb**2*vev**2) - (4*cba**6*MHH**2*MHL**2*sb**8)/vev**2 + (2*cba**6*MHL**4*sb**8)/vev**2 + (2*cba**4*m122*MHH**2*sb**9)/(cb**3*vev**2) - (2*cba**4*m122*MHL**2*sb**9)/(cb**3*vev**2) + (cba**6*MHH**4*sb**10)/(cb**2*vev**2) - (2*cba**6*MHH**2*MHL**2*sb**10)/(cb**2*vev**2) + (cba**6*MHL**4*sb**10)/(cb**2*vev**2) - (2*cb**4*cba*m122*MHH**2*sba)/vev**2 + (2*cb**6*cba**3*m122*MHH**2*sba)/vev**2 - (10*cb**4*cba*m122*MHL**2*sba)/vev**2 - (2*cb**6*cba**3*m122*MHL**2*sba)/vev**2 - 
(12*cb**6*cba*m122*MHp**2*sba)/vev**2 + (4*cb**7*cba*m122**2*sba)/(sb**3*vev**2) - (6*cb**6*cba*m122*MHL**2*sba)/(sb**2*vev**2) - (4*cb**8*cba*m122*MHp**2*sba)/(sb**2*vev**2) + (12*cb**5*cba*m122**2*sba)/(sb*vev**2) + (2*cb**5*cba*MHL**4*sba)/(sb*vev**2) + (4*cb**7*cba*MHL**2*MHp**2*sba)/(sb*vev**2) + (8*cb**3*cba*m122**2*sb*sba)/vev**2 + (2*cb**3*cba*MHH**2*MHL**2*sb*sba)/vev**2 - (2*cb**5*cba**3*MHH**2*MHL**2*sb*sba)/vev**2 + (2*cb**5*cba**3*MHL**4*sb*sba)/vev**2 + (8*cb**5*cba*MHL**2*MHp**2*sb*sba)/vev**2 + (2*cb**2*cba*m122*MHH**2*sb**2*sba)/vev**2 - (2*cb**2*cba*m122*MHL**2*sb**2*sba)/vev**2 - (8*cb**4*cba*m122*MHp**2*sb**2*sba)/vev**2 - (8*cb*cba*m122**2*sb**3*sba)/vev**2 - (2*cb*cba*MHH**2*MHL**2*sb**3*sba)/vev**2 - (4*cb**3*cba*MHH**2*MHp**2*sb**3*sba)/vev**2 + (4*cb**5*cba**3*MHH**2*MHp**2*sb**3*sba)/vev**2 + (4*cb**3*cba*MHL**2*MHp**2*sb**3*sba)/vev**2 - (4*cb**5*cba**3*MHL**2*MHp**2*sb**3*sba)/vev**2 + (10*cba*m122*MHH**2*sb**4*sba)/vev**2 - (12*cb**2*cba**3*m122*MHH**2*sb**4*sba)/vev**2 + (2*cba*m122*MHL**2*sb**4*sba)/vev**2 + (12*cb**2*cba**3*m122*MHL**2*sb**4*sba)/vev**2 + (8*cb**2*cba*m122*MHp**2*sb**4*sba)/vev**2 - (12*cba*m122**2*sb**5*sba)/(cb*vev**2) - (2*cba*MHH**4*sb**5*sba)/(cb*vev**2) + (4*cb*cba**3*MHH**4*sb**5*sba)/vev**2 - (2*cb**3*cba**5*MHH**4*sb**5*sba)/vev**2 - (2*cb*cba**3*MHH**2*MHL**2*sb**5*sba)/vev**2 + (4*cb**3*cba**5*MHH**2*MHL**2*sb**5*sba)/vev**2 - (2*cb*cba**3*MHL**4*sb**5*sba)/vev**2 - (2*cb**3*cba**5*MHL**4*sb**5*sba)/vev**2 - (8*cb*cba*MHH**2*MHp**2*sb**5*sba)/vev**2 + (12*cb**3*cba**3*MHH**2*MHp**2*sb**5*sba)/vev**2 - (12*cb**3*cba**3*MHL**2*MHp**2*sb**5*sba)/vev**2 + (6*cba*m122*MHH**2*sb**6*sba)/(cb**2*vev**2) - (16*cba**3*m122*MHH**2*sb**6*sba)/vev**2 + (16*cba**3*m122*MHL**2*sb**6*sba)/vev**2 + (12*cba*m122*MHp**2*sb**6*sba)/vev**2 - (4*cba*m122**2*sb**7*sba)/(cb**3*vev**2) + (4*cba**3*MHH**4*sb**7*sba)/(cb*vev**2) - (4*cb*cba**5*MHH**4*sb**7*sba)/vev**2 - (4*cba**3*MHH**2*MHL**2*sb**7*sba)/(cb*vev**2) + 
(8*cb*cba**5*MHH**2*MHL**2*sb**7*sba)/vev**2 - (4*cb*cba**5*MHL**4*sb**7*sba)/vev**2 - (4*cba*MHH**2*MHp**2*sb**7*sba)/(cb*vev**2) + (12*cb*cba**3*MHH**2*MHp**2*sb**7*sba)/vev**2 - (12*cb*cba**3*MHL**2*MHp**2*sb**7*sba)/vev**2 - (6*cba**3*m122*MHH**2*sb**8*sba)/(cb**2*vev**2) + (6*cba**3*m122*MHL**2*sb**8*sba)/(cb**2*vev**2) + (4*cba*m122*MHp**2*sb**8*sba)/(cb**2*vev**2) - (2*cba**5*MHH**4*sb**9*sba)/(cb*vev**2) + (4*cba**5*MHH**2*MHL**2*sb**9*sba)/(cb*vev**2) - (2*cba**5*MHL**4*sb**9*sba)/(cb*vev**2) + (4*cba**3*MHH**2*MHp**2*sb**9*sba)/(cb*vev**2) - (4*cba**3*MHL**2*MHp**2*sb**9*sba)/(cb*vev**2) + (16*cb**4*m122**2*sba**2)/vev**2 + (cb**4*MHL**4*sba**2)/vev**2 + (4*cb**6*MHL**2*MHp**2*sba**2)/vev**2 + (4*cb**8*MHp**4*sba**2)/vev**2 + (4*cb**6*m122**2*sba**2)/(sb**2*vev**2) - (4*cb**5*m122*MHL**2*sba**2)/(sb*vev**2) - (8*cb**7*m122*MHp**2*sba**2)/(sb*vev**2) - (4*cb**3*m122*MHH**2*sb*sba**2)/vev**2 + (2*cb**5*cba**2*m122*MHH**2*sb*sba**2)/vev**2 - (8*cb**3*m122*MHL**2*sb*sba**2)/vev**2 - (2*cb**5*cba**2*m122*MHL**2*sb*sba**2)/vev**2 - (32*cb**5*m122*MHp**2*sb*sba**2)/vev**2 + (24*cb**2*m122**2*sb**2*sba**2)/vev**2 + (2*cb**2*MHH**2*MHL**2*sb**2*sba**2)/vev**2 + (4*cb**4*MHH**2*MHp**2*sb**2*sba**2)/vev**2 - (4*cb**6*cba**2*MHH**2*MHp**2*sb**2*sba**2)/vev**2 + (8*cb**4*MHL**2*MHp**2*sb**2*sba**2)/vev**2 + (4*cb**6*cba**2*MHL**2*MHp**2*sb**2*sba**2)/vev**2 + (16*cb**6*MHp**4*sb**2*sba**2)/vev**2 - (8*cb*m122*MHH**2*sb**3*sba**2)/vev**2 + (8*cb**3*cba**2*m122*MHH**2*sb**3*sba**2)/vev**2 - (4*cb*m122*MHL**2*sb**3*sba**2)/vev**2 - (8*cb**3*cba**2*m122*MHL**2*sb**3*sba**2)/vev**2 - (48*cb**3*m122*MHp**2*sb**3*sba**2)/vev**2 + (16*m122**2*sb**4*sba**2)/vev**2 + (MHH**4*sb**4*sba**2)/vev**2 - (2*cb**2*cba**2*MHH**4*sb**4*sba**2)/vev**2 + (cb**4*cba**4*MHH**4*sb**4*sba**2)/vev**2 + (2*cb**2*cba**2*MHH**2*MHL**2*sb**4*sba**2)/vev**2 - (2*cb**4*cba**4*MHH**2*MHL**2*sb**4*sba**2)/vev**2 + (cb**4*cba**4*MHL**4*sb**4*sba**2)/vev**2 + (8*cb**2*MHH**2*MHp**2*sb**4*sba**2)/vev**2 - 
(12*cb**4*cba**2*MHH**2*MHp**2*sb**4*sba**2)/vev**2 + (4*cb**2*MHL**2*MHp**2*sb**4*sba**2)/vev**2 + (12*cb**4*cba**2*MHL**2*MHp**2*sb**4*sba**2)/vev**2 + (24*cb**4*MHp**4*sb**4*sba**2)/vev**2 - (4*m122*MHH**2*sb**5*sba**2)/(cb*vev**2) + (12*cb*cba**2*m122*MHH**2*sb**5*sba**2)/vev**2 - (12*cb*cba**2*m122*MHL**2*sb**5*sba**2)/vev**2 - (32*cb*m122*MHp**2*sb**5*sba**2)/vev**2 + (4*m122**2*sb**6*sba**2)/(cb**2*vev**2) - (4*cba**2*MHH**4*sb**6*sba**2)/vev**2 + (4*cb**2*cba**4*MHH**4*sb**6*sba**2)/vev**2 + (4*cba**2*MHH**2*MHL**2*sb**6*sba**2)/vev**2 - (8*cb**2*cba**4*MHH**2*MHL**2*sb**6*sba**2)/vev**2 + (4*cb**2*cba**4*MHL**4*sb**6*sba**2)/vev**2 + (4*MHH**2*MHp**2*sb**6*sba**2)/vev**2 - (12*cb**2*cba**2*MHH**2*MHp**2*sb**6*sba**2)/vev**2 + (12*cb**2*cba**2*MHL**2*MHp**2*sb**6*sba**2)/vev**2 + (16*cb**2*MHp**4*sb**6*sba**2)/vev**2 + (8*cba**2*m122*MHH**2*sb**7*sba**2)/(cb*vev**2) - (8*cba**2*m122*MHL**2*sb**7*sba**2)/(cb*vev**2) - (8*m122*MHp**2*sb**7*sba**2)/(cb*vev**2) - (2*cba**2*MHH**4*sb**8*sba**2)/(cb**2*vev**2) + (5*cba**4*MHH**4*sb**8*sba**2)/vev**2 + (2*cba**2*MHH**2*MHL**2*sb**8*sba**2)/(cb**2*vev**2) - (10*cba**4*MHH**2*MHL**2*sb**8*sba**2)/vev**2 + (5*cba**4*MHL**4*sb**8*sba**2)/vev**2 - (4*cba**2*MHH**2*MHp**2*sb**8*sba**2)/vev**2 + (4*cba**2*MHL**2*MHp**2*sb**8*sba**2)/vev**2 + (4*MHp**4*sb**8*sba**2)/vev**2 + (2*cba**2*m122*MHH**2*sb**9*sba**2)/(cb**3*vev**2) - (2*cba**2*m122*MHL**2*sb**9*sba**2)/(cb**3*vev**2) + (2*cba**4*MHH**4*sb**10*sba**2)/(cb**2*vev**2) - (4*cba**4*MHH**2*MHL**2*sb**10*sba**2)/(cb**2*vev**2) + (2*cba**4*MHL**4*sb**10*sba**2)/(cb**2*vev**2) + (2*cb**6*cba*m122*MHH**2*sba**3)/vev**2 - (2*cb**6*cba*m122*MHL**2*sba**3)/vev**2 - (2*cb**5*cba*MHH**2*MHL**2*sb*sba**3)/vev**2 + (2*cb**5*cba*MHL**4*sb*sba**3)/vev**2 + (4*cb**5*cba*MHH**2*MHp**2*sb**3*sba**3)/vev**2 - (4*cb**5*cba*MHL**2*MHp**2*sb**3*sba**3)/vev**2 - (12*cb**2*cba*m122*MHH**2*sb**4*sba**3)/vev**2 + (12*cb**2*cba*m122*MHL**2*sb**4*sba**3)/vev**2 + 
(4*cb*cba*MHH**4*sb**5*sba**3)/vev**2 - (4*cb**3*cba**3*MHH**4*sb**5*sba**3)/vev**2 - (2*cb*cba*MHH**2*MHL**2*sb**5*sba**3)/vev**2 + (8*cb**3*cba**3*MHH**2*MHL**2*sb**5*sba**3)/vev**2 - (2*cb*cba*MHL**4*sb**5*sba**3)/vev**2 - (4*cb**3*cba**3*MHL**4*sb**5*sba**3)/vev**2 + (12*cb**3*cba*MHH**2*MHp**2*sb**5*sba**3)/vev**2 - (12*cb**3*cba*MHL**2*MHp**2*sb**5*sba**3)/vev**2 - (16*cba*m122*MHH**2*sb**6*sba**3)/vev**2 + (16*cba*m122*MHL**2*sb**6*sba**3)/vev**2 + (4*cba*MHH**4*sb**7*sba**3)/(cb*vev**2) - (8*cb*cba**3*MHH**4*sb**7*sba**3)/vev**2 - (4*cba*MHH**2*MHL**2*sb**7*sba**3)/(cb*vev**2) + (16*cb*cba**3*MHH**2*MHL**2*sb**7*sba**3)/vev**2 - (8*cb*cba**3*MHL**4*sb**7*sba**3)/vev**2 + (12*cb*cba*MHH**2*MHp**2*sb**7*sba**3)/vev**2 - (12*cb*cba*MHL**2*MHp**2*sb**7*sba**3)/vev**2 - (6*cba*m122*MHH**2*sb**8*sba**3)/(cb**2*vev**2) + (6*cba*m122*MHL**2*sb**8*sba**3)/(cb**2*vev**2) - (4*cba**3*MHH**4*sb**9*sba**3)/(cb*vev**2) + (8*cba**3*MHH**2*MHL**2*sb**9*sba**3)/(cb*vev**2) - (4*cba**3*MHL**4*sb**9*sba**3)/(cb*vev**2) + (4*cba*MHH**2*MHp**2*sb**9*sba**3)/(cb*vev**2) - (4*cba*MHL**2*MHp**2*sb**9*sba**3)/(cb*vev**2) + (4*cb**5*m122*MHH**2*sb*sba**4)/vev**2 - (4*cb**5*m122*MHL**2*sb*sba**4)/vev**2 - (2*cb**4*MHH**2*MHL**2*sb**2*sba**4)/vev**2 + (2*cb**4*MHL**4*sb**2*sba**4)/vev**2 - (4*cb**6*MHH**2*MHp**2*sb**2*sba**4)/vev**2 + (4*cb**6*MHL**2*MHp**2*sb**2*sba**4)/vev**2 + (12*cb**3*m122*MHH**2*sb**3*sba**4)/vev**2 - (12*cb**3*m122*MHL**2*sb**3*sba**4)/vev**2 - (2*cb**2*MHH**4*sb**4*sba**4)/vev**2 + (2*cb**4*cba**2*MHH**4*sb**4*sba**4)/vev**2 - (4*cb**4*cba**2*MHH**2*MHL**2*sb**4*sba**4)/vev**2 + (2*cb**2*MHL**4*sb**4*sba**4)/vev**2 + (2*cb**4*cba**2*MHL**4*sb**4*sba**4)/vev**2 - (12*cb**4*MHH**2*MHp**2*sb**4*sba**4)/vev**2 + (12*cb**4*MHL**2*MHp**2*sb**4*sba**4)/vev**2 + (12*cb*m122*MHH**2*sb**5*sba**4)/vev**2 - (12*cb*m122*MHL**2*sb**5*sba**4)/vev**2 - (2*MHH**4*sb**6*sba**4)/vev**2 + (5*cb**2*cba**2*MHH**4*sb**6*sba**4)/vev**2 + (2*MHH**2*MHL**2*sb**6*sba**4)/vev**2 - 
(10*cb**2*cba**2*MHH**2*MHL**2*sb**6*sba**4)/vev**2 + (5*cb**2*cba**2*MHL**4*sb**6*sba**4)/vev**2 - (12*cb**2*MHH**2*MHp**2*sb**6*sba**4)/vev**2 + (12*cb**2*MHL**2*MHp**2*sb**6*sba**4)/vev**2 + (4*m122*MHH**2*sb**7*sba**4)/(cb*vev**2) - (4*m122*MHL**2*sb**7*sba**4)/(cb*vev**2) + (4*cba**2*MHH**4*sb**8*sba**4)/vev**2 - (8*cba**2*MHH**2*MHL**2*sb**8*sba**4)/vev**2 + (4*cba**2*MHL**4*sb**8*sba**4)/vev**2 - (4*MHH**2*MHp**2*sb**8*sba**4)/vev**2 + (4*MHL**2*MHp**2*sb**8*sba**4)/vev**2 + (cba**2*MHH**4*sb**10*sba**4)/(cb**2*vev**2) - (2*cba**2*MHH**2*MHL**2*sb**10*sba**4)/(cb**2*vev**2) + (cba**2*MHL**4*sb**10*sba**4)/(cb**2*vev**2) - (2*cb**3*cba*MHH**4*sb**5*sba**5)/vev**2 + (4*cb**3*cba*MHH**2*MHL**2*sb**5*sba**5)/vev**2 - (2*cb**3*cba*MHL**4*sb**5*sba**5)/vev**2 - (4*cb*cba*MHH**4*sb**7*sba**5)/vev**2 + (8*cb*cba*MHH**2*MHL**2*sb**7*sba**5)/vev**2 - (4*cb*cba*MHL**4*sb**7*sba**5)/vev**2 - (2*cba*MHH**4*sb**9*sba**5)/(cb*vev**2) + (4*cba*MHH**2*MHL**2*sb**9*sba**5)/(cb*vev**2) - (2*cba*MHL**4*sb**9*sba**5)/(cb*vev**2) + (cb**4*MHH**4*sb**4*sba**6)/vev**2 - (2*cb**4*MHH**2*MHL**2*sb**4*sba**6)/vev**2 + (cb**4*MHL**4*sb**4*sba**6)/vev**2 + (2*cb**2*MHH**4*sb**6*sba**6)/vev**2 - (4*cb**2*MHH**2*MHL**2*sb**6*sba**6)/vev**2 + (2*cb**2*MHL**4*sb**6*sba**6)/vev**2 + (MHH**4*sb**8*sba**6)/vev**2 - (2*MHH**2*MHL**2*sb**8*sba**6)/vev**2 + (MHL**4*sb**8*sba**6)/vev**2)*cmath.sqrt(MHL**4 - 4*MHL**2*MHp**2))/(16.*cmath.pi*abs(MHL)**3)',
(P.H__minus__,P.W__plus__):'((-(cb**4*cba**2*ee**2*MHL**2)/(2.*sw**2) - (cb**4*cba**2*ee**2*MHp**2)/(2.*sw**2) + (cb**4*cba**2*ee**2*MHL**4)/(4.*MW**2*sw**2) - (cb**4*cba**2*ee**2*MHL**2*MHp**2)/(2.*MW**2*sw**2) + (cb**4*cba**2*ee**2*MHp**4)/(4.*MW**2*sw**2) + (cb**4*cba**2*ee**2*MW**2)/(4.*sw**2) - (cb**2*cba**2*ee**2*MHL**2*sb**2)/sw**2 - (cb**2*cba**2*ee**2*MHp**2*sb**2)/sw**2 + (cb**2*cba**2*ee**2*MHL**4*sb**2)/(2.*MW**2*sw**2) - (cb**2*cba**2*ee**2*MHL**2*MHp**2*sb**2)/(MW**2*sw**2) + (cb**2*cba**2*ee**2*MHp**4*sb**2)/(2.*MW**2*sw**2) + (cb**2*cba**2*ee**2*MW**2*sb**2)/(2.*sw**2) - (cba**2*ee**2*MHL**2*sb**4)/(2.*sw**2) - (cba**2*ee**2*MHp**2*sb**4)/(2.*sw**2) + (cba**2*ee**2*MHL**4*sb**4)/(4.*MW**2*sw**2) - (cba**2*ee**2*MHL**2*MHp**2*sb**4)/(2.*MW**2*sw**2) + (cba**2*ee**2*MHp**4*sb**4)/(4.*MW**2*sw**2) + (cba**2*ee**2*MW**2*sb**4)/(4.*sw**2))*cmath.sqrt(MHL**4 - 2*MHL**2*MHp**2 + MHp**4 - 2*MHL**2*MW**2 - 2*MHp**2*MW**2 + MW**4))/(16.*cmath.pi*abs(MHL)**3)',
(P.t,P.t__tilde__):'(((3*cb**2*cba**2*MHL**2*yt**2)/sb**2 - (12*cb**2*cba**2*MT**2*yt**2)/sb**2 + (6*cb*cba*MHL**2*sba*yt**2)/sb - (24*cb*cba*MT**2*sba*yt**2)/sb + 3*MHL**2*sba**2*yt**2 - 12*MT**2*sba**2*yt**2)*cmath.sqrt(MHL**4 - 4*MHL**2*MT**2))/(16.*cmath.pi*abs(MHL)**3)',
(P.ta__minus__,P.ta__plus__):'(((cb**2*cba**2*MHL**2*ytau**2)/sb**2 - (4*cb**2*cba**2*MTA**2*ytau**2)/sb**2 + (2*cb*cba*MHL**2*sba*ytau**2)/sb - (8*cb*cba*MTA**2*sba*ytau**2)/sb + MHL**2*sba**2*ytau**2 - 4*MTA**2*sba**2*ytau**2)*cmath.sqrt(MHL**4 - 4*MHL**2*MTA**2))/(16.*cmath.pi*abs(MHL)**3)',
(P.W__minus__,P.W__plus__):'(((3*cb**4*ee**4*sba**2*vev**2)/(4.*sw**4) + (cb**4*ee**4*MHL**4*sba**2*vev**2)/(16.*MW**4*sw**4) - (cb**4*ee**4*MHL**2*sba**2*vev**2)/(4.*MW**2*sw**4) + (3*cb**2*ee**4*sb**2*sba**2*vev**2)/(2.*sw**4) + (cb**2*ee**4*MHL**4*sb**2*sba**2*vev**2)/(8.*MW**4*sw**4) - (cb**2*ee**4*MHL**2*sb**2*sba**2*vev**2)/(2.*MW**2*sw**4) + (3*ee**4*sb**4*sba**2*vev**2)/(4.*sw**4) + (ee**4*MHL**4*sb**4*sba**2*vev**2)/(16.*MW**4*sw**4) - (ee**4*MHL**2*sb**4*sba**2*vev**2)/(4.*MW**2*sw**4))*cmath.sqrt(MHL**4 - 4*MHL**2*MW**2))/(16.*cmath.pi*abs(MHL)**3)',
(P.Z,P.Z):'(((9*cb**4*ee**4*sba**2*vev**2)/2. + (3*cb**4*ee**4*MHL**4*sba**2*vev**2)/(8.*MZ**4) - (3*cb**4*ee**4*MHL**2*sba**2*vev**2)/(2.*MZ**2) + 9*cb**2*ee**4*sb**2*sba**2*vev**2 + (3*cb**2*ee**4*MHL**4*sb**2*sba**2*vev**2)/(4.*MZ**4) - (3*cb**2*ee**4*MHL**2*sb**2*sba**2*vev**2)/MZ**2 + (9*ee**4*sb**4*sba**2*vev**2)/2. + (3*ee**4*MHL**4*sb**4*sba**2*vev**2)/(8.*MZ**4) - (3*ee**4*MHL**2*sb**4*sba**2*vev**2)/(2.*MZ**2) + (3*cb**4*cw**4*ee**4*sba**2*vev**2)/(4.*sw**4) + (cb**4*cw**4*ee**4*MHL**4*sba**2*vev**2)/(16.*MZ**4*sw**4) - (cb**4*cw**4*ee**4*MHL**2*sba**2*vev**2)/(4.*MZ**2*sw**4) + (3*cb**2*cw**4*ee**4*sb**2*sba**2*vev**2)/(2.*sw**4) + (cb**2*cw**4*ee**4*MHL**4*sb**2*sba**2*vev**2)/(8.*MZ**4*sw**4) - (cb**2*cw**4*ee**4*MHL**2*sb**2*sba**2*vev**2)/(2.*MZ**2*sw**4) + (3*cw**4*ee**4*sb**4*sba**2*vev**2)/(4.*sw**4) + (cw**4*ee**4*MHL**4*sb**4*sba**2*vev**2)/(16.*MZ**4*sw**4) - (cw**4*ee**4*MHL**2*sb**4*sba**2*vev**2)/(4.*MZ**2*sw**4) + (3*cb**4*cw**2*ee**4*sba**2*vev**2)/sw**2 + (cb**4*cw**2*ee**4*MHL**4*sba**2*vev**2)/(4.*MZ**4*sw**2) - (cb**4*cw**2*ee**4*MHL**2*sba**2*vev**2)/(MZ**2*sw**2) + (6*cb**2*cw**2*ee**4*sb**2*sba**2*vev**2)/sw**2 + (cb**2*cw**2*ee**4*MHL**4*sb**2*sba**2*vev**2)/(2.*MZ**4*sw**2) - (2*cb**2*cw**2*ee**4*MHL**2*sb**2*sba**2*vev**2)/(MZ**2*sw**2) + (3*cw**2*ee**4*sb**4*sba**2*vev**2)/sw**2 + (cw**2*ee**4*MHL**4*sb**4*sba**2*vev**2)/(4.*MZ**4*sw**2) - (cw**2*ee**4*MHL**2*sb**4*sba**2*vev**2)/(MZ**2*sw**2) + (3*cb**4*ee**4*sba**2*sw**2*vev**2)/cw**2 + (cb**4*ee**4*MHL**4*sba**2*sw**2*vev**2)/(4.*cw**2*MZ**4) - (cb**4*ee**4*MHL**2*sba**2*sw**2*vev**2)/(cw**2*MZ**2) + (6*cb**2*ee**4*sb**2*sba**2*sw**2*vev**2)/cw**2 + (cb**2*ee**4*MHL**4*sb**2*sba**2*sw**2*vev**2)/(2.*cw**2*MZ**4) - (2*cb**2*ee**4*MHL**2*sb**2*sba**2*sw**2*vev**2)/(cw**2*MZ**2) + (3*ee**4*sb**4*sba**2*sw**2*vev**2)/cw**2 + (ee**4*MHL**4*sb**4*sba**2*sw**2*vev**2)/(4.*cw**2*MZ**4) - (ee**4*MHL**2*sb**4*sba**2*sw**2*vev**2)/(cw**2*MZ**2) + 
(3*cb**4*ee**4*sba**2*sw**4*vev**2)/(4.*cw**4) + (cb**4*ee**4*MHL**4*sba**2*sw**4*vev**2)/(16.*cw**4*MZ**4) - (cb**4*ee**4*MHL**2*sba**2*sw**4*vev**2)/(4.*cw**4*MZ**2) + (3*cb**2*ee**4*sb**2*sba**2*sw**4*vev**2)/(2.*cw**4) + (cb**2*ee**4*MHL**4*sb**2*sba**2*sw**4*vev**2)/(8.*cw**4*MZ**4) - (cb**2*ee**4*MHL**2*sb**2*sba**2*sw**4*vev**2)/(2.*cw**4*MZ**2) + (3*ee**4*sb**4*sba**2*sw**4*vev**2)/(4.*cw**4) + (ee**4*MHL**4*sb**4*sba**2*sw**4*vev**2)/(16.*cw**4*MZ**4) - (ee**4*MHL**2*sb**4*sba**2*sw**4*vev**2)/(4.*cw**4*MZ**2))*cmath.sqrt(MHL**4 - 4*MHL**2*MZ**2))/(32.*cmath.pi*abs(MHL)**3)'})
# Two-body partial decay widths of the charged Higgs boson H+.
# Auto-generated model code (UFO format): each dictionary value is a STRING
# holding the analytic expression for Gamma(H+ -> final state), written in
# terms of external model parameters (masses MHA/MHH/MHL/MHp/MB/MT/MTA/MW,
# mixing factors cb/sb/cba/sba, gauge coupling ee, sw, Yukawa couplings,
# CKM-like I*a33 couplings) and evaluated elsewhere with those names in scope.
# Keys are tuples of the outgoing particles (objects from the particle module P).
# Every channel here is normalised by 1/(16*pi*|MHp|**3); the cmath.sqrt(...)
# factors are the two-body phase-space (Kallen) functions of the three masses.
# NOTE(review): the (vt, ta+) channel carries no sqrt — presumably the neutrino
# is treated as massless so the Kallen function reduces to (MHp**2 - MTA**2)**2;
# confirm against the generator output.
Decay_H__plus__ = Decay(name = 'Decay_H__plus__',
particle = P.H__plus__,
partial_widths = {(P.HA,P.W__plus__):'((-(cb**4*ee**2*MHA**2)/(2.*sw**2) - (cb**4*ee**2*MHp**2)/(2.*sw**2) + (cb**4*ee**2*MHA**4)/(4.*MW**2*sw**2) - (cb**4*ee**2*MHA**2*MHp**2)/(2.*MW**2*sw**2) + (cb**4*ee**2*MHp**4)/(4.*MW**2*sw**2) + (cb**4*ee**2*MW**2)/(4.*sw**2) - (cb**2*ee**2*MHA**2*sb**2)/sw**2 - (cb**2*ee**2*MHp**2*sb**2)/sw**2 + (cb**2*ee**2*MHA**4*sb**2)/(2.*MW**2*sw**2) - (cb**2*ee**2*MHA**2*MHp**2*sb**2)/(MW**2*sw**2) + (cb**2*ee**2*MHp**4*sb**2)/(2.*MW**2*sw**2) + (cb**2*ee**2*MW**2*sb**2)/(2.*sw**2) - (ee**2*MHA**2*sb**4)/(2.*sw**2) - (ee**2*MHp**2*sb**4)/(2.*sw**2) + (ee**2*MHA**4*sb**4)/(4.*MW**2*sw**2) - (ee**2*MHA**2*MHp**2*sb**4)/(2.*MW**2*sw**2) + (ee**2*MHp**4*sb**4)/(4.*MW**2*sw**2) + (ee**2*MW**2*sb**4)/(4.*sw**2))*cmath.sqrt(MHA**4 - 2*MHA**2*MHp**2 + MHp**4 - 2*MHA**2*MW**2 - 2*MHp**2*MW**2 + MW**4))/(16.*cmath.pi*abs(MHp)**3)',
(P.HH,P.W__plus__):'((-(cb**4*ee**2*MHH**2*sba**2)/(2.*sw**2) - (cb**4*ee**2*MHp**2*sba**2)/(2.*sw**2) + (cb**4*ee**2*MHH**4*sba**2)/(4.*MW**2*sw**2) - (cb**4*ee**2*MHH**2*MHp**2*sba**2)/(2.*MW**2*sw**2) + (cb**4*ee**2*MHp**4*sba**2)/(4.*MW**2*sw**2) + (cb**4*ee**2*MW**2*sba**2)/(4.*sw**2) - (cb**2*ee**2*MHH**2*sb**2*sba**2)/sw**2 - (cb**2*ee**2*MHp**2*sb**2*sba**2)/sw**2 + (cb**2*ee**2*MHH**4*sb**2*sba**2)/(2.*MW**2*sw**2) - (cb**2*ee**2*MHH**2*MHp**2*sb**2*sba**2)/(MW**2*sw**2) + (cb**2*ee**2*MHp**4*sb**2*sba**2)/(2.*MW**2*sw**2) + (cb**2*ee**2*MW**2*sb**2*sba**2)/(2.*sw**2) - (ee**2*MHH**2*sb**4*sba**2)/(2.*sw**2) - (ee**2*MHp**2*sb**4*sba**2)/(2.*sw**2) + (ee**2*MHH**4*sb**4*sba**2)/(4.*MW**2*sw**2) - (ee**2*MHH**2*MHp**2*sb**4*sba**2)/(2.*MW**2*sw**2) + (ee**2*MHp**4*sb**4*sba**2)/(4.*MW**2*sw**2) + (ee**2*MW**2*sb**4*sba**2)/(4.*sw**2))*cmath.sqrt(MHH**4 - 2*MHH**2*MHp**2 + MHp**4 - 2*MHH**2*MW**2 - 2*MHp**2*MW**2 + MW**4))/(16.*cmath.pi*abs(MHp)**3)',
(P.HL,P.W__plus__):'((-(cb**4*cba**2*ee**2*MHL**2)/(2.*sw**2) - (cb**4*cba**2*ee**2*MHp**2)/(2.*sw**2) + (cb**4*cba**2*ee**2*MHL**4)/(4.*MW**2*sw**2) - (cb**4*cba**2*ee**2*MHL**2*MHp**2)/(2.*MW**2*sw**2) + (cb**4*cba**2*ee**2*MHp**4)/(4.*MW**2*sw**2) + (cb**4*cba**2*ee**2*MW**2)/(4.*sw**2) - (cb**2*cba**2*ee**2*MHL**2*sb**2)/sw**2 - (cb**2*cba**2*ee**2*MHp**2*sb**2)/sw**2 + (cb**2*cba**2*ee**2*MHL**4*sb**2)/(2.*MW**2*sw**2) - (cb**2*cba**2*ee**2*MHL**2*MHp**2*sb**2)/(MW**2*sw**2) + (cb**2*cba**2*ee**2*MHp**4*sb**2)/(2.*MW**2*sw**2) + (cb**2*cba**2*ee**2*MW**2*sb**2)/(2.*sw**2) - (cba**2*ee**2*MHL**2*sb**4)/(2.*sw**2) - (cba**2*ee**2*MHp**2*sb**4)/(2.*sw**2) + (cba**2*ee**2*MHL**4*sb**4)/(4.*MW**2*sw**2) - (cba**2*ee**2*MHL**2*MHp**2*sb**4)/(2.*MW**2*sw**2) + (cba**2*ee**2*MHp**4*sb**4)/(4.*MW**2*sw**2) + (cba**2*ee**2*MW**2*sb**4)/(4.*sw**2))*cmath.sqrt(MHL**4 - 2*MHL**2*MHp**2 + MHp**4 - 2*MHL**2*MW**2 - 2*MHp**2*MW**2 + MW**4))/(16.*cmath.pi*abs(MHp)**3)',
(P.t,P.b__tilde__):'((-6*I4a33*MB*MT*complexconjugate(I3a33) - (3*cb**2*I3a33*MB**2*complexconjugate(I3a33))/sb**2 + (3*cb**2*I3a33*MHp**2*complexconjugate(I3a33))/sb**2 - (3*cb**2*I3a33*MT**2*complexconjugate(I3a33))/sb**2 - 6*I3a33*MB*MT*complexconjugate(I4a33) - (3*I4a33*MB**2*sb**2*complexconjugate(I4a33))/cb**2 + (3*I4a33*MHp**2*sb**2*complexconjugate(I4a33))/cb**2 - (3*I4a33*MT**2*sb**2*complexconjugate(I4a33))/cb**2)*cmath.sqrt(MB**4 - 2*MB**2*MHp**2 + MHp**4 - 2*MB**2*MT**2 - 2*MHp**2*MT**2 + MT**4))/(16.*cmath.pi*abs(MHp)**3)',
(P.vt,P.ta__plus__):'((MHp**2 - MTA**2)*((cb**2*MHp**2*ytau**2)/sb**2 - (cb**2*MTA**2*ytau**2)/sb**2))/(16.*cmath.pi*abs(MHp)**3)'})
# Two-body partial decay widths of the top quark.
# Auto-generated model code (UFO format): each value is an expression STRING
# for Gamma(t -> final state), to be evaluated with the model parameters
# (MB, MT, MHp, MW, ee, sw, cb/sb, Yukawa-like I1a33/I2a33 couplings) in scope.
# Both channels are normalised by 1/(96*pi*|MT|**3), and each carries a
# cmath.sqrt(...) two-body phase-space (Kallen) factor of the three masses.
Decay_t = Decay(name = 'Decay_t',
particle = P.t,
partial_widths = {(P.H__plus__,P.b):'((6*I2a33*MB*MT*complexconjugate(I1a33) + (3*I1a33*MB**2*sb**2*complexconjugate(I1a33))/cb**2 - (3*I1a33*MHp**2*sb**2*complexconjugate(I1a33))/cb**2 + (3*I1a33*MT**2*sb**2*complexconjugate(I1a33))/cb**2 + 6*I1a33*MB*MT*complexconjugate(I2a33) + (3*cb**2*I2a33*MB**2*complexconjugate(I2a33))/sb**2 - (3*cb**2*I2a33*MHp**2*complexconjugate(I2a33))/sb**2 + (3*cb**2*I2a33*MT**2*complexconjugate(I2a33))/sb**2)*cmath.sqrt(MB**4 - 2*MB**2*MHp**2 + MHp**4 - 2*MB**2*MT**2 - 2*MHp**2*MT**2 + MT**4))/(96.*cmath.pi*abs(MT)**3)',
(P.W__plus__,P.b):'(((3*ee**2*MB**2)/(2.*sw**2) + (3*ee**2*MT**2)/(2.*sw**2) + (3*ee**2*MB**4)/(2.*MW**2*sw**2) - (3*ee**2*MB**2*MT**2)/(MW**2*sw**2) + (3*ee**2*MT**4)/(2.*MW**2*sw**2) - (3*ee**2*MW**2)/sw**2)*cmath.sqrt(MB**4 - 2*MB**2*MT**2 + MT**4 - 2*MB**2*MW**2 - 2*MT**2*MW**2 + MW**4))/(96.*cmath.pi*abs(MT)**3)'})
# Two-body partial decay widths of the tau lepton.
# Auto-generated model code (UFO format): each value is an expression STRING
# for Gamma(ta- -> final state), evaluated elsewhere with the model parameters
# (MHp, MTA, MW, ee, sw, cb/sb, ytau) in scope.
# Both channels are normalised by 1/(32*pi*|MTA|**3).
# NOTE(review): no cmath.sqrt phase-space factor appears here — presumably the
# neutrino is treated as massless so the Kallen function factorises into the
# mass-difference prefactors (MTA**2 - MHp**2) / (MTA**2 - MW**2); confirm.
Decay_ta__minus__ = Decay(name = 'Decay_ta__minus__',
particle = P.ta__minus__,
partial_widths = {(P.H__minus__,P.vt):'((-MHp**2 + MTA**2)*(-((cb**2*MHp**2*ytau**2)/sb**2) + (cb**2*MTA**2*ytau**2)/sb**2))/(32.*cmath.pi*abs(MTA)**3)',
(P.W__minus__,P.vt):'((MTA**2 - MW**2)*((ee**2*MTA**2)/(2.*sw**2) + (ee**2*MTA**4)/(2.*MW**2*sw**2) - (ee**2*MW**2)/sw**2))/(32.*cmath.pi*abs(MTA)**3)'})
# Two-body partial decay widths of the W+ boson.
# Auto-generated model code (UFO format): each value is an expression STRING
# for Gamma(W+ -> final state), evaluated elsewhere with the model parameters
# in scope (quark/lepton/scalar masses, ee, sw, cb/sb/sba/cba, CKM elements).
# Keys are tuples of outgoing particles from the particle module P.
# All channels are normalised by 1/|MW|**3; quark channels carry CKM factors
# CKMixj*complexconjugate(CKMixj), and channels with massive decay products
# carry a cmath.sqrt(...) two-body phase-space (Kallen) factor.
# Massless-fermion channels (light quarks, e/mu leptons) reduce to closed-form
# MW**4 expressions with no sqrt factor.
Decay_W__plus__ = Decay(name = 'Decay_W__plus__',
particle = P.W__plus__,
partial_widths = {(P.c,P.d__tilde__):'(CKM2x1*ee**2*MW**4*complexconjugate(CKM2x1))/(16.*cmath.pi*sw**2*abs(MW)**3)',
(P.c,P.s__tilde__):'(CKM2x2*ee**2*MW**4*complexconjugate(CKM2x2))/(16.*cmath.pi*sw**2*abs(MW)**3)',
(P.HA,P.H__plus__):'((-(cb**4*ee**2*MHA**2)/(2.*sw**2) - (cb**4*ee**2*MHp**2)/(2.*sw**2) + (cb**4*ee**2*MHA**4)/(4.*MW**2*sw**2) - (cb**4*ee**2*MHA**2*MHp**2)/(2.*MW**2*sw**2) + (cb**4*ee**2*MHp**4)/(4.*MW**2*sw**2) + (cb**4*ee**2*MW**2)/(4.*sw**2) - (cb**2*ee**2*MHA**2*sb**2)/sw**2 - (cb**2*ee**2*MHp**2*sb**2)/sw**2 + (cb**2*ee**2*MHA**4*sb**2)/(2.*MW**2*sw**2) - (cb**2*ee**2*MHA**2*MHp**2*sb**2)/(MW**2*sw**2) + (cb**2*ee**2*MHp**4*sb**2)/(2.*MW**2*sw**2) + (cb**2*ee**2*MW**2*sb**2)/(2.*sw**2) - (ee**2*MHA**2*sb**4)/(2.*sw**2) - (ee**2*MHp**2*sb**4)/(2.*sw**2) + (ee**2*MHA**4*sb**4)/(4.*MW**2*sw**2) - (ee**2*MHA**2*MHp**2*sb**4)/(2.*MW**2*sw**2) + (ee**2*MHp**4*sb**4)/(4.*MW**2*sw**2) + (ee**2*MW**2*sb**4)/(4.*sw**2))*cmath.sqrt(MHA**4 - 2*MHA**2*MHp**2 + MHp**4 - 2*MHA**2*MW**2 - 2*MHp**2*MW**2 + MW**4))/(48.*cmath.pi*abs(MW)**3)',
(P.HH,P.H__plus__):'((-(cb**4*ee**2*MHH**2*sba**2)/(2.*sw**2) - (cb**4*ee**2*MHp**2*sba**2)/(2.*sw**2) + (cb**4*ee**2*MHH**4*sba**2)/(4.*MW**2*sw**2) - (cb**4*ee**2*MHH**2*MHp**2*sba**2)/(2.*MW**2*sw**2) + (cb**4*ee**2*MHp**4*sba**2)/(4.*MW**2*sw**2) + (cb**4*ee**2*MW**2*sba**2)/(4.*sw**2) - (cb**2*ee**2*MHH**2*sb**2*sba**2)/sw**2 - (cb**2*ee**2*MHp**2*sb**2*sba**2)/sw**2 + (cb**2*ee**2*MHH**4*sb**2*sba**2)/(2.*MW**2*sw**2) - (cb**2*ee**2*MHH**2*MHp**2*sb**2*sba**2)/(MW**2*sw**2) + (cb**2*ee**2*MHp**4*sb**2*sba**2)/(2.*MW**2*sw**2) + (cb**2*ee**2*MW**2*sb**2*sba**2)/(2.*sw**2) - (ee**2*MHH**2*sb**4*sba**2)/(2.*sw**2) - (ee**2*MHp**2*sb**4*sba**2)/(2.*sw**2) + (ee**2*MHH**4*sb**4*sba**2)/(4.*MW**2*sw**2) - (ee**2*MHH**2*MHp**2*sb**4*sba**2)/(2.*MW**2*sw**2) + (ee**2*MHp**4*sb**4*sba**2)/(4.*MW**2*sw**2) + (ee**2*MW**2*sb**4*sba**2)/(4.*sw**2))*cmath.sqrt(MHH**4 - 2*MHH**2*MHp**2 + MHp**4 - 2*MHH**2*MW**2 - 2*MHp**2*MW**2 + MW**4))/(48.*cmath.pi*abs(MW)**3)',
(P.HL,P.H__plus__):'((-(cb**4*cba**2*ee**2*MHL**2)/(2.*sw**2) - (cb**4*cba**2*ee**2*MHp**2)/(2.*sw**2) + (cb**4*cba**2*ee**2*MHL**4)/(4.*MW**2*sw**2) - (cb**4*cba**2*ee**2*MHL**2*MHp**2)/(2.*MW**2*sw**2) + (cb**4*cba**2*ee**2*MHp**4)/(4.*MW**2*sw**2) + (cb**4*cba**2*ee**2*MW**2)/(4.*sw**2) - (cb**2*cba**2*ee**2*MHL**2*sb**2)/sw**2 - (cb**2*cba**2*ee**2*MHp**2*sb**2)/sw**2 + (cb**2*cba**2*ee**2*MHL**4*sb**2)/(2.*MW**2*sw**2) - (cb**2*cba**2*ee**2*MHL**2*MHp**2*sb**2)/(MW**2*sw**2) + (cb**2*cba**2*ee**2*MHp**4*sb**2)/(2.*MW**2*sw**2) + (cb**2*cba**2*ee**2*MW**2*sb**2)/(2.*sw**2) - (cba**2*ee**2*MHL**2*sb**4)/(2.*sw**2) - (cba**2*ee**2*MHp**2*sb**4)/(2.*sw**2) + (cba**2*ee**2*MHL**4*sb**4)/(4.*MW**2*sw**2) - (cba**2*ee**2*MHL**2*MHp**2*sb**4)/(2.*MW**2*sw**2) + (cba**2*ee**2*MHp**4*sb**4)/(4.*MW**2*sw**2) + (cba**2*ee**2*MW**2*sb**4)/(4.*sw**2))*cmath.sqrt(MHL**4 - 2*MHL**2*MHp**2 + MHp**4 - 2*MHL**2*MW**2 - 2*MHp**2*MW**2 + MW**4))/(48.*cmath.pi*abs(MW)**3)',
(P.t,P.b__tilde__):'(((-3*ee**2*MB**2)/(2.*sw**2) - (3*ee**2*MT**2)/(2.*sw**2) - (3*ee**2*MB**4)/(2.*MW**2*sw**2) + (3*ee**2*MB**2*MT**2)/(MW**2*sw**2) - (3*ee**2*MT**4)/(2.*MW**2*sw**2) + (3*ee**2*MW**2)/sw**2)*cmath.sqrt(MB**4 - 2*MB**2*MT**2 + MT**4 - 2*MB**2*MW**2 - 2*MT**2*MW**2 + MW**4))/(48.*cmath.pi*abs(MW)**3)',
(P.u,P.d__tilde__):'(CKM1x1*ee**2*MW**4*complexconjugate(CKM1x1))/(16.*cmath.pi*sw**2*abs(MW)**3)',
(P.u,P.s__tilde__):'(CKM1x2*ee**2*MW**4*complexconjugate(CKM1x2))/(16.*cmath.pi*sw**2*abs(MW)**3)',
(P.ve,P.e__plus__):'(ee**2*MW**4)/(48.*cmath.pi*sw**2*abs(MW)**3)',
(P.vm,P.mu__plus__):'(ee**2*MW**4)/(48.*cmath.pi*sw**2*abs(MW)**3)',
(P.vt,P.ta__plus__):'((-MTA**2 + MW**2)*(-(ee**2*MTA**2)/(2.*sw**2) - (ee**2*MTA**4)/(2.*MW**2*sw**2) + (ee**2*MW**2)/sw**2))/(48.*cmath.pi*abs(MW)**3)'})
Decay_Z = Decay(name = 'Decay_Z',
particle = P.Z,
partial_widths = {(P.b,P.b__tilde__):'((-7*ee**2*MB**2 + ee**2*MZ**2 - (3*cw**2*ee**2*MB**2)/(2.*sw**2) + (3*cw**2*ee**2*MZ**2)/(2.*sw**2) - (17*ee**2*MB**2*sw**2)/(6.*cw**2) + (5*ee**2*MZ**2*sw**2)/(6.*cw**2))*cmath.sqrt(-4*MB**2*MZ**2 + MZ**4))/(48.*cmath.pi*abs(MZ)**3)',
(P.c,P.c__tilde__):'(MZ**2*(-(ee**2*MZ**2) + (3*cw**2*ee**2*MZ**2)/(2.*sw**2) + (17*ee**2*MZ**2*sw**2)/(6.*cw**2)))/(48.*cmath.pi*abs(MZ)**3)',
(P.d,P.d__tilde__):'(MZ**2*(ee**2*MZ**2 + (3*cw**2*ee**2*MZ**2)/(2.*sw**2) + (5*ee**2*MZ**2*sw**2)/(6.*cw**2)))/(48.*cmath.pi*abs(MZ)**3)',
(P.e__minus__,P.e__plus__):'(MZ**2*(-(ee**2*MZ**2) + (cw**2*ee**2*MZ**2)/(2.*sw**2) + (5*ee**2*MZ**2*sw**2)/(2.*cw**2)))/(48.*cmath.pi*abs(MZ)**3)',
(P.HA,P.HH):'((-(cb**4*ee**2*MHA**2*sba**2) - cb**4*ee**2*MHH**2*sba**2 + (cb**4*ee**2*MHA**4*sba**2)/(2.*MZ**2) - (cb**4*ee**2*MHA**2*MHH**2*sba**2)/MZ**2 + (cb**4*ee**2*MHH**4*sba**2)/(2.*MZ**2) + (cb**4*ee**2*MZ**2*sba**2)/2. - 2*cb**2*ee**2*MHA**2*sb**2*sba**2 - 2*cb**2*ee**2*MHH**2*sb**2*sba**2 + (cb**2*ee**2*MHA**4*sb**2*sba**2)/MZ**2 - (2*cb**2*ee**2*MHA**2*MHH**2*sb**2*sba**2)/MZ**2 + (cb**2*ee**2*MHH**4*sb**2*sba**2)/MZ**2 + cb**2*ee**2*MZ**2*sb**2*sba**2 - ee**2*MHA**2*sb**4*sba**2 - ee**2*MHH**2*sb**4*sba**2 + (ee**2*MHA**4*sb**4*sba**2)/(2.*MZ**2) - (ee**2*MHA**2*MHH**2*sb**4*sba**2)/MZ**2 + (ee**2*MHH**4*sb**4*sba**2)/(2.*MZ**2) + (ee**2*MZ**2*sb**4*sba**2)/2. - (cb**4*cw**2*ee**2*MHA**2*sba**2)/(2.*sw**2) - (cb**4*cw**2*ee**2*MHH**2*sba**2)/(2.*sw**2) + (cb**4*cw**2*ee**2*MHA**4*sba**2)/(4.*MZ**2*sw**2) - (cb**4*cw**2*ee**2*MHA**2*MHH**2*sba**2)/(2.*MZ**2*sw**2) + (cb**4*cw**2*ee**2*MHH**4*sba**2)/(4.*MZ**2*sw**2) + (cb**4*cw**2*ee**2*MZ**2*sba**2)/(4.*sw**2) - (cb**2*cw**2*ee**2*MHA**2*sb**2*sba**2)/sw**2 - (cb**2*cw**2*ee**2*MHH**2*sb**2*sba**2)/sw**2 + (cb**2*cw**2*ee**2*MHA**4*sb**2*sba**2)/(2.*MZ**2*sw**2) - (cb**2*cw**2*ee**2*MHA**2*MHH**2*sb**2*sba**2)/(MZ**2*sw**2) + (cb**2*cw**2*ee**2*MHH**4*sb**2*sba**2)/(2.*MZ**2*sw**2) + (cb**2*cw**2*ee**2*MZ**2*sb**2*sba**2)/(2.*sw**2) - (cw**2*ee**2*MHA**2*sb**4*sba**2)/(2.*sw**2) - (cw**2*ee**2*MHH**2*sb**4*sba**2)/(2.*sw**2) + (cw**2*ee**2*MHA**4*sb**4*sba**2)/(4.*MZ**2*sw**2) - (cw**2*ee**2*MHA**2*MHH**2*sb**4*sba**2)/(2.*MZ**2*sw**2) + (cw**2*ee**2*MHH**4*sb**4*sba**2)/(4.*MZ**2*sw**2) + (cw**2*ee**2*MZ**2*sb**4*sba**2)/(4.*sw**2) - (cb**4*ee**2*MHA**2*sba**2*sw**2)/(2.*cw**2) - (cb**4*ee**2*MHH**2*sba**2*sw**2)/(2.*cw**2) + (cb**4*ee**2*MHA**4*sba**2*sw**2)/(4.*cw**2*MZ**2) - (cb**4*ee**2*MHA**2*MHH**2*sba**2*sw**2)/(2.*cw**2*MZ**2) + (cb**4*ee**2*MHH**4*sba**2*sw**2)/(4.*cw**2*MZ**2) + (cb**4*ee**2*MZ**2*sba**2*sw**2)/(4.*cw**2) - (cb**2*ee**2*MHA**2*sb**2*sba**2*sw**2)/cw**2 - 
(cb**2*ee**2*MHH**2*sb**2*sba**2*sw**2)/cw**2 + (cb**2*ee**2*MHA**4*sb**2*sba**2*sw**2)/(2.*cw**2*MZ**2) - (cb**2*ee**2*MHA**2*MHH**2*sb**2*sba**2*sw**2)/(cw**2*MZ**2) + (cb**2*ee**2*MHH**4*sb**2*sba**2*sw**2)/(2.*cw**2*MZ**2) + (cb**2*ee**2*MZ**2*sb**2*sba**2*sw**2)/(2.*cw**2) - (ee**2*MHA**2*sb**4*sba**2*sw**2)/(2.*cw**2) - (ee**2*MHH**2*sb**4*sba**2*sw**2)/(2.*cw**2) + (ee**2*MHA**4*sb**4*sba**2*sw**2)/(4.*cw**2*MZ**2) - (ee**2*MHA**2*MHH**2*sb**4*sba**2*sw**2)/(2.*cw**2*MZ**2) + (ee**2*MHH**4*sb**4*sba**2*sw**2)/(4.*cw**2*MZ**2) + (ee**2*MZ**2*sb**4*sba**2*sw**2)/(4.*cw**2))*cmath.sqrt(MHA**4 - 2*MHA**2*MHH**2 + MHH**4 - 2*MHA**2*MZ**2 - 2*MHH**2*MZ**2 + MZ**4))/(48.*cmath.pi*abs(MZ)**3)',
(P.HA,P.HL):'((-(cb**4*cba**2*ee**2*MHA**2) - cb**4*cba**2*ee**2*MHL**2 + (cb**4*cba**2*ee**2*MHA**4)/(2.*MZ**2) - (cb**4*cba**2*ee**2*MHA**2*MHL**2)/MZ**2 + (cb**4*cba**2*ee**2*MHL**4)/(2.*MZ**2) + (cb**4*cba**2*ee**2*MZ**2)/2. - 2*cb**2*cba**2*ee**2*MHA**2*sb**2 - 2*cb**2*cba**2*ee**2*MHL**2*sb**2 + (cb**2*cba**2*ee**2*MHA**4*sb**2)/MZ**2 - (2*cb**2*cba**2*ee**2*MHA**2*MHL**2*sb**2)/MZ**2 + (cb**2*cba**2*ee**2*MHL**4*sb**2)/MZ**2 + cb**2*cba**2*ee**2*MZ**2*sb**2 - cba**2*ee**2*MHA**2*sb**4 - cba**2*ee**2*MHL**2*sb**4 + (cba**2*ee**2*MHA**4*sb**4)/(2.*MZ**2) - (cba**2*ee**2*MHA**2*MHL**2*sb**4)/MZ**2 + (cba**2*ee**2*MHL**4*sb**4)/(2.*MZ**2) + (cba**2*ee**2*MZ**2*sb**4)/2. - (cb**4*cba**2*cw**2*ee**2*MHA**2)/(2.*sw**2) - (cb**4*cba**2*cw**2*ee**2*MHL**2)/(2.*sw**2) + (cb**4*cba**2*cw**2*ee**2*MHA**4)/(4.*MZ**2*sw**2) - (cb**4*cba**2*cw**2*ee**2*MHA**2*MHL**2)/(2.*MZ**2*sw**2) + (cb**4*cba**2*cw**2*ee**2*MHL**4)/(4.*MZ**2*sw**2) + (cb**4*cba**2*cw**2*ee**2*MZ**2)/(4.*sw**2) - (cb**2*cba**2*cw**2*ee**2*MHA**2*sb**2)/sw**2 - (cb**2*cba**2*cw**2*ee**2*MHL**2*sb**2)/sw**2 + (cb**2*cba**2*cw**2*ee**2*MHA**4*sb**2)/(2.*MZ**2*sw**2) - (cb**2*cba**2*cw**2*ee**2*MHA**2*MHL**2*sb**2)/(MZ**2*sw**2) + (cb**2*cba**2*cw**2*ee**2*MHL**4*sb**2)/(2.*MZ**2*sw**2) + (cb**2*cba**2*cw**2*ee**2*MZ**2*sb**2)/(2.*sw**2) - (cba**2*cw**2*ee**2*MHA**2*sb**4)/(2.*sw**2) - (cba**2*cw**2*ee**2*MHL**2*sb**4)/(2.*sw**2) + (cba**2*cw**2*ee**2*MHA**4*sb**4)/(4.*MZ**2*sw**2) - (cba**2*cw**2*ee**2*MHA**2*MHL**2*sb**4)/(2.*MZ**2*sw**2) + (cba**2*cw**2*ee**2*MHL**4*sb**4)/(4.*MZ**2*sw**2) + (cba**2*cw**2*ee**2*MZ**2*sb**4)/(4.*sw**2) - (cb**4*cba**2*ee**2*MHA**2*sw**2)/(2.*cw**2) - (cb**4*cba**2*ee**2*MHL**2*sw**2)/(2.*cw**2) + (cb**4*cba**2*ee**2*MHA**4*sw**2)/(4.*cw**2*MZ**2) - (cb**4*cba**2*ee**2*MHA**2*MHL**2*sw**2)/(2.*cw**2*MZ**2) + (cb**4*cba**2*ee**2*MHL**4*sw**2)/(4.*cw**2*MZ**2) + (cb**4*cba**2*ee**2*MZ**2*sw**2)/(4.*cw**2) - (cb**2*cba**2*ee**2*MHA**2*sb**2*sw**2)/cw**2 - 
(cb**2*cba**2*ee**2*MHL**2*sb**2*sw**2)/cw**2 + (cb**2*cba**2*ee**2*MHA**4*sb**2*sw**2)/(2.*cw**2*MZ**2) - (cb**2*cba**2*ee**2*MHA**2*MHL**2*sb**2*sw**2)/(cw**2*MZ**2) + (cb**2*cba**2*ee**2*MHL**4*sb**2*sw**2)/(2.*cw**2*MZ**2) + (cb**2*cba**2*ee**2*MZ**2*sb**2*sw**2)/(2.*cw**2) - (cba**2*ee**2*MHA**2*sb**4*sw**2)/(2.*cw**2) - (cba**2*ee**2*MHL**2*sb**4*sw**2)/(2.*cw**2) + (cba**2*ee**2*MHA**4*sb**4*sw**2)/(4.*cw**2*MZ**2) - (cba**2*ee**2*MHA**2*MHL**2*sb**4*sw**2)/(2.*cw**2*MZ**2) + (cba**2*ee**2*MHL**4*sb**4*sw**2)/(4.*cw**2*MZ**2) + (cba**2*ee**2*MZ**2*sb**4*sw**2)/(4.*cw**2))*cmath.sqrt(MHA**4 - 2*MHA**2*MHL**2 + MHL**4 - 2*MHA**2*MZ**2 - 2*MHL**2*MZ**2 + MZ**4))/(48.*cmath.pi*abs(MZ)**3)',
(P.H__minus__,P.H__plus__):'((2*cb**4*ee**2*MHp**2 - (cb**4*ee**2*MZ**2)/2. + 4*cb**2*ee**2*MHp**2*sb**2 - cb**2*ee**2*MZ**2*sb**2 + 2*ee**2*MHp**2*sb**4 - (ee**2*MZ**2*sb**4)/2. - (cb**4*cw**2*ee**2*MHp**2)/sw**2 + (cb**4*cw**2*ee**2*MZ**2)/(4.*sw**2) - (2*cb**2*cw**2*ee**2*MHp**2*sb**2)/sw**2 + (cb**2*cw**2*ee**2*MZ**2*sb**2)/(2.*sw**2) - (cw**2*ee**2*MHp**2*sb**4)/sw**2 + (cw**2*ee**2*MZ**2*sb**4)/(4.*sw**2) - (cb**4*ee**2*MHp**2*sw**2)/cw**2 + (cb**4*ee**2*MZ**2*sw**2)/(4.*cw**2) - (2*cb**2*ee**2*MHp**2*sb**2*sw**2)/cw**2 + (cb**2*ee**2*MZ**2*sb**2*sw**2)/(2.*cw**2) - (ee**2*MHp**2*sb**4*sw**2)/cw**2 + (ee**2*MZ**2*sb**4*sw**2)/(4.*cw**2))*cmath.sqrt(-4*MHp**2*MZ**2 + MZ**4))/(48.*cmath.pi*abs(MZ)**3)',
(P.mu__minus__,P.mu__plus__):'(MZ**2*(-(ee**2*MZ**2) + (cw**2*ee**2*MZ**2)/(2.*sw**2) + (5*ee**2*MZ**2*sw**2)/(2.*cw**2)))/(48.*cmath.pi*abs(MZ)**3)',
(P.s,P.s__tilde__):'(MZ**2*(ee**2*MZ**2 + (3*cw**2*ee**2*MZ**2)/(2.*sw**2) + (5*ee**2*MZ**2*sw**2)/(6.*cw**2)))/(48.*cmath.pi*abs(MZ)**3)',
(P.t,P.t__tilde__):'((-11*ee**2*MT**2 - ee**2*MZ**2 - (3*cw**2*ee**2*MT**2)/(2.*sw**2) + (3*cw**2*ee**2*MZ**2)/(2.*sw**2) + (7*ee**2*MT**2*sw**2)/(6.*cw**2) + (17*ee**2*MZ**2*sw**2)/(6.*cw**2))*cmath.sqrt(-4*MT**2*MZ**2 + MZ**4))/(48.*cmath.pi*abs(MZ)**3)',
(P.ta__minus__,P.ta__plus__):'((-5*ee**2*MTA**2 - ee**2*MZ**2 - (cw**2*ee**2*MTA**2)/(2.*sw**2) + (cw**2*ee**2*MZ**2)/(2.*sw**2) + (7*ee**2*MTA**2*sw**2)/(2.*cw**2) + (5*ee**2*MZ**2*sw**2)/(2.*cw**2))*cmath.sqrt(-4*MTA**2*MZ**2 + MZ**4))/(48.*cmath.pi*abs(MZ)**3)',
(P.u,P.u__tilde__):'(MZ**2*(-(ee**2*MZ**2) + (3*cw**2*ee**2*MZ**2)/(2.*sw**2) + (17*ee**2*MZ**2*sw**2)/(6.*cw**2)))/(48.*cmath.pi*abs(MZ)**3)',
(P.ve,P.ve__tilde__):'(MZ**2*(ee**2*MZ**2 + (cw**2*ee**2*MZ**2)/(2.*sw**2) + (ee**2*MZ**2*sw**2)/(2.*cw**2)))/(48.*cmath.pi*abs(MZ)**3)',
(P.vm,P.vm__tilde__):'(MZ**2*(ee**2*MZ**2 + (cw**2*ee**2*MZ**2)/(2.*sw**2) + (ee**2*MZ**2*sw**2)/(2.*cw**2)))/(48.*cmath.pi*abs(MZ)**3)',
(P.vt,P.vt__tilde__):'(MZ**2*(ee**2*MZ**2 + (cw**2*ee**2*MZ**2)/(2.*sw**2) + (ee**2*MZ**2*sw**2)/(2.*cw**2)))/(48.*cmath.pi*abs(MZ)**3)',
(P.W__minus__,P.W__plus__):'(((-12*cw**2*ee**2*MW**2)/sw**2 - (17*cw**2*ee**2*MZ**2)/sw**2 + (4*cw**2*ee**2*MZ**4)/(MW**2*sw**2) + (cw**2*ee**2*MZ**6)/(4.*MW**4*sw**2))*cmath.sqrt(-4*MW**2*MZ**2 + MZ**4))/(48.*cmath.pi*abs(MZ)**3)'})
| 1,337.076923
| 23,997
| 0.497656
| 36,679
| 139,056
| 1.87696
| 0.00458
| 0.124802
| 0.044738
| 0.046946
| 0.984342
| 0.977268
| 0.962713
| 0.942291
| 0.913487
| 0.88461
| 0
| 0.167979
| 0.061414
| 139,056
| 103
| 23,998
| 1,350.058252
| 0.359504
| 0.001122
| 0
| 0
| 0
| 0.770115
| 0.965614
| 0.891616
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.022989
| 0
| 0.022989
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 13
|
916ba4220d31d5cbd15989d19ce09d2d7da80bf0
| 190
|
py
|
Python
|
String/strip-method.py
|
manish1822510059/Python-1000-program
|
d03c1920fe63a7e32ac5bd9a13e2766d7a25756c
|
[
"Apache-2.0"
] | 1
|
2021-03-06T03:33:42.000Z
|
2021-03-06T03:33:42.000Z
|
String/strip-method.py
|
manish1822510059/Python-1000-programs
|
d03c1920fe63a7e32ac5bd9a13e2766d7a25756c
|
[
"Apache-2.0"
] | null | null | null |
String/strip-method.py
|
manish1822510059/Python-1000-programs
|
d03c1920fe63a7e32ac5bd9a13e2766d7a25756c
|
[
"Apache-2.0"
] | null | null | null |
# name = input("Enter your name ?").strip()
# print(len(name))
# print(name)
# strip() trims whitespace at both ends; rstrip() trims only the trailing end.
sample_strip = " hello python world"
sample_rstrip = " hello python world "
print(sample_strip.strip())
print(sample_rstrip.rstrip())
| 23.75
| 54
| 0.531579
| 21
| 190
| 4.809524
| 0.52381
| 0.19802
| 0.316832
| 0.415842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.3
| 190
| 8
| 54
| 23.75
| 0.759399
| 0.368421
| 0
| 0
| 0
| 0
| 0.62037
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
919deb2fe4e595a65a9264d6dde542d0a5a20f28
| 110
|
py
|
Python
|
Python/Mundo01/teste/teste5.py
|
eStev4m/CursoPython
|
8b52a618e67c80d66518ef91c1d4596a2bfddc22
|
[
"MIT"
] | null | null | null |
Python/Mundo01/teste/teste5.py
|
eStev4m/CursoPython
|
8b52a618e67c80d66518ef91c1d4596a2bfddc22
|
[
"MIT"
] | null | null | null |
Python/Mundo01/teste/teste5.py
|
eStev4m/CursoPython
|
8b52a618e67c80d66518ef91c1d4596a2bfddc22
|
[
"MIT"
] | null | null | null |
frase = 'Curso em Vídeo Python'
# Every third character from index 1 up to (but not including) index 15.
print(frase[1:15:3])
# Number of occurrences of the letter 'o' in the phrase.
print(frase.count('o'))
| 18.333333
| 31
| 0.690909
| 19
| 110
| 4
| 0.578947
| 0.263158
| 0.315789
| 0.447368
| 0.868421
| 0.868421
| 0.868421
| 0
| 0
| 0
| 0
| 0.042105
| 0.136364
| 110
| 5
| 32
| 22
| 0.757895
| 0
| 0
| 0.5
| 0
| 0
| 0.390909
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 9
|
91cf0d1e7cdda7dc1a7540c525e1f2ce0b57d09a
| 12,070
|
py
|
Python
|
Codigos/NYT/NYT 2013.py
|
rafgui12/Newspaper-Library
|
96de4f91a25fa06e7bfb8b8f5981b032fb7b8bf8
|
[
"Unlicense"
] | null | null | null |
Codigos/NYT/NYT 2013.py
|
rafgui12/Newspaper-Library
|
96de4f91a25fa06e7bfb8b8f5981b032fb7b8bf8
|
[
"Unlicense"
] | null | null | null |
Codigos/NYT/NYT 2013.py
|
rafgui12/Newspaper-Library
|
96de4f91a25fa06e7bfb8b8f5981b032fb7b8bf8
|
[
"Unlicense"
] | null | null | null |
# importing the requests library
import requests
import numpy as np
import os
import time
# Country names and keywords, split into batches of at most 10 names so each
# batch of requests can be separated by a rate-limit pause.
country = ["Colombia","Canada","Cuba","China","Cameroon","Cambodia","Costa Rica","Croatia","Czech Republic","Argentina"]
country2 = ["Afghanistan","Australia","Algeria","Austria","Brazil","Bolivia","Belgium","Bangladesh","Denmark","Dominican Republic"]
country3 = ["Egypt","Ethiopia","Finland","Ghana","Germany","Greece","Guatemala","Hungary","Iceland","India"]
country4 = ["Indonesia","Iran","Iraq","Ireland","Israel","Italy","Jamaica","Japan","Kenya","Lithuania"]
country5 = ["Luxembourg","Malaysia","Morocco","Netherlands","New Zealand","Namibia","Norway","Nicaragua","Pakistan","Panama"]
country6 = ["Portugal","Peru","Poland","Philippines","Russia","Singapore","South Africa","South Korea","Sweden","Switzerland"]
country7 = ["Thailand","Turkey","United Arab Emirates","United Kingdom","United States","Vietnam","Mexico","Ecuador","Venezuela","Spain"]
country8 = ["France","Estonia","Slovakia","Slovenia","Uruguay","Paraguay","Chile","Sri Lanka","Romania","Tanzania"]
country9 = ["Tunisia","Bulgaria","Nigeria","Latvia","Saudi Arabia","Belarus","Serbia","Senegal","Scotland"]
keywords = ['"science"']

# Accumulators shared by every batch: one row per (country, keyword) query.
countryTotal = []
year = []
datos = []
StartT = 20130101
EndT = 20131231

# api-endpoint
URL = "https://api.nytimes.com/svc/search/v2/articlesearch.json"
# SECURITY NOTE(review): API key hard-coded in source; move it to an
# environment variable or config file before publishing.
API_KEY = "g3uH0lOVGucjdU8oKTF7evGQ7AwBjtV3"


def query_countries(countries):
    """Query the NYT article-search API once per (country, keyword) pair.

    For each query, appends the hit count to ``datos``, the start date to
    ``year`` and the country name to ``countryTotal``, and echoes progress
    to stdout — exactly as each of the original nine copy-pasted loops did.
    """
    for c in countries:
        for k in keywords:
            # NOTE(review): the original query string ends with a stray extra
            # double quote (e.g. '"science","Colombia""'); preserved verbatim
            # so the request sent to the API is byte-identical.
            q = k + "," + "\"" + c + "\"" + "\""
            # Parameters for the API request.
            params = {'q': q, 'begin_date': StartT, 'end_date': EndT,
                      'api-key': API_KEY}
            # Send the GET request and decode the JSON payload.
            data = requests.get(url=URL, params=params).json()
            print(c)
            datos.append(data["response"]["meta"]["hits"])
            year.append(StartT)
            countryTotal.append(c)
            print(data["response"]["meta"]["hits"])
            print(data["status"])


def save_results(rows, path):
    """Write *rows* (list of equal-length columns) transposed as comma-
    separated text via numpy — one line per country."""
    np.savetxt(path,             # output file
               np.asarray(rows).T,  # transpose: columns -> rows
               fmt="%s",         # plain string formatting
               delimiter=",")


def main():
    """Run all nine country batches, pausing between them for rate limits,
    then export the accumulated results as .txt and .csv."""
    groups = [country, country2, country3, country4, country5,
              country6, country7, country8, country9]
    for i, group in enumerate(groups):
        query_countries(group)
        # The original script slept 60s after every batch except the last.
        if i < len(groups) - 1:
            time.sleep(60)  # Seconds

    csv_list = [countryTotal, datos, year]
    save_results(csv_list, "output_" + str(StartT) + "_NYT.txt")
    save_results(csv_list, "output_" + str(StartT) + "_NYT.csv")
    print("Los archivos se han exportado satisfactoriamente")


if __name__ == "__main__":
    main()
| 35.816024
| 137
| 0.509445
| 1,209
| 12,070
| 5.067825
| 0.181969
| 0.070508
| 0.09401
| 0.117513
| 0.801371
| 0.801371
| 0.801371
| 0.801371
| 0.801371
| 0.801371
| 0
| 0.012279
| 0.251036
| 12,070
| 337
| 138
| 35.816024
| 0.665487
| 0.236703
| 0
| 0.804469
| 0
| 0.027933
| 0.227514
| 0.036382
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.022346
| 0
| 0.022346
| 0.156425
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
91ee36c312eecf4b748cf43cb9e4b139cd06f8cf
| 11,014
|
py
|
Python
|
integration/core/test_replica.py
|
lazerdye/longhorn-engine
|
f0437554c642590c1ecc7ab7c1ba467f7cbd5069
|
[
"Apache-2.0"
] | null | null | null |
integration/core/test_replica.py
|
lazerdye/longhorn-engine
|
f0437554c642590c1ecc7ab7c1ba467f7cbd5069
|
[
"Apache-2.0"
] | null | null | null |
integration/core/test_replica.py
|
lazerdye/longhorn-engine
|
f0437554c642590c1ecc7ab7c1ba467f7cbd5069
|
[
"Apache-2.0"
] | null | null | null |
import time
import random
import datetime
import grpc
import pytest
from common.constants import SIZE_STR
@pytest.fixture
def random_str():
    """Return a unique-ish name of the form 'random-<rand>-<unix time>'."""
    stamp = int(time.time())
    return 'random-{0}-{1}'.format(random_num(), stamp)
def random_num():
    """Return a random integer in the inclusive range [0, 1000000]."""
    upper = 1000000
    return random.randint(0, upper)
def test_create(grpc_replica_client):  # NOQA
    """A replica starts 'initial' and becomes 'closed' once created."""
    rep = grpc_replica_client.replica_get()
    assert (rep.state, rep.size, rep.sectorSize) == ('initial', '0', 0)
    assert rep.parent == '' and rep.head == ''

    rep = grpc_replica_client.replica_create(size=SIZE_STR)
    assert (rep.state, rep.size, rep.sectorSize) == ('closed', SIZE_STR, 512)
    assert rep.parent == ''
    assert rep.head == 'volume-head-000.img'
def test_open(grpc_replica_client):  # NOQA
    """Opening a created replica transitions it from 'closed' to 'open'."""
    rep = grpc_replica_client.replica_get()
    assert (rep.state, rep.size, rep.sectorSize) == ('initial', '0', 0)
    assert rep.parent == '' and rep.head == ''

    rep = grpc_replica_client.replica_create(size=SIZE_STR)
    assert (rep.state, rep.dirty, rep.rebuilding) == ('closed', False, False)
    assert (rep.size, rep.sectorSize) == (SIZE_STR, 512)
    assert rep.parent == '' and rep.head == 'volume-head-000.img'

    rep = grpc_replica_client.replica_open()
    assert (rep.state, rep.dirty, rep.rebuilding) == ('open', False, False)
    assert (rep.size, rep.sectorSize) == (SIZE_STR, 512)
    assert rep.parent == '' and rep.head == 'volume-head-000.img'
def test_close(grpc_replica_client):  # NOQA
    """Closing an open replica returns it to the 'closed' state."""
    grpc_replica_client.replica_create(size=SIZE_STR)

    rep = grpc_replica_client.replica_open()
    assert (rep.state, rep.dirty, rep.rebuilding) == ('open', False, False)
    assert (rep.size, rep.sectorSize) == (SIZE_STR, 512)
    assert rep.parent == '' and rep.head == 'volume-head-000.img'

    rep = grpc_replica_client.replica_close()
    assert (rep.state, rep.dirty, rep.rebuilding) == ('closed', False, False)
    assert (rep.size, rep.sectorSize) == (SIZE_STR, 512)
    assert rep.parent == '' and rep.head == 'volume-head-000.img'
def test_snapshot(grpc_replica_client):  # NOQA
    """Snapshots mark the replica dirty and grow the disk chain."""
    grpc_replica_client.replica_create(size=SIZE_STR)

    rep = grpc_replica_client.replica_open()
    assert (rep.state, rep.dirty, rep.rebuilding) == ('open', False, False)
    assert (rep.size, rep.sectorSize) == (SIZE_STR, 512)
    assert rep.parent == '' and rep.head == 'volume-head-000.img'

    rep = grpc_replica_client.replica_snapshot(
        name='000', created=datetime.datetime.utcnow().isoformat(),
        labels={"name": "000", "key": "value"})
    assert (rep.state, rep.dirty, rep.rebuilding) == ('dirty', True, False)
    assert (rep.size, rep.sectorSize) == (SIZE_STR, 512)
    # Labels supplied at snapshot time are persisted on the disk entry.
    snap = rep.disks["volume-snap-000.img"]
    assert snap.labels["name"] == "000"
    assert snap.labels["key"] == "value"

    rep = grpc_replica_client.replica_snapshot(
        name='001', created=datetime.datetime.utcnow().isoformat())
    assert (rep.state, rep.dirty, rep.rebuilding) == ('dirty', True, False)
    assert (rep.size, rep.sectorSize) == (SIZE_STR, 512)
    assert rep.head == 'volume-head-002.img'
    assert rep.parent == 'volume-snap-001.img'
    assert rep.chain == ['volume-head-002.img', 'volume-snap-001.img',
                         'volume-snap-000.img']
def test_remove_disk(grpc_replica_client):  # NOQA
    """Removing a middle snapshot collapses it out of the chain."""
    grpc_replica_client.replica_create(size=SIZE_STR)
    grpc_replica_client.replica_open()
    grpc_replica_client.replica_snapshot(
        name='000', created=datetime.datetime.utcnow().isoformat())
    rep = grpc_replica_client.replica_snapshot(
        name='001', created=datetime.datetime.utcnow().isoformat())
    assert rep.chain == ['volume-head-002.img', 'volume-snap-001.img',
                         'volume-snap-000.img']

    # Marking/preparing a non-existent disk is idempotent and must not fail.
    grpc_replica_client.disk_mark_as_removed(name='003')
    grpc_replica_client.disk_prepare_remove(name='003')

    # The active head can never be marked or prepared for removal.
    with pytest.raises(grpc.RpcError) as e:
        grpc_replica_client.disk_mark_as_removed(name='volume-head-002.img')
    assert "Can not mark the active" in str(e.value)
    with pytest.raises(grpc.RpcError) as e:
        grpc_replica_client.disk_prepare_remove(name='volume-head-002.img')
    assert "Can not delete the active" in str(e.value)

    grpc_replica_client.disk_mark_as_removed(name='001')
    ops = grpc_replica_client.disk_prepare_remove(name='001').operations
    assert len(ops) == 0

    rep = grpc_replica_client.disk_remove(name='volume-snap-001.img')
    assert (rep.state, rep.rebuilding) == ('dirty', False)
    assert (rep.size, rep.sectorSize) == (SIZE_STR, 512)
    assert rep.head == 'volume-head-002.img'
    assert rep.parent == 'volume-snap-000.img'
    assert rep.chain == ['volume-head-002.img', 'volume-snap-000.img']
def test_remove_last_disk(grpc_replica_client):  # NOQA
    """Removing the oldest snapshot coalesces it into its child."""
    grpc_replica_client.replica_create(size=SIZE_STR)
    grpc_replica_client.replica_open()
    grpc_replica_client.replica_snapshot(
        name='000', created=datetime.datetime.utcnow().isoformat())
    rep = grpc_replica_client.replica_snapshot(
        name='001', created=datetime.datetime.utcnow().isoformat())
    assert rep.chain == ['volume-head-002.img', 'volume-snap-001.img',
                         'volume-snap-000.img']

    grpc_replica_client.disk_mark_as_removed(name='volume-snap-000.img')
    ops = grpc_replica_client.disk_prepare_remove(
        name='volume-snap-000.img').operations
    # The bottom disk is first folded into its child, then replaced by it.
    assert len(ops) == 2
    assert (ops[0].action, ops[0].source, ops[0].target) == \
        ("coalesce", "volume-snap-000.img", "volume-snap-001.img")
    assert (ops[1].action, ops[1].source, ops[1].target) == \
        ("replace", "volume-snap-000.img", "volume-snap-001.img")

    rep = grpc_replica_client.disk_remove(name='volume-snap-000.img')
    assert (rep.state, rep.rebuilding) == ('dirty', False)
    assert (rep.size, rep.sectorSize) == (SIZE_STR, 512)
    assert rep.head == 'volume-head-002.img'
    assert rep.parent == 'volume-snap-001.img'
    assert rep.chain == ['volume-head-002.img', 'volume-snap-001.img']
def test_reload(grpc_replica_client):  # NOQA
    """State and chain survive a reload and a close/reopen cycle."""
    grpc_replica_client.replica_create(size=SIZE_STR)
    grpc_replica_client.replica_open()

    rep = grpc_replica_client.replica_get()
    assert rep.chain == ['volume-head-000.img']

    rep = grpc_replica_client.replica_snapshot(
        name='000', created=datetime.datetime.utcnow().isoformat())
    assert rep.chain == ['volume-head-001.img', 'volume-snap-000.img']

    rep = grpc_replica_client.replica_snapshot(
        name='001', created=datetime.datetime.utcnow().isoformat())
    assert rep.chain == ['volume-head-002.img', 'volume-snap-001.img',
                         'volume-snap-000.img']

    rep = grpc_replica_client.disk_remove(name='volume-snap-000.img')
    assert (rep.state, rep.size, rep.sectorSize) == ('dirty', SIZE_STR, 512)
    assert rep.head == 'volume-head-002.img'
    assert rep.parent == 'volume-snap-001.img'
    assert rep.chain == ['volume-head-002.img', 'volume-snap-001.img']

    rep = grpc_replica_client.replica_reload()
    assert (rep.state, rep.size, rep.sectorSize) == ('dirty', SIZE_STR, 512)
    assert rep.chain == ['volume-head-002.img', 'volume-snap-001.img']
    assert rep.head == 'volume-head-002.img'
    assert rep.parent == 'volume-snap-001.img'

    grpc_replica_client.replica_close()
    rep = grpc_replica_client.replica_open()
    assert (rep.state, rep.size, rep.sectorSize) == ('open', SIZE_STR, 512)
    assert rep.chain == ['volume-head-002.img', 'volume-snap-001.img']
    assert rep.head == 'volume-head-002.img'
    assert rep.parent == 'volume-snap-001.img'
def test_reload_simple(grpc_replica_client):  # NOQA
    """Reloading a freshly opened replica is a no-op on its state."""
    grpc_replica_client.replica_create(size=SIZE_STR)

    rep = grpc_replica_client.replica_open()
    assert (rep.state, rep.rebuilding) == ('open', False)
    assert (rep.size, rep.sectorSize) == (SIZE_STR, 512)
    assert rep.parent == '' and rep.head == 'volume-head-000.img'

    rep = grpc_replica_client.replica_reload()
    assert (rep.state, rep.size, rep.sectorSize) == ('open', SIZE_STR, 512)
    assert rep.parent == '' and rep.head == 'volume-head-000.img'
def test_rebuilding(grpc_replica_client):  # NOQA
    """The rebuilding flag persists across close/reopen and reload."""
    grpc_replica_client.replica_create(size=SIZE_STR)
    grpc_replica_client.replica_open()

    rep = grpc_replica_client.replica_snapshot(
        name='001', created=datetime.datetime.utcnow().isoformat())
    assert (rep.state, rep.rebuilding) == ('dirty', False)
    assert (rep.size, rep.sectorSize) == (SIZE_STR, 512)
    assert rep.parent == 'volume-snap-001.img'
    assert rep.head == 'volume-head-001.img'
    assert rep.chain == ['volume-head-001.img', 'volume-snap-001.img']

    rep = grpc_replica_client.rebuilding_set(rebuilding=True)
    assert (rep.state, rep.rebuilding) == ('rebuilding', True)
    assert (rep.size, rep.sectorSize) == (SIZE_STR, 512)
    assert rep.parent == 'volume-snap-001.img'
    assert rep.head == 'volume-head-001.img'
    assert rep.chain == ['volume-head-001.img', 'volume-snap-001.img']

    grpc_replica_client.replica_close()
    rep = grpc_replica_client.replica_open()
    assert (rep.state, rep.rebuilding) == ('rebuilding', True)
    assert (rep.size, rep.sectorSize) == (SIZE_STR, 512)
    assert rep.parent == 'volume-snap-001.img'
    assert rep.head == 'volume-head-001.img'
    assert rep.chain == ['volume-head-001.img', 'volume-snap-001.img']

    rep = grpc_replica_client.replica_reload()
    assert (rep.state, rep.rebuilding) == ('rebuilding', True)
    assert (rep.size, rep.sectorSize) == (SIZE_STR, 512)
    assert rep.parent == 'volume-snap-001.img'
    assert rep.head == 'volume-head-001.img'
    assert rep.chain == ['volume-head-001.img', 'volume-snap-001.img']
def test_not_rebuilding(grpc_replica_client):  # NOQA
    """Clearing the rebuilding flag returns the replica to 'dirty'."""
    grpc_replica_client.replica_create(size=SIZE_STR)
    grpc_replica_client.replica_open()

    rep = grpc_replica_client.replica_snapshot(
        name='001', created=datetime.datetime.utcnow().isoformat())
    assert (rep.state, rep.rebuilding) == ('dirty', False)
    assert (rep.size, rep.sectorSize) == (SIZE_STR, 512)
    assert rep.parent == 'volume-snap-001.img'
    assert rep.head == 'volume-head-001.img'
    assert rep.chain == ['volume-head-001.img', 'volume-snap-001.img']

    rep = grpc_replica_client.rebuilding_set(rebuilding=True)
    assert (rep.state, rep.rebuilding) == ('rebuilding', True)
    assert (rep.size, rep.sectorSize) == (SIZE_STR, 512)
    assert rep.parent == 'volume-snap-001.img'
    assert rep.head == 'volume-head-001.img'
    assert rep.chain == ['volume-head-001.img', 'volume-snap-001.img']

    rep = grpc_replica_client.rebuilding_set(rebuilding=False)
    assert (rep.state, rep.rebuilding) == ('dirty', False)
    assert (rep.size, rep.sectorSize) == (SIZE_STR, 512)
    assert rep.parent == 'volume-snap-001.img'
    assert rep.head == 'volume-head-001.img'
    assert rep.chain == ['volume-head-001.img', 'volume-snap-001.img']
| 33.274924
| 76
| 0.671963
| 1,572
| 11,014
| 4.552163
| 0.056616
| 0.140861
| 0.15204
| 0.134153
| 0.941448
| 0.933902
| 0.926076
| 0.90749
| 0.858999
| 0.843767
| 0
| 0.044185
| 0.190394
| 11,014
| 330
| 77
| 33.375758
| 0.758327
| 0.005448
| 0
| 0.807547
| 0
| 0
| 0.176019
| 0
| 0
| 0
| 0
| 0
| 0.649057
| 1
| 0.045283
| false
| 0
| 0.022642
| 0.007547
| 0.075472
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
7276252732cd77e205a015a461f9afd94152ff6a
| 3,339
|
py
|
Python
|
Heap/heap_implementation.py
|
liu-yunpeng/algorithms
|
1bbe34b4a3d2f090007faec25220ac8535213da7
|
[
"MIT"
] | 3
|
2021-04-24T01:20:27.000Z
|
2021-04-24T20:39:27.000Z
|
Heap/heap_implementation.py
|
liu-yunpeng/algorithms
|
1bbe34b4a3d2f090007faec25220ac8535213da7
|
[
"MIT"
] | null | null | null |
Heap/heap_implementation.py
|
liu-yunpeng/algorithms
|
1bbe34b4a3d2f090007faec25220ac8535213da7
|
[
"MIT"
] | null | null | null |
class Heap(object):
    """Array-backed binary heap base class.

    Subclasses provide the ordering (min vs max) via insert()/pop().
    """

    def __init__(self, array=None):
        # BUG FIX: the original used a mutable default (array=[]), which is
        # shared across every instance created without an explicit array.
        self.array = [] if array is None else array

    def insert(self, value):
        """Insert *value*; overridden by Min_Heap / Max_Heap."""
        pass


class Min_Heap(Heap):
    """Binary min-heap: pop() always returns the smallest element."""

    def __init__(self, array):
        super().__init__(array)

    def insert(self, value):
        """Append *value*, sift it up, and return the backing array."""
        self.array.append(value)
        pos = len(self.array) - 1
        while pos != 0:
            # Floor division replaces the original int((pos - 1) / 2);
            # identical for the non-negative indices used here.
            parent_pos = (pos - 1) // 2
            if self.array[pos] < self.array[parent_pos]:
                self.array[pos], self.array[parent_pos] = \
                    self.array[parent_pos], self.array[pos]
            pos = parent_pos
        return self.array

    def pop(self):
        """Remove and return the smallest element, restoring heap order.

        Raises IndexError on an empty heap (same as the original).
        """
        # Swap the root with the last slot, pop it, then sift the root down.
        self.array[0], self.array[-1] = self.array[-1], self.array[0]
        min_value = self.array.pop()
        pos = 0
        # While both children exist, descend toward the smaller child.
        # (Merges the original's duplicated left/right branches.)
        while pos * 2 + 2 < len(self.array):
            left = pos * 2 + 1
            right = pos * 2 + 2
            child = left if self.array[left] < self.array[right] else right
            if self.array[pos] > self.array[child]:
                self.array[pos], self.array[child] = \
                    self.array[child], self.array[pos]
                pos = child
            else:
                break  # heap order restored
        # A bottom-level node may still have a single (left) child.
        left = pos * 2 + 1
        if left < len(self.array) and self.array[pos] > self.array[left]:
            self.array[pos], self.array[left] = \
                self.array[left], self.array[pos]
        return min_value


class Max_Heap(Heap):
    """Binary max-heap: pop() always returns the largest element."""

    def __init__(self, array):
        super().__init__(array)

    def insert(self, value):
        """Append *value*, sift it up, and return the backing array."""
        self.array.append(value)
        pos = len(self.array) - 1
        while pos != 0:
            parent_pos = (pos - 1) // 2
            if self.array[pos] > self.array[parent_pos]:
                self.array[pos], self.array[parent_pos] = \
                    self.array[parent_pos], self.array[pos]
            pos = parent_pos
        return self.array

    def pop(self):
        """Remove and return the largest element, restoring heap order.

        Raises IndexError on an empty heap (same as the original).
        """
        self.array[0], self.array[-1] = self.array[-1], self.array[0]
        max_value = self.array.pop()
        pos = 0
        # While both children exist, descend toward the larger child.
        while pos * 2 + 2 < len(self.array):
            left = pos * 2 + 1
            right = pos * 2 + 2
            child = left if self.array[left] > self.array[right] else right
            if self.array[pos] < self.array[child]:
                self.array[pos], self.array[child] = \
                    self.array[child], self.array[pos]
                pos = child
            else:
                break  # heap order restored
        # A bottom-level node may still have a single (left) child.
        left = pos * 2 + 1
        if left < len(self.array) and self.array[pos] < self.array[left]:
            self.array[pos], self.array[left] = \
                self.array[left], self.array[pos]
        return max_value
| 34.071429
| 97
| 0.465109
| 395
| 3,339
| 3.840506
| 0.078481
| 0.45089
| 0.189848
| 0.168754
| 0.945946
| 0.930784
| 0.930784
| 0.930784
| 0.930784
| 0.930784
| 0
| 0.019437
| 0.414495
| 3,339
| 98
| 98
| 34.071429
| 0.756522
| 0.013477
| 0
| 0.797468
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.101266
| false
| 0.012658
| 0
| 0
| 0.189873
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
72812ba39571213609c33dd3ba455b804bf2b6a9
| 12,112
|
py
|
Python
|
my/bert_codertimo_pytorch/trainer/pretrain.py
|
notyetend/annotated-transformer
|
9c4fdbbbc5ab4d3bff931b540be5f1d811de3ea0
|
[
"MIT"
] | null | null | null |
my/bert_codertimo_pytorch/trainer/pretrain.py
|
notyetend/annotated-transformer
|
9c4fdbbbc5ab4d3bff931b540be5f1d811de3ea0
|
[
"MIT"
] | null | null | null |
my/bert_codertimo_pytorch/trainer/pretrain.py
|
notyetend/annotated-transformer
|
9c4fdbbbc5ab4d3bff931b540be5f1d811de3ea0
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
from torch.optim import Adam
from torch.utils.data import DataLoader
from ..model import BERTLM, BERT, CustomBERTLM, CustomBERT
from .optim_schedule import ScheduledOptim
import tqdm
class BERTTrainer:
    """
    BERTTrainer pretrains a BERT model with the two original LM objectives:

    1. Masked Language Model : 3.3.1 Task #1: Masked LM
    2. Next Sentence prediction : 3.3.2 Task #2: Next Sentence Prediction

    please check the details on README.md with simple example.
    """

    def __init__(
        self,
        bert: BERT,
        vocab_size: int,
        train_dataloader: DataLoader,
        test_dataloader: DataLoader = None,
        lr: float = 1e-4,
        betas=(0.9, 0.999),
        weight_decay: float = 0.01,
        warmup_steps=10000,
        with_cuda: bool = True,
        cuda_devices=None,
        log_freq: int = 10
    ):
        """
        :param bert: BERT model which you want to train
        :param vocab_size: total word vocab size
        :param train_dataloader: train dataset data loader
        :param test_dataloader: test dataset data loader [can be None]
        :param lr: learning rate of optimizer
        :param betas: Adam optimizer betas
        :param weight_decay: Adam optimizer weight decay param
        :param warmup_steps: number of warmup steps for the LR schedule
        :param with_cuda: training with cuda
        :param cuda_devices: device ids forwarded to nn.DataParallel (None = all)
        :param log_freq: logging frequency of the batch iteration
        """
        # Setup cuda device for BERT training, argument -c, --cuda should be true
        cuda_condition = torch.cuda.is_available() and with_cuda
        self.device = torch.device("cuda:0" if cuda_condition else "cpu")
        # This BERT model will be saved every epoch (see save())
        self.bert = bert
        # Initialize the BERT Language Model: wraps `bert` with the two task heads
        self.model = BERTLM(bert, vocab_size).to(self.device)
        # Distributed GPU training if CUDA can detect more than 1 GPU
        if with_cuda and torch.cuda.device_count() > 1:
            print("Using %d GPUS for BERT" % torch.cuda.device_count())
            self.model = nn.DataParallel(self.model, device_ids=cuda_devices)
        # Setting the train and test data loader
        self.train_data = train_dataloader
        self.test_data = test_dataloader
        # Adam wrapped by ScheduledOptim: warmup then inverse-sqrt LR decay
        self.optim = Adam(self.model.parameters(), lr=lr, betas=betas, weight_decay=weight_decay)
        self.optim_schedule = ScheduledOptim(self.optim, self.bert.hidden, n_warmup_steps=warmup_steps)
        # NLL loss for both objectives; ignore_index=0 skips padding positions
        self.criterion = nn.NLLLoss(ignore_index=0)
        self.log_freq = log_freq
        print("Total Parameters:", sum([p.nelement() for p in self.model.parameters()]))

    def train(self, epoch):
        """Run one training pass (with backprop) over the train data loader."""
        self.iteration(epoch, self.train_data)

    def test(self, epoch):
        """Run one evaluation pass (no backprop) over the test data loader."""
        self.iteration(epoch, self.test_data, train=False)

    def iteration(self, epoch, data_loader, train=True):
        """
        Loop over the data_loader for training or testing.
        If on train status, the backward pass and optimizer step are executed;
        otherwise the model is only evaluated. Saving is NOT done here — call
        save() separately after each epoch.

        :param epoch: current epoch index (used only for logging)
        :param data_loader: torch.utils.data.DataLoader for iteration
        :param train: boolean value of is train or test
        :return: None
        """
        str_code = "train" if train else "test"
        # Setting the tqdm progress bar (len(data_iter) == number of batches,
        # taken from total=len(data_loader))
        data_iter = tqdm.tqdm(enumerate(data_loader),
                              desc="EP_%s:%d" % (str_code, epoch),
                              total=len(data_loader),
                              bar_format="{l_bar}{r_bar}")
        avg_loss = 0.0
        total_correct = 0
        total_element = 0
        for i, data in data_iter:
            # 0. batch_data will be sent into the device (GPU or cpu)
            data = {key: value.to(self.device) for key, value in data.items()}
            # 1. forward the next_sentence_prediction and masked_lm model
            next_sent_output, mask_lm_output = self.model.forward(data["bert_input"], data["segment_label"])
            # 2-1. NLL (negative log likelihood) loss of is_next classification result
            next_loss = self.criterion(next_sent_output, data["is_next"])
            # 2-2. NLLLoss of predicting masked token word; transpose puts the
            # vocab dimension where NLLLoss expects the class dimension
            mask_loss = self.criterion(mask_lm_output.transpose(1, 2), data["bert_label"])
            # 2-3. Adding next_loss and mask_loss : 3.4 Pre-training Procedure
            loss = next_loss + mask_loss
            # 3. backward and optimization only in train
            if train:
                self.optim_schedule.zero_grad()
                loss.backward()
                self.optim_schedule.step_and_update_lr()
            # next sentence prediction accuracy over this batch
            correct = next_sent_output.argmax(dim=-1).eq(data["is_next"]).sum().item()
            avg_loss += loss.item()
            total_correct += correct
            total_element += data["is_next"].nelement()
            post_fix = {
                "epoch": epoch,
                "iter": i,
                "avg_loss": avg_loss / (i + 1),
                "avg_acc": total_correct / total_element * 100,
                "loss": loss.item()
            }
            if i % self.log_freq == 0:
                data_iter.write(str(post_fix))
        print("EP%d_%s, avg_loss=" % (epoch, str_code), avg_loss / len(data_iter), "total_acc=",
              total_correct * 100.0 / total_element)

    def save(self, epoch, file_path="output/bert_trained.model"):
        """
        Saving the current BERT model on file_path.
        The model is moved to CPU before serialization and back to self.device
        afterwards, so training can continue.

        :param epoch: current epoch number
        :param file_path: model output path which gonna be file_path+".ep%d" % epoch
        :return: final_output_path
        """
        output_path = file_path + ".ep%d" % epoch
        torch.save(self.bert.cpu(), output_path)
        self.bert.to(self.device)
        print("EP:%d Model Saved on:" % epoch, output_path)
        return output_path
class CustomBERTTrainer:
    """
    CustomBERTTrainer pretrains a CustomBERT model with a single LM objective:

    1. Masked Language Model : 3.3.1 Task #1: Masked LM

    please check the details on README.md with simple example.
    """

    def __init__(
        self,
        bert: CustomBERT,
        vocab_size: int,
        train_dataloader: DataLoader,
        test_dataloader: DataLoader = None,
        lr: float = 1e-4,
        betas=(0.9, 0.999),
        weight_decay: float = 0.01,
        warmup_steps=10000,
        with_cuda: bool = True,
        cuda_devices=None,
        log_freq: int = 10
    ):
        """
        :param bert: BERT model which you want to train
        :param vocab_size: total word vocab size
        :param train_dataloader: train dataset data loader
        :param test_dataloader: test dataset data loader [can be None]
        :param lr: learning rate of optimizer
        :param betas: Adam optimizer betas
        :param weight_decay: Adam optimizer weight decay param
        :param warmup_steps: number of warmup steps for the LR schedule
        :param with_cuda: training with cuda
        :param cuda_devices: explicit list of CUDA device ids; None or an
            empty list means "use every visible device"
        :param log_freq: logging frequency of the batch iteration
        """
        # BUGFIX: the default used to be a mutable list (cuda_devices=[]),
        # which is shared across all calls. None is the safe sentinel; the
        # falsy behavior (empty -> use all devices) is preserved below.
        cuda_devices = cuda_devices or []
        # Setup cuda device for BERT training, argument -c, --cuda should be true
        cuda_condition = torch.cuda.is_available() and with_cuda
        # Use the explicit device list when given, otherwise fall back to all
        # visible devices (fallback via `or`, not a set intersection).
        cuda_devices_to_use = list(set(cuda_devices) or set(range(torch.cuda.device_count())))
        self.device = torch.device("cuda:{}".format(cuda_devices_to_use[0])
                                   if cuda_condition and cuda_devices_to_use else "cpu")
        print('cuda_devices_to_use', cuda_devices_to_use, 'self.device', self.device)
        # This BERT model will be saved every epoch (see save())
        self.bert = bert
        # Initialize the BERT Language Model: wraps `bert` with the MLM head
        self.model = CustomBERTLM(bert, vocab_size).to(self.device)
        # Distributed GPU training when more than one device was selected
        if with_cuda and len(cuda_devices_to_use) > 1:
            print("Using GPUS{} for BERT".format(cuda_devices_to_use))
            self.model = nn.DataParallel(self.model, device_ids=cuda_devices_to_use)
        # Setting the train and test data loader
        self.train_data = train_dataloader
        self.test_data = test_dataloader
        # Adam wrapped by ScheduledOptim: warmup then inverse-sqrt LR decay
        self.optim = Adam(self.model.parameters(), lr=lr, betas=betas, weight_decay=weight_decay)
        self.optim_schedule = ScheduledOptim(self.optim, self.bert.hidden, n_warmup_steps=warmup_steps)
        # NLL loss for MLM prediction; ignore_index=0 skips padding positions
        self.criterion = nn.NLLLoss(ignore_index=0)
        self.log_freq = log_freq
        print("Total Parameters:", sum([p.nelement() for p in self.model.parameters()]))

    def train(self, epoch):
        """Run one training pass (with backprop) over the train data loader."""
        self.iteration(epoch, self.train_data)

    def test(self, epoch):
        """Run one evaluation pass (no backprop) over the test data loader."""
        self.iteration(epoch, self.test_data, train=False)

    def iteration(self, epoch, data_loader, train=True):
        """
        Loop over the data_loader for training or testing.
        If on train status, the backward pass and optimizer step are executed;
        otherwise the model is only evaluated. Saving is NOT done here — call
        save() separately after each epoch.

        :param epoch: current epoch index (used only for logging)
        :param data_loader: torch.utils.data.DataLoader for iteration
        :param train: boolean value of is train or test
        :return: None
        """
        str_code = "train" if train else "test"
        # Setting the tqdm progress bar (len(data_iter) == number of batches)
        data_iter = tqdm.tqdm(enumerate(data_loader),
                              desc="EP_%s:%d" % (str_code, epoch),
                              total=len(data_loader),
                              bar_format="{l_bar}{r_bar}")
        avg_loss = 0.0
        for i, data in data_iter:
            # 0. batch_data will be sent into the device (GPU or cpu)
            data = {key: value.to(self.device) for key, value in data.items()}
            # 1. forward the masked_lm model
            mask_lm_output = self.model.forward(data["bert_input"])
            # 2. NLLLoss of predicting masked token word; transpose puts the
            # vocab dimension where NLLLoss expects the class dimension
            loss = self.criterion(mask_lm_output.transpose(1, 2), data["bert_label"])
            # 3. backward and optimization only in train
            if train:
                self.optim_schedule.zero_grad()
                loss.backward()
                self.optim_schedule.step_and_update_lr()
            avg_loss += loss.item()
            post_fix = {
                "epoch": epoch,
                "iter": i,
                "avg_loss": avg_loss / (i + 1),
                "loss": loss.item()
            }
            if i % self.log_freq == 0:
                data_iter.write(str(post_fix))
        print("EP%d_%s, avg_loss=" % (epoch, str_code), avg_loss / len(data_iter))

    def save(self, epoch, file_path="output/bert_trained.model"):
        """
        Saving the current BERT model on file_path.
        The model is moved to CPU before serialization and back to self.device
        afterwards, so training can continue.

        :param epoch: current epoch number
        :param file_path: model output path which gonna be file_path+".ep%d" % epoch
        :return: final_output_path
        """
        output_path = file_path + ".ep%d" % epoch
        torch.save(self.bert.cpu(), output_path)
        self.bert.to(self.device)
        print("EP:%d Model Saved on:" % epoch, output_path)
        return output_path
| 38.573248
| 110
| 0.607662
| 1,567
| 12,112
| 4.529675
| 0.146139
| 0.022542
| 0.014652
| 0.018033
| 0.881375
| 0.865314
| 0.865314
| 0.865314
| 0.85517
| 0.843618
| 0
| 0.01229
| 0.301354
| 12,112
| 313
| 111
| 38.696486
| 0.826519
| 0.340571
| 0
| 0.707483
| 0
| 0
| 0.061331
| 0.006681
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068027
| false
| 0
| 0.047619
| 0
| 0.142857
| 0.061224
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
72c440770b42821019dc1b4dcf4192f36579822d
| 178
|
py
|
Python
|
agents/random_agent.py
|
renatopp/marioai
|
2f53e79cb99ab399a9a7d98a71999139ed06b2fc
|
[
"MIT"
] | 3
|
2015-11-12T23:38:16.000Z
|
2019-10-01T23:50:52.000Z
|
agents/random_agent.py
|
Behery/marioai
|
adbe1625e9acad55dde057a8df3cb2a7036f7eff
|
[
"MIT"
] | null | null | null |
agents/random_agent.py
|
Behery/marioai
|
adbe1625e9acad55dde057a8df3cb2a7036f7eff
|
[
"MIT"
] | 6
|
2016-07-20T01:59:57.000Z
|
2021-07-05T05:53:41.000Z
|
import random
import marioai
__all__ = ['RandomAgent']
class RandomAgent(marioai.Agent):
    """Agent that ignores observations and acts at random.

    Every step returns a five-button vector whose first three entries are
    fixed at [0, 1, 0] and whose last two are sampled independently.
    (Presumably the layout is [left, right, down, jump, speed] — confirm
    against marioai.Agent.)
    """

    def act(self):
        # Fixed prefix, then two independently sampled 0/1 buttons.
        action = [0, 1, 0]
        for _ in range(2):
            action.append(random.randint(0, 1))
        return action
| 22.25
| 68
| 0.674157
| 25
| 178
| 4.64
| 0.56
| 0.051724
| 0.241379
| 0.258621
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047945
| 0.179775
| 178
| 8
| 68
| 22.25
| 0.746575
| 0
| 0
| 0
| 0
| 0
| 0.061453
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.333333
| 0.166667
| 0.833333
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
72caa83016e5ec1d4153f4ac5b823d2f7956f6d2
| 2,113
|
py
|
Python
|
tests/conftest.py
|
eyllanesc/mkdocs-static-i18n
|
283f9a080603904a581370bdae01d517977c9b51
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
eyllanesc/mkdocs-static-i18n
|
283f9a080603904a581370bdae01d517977c9b51
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
eyllanesc/mkdocs-static-i18n
|
283f9a080603904a581370bdae01d517977c9b51
|
[
"MIT"
] | null | null | null |
import tempfile
import pytest
from mkdocs.config.base import load_config
def _load_test_config(config_file):
    """Load an mkdocs config file with a throwaway site_dir.

    Factors out the tempdir + load_config boilerplate shared by all the
    fixtures below. docs_dir is always "../docs/" relative to the config.

    NOTE(review): the temporary directory is deleted as soon as this function
    returns (the `with` block exits); load_config only records the path
    string, so tests must not rely on site_dir actually existing on disk.
    """
    with tempfile.TemporaryDirectory(prefix="mkdocs_tests_") as site_dir:
        return load_config(config_file, docs_dir="../docs/", site_dir=site_dir)


@pytest.fixture
def config_base():
    """Base mkdocs config, without the i18n plugin."""
    return _load_test_config("tests/mkdocs_base.yml")


@pytest.fixture
def config_base_rtd():
    """Base config using the Read the Docs theme."""
    return _load_test_config("tests/mkdocs_base_rtd.yml")


@pytest.fixture
def config_plugin():
    """Config with the i18n plugin enabled."""
    return _load_test_config("tests/mkdocs_i18n.yml")


@pytest.fixture
def config_plugin_static_nav():
    """i18n config with a statically defined nav."""
    return _load_test_config("tests/mkdocs_i18n_static_nav.yml")


@pytest.fixture
def config_plugin_no_default_language():
    """i18n config that declares no default language."""
    return _load_test_config("tests/mkdocs_i18n_no_default_language.yml")


@pytest.fixture
def config_plugin_default_language_only():
    """i18n config that builds only the default language."""
    return _load_test_config("tests/mkdocs_i18n_default_language_only.yml")


@pytest.fixture
def config_plugin_translated_nav():
    """i18n config with per-language nav translations."""
    return _load_test_config("tests/mkdocs_i18n_translated_nav.yml")


@pytest.fixture
def config_plugin_rtd():
    """i18n config using the Read the Docs theme."""
    return _load_test_config("tests/mkdocs_i18n_rtd.yml")
| 28.173333
| 86
| 0.675343
| 259
| 2,113
| 5.135135
| 0.11583
| 0.126316
| 0.096241
| 0.132331
| 0.875188
| 0.852632
| 0.852632
| 0.852632
| 0.852632
| 0.827068
| 0
| 0.007246
| 0.21628
| 2,113
| 74
| 87
| 28.554054
| 0.795894
| 0
| 0
| 0.526316
| 0
| 0
| 0.194983
| 0.115476
| 0
| 0
| 0
| 0
| 0
| 1
| 0.140351
| true
| 0
| 0.052632
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f400de2c3646dc0b64ad46147c3bcbe27d91925c
| 20,168
|
py
|
Python
|
demisto_sdk/commands/lint/tests/test_linter/os_runner_test.py
|
sturmianseq/demisto-sdk
|
67ce7ee70ccd557d661e03a60469301c5cbcb9c0
|
[
"MIT"
] | 42
|
2019-11-07T13:02:00.000Z
|
2022-03-29T03:39:04.000Z
|
demisto_sdk/commands/lint/tests/test_linter/os_runner_test.py
|
sturmianseq/demisto-sdk
|
67ce7ee70ccd557d661e03a60469301c5cbcb9c0
|
[
"MIT"
] | 1,437
|
2019-11-07T13:02:25.000Z
|
2022-03-31T12:48:11.000Z
|
demisto_sdk/commands/lint/tests/test_linter/os_runner_test.py
|
sturmianseq/demisto-sdk
|
67ce7ee70ccd557d661e03a60469301c5cbcb9c0
|
[
"MIT"
] | 46
|
2019-12-09T21:44:30.000Z
|
2022-03-24T17:36:45.000Z
|
from pathlib import Path
from typing import List
import pytest
from demisto_sdk.commands.lint.linter import Linter
class TestFlake8:
    """Tests for Linter._run_flake8: exit-code and captured-output handling."""

    def test_run_flake8_success(self, linter_obj: Linter, lint_files: List[Path], mocker):
        """A clean flake8 run (rc 0) yields exit code 0 and empty output."""
        from demisto_sdk.commands.lint import linter
        mocker.patch.object(linter, 'run_command_os')
        # run_command_os returns (stdout, stderr, return_code)
        linter.run_command_os.return_value = ('', '', 0)
        exit_code, output = linter_obj._run_flake8(lint_files=lint_files, py_num=3.7)
        assert exit_code == 0b0, "Exit code should be 0"
        assert output == '', "Output should be empty"

    def test_run_flake8_fail_lint(self, linter_obj: Linter, lint_files: List[Path], mocker):
        """Lint findings on stdout (rc 1) are propagated as the output."""
        from demisto_sdk.commands.lint import linter
        mocker.patch.object(linter, 'run_command_os')
        expected_output = 'Error code found'
        linter.run_command_os.return_value = (expected_output, '', 1)
        exit_code, output = linter_obj._run_flake8(lint_files=lint_files, py_num=3.7)
        assert exit_code == 0b1, "Exit code should be 1"
        # Fixed copy-pasted assertion message: here the output is expected to
        # carry the lint errors, not to be empty.
        assert output == expected_output, "Output should be the reported lint errors"

    def test_run_flake8_usage_stderr(self, linter_obj: Linter, lint_files: List[Path], mocker):
        """A usage error on stderr (rc 1) is returned instead of stdout."""
        from demisto_sdk.commands.lint import linter
        mocker.patch.object(linter, 'run_command_os')
        expected_output = 'Error code found'
        linter.run_command_os.return_value = ('not good', expected_output, 1)
        exit_code, output = linter_obj._run_flake8(lint_files=lint_files, py_num=3.7)
        assert exit_code == 0b1, "Exit code should be 1"
        assert output == expected_output, "Output should be the stderr usage error"
class TestBandit:
    """Tests for Linter._run_bandit: exit-code and captured-output handling."""

    def test_run_bandit_success(self, linter_obj: Linter, lint_files: List[Path], mocker):
        """A clean bandit run (rc 0) yields exit code 0 and empty output."""
        from demisto_sdk.commands.lint import linter
        mocker.patch.object(linter, 'run_command_os')
        # run_command_os returns (stdout, stderr, return_code)
        linter.run_command_os.return_value = ('', '', 0)
        exit_code, output = linter_obj._run_bandit(lint_files=lint_files)
        assert exit_code == 0b0, "Exit code should be 0"
        assert output == '', "Output should be empty"

    def test_run_bandit_fail_lint(self, linter_obj: Linter, lint_files: List[Path], mocker):
        """Findings on stdout (rc 1) are propagated as the output."""
        from demisto_sdk.commands.lint import linter
        mocker.patch.object(linter, 'run_command_os')
        expected_output = 'Error code found'
        linter.run_command_os.return_value = (expected_output, '', 1)
        exit_code, output = linter_obj._run_bandit(lint_files=lint_files)
        assert exit_code == 0b1, "Exit code should be 1"
        # Fixed copy-pasted assertion message: here the output is expected to
        # carry the lint errors, not to be empty.
        assert output == expected_output, "Output should be the reported lint errors"

    def test_run_bandit_usage_stderr(self, linter_obj: Linter, lint_files: List[Path], mocker):
        """A usage error on stderr (rc 1) is returned instead of stdout."""
        from demisto_sdk.commands.lint import linter
        mocker.patch.object(linter, 'run_command_os')
        expected_output = 'Error code found'
        linter.run_command_os.return_value = ('not good', expected_output, 1)
        exit_code, output = linter_obj._run_bandit(lint_files=lint_files)
        assert exit_code == 0b1, "Exit code should be 1"
        assert output == expected_output, "Output should be the stderr usage error"
class TestMypy:
    """Tests for Linter._run_mypy: exit-code and captured-output handling."""

    def test_run_mypy_success(self, linter_obj: Linter, lint_files: List[Path], mocker):
        """A clean mypy run (rc 0) yields exit code 0 and empty output."""
        from demisto_sdk.commands.lint import linter
        mocker.patch.object(linter, 'run_command_os')
        # run_command_os returns (stdout, stderr, return_code)
        linter.run_command_os.return_value = ('Success: no issues found', '', 0)
        exit_code, output = linter_obj._run_mypy(lint_files=lint_files, py_num=3.7)
        assert exit_code == 0b0, "Exit code should be 0"
        assert output == '', "Output should be empty"

    def test_run_mypy_fail_lint(self, linter_obj: Linter, lint_files: List[Path], mocker):
        """Findings on stdout (rc 1) are propagated as the output."""
        from demisto_sdk.commands.lint import linter
        mocker.patch.object(linter, 'run_command_os')
        expected_output = 'Error code found'
        linter.run_command_os.return_value = (expected_output, '', 1)
        exit_code, output = linter_obj._run_mypy(lint_files=lint_files, py_num=3.7)
        assert exit_code == 0b1, "Exit code should be 1"
        # Fixed copy-pasted assertion message: here the output is expected to
        # carry the lint errors, not to be empty.
        assert output == expected_output, "Output should be the reported lint errors"

    def test_run_mypy_usage_stderr(self, linter_obj: Linter, lint_files: List[Path], mocker):
        """A usage error on stderr (rc 1) is returned instead of stdout."""
        from demisto_sdk.commands.lint import linter
        mocker.patch.object(linter, 'run_command_os')
        expected_output = 'Error code found'
        linter.run_command_os.return_value = ('not good', expected_output, 1)
        exit_code, output = linter_obj._run_mypy(lint_files=lint_files, py_num=3.7)
        assert exit_code == 0b1, "Exit code should be 1"
        assert output == expected_output, "Output should be the stderr usage error"
class TestVulture:
    """Tests for Linter._run_vulture: exit-code and captured-output handling."""

    def test_run_vulture_success(self, linter_obj: Linter, lint_files: List[Path], mocker):
        """A clean vulture run (rc 0) yields exit code 0 and empty output."""
        from demisto_sdk.commands.lint import linter
        mocker.patch.object(linter, 'run_command_os')
        # run_command_os returns (stdout, stderr, return_code)
        linter.run_command_os.return_value = ('', '', 0)
        exit_code, output = linter_obj._run_vulture(lint_files=lint_files, py_num=3.7)
        assert exit_code == 0b0, "Exit code should be 0"
        assert output == '', "Output should be empty"

    def test_run_vulture_fail_lint(self, linter_obj: Linter, lint_files: List[Path], mocker):
        """Findings on stdout (rc 1) are propagated as the output."""
        from demisto_sdk.commands.lint import linter
        mocker.patch.object(linter, 'run_command_os')
        expected_output = 'Error code found'
        linter.run_command_os.return_value = (expected_output, '', 1)
        exit_code, output = linter_obj._run_vulture(lint_files=lint_files, py_num=3.7)
        assert exit_code == 0b1, "Exit code should be 1"
        # Fixed copy-pasted assertion message: here the output is expected to
        # carry the lint errors, not to be empty.
        assert output == expected_output, "Output should be the reported lint errors"

    def test_run_vulture_usage_stderr(self, linter_obj: Linter, lint_files: List[Path], mocker):
        """A usage error on stderr (rc 1) is returned instead of stdout."""
        from demisto_sdk.commands.lint import linter
        mocker.patch.object(linter, 'run_command_os')
        expected_output = 'Error code found'
        linter.run_command_os.return_value = ('not good', expected_output, 1)
        exit_code, output = linter_obj._run_vulture(lint_files=lint_files, py_num=3.7)
        assert exit_code == 0b1, "Exit code should be 1"
        assert output == expected_output, "Output should be the stderr usage error"
class TestRunLintInHost:
    """Flake8/Bandit/Mypy/Vulture — tests for Linter._run_lint_in_host dispatch."""

    @pytest.mark.parametrize(argnames="no_flake8, no_xsoar_linter, no_bandit, no_mypy, no_vulture",
                             argvalues=[(True, False, True, True, False),
                                        (False, False, True, True, True),
                                        (True, False, True, False, True),
                                        (True, False, False, True, True)])
    @pytest.mark.usefixtures("linter_obj", "mocker", "lint_files")
    def test_run_one_lint_check_success(self, mocker, linter_obj, lint_files, no_flake8: bool, no_xsoar_linter: bool,
                                        no_bandit: bool, no_mypy: bool, no_vulture: bool):
        """All mocked linters succeed: the overall exit code must stay 0 and the
        enabled linter(s) must each have been invoked exactly once, with no
        errors recorded in the package lint status."""
        # Fake the facts gathered earlier in the lint flow so no image
        # inspection or file collection actually happens.
        mocker.patch.dict(linter_obj._facts, {
            "images": [["image", "3.7"]],
            "test": False,
            "version_two": False,
            "lint_files": lint_files,
            "additional_requirements": []
        })
        # Every _run_* helper is stubbed to report success: (exit bit 0, '').
        mocker.patch.object(linter_obj, '_run_flake8')
        linter_obj._run_flake8.return_value = (0b0, '')
        mocker.patch.object(linter_obj, '_run_bandit')
        linter_obj._run_bandit.return_value = (0b0, '')
        mocker.patch.object(linter_obj, '_run_xsoar_linter')
        linter_obj._run_xsoar_linter.return_value = (0b0, '')
        mocker.patch.object(linter_obj, '_run_mypy')
        linter_obj._run_mypy.return_value = (0b0, '')
        mocker.patch.object(linter_obj, '_run_vulture')
        linter_obj._run_vulture.return_value = (0b0, '')
        linter_obj._run_lint_in_host(no_flake8=no_flake8,
                                     no_xsoar_linter=no_xsoar_linter,
                                     no_bandit=no_bandit,
                                     no_mypy=no_mypy,
                                     no_vulture=no_vulture)
        assert linter_obj._pkg_lint_status.get("exit_code") == 0b0
        if not no_flake8:
            linter_obj._run_flake8.assert_called_once()
            assert linter_obj._pkg_lint_status.get("flake8_errors") is None
        # NOTE(review): this second check starts a fresh `if` while bandit/
        # mypy/vulture below are `elif`s chained to IT — so whenever the xsoar
        # linter is enabled, the bandit/mypy/vulture assertions are skipped
        # even if those linters were also enabled. Confirm this asymmetry
        # (vs. the all-elif chain in test_run_one_lint_check_fail) is intended.
        if not no_xsoar_linter:
            linter_obj._run_xsoar_linter.assert_called_once()
            assert linter_obj._pkg_lint_status.get("xsoar_linter_errors") is None
        elif not no_bandit:
            linter_obj._run_bandit.assert_called_once()
            assert linter_obj._pkg_lint_status.get("bandit_errors") is None
        elif not no_mypy:
            linter_obj._run_mypy.assert_called_once()
            assert linter_obj._pkg_lint_status.get("mypy_errors") is None
        elif not no_vulture:
            linter_obj._run_vulture.assert_called_once()
            assert linter_obj._pkg_lint_status.get("vulture_errors") is None

    @pytest.mark.parametrize(argnames="no_flake8, no_xsoar_linter, no_bandit, no_mypy, no_vulture",
                             argvalues=[(True, True, True, True, False),
                                        (False, True, True, True, True),
                                        (True, True, True, False, True),
                                        (True, True, False, True, True),
                                        (True, False, True, True, True)])
    @pytest.mark.usefixtures("linter_obj", "mocker", "lint_files")
    def test_run_one_lint_check_fail(self, mocker, linter_obj, lint_files, no_flake8: bool, no_xsoar_linter: bool,
                                     no_bandit: bool, no_mypy: bool, no_vulture: bool):
        """Exactly one linter is enabled per parametrization and it fails: the
        error string and the matching EXIT_CODES bit must be recorded."""
        from demisto_sdk.commands.lint.linter import EXIT_CODES
        mocker.patch.dict(linter_obj._facts, {
            "images": [["image", "3.7"]],
            "test": False,
            "version_two": False,
            "lint_files": lint_files,
            "additional_requirements": []
        })
        # Every _run_* helper is stubbed to report failure: (exit bit 1, 'Error').
        mocker.patch.object(linter_obj, '_run_flake8')
        linter_obj._run_flake8.return_value = (0b1, 'Error')
        mocker.patch.object(linter_obj, '_run_xsoar_linter')
        linter_obj._run_xsoar_linter.return_value = (0b1, 'Error')
        mocker.patch.object(linter_obj, '_run_bandit')
        linter_obj._run_bandit.return_value = (0b1, 'Error')
        mocker.patch.object(linter_obj, '_run_mypy')
        linter_obj._run_mypy.return_value = (0b1, 'Error')
        mocker.patch.object(linter_obj, '_run_vulture')
        linter_obj._run_vulture.return_value = (0b1, 'Error')
        linter_obj._run_lint_in_host(no_flake8=no_flake8,
                                     no_xsoar_linter=no_xsoar_linter,
                                     no_bandit=no_bandit,
                                     no_mypy=no_mypy,
                                     no_vulture=no_vulture)
        if not no_flake8:
            linter_obj._run_flake8.assert_called_once()
            assert linter_obj._pkg_lint_status.get("flake8_errors") == 'Error'
            assert linter_obj._pkg_lint_status.get("exit_code") == EXIT_CODES['flake8']
        elif not no_xsoar_linter:
            linter_obj._run_xsoar_linter.assert_called_once()
            # NOTE(review): key case differs from the success test above
            # ("XSOAR_linter_errors" vs "xsoar_linter_errors") — verify which
            # spelling _run_lint_in_host actually writes.
            assert linter_obj._pkg_lint_status.get("XSOAR_linter_errors") == 'Error'
            assert linter_obj._pkg_lint_status.get("exit_code") == EXIT_CODES['XSOAR_linter']
        elif not no_bandit:
            linter_obj._run_bandit.assert_called_once()
            assert linter_obj._pkg_lint_status.get("bandit_errors") == 'Error'
            assert linter_obj._pkg_lint_status.get("exit_code") == EXIT_CODES['bandit']
        elif not no_mypy:
            linter_obj._run_mypy.assert_called_once()
            assert linter_obj._pkg_lint_status.get("mypy_errors") == 'Error'
            assert linter_obj._pkg_lint_status.get("exit_code") == EXIT_CODES['mypy']
        elif not no_vulture:
            linter_obj._run_vulture.assert_called_once()
            assert linter_obj._pkg_lint_status.get("vulture_errors") == 'Error'
            assert linter_obj._pkg_lint_status.get("exit_code") == EXIT_CODES['vulture']

    @pytest.mark.usefixtures("linter_obj", "mocker", "lint_files")
    def test_run_all_lint_fail_all(self, mocker, linter_obj, lint_files):
        """Every linter is enabled and fails: each error string is recorded and
        the exit code is the sum of all the individual EXIT_CODES bits."""
        from demisto_sdk.commands.lint.linter import EXIT_CODES
        mocker.patch.dict(linter_obj._facts, {
            "images": [["image", "3.7"]],
            "test": False,
            "version_two": False,
            "lint_files": lint_files,
            "additional_requirements": []
        })
        mocker.patch.object(linter_obj, '_run_flake8')
        linter_obj._run_flake8.return_value = (0b1, 'Error')
        mocker.patch.object(linter_obj, '_run_xsoar_linter')
        linter_obj._run_xsoar_linter.return_value = (0b1, 'Error')
        mocker.patch.object(linter_obj, '_run_bandit')
        linter_obj._run_bandit.return_value = (0b1, 'Error')
        mocker.patch.object(linter_obj, '_run_mypy')
        linter_obj._run_mypy.return_value = (0b1, 'Error')
        mocker.patch.object(linter_obj, '_run_vulture')
        linter_obj._run_vulture.return_value = (0b1, 'Error')
        linter_obj._run_lint_in_host(no_flake8=False,
                                     no_bandit=False,
                                     no_xsoar_linter=False,
                                     no_mypy=False,
                                     no_vulture=False)
        linter_obj._run_flake8.assert_called_once()
        assert linter_obj._pkg_lint_status.get("flake8_errors") == 'Error'
        linter_obj._run_xsoar_linter.assert_called_once()
        assert linter_obj._pkg_lint_status.get("XSOAR_linter_errors") == 'Error'
        linter_obj._run_bandit.assert_called_once()
        assert linter_obj._pkg_lint_status.get("bandit_errors") == 'Error'
        linter_obj._run_mypy.assert_called_once()
        assert linter_obj._pkg_lint_status.get("mypy_errors") == 'Error'
        linter_obj._run_vulture.assert_called_once()
        assert linter_obj._pkg_lint_status.get("vulture_errors") == 'Error'
        assert linter_obj._pkg_lint_status.get("exit_code") == EXIT_CODES['flake8'] + EXIT_CODES['bandit'] + \
            EXIT_CODES['mypy'] + EXIT_CODES['vulture'] + EXIT_CODES['XSOAR_linter']

    def test_no_lint_files(self, mocker, linter_obj):
        """No lint files exsits - not running any lint check"""
        mocker.patch.dict(linter_obj._facts, {
            "images": [["image", "3.7"]],
            "test": False,
            "version_two": False,
            "lint_files": [],
            "additional_requirements": []
        })
        mocker.patch.object(linter_obj, '_run_flake8')
        mocker.patch.object(linter_obj, '_run_bandit')
        mocker.patch.object(linter_obj, '_run_xsoar_linter')
        mocker.patch.object(linter_obj, '_run_mypy')
        mocker.patch.object(linter_obj, '_run_vulture')
        linter_obj._run_lint_in_host(no_flake8=False,
                                     no_bandit=False,
                                     no_xsoar_linter=False,
                                     no_mypy=False,
                                     no_vulture=False)
        # With nothing to lint, none of the linters may be invoked.
        linter_obj._run_flake8.assert_not_called()
        linter_obj._run_bandit.assert_not_called()
        linter_obj._run_xsoar_linter.assert_not_called()
        linter_obj._run_mypy.assert_not_called()
        linter_obj._run_vulture.assert_not_called()

    @pytest.mark.usefixtures("linter_obj", "mocker", "lint_files")
    def test_fail_lint_on_only_test_file(self, mocker, linter_obj, lint_files):
        """
        Given
        - Only one file was collected for linting.
        - The collected file is a unittest file.
        - All linters are enabled.
        When
        - Running the Linter class's _run_lint_in_host() method.
        Then
        - Only the flake8 linter should run
        - The flake8 linter is passed the unittest file
        """
        from demisto_sdk.commands.lint.linter import EXIT_CODES
        unittest_path = lint_files[0].parent / 'intergration_sample_test.py'
        # lint_files empty, only a unittest file collected: flake8 is the only
        # linter that also lints unittest files.
        mocker.patch.dict(linter_obj._facts, {
            "images": [["image", 3.7]],
            "test": False,
            "version_two": False,
            "lint_files": [],
            "lint_unittest_files": [unittest_path],
            "additional_requirements": [],
            "python_version": 3.7,
        })
        mocker.patch.object(linter_obj, '_run_flake8')
        linter_obj._run_flake8.return_value = (0b1, 'Error')
        mocker.patch.object(linter_obj, '_run_xsoar_linter')
        linter_obj._run_xsoar_linter.return_value = (0b1, 'Error')
        mocker.patch.object(linter_obj, '_run_bandit')
        linter_obj._run_bandit.return_value = (0b1, 'Error')
        mocker.patch.object(linter_obj, '_run_mypy')
        linter_obj._run_mypy.return_value = (0b1, 'Error')
        mocker.patch.object(linter_obj, '_run_vulture')
        linter_obj._run_vulture.return_value = (0b1, 'Error')
        linter_obj._run_lint_in_host(no_flake8=False,
                                     no_bandit=False,
                                     no_xsoar_linter=False,
                                     no_mypy=False,
                                     no_vulture=False)
        linter_obj._run_flake8.assert_called_once()
        assert linter_obj._pkg_lint_status.get("flake8_errors") == 'Error'
        linter_obj._run_bandit.assert_not_called()
        linter_obj._run_mypy.assert_not_called()
        linter_obj._run_xsoar_linter.assert_not_called()
        linter_obj._run_vulture.assert_not_called()
        assert linter_obj._pkg_lint_status.get("exit_code") == EXIT_CODES['flake8']

    @pytest.mark.usefixtures("linter_obj", "mocker", "lint_files")
    def test_fail_lint_on_normal_and_test_file(self, mocker, linter_obj, lint_files):
        """
        Given
        - Two files are collected for linting.
        - One is a normal python code file and the other is a unittest python file.
        - All linters are enabled.
        When
        - Running the Linter class's _run_lint_in_host() method.
        Then
        - The flake8 linter should run on the normal file and the unittest file
        - The other linters should only run on the normal file
        """
        from demisto_sdk.commands.lint.linter import EXIT_CODES
        unittest_path = lint_files[0].parent / 'intergration_sample_test.py'
        # Both a normal lint file and a unittest file are present: every
        # linter runs, so every EXIT_CODES bit contributes to the total.
        mocker.patch.dict(linter_obj._facts, {
            "images": [["image", 3.7]],
            "test": False,
            "version_two": False,
            "lint_files": lint_files,
            "lint_unittest_files": [unittest_path],
            "additional_requirements": [],
            "python_version": 3.7,
        })
        mocker.patch.object(linter_obj, '_run_flake8')
        linter_obj._run_flake8.return_value = (0b1, 'Error')
        mocker.patch.object(linter_obj, '_run_xsoar_linter')
        linter_obj._run_xsoar_linter.return_value = (0b1, 'Error')
        mocker.patch.object(linter_obj, '_run_bandit')
        linter_obj._run_bandit.return_value = (0b1, 'Error')
        mocker.patch.object(linter_obj, '_run_mypy')
        linter_obj._run_mypy.return_value = (0b1, 'Error')
        mocker.patch.object(linter_obj, '_run_vulture')
        linter_obj._run_vulture.return_value = (0b1, 'Error')
        linter_obj._run_lint_in_host(no_flake8=False,
                                     no_bandit=False,
                                     no_mypy=False,
                                     no_xsoar_linter=False,
                                     no_vulture=False)
        linter_obj._run_flake8.assert_called_once()
        linter_obj._run_bandit.assert_called_once()
        linter_obj._run_xsoar_linter.assert_called_once()
        linter_obj._run_mypy.assert_called_once()
        linter_obj._run_vulture.assert_called_once()
        assert linter_obj._pkg_lint_status.get("exit_code") == EXIT_CODES['flake8'] + EXIT_CODES['bandit'] + \
            EXIT_CODES['mypy'] + EXIT_CODES['vulture'] + EXIT_CODES['XSOAR_linter']
| 47.791469
| 117
| 0.637247
| 2,548
| 20,168
| 4.653846
| 0.048273
| 0.11916
| 0.104233
| 0.081464
| 0.947968
| 0.940799
| 0.925367
| 0.910778
| 0.904115
| 0.88902
| 0
| 0.012477
| 0.260859
| 20,168
| 421
| 118
| 47.904988
| 0.782988
| 0.034213
| 0
| 0.832827
| 0
| 0
| 0.131136
| 0.009952
| 0
| 0
| 0
| 0
| 0.240122
| 1
| 0.054711
| false
| 0
| 0.06079
| 0
| 0.130699
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f411343b7e6e8370dcb8d20879f56341dfdb0412
| 2,730
|
py
|
Python
|
tests/test_set.py
|
koodaa-team/redis-natives-py
|
dbe3ee65527a7a1b0b1dc35ae968588635732718
|
[
"MIT"
] | 1
|
2022-02-17T20:02:51.000Z
|
2022-02-17T20:02:51.000Z
|
tests/test_set.py
|
koodaa-team/redis-natives-py
|
dbe3ee65527a7a1b0b1dc35ae968588635732718
|
[
"MIT"
] | null | null | null |
tests/test_set.py
|
koodaa-team/redis-natives-py
|
dbe3ee65527a7a1b0b1dc35ae968588635732718
|
[
"MIT"
] | null | null | null |
from tests import SetTestCase, IntegerSetTestCase
class TestSet(SetTestCase):
    """Behavioral tests for the string-valued redis-backed Set wrapper."""

    def test_length_initially_zero(self):
        assert len(self.set) == 0

    def test_add_value_increases_length(self):
        self.set.add(1)
        assert len(self.set) == 1

    def test_add_saves_values_in_redis(self):
        self.set.add(1)
        # Members come back from the raw redis connection as strings.
        assert self.redis.smembers('test_key') == {'1'}

    def test_remove(self):
        self.set.add(1)
        self.set.discard(1)
        assert len(self.set) == 0

    def test_pop(self):
        self.set.add(1)
        popped = self.set.pop()
        assert popped == '1'
        assert len(self.set) == 0

    def test_contains(self):
        self.set.add(1)
        assert '1' in self.set

    def test_iterator(self):
        for value in (1, 2):
            self.set.add(value)
        assert list(self.set) == ['1', '2']

    def test_redis_type(self):
        self.set.add(1)
        assert self.set.redis_type == 'set'

    def test_copy(self):
        self.set.add(1)
        clone = self.set.copy('copy_key')
        assert clone.key == 'copy_key'
        assert list(clone) == ['1']
class TestIntegerSet(IntegerSetTestCase):
    """Behavioral tests for the integer-valued Set wrapper, including set algebra."""

    def test_add_value_increases_length(self):
        self.set.add(1)
        assert len(self.set) == 1

    def test_add_saves_values_in_redis(self):
        # Redis itself still stores the member as a string.
        self.set.add(1)
        assert self.redis.smembers('test_key') == {'1'}

    def test_remove(self):
        self.set.add(1)
        self.set.discard(1)
        assert len(self.set) == 0

    def test_pop(self):
        # Integer sets decode popped members back to int.
        self.set.add(1)
        popped = self.set.pop()
        assert popped == 1
        assert len(self.set) == 0

    def test_contains(self):
        self.set.add(1)
        assert 1 in self.set

    def test_clear(self):
        self.set.add(1)
        self.set.clear()
        assert len(self.set) == 0

    def test_iterator(self):
        self.set.add(1)
        self.set.add(2)
        assert list(self.set) == [1, 2]

    def test_redis_type(self):
        self.set.add(1)
        assert self.set.redis_type == 'set'

    def test_issuperset(self):
        self.set.add(1)
        self.set.add(2)
        self.other_set.add(2)
        assert self.set.issuperset(self.other_set)
        self.other_set.add(3)
        assert not self.set.issuperset(self.other_set)

    def test_issubset(self):
        self.set.add(1)
        self.other_set.add(2)
        assert not self.set.issubset(self.other_set)
        self.other_set.add(1)
        assert self.set.issubset(self.other_set)

    def test_isdisjoint(self):
        self.set.add(1)
        self.other_set.add(2)
        assert self.set.isdisjoint(self.other_set)
        self.other_set.add(1)
        assert not self.set.isdisjoint(self.other_set)
| 25.754717
| 60
| 0.59011
| 407
| 2,730
| 3.813268
| 0.110565
| 0.216495
| 0.141753
| 0.171392
| 0.847938
| 0.820876
| 0.743557
| 0.690077
| 0.666881
| 0.606314
| 0
| 0.024873
| 0.278388
| 2,730
| 105
| 61
| 26
| 0.762944
| 0
| 0
| 0.679012
| 0
| 0
| 0.016484
| 0
| 0
| 0
| 0
| 0
| 0.320988
| 1
| 0.246914
| false
| 0
| 0.012346
| 0
| 0.283951
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f411db345cda67b0ec3c4a0a32bb810b435d151a
| 1,452
|
py
|
Python
|
tests/test_validate.py
|
yevgenyr/regolith
|
4fc6c7bb2d24d9caf75e977b8e39637f62e5d584
|
[
"CC0-1.0"
] | null | null | null |
tests/test_validate.py
|
yevgenyr/regolith
|
4fc6c7bb2d24d9caf75e977b8e39637f62e5d584
|
[
"CC0-1.0"
] | null | null | null |
tests/test_validate.py
|
yevgenyr/regolith
|
4fc6c7bb2d24d9caf75e977b8e39637f62e5d584
|
[
"CC0-1.0"
] | null | null | null |
import os
import sys
from io import StringIO
import pytest
from xonsh.lib import subprocess
from regolith.main import main
def test_validate_python(make_db):
    """In-process ``regolith validate`` on a clean database reports no errors.

    Captures stdout by temporarily swapping ``sys.stdout`` for a StringIO.
    """
    repo = make_db
    os.chdir(repo)
    backup = sys.stdout
    sys.stdout = StringIO()
    # try/finally guarantees stdout is restored even if main() raises;
    # the original left a closed StringIO installed on failure, breaking
    # every later test that prints.
    try:
        main(["validate"])
        out = sys.stdout.getvalue()
    finally:
        sys.stdout.close()
        sys.stdout = backup
    assert "NO ERRORS IN DBS" in out
def test_validate_python_single_col(make_db):
    """In-process ``regolith validate --collection people`` reports no errors."""
    repo = make_db
    os.chdir(repo)
    backup = sys.stdout
    sys.stdout = StringIO()
    # try/finally guarantees stdout is restored even if main() raises;
    # the original left a closed StringIO installed on failure.
    try:
        main(["validate", "--collection", "people"])
        out = sys.stdout.getvalue()
    finally:
        sys.stdout.close()
        sys.stdout = backup
    assert "NO ERRORS IN DBS" in out
def test_validate_bad_python(make_bad_db):
    """In-process validation of a broken database exits non-zero and reports errors."""
    repo = make_bad_db
    os.chdir(repo)
    backup = sys.stdout
    sys.stdout = StringIO()
    # try/finally guarantees stdout is restored even if something other
    # than the expected SystemExit escapes; the original left a closed
    # StringIO installed in that case.
    try:
        with pytest.raises(SystemExit):
            main(["validate"])
        out = sys.stdout.getvalue()
    finally:
        sys.stdout.close()
        sys.stdout = backup
    assert "Errors found in " in out
    assert "NO ERRORS IN DBS" not in out
def test_validate(make_db):
    """Run ``regolith validate`` as a subprocess against a clean database."""
    os.chdir(make_db)
    proc = subprocess.run(["regolith", "validate"], check=False)
    assert "NO ERRORS IN DBS" in proc.out
def test_validate_bad(make_bad_db):
    """Run ``regolith validate`` as a subprocess against a broken database."""
    os.chdir(make_bad_db)
    proc = subprocess.run(["regolith", "validate"], check=False)
    assert "Errors found in " in proc.out
    assert "NO ERRORS IN DBS" not in proc.out
| 23.419355
| 67
| 0.667355
| 211
| 1,452
| 4.469194
| 0.203791
| 0.14316
| 0.079533
| 0.068929
| 0.801697
| 0.78579
| 0.78579
| 0.78579
| 0.770944
| 0.770944
| 0
| 0
| 0.222452
| 1,452
| 61
| 68
| 23.803279
| 0.835252
| 0
| 0
| 0.734694
| 0
| 0
| 0.128099
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 1
| 0.102041
| false
| 0
| 0.122449
| 0
| 0.22449
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
be74ab2df96fed559dfb14e4247b1d1d48a43c4d
| 128
|
py
|
Python
|
lixian_plugins/queries/torrentz.py
|
1py/xunlei-lixian
|
1881932b9d5ccba78c7788fbad12982e05bf7f86
|
[
"MIT"
] | 2,177
|
2015-01-02T09:56:51.000Z
|
2022-03-27T01:48:37.000Z
|
lixian_plugins/queries/torrentz.py
|
1py/xunlei-lixian
|
1881932b9d5ccba78c7788fbad12982e05bf7f86
|
[
"MIT"
] | 29
|
2015-01-24T17:38:59.000Z
|
2021-08-29T03:39:30.000Z
|
lixian_plugins/queries/torrentz.py
|
1py/xunlei-lixian
|
1881932b9d5ccba78c7788fbad12982e05bf7f86
|
[
"MIT"
] | 516
|
2015-01-02T18:48:29.000Z
|
2022-01-26T07:12:35.000Z
|
from lixian_plugins.api import extract_info_hash_from_url
# Register a URL-based query: match torrentz.eu detail pages and capture the
# 40-hex-digit BitTorrent info hash from the URL path.
# NOTE(review): the call's return value is discarded, so registration is
# presumably a side effect of extract_info_hash_from_url -- confirm against
# lixian_plugins.api.
extract_info_hash_from_url(r'^http://torrentz.eu/([0-9a-f]{40})$')
| 21.333333
| 66
| 0.78125
| 23
| 128
| 3.956522
| 0.73913
| 0.241758
| 0.32967
| 0.417582
| 0.483516
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033333
| 0.0625
| 128
| 5
| 67
| 25.6
| 0.725
| 0
| 0
| 0
| 0
| 0
| 0.277778
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
be8553032e5979c7bf25e3143e9e66baa4b949a2
| 11,352
|
py
|
Python
|
tinkoff/invest/grpc/marketdata_pb2_grpc.py
|
forked-group/invest-python
|
3398391f5bb4a52020c312855de175cfe8cdc021
|
[
"Apache-2.0"
] | null | null | null |
tinkoff/invest/grpc/marketdata_pb2_grpc.py
|
forked-group/invest-python
|
3398391f5bb4a52020c312855de175cfe8cdc021
|
[
"Apache-2.0"
] | null | null | null |
tinkoff/invest/grpc/marketdata_pb2_grpc.py
|
forked-group/invest-python
|
3398391f5bb4a52020c312855de175cfe8cdc021
|
[
"Apache-2.0"
] | null | null | null |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from tinkoff.invest.grpc import marketdata_pb2 as tinkoff_dot_invest_dot_grpc_dot_marketdata__pb2
class MarketDataServiceStub(object):
    """Service providing exchange market data: 1. candles; 2. order books;
    3. trading statuses; 4. stream of trades.

    NOTE: generated by the gRPC Python protocol compiler plugin -- regenerate
    from the .proto instead of editing by hand.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # One unary-unary callable per RPC, bound to the fully-qualified
        # method path with the matching protobuf (de)serializers.
        self.GetCandles = channel.unary_unary(
            '/tinkoff.public.invest.api.contract.v1.MarketDataService/GetCandles',
            request_serializer=tinkoff_dot_invest_dot_grpc_dot_marketdata__pb2.GetCandlesRequest.SerializeToString,
            response_deserializer=tinkoff_dot_invest_dot_grpc_dot_marketdata__pb2.GetCandlesResponse.FromString,
        )
        self.GetLastPrices = channel.unary_unary(
            '/tinkoff.public.invest.api.contract.v1.MarketDataService/GetLastPrices',
            request_serializer=tinkoff_dot_invest_dot_grpc_dot_marketdata__pb2.GetLastPricesRequest.SerializeToString,
            response_deserializer=tinkoff_dot_invest_dot_grpc_dot_marketdata__pb2.GetLastPricesResponse.FromString,
        )
        self.GetOrderBook = channel.unary_unary(
            '/tinkoff.public.invest.api.contract.v1.MarketDataService/GetOrderBook',
            request_serializer=tinkoff_dot_invest_dot_grpc_dot_marketdata__pb2.GetOrderBookRequest.SerializeToString,
            response_deserializer=tinkoff_dot_invest_dot_grpc_dot_marketdata__pb2.GetOrderBookResponse.FromString,
        )
        self.GetTradingStatus = channel.unary_unary(
            '/tinkoff.public.invest.api.contract.v1.MarketDataService/GetTradingStatus',
            request_serializer=tinkoff_dot_invest_dot_grpc_dot_marketdata__pb2.GetTradingStatusRequest.SerializeToString,
            response_deserializer=tinkoff_dot_invest_dot_grpc_dot_marketdata__pb2.GetTradingStatusResponse.FromString,
        )
class MarketDataServiceServicer(object):
    """Service providing exchange market data: 1. candles; 2. order books;
    3. trading statuses; 4. stream of trades.

    NOTE: generated by the gRPC Python protocol compiler plugin; subclass and
    override the methods below to implement the server side.
    """

    def GetCandles(self, request, context):
        """Requests historical candles for an instrument."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetLastPrices(self, request, context):
        """Requests the latest prices for instruments."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetOrderBook(self, request, context):
        """Gets the order book for an instrument."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetTradingStatus(self, request, context):
        """Requests the trading status for instruments."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_MarketDataServiceServicer_to_server(servicer, server):
    """Register *servicer*'s RPC handlers on *server* under the
    MarketDataService name.

    NOTE: generated by the gRPC Python protocol compiler plugin -- do not
    edit by hand.
    """
    # Map each RPC name to a handler wrapping the servicer method with the
    # protobuf (de)serializers (mirror-image of the stub's wiring).
    rpc_method_handlers = {
        'GetCandles': grpc.unary_unary_rpc_method_handler(
            servicer.GetCandles,
            request_deserializer=tinkoff_dot_invest_dot_grpc_dot_marketdata__pb2.GetCandlesRequest.FromString,
            response_serializer=tinkoff_dot_invest_dot_grpc_dot_marketdata__pb2.GetCandlesResponse.SerializeToString,
        ),
        'GetLastPrices': grpc.unary_unary_rpc_method_handler(
            servicer.GetLastPrices,
            request_deserializer=tinkoff_dot_invest_dot_grpc_dot_marketdata__pb2.GetLastPricesRequest.FromString,
            response_serializer=tinkoff_dot_invest_dot_grpc_dot_marketdata__pb2.GetLastPricesResponse.SerializeToString,
        ),
        'GetOrderBook': grpc.unary_unary_rpc_method_handler(
            servicer.GetOrderBook,
            request_deserializer=tinkoff_dot_invest_dot_grpc_dot_marketdata__pb2.GetOrderBookRequest.FromString,
            response_serializer=tinkoff_dot_invest_dot_grpc_dot_marketdata__pb2.GetOrderBookResponse.SerializeToString,
        ),
        'GetTradingStatus': grpc.unary_unary_rpc_method_handler(
            servicer.GetTradingStatus,
            request_deserializer=tinkoff_dot_invest_dot_grpc_dot_marketdata__pb2.GetTradingStatusRequest.FromString,
            response_serializer=tinkoff_dot_invest_dot_grpc_dot_marketdata__pb2.GetTradingStatusResponse.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'tinkoff.public.invest.api.contract.v1.MarketDataService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class MarketDataService(object):
    """Service providing exchange market data: 1. candles; 2. order books;
    3. trading statuses; 4. stream of trades.

    Connection-free convenience wrappers around grpc.experimental.unary_unary:
    each call dials *target* itself instead of using a pre-built channel.
    NOTE: generated by the gRPC Python protocol compiler plugin -- do not
    edit by hand.
    """

    @staticmethod
    def GetCandles(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Requests historical candles for an instrument."""
        return grpc.experimental.unary_unary(request, target, '/tinkoff.public.invest.api.contract.v1.MarketDataService/GetCandles',
            tinkoff_dot_invest_dot_grpc_dot_marketdata__pb2.GetCandlesRequest.SerializeToString,
            tinkoff_dot_invest_dot_grpc_dot_marketdata__pb2.GetCandlesResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GetLastPrices(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Requests the latest prices for instruments."""
        return grpc.experimental.unary_unary(request, target, '/tinkoff.public.invest.api.contract.v1.MarketDataService/GetLastPrices',
            tinkoff_dot_invest_dot_grpc_dot_marketdata__pb2.GetLastPricesRequest.SerializeToString,
            tinkoff_dot_invest_dot_grpc_dot_marketdata__pb2.GetLastPricesResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GetOrderBook(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Gets the order book for an instrument."""
        return grpc.experimental.unary_unary(request, target, '/tinkoff.public.invest.api.contract.v1.MarketDataService/GetOrderBook',
            tinkoff_dot_invest_dot_grpc_dot_marketdata__pb2.GetOrderBookRequest.SerializeToString,
            tinkoff_dot_invest_dot_grpc_dot_marketdata__pb2.GetOrderBookResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GetTradingStatus(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Requests the trading status for instruments."""
        return grpc.experimental.unary_unary(request, target, '/tinkoff.public.invest.api.contract.v1.MarketDataService/GetTradingStatus',
            tinkoff_dot_invest_dot_grpc_dot_marketdata__pb2.GetTradingStatusRequest.SerializeToString,
            tinkoff_dot_invest_dot_grpc_dot_marketdata__pb2.GetTradingStatusResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
class MarketDataStreamServiceStub(object):
    """Missing associated documentation comment in .proto file.

    NOTE: generated by the gRPC Python protocol compiler plugin -- do not
    edit by hand.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Bi-directional streaming RPC: client sends MarketDataRequest
        # messages and receives MarketDataResponse messages on one stream.
        self.MarketDataStream = channel.stream_stream(
            '/tinkoff.public.invest.api.contract.v1.MarketDataStreamService/MarketDataStream',
            request_serializer=tinkoff_dot_invest_dot_grpc_dot_marketdata__pb2.MarketDataRequest.SerializeToString,
            response_deserializer=tinkoff_dot_invest_dot_grpc_dot_marketdata__pb2.MarketDataResponse.FromString,
        )
class MarketDataStreamServiceServicer(object):
    """Missing associated documentation comment in .proto file.

    NOTE: generated by the gRPC Python protocol compiler plugin; subclass and
    override MarketDataStream to implement the server side.
    """

    def MarketDataStream(self, request_iterator, context):
        """Bi-directional stream providing market data."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_MarketDataStreamServiceServicer_to_server(servicer, server):
    """Register *servicer*'s streaming handler on *server* under the
    MarketDataStreamService name.

    NOTE: generated by the gRPC Python protocol compiler plugin -- do not
    edit by hand.
    """
    rpc_method_handlers = {
        'MarketDataStream': grpc.stream_stream_rpc_method_handler(
            servicer.MarketDataStream,
            request_deserializer=tinkoff_dot_invest_dot_grpc_dot_marketdata__pb2.MarketDataRequest.FromString,
            response_serializer=tinkoff_dot_invest_dot_grpc_dot_marketdata__pb2.MarketDataResponse.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'tinkoff.public.invest.api.contract.v1.MarketDataStreamService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class MarketDataStreamService(object):
    """Missing associated documentation comment in .proto file.

    Connection-free convenience wrapper around grpc.experimental.stream_stream.
    NOTE: generated by the gRPC Python protocol compiler plugin -- do not
    edit by hand.
    """

    @staticmethod
    def MarketDataStream(request_iterator,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Bi-directional stream providing market data."""
        return grpc.experimental.stream_stream(request_iterator, target, '/tinkoff.public.invest.api.contract.v1.MarketDataStreamService/MarketDataStream',
            tinkoff_dot_invest_dot_grpc_dot_marketdata__pb2.MarketDataRequest.SerializeToString,
            tinkoff_dot_invest_dot_grpc_dot_marketdata__pb2.MarketDataResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| 48.306383
| 155
| 0.700317
| 1,082
| 11,352
| 6.987061
| 0.134935
| 0.055026
| 0.065608
| 0.07791
| 0.857937
| 0.846032
| 0.846032
| 0.809524
| 0.764947
| 0.724206
| 0
| 0.006341
| 0.222075
| 11,352
| 234
| 156
| 48.512821
| 0.849734
| 0.105092
| 0
| 0.511628
| 1
| 0
| 0.112495
| 0.082902
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081395
| false
| 0
| 0.011628
| 0.02907
| 0.156977
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bebed49568cf43a72b9fc081916101c7b12b3f19
| 5,055
|
py
|
Python
|
venv/lib/python3.9/site-packages/tests/validation_strategies/test_header_validation_strategy.py
|
neophyte88/sistine3
|
e685d751a7f1c836d5603211d7cf267420fc8fa2
|
[
"MIT"
] | 1
|
2022-03-09T08:16:09.000Z
|
2022-03-09T08:16:09.000Z
|
venv/lib/python3.9/site-packages/tests/validation_strategies/test_header_validation_strategy.py
|
neophyte88/sistine3
|
e685d751a7f1c836d5603211d7cf267420fc8fa2
|
[
"MIT"
] | null | null | null |
venv/lib/python3.9/site-packages/tests/validation_strategies/test_header_validation_strategy.py
|
neophyte88/sistine3
|
e685d751a7f1c836d5603211d7cf267420fc8fa2
|
[
"MIT"
] | null | null | null |
import simplejson as json
import pytest
from gemstone.auth.validation_strategies import BasicCookieStrategy, HeaderValidationStrategy
from tests.services.service_validation_strategies import ValidationStrategyTestService
VS_HEADER = HeaderValidationStrategy("X-Auth-Token")
@pytest.fixture
def app():
    """Tornado application wired with the X-Auth-Token header strategy."""
    svc = ValidationStrategyTestService()
    svc.set_validation_strategy(VS_HEADER)
    return svc.make_tornado_app()
@pytest.mark.gen_test
def test_header_strategy_token_wrong(http_client, base_url):
    """A request with an invalid X-Auth-Token is denied (JSON-RPC -32001)."""
    url = base_url + "/api"
    request = {
        "jsonrpc": "2.0",
        "id": 1,
        "method": "private_echo",
        "params": ["test"]
    }
    headers = {"content-type": "application/json",
               "x-auth-token": "wrong_token"}
    response = yield http_client.fetch(url, method="POST",
                                       body=json.dumps(request), headers=headers)
    assert response.code == 200
    payload = json.loads(response.body)
    print(payload)
    assert payload["id"] == 1
    assert payload["result"] is None
    assert payload["error"] is not None
    assert payload["error"]["code"] == -32001
    assert payload["error"]["message"] == "Access denied"
@pytest.mark.gen_test
def test_header_strategy_token_ok(http_client, base_url):
    """A request with the valid X-Auth-Token reaches the private method."""
    url = base_url + "/api"
    request = {
        "jsonrpc": "2.0",
        "id": 1,
        "method": "private_echo",
        "params": ["test"]
    }
    headers = {"content-type": "application/json",
               "x-auth-token": "secret"}
    response = yield http_client.fetch(url, method="POST",
                                       body=json.dumps(request), headers=headers)
    assert response.code == 200
    payload = json.loads(response.body)
    print(payload)
    assert payload["id"] == 1
    assert payload["result"] == "test"
    assert payload["error"] is None
@pytest.mark.gen_test
def test_header_strategy_token_ok_wrong_header_name(http_client, base_url):
    # NOTE(review): this function is redefined with an identical body just
    # below; this first definition is shadowed at import time and never
    # collected by pytest. One of the two copies should be renamed or removed.
    # Sends the valid token under the wrong header name ("header-auth-token"
    # instead of "X-Auth-Token"), so the strategy finds no token and the call
    # is denied with JSON-RPC error -32001.
    base_url += "/api"
    body = {
        "jsonrpc": "2.0",
        "id": 1,
        "method": "private_echo",
        "params": ["test"]
    }
    result = yield http_client.fetch(base_url, method="POST", body=json.dumps(body),
                                     headers={"content-type": "application/json",
                                              "header-auth-token": "secret"})
    assert result.code == 200
    result_body = json.loads(result.body)
    print(result_body)
    assert result_body["id"] == 1
    assert result_body["result"] is None
    assert result_body["error"] is not None
    assert result_body["error"]["code"] == -32001
    assert result_body["error"]["message"] == "Access denied"
@pytest.mark.gen_test
def test_header_strategy_token_ok_wrong_header_name(http_client, base_url):
    # NOTE(review): exact duplicate of the definition immediately above --
    # this second copy shadows the first, so only one test runs. Likely a
    # copy-paste left-over; rename or delete one of the two.
    # Valid token under the wrong header name -> access denied (-32001).
    base_url += "/api"
    body = {
        "jsonrpc": "2.0",
        "id": 1,
        "method": "private_echo",
        "params": ["test"]
    }
    result = yield http_client.fetch(base_url, method="POST", body=json.dumps(body),
                                     headers={"content-type": "application/json",
                                              "header-auth-token": "secret"})
    assert result.code == 200
    result_body = json.loads(result.body)
    print(result_body)
    assert result_body["id"] == 1
    assert result_body["result"] is None
    assert result_body["error"] is not None
    assert result_body["error"]["code"] == -32001
    assert result_body["error"]["message"] == "Access denied"
@pytest.mark.gen_test
def test_header_strategy_notification_token_wrong(http_client, base_url):
    """A notification (no "id") with a bad/misnamed token gets an empty reply."""
    url = base_url + "/api"
    request = {
        "jsonrpc": "2.0",
        "method": "private_echo",
        "params": ["test"]
    }
    headers = {"content-type": "application/json",
               "header-auth-token": "wrong_token"}
    response = yield http_client.fetch(url, method="POST",
                                       body=json.dumps(request), headers=headers)
    assert response.code == 200
    payload = json.loads(response.body)
    print(payload)
    assert "id" not in payload
    assert payload["result"] is None
    assert payload["error"] is None
@pytest.mark.gen_test
def test_header_strategy_notification_token_ok(http_client, base_url):
    """A notification (no "id") with a valid token also gets an empty reply."""
    url = base_url + "/api"
    request = {
        "jsonrpc": "2.0",
        "method": "private_echo",
        "params": ["test"]
    }
    headers = {"content-type": "application/json",
               "x-auth-token": "secret"}
    response = yield http_client.fetch(url, method="POST",
                                       body=json.dumps(request), headers=headers)
    assert response.code == 200
    payload = json.loads(response.body)
    print(payload)
    assert "id" not in payload
    assert payload["result"] is None
    assert payload["error"] is None
| 36.107143
| 94
| 0.582987
| 574
| 5,055
| 4.923345
| 0.12892
| 0.14862
| 0.124558
| 0.089172
| 0.871904
| 0.871904
| 0.871904
| 0.871904
| 0.871904
| 0.858457
| 0
| 0.014722
| 0.287834
| 5,055
| 139
| 95
| 36.366906
| 0.770278
| 0
| 0
| 0.811475
| 0
| 0
| 0.158869
| 0
| 0
| 0
| 0
| 0
| 0.245902
| 1
| 0.057377
| false
| 0
| 0.032787
| 0
| 0.098361
| 0.04918
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fe38c49d2707dcdf9061e2d8fff362c06e62fb3d
| 109
|
py
|
Python
|
src/etc/gdb_load_rust_pretty_printers.py
|
Timmmm/rust
|
e369d87b015a84653343032833d65d0545fd3f26
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 15
|
2015-12-17T18:20:31.000Z
|
2019-12-10T20:07:24.000Z
|
src/etc/gdb_load_rust_pretty_printers.py
|
Timmmm/rust
|
e369d87b015a84653343032833d65d0545fd3f26
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 9
|
2019-08-14T16:32:51.000Z
|
2021-07-20T23:38:07.000Z
|
src/etc/gdb_load_rust_pretty_printers.py
|
Timmmm/rust
|
e369d87b015a84653343032833d65d0545fd3f26
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 10
|
2016-12-13T07:07:21.000Z
|
2022-03-20T06:08:58.000Z
|
import gdb
import gdb_rust_pretty_printing
# GDB executes this script through its auto-load mechanism when the matching
# binary is loaded, so current_objfile() is the objfile the Rust
# pretty-printers should be attached to.
gdb_rust_pretty_printing.register_printers(gdb.current_objfile())
| 27.25
| 65
| 0.899083
| 16
| 109
| 5.625
| 0.5625
| 0.2
| 0.288889
| 0.466667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045872
| 109
| 3
| 66
| 36.333333
| 0.865385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 8
|
fe87f83a1a43b55c18133a2d16f5215c584a7b69
| 30,435
|
py
|
Python
|
auxiliary/table5.py
|
huiren-j/Replication-of-Decarolis-2014-
|
162fc0f60b067c88370922f901fac7fa17d78cd3
|
[
"MIT"
] | null | null | null |
auxiliary/table5.py
|
huiren-j/Replication-of-Decarolis-2014-
|
162fc0f60b067c88370922f901fac7fa17d78cd3
|
[
"MIT"
] | null | null | null |
auxiliary/table5.py
|
huiren-j/Replication-of-Decarolis-2014-
|
162fc0f60b067c88370922f901fac7fa17d78cd3
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from linearmodels import PanelOLS
import statsmodels.api as sm
import econtools as econ
import econtools.metrics as mt
import math
from statsmodels.stats.outliers_influence import variance_inflation_factor
from auxiliary.prepare import *
from auxiliary.table2 import *
from auxiliary.table3 import *
from auxiliary.table4 import *
from auxiliary.table5 import *
from auxiliary.table6 import *
from auxiliary.table7 import *
from auxiliary.extension import *
from auxiliary.table_formula import *
def calc_vif(X):
    """Compute the variance inflation factor for every column of *X*.

    Parameters
    ----------
    X : pandas.DataFrame
        Numeric design matrix.

    Returns
    -------
    pandas.DataFrame
        Columns 'variables' (names from X) and 'VIF' (one factor per column).
    """
    factors = [
        variance_inflation_factor(X.values, col_idx)
        for col_idx in range(X.shape[1])
    ]
    return pd.DataFrame({"variables": X.columns, "VIF": factors})
def table5_setting(data):
    """Build the Table 5 estimation sample and per-authority trend dummies.

    Restricts to Turin county/province works with >=5 years of pre- and
    post-period experience (or experience missing) and non-missing records,
    then to the matched control samples, and finally adds one
    'trend_pa_remained_<k>' indicator per remaining contracting authority
    (authorities 3090272 and 3070001 excluded).
    """
    df = data
    # Sample restriction: Turin samples, experience threshold (missing allowed),
    # and no missing-data flag.
    df = df[((df['turin_co_sample']==1) | (df['turin_pr_sample']==1)) & ((df['post_experience']>=5)|(df['post_experience'].isnull()==True)) & ((df['pre_experience']>=5)|(df['pre_experience'].isnull()==True))& (df['missing']==0)]
    # Keep only observations belonging to at least one of the matched control samples.
    df = df[(df['ctrl_pop_turin_co_sample']==1) | (df['ctrl_pop_turin_pr_sample']==1) | (df['ctrl_exp_turin_co_sample']==1) | (df['ctrl_exp_turin_pr_sample']==1) | (df['ctrl_pop_exp_turin_co_sample']==1) | (df['ctrl_pop_exp_turin_pr_sample']==1)]
    df = df.reset_index()
    #re-construct trend-pa: setting
    # Assign each remaining id_auth a 1-based group number.
    id_auth_remained = df['id_auth'].unique()
    id_auth_remained_df = pd.DataFrame({'id_auth': [], 'group_num': []})
    for i in range(len(id_auth_remained)):
        id_auth_remained_df.loc[i,'id_auth'] = id_auth_remained[i]
        id_auth_remained_df.loc[i,'group_num'] = i+1
    # O(rows x authorities) scan writing the group index onto every row.
    # NOTE(review): a dict lookup / Series.map would be linear -- confirm
    # before changing, since .loc assignment also fixes the column dtype.
    for i in range(len(df)):
        for j in range(len(id_auth_remained_df)):
            if df.loc[i, 'id_auth'] == id_auth_remained_df.loc[j, 'id_auth']:
                df.loc[i, 'id_auth_remained'] = j+1
    id_auth_remained_dum = pd.get_dummies(df['id_auth_remained']).rename(columns=lambda x: 'id_auth_remained' + str(x))
    df = pd.concat([df, id_auth_remained_dum],axis = 1)
    #re-construct trend-pa
    for i in range(len(id_auth_remained_dum.columns)):
        df['trend_pa_remained_'+str(i+1)] = 0
        for j in range(len(df)):
            # Authorities 3090272 and 3070001 never receive a trend indicator.
            if df.loc[j, id_auth_remained_dum.columns[i]]==1 and df.loc[j, 'authority_code']!=3090272 and df.loc[j, 'authority_code']!=3070001:
                df.loc[j,'trend_pa_remained_'+str(i+1)] = 1
        # NOTE(review): drop() is neither assigned back nor inplace, so this
        # line is a no-op and the dummy columns remain in the returned frame
        # -- confirm whether downstream code relies on them.
        df.drop([id_auth_remained_dum.columns[i]],axis = 1)
    return(df)
def table5_PanelA_odd(data):
    """Table 5, Panel A, odd columns: FE regressions on the Turin-county sample.

    For each outcome, regresses the outcome on the FPSB-auction dummy plus
    controls (reserve price, municipality, fiscal efficiency) and
    work-category / year / authority fixed effects, twice (clustered by
    'auth_anno' and by 'authority_code'), and collects the rounded
    confidence intervals for 'fpsb_auction'.

    Returns a DataFrame with two CI rows per outcome, outcomes side by side.
    """
    outcomes = ['discount','delay_ratio','overrun_ratio','days_to_award']
    t = 'turin_co_sample'   # treated sample flag
    g = 'ctrl_exp'          # control-group variant used by this panel
    c_outcomes=1
    i = 5                   # experience threshold (years)
    df1 = data
    # Base sample: treated + control flag, experience >= i, no missing controls.
    # NOTE(review): the loop below reuses the name `i` as a row index; harmless
    # because df1_tmp is built first, but fragile if lines are reordered.
    df1_tmp = df1[(df1[t]==1)& (df1[g +'_' + t]==1) & (df1['post_experience']>=i) & (df1['pre_experience']>=i)& (df1['post_experience'].isnull()==False) & (df1['pre_experience'].isnull()==False) & (df1['missing']==0) & (df1['fiscal_efficiency'].isnull()==False) & (df1['reserve_price'].isnull()==False)&(df1['municipality'].isnull()==False)]
    for o in outcomes:
        # Restrict to rows where this outcome is observed.
        df1 = df1_tmp[df1_tmp[o].isnull()==False]
        df1 = df1.reset_index()
        df1 = df1.sort_values(by = 'authority_code', ascending = True)
        # 'ind' marks the first row of each authority block.
        df1['ind'] = np.nan
        for i in range(len(df1)):
            if i == 0:
                df1.loc[i, 'ind'] = 1
            else:
                if df1.loc[i, 'authority_code'] != df1.loc[i-1, 'authority_code']:
                    df1.loc[i, 'ind'] = 1
        #create dummies for administration-year pairs
        all_years = df1['year'].unique()
        all_authorities = df1['authority_code'].unique()
        auth_year_reg_col = []
        for auth in all_authorities:
            for yr in all_years:
                df1['auth_year_' + str(auth)+'_' + str(yr)] = 0
                auth_year_reg_col.append('auth_year_' + str(auth)+'_' + str(yr))
                df1.loc[(df1['year']==yr) & (df1['authority_code']==auth), 'auth_year_' + str(auth)+'_' + str(yr) ] = 1
        ##regression for first stage
        #create dummies for work category
        all_categories = df1['work_category'].unique()
        for cat in all_categories:
            df1['cat_'+cat] = 0
            df1.loc[df1['work_category']==cat, 'cat_'+cat] =1
        ### Regression first stage
        #setting
        # Fixed-effect dummies: work category, year, authority.
        work_dum = pd.get_dummies(df1['work_category']).rename(columns=lambda x: 'work_dum_' + str(x))
        year_dum = pd.get_dummies(df1['year']).rename(columns=lambda x: 'year_dum_' + str(x))
        auth_dum = pd.get_dummies(df1['authority_code']).rename(columns=lambda x: 'auth_dum_' + str(x))
        dum_df = pd.concat([work_dum, year_dum, auth_dum],axis = 1)
        df1 = pd.concat([df1,dum_df],axis = 1)
        work_list = list(work_dum.columns)
        year_list = list(year_dum.columns)
        auth_list = list(auth_dum.columns)
        reg_col = []
        for i in work_list:
            reg_col.append(i)
        for j in year_list:
            reg_col.append(j)
        for k in auth_list:
            reg_col.append(k)
        exog_var = ['fpsb_auction','reserve_price','municipality','fiscal_efficiency']
        exog = exog_var + reg_col
        # Drop reference categories to avoid perfect colinearity with the constant.
        exog.remove('year_dum_2000.0')
        exog.remove('work_dum_OG01')
        exog.remove('auth_dum_3.0')
        exog.remove('auth_dum_1708.0')
        #1. reg -- clustered by authority-year pair
        fe_reg_1 = mt.reg(df1, o, exog, cluster = 'auth_anno', addcons= True, check_colinear = True)
        #2. reg -- clustered by authority
        fe_reg_2 = mt.reg(df1, o, exog, cluster = 'authority_code',addcons= True, check_colinear = True)
        # Rounded CI bounds for the treatment coefficient under each clustering.
        ci_1 = fe_reg_1.summary.loc['fpsb_auction',['CI_low', 'CI_high']].round()
        ci_2 = fe_reg_2.summary.loc['fpsb_auction',['CI_low', 'CI_high']].round()
        if o == 'discount':
            ci_discount = pd.DataFrame((ci_1,ci_2))
        elif o == 'delay_ratio':
            ci_delay_ratio = pd.DataFrame((ci_1,ci_2))
        elif o == 'overrun_ratio':
            ci_overrun_ratio = pd.DataFrame((ci_1,ci_2))
        else:
            ci_days_to_award = pd.DataFrame((ci_1,ci_2))
    ci = pd.concat([ci_discount,ci_delay_ratio,ci_overrun_ratio,ci_days_to_award],axis=1).reset_index()
    del ci['index']
    return(ci)
def table5_PanelA_even(data):
    """Table 5, Panel A, even columns: FE regressions with authority-specific trends.

    Same sample and outcomes as table5_PanelA_odd, but the specification
    replaces authority fixed effects with 'trend', 'trend_treat' and the
    per-authority 'trend_pa_remained_<k>' indicators (a fixed subset removed
    as colinear), and mt.reg is called without an explicit constant.

    Returns a DataFrame with two CI rows (auth_anno / authority_code
    clustering) per outcome, outcomes side by side.
    """
    outcomes = ['discount','delay_ratio','overrun_ratio','days_to_award']
    t = 'turin_co_sample'   # treated sample flag
    g = 'ctrl_exp'          # control-group variant used by this panel
    c_outcomes=1
    i = 5                   # experience threshold (years)
    df1 = data
    # Base sample: treated + control flag, experience >= i, no missing controls.
    df1_tmp = df1[(df1[t]==1)& (df1[g +'_' + t]==1) & (df1['post_experience']>=i) & (df1['pre_experience']>=i)& (df1['post_experience'].isnull()==False) & (df1['pre_experience'].isnull()==False) & (df1['missing']==0) & (df1['fiscal_efficiency'].isnull()==False) & (df1['reserve_price'].isnull()==False)&(df1['municipality'].isnull()==False)]
    for o in outcomes:
        df1 = df1_tmp[df1_tmp[o].isnull()==False]
        df1 = df1.reset_index()
        df1 = df1.sort_values(by = 'authority_code', ascending = True)
        # 'ind' marks the first row of each authority block.
        df1['ind'] = np.nan
        for i in range(len(df1)):
            if i == 0:
                df1.loc[i, 'ind'] = 1
            else:
                if df1.loc[i, 'authority_code'] != df1.loc[i-1, 'authority_code']:
                    df1.loc[i, 'ind'] = 1
        #create dummies for administration-year pairs
        all_years = df1['year'].unique()
        all_authorities = df1['authority_code'].unique()
        auth_year_reg_col = []
        for auth in all_authorities:
            for yr in all_years:
                df1['auth_year_' + str(auth)+'_' + str(yr)] = 0
                auth_year_reg_col.append('auth_year_' + str(auth)+'_' + str(yr))
                df1.loc[(df1['year']==yr) & (df1['authority_code']==auth), 'auth_year_' + str(auth)+'_' + str(yr) ] = 1
        ##regression for first stage
        #create dummies for work category
        all_categories = df1['work_category'].unique()
        for cat in all_categories:
            df1['cat_'+cat] = 0
            df1.loc[df1['work_category']==cat, 'cat_'+cat] =1
        ### Regression first stage
        #setting
        work_dum = pd.get_dummies(df1['work_category']).rename(columns=lambda x: 'work_dum_' + str(x))
        year_dum = pd.get_dummies(df1['year']).rename(columns=lambda x: 'year_dum_' + str(x))
        auth_dum = pd.get_dummies(df1['authority_code']).rename(columns=lambda x: 'auth_dum_' + str(x))
        dum_df = pd.concat([work_dum, year_dum, auth_dum],axis = 1)
        df1 = pd.concat([df1,dum_df],axis = 1)
        work_list = list(work_dum.columns)
        year_list = list(year_dum.columns)
        auth_list = list(auth_dum.columns)
        reg_col = []
        for i in work_list:
            reg_col.append(i)
        for j in year_list:
            reg_col.append(j)
        # NOTE(review): unlike the odd-column variant, auth_list is built but
        # never added to reg_col -- authority FE are replaced by the trend
        # terms below; presumably intentional, confirm against the paper.
        exog_var = ['fpsb_auction','reserve_price','municipality','fiscal_efficiency','trend','trend_treat']
        for i in range(1,36):
            exog_var.append('trend_pa_remained_'+str(i))
        exog = exog_var + reg_col
        # Drop reference categories and the trend indicators known to be colinear.
        exog.remove('year_dum_2000.0')
        exog.remove('work_dum_OG01')
        for i in [2,4,6,7,9,11,12,13,15,16,17,18,20,21,22,23,24,25,26,28,34,35]:
            exog.remove('trend_pa_remained_'+str(i))
        #1. reg -- clustered by authority-year pair (no explicit constant here)
        fe_reg_1 = mt.reg(df1, o, exog, cluster = 'auth_anno', check_colinear = True)
        #2. reg -- clustered by authority
        fe_reg_2 = mt.reg(df1, o, exog, cluster = 'authority_code', check_colinear = True)
        # Rounded CI bounds for the treatment coefficient under each clustering.
        ci_1 = fe_reg_1.summary.loc['fpsb_auction',['CI_low', 'CI_high']].round()
        ci_2 = fe_reg_2.summary.loc['fpsb_auction',['CI_low', 'CI_high']].round()
        if o == 'discount':
            ci_discount = pd.DataFrame((ci_1,ci_2))
        elif o == 'delay_ratio':
            ci_delay_ratio = pd.DataFrame((ci_1,ci_2))
        elif o == 'overrun_ratio':
            ci_overrun_ratio = pd.DataFrame((ci_1,ci_2))
        else:
            ci_days_to_award = pd.DataFrame((ci_1,ci_2))
    ci = pd.concat([ci_discount,ci_delay_ratio,ci_overrun_ratio,ci_days_to_award],axis=1).reset_index()
    del ci['index']
    return(ci)
def table5_PanelB_odd(data):
    """Table 5, Panel B (odd columns): FE regressions on the Turin
    'turin_pr_sample' experienced-control subsample.

    For each outcome, regresses it on the auction-format dummy
    ('fpsb_auction') plus controls and work-category / year / authority
    dummies, clustering once on 'auth_anno' and once on 'authority_code'.

    Parameters
    ----------
    data : pandas.DataFrame
        Full project dataset containing the sample flags, outcomes and
        controls referenced below.

    Returns
    -------
    pandas.DataFrame
        Two rows (one per clustering choice) of rounded [CI_low, CI_high]
        bounds for 'fpsb_auction', four outcomes side by side.
    """
    outcomes = ['discount','delay_ratio','overrun_ratio','days_to_award']
    t = 'turin_pr_sample'
    g = 'ctrl_exp'
    c_outcomes=1  # NOTE(review): unused here — kept for parity with sibling functions
    i = 5  # minimum pre/post experience (years) required for the sample
    df1 = data
    # Restrict to the experienced-control Turin sample with all controls observed.
    df1_tmp = df1[(df1[t]==1)& (df1[g +'_' + t]==1) & (df1['post_experience']>=i) & (df1['pre_experience']>=i)& (df1['post_experience'].isnull()==False) & (df1['pre_experience'].isnull()==False) & (df1['missing']==0) & (df1['fiscal_efficiency'].isnull()==False) & (df1['reserve_price'].isnull()==False)&(df1['municipality'].isnull()==False)]
    for o in outcomes:
        # Keep only rows where the current outcome is observed.
        df1 = df1_tmp[df1_tmp[o].isnull()==False]
        df1 = df1.reset_index()
        df1 = df1.sort_values(by = 'authority_code', ascending = True)
        # 'ind' flags the first row of each authority block after the sort.
        df1['ind'] = np.nan
        for i in range(len(df1)):
            if i == 0:
                df1.loc[i, 'ind'] = 1
            else:
                if df1.loc[i, 'authority_code'] != df1.loc[i-1, 'authority_code']:
                    df1.loc[i, 'ind'] = 1
        #create dummies for administration-year pairs
        all_years = df1['year'].unique()
        all_authorities = df1['authority_code'].unique()
        auth_year_reg_col = []
        for auth in all_authorities:
            for yr in all_years:
                df1['auth_year_' + str(auth)+'_' + str(yr)] = 0
                auth_year_reg_col.append('auth_year_' + str(auth)+'_' + str(yr))
                df1.loc[(df1['year']==yr) & (df1['authority_code']==auth), 'auth_year_' + str(auth)+'_' + str(yr) ] = 1
        ##regression for first stage
        #create dummies for work category
        all_categories = df1['work_category'].unique()
        for cat in all_categories:
            df1['cat_'+cat] = 0
            df1.loc[df1['work_category']==cat, 'cat_'+cat] =1
        ### Regression first stage
        #setting
        work_dum = pd.get_dummies(df1['work_category']).rename(columns=lambda x: 'work_dum_' + str(x))
        year_dum = pd.get_dummies(df1['year']).rename(columns=lambda x: 'year_dum_' + str(x))
        auth_dum = pd.get_dummies(df1['authority_code']).rename(columns=lambda x: 'auth_dum_' + str(x))
        dum_df = pd.concat([work_dum, year_dum, auth_dum],axis = 1)
        df1 = pd.concat([df1,dum_df],axis = 1)
        work_list = list(work_dum.columns)
        year_list = list(year_dum.columns)
        auth_list = list(auth_dum.columns)
        reg_col = []
        for i in work_list:
            reg_col.append(i)
        for j in year_list:
            reg_col.append(j)
        for k in auth_list:
            reg_col.append(k)
        exog_var = ['fpsb_auction','reserve_price','municipality','fiscal_efficiency']
        exog = exog_var + reg_col
        # Drop one dummy per group to avoid perfect collinearity with the constant.
        exog.remove('year_dum_2000.0')
        exog.remove('work_dum_OG01')
        exog.remove('auth_dum_3.0')
        exog.remove('auth_dum_1708.0')
        #1. reg
        fe_reg_1 = mt.reg(df1, o, exog, cluster = 'auth_anno', addcons= True, check_colinear = True)
        #2. reg
        fe_reg_2 = mt.reg(df1, o, exog, cluster = 'authority_code',addcons= True, check_colinear = True)
        # Rounded confidence bounds on the treatment coefficient.
        ci_1 = fe_reg_1.summary.loc['fpsb_auction',['CI_low', 'CI_high']].round()
        ci_2 = fe_reg_2.summary.loc['fpsb_auction',['CI_low', 'CI_high']].round()
        if o == 'discount':
            ci_discount = pd.DataFrame((ci_1,ci_2))
        elif o == 'delay_ratio':
            ci_delay_ratio = pd.DataFrame((ci_1,ci_2))
        elif o == 'overrun_ratio':
            ci_overrun_ratio = pd.DataFrame((ci_1,ci_2))
        else:
            ci_days_to_award = pd.DataFrame((ci_1,ci_2))
    ci = pd.concat([ci_discount,ci_delay_ratio,ci_overrun_ratio,ci_days_to_award],axis=1).reset_index()
    del ci['index']
    return(ci)
def table5_PanelB_even(data):
    """Table 5, Panel B (even columns): FE regressions with authority-specific
    trends on the Turin 'turin_pr_sample' experienced-control subsample.

    Same sample construction as table5_PanelB_odd, but the regressors add
    'trend', 'trend_treat' and the 'trend_pa_remained_*' authority trends,
    and drop the authority dummies.

    Parameters
    ----------
    data : pandas.DataFrame
        Full project dataset containing the sample flags, outcomes and
        controls referenced below.

    Returns
    -------
    pandas.DataFrame
        Two rows (one per clustering choice) of rounded [CI_low, CI_high]
        bounds for 'fpsb_auction', four outcomes side by side.
    """
    outcomes = ['discount','delay_ratio','overrun_ratio','days_to_award']
    t = 'turin_pr_sample'
    g = 'ctrl_exp'
    c_outcomes=1  # NOTE(review): unused here — kept for parity with sibling functions
    i = 5  # minimum pre/post experience (years) required for the sample
    df1 = data
    # Restrict to the experienced-control Turin sample with all controls observed.
    df1_tmp = df1[(df1[t]==1)& (df1[g +'_' + t]==1) & (df1['post_experience']>=i) & (df1['pre_experience']>=i)& (df1['post_experience'].isnull()==False) & (df1['pre_experience'].isnull()==False) & (df1['missing']==0) & (df1['fiscal_efficiency'].isnull()==False) & (df1['reserve_price'].isnull()==False)&(df1['municipality'].isnull()==False)]
    for o in outcomes:
        # Keep only rows where the current outcome is observed.
        df1 = df1_tmp[df1_tmp[o].isnull()==False]
        df1 = df1.reset_index()
        df1 = df1.sort_values(by = 'authority_code', ascending = True)
        # 'ind' flags the first row of each authority block after the sort.
        df1['ind'] = np.nan
        for i in range(len(df1)):
            if i == 0:
                df1.loc[i, 'ind'] = 1
            else:
                if df1.loc[i, 'authority_code'] != df1.loc[i-1, 'authority_code']:
                    df1.loc[i, 'ind'] = 1
        #create dummies for administration-year pairs
        all_years = df1['year'].unique()
        all_authorities = df1['authority_code'].unique()
        auth_year_reg_col = []
        for auth in all_authorities:
            for yr in all_years:
                df1['auth_year_' + str(auth)+'_' + str(yr)] = 0
                auth_year_reg_col.append('auth_year_' + str(auth)+'_' + str(yr))
                df1.loc[(df1['year']==yr) & (df1['authority_code']==auth), 'auth_year_' + str(auth)+'_' + str(yr) ] = 1
        ##regression for first stage
        #create dummies for work category
        all_categories = df1['work_category'].unique()
        for cat in all_categories:
            df1['cat_'+cat] = 0
            df1.loc[df1['work_category']==cat, 'cat_'+cat] =1
        ### Regression first stage
        #setting
        work_dum = pd.get_dummies(df1['work_category']).rename(columns=lambda x: 'work_dum_' + str(x))
        year_dum = pd.get_dummies(df1['year']).rename(columns=lambda x: 'year_dum_' + str(x))
        auth_dum = pd.get_dummies(df1['authority_code']).rename(columns=lambda x: 'auth_dum_' + str(x))
        dum_df = pd.concat([work_dum, year_dum, auth_dum],axis = 1)
        df1 = pd.concat([df1,dum_df],axis = 1)
        work_list = list(work_dum.columns)
        year_list = list(year_dum.columns)
        auth_list = list(auth_dum.columns)
        # Only work-category and year dummies enter; authority effects are
        # absorbed by the authority-specific trends below.
        reg_col = []
        for i in work_list:
            reg_col.append(i)
        for j in year_list:
            reg_col.append(j)
        exog_var = ['fpsb_auction','reserve_price','municipality','fiscal_efficiency','trend','trend_treat']
        for i in range(1,36):
            exog_var.append('trend_pa_remained_'+str(i))
        exog = exog_var + reg_col
        # Drop reference dummies and the collinear authority trends.
        exog.remove('year_dum_2000.0')
        exog.remove('year_dum_2006.0')
        exog.remove('work_dum_OG01')
        for i in [2,4,6,7,9,11,12,13,15,16,17,18,20,21,22,23,24,25,26,28,34,35]:
            exog.remove('trend_pa_remained_'+str(i))
        #1. reg
        fe_reg_1 = mt.reg(df1, o, exog, cluster = 'auth_anno', check_colinear = True)
        #2. reg
        fe_reg_2 = mt.reg(df1, o, exog, cluster = 'authority_code', check_colinear = True)
        # Rounded confidence bounds on the treatment coefficient.
        ci_1 = fe_reg_1.summary.loc['fpsb_auction',['CI_low', 'CI_high']].round()
        ci_2 = fe_reg_2.summary.loc['fpsb_auction',['CI_low', 'CI_high']].round()
        if o == 'discount':
            ci_discount = pd.DataFrame((ci_1,ci_2))
        elif o == 'delay_ratio':
            ci_delay_ratio = pd.DataFrame((ci_1,ci_2))
        elif o == 'overrun_ratio':
            ci_overrun_ratio = pd.DataFrame((ci_1,ci_2))
        else:
            ci_days_to_award = pd.DataFrame((ci_1,ci_2))
    ci = pd.concat([ci_discount,ci_delay_ratio,ci_overrun_ratio,ci_days_to_award],axis=1).reset_index()
    del ci['index']
    return(ci)
def table5_PanelA_odd_row3(data):
    """Table 5, Panel A (odd columns, row 3): Conley–Taber confidence
    intervals on the Turin 'turin_co_sample' experienced-control subsample.

    For each outcome the routine (1) runs the two clustered FE regressions
    used elsewhere in the table, (2) partials out category/control effects
    from the fitted values, (3) collapses to authority-year means, and
    (4) applies the Conley–Taber placebo procedure (treated authorities
    3090272 and 3070001) to build the reported interval.

    Parameters
    ----------
    data : pandas.DataFrame
        Full project dataset with sample flags, outcomes and controls.

    Returns
    -------
    list
        Concatenated [low, high] Conley–Taber bounds for discount,
        delay_ratio and overrun_ratio (days_to_award is excluded, see note).
    """
    # NOTE: days_to_award is omitted — no estimate could be produced for this
    # panel (original comment was in Korean).
    outcomes = ['discount','delay_ratio','overrun_ratio']
    t = 'turin_co_sample'
    g = 'ctrl_exp'
    c_outcomes=1  # NOTE(review): unused here — kept for parity with sibling functions
    i = 5  # minimum pre/post experience (years) required for the sample
    df1 = data
    # Restrict to the experienced-control Turin sample with all controls observed.
    df1_tmp = df1[(df1[t]==1)& (df1[g +'_' + t]==1) & (df1['post_experience']>=i) & (df1['pre_experience']>=i)& (df1['post_experience'].isnull()==False) & (df1['pre_experience'].isnull()==False) & (df1['missing']==0) & (df1['fiscal_efficiency'].isnull()==False) & (df1['reserve_price'].isnull()==False)&(df1['municipality'].isnull()==False)]
    for o in outcomes:
        # Keep only rows where the current outcome is observed.
        df1 = df1_tmp[df1_tmp[o].isnull()==False]
        df1 = df1.reset_index()
        df1 = df1.sort_values(by = 'authority_code', ascending = True)
        # 'ind' flags the first row of each authority block after the sort.
        df1['ind'] = np.nan
        for i in range(len(df1)):
            if i == 0:
                df1.loc[i, 'ind'] = 1
            else:
                if df1.loc[i, 'authority_code'] != df1.loc[i-1, 'authority_code']:
                    df1.loc[i, 'ind'] = 1
        #create dummies for administration-year pairs
        all_years = df1['year'].unique()
        all_authorities = df1['authority_code'].unique()
        auth_year_reg_col = []
        for auth in all_authorities:
            for yr in all_years:
                df1['auth_year_' + str(auth)+'_' + str(yr)] = 0
                auth_year_reg_col.append('auth_year_' + str(auth)+'_' + str(yr))
                df1.loc[(df1['year']==yr) & (df1['authority_code']==auth), 'auth_year_' + str(auth)+'_' + str(yr) ] = 1
        ##regression for first stage
        #create dummies for work category
        all_categories = df1['work_category'].unique()
        for cat in all_categories:
            df1['cat_'+cat] = 0
            df1.loc[df1['work_category']==cat, 'cat_'+cat] =1
        ### Regression first stage
        #setting
        work_dum = pd.get_dummies(df1['work_category']).rename(columns=lambda x: 'work_dum_' + str(x))
        year_dum = pd.get_dummies(df1['year']).rename(columns=lambda x: 'year_dum_' + str(x))
        auth_dum = pd.get_dummies(df1['authority_code']).rename(columns=lambda x: 'auth_dum_' + str(x))
        dum_df = pd.concat([work_dum, year_dum, auth_dum],axis = 1)
        df1 = pd.concat([df1,dum_df],axis = 1)
        work_list = list(work_dum.columns)
        year_list = list(year_dum.columns)
        auth_list = list(auth_dum.columns)
        reg_col = []
        for i in work_list:
            reg_col.append(i)
        for j in year_list:
            reg_col.append(j)
        for k in auth_list:
            reg_col.append(k)
        exog_var = ['fpsb_auction','reserve_price','municipality','fiscal_efficiency']
        exog = exog_var + reg_col
        # Drop reference dummies to avoid perfect collinearity with the constant.
        exog.remove('year_dum_2000.0')
        exog.remove('work_dum_OG01')
        exog.remove('auth_dum_3.0')
        exog.remove('auth_dum_1708.0')
        #1. reg
        fe_reg_1 = mt.reg(df1, o, exog, cluster = 'auth_anno', addcons= True, check_colinear = True)
        #2. reg
        fe_reg_2 = mt.reg(df1, o, exog, cluster = 'authority_code',addcons= True, check_colinear = True)
        ci_1 = fe_reg_1.summary.loc['fpsb_auction',['CI_low', 'CI_high']].round()
        ci_2 = fe_reg_2.summary.loc['fpsb_auction',['CI_low', 'CI_high']].round()
        if o == 'discount':
            ci_discount = pd.DataFrame((ci_1,ci_2))
        elif o == 'delay_ratio':
            ci_delay_ratio = pd.DataFrame((ci_1,ci_2))
        elif o == 'overrun_ratio':
            ci_overrun_ratio = pd.DataFrame((ci_1,ci_2))
        else:
            ci_days_to_award = pd.DataFrame((ci_1,ci_2))
        # Third regression: authority-year dummies + category dummies, with
        # outcome-specific drops of collinear columns (hardcoded from output).
        reg_col = auth_year_reg_col
        for cat in all_categories:
            reg_col.append('cat_'+cat)
        exog_var = ['reserve_price','municipality','fiscal_efficiency']
        exog = exog_var + reg_col
        if o != 'overrun_ratio':
            exog.remove('auth_year_4.0_2000.0')
            exog.remove('auth_year_6.0_2000.0')
            exog.remove('auth_year_16.0_2002.0')
            exog.remove('auth_year_16.0_2003.0')
            exog.remove('auth_year_16.0_2004.0')
            exog.remove('auth_year_1246.0_2000.0')
            exog.remove('cat_OS07')
            exog.remove('fiscal_efficiency')
        else:
            exog.remove('auth_year_4.0_2000.0')
            exog.remove('auth_year_6.0_2000.0')
            exog.remove('auth_year_16.0_2002.0')
            exog.remove('auth_year_16.0_2003.0')
            exog.remove('auth_year_16.0_2004.0')
            exog.remove('auth_year_1246.0_2000.0')
            exog.remove('fiscal_efficiency')
            exog.remove('auth_year_6.0_2005.0')
            exog.remove('auth_year_9.0_2006.0')
            exog.remove('auth_year_25.0_2002.0')
        if o != 'discount':
            exog.remove('auth_year_20.0_2001.0')
            exog.remove('auth_year_20.0_2003.0')
        fe_reg_3 = mt.reg(df1, o, exog, cluster = 'auth_anno', check_colinear = True)
        df1['dummy_cat'] = 0
        # Split the regressors into category dummies vs. plain controls so their
        # contributions can be subtracted from the fitted values below.
        beta_cat_list = []
        beta_list = []
        for i in range(len(exog)):
            for cat in all_categories:
                if exog[i] == 'cat_'+cat:
                    beta_cat_list.append(exog[i])
            for exo in exog_var:
                if exog[i] == exo:
                    beta_list.append(exog[i])
        # '<o>_beta' = fitted value minus category/control contributions, i.e.
        # the part of yhat attributable to the authority-year effects.
        if o == 'discount':
            discount_hat = fe_reg_3.yhat
            for i in range(len(df1)):
                for cat in beta_cat_list:
                    df1.loc[i, 'discount_beta'] = df1.loc[i,'dummy_cat']-df1.loc[i,cat] * fe_reg_3.beta[cat]
                for exo in beta_list:
                    df1.loc[i,'discount_beta'] = df1.loc[i,'discount_beta']- df1.loc[i,exo]*fe_reg_3.beta[exo]
            df1['discount_beta'] = discount_hat - df1['discount_beta']
        elif o == 'delay_ratio':
            delay_ratio_hat = fe_reg_3.yhat
            for i in range(len(df1)):
                for cat in beta_cat_list:
                    df1.loc[i, 'delay_ratio_beta'] = df1.loc[i,'dummy_cat']-df1.loc[i,cat] * fe_reg_3.beta[cat]
                for exo in beta_list:
                    df1.loc[i,'delay_ratio_beta'] = df1.loc[i,'delay_ratio_beta']- df1.loc[i,exo]*fe_reg_3.beta[exo]
            df1['delay_ratio_beta'] = delay_ratio_hat - df1['delay_ratio_beta']
        elif o == 'overrun_ratio':
            overrun_ratio_hat = fe_reg_3.yhat
            for i in range(len(df1)):
                for cat in beta_cat_list:
                    df1.loc[i, 'overrun_ratio_beta'] = df1.loc[i,'dummy_cat']-df1.loc[i,cat] * fe_reg_3.beta[cat]
                for exo in beta_list:
                    df1.loc[i,'overrun_ratio_beta'] = df1.loc[i,'overrun_ratio_beta']- df1.loc[i,exo]*fe_reg_3.beta[exo]
            df1['overrun_ratio_beta'] = overrun_ratio_hat - df1['overrun_ratio_beta']
        else:
            days_to_award_hat = fe_reg_3.yhat
            for i in range(len(df1)):
                for cat in beta_cat_list:
                    df1.loc[i, 'days_to_award_beta'] = df1.loc[i,'dummy_cat']-df1.loc[i,cat] * fe_reg_3.beta[cat]
                for exo in beta_list:
                    df1.loc[i,'days_to_award_beta'] = df1.loc[i,'days_to_award_beta']- df1.loc[i,exo]*fe_reg_3.beta[exo]
            df1['days_to_award_beta'] = days_to_award_hat - df1['days_to_award_beta']
        #create weigths - working well
        # Weight = share of observations in each authority-year cell.
        nrep_s = df1.groupby(['authority_code','year']).size().unstack(level=1)
        df1_nrep = pd.DataFrame(nrep_s)/len(df1)
        df1['weights'] = np.nan
        for auth in all_authorities:
            for yr in all_years:
                df1.loc[(df1['authority_code']==auth)&(df1['year']==yr),'weights'] = df1_nrep.loc[auth, yr]
        #Keep only beta coefficients for state*year terms
        collapse_list = [o +'_beta', 'authority_code', 'year', 'fpsb_auction', 'municipality', 'fiscal_efficiency', 'missing', 'turin_co_sample', 'weights'] + year_list + auth_list
        collapse = df1.groupby(['auth_anno'])[collapse_list].mean()
        df2 = collapse
        df2 = df2.reset_index()
        #Core conley-taber method
        exog_var = ['fpsb_auction', 'municipality', 'fiscal_efficiency']
        reg_col = []
        reg_col_new = []
        #reg_col.append(j)
        for i in auth_list:
            reg_col.append(i)
        for j in year_list:
            reg_col.append(j)
        # Keep only dummy columns that survived the collapse into df2.
        for k in reg_col:
            for j in df2.columns:
                if k ==j:
                    reg_col_new.append(j)
        exog = exog_var + reg_col_new
        X = df2.loc[:,exog]
        vif = calc_vif(X)
        #delete from col list
        # Drop columns whose VIF is NaN (perfectly collinear in df2).
        for i in range(len(vif)):
            if np.isnan(vif.loc[i, 'VIF']) == True:
                reg_col.remove(vif.loc[i, 'variables'])
        exog = exog_var + reg_col_new
        exog.remove('year_dum_2000.0')
        exog.remove('auth_dum_3.0')
        exog.remove('auth_dum_1866.0')
        # Weighted LS on the collapsed authority-year panel.
        wls = mt.reg(df2, o+'_beta', exog , cluster = 'auth_anno',addcons = True, awt_name = 'weights')
        #predic res
        # eta = residual plus the treatment contribution (removes all other effects).
        df2['eta'] = wls.resid
        df2['eta'] = df2['eta']+ df2['fpsb_auction']*wls.beta['fpsb_auction']
        #Create tilde
        # 3090272 / 3070001 are the treated authorities in this sample.
        df2 = df2.sort_values(by = 'year',ascending = True)
        df2_wls= df2[(df2['authority_code']==3090272) | (df2['authority_code']==3070001)]
        df2_wls = pd.DataFrame(df2_wls.groupby(['year'])['fpsb_auction'].mean())
        for i in range(len(df2)):
            if df2.loc[i, 'authority_code']==3090272 or df2.loc[i, 'authority_code']==3070001:
                for j in list(df2_wls.index):
                    if df2.loc[i, 'year'] == j:
                        df2.loc[i,'djtga'] = df2_wls.loc[j, 'fpsb_auction']
        df2_wls = pd.DataFrame(df2.groupby(['year'])['djtga'].sum())
        for i in range(len(df2)):
            for j in list(df2_wls.index):
                if df2.loc[i, 'year'] == j:
                    df2.loc[i,'djt'] = df2_wls.loc[j, 'djtga']
        df2 = df2.sort_values(by = 'authority_code', ascending = True)
        df2_wls = pd.DataFrame(df2.groupby(['authority_code'])['djt'].mean())
        for i in range(len(df2)):
            for j in list(df2_wls.index):
                if df2.loc[i, 'authority_code'] == j:
                    df2.loc[i,'meandjt'] = df2_wls.loc[j, 'djt']
        # Demeaned treatment exposure per authority.
        df2['dtil'] = df2['djt'] - df2['meandjt']
        #obtain diff in diff coeff
        #renormalize weights
        df2.loc[(df2['authority_code']==3090272) | (df2['authority_code']==3070001),'tot_weights'] = df2['weights'].sum()
        df2['new_weights'] = df2['weights']/df2['tot_weights']
        df2_wls = df2[(df2['authority_code']==3090272) | (df2['authority_code']==3070001)]
        wls_2 = mt.reg(df2_wls, 'eta' , 'dtil' , awt_name = 'new_weights', addcons = True,check_colinear = True)
        alpha = [wls_2.beta['dtil']]
        df2 = df2.drop(['tot_weights','new_weights'],axis = 1)
        #simulataneous for each public
        # Placebo coefficients: rerun the same regression for every untreated authority.
        asim = []
        for auth in all_authorities:
            if auth !=3090272 and auth !=3070001:
                df2.loc[df2['authority_code']==auth, 'tot_weights'] = df2['weights'].sum()
                df2['new_weights'] = df2['weights']/df2['tot_weights']
                df2_wls_3 = df2[df2['authority_code']==auth]
                wls_3 = mt.reg(df2_wls_3, 'eta' , 'dtil' , awt_name = 'new_weights',check_colinear=True)
                asim.append(wls_3.beta['dtil'])
                df2 = df2.drop(['tot_weights','new_weights'],axis = 1)
        for i in range(len(asim)-1):
            alpha.append(alpha[0])
        # ci = treated coefficient minus each placebo coefficient.
        asim_tmp = []
        for i in range(min(len(alpha),len(asim))):
            asim_tmp.append(alpha[i] - asim[i])
        #asim = asim_tmp
        df2['ci'] = np.nan
        df2['asim'] = np.nan
        for i in range(len(asim)):
            df2.loc[i, 'ci'] = asim_tmp[i]
            df2.loc[i, 'asim'] = asim[i]
        #form confidence level
        # Percentile indices into the placebo distribution (Conley–Taber).
        numst=len(asim)+1
        i025=math.floor(0.025*(numst-1))
        i025=max([i025,1])
        i975=math.ceil(0.975*(numst-1))
        i05=math.floor(0.050*(numst-1))
        i05=max([i05,i025+1])
        i95=math.ceil(0.950*(numst-1))
        i95=min([i95,numst-2])
        stima_ta = alpha[0]
        # NOTE(review): sort_values result is not assigned back — df2 stays in
        # its previous order when 'ci' rows are indexed below; confirm intended.
        df2.sort_values(by = 'asim',ascending = True)
        ci_ta025 = min([df2.loc[i025,'ci'], df2.loc[i975, 'ci'] ])
        ci_ta975 = max([df2.loc[i025,'ci'], df2.loc[i975, 'ci'] ])
        if o == 'discount':
            discount_list = [round(ci_ta025), round(ci_ta975)]
        elif o == 'delay_ratio':
            delay_ratio_list = [ci_ta025, ci_ta975]
        elif o == 'overrun_ratio':
            overrun_ratio_list = [ci_ta025, ci_ta975]
        else:
            days_to_award_list = [ci_ta025, ci_ta975]
    final_list = discount_list + delay_ratio_list + overrun_ratio_list
    return(final_list)
| 41.295794
| 341
| 0.58045
| 4,358
| 30,435
| 3.791418
| 0.065168
| 0.014525
| 0.018641
| 0.015312
| 0.815651
| 0.787024
| 0.743751
| 0.730436
| 0.716395
| 0.714035
| 0
| 0.045919
| 0.262987
| 30,435
| 737
| 342
| 41.295794
| 0.690696
| 0.035748
| 0
| 0.705989
| 0
| 0
| 0.177419
| 0.013217
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012704
| false
| 0
| 0.032668
| 0
| 0.045372
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
feae61ecb44693302ec14df676ec42baba5e9b55
| 154
|
py
|
Python
|
Task/Averages-Arithmetic-mean/Python/averages-arithmetic-mean-3.py
|
mullikine/RosettaCodeData
|
4f0027c6ce83daa36118ee8b67915a13cd23ab67
|
[
"Info-ZIP"
] | 5
|
2021-01-29T20:08:05.000Z
|
2022-03-22T06:16:05.000Z
|
Task/Averages-Arithmetic-mean/Python/averages-arithmetic-mean-3.py
|
seanwallawalla-forks/RosettaCodeData
|
9ad63ea473a958506c041077f1d810c0c7c8c18d
|
[
"Info-ZIP"
] | null | null | null |
Task/Averages-Arithmetic-mean/Python/averages-arithmetic-mean-3.py
|
seanwallawalla-forks/RosettaCodeData
|
9ad63ea473a958506c041077f1d810c0c7c8c18d
|
[
"Info-ZIP"
] | 1
|
2021-04-13T04:19:31.000Z
|
2021-04-13T04:19:31.000Z
|
def average(x):
    """Arithmetic mean of the values in x; 0 when x is empty."""
    if not x:
        return 0
    return sum(x) / float(len(x))

# Demo: a plain mean, then one showing float cancellation with huge terms.
print(average([0, 0, 3, 1, 4, 1, 5, 9, 0, 0]))
print(average([1e20, -1e-20, 3, 1, 4, 1, 5, 9, -1e20, 1e-20]))
| 30.8
| 54
| 0.61039
| 39
| 154
| 2.410256
| 0.487179
| 0.12766
| 0.276596
| 0.085106
| 0.12766
| 0.12766
| 0
| 0
| 0
| 0
| 0
| 0.210145
| 0.103896
| 154
| 4
| 55
| 38.5
| 0.471014
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0.25
| 0.5
| 0.5
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
|
0
| 7
|
22be88cefe3b588522344ff5bb414c0e6fe5f2dd
| 781
|
py
|
Python
|
timetable.py
|
isawunicorn/python-vk-bot
|
8d6499bf0f706fae55ed432d69525121a7c95d9b
|
[
"Unlicense"
] | null | null | null |
timetable.py
|
isawunicorn/python-vk-bot
|
8d6499bf0f706fae55ed432d69525121a7c95d9b
|
[
"Unlicense"
] | null | null | null |
timetable.py
|
isawunicorn/python-vk-bot
|
8d6499bf0f706fae55ed432d69525121a7c95d9b
|
[
"Unlicense"
] | 1
|
2020-05-23T19:08:41.000Z
|
2020-05-23T19:08:41.000Z
|
# Static lesson timetable keyed by ISO date (YYYY-MM-DD).
# Feb 26 and Feb 27 share an identical schedule, so the long literal is kept
# once instead of duplicated (original file repeated it verbatim).
# NOTE(review): the "sdsd" entries look like placeholders — confirm before use.
_SCHEDULE_2020_02_26_27 = "8:00 - 9:30: Разработка программных приложений (лабораторная) Пушкарев А.Н., ауд. 404\n9:45 - 11:15: Теория систем и системный анализ (лабораторная) Донкова И.А, ауд. 412\n11:30 - 1:30: Моделирование бизнес-процессов (лекция), ауд. 216\n13:30 - 15:00: Проектирование информационных систем (лабораторная) Ивашко А., ауд. 411"
timetable = {
    "2020-02-26": _SCHEDULE_2020_02_26_27,
    "2020-02-27": _SCHEDULE_2020_02_26_27,
    "2020-02-28": "sdsd",
    "2020-02-29": "sdsd"
}
| 111.571429
| 350
| 0.68758
| 115
| 781
| 4.669565
| 0.382609
| 0.044693
| 0.014898
| 0.022346
| 0.931099
| 0.931099
| 0.931099
| 0.931099
| 0.931099
| 0.931099
| 0
| 0.172043
| 0.166453
| 781
| 6
| 351
| 130.166667
| 0.652842
| 0
| 0
| 0
| 0
| 0.333333
| 0.888604
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
22ddb5a4c50acfd44a1a56ac98487a908dc314fe
| 9,516
|
py
|
Python
|
test/routing/test_reservation.py
|
nmluci/ZonaCipta-Backend
|
8e56740be7bcc111a54887aaf09bbc4a8ff90d8b
|
[
"MIT"
] | null | null | null |
test/routing/test_reservation.py
|
nmluci/ZonaCipta-Backend
|
8e56740be7bcc111a54887aaf09bbc4a8ff90d8b
|
[
"MIT"
] | null | null | null |
test/routing/test_reservation.py
|
nmluci/ZonaCipta-Backend
|
8e56740be7bcc111a54887aaf09bbc4a8ff90d8b
|
[
"MIT"
] | null | null | null |
from datetime import datetime
import pytz
import hashlib
import requests, json, pytest, os
# Shared fixtures for the reservation-flow API tests.
uname = "fuyunaa"
fname = "Lynne"
lname = "Fuyuna"

order_id = 1
product_id = 1
product_name = "Lunar Room"
product_price = 19999999
product_sum = 4
order_total_price = product_price * product_sum

reserve_time_str = "2022-01-01 16:04:20"
# Parse the same literal so the string and datetime forms stay in sync.
reserve_time = datetime.strptime(reserve_time_str, "%Y-%m-%d %H:%M:%S")

# Fresh random hex key per run; not referenced by the tests below — TODO confirm.
sign_key = os.urandom(16).hex()
def generateSecureKey(id, total_cost, first_name, last_name, username):
    """Return the hex SHA-256 signature over order id, cost and user identity.

    The digest covers str(id) + str(total_cost) + first_name + last_name +
    str(username), concatenated in that order and UTF-8 encoded.
    """
    payload = f"{id}{total_cost}{first_name}{last_name}{username}"
    return hashlib.sha256(payload.encode("utf-8")).hexdigest()
@pytest.mark.order(5)
@pytest.mark.dependency()
def test_reserve_room():
    """Happy path: booking a room echoes the order with all fixture fields."""
    data = {
        "username": uname,
        "product_id": product_id,
        "reserved_time": reserve_time_str,
        "sum": product_sum
    }
    # Requires a locally running API instance; token and URL are hardcoded.
    res = requests.post("http://localhost:5000/api/book/", headers={
        "ZC-API-TOKEN": "KoreWaABeriSekyurEiPiAiKagiNanoDesu",
        "Content-Type": "application/json"
    }, json=data)
    res_json = res.json()
    assert res_json.get("status") == "OK"
    assert res_json.get("error_message") == None
    assert res_json.get("data") != None
    jsonData = res_json.get("data")[0]
    assert jsonData.get("order_id") == 1
    assert jsonData.get("username") == uname
    assert jsonData.get("product_id") == product_id
    assert jsonData.get("product_name") == product_name
    assert jsonData.get("reserved_time") == reserve_time_str
    assert jsonData.get("total_price") == order_total_price
@pytest.mark.order(5)
@pytest.mark.dependency()
def test_reserve_room_false_username():
    """Booking with an unknown username must fail with "Invalid Username"."""
    data = {
        "username": "Fuuu",
        "product_id": product_id,
        "reserved_time": reserve_time_str,
        "sum": product_sum
    }
    res = requests.post("http://localhost:5000/api/book/", headers={
        "ZC-API-TOKEN": "KoreWaABeriSekyurEiPiAiKagiNanoDesu",
        "Content-Type": "application/json"
    }, json=data)
    res_json = res.json()
    assert res_json.get("status") == "ERROR"
    assert res_json.get("error_message") == "Invalid Username"
    assert res_json.get("data") == None
@pytest.mark.order(5)
@pytest.mark.dependency()
def test_reserve_room_false_product():
    """Booking a nonexistent product id must fail with "Invalid Product Id"."""
    data = {
        "username": uname,
        "product_id": 5,
        "reserved_time": reserve_time_str,
        "sum": product_sum
    }
    res = requests.post("http://localhost:5000/api/book/", headers={
        "ZC-API-TOKEN": "KoreWaABeriSekyurEiPiAiKagiNanoDesu",
        "Content-Type": "application/json"
    }, json=data)
    res_json = res.json()
    assert res_json.get("status") == "ERROR"
    assert res_json.get("error_message") == "Invalid Product Id"
    assert res_json.get("data") == None
@pytest.mark.order(5)
@pytest.mark.dependency(depends=["test_reserve_room"])
def test_get_order_info():
    """After booking, fetching the user's orders returns the unpaid order."""
    res = requests.get(f"http://localhost:5000/api/book/{uname}", headers={
        "ZC-API-TOKEN": "KoreWaABeriSekyurEiPiAiKagiNanoDesu",
        "Content-Type": "application/json"
    })
    res_json = res.json()
    assert res_json.get("status") == "OK"
    assert res_json.get("error_message") == None
    assert res_json.get("data") != None
    jsonData = res_json.get("data")[0]
    assert jsonData.get("username") == uname
    assert jsonData.get("order_id") == 1
    assert jsonData.get("grand_total") == order_total_price
    assert jsonData.get("done") == False
    assert len(jsonData.get("items")) > 0
@pytest.mark.order(5)
@pytest.mark.dependency(depends=["test_reserve_room"])
def test_get_order_info_invalid_username():
    """Fetching orders for an unknown username must fail with "Invalid Username"."""
    res = requests.get("http://localhost:5000/api/book/Fuuu", headers={
        "ZC-API-TOKEN": "KoreWaABeriSekyurEiPiAiKagiNanoDesu",
        "Content-Type": "application/json"
    })
    res_json = res.json()
    assert res_json.get("status") == "ERROR"
    assert res_json.get("error_message") == "Invalid Username"
    assert res_json.get("data") == None
@pytest.mark.order(4)
@pytest.mark.dependency()
def test_get_order_info_no_orders():
    """Before any booking (order 4 < 5), the user has no unpaid orders."""
    res = requests.get(f"http://localhost:5000/api/book/{uname}", headers={
        "ZC-API-TOKEN": "KoreWaABeriSekyurEiPiAiKagiNanoDesu",
        "Content-Type": "application/json"
    })
    res_json = res.json()
    assert res_json.get("status") == "ERROR"
    assert res_json.get("error_message") == "Couldn't find any unpaid orders"
    assert res_json.get("data") == None
@pytest.mark.order(6)
@pytest.mark.dependency(depends=["test_reserve_room"])
def test_verify_payment_valid():
    """A payment with a matching sign key and credentials is accepted."""
    data = {
        "username": uname,
        "first_name": fname,
        "last_name": lname,
        "order_id": order_id,
        "total_price": order_total_price,
        "sign_key": generateSecureKey(order_id, order_total_price, fname, lname, uname)
    }
    res = requests.post(f"http://localhost:5000/api/book/{uname}/pay", headers={
        "ZC-API-TOKEN": "KoreWaABeriSekyurEiPiAiKagiNanoDesu",
        "Content-Type": "application/json"
    }, json=data)
    res_json = res.json()
    assert res_json.get("status") == "OK"
    assert res_json.get("error_message") == None
    assert res_json.get("data") == None
@pytest.mark.order(6)
@pytest.mark.dependency(depends=["test_reserve_room"])
def test_verify_payment_invalid_cost():
    """A tampered total_price (sign key computed over the wrong amount)
    must be rejected with "Invalid Sign Key"."""
    data = {
        "username": uname,
        "first_name": fname,
        "last_name": lname,
        "order_id": order_id,
        "total_price": 928502522,
        "sign_key": generateSecureKey(order_id, 928502522, fname, lname, uname)
    }
    res = requests.post(f"http://localhost:5000/api/book/{uname}/pay", headers={
        "ZC-API-TOKEN": "KoreWaABeriSekyurEiPiAiKagiNanoDesu",
        "Content-Type": "application/json"
    }, json=data)
    res_json = res.json()
    assert res_json.get("status") == "ERROR"
    assert res_json.get("error_message") == "Invalid Sign Key"
    assert res_json.get("data") == None
@pytest.mark.order(6)
@pytest.mark.dependency(depends=["test_reserve_room"])
def test_verify_payment_invalid_first_name():
    """A first name not matching the signed identity must be rejected
    with "Invalid Credentials"."""
    data = {
        "username": uname,
        "first_name": "Charlie",
        "last_name": lname,
        "order_id": order_id,
        "total_price": order_total_price,
        "sign_key": generateSecureKey(order_id, order_total_price, fname, lname, uname)
    }
    res = requests.post(f"http://localhost:5000/api/book/{uname}/pay", headers={
        "ZC-API-TOKEN": "KoreWaABeriSekyurEiPiAiKagiNanoDesu",
        "Content-Type": "application/json"
    }, json=data)
    res_json = res.json()
    assert res_json.get("status") == "ERROR"
    assert res_json.get("error_message") == "Invalid Credentials"
    assert res_json.get("data") == None
@pytest.mark.order(6)
@pytest.mark.dependency(depends=["test_reserve_room"])
def test_verify_payment_invalid_last_name():
    """A last name not matching the signed identity must be rejected
    with "Invalid Credentials"."""
    data = {
        "username": uname,
        "first_name": fname,
        "last_name": "Musk",
        "order_id": order_id,
        "total_price": order_total_price,
        "sign_key": generateSecureKey(order_id, order_total_price, fname, lname, uname)
    }
    res = requests.post(f"http://localhost:5000/api/book/{uname}/pay", headers={
        "ZC-API-TOKEN": "KoreWaABeriSekyurEiPiAiKagiNanoDesu",
        "Content-Type": "application/json"
    }, json=data)
    res_json = res.json()
    assert res_json.get("status") == "ERROR"
    assert res_json.get("error_message") == "Invalid Credentials"
    assert res_json.get("data") == None
@pytest.mark.order(6)
@pytest.mark.dependency(depends=["test_reserve_room"])
def test_verify_payment_invalid_username():
    """Paying under an unknown username must be rejected with
    "Invalid Username" (sign key still computed from the fixture identity).
    """
    data = {
        "username": "fyn15",
        "first_name": fname,
        "last_name": lname,
        "order_id": order_id,
        "total_price": order_total_price,
        "sign_key": generateSecureKey(order_id, order_total_price, fname, lname, uname)
    }
    # Fix: the URL was an f-string with no placeholders (lint F541) — plain literal.
    res = requests.post("http://localhost:5000/api/book/fyn15/pay", headers={
        "ZC-API-TOKEN": "KoreWaABeriSekyurEiPiAiKagiNanoDesu",
        "Content-Type": "application/json"
    }, json=data)
    res_json = res.json()
    assert res_json.get("status") == "ERROR"
    assert res_json.get("error_message") == "Invalid Username"
    assert res_json.get("data") == None
@pytest.mark.order(6)
@pytest.mark.dependency(depends=["test_reserve_room"])
def test_verify_payment_invalid_order_id():
    """Paying a nonexistent order id must be rejected with "Invalid Order Id"."""
    data = {
        "username": uname,
        "first_name": fname,
        "last_name": lname,
        "order_id": 9,
        "total_price": order_total_price,
        "sign_key": generateSecureKey(order_id, order_total_price, fname, lname, uname)
    }
    res = requests.post(f"http://localhost:5000/api/book/{uname}/pay", headers={
        "ZC-API-TOKEN": "KoreWaABeriSekyurEiPiAiKagiNanoDesu",
        "Content-Type": "application/json"
    }, json=data)
    res_json = res.json()
    assert res_json.get("status") == "ERROR"
    assert res_json.get("error_message") == "Invalid Order Id"
    assert res_json.get("data") == None
@pytest.mark.order(7)
@pytest.mark.dependency(depends=["test_reserve_room"])
def test_zone_get_orders():
    """Zone 1 order listing is non-empty once a reservation exists."""
    res = requests.get("http://localhost:5000/api/zones/1/orders", headers={
        "ZC-API-TOKEN": "KoreWaABeriSekyurEiPiAiKagiNanoDesu",
    })
    res_json = res.json()
    assert res_json.get("status") == "OK"
    assert len(res_json.get("data")) != 0
| 33.507042
| 87
| 0.657945
| 1,193
| 9,516
| 5.041073
| 0.099749
| 0.076821
| 0.066511
| 0.098437
| 0.862488
| 0.820253
| 0.802627
| 0.79132
| 0.777685
| 0.758397
| 0
| 0.018557
| 0.184531
| 9,516
| 284
| 88
| 33.507042
| 0.756443
| 0.000946
| 0
| 0.709016
| 0
| 0
| 0.292236
| 0.050284
| 0
| 0
| 0
| 0
| 0.20082
| 1
| 0.057377
| false
| 0
| 0.016393
| 0
| 0.077869
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fe1e1119292ecaa4ea750fed9bad95c5d4377796
| 22,758
|
py
|
Python
|
2017/2017_11b.py
|
davidxiao93/Advent-of-Code
|
29503100ae4eb46b048fc3ab68ff0181c6f00ee5
|
[
"MIT"
] | null | null | null |
2017/2017_11b.py
|
davidxiao93/Advent-of-Code
|
29503100ae4eb46b048fc3ab68ff0181c6f00ee5
|
[
"MIT"
] | null | null | null |
2017/2017_11b.py
|
davidxiao93/Advent-of-Code
|
29503100ae4eb46b048fc3ab68ff0181c6f00ee5
|
[
"MIT"
] | null | null | null |
input = """ne,nw,se,nw,ne,s,s,s,sw,ne,sw,sw,sw,sw,sw,nw,nw,sw,se,ne,nw,nw,nw,nw,nw,nw,n,n,s,nw,n,n,nw,n,n,n,n,ne,n,n,ne,n,n,s,n,se,ne,ne,ne,n,se,ne,ne,ne,ne,se,ne,ne,ne,ne,ne,ne,ne,sw,ne,ne,s,se,se,se,s,ne,ne,se,ne,ne,sw,ne,se,se,se,se,se,se,s,s,se,se,ne,se,se,se,se,ne,se,se,s,se,se,s,n,s,se,s,ne,se,se,nw,ne,s,n,s,se,se,s,se,se,se,s,sw,se,s,s,s,n,se,se,s,se,s,se,se,s,se,se,s,s,ne,s,s,se,n,s,s,s,s,sw,s,s,n,s,sw,n,s,s,s,s,s,sw,s,s,s,s,s,sw,s,se,ne,s,s,s,s,s,sw,s,sw,s,nw,sw,s,s,sw,sw,nw,s,sw,se,sw,sw,sw,s,se,s,sw,se,nw,sw,s,n,s,sw,n,sw,n,sw,sw,sw,nw,nw,sw,sw,sw,s,sw,s,sw,sw,sw,sw,sw,sw,sw,se,n,nw,s,se,sw,sw,se,sw,se,sw,nw,sw,s,sw,sw,sw,sw,s,s,sw,n,nw,nw,se,sw,nw,sw,sw,sw,nw,ne,s,sw,nw,n,nw,n,sw,nw,ne,n,n,sw,nw,sw,nw,nw,n,nw,nw,nw,se,nw,sw,se,nw,sw,nw,nw,nw,nw,sw,nw,n,sw,nw,nw,nw,nw,nw,nw,sw,nw,nw,sw,nw,sw,nw,sw,nw,sw,sw,nw,nw,nw,sw,nw,se,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,s,nw,nw,nw,sw,se,nw,nw,nw,nw,nw,nw,n,nw,sw,nw,nw,nw,nw,nw,nw,nw,n,nw,nw,nw,sw,s,ne,nw,nw,s,se,ne,nw,nw,nw,nw,nw,sw,nw,ne,se,nw,nw,nw,s,n,ne,n,nw,nw,n,s,nw,se,nw,s,sw,n,n,nw,n,nw,nw,nw,n,n,nw,n,nw,nw,n,ne,s,n,n,n,sw,nw,nw,ne,ne,n,nw,n,nw,n,n,se,nw,n,s,n,nw,n,n,nw,se,sw,nw,n,nw,n,n,sw,n,n,n,nw,n,nw,n,n,n,n,ne,n,n,nw,n,nw,n,n,n,nw,n,s,nw,n,n,se,sw,n,n,n,ne,se,n,n,se,s,n,n,n,nw,n,n,n,ne,n,n,n,n,n,s,n,n,n,n,n,n,n,n,n,ne,n,n,n,n,ne,n,ne,se,n,n,n,n,sw,sw,n,n,ne,n,n,n,sw,ne,n,n,s,ne,n,ne,n,ne,ne,n,s,nw,sw,n,n,ne,ne,n,n,n,n,n,n,nw,ne,n,n,n,n,ne,n,ne,ne,n,se,n,s,n,n,n,n,n,s,n,ne,n,n,ne,sw,ne,ne,ne,n,ne,nw,ne,n,n,n,se,n,se,ne,ne,ne,se,ne,n,n,ne,nw,n,n,n,ne,sw,n,ne,nw,nw,ne,ne,ne,ne,ne,sw,se,ne,ne,ne,ne,n,ne,n,n,sw,ne,nw,n,ne,ne,ne,ne,ne,sw,ne,n,ne,n,ne,nw,ne,ne,sw,ne,nw,nw,ne,ne,ne,ne,n,sw,se,ne,ne,ne,ne,ne,ne,ne,n,n,sw,ne,ne,se,n,ne,sw,ne,ne,ne,n,ne,ne,ne,ne,sw,ne,ne,ne,ne,sw,ne,ne,ne,ne,ne,nw,ne,ne,ne,ne,ne,ne,nw,ne,ne,se,ne,ne,se,ne,ne,ne,ne,ne,ne,s,se,s,ne,ne,ne,n,ne,ne,n,ne,ne,ne,nw,ne,s,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,se,ne,ne,ne,ne,ne,se,ne,ne,s,s,nw,ne,ne,ne,se,ne,s,n,se,n,ne,ne,s,se,se,se,ne,ne,ne,
se,sw,se,ne,ne,nw,ne,se,ne,ne,ne,ne,se,ne,nw,ne,ne,ne,se,ne,se,s,se,se,ne,ne,ne,ne,ne,s,ne,ne,ne,ne,ne,ne,ne,ne,nw,ne,ne,se,sw,ne,se,ne,nw,se,n,se,n,ne,ne,ne,ne,s,se,ne,se,se,ne,ne,ne,ne,ne,se,ne,se,ne,se,ne,se,se,se,ne,nw,ne,nw,ne,ne,se,s,ne,s,n,se,sw,ne,se,ne,ne,ne,se,se,ne,ne,se,se,se,se,ne,n,sw,ne,se,ne,sw,sw,nw,s,se,se,ne,se,n,ne,se,s,sw,ne,se,se,s,se,se,se,se,se,sw,ne,se,s,se,se,s,se,ne,se,ne,ne,se,ne,se,ne,se,se,ne,ne,ne,se,ne,se,n,se,se,se,s,ne,se,se,ne,s,s,nw,ne,se,se,ne,se,se,se,se,se,se,se,s,se,se,sw,se,sw,se,se,se,ne,se,s,se,se,se,se,se,se,se,se,ne,se,s,se,se,se,se,sw,n,se,se,se,ne,se,se,se,se,s,se,se,sw,se,se,se,sw,se,n,se,se,se,se,se,nw,se,se,sw,se,se,s,se,se,se,ne,se,se,se,se,s,se,se,se,se,s,se,se,se,s,se,se,s,se,se,se,se,se,se,se,se,se,se,n,se,s,se,se,se,se,se,ne,se,se,se,ne,se,se,se,se,n,se,se,se,se,se,se,se,se,se,se,s,s,se,ne,s,se,s,se,s,se,se,s,s,s,se,s,se,se,ne,s,s,se,se,se,s,s,se,se,se,s,se,se,ne,s,n,s,s,se,ne,se,se,sw,se,se,se,se,s,n,s,s,sw,sw,se,s,se,ne,se,se,se,s,se,n,s,s,sw,se,se,se,s,se,se,s,sw,s,s,se,se,nw,s,s,se,nw,n,s,se,s,se,se,sw,s,s,se,se,s,se,n,nw,se,se,s,s,s,se,s,s,se,s,se,se,s,n,s,se,s,se,s,sw,nw,ne,s,s,se,se,se,se,s,se,se,s,s,s,se,se,se,sw,se,se,n,se,se,n,s,se,se,s,s,se,ne,sw,se,se,s,se,s,s,se,se,s,se,se,s,s,nw,se,se,s,se,n,s,n,n,se,s,se,se,se,sw,s,s,n,s,s,se,ne,se,nw,se,s,s,s,s,s,se,ne,s,se,s,se,n,sw,nw,s,s,s,s,nw,se,sw,se,s,s,sw,s,nw,se,nw,s,s,nw,n,s,sw,se,s,nw,s,se,se,n,ne,s,s,s,s,se,s,se,s,se,s,s,s,se,s,s,s,sw,s,s,s,se,s,s,s,s,se,s,s,s,ne,se,s,se,s,s,s,se,n,s,se,se,s,se,ne,s,n,s,s,se,ne,s,s,s,s,s,s,s,s,s,s,s,s,s,nw,se,s,n,se,s,se,s,ne,s,s,s,ne,n,s,s,s,s,s,s,s,s,s,s,s,s,s,s,s,s,se,s,s,n,s,n,s,s,s,s,s,s,s,s,s,s,s,s,s,s,s,s,s,s,s,s,s,s,s,s,s,s,s,s,s,s,s,s,s,n,s,s,s,s,s,s,s,s,s,sw,s,nw,sw,s,nw,s,s,s,nw,s,se,nw,s,s,s,s,s,s,sw,s,s,nw,s,sw,s,s,sw,s,s,s,s,ne,s,s,s,sw,sw,s,sw,s,s,s,s,se,s,s,sw,sw,s,s,s,s,s,n,nw,s,s,s,ne,s,n,s,s,sw,s,s,s,s,s,s,n,s,s,ne,s,ne,sw,s,ne,sw,s,s,ne,s,s,s,s,s,sw,s,s,s,s,sw,sw,sw,sw,s,se,sw,s,s,s,s,s,s,s,s,s,n,s
w,s,s,s,sw,s,sw,sw,n,ne,s,s,s,s,s,s,s,s,s,n,s,sw,sw,s,s,s,sw,s,s,s,sw,s,s,se,sw,s,sw,sw,sw,s,ne,nw,sw,n,s,s,sw,s,s,s,nw,s,s,sw,s,se,sw,sw,sw,s,sw,s,s,n,ne,s,nw,s,s,s,n,ne,s,s,sw,sw,sw,nw,s,s,sw,s,sw,sw,sw,s,sw,sw,se,se,s,s,s,s,s,sw,se,s,s,sw,s,s,nw,s,s,sw,sw,s,s,s,s,s,sw,n,sw,sw,sw,sw,sw,sw,s,sw,s,s,se,sw,s,sw,sw,sw,sw,s,n,s,s,sw,s,nw,s,s,ne,nw,sw,sw,sw,sw,s,s,s,s,s,sw,s,s,s,sw,sw,s,sw,s,s,s,sw,sw,s,s,s,nw,sw,s,sw,ne,s,sw,sw,nw,s,sw,s,sw,nw,sw,ne,ne,sw,sw,sw,sw,s,sw,s,sw,se,n,s,s,sw,sw,se,nw,sw,sw,sw,ne,sw,sw,se,sw,sw,se,s,ne,s,se,nw,sw,sw,sw,sw,s,s,sw,sw,sw,se,sw,nw,sw,se,ne,s,sw,sw,n,s,s,sw,nw,s,sw,sw,sw,s,ne,sw,nw,n,sw,n,sw,s,sw,sw,sw,s,sw,s,s,s,nw,s,sw,se,sw,sw,s,sw,s,s,s,sw,sw,n,sw,sw,sw,ne,sw,se,sw,s,nw,ne,ne,s,s,s,sw,ne,sw,sw,sw,sw,sw,sw,se,sw,s,sw,sw,sw,s,ne,s,sw,sw,s,nw,sw,sw,s,sw,s,s,sw,n,sw,s,sw,nw,nw,sw,sw,sw,s,ne,s,sw,sw,sw,s,n,sw,sw,s,sw,sw,sw,sw,n,sw,sw,ne,sw,ne,nw,sw,n,nw,s,s,sw,s,sw,sw,sw,sw,sw,sw,sw,se,sw,sw,sw,nw,sw,sw,n,ne,sw,sw,sw,sw,ne,sw,sw,sw,sw,sw,s,sw,sw,sw,sw,sw,sw,sw,sw,sw,ne,s,sw,n,s,sw,sw,sw,sw,sw,sw,ne,s,sw,sw,nw,s,sw,sw,sw,nw,sw,sw,sw,sw,sw,sw,sw,ne,sw,sw,sw,sw,sw,sw,sw,nw,se,sw,sw,sw,sw,se,sw,sw,se,sw,sw,se,sw,sw,nw,sw,sw,sw,sw,sw,sw,sw,sw,nw,sw,se,sw,s,sw,sw,sw,n,sw,sw,nw,sw,sw,sw,sw,sw,nw,sw,sw,sw,sw,sw,sw,sw,n,ne,n,sw,sw,nw,sw,sw,sw,nw,sw,sw,n,n,sw,sw,sw,sw,sw,se,n,nw,sw,nw,nw,ne,n,s,n,sw,sw,sw,sw,sw,sw,sw,sw,sw,sw,nw,sw,sw,sw,sw,se,se,sw,sw,nw,sw,sw,nw,sw,s,nw,sw,sw,sw,sw,sw,sw,sw,sw,sw,nw,nw,sw,ne,sw,sw,sw,se,sw,sw,sw,ne,n,s,sw,nw,sw,ne,ne,sw,sw,sw,sw,sw,nw,sw,nw,sw,sw,sw,sw,nw,s,nw,sw,sw,sw,se,n,sw,sw,sw,sw,sw,nw,sw,sw,sw,sw,nw,sw,nw,sw,sw,nw,s,s,sw,sw,ne,nw,sw,sw,sw,sw,sw,sw,n,ne,ne,sw,se,n,nw,nw,nw,s,sw,ne,sw,se,ne,sw,sw,sw,sw,sw,sw,s,sw,nw,sw,n,sw,nw,nw,sw,sw,sw,sw,sw,sw,ne,nw,sw,sw,n,sw,sw,sw,sw,sw,n,sw,nw,nw,sw,sw,sw,sw,s,sw,sw,se,sw,sw,nw,sw,sw,sw,nw,sw,nw,n,sw,sw,sw,n,sw,nw,nw,sw,nw,nw,nw,n,sw,s,nw,nw,sw,sw,ne,sw,sw,s,s,se,nw,sw,n,se,sw,nw,sw,n,nw,nw,se,nw,nw,sw,nw,sw,nw,s,sw,sw,sw,sw,ne,sw,sw,sw,sw,nw,nw,se,nw,nw,sw,nw,
nw,sw,sw,ne,n,sw,nw,s,ne,sw,ne,sw,sw,sw,sw,nw,nw,sw,sw,sw,sw,sw,sw,sw,s,sw,n,nw,sw,sw,sw,sw,nw,sw,s,nw,sw,sw,nw,nw,sw,sw,nw,sw,nw,sw,sw,sw,nw,nw,sw,sw,ne,nw,nw,sw,sw,sw,sw,sw,nw,ne,nw,sw,sw,ne,sw,nw,n,se,nw,sw,sw,se,se,nw,s,sw,sw,sw,sw,sw,s,nw,nw,sw,sw,sw,nw,sw,nw,n,nw,nw,s,s,sw,ne,n,sw,nw,sw,nw,sw,nw,nw,sw,sw,sw,sw,nw,sw,s,nw,nw,sw,sw,nw,nw,nw,sw,se,ne,sw,nw,nw,n,sw,nw,s,se,nw,nw,nw,ne,nw,sw,sw,nw,s,ne,n,s,nw,nw,nw,sw,n,nw,s,se,nw,nw,nw,sw,nw,ne,nw,nw,nw,sw,sw,se,nw,nw,sw,ne,sw,nw,sw,nw,sw,sw,sw,nw,n,nw,sw,s,sw,nw,sw,sw,sw,nw,s,ne,sw,nw,nw,sw,se,sw,nw,nw,nw,nw,nw,nw,sw,n,nw,nw,nw,s,nw,sw,nw,sw,sw,s,nw,sw,nw,sw,nw,sw,sw,sw,nw,nw,sw,sw,nw,sw,nw,nw,nw,sw,sw,nw,sw,nw,sw,nw,se,nw,nw,s,sw,sw,nw,nw,nw,sw,nw,nw,n,nw,sw,sw,nw,sw,ne,s,nw,sw,sw,nw,sw,sw,sw,nw,nw,sw,nw,ne,nw,nw,nw,n,ne,nw,sw,sw,nw,sw,n,nw,sw,nw,sw,sw,nw,nw,ne,nw,se,sw,nw,se,sw,nw,nw,s,nw,s,nw,nw,nw,s,nw,sw,nw,nw,nw,sw,nw,nw,se,nw,nw,nw,nw,sw,n,s,nw,nw,nw,nw,nw,nw,nw,nw,nw,n,s,nw,s,sw,nw,n,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,se,ne,nw,s,se,nw,nw,nw,nw,nw,se,nw,nw,nw,nw,sw,sw,se,sw,nw,nw,ne,nw,sw,nw,nw,s,n,nw,sw,s,nw,sw,ne,n,nw,nw,nw,nw,nw,nw,nw,nw,nw,sw,nw,nw,nw,nw,s,nw,nw,s,se,nw,nw,nw,ne,nw,nw,nw,nw,nw,sw,nw,n,n,se,nw,nw,nw,nw,sw,nw,nw,se,nw,nw,sw,nw,nw,nw,nw,sw,nw,ne,nw,nw,sw,nw,nw,nw,n,nw,se,nw,nw,ne,nw,se,nw,sw,nw,nw,n,nw,nw,s,nw,nw,nw,ne,nw,nw,nw,nw,sw,ne,n,sw,n,se,nw,sw,s,nw,sw,nw,sw,nw,nw,nw,nw,nw,nw,se,se,nw,nw,nw,s,nw,nw,nw,sw,nw,nw,se,nw,sw,nw,nw,ne,ne,nw,s,nw,nw,nw,nw,nw,nw,nw,nw,nw,sw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,n,nw,nw,nw,nw,nw,nw,n,nw,nw,nw,nw,nw,se,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,n,nw,nw,nw,nw,nw,n,nw,nw,nw,s,s,se,nw,nw,nw,ne,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,n,nw,nw,nw,nw,nw,nw,nw,n,nw,nw,nw,n,nw,se,nw,n,nw,nw,nw,nw,nw,nw,n,nw,nw,nw,sw,ne,nw,nw,nw,nw,nw,ne,nw,sw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,n,nw,nw,nw,se,n,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,se,nw,nw,nw,nw,nw,nw,s,nw,nw,nw,n,nw,nw,nw,n,nw,nw,nw,nw,n,nw,sw,n,nw,se,n,nw,ne,nw,n,n,nw,s,nw,se,sw,n,nw,nw,nw,n,nw,nw,nw,n,n,n
w,nw,ne,sw,nw,ne,nw,nw,nw,sw,s,nw,nw,nw,se,nw,nw,sw,n,sw,nw,sw,nw,nw,n,nw,nw,n,n,nw,nw,nw,nw,nw,ne,se,nw,nw,n,nw,nw,nw,nw,nw,ne,nw,nw,n,nw,nw,n,ne,nw,ne,ne,n,nw,nw,nw,nw,n,nw,n,nw,nw,nw,nw,nw,nw,ne,se,nw,nw,n,nw,n,sw,nw,n,s,sw,n,n,ne,sw,nw,n,s,n,se,nw,se,n,ne,nw,nw,nw,s,nw,nw,nw,nw,se,se,nw,n,nw,nw,nw,nw,nw,nw,nw,n,n,nw,n,n,sw,sw,n,nw,n,nw,nw,s,nw,nw,n,nw,nw,nw,nw,n,se,s,nw,n,nw,nw,se,s,sw,n,nw,nw,nw,ne,nw,nw,n,s,nw,n,n,se,n,n,nw,nw,nw,nw,nw,nw,n,nw,nw,nw,nw,nw,ne,n,s,nw,nw,nw,ne,ne,n,nw,nw,n,s,nw,n,n,n,n,nw,s,n,n,nw,nw,n,nw,n,nw,nw,nw,n,nw,n,nw,ne,nw,nw,nw,nw,nw,nw,s,n,nw,n,nw,nw,n,nw,n,n,nw,n,nw,n,n,n,n,n,n,n,ne,n,nw,nw,n,s,n,n,nw,nw,nw,s,n,nw,nw,n,sw,n,nw,n,s,nw,nw,n,se,n,s,n,nw,n,n,nw,n,nw,n,s,n,n,nw,s,nw,n,n,n,n,nw,nw,n,nw,nw,nw,nw,s,nw,n,nw,n,nw,n,nw,se,nw,nw,n,ne,sw,sw,nw,nw,nw,n,nw,nw,n,nw,ne,nw,n,n,n,nw,n,nw,se,se,n,nw,nw,nw,n,s,s,nw,n,nw,nw,s,nw,n,n,n,nw,ne,nw,nw,nw,sw,nw,nw,ne,nw,ne,nw,sw,se,nw,n,nw,nw,n,nw,sw,n,n,n,n,n,nw,s,nw,nw,nw,nw,nw,s,nw,n,nw,nw,n,n,n,ne,n,nw,nw,ne,sw,nw,nw,nw,s,n,n,ne,sw,se,n,nw,n,nw,n,n,nw,nw,nw,nw,n,n,se,s,n,n,n,nw,n,n,n,nw,nw,nw,nw,nw,n,n,n,se,nw,se,n,n,n,n,ne,nw,se,n,n,n,n,nw,se,nw,nw,nw,n,se,n,nw,n,nw,nw,nw,n,s,n,ne,nw,s,n,nw,n,nw,nw,n,nw,nw,ne,ne,se,n,n,n,n,n,nw,n,n,se,nw,n,n,n,sw,sw,n,n,se,nw,n,n,n,n,n,sw,s,n,n,nw,nw,n,n,nw,n,nw,nw,se,nw,n,n,n,n,n,n,nw,nw,ne,nw,n,nw,sw,n,n,n,n,sw,nw,n,n,n,nw,n,n,s,nw,s,n,sw,nw,nw,n,nw,s,se,nw,nw,n,n,n,n,nw,n,n,n,nw,nw,sw,s,nw,n,nw,n,nw,s,n,nw,nw,n,n,nw,n,n,n,nw,n,se,nw,se,n,se,n,ne,s,n,n,n,s,sw,nw,s,n,n,n,n,nw,n,nw,nw,sw,s,nw,nw,n,nw,ne,nw,n,sw,n,se,n,n,n,nw,nw,ne,nw,n,n,n,n,s,n,n,n,nw,n,nw,n,nw,n,sw,s,n,nw,n,n,n,n,s,n,n,nw,n,nw,nw,nw,nw,n,n,n,n,n,n,n,nw,sw,se,s,nw,n,ne,sw,n,nw,nw,n,n,n,n,n,n,n,n,nw,nw,n,s,n,n,s,n,ne,sw,nw,s,nw,n,n,n,se,n,n,n,nw,ne,n,nw,n,n,n,n,n,n,se,s,n,n,n,nw,ne,se,n,se,n,nw,n,n,nw,n,n,sw,nw,n,n,n,ne,n,n,n,n,nw,n,n,n,se,nw,n,n,n,n,n,n,nw,nw,n,n,nw,nw,n,n,nw,n,n,n,n,n,n,nw,nw,n,n,n,se,n,n,se,n,nw,nw,nw,ne,n,se,s,nw,n,nw,n,n,n,n,n,s,se,n,n,n,n,n,nw,sw,sw,ne,n,nw,nw,ne,se,n
,n,sw,se,n,n,n,nw,se,n,nw,n,n,s,ne,ne,nw,n,nw,n,n,nw,n,nw,nw,n,n,n,n,n,n,nw,sw,n,sw,n,n,nw,n,n,n,n,n,se,n,n,se,n,se,n,n,n,n,n,ne,n,ne,n,ne,n,sw,n,sw,n,n,n,n,n,n,n,n,se,n,n,n,ne,n,n,n,n,n,n,s,nw,n,n,n,nw,n,n,n,n,n,n,ne,sw,n,ne,n,n,n,n,n,n,n,n,nw,n,n,n,n,n,n,n,n,se,n,n,n,n,n,n,s,nw,n,ne,n,n,sw,sw,n,n,n,ne,n,n,n,n,n,n,n,sw,n,n,n,n,ne,se,se,s,n,n,n,n,n,n,n,n,nw,ne,n,n,nw,n,se,n,sw,n,n,n,nw,n,s,se,n,se,n,n,n,n,n,sw,ne,sw,n,s,n,nw,n,nw,n,s,n,s,se,ne,nw,n,n,n,n,n,n,n,n,n,ne,se,n,n,n,ne,n,n,n,se,n,n,n,n,nw,n,n,n,n,n,n,n,ne,sw,n,n,n,se,n,sw,nw,n,nw,n,ne,n,n,n,n,n,s,n,n,n,n,n,se,ne,n,sw,s,s,n,n,n,n,n,n,n,n,n,s,s,n,n,n,n,n,sw,nw,n,n,n,n,n,se,n,n,n,sw,n,n,n,n,n,n,n,n,ne,n,s,n,n,n,n,se,n,n,n,n,n,n,ne,n,n,s,n,s,n,n,n,n,n,sw,n,n,n,se,n,n,n,se,ne,n,ne,n,n,n,n,n,s,n,n,n,n,n,se,n,sw,nw,s,n,n,n,s,n,n,n,n,n,n,sw,n,n,n,n,n,n,n,n,n,n,n,n,ne,n,s,s,n,ne,n,n,n,se,ne,ne,n,nw,nw,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,ne,n,n,n,n,ne,sw,n,nw,sw,ne,s,n,n,n,n,se,n,sw,n,n,ne,n,n,n,n,ne,ne,n,n,n,n,n,n,n,ne,ne,n,se,n,s,n,se,n,se,n,nw,n,sw,n,n,n,n,n,ne,ne,se,nw,ne,n,nw,n,n,ne,ne,n,n,n,ne,n,n,ne,ne,n,n,n,n,n,n,n,n,s,n,n,n,ne,ne,n,sw,n,n,n,n,n,ne,n,n,n,sw,n,n,n,n,n,n,ne,ne,n,n,se,n,ne,n,ne,n,n,n,n,n,n,sw,n,n,ne,n,ne,n,nw,ne,ne,n,n,n,n,n,ne,n,ne,sw,n,n,n,n,nw,ne,n,ne,n,n,ne,nw,n,se,se,n,n,n,ne,nw,sw,ne,ne,ne,n,ne,n,n,n,n,n,s,n,ne,n,n,ne,n,n,n,nw,ne,n,nw,n,nw,n,se,s,n,n,n,n,n,n,n,n,n,ne,se,n,ne,n,n,nw,ne,n,n,s,ne,n,n,n,n,n,ne,n,n,ne,ne,n,n,n,n,n,n,sw,nw,n,n,nw,ne,sw,n,n,n,n,n,n,n,nw,n,se,sw,ne,n,se,ne,sw,n,n,n,n,n,s,n,ne,n,n,n,n,ne,n,ne,se,n,n,n,n,n,ne,nw,ne,n,n,n,ne,sw,se,n,s,n,n,sw,n,n,n,n,sw,ne,n,ne,ne,n,n,n,ne,se,ne,n,n,ne,ne,n,se,ne,ne,ne,n,n,se,n,n,n,ne,n,se,ne,ne,sw,n,ne,se,ne,ne,n,se,n,n,nw,n,n,n,ne,n,ne,ne,ne,ne,ne,n,n,n,ne,n,n,n,nw,n,n,n,n,n,s,n,ne,ne,n,ne,nw,ne,n,n,n,n,ne,n,ne,ne,sw,ne,nw,se,n,ne,nw,ne,ne,n,n,n,s,s,s,ne,sw,ne,ne,s,sw,ne,se,se,n,n,n,sw,n,n,ne,n,ne,n,n,n,n,n,n,ne,n,n,n,ne,se,n,n,n,n,ne,n,n,se,n,sw,n,n,ne,n,ne,ne,n,ne,nw,sw,se,n,n,ne,n,se,ne,n,ne,ne,n,ne,ne,ne,nw,s,ne,nw,ne,ne,n,sw
,n,n,n,n,ne,n,s,n,n,ne,sw,s,n,n,n,n,sw,n,sw,n,n,se,s,n,se,n,n,ne,ne,n,ne,s,n,ne,n,n,n,se,n,n,ne,ne,sw,se,n,ne,ne,n,ne,ne,n,ne,ne,ne,n,ne,n,ne,n,n,ne,ne,ne,ne,se,ne,se,ne,ne,sw,n,n,ne,ne,ne,n,n,ne,n,n,se,ne,ne,ne,ne,se,n,ne,n,ne,ne,se,ne,s,ne,n,n,n,ne,se,nw,ne,ne,sw,ne,n,ne,n,sw,ne,ne,sw,ne,n,n,se,ne,ne,ne,n,n,n,ne,n,n,ne,n,n,se,ne,n,ne,n,n,ne,ne,ne,n,ne,ne,n,n,ne,ne,ne,se,ne,ne,ne,n,n,sw,s,ne,ne,n,sw,n,ne,sw,n,ne,sw,n,n,ne,ne,nw,sw,ne,s,ne,ne,n,s,se,nw,s,ne,n,n,s,ne,ne,ne,n,ne,ne,ne,ne,se,ne,ne,ne,se,ne,ne,ne,n,n,n,n,nw,ne,ne,ne,n,n,n,n,ne,nw,n,n,n,ne,nw,n,sw,ne,n,ne,ne,ne,ne,sw,ne,nw,nw,ne,ne,n,ne,n,s,ne,sw,s,ne,ne,n,ne,n,n,sw,ne,n,ne,n,ne,ne,sw,se,ne,n,n,sw,ne,ne,nw,n,sw,ne,n,ne,ne,nw,n,ne,ne,n,se,n,s,ne,ne,ne,s,n,n,ne,n,ne,n,ne,ne,se,ne,ne,n,ne,ne,ne,ne,n,ne,ne,nw,ne,se,ne,ne,ne,ne,n,ne,ne,ne,ne,ne,se,ne,n,n,ne,ne,n,nw,ne,n,ne,ne,ne,ne,ne,ne,ne,ne,ne,n,n,n,ne,ne,sw,n,ne,ne,se,s,ne,n,sw,ne,n,ne,n,ne,n,ne,ne,ne,ne,ne,n,ne,ne,n,sw,n,ne,ne,ne,n,ne,n,se,n,n,ne,n,n,n,n,ne,ne,ne,s,n,ne,ne,ne,ne,ne,ne,ne,ne,n,n,ne,sw,ne,n,s,n,ne,se,ne,ne,ne,ne,nw,n,ne,s,ne,ne,ne,ne,ne,ne,ne,ne,n,sw,ne,n,n,n,ne,s,ne,ne,n,s,ne,sw,nw,ne,s,ne,ne,ne,se,ne,n,ne,ne,sw,ne,n,ne,ne,se,s,s,n,n,ne,ne,se,ne,ne,ne,n,ne,ne,n,sw,ne,ne,ne,ne,s,ne,se,ne,ne,ne,ne,ne,n,ne,ne,se,nw,ne,nw,ne,ne,ne,ne,n,se,ne,n,nw,ne,ne,ne,n,ne,ne,ne,n,ne,n,n,ne,ne,ne,ne,ne,s,n,n,ne,se,ne,sw,n,se,sw,ne,n,ne,ne,ne,ne,ne,s,nw,n,ne,ne,ne,ne,sw,n,ne,ne,ne,ne,sw,s,ne,ne,ne,ne,nw,n,n,ne,ne,se,ne,ne,se,ne,sw,ne,sw,ne,ne,ne,n,ne,s,se,ne,ne,n,ne,n,n,sw,ne,sw,ne,ne,ne,ne,ne,ne,n,ne,ne,n,ne,ne,ne,ne,ne,ne,ne,n,ne,n,sw,ne,ne,ne,ne,n,s,se,ne,ne,s,ne,n,ne,ne,n,n,ne,ne,ne,se,ne,n,s,n,ne,s,ne,ne,se,ne,ne,s,n,se,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,n,ne,s,ne,ne,ne,sw,n,ne,ne,ne,ne,ne,se,nw,n,n,ne,ne,n,ne,ne,ne,ne,ne,ne,ne,ne,ne,nw,se,n,ne,se,n,ne,n,ne,ne,n,n,ne,ne,ne,ne,ne,ne,ne,sw,sw,n,ne,s,se,ne,ne,ne,se,ne,ne,ne,se,nw,nw,ne,ne,sw,n,ne,s,se,n,s,ne,ne,ne,ne,ne,ne,s,ne,s,nw,n,ne,ne,se,ne,nw,sw,ne,s,ne,ne,ne,ne,ne,nw,ne,ne,ne,n,ne,se,n,ne,ne,ne,ne,se,
ne,ne,ne,sw,ne,ne,ne,ne,ne,se,ne,ne,n,ne,ne,ne,sw,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,ne,sw,n,ne,sw,ne,ne,ne,sw,nw,ne,ne,ne,se,ne,ne,n,ne,ne,ne,ne,ne,se,ne,se,ne,ne,ne,ne,ne,ne,ne,s,ne,ne,ne,ne,sw,ne,ne,ne,ne,ne,ne,s,ne,ne,ne,ne,ne,ne,ne,ne,sw,ne,ne,ne,ne,ne,sw,n,nw,ne,ne,ne,n,ne,ne,ne,n,ne,ne,ne,ne,se,ne,ne,s,nw,ne,ne,ne,sw,ne,ne,n,ne,n,ne,ne,ne,s,ne,sw,ne,n,ne,ne,ne,ne,nw,nw,nw,sw,sw,sw,ne,sw,nw,s,s,se,s,n,s,s,s,s,se,se,se,sw,nw,se,n,se,se,ne,sw,sw,ne,ne,ne,ne,ne,s,ne,ne,n,ne,ne,nw,ne,n,se,n,n,ne,ne,ne,ne,nw,n,n,ne,n,n,n,n,n,n,nw,n,n,s,n,n,s,n,nw,n,ne,n,n,n,n,ne,nw,ne,n,nw,nw,nw,n,n,nw,nw,nw,nw,nw,nw,n,nw,nw,sw,sw,nw,nw,nw,nw,nw,nw,nw,sw,nw,nw,nw,nw,n,sw,nw,n,nw,nw,sw,sw,sw,nw,sw,nw,sw,sw,sw,sw,ne,nw,sw,s,n,sw,sw,nw,nw,n,se,sw,s,sw,se,s,nw,nw,sw,se,sw,sw,sw,nw,sw,sw,sw,sw,sw,sw,sw,sw,sw,nw,sw,s,sw,n,sw,se,s,sw,sw,sw,s,s,sw,s,sw,ne,sw,s,sw,s,sw,s,s,s,nw,s,s,s,sw,s,sw,s,s,ne,s,s,s,s,sw,s,ne,s,n,sw,s,s,s,s,s,se,n,s,s,s,s,n,n,s,nw,se,s,s,s,se,sw,s,ne,se,n,s,s,s,s,s,n,se,s,se,s,se,s,se,n,se,sw,sw,s,se,s,s,s,s,sw,s,s,s,se,se,s,s,se,se,s,se,n,s,n,s,se,se,n,s,sw,s,ne,se,sw,se,se,n,s,s,se,nw,se,s,s,n,se,se,se,se,se,se,s,s,nw,se,s,se,se,se,se,se,se,s,s,s,se,ne,n,se,s,se,se,s,s,sw,se,se,nw,sw,se,se,se,se,ne,se,se,sw,se,sw,s,sw,n,se,se,se,se,se,ne,se,se,se,se,se,se,se,ne,se,se,se,s,ne,se,n,ne,sw,ne,ne,se,nw,se,sw,se,se,ne,n,se,se,se,ne,se,se,s,ne,ne,nw,se,se,n,se,se,sw,ne,se,ne,se,se,ne,se,se,nw,ne,ne,se,ne,n,ne,ne,se,ne,ne,ne,ne,ne,ne,se,se,ne,se,ne,se,se,ne,ne,se,s,ne,ne,ne,ne,ne,ne,s,ne,ne,se,se,ne,ne,se,ne,ne,ne,se,se,se,se,ne,ne,ne,ne,ne,ne,se,nw,ne,ne,se,ne,ne,s,se,ne,ne,s,ne,ne,ne,ne,ne,nw,ne,ne,ne,ne,ne,ne,ne,se,ne,ne,ne,s,ne,n,ne,ne,ne,ne,ne,s,s,n,ne,ne,ne,ne,ne,ne,ne,ne,se,ne,n,sw,ne,ne,ne,ne,se,ne,ne,ne,n,ne,ne,ne,ne,ne,nw,ne,n,ne,ne,ne,n,nw,ne,ne,nw,n,ne,ne,ne,se,ne,ne,ne,n,ne,nw,n,ne,se,ne,se,n,se,n,ne,n,ne,ne,se,ne,ne,ne,ne,nw,n,ne,n,ne,n,ne,ne,se,sw,n,se,ne,nw,n,ne,s,s,n,ne,ne,s,ne,nw,ne,ne,ne,ne,n,n,s,n,s,s,n,ne,n,ne,n,nw,sw,s,n,s,n,n,n,sw,ne,n,n,ne,n,se,nw,n,n,ne,n
,n,nw,s,n,ne,n,ne,n,ne,n,n,ne,n,ne,ne,n,ne,n,ne,ne,n,sw,ne,nw,n,n,n,ne,n,n,n,ne,sw,n,n,n,n,se,sw,n,se,n,nw,ne,n,ne,ne,n,sw,n,se,n,n,sw,nw,n,n,ne,s,n,n,n,n,n,n,n,n,n,n,n,n,ne,s,n,n,n,n,n,sw,nw,n,n,n,n,n,nw,n,n,n,ne,ne,n,ne,nw,n,n,nw,n,ne,n,n,sw,n,s,n,nw,n,se,n,n,s,s,ne,ne,nw,n,nw,nw,n,n,n,sw,ne,n,n,n,n,n,n,n,n,n,n,s,ne,se,nw,n,nw,n,ne,se,nw,nw,n,n,n,ne,nw,n,n,s,n,nw,n,n,nw,se,nw,n,se,n,n,n,sw,nw,nw,n,n,n,nw,nw,nw,n,n,n,n,n,ne,n,n,sw,n,n,n,n,nw,n,sw,n,n,ne,n,ne,nw,se,nw,ne,n,sw,nw,s,n,n,n,n,nw,n,nw,n,n,sw,n,ne,n,nw,ne,n,nw,nw,nw,nw,n,nw,sw,n,nw,n,n,nw,nw,nw,nw,n,ne,sw,nw,n,n,ne,nw,nw,nw,nw,nw,nw,n,ne,n,nw,nw,nw,nw,nw,nw,nw,nw,nw,sw,n,nw,sw,nw,nw,n,nw,n,nw,nw,se,n,s,n,n,nw,nw,nw,nw,nw,nw,n,nw,nw,nw,nw,nw,nw,nw,nw,n,nw,nw,n,nw,nw,nw,ne,nw,n,nw,nw,se,s,nw,nw,ne,ne,nw,sw,nw,nw,se,se,nw,nw,nw,nw,nw,n,n,nw,nw,ne,n,nw,se,nw,nw,nw,nw,nw,nw,sw,nw,nw,n,nw,nw,s,nw,nw,n,nw,n,sw,nw,nw,nw,nw,nw,nw,nw,s,nw,se,nw,nw,nw,nw,se,nw,sw,nw,nw,nw,nw,sw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,ne,nw,nw,nw,nw,nw,se,nw,nw,s,nw,nw,sw,nw,nw,nw,nw,nw,nw,nw,nw,nw,s,n,nw,nw,nw,nw,n,sw,nw,nw,nw,sw,nw,nw,nw,nw,s,sw,sw,s,s,nw,s,nw,se,nw,nw,sw,nw,nw,nw,nw,nw,nw,nw,nw,nw,nw,ne,nw,nw,nw,nw,nw,sw,s,sw,nw,nw,se,nw,nw,nw,nw,nw,nw,sw,s,nw,ne,sw,nw,nw,nw,s,sw,nw,nw,nw,nw,nw,sw,ne,nw,ne,sw,sw,sw,nw,nw,nw,sw,se,nw,s,sw,ne,nw,nw,nw,sw,ne,sw,s,nw,nw,nw,sw,nw,nw,nw,n,sw,nw,nw,s,nw,se,nw,sw,nw,nw,sw,s,sw,nw,sw,s,sw,nw,sw,nw,n,nw,sw,sw,nw,sw,nw,nw,n,sw,sw,nw,nw,sw,s,sw,ne,n,nw,sw,sw,nw,nw,sw,s,nw,sw,nw,s,nw,sw,nw,sw,nw,sw,sw,nw,sw,sw,nw,nw,nw,ne,sw,nw,sw,sw,nw,nw,nw,se,sw,nw,sw,s,se,sw,nw,sw,nw,nw,nw,se,sw,nw,nw,sw,ne,sw,sw,n,nw,n,n,sw,sw,nw,sw,sw,sw,sw,sw,s,n,sw,sw,sw,s,sw,se,n,nw,sw,sw,ne,nw,sw,sw,s,sw,sw,sw,nw,nw,sw,nw,sw,n,sw,nw,n,ne,sw,sw,nw,sw,s,nw,sw,sw,nw,s,sw,s,sw,nw,nw,nw,sw,sw,sw,nw,n,n,sw,s,sw,s,nw,ne,nw,nw,nw,sw,sw,sw,se,nw,sw,sw,nw,s,sw,sw,sw,sw,sw,sw,sw,sw,sw,sw,sw,ne,sw,sw,sw,nw,sw,sw,nw,nw,sw,sw,sw,nw,n,sw,sw,sw,sw,se,sw,se,sw,nw,sw,sw,sw,sw,se,nw,ne,sw,sw,sw,sw,sw,nw,nw,sw,sw,sw,sw,s,nw,sw,n,sw,sw
,se,sw,sw,nw,nw,sw,sw,nw,nw,sw,sw,sw,ne,s,sw,sw,s,sw,sw,sw,nw,sw,sw,sw,sw,sw,sw,s,se,n,sw,s,sw,sw,sw,sw,sw,sw,ne,se,sw,sw,sw,sw,sw,n,sw,sw,s,sw,n,sw,sw,sw,sw,sw,sw,sw,se,se,sw,sw,sw,sw,ne,sw,nw,sw,sw,sw,sw,sw,sw,n,ne,ne,sw,sw,sw,sw,sw,sw,sw,sw,se,sw,sw,ne,sw,sw,sw,sw,sw,nw,sw,sw,sw,ne,sw,n,sw,sw,sw,se,sw,sw,sw,sw,ne,sw,sw,sw,se,s,sw,sw,ne,sw,sw,sw,n,sw,sw,sw,sw,sw,ne,s,sw,sw,se,ne,sw,sw,sw,nw,ne,sw,s,s,sw,s,s,s,sw,sw,s,sw,se,sw,nw,sw,s,sw,ne,s,sw,s,sw,n,sw,sw,sw,sw,sw,sw,sw,ne,n,sw,n,sw,s,sw,sw,s,sw,ne,se,sw,s,s,sw,s,sw,sw,ne,s,sw,sw,sw,sw,s,sw,sw,nw,sw,sw,s,sw,n,s,s,nw,se,sw,sw,se,sw,sw,sw,ne,s,sw,sw,s,sw,sw,sw,sw,s,sw,s,s,ne,sw,sw,s,ne,s,se,sw,ne,s,s,sw,sw,s,nw,n,ne,sw,sw,s,s,nw,nw,sw,sw,sw,sw,sw,sw,ne,s,sw,s,sw,s,s,sw,s,sw,s,sw,sw,s,sw,se,sw,nw,nw,ne,ne,sw,sw,sw,sw,s,s,sw,sw,sw,sw,sw,nw,se,nw,sw,sw,sw,n,s,sw,sw,se,sw,sw,s,nw,sw,s,s,sw,sw,sw,s,sw,s,sw,sw,n,sw,s,sw,s,sw,ne,s,s,sw,sw,sw,s,s,sw,s,sw,s,s,se,s,sw,s,s,sw,se,sw,nw,s,sw,s,s,s,s,s,s,s,n,sw,sw,sw,sw,s,nw,s,s,sw,s,sw,sw,s,sw,se,sw,sw,s,s,sw,s,s,s,s,s,sw,nw,s,ne,sw,s,sw,sw,s,s,s,sw,sw,s,s,s,sw,nw,se,s,s,s,sw,s,s,sw,s,se,s,se,se,sw,s,s,nw,sw,s,s,s,sw,s,ne,sw,sw,sw,s,s,s,s,s,s,sw,s,sw,sw,s,s,s,s,sw,s,s,ne,ne,nw,ne,n,s,sw,s,s,nw,s,n,sw,ne,s,s,sw,sw,s,s,n,s,ne,s,sw,n,s,sw,sw,s,sw,sw,sw,se,se,s,s,s,sw,ne,s,s,s,s,n,sw,sw,s,n,s,s,s,s,nw,se,s,sw,sw,s,s,s,ne,nw,s,s,s,n,sw,s,s,s,s,s,s,s,s,s,ne,s,s,s,s,s,sw,sw,s,se,nw,s,s,se,s,s,nw,nw,s,s,s,nw,s,s,ne,sw,s,s,s,s,sw,sw,s,s,s,s,s,s,s,s,s,s,s,s,se,s,ne,sw,s,n,s,s,s,s,sw,se,sw,ne,s,sw,ne,s,n,s,n,s,se,s,s,s,sw,s,se,s,s,s,ne,s,s,s,s,s,s,s,s,s,sw,s,s,s,s,s,s,se,sw,s,s,n,s,s,s,s,nw,s,s,sw,s,n,se,nw,s,sw,s,s,s,ne,se,s,se,s,s,s,nw,nw,se,n,s,s,s,s,s,ne,se,se,nw,s,s,s,s,s,s,s,s,nw,se,nw,s,n,s,s,ne,n,n,se,s,s,s,s,s,se,sw,ne,s,s,s,s,s,s,s,s,se,nw,s,s,se,nw,s,sw,s,se,s,s,s,s,s,s,s,n,s,se,ne,se,se,s,s,s,s,se,s,s,se,se,s,s,s,se,sw,s,s,s,sw,se,s,s,nw,ne,n,nw,nw,se,s,s,se,s,s,ne,s,s,n,s,s,n,s,s,sw,s,s,s,s,s,ne,se,s,sw,nw,s,s,s,sw,n,ne,nw,s,se,ne,s,s,s,ne,se,s,n,nw,s,s,s,s,s,s,s,s,s,se,se,s
w,s,s,s,s,se,se,s,n,s,s,s,s,s,s,se,se,s,s,s,se,se,se,s,ne,s,s,s,nw,s,sw,s,s,s,nw,s,nw,s,s,se,s,s,s,s,ne,s,s,s,s,s,sw,n,s,s,s,s,se,se,s,s,n,s,nw,sw,se,s,s,se,nw,s,s,s,se,sw,se,s,s,s,s,se,se,s,s,s,s,nw,se,se,s,se,ne,s,s,se,s,se,s,sw,sw,s,s,s,s,s,se,s,se,se,se,s,ne,s,nw,s,se,se,nw,s,s,ne,s,s,s,s,sw,s,n,s,s,s,s,sw,se,s,s,nw,s,ne,se,se,s,s,s,se,s,s,s,se,sw,se,n,sw,sw,se,s,s,s,s,s,s,s,s,ne,se,nw,sw,ne,s,s,s,s,sw,nw,se,s,n,nw,se,se,se,s,s,s,se,s,se,se,s,se,s,se,se,sw,nw,se,se,s,se,s,se,nw,ne,s,se,s,se,sw,s,s,se,s,n,nw,s,se,s,ne,sw,s,s,s,n,se,s,se,s,sw,se,s,sw,se,s,s,se,se,se,s,sw,se,se,s,s,s,s,n,se,s,se,nw,nw,s,se,s,s,s,se,s,se,ne,se,s,se,se,se,se,sw,se,se,se,se,se,se,n,s,se,se,n,nw,s,s,s,s,nw,se,s,s,se,s,s,nw,sw,se,s,s,s,se,se,s,se,s,se,s,sw,se,se,s,s,se,se,se,ne,n,s,s,se,se,se,se,s,s,se,se,s,se,s,s,se,sw,se,s,se,se,s,sw,se,s,sw,sw,se,s,s,s,se,sw,s,se,se,se,se,sw,se,s,s,se,se,s,s,s,s,ne,ne,s,s,se,se,se,s,se,se,se,se,se,se,s,se,se,se,n,se,se,s,se,sw,se,se,se,ne,se,se,se,se,se,nw,se,se,se,se,se,se,s,nw,se,se,se,s,se,se,s,se,s,s,ne,sw,se,se,se,s,s,se,se,s,se,se,s,se,n,se,se,s,s,se,se,ne,se,se,n,n,ne,s,se,se,s,n,s,sw,n,se,s,se,se,se,se,s,n,se,se,se,se,s,se,se,se,nw,se,se,s,se,se,se,se,sw,ne,s,se,s,se,se,se,se,n,se,se,se,se,se,se,se,se,se,se,se,se,se,se,sw,sw,se,se,se,nw,s,nw,se,se,se,se,se,se,s,s,se,se,nw,se,se,nw,nw,se,se,s,se,sw,se,se,se,se,n,ne,se,s,se,se,se,se,se,se,se,se,sw,s,se,se,sw,se,se,se,se,se,se,se,se,se,se,s,se,se,se,ne,nw,s,se,se,se,se,ne,se,s,se,se,se,se,se,se,nw,se,s,se,se,se,se,s,se,se,n,se,se,se,sw,se,sw,se,se,n,se,se,se,s,se,se,sw,ne,sw,s,se,se,se,se,sw,se,s,se,se,se,se,se,s,se,se,se,se,nw,ne,nw,se,se,n,se"""
# `input` here is the comma-separated puzzle string defined above (it
# shadows the builtin input()); split it into individual hex-grid moves.
directions = input.split(",")
def get_distance(directions):
    """Return the hex-grid distance from the origin after following
    *directions* (an iterable of 'n', 'ne', 'se', 's', 'sw', 'nw' steps).

    Uses cube coordinates: each step maps to a unit (x, y, z) offset with
    x + y + z == 0, and the distance of (x, y, z) from the origin is
    max(|x|, |y|, |z|).  This is equivalent to the original repeated
    pairwise-cancellation table (n+s, ne+sw, nw+se cancel; ne+s -> se;
    nw+s -> sw; n+se -> ne; n+sw -> nw; se+sw -> s; ne+nw -> n) but runs
    in a single pass instead of decrementing one step per loop iteration.

    Raises:
        KeyError: if a direction string is not one of the six moves
            (same behavior as the original dict lookup).
    """
    # Unit cube-coordinate offsets for a hex grid with an n/s axis.
    offsets = {
        "n":  (0, 1, -1),
        "s":  (0, -1, 1),
        "ne": (1, 0, -1),
        "sw": (-1, 0, 1),
        "se": (1, -1, 0),
        "nw": (-1, 1, 0),
    }
    x = y = z = 0
    for direction in directions:
        dx, dy, dz = offsets[direction]
        x += dx
        y += dy
        z += dz
    # Hex distance in cube coordinates.
    return max(abs(x), abs(y), abs(z))
# Furthest distance ever reached along the path: maximum over all
# prefixes of the direction list.  The range must run through
# len(directions) inclusive so the final step is counted — the original
# range(1, len(directions)) dropped the last prefix (off-by-one).
print(max(get_distance(directions[:i]) for i in range(1, len(directions) + 1)))
| 484.212766
| 21,645
| 0.610818
| 8,348
| 22,758
| 1.663392
| 0.004792
| 0.106438
| 0.093331
| 0.07576
| 0.94995
| 0.869509
| 0.736857
| 0.569566
| 0.405012
| 0.253709
| 0
| 0.000627
| 0.018147
| 22,758
| 46
| 21,646
| 494.73913
| 0.62081
| 0
| 0
| 0.052632
| 0
| 0.026316
| 0.95272
| 0.950479
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026316
| false
| 0
| 0
| 0
| 0.052632
| 0.026316
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
a3d4faa19a7954c211341362595dded1563ab1c0
| 67
|
py
|
Python
|
utils/envs/__init__.py
|
RockWenJJ/rl-agents
|
d55126007fcc5d2a882843f6c3a63000b4fc7c92
|
[
"MIT"
] | 342
|
2018-07-14T06:53:32.000Z
|
2022-03-25T08:30:23.000Z
|
utils/envs/__init__.py
|
RockWenJJ/rl-agents
|
d55126007fcc5d2a882843f6c3a63000b4fc7c92
|
[
"MIT"
] | 68
|
2018-05-23T12:47:05.000Z
|
2021-12-03T13:25:12.000Z
|
utils/envs/__init__.py
|
RockWenJJ/rl-agents
|
d55126007fcc5d2a882843f6c3a63000b4fc7c92
|
[
"MIT"
] | 106
|
2018-09-28T16:04:47.000Z
|
2022-03-23T01:42:48.000Z
|
from utils.envs.dynamics import *
from utils.envs.gridenv import *
| 22.333333
| 33
| 0.791045
| 10
| 67
| 5.3
| 0.6
| 0.339623
| 0.490566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119403
| 67
| 2
| 34
| 33.5
| 0.898305
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
a3f5550c8aec47d4ade99f084b89a8536d489813
| 157
|
py
|
Python
|
simple_ftp/__init__.py
|
marekvymazal/simple_ftp
|
b5f3c639b991be6ae1bdc9854fc40d53e1af7475
|
[
"MIT"
] | null | null | null |
simple_ftp/__init__.py
|
marekvymazal/simple_ftp
|
b5f3c639b991be6ae1bdc9854fc40d53e1af7475
|
[
"MIT"
] | null | null | null |
simple_ftp/__init__.py
|
marekvymazal/simple_ftp
|
b5f3c639b991be6ae1bdc9854fc40d53e1af7475
|
[
"MIT"
] | null | null | null |
# import FTP class for package convenience
# ( so users can just from simple_ftp import FTP instead of from simple_ftp.ftp import FTP )
from .ftp import FTP
| 39.25
| 92
| 0.783439
| 27
| 157
| 4.481481
| 0.518519
| 0.297521
| 0.297521
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178344
| 157
| 3
| 93
| 52.333333
| 0.937985
| 0.834395
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
a3f9778f13c9ecf12aba2f6d8524e2de73e14d24
| 83
|
py
|
Python
|
src/prefect/client/__init__.py
|
concreted/prefect
|
dd732f5990ee2b0f3d816adb285168fd63b239e4
|
[
"Apache-2.0"
] | 8,633
|
2019-03-23T17:51:03.000Z
|
2022-03-31T22:17:42.000Z
|
src/prefect/client/__init__.py
|
concreted/prefect
|
dd732f5990ee2b0f3d816adb285168fd63b239e4
|
[
"Apache-2.0"
] | 3,903
|
2019-03-23T19:11:21.000Z
|
2022-03-31T23:21:23.000Z
|
src/prefect/client/__init__.py
|
concreted/prefect
|
dd732f5990ee2b0f3d816adb285168fd63b239e4
|
[
"Apache-2.0"
] | 937
|
2019-03-23T18:49:44.000Z
|
2022-03-31T21:45:13.000Z
|
from prefect.client.client import Client
from prefect.client.secrets import Secret
| 27.666667
| 41
| 0.855422
| 12
| 83
| 5.916667
| 0.5
| 0.309859
| 0.478873
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096386
| 83
| 2
| 42
| 41.5
| 0.946667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
43101561acfcf00a5a4677accd854b3aaaf160b8
| 128
|
py
|
Python
|
src/core/python/core/util/periodic_ean_helper.py
|
railtoolkit/OpenLinTim
|
27eba8b6038946ce162e9f7bbc0bd23045029d51
|
[
"MIT"
] | null | null | null |
src/core/python/core/util/periodic_ean_helper.py
|
railtoolkit/OpenLinTim
|
27eba8b6038946ce162e9f7bbc0bd23045029d51
|
[
"MIT"
] | null | null | null |
src/core/python/core/util/periodic_ean_helper.py
|
railtoolkit/OpenLinTim
|
27eba8b6038946ce162e9f7bbc0bd23045029d51
|
[
"MIT"
] | null | null | null |
def transformTimeToPeriodic(time: int, period_length: int):
    """Map *time* onto the periodic range [0, period_length).

    Python's ``%`` operator already returns a result whose sign follows
    the divisor, so a single modulo is exactly equivalent to the
    Java-style ``((time % period_length) + period_length) % period_length``
    for every input (including negative times).
    """
    return time % period_length
| 42.666667
| 67
| 0.765625
| 15
| 128
| 6.266667
| 0.466667
| 0.510638
| 0.382979
| 0.510638
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132813
| 128
| 2
| 68
| 64
| 0.846847
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 7
|
4a436a7ccf399a98a20c1b683e9c7fa5f1ea11ce
| 11,358
|
py
|
Python
|
pyNastran/op2/dev/pyyeti/testdev_op4.py
|
JohannesSeidel/pyNastran
|
91ccd2756b201a7a3e4bb81cc6dc53b947d43bbf
|
[
"BSD-3-Clause"
] | null | null | null |
pyNastran/op2/dev/pyyeti/testdev_op4.py
|
JohannesSeidel/pyNastran
|
91ccd2756b201a7a3e4bb81cc6dc53b947d43bbf
|
[
"BSD-3-Clause"
] | null | null | null |
pyNastran/op2/dev/pyyeti/testdev_op4.py
|
JohannesSeidel/pyNastran
|
91ccd2756b201a7a3e4bb81cc6dc53b947d43bbf
|
[
"BSD-3-Clause"
] | 1
|
2020-10-04T19:28:07.000Z
|
2020-10-04T19:28:07.000Z
|
import numpy as np
import op4
from scipy.io import matlab
import os
from glob import glob
#from nose.tools import *
import unittest
from op2 import OP2
class TestOP4New(unittest.TestCase):
    """Round-trip tests for the op4 reader/writer.

    Matrices read from the ``nastran_op4_data`` fixtures are compared
    against the MATLAB reference file ``r_c_rc.mat``, and files written
    by :meth:`op4.OP4.write` must read back identically.

    NOTE(review): the original module called ``assert_warns``, but its
    ``from nose.tools import *`` import was commented out, so the
    "badname" branches raised NameError at runtime.
    ``unittest.TestCase.assertWarns`` provides the same context-manager
    API (including ``cm.warning``) and is used instead.
    """

    def _check_rdop4(self, rows_cutoff=None):
        """Shared body of the two rdop4 tests.

        The originals were byte-identical except that one also set
        ``o4._rowsCutoff = 0``; *rows_cutoff*, when given, is assigned
        before reading.
        """
        matfile = 'nastran_op4_data/r_c_rc.mat'
        filenames = glob('nastran_op4_data/*.op4')
        o4 = op4.OP4()
        if rows_cutoff is not None:
            o4._rowsCutoff = rows_cutoff
        m = matlab.loadmat(matfile)
        for filename in filenames:
            if filename.find('badname') > -1:
                # Files holding an illegal matrix name must warn on every
                # load path.
                with self.assertWarns(RuntimeWarning) as cm:
                    dct = o4.dctload(filename)
                the_warning = str(cm.warning)
                assert 0 == the_warning.find('Output4 file has matrix '
                                             'name: 1mat')
                with self.assertWarns(RuntimeWarning) as cm:
                    names, mats, forms, mtypes = o4.listload(filename)
                the_warning = str(cm.warning)
                assert 0 == the_warning.find('Output4 file has matrix '
                                             'name: 1mat')
                with self.assertWarns(RuntimeWarning) as cm:
                    names2, sizes, forms2, mtypes2 = o4.dir(filename,
                                                            verbose=False)
                the_warning = str(cm.warning)
                assert 0 == the_warning.find('Output4 file has matrix '
                                             'name: 1mat')
            else:
                dct = o4.dctload(filename)
                names, mats, forms, mtypes = o4.listload(filename)
                names2, sizes, forms2, mtypes2 = o4.dir(filename,
                                                        verbose=False)
            # The three load paths must agree with each other ...
            assert sorted(dct.keys()) == sorted(names)
            assert names == names2
            assert forms == forms2
            assert mtypes == mtypes2
            for mat, sz in zip(mats, sizes):
                assert mat.shape == sz
            # ... and with the MATLAB reference matrices.
            for nm in dct:
                if nm[-1] == 's':
                    matnm = nm[:-1]     # trailing-'s' fixture variant
                elif nm == '_1mat':
                    matnm = 'rmat'      # renamed "badname" matrix
                else:
                    matnm = nm
                assert np.allclose(m[matnm], dct[nm][0])
                pos = names.index(nm)
                assert np.allclose(m[matnm], mats[pos])
                assert dct[nm][1] == forms[pos]
                assert dct[nm][2] == mtypes[pos]
            # Partial (by-name) loads must also work.
            nm2 = nm = 'rcmat'
            if filename.find('single') > -1:
                nm2 = 'rcmats'
            if filename.find('badname') > -1:
                with self.assertWarns(RuntimeWarning) as cm:
                    dct = o4.dctload(filename, nm2)
                    name, mat = o4.listload(filename, [nm2])[1:]
            else:
                dct = o4.dctload(filename, [nm2])
                name, mat = o4.listload(filename, nm2)[1:]
            assert np.allclose(m[nm], dct[nm2][0])
            assert np.allclose(m[nm], mat[0])

    def test_rdop4(self):
        self._check_rdop4()

    def test_rdop4_zero_rowscutoff(self):
        # Same checks with the row cutoff disabled.
        self._check_rdop4(rows_cutoff=0)

    def test_rdop4_partb(self):
        # Every '*other' fixture must contain the same matrices as the
        # first one.
        filenames = glob('nastran_op4_data/*other')
        file1 = filenames[0]
        filenames = filenames[1:]
        o4 = op4.OP4()
        dct = o4.dctload(file1)
        for filename in filenames:
            dct2 = o4.dctload(filename)
            assert set(dct2.keys()) == set(dct.keys())
            for nm in dct:
                for j in range(3):
                    assert np.allclose(dct2[nm][j], dct[nm][j])

    def test_wtop4(self):
        """Write ascii, little-endian, and big-endian files via both the
        list and the dict interfaces; each must read back equal to the
        MATLAB reference."""
        matfile = 'nastran_op4_data/r_c_rc.mat'
        o4 = op4.OP4()
        m = matlab.loadmat(matfile)
        names = ['rmat', 'cmat', 'rcmat']
        mats = []
        wtdct = {}
        for nm in names:
            mats.append(m[nm])
            wtdct[nm] = m[nm]
        # write(filename, names, matrices=None,
        #       binary=True, digits=16, endian='')
        filenames = [
            ['nastran_op4_data/temp_ascii.op4', False, ''],
            ['nastran_op4_data/temp_le.op4', True, '<'],
            ['nastran_op4_data/temp_be.op4', True, '>'],
        ]
        try:
            for filename, binary, endian in filenames:
                o4.write(filename, names, mats,
                         binary=binary, endian=endian)
                names2, sizes, forms, mtypes = o4.dir(filename,
                                                      verbose=False)
                assert names2 == names
                dct = o4.dctload(filename)
                for nm in dct:
                    assert np.allclose(m[nm], dct[nm][0])
                o4.write(filename, wtdct,
                         binary=binary, endian=endian)
                dct = o4.dctload(filename)
                for nm in dct:
                    assert np.allclose(m[nm], dct[nm][0])
        finally:
            # Clean up the temp files even when an assertion fails
            # (the original leaked them on failure).
            for item in filenames:
                if os.path.exists(item[0]):
                    os.remove(item[0])

    def test_wtop4_single(self):
        # Writing a single named matrix (not a list/dict) must also
        # round-trip.
        matfile = 'nastran_op4_data/r_c_rc.mat'
        o4 = op4.OP4()
        m = matlab.loadmat(matfile)
        name = 'rmat'
        mat = m[name]
        filename = 'nastran_op4_data/temp_ascii.op4'
        try:
            o4.write(filename, name, mat, binary=False, endian='')
            dct = o4.dctload(filename)
            for nm in dct:
                assert np.allclose(m[nm], dct[nm][0])
        finally:
            if os.path.exists(filename):
                os.remove(filename)

    def _check_write_sparse(self, sparse, binary):
        """Shared body of the four sparse-format write tests: write every
        good fixture with the given *sparse* format / *binary* flag and
        verify it reads back identically.  (``binary=True`` matches the
        writer's documented default, which the binary originals relied
        on implicitly.)"""
        filenames = glob('nastran_op4_data/*.op4') +\
            glob('nastran_op4_data/*.op4.other')
        o4 = op4.OP4()
        try:
            for name in filenames:
                if name.find('badname') != -1:
                    continue
                data = o4.listload(name)
                o4.write('temp.op4', data[0], data[1], sparse=sparse,
                         binary=binary)
                data2 = o4.listload('temp.op4')
                assert data[0] == data2[0]
                for d1, d2 in zip(data[1], data2[1]):
                    assert np.all(d1 == d2)
        finally:
            if os.path.exists('temp.op4'):
                os.remove('temp.op4')

    def test_wtop4_nonbigmat_binary(self):
        self._check_write_sparse('nonbigmat', binary=True)

    def test_wtop4_bigmat_binary(self):
        self._check_write_sparse('bigmat', binary=True)

    def test_wtop4_nonbigmat_ascii(self):
        self._check_write_sparse('nonbigmat', binary=False)

    def test_wtop4_bigmat_ascii(self):
        self._check_write_sparse('bigmat', binary=False)

    def test_non_float64(self):
        # Values of non-float64 dtypes must round-trip through write/read.
        i8 = np.array([1, 2, 0, 4], np.int8)
        i16 = i8.astype(np.int16)
        i32 = i8.astype(np.int32)
        i64 = i8.astype(np.int64)
        f32 = i8.astype(np.float32)
        c32 = (f32 + 1j*f32).astype(np.complex64)
        o4 = op4.OP4()
        try:
            for mat in [i8, i16, i32, i64, f32, c32]:
                o4.write('temp.op4', dict(mat=mat))
                mat2 = o4.dctload('temp.op4', 'mat')['mat'][0]
                assert np.all(mat2 == mat)
        finally:
            if os.path.exists('temp.op4'):
                os.remove('temp.op4')
# Entry point: run this dev-test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| 38.501695
| 74
| 0.47658
| 1,271
| 11,358
| 4.177026
| 0.115657
| 0.030326
| 0.050104
| 0.037295
| 0.827274
| 0.827274
| 0.815596
| 0.815596
| 0.815596
| 0.815596
| 0
| 0.05076
| 0.403328
| 11,358
| 294
| 75
| 38.632653
| 0.732625
| 0.017785
| 0
| 0.771863
| 0
| 0
| 0.088797
| 0.044219
| 0
| 0
| 0
| 0
| 0.193916
| 1
| 0.038023
| false
| 0
| 0.026616
| 0
| 0.068441
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4a4ff299809ec56535e2e8ba6c5a987c50a81128
| 3,456
|
py
|
Python
|
pyaz/monitor/log_analytics/workspace/linked_storage/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | null | null | null |
pyaz/monitor/log_analytics/workspace/linked_storage/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | null | null | null |
pyaz/monitor/log_analytics/workspace/linked_storage/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | 1
|
2022-02-03T09:12:01.000Z
|
2022-02-03T09:12:01.000Z
|
from ..... pyaz_utils import _call_az
def create(resource_group, storage_accounts, type, workspace_name):
    '''
    Create some linked storage accounts for log analytics workspace.

    Required Parameters:
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - storage_accounts -- List of Name or ID of Azure Storage Account.
    - type -- Data source type for the linked storage account.
    - workspace_name -- Name of the Log Analytics Workspace.
    '''
    # Pass the CLI arguments explicitly rather than relying on locals().
    arguments = {
        "resource_group": resource_group,
        "storage_accounts": storage_accounts,
        "type": type,
        "workspace_name": workspace_name,
    }
    return _call_az(
        "az monitor log-analytics workspace linked-storage create", arguments)
def add(resource_group, storage_accounts, type, workspace_name):
    '''
    Add some linked storage accounts with specific data source type for log analytics workspace.

    Required Parameters:
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - storage_accounts -- List of Name or ID of Azure Storage Account.
    - type -- Data source type for the linked storage account.
    - workspace_name -- Name of the Log Analytics Workspace.
    '''
    # Pass the CLI arguments explicitly rather than relying on locals().
    arguments = {
        "resource_group": resource_group,
        "storage_accounts": storage_accounts,
        "type": type,
        "workspace_name": workspace_name,
    }
    return _call_az(
        "az monitor log-analytics workspace linked-storage add", arguments)
def remove(resource_group, storage_accounts, type, workspace_name):
    '''
    Remove some linked storage accounts with specific data source type for log analytics workspace.

    Required Parameters:
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - storage_accounts -- List of Name or ID of Azure Storage Account.
    - type -- Data source type for the linked storage account.
    - workspace_name -- Name of the Log Analytics Workspace.
    '''
    # Pass the CLI arguments explicitly rather than relying on locals().
    arguments = {
        "resource_group": resource_group,
        "storage_accounts": storage_accounts,
        "type": type,
        "workspace_name": workspace_name,
    }
    return _call_az(
        "az monitor log-analytics workspace linked-storage remove", arguments)
def delete(resource_group, type, workspace_name, yes=None):
    '''
    Delete all linked storage accounts with specific data source type for log analytics workspace.

    Required Parameters:
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - type -- Data source type for the linked storage account.
    - workspace_name -- Name of the Log Analytics Workspace.

    Optional Parameters:
    - yes -- Do not prompt for confirmation.
    '''
    # Pass the CLI arguments explicitly rather than relying on locals().
    arguments = {
        "resource_group": resource_group,
        "type": type,
        "workspace_name": workspace_name,
        "yes": yes,
    }
    return _call_az(
        "az monitor log-analytics workspace linked-storage delete", arguments)
def show(resource_group, type, workspace_name):
    '''
    List all linked storage accounts with specific data source type for a log analytics workspace.

    Required Parameters:
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - type -- Data source type for the linked storage account.
    - workspace_name -- Name of the Log Analytics Workspace.
    '''
    # Pass the CLI arguments explicitly rather than relying on locals().
    arguments = {
        "resource_group": resource_group,
        "type": type,
        "workspace_name": workspace_name,
    }
    return _call_az(
        "az monitor log-analytics workspace linked-storage show", arguments)
def list(resource_group, workspace_name):
    '''
    List all linked storage accounts for a log analytics workspace.

    Required Parameters:
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - workspace_name -- Name of the Log Analytics Workspace.
    '''
    # Pass the CLI arguments explicitly rather than relying on locals().
    arguments = {
        "resource_group": resource_group,
        "workspace_name": workspace_name,
    }
    return _call_az(
        "az monitor log-analytics workspace linked-storage list", arguments)
| 43.746835
| 128
| 0.728877
| 457
| 3,456
| 5.413567
| 0.12035
| 0.094584
| 0.152789
| 0.061843
| 0.898949
| 0.879952
| 0.879952
| 0.80194
| 0.80194
| 0.80194
| 0
| 0
| 0.193866
| 3,456
| 78
| 129
| 44.307692
| 0.888011
| 0.662326
| 0
| 0
| 0
| 0
| 0.341641
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.461538
| false
| 0
| 0.076923
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4ac6462a60f24eaf094b3a0e33a01dfd1b65e3ea
| 263
|
py
|
Python
|
sentinel/node/__init__.py
|
allagog0x01/sentwg
|
52285ecf2b03c30a78901a29a7af96c8ab5764c8
|
[
"Apache-2.0"
] | 342
|
2017-08-21T20:12:56.000Z
|
2022-03-19T17:58:25.000Z
|
sentinel/node/__init__.py
|
allagog0x01/sentwg
|
52285ecf2b03c30a78901a29a7af96c8ab5764c8
|
[
"Apache-2.0"
] | 57
|
2017-11-13T11:16:47.000Z
|
2022-03-01T13:54:31.000Z
|
vpn-node-cosmos/sentinel/node/__init__.py
|
smtcrms/sentinel
|
ff65bc9200f6c940aa184c0ec0872fdcfef25363
|
[
"MIT"
] | 72
|
2017-11-23T05:13:24.000Z
|
2022-02-25T14:18:33.000Z
|
# coding=utf-8
from .controllers import add_tx
from .controllers import get_free_coins
from .controllers import list_node
from .controllers import update_node
from .controllers import update_session
from .controllers import update_sessions
from .node import node
| 29.222222
| 40
| 0.847909
| 38
| 263
| 5.684211
| 0.421053
| 0.416667
| 0.583333
| 0.375
| 0.287037
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004292
| 0.114068
| 263
| 8
| 41
| 32.875
| 0.922747
| 0.045627
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
434e9d910af22d384cba4f92495c8dc224d8e157
| 67
|
py
|
Python
|
lithops/job/__init__.py
|
kpavel/lithops
|
395eef8b283512bd714d3633dcd94258e1df620c
|
[
"Apache-2.0"
] | 158
|
2020-09-16T13:22:03.000Z
|
2022-03-28T20:01:31.000Z
|
lithops/job/__init__.py
|
kpavel/lithops
|
395eef8b283512bd714d3633dcd94258e1df620c
|
[
"Apache-2.0"
] | 256
|
2018-05-20T13:01:51.000Z
|
2020-09-16T09:09:54.000Z
|
lithops/job/__init__.py
|
kpavel/lithops
|
395eef8b283512bd714d3633dcd94258e1df620c
|
[
"Apache-2.0"
] | 48
|
2020-09-19T15:29:53.000Z
|
2022-03-23T17:08:24.000Z
|
from .job import create_map_job
from .job import create_reduce_job
| 22.333333
| 34
| 0.850746
| 12
| 67
| 4.416667
| 0.5
| 0.264151
| 0.490566
| 0.716981
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119403
| 67
| 2
| 35
| 33.5
| 0.898305
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
4389d0d933a80d6c23c1d1a8444b337afe89e671
| 623
|
py
|
Python
|
learn/model/RandomArch.py
|
fuyuanlyu/OptInter
|
95abda78261818e093dabe508d3609806372f2a5
|
[
"Apache-2.0"
] | 1
|
2022-03-15T08:52:09.000Z
|
2022-03-15T08:52:09.000Z
|
learn/model/RandomArch.py
|
fuyuanlyu/OptInter
|
95abda78261818e093dabe508d3609806372f2a5
|
[
"Apache-2.0"
] | null | null | null |
learn/model/RandomArch.py
|
fuyuanlyu/OptInter
|
95abda78261818e093dabe508d3609806372f2a5
|
[
"Apache-2.0"
] | 1
|
2022-03-22T10:37:31.000Z
|
2022-03-22T10:37:31.000Z
|
import numpy as np
import os

# Generate random architecture weights for the iPinYou search experiment
# and persist them for later use.
#
# Earlier experiments used shape (325, 3) for Criteo with
# 'logs/search-Criteo-random-10' and (276, 3) for Avazu with
# 'logs/search-Avazu-random-10'; the commented-out duplicates of this
# script for those datasets have been removed.
#
# NOTE: np.random.rand is unseeded, so each run produces different weights.
arch_parameters = np.random.rand(120, 3)
save_path = 'logs/search-iPinYou-random-10'
os.makedirs(save_path, exist_ok=True)
np.save(os.path.join(save_path, 'arch_weight.npy'), arch_parameters)
| 31.15
| 70
| 0.754414
| 105
| 623
| 4.27619
| 0.266667
| 0.160356
| 0.106904
| 0.146993
| 0.888641
| 0.703786
| 0.703786
| 0.703786
| 0.703786
| 0.703786
| 0
| 0.03169
| 0.088283
| 623
| 19
| 71
| 32.789474
| 0.758803
| 0.609952
| 0
| 0
| 0
| 0
| 0.188034
| 0.123932
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
439b2dc8a9495dbdea970728117997adb9bcc64d
| 2,465
|
py
|
Python
|
fsdviz/common/migrations/0020_common_colorfields.py
|
AdamCottrill/fsdivz
|
98dd1f35a08dba26424e2951a40715e01399478c
|
[
"MIT"
] | null | null | null |
fsdviz/common/migrations/0020_common_colorfields.py
|
AdamCottrill/fsdivz
|
98dd1f35a08dba26424e2951a40715e01399478c
|
[
"MIT"
] | 6
|
2020-02-12T00:03:40.000Z
|
2020-11-30T01:20:56.000Z
|
fsdviz/common/migrations/0020_common_colorfields.py
|
AdamCottrill/fsdviz
|
98dd1f35a08dba26424e2951a40715e01399478c
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.24 on 2021-09-13 14:28
import colorfield.fields
from django.db import migrations
class Migration(migrations.Migration):
    """Add an identical ``color`` field (default red) to every common
    lookup model."""

    dependencies = [
        ('common', '0019_sequence_lower_upper'),
    ]

    # Every model below receives the same ColorField; generate the
    # AddField operations from the model list instead of repeating them.
    operations = [
        migrations.AddField(
            model_name=model,
            name='color',
            field=colorfield.fields.ColorField(default='#FF0000',
                                               max_length=18),
        )
        for model in (
            'agency',
            'compositefinclip',
            'finclip',
            'fishtag',
            'jurisdiction',
            'lake',
            'managementunit',
            'physchemmark',
            'species',
            'stateprovince',
            'strain',
            'strainraw',
        )
    ]
| 32.866667
| 81
| 0.567951
| 223
| 2,465
| 6.156951
| 0.215247
| 0.151493
| 0.20102
| 0.23598
| 0.775674
| 0.775674
| 0.775674
| 0.775674
| 0.775674
| 0.775674
| 0
| 0.053676
| 0.304665
| 2,465
| 74
| 82
| 33.310811
| 0.747375
| 0.018661
| 0
| 0.705882
| 1
| 0
| 0.119156
| 0.010343
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.029412
| 0
| 0.073529
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
43bfeaa8a4a19b3afd0dcb09d3699403206b451a
| 20,109
|
py
|
Python
|
src/clients/ctm_api_client/api/archive_api.py
|
IceT-M/ctm-python-client
|
0ef1d8a3c9a27a01c088be1cdf5d177d25912bac
|
[
"BSD-3-Clause"
] | 5
|
2021-12-01T18:40:00.000Z
|
2022-03-04T10:51:44.000Z
|
src/clients/ctm_api_client/api/archive_api.py
|
IceT-M/ctm-python-client
|
0ef1d8a3c9a27a01c088be1cdf5d177d25912bac
|
[
"BSD-3-Clause"
] | 3
|
2022-02-21T20:08:32.000Z
|
2022-03-16T17:41:03.000Z
|
src/clients/ctm_api_client/api/archive_api.py
|
IceT-M/ctm-python-client
|
0ef1d8a3c9a27a01c088be1cdf5d177d25912bac
|
[
"BSD-3-Clause"
] | 7
|
2021-12-01T11:59:16.000Z
|
2022-03-01T18:16:40.000Z
|
# coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.20.215
Contact: customer_support@bmc.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from clients.ctm_api_client.api_client import ApiClient
class ArchiveApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_archive_job_log(self, job_id, run_no, **kwargs): # noqa: E501
"""Get job log # noqa: E501
Get job log by unique job key # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_archive_job_log(job_id, run_no, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str job_id: The job ID (required)
:param int run_no: The execution number in case of multiple executions (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
if kwargs.get("async_req"):
return self.get_archive_job_log_with_http_info(
job_id, run_no, **kwargs
) # noqa: E501
else:
(data) = self.get_archive_job_log_with_http_info(
job_id, run_no, **kwargs
) # noqa: E501
return data
def get_archive_job_log_with_http_info(
self, job_id, run_no, **kwargs
): # noqa: E501
"""Get job log # noqa: E501
Get job log by unique job key # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_archive_job_log_with_http_info(job_id, run_no, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str job_id: The job ID (required)
:param int run_no: The execution number in case of multiple executions (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ["job_id", "run_no"] # noqa: E501
all_params.append("async_req")
all_params.append("_return_http_data_only")
all_params.append("_preload_content")
all_params.append("_request_timeout")
params = locals()
for key, val in six.iteritems(params["kwargs"]):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_archive_job_log" % key
)
params[key] = val
del params["kwargs"]
# verify the required parameter 'job_id' is set
if self.api_client.client_side_validation and (
"job_id" not in params or params["job_id"] is None
): # noqa: E501
raise ValueError(
"Missing the required parameter `job_id` when calling `get_archive_job_log`"
) # noqa: E501
# verify the required parameter 'run_no' is set
if self.api_client.client_side_validation and (
"run_no" not in params or params["run_no"] is None
): # noqa: E501
raise ValueError(
"Missing the required parameter `run_no` when calling `get_archive_job_log`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "job_id" in params:
path_params["jobId"] = params["job_id"] # noqa: E501
query_params = []
if "run_no" in params:
query_params.append(("runNo", params["run_no"])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json", "text/plain"]
) # noqa: E501
# Authentication setting
auth_settings = ["Bearer"] # noqa: E501
return self.api_client.call_api(
"/archive/{jobId}/log",
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="str", # noqa: E501
auth_settings=auth_settings,
async_req=params.get("async_req"),
_return_http_data_only=params.get("_return_http_data_only"),
_preload_content=params.get("_preload_content", True),
_request_timeout=params.get("_request_timeout"),
collection_formats=collection_formats,
)
def get_archive_job_output(self, job_id, run_no, **kwargs): # noqa: E501
"""Get job output # noqa: E501
Get job output by unique job key # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_archive_job_output(job_id, run_no, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str job_id: The job ID (required)
:param int run_no: The execution number in case of multiple executions (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
if kwargs.get("async_req"):
return self.get_archive_job_output_with_http_info(
job_id, run_no, **kwargs
) # noqa: E501
else:
(data) = self.get_archive_job_output_with_http_info(
job_id, run_no, **kwargs
) # noqa: E501
return data
def get_archive_job_output_with_http_info(
self, job_id, run_no, **kwargs
): # noqa: E501
"""Get job output # noqa: E501
Get job output by unique job key # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_archive_job_output_with_http_info(job_id, run_no, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str job_id: The job ID (required)
:param int run_no: The execution number in case of multiple executions (required)
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ["job_id", "run_no"] # noqa: E501
all_params.append("async_req")
all_params.append("_return_http_data_only")
all_params.append("_preload_content")
all_params.append("_request_timeout")
params = locals()
for key, val in six.iteritems(params["kwargs"]):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_archive_job_output" % key
)
params[key] = val
del params["kwargs"]
# verify the required parameter 'job_id' is set
if self.api_client.client_side_validation and (
"job_id" not in params or params["job_id"] is None
): # noqa: E501
raise ValueError(
"Missing the required parameter `job_id` when calling `get_archive_job_output`"
) # noqa: E501
# verify the required parameter 'run_no' is set
if self.api_client.client_side_validation and (
"run_no" not in params or params["run_no"] is None
): # noqa: E501
raise ValueError(
"Missing the required parameter `run_no` when calling `get_archive_job_output`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "job_id" in params:
path_params["jobId"] = params["job_id"] # noqa: E501
query_params = []
if "run_no" in params:
query_params.append(("runNo", params["run_no"])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json", "text/plain"]
) # noqa: E501
# Authentication setting
auth_settings = ["Bearer"] # noqa: E501
return self.api_client.call_api(
"/archive/{jobId}/output",
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="str", # noqa: E501
auth_settings=auth_settings,
async_req=params.get("async_req"),
_return_http_data_only=params.get("_return_http_data_only"),
_preload_content=params.get("_preload_content", True),
_request_timeout=params.get("_request_timeout"),
collection_formats=collection_formats,
)
def search_jobs(self, **kwargs): # noqa: E501
"""Search jobs in Archive # noqa: E501
Get all the Control-M Archiving jobs that match the search criterias # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_jobs(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int limit: maximum jobs to fetch (default 500).
:param str jobname: The name of the job.
:param str jobid:
:param str ctm: The name of the Control-M server in which the job was ordered from.
:param str server: The name of the Control-M server in which the job was ordered from.
:param str folder: The name of the parent folder.
:param str from_time: Job execution start date. Date format - YYYY-MM-DD.
:param str to_time: Job execution end date. Date format - YYYY-MM-DD.
:param str log_contains: Job log must contain the given phrase.
:param str output_contains: Job output must contain the given phrase.
:param str application: The name of the application the jobs belong to.
:param str sub_application: The name of the sub-application the jobs belong to.
:param str library: The job's library name.
:param str mem_name: Member name.
:param str mem_library: Member's library.
:param str host:
:param str host_group: Job's host group.
:param str run_as: Runs as (username on agent machine).
:param str order_id: Job's order id.
:param str status: The job's end status.
:param str order_date_from: Indicating a date by which will look for jobs that their order date started afterwards. Date format - YYYY-MM-DD.
:param str order_date_to: Indicating a date by which will look for jobs that their order date ended before. Date format - YYYY-MM-DD.
:param int number_of_runs:
:return: ArchiveJobsList
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
if kwargs.get("async_req"):
return self.search_jobs_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_jobs_with_http_info(**kwargs) # noqa: E501
return data
def search_jobs_with_http_info(self, **kwargs): # noqa: E501
"""Search jobs in Archive # noqa: E501
Get all the Control-M Archiving jobs that match the search criterias # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_jobs_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int limit: maximum jobs to fetch (default 500).
:param str jobname: The name of the job.
:param str jobid:
:param str ctm: The name of the Control-M server in which the job was ordered from.
:param str server: The name of the Control-M server in which the job was ordered from.
:param str folder: The name of the parent folder.
:param str from_time: Job execution start date. Date format - YYYY-MM-DD.
:param str to_time: Job execution end date. Date format - YYYY-MM-DD.
:param str log_contains: Job log must contain the given phrase.
:param str output_contains: Job output must contain the given phrase.
:param str application: The name of the application the jobs belong to.
:param str sub_application: The name of the sub-application the jobs belong to.
:param str library: The job's library name.
:param str mem_name: Member name.
:param str mem_library: Member's library.
:param str host:
:param str host_group: Job's host group.
:param str run_as: Runs as (username on agent machine).
:param str order_id: Job's order id.
:param str status: The job's end status.
:param str order_date_from: Indicating a date by which will look for jobs that their order date started afterwards. Date format - YYYY-MM-DD.
:param str order_date_to: Indicating a date by which will look for jobs that their order date ended before. Date format - YYYY-MM-DD.
:param int number_of_runs:
:return: ArchiveJobsList
If the method is called asynchronously,
returns the request thread.
"""
all_params = [
"limit",
"jobname",
"jobid",
"ctm",
"server",
"folder",
"from_time",
"to_time",
"log_contains",
"output_contains",
"application",
"sub_application",
"library",
"mem_name",
"mem_library",
"host",
"host_group",
"run_as",
"order_id",
"status",
"order_date_from",
"order_date_to",
"number_of_runs",
] # noqa: E501
all_params.append("async_req")
all_params.append("_return_http_data_only")
all_params.append("_preload_content")
all_params.append("_request_timeout")
params = locals()
for key, val in six.iteritems(params["kwargs"]):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_jobs" % key
)
params[key] = val
del params["kwargs"]
if self.api_client.client_side_validation and (
"limit" in params and params["limit"] > 1000
): # noqa: E501
raise ValueError(
"Invalid value for parameter `limit` when calling `search_jobs`, must be a value less than or equal to `1000`"
) # noqa: E501
if self.api_client.client_side_validation and (
"limit" in params and params["limit"] < 1
): # noqa: E501
raise ValueError(
"Invalid value for parameter `limit` when calling `search_jobs`, must be a value greater than or equal to `1`"
) # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if "limit" in params:
query_params.append(("limit", params["limit"])) # noqa: E501
if "jobname" in params:
query_params.append(("jobname", params["jobname"])) # noqa: E501
if "jobid" in params:
query_params.append(("jobid", params["jobid"])) # noqa: E501
if "ctm" in params:
query_params.append(("ctm", params["ctm"])) # noqa: E501
if "server" in params:
query_params.append(("server", params["server"])) # noqa: E501
if "folder" in params:
query_params.append(("folder", params["folder"])) # noqa: E501
if "from_time" in params:
query_params.append(("fromTime", params["from_time"])) # noqa: E501
if "to_time" in params:
query_params.append(("toTime", params["to_time"])) # noqa: E501
if "log_contains" in params:
query_params.append(("logContains", params["log_contains"])) # noqa: E501
if "output_contains" in params:
query_params.append(
("outputContains", params["output_contains"])
) # noqa: E501
if "application" in params:
query_params.append(("application", params["application"])) # noqa: E501
if "sub_application" in params:
query_params.append(
("subApplication", params["sub_application"])
) # noqa: E501
if "library" in params:
query_params.append(("library", params["library"])) # noqa: E501
if "mem_name" in params:
query_params.append(("memName", params["mem_name"])) # noqa: E501
if "mem_library" in params:
query_params.append(("memLibrary", params["mem_library"])) # noqa: E501
if "host" in params:
query_params.append(("host", params["host"])) # noqa: E501
if "host_group" in params:
query_params.append(("hostGroup", params["host_group"])) # noqa: E501
if "run_as" in params:
query_params.append(("runAs", params["run_as"])) # noqa: E501
if "order_id" in params:
query_params.append(("orderId", params["order_id"])) # noqa: E501
if "status" in params:
query_params.append(("status", params["status"])) # noqa: E501
if "order_date_from" in params:
query_params.append(
("orderDateFrom", params["order_date_from"])
) # noqa: E501
if "order_date_to" in params:
query_params.append(("orderDateTo", params["order_date_to"])) # noqa: E501
if "number_of_runs" in params:
query_params.append(
("numberOfRuns", params["number_of_runs"])
) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["Bearer"] # noqa: E501
return self.api_client.call_api(
"/archive/search",
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="ArchiveJobsList", # noqa: E501
auth_settings=auth_settings,
async_req=params.get("async_req"),
_return_http_data_only=params.get("_return_http_data_only"),
_preload_content=params.get("_preload_content", True),
_request_timeout=params.get("_request_timeout"),
collection_formats=collection_formats,
)
| 40.460765
| 149
| 0.595405
| 2,484
| 20,109
| 4.611514
| 0.099839
| 0.053077
| 0.043038
| 0.041467
| 0.862505
| 0.818071
| 0.790485
| 0.780445
| 0.778874
| 0.772239
| 0
| 0.018544
| 0.313491
| 20,109
| 496
| 150
| 40.542339
| 0.811228
| 0.362773
| 0
| 0.552901
| 1
| 0.006826
| 0.202214
| 0.028352
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023891
| false
| 0
| 0.013652
| 0
| 0.071672
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
43d160c0d3df156e0b8e475c7350eb90339082b5
| 4,916
|
py
|
Python
|
data_api/migrations/0022_auto_20160406_0731.py
|
bwootton/Dator
|
80f736cf5d8d58312725a866ce04e03a6fefdb2c
|
[
"MIT"
] | 4
|
2015-09-04T19:37:06.000Z
|
2016-11-23T12:35:48.000Z
|
data_api/migrations/0022_auto_20160406_0731.py
|
bwootton/Dator
|
80f736cf5d8d58312725a866ce04e03a6fefdb2c
|
[
"MIT"
] | 8
|
2015-10-29T20:02:26.000Z
|
2021-06-10T19:07:26.000Z
|
data_api/migrations/0022_auto_20160406_0731.py
|
bwootton/Dator
|
80f736cf5d8d58312725a866ce04e03a6fefdb2c
|
[
"MIT"
] | 4
|
2015-09-06T22:35:46.000Z
|
2019-06-03T13:20:48.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Create ``LocalSignalTag`` and make every model's auto timestamps
    nullable."""

    dependencies = [
        ('data_api', '0021_remove_experiment_media_link'),
    ]

    # Each listed model gets its created_at (auto_now_add) and updated_at
    # (auto_now) fields altered to allow NULL; the pairs are generated
    # instead of spelled out 24 times.
    operations = [
        migrations.CreateModel(
            name='LocalSignalTag',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False,
                                        auto_created=True, primary_key=True)),
                ('uploaded', models.BooleanField(default=False)),
            ],
        ),
    ] + [
        migrations.AlterField(
            model_name=model,
            name=field_name,
            field=models.DateTimeField(null=True, **{auto_flag: True}),
        )
        for model in (
            'blob', 'command', 'event', 'experiment', 'localcomputer',
            'map', 'mappoint', 'program', 'setting', 'shift', 'signal',
            'system',
        )
        for field_name, auto_flag in (('created_at', 'auto_now_add'),
                                      ('updated_at', 'auto_now'))
    ] + [
        migrations.AddField(
            model_name='localsignaltag',
            name='signal',
            field=models.ForeignKey(to='data_api.Signal'),
        ),
    ]
| 33.442177
| 114
| 0.558584
| 461
| 4,916
| 5.741866
| 0.136659
| 0.085002
| 0.226672
| 0.262939
| 0.849263
| 0.849263
| 0.835285
| 0.835285
| 0.835285
| 0.777106
| 0
| 0.001508
| 0.325468
| 4,916
| 146
| 115
| 33.671233
| 0.796743
| 0.004272
| 0
| 0.871429
| 0
| 0
| 0.103004
| 0.006744
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.014286
| 0
| 0.035714
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
6076b7cd5c986a9d5f89b923cc3442e51929f01b
| 8,650
|
py
|
Python
|
lifelong_rl/samplers/data_collector/path_collector.py
|
nakamotoo/lifelong_rl
|
a8376a57cdeff158810e71cd31cba089852399b7
|
[
"MIT"
] | null | null | null |
lifelong_rl/samplers/data_collector/path_collector.py
|
nakamotoo/lifelong_rl
|
a8376a57cdeff158810e71cd31cba089852399b7
|
[
"MIT"
] | null | null | null |
lifelong_rl/samplers/data_collector/path_collector.py
|
nakamotoo/lifelong_rl
|
a8376a57cdeff158810e71cd31cba089852399b7
|
[
"MIT"
] | null | null | null |
from collections import deque, OrderedDict
from lifelong_rl.util.eval_util import create_stats_ordered_dict
from lifelong_rl.samplers.utils.rollout_functions import rollout_with_latent, rollout_with_kbit_memory, rollout_with_lstm
from lifelong_rl.samplers import rollout, multitask_rollout
from lifelong_rl.samplers import PathCollector
class MdpPathCollector(PathCollector):
    """Collects rollouts from an environment with the wrapped policy.

    Paths gathered during an epoch are held in a bounded deque so that
    diagnostics reflect only recent experience; lifetime step/path counts
    are tracked separately.
    """

    def __init__(
            self,
            env,
            policy,
            max_num_epoch_paths_saved=None,
            render=False,
            render_kwargs=None,
            latent_dim=None
    ):
        self._env = env
        self._policy = policy
        self._max_num_epoch_paths_saved = max_num_epoch_paths_saved
        self._epoch_paths = deque(maxlen=max_num_epoch_paths_saved)
        self._render = render
        self._render_kwargs = {} if render_kwargs is None else render_kwargs
        self._num_steps_total = 0
        self._num_paths_total = 0
        self._latent_dim = latent_dim

    def rollout_function(self, *args, **kwargs):
        # Delegate to the generic rollout, forwarding the latent dimension.
        return rollout(latent_dim=self._latent_dim, *args, **kwargs)

    def reset_policy(self):
        # Put the policy into evaluation mode before collecting a path.
        self._policy.reset()
        self._policy.eval()

    def finish_path(self, path):
        # Hook for subclasses; the base collector records nothing extra.
        return

    def end_path_collection(self):
        # Restore training mode once collection is done.
        self._policy.train()

    def collect_new_paths(
            self,
            max_path_length,
            num_steps,
            discard_incomplete_paths,
    ):
        collected = []
        steps_so_far = 0
        while steps_so_far < num_steps:
            # Cap the horizon so we never exceed the requested step budget.
            horizon = min(max_path_length, num_steps - steps_so_far)
            self.reset_policy()
            path = self.rollout_function(
                self._env,
                self._policy,
                max_path_length=horizon,
            )
            path_len = len(path['actions'])
            ended_early = (
                path_len != max_path_length
                and not path['terminals'][-1]
            )
            if ended_early and discard_incomplete_paths:
                break
            steps_so_far += path_len
            self.finish_path(path)
            collected.append(path)
        self._num_paths_total += len(collected)
        self._num_steps_total += steps_so_far
        self._epoch_paths.extend(collected)
        self.end_path_collection()
        return collected

    def get_epoch_paths(self):
        return self._epoch_paths

    def end_epoch(self, epoch):
        # Drop last epoch's paths; a fresh bounded deque is the cheapest way.
        self._epoch_paths = deque(maxlen=self._max_num_epoch_paths_saved)

    def get_diagnostics(self):
        lengths = [len(p['actions']) for p in self._epoch_paths]
        stats = OrderedDict([
            ('num steps total', self._num_steps_total),
            ('num paths total', self._num_paths_total),
        ])
        stats.update(create_stats_ordered_dict(
            "path length",
            lengths,
            always_show_all_stats=True,
        ))
        return stats

    def get_snapshot(self):
        return dict(env=self._env, policy=self._policy)
class LatentPathCollector(MdpPathCollector):
    """Samples a latent at the start of each trajectory for a
    PriorLatentPolicy, and tags each finished path with that latent."""

    def __init__(self, sample_latent_every=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.prev_latent = None
        self.sample_latent_every = sample_latent_every
        self.rollout_func = rollout_with_latent

    def rollout_function(self, *args, **kwargs):
        return rollout_with_latent(sample_latent_every=self.sample_latent_every, *args, **kwargs)

    def finish_path(self, path):
        # Record which latent produced this trajectory.
        path['latent'] = self.prev_latent

    def reset_policy(self):
        super().reset_policy()
        # Freeze a freshly-sampled latent for the duration of the rollout.
        self._policy.fixed_latent = True
        self._policy.sample_latent()
        self.prev_latent = self._policy.get_current_latent()

    def end_path_collection(self):
        super().end_path_collection()
        # Unfreeze and resample so training continues with a live latent.
        self._policy.fixed_latent = False
        self._policy.sample_latent()
class KbitMemoryPathCollector(MdpPathCollector):
    """Latent-style collector whose rollouts carry a k-bit memory."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.prev_latent = None
        self.rollout_func = rollout_with_kbit_memory

    def rollout_function(self, *args, **kwargs):
        return rollout_with_kbit_memory(*args, **kwargs)

    def finish_path(self, path):
        # Record which latent produced this trajectory.
        path['latent'] = self.prev_latent

    def reset_policy(self):
        super().reset_policy()
        # Freeze a freshly-sampled latent for the duration of the rollout.
        self._policy.fixed_latent = True
        self._policy.sample_latent()
        self.prev_latent = self._policy.get_current_latent()

    def end_path_collection(self):
        super().end_path_collection()
        # Unfreeze and resample so training continues with a live latent.
        self._policy.fixed_latent = False
        self._policy.sample_latent()
class LSTMPathCollector(MdpPathCollector):
    """Collector whose rollouts are produced by ``rollout_with_lstm``.

    Unlike the latent collectors, no latent is sampled or attached to the
    paths — the LSTM's recurrent state lives inside the rollout function.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.rollout_func = rollout_with_lstm

    def rollout_function(self, *args, **kwargs):
        return rollout_with_lstm(*args, **kwargs)

    def finish_path(self, path):
        # Nothing extra to record per path.
        return

    def reset_policy(self):
        # Base behavior only: reset policy and switch to eval mode.
        super().reset_policy()

    def end_path_collection(self):
        # Base behavior only: switch policy back to train mode.
        super().end_path_collection()
class GoalConditionedPathCollector(PathCollector):
    """Path collector for goal-conditioned (multitask) environments.

    Rollouts come from ``multitask_rollout``, which reads the observation
    and the desired goal out of the env's dict observations using the
    configured keys.
    """

    def __init__(
            self,
            env,
            policy,
            max_num_epoch_paths_saved=None,
            render=False,
            render_kwargs=None,
            observation_key='observation',
            desired_goal_key='desired_goal',
    ):
        self._env = env
        self._policy = policy
        self._max_num_epoch_paths_saved = max_num_epoch_paths_saved
        self._render = render
        self._render_kwargs = {} if render_kwargs is None else render_kwargs
        self._epoch_paths = deque(maxlen=max_num_epoch_paths_saved)
        self._observation_key = observation_key
        self._desired_goal_key = desired_goal_key
        self._num_steps_total = 0
        self._num_paths_total = 0

    def collect_new_paths(
            self,
            max_path_length,
            num_steps,
            discard_incomplete_paths,
    ):
        collected = []
        steps_so_far = 0
        while steps_so_far < num_steps:
            # Cap the horizon so we never exceed the requested step budget.
            horizon = min(max_path_length, num_steps - steps_so_far)
            path = multitask_rollout(
                self._env,
                self._policy,
                max_path_length=horizon,
                render=self._render,
                render_kwargs=self._render_kwargs,
                observation_key=self._observation_key,
                desired_goal_key=self._desired_goal_key,
                return_dict_obs=True,
            )
            path_len = len(path['actions'])
            ended_early = (
                path_len != max_path_length
                and not path['terminals'][-1]
            )
            if ended_early and discard_incomplete_paths:
                break
            steps_so_far += path_len
            collected.append(path)
        self._num_paths_total += len(collected)
        self._num_steps_total += steps_so_far
        self._epoch_paths.extend(collected)
        return collected

    def get_epoch_paths(self):
        return self._epoch_paths

    def end_epoch(self, epoch):
        # Drop last epoch's paths; a fresh bounded deque is the cheapest way.
        self._epoch_paths = deque(maxlen=self._max_num_epoch_paths_saved)

    def get_diagnostics(self):
        lengths = [len(p['actions']) for p in self._epoch_paths]
        stats = OrderedDict([
            ('num steps total', self._num_steps_total),
            ('num paths total', self._num_paths_total),
        ])
        stats.update(create_stats_ordered_dict(
            "path length",
            lengths,
            always_show_all_stats=True,
        ))
        return stats

    def get_snapshot(self):
        return dict(
            env=self._env,
            policy=self._policy,
            observation_key=self._observation_key,
            desired_goal_key=self._desired_goal_key,
        )
| 31.34058
| 121
| 0.612254
| 984
| 8,650
| 4.947154
| 0.118902
| 0.042728
| 0.032046
| 0.032868
| 0.809573
| 0.77198
| 0.77198
| 0.765201
| 0.755957
| 0.708505
| 0
| 0.001332
| 0.305665
| 8,650
| 275
| 122
| 31.454545
| 0.809191
| 0.041156
| 0
| 0.809955
| 0
| 0
| 0.019717
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.140271
| false
| 0
| 0.022624
| 0.045249
| 0.248869
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
60ba2617b5387422cc68e2c3820c5128ec989cd5
| 10,805
|
py
|
Python
|
tests/test_count.py
|
vishalbelsare/category_encoders
|
55636b5ae11dc45075a0c248028f17f9df93bbb9
|
[
"BSD-3-Clause"
] | 616
|
2020-04-28T09:22:07.000Z
|
2022-03-31T06:57:01.000Z
|
tests/test_count.py
|
vishalbelsare/category_encoders
|
55636b5ae11dc45075a0c248028f17f9df93bbb9
|
[
"BSD-3-Clause"
] | 101
|
2020-04-27T19:29:39.000Z
|
2022-03-31T12:28:37.000Z
|
tests/test_count.py
|
vishalbelsare/category_encoders
|
55636b5ae11dc45075a0c248028f17f9df93bbb9
|
[
"BSD-3-Clause"
] | 108
|
2020-04-30T09:49:30.000Z
|
2022-03-17T02:53:28.000Z
|
import pandas as pd
from unittest import TestCase # or `from unittest import ...` if on Python 3.4+
import numpy as np
import category_encoders as encoders
# Training fixture: 20 rows. 'none' uses Python None for missing values,
# 'na_categorical' uses np.nan — the two missing-value styles the count
# encoder must handle. The exact category counts (e.g. 8 A's in 'none')
# are relied on by the assertions in TestCountEncoder below.
X = pd.DataFrame({
    'none': [
        'A', 'A', 'B', None, None, 'C', None, 'C', None, 'B',
        'A', 'A', 'C', 'B', 'B', 'A', 'A', None, 'B', None
    ],
    'na_categorical': [
        'A', 'A', 'C', 'A', 'B', 'C', 'C', 'A', np.nan, 'B', 'A',
        'C', 'C', 'A', 'B', 'C', np.nan, 'A', np.nan, np.nan
    ]
})
# Transform fixture: 15 rows with the same columns and category alphabet,
# used as the input to `enc.transform` after fitting on X.
X_t = pd.DataFrame({
    'none': [
        'A', 'C', None, 'B', 'C', 'C', None, None, 'A',
        'A', 'C', 'A', 'B', 'A', 'A'
    ],
    'na_categorical': [
        'C', 'C', 'A', 'B', 'C', 'A', np.nan, 'B', 'A', 'A',
        'B', np.nan, 'A', np.nan, 'A'
    ]
})
class TestCountEncoder(TestCase):
    """Unit tests for category_encoders.CountEncoder.

    All expected values (counts such as 5, 3, 6) are the category
    frequencies of the fixture frame ``X`` defined above; ``X_t`` is the
    frame being transformed. Tests also poke private attributes such as
    ``enc._handle_missing`` to check per-column option resolution.
    """
    def test_count_defaults(self):
        """Test the defaults are working as expected on 'none' and 'categorical'
        which are the most extreme edge cases for the count encoder."""
        enc = encoders.CountEncoder(verbose=1)
        enc.fit(X)
        out = enc.transform(X_t)
        # Default behavior counts None as its own group in 'none'.
        self.assertTrue(pd.Series([5, 3, 6]).isin(out['none'].unique()).all())
        self.assertTrue(out['none'].unique().shape == (3,))
        self.assertTrue(out['none'].isnull().sum() == 0)
        self.assertTrue(pd.Series([6, 3]).isin(out['na_categorical']).all())
        self.assertTrue(out['na_categorical'].unique().shape == (4,))
        self.assertTrue(enc.mapping is not None)
    def test_count_handle_missing_string(self):
        """Test the handle_missing string on 'none' and 'na_categorical'."""
        enc = encoders.CountEncoder(
            handle_missing='return_nan'
        )
        enc.fit(X)
        out = enc.transform(X_t)
        # A string option should be expanded to every column.
        self.assertIn('none', enc._handle_missing)
        self.assertTrue(pd.Series([6, 5, 3]).isin(out['none']).all())
        self.assertTrue(out['none'].unique().shape == (4,))
        self.assertTrue(out['none'].isnull().sum() == 3)
        self.assertTrue(pd.Series([6, 7, 3]).isin(out['na_categorical']).all())
        self.assertFalse(pd.Series([4]).isin(out['na_categorical']).all())
        self.assertTrue(out['na_categorical'].unique().shape == (4,))
        self.assertTrue(out['na_categorical'].isnull().sum() == 3)
    def test_count_handle_missing_dict(self):
        """Test the handle_missing dict on 'none' and 'na_categorical'.
        We want to see differing behavour between 'none' and 'na_cat' cols."""
        enc = encoders.CountEncoder(
            handle_missing={'na_categorical': 'return_nan'}
        )
        enc.fit(X)
        out = enc.transform(X_t)
        self.assertIn('none', enc._handle_missing)
        # 'none' keeps the default handling; only 'na_categorical' returns NaN.
        self.assertTrue(pd.Series([5, 3, 6]).isin(out['none']).all())
        self.assertTrue(out['none'].unique().shape == (3,))
        self.assertTrue(out['none'].isnull().sum() == 0)
        self.assertTrue(pd.Series([6, 7, 3]).isin(out['na_categorical']).all())
        self.assertFalse(pd.Series([4]).isin(out['na_categorical']).all())
        self.assertTrue(out['na_categorical'].unique().shape == (4,))
        self.assertTrue(out['na_categorical'].isnull().sum() == 3)
    def test_count_handle_unknown_string(self):
        """Test the handle_unknown string on 'none' and 'na_categorical'.
        The 'handle_missing' must be set to 'return_nan' in order to test
        'handle_unkown' correctly."""
        enc = encoders.CountEncoder(
            handle_missing='return_nan',
            handle_unknown='return_nan',
        )
        enc.fit(X)
        out = enc.transform(X_t)
        self.assertIn('none', enc._handle_unknown)
        self.assertTrue(pd.Series([6, 5, 3]).isin(out['none']).all())
        self.assertTrue(out['none'].unique().shape == (4,))
        self.assertTrue(out['none'].isnull().sum() == 3)
        self.assertTrue(pd.Series([3, 6, 7]).isin(out['na_categorical']).all())
        self.assertTrue(out['na_categorical'].unique().shape == (4,))
        self.assertTrue(out['na_categorical'].isnull().sum() == 3)
    def test_count_handle_unknown_dict(self):
        """Test the 'handle_unkown' dict with all non-default options."""
        enc = encoders.CountEncoder(
            handle_missing='return_nan',
            handle_unknown={
                'none': -1,
                'na_categorical': 'return_nan'
            },
        )
        enc.fit(X)
        out = enc.transform(X_t)
        self.assertIn('none', enc._handle_unknown)
        # Unknowns in 'none' map to the literal -1 fill value.
        self.assertTrue(pd.Series([6, 5, 3, -1]).isin(out['none']).all())
        self.assertTrue(out['none'].unique().shape == (4,))
        self.assertTrue(out['none'].isnull().sum() == 0)
        self.assertTrue(pd.Series([3, 6, 7]).isin(out['na_categorical']).all())
        self.assertTrue(out['na_categorical'].unique().shape == (4,))
        self.assertTrue(out['na_categorical'].isnull().sum() == 3)
    def test_count_min_group_size_int(self):
        """Test the min_group_size int on 'none' and 'na_categorical'."""
        enc = encoders.CountEncoder(min_group_size=7)
        enc.fit(X)
        out = enc.transform(X_t)
        self.assertTrue(pd.Series([6, 5, 3]).isin(out['none']).all())
        self.assertTrue(out['none'].unique().shape == (3,))
        self.assertTrue(out['none'].isnull().sum() == 0)
        self.assertIn(np.nan, enc.mapping['none'])
        self.assertTrue(pd.Series([13, 7]).isin(out['na_categorical']).all())
        self.assertTrue(out['na_categorical'].unique().shape == (2,))
        # Small groups are merged under a combined name like 'B_C_nan'.
        self.assertIn('B_C_nan', enc.mapping['na_categorical'])
        self.assertFalse(np.nan in enc.mapping['na_categorical'])
    def test_count_min_group_size_dict(self):
        """Test the min_group_size dict on 'none' and 'na_categorical'."""
        enc = encoders.CountEncoder(
            min_group_size={'none': 6, 'na_categorical': 7}
        )
        enc.fit(X)
        out = enc.transform(X_t)
        self.assertIn('none', enc._min_group_size)
        self.assertTrue(pd.Series([6, 8]).isin(out['none']).all())
        self.assertEqual(out['none'].unique().shape[0], 2)
        self.assertTrue(out['none'].isnull().sum() == 0)
        self.assertIn(np.nan, enc.mapping['none'])
        self.assertTrue(pd.Series([13, 7]).isin(out['na_categorical']).all())
        self.assertTrue(out['na_categorical'].unique().shape == (2,))
        self.assertIn('B_C_nan', enc.mapping['na_categorical'])
        self.assertFalse(np.nan in enc.mapping['na_categorical'])
    def test_count_combine_min_nan_groups_bool(self):
        """Test the min_nan_groups_bool on 'none' and 'na_categorical'."""
        enc = encoders.CountEncoder(
            min_group_size=7,
            combine_min_nan_groups=False
        )
        enc.fit(X)
        out = enc.transform(X_t)
        self.assertTrue(pd.Series([6, 5, 3]).isin(out['none']).all())
        self.assertEqual(out['none'].unique().shape[0], 3)
        self.assertEqual(out['none'].isnull().sum(), 0)
        self.assertTrue(pd.Series([9, 7, 4]).isin(out['na_categorical']).all())
        self.assertEqual(out['na_categorical'].unique().shape[0], 3)
        self.assertTrue(enc.mapping is not None)
        # With combining disabled, nan keeps its own entry in the mapping.
        self.assertIn(np.nan, enc.mapping['na_categorical'])
    def test_count_combine_min_nan_groups_dict(self):
        """Test the combine_min_nan_groups dict on 'none' and 'na_categorical'."""
        enc = encoders.CountEncoder(
            min_group_size={
                'none': 6,
                'na_categorical': 7
            },
            combine_min_nan_groups={
                'none': 'force',
                'na_categorical': False
            }
        )
        enc.fit(X)
        out = enc.transform(X_t)
        self.assertIn('none', enc._combine_min_nan_groups)
        self.assertTrue(pd.Series([14, 6]).isin(out['none']).all())
        self.assertEqual(out['none'].unique().shape[0], 2)
        self.assertEqual(out['none'].isnull().sum(), 0)
        self.assertTrue(pd.Series([9, 7, 4]).isin(out['na_categorical']).all())
        self.assertEqual(out['na_categorical'].unique().shape[0], 3)
        self.assertTrue(enc.mapping is not None)
        self.assertIn(np.nan, enc.mapping['na_categorical'])
    def test_count_min_group_name_string(self):
        """Test the min_group_name string on 'none' and 'na_categorical'."""
        enc = encoders.CountEncoder(
            min_group_size=6,
            min_group_name='dave'
        )
        enc.fit(X)
        # The merged small-group bucket is renamed to the supplied string.
        self.assertIn('dave', enc.mapping['none'])
        self.assertEqual(enc.mapping['none']['dave'], 8)
        self.assertIn('dave', enc.mapping['na_categorical'])
        self.assertEqual(enc.mapping['na_categorical']['dave'], 7)
    def test_count_min_group_name_dict(self):
        """Test the min_group_name dict on 'none' and 'na_categorical'."""
        enc = encoders.CountEncoder(
            min_group_size={
                'none': 6, 'na_categorical': 6
            },
            min_group_name={
                'none': 'dave', 'na_categorical': None
            }
        )
        enc.fit(X)
        self.assertIn('none', enc._min_group_name)
        self.assertIn('dave', enc.mapping['none'])
        self.assertEqual(enc.mapping['none']['dave'], 8)
        # None falls back to the auto-generated combined name.
        self.assertIn('B_nan', enc.mapping['na_categorical'])
        self.assertEqual(enc.mapping['na_categorical']['B_nan'], 7)
    def test_count_normalize_bool(self):
        """Test the normalize bool on 'none' and 'na_categorical'."""
        enc = encoders.CountEncoder(
            min_group_size=6,
            normalize=True
        )
        enc.fit(X)
        out = enc.transform(X_t)
        self.assertIn('none', enc._normalize)
        # Normalized counts are fractions of the 20-row fit frame.
        self.assertTrue(out['none'].round(5).isin([0.3, 0.4]).all())
        self.assertEqual(out['none'].unique().shape[0], 2)
        self.assertEqual(out['none'].isnull().sum(), 0)
        self.assertTrue(pd.Series([0.3, 0.35]).isin(out['na_categorical']).all())
        self.assertEqual(out['na_categorical'].unique().shape[0], 2)
        self.assertTrue(enc.mapping is not None)
    def test_count_normalize_dict(self):
        """Test the normalize dict on 'none' and 'na_categorical'."""
        enc = encoders.CountEncoder(
            min_group_size=7,
            normalize={
                'none': True, 'na_categorical': False
            }
        )
        enc.fit(X)
        out = enc.transform(X_t)
        self.assertIn('none', enc._normalize)
        self.assertTrue(out['none'].round(5).isin([0.3 , 0.15, 0.25]).all())
        self.assertEqual(out['none'].unique().shape[0], 3)
        self.assertEqual(out['none'].isnull().sum(), 0)
        self.assertTrue(pd.Series([13, 7]).isin(out['na_categorical']).all())
        self.assertEqual(out['na_categorical'].unique().shape[0], 2)
        self.assertTrue(enc.mapping is not None)
| 40.167286
| 83
| 0.58186
| 1,404
| 10,805
| 4.325499
| 0.084758
| 0.126297
| 0.073769
| 0.072452
| 0.851309
| 0.801745
| 0.759756
| 0.757451
| 0.757451
| 0.72633
| 0
| 0.017206
| 0.236187
| 10,805
| 268
| 84
| 40.317164
| 0.718648
| 0.099861
| 0
| 0.597156
| 0
| 0
| 0.111654
| 0
| 0
| 0
| 0
| 0
| 0.43128
| 1
| 0.061611
| false
| 0
| 0.018957
| 0
| 0.085308
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
880259ff905bab3c6a76e4faa9fc89a05171a426
| 20,486
|
py
|
Python
|
app/grandchallenge/challenges/migrations/0001_initial.py
|
kaczmarj/grand-challenge.org
|
8dc8a2170e51072354f7e94f2a22578805a67b94
|
[
"Apache-2.0"
] | 101
|
2018-04-11T14:48:04.000Z
|
2022-03-28T00:29:48.000Z
|
app/grandchallenge/challenges/migrations/0001_initial.py
|
kaczmarj/grand-challenge.org
|
8dc8a2170e51072354f7e94f2a22578805a67b94
|
[
"Apache-2.0"
] | 1,733
|
2018-03-21T11:56:16.000Z
|
2022-03-31T14:58:30.000Z
|
app/grandchallenge/challenges/migrations/0001_initial.py
|
kaczmarj/grand-challenge.org
|
8dc8a2170e51072354f7e94f2a22578805a67b94
|
[
"Apache-2.0"
] | 42
|
2018-06-08T05:49:07.000Z
|
2022-03-29T08:43:01.000Z
|
# Generated by Django 3.1.1 on 2020-12-02 13:08
import re
import django.contrib.postgres.fields
import django.core.validators
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
import grandchallenge.challenges.models
import grandchallenge.core.storage
class Migration(migrations.Migration):
    # Auto-generated initial migration for the `challenges` app
    # (Django 3.1.1). Creates ChallengeSeries, ExternalChallenge and
    # Challenge. NOTE(review): generated code — prefer changing the models
    # and re-generating over hand-editing the schema below.
    initial = True
    dependencies = [
        ("anatomy", "0001_initial"),
        ("auth", "0012_alter_user_first_name_max_length"),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ("task_categories", "0001_initial"),
        ("modalities", "0001_initial"),
        ("forum", "0011_auto_20190627_2132"),
        ("publications", "0003_auto_20201001_0758"),
    ]
    operations = [
        # Small lookup table naming a series that challenges can belong to.
        migrations.CreateModel(
            name="ChallengeSeries",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "name",
                    django.contrib.postgres.fields.citext.CICharField(
                        max_length=64, unique=True
                    ),
                ),
                ("url", models.URLField(blank=True)),
            ],
            options={
                "verbose_name_plural": "Challenge Series",
                "ordering": ("name",),
            },
        ),
        # Challenge hosted elsewhere; shares most descriptive fields with
        # Challenge below but has no groups/forum/evaluation machinery.
        migrations.CreateModel(
            name="ExternalChallenge",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("created", models.DateTimeField(auto_now_add=True)),
                ("modified", models.DateTimeField(auto_now=True)),
                (
                    "short_name",
                    django.contrib.postgres.fields.citext.CICharField(
                        help_text="short name used in url, specific css, files etc. No spaces allowed",
                        max_length=50,
                        unique=True,
                        validators=[
                            grandchallenge.challenges.models.validate_nounderscores,
                            django.core.validators.RegexValidator(
                                re.compile("^[-a-zA-Z0-9_]+\\Z"),
                                "Enter a valid “slug” consisting of letters, numbers, underscores or hyphens.",
                                "invalid",
                            ),
                            grandchallenge.challenges.models.validate_short_name,
                        ],
                    ),
                ),
                (
                    "description",
                    models.CharField(
                        blank=True,
                        default="",
                        help_text="Short summary of this project, max 1024 characters.",
                        max_length=1024,
                    ),
                ),
                (
                    "title",
                    models.CharField(
                        blank=True,
                        default="",
                        help_text="The name of the challenge that is displayed on the All Challenges page. If this is blank the short name of the challenge will be used.",
                        max_length=64,
                    ),
                ),
                (
                    "logo",
                    models.ImageField(
                        blank=True,
                        help_text="A logo for this challenge. Should be square with a resolution of 640x640 px or higher.",
                        storage=grandchallenge.core.storage.PublicS3Storage(),
                        upload_to=grandchallenge.core.storage.get_logo_path,
                    ),
                ),
                (
                    "hidden",
                    models.BooleanField(
                        default=True,
                        help_text="Do not display this Project in any public overview",
                    ),
                ),
                (
                    "educational",
                    models.BooleanField(
                        default=False,
                        help_text="It is an educational challange",
                    ),
                ),
                (
                    "workshop_date",
                    models.DateField(
                        blank=True,
                        help_text="Date on which the workshop belonging to this project will be held",
                        null=True,
                    ),
                ),
                (
                    "event_name",
                    models.CharField(
                        blank=True,
                        default="",
                        help_text="The name of the event the workshop will be held at",
                        max_length=1024,
                        null=True,
                    ),
                ),
                (
                    "event_url",
                    models.URLField(
                        blank=True,
                        help_text="Website of the event which will host the workshop",
                        null=True,
                    ),
                ),
                (
                    "data_license_agreement",
                    models.TextField(
                        blank=True,
                        help_text="What is the data license agreement for this challenge?",
                    ),
                ),
                (
                    "number_of_training_cases",
                    models.IntegerField(blank=True, null=True),
                ),
                (
                    "number_of_test_cases",
                    models.IntegerField(blank=True, null=True),
                ),
                (
                    "filter_classes",
                    django.contrib.postgres.fields.ArrayField(
                        base_field=django.contrib.postgres.fields.citext.CICharField(
                            max_length=32
                        ),
                        default=list,
                        editable=False,
                        size=None,
                    ),
                ),
                (
                    "homepage",
                    models.URLField(
                        help_text="What is the homepage for this challenge?"
                    ),
                ),
                (
                    "data_stored",
                    models.BooleanField(
                        default=False,
                        help_text="Has the grand-challenge team stored the data?",
                    ),
                ),
                (
                    "creator",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
                (
                    "modalities",
                    models.ManyToManyField(
                        blank=True,
                        help_text="What imaging modalities are used in this challenge?",
                        to="modalities.ImagingModality",
                    ),
                ),
                (
                    "publications",
                    models.ManyToManyField(
                        blank=True,
                        help_text="Which publications are associated with this challenge?",
                        to="publications.Publication",
                    ),
                ),
                (
                    "series",
                    models.ManyToManyField(
                        blank=True,
                        help_text="Which challenge series is this associated with?",
                        to="challenges.ChallengeSeries",
                    ),
                ),
                (
                    "structures",
                    models.ManyToManyField(
                        blank=True,
                        help_text="What structures are used in this challenge?",
                        to="anatomy.BodyStructure",
                    ),
                ),
                (
                    "task_types",
                    models.ManyToManyField(
                        blank=True,
                        help_text="What type of task is this challenge?",
                        to="task_categories.TaskType",
                    ),
                ),
            ],
            options={"ordering": ("pk",), "abstract": False},
        ),
        # Locally-hosted challenge: adds admin/participant groups, forum,
        # registration/evaluation flags and cached counters.
        migrations.CreateModel(
            name="Challenge",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("created", models.DateTimeField(auto_now_add=True)),
                ("modified", models.DateTimeField(auto_now=True)),
                (
                    "short_name",
                    django.contrib.postgres.fields.citext.CICharField(
                        help_text="short name used in url, specific css, files etc. No spaces allowed",
                        max_length=50,
                        unique=True,
                        validators=[
                            grandchallenge.challenges.models.validate_nounderscores,
                            django.core.validators.RegexValidator(
                                re.compile("^[-a-zA-Z0-9_]+\\Z"),
                                "Enter a valid “slug” consisting of letters, numbers, underscores or hyphens.",
                                "invalid",
                            ),
                            grandchallenge.challenges.models.validate_short_name,
                        ],
                    ),
                ),
                (
                    "description",
                    models.CharField(
                        blank=True,
                        default="",
                        help_text="Short summary of this project, max 1024 characters.",
                        max_length=1024,
                    ),
                ),
                (
                    "title",
                    models.CharField(
                        blank=True,
                        default="",
                        help_text="The name of the challenge that is displayed on the All Challenges page. If this is blank the short name of the challenge will be used.",
                        max_length=64,
                    ),
                ),
                (
                    "logo",
                    models.ImageField(
                        blank=True,
                        help_text="A logo for this challenge. Should be square with a resolution of 640x640 px or higher.",
                        storage=grandchallenge.core.storage.PublicS3Storage(),
                        upload_to=grandchallenge.core.storage.get_logo_path,
                    ),
                ),
                (
                    "hidden",
                    models.BooleanField(
                        default=True,
                        help_text="Do not display this Project in any public overview",
                    ),
                ),
                (
                    "educational",
                    models.BooleanField(
                        default=False,
                        help_text="It is an educational challange",
                    ),
                ),
                (
                    "workshop_date",
                    models.DateField(
                        blank=True,
                        help_text="Date on which the workshop belonging to this project will be held",
                        null=True,
                    ),
                ),
                (
                    "event_name",
                    models.CharField(
                        blank=True,
                        default="",
                        help_text="The name of the event the workshop will be held at",
                        max_length=1024,
                        null=True,
                    ),
                ),
                (
                    "event_url",
                    models.URLField(
                        blank=True,
                        help_text="Website of the event which will host the workshop",
                        null=True,
                    ),
                ),
                (
                    "data_license_agreement",
                    models.TextField(
                        blank=True,
                        help_text="What is the data license agreement for this challenge?",
                    ),
                ),
                (
                    "number_of_training_cases",
                    models.IntegerField(blank=True, null=True),
                ),
                (
                    "number_of_test_cases",
                    models.IntegerField(blank=True, null=True),
                ),
                (
                    "filter_classes",
                    django.contrib.postgres.fields.ArrayField(
                        base_field=django.contrib.postgres.fields.citext.CICharField(
                            max_length=32
                        ),
                        default=list,
                        editable=False,
                        size=None,
                    ),
                ),
                (
                    "banner",
                    models.ImageField(
                        blank=True,
                        help_text="Image that gets displayed at the top of each page. Recommended resolution 2200x440 px.",
                        storage=grandchallenge.core.storage.PublicS3Storage(),
                        upload_to=grandchallenge.core.storage.get_banner_path,
                    ),
                ),
                (
                    "disclaimer",
                    models.CharField(
                        blank=True,
                        default="",
                        help_text="Optional text to show on each page in the project. For showing 'under construction' type messages",
                        max_length=2048,
                        null=True,
                    ),
                ),
                (
                    "require_participant_review",
                    models.BooleanField(
                        default=False,
                        help_text="If ticked, new participants need to be approved by project admins before they can access restricted pages. If not ticked, new users are allowed access immediately",
                    ),
                ),
                (
                    "use_registration_page",
                    models.BooleanField(
                        default=True,
                        help_text="If true, show a registration page on the challenge site.",
                    ),
                ),
                (
                    "registration_page_text",
                    models.TextField(
                        blank=True,
                        default="",
                        help_text="The text to use on the registration page, you could include a data usage agreement here. You can use HTML markup here.",
                    ),
                ),
                (
                    "use_evaluation",
                    models.BooleanField(
                        default=False,
                        help_text="If true, use the automated evaluation system. See the evaluation page created in the Challenge site.",
                    ),
                ),
                (
                    "use_teams",
                    models.BooleanField(
                        default=False,
                        help_text="If true, users are able to form teams to participate in this challenge together.",
                    ),
                ),
                (
                    "display_forum_link",
                    models.BooleanField(
                        default=False,
                        help_text="Display a link to the challenge forum in the nav bar.",
                    ),
                ),
                (
                    "cached_num_participants",
                    models.PositiveIntegerField(default=0, editable=False),
                ),
                (
                    "cached_num_results",
                    models.PositiveIntegerField(default=0, editable=False),
                ),
                (
                    "cached_latest_result",
                    models.DateTimeField(
                        blank=True, editable=False, null=True
                    ),
                ),
                (
                    "admins_group",
                    models.OneToOneField(
                        editable=False,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="admins_of_challenge",
                        to="auth.group",
                    ),
                ),
                (
                    "creator",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
                (
                    "forum",
                    models.OneToOneField(
                        editable=False,
                        on_delete=django.db.models.deletion.CASCADE,
                        to="forum.forum",
                    ),
                ),
                (
                    "modalities",
                    models.ManyToManyField(
                        blank=True,
                        help_text="What imaging modalities are used in this challenge?",
                        to="modalities.ImagingModality",
                    ),
                ),
                (
                    "participants_group",
                    models.OneToOneField(
                        editable=False,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="participants_of_challenge",
                        to="auth.group",
                    ),
                ),
                (
                    "publications",
                    models.ManyToManyField(
                        blank=True,
                        help_text="Which publications are associated with this challenge?",
                        to="publications.Publication",
                    ),
                ),
                (
                    "series",
                    models.ManyToManyField(
                        blank=True,
                        help_text="Which challenge series is this associated with?",
                        to="challenges.ChallengeSeries",
                    ),
                ),
                (
                    "structures",
                    models.ManyToManyField(
                        blank=True,
                        help_text="What structures are used in this challenge?",
                        to="anatomy.BodyStructure",
                    ),
                ),
                (
                    "task_types",
                    models.ManyToManyField(
                        blank=True,
                        help_text="What type of task is this challenge?",
                        to="task_categories.TaskType",
                    ),
                ),
            ],
            options={
                "verbose_name": "challenge",
                "verbose_name_plural": "challenges",
                "ordering": ("pk",),
                "abstract": False,
            },
        ),
    ]
| 38.946768
| 199
| 0.383872
| 1,376
| 20,486
| 5.587209
| 0.204942
| 0.041623
| 0.034339
| 0.042014
| 0.766259
| 0.754943
| 0.73244
| 0.707856
| 0.689516
| 0.689516
| 0
| 0.014175
| 0.54198
| 20,486
| 525
| 200
| 39.020952
| 0.80518
| 0.002197
| 0
| 0.753876
| 1
| 0.007752
| 0.203141
| 0.026127
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.015504
| 0
| 0.023256
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
880ac836f64d727848455a46668c493083ac9cdc
| 4,854
|
py
|
Python
|
slam_recognition/util/relativity/get_relative_to_indices.py
|
SimLeek/pySILEnT
|
feec2d1fb654d7c8dc25f610916f4e9b202a1092
|
[
"Apache-2.0",
"MIT"
] | 5
|
2018-11-18T17:35:59.000Z
|
2019-02-13T20:25:58.000Z
|
slam_recognition/util/relativity/get_relative_to_indices.py
|
SimLeek/slam_recognition
|
feec2d1fb654d7c8dc25f610916f4e9b202a1092
|
[
"Apache-2.0",
"MIT"
] | 12
|
2018-10-31T01:57:55.000Z
|
2019-02-07T05:49:36.000Z
|
slam_recognition/util/relativity/get_relative_to_indices.py
|
SimLeek/pySILEnT
|
feec2d1fb654d7c8dc25f610916f4e9b202a1092
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
import tensorflow as tf
def get_relative_to_indices(tensor, indices, rotation_relative=False):
    """Sum index-centered windows of ``tensor``.

    For each index, a (2*width x 2*height) window of the spatially padded
    input is sliced out, re-padded along the first axis, and all windows
    are summed into a single float32 tensor.

    Args:
        tensor: input tensor; assumed 4-D laid out as
            (batch, width, height, channels) based on the [1]/[2] shape
            indexing — TODO confirm against callers.
        indices: integer index tensor; when empty, a zero index (created
            by padding with 4 rows/cols) is used as a sentinel.
        rotation_relative: not implemented; must be False.

    Returns:
        float32 tensor of the summed, re-centered windows.

    Raises:
        NotImplementedError: if ``rotation_relative`` is True.
    """
    # Fail fast: rotation-relative mode was never finished. The original
    # code kept unreachable statements after this raise that referenced an
    # undefined name `self`; they have been removed.
    if rotation_relative:
        raise NotImplementedError("Not sure how to do this right yet.")
    two = tf.constant(2, dtype=tf.int32)
    one = tf.constant(1, dtype=tf.int32)
    w = tf.shape(tensor)[1]
    h = tf.shape(tensor)[2]
    # Pad each spatial dim by its own size on both sides so windows taken
    # near the border stay in bounds.
    to_tile = tf.pad(tf.cast(tf.shape(tensor)[1:3], tf.int32), [[1, 1]])
    to_tile = tf.expand_dims(to_tile, -1)
    pad_shape = tf.cast(tf.tile(to_tile, [one, two]), tf.int32)
    padded_tensor = tf.pad(tensor, pad_shape, "CONSTANT")
    # With no indices supplied, substitute a single all-zero index.
    ind_pad = tf.tile(tf.constant([[4, 0]], dtype=tf.int32), [2, 1])
    no_pad = tf.tile(tf.constant([[0, 0]], dtype=tf.int32), [2, 1])
    cond_pad = tf.cond(tf.equal(tf.size(indices), 0), lambda: ind_pad, lambda: no_pad)
    padded_indices = tf.pad(indices, cond_pad)

    def get_slices(idx):
        # Extract the window centered at idx and re-pad it back out to the
        # first-axis extent of the padded tensor.
        value_slice = padded_tensor[tf.newaxis,
                                    idx[0]:idx[0] + 1,
                                    idx[1]:idx[1] + w * 2,
                                    idx[2]:idx[2] + h * 2,
                                    :
                                    ]
        return tf.pad(value_slice,
                      [[0, 0],
                       [idx[0], tf.cast(tf.shape(padded_tensor), tf.int32)[0] - idx[0]],
                       [0, 0], [0, 0], [0, 0]], "CONSTANT")

    batch_items = tf.map_fn(fn=get_slices,
                            elems=padded_indices,
                            dtype=tf.float32)
    # Sum across windows, then drop the leading singleton dimension.
    added_relative = tf.squeeze(tf.reduce_sum(batch_items, 0), [0])
    relativity_tensor = tf.cast(added_relative, tf.float32)
    return relativity_tensor
def get_relative_to_indices_regions(tensor, region_shape, indices, rotation_relative=False):
    """Stack index-centered windows of ``tensor`` sized by ``region_shape``.

    Variant of :func:`get_relative_to_indices` that takes window sizes from
    ``region_shape`` instead of the input's own shape, and returns the
    per-index windows stacked (no summation).

    Args:
        tensor: input tensor; assumed 4-D (batch, width, height, channels)
            based on the [1]/[2] shape indexing — TODO confirm.
        region_shape: shape-like whose [1] and [2] entries give the region
            width and height used for padding and slicing.
        indices: index tensor (any numeric dtype; cast to int32 per
            element); when empty, a zero index sentinel is substituted.
        rotation_relative: not implemented; must be False.

    Returns:
        float32 tensor of the stacked, re-centered windows.

    Raises:
        NotImplementedError: if ``rotation_relative`` is True.
    """
    # Fail fast: rotation-relative mode was never finished. The original
    # code kept unreachable statements after this raise that referenced an
    # undefined name `self`; they have been removed.
    if rotation_relative:
        raise NotImplementedError("Not sure how to do this right yet.")
    two = tf.constant(2, dtype=tf.int32)
    one = tf.constant(1, dtype=tf.int32)
    w = region_shape[1]
    h = region_shape[2]
    # Pad each spatial dim by the region size on both sides so windows
    # taken near the border stay in bounds.
    to_tile = tf.pad(tf.cast(region_shape[1:3], tf.int32), [[1, 1]])
    to_tile = tf.expand_dims(to_tile, -1)
    pad_shape = tf.cast(tf.tile(to_tile, [one, two]), tf.int32)
    padded_tensor = tf.pad(tensor, pad_shape, "CONSTANT")
    # With no indices supplied, substitute a single all-zero index.
    ind_pad = tf.tile(tf.constant([[4, 0]], dtype=tf.int32), [2, 1])
    no_pad = tf.tile(tf.constant([[0, 0]], dtype=tf.int32), [2, 1])
    cond_pad = tf.cond(tf.equal(tf.size(indices), 0), lambda: ind_pad, lambda: no_pad)
    padded_indices = tf.pad(indices, cond_pad)

    def get_slices(idx):
        # Extract the region-sized window centered at idx and re-pad it
        # back out to the first-axis extent of the padded tensor.
        idx = tf.cast(idx, tf.int32)
        value_slice = padded_tensor[tf.newaxis,
                                    idx[0]:idx[0] + 1,
                                    idx[1]:idx[1] + w * 2,
                                    idx[2]:idx[2] + h * 2,
                                    :
                                    ]
        return tf.pad(value_slice,
                      [[0, 0],
                       [idx[0], tf.cast(tf.shape(padded_tensor), tf.int32)[0] - idx[0]],
                       [0, 0], [0, 0], [0, 0]], "CONSTANT")

    batch_items = tf.map_fn(fn=get_slices,
                            elems=padded_indices,
                            dtype=tf.float32)
    # Unlike get_relative_to_indices, the windows are returned stacked.
    relativity_tensor = tf.cast(batch_items, tf.float32)
    return relativity_tensor
| 44.127273
| 111
| 0.507417
| 633
| 4,854
| 3.723539
| 0.126382
| 0.014425
| 0.012728
| 0.013577
| 0.889266
| 0.843445
| 0.843445
| 0.828171
| 0.828171
| 0.828171
| 0
| 0.052598
| 0.357643
| 4,854
| 109
| 112
| 44.53211
| 0.703335
| 0
| 0
| 0.817204
| 0
| 0
| 0.020602
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043011
| false
| 0
| 0.010753
| 0
| 0.096774
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
880c5cb0cfaefac520164af107686d80ca16cabe
| 6,158
|
py
|
Python
|
n2vc/tests/unit/test_provisioner.py
|
TCSOSM-20/N2VC
|
d99f3f2f67d693c30494be7ad19b97f3f5528961
|
[
"Apache-2.0"
] | null | null | null |
n2vc/tests/unit/test_provisioner.py
|
TCSOSM-20/N2VC
|
d99f3f2f67d693c30494be7ad19b97f3f5528961
|
[
"Apache-2.0"
] | null | null | null |
n2vc/tests/unit/test_provisioner.py
|
TCSOSM-20/N2VC
|
d99f3f2f67d693c30494be7ad19b97f3f5528961
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase, mock
from mock import mock_open
from n2vc.provisioner import SSHProvisioner
from paramiko.ssh_exception import SSHException
class ProvisionerTest(TestCase):
    """Unit tests for ``SSHProvisioner._get_ssh_client`` retry behaviour.

    Every paramiko/OS touchpoint is patched out, so these tests only
    exercise the client-construction and reconnect logic.
    """

    def setUp(self):
        # No real cluster/user/key is needed: all I/O is mocked per-test.
        self.provisioner = SSHProvisioner(None, None, None)

    @mock.patch("n2vc.provisioner.os.path.exists")
    @mock.patch("n2vc.provisioner.paramiko.RSAKey")
    @mock.patch("n2vc.provisioner.paramiko.SSHClient")
    @mock.patch("builtins.open", new_callable=mock_open, read_data="data")
    def test__get_ssh_client(self, _mock_open, mock_sshclient, _mock_rsakey, _mock_os):
        """Happy path: a single connect attempt returns the client."""
        mock_instance = mock_sshclient.return_value
        sshclient = self.provisioner._get_ssh_client()
        self.assertEqual(mock_instance, sshclient)
        self.assertEqual(
            1,
            mock_instance.set_missing_host_key_policy.call_count,
            "Missing host key call count",
        )
        self.assertEqual(1, mock_instance.connect.call_count, "Connect call count")

    @mock.patch("n2vc.provisioner.os.path.exists")
    @mock.patch("n2vc.provisioner.paramiko.RSAKey")
    @mock.patch("n2vc.provisioner.paramiko.SSHClient")
    @mock.patch("builtins.open", new_callable=mock_open, read_data="data")
    def test__get_ssh_client_no_connection(
        self, _mock_open, mock_sshclient, _mock_rsakey, _mock_os
    ):
        """A generic SSHException is not retried and propagates."""
        mock_instance = mock_sshclient.return_value
        # Removed leftover `method_inside_someobject` mock setup: nothing in
        # the code under test ever touched that attribute.
        mock_instance.connect.side_effect = SSHException()
        self.assertRaises(SSHException, self.provisioner._get_ssh_client)
        self.assertEqual(
            1,
            mock_instance.set_missing_host_key_policy.call_count,
            "Missing host key call count",
        )
        self.assertEqual(1, mock_instance.connect.call_count, "Connect call count")

    @mock.patch("n2vc.provisioner.os.path.exists")
    @mock.patch("n2vc.provisioner.paramiko.RSAKey")
    @mock.patch("n2vc.provisioner.paramiko.SSHClient")
    @mock.patch("builtins.open", new_callable=mock_open, read_data="data")
    def test__get_ssh_client_bad_banner(
        self, _mock_open, mock_sshclient, _mock_rsakey, _mock_os
    ):
        """A banner-read SSHException is retried until connect succeeds."""
        mock_instance = mock_sshclient.return_value
        mock_instance.connect.side_effect = [
            SSHException("Error reading SSH protocol banner"),
            None,
            None,
        ]
        sshclient = self.provisioner._get_ssh_client()
        self.assertEqual(mock_instance, sshclient)
        self.assertEqual(
            1,
            mock_instance.set_missing_host_key_policy.call_count,
            "Missing host key call count",
        )
        self.assertEqual(
            3, mock_instance.connect.call_count, "Should attempt 3 connections"
        )

    @mock.patch("time.sleep", autospec=True)
    @mock.patch("n2vc.provisioner.os.path.exists")
    @mock.patch("n2vc.provisioner.paramiko.RSAKey")
    @mock.patch("n2vc.provisioner.paramiko.SSHClient")
    @mock.patch("builtins.open", new_callable=mock_open, read_data="data")
    def test__get_ssh_client_unable_to_connect(
        self, _mock_open, mock_sshclient, _mock_rsakey, _mock_os, _mock_sleep
    ):
        """Port-unreachable errors are retried 11 times, then re-raised."""
        mock_instance = mock_sshclient.return_value
        mock_instance.connect.side_effect = Exception("Unable to connect to port")
        self.assertRaises(Exception, self.provisioner._get_ssh_client)
        self.assertEqual(
            1,
            mock_instance.set_missing_host_key_policy.call_count,
            "Missing host key call count",
        )
        self.assertEqual(
            11, mock_instance.connect.call_count, "Should attempt 11 connections"
        )

    @mock.patch("time.sleep", autospec=True)
    @mock.patch("n2vc.provisioner.os.path.exists")
    @mock.patch("n2vc.provisioner.paramiko.RSAKey")
    @mock.patch("n2vc.provisioner.paramiko.SSHClient")
    @mock.patch("builtins.open", new_callable=mock_open, read_data="data")
    def test__get_ssh_client_unable_to_connect_once(
        self, _mock_open, mock_sshclient, _mock_rsakey, _mock_os, _mock_sleep
    ):
        """A single port-unreachable failure succeeds on the second attempt."""
        mock_instance = mock_sshclient.return_value
        mock_instance.connect.side_effect = [
            Exception("Unable to connect to port"),
            None,
        ]
        sshclient = self.provisioner._get_ssh_client()
        self.assertEqual(mock_instance, sshclient)
        self.assertEqual(
            1,
            mock_instance.set_missing_host_key_policy.call_count,
            "Missing host key call count",
        )
        self.assertEqual(
            2, mock_instance.connect.call_count, "Should attempt 2 connections"
        )

    @mock.patch("n2vc.provisioner.os.path.exists")
    @mock.patch("n2vc.provisioner.paramiko.RSAKey")
    @mock.patch("n2vc.provisioner.paramiko.SSHClient")
    @mock.patch("builtins.open", new_callable=mock_open, read_data="data")
    def test__get_ssh_client_other_exception(
        self, _mock_open, mock_sshclient, _mock_rsakey, _mock_os
    ):
        """Non-retryable exceptions propagate after exactly one attempt."""
        mock_instance = mock_sshclient.return_value
        mock_instance.connect.side_effect = Exception()
        self.assertRaises(Exception, self.provisioner._get_ssh_client)
        self.assertEqual(
            1,
            mock_instance.set_missing_host_key_policy.call_count,
            "Missing host key call count",
        )
        self.assertEqual(
            1, mock_instance.connect.call_count, "Should only attempt 1 connection"
        )
#
| 38.72956
| 87
| 0.689672
| 746
| 6,158
| 5.414209
| 0.183646
| 0.083189
| 0.057935
| 0.106957
| 0.802922
| 0.802922
| 0.801436
| 0.770983
| 0.770983
| 0.770983
| 0
| 0.009309
| 0.215005
| 6,158
| 158
| 88
| 38.974684
| 0.826231
| 0.092725
| 0
| 0.713115
| 0
| 0
| 0.202082
| 0.105528
| 0
| 0
| 0
| 0
| 0.147541
| 1
| 0.057377
| false
| 0
| 0.032787
| 0
| 0.098361
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7177408aa060a272ee1cd336a6945d7f52d3151c
| 135,468
|
py
|
Python
|
tests/expected_FPdescriptor_data.py
|
sharmavaruns/descriptastorus
|
7a3e457bc64e480e44f0ce624052da68d2a27bad
|
[
"BSD-3-Clause"
] | 118
|
2019-01-15T23:04:29.000Z
|
2022-03-25T01:31:06.000Z
|
tests/expected_FPdescriptor_data.py
|
sharmavaruns/descriptastorus
|
7a3e457bc64e480e44f0ce624052da68d2a27bad
|
[
"BSD-3-Clause"
] | 14
|
2019-02-15T17:15:46.000Z
|
2022-03-10T14:18:56.000Z
|
tests/expected_FPdescriptor_data.py
|
sharmavaruns/descriptastorus
|
7a3e457bc64e480e44f0ce624052da68d2a27bad
|
[
"BSD-3-Clause"
] | 40
|
2018-12-18T11:39:38.000Z
|
2022-03-23T09:45:45.000Z
|
expected_chiral_data = [(True, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
(True, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
(True, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
(True, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
(True, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
(True, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
(True, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
(True, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
(True, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
(True, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)]
expected_RDKFP_data = [(True, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 
0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 
1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 
0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1),
(True, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
(True, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 
0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1),
(True, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
]
expected_AtomPair_data = [(True, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 4, 0, 0, 0, 0, 0, 0, 0, 2, 4, 0, 0, 0, 0, 0, 1, 3, 0, 0, 0, 0, 0, 2, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 4, 1, 0, 0, 1, 3, 1, 6, 0, 0, 0, 0, 6, 0, 0, 1, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 3, 3, 0, 5, 0, 6, 1, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 9, 0, 0, 1, 1, 8, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 5, 6, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 0, 0, 5, 0, 0, 1, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 3, 0, 3, 0, 0, 0, 0, 3, 0, 0, 8, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 2, 3, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 2, 0, 0, 0, 0, 7, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 0, 2, 0, 0, 0, 0, 5, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 5, 0, 0, 0, 0, 0, 0, 0, 7, 6, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 1, 0, 2, 4, 0, 1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 2, 2, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 2, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 2, 4, 0, 2, 0, 0, 0, 0, 12, 0, 0, 3, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 3, 0, 1, 0, 0, 0, 0, 7, 7, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 12, 0, 0, 0, 0, 0, 1, 7, 4, 4, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 3, 2, 0, 10, 0, 0, 0, 1, 13, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 3, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 5, 8, 0, 0, 1, 0, 0, 1, 0, 1, 0, 4, 4, 0, 0, 2, 7, 5, 6, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 0, 9, 9, 0, 0, 1, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 3, 1, 3, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 4, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 6, 0, 1, 0, 0, 0, 1, 9, 0, 0, 6, 0, 0, 0, 0, 1, 2, 2, 5, 0, 0, 0, 1, 0, 1, 11, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 4, 6, 0, 0, 0, 0, 4, 0, 4, 8, 0, 8, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 3, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 1, 0, 0, 1, 6, 2, 16, 0, 0, 0, 1, 0, 1, 0, 0, 3, 0, 2, 2, 0, 0, 0, 0, 1, 1, 1, 1, 0, 6, 0, 1, 0, 4, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 2, 9, 2, 0, 0, 0, 1, 0, 15, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 4, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 0, 0, 0, 0, 0, 1, 3, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 6, 0, 0, 3, 0, 0, 1, 0, 7, 3, 2, 7, 1, 0, 0, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 4, 2, 0, 0, 3, 0, 0, 3, 0, 1, 0, 0, 2, 0, 0, 2, 0, 3, 0, 0, 3, 0, 0, 5, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 5, 3, 1, 5, 1, 3, 0, 0, 0, 0, 0, 8, 7, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 2, 0, 0, 2, 1, 0, 2, 0, 0, 0, 0, 1, 0, 2, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 2, 0, 0, 0, 0, 0, 7, 3, 9, 2, 0, 0, 0, 0, 0, 2, 0, 6, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 7, 0, 0, 0, 0, 1, 13, 0, 2, 0, 0, 0, 
0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 4, 2, 0, 0, 7, 0, 0, 0, 0, 0, 0, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 3, 0, 4, 0, 0, 10, 1, 0, 0, 16, 2, 2, 0, 3, 0),
(True, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
(True, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
(True, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
]
expected_FeatureMorgan_data = [(True, 32, 1, 7, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 5, 0, 0, 0, 0, 0, 0),
(True, 3, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
(True, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
(True, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
]
| 4,671.310345
| 6,188
| 0.333998
| 45,090
| 135,468
| 1.003282
| 0.000532
| 1.845661
| 2.694151
| 3.508555
| 0.997082
| 0.996264
| 0.994827
| 0.993015
| 0.992131
| 0.99076
| 0
| 0.498694
| 0.332861
| 135,468
| 28
| 6,189
| 4,838.142857
| 0.001859
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 14
|
e0d9f738c122e925eeb79c2a1c4ad3303f5a5c00
| 16,114
|
py
|
Python
|
tests/unit/beacons/test_adb.py
|
byteskeptical/salt
|
637fe0b04f38b2274191b005d73b3c6707d7f400
|
[
"Apache-2.0"
] | 12
|
2015-01-21T00:18:25.000Z
|
2021-07-11T07:35:26.000Z
|
tests/unit/beacons/test_adb.py
|
byteskeptical/salt
|
637fe0b04f38b2274191b005d73b3c6707d7f400
|
[
"Apache-2.0"
] | 86
|
2017-01-27T11:54:46.000Z
|
2020-05-20T06:25:26.000Z
|
tests/unit/beacons/test_adb.py
|
byteskeptical/salt
|
637fe0b04f38b2274191b005d73b3c6707d7f400
|
[
"Apache-2.0"
] | 12
|
2015-01-05T09:50:42.000Z
|
2019-08-19T01:43:40.000Z
|
# coding: utf-8
# Python libs
from __future__ import absolute_import
# Salt testing libs
from tests.support.unit import skipIf, TestCase
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, Mock
from tests.support.mixins import LoaderModuleMockMixin
# Salt libs
import salt.beacons.adb as adb
@skipIf(NO_MOCK, NO_MOCK_REASON)
class ADBBeaconTestCase(TestCase, LoaderModuleMockMixin):
    '''
    Test case for salt.beacons.adb
    '''

    def setup_loader_modules(self):
        # Give every test a fresh beacon module state: no previously seen
        # device states and the "no devices" event not yet fired.
        return {
            adb: {
                'last_state': {},
                'last_state_extra': {'no_devices': False}
            }
        }

    def test_no_adb_command(self):
        # __virtual__ must return False when the adb binary is not on PATH.
        with patch('salt.utils.path.which') as mock:
            mock.return_value = None

            ret = adb.__virtual__()

            mock.assert_called_once_with('adb')
            self.assertFalse(ret)

    def test_with_adb_command(self):
        # __virtual__ returns the virtual module name when adb is found.
        with patch('salt.utils.path.which') as mock:
            mock.return_value = '/usr/bin/adb'

            ret = adb.__virtual__()

            mock.assert_called_once_with('adb')
            self.assertEqual(ret, 'adb')

    def test_non_list_config(self):
        # The beacon configuration must be a list; a bare dict is rejected.
        config = {}

        ret = adb.validate(config)

        self.assertEqual(ret, (False, 'Configuration for adb beacon must'
                                      ' be a list.'))

    def test_empty_config(self):
        # A config list without a 'states' entry is rejected.
        config = [{}]

        ret = adb.validate(config)

        self.assertEqual(ret, (False, 'Configuration for adb beacon must'
                                      ' include a states array.'))

    def test_invalid_states(self):
        # Unknown state names are rejected with the list of valid states.
        config = [{'states': ['Random', 'Failings']}]

        ret = adb.validate(config)

        self.assertEqual(ret, (False, 'Need a one of the following'
                                      ' adb states: offline, bootloader,'
                                      ' device, host, recovery, no'
                                      ' permissions, sideload,'
                                      ' unauthorized, unknown, missing'))

    def test_device_state(self):
        # A device in a watched state is reported on the first poll.
        config = [{'states': ['device']}]

        mock = Mock(return_value='List of devices attached\nHTC\tdevice',)
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.validate(config)
            self.assertEqual(ret, (True, 'Valid beacon configuration'))

            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC',
                                    'state': 'device',
                                    'tag': 'device'}])

    def test_device_state_change(self):
        # Watching 'offline': nothing fires while the device is in state
        # 'device'; the event fires once it transitions to 'offline'.
        config = [{'states': ['offline']}]

        out = [
            'List of devices attached\nHTC\tdevice',
            'List of devices attached\nHTC\toffline'
        ]
        mock = Mock(side_effect=out)
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.validate(config)
            self.assertEqual(ret, (True, 'Valid beacon configuration'))

            ret = adb.beacon(config)
            self.assertEqual(ret, [])

            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC',
                                    'state': 'offline',
                                    'tag': 'offline'}])

    def test_multiple_devices(self):
        # Two devices in watched states each produce their own event.
        config = [{'states': ['offline', 'device']}]

        out = [
            'List of devices attached\nHTC\tdevice',
            'List of devices attached\nHTC\toffline\nNexus\tdevice'
        ]
        mock = Mock(side_effect=out)
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.validate(config)
            self.assertEqual(ret, (True, 'Valid beacon configuration'))

            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC',
                                    'state': 'device',
                                    'tag': 'device'}])

            ret = adb.beacon(config)
            self.assertEqual(ret, [
                {'device': 'HTC', 'state': 'offline', 'tag': 'offline'},
                {'device': 'Nexus', 'state': 'device', 'tag': 'device'}
            ])

    def test_no_devices_with_different_states(self):
        # A device in a non-watched state produces no event, and its
        # presence also suppresses the 'no_devices' event.
        config = [{'states': ['offline'], 'no_devices_event': True}]

        mock = Mock(return_value='List of devices attached\nHTC\tdevice')
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.validate(config)
            self.assertEqual(ret, (True, 'Valid beacon configuration'))

            ret = adb.beacon(config)
            self.assertEqual(ret, [])

    def test_no_devices_no_repeat(self):
        # The 'no_devices' event fires once when the last device vanishes
        # and is not repeated on subsequent empty polls.
        config = [{'states': ['offline', 'device'], 'no_devices_event': True}]

        out = [
            'List of devices attached\nHTC\tdevice',
            'List of devices attached',
            'List of devices attached'
        ]
        mock = Mock(side_effect=out)
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.validate(config)
            self.assertEqual(ret, (True, 'Valid beacon configuration'))

            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC',
                                    'state': 'device',
                                    'tag': 'device'}])

            ret = adb.beacon(config)
            self.assertEqual(ret, [{'tag': 'no_devices'}])

            ret = adb.beacon(config)
            self.assertEqual(ret, [])

    def test_no_devices(self):
        # 'no_devices' also fires when the very first poll sees no devices.
        config = [{'states': ['offline', 'device'], 'no_devices_event': True}]

        out = [
            'List of devices attached',
            'List of devices attached'
        ]
        mock = Mock(side_effect=out)
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.validate(config)
            self.assertEqual(ret, (True, 'Valid beacon configuration'))

            ret = adb.beacon(config)
            self.assertEqual(ret, [{'tag': 'no_devices'}])

            ret = adb.beacon(config)
            self.assertEqual(ret, [])

    def test_device_missing(self):
        # A previously seen device that disappears is reported as
        # 'missing' once; reappearing fires 'device' again, and a repeat
        # poll with no change fires nothing.
        config = [{'states': ['device', 'missing']}]

        out = [
            'List of devices attached\nHTC\tdevice',
            'List of devices attached',
            'List of devices attached\nHTC\tdevice',
            'List of devices attached\nHTC\tdevice'
        ]
        mock = Mock(side_effect=out)
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.validate(config)
            self.assertEqual(ret, (True, 'Valid beacon configuration'))

            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC',
                                    'state': 'device',
                                    'tag': 'device'}])

            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC',
                                    'state': 'missing',
                                    'tag': 'missing'}])

            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC',
                                    'state': 'device',
                                    'tag': 'device'}])

            ret = adb.beacon(config)
            self.assertEqual(ret, [])

    def test_with_startup(self):
        # The daemon-startup banner emitted by adb is skipped when parsing.
        config = [{'states': ['device']}]

        mock = Mock(return_value='* daemon started successfully *\nList of devices attached\nHTC\tdevice',)
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.validate(config)
            self.assertEqual(ret, (True, 'Valid beacon configuration'))

            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC',
                                    'state': 'device',
                                    'tag': 'device'}])

    def test_with_user(self):
        # When 'user' is configured, cmd.run is invoked with runas=<user>.
        config = [{'states': ['device'], 'user': 'fred'}]

        mock = Mock(return_value='* daemon started successfully *\nList of devices attached\nHTC\tdevice')
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.validate(config)
            self.assertEqual(ret, (True, 'Valid beacon configuration'))

            ret = adb.beacon(config)

            mock.assert_called_once_with('adb devices', runas='fred')
            self.assertEqual(ret, [{'device': 'HTC',
                                    'state': 'device',
                                    'tag': 'device'}])

    def test_device_low_battery(self):
        # A battery level below 'battery_low' adds a battery_low event
        # alongside the state event. The second mocked output is the
        # battery-level query for the device.
        config = [{'states': ['device'], 'battery_low': 30}]

        out = [
            'List of devices attached\nHTC\tdevice',
            '25',
        ]
        mock = Mock(side_effect=out)
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.validate(config)
            self.assertEqual(ret, (True, 'Valid beacon configuration'))

            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'},
                                   {'device': 'HTC', 'battery_level': 25, 'tag': 'battery_low'}])

    def test_device_no_repeat(self):
        # An unchanged low battery level is not reported twice in a row.
        config = [{'states': ['device'], 'battery_low': 30}]

        out = [
            'List of devices attached\nHTC\tdevice',
            '25',
            'List of devices attached\nHTC\tdevice',
            '25'
        ]
        mock = Mock(side_effect=out)
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.validate(config)
            self.assertEqual(ret, (True, 'Valid beacon configuration'))

            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'},
                                   {'device': 'HTC', 'battery_level': 25, 'tag': 'battery_low'}])

            ret = adb.beacon(config)
            self.assertEqual(ret, [])

    def test_device_no_repeat_capacity_increase(self):
        # A rising battery level that is still below the threshold does
        # not re-fire battery_low.
        config = [{'states': ['device'], 'battery_low': 75}]

        out = [
            'List of devices attached\nHTC\tdevice',
            '25',
            'List of devices attached\nHTC\tdevice',
            '30'
        ]
        mock = Mock(side_effect=out)
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.validate(config)
            self.assertEqual(ret, (True, 'Valid beacon configuration'))

            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'},
                                   {'device': 'HTC', 'battery_level': 25, 'tag': 'battery_low'}])

            ret = adb.beacon(config)
            self.assertEqual(ret, [])

    def test_device_no_repeat_with_not_found_state(self):
        # battery_low is reported even when the device's state is not in
        # the watched list, and still not repeated while unchanged.
        config = [{'states': ['offline'], 'battery_low': 30}]

        out = [
            'List of devices attached\nHTC\tdevice',
            '25',
            'List of devices attached\nHTC\tdevice',
            '25'
        ]
        mock = Mock(side_effect=out)
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.validate(config)
            self.assertEqual(ret, (True, 'Valid beacon configuration'))

            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC', 'battery_level': 25, 'tag': 'battery_low'}])

            ret = adb.beacon(config)
            self.assertEqual(ret, [])

    def test_device_battery_charged(self):
        # A battery level above the threshold produces no battery event.
        config = [{'states': ['device'], 'battery_low': 30}]

        out = [
            'List of devices attached\nHTC\tdevice',
            '100',
        ]
        mock = Mock(side_effect=out)
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.validate(config)
            self.assertEqual(ret, (True, 'Valid beacon configuration'))

            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC',
                                    'state': 'device',
                                    'tag': 'device'}])

    def test_device_low_battery_equal(self):
        # A level exactly equal to 'battery_low' counts as low.
        config = [{'states': ['device'], 'battery_low': 25}]

        out = [
            'List of devices attached\nHTC\tdevice',
            '25',
        ]
        mock = Mock(side_effect=out)
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.validate(config)
            self.assertEqual(ret, (True, 'Valid beacon configuration'))

            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'},
                                   {'device': 'HTC', 'battery_level': 25, 'tag': 'battery_low'}])

    def test_device_battery_not_found(self):
        # An error message from the battery query (no capacity file on the
        # device) is ignored rather than parsed as a level.
        config = [{'states': ['device'], 'battery_low': 25}]

        out = [
            'List of devices attached\nHTC\tdevice',
            '/system/bin/sh: cat: /sys/class/power_supply/*/capacity: No such file or directory',
        ]
        mock = Mock(side_effect=out)
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.validate(config)
            self.assertEqual(ret, (True, 'Valid beacon configuration'))

            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'}])

    def test_device_repeat_multi(self):
        # battery_low re-fires after the level recovers above the
        # threshold and then drops below it again.
        config = [{'states': ['offline'], 'battery_low': 35}]

        out = [
            'List of devices attached\nHTC\tdevice',
            '25',
            'List of devices attached\nHTC\tdevice',
            '40',
            'List of devices attached\nHTC\tdevice',
            '25',
            'List of devices attached\nHTC\tdevice',
            '80'
        ]
        mock = Mock(side_effect=out)
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.validate(config)
            self.assertEqual(ret, (True, 'Valid beacon configuration'))

            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC', 'battery_level': 25, 'tag': 'battery_low'}])

            ret = adb.beacon(config)
            self.assertEqual(ret, [])

            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC', 'battery_level': 25, 'tag': 'battery_low'}])

            ret = adb.beacon(config)
            self.assertEqual(ret, [])

    def test_weird_batteries(self):
        # Nonsensical (negative) battery readings are ignored.
        config = [{'states': ['device'], 'battery_low': 25}]

        out = [
            'List of devices attached\nHTC\tdevice',
            '-9000',
        ]
        mock = Mock(side_effect=out)
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.validate(config)
            self.assertEqual(ret, (True, 'Valid beacon configuration'))

            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'}])

    def test_multiple_batteries(self):
        # With multiple battery readings, only the first is reported.
        config = [{'states': ['device'], 'battery_low': 30}]

        out = [
            'List of devices attached\nHTC\tdevice',
            '25\n40',
        ]
        mock = Mock(side_effect=out)
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.validate(config)
            self.assertEqual(ret, (True, 'Valid beacon configuration'))

            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'},
                                   {'device': 'HTC', 'battery_level': 25, 'tag': 'battery_low'}])

    def test_multiple_low_batteries(self):
        # Multiple low readings still yield a single battery_low event
        # carrying the first value.
        config = [{'states': ['device'], 'battery_low': 30}]

        out = [
            'List of devices attached\nHTC\tdevice',
            '25\n14',
        ]
        mock = Mock(side_effect=out)
        with patch.dict(adb.__salt__, {'cmd.run': mock}):
            ret = adb.validate(config)
            self.assertEqual(ret, (True, 'Valid beacon configuration'))

            ret = adb.beacon(config)
            self.assertEqual(ret, [{'device': 'HTC', 'state': 'device', 'tag': 'device'},
                                   {'device': 'HTC', 'battery_level': 25, 'tag': 'battery_low'}])
| 35.337719
| 107
| 0.515887
| 1,627
| 16,114
| 4.955747
| 0.09158
| 0.044648
| 0.12948
| 0.166687
| 0.862086
| 0.855637
| 0.833561
| 0.832445
| 0.821654
| 0.820786
| 0
| 0.007512
| 0.339084
| 16,114
| 455
| 108
| 35.415385
| 0.749577
| 0.005275
| 0
| 0.703812
| 0
| 0.002933
| 0.238913
| 0.04391
| 0
| 0
| 0
| 0
| 0.181818
| 1
| 0.076246
| false
| 0
| 0.014663
| 0.002933
| 0.096774
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
e0e65a547ba8108a989465d009f67b7d1940ce2b
| 1,613
|
py
|
Python
|
huey_logger/admin.py
|
bnznamco/django-huey-logger
|
7adbb6cb32a41b4fe14d5e0975c504cc40d20ecb
|
[
"MIT"
] | 2
|
2019-07-01T10:24:22.000Z
|
2020-05-14T07:26:35.000Z
|
huey_logger/admin.py
|
bnznamco/django-huey-logger
|
7adbb6cb32a41b4fe14d5e0975c504cc40d20ecb
|
[
"MIT"
] | null | null | null |
huey_logger/admin.py
|
bnznamco/django-huey-logger
|
7adbb6cb32a41b4fe14d5e0975c504cc40d20ecb
|
[
"MIT"
] | 1
|
2019-07-15T22:16:47.000Z
|
2019-07-15T22:16:47.000Z
|
from django.contrib import admin
from .models import LastCronRun, CronError
@admin.register(LastCronRun)
class LastCronRunAdmin(admin.ModelAdmin):
    """Read-only admin view over LastCronRun entries.

    Rows can be listed and inspected but not added, deleted, or edited.
    """

    list_display = ('name', 'started_at', 'ended_at')

    def get_readonly_fields(self, request, obj=None):
        # Mark every model field read-only so the change form is view-only.
        return [f.name for f in self.model._meta.fields]

    def has_add_permission(self, request, obj=None):
        return False

    def has_delete_permission(self, request, obj=None):
        return False

    def has_save_permission(self, request, obj=None):
        # NOTE(review): 'has_save_permission' is not a hook Django's
        # ModelAdmin ever calls; the save buttons are actually hidden by
        # changeform_view below. Kept as-is in case external code calls it.
        return False

    def changeform_view(self, request, object_id=None, form_url='', extra_context=None):
        # Hide the save buttons so the change form behaves as a detail view.
        extra_context = extra_context or {}
        extra_context['show_save_and_continue'] = False
        extra_context['show_save'] = False
        # Bug fix: forward form_url instead of silently dropping it.
        return super(LastCronRunAdmin, self).changeform_view(
            request, object_id, form_url=form_url, extra_context=extra_context)
@admin.register(CronError)
class CronErrorAdmin(admin.ModelAdmin):
    """Read-only admin view over CronError entries.

    Rows can be listed and inspected but not added or edited.
    """

    list_display = ('name', 'time', 'error')

    def get_readonly_fields(self, request, obj=None):
        # Mark every model field read-only so the change form is view-only.
        return [f.name for f in self.model._meta.fields]

    def has_add_permission(self, request, obj=None):
        return False

    def has_save_permission(self, request, obj=None):
        # NOTE(review): 'has_save_permission' is not a hook Django's
        # ModelAdmin ever calls; the save buttons are actually hidden by
        # changeform_view below. Kept as-is in case external code calls it.
        return False

    def changeform_view(self, request, object_id=None, form_url='', extra_context=None):
        # Hide the save buttons so the change form behaves as a detail view.
        extra_context = extra_context or {}
        extra_context['show_save_and_continue'] = False
        extra_context['show_save'] = False
        # Bug fix: forward form_url instead of silently dropping it.
        return super(CronErrorAdmin, self).changeform_view(
            request, object_id, form_url=form_url, extra_context=extra_context)
| 35.065217
| 109
| 0.710477
| 208
| 1,613
| 5.259615
| 0.259615
| 0.153565
| 0.08958
| 0.115174
| 0.80713
| 0.752285
| 0.752285
| 0.752285
| 0.752285
| 0.752285
| 0
| 0
| 0.188469
| 1,613
| 45
| 110
| 35.844444
| 0.835752
| 0
| 0
| 0.65625
| 0
| 0
| 0.060136
| 0.027278
| 0
| 0
| 0
| 0
| 0
| 1
| 0.28125
| false
| 0
| 0.0625
| 0.21875
| 0.75
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 8
|
4611673bb6da34bbfe92d3dc4aea5d42424a489f
| 3,389
|
py
|
Python
|
supersuit/generic_wrappers/frame_stack.py
|
jjshoots/SuperSuit
|
6d0506f44656694b1d94f547dc9ba5555ef7c463
|
[
"MIT"
] | 54
|
2021-10-15T09:45:34.000Z
|
2022-03-31T02:37:04.000Z
|
supersuit/generic_wrappers/frame_stack.py
|
jjshoots/SuperSuit
|
6d0506f44656694b1d94f547dc9ba5555ef7c463
|
[
"MIT"
] | 21
|
2021-10-17T10:53:05.000Z
|
2022-03-28T18:11:19.000Z
|
supersuit/generic_wrappers/frame_stack.py
|
jjshoots/SuperSuit
|
6d0506f44656694b1d94f547dc9ba5555ef7c463
|
[
"MIT"
] | 8
|
2021-12-21T03:23:59.000Z
|
2022-03-18T16:34:03.000Z
|
from .utils.base_modifier import BaseModifier
from .utils.shared_wrapper_util import shared_wrapper
from gym.spaces import Box, Discrete
from supersuit.utils.frame_stack import stack_obs_space, stack_init, stack_obs
def frame_stack_v1(env, stack_size=4, stack_dim=-1):
    """Wrap *env* so observations are stacks of the last ``stack_size`` frames.

    The stack is concatenated along ``stack_dim`` of the observation space.
    """
    assert isinstance(stack_size, int), "stack size of frame_stack must be an int"

    class FrameStackModifier(BaseModifier):
        def modify_obs_space(self, obs_space):
            # Only Box (1-3 dims) and Discrete spaces can be stacked.
            if isinstance(obs_space, Discrete):
                pass
            elif isinstance(obs_space, Box):
                n_dims = len(obs_space.shape)
                assert 1 <= n_dims <= 3, "frame_stack only works for 1, 2 or 3 dimensional observations"
            else:
                assert False, "Stacking is currently only allowed for Box and Discrete observation spaces. The given observation space is {}".format(obs_space)

            self.old_obs_space = obs_space
            self.observation_space = stack_obs_space(obs_space, stack_size, stack_dim)
            return self.observation_space

        def reset(self, seed=None, return_info=False, options=None):
            # Start every episode from an empty (zeroed) frame stack.
            self.stack = stack_init(self.old_obs_space, stack_size, stack_dim)

        def modify_obs(self, obs):
            # Push the newest observation onto the rolling stack.
            updated = stack_obs(self.stack, obs, self.old_obs_space, stack_size, stack_dim)
            self.stack = updated
            return updated

        def get_last_obs(self):
            return self.stack

    return shared_wrapper(env, FrameStackModifier)
def frame_stack_v2(env, stack_size=4, stack_dim=-1):
    """Wrap *env* so observations are stacks of the last ``stack_size`` frames.

    Unlike v1, the stack is pre-filled with ``stack_size`` copies of the first
    observation after a reset instead of starting from a zeroed stack.
    """
    assert isinstance(stack_size, int), "stack size of frame_stack must be an int"
    # Bug fix: the original line asserted a bare (always truthy) string,
    # so stack_dim was never actually validated. Validate it for real.
    assert stack_dim in (0, -1), "stack_dim should be 0 or -1, not {}".format(stack_dim)

    class FrameStackModifier(BaseModifier):
        def modify_obs_space(self, obs_space):
            # Only Box (1-3 dims) and Discrete spaces can be stacked.
            if isinstance(obs_space, Box):
                assert (
                    1 <= len(obs_space.shape) <= 3
                ), "frame_stack only works for 1, 2 or 3 dimensional observations"
            elif isinstance(obs_space, Discrete):
                pass
            else:
                assert (
                    False
                ), "Stacking is currently only allowed for Box and Discrete observation spaces. The given observation space is {}".format(
                    obs_space
                )
            self.old_obs_space = obs_space
            self.observation_space = stack_obs_space(obs_space, stack_size, stack_dim)
            return self.observation_space

        def reset(self, seed=None, return_info=False, options=None):
            self.stack = stack_init(self.old_obs_space, stack_size, stack_dim)
            # Remember that the next observation is the first of the episode.
            self.reset_flag = True

        def modify_obs(self, obs):
            if self.reset_flag:
                # First obs after reset: fill the whole stack with it so the
                # agent never sees the zero-initialized frames.
                for _ in range(stack_size):
                    self.stack = stack_obs(
                        self.stack, obs, self.old_obs_space, stack_size, stack_dim
                    )
                self.reset_flag = False
            else:
                self.stack = stack_obs(
                    self.stack, obs, self.old_obs_space, stack_size, stack_dim
                )
            return self.stack

        def get_last_obs(self):
            return self.stack

    return shared_wrapper(env, FrameStackModifier)
| 37.655556
| 138
| 0.594866
| 410
| 3,389
| 4.67561
| 0.190244
| 0.108503
| 0.054251
| 0.054773
| 0.844549
| 0.824726
| 0.824726
| 0.824726
| 0.824726
| 0.824726
| 0
| 0.007975
| 0.334022
| 3,389
| 89
| 139
| 38.078652
| 0.841382
| 0
| 0
| 0.774648
| 0
| 0
| 0.134258
| 0
| 0
| 0
| 0
| 0
| 0.098592
| 1
| 0.140845
| false
| 0.028169
| 0.056338
| 0.028169
| 0.338028
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
08fd43bc7ea2c93a87a3b47e19399991c52876c9
| 1,236
|
py
|
Python
|
usage/models.py
|
CDAT/cdat_usage2.0
|
d675e91a6a76232a85b1fd94b9ef3bcc26b17c6a
|
[
"BSD-3-Clause"
] | null | null | null |
usage/models.py
|
CDAT/cdat_usage2.0
|
d675e91a6a76232a85b1fd94b9ef3bcc26b17c6a
|
[
"BSD-3-Clause"
] | 6
|
2020-04-10T20:14:30.000Z
|
2021-12-13T19:48:36.000Z
|
usage/models.py
|
CDAT/cdat_usage2.0
|
d675e91a6a76232a85b1fd94b9ef3bcc26b17c6a
|
[
"BSD-3-Clause"
] | 1
|
2018-10-29T20:42:22.000Z
|
2018-10-29T20:42:22.000Z
|
from django.db import models
from datetime import datetime
# Create your models here.
class Entry(models.Model):
    """One usage-report record submitted by a CDAT installation.

    Captures the reporting host (hostname/username stored hashed),
    the software component and version that sent the report, and
    arrival timestamps.
    """

    # Explicit integer surrogate key (Django would otherwise add one implicitly).
    id = models.AutoField(primary_key=True, null=False, blank=False)
    # Operating system / platform name and version of the reporting host.
    platform = models.CharField(max_length=1024, blank=True, null=False)
    platform_version = models.CharField(max_length=1024, blank=True, null=False)
    # Hostname is stored pre-hashed — presumably for anonymity; confirm
    # with the report sender.
    hashed_hostname = models.CharField(max_length=1024, blank=True, null=False)
    # Component that generated the report, and associated versions.
    source = models.CharField(max_length=1024, blank=True, null=False)
    cdat_info_version = models.CharField(max_length=1024, blank=True, null=False)
    source_version = models.CharField(max_length=1024, blank=True, null=False)
    # Free-form action/sleep/pid fields reported by the client (all kept
    # as text; semantics defined by the sender).
    action = models.CharField(max_length=1024, blank=True, null=False)
    sleep = models.CharField(max_length=1024, blank=True, null=False)
    pid = models.CharField(max_length=1024, blank=True, null=False)
    hashed_username = models.CharField(max_length=1024, blank=True, null=False)
    # NOTE(review): datetime.now is a callable default (evaluated per save)
    # but yields naive datetimes; with USE_TZ=True Django expects aware
    # values (django.utils.timezone.now) — confirm project settings.
    gmtime = models.DateTimeField(default=datetime.now, blank=True, null=False)
    # Source address of the report.
    ip = models.GenericIPAddressField(null=False, blank=False)
    domain = models.CharField(max_length=1024, blank=True, null=False)
    date = models.DateTimeField(default=datetime.now, blank=True, null=False)
| 53.73913
| 81
| 0.7589
| 170
| 1,236
| 5.411765
| 0.252941
| 0.146739
| 0.197826
| 0.254348
| 0.718478
| 0.718478
| 0.718478
| 0.718478
| 0.718478
| 0.298913
| 0
| 0.040741
| 0.126214
| 1,236
| 22
| 82
| 56.181818
| 0.811111
| 0.019417
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
1c07f68ab4e9801896194e272a2d5615046d378f
| 95
|
py
|
Python
|
main.py
|
nmodhipalli/alphabetlearner
|
8c9b5605e86f05921d4dcf603345723c8e8d698e
|
[
"MIT"
] | null | null | null |
main.py
|
nmodhipalli/alphabetlearner
|
8c9b5605e86f05921d4dcf603345723c8e8d698e
|
[
"MIT"
] | null | null | null |
main.py
|
nmodhipalli/alphabetlearner
|
8c9b5605e86f05921d4dcf603345723c8e8d698e
|
[
"MIT"
] | null | null | null |
from flask import render_template
def main(request):
    """Render and return the site's main page.

    The ``request`` argument is accepted to satisfy the caller's routing
    convention but is not inspected here.
    """
    template_name = 'main.html'
    return render_template(template_name)
| 15.833333
| 39
| 0.768421
| 13
| 95
| 5.461538
| 0.769231
| 0.394366
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147368
| 95
| 5
| 40
| 19
| 0.876543
| 0
| 0
| 0
| 0
| 0
| 0.094737
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
1c3f2211b675a7a05e8ac50df4265131fc2f31c0
| 127
|
py
|
Python
|
install.py
|
dmdhrumilmistry/Termux-SSH
|
65ba7868a0e8961f9a262a85e79b56f8b8a65b9e
|
[
"MIT"
] | 5
|
2021-07-17T20:40:42.000Z
|
2022-02-27T09:41:19.000Z
|
install.py
|
dmdhrumilmistry/Termux-SSH
|
65ba7868a0e8961f9a262a85e79b56f8b8a65b9e
|
[
"MIT"
] | null | null | null |
install.py
|
dmdhrumilmistry/Termux-SSH
|
65ba7868a0e8961f9a262a85e79b56f8b8a65b9e
|
[
"MIT"
] | 1
|
2021-07-17T22:36:39.000Z
|
2021-07-17T22:36:39.000Z
|
#!/usr/bin/env python3
# BUG FIX: the shebang was "#!usr/bin/env python3" (missing the leading
# slash), so direct execution on a POSIX system would fail with
# "bad interpreter".
"""Set up SSH access inside Termux: install required packages, then
generate a login password."""
from termux import get_user, generate_passwd, install_termux_req

# Install Termux package prerequisites first, then create the password.
install_termux_req()
generate_passwd()
| 21.166667
| 64
| 0.834646
| 19
| 127
| 5.210526
| 0.684211
| 0.282828
| 0.323232
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008621
| 0.086614
| 127
| 5
| 65
| 25.4
| 0.844828
| 0.15748
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.666667
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 8
|
1c5ec7892e6f88884569e4acda936ac6b937b664
| 147
|
py
|
Python
|
argumento_padrao.py
|
isabela-augusto/Python-sample
|
44b1342a01c8fdfcacea83db9d967119d2545131
|
[
"MIT"
] | null | null | null |
argumento_padrao.py
|
isabela-augusto/Python-sample
|
44b1342a01c8fdfcacea83db9d967119d2545131
|
[
"MIT"
] | null | null | null |
argumento_padrao.py
|
isabela-augusto/Python-sample
|
44b1342a01c8fdfcacea83db9d967119d2545131
|
[
"MIT"
] | null | null | null |
def salario_descontado_imposto(salario, imposto=27.):
    """Return the salary after deducting an ``imposto`` percent tax (default 27%)."""
    desconto = salario * imposto * 0.01
    return salario - desconto


print(salario_descontado_imposto(5000,2))
| 36.75
| 54
| 0.755102
| 19
| 147
| 5.631579
| 0.578947
| 0.317757
| 0.448598
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078125
| 0.129252
| 147
| 3
| 55
| 49
| 0.757813
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 0.666667
| 0.333333
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
1c7fc45bec5df1579530748459cee8f6d89cf015
| 220,745
|
py
|
Python
|
src/oci/bds/bds_client.py
|
pabs3/oci-python-sdk
|
437ba18ce39af2d1090e277c4bb8750c89f83021
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/bds/bds_client.py
|
pabs3/oci-python-sdk
|
437ba18ce39af2d1090e277c4bb8750c89f83021
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/bds/bds_client.py
|
pabs3/oci-python-sdk
|
437ba18ce39af2d1090e277c4bb8750c89f83021
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from __future__ import absolute_import
from oci._vendor import requests # noqa: F401
from oci._vendor import six
from oci import retry, circuit_breaker # noqa: F401
from oci.base_client import BaseClient
from oci.config import get_config_value_or_default, validate_config
from oci.signer import Signer
from oci.util import Sentinel, get_signer_from_authentication_type, AUTHENTICATION_TYPE_FIELD_NAME
from .models import bds_type_mapping
# Sentinel distinguishing "argument not supplied" from an explicit None
# in optional header/query parameters.
missing = Sentinel("Missing")
class BdsClient(object):
"""
REST API for Oracle Big Data Service. Use this API to build, deploy, and manage fully elastic Big Data Service clusters. Build on Hadoop, Spark and Data Science distributions, which can be fully integrated with existing enterprise data in Oracle Database and Oracle applications.
"""
def __init__(self, config, **kwargs):
    """
    Creates a new service client

    :param dict config:
        Configuration keys and values as per `SDK and Tool Configuration <https://docs.cloud.oracle.com/Content/API/Concepts/sdkconfig.htm>`__.
        The :py:meth:`~oci.config.from_file` method can be used to load configuration from a file. Alternatively, a ``dict`` can be passed. You can validate_config
        the dict using :py:meth:`~oci.config.validate_config`

    :param str service_endpoint: (optional)
        The endpoint of the service to call using this client. For example ``https://iaas.us-ashburn-1.oraclecloud.com``. If this keyword argument is
        not provided then it will be derived using the region in the config parameter. You should only provide this keyword argument if you have an explicit
        need to specify a service endpoint.

    :param timeout: (optional)
        The connection and read timeouts for the client. The default values are connection timeout 10 seconds and read timeout 60 seconds. This keyword argument can be provided
        as a single float, in which case the value provided is used for both the read and connection timeouts, or as a tuple of two floats. If
        a tuple is provided then the first value is used as the connection timeout and the second value as the read timeout.
    :type timeout: float or tuple(float, float)

    :param signer: (optional)
        The signer to use when signing requests made by the service client. The default is to use a :py:class:`~oci.signer.Signer` based on the values
        provided in the config parameter.

        One use case for this parameter is for `Instance Principals authentication <https://docs.cloud.oracle.com/Content/Identity/Tasks/callingservicesfrominstances.htm>`__
        by passing an instance of :py:class:`~oci.auth.signers.InstancePrincipalsSecurityTokenSigner` as the value for this keyword argument
    :type signer: :py:class:`~oci.signer.AbstractBaseSigner`

    :param obj retry_strategy: (optional)
        A retry strategy to apply to all calls made by this service client (i.e. at the client level). There is no retry strategy applied by default.
        Retry strategies can also be applied at the operation level by passing a ``retry_strategy`` keyword argument as part of calling the operation.
        Any value provided at the operation level will override whatever is specified at the client level.

        This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
        is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.

    :param obj circuit_breaker_strategy: (optional)
        A circuit breaker strategy to apply to all calls made by this service client (i.e. at the client level).
        This client uses :py:data:`~oci.circuit_breaker.DEFAULT_CIRCUIT_BREAKER_STRATEGY` as default if no circuit breaker strategy is provided.
        The specifics of circuit breaker strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/circuit_breakers.html>`__.

    :param function circuit_breaker_callback: (optional)
        Callback function to receive any exceptions triggerred by the circuit breaker.

    :param allow_control_chars: (optional)
        allow_control_chars is a boolean to indicate whether or not this client should allow control characters in the response object. By default, the client will not
        allow control characters to be in the response object.
    """
    # Fail fast on a malformed config before building any client machinery.
    validate_config(config, signer=kwargs.get('signer'))
    # Signer resolution precedence: explicit `signer` kwarg > authentication
    # type named in the config > default key-based Signer built from
    # individual config fields.
    if 'signer' in kwargs:
        signer = kwargs['signer']
    elif AUTHENTICATION_TYPE_FIELD_NAME in config:
        signer = get_signer_from_authentication_type(config)
    else:
        signer = Signer(
            tenancy=config["tenancy"],
            user=config["user"],
            fingerprint=config["fingerprint"],
            private_key_file_location=config.get("key_file"),
            pass_phrase=get_config_value_or_default(config, "pass_phrase"),
            private_key_content=config.get("key_content")
        )
    base_client_init_kwargs = {
        'regional_client': True,
        'service_endpoint': kwargs.get('service_endpoint'),
        'base_path': '/20190531',
        'service_endpoint_template': 'https://bigdataservice.{region}.oci.{secondLevelDomain}',
        'skip_deserialization': kwargs.get('skip_deserialization', False),
        'circuit_breaker_strategy': kwargs.get('circuit_breaker_strategy', circuit_breaker.GLOBAL_CIRCUIT_BREAKER_STRATEGY)
    }
    if 'timeout' in kwargs:
        base_client_init_kwargs['timeout'] = kwargs.get('timeout')
    # An explicitly-passed circuit_breaker_strategy of None means "use the
    # default", not "disable the breaker".
    if base_client_init_kwargs.get('circuit_breaker_strategy') is None:
        base_client_init_kwargs['circuit_breaker_strategy'] = circuit_breaker.DEFAULT_CIRCUIT_BREAKER_STRATEGY
    if 'allow_control_chars' in kwargs:
        base_client_init_kwargs['allow_control_chars'] = kwargs.get('allow_control_chars')
    self.base_client = BaseClient("bds", config, signer, bds_type_mapping, **base_client_init_kwargs)
    # Client-level retry strategy and circuit-breaker callback; individual
    # operations may override the retry strategy via their own kwargs.
    self.retry_strategy = kwargs.get('retry_strategy')
    self.circuit_breaker_callback = kwargs.get('circuit_breaker_callback')
def activate_bds_metastore_configuration(self, bds_instance_id, metastore_config_id, activate_bds_metastore_configuration_details, **kwargs):
    """
    Activate specified metastore configuration.

    :param str bds_instance_id: (required)
        The OCID of the cluster.

    :param str metastore_config_id: (required)
        The metastore configuration ID

    :param oci.bds.models.ActivateBdsMetastoreConfigurationDetails activate_bds_metastore_configuration_details: (required)
        The request body when activating specified metastore configuration.

    :param str opc_request_id: (optional)
        The client request ID for tracing.

    :param str opc_retry_token: (optional)
        A token that uniquely identifies a request so it can be retried in case of a timeout or
        server error, without risk of executing that same action again. Retry tokens expire after 24
        hours but can be invalidated before then due to conflicting operations. For example, if a resource
        has been deleted and purged from the system, then a retry of the original creation request
        might be rejected.

    :param str if_match: (optional)
        For optimistic concurrency control. In the PUT or DELETE call
        for a resource, set the `if-match` parameter to the value of the
        etag from a previous GET or POST response for that resource.
        The resource will be updated or deleted only if the etag you
        provide matches the resource's current etag value.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.

        This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
        The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.

        To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.

    :param bool allow_control_chars: (optional)
        allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
        By default, the response will not allow control characters in strings

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/bds/activate_bds_metastore_configuration.py.html>`__ to see an example of how to use activate_bds_metastore_configuration API.
    """
    resource_path = "/bdsInstances/{bdsInstanceId}/metastoreConfigs/{metastoreConfigId}/actions/activate"
    method = "POST"
    operation_name = "activate_bds_metastore_configuration"
    api_reference_link = "https://docs.oracle.com/iaas/api/#/en/bigdata/20190531/BdsMetastoreConfiguration/ActivateBdsMetastoreConfiguration"

    # Don't accept unknown kwargs
    expected_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "opc_retry_token",
        "if_match"
    ]
    extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "activate_bds_metastore_configuration got unknown kwargs: {!r}".format(extra_kwargs))

    path_params = {
        "bdsInstanceId": bds_instance_id,
        "metastoreConfigId": metastore_config_id
    }

    # Drop unsupplied path params, then reject None/empty/whitespace values
    # that would otherwise produce a malformed URL.
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}

    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing),
        "if-match": kwargs.get("if_match", missing)
    }
    # Headers left as the `missing` sentinel (or None) are simply omitted.
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    # Operation-level retry strategy (if given) overrides the client-level one.
    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if retry_strategy:
        # A real (non-None) retrying strategy gets an idempotency token and
        # the retry headers added before the call is delegated to it.
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_retry_token_if_needed(header_params)
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            body=activate_bds_metastore_configuration_details,
            allow_control_chars=kwargs.get('allow_control_chars'),
            operation_name=operation_name,
            api_reference_link=api_reference_link)
    else:
        # No retry strategy: issue the API call exactly once.
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            body=activate_bds_metastore_configuration_details,
            allow_control_chars=kwargs.get('allow_control_chars'),
            operation_name=operation_name,
            api_reference_link=api_reference_link)
def add_auto_scaling_configuration(self, bds_instance_id, add_auto_scaling_configuration_details, **kwargs):
    """
    Add an autoscale configuration to the cluster.

    :param str bds_instance_id: (required)
        The OCID of the cluster.

    :param oci.bds.models.AddAutoScalingConfigurationDetails add_auto_scaling_configuration_details: (required)
        Details for creating an autoscale configuration.

    :param str opc_request_id: (optional)
        The client request ID for tracing.

    :param str if_match: (optional)
        For optimistic concurrency control. In the PUT or DELETE call
        for a resource, set the `if-match` parameter to the value of the
        etag from a previous GET or POST response for that resource.
        The resource will be updated or deleted only if the etag you
        provide matches the resource's current etag value.

    :param str opc_retry_token: (optional)
        A token that uniquely identifies a request so it can be retried in case of a timeout or
        server error, without risk of executing that same action again. Retry tokens expire after 24
        hours but can be invalidated before then due to conflicting operations. For example, if a resource
        has been deleted and purged from the system, then a retry of the original creation request
        might be rejected.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.

        This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
        The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.

        To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.

    :param bool allow_control_chars: (optional)
        allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
        By default, the response will not allow control characters in strings

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/bds/add_auto_scaling_configuration.py.html>`__ to see an example of how to use add_auto_scaling_configuration API.
    """
    resource_path = "/bdsInstances/{bdsInstanceId}/autoScalingConfiguration"
    method = "POST"
    operation_name = "add_auto_scaling_configuration"
    api_reference_link = "https://docs.oracle.com/iaas/api/#/en/bigdata/20190531/BdsInstance/AddAutoScalingConfiguration"

    # Don't accept unknown kwargs
    expected_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "if_match",
        "opc_retry_token"
    ]
    extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "add_auto_scaling_configuration got unknown kwargs: {!r}".format(extra_kwargs))

    path_params = {
        "bdsInstanceId": bds_instance_id
    }

    # Drop unsupplied path params, then reject None/empty/whitespace values
    # that would otherwise produce a malformed URL.
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}

    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    # Headers left as the `missing` sentinel (or None) are simply omitted.
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    # Operation-level retry strategy (if given) overrides the client-level one.
    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if retry_strategy:
        # A real (non-None) retrying strategy gets an idempotency token and
        # the retry headers added before the call is delegated to it.
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_retry_token_if_needed(header_params)
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            body=add_auto_scaling_configuration_details,
            allow_control_chars=kwargs.get('allow_control_chars'),
            operation_name=operation_name,
            api_reference_link=api_reference_link)
    else:
        # No retry strategy: issue the API call exactly once.
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            body=add_auto_scaling_configuration_details,
            allow_control_chars=kwargs.get('allow_control_chars'),
            operation_name=operation_name,
            api_reference_link=api_reference_link)
def add_block_storage(self, bds_instance_id, add_block_storage_details, **kwargs):
    """
    Adds block storage to existing worker/compute only worker nodes. The same amount of storage will be added to all worker/compute only worker nodes. No change will be made to storage that is already attached. Block storage cannot be removed.

    :param str bds_instance_id: (required)
        The OCID of the cluster.

    :param oci.bds.models.AddBlockStorageDetails add_block_storage_details: (required)
        Details for the added block storage.

    :param str opc_request_id: (optional)
        The client request ID for tracing.

    :param str if_match: (optional)
        For optimistic concurrency control. In the PUT or DELETE call
        for a resource, set the `if-match` parameter to the value of the
        etag from a previous GET or POST response for that resource.
        The resource will be updated or deleted only if the etag you
        provide matches the resource's current etag value.

    :param str opc_retry_token: (optional)
        A token that uniquely identifies a request so it can be retried in case of a timeout or
        server error, without risk of executing that same action again. Retry tokens expire after 24
        hours but can be invalidated before then due to conflicting operations. For example, if a resource
        has been deleted and purged from the system, then a retry of the original creation request
        might be rejected.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.

        This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
        The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.

        To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.

    :param bool allow_control_chars: (optional)
        allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
        By default, the response will not allow control characters in strings

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/bds/add_block_storage.py.html>`__ to see an example of how to use add_block_storage API.
    """
    resource_path = "/bdsInstances/{bdsInstanceId}/actions/addBlockStorage"
    method = "POST"
    operation_name = "add_block_storage"
    api_reference_link = "https://docs.oracle.com/iaas/api/#/en/bigdata/20190531/BdsInstance/AddBlockStorage"

    # Don't accept unknown kwargs
    expected_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "if_match",
        "opc_retry_token"
    ]
    extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "add_block_storage got unknown kwargs: {!r}".format(extra_kwargs))

    path_params = {
        "bdsInstanceId": bds_instance_id
    }

    # Drop unsupplied path params, then reject None/empty/whitespace values
    # that would otherwise produce a malformed URL.
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}

    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    # Headers left as the `missing` sentinel (or None) are simply omitted.
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    # Operation-level retry strategy (if given) overrides the client-level one.
    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if retry_strategy:
        # A real (non-None) retrying strategy gets an idempotency token and
        # the retry headers added before the call is delegated to it.
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_retry_token_if_needed(header_params)
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            body=add_block_storage_details,
            allow_control_chars=kwargs.get('allow_control_chars'),
            operation_name=operation_name,
            api_reference_link=api_reference_link)
    else:
        # No retry strategy: issue the API call exactly once.
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            body=add_block_storage_details,
            allow_control_chars=kwargs.get('allow_control_chars'),
            operation_name=operation_name,
            api_reference_link=api_reference_link)
def add_cloud_sql(self, bds_instance_id, add_cloud_sql_details, **kwargs):
    """
    Adds Cloud SQL to your cluster. You can use Cloud SQL to query against non-relational data stored in multiple big data sources, including Apache Hive, HDFS, Oracle NoSQL Database, and Apache HBase. Adding Cloud SQL adds a query server node to the cluster and creates cell servers on all the worker nodes in the cluster.

    :param str bds_instance_id: (required)
        The OCID of the cluster.

    :param oci.bds.models.AddCloudSqlDetails add_cloud_sql_details: (required)
        Details for the Cloud SQL capability

    :param str opc_request_id: (optional)
        The client request ID for tracing.

    :param str if_match: (optional)
        For optimistic concurrency control. In the PUT or DELETE call
        for a resource, set the `if-match` parameter to the value of the
        etag from a previous GET or POST response for that resource.
        The resource will be updated or deleted only if the etag you
        provide matches the resource's current etag value.

    :param str opc_retry_token: (optional)
        A token that uniquely identifies a request so it can be retried in case of a timeout or
        server error, without risk of executing that same action again. Retry tokens expire after 24
        hours but can be invalidated before then due to conflicting operations. For example, if a resource
        has been deleted and purged from the system, then a retry of the original creation request
        might be rejected.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.

        This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
        The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.

        To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.

    :param bool allow_control_chars: (optional)
        allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
        By default, the response will not allow control characters in strings

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/bds/add_cloud_sql.py.html>`__ to see an example of how to use add_cloud_sql API.
    """
    resource_path = "/bdsInstances/{bdsInstanceId}/actions/addCloudSql"
    method = "POST"
    operation_name = "add_cloud_sql"
    api_reference_link = "https://docs.oracle.com/iaas/api/#/en/bigdata/20190531/BdsInstance/AddCloudSql"

    # Don't accept unknown kwargs
    expected_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "if_match",
        "opc_retry_token"
    ]
    extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "add_cloud_sql got unknown kwargs: {!r}".format(extra_kwargs))

    path_params = {
        "bdsInstanceId": bds_instance_id
    }

    # Drop unsupplied path params, then reject None/empty/whitespace values
    # that would otherwise produce a malformed URL.
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}

    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    # Headers left as the `missing` sentinel (or None) are simply omitted.
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    # Operation-level retry strategy (if given) overrides the client-level one.
    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if retry_strategy:
        # A real (non-None) retrying strategy gets an idempotency token and
        # the retry headers added before the call is delegated to it.
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_retry_token_if_needed(header_params)
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api,
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            body=add_cloud_sql_details,
            allow_control_chars=kwargs.get('allow_control_chars'),
            operation_name=operation_name,
            api_reference_link=api_reference_link)
    else:
        # No retry strategy: issue the API call exactly once.
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            body=add_cloud_sql_details,
            allow_control_chars=kwargs.get('allow_control_chars'),
            operation_name=operation_name,
            api_reference_link=api_reference_link)
def add_worker_nodes(self, bds_instance_id, add_worker_nodes_details, **kwargs):
    """
    Scales out the cluster by adding worker nodes (data/compute). The new
    workers are created with the same shape and the same amount of attached
    block storage as the cluster's existing worker nodes.

    :param str bds_instance_id: (required) The OCID of the cluster.
    :param oci.bds.models.AddWorkerNodesDetails add_worker_nodes_details: (required)
        Details for the newly added nodes.
    :param str opc_request_id: (optional) The client request ID for tracing.
    :param str if_match: (optional) For optimistic concurrency control; the
        resource is updated only if the supplied etag matches its current etag.
    :param str opc_retry_token: (optional) A token that uniquely identifies the
        request so it can be retried after a timeout or server error without
        re-executing the action. Tokens expire after 24 hours.
    :param obj retry_strategy: (optional) A retry strategy for this call,
        overriding any client-level strategy. Pass an instance of
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.
    :param bool allow_control_chars: (optional) Whether control characters are
        permitted in the response strings (disallowed by default).
    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/bds/add_worker_nodes.py.html>`__ to see an example of how to use add_worker_nodes API.
    """
    resource_path = "/bdsInstances/{bdsInstanceId}/actions/addWorkerNodes"
    method = "POST"
    operation_name = "add_worker_nodes"
    api_reference_link = "https://docs.oracle.com/iaas/api/#/en/bigdata/20190531/BdsInstance/AddWorkerNodes"

    # Reject any keyword arguments this operation does not understand.
    expected_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "if_match",
        "opc_retry_token"
    ]
    extra_kwargs = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "add_worker_nodes got unknown kwargs: {!r}".format(extra_kwargs))

    # Template parameters must be present and non-blank.
    path_params = {
        "bdsInstanceId": bds_instance_id
    }
    path_params = {name: value for (name, value) in six.iteritems(path_params) if value is not missing}
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Drop headers the caller did not supply.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    header_params = {name: value for (name, value) in six.iteritems(header_params) if value is not missing and value is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Assemble the call once; header_params is shared by reference, so any
    # retry-related headers added below are still picked up.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=add_worker_nodes_details,
        allow_control_chars=kwargs.get('allow_control_chars'),
        operation_name=operation_name,
        api_reference_link=api_reference_link)

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_retry_token_if_needed(header_params)
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def change_bds_instance_compartment(self, bds_instance_id, change_bds_instance_compartment_details, **kwargs):
    """
    Moves a Big Data Service cluster into a different compartment.

    :param str bds_instance_id: (required) The OCID of the cluster.
    :param oci.bds.models.ChangeBdsInstanceCompartmentDetails change_bds_instance_compartment_details: (required)
        Details for the compartment change.
    :param str opc_request_id: (optional) The client request ID for tracing.
    :param str if_match: (optional) For optimistic concurrency control; the
        resource is updated only if the supplied etag matches its current etag.
    :param str opc_retry_token: (optional) A token that uniquely identifies the
        request so it can be retried after a timeout or server error without
        re-executing the action. Tokens expire after 24 hours.
    :param obj retry_strategy: (optional) A retry strategy for this call,
        overriding any client-level strategy. Pass an instance of
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.
    :param bool allow_control_chars: (optional) Whether control characters are
        permitted in the response strings (disallowed by default).
    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/bds/change_bds_instance_compartment.py.html>`__ to see an example of how to use change_bds_instance_compartment API.
    """
    resource_path = "/bdsInstances/{bdsInstanceId}/actions/changeCompartment"
    method = "POST"
    operation_name = "change_bds_instance_compartment"
    api_reference_link = "https://docs.oracle.com/iaas/api/#/en/bigdata/20190531/BdsInstance/ChangeBdsInstanceCompartment"

    # Reject any keyword arguments this operation does not understand.
    expected_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "if_match",
        "opc_retry_token"
    ]
    extra_kwargs = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "change_bds_instance_compartment got unknown kwargs: {!r}".format(extra_kwargs))

    # Template parameters must be present and non-blank.
    path_params = {
        "bdsInstanceId": bds_instance_id
    }
    path_params = {name: value for (name, value) in six.iteritems(path_params) if value is not missing}
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Drop headers the caller did not supply.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    header_params = {name: value for (name, value) in six.iteritems(header_params) if value is not missing and value is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Assemble the call once; header_params is shared by reference, so any
    # retry-related headers added below are still picked up.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=change_bds_instance_compartment_details,
        allow_control_chars=kwargs.get('allow_control_chars'),
        operation_name=operation_name,
        api_reference_link=api_reference_link)

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_retry_token_if_needed(header_params)
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def change_shape(self, bds_instance_id, change_shape_details, **kwargs):
    """
    Resizes the cluster by scaling its nodes up or down: all nodes of a given
    type (master, utility, worker, Cloud SQL) are moved to the next larger or
    smaller shape. Only nodes with VM-STANDARD shapes can be scaled.

    :param str bds_instance_id: (required) The OCID of the cluster.
    :param oci.bds.models.ChangeShapeDetails change_shape_details: (required)
        Per-node-type change shape settings (master, worker, utility, Cloud SQL).
    :param str opc_request_id: (optional) The client request ID for tracing.
    :param str if_match: (optional) For optimistic concurrency control; the
        resource is updated only if the supplied etag matches its current etag.
    :param str opc_retry_token: (optional) A token that uniquely identifies the
        request so it can be retried after a timeout or server error without
        re-executing the action. Tokens expire after 24 hours.
    :param obj retry_strategy: (optional) A retry strategy for this call,
        overriding any client-level strategy. Pass an instance of
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.
    :param bool allow_control_chars: (optional) Whether control characters are
        permitted in the response strings (disallowed by default).
    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/bds/change_shape.py.html>`__ to see an example of how to use change_shape API.
    """
    resource_path = "/bdsInstances/{bdsInstanceId}/actions/changeShape"
    method = "POST"
    operation_name = "change_shape"
    api_reference_link = "https://docs.oracle.com/iaas/api/#/en/bigdata/20190531/BdsInstance/ChangeShape"

    # Reject any keyword arguments this operation does not understand.
    expected_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "if_match",
        "opc_retry_token"
    ]
    extra_kwargs = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "change_shape got unknown kwargs: {!r}".format(extra_kwargs))

    # Template parameters must be present and non-blank.
    path_params = {
        "bdsInstanceId": bds_instance_id
    }
    path_params = {name: value for (name, value) in six.iteritems(path_params) if value is not missing}
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Drop headers the caller did not supply.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    header_params = {name: value for (name, value) in six.iteritems(header_params) if value is not missing and value is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Assemble the call once; header_params is shared by reference, so any
    # retry-related headers added below are still picked up.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=change_shape_details,
        allow_control_chars=kwargs.get('allow_control_chars'),
        operation_name=operation_name,
        api_reference_link=api_reference_link)

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_retry_token_if_needed(header_params)
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def create_bds_api_key(self, bds_instance_id, create_bds_api_key_details, **kwargs):
    """
    Creates an API key on behalf of the specified user.

    :param str bds_instance_id: (required) The OCID of the cluster.
    :param oci.bds.models.CreateBdsApiKeyDetails create_bds_api_key_details: (required)
        Create a new user's API key.
    :param str opc_retry_token: (optional) A token that uniquely identifies the
        request so it can be retried after a timeout or server error without
        re-executing the action. Tokens expire after 24 hours.
    :param str opc_request_id: (optional) The client request ID for tracing.
    :param obj retry_strategy: (optional) A retry strategy for this call,
        overriding any client-level strategy. Pass an instance of
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.
    :param bool allow_control_chars: (optional) Whether control characters are
        permitted in the response strings (disallowed by default).
    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/bds/create_bds_api_key.py.html>`__ to see an example of how to use create_bds_api_key API.
    """
    resource_path = "/bdsInstances/{bdsInstanceId}/apiKeys"
    method = "POST"
    operation_name = "create_bds_api_key"
    api_reference_link = "https://docs.oracle.com/iaas/api/#/en/bigdata/20190531/BdsApiKey/CreateBdsApiKey"

    # Reject any keyword arguments this operation does not understand.
    expected_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_retry_token",
        "opc_request_id"
    ]
    extra_kwargs = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "create_bds_api_key got unknown kwargs: {!r}".format(extra_kwargs))

    # Template parameters must be present and non-blank.
    path_params = {
        "bdsInstanceId": bds_instance_id
    }
    path_params = {name: value for (name, value) in six.iteritems(path_params) if value is not missing}
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Drop headers the caller did not supply.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-retry-token": kwargs.get("opc_retry_token", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {name: value for (name, value) in six.iteritems(header_params) if value is not missing and value is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Assemble the call once; header_params is shared by reference, so any
    # retry-related headers added below are still picked up.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=create_bds_api_key_details,
        allow_control_chars=kwargs.get('allow_control_chars'),
        operation_name=operation_name,
        api_reference_link=api_reference_link)

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_retry_token_if_needed(header_params)
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def create_bds_instance(self, create_bds_instance_details, **kwargs):
    """
    Creates a Big Data Service cluster.

    :param oci.bds.models.CreateBdsInstanceDetails create_bds_instance_details: (required)
        Details for the new cluster.
    :param str opc_request_id: (optional) The client request ID for tracing.
    :param str opc_retry_token: (optional) A token that uniquely identifies the
        request so it can be retried after a timeout or server error without
        re-executing the action. Tokens expire after 24 hours.
    :param obj retry_strategy: (optional) A retry strategy for this call,
        overriding any client-level strategy. Pass an instance of
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.
    :param bool allow_control_chars: (optional) Whether control characters are
        permitted in the response strings (disallowed by default).
    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/bds/create_bds_instance.py.html>`__ to see an example of how to use create_bds_instance API.
    """
    resource_path = "/bdsInstances"
    method = "POST"
    operation_name = "create_bds_instance"
    api_reference_link = "https://docs.oracle.com/iaas/api/#/en/bigdata/20190531/BdsInstance/CreateBdsInstance"

    # Reject any keyword arguments this operation does not understand.
    expected_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "opc_retry_token"
    ]
    extra_kwargs = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "create_bds_instance got unknown kwargs: {!r}".format(extra_kwargs))

    # No path parameters for this operation; drop headers the caller did not supply.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    header_params = {name: value for (name, value) in six.iteritems(header_params) if value is not missing and value is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Assemble the call once; header_params is shared by reference, so any
    # retry-related headers added below are still picked up.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        header_params=header_params,
        body=create_bds_instance_details,
        allow_control_chars=kwargs.get('allow_control_chars'),
        operation_name=operation_name,
        api_reference_link=api_reference_link)

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_retry_token_if_needed(header_params)
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def create_bds_metastore_configuration(self, bds_instance_id, create_bds_metastore_configuration_details, **kwargs):
    """
    Creates and activates an external metastore configuration.

    :param str bds_instance_id: (required) The OCID of the cluster.
    :param oci.bds.models.CreateBdsMetastoreConfigurationDetails create_bds_metastore_configuration_details: (required)
        The request body when creating and activating external metastore configuration.
    :param str opc_retry_token: (optional) A token that uniquely identifies the
        request so it can be retried after a timeout or server error without
        re-executing the action. Tokens expire after 24 hours.
    :param str opc_request_id: (optional) The client request ID for tracing.
    :param obj retry_strategy: (optional) A retry strategy for this call,
        overriding any client-level strategy. Pass an instance of
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.
    :param bool allow_control_chars: (optional) Whether control characters are
        permitted in the response strings (disallowed by default).
    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/bds/create_bds_metastore_configuration.py.html>`__ to see an example of how to use create_bds_metastore_configuration API.
    """
    resource_path = "/bdsInstances/{bdsInstanceId}/metastoreConfigs"
    method = "POST"
    operation_name = "create_bds_metastore_configuration"
    api_reference_link = "https://docs.oracle.com/iaas/api/#/en/bigdata/20190531/BdsMetastoreConfiguration/CreateBdsMetastoreConfiguration"

    # Reject any keyword arguments this operation does not understand.
    expected_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_retry_token",
        "opc_request_id"
    ]
    extra_kwargs = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "create_bds_metastore_configuration got unknown kwargs: {!r}".format(extra_kwargs))

    # Template parameters must be present and non-blank.
    path_params = {
        "bdsInstanceId": bds_instance_id
    }
    path_params = {name: value for (name, value) in six.iteritems(path_params) if value is not missing}
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Drop headers the caller did not supply.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-retry-token": kwargs.get("opc_retry_token", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {name: value for (name, value) in six.iteritems(header_params) if value is not missing and value is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Assemble the call once; header_params is shared by reference, so any
    # retry-related headers added below are still picked up.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=create_bds_metastore_configuration_details,
        allow_control_chars=kwargs.get('allow_control_chars'),
        operation_name=operation_name,
        api_reference_link=api_reference_link)

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_retry_token_if_needed(header_params)
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def delete_bds_api_key(self, bds_instance_id, api_key_id, **kwargs):
    """
    Deletes the user's API key identified by the given ID.

    :param str bds_instance_id: (required) The OCID of the cluster.
    :param str api_key_id: (required) The API key identifier.
    :param str opc_request_id: (optional) The client request ID for tracing.
    :param str if_match: (optional) For optimistic concurrency control; the
        resource is deleted only if the supplied etag matches its current etag.
    :param obj retry_strategy: (optional) A retry strategy for this call,
        overriding any client-level strategy. Pass an instance of
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.
    :param bool allow_control_chars: (optional) Whether control characters are
        permitted in the response strings (disallowed by default).
    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/bds/delete_bds_api_key.py.html>`__ to see an example of how to use delete_bds_api_key API.
    """
    resource_path = "/bdsInstances/{bdsInstanceId}/apiKeys/{apiKeyId}"
    method = "DELETE"
    operation_name = "delete_bds_api_key"
    api_reference_link = "https://docs.oracle.com/iaas/api/#/en/bigdata/20190531/BdsApiKey/DeleteBdsApiKey"

    # Reject any keyword arguments this operation does not understand.
    expected_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "if_match"
    ]
    extra_kwargs = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "delete_bds_api_key got unknown kwargs: {!r}".format(extra_kwargs))

    # Template parameters must be present and non-blank.
    path_params = {
        "bdsInstanceId": bds_instance_id,
        "apiKeyId": api_key_id
    }
    path_params = {name: value for (name, value) in six.iteritems(path_params) if value is not missing}
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Drop headers the caller did not supply.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing)
    }
    header_params = {name: value for (name, value) in six.iteritems(header_params) if value is not missing and value is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Assemble the call once; header_params is shared by reference, so any
    # retry-related headers added below are still picked up. No body or
    # retry token is involved for a DELETE.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        allow_control_chars=kwargs.get('allow_control_chars'),
        operation_name=operation_name,
        api_reference_link=api_reference_link)

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def delete_bds_instance(self, bds_instance_id, **kwargs):
    """
    Deletes the cluster identified by the given ID.

    :param str bds_instance_id: (required)
        The OCID of the cluster.
    :param str opc_request_id: (optional)
        The client request ID for tracing.
    :param str if_match: (optional)
        For optimistic concurrency control. Set `if-match` to the etag from a
        previous GET or POST response; the resource is deleted only if the etag
        still matches its current value.
    :param obj retry_strategy: (optional)
        A retry strategy for this specific call, overriding any client-level
        strategy. Use one of the strategies in :py:mod:`~oci.retry` (e.g.
        :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`), or
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.
        By default this operation does not retry.
    :param bool allow_control_chars: (optional)
        Whether control characters are permitted in response strings. Disallowed
        by default.
    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/bds/delete_bds_instance.py.html>`__ to see an example of how to use delete_bds_instance API.
    """
    resource_path = "/bdsInstances/{bdsInstanceId}"
    method = "DELETE"
    operation_name = "delete_bds_instance"
    api_reference_link = "https://docs.oracle.com/iaas/api/#/en/bigdata/20190531/BdsInstance/DeleteBdsInstance"

    # Reject any keyword arguments this operation does not recognize.
    allowed_kwargs = {
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "if_match"
    }
    unrecognized = [_key for _key in six.iterkeys(kwargs) if _key not in allowed_kwargs]
    if unrecognized:
        raise ValueError(
            "delete_bds_instance got unknown kwargs: {!r}".format(unrecognized))

    path_params = {
        "bdsInstanceId": bds_instance_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    # Every path parameter that survives the `missing` filter must be usable.
    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments common to both the retried and the direct invocation.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        allow_control_chars=kwargs.get('allow_control_chars'),
        operation_name=operation_name,
        api_reference_link=api_reference_link)

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def delete_bds_metastore_configuration(self, bds_instance_id, metastore_config_id, **kwargs):
    """
    Delete the BDS metastore configuration represented by the provided ID.

    :param str bds_instance_id: (required)
        The OCID of the cluster.
    :param str metastore_config_id: (required)
        The metastore configuration ID
    :param str opc_request_id: (optional)
        The client request ID for tracing.
    :param str if_match: (optional)
        For optimistic concurrency control. Set `if-match` to the etag from a
        previous GET or POST response; the resource is deleted only if the etag
        still matches its current value.
    :param obj retry_strategy: (optional)
        A retry strategy for this specific call, overriding any client-level
        strategy. Use one of the strategies in :py:mod:`~oci.retry` (e.g.
        :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`), or
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.
        By default this operation does not retry.
    :param bool allow_control_chars: (optional)
        Whether control characters are permitted in response strings. Disallowed
        by default.
    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/bds/delete_bds_metastore_configuration.py.html>`__ to see an example of how to use delete_bds_metastore_configuration API.
    """
    resource_path = "/bdsInstances/{bdsInstanceId}/metastoreConfigs/{metastoreConfigId}"
    method = "DELETE"
    operation_name = "delete_bds_metastore_configuration"
    api_reference_link = "https://docs.oracle.com/iaas/api/#/en/bigdata/20190531/BdsMetastoreConfiguration/DeleteBdsMetastoreConfiguration"

    # Reject any keyword arguments this operation does not recognize.
    allowed_kwargs = {
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "if_match"
    }
    unrecognized = [_key for _key in six.iterkeys(kwargs) if _key not in allowed_kwargs]
    if unrecognized:
        raise ValueError(
            "delete_bds_metastore_configuration got unknown kwargs: {!r}".format(unrecognized))

    path_params = {
        "bdsInstanceId": bds_instance_id,
        "metastoreConfigId": metastore_config_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    # Every path parameter that survives the `missing` filter must be usable.
    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments common to both the retried and the direct invocation.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        allow_control_chars=kwargs.get('allow_control_chars'),
        operation_name=operation_name,
        api_reference_link=api_reference_link)

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def get_auto_scaling_configuration(self, bds_instance_id, auto_scaling_configuration_id, **kwargs):
    """
    Returns details of the autoscale configuration identified by the given ID.

    :param str bds_instance_id: (required)
        The OCID of the cluster.
    :param str auto_scaling_configuration_id: (required)
        Unique Oracle-assigned identifier of the autoscale configuration.
    :param str opc_request_id: (optional)
        The client request ID for tracing.
    :param obj retry_strategy: (optional)
        A retry strategy for this specific call, overriding any client-level
        strategy. Use one of the strategies in :py:mod:`~oci.retry` (e.g.
        :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`), or
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.
        By default this operation does not retry.
    :param bool allow_control_chars: (optional)
        Whether control characters are permitted in response strings. Disallowed
        by default.
    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.bds.models.AutoScalingConfiguration`
    :rtype: :class:`~oci.response.Response`
    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/bds/get_auto_scaling_configuration.py.html>`__ to see an example of how to use get_auto_scaling_configuration API.
    """
    resource_path = "/bdsInstances/{bdsInstanceId}/autoScalingConfiguration/{autoScalingConfigurationId}"
    method = "GET"
    operation_name = "get_auto_scaling_configuration"
    api_reference_link = "https://docs.oracle.com/iaas/api/#/en/bigdata/20190531/BdsInstance/GetAutoScalingConfiguration"

    # Reject any keyword arguments this operation does not recognize.
    allowed_kwargs = {
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id"
    }
    unrecognized = [_key for _key in six.iterkeys(kwargs) if _key not in allowed_kwargs]
    if unrecognized:
        raise ValueError(
            "get_auto_scaling_configuration got unknown kwargs: {!r}".format(unrecognized))

    path_params = {
        "bdsInstanceId": bds_instance_id,
        "autoScalingConfigurationId": auto_scaling_configuration_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    # Every path parameter that survives the `missing` filter must be usable.
    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments common to both the retried and the direct invocation.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        response_type="AutoScalingConfiguration",
        allow_control_chars=kwargs.get('allow_control_chars'),
        operation_name=operation_name,
        api_reference_link=api_reference_link)

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def get_bds_api_key(self, bds_instance_id, api_key_id, **kwargs):
    """
    Returns the user's API key information for the given ID.

    :param str bds_instance_id: (required)
        The OCID of the cluster.
    :param str api_key_id: (required)
        The API key identifier.
    :param str opc_request_id: (optional)
        The client request ID for tracing.
    :param obj retry_strategy: (optional)
        A retry strategy for this specific call, overriding any client-level
        strategy. Use one of the strategies in :py:mod:`~oci.retry` (e.g.
        :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`), or
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.
        By default this operation does not retry.
    :param bool allow_control_chars: (optional)
        Whether control characters are permitted in response strings. Disallowed
        by default.
    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.bds.models.BdsApiKey`
    :rtype: :class:`~oci.response.Response`
    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/bds/get_bds_api_key.py.html>`__ to see an example of how to use get_bds_api_key API.
    """
    resource_path = "/bdsInstances/{bdsInstanceId}/apiKeys/{apiKeyId}"
    method = "GET"
    operation_name = "get_bds_api_key"
    api_reference_link = "https://docs.oracle.com/iaas/api/#/en/bigdata/20190531/BdsApiKey/GetBdsApiKey"

    # Reject any keyword arguments this operation does not recognize.
    allowed_kwargs = {
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id"
    }
    unrecognized = [_key for _key in six.iterkeys(kwargs) if _key not in allowed_kwargs]
    if unrecognized:
        raise ValueError(
            "get_bds_api_key got unknown kwargs: {!r}".format(unrecognized))

    path_params = {
        "bdsInstanceId": bds_instance_id,
        "apiKeyId": api_key_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    # Every path parameter that survives the `missing` filter must be usable.
    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments common to both the retried and the direct invocation.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        response_type="BdsApiKey",
        allow_control_chars=kwargs.get('allow_control_chars'),
        operation_name=operation_name,
        api_reference_link=api_reference_link)

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def get_bds_instance(self, bds_instance_id, **kwargs):
    """
    Returns information about the Big Data Service cluster identified by the given ID.

    :param str bds_instance_id: (required)
        The OCID of the cluster.
    :param str opc_request_id: (optional)
        The client request ID for tracing.
    :param obj retry_strategy: (optional)
        A retry strategy for this specific call, overriding any client-level
        strategy. Use one of the strategies in :py:mod:`~oci.retry` (e.g.
        :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`), or
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.
        By default this operation does not retry.
    :param bool allow_control_chars: (optional)
        Whether control characters are permitted in response strings. Disallowed
        by default.
    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.bds.models.BdsInstance`
    :rtype: :class:`~oci.response.Response`
    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/bds/get_bds_instance.py.html>`__ to see an example of how to use get_bds_instance API.
    """
    resource_path = "/bdsInstances/{bdsInstanceId}"
    method = "GET"
    operation_name = "get_bds_instance"
    api_reference_link = "https://docs.oracle.com/iaas/api/#/en/bigdata/20190531/BdsInstance/GetBdsInstance"

    # Reject any keyword arguments this operation does not recognize.
    allowed_kwargs = {
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id"
    }
    unrecognized = [_key for _key in six.iterkeys(kwargs) if _key not in allowed_kwargs]
    if unrecognized:
        raise ValueError(
            "get_bds_instance got unknown kwargs: {!r}".format(unrecognized))

    path_params = {
        "bdsInstanceId": bds_instance_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    # Every path parameter that survives the `missing` filter must be usable.
    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments common to both the retried and the direct invocation.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        response_type="BdsInstance",
        allow_control_chars=kwargs.get('allow_control_chars'),
        operation_name=operation_name,
        api_reference_link=api_reference_link)

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def get_bds_metastore_configuration(self, bds_instance_id, metastore_config_id, **kwargs):
    """
    Returns the BDS Metastore configuration information for the given ID.

    :param str bds_instance_id: (required)
        The OCID of the cluster.
    :param str metastore_config_id: (required)
        The metastore configuration ID
    :param str opc_request_id: (optional)
        The client request ID for tracing.
    :param obj retry_strategy: (optional)
        A retry strategy for this specific call, overriding any client-level
        strategy. Use one of the strategies in :py:mod:`~oci.retry` (e.g.
        :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`), or
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.
        By default this operation does not retry.
    :param bool allow_control_chars: (optional)
        Whether control characters are permitted in response strings. Disallowed
        by default.
    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.bds.models.BdsMetastoreConfiguration`
    :rtype: :class:`~oci.response.Response`
    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/bds/get_bds_metastore_configuration.py.html>`__ to see an example of how to use get_bds_metastore_configuration API.
    """
    resource_path = "/bdsInstances/{bdsInstanceId}/metastoreConfigs/{metastoreConfigId}"
    method = "GET"
    operation_name = "get_bds_metastore_configuration"
    api_reference_link = "https://docs.oracle.com/iaas/api/#/en/bigdata/20190531/BdsMetastoreConfiguration/GetBdsMetastoreConfiguration"

    # Reject any keyword arguments this operation does not recognize.
    allowed_kwargs = {
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id"
    }
    unrecognized = [_key for _key in six.iterkeys(kwargs) if _key not in allowed_kwargs]
    if unrecognized:
        raise ValueError(
            "get_bds_metastore_configuration got unknown kwargs: {!r}".format(unrecognized))

    path_params = {
        "bdsInstanceId": bds_instance_id,
        "metastoreConfigId": metastore_config_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    # Every path parameter that survives the `missing` filter must be usable.
    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments common to both the retried and the direct invocation.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        response_type="BdsMetastoreConfiguration",
        allow_control_chars=kwargs.get('allow_control_chars'),
        operation_name=operation_name,
        api_reference_link=api_reference_link)

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def get_work_request(self, work_request_id, **kwargs):
    """
    Returns the status of the work request identified by the given ID.

    :param str work_request_id: (required)
        The ID of the asynchronous request.
    :param str opc_request_id: (optional)
        The client request ID for tracing.
    :param obj retry_strategy: (optional)
        A retry strategy for this specific call, overriding any client-level
        strategy. Use one of the strategies in :py:mod:`~oci.retry` (e.g.
        :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`), or
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.
        By default this operation does not retry.
    :param bool allow_control_chars: (optional)
        Whether control characters are permitted in response strings. Disallowed
        by default.
    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.bds.models.WorkRequest`
    :rtype: :class:`~oci.response.Response`
    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/bds/get_work_request.py.html>`__ to see an example of how to use get_work_request API.
    """
    resource_path = "/workRequests/{workRequestId}"
    method = "GET"
    operation_name = "get_work_request"
    api_reference_link = "https://docs.oracle.com/iaas/api/#/en/bigdata/20190531/WorkRequest/GetWorkRequest"

    # Reject any keyword arguments this operation does not recognize.
    allowed_kwargs = {
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id"
    }
    unrecognized = [_key for _key in six.iterkeys(kwargs) if _key not in allowed_kwargs]
    if unrecognized:
        raise ValueError(
            "get_work_request got unknown kwargs: {!r}".format(unrecognized))

    path_params = {
        "workRequestId": work_request_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    # Every path parameter that survives the `missing` filter must be usable.
    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments common to both the retried and the direct invocation.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        response_type="WorkRequest",
        allow_control_chars=kwargs.get('allow_control_chars'),
        operation_name=operation_name,
        api_reference_link=api_reference_link)

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def install_patch(self, bds_instance_id, install_patch_details, **kwargs):
    """
    Install the specified patch to this cluster.

    :param str bds_instance_id: (required)
        The OCID of the cluster.
    :param oci.bds.models.InstallPatchDetails install_patch_details: (required)
        Details of the patch to be installed.
    :param str opc_request_id: (optional)
        The client request ID for tracing.
    :param str if_match: (optional)
        For optimistic concurrency control. Set `if-match` to the etag from a
        previous GET or POST response; the resource is updated only if the etag
        still matches its current value.
    :param str opc_retry_token: (optional)
        A token that uniquely identifies a request so it can be retried in case
        of a timeout or server error, without risk of executing that same action
        again. Retry tokens expire after 24 hours but can be invalidated earlier
        by conflicting operations (e.g. the resource was deleted and purged, so a
        retry of the original creation request might be rejected).
    :param obj retry_strategy: (optional)
        A retry strategy for this specific call, overriding any client-level
        strategy. Use one of the strategies in :py:mod:`~oci.retry` (e.g.
        :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`), or
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.
        By default this operation does not retry.
    :param bool allow_control_chars: (optional)
        Whether control characters are permitted in response strings. Disallowed
        by default.
    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/bds/install_patch.py.html>`__ to see an example of how to use install_patch API.
    """
    resource_path = "/bdsInstances/{bdsInstanceId}/actions/installPatch"
    method = "POST"
    operation_name = "install_patch"
    api_reference_link = "https://docs.oracle.com/iaas/api/#/en/bigdata/20190531/BdsInstance/InstallPatch"

    # Reject any keyword arguments this operation does not recognize.
    allowed_kwargs = {
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "if_match",
        "opc_retry_token"
    }
    unrecognized = [_key for _key in six.iterkeys(kwargs) if _key not in allowed_kwargs]
    if unrecognized:
        raise ValueError(
            "install_patch got unknown kwargs: {!r}".format(unrecognized))

    path_params = {
        "bdsInstanceId": bds_instance_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    # Every path parameter that survives the `missing` filter must be usable.
    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments common to both the retried and the direct invocation.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=install_patch_details,
        allow_control_chars=kwargs.get('allow_control_chars'),
        operation_name=operation_name,
        api_reference_link=api_reference_link)

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            # Retried POSTs need a retry token so the action is idempotent.
            self.base_client.add_opc_retry_token_if_needed(header_params)
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(
            self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def list_auto_scaling_configurations(self, compartment_id, bds_instance_id, **kwargs):
    """
    Returns information about the autoscaling configurations for a cluster.

    :param str compartment_id: (required)
        The OCID of the compartment.

    :param str bds_instance_id: (required)
        The OCID of the cluster.

    :param str page: (optional)
        The page token representing the page at which to start retrieving results. This is usually retrieved from a previous list call.

    :param int limit: (optional)
        The maximum number of items to return.

    :param str sort_by: (optional)
        The field to sort by. Only one sort order may be provided. Default order for timeCreated is descending. Default order for displayName is ascending. If no value is specified timeCreated is default.

        Allowed values are: "timeCreated", "displayName"

    :param str sort_order: (optional)
        The sort order to use, either 'asc' or 'desc'.

        Allowed values are: "ASC", "DESC"

    :param str display_name: (optional)
        A filter to return only resources that match the entire display name given.

    :param str lifecycle_state: (optional)
        The state of the autoscale configuration.

        Allowed values are: "CREATING", "ACTIVE", "INACTIVE", "UPDATING", "DELETING", "DELETED", "FAILED"

    :param str opc_request_id: (optional)
        The client request ID for tracing.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.

        This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
        The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.

        To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.

    :param bool allow_control_chars: (optional)
        allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
        By default, the response will not allow control characters in strings

    :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.bds.models.AutoScalingConfigurationSummary`
    :rtype: :class:`~oci.response.Response`

    :example:
    Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/bds/list_auto_scaling_configurations.py.html>`__ to see an example of how to use list_auto_scaling_configurations API.
    """
    resource_path = "/bdsInstances/{bdsInstanceId}/autoScalingConfiguration"
    method = "GET"
    operation_name = "list_auto_scaling_configurations"
    api_reference_link = "https://docs.oracle.com/iaas/api/#/en/bigdata/20190531/BdsInstance/ListAutoScalingConfigurations"

    # Reject any keyword argument this operation does not understand.
    expected_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "page",
        "limit",
        "sort_by",
        "sort_order",
        "display_name",
        "lifecycle_state",
        "opc_request_id"
    ]
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if unrecognized:
        raise ValueError(
            "list_auto_scaling_configurations got unknown kwargs: {!r}".format(unrecognized))

    # Path parameters must be present and non-blank before substitution.
    path_params = {"bdsInstanceId": bds_instance_id}
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    for (param_name, param_value) in six.iteritems(path_params):
        if param_value is None or (isinstance(param_value, six.string_types) and len(param_value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(param_name))

    # Enum-valued kwargs are validated against their documented value sets,
    # in the same order the generated client checks them.
    enum_checks = [
        ("sort_by", ["timeCreated", "displayName"]),
        ("sort_order", ["ASC", "DESC"]),
        ("lifecycle_state", ["CREATING", "ACTIVE", "INACTIVE", "UPDATING", "DELETING", "DELETED", "FAILED"])
    ]
    for enum_name, allowed_values in enum_checks:
        if enum_name in kwargs and kwargs[enum_name] not in allowed_values:
            raise ValueError(
                "Invalid value for `{0}`, must be one of {1}".format(enum_name, allowed_values)
            )

    query_params = {
        "compartmentId": compartment_id,
        "page": kwargs.get("page", missing),
        "limit": kwargs.get("limit", missing),
        "sortBy": kwargs.get("sort_by", missing),
        "sortOrder": kwargs.get("sort_order", missing),
        "displayName": kwargs.get("display_name", missing),
        "lifecycleState": kwargs.get("lifecycle_state", missing)
    }
    query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Both the retrying and the plain call paths forward the same arguments;
    # header_params is shared by reference, so retry headers added below are seen.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        response_type="list[AutoScalingConfigurationSummary]",
        allow_control_chars=kwargs.get('allow_control_chars'),
        operation_name=operation_name,
        api_reference_link=api_reference_link)

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def list_bds_api_keys(self, bds_instance_id, **kwargs):
    """
    Returns a list of all API keys associated with this Big Data Service cluster.

    :param str bds_instance_id: (required)
        The OCID of the cluster.

    :param str lifecycle_state: (optional)
        The state of the API key.

        Allowed values are: "CREATING", "ACTIVE", "DELETING", "DELETED", "FAILED"

    :param str user_id: (optional)
        The OCID of the user for whom the API key belongs.

    :param str page: (optional)
        The page token representing the page at which to start retrieving results. This is usually retrieved from a previous list call.

    :param int limit: (optional)
        The maximum number of items to return.

    :param str sort_by: (optional)
        The field to sort by. Only one sort order may be provided. Default order for timeCreated is descending. Default order for displayName is ascending. If no value is specified timeCreated is default.

        Allowed values are: "timeCreated", "displayName"

    :param str sort_order: (optional)
        The sort order to use, either 'asc' or 'desc'.

        Allowed values are: "ASC", "DESC"

    :param str display_name: (optional)
        A filter to return only resources that match the entire display name given.

    :param str opc_request_id: (optional)
        The client request ID for tracing.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.

        This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
        The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.

        To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.

    :param bool allow_control_chars: (optional)
        allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
        By default, the response will not allow control characters in strings

    :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.bds.models.BdsApiKeySummary`
    :rtype: :class:`~oci.response.Response`

    :example:
    Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/bds/list_bds_api_keys.py.html>`__ to see an example of how to use list_bds_api_keys API.
    """
    resource_path = "/bdsInstances/{bdsInstanceId}/apiKeys"
    method = "GET"
    operation_name = "list_bds_api_keys"
    api_reference_link = "https://docs.oracle.com/iaas/api/#/en/bigdata/20190531/BdsApiKey/ListBdsApiKeys"

    # Reject any keyword argument this operation does not understand.
    expected_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "lifecycle_state",
        "user_id",
        "page",
        "limit",
        "sort_by",
        "sort_order",
        "display_name",
        "opc_request_id"
    ]
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if unrecognized:
        raise ValueError(
            "list_bds_api_keys got unknown kwargs: {!r}".format(unrecognized))

    # Path parameters must be present and non-blank before substitution.
    path_params = {"bdsInstanceId": bds_instance_id}
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    for (param_name, param_value) in six.iteritems(path_params):
        if param_value is None or (isinstance(param_value, six.string_types) and len(param_value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(param_name))

    # Enum-valued kwargs are validated against their documented value sets,
    # in the same order the generated client checks them.
    enum_checks = [
        ("lifecycle_state", ["CREATING", "ACTIVE", "DELETING", "DELETED", "FAILED"]),
        ("sort_by", ["timeCreated", "displayName"]),
        ("sort_order", ["ASC", "DESC"])
    ]
    for enum_name, allowed_values in enum_checks:
        if enum_name in kwargs and kwargs[enum_name] not in allowed_values:
            raise ValueError(
                "Invalid value for `{0}`, must be one of {1}".format(enum_name, allowed_values)
            )

    query_params = {
        "lifecycleState": kwargs.get("lifecycle_state", missing),
        "userId": kwargs.get("user_id", missing),
        "page": kwargs.get("page", missing),
        "limit": kwargs.get("limit", missing),
        "sortBy": kwargs.get("sort_by", missing),
        "sortOrder": kwargs.get("sort_order", missing),
        "displayName": kwargs.get("display_name", missing)
    }
    query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Both the retrying and the plain call paths forward the same arguments;
    # header_params is shared by reference, so retry headers added below are seen.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        response_type="list[BdsApiKeySummary]",
        allow_control_chars=kwargs.get('allow_control_chars'),
        operation_name=operation_name,
        api_reference_link=api_reference_link)

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def list_bds_instances(self, compartment_id, **kwargs):
    """
    Returns a list of all Big Data Service clusters in a compartment.

    :param str compartment_id: (required)
        The OCID of the compartment.

    :param str lifecycle_state: (optional)
        The state of the cluster.

        Allowed values are: "CREATING", "ACTIVE", "UPDATING", "SUSPENDING", "SUSPENDED", "RESUMING", "DELETING", "DELETED", "FAILED"

    :param str page: (optional)
        The page token representing the page at which to start retrieving results. This is usually retrieved from a previous list call.

    :param int limit: (optional)
        The maximum number of items to return.

    :param str sort_by: (optional)
        The field to sort by. Only one sort order may be provided. Default order for timeCreated is descending. Default order for displayName is ascending. If no value is specified timeCreated is default.

        Allowed values are: "timeCreated", "displayName"

    :param str sort_order: (optional)
        The sort order to use, either 'asc' or 'desc'.

        Allowed values are: "ASC", "DESC"

    :param str display_name: (optional)
        A filter to return only resources that match the entire display name given.

    :param str opc_request_id: (optional)
        The client request ID for tracing.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.

        This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
        The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.

        To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.

    :param bool allow_control_chars: (optional)
        allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
        By default, the response will not allow control characters in strings

    :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.bds.models.BdsInstanceSummary`
    :rtype: :class:`~oci.response.Response`

    :example:
    Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/bds/list_bds_instances.py.html>`__ to see an example of how to use list_bds_instances API.
    """
    resource_path = "/bdsInstances"
    method = "GET"
    operation_name = "list_bds_instances"
    api_reference_link = "https://docs.oracle.com/iaas/api/#/en/bigdata/20190531/BdsInstanceSummary/ListBdsInstances"

    # Reject any keyword argument this operation does not understand.
    expected_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "lifecycle_state",
        "page",
        "limit",
        "sort_by",
        "sort_order",
        "display_name",
        "opc_request_id"
    ]
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if unrecognized:
        raise ValueError(
            "list_bds_instances got unknown kwargs: {!r}".format(unrecognized))

    # Enum-valued kwargs are validated against their documented value sets,
    # in the same order the generated client checks them.
    enum_checks = [
        ("lifecycle_state", ["CREATING", "ACTIVE", "UPDATING", "SUSPENDING", "SUSPENDED", "RESUMING", "DELETING", "DELETED", "FAILED"]),
        ("sort_by", ["timeCreated", "displayName"]),
        ("sort_order", ["ASC", "DESC"])
    ]
    for enum_name, allowed_values in enum_checks:
        if enum_name in kwargs and kwargs[enum_name] not in allowed_values:
            raise ValueError(
                "Invalid value for `{0}`, must be one of {1}".format(enum_name, allowed_values)
            )

    query_params = {
        "compartmentId": compartment_id,
        "lifecycleState": kwargs.get("lifecycle_state", missing),
        "page": kwargs.get("page", missing),
        "limit": kwargs.get("limit", missing),
        "sortBy": kwargs.get("sort_by", missing),
        "sortOrder": kwargs.get("sort_order", missing),
        "displayName": kwargs.get("display_name", missing)
    }
    query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Collection-level endpoint: no path parameters are needed here.
    # header_params is shared by reference, so retry headers added below are seen.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        query_params=query_params,
        header_params=header_params,
        response_type="list[BdsInstanceSummary]",
        allow_control_chars=kwargs.get('allow_control_chars'),
        operation_name=operation_name,
        api_reference_link=api_reference_link)

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def list_bds_metastore_configurations(self, bds_instance_id, **kwargs):
    """
    Returns a list of metastore configurations associated with this Big Data Service cluster.

    :param str bds_instance_id: (required)
        The OCID of the cluster.

    :param str metastore_type: (optional)
        The type of the metastore in the metastore configuration

        Allowed values are: "LOCAL", "EXTERNAL"

    :param str metastore_id: (optional)
        The OCID of the Data Catalog metastore in the metastore configuration

    :param str lifecycle_state: (optional)
        The lifecycle state of the metastore in the metastore configuration

        Allowed values are: "CREATING", "ACTIVATING", "ACTIVE", "INACTIVE", "UPDATING", "FAILED", "DELETING", "DELETED"

    :param str bds_api_key_id: (optional)
        The ID of the API key that is associated with the external metastore in the metastore configuration

    :param str page: (optional)
        The page token representing the page at which to start retrieving results. This is usually retrieved from a previous list call.

    :param int limit: (optional)
        The maximum number of items to return.

    :param str sort_by: (optional)
        The field to sort by. Only one sort order may be provided. Default order for timeCreated is descending. Default order for displayName is ascending. If no value is specified timeCreated is default.

        Allowed values are: "timeCreated", "displayName"

    :param str sort_order: (optional)
        The sort order to use, either 'asc' or 'desc'.

        Allowed values are: "ASC", "DESC"

    :param str display_name: (optional)
        A filter to return only resources that match the entire display name given.

    :param str opc_request_id: (optional)
        The client request ID for tracing.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.

        This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
        The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.

        To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.

    :param bool allow_control_chars: (optional)
        allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
        By default, the response will not allow control characters in strings

    :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.bds.models.BdsMetastoreConfigurationSummary`
    :rtype: :class:`~oci.response.Response`

    :example:
    Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/bds/list_bds_metastore_configurations.py.html>`__ to see an example of how to use list_bds_metastore_configurations API.
    """
    resource_path = "/bdsInstances/{bdsInstanceId}/metastoreConfigs"
    method = "GET"
    operation_name = "list_bds_metastore_configurations"
    api_reference_link = "https://docs.oracle.com/iaas/api/#/en/bigdata/20190531/BdsMetastoreConfiguration/ListBdsMetastoreConfigurations"

    # Reject any keyword argument this operation does not understand.
    expected_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "metastore_type",
        "metastore_id",
        "lifecycle_state",
        "bds_api_key_id",
        "page",
        "limit",
        "sort_by",
        "sort_order",
        "display_name",
        "opc_request_id"
    ]
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if unrecognized:
        raise ValueError(
            "list_bds_metastore_configurations got unknown kwargs: {!r}".format(unrecognized))

    # Path parameters must be present and non-blank before substitution.
    path_params = {"bdsInstanceId": bds_instance_id}
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    for (param_name, param_value) in six.iteritems(path_params):
        if param_value is None or (isinstance(param_value, six.string_types) and len(param_value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(param_name))

    # Enum-valued kwargs are validated against their documented value sets,
    # in the same order the generated client checks them.
    enum_checks = [
        ("metastore_type", ["LOCAL", "EXTERNAL"]),
        ("lifecycle_state", ["CREATING", "ACTIVATING", "ACTIVE", "INACTIVE", "UPDATING", "FAILED", "DELETING", "DELETED"]),
        ("sort_by", ["timeCreated", "displayName"]),
        ("sort_order", ["ASC", "DESC"])
    ]
    for enum_name, allowed_values in enum_checks:
        if enum_name in kwargs and kwargs[enum_name] not in allowed_values:
            raise ValueError(
                "Invalid value for `{0}`, must be one of {1}".format(enum_name, allowed_values)
            )

    query_params = {
        "metastoreType": kwargs.get("metastore_type", missing),
        "metastoreId": kwargs.get("metastore_id", missing),
        "lifecycleState": kwargs.get("lifecycle_state", missing),
        "bdsApiKeyId": kwargs.get("bds_api_key_id", missing),
        "page": kwargs.get("page", missing),
        "limit": kwargs.get("limit", missing),
        "sortBy": kwargs.get("sort_by", missing),
        "sortOrder": kwargs.get("sort_order", missing),
        "displayName": kwargs.get("display_name", missing)
    }
    query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Both the retrying and the plain call paths forward the same arguments;
    # header_params is shared by reference, so retry headers added below are seen.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        response_type="list[BdsMetastoreConfigurationSummary]",
        allow_control_chars=kwargs.get('allow_control_chars'),
        operation_name=operation_name,
        api_reference_link=api_reference_link)

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def list_patch_histories(self, bds_instance_id, **kwargs):
    """
    List the patch history of this cluster.

    :param str bds_instance_id: (required)
        The OCID of the cluster.

    :param str opc_request_id: (optional)
        The client request ID for tracing.

    :param str lifecycle_state: (optional)
        The status of the patch.

        Allowed values are: "INSTALLING", "INSTALLED", "FAILED"

    :param str sort_by: (optional)
        The field to sort by. Only one sort order may be provided. Default order for timeCreated is descending. Default order for displayName is ascending. If no value is specified timeCreated is default.

        Allowed values are: "timeCreated", "displayName"

    :param str patch_version: (optional)
        The version of the patch

    :param str sort_order: (optional)
        The sort order to use, either 'asc' or 'desc'.

        Allowed values are: "ASC", "DESC"

    :param str page: (optional)
        The page token representing the page at which to start retrieving results. This is usually retrieved from a previous list call.

    :param int limit: (optional)
        The maximum number of items to return.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.

        This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
        The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.

        To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.

    :param bool allow_control_chars: (optional)
        allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
        By default, the response will not allow control characters in strings

    :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.bds.models.PatchHistorySummary`
    :rtype: :class:`~oci.response.Response`

    :example:
    Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/bds/list_patch_histories.py.html>`__ to see an example of how to use list_patch_histories API.
    """
    resource_path = "/bdsInstances/{bdsInstanceId}/patchHistory"
    method = "GET"
    operation_name = "list_patch_histories"
    api_reference_link = "https://docs.oracle.com/iaas/api/#/en/bigdata/20190531/BdsInstance/ListPatchHistories"

    # Reject any keyword argument this operation does not understand.
    expected_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "lifecycle_state",
        "sort_by",
        "patch_version",
        "sort_order",
        "page",
        "limit"
    ]
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if unrecognized:
        raise ValueError(
            "list_patch_histories got unknown kwargs: {!r}".format(unrecognized))

    # Path parameters must be present and non-blank before substitution.
    path_params = {"bdsInstanceId": bds_instance_id}
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    for (param_name, param_value) in six.iteritems(path_params):
        if param_value is None or (isinstance(param_value, six.string_types) and len(param_value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(param_name))

    # Enum-valued kwargs are validated against their documented value sets,
    # in the same order the generated client checks them.
    enum_checks = [
        ("lifecycle_state", ["INSTALLING", "INSTALLED", "FAILED"]),
        ("sort_by", ["timeCreated", "displayName"]),
        ("sort_order", ["ASC", "DESC"])
    ]
    for enum_name, allowed_values in enum_checks:
        if enum_name in kwargs and kwargs[enum_name] not in allowed_values:
            raise ValueError(
                "Invalid value for `{0}`, must be one of {1}".format(enum_name, allowed_values)
            )

    query_params = {
        "lifecycleState": kwargs.get("lifecycle_state", missing),
        "sortBy": kwargs.get("sort_by", missing),
        "patchVersion": kwargs.get("patch_version", missing),
        "sortOrder": kwargs.get("sort_order", missing),
        "page": kwargs.get("page", missing),
        "limit": kwargs.get("limit", missing)
    }
    query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Both the retrying and the plain call paths forward the same arguments;
    # header_params is shared by reference, so retry headers added below are seen.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        response_type="list[PatchHistorySummary]",
        allow_control_chars=kwargs.get('allow_control_chars'),
        operation_name=operation_name,
        api_reference_link=api_reference_link)

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
            retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def list_patches(self, bds_instance_id, **kwargs):
"""
List all the available patches for this cluster.
:param str bds_instance_id: (required)
The OCID of the cluster.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param str page: (optional)
The page token representing the page at which to start retrieving results. This is usually retrieved from a previous list call.
:param int limit: (optional)
The maximum number of items to return.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:param bool allow_control_chars: (optional)
allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
By default, the response will not allow control characters in strings
:return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.bds.models.PatchSummary`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/bds/list_patches.py.html>`__ to see an example of how to use list_patches API.
"""
resource_path = "/bdsInstances/{bdsInstanceId}/patches"
method = "GET"
operation_name = "list_patches"
api_reference_link = "https://docs.oracle.com/iaas/api/#/en/bigdata/20190531/BdsInstance/ListPatches"
# Don't accept unknown kwargs
expected_kwargs = [
"allow_control_chars",
"retry_strategy",
"opc_request_id",
"page",
"limit"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_patches got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"bdsInstanceId": bds_instance_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
query_params = {
"page": kwargs.get("page", missing),
"limit": kwargs.get("limit", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="list[PatchSummary]",
allow_control_chars=kwargs.get('allow_control_chars'),
operation_name=operation_name,
api_reference_link=api_reference_link)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="list[PatchSummary]",
allow_control_chars=kwargs.get('allow_control_chars'),
operation_name=operation_name,
api_reference_link=api_reference_link)
def list_work_request_errors(self, work_request_id, **kwargs):
"""
Returns a paginated list of errors for a work request identified by the given ID.
:param str work_request_id: (required)
The ID of the asynchronous request.
:param str page: (optional)
The page token representing the page at which to start retrieving results. This is usually retrieved from a previous list call.
:param int limit: (optional)
The maximum number of items to return.
:param str sort_by: (optional)
The field to sort by. Only one sort order may be provided. Default order for timeCreated is descending. Default order for displayName is ascending. If no value is specified timeCreated is default.
Allowed values are: "timeCreated", "displayName"
:param str sort_order: (optional)
The sort order to use, either 'asc' or 'desc'.
Allowed values are: "ASC", "DESC"
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:param bool allow_control_chars: (optional)
allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
By default, the response will not allow control characters in strings
:return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.bds.models.WorkRequestError`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/bds/list_work_request_errors.py.html>`__ to see an example of how to use list_work_request_errors API.
"""
resource_path = "/workRequests/{workRequestId}/errors"
method = "GET"
operation_name = "list_work_request_errors"
api_reference_link = "https://docs.oracle.com/iaas/api/#/en/bigdata/20190531/WorkRequestError/ListWorkRequestErrors"
# Don't accept unknown kwargs
expected_kwargs = [
"allow_control_chars",
"retry_strategy",
"page",
"limit",
"sort_by",
"sort_order",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_work_request_errors got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workRequestId": work_request_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'sort_by' in kwargs:
sort_by_allowed_values = ["timeCreated", "displayName"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
query_params = {
"page": kwargs.get("page", missing),
"limit": kwargs.get("limit", missing),
"sortBy": kwargs.get("sort_by", missing),
"sortOrder": kwargs.get("sort_order", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="list[WorkRequestError]",
allow_control_chars=kwargs.get('allow_control_chars'),
operation_name=operation_name,
api_reference_link=api_reference_link)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="list[WorkRequestError]",
allow_control_chars=kwargs.get('allow_control_chars'),
operation_name=operation_name,
api_reference_link=api_reference_link)
def list_work_request_logs(self, work_request_id, **kwargs):
"""
Returns a paginated list of logs for a given work request.
:param str work_request_id: (required)
The ID of the asynchronous request.
:param str page: (optional)
The page token representing the page at which to start retrieving results. This is usually retrieved from a previous list call.
:param int limit: (optional)
The maximum number of items to return.
:param str sort_by: (optional)
The field to sort by. Only one sort order may be provided. Default order for timeCreated is descending. Default order for displayName is ascending. If no value is specified timeCreated is default.
Allowed values are: "timeCreated", "displayName"
:param str sort_order: (optional)
The sort order to use, either 'asc' or 'desc'.
Allowed values are: "ASC", "DESC"
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:param bool allow_control_chars: (optional)
allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
By default, the response will not allow control characters in strings
:return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.bds.models.WorkRequestLogEntry`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/bds/list_work_request_logs.py.html>`__ to see an example of how to use list_work_request_logs API.
"""
resource_path = "/workRequests/{workRequestId}/logs"
method = "GET"
operation_name = "list_work_request_logs"
api_reference_link = "https://docs.oracle.com/iaas/api/#/en/bigdata/20190531/WorkRequestLogEntry/ListWorkRequestLogs"
# Don't accept unknown kwargs
expected_kwargs = [
"allow_control_chars",
"retry_strategy",
"page",
"limit",
"sort_by",
"sort_order",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_work_request_logs got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workRequestId": work_request_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'sort_by' in kwargs:
sort_by_allowed_values = ["timeCreated", "displayName"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
query_params = {
"page": kwargs.get("page", missing),
"limit": kwargs.get("limit", missing),
"sortBy": kwargs.get("sort_by", missing),
"sortOrder": kwargs.get("sort_order", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="list[WorkRequestLogEntry]",
allow_control_chars=kwargs.get('allow_control_chars'),
operation_name=operation_name,
api_reference_link=api_reference_link)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="list[WorkRequestLogEntry]",
allow_control_chars=kwargs.get('allow_control_chars'),
operation_name=operation_name,
api_reference_link=api_reference_link)
def list_work_requests(self, compartment_id, **kwargs):
"""
Lists the work requests in a compartment.
:param str compartment_id: (required)
The OCID of the compartment.
:param str resource_id: (optional)
The OCID of the resource.
:param str page: (optional)
The page token representing the page at which to start retrieving results. This is usually retrieved from a previous list call.
:param int limit: (optional)
The maximum number of items to return.
:param str sort_by: (optional)
The field to sort by. Only one sort order may be provided. Default order for timeCreated is descending. Default order for displayName is ascending. If no value is specified timeCreated is default.
Allowed values are: "timeCreated", "displayName"
:param str sort_order: (optional)
The sort order to use, either 'asc' or 'desc'.
Allowed values are: "ASC", "DESC"
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:param bool allow_control_chars: (optional)
allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
By default, the response will not allow control characters in strings
:return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.bds.models.WorkRequest`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/bds/list_work_requests.py.html>`__ to see an example of how to use list_work_requests API.
"""
resource_path = "/workRequests"
method = "GET"
operation_name = "list_work_requests"
api_reference_link = "https://docs.oracle.com/iaas/api/#/en/bigdata/20190531/WorkRequest/ListWorkRequests"
# Don't accept unknown kwargs
expected_kwargs = [
"allow_control_chars",
"retry_strategy",
"resource_id",
"page",
"limit",
"sort_by",
"sort_order",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_work_requests got unknown kwargs: {!r}".format(extra_kwargs))
if 'sort_by' in kwargs:
sort_by_allowed_values = ["timeCreated", "displayName"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
query_params = {
"compartmentId": compartment_id,
"resourceId": kwargs.get("resource_id", missing),
"page": kwargs.get("page", missing),
"limit": kwargs.get("limit", missing),
"sortBy": kwargs.get("sort_by", missing),
"sortOrder": kwargs.get("sort_order", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[WorkRequest]",
allow_control_chars=kwargs.get('allow_control_chars'),
operation_name=operation_name,
api_reference_link=api_reference_link)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[WorkRequest]",
allow_control_chars=kwargs.get('allow_control_chars'),
operation_name=operation_name,
api_reference_link=api_reference_link)
def remove_auto_scaling_configuration(self, bds_instance_id, auto_scaling_configuration_id, remove_auto_scaling_configuration_details, **kwargs):
"""
Deletes an autoscale configuration.
:param str bds_instance_id: (required)
The OCID of the cluster.
:param str auto_scaling_configuration_id: (required)
Unique Oracle-assigned identifier of the autoscale configuration.
:param oci.bds.models.RemoveAutoScalingConfigurationDetails remove_auto_scaling_configuration_details: (required)
Details for the autoscale configuration
:param str opc_request_id: (optional)
The client request ID for tracing.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call
for a resource, set the `if-match` parameter to the value of the
etag from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the etag you
provide matches the resource's current etag value.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or
server error, without risk of executing that same action again. Retry tokens expire after 24
hours but can be invalidated before then due to conflicting operations. For example, if a resource
has been deleted and purged from the system, then a retry of the original creation request
might be rejected.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:param bool allow_control_chars: (optional)
allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
By default, the response will not allow control characters in strings
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/bds/remove_auto_scaling_configuration.py.html>`__ to see an example of how to use remove_auto_scaling_configuration API.
"""
resource_path = "/bdsInstances/{bdsInstanceId}/autoScalingConfiguration/{autoScalingConfigurationId}/actions/remove"
method = "POST"
operation_name = "remove_auto_scaling_configuration"
api_reference_link = "https://docs.oracle.com/iaas/api/#/en/bigdata/20190531/BdsInstance/RemoveAutoScalingConfiguration"
# Don't accept unknown kwargs
expected_kwargs = [
"allow_control_chars",
"retry_strategy",
"opc_request_id",
"if_match",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"remove_auto_scaling_configuration got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"bdsInstanceId": bds_instance_id,
"autoScalingConfigurationId": auto_scaling_configuration_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"if-match": kwargs.get("if_match", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=remove_auto_scaling_configuration_details,
allow_control_chars=kwargs.get('allow_control_chars'),
operation_name=operation_name,
api_reference_link=api_reference_link)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=remove_auto_scaling_configuration_details,
allow_control_chars=kwargs.get('allow_control_chars'),
operation_name=operation_name,
api_reference_link=api_reference_link)
def remove_cloud_sql(self, bds_instance_id, remove_cloud_sql_details, **kwargs):
"""
Removes Cloud SQL from the cluster.
:param str bds_instance_id: (required)
The OCID of the cluster.
:param oci.bds.models.RemoveCloudSqlDetails remove_cloud_sql_details: (required)
Details for the Cloud SQL capability
:param str opc_request_id: (optional)
The client request ID for tracing.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call
for a resource, set the `if-match` parameter to the value of the
etag from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the etag you
provide matches the resource's current etag value.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or
server error, without risk of executing that same action again. Retry tokens expire after 24
hours but can be invalidated before then due to conflicting operations. For example, if a resource
has been deleted and purged from the system, then a retry of the original creation request
might be rejected.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:param bool allow_control_chars: (optional)
allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
By default, the response will not allow control characters in strings
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/bds/remove_cloud_sql.py.html>`__ to see an example of how to use remove_cloud_sql API.
"""
resource_path = "/bdsInstances/{bdsInstanceId}/actions/removeCloudSql"
method = "POST"
operation_name = "remove_cloud_sql"
api_reference_link = "https://docs.oracle.com/iaas/api/#/en/bigdata/20190531/BdsInstance/RemoveCloudSql"
# Don't accept unknown kwargs
expected_kwargs = [
"allow_control_chars",
"retry_strategy",
"opc_request_id",
"if_match",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"remove_cloud_sql got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"bdsInstanceId": bds_instance_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"if-match": kwargs.get("if_match", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=remove_cloud_sql_details,
allow_control_chars=kwargs.get('allow_control_chars'),
operation_name=operation_name,
api_reference_link=api_reference_link)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=remove_cloud_sql_details,
allow_control_chars=kwargs.get('allow_control_chars'),
operation_name=operation_name,
api_reference_link=api_reference_link)
def remove_node(self, bds_instance_id, remove_node_details, **kwargs):
"""
Remove a single node of a Big Data Service cluster
:param str bds_instance_id: (required)
The OCID of the cluster.
:param oci.bds.models.RemoveNodeDetails remove_node_details: (required)
Details for the node to be removed.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param str if_match: (optional)
For optimistic concurrency control. In the PUT or DELETE call
for a resource, set the `if-match` parameter to the value of the
etag from a previous GET or POST response for that resource.
The resource will be updated or deleted only if the etag you
provide matches the resource's current etag value.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:param bool allow_control_chars: (optional)
allow_control_chars is a boolean to indicate whether or not this request should allow control characters in the response object.
By default, the response will not allow control characters in strings
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/bds/remove_node.py.html>`__ to see an example of how to use remove_node API.
"""
resource_path = "/bdsInstances/{bdsInstanceId}/actions/removeNode"
method = "POST"
operation_name = "remove_node"
api_reference_link = "https://docs.oracle.com/iaas/api/#/en/bigdata/20190531/BdsInstance/RemoveNode"
# Don't accept unknown kwargs
expected_kwargs = [
"allow_control_chars",
"retry_strategy",
"opc_request_id",
"if_match"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"remove_node got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"bdsInstanceId": bds_instance_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"if-match": kwargs.get("if_match", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=remove_node_details,
allow_control_chars=kwargs.get('allow_control_chars'),
operation_name=operation_name,
api_reference_link=api_reference_link)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=remove_node_details,
allow_control_chars=kwargs.get('allow_control_chars'),
operation_name=operation_name,
api_reference_link=api_reference_link)
def restart_node(self, bds_instance_id, restart_node_details, **kwargs):
    """
    Restarts a single node of a Big Data Service cluster.

    :param str bds_instance_id: (required) The OCID of the cluster.
    :param oci.bds.models.RestartNodeDetails restart_node_details: (required)
        Details for restarting the node.
    :param str opc_request_id: (optional) Client request ID for tracing.
    :param str if_match: (optional) Etag for optimistic concurrency control;
        the action runs only if it matches the resource's current etag.
    :param str opc_retry_token: (optional) Token that makes the request safely
        retryable without re-executing the action.
    :param obj retry_strategy: (optional) Per-operation retry strategy that
        overrides the client-level one; pass an instance of
        :py:class:`~oci.retry.NoneRetryStrategy` to disable retries.
    :param bool allow_control_chars: (optional) Whether control characters are
        allowed in the response body (disallowed by default).
    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/bdsInstances/{bdsInstanceId}/actions/restartNode"
    method = "POST"
    operation_name = "restart_node"
    api_reference_link = "https://docs.oracle.com/iaas/api/#/en/bigdata/20190531/BdsInstance/RestartNode"

    # Reject any kwargs this operation does not understand.
    expected_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "if_match",
        "opc_retry_token"
    ]
    extra_kwargs = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "restart_node got unknown kwargs: {!r}".format(extra_kwargs))

    # Path parameters must be present and non-blank.
    path_params = {
        "bdsInstanceId": bds_instance_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    for name, value in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Assemble headers, skipping optional headers the caller did not supply.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }
    for kw_name, header_name in (
            ("opc_request_id", "opc-request-id"),
            ("if_match", "if-match"),
            ("opc_retry_token", "opc-retry-token")):
        value = kwargs.get(kw_name, missing)
        if value is not missing and value is not None:
            header_params[header_name] = value

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Keyword arguments shared by the retrying and the direct call paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=restart_node_details,
        allow_control_chars=kwargs.get('allow_control_chars'),
        operation_name=operation_name,
        api_reference_link=api_reference_link)

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_retry_token_if_needed(header_params)
            self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def test_bds_metastore_configuration(self, bds_instance_id, metastore_config_id, test_bds_metastore_configuration_details, **kwargs):
    """
    Test the specified metastore configuration.

    :param str bds_instance_id: (required) The OCID of the cluster.
    :param str metastore_config_id: (required) The metastore configuration ID.
    :param oci.bds.models.TestBdsMetastoreConfigurationDetails test_bds_metastore_configuration_details: (required)
        Request body for testing the BDS metastore configuration.
    :param str opc_request_id: (optional) Client request ID for tracing.
    :param str if_match: (optional) Etag for optimistic concurrency control;
        the action runs only if it matches the resource's current etag.
    :param obj retry_strategy: (optional) Per-operation retry strategy that
        overrides the client-level one; pass an instance of
        :py:class:`~oci.retry.NoneRetryStrategy` to disable retries.
    :param bool allow_control_chars: (optional) Whether control characters are
        allowed in the response body (disallowed by default).
    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/bdsInstances/{bdsInstanceId}/metastoreConfigs/{metastoreConfigId}/actions/test"
    method = "POST"
    operation_name = "test_bds_metastore_configuration"
    api_reference_link = "https://docs.oracle.com/iaas/api/#/en/bigdata/20190531/BdsMetastoreConfiguration/TestBdsMetastoreConfiguration"

    # Reject any kwargs this operation does not understand.
    expected_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "if_match"
    ]
    extra_kwargs = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "test_bds_metastore_configuration got unknown kwargs: {!r}".format(extra_kwargs))

    # Path parameters must be present and non-blank.
    path_params = {
        "bdsInstanceId": bds_instance_id,
        "metastoreConfigId": metastore_config_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    for name, value in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Assemble headers, skipping optional headers the caller did not supply.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }
    for kw_name, header_name in (
            ("opc_request_id", "opc-request-id"),
            ("if_match", "if-match")):
        value = kwargs.get(kw_name, missing)
        if value is not missing and value is not None:
            header_params[header_name] = value

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Keyword arguments shared by the retrying and the direct call paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=test_bds_metastore_configuration_details,
        allow_control_chars=kwargs.get('allow_control_chars'),
        operation_name=operation_name,
        api_reference_link=api_reference_link)

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def test_bds_object_storage_connection(self, bds_instance_id, api_key_id, test_bds_object_storage_connection_details, **kwargs):
    """
    Test access to the specified Object Storage bucket using the API key.

    :param str bds_instance_id: (required) The OCID of the cluster.
    :param str api_key_id: (required) The API key identifier.
    :param oci.bds.models.TestBdsObjectStorageConnectionDetails test_bds_object_storage_connection_details: (required)
        Parameters required to validate bucket access using the API key.
    :param str opc_request_id: (optional) Client request ID for tracing.
    :param obj retry_strategy: (optional) Per-operation retry strategy that
        overrides the client-level one; pass an instance of
        :py:class:`~oci.retry.NoneRetryStrategy` to disable retries.
    :param bool allow_control_chars: (optional) Whether control characters are
        allowed in the response body (disallowed by default).
    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/bdsInstances/{bdsInstanceId}/apiKeys/{apiKeyId}/actions/testObjectStorageConnection"
    method = "POST"
    operation_name = "test_bds_object_storage_connection"
    api_reference_link = "https://docs.oracle.com/iaas/api/#/en/bigdata/20190531/BdsApiKey/TestBdsObjectStorageConnection"

    # Reject any kwargs this operation does not understand.
    expected_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id"
    ]
    extra_kwargs = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "test_bds_object_storage_connection got unknown kwargs: {!r}".format(extra_kwargs))

    # Path parameters must be present and non-blank.
    path_params = {
        "bdsInstanceId": bds_instance_id,
        "apiKeyId": api_key_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    for name, value in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Assemble headers, skipping optional headers the caller did not supply.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }
    for kw_name, header_name in (("opc_request_id", "opc-request-id"),):
        value = kwargs.get(kw_name, missing)
        if value is not missing and value is not None:
            header_params[header_name] = value

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Keyword arguments shared by the retrying and the direct call paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=test_bds_object_storage_connection_details,
        allow_control_chars=kwargs.get('allow_control_chars'),
        operation_name=operation_name,
        api_reference_link=api_reference_link)

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def update_auto_scaling_configuration(self, bds_instance_id, auto_scaling_configuration_id, update_auto_scaling_configuration_details, **kwargs):
    """
    Updates fields on an autoscale configuration, including the name, the
    threshold value, and whether the autoscale configuration is enabled.

    :param str bds_instance_id: (required) The OCID of the cluster.
    :param str auto_scaling_configuration_id: (required) Unique Oracle-assigned
        identifier of the autoscale configuration.
    :param oci.bds.models.UpdateAutoScalingConfigurationDetails update_auto_scaling_configuration_details: (required)
        Details for updating the autoscaling configuration.
    :param str opc_request_id: (optional) Client request ID for tracing.
    :param str if_match: (optional) Etag for optimistic concurrency control;
        the update runs only if it matches the resource's current etag.
    :param str opc_retry_token: (optional) Token that makes the request safely
        retryable without re-executing the action.
    :param obj retry_strategy: (optional) Per-operation retry strategy that
        overrides the client-level one; pass an instance of
        :py:class:`~oci.retry.NoneRetryStrategy` to disable retries.
    :param bool allow_control_chars: (optional) Whether control characters are
        allowed in the response body (disallowed by default).
    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/bdsInstances/{bdsInstanceId}/autoScalingConfiguration/{autoScalingConfigurationId}"
    method = "PUT"
    operation_name = "update_auto_scaling_configuration"
    api_reference_link = "https://docs.oracle.com/iaas/api/#/en/bigdata/20190531/BdsInstance/UpdateAutoScalingConfiguration"

    # Reject any kwargs this operation does not understand.
    expected_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "if_match",
        "opc_retry_token"
    ]
    extra_kwargs = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "update_auto_scaling_configuration got unknown kwargs: {!r}".format(extra_kwargs))

    # Path parameters must be present and non-blank.
    path_params = {
        "bdsInstanceId": bds_instance_id,
        "autoScalingConfigurationId": auto_scaling_configuration_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    for name, value in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Assemble headers, skipping optional headers the caller did not supply.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }
    for kw_name, header_name in (
            ("opc_request_id", "opc-request-id"),
            ("if_match", "if-match"),
            ("opc_retry_token", "opc-retry-token")):
        value = kwargs.get(kw_name, missing)
        if value is not missing and value is not None:
            header_params[header_name] = value

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Keyword arguments shared by the retrying and the direct call paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=update_auto_scaling_configuration_details,
        allow_control_chars=kwargs.get('allow_control_chars'),
        operation_name=operation_name,
        api_reference_link=api_reference_link)

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_retry_token_if_needed(header_params)
            self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def update_bds_instance(self, bds_instance_id, update_bds_instance_details, **kwargs):
    """
    Updates the Big Data Service cluster identified by the given ID.

    :param str bds_instance_id: (required) The OCID of the cluster.
    :param oci.bds.models.UpdateBdsInstanceDetails update_bds_instance_details: (required)
        Details for the cluster to be updated.
    :param str if_match: (optional) Etag for optimistic concurrency control;
        the update runs only if it matches the resource's current etag.
    :param str opc_request_id: (optional) Client request ID for tracing.
    :param obj retry_strategy: (optional) Per-operation retry strategy that
        overrides the client-level one; pass an instance of
        :py:class:`~oci.retry.NoneRetryStrategy` to disable retries.
    :param bool allow_control_chars: (optional) Whether control characters are
        allowed in the response body (disallowed by default).
    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/bdsInstances/{bdsInstanceId}"
    method = "PUT"
    operation_name = "update_bds_instance"
    api_reference_link = "https://docs.oracle.com/iaas/api/#/en/bigdata/20190531/BdsInstance/UpdateBdsInstance"

    # Reject any kwargs this operation does not understand.
    expected_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "if_match",
        "opc_request_id"
    ]
    extra_kwargs = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "update_bds_instance got unknown kwargs: {!r}".format(extra_kwargs))

    # Path parameters must be present and non-blank.
    path_params = {
        "bdsInstanceId": bds_instance_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    for name, value in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Assemble headers, skipping optional headers the caller did not supply.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }
    for kw_name, header_name in (
            ("if_match", "if-match"),
            ("opc_request_id", "opc-request-id")):
        value = kwargs.get(kw_name, missing)
        if value is not missing and value is not None:
            header_params[header_name] = value

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Keyword arguments shared by the retrying and the direct call paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=update_bds_instance_details,
        allow_control_chars=kwargs.get('allow_control_chars'),
        operation_name=operation_name,
        api_reference_link=api_reference_link)

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def update_bds_metastore_configuration(self, bds_instance_id, metastore_config_id, update_bds_metastore_configuration_details, **kwargs):
    """
    Update the BDS metastore configuration represented by the provided ID.

    :param str bds_instance_id: (required) The OCID of the cluster.
    :param str metastore_config_id: (required) The metastore configuration ID.
    :param oci.bds.models.UpdateBdsMetastoreConfigurationDetails update_bds_metastore_configuration_details: (required)
        Request body for updating the BDS metastore configuration.
    :param str opc_request_id: (optional) Client request ID for tracing.
    :param str if_match: (optional) Etag for optimistic concurrency control;
        the update runs only if it matches the resource's current etag.
    :param obj retry_strategy: (optional) Per-operation retry strategy that
        overrides the client-level one; pass an instance of
        :py:class:`~oci.retry.NoneRetryStrategy` to disable retries.
    :param bool allow_control_chars: (optional) Whether control characters are
        allowed in the response body (disallowed by default).
    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/bdsInstances/{bdsInstanceId}/metastoreConfigs/{metastoreConfigId}"
    method = "PUT"
    operation_name = "update_bds_metastore_configuration"
    api_reference_link = "https://docs.oracle.com/iaas/api/#/en/bigdata/20190531/BdsMetastoreConfiguration/UpdateBdsMetastoreConfiguration"

    # Reject any kwargs this operation does not understand.
    expected_kwargs = [
        "allow_control_chars",
        "retry_strategy",
        "opc_request_id",
        "if_match"
    ]
    extra_kwargs = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "update_bds_metastore_configuration got unknown kwargs: {!r}".format(extra_kwargs))

    # Path parameters must be present and non-blank.
    path_params = {
        "bdsInstanceId": bds_instance_id,
        "metastoreConfigId": metastore_config_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    for name, value in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Assemble headers, skipping optional headers the caller did not supply.
    header_params = {
        "accept": "application/json",
        "content-type": "application/json"
    }
    for kw_name, header_name in (
            ("opc_request_id", "opc-request-id"),
            ("if_match", "if-match")):
        value = kwargs.get(kw_name, missing)
        if value is not missing and value is not None:
            header_params[header_name] = value

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Keyword arguments shared by the retrying and the direct call paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=update_bds_metastore_configuration_details,
        allow_control_chars=kwargs.get('allow_control_chars'),
        operation_name=operation_name,
        api_reference_link=api_reference_link)

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
| 50.24926
| 327
| 0.65433
| 27,547
| 220,745
| 5.036265
| 0.022689
| 0.056223
| 0.03235
| 0.005752
| 0.933859
| 0.919083
| 0.908523
| 0.899361
| 0.894128
| 0.889364
| 0
| 0.002578
| 0.268917
| 220,745
| 4,392
| 328
| 50.260701
| 0.857079
| 0.389766
| 0
| 0.831196
| 0
| 0.015308
| 0.178749
| 0.027952
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015722
| false
| 0.000414
| 0.003724
| 0
| 0.050476
| 0.000414
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1c9118a9b7aed51048b33cf956b541d58c040e05
| 2,567
|
py
|
Python
|
app/_test/suite/ext.action.asset/test.py
|
ewie/gbd-websuite
|
6f2814c7bb64d11cb5a0deec712df751718fb3e1
|
[
"Apache-2.0"
] | null | null | null |
app/_test/suite/ext.action.asset/test.py
|
ewie/gbd-websuite
|
6f2814c7bb64d11cb5a0deec712df751718fb3e1
|
[
"Apache-2.0"
] | null | null | null |
app/_test/suite/ext.action.asset/test.py
|
ewie/gbd-websuite
|
6f2814c7bb64d11cb5a0deec712df751718fb3e1
|
[
"Apache-2.0"
] | null | null | null |
import _test.util as u
base = '_/cmd/assetHttpGetPath/'
content = '0123456789\n'
def test_std_mime():
    """With the standard mime whitelist, document types are served and .exe is rejected."""
    expected = {'x.txt': 200, 'x.pdf': 200, 'x.png': 200, 'x.exe': 404}
    for filename, status in expected.items():
        assert u.req(base + 'projectUid/allow_std_mime/path/' + filename).status_code == status
def test_allow_mime():
    """When only PDF is whitelisted, every other extension returns 404."""
    expected = {'x.txt': 404, 'x.pdf': 200, 'x.png': 404, 'x.exe': 404}
    for filename, status in expected.items():
        assert u.req(base + 'projectUid/allow_only_pdf/path/' + filename).status_code == status
def test_deny_mime():
    """When PDF is blacklisted, PDF returns 404 while other std types are served (.exe still blocked)."""
    expected = {'x.txt': 200, 'x.pdf': 404, 'x.png': 200, 'x.exe': 404}
    for filename, status in expected.items():
        assert u.req(base + 'projectUid/allow_all_but_pdf/path/' + filename).status_code == status
def test_content():
    """Served asset bodies carry the extension prefix followed by the fixture content."""
    for ext in ('txt', 'pdf', 'png'):
        body = u.req(base + f'projectUid/allow_std_mime/path/x.{ext}').text
        assert body == f'{ext}:{content}'
def test_asset_from_subdir():
    """An asset in a subdirectory is reachable via the `path` query parameter."""
    resp = u.req(base + 'projectUid/allow_std_mime?path=subdir/y.xml')
    assert resp.status_code == 200
    assert resp.text == f'xml:{content}'
def test_dotted_path_not_allowed():
    """Paths containing dot segments (traversal attempts) are rejected with 404."""
    for dotted in ('./x.txt', 'subdir/../x.txt'):
        assert u.req(base + 'projectUid/allow_std_mime?path=' + dotted).status_code == 404
def test_download_asset():
    """The download endpoint serves the asset as an attachment with its filename."""
    resp = u.req('_/cmd/assetHttpGetDownload/projectUid/allow_std_mime/path/x.txt')
    assert resp.status_code == 200
    assert resp.headers['content-disposition'] == 'attachment; filename="x.txt"'
    assert resp.text == f'txt:{content}'
def test_download_asset_from_subdir():
    """Downloading from a subdirectory keeps only the basename in the disposition header."""
    resp = u.req('_/cmd/assetHttpGetDownload/projectUid/allow_std_mime?path=subdir/y.xml')
    assert resp.status_code == 200
    assert resp.headers['content-disposition'] == 'attachment; filename="y.xml"'
    assert resp.text == f'xml:{content}'
def test_web_dir():
    """Static web-dir pages are served by URL, including the root index."""
    pages = {'/': 'index.html', '/y.html': 'y.html', '/subdir/z.html': 'z.html'}
    for url, page in pages.items():
        assert u.req(url).text == f'{page}:{content}'
| 40.746032
| 92
| 0.703935
| 414
| 2,567
| 4.154589
| 0.130435
| 0.053488
| 0.116279
| 0.188372
| 0.85
| 0.811628
| 0.790116
| 0.780814
| 0.737791
| 0.688953
| 0
| 0.027269
| 0.128555
| 2,567
| 62
| 93
| 41.403226
| 0.741618
| 0
| 0
| 0.116279
| 0
| 0
| 0.425789
| 0.325282
| 0
| 0
| 0
| 0
| 0.651163
| 1
| 0.209302
| false
| 0
| 0.023256
| 0
| 0.232558
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
98e7bcfc1df331f5eaddd2888a8d96fe221b8f09
| 2,029
|
py
|
Python
|
tests/test_change_annotation.py
|
biosustain/gnomic
|
a6e6bbfdd6b42e888a3d1c361847ae7bb87c766e
|
[
"Apache-2.0"
] | 9
|
2015-07-13T14:15:11.000Z
|
2020-11-20T18:42:08.000Z
|
tests/test_change_annotation.py
|
biosustain/gnomic
|
a6e6bbfdd6b42e888a3d1c361847ae7bb87c766e
|
[
"Apache-2.0"
] | 33
|
2015-06-19T08:47:19.000Z
|
2017-09-04T11:30:39.000Z
|
tests/test_change_annotation.py
|
biosustain/gnomic
|
a6e6bbfdd6b42e888a3d1c361847ae7bb87c766e
|
[
"Apache-2.0"
] | 4
|
2015-10-15T19:10:54.000Z
|
2020-01-22T09:53:18.000Z
|
from gnomic.genotype import change_annotation
from gnomic.types import Feature as F, CompositeAnnotation
def test_replace_feature_in_composite_annotation():
    """Removing or replacing members of a CompositeAnnotation."""
    # Deleting a present feature (replacement None) drops it; deleting an
    # absent one leaves the annotation unchanged.
    assert change_annotation(CompositeAnnotation(F('a'), F('b')), F('a'), None) == CompositeAnnotation(F('b'))
    assert change_annotation(CompositeAnnotation(F('b')), F('a'), None) == CompositeAnnotation(F('b'))
    # A fusion member can be replaced wholesale by a single feature ...
    replaced = change_annotation(CompositeAnnotation(F('a') ** F('b'), F('c')),
                                 F('a') ** F('b'), F('d'))
    assert replaced == CompositeAnnotation(F('d'), F('c'))
    # ... and a replacement equal to an existing member collapses to one copy.
    collapsed = change_annotation(CompositeAnnotation(F('a') ** F('b'), F('c')),
                                  F('a') ** F('b'), F('c'))
    assert collapsed == CompositeAnnotation(F('c'))
def test_replace_feature_in_fusion():
    """Deleting or substituting a single feature inside a fusion."""
    # With no replacement the feature is removed and the fusion collapses.
    assert change_annotation(F('a') ** F('b'), F('a')) == F('b')
    assert change_annotation(F('a') ** F('b') ** F('c'), F('b')) == F('a') ** F('c')
    # A replacement feature (or fusion) is spliced in at the same position.
    assert change_annotation(F('a') ** F('b') ** F('c'), F('b'), F('x')) == F('a') ** F('x') ** F('c')
    expected = F('a') ** F('x') ** F('y') ** F('c')
    assert change_annotation(F('a') ** F('b') ** F('c'), F('b'), F('x') ** F('y')) == expected
def test_remove_fusion_in_fusion():
    """Deleting a multi-feature sub-fusion from a larger fusion."""
    quad = F('a') ** F('b') ** F('c') ** F('d')
    assert change_annotation(quad, F('b') ** F('c')) == F('a') ** F('d')
    triple = F('a') ** F('b') ** F('c')
    assert change_annotation(triple, F('a') ** F('b')) == F('c')
def test_replace_fusion_in_fusion():
    """Substituting a sub-fusion with a single feature or another fusion."""
    result = change_annotation(F('a') ** F('b') ** F('c') ** F('d'),
                               F('b') ** F('c'), F('x'))
    assert result == F('a') ** F('x') ** F('d')
    result = change_annotation(F('a') ** F('b') ** F('c'),
                               F('a') ** F('b'), F('x') ** F('y'))
    assert result == F('x') ** F('y') ** F('c')
def test_insert_feature_in_fusion():
    """Inserting (old target None) keeps the fusion and adds the new feature."""
    inserted = change_annotation(F('a') ** F('b'), None, F('x'))
    assert inserted == CompositeAnnotation(F('a') ** F('b'), F('x'))
| 47.186047
| 110
| 0.457368
| 276
| 2,029
| 3.235507
| 0.097826
| 0.058231
| 0.077268
| 0.080627
| 0.81187
| 0.745801
| 0.716685
| 0.701008
| 0.671893
| 0.62374
| 0
| 0
| 0.269098
| 2,029
| 42
| 111
| 48.309524
| 0.602158
| 0
| 0
| 0.206897
| 0
| 0
| 0.044357
| 0
| 0
| 0
| 0
| 0
| 0.448276
| 1
| 0.172414
| true
| 0
| 0.068966
| 0
| 0.241379
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c72a4d23772d34218a51bb4ae2af844f0c954289
| 12,421
|
py
|
Python
|
tests/integration/taskrouter/v1/workspace/test_task_channel.py
|
BrimmingDev/twilio-python
|
3226b5fed92b3c2ce64f03e6b19fc4792ef7647f
|
[
"MIT"
] | 1,362
|
2015-01-04T10:25:18.000Z
|
2022-03-24T10:07:08.000Z
|
tests/integration/taskrouter/v1/workspace/test_task_channel.py
|
BrimmingDev/twilio-python
|
3226b5fed92b3c2ce64f03e6b19fc4792ef7647f
|
[
"MIT"
] | 299
|
2015-01-30T09:52:39.000Z
|
2022-03-31T23:03:02.000Z
|
tests/integration/taskrouter/v1/workspace/test_task_channel.py
|
BrimmingDev/twilio-python
|
3226b5fed92b3c2ce64f03e6b19fc4792ef7647f
|
[
"MIT"
] | 622
|
2015-01-03T04:43:09.000Z
|
2022-03-29T14:11:00.000Z
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class TaskChannelTestCase(IntegrationTestCase):
    """Generated integration tests for the TaskRouter v1 TaskChannel resource.

    Every test mocks the HTTP layer ("holodeck") and then either
    (a) mocks a 500 so the call raises, and verifies the exact request the
        client library emitted, or
    (b) mocks a canned 2xx JSON payload and verifies the client parses it
        into a non-None resource object.
    NOTE: this file is machine-generated (see header); the URL and JSON
    literals are fixtures and must not be edited by hand.
    """

    def test_fetch_request(self):
        # Verify the GET request emitted by .fetch() on a task channel.
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.taskrouter.v1.workspaces("WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                .task_channels("TCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
        self.holodeck.assert_has_request(Request(
            'get',
            'https://taskrouter.twilio.com/v1/Workspaces/WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/TaskChannels/TCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
        ))

    def test_fetch_sid_response(self):
        # Canned fetch-by-sid payload is parsed into a resource instance.
        self.holodeck.mock(Response(
            200,
            '''
            {
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "date_created": "2016-04-14T17:35:54Z",
                "date_updated": "2016-04-14T17:35:54Z",
                "friendly_name": "Default",
                "sid": "TCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "unique_name": "default",
                "url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/TaskChannels/TCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "workspace_sid": "WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "channel_optimized_routing": true,
                "links": {
                    "workspace": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
                }
            }
            '''
        ))
        actual = self.client.taskrouter.v1.workspaces("WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
            .task_channels("TCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
        self.assertIsNotNone(actual)

    def test_fetch_unique_name_response(self):
        # Same as above but with channel_optimized_routing false in the fixture.
        self.holodeck.mock(Response(
            200,
            '''
            {
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "date_created": "2016-04-14T17:35:54Z",
                "date_updated": "2016-04-14T17:35:54Z",
                "friendly_name": "Default",
                "sid": "TCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "unique_name": "default",
                "url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/TaskChannels/TCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "workspace_sid": "WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "channel_optimized_routing": false,
                "links": {
                    "workspace": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
                }
            }
            '''
        ))
        actual = self.client.taskrouter.v1.workspaces("WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
            .task_channels("TCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
        self.assertIsNotNone(actual)

    def test_list_request(self):
        # Verify the GET request emitted by .list() on the collection.
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.taskrouter.v1.workspaces("WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                .task_channels.list()
        self.holodeck.assert_has_request(Request(
            'get',
            'https://taskrouter.twilio.com/v1/Workspaces/WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/TaskChannels',
        ))

    def test_read_full_response(self):
        # A one-element paged list response parses successfully.
        self.holodeck.mock(Response(
            200,
            '''
            {
                "channels": [
                    {
                        "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "date_created": "2016-04-14T17:35:54Z",
                        "date_updated": "2016-04-14T17:35:54Z",
                        "friendly_name": "Default",
                        "sid": "TCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "unique_name": "default",
                        "url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/TaskChannels/TCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "workspace_sid": "WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "channel_optimized_routing": true,
                        "links": {
                            "workspace": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
                        }
                    }
                ],
                "meta": {
                    "first_page_url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/TaskChannels?PageSize=50&Page=0",
                    "key": "channels",
                    "next_page_url": null,
                    "page": 0,
                    "page_size": 50,
                    "previous_page_url": null,
                    "url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/TaskChannels?PageSize=50&Page=0"
                }
            }
            '''
        ))
        actual = self.client.taskrouter.v1.workspaces("WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
            .task_channels.list()
        self.assertIsNotNone(actual)

    def test_read_empty_response(self):
        # An empty paged list response parses successfully.
        self.holodeck.mock(Response(
            200,
            '''
            {
                "channels": [],
                "meta": {
                    "first_page_url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/TaskChannels?PageSize=50&Page=0",
                    "key": "channels",
                    "next_page_url": null,
                    "page": 0,
                    "page_size": 50,
                    "previous_page_url": null,
                    "url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/TaskChannels?PageSize=50&Page=0"
                }
            }
            '''
        ))
        actual = self.client.taskrouter.v1.workspaces("WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
            .task_channels.list()
        self.assertIsNotNone(actual)

    def test_update_request(self):
        # Verify the POST request emitted by .update().
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.taskrouter.v1.workspaces("WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                .task_channels("TCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
        self.holodeck.assert_has_request(Request(
            'post',
            'https://taskrouter.twilio.com/v1/Workspaces/WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/TaskChannels/TCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
        ))

    def test_update_sid_response(self):
        # A canned update response parses into a resource instance.
        self.holodeck.mock(Response(
            200,
            '''
            {
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "workspace_sid": "WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "sid": "TCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "friendly_name": "Default",
                "unique_name": "default",
                "date_created": "2016-04-14T17:35:54Z",
                "date_updated": "2016-04-14T17:35:54Z",
                "url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/TaskChannels/TCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "channel_optimized_routing": true,
                "links": {
                    "workspace": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
                }
            }
            '''
        ))
        actual = self.client.taskrouter.v1.workspaces("WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
            .task_channels("TCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
        self.assertIsNotNone(actual)

    def test_update_unique_name_response(self):
        # Same update fixture addressed by unique name instead of sid.
        self.holodeck.mock(Response(
            200,
            '''
            {
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "workspace_sid": "WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "sid": "TCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "friendly_name": "Default",
                "unique_name": "default",
                "date_created": "2016-04-14T17:35:54Z",
                "date_updated": "2016-04-14T17:35:54Z",
                "url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/TaskChannels/TCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "channel_optimized_routing": true,
                "links": {
                    "workspace": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
                }
            }
            '''
        ))
        actual = self.client.taskrouter.v1.workspaces("WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
            .task_channels("TCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
        self.assertIsNotNone(actual)

    def test_delete_request(self):
        # Verify the DELETE request emitted by .delete().
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.taskrouter.v1.workspaces("WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                .task_channels("TCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
        self.holodeck.assert_has_request(Request(
            'delete',
            'https://taskrouter.twilio.com/v1/Workspaces/WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/TaskChannels/TCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
        ))

    def test_delete_sid_response(self):
        # A 204 with no body makes .delete() return a truthy success flag.
        self.holodeck.mock(Response(
            204,
            None,
        ))
        actual = self.client.taskrouter.v1.workspaces("WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
            .task_channels("TCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
        self.assertTrue(actual)

    def test_delete_unique_name_response(self):
        # Same 204 success path addressed by unique name.
        self.holodeck.mock(Response(
            204,
            None,
        ))
        actual = self.client.taskrouter.v1.workspaces("WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
            .task_channels("TCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
        self.assertTrue(actual)

    def test_create_request(self):
        # Verify the POST request and form parameters emitted by .create().
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.taskrouter.v1.workspaces("WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                .task_channels.create(friendly_name="friendly_name", unique_name="unique_name")
        values = {'FriendlyName': "friendly_name", 'UniqueName': "unique_name", }
        self.holodeck.assert_has_request(Request(
            'post',
            'https://taskrouter.twilio.com/v1/Workspaces/WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/TaskChannels',
            data=values,
        ))

    def test_create_response(self):
        # A 201 creation payload parses into a resource instance.
        self.holodeck.mock(Response(
            201,
            '''
            {
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "workspace_sid": "WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "sid": "TCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "friendly_name": "Outbound Voice",
                "unique_name": "ovoice",
                "date_created": "2016-04-14T17:35:54Z",
                "date_updated": "2016-04-14T17:35:54Z",
                "channel_optimized_routing": true,
                "url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/TaskChannels/TCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "links": {
                    "workspace": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
                }
            }
            '''
        ))
        actual = self.client.taskrouter.v1.workspaces("WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
            .task_channels.create(friendly_name="friendly_name", unique_name="unique_name")
        self.assertIsNotNone(actual)
| 42.248299
| 160
| 0.576524
| 880
| 12,421
| 7.973864
| 0.117045
| 0.059855
| 0.062847
| 0.071826
| 0.932592
| 0.932592
| 0.921334
| 0.920906
| 0.906085
| 0.906085
| 0
| 0.031445
| 0.3164
| 12,421
| 293
| 161
| 42.392491
| 0.794959
| 0.008775
| 0
| 0.79661
| 1
| 0
| 0.238337
| 0.128008
| 0
| 0
| 0
| 0
| 0.161017
| 1
| 0.118644
| false
| 0
| 0.033898
| 0
| 0.161017
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c7343cb88ce0616f0282632885d1eab151c406b1
| 2,266
|
py
|
Python
|
get_add_checksum.py
|
junglefive/EightPoleModuleTester
|
3da4e74d550dc32de477a19933b880b13be225b2
|
[
"MIT"
] | 1
|
2019-05-16T02:24:29.000Z
|
2019-05-16T02:24:29.000Z
|
get_add_checksum.py
|
junglefive/EightPoleModuleTester
|
3da4e74d550dc32de477a19933b880b13be225b2
|
[
"MIT"
] | null | null | null |
get_add_checksum.py
|
junglefive/EightPoleModuleTester
|
3da4e74d550dc32de477a19933b880b13be225b2
|
[
"MIT"
] | 1
|
2019-05-16T02:24:32.000Z
|
2019-05-16T02:24:32.000Z
|
import os, sys  # kept from the original file even though unused here

# Hex dumps of two data blobs. NOTE(review): the first assignment is dead code
# in the original (immediately overwritten by the second); preserved for
# reference in case the author switches back to the first image.
source = "1900160470b501251c002d050c4310d4ec4206d4ac420ad48b4200d070bd824270bdeb4217d4e94215d48b4270bd9b4270bdec4206d5ac42f9d5994200d070bd904270bd6d005c00ec4204d24c00ec4201d2994270bd0fb4034c0cbc03bc30b4f8f7a8fe30bc70bd1900160480807f7e7d7c7b7a7978777676757473727171706f6e6e6d6c6c6b6a6a696868676666656464636362616160605f5f5e5e5d5d5c5c5b5b5a5a59595858575756565555555454535352525251515050504f4f4f4e4e4d4d4d4c4c4c4b4b4b4a4a4a494949484848474747474646464545454444444443434343424242424141410cb3000000000020000200004641000028b300000002002078040000040100009103480f0236ff000102030eff01020201030405050206070808019effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
source = "2cabd3545ae33ef156a0c901c2183696bd4efc25abea10e1e9ae023cddedc691b676efa3830358e5c4d73c2f2d34c9a91853ab647f4e298491422b6231e55661662c1dc5adbc1b5448529ccfab814ac452336f8e40d0dd58185df6b19639d3ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"

byte_array = bytes.fromhex(source)
print(len(byte_array))

# Additive checksum over all bytes.
# BUG FIX: the original looped `for i in byte_array: checksum = checksum +
# byte_array[i]`, i.e. it used each *byte value* as an index into the buffer
# instead of adding the byte itself, summing bytes at arbitrary positions.
# The intended additive checksum is simply the sum of the bytes.
checksum = sum(byte_array)

print(hex(checksum))
# Truncate to a single byte, the usual 8-bit additive checksum.
print(hex(checksum & 0xff))
| 161.857143
| 1,035
| 0.973522
| 35
| 2,266
| 62.914286
| 0.542857
| 0.016349
| 0.015441
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.263864
| 0.013239
| 2,266
| 14
| 1,036
| 161.857143
| 0.72093
| 0
| 0
| 0
| 0
| 0
| 0.902913
| 0.902913
| 0
| 1
| 0.00353
| 0
| 0
| 1
| 0
| false
| 0
| 0.1
| 0
| 0.1
| 0.3
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c74cf235a0d98743a8fc52ce2be8004a769cb9e4
| 22,783
|
py
|
Python
|
code/test/test_task.py
|
mdskrzypczyk/LinkScheduling
|
46b4a676976f3429f6b5a28685386ed712f1bbe0
|
[
"MIT"
] | null | null | null |
code/test/test_task.py
|
mdskrzypczyk/LinkScheduling
|
46b4a676976f3429f6b5a28685386ed712f1bbe0
|
[
"MIT"
] | null | null | null |
code/test/test_task.py
|
mdskrzypczyk/LinkScheduling
|
46b4a676976f3429f6b5a28685386ed712f1bbe0
|
[
"MIT"
] | null | null | null |
import unittest
from jobscheduling.task import Task, BudgetTask, ResourceTask, BudgetResourceTask, PeriodicTask, PeriodicBudgetTask, \
PeriodicResourceTask, PeriodicBudgetResourceTask, DAGSubTask, DAGResourceSubTask, DAGBudgetResourceSubTask, \
DAGTask, PeriodicDAGTask, ResourceDAGTask, BudgetResourceDAGTask, PeriodicResourceDAGTask, \
PeriodicBudgetResourceDAGTask
class TestTask(unittest.TestCase):
    """Constructor tests for the plain Task type."""

    def test_init(self):
        """Every constructor argument lands on the matching attribute."""
        name, proc, release, deadline, description = "ABC", 100, 50, 200, "test"
        task = Task(name=name, c=proc, a=release, d=deadline, description=description)
        self.assertEqual(task.name, name)
        self.assertEqual(task.c, proc)
        self.assertEqual(task.a, release)
        self.assertEqual(task.d, deadline)
        self.assertEqual(task.description, description)
class TestResourceTask(unittest.TestCase):
    """Tests for ResourceTask construction and its schedule/interval helpers."""

    def test_init(self):
        """Resource lists are stored when given and default to empty."""
        name, proc, release, deadline = "ABC", 100, 50, 200
        held = ['1', '2', '3']
        locked = ['4', '5', '6']
        task = ResourceTask(name=name, c=proc, a=release, d=deadline,
                            resources=held, locked_resources=locked)
        self.assertEqual(task.name, name)
        self.assertEqual(task.c, proc)
        self.assertEqual(task.a, release)
        self.assertEqual(task.d, deadline)
        self.assertEqual(task.resources, held)
        self.assertEqual(task.locked_resources, locked)
        # Omitting the resource arguments must yield empty defaults.
        task = ResourceTask(name=name, c=proc, a=release, d=deadline)
        self.assertEqual(task.name, name)
        self.assertEqual(task.c, proc)
        self.assertEqual(task.a, release)
        self.assertEqual(task.d, deadline)
        self.assertEqual(task.resources, [])
        self.assertEqual(task.locked_resources, [])

    def test_get_resource_schedules(self):
        """Each held resource is booked for every slot of the execution window."""
        name, proc, release, deadline = "ABC", 3, 50, 200
        held = ['1', '2']
        locked = ['3']
        task = ResourceTask(name=name, c=proc, a=release, d=deadline,
                            resources=held, locked_resources=locked)
        schedules = task.get_resource_schedules()
        slots = [(slot, task) for slot in range(release, release + proc)]
        expected = {
            held[0]: slots,
            held[1]: slots
        }
        for resource in schedules.keys():
            self.assertEqual(schedules[resource], expected[resource])

    def test_get_resource_intervals(self):
        """Each held resource maps to one interval covering [release, release+c)."""
        name, proc, release, deadline = "ABC", 3, 50, 200
        held = ['1', '2']
        locked = ['3']
        task = ResourceTask(name=name, c=proc, a=release, d=deadline,
                            resources=held, locked_resources=locked)
        intervals = task.get_resource_intervals()
        for resource, tree in intervals.items():
            self.assertTrue(resource in held)
            self.assertEqual(len(tree), 1)
            interval = sorted(tree)[0]
            self.assertEqual(interval.begin, release)
            self.assertEqual(interval.end, release + proc)
class TestBudgetTask(unittest.TestCase):
    """Constructor tests for BudgetTask."""

    def test_init(self):
        """Budget k and preemption points are stored alongside base fields."""
        name, proc, release, deadline = "ABC", 100, 50, 200
        budget, points = 5, [1, 4, 7]
        task = BudgetTask(name=name, c=proc, a=release, d=deadline, k=budget,
                          preemption_points=points)
        self.assertEqual(task.name, name)
        self.assertEqual(task.c, proc)
        self.assertEqual(task.a, release)
        self.assertEqual(task.d, deadline)
        self.assertEqual(task.k, budget)
        self.assertEqual(task.preemption_points, points)
class TestBudgetResourceTask(unittest.TestCase):
    """Constructor tests for BudgetResourceTask (budget + resource fields)."""

    def test_init(self):
        """All budget and resource arguments land on the matching attributes."""
        name, proc, release, deadline = "ABC", 100, 50, 200
        budget, points = 5, [1, 4, 7]
        held = ['1', '2', '3']
        locked = ['4', '5', '6']
        task = BudgetResourceTask(name=name, c=proc, a=release, d=deadline,
                                  k=budget, resources=held,
                                  locked_resources=locked,
                                  preemption_points=points)
        self.assertEqual(task.name, name)
        self.assertEqual(task.c, proc)
        self.assertEqual(task.a, release)
        self.assertEqual(task.d, deadline)
        self.assertEqual(task.k, budget)
        self.assertEqual(task.preemption_points, points)
        self.assertEqual(task.resources, held)
        self.assertEqual(task.locked_resources, locked)
class TestPeriodicTasks(unittest.TestCase):
    """Constructor tests for the periodic task variants (period p, no deadline d)."""

    def test_init_periodic_task(self):
        """PeriodicTask stores name, cost, release and period."""
        name, proc, release, period = "ABC", 100, 50, 200
        task = PeriodicTask(name=name, c=proc, a=release, p=period)
        self.assertEqual(task.name, name)
        self.assertEqual(task.c, proc)
        self.assertEqual(task.a, release)
        self.assertEqual(task.p, period)

    def test_init_periodic_budget_task(self):
        """PeriodicBudgetTask additionally stores budget and preemption points."""
        name, proc, release, period = "ABC", 100, 50, 200
        budget, points = 5, [1, 4, 7]
        task = PeriodicBudgetTask(name=name, c=proc, a=release, p=period, k=budget,
                                  preemption_points=points)
        self.assertEqual(task.name, name)
        self.assertEqual(task.c, proc)
        self.assertEqual(task.a, release)
        self.assertEqual(task.p, period)
        self.assertEqual(task.k, budget)
        self.assertEqual(task.preemption_points, points)

    def test_init_periodic_resource_task(self):
        """PeriodicResourceTask stores resources, defaulting both lists to empty."""
        name, proc, release, period = "ABC", 100, 50, 200
        held = ['1', '2', '3']
        locked = ['4', '5', '6']
        task = PeriodicResourceTask(name=name, c=proc, a=release, p=period,
                                    resources=held, locked_resources=locked)
        self.assertEqual(task.name, name)
        self.assertEqual(task.c, proc)
        self.assertEqual(task.a, release)
        self.assertEqual(task.p, period)
        self.assertEqual(task.resources, held)
        self.assertEqual(task.locked_resources, locked)
        # Defaults when the resource arguments are omitted.
        task = PeriodicResourceTask(name=name, c=proc, a=release, p=period)
        self.assertEqual(task.name, name)
        self.assertEqual(task.c, proc)
        self.assertEqual(task.a, release)
        self.assertEqual(task.p, period)
        self.assertEqual(task.resources, [])
        self.assertEqual(task.locked_resources, [])

    def test_init_periodic_budget_resource_task(self):
        """PeriodicBudgetResourceTask combines budget and resource fields."""
        name, proc, release, period = "ABC", 100, 50, 200
        budget, points = 5, [1, 4, 7]
        held = ['1', '2', '3']
        locked = ['4', '5', '6']
        task = PeriodicBudgetResourceTask(name=name, c=proc, a=release, p=period,
                                          k=budget, resources=held,
                                          locked_resources=locked,
                                          preemption_points=points)
        self.assertEqual(task.name, name)
        self.assertEqual(task.c, proc)
        self.assertEqual(task.a, release)
        self.assertEqual(task.p, period)
        self.assertEqual(task.k, budget)
        self.assertEqual(task.preemption_points, points)
        self.assertEqual(task.resources, held)
        self.assertEqual(task.locked_resources, locked)
class TestDagSubTasks(unittest.TestCase):
    """Constructor tests for the DAG subtask variants."""

    def test_init_dag_subtask(self):
        """DAGSubTask defaults d/children/parents and stores explicit values."""
        name = "ABC"
        proc_time = 10
        deadline = 100
        dist = 2
        task = DAGSubTask(name=name, c=proc_time)
        self.assertEqual(task.name, name)
        self.assertEqual(task.c, proc_time)
        self.assertIsNone(task.d)
        self.assertEqual(task.children, [])
        self.assertEqual(task.parents, [])
        # Fully-specified construction.
        parents = [DAGSubTask(name="Parent", c=2)]
        children = [DAGSubTask(name="Child", c=3)]
        task = DAGSubTask(name=name, c=proc_time, d=deadline, parents=parents,
                          children=children, dist=dist)
        self.assertEqual(task.name, name)
        self.assertEqual(task.c, proc_time)
        self.assertEqual(task.d, deadline)
        self.assertEqual(task.children, children)
        self.assertEqual(task.parents, parents)

    def test_init_dag_resource_subtask(self):
        """DAGResourceSubTask adds resource lists (empty by default)."""
        name = "ABC"
        proc_time = 10
        deadline = 100
        dist = 2
        task = DAGResourceSubTask(name=name, c=proc_time)
        self.assertEqual(task.name, name)
        self.assertEqual(task.c, proc_time)
        self.assertIsNone(task.d)
        self.assertEqual(task.children, [])
        self.assertEqual(task.parents, [])
        self.assertEqual(task.resources, [])
        self.assertEqual(task.locked_resources, [])
        # Fully-specified construction.
        parents = [DAGSubTask(name="Parent", c=2)]
        children = [DAGSubTask(name="Child", c=3)]
        held = ['2']
        locked = ['5']
        task = DAGResourceSubTask(name=name, c=proc_time, d=deadline,
                                  parents=parents, children=children, dist=dist,
                                  resources=held, locked_resources=locked)
        self.assertEqual(task.name, name)
        self.assertEqual(task.c, proc_time)
        self.assertEqual(task.d, deadline)
        self.assertEqual(task.children, children)
        self.assertEqual(task.parents, parents)
        self.assertEqual(task.resources, held)
        self.assertEqual(task.locked_resources, locked)

    def test_init_dag_budget_resource_subtask(self):
        """DAGBudgetResourceSubTask adds a budget k (0 by default)."""
        name = "ABC"
        proc_time = 10
        deadline = 100
        dist = 2
        task = DAGBudgetResourceSubTask(name=name, c=proc_time)
        self.assertEqual(task.name, name)
        self.assertEqual(task.c, proc_time)
        self.assertIsNone(task.d)
        self.assertEqual(task.k, 0)
        self.assertEqual(task.children, [])
        self.assertEqual(task.parents, [])
        self.assertEqual(task.resources, [])
        self.assertEqual(task.locked_resources, [])
        # Fully-specified construction.
        parents = [DAGSubTask(name="Parent", c=2)]
        children = [DAGSubTask(name="Child", c=3)]
        held = ['2']
        locked = ['5']
        budget = 10
        task = DAGBudgetResourceSubTask(name=name, c=proc_time, d=deadline,
                                        k=budget, parents=parents,
                                        children=children, dist=dist,
                                        resources=held, locked_resources=locked)
        self.assertEqual(task.name, name)
        self.assertEqual(task.c, proc_time)
        self.assertEqual(task.d, deadline)
        self.assertEqual(task.k, budget)
        self.assertEqual(task.children, children)
        self.assertEqual(task.parents, parents)
        self.assertEqual(task.resources, held)
        self.assertEqual(task.locked_resources, locked)
class TestDagTasks(unittest.TestCase):
def test_init_dagtask(self):
test_name = "ABC"
test_subtask_proc = 3
test_task = DAGSubTask(name="SubTask", c=test_subtask_proc)
task = DAGTask(name=test_name, tasks=[test_task])
self.assertEqual(task.name, test_name)
self.assertEqual(task.sources, [test_task])
self.assertEqual(task.sinks, [test_task])
self.assertEqual(task.subtasks, [test_task])
self.assertEqual(task.tasks, {test_task.name: test_task})
self.assertEqual(task.a, 0)
self.assertEqual(task.c, test_subtask_proc)
self.assertIsNone(task.d)
test_task2 = DAGSubTask(name="Subtask2", c=4, a=test_task.c, parents=[test_task])
test_task.add_child(test_task2)
task = DAGTask(name=test_name, tasks=[test_task, test_task2])
test_deadline = 100
self.assertEqual(task.name, test_name)
self.assertEqual(task.sources, [test_task])
self.assertEqual(task.sinks, [test_task2])
self.assertEqual(task.subtasks, [test_task, test_task2])
self.assertEqual(task.tasks, {test_task.name: test_task, test_task2.name: test_task2})
self.assertEqual(task.a, 0)
self.assertEqual(task.c, test_task.c + test_task2.c)
self.assertIsNone(task.d, test_deadline)
def test_init_resource_dagtask(self):
test_name = "ABC"
test_subtask_proc = 3
test_deadline = 100
test_task = DAGResourceSubTask(name="SubTask", c=test_subtask_proc)
task = ResourceDAGTask(name=test_name, d=test_deadline, tasks=[test_task])
self.assertEqual(task.name, test_name)
self.assertEqual(task.sources, [test_task])
self.assertEqual(task.sinks, [test_task])
self.assertEqual(task.subtasks, [test_task])
self.assertEqual(task.tasks, {test_task.name: test_task})
self.assertEqual(task.a, 0)
self.assertEqual(task.c, test_subtask_proc)
self.assertEqual(task.d, test_deadline)
self.assertEqual(task.resources, set())
test_resources1 = ['1', '2']
test_task = DAGResourceSubTask(name="Subtask1", c=4, a=0, resources=test_resources1)
test_resources2 = ['3']
test_task2 = DAGResourceSubTask(name="Subtask2", c=4, a=test_task.c, parents=[test_task],
resources=test_resources2)
test_task.add_child(test_task2)
task = ResourceDAGTask(name=test_name, d=test_deadline, tasks=[test_task, test_task2])
self.assertEqual(task.name, test_name)
self.assertEqual(task.sources, [test_task])
self.assertEqual(task.sinks, [test_task2])
self.assertEqual(task.subtasks, [test_task, test_task2])
self.assertEqual(task.tasks, {test_task.name: test_task, test_task2.name: test_task2})
self.assertEqual(task.a, 0)
self.assertEqual(task.c, test_task.c + test_task2.c)
self.assertEqual(task.d, test_deadline)
self.assertEqual(task.resources, set(test_task.resources + test_task2.resources))
def test_init_budget_resource_dagtask(self):
test_name = "ABC"
test_subtask_proc = 3
test_deadline = 100
test_task = DAGResourceSubTask(name="SubTask", c=test_subtask_proc)
task = BudgetResourceDAGTask(name=test_name, d=test_deadline, tasks=[test_task])
self.assertEqual(task.name, test_name)
self.assertEqual(task.sources, [test_task])
self.assertEqual(task.sinks, [test_task])
self.assertEqual(task.subtasks, [test_task])
self.assertEqual(task.tasks, {test_task.name: test_task})
self.assertEqual(task.a, 0)
self.assertEqual(task.c, test_subtask_proc)
self.assertEqual(task.d, test_deadline)
self.assertEqual(task.k, 0)
self.assertEqual(task.resources, set())
test_resources1 = ['1', '2']
test_task = DAGResourceSubTask(name="Subtask1", c=4, a=0, resources=test_resources1)
test_resources2 = ['3']
test_task2 = DAGResourceSubTask(name="Subtask2", c=4, a=test_task.c, parents=[test_task],
resources=test_resources2)
test_task.add_child(test_task2)
test_budget = 20
task = BudgetResourceDAGTask(name=test_name, d=test_deadline, k=test_budget, tasks=[test_task, test_task2])
self.assertEqual(task.name, test_name)
self.assertEqual(task.sources, [test_task])
self.assertEqual(task.sinks, [test_task2])
self.assertEqual(task.subtasks, [test_task, test_task2])
self.assertEqual(task.tasks, {test_task.name: test_task, test_task2.name: test_task2})
self.assertEqual(task.a, 0)
self.assertEqual(task.c, test_task.c + test_task2.c)
self.assertEqual(task.d, test_deadline)
self.assertEqual(task.k, test_budget)
self.assertEqual(task.resources, set(test_task.resources + test_task2.resources))
def test_init_periodic_dagtask(self):
test_name = "ABC"
test_period = 100
test_subtask_proc = 3
test_task = DAGSubTask(name="SubTask", c=test_subtask_proc)
task = PeriodicDAGTask(name=test_name, p=test_period, tasks=[test_task])
self.assertEqual(task.name, test_name)
self.assertEqual(task.sources, [test_task])
self.assertEqual(task.sinks, [test_task])
self.assertEqual(task.subtasks, [test_task])
self.assertEqual(task.tasks, {test_task.name: test_task})
self.assertEqual(task.a, 0)
self.assertEqual(task.c, test_subtask_proc)
self.assertEqual(task.p, test_period)
test_task2 = DAGSubTask(name="Subtask2", c=4, a=test_task.c, parents=[test_task])
test_task.add_child(test_task2)
task = PeriodicDAGTask(name=test_name, p=test_period, tasks=[test_task, test_task2])
self.assertEqual(task.name, test_name)
self.assertEqual(task.sources, [test_task])
self.assertEqual(task.sinks, [test_task2])
self.assertEqual(task.subtasks, [test_task, test_task2])
self.assertEqual(task.tasks, {test_task.name: test_task, test_task2.name: test_task2})
self.assertEqual(task.a, 0)
self.assertEqual(task.c, test_task.c + test_task2.c)
self.assertEqual(task.p, test_period)
def test_init_periodic_resource_dagtask(self):
    """Construct a PeriodicResourceDAGTask from one subtask, then from a
    two-node chain, and verify derived attributes including resource union."""
    name = "ABC"
    period = 100
    proc_time = 3

    # Single-subtask DAG with no resources attached.
    root = DAGResourceSubTask(name="SubTask", c=proc_time)
    task = PeriodicResourceDAGTask(name=name, p=period, tasks=[root])
    for attr, expected in [
        ("name", name),
        ("sources", [root]),
        ("sinks", [root]),
        ("subtasks", [root]),
        ("tasks", {root.name: root}),
        ("a", 0),
        ("c", proc_time),
        ("p", period),
        ("resources", set()),
    ]:
        self.assertEqual(getattr(task, attr), expected)

    # Two-node chain: resources must be the union of both subtasks' resources.
    root_resources = ['1', '2']
    root = DAGResourceSubTask(name="Subtask1", c=4, a=0, resources=root_resources)
    child_resources = ['3']
    child = DAGResourceSubTask(name="Subtask2", c=4, a=root.c, parents=[root],
                               resources=child_resources)
    root.add_child(child)
    task = PeriodicResourceDAGTask(name=name, p=period, tasks=[root, child])
    for attr, expected in [
        ("name", name),
        ("sources", [root]),
        ("sinks", [child]),
        ("subtasks", [root, child]),
        ("tasks", {root.name: root, child.name: child}),
        ("a", 0),
        ("c", root.c + child.c),
        ("p", period),
        ("resources", set(root.resources + child.resources)),
    ]:
        self.assertEqual(getattr(task, attr), expected)
def test_init_periodic_budget_resource_dagtask(self):
    """Construct a PeriodicBudgetResourceDAGTask from one subtask, then from a
    two-node chain, verifying the budget ``k`` and the resource union."""
    name = "ABC"
    period = 100
    proc_time = 3

    # Single-subtask DAG: budget defaults to 0, resources to the empty set.
    root = DAGResourceSubTask(name="SubTask", c=proc_time)
    task = PeriodicBudgetResourceDAGTask(name=name, p=period, tasks=[root])
    for attr, expected in [
        ("name", name),
        ("sources", [root]),
        ("sinks", [root]),
        ("subtasks", [root]),
        ("tasks", {root.name: root}),
        ("a", 0),
        ("c", proc_time),
        ("p", period),
        ("k", 0),
        ("resources", set()),
    ]:
        self.assertEqual(getattr(task, attr), expected)

    # Two-node chain with an explicit budget.
    root_resources = ['1', '2']
    root = DAGResourceSubTask(name="Subtask1", c=4, a=0, resources=root_resources)
    child_resources = ['3']
    child = DAGResourceSubTask(name="Subtask2", c=4, a=root.c, parents=[root],
                               resources=child_resources)
    root.add_child(child)
    budget = 10
    task = PeriodicBudgetResourceDAGTask(name=name, p=period, k=budget,
                                         tasks=[root, child])
    for attr, expected in [
        ("name", name),
        ("sources", [root]),
        ("sinks", [child]),
        ("subtasks", [root, child]),
        ("tasks", {root.name: root, child.name: child}),
        ("a", 0),
        ("c", root.c + child.c),
        ("p", period),
        ("k", budget),
        ("resources", set(root.resources + child.resources)),
    ]:
        self.assertEqual(getattr(task, attr), expected)
| 42.986792
| 118
| 0.664048
| 2,763
| 22,783
| 5.22186
| 0.038002
| 0.216246
| 0.268644
| 0.057388
| 0.913086
| 0.901719
| 0.900956
| 0.899709
| 0.884391
| 0.876352
| 0
| 0.017297
| 0.231137
| 22,783
| 529
| 119
| 43.068053
| 0.80636
| 0
| 0
| 0.824601
| 0
| 0
| 0.011501
| 0
| 0
| 0
| 0
| 0
| 0.487472
| 1
| 0.04328
| false
| 0
| 0.004556
| 0
| 0.063781
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
c7c75b1f01562cc68bf3175b50d20d896828e4e3
| 136
|
py
|
Python
|
viewmodels/__init__.py
|
Svaught598/WetLabApp
|
abec4b077547353c28ede748a07823812ea91112
|
[
"MIT"
] | 1
|
2021-02-16T18:31:35.000Z
|
2021-02-16T18:31:35.000Z
|
viewmodels/__init__.py
|
Svaught598/WetLabApp
|
abec4b077547353c28ede748a07823812ea91112
|
[
"MIT"
] | 5
|
2020-08-04T19:55:30.000Z
|
2022-03-12T00:28:10.000Z
|
viewmodels/__init__.py
|
Svaught598/WetLabApp
|
abec4b077547353c28ede748a07823812ea91112
|
[
"MIT"
] | null | null | null |
from .volume_view_model import VolumeViewModel
from .film_view_model import FilmViewModel
from .update_view_model import UpdateViewModel
| 45.333333
| 46
| 0.897059
| 18
| 136
| 6.444444
| 0.555556
| 0.232759
| 0.387931
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.080882
| 136
| 3
| 47
| 45.333333
| 0.928
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
c7d35112f9ffe0bbcc3d1258f949c9e198784af7
| 442,129
|
py
|
Python
|
sympy/integrals/rubi/rubi_tests/tests/test_logarithms.py
|
iamabhishek0/sympy
|
c461bd1ff9d178d1012b04fd0bf37ee3abb02cdd
|
[
"BSD-3-Clause"
] | 15
|
2020-06-29T08:33:39.000Z
|
2022-02-12T00:28:51.000Z
|
sympy/integrals/rubi/rubi_tests/tests/test_logarithms.py
|
iamabhishek0/sympy
|
c461bd1ff9d178d1012b04fd0bf37ee3abb02cdd
|
[
"BSD-3-Clause"
] | 13
|
2020-03-24T17:53:51.000Z
|
2022-02-10T20:01:14.000Z
|
sympy/integrals/rubi/rubi_tests/tests/test_logarithms.py
|
iamabhishek0/sympy
|
c461bd1ff9d178d1012b04fd0bf37ee3abb02cdd
|
[
"BSD-3-Clause"
] | 11
|
2020-06-29T08:40:24.000Z
|
2022-02-24T17:39:16.000Z
|
# -- Module prologue for the rubi logarithm-integration tests. -----------------
# These tests need the optional ``matchpy`` package and Python >= 3.6; when
# either requirement is missing, ``disabled = True`` tells sympy's test runner
# (bin/test) to skip this whole module.
import sys
from sympy.external import import_module
matchpy = import_module("matchpy")
if not matchpy:
    #bin/test will not execute any tests now
    disabled = True
if sys.version_info[:2] < (3, 6):
    disabled = True
# NOTE(review): ``csch`` appears twice in the functions import and ``ArcTanh``
# twice in the utility_function import — harmless duplicates worth cleaning up.
from sympy.integrals.rubi.rubi import rubi_integrate
from sympy.functions import log, sqrt, exp, cos, sin, tan, sec, csc, cot,cosh, sinh, tanh, coth, csch, csch, sech
from sympy import acsch , acsc, asinh,asin,acos,acosh,atan,atanh
from sympy.integrals.rubi.utility_function import (EllipticE, EllipticF, Int, ArcCsch, ArcCsc, Gamma, Factorial, PolyGamma , LogGamma , Subst ,
hypergeom, rubi_test, AppellF1, EllipticPi, Log, Sqrt, ArcTan, ArcTanh, ArcSin, ArcSinh, ArcCosh, ArcTanh, ArcCos, Hypergeometric2F1,)
from sympy import pi
from sympy import S, hyper, I, simplify, exp_polar, symbols, exp, Ei,erf, erfi, li, Integral, polylog, hyper as HypergeometricPFQ
from sympy import EulerGamma, expint, Chi, Shi, Ci, Si, E
from sympy.utilities.pytest import SKIP
# Free symbols shared by every test in this module.
# NOTE(review): the second ``symbols`` call rebinds a, b, c, d, e, f, m, n,
# p, and u from the first call — presumably intentional, but worth confirming.
a, b, c, d, e, f, m, n, x, u , k, p, r, s, t, i, j= symbols('a b c d e f m n x u k p r s t i j')
A, B, C, D, a, b, c, d, e, f, g, h, y, z, m, n, p, q, u, v, w, F = symbols('A B C D a b c d e f g h y z m n p q u v w F', )
def test_1():
assert rubi_test(rubi_integrate((e + f*x)**(p + S(-1))/log(d*(e + f*x)**p), x), x, li(d*(e + f*x)**p)/(d*f*p), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((e*g + f*g*x)**(p + S(-1))/log(d*(e + f*x)**p), x), x, (e + f*x)**(-p + S(1))*(e*g + f*g*x)**(p + S(-1))*li(d*(e + f*x)**p)/(d*f*p), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)**m, x), x, b*f*p*q*(g + h*x)**(m + S(2))*hyper((S(1), m + S(2)), (m + S(3),), f*(g + h*x)/(-e*h + f*g))/(h*(m + S(1))*(m + S(2))*(-e*h + f*g)) + (a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)**(m + S(1))/(h*(m + S(1))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)**S(4), x), x, -b*p*q*(g + h*x)**S(5)/(S(25)*h) - b*p*q*(g + h*x)**S(4)*(-e*h + f*g)/(S(20)*f*h) - b*p*q*(g + h*x)**S(3)*(-e*h + f*g)**S(2)/(S(15)*f**S(2)*h) - b*p*q*(g + h*x)**S(2)*(-e*h + f*g)**S(3)/(S(10)*f**S(3)*h) - b*p*q*x*(-e*h + f*g)**S(4)/(S(5)*f**S(4)) - b*p*q*(-e*h + f*g)**S(5)*log(e + f*x)/(S(5)*f**S(5)*h) + (a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)**S(5)/(S(5)*h), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)**S(3), x), x, -b*p*q*(g + h*x)**S(4)/(S(16)*h) - b*p*q*(g + h*x)**S(3)*(-e*h + f*g)/(S(12)*f*h) - b*p*q*(g + h*x)**S(2)*(-e*h + f*g)**S(2)/(S(8)*f**S(2)*h) - b*p*q*x*(-e*h + f*g)**S(3)/(S(4)*f**S(3)) - b*p*q*(-e*h + f*g)**S(4)*log(e + f*x)/(S(4)*f**S(4)*h) + (a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)**S(4)/(S(4)*h), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)**S(2), x), x, -b*p*q*(g + h*x)**S(3)/(S(9)*h) - b*p*q*(g + h*x)**S(2)*(-e*h + f*g)/(S(6)*f*h) - b*p*q*x*(-e*h + f*g)**S(2)/(S(3)*f**S(2)) - b*p*q*(-e*h + f*g)**S(3)*log(e + f*x)/(S(3)*f**S(3)*h) + (a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)**S(3)/(S(3)*h), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x), x), x, -b*p*q*(g + h*x)**S(2)/(S(4)*h) - b*p*q*x*(-e*h + f*g)/(S(2)*f) - b*p*q*(-e*h + f*g)**S(2)*log(e + f*x)/(S(2)*f**S(2)*h) + (a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)**S(2)/(S(2)*h), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(a + b*log(c*(d*(e + f*x)**p)**q), x), x, a*x - b*p*q*x + b*(e + f*x)*log(c*(d*(e + f*x)**p)**q)/f, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))/(g + h*x), x), x, b*p*q*polylog(S(2), -h*(e + f*x)/(-e*h + f*g))/h + (a + b*log(c*(d*(e + f*x)**p)**q))*log(f*(g + h*x)/(-e*h + f*g))/h, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))/(g + h*x)**S(2), x), x, b*f*p*q*log(e + f*x)/(h*(-e*h + f*g)) - b*f*p*q*log(g + h*x)/(h*(-e*h + f*g)) + (-a - b*log(c*(d*(e + f*x)**p)**q))/(h*(g + h*x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))/(g + h*x)**S(3), x), x, b*f**S(2)*p*q*log(e + f*x)/(S(2)*h*(-e*h + f*g)**S(2)) - b*f**S(2)*p*q*log(g + h*x)/(S(2)*h*(-e*h + f*g)**S(2)) + b*f*p*q/(S(2)*h*(g + h*x)*(-e*h + f*g)) + (-a/S(2) - b*log(c*(d*(e + f*x)**p)**q)/S(2))/(h*(g + h*x)**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))/(g + h*x)**S(4), x), x, b*f**S(3)*p*q*log(e + f*x)/(S(3)*h*(-e*h + f*g)**S(3)) - b*f**S(3)*p*q*log(g + h*x)/(S(3)*h*(-e*h + f*g)**S(3)) + b*f**S(2)*p*q/(S(3)*h*(g + h*x)*(-e*h + f*g)**S(2)) + b*f*p*q/(S(6)*h*(g + h*x)**S(2)*(-e*h + f*g)) + (-a/S(3) - b*log(c*(d*(e + f*x)**p)**q)/S(3))/(h*(g + h*x)**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))/(g + h*x)**S(5), x), x, b*f**S(4)*p*q*log(e + f*x)/(S(4)*h*(-e*h + f*g)**S(4)) - b*f**S(4)*p*q*log(g + h*x)/(S(4)*h*(-e*h + f*g)**S(4)) + b*f**S(3)*p*q/(S(4)*h*(g + h*x)*(-e*h + f*g)**S(3)) + b*f**S(2)*p*q/(S(8)*h*(g + h*x)**S(2)*(-e*h + f*g)**S(2)) + b*f*p*q/(S(12)*h*(g + h*x)**S(3)*(-e*h + f*g)) + (-a/S(4) - b*log(c*(d*(e + f*x)**p)**q)/S(4))/(h*(g + h*x)**S(4)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(g + h*x)**m, x), x, Integral((a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(g + h*x)**m, x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(g + h*x)**S(3), x), x, -a*b*p*q*x*(-e*h + f*g)**S(3)/(S(2)*f**S(3)) + b**S(2)*p**S(2)*q**S(2)*(g + h*x)**S(4)/(S(32)*h) + S(7)*b**S(2)*p**S(2)*q**S(2)*(g + h*x)**S(3)*(-e*h + f*g)/(S(72)*f*h) + S(13)*b**S(2)*p**S(2)*q**S(2)*(g + h*x)**S(2)*(-e*h + f*g)**S(2)/(S(48)*f**S(2)*h) + S(25)*b**S(2)*p**S(2)*q**S(2)*x*(-e*h + f*g)**S(3)/(S(24)*f**S(3)) - b**S(2)*p*q*(e + f*x)*(-e*h + f*g)**S(3)*log(c*(d*(e + f*x)**p)**q)/(S(2)*f**S(4)) + S(13)*b**S(2)*p**S(2)*q**S(2)*(-e*h + f*g)**S(4)*log(e + f*x)/(S(24)*f**S(4)*h) - b*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)**S(4)/(S(8)*h) - b*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)**S(3)*(-e*h + f*g)/(S(6)*f*h) - b*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)**S(2)*(-e*h + f*g)**S(2)/(S(4)*f**S(2)*h) + (a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(g + h*x)**S(4)/(S(4)*h) - (a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(-e*h + f*g)**S(4)/(S(4)*f**S(4)*h), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(g + h*x)**S(2), x), x, -S(2)*a*b*p*q*x*(-e*h + f*g)**S(2)/(S(3)*f**S(2)) + S(2)*b**S(2)*p**S(2)*q**S(2)*(g + h*x)**S(3)/(S(27)*h) + S(5)*b**S(2)*p**S(2)*q**S(2)*(g + h*x)**S(2)*(-e*h + f*g)/(S(18)*f*h) + S(11)*b**S(2)*p**S(2)*q**S(2)*x*(-e*h + f*g)**S(2)/(S(9)*f**S(2)) - S(2)*b**S(2)*p*q*(e + f*x)*(-e*h + f*g)**S(2)*log(c*(d*(e + f*x)**p)**q)/(S(3)*f**S(3)) + S(5)*b**S(2)*p**S(2)*q**S(2)*(-e*h + f*g)**S(3)*log(e + f*x)/(S(9)*f**S(3)*h) - S(2)*b*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)**S(3)/(S(9)*h) - b*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)**S(2)*(-e*h + f*g)/(S(3)*f*h) + (a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(g + h*x)**S(3)/(S(3)*h) - (a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(-e*h + f*g)**S(3)/(S(3)*f**S(3)*h), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(g + h*x), x), x, -S(2)*a*b*p*q*x*(-e*h + f*g)/f + b**S(2)*e*h*p**S(2)*q**S(2)*x/(S(2)*f) + b**S(2)*h*p**S(2)*q**S(2)*x**S(2)/S(4) + S(2)*b**S(2)*p**S(2)*q**S(2)*x*(-e*h + f*g)/f - S(2)*b**S(2)*p*q*(e + f*x)*(-e*h + f*g)*log(c*(d*(e + f*x)**p)**q)/f**S(2) - b*h*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)**S(2)/(S(2)*f**S(2)) + h*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(e + f*x)**S(2)/(S(2)*f**S(2)) + (a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(e + f*x)*(-e*h + f*g)/f**S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**S(2), x), x, -S(2)*a*b*p*q*x + S(2)*b**S(2)*p**S(2)*q**S(2)*x - S(2)*b**S(2)*p*q*(e + f*x)*log(c*(d*(e + f*x)**p)**q)/f + (a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(e + f*x)/f, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**S(2)/(g + h*x), x), x, -S(2)*b**S(2)*p**S(2)*q**S(2)*polylog(S(3), -h*(e + f*x)/(-e*h + f*g))/h + S(2)*b*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))*polylog(S(2), -h*(e + f*x)/(-e*h + f*g))/h + (a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*log(f*(g + h*x)/(-e*h + f*g))/h, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**S(2)/(g + h*x)**S(2), x), x, -S(2)*b**S(2)*f*p**S(2)*q**S(2)*polylog(S(2), -h*(e + f*x)/(-e*h + f*g))/(h*(-e*h + f*g)) - S(2)*b*f*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))*log(f*(g + h*x)/(-e*h + f*g))/(h*(-e*h + f*g)) + (a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(e + f*x)/((g + h*x)*(-e*h + f*g)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**S(2)/(g + h*x)**S(3), x), x, -b**S(2)*f**S(2)*p**S(2)*q**S(2)*log(e + f*x)/(h*(-e*h + f*g)**S(2)) + b**S(2)*f**S(2)*p**S(2)*q**S(2)*log(g + h*x)/(h*(-e*h + f*g)**S(2)) - b**S(2)*f**S(2)*p**S(2)*q**S(2)*polylog(S(2), -h*(e + f*x)/(-e*h + f*g))/(h*(-e*h + f*g)**S(2)) - b*f**S(2)*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))*log(f*(g + h*x)/(-e*h + f*g))/(h*(-e*h + f*g)**S(2)) + b*f*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))/(h*(g + h*x)*(-e*h + f*g)) + f**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)/(S(2)*h*(-e*h + f*g)**S(2)) - (a + b*log(c*(d*(e + f*x)**p)**q))**S(2)/(S(2)*h*(g + h*x)**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**S(2)/(g + h*x)**S(4), x), x, -b**S(2)*f**S(3)*p**S(2)*q**S(2)*log(e + f*x)/(h*(-e*h + f*g)**S(3)) + b**S(2)*f**S(3)*p**S(2)*q**S(2)*log(g + h*x)/(h*(-e*h + f*g)**S(3)) - S(2)*b**S(2)*f**S(3)*p**S(2)*q**S(2)*polylog(S(2), -h*(e + f*x)/(-e*h + f*g))/(S(3)*h*(-e*h + f*g)**S(3)) - b**S(2)*f**S(2)*p**S(2)*q**S(2)/(S(3)*h*(g + h*x)*(-e*h + f*g)**S(2)) - S(2)*b*f**S(3)*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))*log(f*(g + h*x)/(-e*h + f*g))/(S(3)*h*(-e*h + f*g)**S(3)) + S(2)*b*f**S(2)*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))/(S(3)*h*(g + h*x)*(-e*h + f*g)**S(2)) + b*f*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))/(S(3)*h*(g + h*x)**S(2)*(-e*h + f*g)) + f**S(3)*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)/(S(3)*h*(-e*h + f*g)**S(3)) - (a + b*log(c*(d*(e + f*x)**p)**q))**S(2)/(S(3)*h*(g + h*x)**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(g + h*x)**m, x), x, Integral((a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(g + h*x)**m, x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(g + h*x)**S(3), x), x, S(6)*a*b**S(2)*p**S(2)*q**S(2)*x*(-e*h + f*g)**S(3)/f**S(3) - S(9)*b**S(3)*e*h*p**S(3)*q**S(3)*x*(-e*h + f*g)**S(2)/(S(4)*f**S(3)) - S(9)*b**S(3)*h*p**S(3)*q**S(3)*x**S(2)*(-e*h + f*g)**S(2)/(S(8)*f**S(2)) - S(6)*b**S(3)*p**S(3)*q**S(3)*x*(-e*h + f*g)**S(3)/f**S(3) - S(3)*b**S(3)*h**S(3)*p**S(3)*q**S(3)*(e + f*x)**S(4)/(S(128)*f**S(4)) - S(2)*b**S(3)*h**S(2)*p**S(3)*q**S(3)*(e + f*x)**S(3)*(-e*h + f*g)/(S(9)*f**S(4)) + S(6)*b**S(3)*p**S(2)*q**S(2)*(e + f*x)*(-e*h + f*g)**S(3)*log(c*(d*(e + f*x)**p)**q)/f**S(4) + S(3)*b**S(2)*h**S(3)*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)**S(4)/(S(32)*f**S(4)) + S(2)*b**S(2)*h**S(2)*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)**S(3)*(-e*h + f*g)/(S(3)*f**S(4)) + S(9)*b**S(2)*h*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)**S(2)*(-e*h + f*g)**S(2)/(S(4)*f**S(4)) - S(3)*b*h**S(3)*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(e + f*x)**S(4)/(S(16)*f**S(4)) - b*h**S(2)*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(e + f*x)**S(3)*(-e*h + f*g)/f**S(4) - S(9)*b*h*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(e + f*x)**S(2)*(-e*h + f*g)**S(2)/(S(4)*f**S(4)) - S(3)*b*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(e + f*x)*(-e*h + f*g)**S(3)/f**S(4) + h**S(3)*(a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(e + f*x)**S(4)/(S(4)*f**S(4)) + h**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(e + f*x)**S(3)*(-e*h + f*g)/f**S(4) + S(3)*h*(a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(e + f*x)**S(2)*(-e*h + f*g)**S(2)/(S(2)*f**S(4)) + (a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(e + f*x)*(-e*h + f*g)**S(3)/f**S(4), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(g + h*x)**S(2), x), x, S(6)*a*b**S(2)*p**S(2)*q**S(2)*x*(-e*h + f*g)**S(2)/f**S(2) - S(3)*b**S(3)*e*h*p**S(3)*q**S(3)*x*(-e*h + f*g)/(S(2)*f**S(2)) - S(3)*b**S(3)*h*p**S(3)*q**S(3)*x**S(2)*(-e*h + f*g)/(S(4)*f) - S(6)*b**S(3)*p**S(3)*q**S(3)*x*(-e*h + f*g)**S(2)/f**S(2) - S(2)*b**S(3)*h**S(2)*p**S(3)*q**S(3)*(e + f*x)**S(3)/(S(27)*f**S(3)) + S(6)*b**S(3)*p**S(2)*q**S(2)*(e + f*x)*(-e*h + f*g)**S(2)*log(c*(d*(e + f*x)**p)**q)/f**S(3) + S(2)*b**S(2)*h**S(2)*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)**S(3)/(S(9)*f**S(3)) + S(3)*b**S(2)*h*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)**S(2)*(-e*h + f*g)/(S(2)*f**S(3)) - b*h**S(2)*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(e + f*x)**S(3)/(S(3)*f**S(3)) - S(3)*b*h*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(e + f*x)**S(2)*(-e*h + f*g)/(S(2)*f**S(3)) - S(3)*b*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(e + f*x)*(-e*h + f*g)**S(2)/f**S(3) + h**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(e + f*x)**S(3)/(S(3)*f**S(3)) + h*(a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(e + f*x)**S(2)*(-e*h + f*g)/f**S(3) + (a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(e + f*x)*(-e*h + f*g)**S(2)/f**S(3), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(g + h*x), x), x, S(6)*a*b**S(2)*p**S(2)*q**S(2)*x*(-e*h + f*g)/f - S(3)*b**S(3)*e*h*p**S(3)*q**S(3)*x/(S(4)*f) - S(3)*b**S(3)*h*p**S(3)*q**S(3)*x**S(2)/S(8) - S(6)*b**S(3)*p**S(3)*q**S(3)*x*(-e*h + f*g)/f + S(6)*b**S(3)*p**S(2)*q**S(2)*(e + f*x)*(-e*h + f*g)*log(c*(d*(e + f*x)**p)**q)/f**S(2) + S(3)*b**S(2)*h*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)**S(2)/(S(4)*f**S(2)) - S(3)*b*h*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(e + f*x)**S(2)/(S(4)*f**S(2)) - S(3)*b*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(e + f*x)*(-e*h + f*g)/f**S(2) + h*(a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(e + f*x)**S(2)/(S(2)*f**S(2)) + (a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(e + f*x)*(-e*h + f*g)/f**S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**S(3), x), x, S(6)*a*b**S(2)*p**S(2)*q**S(2)*x - S(6)*b**S(3)*p**S(3)*q**S(3)*x + S(6)*b**S(3)*p**S(2)*q**S(2)*(e + f*x)*log(c*(d*(e + f*x)**p)**q)/f - S(3)*b*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(e + f*x)/f + (a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(e + f*x)/f, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**S(3)/(g + h*x), x), x, S(6)*b**S(3)*p**S(3)*q**S(3)*polylog(S(4), -h*(e + f*x)/(-e*h + f*g))/h - S(6)*b**S(2)*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))*polylog(S(3), -h*(e + f*x)/(-e*h + f*g))/h + S(3)*b*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*polylog(S(2), -h*(e + f*x)/(-e*h + f*g))/h + (a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*log(f*(g + h*x)/(-e*h + f*g))/h, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**S(3)/(g + h*x)**S(2), x), x, S(6)*b**S(3)*f*p**S(3)*q**S(3)*polylog(S(3), -h*(e + f*x)/(-e*h + f*g))/(h*(-e*h + f*g)) - S(6)*b**S(2)*f*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))*polylog(S(2), -h*(e + f*x)/(-e*h + f*g))/(h*(-e*h + f*g)) - S(3)*b*f*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*log(f*(g + h*x)/(-e*h + f*g))/(h*(-e*h + f*g)) + (a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(e + f*x)/((g + h*x)*(-e*h + f*g)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**S(3)/(g + h*x)**S(3), x), x, S(3)*b**S(3)*f**S(2)*p**S(3)*q**S(3)*polylog(S(2), -h*(e + f*x)/(-e*h + f*g))/(h*(-e*h + f*g)**S(2)) + S(3)*b**S(3)*f**S(2)*p**S(3)*q**S(3)*polylog(S(3), -h*(e + f*x)/(-e*h + f*g))/(h*(-e*h + f*g)**S(2)) + S(3)*b**S(2)*f**S(2)*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))*log(f*(g + h*x)/(-e*h + f*g))/(h*(-e*h + f*g)**S(2)) - S(3)*b**S(2)*f**S(2)*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))*polylog(S(2), -h*(e + f*x)/(-e*h + f*g))/(h*(-e*h + f*g)**S(2)) - S(3)*b*f**S(2)*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*log(f*(g + h*x)/(-e*h + f*g))/(S(2)*h*(-e*h + f*g)**S(2)) - S(3)*b*f*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(e + f*x)/(S(2)*(g + h*x)*(-e*h + f*g)**S(2)) + f**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))**S(3)/(S(2)*h*(-e*h + f*g)**S(2)) - (a + b*log(c*(d*(e + f*x)**p)**q))**S(3)/(S(2)*h*(g + h*x)**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**S(3)/(g + h*x)**S(4), x), x, b**S(3)*f**S(3)*p**S(3)*q**S(3)*log(e + f*x)/(h*(-e*h + f*g)**S(3)) - b**S(3)*f**S(3)*p**S(3)*q**S(3)*log(g + h*x)/(h*(-e*h + f*g)**S(3)) + S(3)*b**S(3)*f**S(3)*p**S(3)*q**S(3)*polylog(S(2), -h*(e + f*x)/(-e*h + f*g))/(h*(-e*h + f*g)**S(3)) + S(2)*b**S(3)*f**S(3)*p**S(3)*q**S(3)*polylog(S(3), -h*(e + f*x)/(-e*h + f*g))/(h*(-e*h + f*g)**S(3)) + S(3)*b**S(2)*f**S(3)*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))*log(f*(g + h*x)/(-e*h + f*g))/(h*(-e*h + f*g)**S(3)) - S(2)*b**S(2)*f**S(3)*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))*polylog(S(2), -h*(e + f*x)/(-e*h + f*g))/(h*(-e*h + f*g)**S(3)) - b**S(2)*f**S(2)*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))/(h*(g + h*x)*(-e*h + f*g)**S(2)) - b*f**S(3)*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*log(f*(g + h*x)/(-e*h + f*g))/(h*(-e*h + f*g)**S(3)) - b*f**S(3)*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)/(S(2)*h*(-e*h + f*g)**S(3)) - b*f**S(2)*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(e + f*x)/((g + h*x)*(-e*h + f*g)**S(3)) + b*f*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)/(S(2)*h*(g + h*x)**S(2)*(-e*h + f*g)) + f**S(3)*(a + b*log(c*(d*(e + f*x)**p)**q))**S(3)/(S(3)*h*(-e*h + f*g)**S(3)) - (a + b*log(c*(d*(e + f*x)**p)**q))**S(3)/(S(3)*h*(g + h*x)**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**S(3)/(g + h*x)**S(5), x), x, S(3)*b**S(3)*f**S(4)*p**S(3)*q**S(3)*log(e + f*x)/(S(2)*h*(-e*h + f*g)**S(4)) - S(3)*b**S(3)*f**S(4)*p**S(3)*q**S(3)*log(g + h*x)/(S(2)*h*(-e*h + f*g)**S(4)) + S(11)*b**S(3)*f**S(4)*p**S(3)*q**S(3)*polylog(S(2), -h*(e + f*x)/(-e*h + f*g))/(S(4)*h*(-e*h + f*g)**S(4)) + S(3)*b**S(3)*f**S(4)*p**S(3)*q**S(3)*polylog(S(3), -h*(e + f*x)/(-e*h + f*g))/(S(2)*h*(-e*h + f*g)**S(4)) + b**S(3)*f**S(3)*p**S(3)*q**S(3)/(S(4)*h*(g + h*x)*(-e*h + f*g)**S(3)) + S(11)*b**S(2)*f**S(4)*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))*log(f*(g + h*x)/(-e*h + f*g))/(S(4)*h*(-e*h + f*g)**S(4)) - S(3)*b**S(2)*f**S(4)*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))*polylog(S(2), -h*(e + f*x)/(-e*h + f*g))/(S(2)*h*(-e*h + f*g)**S(4)) - S(5)*b**S(2)*f**S(3)*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))/(S(4)*h*(g + h*x)*(-e*h + f*g)**S(3)) - b**S(2)*f**S(2)*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))/(S(4)*h*(g + h*x)**S(2)*(-e*h + f*g)**S(2)) - S(3)*b*f**S(4)*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*log(f*(g + h*x)/(-e*h + f*g))/(S(4)*h*(-e*h + f*g)**S(4)) - S(5)*b*f**S(4)*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)/(S(8)*h*(-e*h + f*g)**S(4)) - S(3)*b*f**S(3)*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(e + f*x)/(S(4)*(g + h*x)*(-e*h + f*g)**S(4)) + S(3)*b*f**S(2)*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)/(S(8)*h*(g + h*x)**S(2)*(-e*h + f*g)**S(2)) + b*f*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)/(S(4)*h*(g + h*x)**S(3)*(-e*h + f*g)) + f**S(4)*(a + b*log(c*(d*(e + f*x)**p)**q))**S(3)/(S(4)*h*(-e*h + f*g)**S(4)) - (a + b*log(c*(d*(e + f*x)**p)**q))**S(3)/(S(4)*h*(g + h*x)**S(4)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**S(4), x), x, -S(24)*a*b**S(3)*p**S(3)*q**S(3)*x + S(24)*b**S(4)*p**S(4)*q**S(4)*x - S(24)*b**S(4)*p**S(3)*q**S(3)*(e + f*x)*log(c*(d*(e + f*x)**p)**q)/f + S(12)*b**S(2)*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(e + f*x)/f - S(4)*b*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(e + f*x)/f + (a + b*log(c*(d*(e + f*x)**p)**q))**S(4)*(e + f*x)/f, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**S(4)/(g + h*x), x), x, -S(24)*b**S(4)*p**S(4)*q**S(4)*polylog(S(5), -h*(e + f*x)/(-e*h + f*g))/h + S(24)*b**S(3)*p**S(3)*q**S(3)*(a + b*log(c*(d*(e + f*x)**p)**q))*polylog(S(4), -h*(e + f*x)/(-e*h + f*g))/h - S(12)*b**S(2)*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*polylog(S(3), -h*(e + f*x)/(-e*h + f*g))/h + S(4)*b*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*polylog(S(2), -h*(e + f*x)/(-e*h + f*g))/h + (a + b*log(c*(d*(e + f*x)**p)**q))**S(4)*log(f*(g + h*x)/(-e*h + f*g))/h, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**S(4)/(g + h*x)**S(2), x), x, -S(24)*b**S(4)*f*p**S(4)*q**S(4)*polylog(S(4), -h*(e + f*x)/(-e*h + f*g))/(h*(-e*h + f*g)) + S(24)*b**S(3)*f*p**S(3)*q**S(3)*(a + b*log(c*(d*(e + f*x)**p)**q))*polylog(S(3), -h*(e + f*x)/(-e*h + f*g))/(h*(-e*h + f*g)) - S(12)*b**S(2)*f*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*polylog(S(2), -h*(e + f*x)/(-e*h + f*g))/(h*(-e*h + f*g)) - S(4)*b*f*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*log(f*(g + h*x)/(-e*h + f*g))/(h*(-e*h + f*g)) + (a + b*log(c*(d*(e + f*x)**p)**q))**S(4)*(e + f*x)/((g + h*x)*(-e*h + f*g)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a + b*x), x), x, -x + (a + b*x)*log(a + b*x)/b, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a + b*x)**S(2), x), x, S(2)*x + (a + b*x)*log(a + b*x)**S(2)/b - (S(2)*a + S(2)*b*x)*log(a + b*x)/b, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a + b*x)**S(3), x), x, -S(6)*x + (a + b*x)*log(a + b*x)**S(3)/b - (S(3)*a + S(3)*b*x)*log(a + b*x)**S(2)/b + (S(6)*a + S(6)*b*x)*log(a + b*x)/b, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a + b*x + c*x), x), x, -x + (a + x*(b + c))*log(a + x*(b + c))/(b + c), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a + b*x + c*x)**S(2), x), x, S(2)*x + (a + x*(b + c))*log(a + x*(b + c))**S(2)/(b + c) - (S(2)*a + S(2)*x*(b + c))*log(a + x*(b + c))/(b + c), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a + b*x + c*x)**S(3), x), x, -S(6)*x + (a + x*(b + c))*log(a + x*(b + c))**S(3)/(b + c) - (S(3)*a + S(3)*x*(b + c))*log(a + x*(b + c))**S(2)/(b + c) + (S(6)*a + S(6)*x*(b + c))*log(a + x*(b + c))/(b + c), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(-g*(d + e*x)/(-d*g + e*f))/(f + g*x), x), x, -polylog(S(2), e*(f + g*x)/(-d*g + e*f))/g, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(b*x + S(1))/x, x), x, -polylog(S(2), -b*x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)*log(c*(a + b*x)**n)**S(2), x), x, -S(5)*a**S(3)*n**S(2)*log(a + b*x)/(S(9)*b**S(3)) + a**S(3)*log(c*(a + b*x)**n)**S(2)/(S(3)*b**S(3)) + S(11)*a**S(2)*n**S(2)*x/(S(9)*b**S(2)) - S(2)*a**S(2)*n*(a + b*x)*log(c*(a + b*x)**n)/(S(3)*b**S(3)) - S(5)*a*n**S(2)*x**S(2)/(S(18)*b) + a*n*x**S(2)*log(c*(a + b*x)**n)/(S(3)*b) + S(2)*n**S(2)*x**S(3)/S(27) - S(2)*n*x**S(3)*log(c*(a + b*x)**n)/S(9) + x**S(3)*log(c*(a + b*x)**n)**S(2)/S(3), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x)**n)**S(2)/x**S(4), x), x, -log(c*(a + b*x)**n)**S(2)/(S(3)*x**S(3)) - b*n*log(c*(a + b*x)**n)/(S(3)*a*x**S(2)) - b**S(2)*n**S(2)/(S(3)*a**S(2)*x) + S(2)*b**S(2)*n*log(c*(a + b*x)**n)/(S(3)*a**S(2)*x) - b**S(3)*n**S(2)*log(x)/a**S(3) + b**S(3)*n**S(2)*log(a + b*x)/a**S(3) + S(2)*b**S(3)*n**S(2)*polylog(S(2), (a + b*x)/a)/(S(3)*a**S(3)) + S(2)*b**S(3)*n*log(c*(a + b*x)**n)*log(-b*x/a)/(S(3)*a**S(3)) - b**S(3)*log(c*(a + b*x)**n)**S(2)/(S(3)*a**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)*log(c*(a + b*x)**n)**S(3), x), x, S(19)*a**S(3)*n**S(3)*log(a + b*x)/(S(18)*b**S(3)) - S(5)*a**S(3)*n*log(c*(a + b*x)**n)**S(2)/(S(6)*b**S(3)) + a**S(3)*log(c*(a + b*x)**n)**S(3)/(S(3)*b**S(3)) - S(85)*a**S(2)*n**S(3)*x/(S(18)*b**S(2)) + S(11)*a**S(2)*n**S(2)*(a + b*x)*log(c*(a + b*x)**n)/(S(3)*b**S(3)) - a**S(2)*n*(a + b*x)*log(c*(a + b*x)**n)**S(2)/b**S(3) + S(19)*a*n**S(3)*x**S(2)/(S(36)*b) - S(5)*a*n**S(2)*x**S(2)*log(c*(a + b*x)**n)/(S(6)*b) + a*n*x**S(2)*log(c*(a + b*x)**n)**S(2)/(S(2)*b) - S(2)*n**S(3)*x**S(3)/S(27) + S(2)*n**S(2)*x**S(3)*log(c*(a + b*x)**n)/S(9) - n*x**S(3)*log(c*(a + b*x)**n)**S(2)/S(3) + x**S(3)*log(c*(a + b*x)**n)**S(3)/S(3), expand=True, _diff=True, _numerical=True) or rubi_test(rubi_integrate(x**S(2)*log(c*(a + b*x)**n)**S(3), x), x, -S(9)*a**S(2)*n**S(3)*x/(S(2)*b**S(2)) + S(6)*a**S(2)*n**S(2)*(a + b*x)*log(c*(a + b*x)**n)/b**S(3) - S(3)*a**S(2)*n*(a + b*x)*log(c*(a + b*x)**n)**S(2)/b**S(3) + a**S(2)*(a + b*x)*log(c*(a + b*x)**n)**S(3)/b**S(3) + S(3)*a*n**S(3)*x**S(2)/(S(4)*b) - S(3)*a*n**S(2)*(a + b*x)**S(2)*log(c*(a + b*x)**n)/(S(2)*b**S(3)) + S(3)*a*n*(a + b*x)**S(2)*log(c*(a + b*x)**n)**S(2)/(S(2)*b**S(3)) - a*(a + b*x)**S(2)*log(c*(a + b*x)**n)**S(3)/b**S(3) - S(2)*n**S(3)*(a + b*x)**S(3)/(S(27)*b**S(3)) + S(2)*n**S(2)*(a + b*x)**S(3)*log(c*(a + b*x)**n)/(S(9)*b**S(3)) - n*(a + b*x)**S(3)*log(c*(a + b*x)**n)**S(2)/(S(3)*b**S(3)) + (a + b*x)**S(3)*log(c*(a + b*x)**n)**S(3)/(S(3)*b**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((g + h*x)**m/(a + b*log(c*(d*(e + f*x)**p)**q)), x), x, Integral((g + h*x)**m/(a + b*log(c*(d*(e + f*x)**p)**q)), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((g + h*x)**S(3)/(a + b*log(c*(d*(e + f*x)**p)**q)), x), x, h**S(3)*(c*(d*(e + f*x)**p)**q)**(-S(4)/(p*q))*(e + f*x)**S(4)*exp(-S(4)*a/(b*p*q))*Ei((S(4)*a + S(4)*b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))/(b*f**S(4)*p*q) + S(3)*h**S(2)*(c*(d*(e + f*x)**p)**q)**(-S(3)/(p*q))*(e + f*x)**S(3)*(-e*h + f*g)*exp(-S(3)*a/(b*p*q))*Ei((S(3)*a + S(3)*b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))/(b*f**S(4)*p*q) + S(3)*h*(c*(d*(e + f*x)**p)**q)**(-S(2)/(p*q))*(e + f*x)**S(2)*(-e*h + f*g)**S(2)*exp(-S(2)*a/(b*p*q))*Ei((S(2)*a + S(2)*b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))/(b*f**S(4)*p*q) + (c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*(e + f*x)*(-e*h + f*g)**S(3)*exp(-a/(b*p*q))*Ei((a + b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))/(b*f**S(4)*p*q), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((g + h*x)**S(2)/(a + b*log(c*(d*(e + f*x)**p)**q)), x), x, h**S(2)*(c*(d*(e + f*x)**p)**q)**(-S(3)/(p*q))*(e + f*x)**S(3)*exp(-S(3)*a/(b*p*q))*Ei((S(3)*a + S(3)*b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))/(b*f**S(3)*p*q) + S(2)*h*(c*(d*(e + f*x)**p)**q)**(-S(2)/(p*q))*(e + f*x)**S(2)*(-e*h + f*g)*exp(-S(2)*a/(b*p*q))*Ei((S(2)*a + S(2)*b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))/(b*f**S(3)*p*q) + (c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*(e + f*x)*(-e*h + f*g)**S(2)*exp(-a/(b*p*q))*Ei((a + b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))/(b*f**S(3)*p*q), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((g + h*x)/(a + b*log(c*(d*(e + f*x)**p)**q)), x), x, h*(c*(d*(e + f*x)**p)**q)**(-S(2)/(p*q))*(e + f*x)**S(2)*exp(-S(2)*a/(b*p*q))*Ei((S(2)*a + S(2)*b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))/(b*f**S(2)*p*q) + (c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*(e + f*x)*(-e*h + f*g)*exp(-a/(b*p*q))*Ei((a + b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))/(b*f**S(2)*p*q), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(a + b*log(c*(d*(e + f*x)**p)**q)), x), x, (c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*(e + f*x)*exp(-a/(b*p*q))*Ei((a + b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))/(b*f*p*q), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/((a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)), x), x, Integral(S(1)/((a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/((a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)**S(2)), x), x, Integral(S(1)/((a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)**S(2)), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((g + h*x)**m/(a + b*log(c*(d*(e + f*x)**p)**q))**S(2), x), x, Integral((g + h*x)**m/(a + b*log(c*(d*(e + f*x)**p)**q))**S(2), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((g + h*x)**S(3)/(a + b*log(c*(d*(e + f*x)**p)**q))**S(2), x), x, -(e + f*x)*(g + h*x)**S(3)/(b*f*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))) + S(4)*h**S(3)*(c*(d*(e + f*x)**p)**q)**(-S(4)/(p*q))*(e + f*x)**S(4)*exp(-S(4)*a/(b*p*q))*Ei((S(4)*a + S(4)*b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))/(b**S(2)*f**S(4)*p**S(2)*q**S(2)) + S(9)*h**S(2)*(c*(d*(e + f*x)**p)**q)**(-S(3)/(p*q))*(e + f*x)**S(3)*(-e*h + f*g)*exp(-S(3)*a/(b*p*q))*Ei((S(3)*a + S(3)*b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))/(b**S(2)*f**S(4)*p**S(2)*q**S(2)) + S(6)*h*(c*(d*(e + f*x)**p)**q)**(-S(2)/(p*q))*(e + f*x)**S(2)*(-e*h + f*g)**S(2)*exp(-S(2)*a/(b*p*q))*Ei((S(2)*a + S(2)*b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))/(b**S(2)*f**S(4)*p**S(2)*q**S(2)) + (c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*(e + f*x)*(-e*h + f*g)**S(3)*exp(-a/(b*p*q))*Ei((a + b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))/(b**S(2)*f**S(4)*p**S(2)*q**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((g + h*x)**S(2)/(a + b*log(c*(d*(e + f*x)**p)**q))**S(2), x), x, -(e + f*x)*(g + h*x)**S(2)/(b*f*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))) + S(3)*h**S(2)*(c*(d*(e + f*x)**p)**q)**(-S(3)/(p*q))*(e + f*x)**S(3)*exp(-S(3)*a/(b*p*q))*Ei((S(3)*a + S(3)*b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))/(b**S(2)*f**S(3)*p**S(2)*q**S(2)) + S(4)*h*(c*(d*(e + f*x)**p)**q)**(-S(2)/(p*q))*(e + f*x)**S(2)*(-e*h + f*g)*exp(-S(2)*a/(b*p*q))*Ei((S(2)*a + S(2)*b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))/(b**S(2)*f**S(3)*p**S(2)*q**S(2)) + (c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*(e + f*x)*(-e*h + f*g)**S(2)*exp(-a/(b*p*q))*Ei((a + b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))/(b**S(2)*f**S(3)*p**S(2)*q**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((g + h*x)/(a + b*log(c*(d*(e + f*x)**p)**q))**S(2), x), x, -(e + f*x)*(g + h*x)/(b*f*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))) + S(2)*h*(c*(d*(e + f*x)**p)**q)**(-S(2)/(p*q))*(e + f*x)**S(2)*exp(-S(2)*a/(b*p*q))*Ei((S(2)*a + S(2)*b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))/(b**S(2)*f**S(2)*p**S(2)*q**S(2)) + (c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*(e + f*x)*(-e*h + f*g)*exp(-a/(b*p*q))*Ei((a + b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))/(b**S(2)*f**S(2)*p**S(2)*q**S(2)), expand=True, _diff=True, _numerical=True)
# --- (g + h*x)**m / (a + b*log(c*(d*(e + f*x)**p)**q))**n for n = 2, 3 ---
# Closed forms are expressed via the exponential integral Ei; integrands with
# a 1/(g + h*x) or 1/(g + h*x)**2 factor have no closed form here, so the
# expected result is the unevaluated Integral itself.
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**(S(-2)), x), x, (-e - f*x)/(b*f*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))) + (c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*(e + f*x)*exp(-a/(b*p*q))*Ei((a + b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))/(b**S(2)*f*p**S(2)*q**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/((a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(g + h*x)), x), x, Integral(S(1)/((a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(g + h*x)), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/((a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(g + h*x)**S(2)), x), x, Integral(S(1)/((a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(g + h*x)**S(2)), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((g + h*x)**m/(a + b*log(c*(d*(e + f*x)**p)**q))**S(3), x), x, Integral((g + h*x)**m/(a + b*log(c*(d*(e + f*x)**p)**q))**S(3), x), expand=True, _diff=True, _numerical=True)
# The next test is disabled (left as a comment) because it is too slow in rubi_test.
# long time in rubi_test assert rubi_test(rubi_integrate((g + h*x)**S(3)/(a + b*log(c*(d*(e + f*x)**p)**q))**S(3), x), x, -(e/S(2) + f*x/S(2))*(g + h*x)**S(3)/(b*f*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)) - (S(2)*e + S(2)*f*x)*(g + h*x)**S(3)/(b**S(2)*f*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))) + (e + f*x)*(g + h*x)**S(2)*(-S(3)*e*h/S(2) + S(3)*f*g/S(2))/(b**S(2)*f**S(2)*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))) + S(8)*h**S(3)*(c*(d*(e + f*x)**p)**q)**(-S(4)/(p*q))*(e + f*x)**S(4)*exp(-S(4)*a/(b*p*q))*Ei((S(4)*a + S(4)*b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))/(b**S(3)*f**S(4)*p**S(3)*q**S(3)) + S(27)*h**S(2)*(c*(d*(e + f*x)**p)**q)**(-S(3)/(p*q))*(e + f*x)**S(3)*(-e*h + f*g)*exp(-S(3)*a/(b*p*q))*Ei((S(3)*a + S(3)*b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))/(S(2)*b**S(3)*f**S(4)*p**S(3)*q**S(3)) + S(6)*h*(c*(d*(e + f*x)**p)**q)**(-S(2)/(p*q))*(e + f*x)**S(2)*(-e*h + f*g)**S(2)*exp(-S(2)*a/(b*p*q))*Ei((S(2)*a + S(2)*b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))/(b**S(3)*f**S(4)*p**S(3)*q**S(3)) + (c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*(e + f*x)*(-e*h + f*g)**S(3)*exp(-a/(b*p*q))*Ei((a + b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))/(S(2)*b**S(3)*f**S(4)*p**S(3)*q**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((g + h*x)**S(2)/(a + b*log(c*(d*(e + f*x)**p)**q))**S(3), x), x, -(e/S(2) + f*x/S(2))*(g + h*x)**S(2)/(b*f*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)) - (S(3)*e/S(2) + S(3)*f*x/S(2))*(g + h*x)**S(2)/(b**S(2)*f*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))) + (e + f*x)*(g + h*x)*(-e*h + f*g)/(b**S(2)*f**S(2)*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))) + S(9)*h**S(2)*(c*(d*(e + f*x)**p)**q)**(-S(3)/(p*q))*(e + f*x)**S(3)*exp(-S(3)*a/(b*p*q))*Ei((S(3)*a + S(3)*b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))/(S(2)*b**S(3)*f**S(3)*p**S(3)*q**S(3)) + S(4)*h*(c*(d*(e + f*x)**p)**q)**(-S(2)/(p*q))*(e + f*x)**S(2)*(-e*h + f*g)*exp(-S(2)*a/(b*p*q))*Ei((S(2)*a + S(2)*b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))/(b**S(3)*f**S(3)*p**S(3)*q**S(3)) + (c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*(e + f*x)*(-e*h + f*g)**S(2)*exp(-a/(b*p*q))*Ei((a + b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))/(S(2)*b**S(3)*f**S(3)*p**S(3)*q**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((g + h*x)/(a + b*log(c*(d*(e + f*x)**p)**q))**S(3), x), x, -(e/S(2) + f*x/S(2))*(g + h*x)/(b*f*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)) - (e + f*x)*(g + h*x)/(b**S(2)*f*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))) + (e + f*x)*(-e*h/S(2) + f*g/S(2))/(b**S(2)*f**S(2)*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))) + S(2)*h*(c*(d*(e + f*x)**p)**q)**(-S(2)/(p*q))*(e + f*x)**S(2)*exp(-S(2)*a/(b*p*q))*Ei((S(2)*a + S(2)*b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))/(b**S(3)*f**S(2)*p**S(3)*q**S(3)) + (c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*(e + f*x)*(-e*h/S(2) + f*g/S(2))*exp(-a/(b*p*q))*Ei((a + b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))/(b**S(3)*f**S(2)*p**S(3)*q**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**(S(-3)), x), x, (-e/S(2) - f*x/S(2))/(b*f*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)) + (-e/S(2) - f*x/S(2))/(b**S(2)*f*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))) + (c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*(e/S(2) + f*x/S(2))*exp(-a/(b*p*q))*Ei((a + b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))/(b**S(3)*f*p**S(3)*q**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/((a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(g + h*x)), x), x, Integral(S(1)/((a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(g + h*x)), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/((a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(g + h*x)**S(2)), x), x, Integral(S(1)/((a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(g + h*x)**S(2)), x), expand=True, _diff=True, _numerical=True)
# --- sqrt(a + b*log(c*(d*(e + f*x)**p)**q)) * (g + h*x)**m ---
# Positive integer powers of (g + h*x) have closed forms built from the
# imaginary error function erfi; the symbolic-m case and the 1/(g + h*x)
# case stay unevaluated, and negative integer powers reduce to a boundary
# term plus a remaining unevaluated Integral.
assert rubi_test(rubi_integrate(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)**m, x), x, Integral(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)**m, x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)**S(4), x), x, -sqrt(S(5))*sqrt(pi)*sqrt(b)*h**S(4)*sqrt(p)*sqrt(q)*(c*(d*(e + f*x)**p)**q)**(-S(5)/(p*q))*(e + f*x)**S(5)*exp(-S(5)*a/(b*p*q))*erfi(sqrt(S(5))*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(50)*f**S(5)) - sqrt(pi)*sqrt(b)*h**S(3)*sqrt(p)*sqrt(q)*(c*(d*(e + f*x)**p)**q)**(-S(4)/(p*q))*(e + f*x)**S(4)*(-e*h + f*g)*exp(-S(4)*a/(b*p*q))*erfi(S(2)*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(4)*f**S(5)) - sqrt(S(3))*sqrt(pi)*sqrt(b)*h**S(2)*sqrt(p)*sqrt(q)*(c*(d*(e + f*x)**p)**q)**(-S(3)/(p*q))*(e + f*x)**S(3)*(-e*h + f*g)**S(2)*exp(-S(3)*a/(b*p*q))*erfi(sqrt(S(3))*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(3)*f**S(5)) - sqrt(S(2))*sqrt(pi)*sqrt(b)*h*sqrt(p)*sqrt(q)*(c*(d*(e + f*x)**p)**q)**(-S(2)/(p*q))*(e + f*x)**S(2)*(-e*h + f*g)**S(3)*exp(-S(2)*a/(b*p*q))*erfi(sqrt(S(2))*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(2)*f**S(5)) - sqrt(pi)*sqrt(b)*sqrt(p)*sqrt(q)*(c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*(e + f*x)*(-e*h + f*g)**S(4)*exp(-a/(b*p*q))*erfi(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(2)*f**S(5)) + h**S(4)*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)**S(5)/(S(5)*f**S(5)) + h**S(3)*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)**S(4)*(-e*h + f*g)/f**S(5) + S(2)*h**S(2)*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)**S(3)*(-e*h + f*g)**S(2)/f**S(5) + S(2)*h*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)**S(2)*(-e*h + f*g)**S(3)/f**S(5) + sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)*(-e*h + f*g)**S(4)/f**S(5), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)**S(3), x), x, -sqrt(pi)*sqrt(b)*h**S(3)*sqrt(p)*sqrt(q)*(c*(d*(e + f*x)**p)**q)**(-S(4)/(p*q))*(e + f*x)**S(4)*exp(-S(4)*a/(b*p*q))*erfi(S(2)*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(16)*f**S(4)) - sqrt(S(3))*sqrt(pi)*sqrt(b)*h**S(2)*sqrt(p)*sqrt(q)*(c*(d*(e + f*x)**p)**q)**(-S(3)/(p*q))*(e + f*x)**S(3)*(-e*h + f*g)*exp(-S(3)*a/(b*p*q))*erfi(sqrt(S(3))*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(6)*f**S(4)) - S(3)*sqrt(S(2))*sqrt(pi)*sqrt(b)*h*sqrt(p)*sqrt(q)*(c*(d*(e + f*x)**p)**q)**(-S(2)/(p*q))*(e + f*x)**S(2)*(-e*h + f*g)**S(2)*exp(-S(2)*a/(b*p*q))*erfi(sqrt(S(2))*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(8)*f**S(4)) - sqrt(pi)*sqrt(b)*sqrt(p)*sqrt(q)*(c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*(e + f*x)*(-e*h + f*g)**S(3)*exp(-a/(b*p*q))*erfi(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(2)*f**S(4)) + h**S(3)*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)**S(4)/(S(4)*f**S(4)) + h**S(2)*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)**S(3)*(-e*h + f*g)/f**S(4) + S(3)*h*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)**S(2)*(-e*h + f*g)**S(2)/(S(2)*f**S(4)) + sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)*(-e*h + f*g)**S(3)/f**S(4), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)**S(2), x), x, -sqrt(S(3))*sqrt(pi)*sqrt(b)*h**S(2)*sqrt(p)*sqrt(q)*(c*(d*(e + f*x)**p)**q)**(-S(3)/(p*q))*(e + f*x)**S(3)*exp(-S(3)*a/(b*p*q))*erfi(sqrt(S(3))*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(18)*f**S(3)) - sqrt(S(2))*sqrt(pi)*sqrt(b)*h*sqrt(p)*sqrt(q)*(c*(d*(e + f*x)**p)**q)**(-S(2)/(p*q))*(e + f*x)**S(2)*(-e*h + f*g)*exp(-S(2)*a/(b*p*q))*erfi(sqrt(S(2))*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(4)*f**S(3)) - sqrt(pi)*sqrt(b)*sqrt(p)*sqrt(q)*(c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*(e + f*x)*(-e*h + f*g)**S(2)*exp(-a/(b*p*q))*erfi(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(2)*f**S(3)) + h**S(2)*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)**S(3)/(S(3)*f**S(3)) + h*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)**S(2)*(-e*h + f*g)/f**S(3) + sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)*(-e*h + f*g)**S(2)/f**S(3), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x), x), x, -sqrt(S(2))*sqrt(pi)*sqrt(b)*h*sqrt(p)*sqrt(q)*(c*(d*(e + f*x)**p)**q)**(-S(2)/(p*q))*(e + f*x)**S(2)*exp(-S(2)*a/(b*p*q))*erfi(sqrt(S(2))*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(8)*f**S(2)) - sqrt(pi)*sqrt(b)*sqrt(p)*sqrt(q)*(c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*(e + f*x)*(-e*h/S(2) + f*g/S(2))*exp(-a/(b*p*q))*erfi(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/f**S(2) + h*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)**S(2)/(S(2)*f**S(2)) + sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)*(-e*h + f*g)/f**S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(sqrt(a + b*log(c*(d*(e + f*x)**p)**q)), x), x, sqrt(pi)*sqrt(b)*sqrt(p)*sqrt(q)*(c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*(-e/S(2) - f*x/S(2))*exp(-a/(b*p*q))*erfi(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/f + sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)/f, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(g + h*x), x), x, Integral(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(g + h*x), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(g + h*x)**S(2), x), x, -b*f*p*q*Integral(S(1)/(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)), x)/(S(2)*(-e*h + f*g)) + sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)/((g + h*x)*(-e*h + f*g)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(g + h*x)**S(3), x), x, b*f*p*q*Integral(S(1)/(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)*(g + h*x)**S(2)), x)/(S(4)*h) - sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(S(2)*h*(g + h*x)**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(g + h*x)**S(4), x), x, b*f*p*q*Integral(S(1)/(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)*(g + h*x)**S(3)), x)/(S(6)*h) - sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(S(3)*h*(g + h*x)**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(g + h*x)**S(5), x), x, b*f*p*q*Integral(S(1)/(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)*(g + h*x)**S(4)), x)/(S(8)*h) - sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(S(4)*h*(g + h*x)**S(4)), expand=True, _diff=True, _numerical=True)
# --- (a + b*log(c*(d*(e + f*x)**p)**q))**(3/2) * (g + h*x)**m ---
# Same structure as the sqrt family above, one power higher: closed forms
# mix erfi terms with sqrt(...) and (...)**(3/2) boundary terms; symbolic m
# and 1/(g + h*x) remain unevaluated, higher negative powers reduce to a
# boundary term plus a residual Integral.
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)*(g + h*x)**m, x), x, Integral((a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)*(g + h*x)**m, x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)*(g + h*x)**S(3), x), x, S(3)*sqrt(pi)*b**(S(3)/2)*h**S(3)*p**(S(3)/2)*q**(S(3)/2)*(c*(d*(e + f*x)**p)**q)**(-S(4)/(p*q))*(e + f*x)**S(4)*exp(-S(4)*a/(b*p*q))*erfi(S(2)*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(128)*f**S(4)) + sqrt(S(3))*sqrt(pi)*b**(S(3)/2)*h**S(2)*p**(S(3)/2)*q**(S(3)/2)*(c*(d*(e + f*x)**p)**q)**(-S(3)/(p*q))*(e + f*x)**S(3)*(-e*h + f*g)*exp(-S(3)*a/(b*p*q))*erfi(sqrt(S(3))*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(12)*f**S(4)) + S(9)*sqrt(S(2))*sqrt(pi)*b**(S(3)/2)*h*p**(S(3)/2)*q**(S(3)/2)*(c*(d*(e + f*x)**p)**q)**(-S(2)/(p*q))*(e + f*x)**S(2)*(-e*h + f*g)**S(2)*exp(-S(2)*a/(b*p*q))*erfi(sqrt(S(2))*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(32)*f**S(4)) + S(3)*sqrt(pi)*b**(S(3)/2)*p**(S(3)/2)*q**(S(3)/2)*(c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*(e + f*x)*(-e*h + f*g)**S(3)*exp(-a/(b*p*q))*erfi(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(4)*f**S(4)) - S(3)*b*h**S(3)*p*q*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)**S(4)/(S(32)*f**S(4)) - b*h**S(2)*p*q*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)**S(3)*(-e*h + f*g)/(S(2)*f**S(4)) - S(9)*b*h*p*q*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)**S(2)*(-e*h + f*g)**S(2)/(S(8)*f**S(4)) - S(3)*b*p*q*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)*(-e*h + f*g)**S(3)/(S(2)*f**S(4)) + h**S(3)*(a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)*(e + f*x)**S(4)/(S(4)*f**S(4)) + h**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)*(e + f*x)**S(3)*(-e*h + f*g)/f**S(4) + S(3)*h*(a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)*(e + f*x)**S(2)*(-e*h + f*g)**S(2)/(S(2)*f**S(4)) + (a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)*(e + f*x)*(-e*h + f*g)**S(3)/f**S(4), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)*(g + h*x)**S(2), x), x, sqrt(S(3))*sqrt(pi)*b**(S(3)/2)*h**S(2)*p**(S(3)/2)*q**(S(3)/2)*(c*(d*(e + f*x)**p)**q)**(-S(3)/(p*q))*(e + f*x)**S(3)*exp(-S(3)*a/(b*p*q))*erfi(sqrt(S(3))*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(36)*f**S(3)) + S(3)*sqrt(S(2))*sqrt(pi)*b**(S(3)/2)*h*p**(S(3)/2)*q**(S(3)/2)*(c*(d*(e + f*x)**p)**q)**(-S(2)/(p*q))*(e + f*x)**S(2)*(-e*h + f*g)*exp(-S(2)*a/(b*p*q))*erfi(sqrt(S(2))*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(16)*f**S(3)) + S(3)*sqrt(pi)*b**(S(3)/2)*p**(S(3)/2)*q**(S(3)/2)*(c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*(e + f*x)*(-e*h + f*g)**S(2)*exp(-a/(b*p*q))*erfi(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(4)*f**S(3)) - b*h**S(2)*p*q*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)**S(3)/(S(6)*f**S(3)) - S(3)*b*h*p*q*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)**S(2)*(-e*h + f*g)/(S(4)*f**S(3)) - S(3)*b*p*q*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)*(-e*h + f*g)**S(2)/(S(2)*f**S(3)) + h**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)*(e + f*x)**S(3)/(S(3)*f**S(3)) + h*(a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)*(e + f*x)**S(2)*(-e*h + f*g)/f**S(3) + (a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)*(e + f*x)*(-e*h + f*g)**S(2)/f**S(3), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)*(g + h*x), x), x, S(3)*sqrt(S(2))*sqrt(pi)*b**(S(3)/2)*h*p**(S(3)/2)*q**(S(3)/2)*(c*(d*(e + f*x)**p)**q)**(-S(2)/(p*q))*(e + f*x)**S(2)*exp(-S(2)*a/(b*p*q))*erfi(sqrt(S(2))*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(32)*f**S(2)) + S(3)*sqrt(pi)*b**(S(3)/2)*p**(S(3)/2)*q**(S(3)/2)*(c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*(e + f*x)*(-e*h + f*g)*exp(-a/(b*p*q))*erfi(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(4)*f**S(2)) - S(3)*b*h*p*q*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)**S(2)/(S(8)*f**S(2)) - S(3)*b*p*q*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)*(-e*h + f*g)/(S(2)*f**S(2)) + h*(a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)*(e + f*x)**S(2)/(S(2)*f**S(2)) + (a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)*(e + f*x)*(-e*h + f*g)/f**S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2), x), x, S(3)*sqrt(pi)*b**(S(3)/2)*p**(S(3)/2)*q**(S(3)/2)*(c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*(e + f*x)*exp(-a/(b*p*q))*erfi(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(4)*f) - S(3)*b*p*q*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)/(S(2)*f) + (a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)*(e + f*x)/f, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)/(g + h*x), x), x, Integral((a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)/(g + h*x), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)/(g + h*x)**S(2), x), x, -S(3)*b*f*p*q*Integral(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(g + h*x), x)/(S(2)*(-e*h + f*g)) + (a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)*(e + f*x)/((g + h*x)*(-e*h + f*g)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)/(g + h*x)**S(3), x), x, S(3)*b*f*p*q*Integral(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/((e + f*x)*(g + h*x)**S(2)), x)/(S(4)*h) - (a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)/(S(2)*h*(g + h*x)**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)/(g + h*x)**S(4), x), x, b*f*p*q*Integral(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/((e + f*x)*(g + h*x)**S(3)), x)/(S(2)*h) - (a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)/(S(3)*h*(g + h*x)**S(3)), expand=True, _diff=True, _numerical=True)
# --- (a + b*log(c*(d*(e + f*x)**p)**q))**(5/2) * (g + h*x)**m ---
# One power higher again: closed forms combine erfi terms with sqrt, 3/2 and
# 5/2 boundary terms.  NOTE: the (g + h*x)**3 expected result below is one
# statement continued across two physical lines (the break falls inside a
# parenthesized expression, so it is an implicit line continuation).
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**(S(5)/2)*(g + h*x)**m, x), x, Integral((a + b*log(c*(d*(e + f*x)**p)**q))**(S(5)/2)*(g + h*x)**m, x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**(S(5)/2)*(g + h*x)**S(3), x), x, -S(15)*sqrt(pi)*b**(S(5)/2)*h**S(3)*p**(S(5)/2)*q**(S(5)/2)*(c*(d*(e + f*x)**p)**q)**(-S(4)/(p*q))*(e + f*x)**S(4)*exp(-S(4)*a/(b*p*q))*erfi(S(2)*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(1024)*f**S(4)) - S(5)*sqrt(S(3))*sqrt(pi)*b**(S(5)/2)*h**S(2)*p**(S(5)/2)*q**(S(5)/2)*(c*(d*(e + f*x)**p)**q)**(-S(3)/(p*q))*(e + f*x)**S(3)*(-e*h + f*g)*exp(-S(3)*a/(b*p*q))*erfi(sqrt(S(3))*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(72)*f**S(4)) - S(45)*sqrt(S(2))*sqrt(pi)*b**(S(5)/2)*h*p**(S(5)/2)*q**(S(5)/2)*(c*(d*(e + f*x)**p)**q)**(-S(2)/(p*q))*(e + f*x)**S(2)*(-e*h + f*g)**S(2)*exp(-S(2)*a/(b*p*q))*erfi(sqrt(S(2))*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(128)*f**S(4)) - S(15)*sqrt(pi)*b**(S(5)/2)*p**(S(5)/2)*q**(S(5)/2)*(c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*(e + f*x)*(-e*h + f*g)**S(3)*exp(-a/(b*p*q))*erfi(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(8)*f**S(4)) + S(15)*b**S(2)*h**S(3)*p**S(2)*q**S(2)*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)**S(4)/(S(256)*f**S(4)) + S(5)*b**S(2)*h**S(2)*p**S(2)*q**S(2)*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)**S(3)*(-e*h + f*g)/(S(12)*f**S(4)) + S(45)*b**S(2)*h*p**S(2)*q**S(2)*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)**S(2)*(-e*h + f*g)**S(2)/(S(32)*f**S(4)) + S(15)*b**S(2)*p**S(2)*q**S(2)*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)*(-e*h + f*g)**S(3)/(S(4)*f**S(4)) - S(5)*b*h**S(3)*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)*(e + f*x)**S(4)/(S(32)*f**S(4)) - S(5)*b*h**S(2)*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)*(e + f*x)**S(3)*(-e*h + f*g)/(S(6)*f**S(4)) - S(15)*b*h*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)*(e + f*x)**S(2)*(-e*h + f*g)**S(2)/(S(8)*f**S(4)) - S(5)*b*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)*(e + f*x)*(-e*h + f*g)**S(3)/(S(2)*f**S(4)) + h**S(3)*(a + b*log(c*(d*(e + 
f*x)**p)**q))**(S(5)/2)*(e + f*x)**S(4)/(S(4)*f**S(4)) + h**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))**(S(5)/2)*(e + f*x)**S(3)*(-e*h + f*g)/f**S(4) + S(3)*h*(a + b*log(c*(d*(e + f*x)**p)**q))**(S(5)/2)*(e + f*x)**S(2)*(-e*h + f*g)**S(2)/(S(2)*f**S(4)) + (a + b*log(c*(d*(e + f*x)**p)**q))**(S(5)/2)*(e + f*x)*(-e*h + f*g)**S(3)/f**S(4), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**(S(5)/2)*(g + h*x)**S(2), x), x, -S(5)*sqrt(S(3))*sqrt(pi)*b**(S(5)/2)*h**S(2)*p**(S(5)/2)*q**(S(5)/2)*(c*(d*(e + f*x)**p)**q)**(-S(3)/(p*q))*(e + f*x)**S(3)*exp(-S(3)*a/(b*p*q))*erfi(sqrt(S(3))*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(216)*f**S(3)) - S(15)*sqrt(S(2))*sqrt(pi)*b**(S(5)/2)*h*p**(S(5)/2)*q**(S(5)/2)*(c*(d*(e + f*x)**p)**q)**(-S(2)/(p*q))*(e + f*x)**S(2)*(-e*h + f*g)*exp(-S(2)*a/(b*p*q))*erfi(sqrt(S(2))*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(64)*f**S(3)) - S(15)*sqrt(pi)*b**(S(5)/2)*p**(S(5)/2)*q**(S(5)/2)*(c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*(e + f*x)*(-e*h + f*g)**S(2)*exp(-a/(b*p*q))*erfi(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(8)*f**S(3)) + S(5)*b**S(2)*h**S(2)*p**S(2)*q**S(2)*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)**S(3)/(S(36)*f**S(3)) + S(15)*b**S(2)*h*p**S(2)*q**S(2)*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)**S(2)*(-e*h + f*g)/(S(16)*f**S(3)) + S(15)*b**S(2)*p**S(2)*q**S(2)*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)*(-e*h + f*g)**S(2)/(S(4)*f**S(3)) - S(5)*b*h**S(2)*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)*(e + f*x)**S(3)/(S(18)*f**S(3)) - S(5)*b*h*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)*(e + f*x)**S(2)*(-e*h + f*g)/(S(4)*f**S(3)) - S(5)*b*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)*(e + f*x)*(-e*h + f*g)**S(2)/(S(2)*f**S(3)) + h**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))**(S(5)/2)*(e + f*x)**S(3)/(S(3)*f**S(3)) + h*(a + b*log(c*(d*(e + f*x)**p)**q))**(S(5)/2)*(e + f*x)**S(2)*(-e*h + f*g)/f**S(3) + (a + b*log(c*(d*(e + f*x)**p)**q))**(S(5)/2)*(e + f*x)*(-e*h + f*g)**S(2)/f**S(3), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**(S(5)/2)*(g + h*x), x), x, -S(15)*sqrt(S(2))*sqrt(pi)*b**(S(5)/2)*h*p**(S(5)/2)*q**(S(5)/2)*(c*(d*(e + f*x)**p)**q)**(-S(2)/(p*q))*(e + f*x)**S(2)*exp(-S(2)*a/(b*p*q))*erfi(sqrt(S(2))*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(128)*f**S(2)) - S(15)*sqrt(pi)*b**(S(5)/2)*p**(S(5)/2)*q**(S(5)/2)*(c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*(e + f*x)*(-e*h + f*g)*exp(-a/(b*p*q))*erfi(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(8)*f**S(2)) + S(15)*b**S(2)*h*p**S(2)*q**S(2)*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)**S(2)/(S(32)*f**S(2)) + S(15)*b**S(2)*p**S(2)*q**S(2)*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)*(-e*h + f*g)/(S(4)*f**S(2)) - S(5)*b*h*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)*(e + f*x)**S(2)/(S(8)*f**S(2)) - S(5)*b*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)*(e + f*x)*(-e*h + f*g)/(S(2)*f**S(2)) + h*(a + b*log(c*(d*(e + f*x)**p)**q))**(S(5)/2)*(e + f*x)**S(2)/(S(2)*f**S(2)) + (a + b*log(c*(d*(e + f*x)**p)**q))**(S(5)/2)*(e + f*x)*(-e*h + f*g)/f**S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**(S(5)/2), x), x, -S(15)*sqrt(pi)*b**(S(5)/2)*p**(S(5)/2)*q**(S(5)/2)*(c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*(e + f*x)*exp(-a/(b*p*q))*erfi(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(8)*f) + S(15)*b**S(2)*p**S(2)*q**S(2)*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)/(S(4)*f) - S(5)*b*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)*(e + f*x)/(S(2)*f) + (a + b*log(c*(d*(e + f*x)**p)**q))**(S(5)/2)*(e + f*x)/f, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**(S(5)/2)/(g + h*x), x), x, Integral((a + b*log(c*(d*(e + f*x)**p)**q))**(S(5)/2)/(g + h*x), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**(S(5)/2)/(g + h*x)**S(2), x), x, -S(5)*b*f*p*q*Integral((a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)/(g + h*x), x)/(S(2)*(-e*h + f*g)) + (a + b*log(c*(d*(e + f*x)**p)**q))**(S(5)/2)*(e + f*x)/((g + h*x)*(-e*h + f*g)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**(S(5)/2)/(g + h*x)**S(3), x), x, S(5)*b*f*p*q*Integral((a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)/((e + f*x)*(g + h*x)**S(2)), x)/(S(4)*h) - (a + b*log(c*(d*(e + f*x)**p)**q))**(S(5)/2)/(S(2)*h*(g + h*x)**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**(S(5)/2)/(g + h*x)**S(4), x), x, S(5)*b*f*p*q*Integral((a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)/((e + f*x)*(g + h*x)**S(3)), x)/(S(6)*h) - (a + b*log(c*(d*(e + f*x)**p)**q))**(S(5)/2)/(S(3)*h*(g + h*x)**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**(S(5)/2)/(g + h*x)**S(5), x), x, S(5)*b*f*p*q*Integral((a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)/((e + f*x)*(g + h*x)**S(4)), x)/(S(8)*h) - (a + b*log(c*(d*(e + f*x)**p)**q))**(S(5)/2)/(S(4)*h*(g + h*x)**S(4)), expand=True, _diff=True, _numerical=True)
# --- (g + h*x)**m / (a + b*log(c*(d*(e + f*x)**p)**q))**(n/2) for n = 1, 3, 5 ---
# Negative half-integer powers of the log factor: closed forms are pure erfi
# combinations (n = 1) or erfi terms plus 1/sqrt(...) boundary terms (n = 3);
# symbolic m and the 1/(g + h*x) cases remain unevaluated Integrals.
assert rubi_test(rubi_integrate((g + h*x)**m/sqrt(a + b*log(c*(d*(e + f*x)**p)**q)), x), x, Integral((g + h*x)**m/sqrt(a + b*log(c*(d*(e + f*x)**p)**q)), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((g + h*x)**S(3)/sqrt(a + b*log(c*(d*(e + f*x)**p)**q)), x), x, sqrt(pi)*h**S(3)*(c*(d*(e + f*x)**p)**q)**(-S(4)/(p*q))*(e + f*x)**S(4)*exp(-S(4)*a/(b*p*q))*erfi(S(2)*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(2)*sqrt(b)*f**S(4)*sqrt(p)*sqrt(q)) + sqrt(S(3))*sqrt(pi)*h**S(2)*(c*(d*(e + f*x)**p)**q)**(-S(3)/(p*q))*(e + f*x)**S(3)*(-e*h + f*g)*exp(-S(3)*a/(b*p*q))*erfi(sqrt(S(3))*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(sqrt(b)*f**S(4)*sqrt(p)*sqrt(q)) + S(3)*sqrt(S(2))*sqrt(pi)*h*(c*(d*(e + f*x)**p)**q)**(-S(2)/(p*q))*(e + f*x)**S(2)*(-e*h + f*g)**S(2)*exp(-S(2)*a/(b*p*q))*erfi(sqrt(S(2))*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(2)*sqrt(b)*f**S(4)*sqrt(p)*sqrt(q)) + sqrt(pi)*(c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*(e + f*x)*(-e*h + f*g)**S(3)*exp(-a/(b*p*q))*erfi(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(sqrt(b)*f**S(4)*sqrt(p)*sqrt(q)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((g + h*x)**S(2)/sqrt(a + b*log(c*(d*(e + f*x)**p)**q)), x), x, sqrt(S(3))*sqrt(pi)*h**S(2)*(c*(d*(e + f*x)**p)**q)**(-S(3)/(p*q))*(e + f*x)**S(3)*exp(-S(3)*a/(b*p*q))*erfi(sqrt(S(3))*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(3)*sqrt(b)*f**S(3)*sqrt(p)*sqrt(q)) + sqrt(S(2))*sqrt(pi)*h*(c*(d*(e + f*x)**p)**q)**(-S(2)/(p*q))*(e + f*x)**S(2)*(-e*h + f*g)*exp(-S(2)*a/(b*p*q))*erfi(sqrt(S(2))*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(sqrt(b)*f**S(3)*sqrt(p)*sqrt(q)) + sqrt(pi)*(c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*(e + f*x)*(-e*h + f*g)**S(2)*exp(-a/(b*p*q))*erfi(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(sqrt(b)*f**S(3)*sqrt(p)*sqrt(q)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((g + h*x)/sqrt(a + b*log(c*(d*(e + f*x)**p)**q)), x), x, sqrt(S(2))*sqrt(pi)*h*(c*(d*(e + f*x)**p)**q)**(-S(2)/(p*q))*(e + f*x)**S(2)*exp(-S(2)*a/(b*p*q))*erfi(sqrt(S(2))*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(2)*sqrt(b)*f**S(2)*sqrt(p)*sqrt(q)) + sqrt(pi)*(c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*(e + f*x)*(-e*h + f*g)*exp(-a/(b*p*q))*erfi(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(sqrt(b)*f**S(2)*sqrt(p)*sqrt(q)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/sqrt(a + b*log(c*(d*(e + f*x)**p)**q)), x), x, sqrt(pi)*(c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*(e + f*x)*exp(-a/(b*p*q))*erfi(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(sqrt(b)*f*sqrt(p)*sqrt(q)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)), x), x, Integral(S(1)/(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((g + h*x)**m/(a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2), x), x, Integral((g + h*x)**m/(a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((g + h*x)**S(3)/(a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2), x), x, -(S(2)*e + S(2)*f*x)*(g + h*x)**S(3)/(b*f*p*q*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))) + S(4)*sqrt(pi)*h**S(3)*(c*(d*(e + f*x)**p)**q)**(-S(4)/(p*q))*(e + f*x)**S(4)*exp(-S(4)*a/(b*p*q))*erfi(S(2)*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(b**(S(3)/2)*f**S(4)*p**(S(3)/2)*q**(S(3)/2)) + S(6)*sqrt(S(3))*sqrt(pi)*h**S(2)*(c*(d*(e + f*x)**p)**q)**(-S(3)/(p*q))*(e + f*x)**S(3)*(-e*h + f*g)*exp(-S(3)*a/(b*p*q))*erfi(sqrt(S(3))*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(b**(S(3)/2)*f**S(4)*p**(S(3)/2)*q**(S(3)/2)) + S(6)*sqrt(S(2))*sqrt(pi)*h*(c*(d*(e + f*x)**p)**q)**(-S(2)/(p*q))*(e + f*x)**S(2)*(-e*h + f*g)**S(2)*exp(-S(2)*a/(b*p*q))*erfi(sqrt(S(2))*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(b**(S(3)/2)*f**S(4)*p**(S(3)/2)*q**(S(3)/2)) + S(2)*sqrt(pi)*(c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*(e + f*x)*(-e*h + f*g)**S(3)*exp(-a/(b*p*q))*erfi(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(b**(S(3)/2)*f**S(4)*p**(S(3)/2)*q**(S(3)/2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((g + h*x)**S(2)/(a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2), x), x, -(S(2)*e + S(2)*f*x)*(g + h*x)**S(2)/(b*f*p*q*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))) + S(2)*sqrt(S(3))*sqrt(pi)*h**S(2)*(c*(d*(e + f*x)**p)**q)**(-S(3)/(p*q))*(e + f*x)**S(3)*exp(-S(3)*a/(b*p*q))*erfi(sqrt(S(3))*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(b**(S(3)/2)*f**S(3)*p**(S(3)/2)*q**(S(3)/2)) + S(4)*sqrt(S(2))*sqrt(pi)*h*(c*(d*(e + f*x)**p)**q)**(-S(2)/(p*q))*(e + f*x)**S(2)*(-e*h + f*g)*exp(-S(2)*a/(b*p*q))*erfi(sqrt(S(2))*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(b**(S(3)/2)*f**S(3)*p**(S(3)/2)*q**(S(3)/2)) + S(2)*sqrt(pi)*(c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*(e + f*x)*(-e*h + f*g)**S(2)*exp(-a/(b*p*q))*erfi(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(b**(S(3)/2)*f**S(3)*p**(S(3)/2)*q**(S(3)/2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((g + h*x)/(a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2), x), x, -(S(2)*e + S(2)*f*x)*(g + h*x)/(b*f*p*q*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))) + S(2)*sqrt(S(2))*sqrt(pi)*h*(c*(d*(e + f*x)**p)**q)**(-S(2)/(p*q))*(e + f*x)**S(2)*exp(-S(2)*a/(b*p*q))*erfi(sqrt(S(2))*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(b**(S(3)/2)*f**S(2)*p**(S(3)/2)*q**(S(3)/2)) + sqrt(pi)*(c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*(e + f*x)*(-S(2)*e*h + S(2)*f*g)*exp(-a/(b*p*q))*erfi(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(b**(S(3)/2)*f**S(2)*p**(S(3)/2)*q**(S(3)/2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**(S(-3)/2), x), x, -(S(2)*e + S(2)*f*x)/(b*f*p*q*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))) + sqrt(pi)*(c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*(S(2)*e + S(2)*f*x)*exp(-a/(b*p*q))*erfi(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(b**(S(3)/2)*f*p**(S(3)/2)*q**(S(3)/2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/((a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)*(g + h*x)), x), x, Integral(S(1)/((a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)*(g + h*x)), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((g + h*x)**m/(a + b*log(c*(d*(e + f*x)**p)**q))**(S(5)/2), x), x, Integral((g + h*x)**m/(a + b*log(c*(d*(e + f*x)**p)**q))**(S(5)/2), x), expand=True, _diff=True, _numerical=True)
# The triple-quoted string below is a deliberately disabled block of
# (g + h*x)**n / (...)**(5/2) tests, skipped because they take too long in
# rubi_test.  Being a bare string expression, it is a no-op at import time.
''' long time in rubi test
assert rubi_test(rubi_integrate((g + h*x)**S(3)/(a + b*log(c*(d*(e + f*x)**p)**q))**(S(5)/2), x), x, (-S(2)*e/S(3) - S(2)*f*x/S(3))*(g + h*x)**S(3)/(b*f*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)) - (S(16)*e/S(3) + S(16)*f*x/S(3))*(g + h*x)**S(3)/(b**S(2)*f*p**S(2)*q**S(2)*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))) + (e + f*x)*(g + h*x)**S(2)*(-S(4)*e*h + S(4)*f*g)/(b**S(2)*f**S(2)*p**S(2)*q**S(2)*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))) + S(32)*sqrt(pi)*h**S(3)*(c*(d*(e + f*x)**p)**q)**(-S(4)/(p*q))*(e + f*x)**S(4)*exp(-S(4)*a/(b*p*q))*erfi(S(2)*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(3)*b**(S(5)/2)*f**S(4)*p**(S(5)/2)*q**(S(5)/2)) + S(12)*sqrt(S(3))*sqrt(pi)*h**S(2)*(c*(d*(e + f*x)**p)**q)**(-S(3)/(p*q))*(e + f*x)**S(3)*(-e*h + f*g)*exp(-S(3)*a/(b*p*q))*erfi(sqrt(S(3))*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(b**(S(5)/2)*f**S(4)*p**(S(5)/2)*q**(S(5)/2)) + S(8)*sqrt(S(2))*sqrt(pi)*h*(c*(d*(e + f*x)**p)**q)**(-S(2)/(p*q))*(e + f*x)**S(2)*(-e*h + f*g)**S(2)*exp(-S(2)*a/(b*p*q))*erfi(sqrt(S(2))*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(b**(S(5)/2)*f**S(4)*p**(S(5)/2)*q**(S(5)/2)) + S(4)*sqrt(pi)*(c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*(e + f*x)*(-e*h + f*g)**S(3)*exp(-a/(b*p*q))*erfi(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(3)*b**(S(5)/2)*f**S(4)*p**(S(5)/2)*q**(S(5)/2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((g + h*x)**S(2)/(a + b*log(c*(d*(e + f*x)**p)**q))**(S(5)/2), x), x, (-S(2)*e/S(3) - S(2)*f*x/S(3))*(g + h*x)**S(2)/(b*f*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)) - (S(4)*e + S(4)*f*x)*(g + h*x)**S(2)/(b**S(2)*f*p**S(2)*q**S(2)*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))) + (e + f*x)*(g + h*x)*(-S(8)*e*h/S(3) + S(8)*f*g/S(3))/(b**S(2)*f**S(2)*p**S(2)*q**S(2)*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))) + S(4)*sqrt(S(3))*sqrt(pi)*h**S(2)*(c*(d*(e + f*x)**p)**q)**(-S(3)/(p*q))*(e + f*x)**S(3)*exp(-S(3)*a/(b*p*q))*erfi(sqrt(S(3))*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(b**(S(5)/2)*f**S(3)*p**(S(5)/2)*q**(S(5)/2)) + S(16)*sqrt(S(2))*sqrt(pi)*h*(c*(d*(e + f*x)**p)**q)**(-S(2)/(p*q))*(e + f*x)**S(2)*(-e*h + f*g)*exp(-S(2)*a/(b*p*q))*erfi(sqrt(S(2))*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(3)*b**(S(5)/2)*f**S(3)*p**(S(5)/2)*q**(S(5)/2)) + S(4)*sqrt(pi)*(c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*(e + f*x)*(-e*h + f*g)**S(2)*exp(-a/(b*p*q))*erfi(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(3)*b**(S(5)/2)*f**S(3)*p**(S(5)/2)*q**(S(5)/2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((g + h*x)/(a + b*log(c*(d*(e + f*x)**p)**q))**(S(5)/2), x), x, (-S(2)*e/S(3) - S(2)*f*x/S(3))*(g + h*x)/(b*f*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)) - (S(8)*e/S(3) + S(8)*f*x/S(3))*(g + h*x)/(b**S(2)*f*p**S(2)*q**S(2)*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))) + (e + f*x)*(-S(4)*e*h/S(3) + S(4)*f*g/S(3))/(b**S(2)*f**S(2)*p**S(2)*q**S(2)*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))) + S(8)*sqrt(S(2))*sqrt(pi)*h*(c*(d*(e + f*x)**p)**q)**(-S(2)/(p*q))*(e + f*x)**S(2)*exp(-S(2)*a/(b*p*q))*erfi(sqrt(S(2))*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(S(3)*b**(S(5)/2)*f**S(2)*p**(S(5)/2)*q**(S(5)/2)) + sqrt(pi)*(c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*(e + f*x)*(-S(4)*e*h/S(3) + S(4)*f*g/S(3))*exp(-a/(b*p*q))*erfi(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(b**(S(5)/2)*f**S(2)*p**(S(5)/2)*q**(S(5)/2)), expand=True, _diff=True, _numerical=True)
'''
# Live tests: the (...)**(-5/2) base case, its 1/(g + h*x) variant (left
# unevaluated), and a (3/2)-power-of-(g + h*x) integrand whose closed form
# uses atanh.
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**(S(-5)/2), x), x, (-S(2)*e/S(3) - S(2)*f*x/S(3))/(b*f*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**(S(3)/2)) - (S(4)*e/S(3) + S(4)*f*x/S(3))/(b**S(2)*f*p**S(2)*q**S(2)*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))) + sqrt(pi)*(c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*(S(4)*e/S(3) + S(4)*f*x/S(3))*exp(-a/(b*p*q))*erfi(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(b)*sqrt(p)*sqrt(q)))/(b**(S(5)/2)*f*p**(S(5)/2)*q**(S(5)/2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/((a + b*log(c*(d*(e + f*x)**p)**q))**(S(5)/2)*(g + h*x)), x), x, Integral(S(1)/((a + b*log(c*(d*(e + f*x)**p)**q))**(S(5)/2)*(g + h*x)), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)**(S(3)/2), x), x, -S(4)*b*p*q*(g + h*x)**(S(5)/2)/(S(25)*h) - S(4)*b*p*q*(g + h*x)**(S(3)/2)*(-e*h + f*g)/(S(15)*f*h) - S(4)*b*p*q*sqrt(g + h*x)*(-e*h + f*g)**S(2)/(S(5)*f**S(2)*h) + S(4)*b*p*q*(-e*h + f*g)**(S(5)/2)*atanh(sqrt(f)*sqrt(g + h*x)/sqrt(-e*h + f*g))/(S(5)*f**(S(5)/2)*h) + S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)**(S(5)/2)/(S(5)*h), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))*sqrt(g + h*x), x), x, -S(4)*b*p*q*(g + h*x)**(S(3)/2)/(S(9)*h) - S(4)*b*p*q*sqrt(g + h*x)*(-e*h + f*g)/(S(3)*f*h) + S(4)*b*p*q*(-e*h + f*g)**(S(3)/2)*atanh(sqrt(f)*sqrt(g + h*x)/sqrt(-e*h + f*g))/(S(3)*f**(S(3)/2)*h) + S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)**(S(3)/2)/(S(3)*h), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))/sqrt(g + h*x), x), x, -S(4)*b*p*q*sqrt(g + h*x)/h + S(4)*b*p*q*sqrt(-e*h + f*g)*atanh(sqrt(f)*sqrt(g + h*x)/sqrt(-e*h + f*g))/(sqrt(f)*h) + (S(2)*a + S(2)*b*log(c*(d*(e + f*x)**p)**q))*sqrt(g + h*x)/h, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))/(g + h*x)**(S(3)/2), x), x, -S(4)*b*sqrt(f)*p*q*atanh(sqrt(f)*sqrt(g + h*x)/sqrt(-e*h + f*g))/(h*sqrt(-e*h + f*g)) - (S(2)*a + S(2)*b*log(c*(d*(e + f*x)**p)**q))/(h*sqrt(g + h*x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))/(g + h*x)**(S(5)/2), x), x, -S(4)*b*f**(S(3)/2)*p*q*atanh(sqrt(f)*sqrt(g + h*x)/sqrt(-e*h + f*g))/(S(3)*h*(-e*h + f*g)**(S(3)/2)) + S(4)*b*f*p*q/(S(3)*h*sqrt(g + h*x)*(-e*h + f*g)) - (S(2)*a/S(3) + S(2)*b*log(c*(d*(e + f*x)**p)**q)/S(3))/(h*(g + h*x)**(S(3)/2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))/(g + h*x)**(S(7)/2), x), x, -S(4)*b*f**(S(5)/2)*p*q*atanh(sqrt(f)*sqrt(g + h*x)/sqrt(-e*h + f*g))/(S(5)*h*(-e*h + f*g)**(S(5)/2)) + S(4)*b*f**S(2)*p*q/(S(5)*h*sqrt(g + h*x)*(-e*h + f*g)**S(2)) + S(4)*b*f*p*q/(S(15)*h*(g + h*x)**(S(3)/2)*(-e*h + f*g)) - (S(2)*a/S(5) + S(2)*b*log(c*(d*(e + f*x)**p)**q)/S(5))/(h*(g + h*x)**(S(5)/2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))/(g + h*x)**(S(9)/2), x), x, -S(4)*b*f**(S(7)/2)*p*q*atanh(sqrt(f)*sqrt(g + h*x)/sqrt(-e*h + f*g))/(S(7)*h*(-e*h + f*g)**(S(7)/2)) + S(4)*b*f**S(3)*p*q/(S(7)*h*sqrt(g + h*x)*(-e*h + f*g)**S(3)) + S(4)*b*f**S(2)*p*q/(S(21)*h*(g + h*x)**(S(3)/2)*(-e*h + f*g)**S(2)) + S(4)*b*f*p*q/(S(35)*h*(g + h*x)**(S(5)/2)*(-e*h + f*g)) - (S(2)*a/S(7) + S(2)*b*log(c*(d*(e + f*x)**p)**q)/S(7))/(h*(g + h*x)**(S(7)/2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(g + h*x)**(S(3)/2), x), x, S(16)*b**S(2)*p**S(2)*q**S(2)*(g + h*x)**(S(5)/2)/(S(125)*h) + S(128)*b**S(2)*p**S(2)*q**S(2)*(g + h*x)**(S(3)/2)*(-e*h + f*g)/(S(225)*f*h) + S(368)*b**S(2)*p**S(2)*q**S(2)*sqrt(g + h*x)*(-e*h + f*g)**S(2)/(S(75)*f**S(2)*h) + S(16)*b**S(2)*p**S(2)*q**S(2)*(-e*h + f*g)**(S(5)/2)*log(S(2)/(-sqrt(f)*sqrt(g + h*x)/sqrt(-e*h + f*g) + S(1)))*atanh(sqrt(f)*sqrt(g + h*x)/sqrt(-e*h + f*g))/(S(5)*f**(S(5)/2)*h) - S(8)*b**S(2)*p**S(2)*q**S(2)*(-e*h + f*g)**(S(5)/2)*atanh(sqrt(f)*sqrt(g + h*x)/sqrt(-e*h + f*g))**S(2)/(S(5)*f**(S(5)/2)*h) - S(368)*b**S(2)*p**S(2)*q**S(2)*(-e*h + f*g)**(S(5)/2)*atanh(sqrt(f)*sqrt(g + h*x)/sqrt(-e*h + f*g))/(S(75)*f**(S(5)/2)*h) + S(8)*b**S(2)*p**S(2)*q**S(2)*(-e*h + f*g)**(S(5)/2)*polylog(S(2), (-sqrt(f)*sqrt(g + h*x) - sqrt(-e*h + f*g))/(-sqrt(f)*sqrt(g + h*x) + sqrt(-e*h + f*g)))/(S(5)*f**(S(5)/2)*h) - S(8)*b*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)**(S(5)/2)/(S(25)*h) - S(8)*b*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)**(S(3)/2)*(-e*h + f*g)/(S(15)*f*h) - S(8)*b*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))*sqrt(g + h*x)*(-e*h + f*g)**S(2)/(S(5)*f**S(2)*h) + S(8)*b*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))*(-e*h + f*g)**(S(5)/2)*atanh(sqrt(f)*sqrt(g + h*x)/sqrt(-e*h + f*g))/(S(5)*f**(S(5)/2)*h) + S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(g + h*x)**(S(5)/2)/(S(5)*h), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*sqrt(g + h*x), x), x, S(16)*b**S(2)*p**S(2)*q**S(2)*(g + h*x)**(S(3)/2)/(S(27)*h) + S(64)*b**S(2)*p**S(2)*q**S(2)*sqrt(g + h*x)*(-e*h + f*g)/(S(9)*f*h) + S(16)*b**S(2)*p**S(2)*q**S(2)*(-e*h + f*g)**(S(3)/2)*log(S(2)/(-sqrt(f)*sqrt(g + h*x)/sqrt(-e*h + f*g) + S(1)))*atanh(sqrt(f)*sqrt(g + h*x)/sqrt(-e*h + f*g))/(S(3)*f**(S(3)/2)*h) - S(8)*b**S(2)*p**S(2)*q**S(2)*(-e*h + f*g)**(S(3)/2)*atanh(sqrt(f)*sqrt(g + h*x)/sqrt(-e*h + f*g))**S(2)/(S(3)*f**(S(3)/2)*h) - S(64)*b**S(2)*p**S(2)*q**S(2)*(-e*h + f*g)**(S(3)/2)*atanh(sqrt(f)*sqrt(g + h*x)/sqrt(-e*h + f*g))/(S(9)*f**(S(3)/2)*h) + S(8)*b**S(2)*p**S(2)*q**S(2)*(-e*h + f*g)**(S(3)/2)*polylog(S(2), (-sqrt(f)*sqrt(g + h*x) - sqrt(-e*h + f*g))/(-sqrt(f)*sqrt(g + h*x) + sqrt(-e*h + f*g)))/(S(3)*f**(S(3)/2)*h) - S(8)*b*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)**(S(3)/2)/(S(9)*h) - S(8)*b*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))*sqrt(g + h*x)*(-e*h + f*g)/(S(3)*f*h) + S(8)*b*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))*(-e*h + f*g)**(S(3)/2)*atanh(sqrt(f)*sqrt(g + h*x)/sqrt(-e*h + f*g))/(S(3)*f**(S(3)/2)*h) + S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(g + h*x)**(S(3)/2)/(S(3)*h), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*sqrt(g + h*x), x), x, -b*f*p*q*Integral((g + h*x)**(S(3)/2)/(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)), x)/(S(3)*h) + S(2)*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)**(S(3)/2)/(S(3)*h), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/sqrt(g + h*x), x), x, -b*f*p*q*Integral(sqrt(g + h*x)/(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)), x)/h + S(2)*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*sqrt(g + h*x)/h, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(g + h*x)**(S(3)/2), x), x, b*f*p*q*Integral(S(1)/(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)*sqrt(g + h*x)), x)/h - S(2)*sqrt(a + b*log(c*(d*(e + f*x)**p)**q))/(h*sqrt(g + h*x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(sqrt(g + h*x)/sqrt(a + b*log(c*(d*(e + f*x)**p)**q)), x), x, Integral(sqrt(g + h*x)/sqrt(a + b*log(c*(d*(e + f*x)**p)**q)), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*sqrt(g + h*x)), x), x, Integral(S(1)/(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*sqrt(g + h*x)), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)**(S(3)/2)), x), x, Integral(S(1)/(sqrt(a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)**(S(3)/2)), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**n*(g + h*x)**m, x), x, Integral((a + b*log(c*(d*(e + f*x)**p)**q))**n*(g + h*x)**m, x), expand=True, _diff=True, _numerical=True)
'''long time
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**n*(g + h*x)**S(3), x), x, S(3)*S(2)**(-n + S(-1))*h*(c*(d*(e + f*x)**p)**q)**(-S(2)/(p*q))*((-a - b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))**(-n)*(a + b*log(c*(d*(e + f*x)**p)**q))**n*(e + f*x)**S(2)*(-e*h + f*g)**S(2)*Gamma(n + S(1), (-S(2)*a - S(2)*b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))*exp(-S(2)*a/(b*p*q))/f**S(4) + S(4)**(-n + S(-1))*h**S(3)*(c*(d*(e + f*x)**p)**q)**(-S(4)/(p*q))*((-a - b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))**(-n)*(a + b*log(c*(d*(e + f*x)**p)**q))**n*(e + f*x)**S(4)*Gamma(n + S(1), (-S(4)*a - S(4)*b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))*exp(-S(4)*a/(b*p*q))/f**S(4) + (c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*((-a - b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))**(-n)*(a + b*log(c*(d*(e + f*x)**p)**q))**n*(e + f*x)*(-e*h + f*g)**S(3)*Gamma(n + S(1), (-a - b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))*exp(-a/(b*p*q))/f**S(4) + S(3)**(-n)*h**S(2)*(c*(d*(e + f*x)**p)**q)**(-S(3)/(p*q))*((-a - b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))**(-n)*(a + b*log(c*(d*(e + f*x)**p)**q))**n*(e + f*x)**S(3)*(-e*h + f*g)*Gamma(n + S(1), (-S(3)*a - S(3)*b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))*exp(-S(3)*a/(b*p*q))/f**S(4), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**n*(g + h*x)**S(2), x), x, S(3)**(-n + S(-1))*h**S(2)*(c*(d*(e + f*x)**p)**q)**(-S(3)/(p*q))*((-a - b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))**(-n)*(a + b*log(c*(d*(e + f*x)**p)**q))**n*(e + f*x)**S(3)*Gamma(n + S(1), (-S(3)*a - S(3)*b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))*exp(-S(3)*a/(b*p*q))/f**S(3) + (c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*((-a - b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))**(-n)*(a + b*log(c*(d*(e + f*x)**p)**q))**n*(e + f*x)*(-e*h + f*g)**S(2)*Gamma(n + S(1), (-a - b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))*exp(-a/(b*p*q))/f**S(3) + S(2)**(-n)*h*(c*(d*(e + f*x)**p)**q)**(-S(2)/(p*q))*((-a - b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))**(-n)*(a + b*log(c*(d*(e + f*x)**p)**q))**n*(e + f*x)**S(2)*(-e*h + f*g)*Gamma(n + S(1), (-S(2)*a - S(2)*b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))*exp(-S(2)*a/(b*p*q))/f**S(3), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**n*(g + h*x), x), x, S(2)**(-n + S(-1))*h*(c*(d*(e + f*x)**p)**q)**(-S(2)/(p*q))*((-a - b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))**(-n)*(a + b*log(c*(d*(e + f*x)**p)**q))**n*(e + f*x)**S(2)*Gamma(n + S(1), (-S(2)*a - S(2)*b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))*exp(-S(2)*a/(b*p*q))/f**S(2) + (c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*((-a - b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))**(-n)*(a + b*log(c*(d*(e + f*x)**p)**q))**n*(e + f*x)*(-e*h + f*g)*Gamma(n + S(1), (-a - b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))*exp(-a/(b*p*q))/f**S(2), expand=True, _diff=True, _numerical=True)
'''
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**n, x), x, (c*(d*(e + f*x)**p)**q)**(-S(1)/(p*q))*((-a - b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))**(-n)*(a + b*log(c*(d*(e + f*x)**p)**q))**n*(e + f*x)*Gamma(n + S(1), (-a - b*log(c*(d*(e + f*x)**p)**q))/(b*p*q))*exp(-a/(b*p*q))/f, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**n/(g + h*x), x), x, Integral((a + b*log(c*(d*(e + f*x)**p)**q))**n/(g + h*x), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(e + f*x)))*(i + j*x)**S(4)/(d*e + d*f*x), x), x, -S(4)*b*j*x*(-e*j + f*i)**S(3)/(d*f**S(4)) - b*j**S(4)*(e + f*x)**S(4)/(S(16)*d*f**S(5)) - S(4)*b*j**S(3)*(e + f*x)**S(3)*(-e*j + f*i)/(S(9)*d*f**S(5)) - S(3)*b*j**S(2)*(e + f*x)**S(2)*(-e*j + f*i)**S(2)/(S(2)*d*f**S(5)) + j**S(4)*(a + b*log(c*(e + f*x)))*(e + f*x)**S(4)/(S(4)*d*f**S(5)) + S(4)*j**S(3)*(a + b*log(c*(e + f*x)))*(e + f*x)**S(3)*(-e*j + f*i)/(S(3)*d*f**S(5)) + S(3)*j**S(2)*(a + b*log(c*(e + f*x)))*(e + f*x)**S(2)*(-e*j + f*i)**S(2)/(d*f**S(5)) + S(4)*j*(a + b*log(c*(e + f*x)))*(e + f*x)*(-e*j + f*i)**S(3)/(d*f**S(5)) + (a + b*log(c*(e + f*x)))**S(2)*(-e*j + f*i)**S(4)/(S(2)*b*d*f**S(5)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(e + f*x)))*(i + j*x)**S(3)/(d*e + d*f*x), x), x, -S(3)*b*j*x*(-e*j + f*i)**S(2)/(d*f**S(3)) - b*j**S(3)*(e + f*x)**S(3)/(S(9)*d*f**S(4)) - S(3)*b*j**S(2)*(e + f*x)**S(2)*(-e*j + f*i)/(S(4)*d*f**S(4)) + j**S(3)*(a + b*log(c*(e + f*x)))*(e + f*x)**S(3)/(S(3)*d*f**S(4)) + S(3)*j**S(2)*(a + b*log(c*(e + f*x)))*(e + f*x)**S(2)*(-e*j + f*i)/(S(2)*d*f**S(4)) + S(3)*j*(a + b*log(c*(e + f*x)))*(e + f*x)*(-e*j + f*i)**S(2)/(d*f**S(4)) + (a + b*log(c*(e + f*x)))**S(2)*(-e*j + f*i)**S(3)/(S(2)*b*d*f**S(4)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(e + f*x)))*(i + j*x)**S(2)/(d*e + d*f*x), x), x, -S(2)*b*j*x*(-e*j + f*i)/(d*f**S(2)) - b*j**S(2)*(e + f*x)**S(2)/(S(4)*d*f**S(3)) + j**S(2)*(a + b*log(c*(e + f*x)))*(e + f*x)**S(2)/(S(2)*d*f**S(3)) + S(2)*j*(a + b*log(c*(e + f*x)))*(e + f*x)*(-e*j + f*i)/(d*f**S(3)) + (a + b*log(c*(e + f*x)))**S(2)*(-e*j + f*i)**S(2)/(S(2)*b*d*f**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(e + f*x)))*(i + j*x)/(d*e + d*f*x), x), x, -b*j*x/(d*f) + j*(a + b*log(c*(e + f*x)))*(e + f*x)/(d*f**S(2)) + (a + b*log(c*(e + f*x)))**S(2)*(-e*j/S(2) + f*i/S(2))/(b*d*f**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(e + f*x)))/(d*e + d*f*x), x), x, (a + b*log(c*(e + f*x)))**S(2)/(S(2)*b*d*f), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(e + f*x)))/((i + j*x)*(d*e + d*f*x)), x), x, -b*polylog(S(2), -j*(e + f*x)/(-e*j + f*i))/(d*(-e*j + f*i)) - (a + b*log(c*(e + f*x)))*log(f*(i + j*x)/(-e*j + f*i))/(d*(-e*j + f*i)) + (a + b*log(c*(e + f*x)))**S(2)/(S(2)*b*d*(-e*j + f*i)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(e + f*x)))/((i + j*x)**S(2)*(d*e + d*f*x)), x), x, -b*f*log(e + f*x)/(d*(-e*j + f*i)**S(2)) + b*f*log(i + j*x)/(d*(-e*j + f*i)**S(2)) - b*f*polylog(S(2), -j*(e + f*x)/(-e*j + f*i))/(d*(-e*j + f*i)**S(2)) - f*(a + b*log(c*(e + f*x)))*log(f*(i + j*x)/(-e*j + f*i))/(d*(-e*j + f*i)**S(2)) + (a + b*log(c*(e + f*x)))/(d*(i + j*x)*(-e*j + f*i)) + f*(a + b*log(c*(e + f*x)))**S(2)/(S(2)*b*d*(-e*j + f*i)**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(e + f*x)))/((i + j*x)**S(3)*(d*e + d*f*x)), x), x, -S(3)*b*f**S(2)*log(e + f*x)/(S(2)*d*(-e*j + f*i)**S(3)) + S(3)*b*f**S(2)*log(i + j*x)/(S(2)*d*(-e*j + f*i)**S(3)) - b*f**S(2)*polylog(S(2), -j*(e + f*x)/(-e*j + f*i))/(d*(-e*j + f*i)**S(3)) - b*f/(S(2)*d*(i + j*x)*(-e*j + f*i)**S(2)) - f**S(2)*(a + b*log(c*(e + f*x)))*log(f*(i + j*x)/(-e*j + f*i))/(d*(-e*j + f*i)**S(3)) + f*(a + b*log(c*(e + f*x)))/(d*(i + j*x)*(-e*j + f*i)**S(2)) + (a/S(2) + b*log(c*(e + f*x))/S(2))/(d*(i + j*x)**S(2)*(-e*j + f*i)) + f**S(2)*(a + b*log(c*(e + f*x)))**S(2)/(S(2)*b*d*(-e*j + f*i)**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(e + f*x)))**S(2)*(i + j*x)**S(4)/(d*e + d*f*x), x), x, S(8)*b**S(2)*j*x*(-e*j + f*i)**S(3)/(d*f**S(4)) + b**S(2)*j**S(4)*(e + f*x)**S(4)/(S(32)*d*f**S(5)) + S(8)*b**S(2)*j**S(3)*(e + f*x)**S(3)*(-e*j + f*i)/(S(27)*d*f**S(5)) + S(3)*b**S(2)*j**S(2)*(e + f*x)**S(2)*(-e*j + f*i)**S(2)/(S(2)*d*f**S(5)) - b*j**S(4)*(a + b*log(c*(e + f*x)))*(e + f*x)**S(4)/(S(8)*d*f**S(5)) - S(8)*b*j**S(3)*(a + b*log(c*(e + f*x)))*(e + f*x)**S(3)*(-e*j + f*i)/(S(9)*d*f**S(5)) - S(3)*b*j**S(2)*(a + b*log(c*(e + f*x)))*(e + f*x)**S(2)*(-e*j + f*i)**S(2)/(d*f**S(5)) - S(8)*b*j*(a + b*log(c*(e + f*x)))*(e + f*x)*(-e*j + f*i)**S(3)/(d*f**S(5)) + j**S(4)*(a + b*log(c*(e + f*x)))**S(2)*(e + f*x)**S(4)/(S(4)*d*f**S(5)) + S(4)*j**S(3)*(a + b*log(c*(e + f*x)))**S(2)*(e + f*x)**S(3)*(-e*j + f*i)/(S(3)*d*f**S(5)) + S(3)*j**S(2)*(a + b*log(c*(e + f*x)))**S(2)*(e + f*x)**S(2)*(-e*j + f*i)**S(2)/(d*f**S(5)) + S(4)*j*(a + b*log(c*(e + f*x)))**S(2)*(e + f*x)*(-e*j + f*i)**S(3)/(d*f**S(5)) + (a + b*log(c*(e + f*x)))**S(3)*(-e*j + f*i)**S(4)/(S(3)*b*d*f**S(5)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(e + f*x)))**S(2)*(i + j*x)**S(3)/(d*e + d*f*x), x), x, S(6)*b**S(2)*j*x*(-e*j + f*i)**S(2)/(d*f**S(3)) + S(2)*b**S(2)*j**S(3)*(e + f*x)**S(3)/(S(27)*d*f**S(4)) + S(3)*b**S(2)*j**S(2)*(e + f*x)**S(2)*(-e*j + f*i)/(S(4)*d*f**S(4)) - S(2)*b*j**S(3)*(a + b*log(c*(e + f*x)))*(e + f*x)**S(3)/(S(9)*d*f**S(4)) - S(3)*b*j**S(2)*(a + b*log(c*(e + f*x)))*(e + f*x)**S(2)*(-e*j + f*i)/(S(2)*d*f**S(4)) - S(6)*b*j*(a + b*log(c*(e + f*x)))*(e + f*x)*(-e*j + f*i)**S(2)/(d*f**S(4)) + j**S(3)*(a + b*log(c*(e + f*x)))**S(2)*(e + f*x)**S(3)/(S(3)*d*f**S(4)) + S(3)*j**S(2)*(a + b*log(c*(e + f*x)))**S(2)*(e + f*x)**S(2)*(-e*j + f*i)/(S(2)*d*f**S(4)) + S(3)*j*(a + b*log(c*(e + f*x)))**S(2)*(e + f*x)*(-e*j + f*i)**S(2)/(d*f**S(4)) + (a + b*log(c*(e + f*x)))**S(3)*(-e*j + f*i)**S(3)/(S(3)*b*d*f**S(4)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(e + f*x)))**S(2)*(i + j*x)**S(2)/(d*e + d*f*x), x), x, S(4)*b**S(2)*j*x*(-e*j + f*i)/(d*f**S(2)) + b**S(2)*j**S(2)*(e + f*x)**S(2)/(S(4)*d*f**S(3)) - b*j**S(2)*(a + b*log(c*(e + f*x)))*(e + f*x)**S(2)/(S(2)*d*f**S(3)) - S(4)*b*j*(a + b*log(c*(e + f*x)))*(e + f*x)*(-e*j + f*i)/(d*f**S(3)) + j**S(2)*(a + b*log(c*(e + f*x)))**S(2)*(e + f*x)**S(2)/(S(2)*d*f**S(3)) + S(2)*j*(a + b*log(c*(e + f*x)))**S(2)*(e + f*x)*(-e*j + f*i)/(d*f**S(3)) + (a + b*log(c*(e + f*x)))**S(3)*(-e*j + f*i)**S(2)/(S(3)*b*d*f**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(e + f*x)))**S(2)*(i + j*x)/(d*e + d*f*x), x), x, S(2)*b**S(2)*j*x/(d*f) - S(2)*b*j*(a + b*log(c*(e + f*x)))*(e + f*x)/(d*f**S(2)) + j*(a + b*log(c*(e + f*x)))**S(2)*(e + f*x)/(d*f**S(2)) + (a + b*log(c*(e + f*x)))**S(3)*(-e*j/S(3) + f*i/S(3))/(b*d*f**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(e + f*x)))**S(2)/(d*e + d*f*x), x), x, (a + b*log(c*(e + f*x)))**S(3)/(S(3)*b*d*f), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(e + f*x)))**S(2)/((i + j*x)*(d*e + d*f*x)), x), x, S(2)*b**S(2)*polylog(S(3), -j*(e + f*x)/(-e*j + f*i))/(d*(-e*j + f*i)) - S(2)*b*(a + b*log(c*(e + f*x)))*polylog(S(2), -j*(e + f*x)/(-e*j + f*i))/(d*(-e*j + f*i)) - (a + b*log(c*(e + f*x)))**S(2)*log(f*(i + j*x)/(-e*j + f*i))/(d*(-e*j + f*i)) + (a + b*log(c*(e + f*x)))**S(3)/(S(3)*b*d*(-e*j + f*i)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(e + f*x)))**S(2)/((i + j*x)**S(2)*(d*e + d*f*x)), x), x, S(2)*b**S(2)*f*polylog(S(2), -j*(e + f*x)/(-e*j + f*i))/(d*(-e*j + f*i)**S(2)) + S(2)*b**S(2)*f*polylog(S(3), -j*(e + f*x)/(-e*j + f*i))/(d*(-e*j + f*i)**S(2)) + S(2)*b*f*(a + b*log(c*(e + f*x)))*log(f*(i + j*x)/(-e*j + f*i))/(d*(-e*j + f*i)**S(2)) - S(2)*b*f*(a + b*log(c*(e + f*x)))*polylog(S(2), -j*(e + f*x)/(-e*j + f*i))/(d*(-e*j + f*i)**S(2)) - f*(a + b*log(c*(e + f*x)))**S(2)*log(f*(i + j*x)/(-e*j + f*i))/(d*(-e*j + f*i)**S(2)) - j*(a + b*log(c*(e + f*x)))**S(2)*(e + f*x)/(d*(i + j*x)*(-e*j + f*i)**S(2)) + f*(a + b*log(c*(e + f*x)))**S(3)/(S(3)*b*d*(-e*j + f*i)**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(e + f*x)))**S(2)/((i + j*x)**S(3)*(d*e + d*f*x)), x), x, b**S(2)*f**S(2)*log(e + f*x)/(d*(-e*j + f*i)**S(3)) - b**S(2)*f**S(2)*log(i + j*x)/(d*(-e*j + f*i)**S(3)) + S(3)*b**S(2)*f**S(2)*polylog(S(2), -j*(e + f*x)/(-e*j + f*i))/(d*(-e*j + f*i)**S(3)) + S(2)*b**S(2)*f**S(2)*polylog(S(3), -j*(e + f*x)/(-e*j + f*i))/(d*(-e*j + f*i)**S(3)) + S(3)*b*f**S(2)*(a + b*log(c*(e + f*x)))*log(f*(i + j*x)/(-e*j + f*i))/(d*(-e*j + f*i)**S(3)) - S(2)*b*f**S(2)*(a + b*log(c*(e + f*x)))*polylog(S(2), -j*(e + f*x)/(-e*j + f*i))/(d*(-e*j + f*i)**S(3)) - b*f*(a + b*log(c*(e + f*x)))/(d*(i + j*x)*(-e*j + f*i)**S(2)) - f**S(2)*(a + b*log(c*(e + f*x)))**S(2)*log(f*(i + j*x)/(-e*j + f*i))/(d*(-e*j + f*i)**S(3)) - f**S(2)*(a + b*log(c*(e + f*x)))**S(2)/(S(2)*d*(-e*j + f*i)**S(3)) - f*j*(a + b*log(c*(e + f*x)))**S(2)*(e + f*x)/(d*(i + j*x)*(-e*j + f*i)**S(3)) + (a + b*log(c*(e + f*x)))**S(2)/(S(2)*d*(i + j*x)**S(2)*(-e*j + f*i)) + f**S(2)*(a + b*log(c*(e + f*x)))**S(3)/(S(3)*b*d*(-e*j + f*i)**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((i + j*x)**S(4)/((a + b*log(c*(e + f*x)))*(d*e + d*f*x)), x), x, (-e*j + f*i)**S(4)*log(a + b*log(c*(e + f*x)))/(b*d*f**S(5)) + S(4)*j*(-e*j + f*i)**S(3)*exp(-a/b)*Ei((a + b*log(c*(e + f*x)))/b)/(b*c*d*f**S(5)) + S(6)*j**S(2)*(-e*j + f*i)**S(2)*exp(-S(2)*a/b)*Ei((S(2)*a + S(2)*b*log(c*(e + f*x)))/b)/(b*c**S(2)*d*f**S(5)) + S(4)*j**S(3)*(-e*j + f*i)*exp(-S(3)*a/b)*Ei((S(3)*a + S(3)*b*log(c*(e + f*x)))/b)/(b*c**S(3)*d*f**S(5)) + j**S(4)*exp(-S(4)*a/b)*Ei((S(4)*a + S(4)*b*log(c*(e + f*x)))/b)/(b*c**S(4)*d*f**S(5)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((i + j*x)**S(3)/((a + b*log(c*(e + f*x)))*(d*e + d*f*x)), x), x, (-e*j + f*i)**S(3)*log(a + b*log(c*(e + f*x)))/(b*d*f**S(4)) + S(3)*j*(-e*j + f*i)**S(2)*exp(-a/b)*Ei((a + b*log(c*(e + f*x)))/b)/(b*c*d*f**S(4)) + S(3)*j**S(2)*(-e*j + f*i)*exp(-S(2)*a/b)*Ei((S(2)*a + S(2)*b*log(c*(e + f*x)))/b)/(b*c**S(2)*d*f**S(4)) + j**S(3)*exp(-S(3)*a/b)*Ei((S(3)*a + S(3)*b*log(c*(e + f*x)))/b)/(b*c**S(3)*d*f**S(4)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((i + j*x)**S(2)/((a + b*log(c*(e + f*x)))*(d*e + d*f*x)), x), x, (-e*j + f*i)**S(2)*log(a + b*log(c*(e + f*x)))/(b*d*f**S(3)) + S(2)*j*(-e*j + f*i)*exp(-a/b)*Ei((a + b*log(c*(e + f*x)))/b)/(b*c*d*f**S(3)) + j**S(2)*exp(-S(2)*a/b)*Ei((S(2)*a + S(2)*b*log(c*(e + f*x)))/b)/(b*c**S(2)*d*f**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((i + j*x)/((a + b*log(c*(e + f*x)))*(d*e + d*f*x)), x), x, (-e*j + f*i)*log(a + b*log(c*(e + f*x)))/(b*d*f**S(2)) + j*exp(-a/b)*Ei((a + b*log(c*(e + f*x)))/b)/(b*c*d*f**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/((a + b*log(c*(e + f*x)))*(d*e + d*f*x)), x), x, log(a + b*log(c*(e + f*x)))/(b*d*f), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/((a + b*log(c*(e + f*x)))*(i + j*x)*(d*e + d*f*x)), x), x, Integral(S(1)/((a + b*log(c*(e + f*x)))*(i + j*x)*(d*e + d*f*x)), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/((a + b*log(c*(e + f*x)))*(i + j*x)**S(2)*(d*e + d*f*x)), x), x, Integral(S(1)/((a + b*log(c*(e + f*x)))*(i + j*x)**S(2)*(d*e + d*f*x)), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d + e*x)**n))*(f + g*x)**(S(5)/2)/(d + e*x), x), x, -S(4)*b*n*(f + g*x)**(S(5)/2)/(S(25)*e) - S(32)*b*n*(f + g*x)**(S(3)/2)*(-d*g + e*f)/(S(45)*e**S(2)) - S(92)*b*n*sqrt(f + g*x)*(-d*g + e*f)**S(2)/(S(15)*e**S(3)) - S(4)*b*n*(-d*g + e*f)**(S(5)/2)*log(S(2)/(-sqrt(e)*sqrt(f + g*x)/sqrt(-d*g + e*f) + S(1)))*atanh(sqrt(e)*sqrt(f + g*x)/sqrt(-d*g + e*f))/e**(S(7)/2) + S(2)*b*n*(-d*g + e*f)**(S(5)/2)*atanh(sqrt(e)*sqrt(f + g*x)/sqrt(-d*g + e*f))**S(2)/e**(S(7)/2) + S(92)*b*n*(-d*g + e*f)**(S(5)/2)*atanh(sqrt(e)*sqrt(f + g*x)/sqrt(-d*g + e*f))/(S(15)*e**(S(7)/2)) - S(2)*b*n*(-d*g + e*f)**(S(5)/2)*polylog(S(2), (-sqrt(e)*sqrt(f + g*x) - sqrt(-d*g + e*f))/(-sqrt(e)*sqrt(f + g*x) + sqrt(-d*g + e*f)))/e**(S(7)/2) + S(2)*(a + b*log(c*(d + e*x)**n))*(f + g*x)**(S(5)/2)/(S(5)*e) + (a + b*log(c*(d + e*x)**n))*(f + g*x)**(S(3)/2)*(-S(2)*d*g/S(3) + S(2)*e*f/S(3))/e**S(2) + S(2)*(a + b*log(c*(d + e*x)**n))*sqrt(f + g*x)*(-d*g + e*f)**S(2)/e**S(3) - S(2)*(a + b*log(c*(d + e*x)**n))*(-d*g + e*f)**(S(5)/2)*atanh(sqrt(e)*sqrt(f + g*x)/sqrt(-d*g + e*f))/e**(S(7)/2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d + e*x)**n))*(f + g*x)**(S(3)/2)/(d + e*x), x), x, -S(4)*b*n*(f + g*x)**(S(3)/2)/(S(9)*e) - S(16)*b*n*sqrt(f + g*x)*(-d*g + e*f)/(S(3)*e**S(2)) - S(4)*b*n*(-d*g + e*f)**(S(3)/2)*log(S(2)/(-sqrt(e)*sqrt(f + g*x)/sqrt(-d*g + e*f) + S(1)))*atanh(sqrt(e)*sqrt(f + g*x)/sqrt(-d*g + e*f))/e**(S(5)/2) + S(2)*b*n*(-d*g + e*f)**(S(3)/2)*atanh(sqrt(e)*sqrt(f + g*x)/sqrt(-d*g + e*f))**S(2)/e**(S(5)/2) + S(16)*b*n*(-d*g + e*f)**(S(3)/2)*atanh(sqrt(e)*sqrt(f + g*x)/sqrt(-d*g + e*f))/(S(3)*e**(S(5)/2)) - S(2)*b*n*(-d*g + e*f)**(S(3)/2)*polylog(S(2), (-sqrt(e)*sqrt(f + g*x) - sqrt(-d*g + e*f))/(-sqrt(e)*sqrt(f + g*x) + sqrt(-d*g + e*f)))/e**(S(5)/2) + S(2)*(a + b*log(c*(d + e*x)**n))*(f + g*x)**(S(3)/2)/(S(3)*e) + (a + b*log(c*(d + e*x)**n))*sqrt(f + g*x)*(-S(2)*d*g + S(2)*e*f)/e**S(2) - S(2)*(a + b*log(c*(d + e*x)**n))*(-d*g + e*f)**(S(3)/2)*atanh(sqrt(e)*sqrt(f + g*x)/sqrt(-d*g + e*f))/e**(S(5)/2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d + e*x)**n))*sqrt(f + g*x)/(d + e*x), x), x, -S(4)*b*n*sqrt(f + g*x)/e - S(4)*b*n*sqrt(-d*g + e*f)*log(S(2)/(-sqrt(e)*sqrt(f + g*x)/sqrt(-d*g + e*f) + S(1)))*atanh(sqrt(e)*sqrt(f + g*x)/sqrt(-d*g + e*f))/e**(S(3)/2) + S(2)*b*n*sqrt(-d*g + e*f)*atanh(sqrt(e)*sqrt(f + g*x)/sqrt(-d*g + e*f))**S(2)/e**(S(3)/2) + S(4)*b*n*sqrt(-d*g + e*f)*atanh(sqrt(e)*sqrt(f + g*x)/sqrt(-d*g + e*f))/e**(S(3)/2) - S(2)*b*n*sqrt(-d*g + e*f)*polylog(S(2), (-sqrt(e)*sqrt(f + g*x) - sqrt(-d*g + e*f))/(-sqrt(e)*sqrt(f + g*x) + sqrt(-d*g + e*f)))/e**(S(3)/2) + (S(2)*a + S(2)*b*log(c*(d + e*x)**n))*sqrt(f + g*x)/e - S(2)*(a + b*log(c*(d + e*x)**n))*sqrt(-d*g + e*f)*atanh(sqrt(e)*sqrt(f + g*x)/sqrt(-d*g + e*f))/e**(S(3)/2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(sqrt(d + e*x)*log(a + b*x)/(a + b*x), x), x, S(2)*sqrt(d + e*x)*log(a + b*x)/b - S(4)*sqrt(d + e*x)/b - S(2)*sqrt(-a*e + b*d)*log(a + b*x)*atanh(sqrt(b)*sqrt(d + e*x)/sqrt(-a*e + b*d))/b**(S(3)/2) - S(4)*sqrt(-a*e + b*d)*log(S(2)/(-sqrt(b)*sqrt(d + e*x)/sqrt(-a*e + b*d) + S(1)))*atanh(sqrt(b)*sqrt(d + e*x)/sqrt(-a*e + b*d))/b**(S(3)/2) + S(2)*sqrt(-a*e + b*d)*atanh(sqrt(b)*sqrt(d + e*x)/sqrt(-a*e + b*d))**S(2)/b**(S(3)/2) + S(4)*sqrt(-a*e + b*d)*atanh(sqrt(b)*sqrt(d + e*x)/sqrt(-a*e + b*d))/b**(S(3)/2) - S(2)*sqrt(-a*e + b*d)*polylog(S(2), (-sqrt(b)*sqrt(d + e*x) - sqrt(-a*e + b*d))/(-sqrt(b)*sqrt(d + e*x) + sqrt(-a*e + b*d)))/b**(S(3)/2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(e + f*x)))**S(2)*(i + j*x)**m/(d*e + d*f*x), x), x, Integral((a + b*log(c*(e + f*x)))**S(2)*(i + j*x)**m/(d*e + d*f*x), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(e + f*x)))*(i + j*x)**m/(d*e + d*f*x), x), x, Integral((a + b*log(c*(e + f*x)))*(i + j*x)**m/(d*e + d*f*x), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(e + f*x)))**n*(i + j*x)**m/(d*e + d*f*x), x), x, Integral((a + b*log(c*(e + f*x)))**n*(i + j*x)**m/(d*e + d*f*x), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(e + f*x)))**n*(i + j*x)**S(4)/(d*e + d*f*x), x), x, S(4)*S(3)**(-n + S(-1))*j**S(3)*((-a - b*log(c*(e + f*x)))/b)**(-n)*(a + b*log(c*(e + f*x)))**n*(-e*j + f*i)*Gamma(n + S(1), (-S(3)*a - S(3)*b*log(c*(e + f*x)))/b)*exp(-S(3)*a/b)/(c**S(3)*d*f**S(5)) + S(4)**(-n + S(-1))*j**S(4)*((-a - b*log(c*(e + f*x)))/b)**(-n)*(a + b*log(c*(e + f*x)))**n*Gamma(n + S(1), (-S(4)*a - S(4)*b*log(c*(e + f*x)))/b)*exp(-S(4)*a/b)/(c**S(4)*d*f**S(5)) + S(4)*j*((-a - b*log(c*(e + f*x)))/b)**(-n)*(a + b*log(c*(e + f*x)))**n*(-e*j + f*i)**S(3)*Gamma(n + S(1), (-a - b*log(c*(e + f*x)))/b)*exp(-a/b)/(c*d*f**S(5)) + (a + b*log(c*(e + f*x)))**(n + S(1))*(-e*j + f*i)**S(4)/(b*d*f**S(5)*(n + S(1))) + S(3)*S(2)**(-n)*j**S(2)*((-a - b*log(c*(e + f*x)))/b)**(-n)*(a + b*log(c*(e + f*x)))**n*(-e*j + f*i)**S(2)*Gamma(n + S(1), (-S(2)*a - S(2)*b*log(c*(e + f*x)))/b)*exp(-S(2)*a/b)/(c**S(2)*d*f**S(5)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(e + f*x)))**n*(i + j*x)**S(3)/(d*e + d*f*x), x), x, S(3)*S(2)**(-n + S(-1))*j**S(2)*((-a - b*log(c*(e + f*x)))/b)**(-n)*(a + b*log(c*(e + f*x)))**n*(-e*j + f*i)*Gamma(n + S(1), (-S(2)*a - S(2)*b*log(c*(e + f*x)))/b)*exp(-S(2)*a/b)/(c**S(2)*d*f**S(4)) + S(3)**(-n + S(-1))*j**S(3)*((-a - b*log(c*(e + f*x)))/b)**(-n)*(a + b*log(c*(e + f*x)))**n*Gamma(n + S(1), (-S(3)*a - S(3)*b*log(c*(e + f*x)))/b)*exp(-S(3)*a/b)/(c**S(3)*d*f**S(4)) + S(3)*j*((-a - b*log(c*(e + f*x)))/b)**(-n)*(a + b*log(c*(e + f*x)))**n*(-e*j + f*i)**S(2)*Gamma(n + S(1), (-a - b*log(c*(e + f*x)))/b)*exp(-a/b)/(c*d*f**S(4)) + (a + b*log(c*(e + f*x)))**(n + S(1))*(-e*j + f*i)**S(3)/(b*d*f**S(4)*(n + S(1))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(e + f*x)))**n*(i + j*x)**S(2)/(d*e + d*f*x), x), x, S(2)**(-n + S(-1))*j**S(2)*((-a - b*log(c*(e + f*x)))/b)**(-n)*(a + b*log(c*(e + f*x)))**n*Gamma(n + S(1), (-S(2)*a - S(2)*b*log(c*(e + f*x)))/b)*exp(-S(2)*a/b)/(c**S(2)*d*f**S(3)) + S(2)*j*((-a - b*log(c*(e + f*x)))/b)**(-n)*(a + b*log(c*(e + f*x)))**n*(-e*j + f*i)*Gamma(n + S(1), (-a - b*log(c*(e + f*x)))/b)*exp(-a/b)/(c*d*f**S(3)) + (a + b*log(c*(e + f*x)))**(n + S(1))*(-e*j + f*i)**S(2)/(b*d*f**S(3)*(n + S(1))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(e + f*x)))**n*(i + j*x)/(d*e + d*f*x), x), x, j*((-a - b*log(c*(e + f*x)))/b)**(-n)*(a + b*log(c*(e + f*x)))**n*Gamma(n + S(1), (-a - b*log(c*(e + f*x)))/b)*exp(-a/b)/(c*d*f**S(2)) + (a + b*log(c*(e + f*x)))**(n + S(1))*(-e*j + f*i)/(b*d*f**S(2)*(n + S(1))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(e + f*x)))**n/(d*e + d*f*x), x), x, (a + b*log(c*(e + f*x)))**(n + S(1))/(b*d*f*(n + S(1))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(e + f*x)))**n/((i + j*x)*(d*e + d*f*x)), x), x, Integral((a + b*log(c*(e + f*x)))**n/((i + j*x)*(d*e + d*f*x)), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(e + f*x)))**n/((i + j*x)**S(2)*(d*e + d*f*x)), x), x, Integral((a + b*log(c*(e + f*x)))**n/((i + j*x)**S(2)*(d*e + d*f*x)), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(e + f*x)))**n/((i + j*x)**S(3)*(d*e + d*f*x)), x), x, Integral((a + b*log(c*(e + f*x)))**n/((i + j*x)**S(3)*(d*e + d*f*x)), x), expand=True, _diff=True, _numerical=True)
# First power of the nested log a + b*log(c*(d*(e + f*x)**p)**q), multiplied by
# (i + j*x)**k / (g + h*x) for k = 3, 2, 1, 0, -1, -2, -3.  Expected results
# mix polynomial terms, log terms, and dilogarithms (polylog(2, ...)).
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))*(i + j*x)**S(3)/(g + h*x), x), x, a*j*x*(-g*j + h*i)**S(2)/h**S(3) - b*p*q*(i + j*x)**S(3)/(S(9)*h) - b*p*q*(i + j*x)**S(2)*(-g*j + h*i)/(S(4)*h**S(2)) - b*j*p*q*x*(-g*j + h*i)**S(2)/h**S(3) + b*p*q*(-g*j + h*i)**S(3)*polylog(S(2), -h*(e + f*x)/(-e*h + f*g))/h**S(4) - b*p*q*(i + j*x)**S(2)*(-e*j + f*i)/(S(6)*f*h) - b*j*p*q*x*(-e*j + f*i)*(-g*j + h*i)/(S(2)*f*h**S(2)) + b*j*(e + f*x)*(-g*j + h*i)**S(2)*log(c*(d*(e + f*x)**p)**q)/(f*h**S(3)) - b*j*p*q*x*(-e*j + f*i)**S(2)/(S(3)*f**S(2)*h) - b*p*q*(-e*j + f*i)**S(2)*(-g*j + h*i)*log(e + f*x)/(S(2)*f**S(2)*h**S(2)) - b*p*q*(-e*j + f*i)**S(3)*log(e + f*x)/(S(3)*f**S(3)*h) + (a + b*log(c*(d*(e + f*x)**p)**q))*(i + j*x)**S(3)/(S(3)*h) + (a + b*log(c*(d*(e + f*x)**p)**q))*(i + j*x)**S(2)*(-g*j/S(2) + h*i/S(2))/h**S(2) + (a + b*log(c*(d*(e + f*x)**p)**q))*(-g*j + h*i)**S(3)*log(f*(g + h*x)/(-e*h + f*g))/h**S(4), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))*(i + j*x)**S(2)/(g + h*x), x), x, a*j*x*(-g*j + h*i)/h**S(2) - b*p*q*(i + j*x)**S(2)/(S(4)*h) - b*j*p*q*x*(-g*j + h*i)/h**S(2) + b*p*q*(-g*j + h*i)**S(2)*polylog(S(2), -h*(e + f*x)/(-e*h + f*g))/h**S(3) - b*j*p*q*x*(-e*j + f*i)/(S(2)*f*h) + b*j*(e + f*x)*(-g*j + h*i)*log(c*(d*(e + f*x)**p)**q)/(f*h**S(2)) - b*p*q*(-e*j + f*i)**S(2)*log(e + f*x)/(S(2)*f**S(2)*h) + (a + b*log(c*(d*(e + f*x)**p)**q))*(i + j*x)**S(2)/(S(2)*h) + (a + b*log(c*(d*(e + f*x)**p)**q))*(-g*j + h*i)**S(2)*log(f*(g + h*x)/(-e*h + f*g))/h**S(3), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))*(i + j*x)/(g + h*x), x), x, a*j*x/h - b*j*p*q*x/h + b*p*q*(-g*j + h*i)*polylog(S(2), -h*(e + f*x)/(-e*h + f*g))/h**S(2) + b*j*(e + f*x)*log(c*(d*(e + f*x)**p)**q)/(f*h) + (a + b*log(c*(d*(e + f*x)**p)**q))*(-g*j + h*i)*log(f*(g + h*x)/(-e*h + f*g))/h**S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))/(g + h*x), x), x, b*p*q*polylog(S(2), -h*(e + f*x)/(-e*h + f*g))/h + (a + b*log(c*(d*(e + f*x)**p)**q))*log(f*(g + h*x)/(-e*h + f*g))/h, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))/((g + h*x)*(i + j*x)), x), x, b*p*q*polylog(S(2), -h*(e + f*x)/(-e*h + f*g))/(-g*j + h*i) - b*p*q*polylog(S(2), -j*(e + f*x)/(-e*j + f*i))/(-g*j + h*i) + (a + b*log(c*(d*(e + f*x)**p)**q))*log(f*(g + h*x)/(-e*h + f*g))/(-g*j + h*i) - (a + b*log(c*(d*(e + f*x)**p)**q))*log(f*(i + j*x)/(-e*j + f*i))/(-g*j + h*i), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))/((g + h*x)*(i + j*x)**S(2)), x), x, -b*f*p*q*log(e + f*x)/((-e*j + f*i)*(-g*j + h*i)) + b*f*p*q*log(i + j*x)/((-e*j + f*i)*(-g*j + h*i)) + b*h*p*q*polylog(S(2), -h*(e + f*x)/(-e*h + f*g))/(-g*j + h*i)**S(2) - b*h*p*q*polylog(S(2), -j*(e + f*x)/(-e*j + f*i))/(-g*j + h*i)**S(2) + h*(a + b*log(c*(d*(e + f*x)**p)**q))*log(f*(g + h*x)/(-e*h + f*g))/(-g*j + h*i)**S(2) - h*(a + b*log(c*(d*(e + f*x)**p)**q))*log(f*(i + j*x)/(-e*j + f*i))/(-g*j + h*i)**S(2) + (a + b*log(c*(d*(e + f*x)**p)**q))/((i + j*x)*(-g*j + h*i)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))/((g + h*x)*(i + j*x)**S(3)), x), x, -b*f**S(2)*p*q*log(e + f*x)/(S(2)*(-e*j + f*i)**S(2)*(-g*j + h*i)) + b*f**S(2)*p*q*log(i + j*x)/(S(2)*(-e*j + f*i)**S(2)*(-g*j + h*i)) - b*f*h*p*q*log(e + f*x)/((-e*j + f*i)*(-g*j + h*i)**S(2)) + b*f*h*p*q*log(i + j*x)/((-e*j + f*i)*(-g*j + h*i)**S(2)) - b*f*p*q/(S(2)*(i + j*x)*(-e*j + f*i)*(-g*j + h*i)) + b*h**S(2)*p*q*polylog(S(2), -h*(e + f*x)/(-e*h + f*g))/(-g*j + h*i)**S(3) - b*h**S(2)*p*q*polylog(S(2), -j*(e + f*x)/(-e*j + f*i))/(-g*j + h*i)**S(3) + h**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))*log(f*(g + h*x)/(-e*h + f*g))/(-g*j + h*i)**S(3) - h**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))*log(f*(i + j*x)/(-e*j + f*i))/(-g*j + h*i)**S(3) + h*(a + b*log(c*(d*(e + f*x)**p)**q))/((i + j*x)*(-g*j + h*i)**S(2)) + (a/S(2) + b*log(c*(d*(e + f*x)**p)**q)/S(2))/((i + j*x)**S(2)*(-g*j + h*i)), expand=True, _diff=True, _numerical=True)
# Squared nested log (a + b*log(...))**2 times (i + j*x)**k / (g + h*x) for
# k = 3 down to -3.  Expected results now reach trilogarithms (polylog(3, ...)).
# NOTE: the first assert below is one statement continued across two physical
# lines inside its parentheses.
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(i + j*x)**S(3)/(g + h*x), x), x, -S(2)*a*b*j*p*q*x*(-g*j + h*i)**S(2)/h**S(3) - S(2)*a*b*j*p*q*x*(-e*j + f*i)*(-g*j + h*i)/(f*h**S(2)) - S(2)*a*b*j*p*q*x*(-e*j + f*i)**S(2)/(S(3)*f**S(2)*h) + b**S(2)*e*j**S(2)*p**S(2)*q**S(2)*x*(-g*j + h*i)/(S(2)*f*h**S(2)) + S(2)*b**S(2)*p**S(2)*q**S(2)*(i + j*x)**S(3)/(S(27)*h) + b**S(2)*j**S(2)*p**S(2)*q**S(2)*x**S(2)*(-g*j + h*i)/(S(4)*h**S(2)) + S(2)*b**S(2)*j*p**S(2)*q**S(2)*x*(-g*j + h*i)**S(2)/h**S(3) - S(2)*b**S(2)*p**S(2)*q**S(2)*(-g*j + h*i)**S(3)*polylog(S(3), -h*(e + f*x)/(-e*h + f*g))/h**S(4) + S(5)*b**S(2)*p**S(2)*q**S(2)*(i + j*x)**S(2)*(-e*j + f*i)/(S(18)*f*h) + S(2)*b**S(2)*j*p**S(2)*q**S(2)*x*(-e*j + f*i)*(-g*j + h*i)/(f*h**S(2)) - S(2)*b**S(2)*j*p*q*(e + f*x)*(-g*j + h*i)**S(2)*log(c*(d*(e + f*x)**p)**q)/(f*h**S(3)) + S(11)*b**S(2)*j*p**S(2)*q**S(2)*x*(-e*j + f*i)**S(2)/(S(9)*f**S(2)*h) - S(2)*b**S(2)*j*p*q*(e + f*x)*(-e*j + f*i)*(-g*j + h*i)*log(c*(d*(e + f*x)**p)**q)/(f**S(2)*h**S(2)) - S(2)*b**S(2)*j*p*q*(e + f*x)*(-e*j + f*i)**S(2)*log(c*(d*(e + f*x)**p)**q)/(S(3)*f**S(3)*h) + S(5)*b**S(2)*p**S(2)*q**S(2)*(-e*j + f*i)**S(3)*log(e + f*x)/(S(9)*f**S(3)*h) - S(2)*b*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))*(i + j*x)**S(3)/(S(9)*h) + S(2)*b*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))*(-g*j + h*i)**S(3)*polylog(S(2), -h*(e + f*x)/(-e*h + f*g))/h**S(4) - b*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))*(i + j*x)**S(2)*(-e*j + f*i)/(S(3)*f*h) - b*j**S(2)*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)**S(2)*(-g*j + h*i)/(S(2)*f**S(2)*h**S(2)) + (a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(i + j*x)**S(3)/(S(3)*h) + (a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(-g*j + h*i)**S(3)*log(f*(g + h*x)/(-e*h + f*g))/h**S(4) + j*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(e + f*x)*(-g*j + h*i)**S(2)/(f*h**S(3)) + j**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(e + f*x)**S(2)*(-g*j + h*i)/(S(2)*f**S(2)*h**S(2)) + j*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(e + f*x)*(-e*j + 
f*i)*(-g*j + h*i)/(f**S(2)*h**S(2)) - (a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(-e*j + f*i)**S(3)/(S(3)*f**S(3)*h), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(i + j*x)**S(2)/(g + h*x), x), x, -S(2)*a*b*j*p*q*x*(-g*j + h*i)/h**S(2) - S(2)*a*b*j*p*q*x*(-e*j + f*i)/(f*h) + b**S(2)*e*j**S(2)*p**S(2)*q**S(2)*x/(S(2)*f*h) + b**S(2)*j**S(2)*p**S(2)*q**S(2)*x**S(2)/(S(4)*h) + S(2)*b**S(2)*j*p**S(2)*q**S(2)*x*(-g*j + h*i)/h**S(2) - S(2)*b**S(2)*p**S(2)*q**S(2)*(-g*j + h*i)**S(2)*polylog(S(3), -h*(e + f*x)/(-e*h + f*g))/h**S(3) + S(2)*b**S(2)*j*p**S(2)*q**S(2)*x*(-e*j + f*i)/(f*h) - S(2)*b**S(2)*j*p*q*(e + f*x)*(-g*j + h*i)*log(c*(d*(e + f*x)**p)**q)/(f*h**S(2)) - S(2)*b**S(2)*j*p*q*(e + f*x)*(-e*j + f*i)*log(c*(d*(e + f*x)**p)**q)/(f**S(2)*h) + S(2)*b*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))*(-g*j + h*i)**S(2)*polylog(S(2), -h*(e + f*x)/(-e*h + f*g))/h**S(3) - b*j**S(2)*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)**S(2)/(S(2)*f**S(2)*h) + (a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(-g*j + h*i)**S(2)*log(f*(g + h*x)/(-e*h + f*g))/h**S(3) + j*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(e + f*x)*(-g*j + h*i)/(f*h**S(2)) + j**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(e + f*x)**S(2)/(S(2)*f**S(2)*h) + j*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(e + f*x)*(-e*j + f*i)/(f**S(2)*h), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(i + j*x)/(g + h*x), x), x, -S(2)*a*b*j*p*q*x/h + S(2)*b**S(2)*j*p**S(2)*q**S(2)*x/h - S(2)*b**S(2)*p**S(2)*q**S(2)*(-g*j + h*i)*polylog(S(3), -h*(e + f*x)/(-e*h + f*g))/h**S(2) - S(2)*b**S(2)*j*p*q*(e + f*x)*log(c*(d*(e + f*x)**p)**q)/(f*h) + S(2)*b*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))*(-g*j + h*i)*polylog(S(2), -h*(e + f*x)/(-e*h + f*g))/h**S(2) + (a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(-g*j + h*i)*log(f*(g + h*x)/(-e*h + f*g))/h**S(2) + j*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(e + f*x)/(f*h), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**S(2)/(g + h*x), x), x, -S(2)*b**S(2)*p**S(2)*q**S(2)*polylog(S(3), -h*(e + f*x)/(-e*h + f*g))/h + S(2)*b*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))*polylog(S(2), -h*(e + f*x)/(-e*h + f*g))/h + (a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*log(f*(g + h*x)/(-e*h + f*g))/h, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**S(2)/((g + h*x)*(i + j*x)), x), x, -S(2)*b**S(2)*p**S(2)*q**S(2)*polylog(S(3), -h*(e + f*x)/(-e*h + f*g))/(-g*j + h*i) + S(2)*b**S(2)*p**S(2)*q**S(2)*polylog(S(3), -j*(e + f*x)/(-e*j + f*i))/(-g*j + h*i) + S(2)*b*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))*polylog(S(2), -h*(e + f*x)/(-e*h + f*g))/(-g*j + h*i) - S(2)*b*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))*polylog(S(2), -j*(e + f*x)/(-e*j + f*i))/(-g*j + h*i) + (a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*log(f*(g + h*x)/(-e*h + f*g))/(-g*j + h*i) - (a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*log(f*(i + j*x)/(-e*j + f*i))/(-g*j + h*i), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**S(2)/((g + h*x)*(i + j*x)**S(2)), x), x, S(2)*b**S(2)*f*p**S(2)*q**S(2)*polylog(S(2), -j*(e + f*x)/(-e*j + f*i))/((-e*j + f*i)*(-g*j + h*i)) - S(2)*b**S(2)*h*p**S(2)*q**S(2)*polylog(S(3), -h*(e + f*x)/(-e*h + f*g))/(-g*j + h*i)**S(2) + S(2)*b**S(2)*h*p**S(2)*q**S(2)*polylog(S(3), -j*(e + f*x)/(-e*j + f*i))/(-g*j + h*i)**S(2) + S(2)*b*f*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))*log(f*(i + j*x)/(-e*j + f*i))/((-e*j + f*i)*(-g*j + h*i)) + S(2)*b*h*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))*polylog(S(2), -h*(e + f*x)/(-e*h + f*g))/(-g*j + h*i)**S(2) - S(2)*b*h*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))*polylog(S(2), -j*(e + f*x)/(-e*j + f*i))/(-g*j + h*i)**S(2) + h*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*log(f*(g + h*x)/(-e*h + f*g))/(-g*j + h*i)**S(2) - h*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*log(f*(i + j*x)/(-e*j + f*i))/(-g*j + h*i)**S(2) - j*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(e + f*x)/((i + j*x)*(-e*j + f*i)*(-g*j + h*i)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**S(2)/((g + h*x)*(i + j*x)**S(3)), x), x, b**S(2)*f**S(2)*p**S(2)*q**S(2)*log(e + f*x)/((-e*j + f*i)**S(2)*(-g*j + h*i)) - b**S(2)*f**S(2)*p**S(2)*q**S(2)*log(i + j*x)/((-e*j + f*i)**S(2)*(-g*j + h*i)) + b**S(2)*f**S(2)*p**S(2)*q**S(2)*polylog(S(2), -j*(e + f*x)/(-e*j + f*i))/((-e*j + f*i)**S(2)*(-g*j + h*i)) + S(2)*b**S(2)*f*h*p**S(2)*q**S(2)*polylog(S(2), -j*(e + f*x)/(-e*j + f*i))/((-e*j + f*i)*(-g*j + h*i)**S(2)) - S(2)*b**S(2)*h**S(2)*p**S(2)*q**S(2)*polylog(S(3), -h*(e + f*x)/(-e*h + f*g))/(-g*j + h*i)**S(3) + S(2)*b**S(2)*h**S(2)*p**S(2)*q**S(2)*polylog(S(3), -j*(e + f*x)/(-e*j + f*i))/(-g*j + h*i)**S(3) + b*f**S(2)*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))*log(f*(i + j*x)/(-e*j + f*i))/((-e*j + f*i)**S(2)*(-g*j + h*i)) + S(2)*b*f*h*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))*log(f*(i + j*x)/(-e*j + f*i))/((-e*j + f*i)*(-g*j + h*i)**S(2)) - b*f*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))/((i + j*x)*(-e*j + f*i)*(-g*j + h*i)) + S(2)*b*h**S(2)*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))*polylog(S(2), -h*(e + f*x)/(-e*h + f*g))/(-g*j + h*i)**S(3) - S(2)*b*h**S(2)*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))*polylog(S(2), -j*(e + f*x)/(-e*j + f*i))/(-g*j + h*i)**S(3) - f**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)/(S(2)*(-e*j + f*i)**S(2)*(-g*j + h*i)) + h**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*log(f*(g + h*x)/(-e*h + f*g))/(-g*j + h*i)**S(3) - h**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*log(f*(i + j*x)/(-e*j + f*i))/(-g*j + h*i)**S(3) - h*j*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(e + f*x)/((i + j*x)*(-e*j + f*i)*(-g*j + h*i)**S(2)) + (a + b*log(c*(d*(e + f*x)**p)**q))**S(2)/(S(2)*(i + j*x)**S(2)*(-g*j + h*i)), expand=True, _diff=True, _numerical=True)
# Cubed nested log (a + b*log(...))**3 times (i + j*x)**k / (g + h*x) for
# k = 3 down to -3.  Expected results reach polylog(4, ...).  NOTE: the first
# assert (k = 3) and the last assert (k = -3) are each one statement continued
# across two physical lines inside their parentheses.
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(i + j*x)**S(3)/(g + h*x), x), x, S(6)*a*b**S(2)*j*p**S(2)*q**S(2)*x*(-g*j + h*i)**S(2)/h**S(3) + S(6)*a*b**S(2)*j*p**S(2)*q**S(2)*x*(-e*j + f*i)*(-g*j + h*i)/(f*h**S(2)) + S(6)*a*b**S(2)*j*p**S(2)*q**S(2)*x*(-e*j + f*i)**S(2)/(f**S(2)*h) - S(3)*b**S(3)*e*j**S(2)*p**S(3)*q**S(3)*x*(-g*j + h*i)/(S(4)*f*h**S(2)) - S(3)*b**S(3)*e*j**S(2)*p**S(3)*q**S(3)*x*(-e*j + f*i)/(S(2)*f**S(2)*h) - S(3)*b**S(3)*j**S(2)*p**S(3)*q**S(3)*x**S(2)*(-g*j + h*i)/(S(8)*h**S(2)) - S(6)*b**S(3)*j*p**S(3)*q**S(3)*x*(-g*j + h*i)**S(2)/h**S(3) + S(6)*b**S(3)*p**S(3)*q**S(3)*(-g*j + h*i)**S(3)*polylog(S(4), -h*(e + f*x)/(-e*h + f*g))/h**S(4) - S(3)*b**S(3)*j**S(2)*p**S(3)*q**S(3)*x**S(2)*(-e*j + f*i)/(S(4)*f*h) - S(6)*b**S(3)*j*p**S(3)*q**S(3)*x*(-e*j + f*i)*(-g*j + h*i)/(f*h**S(2)) + S(6)*b**S(3)*j*p**S(2)*q**S(2)*(e + f*x)*(-g*j + h*i)**S(2)*log(c*(d*(e + f*x)**p)**q)/(f*h**S(3)) - S(6)*b**S(3)*j*p**S(3)*q**S(3)*x*(-e*j + f*i)**S(2)/(f**S(2)*h) + S(6)*b**S(3)*j*p**S(2)*q**S(2)*(e + f*x)*(-e*j + f*i)*(-g*j + h*i)*log(c*(d*(e + f*x)**p)**q)/(f**S(2)*h**S(2)) - S(2)*b**S(3)*j**S(3)*p**S(3)*q**S(3)*(e + f*x)**S(3)/(S(27)*f**S(3)*h) + S(6)*b**S(3)*j*p**S(2)*q**S(2)*(e + f*x)*(-e*j + f*i)**S(2)*log(c*(d*(e + f*x)**p)**q)/(f**S(3)*h) - S(6)*b**S(2)*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))*(-g*j + h*i)**S(3)*polylog(S(3), -h*(e + f*x)/(-e*h + f*g))/h**S(4) + S(3)*b**S(2)*j**S(2)*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)**S(2)*(-g*j + h*i)/(S(4)*f**S(2)*h**S(2)) + S(2)*b**S(2)*j**S(3)*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)**S(3)/(S(9)*f**S(3)*h) + S(3)*b**S(2)*j**S(2)*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)**S(2)*(-e*j + f*i)/(S(2)*f**S(3)*h) + S(3)*b*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(-g*j + h*i)**S(3)*polylog(S(2), -h*(e + f*x)/(-e*h + f*g))/h**S(4) - S(3)*b*j*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(e + f*x)*(-g*j + h*i)**S(2)/(f*h**S(3)) 
- S(3)*b*j**S(2)*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(e + f*x)**S(2)*(-g*j + h*i)/(S(4)*f**S(2)*h**S(2)) - S(3)*b*j*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(e + f*x)*(-e*j + f*i)*(-g*j + h*i)/(f**S(2)*h**S(2)) - b*j**S(3)*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(e + f*x)**S(3)/(S(3)*f**S(3)*h) - S(3)*b*j**S(2)*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(e + f*x)**S(2)*(-e*j + f*i)/(S(2)*f**S(3)*h) - S(3)*b*j*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(e + f*x)*(-e*j + f*i)**S(2)/(f**S(3)*h) + (a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(-g*j + h*i)**S(3)*log(f*(g + h*x)/(-e*h + f*g))/h**S(4) + j*(a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(e + f*x)*(-g*j + h*i)**S(2)/(f*h**S(3)) + j**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(e + f*x)**S(2)*(-g*j + h*i)/(S(2)*f**S(2)*h**S(2)) + j*(a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(e + f*x)*(-e*j + f*i)*(-g*j + h*i)/(f**S(2)*h**S(2)) + j**S(3)*(a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(e + f*x)**S(3)/(S(3)*f**S(3)*h) + j**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(e + f*x)**S(2)*(-e*j + f*i)/(f**S(3)*h) + j*(a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(e + f*x)*(-e*j + f*i)**S(2)/(f**S(3)*h), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(i + j*x)**S(2)/(g + h*x), x), x, S(6)*a*b**S(2)*j*p**S(2)*q**S(2)*x*(-g*j + h*i)/h**S(2) + S(6)*a*b**S(2)*j*p**S(2)*q**S(2)*x*(-e*j + f*i)/(f*h) - S(3)*b**S(3)*e*j**S(2)*p**S(3)*q**S(3)*x/(S(4)*f*h) - S(3)*b**S(3)*j**S(2)*p**S(3)*q**S(3)*x**S(2)/(S(8)*h) - S(6)*b**S(3)*j*p**S(3)*q**S(3)*x*(-g*j + h*i)/h**S(2) + S(6)*b**S(3)*p**S(3)*q**S(3)*(-g*j + h*i)**S(2)*polylog(S(4), -h*(e + f*x)/(-e*h + f*g))/h**S(3) - S(6)*b**S(3)*j*p**S(3)*q**S(3)*x*(-e*j + f*i)/(f*h) + S(6)*b**S(3)*j*p**S(2)*q**S(2)*(e + f*x)*(-g*j + h*i)*log(c*(d*(e + f*x)**p)**q)/(f*h**S(2)) + S(6)*b**S(3)*j*p**S(2)*q**S(2)*(e + f*x)*(-e*j + f*i)*log(c*(d*(e + f*x)**p)**q)/(f**S(2)*h) - S(6)*b**S(2)*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))*(-g*j + h*i)**S(2)*polylog(S(3), -h*(e + f*x)/(-e*h + f*g))/h**S(3) + S(3)*b**S(2)*j**S(2)*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))*(e + f*x)**S(2)/(S(4)*f**S(2)*h) + S(3)*b*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(-g*j + h*i)**S(2)*polylog(S(2), -h*(e + f*x)/(-e*h + f*g))/h**S(3) - S(3)*b*j*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(e + f*x)*(-g*j + h*i)/(f*h**S(2)) - S(3)*b*j**S(2)*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(e + f*x)**S(2)/(S(4)*f**S(2)*h) - S(3)*b*j*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(e + f*x)*(-e*j + f*i)/(f**S(2)*h) + (a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(-g*j + h*i)**S(2)*log(f*(g + h*x)/(-e*h + f*g))/h**S(3) + j*(a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(e + f*x)*(-g*j + h*i)/(f*h**S(2)) + j**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(e + f*x)**S(2)/(S(2)*f**S(2)*h) + j*(a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(e + f*x)*(-e*j + f*i)/(f**S(2)*h), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(i + j*x)/(g + h*x), x), x, S(6)*a*b**S(2)*j*p**S(2)*q**S(2)*x/h - S(6)*b**S(3)*j*p**S(3)*q**S(3)*x/h + S(6)*b**S(3)*p**S(3)*q**S(3)*(-g*j + h*i)*polylog(S(4), -h*(e + f*x)/(-e*h + f*g))/h**S(2) + S(6)*b**S(3)*j*p**S(2)*q**S(2)*(e + f*x)*log(c*(d*(e + f*x)**p)**q)/(f*h) - S(6)*b**S(2)*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))*(-g*j + h*i)*polylog(S(3), -h*(e + f*x)/(-e*h + f*g))/h**S(2) + S(3)*b*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(-g*j + h*i)*polylog(S(2), -h*(e + f*x)/(-e*h + f*g))/h**S(2) - S(3)*b*j*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(e + f*x)/(f*h) + (a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(-g*j + h*i)*log(f*(g + h*x)/(-e*h + f*g))/h**S(2) + j*(a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(e + f*x)/(f*h), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**S(3)/(g + h*x), x), x, S(6)*b**S(3)*p**S(3)*q**S(3)*polylog(S(4), -h*(e + f*x)/(-e*h + f*g))/h - S(6)*b**S(2)*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))*polylog(S(3), -h*(e + f*x)/(-e*h + f*g))/h + S(3)*b*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*polylog(S(2), -h*(e + f*x)/(-e*h + f*g))/h + (a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*log(f*(g + h*x)/(-e*h + f*g))/h, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**S(3)/((g + h*x)*(i + j*x)), x), x, S(6)*b**S(3)*p**S(3)*q**S(3)*polylog(S(4), -h*(e + f*x)/(-e*h + f*g))/(-g*j + h*i) - S(6)*b**S(3)*p**S(3)*q**S(3)*polylog(S(4), -j*(e + f*x)/(-e*j + f*i))/(-g*j + h*i) - S(6)*b**S(2)*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))*polylog(S(3), -h*(e + f*x)/(-e*h + f*g))/(-g*j + h*i) + S(6)*b**S(2)*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))*polylog(S(3), -j*(e + f*x)/(-e*j + f*i))/(-g*j + h*i) + S(3)*b*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*polylog(S(2), -h*(e + f*x)/(-e*h + f*g))/(-g*j + h*i) - S(3)*b*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*polylog(S(2), -j*(e + f*x)/(-e*j + f*i))/(-g*j + h*i) + (a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*log(f*(g + h*x)/(-e*h + f*g))/(-g*j + h*i) - (a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*log(f*(i + j*x)/(-e*j + f*i))/(-g*j + h*i), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**S(3)/((g + h*x)*(i + j*x)**S(2)), x), x, -S(6)*b**S(3)*f*p**S(3)*q**S(3)*polylog(S(3), -j*(e + f*x)/(-e*j + f*i))/((-e*j + f*i)*(-g*j + h*i)) + S(6)*b**S(3)*h*p**S(3)*q**S(3)*polylog(S(4), -h*(e + f*x)/(-e*h + f*g))/(-g*j + h*i)**S(2) - S(6)*b**S(3)*h*p**S(3)*q**S(3)*polylog(S(4), -j*(e + f*x)/(-e*j + f*i))/(-g*j + h*i)**S(2) + S(6)*b**S(2)*f*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))*polylog(S(2), -j*(e + f*x)/(-e*j + f*i))/((-e*j + f*i)*(-g*j + h*i)) - S(6)*b**S(2)*h*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))*polylog(S(3), -h*(e + f*x)/(-e*h + f*g))/(-g*j + h*i)**S(2) + S(6)*b**S(2)*h*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))*polylog(S(3), -j*(e + f*x)/(-e*j + f*i))/(-g*j + h*i)**S(2) + S(3)*b*f*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*log(f*(i + j*x)/(-e*j + f*i))/((-e*j + f*i)*(-g*j + h*i)) + S(3)*b*h*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*polylog(S(2), -h*(e + f*x)/(-e*h + f*g))/(-g*j + h*i)**S(2) - S(3)*b*h*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*polylog(S(2), -j*(e + f*x)/(-e*j + f*i))/(-g*j + h*i)**S(2) + h*(a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*log(f*(g + h*x)/(-e*h + f*g))/(-g*j + h*i)**S(2) - h*(a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*log(f*(i + j*x)/(-e*j + f*i))/(-g*j + h*i)**S(2) - j*(a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(e + f*x)/((i + j*x)*(-e*j + f*i)*(-g*j + h*i)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))**S(3)/((g + h*x)*(i + j*x)**S(3)), x), x, -S(3)*b**S(3)*f**S(2)*p**S(3)*q**S(3)*polylog(S(2), -j*(e + f*x)/(-e*j + f*i))/((-e*j + f*i)**S(2)*(-g*j + h*i)) - S(3)*b**S(3)*f**S(2)*p**S(3)*q**S(3)*polylog(S(3), -j*(e + f*x)/(-e*j + f*i))/((-e*j + f*i)**S(2)*(-g*j + h*i)) - S(6)*b**S(3)*f*h*p**S(3)*q**S(3)*polylog(S(3), -j*(e + f*x)/(-e*j + f*i))/((-e*j + f*i)*(-g*j + h*i)**S(2)) + S(6)*b**S(3)*h**S(2)*p**S(3)*q**S(3)*polylog(S(4), -h*(e + f*x)/(-e*h + f*g))/(-g*j + h*i)**S(3) - S(6)*b**S(3)*h**S(2)*p**S(3)*q**S(3)*polylog(S(4), -j*(e + f*x)/(-e*j + f*i))/(-g*j + h*i)**S(3) - S(3)*b**S(2)*f**S(2)*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))*log(f*(i + j*x)/(-e*j + f*i))/((-e*j + f*i)**S(2)*(-g*j + h*i)) + S(3)*b**S(2)*f**S(2)*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))*polylog(S(2), -j*(e + f*x)/(-e*j + f*i))/((-e*j + f*i)**S(2)*(-g*j + h*i)) + S(6)*b**S(2)*f*h*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))*polylog(S(2), -j*(e + f*x)/(-e*j + f*i))/((-e*j + f*i)*(-g*j + h*i)**S(2)) - S(6)*b**S(2)*h**S(2)*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))*polylog(S(3), -h*(e + f*x)/(-e*h + f*g))/(-g*j + h*i)**S(3) + S(6)*b**S(2)*h**S(2)*p**S(2)*q**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))*polylog(S(3), -j*(e + f*x)/(-e*j + f*i))/(-g*j + h*i)**S(3) + S(3)*b*f**S(2)*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*log(f*(i + j*x)/(-e*j + f*i))/(S(2)*(-e*j + f*i)**S(2)*(-g*j + h*i)) + S(3)*b*f*h*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*log(f*(i + j*x)/(-e*j + f*i))/((-e*j + f*i)*(-g*j + h*i)**S(2)) + S(3)*b*f*j*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(e + f*x)/(S(2)*(i + j*x)*(-e*j + f*i)**S(2)*(-g*j + h*i)) + S(3)*b*h**S(2)*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*polylog(S(2), -h*(e + f*x)/(-e*h + f*g))/(-g*j + h*i)**S(3) - S(3)*b*h**S(2)*p*q*(a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*polylog(S(2), -j*(e + f*x)/(-e*j + f*i))/(-g*j + h*i)**S(3) - f**S(2)*(a + b*log(c*(d*(e + 
f*x)**p)**q))**S(3)/(S(2)*(-e*j + f*i)**S(2)*(-g*j + h*i)) + h**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*log(f*(g + h*x)/(-e*h + f*g))/(-g*j + h*i)**S(3) - h**S(2)*(a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*log(f*(i + j*x)/(-e*j + f*i))/(-g*j + h*i)**S(3) - h*j*(a + b*log(c*(d*(e + f*x)**p)**q))**S(3)*(e + f*x)/((i + j*x)*(-e*j + f*i)*(-g*j + h*i)**S(2)) + (a + b*log(c*(d*(e + f*x)**p)**q))**S(3)/(S(2)*(i + j*x)**S(2)*(-g*j + h*i)), expand=True, _diff=True, _numerical=True)
# Integrands with the nested log in the DENOMINATOR (first and second power).
# These have no known closed form in Rubi; the asserted result is the
# unevaluated Integral itself, i.e. the integrator is expected to return
# the input unchanged.
assert rubi_test(rubi_integrate((i + j*x)/((a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)), x), x, Integral((i + j*x)/((a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/((a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)), x), x, Integral(S(1)/((a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/((a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)*(i + j*x)), x), x, Integral(S(1)/((a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)*(i + j*x)), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/((a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)*(i + j*x)**S(2)), x), x, Integral(S(1)/((a + b*log(c*(d*(e + f*x)**p)**q))*(g + h*x)*(i + j*x)**S(2)), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((i + j*x)/((a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(g + h*x)), x), x, Integral((i + j*x)/((a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(g + h*x)), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/((a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(g + h*x)), x), x, Integral(S(1)/((a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(g + h*x)), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/((a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(g + h*x)*(i + j*x)), x), x, Integral(S(1)/((a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(g + h*x)*(i + j*x)), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/((a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(g + h*x)*(i + j*x)**S(2)), x), x, Integral(S(1)/((a + b*log(c*(d*(e + f*x)**p)**q))**S(2)*(g + h*x)*(i + j*x)**S(2)), x), expand=True, _diff=True, _numerical=True)
# Nested log divided by quadratic and square-root quadratic denominators.
# The sqrt cases are expressed via asinh/asin substitutions with exp() and
# polylog(2, ...) terms in the expected antiderivative.
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))/(g + h*x**S(2)), x), x, -b*p*q*polylog(S(2), sqrt(h)*(-e - f*x)/(-e*sqrt(h) + f*sqrt(-g)))/(S(2)*sqrt(h)*sqrt(-g)) + b*p*q*polylog(S(2), sqrt(h)*(e + f*x)/(e*sqrt(h) + f*sqrt(-g)))/(S(2)*sqrt(h)*sqrt(-g)) - (a/S(2) + b*log(c*(d*(e + f*x)**p)**q)/S(2))*log(f*(sqrt(h)*x + sqrt(-g))/(-e*sqrt(h) + f*sqrt(-g)))/(sqrt(h)*sqrt(-g)) + (a/S(2) + b*log(c*(d*(e + f*x)**p)**q)/S(2))*log(f*(-sqrt(h)*x + sqrt(-g))/(e*sqrt(h) + f*sqrt(-g)))/(sqrt(h)*sqrt(-g)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))/sqrt(h*x**S(2) + S(2)), x), x, -b*p*q*log(sqrt(S(2))*f*exp(asinh(sqrt(S(2))*sqrt(h)*x/S(2)))/(e*sqrt(h) - sqrt(e**S(2)*h + S(2)*f**S(2))) + S(1))*asinh(sqrt(S(2))*sqrt(h)*x/S(2))/sqrt(h) - b*p*q*log(sqrt(S(2))*f*exp(asinh(sqrt(S(2))*sqrt(h)*x/S(2)))/(e*sqrt(h) + sqrt(e**S(2)*h + S(2)*f**S(2))) + S(1))*asinh(sqrt(S(2))*sqrt(h)*x/S(2))/sqrt(h) + b*p*q*asinh(sqrt(S(2))*sqrt(h)*x/S(2))**S(2)/(S(2)*sqrt(h)) - b*p*q*polylog(S(2), -sqrt(S(2))*f*exp(asinh(sqrt(S(2))*sqrt(h)*x/S(2)))/(e*sqrt(h) - sqrt(e**S(2)*h + S(2)*f**S(2))))/sqrt(h) - b*p*q*polylog(S(2), -sqrt(S(2))*f*exp(asinh(sqrt(S(2))*sqrt(h)*x/S(2)))/(e*sqrt(h) + sqrt(e**S(2)*h + S(2)*f**S(2))))/sqrt(h) + (a + b*log(c*(d*(e + f*x)**p)**q))*asinh(sqrt(S(2))*sqrt(h)*x/S(2))/sqrt(h), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))/sqrt(g + h*x**S(2)), x), x, -b*sqrt(g)*p*q*sqrt(S(1) + h*x**S(2)/g)*log(f*sqrt(g)*exp(asinh(sqrt(h)*x/sqrt(g)))/(e*sqrt(h) - sqrt(e**S(2)*h + f**S(2)*g)) + S(1))*asinh(sqrt(h)*x/sqrt(g))/(sqrt(h)*sqrt(g + h*x**S(2))) - b*sqrt(g)*p*q*sqrt(S(1) + h*x**S(2)/g)*log(f*sqrt(g)*exp(asinh(sqrt(h)*x/sqrt(g)))/(e*sqrt(h) + sqrt(e**S(2)*h + f**S(2)*g)) + S(1))*asinh(sqrt(h)*x/sqrt(g))/(sqrt(h)*sqrt(g + h*x**S(2))) + b*sqrt(g)*p*q*sqrt(S(1) + h*x**S(2)/g)*asinh(sqrt(h)*x/sqrt(g))**S(2)/(S(2)*sqrt(h)*sqrt(g + h*x**S(2))) - b*sqrt(g)*p*q*sqrt(S(1) + h*x**S(2)/g)*polylog(S(2), -f*sqrt(g)*exp(asinh(sqrt(h)*x/sqrt(g)))/(e*sqrt(h) - sqrt(e**S(2)*h + f**S(2)*g)))/(sqrt(h)*sqrt(g + h*x**S(2))) - b*sqrt(g)*p*q*sqrt(S(1) + h*x**S(2)/g)*polylog(S(2), -f*sqrt(g)*exp(asinh(sqrt(h)*x/sqrt(g)))/(e*sqrt(h) + sqrt(e**S(2)*h + f**S(2)*g)))/(sqrt(h)*sqrt(g + h*x**S(2))) + sqrt(g)*sqrt(S(1) + h*x**S(2)/g)*(a + b*log(c*(d*(e + f*x)**p)**q))*asinh(sqrt(h)*x/sqrt(g))/(sqrt(h)*sqrt(g + h*x**S(2))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(-h*x + S(2))*sqrt(h*x + S(2))), x), x, -b*p*q*log(S(2)*f*exp(I*asin(h*x/S(2)))/(I*e*h - sqrt(-e**S(2)*h**S(2) + S(4)*f**S(2))) + S(1))*asin(h*x/S(2))/h - b*p*q*log(S(2)*f*exp(I*asin(h*x/S(2)))/(I*e*h + sqrt(-e**S(2)*h**S(2) + S(4)*f**S(2))) + S(1))*asin(h*x/S(2))/h + I*b*p*q*asin(h*x/S(2))**S(2)/(S(2)*h) + I*b*p*q*polylog(S(2), -S(2)*f*exp(I*asin(h*x/S(2)))/(I*e*h - sqrt(-e**S(2)*h**S(2) + S(4)*f**S(2))))/h + I*b*p*q*polylog(S(2), -S(2)*f*exp(I*asin(h*x/S(2)))/(I*e*h + sqrt(-e**S(2)*h**S(2) + S(4)*f**S(2))))/h + (a + b*log(c*(d*(e + f*x)**p)**q))*asin(h*x/S(2))/h, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d*(e + f*x)**p)**q))/(sqrt(g - h*x)*sqrt(g + h*x)), x), x, -b*g*p*q*sqrt(S(1) - h**S(2)*x**S(2)/g**S(2))*log(f*g*exp(I*asin(h*x/g))/(I*e*h - sqrt(-e**S(2)*h**S(2) + f**S(2)*g**S(2))) + S(1))*asin(h*x/g)/(h*sqrt(g - h*x)*sqrt(g + h*x)) - b*g*p*q*sqrt(S(1) - h**S(2)*x**S(2)/g**S(2))*log(f*g*exp(I*asin(h*x/g))/(I*e*h + sqrt(-e**S(2)*h**S(2) + f**S(2)*g**S(2))) + S(1))*asin(h*x/g)/(h*sqrt(g - h*x)*sqrt(g + h*x)) + I*b*g*p*q*sqrt(S(1) - h**S(2)*x**S(2)/g**S(2))*asin(h*x/g)**S(2)/(S(2)*h*sqrt(g - h*x)*sqrt(g + h*x)) + I*b*g*p*q*sqrt(S(1) - h**S(2)*x**S(2)/g**S(2))*polylog(S(2), -f*g*exp(I*asin(h*x/g))/(I*e*h - sqrt(-e**S(2)*h**S(2) + f**S(2)*g**S(2))))/(h*sqrt(g - h*x)*sqrt(g + h*x)) + I*b*g*p*q*sqrt(S(1) - h**S(2)*x**S(2)/g**S(2))*polylog(S(2), -f*g*exp(I*asin(h*x/g))/(I*e*h + sqrt(-e**S(2)*h**S(2) + f**S(2)*g**S(2))))/(h*sqrt(g - h*x)*sqrt(g + h*x)) + g*sqrt(S(1) - h**S(2)*x**S(2)/g**S(2))*(a + b*log(c*(d*(e + f*x)**p)**q))*asin(h*x/g)/(h*sqrt(g - h*x)*sqrt(g + h*x)), expand=True, _diff=True, _numerical=True)
# Special cases: log of a Moebius ratio over a difference of squares
# e**2 - f**2*x**2.  Expected results are dilogarithms, with extra
# atanh / log(2) terms when the log argument lacks the factor 2.
assert rubi_test(rubi_integrate(log(S(2)*e/(e + f*x))/(e**S(2) - f**S(2)*x**S(2)), x), x, polylog(S(2), (-e + f*x)/(e + f*x))/(S(2)*e*f), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(S(2)*e/(e + f*x)))/(e**S(2) - f**S(2)*x**S(2)), x), x, a*atanh(f*x/e)/(e*f) + b*polylog(S(2), (-e + f*x)/(e + f*x))/(S(2)*e*f), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e/(e + f*x))/(e**S(2) - f**S(2)*x**S(2)), x), x, -log(S(2))*atanh(f*x/e)/(e*f) + polylog(S(2), (-e + f*x)/(e + f*x))/(S(2)*e*f), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(e/(e + f*x)))/(e**S(2) - f**S(2)*x**S(2)), x), x, b*polylog(S(2), (-e + f*x)/(e + f*x))/(S(2)*e*f) + (a - b*log(S(2)))*atanh(f*x/e)/(e*f), expand=True, _diff=True, _numerical=True)
# log(a + b*x) (plain and as log(c*(a + b*x)**n)**m) against quadratic
# denominators c + d/x**2 and d + e*x**2, including x**k numerator factors.
# Expected results involve sqrt(-c)/sqrt(-d) branches and polylogarithms;
# the reciprocal-log case (L32588 pattern) is expected to stay unevaluated.
assert rubi_test(rubi_integrate(log(a + b*x)/(c + d/x**S(2)), x), x, -sqrt(d)*log(b*(sqrt(d) - x*sqrt(-c))/(a*sqrt(-c) + b*sqrt(d)))*log(a + b*x)/(S(2)*(-c)**(S(3)/2)) + sqrt(d)*log(-b*(sqrt(d) + x*sqrt(-c))/(a*sqrt(-c) - b*sqrt(d)))*log(a + b*x)/(S(2)*(-c)**(S(3)/2)) + sqrt(d)*polylog(S(2), sqrt(-c)*(a + b*x)/(a*sqrt(-c) - b*sqrt(d)))/(S(2)*(-c)**(S(3)/2)) - sqrt(d)*polylog(S(2), sqrt(-c)*(a + b*x)/(a*sqrt(-c) + b*sqrt(d)))/(S(2)*(-c)**(S(3)/2)) - x/c + (a + b*x)*log(a + b*x)/(b*c), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x)**n)**S(3)/(d + e*x**S(2)), x), x, -S(3)*n**S(3)*polylog(S(4), sqrt(e)*(-a - b*x)/(-a*sqrt(e) + b*sqrt(-d)))/(sqrt(e)*sqrt(-d)) + S(3)*n**S(3)*polylog(S(4), sqrt(e)*(a + b*x)/(a*sqrt(e) + b*sqrt(-d)))/(sqrt(e)*sqrt(-d)) + S(3)*n**S(2)*log(c*(a + b*x)**n)*polylog(S(3), sqrt(e)*(-a - b*x)/(-a*sqrt(e) + b*sqrt(-d)))/(sqrt(e)*sqrt(-d)) - S(3)*n**S(2)*log(c*(a + b*x)**n)*polylog(S(3), sqrt(e)*(a + b*x)/(a*sqrt(e) + b*sqrt(-d)))/(sqrt(e)*sqrt(-d)) - S(3)*n*log(c*(a + b*x)**n)**S(2)*polylog(S(2), sqrt(e)*(-a - b*x)/(-a*sqrt(e) + b*sqrt(-d)))/(S(2)*sqrt(e)*sqrt(-d)) + S(3)*n*log(c*(a + b*x)**n)**S(2)*polylog(S(2), sqrt(e)*(a + b*x)/(a*sqrt(e) + b*sqrt(-d)))/(S(2)*sqrt(e)*sqrt(-d)) - log(c*(a + b*x)**n)**S(3)*log(b*(sqrt(e)*x + sqrt(-d))/(-a*sqrt(e) + b*sqrt(-d)))/(S(2)*sqrt(e)*sqrt(-d)) + log(c*(a + b*x)**n)**S(3)*log(b*(-sqrt(e)*x + sqrt(-d))/(a*sqrt(e) + b*sqrt(-d)))/(S(2)*sqrt(e)*sqrt(-d)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x)**n)**S(2)/(d + e*x**S(2)), x), x, n**S(2)*polylog(S(3), sqrt(e)*(-a - b*x)/(-a*sqrt(e) + b*sqrt(-d)))/(sqrt(e)*sqrt(-d)) - n**S(2)*polylog(S(3), sqrt(e)*(a + b*x)/(a*sqrt(e) + b*sqrt(-d)))/(sqrt(e)*sqrt(-d)) - n*log(c*(a + b*x)**n)*polylog(S(2), sqrt(e)*(-a - b*x)/(-a*sqrt(e) + b*sqrt(-d)))/(sqrt(e)*sqrt(-d)) + n*log(c*(a + b*x)**n)*polylog(S(2), sqrt(e)*(a + b*x)/(a*sqrt(e) + b*sqrt(-d)))/(sqrt(e)*sqrt(-d)) - log(c*(a + b*x)**n)**S(2)*log(b*(sqrt(e)*x + sqrt(-d))/(-a*sqrt(e) + b*sqrt(-d)))/(S(2)*sqrt(e)*sqrt(-d)) + log(c*(a + b*x)**n)**S(2)*log(b*(-sqrt(e)*x + sqrt(-d))/(a*sqrt(e) + b*sqrt(-d)))/(S(2)*sqrt(e)*sqrt(-d)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x)**n)/(d + e*x**S(2)), x), x, -n*polylog(S(2), sqrt(e)*(-a - b*x)/(-a*sqrt(e) + b*sqrt(-d)))/(S(2)*sqrt(e)*sqrt(-d)) + n*polylog(S(2), sqrt(e)*(a + b*x)/(a*sqrt(e) + b*sqrt(-d)))/(S(2)*sqrt(e)*sqrt(-d)) - log(c*(a + b*x)**n)*log(b*(sqrt(e)*x + sqrt(-d))/(-a*sqrt(e) + b*sqrt(-d)))/(S(2)*sqrt(e)*sqrt(-d)) + log(c*(a + b*x)**n)*log(b*(-sqrt(e)*x + sqrt(-d))/(a*sqrt(e) + b*sqrt(-d)))/(S(2)*sqrt(e)*sqrt(-d)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/((d + e*x**S(2))*log(c*(a + b*x)**n)), x), x, Integral(S(1)/((d + e*x**S(2))*log(c*(a + b*x)**n)), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(5)*log(c + d*x)/(a + b*x**S(2)), x), x, a**S(2)*log(-d*(sqrt(b)*x + sqrt(-a))/(sqrt(b)*c - d*sqrt(-a)))*log(c + d*x)/(S(2)*b**S(3)) + a**S(2)*log(d*(-sqrt(b)*x + sqrt(-a))/(sqrt(b)*c + d*sqrt(-a)))*log(c + d*x)/(S(2)*b**S(3)) + a**S(2)*polylog(S(2), sqrt(b)*(c + d*x)/(sqrt(b)*c - d*sqrt(-a)))/(S(2)*b**S(3)) + a**S(2)*polylog(S(2), sqrt(b)*(c + d*x)/(sqrt(b)*c + d*sqrt(-a)))/(S(2)*b**S(3)) + a*c**S(2)*log(c + d*x)/(S(2)*b**S(2)*d**S(2)) - a*c*x/(S(2)*b**S(2)*d) - a*x**S(2)*log(c + d*x)/(S(2)*b**S(2)) + a*x**S(2)/(S(4)*b**S(2)) - c**S(4)*log(c + d*x)/(S(4)*b*d**S(4)) + c**S(3)*x/(S(4)*b*d**S(3)) - c**S(2)*x**S(2)/(S(8)*b*d**S(2)) + c*x**S(3)/(S(12)*b*d) + x**S(4)*log(c + d*x)/(S(4)*b) - x**S(4)/(S(16)*b), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(4)*log(c + d*x)/(a + b*x**S(2)), x), x, a*x/b**S(2) - a*(c + d*x)*log(c + d*x)/(b**S(2)*d) + c**S(3)*log(c + d*x)/(S(3)*b*d**S(3)) - c**S(2)*x/(S(3)*b*d**S(2)) + c*x**S(2)/(S(6)*b*d) + x**S(3)*log(c + d*x)/(S(3)*b) - x**S(3)/(S(9)*b) - (-a)**(S(3)/2)*log(-d*(sqrt(b)*x + sqrt(-a))/(sqrt(b)*c - d*sqrt(-a)))*log(c + d*x)/(S(2)*b**(S(5)/2)) + (-a)**(S(3)/2)*log(d*(-sqrt(b)*x + sqrt(-a))/(sqrt(b)*c + d*sqrt(-a)))*log(c + d*x)/(S(2)*b**(S(5)/2)) - (-a)**(S(3)/2)*polylog(S(2), sqrt(b)*(c + d*x)/(sqrt(b)*c - d*sqrt(-a)))/(S(2)*b**(S(5)/2)) + (-a)**(S(3)/2)*polylog(S(2), sqrt(b)*(c + d*x)/(sqrt(b)*c + d*sqrt(-a)))/(S(2)*b**(S(5)/2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)*log(c + d*x)/(a + b*x**S(2)), x), x, -a*log(-d*(sqrt(b)*x + sqrt(-a))/(sqrt(b)*c - d*sqrt(-a)))*log(c + d*x)/(S(2)*b**S(2)) - a*log(d*(-sqrt(b)*x + sqrt(-a))/(sqrt(b)*c + d*sqrt(-a)))*log(c + d*x)/(S(2)*b**S(2)) - a*polylog(S(2), sqrt(b)*(c + d*x)/(sqrt(b)*c - d*sqrt(-a)))/(S(2)*b**S(2)) - a*polylog(S(2), sqrt(b)*(c + d*x)/(sqrt(b)*c + d*sqrt(-a)))/(S(2)*b**S(2)) - c**S(2)*log(c + d*x)/(S(2)*b*d**S(2)) + c*x/(S(2)*b*d) + x**S(2)*log(c + d*x)/(S(2)*b) - x**S(2)/(S(4)*b), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)*log(c + d*x)/(a + b*x**S(2)), x), x, -x/b + (c + d*x)*log(c + d*x)/(b*d) - sqrt(-a)*log(-d*(sqrt(b)*x + sqrt(-a))/(sqrt(b)*c - d*sqrt(-a)))*log(c + d*x)/(S(2)*b**(S(3)/2)) + sqrt(-a)*log(d*(-sqrt(b)*x + sqrt(-a))/(sqrt(b)*c + d*sqrt(-a)))*log(c + d*x)/(S(2)*b**(S(3)/2)) - sqrt(-a)*polylog(S(2), sqrt(b)*(c + d*x)/(sqrt(b)*c - d*sqrt(-a)))/(S(2)*b**(S(3)/2)) + sqrt(-a)*polylog(S(2), sqrt(b)*(c + d*x)/(sqrt(b)*c + d*sqrt(-a)))/(S(2)*b**(S(3)/2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(c + d*x)/(a + b*x**S(2)), x), x, log(-d*(sqrt(b)*x + sqrt(-a))/(sqrt(b)*c - d*sqrt(-a)))*log(c + d*x)/(S(2)*b) + log(d*(-sqrt(b)*x + sqrt(-a))/(sqrt(b)*c + d*sqrt(-a)))*log(c + d*x)/(S(2)*b) + polylog(S(2), sqrt(b)*(c + d*x)/(sqrt(b)*c - d*sqrt(-a)))/(S(2)*b) + polylog(S(2), sqrt(b)*(c + d*x)/(sqrt(b)*c + d*sqrt(-a)))/(S(2)*b), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c + d*x)/(a + b*x**S(2)), x), x, -log(-d*(sqrt(b)*x + sqrt(-a))/(sqrt(b)*c - d*sqrt(-a)))*log(c + d*x)/(S(2)*sqrt(b)*sqrt(-a)) + log(d*(-sqrt(b)*x + sqrt(-a))/(sqrt(b)*c + d*sqrt(-a)))*log(c + d*x)/(S(2)*sqrt(b)*sqrt(-a)) - polylog(S(2), sqrt(b)*(c + d*x)/(sqrt(b)*c - d*sqrt(-a)))/(S(2)*sqrt(b)*sqrt(-a)) + polylog(S(2), sqrt(b)*(c + d*x)/(sqrt(b)*c + d*sqrt(-a)))/(S(2)*sqrt(b)*sqrt(-a)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c + d*x)/(x*(a + b*x**S(2))), x), x, log(-d*x/c)*log(c + d*x)/a - log(-d*(sqrt(b)*x + sqrt(-a))/(sqrt(b)*c - d*sqrt(-a)))*log(c + d*x)/(S(2)*a) - log(d*(-sqrt(b)*x + sqrt(-a))/(sqrt(b)*c + d*sqrt(-a)))*log(c + d*x)/(S(2)*a) + polylog(S(2), (c + d*x)/c)/a - polylog(S(2), sqrt(b)*(c + d*x)/(sqrt(b)*c - d*sqrt(-a)))/(S(2)*a) - polylog(S(2), sqrt(b)*(c + d*x)/(sqrt(b)*c + d*sqrt(-a)))/(S(2)*a), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c + d*x)/(x**S(2)*(a + b*x**S(2))), x), x, -sqrt(b)*log(-d*(sqrt(b)*x + sqrt(-a))/(sqrt(b)*c - d*sqrt(-a)))*log(c + d*x)/(S(2)*(-a)**(S(3)/2)) + sqrt(b)*log(d*(-sqrt(b)*x + sqrt(-a))/(sqrt(b)*c + d*sqrt(-a)))*log(c + d*x)/(S(2)*(-a)**(S(3)/2)) - sqrt(b)*polylog(S(2), sqrt(b)*(c + d*x)/(sqrt(b)*c - d*sqrt(-a)))/(S(2)*(-a)**(S(3)/2)) + sqrt(b)*polylog(S(2), sqrt(b)*(c + d*x)/(sqrt(b)*c + d*sqrt(-a)))/(S(2)*(-a)**(S(3)/2)) - log(c + d*x)/(a*x) + d*log(x)/(a*c) - d*log(c + d*x)/(a*c), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c + d*x)/(x**S(3)*(a + b*x**S(2))), x), x, -log(c + d*x)/(S(2)*a*x**S(2)) - d/(S(2)*a*c*x) - d**S(2)*log(x)/(S(2)*a*c**S(2)) + d**S(2)*log(c + d*x)/(S(2)*a*c**S(2)) - b*log(-d*x/c)*log(c + d*x)/a**S(2) + b*log(-d*(sqrt(b)*x + sqrt(-a))/(sqrt(b)*c - d*sqrt(-a)))*log(c + d*x)/(S(2)*a**S(2)) + b*log(d*(-sqrt(b)*x + sqrt(-a))/(sqrt(b)*c + d*sqrt(-a)))*log(c + d*x)/(S(2)*a**S(2)) - b*polylog(S(2), (c + d*x)/c)/a**S(2) + b*polylog(S(2), sqrt(b)*(c + d*x)/(sqrt(b)*c - d*sqrt(-a)))/(S(2)*a**S(2)) + b*polylog(S(2), sqrt(b)*(c + d*x)/(sqrt(b)*c + d*sqrt(-a)))/(S(2)*a**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(5)*log(c + d*x)/(a + b*x**S(3)), x), x, -a*log(-d*(a**(S(1)/3) + b**(S(1)/3)*x)/(-a**(S(1)/3)*d + b**(S(1)/3)*c))*log(c + d*x)/(S(3)*b**S(2)) - a*log(-d*((S(-1))**(S(2)/3)*a**(S(1)/3) + b**(S(1)/3)*x)/(-(S(-1))**(S(2)/3)*a**(S(1)/3)*d + b**(S(1)/3)*c))*log(c + d*x)/(S(3)*b**S(2)) - a*log((S(-1))**(S(1)/3)*d*(a**(S(1)/3) + (S(-1))**(S(2)/3)*b**(S(1)/3)*x)/((S(-1))**(S(1)/3)*a**(S(1)/3)*d + b**(S(1)/3)*c))*log(c + d*x)/(S(3)*b**S(2)) - a*polylog(S(2), b**(S(1)/3)*(c + d*x)/(-a**(S(1)/3)*d + b**(S(1)/3)*c))/(S(3)*b**S(2)) - a*polylog(S(2), b**(S(1)/3)*(c + d*x)/((S(-1))**(S(1)/3)*a**(S(1)/3)*d + b**(S(1)/3)*c))/(S(3)*b**S(2)) - a*polylog(S(2), b**(S(1)/3)*(c + d*x)/(-(S(-1))**(S(2)/3)*a**(S(1)/3)*d + b**(S(1)/3)*c))/(S(3)*b**S(2)) + c**S(3)*log(c + d*x)/(S(3)*b*d**S(3)) - c**S(2)*x/(S(3)*b*d**S(2)) + c*x**S(2)/(S(6)*b*d) + x**S(3)*log(c + d*x)/(S(3)*b) - x**S(3)/(S(9)*b), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(4)*log(c + d*x)/(a + b*x**S(3)), x), x, a**(S(2)/3)*log(-d*(a**(S(1)/3) + b**(S(1)/3)*x)/(-a**(S(1)/3)*d + b**(S(1)/3)*c))*log(c + d*x)/(S(3)*b**(S(5)/3)) - (S(-1))**(S(1)/3)*a**(S(2)/3)*log(d*(a**(S(1)/3) - (S(-1))**(S(1)/3)*b**(S(1)/3)*x)/(a**(S(1)/3)*d + (S(-1))**(S(1)/3)*b**(S(1)/3)*c))*log(c + d*x)/(S(3)*b**(S(5)/3)) + (S(-1))**(S(2)/3)*a**(S(2)/3)*log(-d*(a**(S(1)/3) + (S(-1))**(S(2)/3)*b**(S(1)/3)*x)/(-a**(S(1)/3)*d + (S(-1))**(S(2)/3)*b**(S(1)/3)*c))*log(c + d*x)/(S(3)*b**(S(5)/3)) + a**(S(2)/3)*polylog(S(2), b**(S(1)/3)*(c + d*x)/(-a**(S(1)/3)*d + b**(S(1)/3)*c))/(S(3)*b**(S(5)/3)) - (S(-1))**(S(1)/3)*a**(S(2)/3)*polylog(S(2), (S(-1))**(S(1)/3)*b**(S(1)/3)*(c + d*x)/(a**(S(1)/3)*d + (S(-1))**(S(1)/3)*b**(S(1)/3)*c))/(S(3)*b**(S(5)/3)) + (S(-1))**(S(2)/3)*a**(S(2)/3)*polylog(S(2), (S(-1))**(S(2)/3)*b**(S(1)/3)*(c + d*x)/(-a**(S(1)/3)*d + (S(-1))**(S(2)/3)*b**(S(1)/3)*c))/(S(3)*b**(S(5)/3)) - c**S(2)*log(c + d*x)/(S(2)*b*d**S(2)) + c*x/(S(2)*b*d) + x**S(2)*log(c + d*x)/(S(2)*b) - x**S(2)/(S(4)*b), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)*log(c + d*x)/(a + b*x**S(3)), x), x, -a**(S(1)/3)*log(-d*(a**(S(1)/3) + b**(S(1)/3)*x)/(-a**(S(1)/3)*d + b**(S(1)/3)*c))*log(c + d*x)/(S(3)*b**(S(4)/3)) - (S(-1))**(S(2)/3)*a**(S(1)/3)*log(d*(a**(S(1)/3) - (S(-1))**(S(1)/3)*b**(S(1)/3)*x)/(a**(S(1)/3)*d + (S(-1))**(S(1)/3)*b**(S(1)/3)*c))*log(c + d*x)/(S(3)*b**(S(4)/3)) + (S(-1))**(S(1)/3)*a**(S(1)/3)*log(-d*(a**(S(1)/3) + (S(-1))**(S(2)/3)*b**(S(1)/3)*x)/(-a**(S(1)/3)*d + (S(-1))**(S(2)/3)*b**(S(1)/3)*c))*log(c + d*x)/(S(3)*b**(S(4)/3)) - a**(S(1)/3)*polylog(S(2), b**(S(1)/3)*(c + d*x)/(-a**(S(1)/3)*d + b**(S(1)/3)*c))/(S(3)*b**(S(4)/3)) - (S(-1))**(S(2)/3)*a**(S(1)/3)*polylog(S(2), (S(-1))**(S(1)/3)*b**(S(1)/3)*(c + d*x)/(a**(S(1)/3)*d + (S(-1))**(S(1)/3)*b**(S(1)/3)*c))/(S(3)*b**(S(4)/3)) + (S(-1))**(S(1)/3)*a**(S(1)/3)*polylog(S(2), (S(-1))**(S(2)/3)*b**(S(1)/3)*(c + d*x)/(-a**(S(1)/3)*d + (S(-1))**(S(2)/3)*b**(S(1)/3)*c))/(S(3)*b**(S(4)/3)) - x/b + (c + d*x)*log(c + d*x)/(b*d), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)*log(c + d*x)/(a + b*x**S(3)), x), x, log(-d*(a**(S(1)/3) + b**(S(1)/3)*x)/(-a**(S(1)/3)*d + b**(S(1)/3)*c))*log(c + d*x)/(S(3)*b) + log(-d*((S(-1))**(S(2)/3)*a**(S(1)/3) + b**(S(1)/3)*x)/(-(S(-1))**(S(2)/3)*a**(S(1)/3)*d + b**(S(1)/3)*c))*log(c + d*x)/(S(3)*b) + log((S(-1))**(S(1)/3)*d*(a**(S(1)/3) + (S(-1))**(S(2)/3)*b**(S(1)/3)*x)/((S(-1))**(S(1)/3)*a**(S(1)/3)*d + b**(S(1)/3)*c))*log(c + d*x)/(S(3)*b) + polylog(S(2), b**(S(1)/3)*(c + d*x)/(-a**(S(1)/3)*d + b**(S(1)/3)*c))/(S(3)*b) + polylog(S(2), b**(S(1)/3)*(c + d*x)/((S(-1))**(S(1)/3)*a**(S(1)/3)*d + b**(S(1)/3)*c))/(S(3)*b) + polylog(S(2), b**(S(1)/3)*(c + d*x)/(-(S(-1))**(S(2)/3)*a**(S(1)/3)*d + b**(S(1)/3)*c))/(S(3)*b), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(c + d*x)/(a + b*x**S(3)), x), x, -log(-d*(a**(S(1)/3) + b**(S(1)/3)*x)/(-a**(S(1)/3)*d + b**(S(1)/3)*c))*log(c + d*x)/(S(3)*a**(S(1)/3)*b**(S(2)/3)) + (S(-1))**(S(1)/3)*log(d*(a**(S(1)/3) - (S(-1))**(S(1)/3)*b**(S(1)/3)*x)/(a**(S(1)/3)*d + (S(-1))**(S(1)/3)*b**(S(1)/3)*c))*log(c + d*x)/(S(3)*a**(S(1)/3)*b**(S(2)/3)) - (S(-1))**(S(2)/3)*log(-d*(a**(S(1)/3) + (S(-1))**(S(2)/3)*b**(S(1)/3)*x)/(-a**(S(1)/3)*d + (S(-1))**(S(2)/3)*b**(S(1)/3)*c))*log(c + d*x)/(S(3)*a**(S(1)/3)*b**(S(2)/3)) - polylog(S(2), b**(S(1)/3)*(c + d*x)/(-a**(S(1)/3)*d + b**(S(1)/3)*c))/(S(3)*a**(S(1)/3)*b**(S(2)/3)) + (S(-1))**(S(1)/3)*polylog(S(2), (S(-1))**(S(1)/3)*b**(S(1)/3)*(c + d*x)/(a**(S(1)/3)*d + (S(-1))**(S(1)/3)*b**(S(1)/3)*c))/(S(3)*a**(S(1)/3)*b**(S(2)/3)) - (S(-1))**(S(2)/3)*polylog(S(2), (S(-1))**(S(2)/3)*b**(S(1)/3)*(c + d*x)/(-a**(S(1)/3)*d + (S(-1))**(S(2)/3)*b**(S(1)/3)*c))/(S(3)*a**(S(1)/3)*b**(S(2)/3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c + d*x)/(a + b*x**S(3)), x), x, log(-d*(a**(S(1)/3) + b**(S(1)/3)*x)/(-a**(S(1)/3)*d + b**(S(1)/3)*c))*log(c + d*x)/(S(3)*a**(S(2)/3)*b**(S(1)/3)) + (S(-1))**(S(2)/3)*log(d*(a**(S(1)/3) - (S(-1))**(S(1)/3)*b**(S(1)/3)*x)/(a**(S(1)/3)*d + (S(-1))**(S(1)/3)*b**(S(1)/3)*c))*log(c + d*x)/(S(3)*a**(S(2)/3)*b**(S(1)/3)) - (S(-1))**(S(1)/3)*log(-d*(a**(S(1)/3) + (S(-1))**(S(2)/3)*b**(S(1)/3)*x)/(-a**(S(1)/3)*d + (S(-1))**(S(2)/3)*b**(S(1)/3)*c))*log(c + d*x)/(S(3)*a**(S(2)/3)*b**(S(1)/3)) + polylog(S(2), b**(S(1)/3)*(c + d*x)/(-a**(S(1)/3)*d + b**(S(1)/3)*c))/(S(3)*a**(S(2)/3)*b**(S(1)/3)) + (S(-1))**(S(2)/3)*polylog(S(2), (S(-1))**(S(1)/3)*b**(S(1)/3)*(c + d*x)/(a**(S(1)/3)*d + (S(-1))**(S(1)/3)*b**(S(1)/3)*c))/(S(3)*a**(S(2)/3)*b**(S(1)/3)) - (S(-1))**(S(1)/3)*polylog(S(2), (S(-1))**(S(2)/3)*b**(S(1)/3)*(c + d*x)/(-a**(S(1)/3)*d + (S(-1))**(S(2)/3)*b**(S(1)/3)*c))/(S(3)*a**(S(2)/3)*b**(S(1)/3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c + d*x)/(x*(a + b*x**S(3))), x), x, log(-d*x/c)*log(c + d*x)/a - log(-d*(a**(S(1)/3) + b**(S(1)/3)*x)/(-a**(S(1)/3)*d + b**(S(1)/3)*c))*log(c + d*x)/(S(3)*a) - log(-d*((S(-1))**(S(2)/3)*a**(S(1)/3) + b**(S(1)/3)*x)/(-(S(-1))**(S(2)/3)*a**(S(1)/3)*d + b**(S(1)/3)*c))*log(c + d*x)/(S(3)*a) - log((S(-1))**(S(1)/3)*d*(a**(S(1)/3) + (S(-1))**(S(2)/3)*b**(S(1)/3)*x)/((S(-1))**(S(1)/3)*a**(S(1)/3)*d + b**(S(1)/3)*c))*log(c + d*x)/(S(3)*a) + polylog(S(2), (c + d*x)/c)/a - polylog(S(2), b**(S(1)/3)*(c + d*x)/(-a**(S(1)/3)*d + b**(S(1)/3)*c))/(S(3)*a) - polylog(S(2), b**(S(1)/3)*(c + d*x)/((S(-1))**(S(1)/3)*a**(S(1)/3)*d + b**(S(1)/3)*c))/(S(3)*a) - polylog(S(2), b**(S(1)/3)*(c + d*x)/(-(S(-1))**(S(2)/3)*a**(S(1)/3)*d + b**(S(1)/3)*c))/(S(3)*a), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c + d*x)/(x**S(2)*(a + b*x**S(3))), x), x, -log(c + d*x)/(a*x) + d*log(x)/(a*c) - d*log(c + d*x)/(a*c) + b**(S(1)/3)*log(-d*(a**(S(1)/3) + b**(S(1)/3)*x)/(-a**(S(1)/3)*d + b**(S(1)/3)*c))*log(c + d*x)/(S(3)*a**(S(4)/3)) - (S(-1))**(S(1)/3)*b**(S(1)/3)*log(d*(a**(S(1)/3) - (S(-1))**(S(1)/3)*b**(S(1)/3)*x)/(a**(S(1)/3)*d + (S(-1))**(S(1)/3)*b**(S(1)/3)*c))*log(c + d*x)/(S(3)*a**(S(4)/3)) + (S(-1))**(S(2)/3)*b**(S(1)/3)*log(-d*(a**(S(1)/3) + (S(-1))**(S(2)/3)*b**(S(1)/3)*x)/(-a**(S(1)/3)*d + (S(-1))**(S(2)/3)*b**(S(1)/3)*c))*log(c + d*x)/(S(3)*a**(S(4)/3)) + b**(S(1)/3)*polylog(S(2), b**(S(1)/3)*(c + d*x)/(-a**(S(1)/3)*d + b**(S(1)/3)*c))/(S(3)*a**(S(4)/3)) - (S(-1))**(S(1)/3)*b**(S(1)/3)*polylog(S(2), (S(-1))**(S(1)/3)*b**(S(1)/3)*(c + d*x)/(a**(S(1)/3)*d + (S(-1))**(S(1)/3)*b**(S(1)/3)*c))/(S(3)*a**(S(4)/3)) + (S(-1))**(S(2)/3)*b**(S(1)/3)*polylog(S(2), (S(-1))**(S(2)/3)*b**(S(1)/3)*(c + d*x)/(-a**(S(1)/3)*d + (S(-1))**(S(2)/3)*b**(S(1)/3)*c))/(S(3)*a**(S(4)/3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c + d*x)/(x**S(3)*(a + b*x**S(3))), x), x, -log(c + d*x)/(S(2)*a*x**S(2)) - d/(S(2)*a*c*x) - d**S(2)*log(x)/(S(2)*a*c**S(2)) + d**S(2)*log(c + d*x)/(S(2)*a*c**S(2)) - b**(S(2)/3)*log(-d*(a**(S(1)/3) + b**(S(1)/3)*x)/(-a**(S(1)/3)*d + b**(S(1)/3)*c))*log(c + d*x)/(S(3)*a**(S(5)/3)) - (S(-1))**(S(2)/3)*b**(S(2)/3)*log(d*(a**(S(1)/3) - (S(-1))**(S(1)/3)*b**(S(1)/3)*x)/(a**(S(1)/3)*d + (S(-1))**(S(1)/3)*b**(S(1)/3)*c))*log(c + d*x)/(S(3)*a**(S(5)/3)) + (S(-1))**(S(1)/3)*b**(S(2)/3)*log(-d*(a**(S(1)/3) + (S(-1))**(S(2)/3)*b**(S(1)/3)*x)/(-a**(S(1)/3)*d + (S(-1))**(S(2)/3)*b**(S(1)/3)*c))*log(c + d*x)/(S(3)*a**(S(5)/3)) - b**(S(2)/3)*polylog(S(2), b**(S(1)/3)*(c + d*x)/(-a**(S(1)/3)*d + b**(S(1)/3)*c))/(S(3)*a**(S(5)/3)) - (S(-1))**(S(2)/3)*b**(S(2)/3)*polylog(S(2), (S(-1))**(S(1)/3)*b**(S(1)/3)*(c + d*x)/(a**(S(1)/3)*d + (S(-1))**(S(1)/3)*b**(S(1)/3)*c))/(S(3)*a**(S(5)/3)) + (S(-1))**(S(1)/3)*b**(S(2)/3)*polylog(S(2), (S(-1))**(S(2)/3)*b**(S(1)/3)*(c + d*x)/(-a**(S(1)/3)*d + (S(-1))**(S(2)/3)*b**(S(1)/3)*c))/(S(3)*a**(S(5)/3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(4)*log(c + d*x)/(a + b*x**S(4)), x), x, -x/b + (c + d*x)*log(c + d*x)/(b*d) - (-a)**(S(1)/4)*log(-d*(b**(S(1)/4)*x + (-a)**(S(1)/4))/(b**(S(1)/4)*c - d*(-a)**(S(1)/4)))*log(c + d*x)/(S(4)*b**(S(5)/4)) + (-a)**(S(1)/4)*log(d*(-b**(S(1)/4)*x + (-a)**(S(1)/4))/(b**(S(1)/4)*c + d*(-a)**(S(1)/4)))*log(c + d*x)/(S(4)*b**(S(5)/4)) - (-a)**(S(1)/4)*polylog(S(2), b**(S(1)/4)*(c + d*x)/(b**(S(1)/4)*c - d*(-a)**(S(1)/4)))/(S(4)*b**(S(5)/4)) + (-a)**(S(1)/4)*polylog(S(2), b**(S(1)/4)*(c + d*x)/(b**(S(1)/4)*c + d*(-a)**(S(1)/4)))/(S(4)*b**(S(5)/4)) - sqrt(-sqrt(-a))*log(-d*(b**(S(1)/4)*x + sqrt(-sqrt(-a)))/(b**(S(1)/4)*c - d*sqrt(-sqrt(-a))))*log(c + d*x)/(S(4)*b**(S(5)/4)) + sqrt(-sqrt(-a))*log(d*(-b**(S(1)/4)*x + sqrt(-sqrt(-a)))/(b**(S(1)/4)*c + d*sqrt(-sqrt(-a))))*log(c + d*x)/(S(4)*b**(S(5)/4)) - sqrt(-sqrt(-a))*polylog(S(2), b**(S(1)/4)*(c + d*x)/(b**(S(1)/4)*c - d*sqrt(-sqrt(-a))))/(S(4)*b**(S(5)/4)) + sqrt(-sqrt(-a))*polylog(S(2), b**(S(1)/4)*(c + d*x)/(b**(S(1)/4)*c + d*sqrt(-sqrt(-a))))/(S(4)*b**(S(5)/4)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)*log(c + d*x)/(a + b*x**S(4)), x), x, log(-d*(b**(S(1)/4)*x + (-a)**(S(1)/4))/(b**(S(1)/4)*c - d*(-a)**(S(1)/4)))*log(c + d*x)/(S(4)*b) + log(d*(-b**(S(1)/4)*x + (-a)**(S(1)/4))/(b**(S(1)/4)*c + d*(-a)**(S(1)/4)))*log(c + d*x)/(S(4)*b) + log(-d*(b**(S(1)/4)*x + I*(-a)**(S(1)/4))/(b**(S(1)/4)*c - I*d*(-a)**(S(1)/4)))*log(c + d*x)/(S(4)*b) + log(d*(-b**(S(1)/4)*x + I*(-a)**(S(1)/4))/(b**(S(1)/4)*c + I*d*(-a)**(S(1)/4)))*log(c + d*x)/(S(4)*b) + polylog(S(2), b**(S(1)/4)*(c + d*x)/(b**(S(1)/4)*c - d*(-a)**(S(1)/4)))/(S(4)*b) + polylog(S(2), b**(S(1)/4)*(c + d*x)/(b**(S(1)/4)*c + d*(-a)**(S(1)/4)))/(S(4)*b) + polylog(S(2), b**(S(1)/4)*(c + d*x)/(b**(S(1)/4)*c - I*d*(-a)**(S(1)/4)))/(S(4)*b) + polylog(S(2), b**(S(1)/4)*(c + d*x)/(b**(S(1)/4)*c + I*d*(-a)**(S(1)/4)))/(S(4)*b), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)*log(c + d*x)/(a + b*x**S(4)), x), x, -log(-d*(b**(S(1)/4)*x + sqrt(-sqrt(-a)))/(b**(S(1)/4)*c - d*sqrt(-sqrt(-a))))*log(c + d*x)/(S(4)*b**(S(3)/4)*sqrt(-sqrt(-a))) + log(d*(-b**(S(1)/4)*x + sqrt(-sqrt(-a)))/(b**(S(1)/4)*c + d*sqrt(-sqrt(-a))))*log(c + d*x)/(S(4)*b**(S(3)/4)*sqrt(-sqrt(-a))) - polylog(S(2), b**(S(1)/4)*(c + d*x)/(b**(S(1)/4)*c - d*sqrt(-sqrt(-a))))/(S(4)*b**(S(3)/4)*sqrt(-sqrt(-a))) + polylog(S(2), b**(S(1)/4)*(c + d*x)/(b**(S(1)/4)*c + d*sqrt(-sqrt(-a))))/(S(4)*b**(S(3)/4)*sqrt(-sqrt(-a))) - log(-d*(b**(S(1)/4)*x + (-a)**(S(1)/4))/(b**(S(1)/4)*c - d*(-a)**(S(1)/4)))*log(c + d*x)/(S(4)*b**(S(3)/4)*(-a)**(S(1)/4)) + log(d*(-b**(S(1)/4)*x + (-a)**(S(1)/4))/(b**(S(1)/4)*c + d*(-a)**(S(1)/4)))*log(c + d*x)/(S(4)*b**(S(3)/4)*(-a)**(S(1)/4)) - polylog(S(2), b**(S(1)/4)*(c + d*x)/(b**(S(1)/4)*c - d*(-a)**(S(1)/4)))/(S(4)*b**(S(3)/4)*(-a)**(S(1)/4)) + polylog(S(2), b**(S(1)/4)*(c + d*x)/(b**(S(1)/4)*c + d*(-a)**(S(1)/4)))/(S(4)*b**(S(3)/4)*(-a)**(S(1)/4)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(c + d*x)/(a + b*x**S(4)), x), x, log(-d*(b**(S(1)/4)*x + (-a)**(S(1)/4))/(b**(S(1)/4)*c - d*(-a)**(S(1)/4)))*log(c + d*x)/(S(4)*sqrt(b)*sqrt(-a)) + log(d*(-b**(S(1)/4)*x + (-a)**(S(1)/4))/(b**(S(1)/4)*c + d*(-a)**(S(1)/4)))*log(c + d*x)/(S(4)*sqrt(b)*sqrt(-a)) - log(-d*(b**(S(1)/4)*x + I*(-a)**(S(1)/4))/(b**(S(1)/4)*c - I*d*(-a)**(S(1)/4)))*log(c + d*x)/(S(4)*sqrt(b)*sqrt(-a)) - log(d*(-b**(S(1)/4)*x + I*(-a)**(S(1)/4))/(b**(S(1)/4)*c + I*d*(-a)**(S(1)/4)))*log(c + d*x)/(S(4)*sqrt(b)*sqrt(-a)) + polylog(S(2), b**(S(1)/4)*(c + d*x)/(b**(S(1)/4)*c - d*(-a)**(S(1)/4)))/(S(4)*sqrt(b)*sqrt(-a)) + polylog(S(2), b**(S(1)/4)*(c + d*x)/(b**(S(1)/4)*c + d*(-a)**(S(1)/4)))/(S(4)*sqrt(b)*sqrt(-a)) - polylog(S(2), b**(S(1)/4)*(c + d*x)/(b**(S(1)/4)*c - I*d*(-a)**(S(1)/4)))/(S(4)*sqrt(b)*sqrt(-a)) - polylog(S(2), b**(S(1)/4)*(c + d*x)/(b**(S(1)/4)*c + I*d*(-a)**(S(1)/4)))/(S(4)*sqrt(b)*sqrt(-a)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c + d*x)/(a + b*x**S(4)), x), x, -log(-d*(b**(S(1)/4)*x + sqrt(-sqrt(-a)))/(b**(S(1)/4)*c - d*sqrt(-sqrt(-a))))*log(c + d*x)/(S(4)*b**(S(1)/4)*(-sqrt(-a))**(S(3)/2)) + log(d*(-b**(S(1)/4)*x + sqrt(-sqrt(-a)))/(b**(S(1)/4)*c + d*sqrt(-sqrt(-a))))*log(c + d*x)/(S(4)*b**(S(1)/4)*(-sqrt(-a))**(S(3)/2)) - polylog(S(2), b**(S(1)/4)*(c + d*x)/(b**(S(1)/4)*c - d*sqrt(-sqrt(-a))))/(S(4)*b**(S(1)/4)*(-sqrt(-a))**(S(3)/2)) + polylog(S(2), b**(S(1)/4)*(c + d*x)/(b**(S(1)/4)*c + d*sqrt(-sqrt(-a))))/(S(4)*b**(S(1)/4)*(-sqrt(-a))**(S(3)/2)) - log(-d*(b**(S(1)/4)*x + (-a)**(S(1)/4))/(b**(S(1)/4)*c - d*(-a)**(S(1)/4)))*log(c + d*x)/(S(4)*b**(S(1)/4)*(-a)**(S(3)/4)) + log(d*(-b**(S(1)/4)*x + (-a)**(S(1)/4))/(b**(S(1)/4)*c + d*(-a)**(S(1)/4)))*log(c + d*x)/(S(4)*b**(S(1)/4)*(-a)**(S(3)/4)) - polylog(S(2), b**(S(1)/4)*(c + d*x)/(b**(S(1)/4)*c - d*(-a)**(S(1)/4)))/(S(4)*b**(S(1)/4)*(-a)**(S(3)/4)) + polylog(S(2), b**(S(1)/4)*(c + d*x)/(b**(S(1)/4)*c + d*(-a)**(S(1)/4)))/(S(4)*b**(S(1)/4)*(-a)**(S(3)/4)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c + d*x)/(x*(a + b*x**S(4))), x), x, log(-d*x/c)*log(c + d*x)/a - log(-d*(b**(S(1)/4)*x + (-a)**(S(1)/4))/(b**(S(1)/4)*c - d*(-a)**(S(1)/4)))*log(c + d*x)/(S(4)*a) - log(d*(-b**(S(1)/4)*x + (-a)**(S(1)/4))/(b**(S(1)/4)*c + d*(-a)**(S(1)/4)))*log(c + d*x)/(S(4)*a) - log(-d*(b**(S(1)/4)*x + I*(-a)**(S(1)/4))/(b**(S(1)/4)*c - I*d*(-a)**(S(1)/4)))*log(c + d*x)/(S(4)*a) - log(d*(-b**(S(1)/4)*x + I*(-a)**(S(1)/4))/(b**(S(1)/4)*c + I*d*(-a)**(S(1)/4)))*log(c + d*x)/(S(4)*a) + polylog(S(2), (c + d*x)/c)/a - polylog(S(2), b**(S(1)/4)*(c + d*x)/(b**(S(1)/4)*c - d*(-a)**(S(1)/4)))/(S(4)*a) - polylog(S(2), b**(S(1)/4)*(c + d*x)/(b**(S(1)/4)*c + d*(-a)**(S(1)/4)))/(S(4)*a) - polylog(S(2), b**(S(1)/4)*(c + d*x)/(b**(S(1)/4)*c - I*d*(-a)**(S(1)/4)))/(S(4)*a) - polylog(S(2), b**(S(1)/4)*(c + d*x)/(b**(S(1)/4)*c + I*d*(-a)**(S(1)/4)))/(S(4)*a), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c + d*x)/(x**S(2)*(a + b*x**S(4))), x), x, -b**(S(1)/4)*log(-d*(b**(S(1)/4)*x + sqrt(-sqrt(-a)))/(b**(S(1)/4)*c - d*sqrt(-sqrt(-a))))*log(c + d*x)/(S(4)*(-sqrt(-a))**(S(5)/2)) + b**(S(1)/4)*log(d*(-b**(S(1)/4)*x + sqrt(-sqrt(-a)))/(b**(S(1)/4)*c + d*sqrt(-sqrt(-a))))*log(c + d*x)/(S(4)*(-sqrt(-a))**(S(5)/2)) - b**(S(1)/4)*polylog(S(2), b**(S(1)/4)*(c + d*x)/(b**(S(1)/4)*c - d*sqrt(-sqrt(-a))))/(S(4)*(-sqrt(-a))**(S(5)/2)) + b**(S(1)/4)*polylog(S(2), b**(S(1)/4)*(c + d*x)/(b**(S(1)/4)*c + d*sqrt(-sqrt(-a))))/(S(4)*(-sqrt(-a))**(S(5)/2)) - b**(S(1)/4)*log(-d*(b**(S(1)/4)*x + (-a)**(S(1)/4))/(b**(S(1)/4)*c - d*(-a)**(S(1)/4)))*log(c + d*x)/(S(4)*(-a)**(S(5)/4)) + b**(S(1)/4)*log(d*(-b**(S(1)/4)*x + (-a)**(S(1)/4))/(b**(S(1)/4)*c + d*(-a)**(S(1)/4)))*log(c + d*x)/(S(4)*(-a)**(S(5)/4)) - b**(S(1)/4)*polylog(S(2), b**(S(1)/4)*(c + d*x)/(b**(S(1)/4)*c - d*(-a)**(S(1)/4)))/(S(4)*(-a)**(S(5)/4)) + b**(S(1)/4)*polylog(S(2), b**(S(1)/4)*(c + d*x)/(b**(S(1)/4)*c + d*(-a)**(S(1)/4)))/(S(4)*(-a)**(S(5)/4)) - log(c + d*x)/(a*x) + d*log(x)/(a*c) - d*log(c + d*x)/(a*c), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c + d*x)/(x**S(3)*(a + b*x**S(4))), x), x, sqrt(b)*log(-d*(b**(S(1)/4)*x + (-a)**(S(1)/4))/(b**(S(1)/4)*c - d*(-a)**(S(1)/4)))*log(c + d*x)/(S(4)*(-a)**(S(3)/2)) + sqrt(b)*log(d*(-b**(S(1)/4)*x + (-a)**(S(1)/4))/(b**(S(1)/4)*c + d*(-a)**(S(1)/4)))*log(c + d*x)/(S(4)*(-a)**(S(3)/2)) - sqrt(b)*log(-d*(b**(S(1)/4)*x + I*(-a)**(S(1)/4))/(b**(S(1)/4)*c - I*d*(-a)**(S(1)/4)))*log(c + d*x)/(S(4)*(-a)**(S(3)/2)) - sqrt(b)*log(d*(-b**(S(1)/4)*x + I*(-a)**(S(1)/4))/(b**(S(1)/4)*c + I*d*(-a)**(S(1)/4)))*log(c + d*x)/(S(4)*(-a)**(S(3)/2)) + sqrt(b)*polylog(S(2), b**(S(1)/4)*(c + d*x)/(b**(S(1)/4)*c - d*(-a)**(S(1)/4)))/(S(4)*(-a)**(S(3)/2)) + sqrt(b)*polylog(S(2), b**(S(1)/4)*(c + d*x)/(b**(S(1)/4)*c + d*(-a)**(S(1)/4)))/(S(4)*(-a)**(S(3)/2)) - sqrt(b)*polylog(S(2), b**(S(1)/4)*(c + d*x)/(b**(S(1)/4)*c - I*d*(-a)**(S(1)/4)))/(S(4)*(-a)**(S(3)/2)) - sqrt(b)*polylog(S(2), b**(S(1)/4)*(c + d*x)/(b**(S(1)/4)*c + I*d*(-a)**(S(1)/4)))/(S(4)*(-a)**(S(3)/2)) - log(c + d*x)/(S(2)*a*x**S(2)) - d/(S(2)*a*c*x) - d**S(2)*log(x)/(S(2)*a*c**S(2)) + d**S(2)*log(c + d*x)/(S(2)*a*c**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x)**n)**S(3)/(d*x + e*x**S(2)), x), x, S(6)*n**S(3)*polylog(S(4), (a + b*x)/a)/d - S(6)*n**S(3)*polylog(S(4), -e*(a + b*x)/(-a*e + b*d))/d - S(6)*n**S(2)*log(c*(a + b*x)**n)*polylog(S(3), (a + b*x)/a)/d + S(6)*n**S(2)*log(c*(a + b*x)**n)*polylog(S(3), -e*(a + b*x)/(-a*e + b*d))/d + S(3)*n*log(c*(a + b*x)**n)**S(2)*polylog(S(2), (a + b*x)/a)/d - S(3)*n*log(c*(a + b*x)**n)**S(2)*polylog(S(2), -e*(a + b*x)/(-a*e + b*d))/d + log(c*(a + b*x)**n)**S(3)*log(-b*x/a)/d - log(c*(a + b*x)**n)**S(3)*log(b*(d + e*x)/(-a*e + b*d))/d, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x)**n)**S(2)/(d*x + e*x**S(2)), x), x, -S(2)*n**S(2)*polylog(S(3), (a + b*x)/a)/d + S(2)*n**S(2)*polylog(S(3), -e*(a + b*x)/(-a*e + b*d))/d + S(2)*n*log(c*(a + b*x)**n)*polylog(S(2), (a + b*x)/a)/d - S(2)*n*log(c*(a + b*x)**n)*polylog(S(2), -e*(a + b*x)/(-a*e + b*d))/d + log(c*(a + b*x)**n)**S(2)*log(-b*x/a)/d - log(c*(a + b*x)**n)**S(2)*log(b*(d + e*x)/(-a*e + b*d))/d, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x)**n)/(d*x + e*x**S(2)), x), x, n*polylog(S(2), (a + b*x)/a)/d - n*polylog(S(2), -e*(a + b*x)/(-a*e + b*d))/d + log(c*(a + b*x)**n)*log(-b*x/a)/d - log(c*(a + b*x)**n)*log(b*(d + e*x)/(-a*e + b*d))/d, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/((d*x + e*x**S(2))*log(c*(a + b*x)**n)), x), x, Integral(S(1)/(x*(d + e*x)*log(c*(a + b*x)**n)), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x)**n)**S(3)/(d + e*x + f*x**S(2)), x), x, S(6)*n**S(3)*polylog(S(4), S(2)*f*(a + b*x)/(S(2)*a*f - b*(e - sqrt(-S(4)*d*f + e**S(2)))))/sqrt(-S(4)*d*f + e**S(2)) - S(6)*n**S(3)*polylog(S(4), S(2)*f*(a + b*x)/(S(2)*a*f - b*(e + sqrt(-S(4)*d*f + e**S(2)))))/sqrt(-S(4)*d*f + e**S(2)) - S(6)*n**S(2)*log(c*(a + b*x)**n)*polylog(S(3), S(2)*f*(a + b*x)/(S(2)*a*f - b*(e - sqrt(-S(4)*d*f + e**S(2)))))/sqrt(-S(4)*d*f + e**S(2)) + S(6)*n**S(2)*log(c*(a + b*x)**n)*polylog(S(3), S(2)*f*(a + b*x)/(S(2)*a*f - b*(e + sqrt(-S(4)*d*f + e**S(2)))))/sqrt(-S(4)*d*f + e**S(2)) + S(3)*n*log(c*(a + b*x)**n)**S(2)*polylog(S(2), S(2)*f*(a + b*x)/(S(2)*a*f - b*(e - sqrt(-S(4)*d*f + e**S(2)))))/sqrt(-S(4)*d*f + e**S(2)) - S(3)*n*log(c*(a + b*x)**n)**S(2)*polylog(S(2), S(2)*f*(a + b*x)/(S(2)*a*f - b*(e + sqrt(-S(4)*d*f + e**S(2)))))/sqrt(-S(4)*d*f + e**S(2)) + log(c*(a + b*x)**n)**S(3)*log(-b*(e + S(2)*f*x - sqrt(-S(4)*d*f + e**S(2)))/(S(2)*a*f - b*(e - sqrt(-S(4)*d*f + e**S(2)))))/sqrt(-S(4)*d*f + e**S(2)) - log(c*(a + b*x)**n)**S(3)*log(-b*(e + S(2)*f*x + sqrt(-S(4)*d*f + e**S(2)))/(S(2)*a*f - b*(e + sqrt(-S(4)*d*f + e**S(2)))))/sqrt(-S(4)*d*f + e**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x)**n)**S(2)/(d + e*x + f*x**S(2)), x), x, -S(2)*n**S(2)*polylog(S(3), S(2)*f*(a + b*x)/(S(2)*a*f - b*(e - sqrt(-S(4)*d*f + e**S(2)))))/sqrt(-S(4)*d*f + e**S(2)) + S(2)*n**S(2)*polylog(S(3), S(2)*f*(a + b*x)/(S(2)*a*f - b*(e + sqrt(-S(4)*d*f + e**S(2)))))/sqrt(-S(4)*d*f + e**S(2)) + S(2)*n*log(c*(a + b*x)**n)*polylog(S(2), S(2)*f*(a + b*x)/(S(2)*a*f - b*(e - sqrt(-S(4)*d*f + e**S(2)))))/sqrt(-S(4)*d*f + e**S(2)) - S(2)*n*log(c*(a + b*x)**n)*polylog(S(2), S(2)*f*(a + b*x)/(S(2)*a*f - b*(e + sqrt(-S(4)*d*f + e**S(2)))))/sqrt(-S(4)*d*f + e**S(2)) + log(c*(a + b*x)**n)**S(2)*log(-b*(e + S(2)*f*x - sqrt(-S(4)*d*f + e**S(2)))/(S(2)*a*f - b*(e - sqrt(-S(4)*d*f + e**S(2)))))/sqrt(-S(4)*d*f + e**S(2)) - log(c*(a + b*x)**n)**S(2)*log(-b*(e + S(2)*f*x + sqrt(-S(4)*d*f + e**S(2)))/(S(2)*a*f - b*(e + sqrt(-S(4)*d*f + e**S(2)))))/sqrt(-S(4)*d*f + e**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x)**n)/(d + e*x + f*x**S(2)), x), x, n*polylog(S(2), S(2)*f*(a + b*x)/(S(2)*a*f - b*(e - sqrt(-S(4)*d*f + e**S(2)))))/sqrt(-S(4)*d*f + e**S(2)) - n*polylog(S(2), S(2)*f*(a + b*x)/(S(2)*a*f - b*(e + sqrt(-S(4)*d*f + e**S(2)))))/sqrt(-S(4)*d*f + e**S(2)) + log(c*(a + b*x)**n)*log(-b*(e + S(2)*f*x - sqrt(-S(4)*d*f + e**S(2)))/(S(2)*a*f - b*(e - sqrt(-S(4)*d*f + e**S(2)))))/sqrt(-S(4)*d*f + e**S(2)) - log(c*(a + b*x)**n)*log(-b*(e + S(2)*f*x + sqrt(-S(4)*d*f + e**S(2)))/(S(2)*a*f - b*(e + sqrt(-S(4)*d*f + e**S(2)))))/sqrt(-S(4)*d*f + e**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/((d + e*x + f*x**S(2))*log(c*(a + b*x)**n)), x), x, Integral(S(1)/((d + e*x + f*x**S(2))*log(c*(a + b*x)**n)), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)*log(x)/(a + b*x + c*x**S(2)), x), x, -b*x*log(x)/c**S(2) + b*x/c**S(2) + x**S(2)*log(x)/(S(2)*c) - x**S(2)/(S(4)*c) + (-a*c + b**S(2) - b*(-S(3)*a*c + b**S(2))/sqrt(-S(4)*a*c + b**S(2)))*log(x)*log((b + S(2)*c*x - sqrt(-S(4)*a*c + b**S(2)))/(b - sqrt(-S(4)*a*c + b**S(2))))/(S(2)*c**S(3)) + (-a*c + b**S(2) - b*(-S(3)*a*c + b**S(2))/sqrt(-S(4)*a*c + b**S(2)))*polylog(S(2), -S(2)*c*x/(b - sqrt(-S(4)*a*c + b**S(2))))/(S(2)*c**S(3)) + (-a*c + b**S(2) + b*(-S(3)*a*c + b**S(2))/sqrt(-S(4)*a*c + b**S(2)))*log(x)*log((b + S(2)*c*x + sqrt(-S(4)*a*c + b**S(2)))/(b + sqrt(-S(4)*a*c + b**S(2))))/(S(2)*c**S(3)) + (-a*c + b**S(2) + b*(-S(3)*a*c + b**S(2))/sqrt(-S(4)*a*c + b**S(2)))*polylog(S(2), -S(2)*c*x/(b + sqrt(-S(4)*a*c + b**S(2))))/(S(2)*c**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)*log(x)/(a + b*x + c*x**S(2)), x), x, x*log(x)/c - x/c - (b + (-S(2)*a*c + b**S(2))/sqrt(-S(4)*a*c + b**S(2)))*log(x)*log((b + S(2)*c*x + sqrt(-S(4)*a*c + b**S(2)))/(b + sqrt(-S(4)*a*c + b**S(2))))/(S(2)*c**S(2)) - (b + (-S(2)*a*c + b**S(2))/sqrt(-S(4)*a*c + b**S(2)))*polylog(S(2), -S(2)*c*x/(b + sqrt(-S(4)*a*c + b**S(2))))/(S(2)*c**S(2)) - (b + (S(2)*a*c - b**S(2))/sqrt(-S(4)*a*c + b**S(2)))*log(x)*log((b + S(2)*c*x - sqrt(-S(4)*a*c + b**S(2)))/(b - sqrt(-S(4)*a*c + b**S(2))))/(S(2)*c**S(2)) - (b + (S(2)*a*c - b**S(2))/sqrt(-S(4)*a*c + b**S(2)))*polylog(S(2), -S(2)*c*x/(b - sqrt(-S(4)*a*c + b**S(2))))/(S(2)*c**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(x)/(a + b*x + c*x**S(2)), x), x, (-b/sqrt(-S(4)*a*c + b**S(2)) + S(1))*log(x)*log((b + S(2)*c*x - sqrt(-S(4)*a*c + b**S(2)))/(b - sqrt(-S(4)*a*c + b**S(2))))/(S(2)*c) + (-b/sqrt(-S(4)*a*c + b**S(2)) + S(1))*polylog(S(2), -S(2)*c*x/(b - sqrt(-S(4)*a*c + b**S(2))))/(S(2)*c) + (b/sqrt(-S(4)*a*c + b**S(2)) + S(1))*log(x)*log((b + S(2)*c*x + sqrt(-S(4)*a*c + b**S(2)))/(b + sqrt(-S(4)*a*c + b**S(2))))/(S(2)*c) + (b/sqrt(-S(4)*a*c + b**S(2)) + S(1))*polylog(S(2), -S(2)*c*x/(b + sqrt(-S(4)*a*c + b**S(2))))/(S(2)*c), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x)/(a + b*x + c*x**S(2)), x), x, log(x)*log((b + S(2)*c*x - sqrt(-S(4)*a*c + b**S(2)))/(b - sqrt(-S(4)*a*c + b**S(2))))/sqrt(-S(4)*a*c + b**S(2)) - log(x)*log((b + S(2)*c*x + sqrt(-S(4)*a*c + b**S(2)))/(b + sqrt(-S(4)*a*c + b**S(2))))/sqrt(-S(4)*a*c + b**S(2)) + polylog(S(2), -S(2)*c*x/(b - sqrt(-S(4)*a*c + b**S(2))))/sqrt(-S(4)*a*c + b**S(2)) - polylog(S(2), -S(2)*c*x/(b + sqrt(-S(4)*a*c + b**S(2))))/sqrt(-S(4)*a*c + b**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x)/(x*(a + b*x + c*x**S(2))), x), x, -(-b/sqrt(-S(4)*a*c + b**S(2)) + S(1))*log(x)*log((b + S(2)*c*x + sqrt(-S(4)*a*c + b**S(2)))/(b + sqrt(-S(4)*a*c + b**S(2))))/(S(2)*a) - (-b/sqrt(-S(4)*a*c + b**S(2)) + S(1))*polylog(S(2), -S(2)*c*x/(b + sqrt(-S(4)*a*c + b**S(2))))/(S(2)*a) - (b/sqrt(-S(4)*a*c + b**S(2)) + S(1))*log(x)*log((b + S(2)*c*x - sqrt(-S(4)*a*c + b**S(2)))/(b - sqrt(-S(4)*a*c + b**S(2))))/(S(2)*a) - (b/sqrt(-S(4)*a*c + b**S(2)) + S(1))*polylog(S(2), -S(2)*c*x/(b - sqrt(-S(4)*a*c + b**S(2))))/(S(2)*a) + log(x)**S(2)/(S(2)*a), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x)/(x**S(2)*(a + b*x + c*x**S(2))), x), x, -log(x)/(a*x) - S(1)/(a*x) - b*log(x)**S(2)/(S(2)*a**S(2)) + (b + (-S(2)*a*c + b**S(2))/sqrt(-S(4)*a*c + b**S(2)))*log(x)*log((b + S(2)*c*x - sqrt(-S(4)*a*c + b**S(2)))/(b - sqrt(-S(4)*a*c + b**S(2))))/(S(2)*a**S(2)) + (b + (-S(2)*a*c + b**S(2))/sqrt(-S(4)*a*c + b**S(2)))*polylog(S(2), -S(2)*c*x/(b - sqrt(-S(4)*a*c + b**S(2))))/(S(2)*a**S(2)) + (b + (S(2)*a*c - b**S(2))/sqrt(-S(4)*a*c + b**S(2)))*log(x)*log((b + S(2)*c*x + sqrt(-S(4)*a*c + b**S(2)))/(b + sqrt(-S(4)*a*c + b**S(2))))/(S(2)*a**S(2)) + (b + (S(2)*a*c - b**S(2))/sqrt(-S(4)*a*c + b**S(2)))*polylog(S(2), -S(2)*c*x/(b + sqrt(-S(4)*a*c + b**S(2))))/(S(2)*a**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x)/(x**S(3)*(a + b*x + c*x**S(2))), x), x, -log(x)/(S(2)*a*x**S(2)) - S(1)/(S(4)*a*x**S(2)) + b*log(x)/(a**S(2)*x) + b/(a**S(2)*x) + (-a*c/S(2) + b**S(2)/S(2))*log(x)**S(2)/a**S(3) - (-a*c + b**S(2) - b*(-S(3)*a*c + b**S(2))/sqrt(-S(4)*a*c + b**S(2)))*log(x)*log((b + S(2)*c*x + sqrt(-S(4)*a*c + b**S(2)))/(b + sqrt(-S(4)*a*c + b**S(2))))/(S(2)*a**S(3)) - (-a*c + b**S(2) - b*(-S(3)*a*c + b**S(2))/sqrt(-S(4)*a*c + b**S(2)))*polylog(S(2), -S(2)*c*x/(b + sqrt(-S(4)*a*c + b**S(2))))/(S(2)*a**S(3)) - (-a*c + b**S(2) + b*(-S(3)*a*c + b**S(2))/sqrt(-S(4)*a*c + b**S(2)))*log(x)*log((b + S(2)*c*x - sqrt(-S(4)*a*c + b**S(2)))/(b - sqrt(-S(4)*a*c + b**S(2))))/(S(2)*a**S(3)) - (-a*c + b**S(2) + b*(-S(3)*a*c + b**S(2))/sqrt(-S(4)*a*c + b**S(2)))*polylog(S(2), -S(2)*c*x/(b - sqrt(-S(4)*a*c + b**S(2))))/(S(2)*a**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d + e/(f + g*x))**p))**S(4), x), x, -S(24)*b**S(4)*e*p**S(4)*polylog(S(4), (d + e/(f + g*x))/d)/(d*g) + S(24)*b**S(3)*e*p**S(3)*(a + b*log(c*(d + e/(f + g*x))**p))*polylog(S(3), (d + e/(f + g*x))/d)/(d*g) - S(12)*b**S(2)*e*p**S(2)*(a + b*log(c*(d + e/(f + g*x))**p))**S(2)*polylog(S(2), (d + e/(f + g*x))/d)/(d*g) - S(4)*b*e*p*(a + b*log(c*(d + e/(f + g*x))**p))**S(3)*log(-e/(d*(f + g*x)))/(d*g) + (a + b*log(c*(d + e/(f + g*x))**p))**S(4)*(d*(f + g*x) + e)/(d*g), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d + e/(f + g*x))**p))**S(3), x), x, S(6)*b**S(3)*e*p**S(3)*polylog(S(3), (d + e/(f + g*x))/d)/(d*g) - S(6)*b**S(2)*e*p**S(2)*(a + b*log(c*(d + e/(f + g*x))**p))*polylog(S(2), (d + e/(f + g*x))/d)/(d*g) - S(3)*b*e*p*(a + b*log(c*(d + e/(f + g*x))**p))**S(2)*log(-e/(d*(f + g*x)))/(d*g) + (a + b*log(c*(d + e/(f + g*x))**p))**S(3)*(d*(f + g*x) + e)/(d*g), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d + e/(f + g*x))**p))**S(2), x), x, -S(2)*b**S(2)*e*p**S(2)*polylog(S(2), (d + e/(f + g*x))/d)/(d*g) - S(2)*b*e*p*(a + b*log(c*(d + e/(f + g*x))**p))*log(-e/(d*(f + g*x)))/(d*g) + (a + b*log(c*(d + e/(f + g*x))**p))**S(2)*(d*(f + g*x) + e)/(d*g), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(a + b*log(c*(d + e/(f + g*x))**p), x), x, a*x + b*(f + g*x)*log(c*(d + e/(f + g*x))**p)/g + b*e*p*log(d*(f + g*x) + e)/(d*g), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(a + b*log(c*(d + e/(f + g*x))**p)), x), x, Integral(S(1)/(a + b*log(c*(d + e/x)**p)), (x, f + g*x))/g, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*(d + e/(f + g*x))**p))**(S(-2)), x), x, Integral((a + b*log(c*(d + e/x)**p))**(S(-2)), (x, f + g*x))/g, expand=True, _diff=True, _numerical=True)
# Regression fixtures: antiderivatives of log(c*(d + e*(f+g*x)^p)^q) and of the
# reciprocal-argument variants log(c*(d + e/(f+g*x)^p)^q).  Each assertion
# feeds rubi_integrate's result and a precomputed closed form to rubi_test,
# which cross-checks them by differentiation (_diff=True) and numerical
# sampling (_numerical=True).
# General exponent p: pure log of a p-th power of a linear term.
assert rubi_test(rubi_integrate(log(c*(e*(f + g*x)**p)**q), x), x, -p*q*x + (f + g*x)*log(c*(e*(f + g*x)**p)**q)/g, expand=True, _diff=True, _numerical=True)
# Shift d inside the log brings in a Gauss hypergeometric term.
assert rubi_test(rubi_integrate(log(c*(d + e*(f + g*x)**p)**q), x), x, -p*q*x + p*q*(f + g*x)*hyper((S(1), S(1)/p), (S(1) + S(1)/p,), -e*(f + g*x)**p/d)/g + (f + g*x)*log(c*(d + e*(f + g*x)**p)**q)/g, expand=True, _diff=True, _numerical=True)
# Concrete exponents p = 3, 2, 1: the hypergeometric collapses to logs/atan.
assert rubi_test(rubi_integrate(log(c*(d + e*(f + g*x)**S(3))**q), x), x, d**(S(1)/3)*q*log(d**(S(1)/3) + e**(S(1)/3)*(f + g*x))/(e**(S(1)/3)*g) - d**(S(1)/3)*q*log(d**(S(2)/3) - d**(S(1)/3)*e**(S(1)/3)*(f + g*x) + e**(S(2)/3)*(f + g*x)**S(2))/(S(2)*e**(S(1)/3)*g) - sqrt(S(3))*d**(S(1)/3)*q*atan(sqrt(S(3))*(d**(S(1)/3) - S(2)*e**(S(1)/3)*(f + g*x))/(S(3)*d**(S(1)/3)))/(e**(S(1)/3)*g) - S(3)*q*x + (f + g*x)*log(c*(d + e*(f + g*x)**S(3))**q)/g, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(d + e*(f + g*x)**S(2))**q), x), x, S(2)*sqrt(d)*q*atan(sqrt(e)*(f + g*x)/sqrt(d))/(sqrt(e)*g) - S(2)*q*x + (f + g*x)*log(c*(d + e*(f + g*x)**S(2))**q)/g, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(d + e*(f + g*x))**q), x), x, -q*x + (d + e*f + e*g*x)*log(c*(d + e*f + e*g*x)**q)/(e*g), expand=True, _diff=True, _numerical=True)
# Reciprocal argument e/(f+g*x)^p for p = 1, 2, 3.
assert rubi_test(rubi_integrate(log(c*(d + e/(f + g*x))**q), x), x, (f + g*x)*log(c*(d + e/(f + g*x))**q)/g + e*q*log(d*(f + g*x) + e)/(d*g), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(d + e/(f + g*x)**S(2))**q), x), x, (f + g*x)*log(c*(d + e/(f + g*x)**S(2))**q)/g + S(2)*sqrt(e)*q*atan(sqrt(d)*(f + g*x)/sqrt(e))/(sqrt(d)*g), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(d + e/(f + g*x)**S(3))**q), x), x, (f + g*x)*log(c*(d + e/(f + g*x)**S(3))**q)/g + e**(S(1)/3)*q*log(d**(S(1)/3)*(f + g*x) + e**(S(1)/3))/(d**(S(1)/3)*g) - e**(S(1)/3)*q*log(d**(S(2)/3)*(f + g*x)**S(2) - d**(S(1)/3)*e**(S(1)/3)*(f + g*x) + e**(S(2)/3))/(S(2)*d**(S(1)/3)*g) - sqrt(S(3))*e**(S(1)/3)*q*atan(sqrt(S(3))*(-S(2)*d**(S(1)/3)*(f + g*x) + e**(S(1)/3))/(S(3)*e**(S(1)/3)))/(d**(S(1)/3)*g), expand=True, _diff=True, _numerical=True)
def test_2():
assert rubi_test(rubi_integrate(x**m*log(a*x**n), x), x, -n*x**(m + S(1))/(m + S(1))**S(2) + x**(m + S(1))*log(a*x**n)/(m + S(1)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**(n + S(-1))*log(a*x**n), x), x, x**n*log(a*x**n)/n - x**n/n, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)*log(a*x**n), x), x, -n*x**S(4)/S(16) + x**S(4)*log(a*x**n)/S(4), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)*log(a*x**n), x), x, -n*x**S(3)/S(9) + x**S(3)*log(a*x**n)/S(3), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(a*x**n), x), x, -n*x**S(2)/S(4) + x**S(2)*log(a*x**n)/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*x**n), x), x, -n*x + x*log(a*x**n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*x**n)/x, x), x, log(a*x**n)**S(2)/(S(2)*n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*x**n)/x**S(2), x), x, -n/x - log(a*x**n)/x, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*x**n)/x**S(3), x), x, -n/(S(4)*x**S(2)) - log(a*x**n)/(S(2)*x**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**m*log(a*x**n)**S(2), x), x, S(2)*n**S(2)*x**(m + S(1))/(m + S(1))**S(3) - S(2)*n*x**(m + S(1))*log(a*x**n)/(m + S(1))**S(2) + x**(m + S(1))*log(a*x**n)**S(2)/(m + S(1)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**(n + S(-1))*log(a*x**n)**S(2), x), x, x**n*log(a*x**n)**S(2)/n - S(2)*x**n*log(a*x**n)/n + S(2)*x**n/n, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)*log(a*x**n)**S(2), x), x, n**S(2)*x**S(4)/S(32) - n*x**S(4)*log(a*x**n)/S(8) + x**S(4)*log(a*x**n)**S(2)/S(4), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)*log(a*x**n)**S(2), x), x, S(2)*n**S(2)*x**S(3)/S(27) - S(2)*n*x**S(3)*log(a*x**n)/S(9) + x**S(3)*log(a*x**n)**S(2)/S(3), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(a*x**n)**S(2), x), x, n**S(2)*x**S(2)/S(4) - n*x**S(2)*log(a*x**n)/S(2) + x**S(2)*log(a*x**n)**S(2)/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*x**n)**S(2), x), x, S(2)*n**S(2)*x - S(2)*n*x*log(a*x**n) + x*log(a*x**n)**S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*x**n)**S(2)/x, x), x, log(a*x**n)**S(3)/(S(3)*n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*x**n)**S(2)/x**S(2), x), x, -S(2)*n**S(2)/x - S(2)*n*log(a*x**n)/x - log(a*x**n)**S(2)/x, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*x**n)**S(2)/x**S(3), x), x, -n**S(2)/(S(4)*x**S(2)) - n*log(a*x**n)/(S(2)*x**S(2)) - log(a*x**n)**S(2)/(S(2)*x**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**m*log(a*x**n)**S(3), x), x, -S(6)*n**S(3)*x**(m + S(1))/(m + S(1))**S(4) + S(6)*n**S(2)*x**(m + S(1))*log(a*x**n)/(m + S(1))**S(3) - S(3)*n*x**(m + S(1))*log(a*x**n)**S(2)/(m + S(1))**S(2) + x**(m + S(1))*log(a*x**n)**S(3)/(m + S(1)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**(n + S(-1))*log(a*x**n)**S(3), x), x, x**n*log(a*x**n)**S(3)/n - S(3)*x**n*log(a*x**n)**S(2)/n + S(6)*x**n*log(a*x**n)/n - S(6)*x**n/n, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)*log(a*x**n)**S(3), x), x, -S(3)*n**S(3)*x**S(4)/S(128) + S(3)*n**S(2)*x**S(4)*log(a*x**n)/S(32) - S(3)*n*x**S(4)*log(a*x**n)**S(2)/S(16) + x**S(4)*log(a*x**n)**S(3)/S(4), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)*log(a*x**n)**S(3), x), x, -S(2)*n**S(3)*x**S(3)/S(27) + S(2)*n**S(2)*x**S(3)*log(a*x**n)/S(9) - n*x**S(3)*log(a*x**n)**S(2)/S(3) + x**S(3)*log(a*x**n)**S(3)/S(3), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(a*x**n)**S(3), x), x, -S(3)*n**S(3)*x**S(2)/S(8) + S(3)*n**S(2)*x**S(2)*log(a*x**n)/S(4) - S(3)*n*x**S(2)*log(a*x**n)**S(2)/S(4) + x**S(2)*log(a*x**n)**S(3)/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*x**n)**S(3), x), x, -S(6)*n**S(3)*x + S(6)*n**S(2)*x*log(a*x**n) - S(3)*n*x*log(a*x**n)**S(2) + x*log(a*x**n)**S(3), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*x**n)**S(3)/x, x), x, log(a*x**n)**S(4)/(S(4)*n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*x**n)**S(3)/x**S(2), x), x, -S(6)*n**S(3)/x - S(6)*n**S(2)*log(a*x**n)/x - S(3)*n*log(a*x**n)**S(2)/x - log(a*x**n)**S(3)/x, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*x**n)**S(3)/x**S(3), x), x, -S(3)*n**S(3)/(S(8)*x**S(2)) - S(3)*n**S(2)*log(a*x**n)/(S(4)*x**S(2)) - S(3)*n*log(a*x**n)**S(2)/(S(4)*x**S(2)) - log(a*x**n)**S(3)/(S(2)*x**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**(S(5)/2)*log(a*x), x), x, S(2)*x**(S(7)/2)*log(a*x)/S(7) - S(4)*x**(S(7)/2)/S(49), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**(S(3)/2)*log(a*x), x), x, S(2)*x**(S(5)/2)*log(a*x)/S(5) - S(4)*x**(S(5)/2)/S(25), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(sqrt(x)*log(a*x), x), x, S(2)*x**(S(3)/2)*log(a*x)/S(3) - S(4)*x**(S(3)/2)/S(9), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*x)/sqrt(x), x), x, S(2)*sqrt(x)*log(a*x) - S(4)*sqrt(x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*x)/x**(S(3)/2), x), x, -S(2)*log(a*x)/sqrt(x) - S(4)/sqrt(x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*x)/x**(S(5)/2), x), x, -S(2)*log(a*x)/(S(3)*x**(S(3)/2)) - S(4)/(S(9)*x**(S(3)/2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**m/log(a*x**n), x), x, x**(m + S(1))*(a*x**n)**(-(m + S(1))/n)*Ei((m + S(1))*log(a*x**n)/n)/n, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**(n + S(-1))/log(a*x**n), x), x, li(a*x**n)/(a*n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)/log(a*x**n), x), x, x**S(4)*(a*x**n)**(-S(4)/n)*Ei(S(4)*log(a*x**n)/n)/n, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)/log(a*x**n), x), x, x**S(3)*(a*x**n)**(-S(3)/n)*Ei(S(3)*log(a*x**n)/n)/n, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x/log(a*x**n), x), x, x**S(2)*(a*x**n)**(-S(2)/n)*Ei(S(2)*log(a*x**n)/n)/n, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/log(a*x**n), x), x, x*(a*x**n)**(-S(1)/n)*Ei(log(a*x**n)/n)/n, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x*log(a*x**n)), x), x, log(log(a*x**n))/n, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x**S(2)*log(a*x**n)), x), x, (a*x**n)**(S(1)/n)*Ei(-log(a*x**n)/n)/(n*x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x**S(3)*log(a*x**n)), x), x, (a*x**n)**(S(2)/n)*Ei(-S(2)*log(a*x**n)/n)/(n*x**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**m/log(a*x**n)**S(2), x), x, -x**(m + S(1))/(n*log(a*x**n)) + x**(m + S(1))*(a*x**n)**(-(m + S(1))/n)*(m + S(1))*Ei((m + S(1))*log(a*x**n)/n)/n**S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**(n + S(-1))/log(a*x**n)**S(2), x), x, -x**n/(n*log(a*x**n)) + li(a*x**n)/(a*n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)/log(a*x**n)**S(2), x), x, -x**S(4)/(n*log(a*x**n)) + S(4)*x**S(4)*(a*x**n)**(-S(4)/n)*Ei(S(4)*log(a*x**n)/n)/n**S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)/log(a*x**n)**S(2), x), x, -x**S(3)/(n*log(a*x**n)) + S(3)*x**S(3)*(a*x**n)**(-S(3)/n)*Ei(S(3)*log(a*x**n)/n)/n**S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x/log(a*x**n)**S(2), x), x, -x**S(2)/(n*log(a*x**n)) + S(2)*x**S(2)*(a*x**n)**(-S(2)/n)*Ei(S(2)*log(a*x**n)/n)/n**S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*x**n)**(S(-2)), x), x, -x/(n*log(a*x**n)) + x*(a*x**n)**(-S(1)/n)*Ei(log(a*x**n)/n)/n**S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x*log(a*x**n)**S(2)), x), x, -S(1)/(n*log(a*x**n)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x**S(2)*log(a*x**n)**S(2)), x), x, -S(1)/(n*x*log(a*x**n)) - (a*x**n)**(S(1)/n)*Ei(-log(a*x**n)/n)/(n**S(2)*x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x**S(3)*log(a*x**n)**S(2)), x), x, -S(1)/(n*x**S(2)*log(a*x**n)) - S(2)*(a*x**n)**(S(2)/n)*Ei(-S(2)*log(a*x**n)/n)/(n**S(2)*x**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**m/log(a*x**n)**S(3), x), x, -x**(m + S(1))/(S(2)*n*log(a*x**n)**S(2)) - x**(m + S(1))*(m/S(2) + S(1)/2)/(n**S(2)*log(a*x**n)) + x**(m + S(1))*(a*x**n)**(-(m + S(1))/n)*(m + S(1))**S(2)*Ei((m + S(1))*log(a*x**n)/n)/(S(2)*n**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**(n + S(-1))/log(a*x**n)**S(3), x), x, -x**n/(S(2)*n*log(a*x**n)) - x**n/(S(2)*n*log(a*x**n)**S(2)) + li(a*x**n)/(S(2)*a*n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)/log(a*x**n)**S(3), x), x, -x**S(4)/(S(2)*n*log(a*x**n)**S(2)) - S(2)*x**S(4)/(n**S(2)*log(a*x**n)) + S(8)*x**S(4)*(a*x**n)**(-S(4)/n)*Ei(S(4)*log(a*x**n)/n)/n**S(3), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)/log(a*x**n)**S(3), x), x, -x**S(3)/(S(2)*n*log(a*x**n)**S(2)) - S(3)*x**S(3)/(S(2)*n**S(2)*log(a*x**n)) + S(9)*x**S(3)*(a*x**n)**(-S(3)/n)*Ei(S(3)*log(a*x**n)/n)/(S(2)*n**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x/log(a*x**n)**S(3), x), x, -x**S(2)/(S(2)*n*log(a*x**n)**S(2)) - x**S(2)/(n**S(2)*log(a*x**n)) + S(2)*x**S(2)*(a*x**n)**(-S(2)/n)*Ei(S(2)*log(a*x**n)/n)/n**S(3), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*x**n)**(S(-3)), x), x, -x/(S(2)*n*log(a*x**n)**S(2)) - x/(S(2)*n**S(2)*log(a*x**n)) + x*(a*x**n)**(-S(1)/n)*Ei(log(a*x**n)/n)/(S(2)*n**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x*log(a*x**n)**S(3)), x), x, -S(1)/(S(2)*n*log(a*x**n)**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x**S(2)*log(a*x**n)**S(3)), x), x, -S(1)/(S(2)*n*x*log(a*x**n)**S(2)) + S(1)/(S(2)*n**S(2)*x*log(a*x**n)) + (a*x**n)**(S(1)/n)*Ei(-log(a*x**n)/n)/(S(2)*n**S(3)*x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x**S(3)*log(a*x**n)**S(3)), x), x, -S(1)/(S(2)*n*x**S(2)*log(a*x**n)**S(2)) + S(1)/(n**S(2)*x**S(2)*log(a*x**n)) + S(2)*(a*x**n)**(S(2)/n)*Ei(-S(2)*log(a*x**n)/n)/(n**S(3)*x**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**m/log(a*x), x), x, x**(m + S(1))*(a*x)**(-m + S(-1))*Ei((m + S(1))*log(a*x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)/log(a*x), x), x, Ei(S(4)*log(a*x))/a**S(4), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)/log(a*x), x), x, Ei(S(3)*log(a*x))/a**S(3), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x/log(a*x), x), x, Ei(S(2)*log(a*x))/a**S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/log(a*x), x), x, li(a*x)/a, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x*log(a*x)), x), x, log(log(a*x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x**S(2)*log(a*x)), x), x, a*Ei(-log(a*x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x**S(3)*log(a*x)), x), x, a**S(2)*Ei(-S(2)*log(a*x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**m/log(a*x)**S(2), x), x, x**(m + S(1))*(a*x)**(-m + S(-1))*(m + S(1))*Ei((m + S(1))*log(a*x)) - x**(m + S(1))/log(a*x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)/log(a*x)**S(2), x), x, -x**S(4)/log(a*x) + S(4)*Ei(S(4)*log(a*x))/a**S(4), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)/log(a*x)**S(2), x), x, -x**S(3)/log(a*x) + S(3)*Ei(S(3)*log(a*x))/a**S(3), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x/log(a*x)**S(2), x), x, -x**S(2)/log(a*x) + S(2)*Ei(S(2)*log(a*x))/a**S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*x)**(S(-2)), x), x, -x/log(a*x) + li(a*x)/a, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x*log(a*x)**S(2)), x), x, -S(1)/log(a*x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x**S(2)*log(a*x)**S(2)), x), x, -a*Ei(-log(a*x)) - S(1)/(x*log(a*x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x**S(3)*log(a*x)**S(2)), x), x, -S(2)*a**S(2)*Ei(-S(2)*log(a*x)) - S(1)/(x**S(2)*log(a*x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**m/log(a*x)**S(3), x), x, x**(m + S(1))*(a*x)**(-m + S(-1))*(m + S(1))**S(2)*Ei((m + S(1))*log(a*x))/S(2) - x**(m + S(1))*(m/S(2) + S(1)/2)/log(a*x) - x**(m + S(1))/(S(2)*log(a*x)**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)/log(a*x)**S(3), x), x, -S(2)*x**S(4)/log(a*x) - x**S(4)/(S(2)*log(a*x)**S(2)) + S(8)*Ei(S(4)*log(a*x))/a**S(4), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)/log(a*x)**S(3), x), x, -S(3)*x**S(3)/(S(2)*log(a*x)) - x**S(3)/(S(2)*log(a*x)**S(2)) + S(9)*Ei(S(3)*log(a*x))/(S(2)*a**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x/log(a*x)**S(3), x), x, -x**S(2)/log(a*x) - x**S(2)/(S(2)*log(a*x)**S(2)) + S(2)*Ei(S(2)*log(a*x))/a**S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*x)**(S(-3)), x), x, -x/(S(2)*log(a*x)) - x/(S(2)*log(a*x)**S(2)) + li(a*x)/(S(2)*a), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x*log(a*x)**S(3)), x), x, -S(1)/(S(2)*log(a*x)**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x**S(2)*log(a*x)**S(3)), x), x, a*Ei(-log(a*x))/S(2) + S(1)/(S(2)*x*log(a*x)) - S(1)/(S(2)*x*log(a*x)**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x**S(3)*log(a*x)**S(3)), x), x, S(2)*a**S(2)*Ei(-S(2)*log(a*x)) + S(1)/(x**S(2)*log(a*x)) - S(1)/(S(2)*x**S(2)*log(a*x)**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**m*sqrt(log(a*x**n)), x), x, -sqrt(pi)*sqrt(n)*x**(m + S(1))*(a*x**n)**(-(m + S(1))/n)*erfi(sqrt(m + S(1))*sqrt(log(a*x**n))/sqrt(n))/(S(2)*(m + S(1))**(S(3)/2)) + x**(m + S(1))*sqrt(log(a*x**n))/(m + S(1)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)*sqrt(log(a*x**n)), x), x, -sqrt(pi)*sqrt(n)*x**S(4)*(a*x**n)**(-S(4)/n)*erfi(S(2)*sqrt(log(a*x**n))/sqrt(n))/S(16) + x**S(4)*sqrt(log(a*x**n))/S(4), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)*sqrt(log(a*x**n)), x), x, -sqrt(S(3))*sqrt(pi)*sqrt(n)*x**S(3)*(a*x**n)**(-S(3)/n)*erfi(sqrt(S(3))*sqrt(log(a*x**n))/sqrt(n))/S(18) + x**S(3)*sqrt(log(a*x**n))/S(3), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*sqrt(log(a*x**n)), x), x, -sqrt(S(2))*sqrt(pi)*sqrt(n)*x**S(2)*(a*x**n)**(-S(2)/n)*erfi(sqrt(S(2))*sqrt(log(a*x**n))/sqrt(n))/S(8) + x**S(2)*sqrt(log(a*x**n))/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(sqrt(log(a*x**n)), x), x, -sqrt(pi)*sqrt(n)*x*(a*x**n)**(-S(1)/n)*erfi(sqrt(log(a*x**n))/sqrt(n))/S(2) + x*sqrt(log(a*x**n)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(sqrt(log(a*x**n))/x, x), x, S(2)*log(a*x**n)**(S(3)/2)/(S(3)*n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(sqrt(log(a*x**n))/x**S(2), x), x, sqrt(pi)*sqrt(n)*(a*x**n)**(S(1)/n)*erf(sqrt(log(a*x**n))/sqrt(n))/(S(2)*x) - sqrt(log(a*x**n))/x, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(sqrt(log(a*x**n))/x**S(3), x), x, sqrt(S(2))*sqrt(pi)*sqrt(n)*(a*x**n)**(S(2)/n)*erf(sqrt(S(2))*sqrt(log(a*x**n))/sqrt(n))/(S(8)*x**S(2)) - sqrt(log(a*x**n))/(S(2)*x**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**m*log(a*x**n)**(S(3)/2), x), x, S(3)*sqrt(pi)*n**(S(3)/2)*x**(m + S(1))*(a*x**n)**(-(m + S(1))/n)*erfi(sqrt(m + S(1))*sqrt(log(a*x**n))/sqrt(n))/(S(4)*(m + S(1))**(S(5)/2)) - S(3)*n*x**(m + S(1))*sqrt(log(a*x**n))/(S(2)*(m + S(1))**S(2)) + x**(m + S(1))*log(a*x**n)**(S(3)/2)/(m + S(1)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)*log(a*x**n)**(S(3)/2), x), x, S(3)*sqrt(pi)*n**(S(3)/2)*x**S(4)*(a*x**n)**(-S(4)/n)*erfi(S(2)*sqrt(log(a*x**n))/sqrt(n))/S(128) - S(3)*n*x**S(4)*sqrt(log(a*x**n))/S(32) + x**S(4)*log(a*x**n)**(S(3)/2)/S(4), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)*log(a*x**n)**(S(3)/2), x), x, sqrt(S(3))*sqrt(pi)*n**(S(3)/2)*x**S(3)*(a*x**n)**(-S(3)/n)*erfi(sqrt(S(3))*sqrt(log(a*x**n))/sqrt(n))/S(36) - n*x**S(3)*sqrt(log(a*x**n))/S(6) + x**S(3)*log(a*x**n)**(S(3)/2)/S(3), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(a*x**n)**(S(3)/2), x), x, S(3)*sqrt(S(2))*sqrt(pi)*n**(S(3)/2)*x**S(2)*(a*x**n)**(-S(2)/n)*erfi(sqrt(S(2))*sqrt(log(a*x**n))/sqrt(n))/S(32) - S(3)*n*x**S(2)*sqrt(log(a*x**n))/S(8) + x**S(2)*log(a*x**n)**(S(3)/2)/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*x**n)**(S(3)/2), x), x, S(3)*sqrt(pi)*n**(S(3)/2)*x*(a*x**n)**(-S(1)/n)*erfi(sqrt(log(a*x**n))/sqrt(n))/S(4) - S(3)*n*x*sqrt(log(a*x**n))/S(2) + x*log(a*x**n)**(S(3)/2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*x**n)**(S(3)/2)/x, x), x, S(2)*log(a*x**n)**(S(5)/2)/(S(5)*n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*x**n)**(S(3)/2)/x**S(2), x), x, S(3)*sqrt(pi)*n**(S(3)/2)*(a*x**n)**(S(1)/n)*erf(sqrt(log(a*x**n))/sqrt(n))/(S(4)*x) - S(3)*n*sqrt(log(a*x**n))/(S(2)*x) - log(a*x**n)**(S(3)/2)/x, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*x**n)**(S(3)/2)/x**S(3), x), x, S(3)*sqrt(S(2))*sqrt(pi)*n**(S(3)/2)*(a*x**n)**(S(2)/n)*erf(sqrt(S(2))*sqrt(log(a*x**n))/sqrt(n))/(S(32)*x**S(2)) - S(3)*n*sqrt(log(a*x**n))/(S(8)*x**S(2)) - log(a*x**n)**(S(3)/2)/(S(2)*x**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**m/sqrt(log(a*x**n)), x), x, sqrt(pi)*x**(m + S(1))*(a*x**n)**(-(m + S(1))/n)*erfi(sqrt(m + S(1))*sqrt(log(a*x**n))/sqrt(n))/(sqrt(n)*sqrt(m + S(1))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)/sqrt(log(a*x**n)), x), x, sqrt(pi)*x**S(4)*(a*x**n)**(-S(4)/n)*erfi(S(2)*sqrt(log(a*x**n))/sqrt(n))/(S(2)*sqrt(n)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)/sqrt(log(a*x**n)), x), x, sqrt(S(3))*sqrt(pi)*x**S(3)*(a*x**n)**(-S(3)/n)*erfi(sqrt(S(3))*sqrt(log(a*x**n))/sqrt(n))/(S(3)*sqrt(n)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x/sqrt(log(a*x**n)), x), x, sqrt(S(2))*sqrt(pi)*x**S(2)*(a*x**n)**(-S(2)/n)*erfi(sqrt(S(2))*sqrt(log(a*x**n))/sqrt(n))/(S(2)*sqrt(n)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/sqrt(log(a*x**n)), x), x, sqrt(pi)*x*(a*x**n)**(-S(1)/n)*erfi(sqrt(log(a*x**n))/sqrt(n))/sqrt(n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x*sqrt(log(a*x**n))), x), x, S(2)*sqrt(log(a*x**n))/n, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x**S(2)*sqrt(log(a*x**n))), x), x, sqrt(pi)*(a*x**n)**(S(1)/n)*erf(sqrt(log(a*x**n))/sqrt(n))/(sqrt(n)*x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x**S(3)*sqrt(log(a*x**n))), x), x, sqrt(S(2))*sqrt(pi)*(a*x**n)**(S(2)/n)*erf(sqrt(S(2))*sqrt(log(a*x**n))/sqrt(n))/(S(2)*sqrt(n)*x**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**m/log(a*x**n)**(S(3)/2), x), x, -S(2)*x**(m + S(1))/(n*sqrt(log(a*x**n))) + S(2)*sqrt(pi)*x**(m + S(1))*(a*x**n)**(-(m + S(1))/n)*sqrt(m + S(1))*erfi(sqrt(m + S(1))*sqrt(log(a*x**n))/sqrt(n))/n**(S(3)/2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)/log(a*x**n)**(S(3)/2), x), x, -S(2)*x**S(4)/(n*sqrt(log(a*x**n))) + S(4)*sqrt(pi)*x**S(4)*(a*x**n)**(-S(4)/n)*erfi(S(2)*sqrt(log(a*x**n))/sqrt(n))/n**(S(3)/2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)/log(a*x**n)**(S(3)/2), x), x, -S(2)*x**S(3)/(n*sqrt(log(a*x**n))) + S(2)*sqrt(S(3))*sqrt(pi)*x**S(3)*(a*x**n)**(-S(3)/n)*erfi(sqrt(S(3))*sqrt(log(a*x**n))/sqrt(n))/n**(S(3)/2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x/log(a*x**n)**(S(3)/2), x), x, -S(2)*x**S(2)/(n*sqrt(log(a*x**n))) + S(2)*sqrt(S(2))*sqrt(pi)*x**S(2)*(a*x**n)**(-S(2)/n)*erfi(sqrt(S(2))*sqrt(log(a*x**n))/sqrt(n))/n**(S(3)/2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*x**n)**(S(-3)/2), x), x, -S(2)*x/(n*sqrt(log(a*x**n))) + S(2)*sqrt(pi)*x*(a*x**n)**(-S(1)/n)*erfi(sqrt(log(a*x**n))/sqrt(n))/n**(S(3)/2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x*log(a*x**n)**(S(3)/2)), x), x, -S(2)/(n*sqrt(log(a*x**n))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x**S(2)*log(a*x**n)**(S(3)/2)), x), x, -S(2)/(n*x*sqrt(log(a*x**n))) - S(2)*sqrt(pi)*(a*x**n)**(S(1)/n)*erf(sqrt(log(a*x**n))/sqrt(n))/(n**(S(3)/2)*x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x**S(3)*log(a*x**n)**(S(3)/2)), x), x, -S(2)/(n*x**S(2)*sqrt(log(a*x**n))) - S(2)*sqrt(S(2))*sqrt(pi)*(a*x**n)**(S(2)/n)*erf(sqrt(S(2))*sqrt(log(a*x**n))/sqrt(n))/(n**(S(3)/2)*x**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**m/log(a*x**n)**(S(5)/2), x), x, -S(2)*x**(m + S(1))/(S(3)*n*log(a*x**n)**(S(3)/2)) - x**(m + S(1))*(S(4)*m/S(3) + S(4)/3)/(n**S(2)*sqrt(log(a*x**n))) + S(4)*sqrt(pi)*x**(m + S(1))*(a*x**n)**(-(m + S(1))/n)*(m + S(1))**(S(3)/2)*erfi(sqrt(m + S(1))*sqrt(log(a*x**n))/sqrt(n))/(S(3)*n**(S(5)/2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)/log(a*x**n)**(S(5)/2), x), x, -S(2)*x**S(4)/(S(3)*n*log(a*x**n)**(S(3)/2)) - S(16)*x**S(4)/(S(3)*n**S(2)*sqrt(log(a*x**n))) + S(32)*sqrt(pi)*x**S(4)*(a*x**n)**(-S(4)/n)*erfi(S(2)*sqrt(log(a*x**n))/sqrt(n))/(S(3)*n**(S(5)/2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)/log(a*x**n)**(S(5)/2), x), x, -S(2)*x**S(3)/(S(3)*n*log(a*x**n)**(S(3)/2)) - S(4)*x**S(3)/(n**S(2)*sqrt(log(a*x**n))) + S(4)*sqrt(S(3))*sqrt(pi)*x**S(3)*(a*x**n)**(-S(3)/n)*erfi(sqrt(S(3))*sqrt(log(a*x**n))/sqrt(n))/n**(S(5)/2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x/log(a*x**n)**(S(5)/2), x), x, -S(2)*x**S(2)/(S(3)*n*log(a*x**n)**(S(3)/2)) - S(8)*x**S(2)/(S(3)*n**S(2)*sqrt(log(a*x**n))) + S(8)*sqrt(S(2))*sqrt(pi)*x**S(2)*(a*x**n)**(-S(2)/n)*erfi(sqrt(S(2))*sqrt(log(a*x**n))/sqrt(n))/(S(3)*n**(S(5)/2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*x**n)**(S(-5)/2), x), x, -S(2)*x/(S(3)*n*log(a*x**n)**(S(3)/2)) - S(4)*x/(S(3)*n**S(2)*sqrt(log(a*x**n))) + S(4)*sqrt(pi)*x*(a*x**n)**(-S(1)/n)*erfi(sqrt(log(a*x**n))/sqrt(n))/(S(3)*n**(S(5)/2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x*log(a*x**n)**(S(5)/2)), x), x, -S(2)/(S(3)*n*log(a*x**n)**(S(3)/2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x**S(2)*log(a*x**n)**(S(5)/2)), x), x, -S(2)/(S(3)*n*x*log(a*x**n)**(S(3)/2)) + S(4)/(S(3)*n**S(2)*x*sqrt(log(a*x**n))) + S(4)*sqrt(pi)*(a*x**n)**(S(1)/n)*erf(sqrt(log(a*x**n))/sqrt(n))/(S(3)*n**(S(5)/2)*x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x**S(3)*log(a*x**n)**(S(5)/2)), x), x, -S(2)/(S(3)*n*x**S(2)*log(a*x**n)**(S(3)/2)) + S(8)/(S(3)*n**S(2)*x**S(2)*sqrt(log(a*x**n))) + S(8)*sqrt(S(2))*sqrt(pi)*(a*x**n)**(S(2)/n)*erf(sqrt(S(2))*sqrt(log(a*x**n))/sqrt(n))/(S(3)*n**(S(5)/2)*x**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**m*log(a*x)**p, x), x, x**(m + S(1))*(a*x)**(-m + S(-1))*((-m + S(-1))*log(a*x))**(-p)*Gamma(p + S(1), (-m + S(-1))*log(a*x))*log(a*x)**p/(m + S(1)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)*log(a*x)**p, x), x, S(4)**(-p + S(-1))*(-log(a*x))**(-p)*Gamma(p + S(1), -S(4)*log(a*x))*log(a*x)**p/a**S(4), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)*log(a*x)**p, x), x, S(3)**(-p + S(-1))*(-log(a*x))**(-p)*Gamma(p + S(1), -S(3)*log(a*x))*log(a*x)**p/a**S(3), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(a*x)**p, x), x, S(2)**(-p + S(-1))*(-log(a*x))**(-p)*Gamma(p + S(1), -S(2)*log(a*x))*log(a*x)**p/a**S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*x)**p, x), x, (-log(a*x))**(-p)*Gamma(p + S(1), -log(a*x))*log(a*x)**p/a, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*x)**p/x, x), x, log(a*x)**(p + S(1))/(p + S(1)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*x)**p/x**S(2), x), x, -a*Gamma(p + S(1), log(a*x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*x)**p/x**S(3), x), x, -S(2)**(-p + S(-1))*a**S(2)*Gamma(p + S(1), S(2)*log(a*x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**m*log(a*x**n)**p, x), x, x**(m + S(1))*(a*x**n)**(-(m + S(1))/n)*((-m + S(-1))*log(a*x**n)/n)**(-p)*Gamma(p + S(1), (-m + S(-1))*log(a*x**n)/n)*log(a*x**n)**p/(m + S(1)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**(n + S(-1))*log(a*x**n)**p, x), x, (-log(a*x**n))**(-p)*Gamma(p + S(1), -log(a*x**n))*log(a*x**n)**p/(a*n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)*log(a*x**n)**p, x), x, S(4)**(-p + S(-1))*x**S(4)*(a*x**n)**(-S(4)/n)*(-log(a*x**n)/n)**(-p)*Gamma(p + S(1), -S(4)*log(a*x**n)/n)*log(a*x**n)**p, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)*log(a*x**n)**p, x), x, S(3)**(-p + S(-1))*x**S(3)*(a*x**n)**(-S(3)/n)*(-log(a*x**n)/n)**(-p)*Gamma(p + S(1), -S(3)*log(a*x**n)/n)*log(a*x**n)**p, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(a*x**n)**p, x), x, S(2)**(-p + S(-1))*x**S(2)*(a*x**n)**(-S(2)/n)*(-log(a*x**n)/n)**(-p)*Gamma(p + S(1), -S(2)*log(a*x**n)/n)*log(a*x**n)**p, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*x**n)**p, x), x, x*(a*x**n)**(-S(1)/n)*(-log(a*x**n)/n)**(-p)*Gamma(p + S(1), -log(a*x**n)/n)*log(a*x**n)**p, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*x**n)**p/x, x), x, log(a*x**n)**(p + S(1))/(n*(p + S(1))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*x**n)**p/x**S(2), x), x, -(a*x**n)**(S(1)/n)*(log(a*x**n)/n)**(-p)*Gamma(p + S(1), log(a*x**n)/n)*log(a*x**n)**p/x, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*x**n)**p/x**S(3), x), x, -S(2)**(-p + S(-1))*(a*x**n)**(S(2)/n)*(log(a*x**n)/n)**(-p)*Gamma(p + S(1), S(2)*log(a*x**n)/n)*log(a*x**n)**p/x**S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**m*log(c*(b*x**n)**p), x), x, -n*p*x**(m + S(1))/(m + S(1))**S(2) + x**(m + S(1))*log(c*(b*x**n)**p)/(m + S(1)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)*log(c*(b*x**n)**p), x), x, -n*p*x**S(3)/S(9) + x**S(3)*log(c*(b*x**n)**p)/S(3), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(c*(b*x**n)**p), x), x, -n*p*x**S(2)/S(4) + x**S(2)*log(c*(b*x**n)**p)/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(b*x**n)**p), x), x, -n*p*x + x*log(c*(b*x**n)**p), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(b*x**n)**p)/x, x), x, log(c*(b*x**n)**p)**S(2)/(S(2)*n*p), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(b*x**n)**p)/x**S(2), x), x, -n*p/x - log(c*(b*x**n)**p)/x, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(b*x**n)**p)/x**S(3), x), x, -n*p/(S(4)*x**S(2)) - log(c*(b*x**n)**p)/(S(2)*x**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(b*x**n)**p)/x**S(4), x), x, -n*p/(S(9)*x**S(3)) - log(c*(b*x**n)**p)/(S(3)*x**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**m*log(c*(b*x**n)**p)**S(2), x), x, S(2)*n**S(2)*p**S(2)*x**(m + S(1))/(m + S(1))**S(3) - S(2)*n*p*x**(m + S(1))*log(c*(b*x**n)**p)/(m + S(1))**S(2) + x**(m + S(1))*log(c*(b*x**n)**p)**S(2)/(m + S(1)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)*log(c*(b*x**n)**p)**S(2), x), x, S(2)*n**S(2)*p**S(2)*x**S(3)/S(27) - S(2)*n*p*x**S(3)*log(c*(b*x**n)**p)/S(9) + x**S(3)*log(c*(b*x**n)**p)**S(2)/S(3), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(c*(b*x**n)**p)**S(2), x), x, n**S(2)*p**S(2)*x**S(2)/S(4) - n*p*x**S(2)*log(c*(b*x**n)**p)/S(2) + x**S(2)*log(c*(b*x**n)**p)**S(2)/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(b*x**n)**p)**S(2), x), x, S(2)*n**S(2)*p**S(2)*x - S(2)*n*p*x*log(c*(b*x**n)**p) + x*log(c*(b*x**n)**p)**S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(b*x**n)**p)**S(2)/x, x), x, log(c*(b*x**n)**p)**S(3)/(S(3)*n*p), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(b*x**n)**p)**S(2)/x**S(2), x), x, -S(2)*n**S(2)*p**S(2)/x - S(2)*n*p*log(c*(b*x**n)**p)/x - log(c*(b*x**n)**p)**S(2)/x, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(b*x**n)**p)**S(2)/x**S(3), x), x, -n**S(2)*p**S(2)/(S(4)*x**S(2)) - n*p*log(c*(b*x**n)**p)/(S(2)*x**S(2)) - log(c*(b*x**n)**p)**S(2)/(S(2)*x**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(b*x**n)**p)**S(2)/x**S(4), x), x, -S(2)*n**S(2)*p**S(2)/(S(27)*x**S(3)) - S(2)*n*p*log(c*(b*x**n)**p)/(S(9)*x**S(3)) - log(c*(b*x**n)**p)**S(2)/(S(3)*x**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**m/log(c*(b*x**n)**p), x), x, x**(m + S(1))*(c*(b*x**n)**p)**(-(m + S(1))/(n*p))*Ei((m + S(1))*log(c*(b*x**n)**p)/(n*p))/(n*p), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**m/log(c*(b*x**n)**p)**S(2), x), x, -x**(m + S(1))/(n*p*log(c*(b*x**n)**p)) + x**(m + S(1))*(c*(b*x**n)**p)**(-(m + S(1))/(n*p))*(m + S(1))*Ei((m + S(1))*log(c*(b*x**n)**p)/(n*p))/(n**S(2)*p**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**m*log(c*(b*x**n)**p)**q, x), x, x**(m + S(1))*(c*(b*x**n)**p)**(-(m + S(1))/(n*p))*((-m + S(-1))*log(c*(b*x**n)**p)/(n*p))**(-q)*Gamma(q + S(1), (-m + S(-1))*log(c*(b*x**n)**p)/(n*p))*log(c*(b*x**n)**p)**q/(m + S(1)), expand=True, _diff=True, _numerical=True)
# Fixtures: (a + b*x)**m multiplied or divided by log(c*x) (then generalized
# to log(c*x**n)). The symbolic-exponent cases expect hypergeometric (hyper)
# results; negative integer powers expect polylog / elementary log terms.
# Some cases accept either of two algebraically-equivalent closed forms
# (joined with `or`).
assert rubi_test(rubi_integrate((a + b*x)**m*log(c*x), x), x, (a + b*x)**(m + S(1))*log(c*x)/(b*(m + S(1))) + (a + b*x)**(m + S(2))*hyper((S(1), m + S(2)), (m + S(3),), S(1) + b*x/a)/(a*b*(m**S(2) + S(3)*m + S(2))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*x)**S(3)*log(c*x), x), x, -a**S(4)*log(x)/(S(4)*b) - a**S(3)*x - S(3)*a**S(2)*b*x**S(2)/S(4) - a*b**S(2)*x**S(3)/S(3) - b**S(3)*x**S(4)/S(16) + (a + b*x)**S(4)*log(c*x)/(S(4)*b), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*x)**S(2)*log(c*x), x), x, -a**S(3)*log(x)/(S(3)*b) - a**S(2)*x - a*b*x**S(2)/S(2) - b**S(2)*x**S(3)/S(9) + (a + b*x)**S(3)*log(c*x)/(S(3)*b), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*x)*log(c*x), x), x, -a*x - b*x**S(2)/S(4) + x*(S(2)*a + b*x)*log(c*x)/S(2), expand=True, _diff=True, _numerical=True) or rubi_test(rubi_integrate((a + b*x)*log(c*x), x), x, -a**S(2)*log(x)/(S(2)*b) - a*x - b*x**S(2)/S(4) + (a + b*x)**S(2)*log(c*x)/(S(2)*b), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*x)/(a + b*x), x), x, log((a + b*x)/a)*log(c*x)/b + polylog(S(2), -b*x/a)/b, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*x)/(a + b*x)**S(2), x), x, -log(c*x)/(b*(a + b*x)) + log(x)/(a*b) - log(a + b*x)/(a*b), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*x)/(a + b*x)**S(3), x), x, -log(c*x)/(S(2)*b*(a + b*x)**S(2)) + S(1)/(S(2)*a*b*(a + b*x)) + log(x)/(S(2)*a**S(2)*b) - log(a + b*x)/(S(2)*a**S(2)*b), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*x)/(a + b*x)**S(4), x), x, -log(c*x)/(S(3)*b*(a + b*x)**S(3)) + S(1)/(S(6)*a*b*(a + b*x)**S(2)) + S(1)/(S(3)*a**S(2)*b*(a + b*x)) + log(x)/(S(3)*a**S(3)*b) - log(a + b*x)/(S(3)*a**S(3)*b), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*x)**m*log(c*x**n), x), x, (a + b*x)**(m + S(1))*log(c*x**n)/(b*(m + S(1))) + n*(a + b*x)**(m + S(2))*hyper((S(1), m + S(2)), (m + S(3),), S(1) + b*x/a)/(a*b*(m**S(2) + S(3)*m + S(2))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*x)**S(3)*log(c*x**n), x), x, -a**S(4)*n*log(x)/(S(4)*b) - a**S(3)*n*x - S(3)*a**S(2)*b*n*x**S(2)/S(4) - a*b**S(2)*n*x**S(3)/S(3) - b**S(3)*n*x**S(4)/S(16) + (a + b*x)**S(4)*log(c*x**n)/(S(4)*b), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*x)**S(2)*log(c*x**n), x), x, -a**S(3)*n*log(x)/(S(3)*b) - a**S(2)*n*x - a*b*n*x**S(2)/S(2) - b**S(2)*n*x**S(3)/S(9) + (a + b*x)**S(3)*log(c*x**n)/(S(3)*b), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*x)*log(c*x**n), x), x, -a**S(2)*n*log(x)/(S(2)*b) - a*n*x - b*n*x**S(2)/S(4) + (a + b*x)**S(2)*log(c*x**n)/(S(2)*b), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*x**n)/(a + b*x), x), x, n*polylog(S(2), -b*x/a)/b + log((a + b*x)/a)*log(c*x**n)/b, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*x**n)/(a + b*x)**S(2), x), x, -log(c*x**n)/(b*(a + b*x)) + n*log(x)/(a*b) - n*log(a + b*x)/(a*b), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*x**n)/(a + b*x)**S(3), x), x, -log(c*x**n)/(S(2)*b*(a + b*x)**S(2)) + n/(S(2)*a*b*(a + b*x)) + n*log(x)/(S(2)*a**S(2)*b) - n*log(a + b*x)/(S(2)*a**S(2)*b), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*x**n)/(a + b*x)**S(4), x), x, -log(c*x**n)/(S(3)*b*(a + b*x)**S(3)) + n/(S(6)*a*b*(a + b*x)**S(2)) + n/(S(3)*a**S(2)*b*(a + b*x)) + n*log(x)/(S(3)*a**S(3)*b) - n*log(a + b*x)/(S(3)*a**S(3)*b), expand=True, _diff=True, _numerical=True)
# Concrete numeric-coefficient instance of the (a + b*x)**(-2) case above.
assert rubi_test(rubi_integrate(log(c*x**n)/(S(4)*x + S(2))**S(2), x), x, n*log(x)/S(8) - n*log(S(2)*x + S(1))/S(8) - log(c*x**n)/(S(8)*(S(2)*x + S(1))), expand=True, _diff=True, _numerical=True)
# Fixtures: integrands whose antiderivative is a pure dilogarithm,
# polylog(2, ...). These exercise the RUBI rules that recognize the
# d/dx polylog(2, u) = -log(1 - u) * u'/u pattern in various disguises
# (scaled arguments, power substitutions, Moebius-transformed arguments).
assert rubi_test(rubi_integrate(log(a*x)/(-a*x + S(1)), x), x, polylog(S(2), -a*x + S(1))/a, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x/a)/(a - x), x), x, polylog(S(2), (a - x)/a), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(a*x**S(2))/(-a*x**S(2) + S(1)), x), x, polylog(S(2), -a*x**S(2) + S(1))/(S(2)*a), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(x**S(2)/a)/(a - x**S(2)), x), x, polylog(S(2), (a - x**S(2))/a)/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**(n + S(-1))*log(a*x**n)/(-a*x**n + S(1)), x), x, polylog(S(2), -a*x**n + S(1))/(a*n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**(n + S(-1))*log(x**n/a)/(a - x**n), x), x, polylog(S(2), (a - x**n)/a)/n, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a/x)/(a*x - x**S(2)), x), x, polylog(S(2), -a/x + S(1))/a, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a/x**S(2))/(a*x - x**S(3)), x), x, polylog(S(2), (-a + x**S(2))/x**S(2))/(S(2)*a), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*x**(-n + S(1)))/(a*x - x**n), x), x, -polylog(S(2), -a*x**(-n + S(1)) + S(1))/(a*(-n + S(1))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(-a*x**(-m)*(-c + S(1))/b + c)/(x*(a + b*x**m)), x), x, polylog(S(2), x**(-m)*(a + b*x**m)*(-c + S(1))/b)/(a*m), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x**(-m)*(a*c - a + b*c*x**m)/b)/(x*(a + b*x**m)), x), x, polylog(S(2), x**(-m)*(a + b*x**m)*(-c + S(1))/b)/(a*m), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + x**(-m)*(a*c*d - d)/(c*e)))/(x*(d + e*x**m)), x), x, polylog(S(2), x**(-m)*(d + e*x**m)*(-a*c + S(1))/e)/(d*m), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x**(-m)*(a*c*d + a*c*e*x**m - d)/e)/(x*(d + e*x**m)), x), x, polylog(S(2), x**(-m)*(d + e*x**m)*(-a*c + S(1))/e)/(d*m), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(S(2)*a/(a + b*x))/(a**S(2) - b**S(2)*x**S(2)), x), x, polylog(S(2), (-a + b*x)/(a + b*x))/(S(2)*a*b), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(S(2)*a/(a + b*x))/((a - b*x)*(a + b*x)), x), x, polylog(S(2), (-a + b*x)/(a + b*x))/(S(2)*a*b), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log((a*(-c + S(1)) + b*x*(c + S(1)))/(a + b*x))/(a**S(2) - b**S(2)*x**S(2)), x), x, polylog(S(2), c*(a - b*x)/(a + b*x))/(S(2)*a*b), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log((a*(-c + S(1)) + b*x*(c + S(1)))/(a + b*x))/((a - b*x)*(a + b*x)), x), x, polylog(S(2), c*(a - b*x)/(a + b*x))/(S(2)*a*b), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(-c*(a - b*x)/(a + b*x) + S(1))/(a**S(2) - b**S(2)*x**S(2)), x), x, polylog(S(2), c*(a - b*x)/(a + b*x))/(S(2)*a*b), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(-c*(a - b*x)/(a + b*x) + S(1))/((a - b*x)*(a + b*x)), x), x, polylog(S(2), c*(a - b*x)/(a + b*x))/(S(2)*a*b), expand=True, _diff=True, _numerical=True)
# Fixtures: (a + b*log(c*x**n)) over an irreducible quadratic denominator.
# Results mix atan/log terms with polylog(2, ...) of the quadratic's roots.
assert rubi_test(rubi_integrate((a + b*log(c*x**n))/(d + e*x**S(2)), x), x, -I*b*n*polylog(S(2), -I*sqrt(e)*x/sqrt(d))/(S(2)*sqrt(d)*sqrt(e)) + I*b*n*polylog(S(2), I*sqrt(e)*x/sqrt(d))/(S(2)*sqrt(d)*sqrt(e)) + (a + b*log(c*x**n))*atan(sqrt(e)*x/sqrt(d))/(sqrt(d)*sqrt(e)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*x**n))/(d + e*x + f*x**S(2)), x), x, b*n*polylog(S(2), -S(2)*f*x/(e - sqrt(-S(4)*d*f + e**S(2))))/sqrt(-S(4)*d*f + e**S(2)) - b*n*polylog(S(2), -S(2)*f*x/(e + sqrt(-S(4)*d*f + e**S(2))))/sqrt(-S(4)*d*f + e**S(2)) + (a + b*log(c*x**n))*log((e + S(2)*f*x - sqrt(-S(4)*d*f + e**S(2)))/(e - sqrt(-S(4)*d*f + e**S(2))))/sqrt(-S(4)*d*f + e**S(2)) - (a + b*log(c*x**n))*log((e + S(2)*f*x + sqrt(-S(4)*d*f + e**S(2)))/(e + sqrt(-S(4)*d*f + e**S(2))))/sqrt(-S(4)*d*f + e**S(2)), expand=True, _diff=True, _numerical=True)
# NOTE: disabled — RUBI returns the same result as Mathematica, but the assertion fails: assert rubi_test(rubi_integrate((d + e*x)**m*log(c*x)/x, x), x, (d + e*x)**m*(d/(e*x) + S(1))**(-m)*log(c*x)*hyper((-m, -m), (-m + S(1),), -d/(e*x))/m - (d + e*x)**m*(d/(e*x) + S(1))**(-m)*hyper((-m, -m, -m), (-m + S(1), -m + S(1)), -d/(e*x))/m**S(2), expand=True, _diff=True, _numerical=True)
# Fixtures: integer, negative, and symbolic powers of (a + b*log(c*x**n)),
# alone and multiplied by x**m. Positive integer powers reduce to polynomial
# combinations of the log; negative/symbolic powers expect Ei and the upper
# incomplete Gamma function.
assert rubi_test(rubi_integrate((a + b*log(c*x**n))**S(3), x), x, S(6)*a*b**S(2)*n**S(2)*x - S(6)*b**S(3)*n**S(3)*x + S(6)*b**S(3)*n**S(2)*x*log(c*x**n) - S(3)*b*n*x*(a + b*log(c*x**n))**S(2) + x*(a + b*log(c*x**n))**S(3), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*x**n))**S(2), x), x, -S(2)*a*b*n*x + S(2)*b**S(2)*n**S(2)*x - S(2)*b**S(2)*n*x*log(c*x**n) + x*(a + b*log(c*x**n))**S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(a + b*log(c*x**n), x), x, a*x - b*n*x + b*x*log(c*x**n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(a + b*log(c*x**n)), x), x, x*(c*x**n)**(-S(1)/n)*exp(-a/(b*n))*Ei((a + b*log(c*x**n))/(b*n))/(b*n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*x**n))**(S(-2)), x), x, -x/(b*n*(a + b*log(c*x**n))) + x*(c*x**n)**(-S(1)/n)*exp(-a/(b*n))*Ei((a + b*log(c*x**n))/(b*n))/(b**S(2)*n**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*x**n))**(S(-3)), x), x, -x/(S(2)*b*n*(a + b*log(c*x**n))**S(2)) - x/(S(2)*b**S(2)*n**S(2)*(a + b*log(c*x**n))) + x*(c*x**n)**(-S(1)/n)*exp(-a/(b*n))*Ei((a + b*log(c*x**n))/(b*n))/(S(2)*b**S(3)*n**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(c*x**n))**m, x), x, x*(c*x**n)**(-S(1)/n)*((-a - b*log(c*x**n))/(b*n))**(-m)*(a + b*log(c*x**n))**m*Gamma(m + S(1), (-a - b*log(c*x**n))/(b*n))*exp(-a/(b*n)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**m/(a + b*log(c*x**n)), x), x, x**(m + S(1))*(c*x**n)**(-(m + S(1))/n)*exp(-a*(m + S(1))/(b*n))*Ei((a + b*log(c*x**n))*(m + S(1))/(b*n))/(b*n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**m/(a + b*log(c*x**n))**S(2), x), x, -x**(m + S(1))/(b*n*(a + b*log(c*x**n))) + x**(m + S(1))*(c*x**n)**(-(m + S(1))/n)*(m + S(1))*exp(-a*(m + S(1))/(b*n))*Ei((a + b*log(c*x**n))*(m + S(1))/(b*n))/(b**S(2)*n**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**m*(a + b*log(c*x**n))**p, x), x, x**(m + S(1))*(c*x**n)**(-(m + S(1))/n)*((a + b*log(c*x**n))*(-m + S(-1))/(b*n))**(-p)*(a + b*log(c*x**n))**p*Gamma(p + S(1), (a + b*log(c*x**n))*(-m + S(-1))/(b*n))*exp(-a*(m + S(1))/(b*n))/(m + S(1)), expand=True, _diff=True, _numerical=True)
# Fixtures: x**m * log(c*(a + b*x**2)**p) and x**m * log(c*(a + b*x**3)**p).
# Symbolic m expects hyper; concrete integer m expects atan/log/polylog
# closed forms (the cubic cases use the real cube-root factorization of
# a + b*x**3, hence the three-term log/atan combinations).
assert rubi_test(rubi_integrate(x**(n + S(-1))*log(-b*x**n/a)/(a + b*x**n), x), x, -polylog(S(2), (a + b*x**n)/a)/(b*n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**m*log(c*(a + b*x**S(2))**p), x), x, x**(m + S(1))*log(c*(a + b*x**S(2))**p)/(m + S(1)) - S(2)*b*p*x**(m + S(3))*hyper((S(1), m/S(2) + S(3)/2), (m/S(2) + S(5)/2,), -b*x**S(2)/a)/(a*(m**S(2) + S(4)*m + S(3))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(4)*log(c*(a + b*x**S(2))**p), x), x, S(2)*a**(S(5)/2)*p*atan(sqrt(b)*x/sqrt(a))/(S(5)*b**(S(5)/2)) - S(2)*a**S(2)*p*x/(S(5)*b**S(2)) + S(2)*a*p*x**S(3)/(S(15)*b) - S(2)*p*x**S(5)/S(25) + x**S(5)*log(c*(a + b*x**S(2))**p)/S(5), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)*log(c*(a + b*x**S(2))**p), x), x, -a**S(2)*p*log(a + b*x**S(2))/(S(4)*b**S(2)) + a*p*x**S(2)/(S(4)*b) - p*x**S(4)/S(8) + x**S(4)*log(c*(a + b*x**S(2))**p)/S(4), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)*log(c*(a + b*x**S(2))**p), x), x, -S(2)*a**(S(3)/2)*p*atan(sqrt(b)*x/sqrt(a))/(S(3)*b**(S(3)/2)) + S(2)*a*p*x/(S(3)*b) - S(2)*p*x**S(3)/S(9) + x**S(3)*log(c*(a + b*x**S(2))**p)/S(3), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(c*(a + b*x**S(2))**p), x), x, -p*x**S(2)/S(2) + (a/S(2) + b*x**S(2)/S(2))*log(c*(a + b*x**S(2))**p)/b, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(2))**p), x), x, S(2)*sqrt(a)*p*atan(sqrt(b)*x/sqrt(a))/sqrt(b) - S(2)*p*x + x*log(c*(a + b*x**S(2))**p), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(2))**p)/x, x), x, p*polylog(S(2), (a + b*x**S(2))/a)/S(2) + log(c*(a + b*x**S(2))**p)*log(-b*x**S(2)/a)/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(2))**p)/x**S(2), x), x, -log(c*(a + b*x**S(2))**p)/x + S(2)*sqrt(b)*p*atan(sqrt(b)*x/sqrt(a))/sqrt(a), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(2))**p)/x**S(3), x), x, b*p*log(x)/a - (a/S(2) + b*x**S(2)/S(2))*log(c*(a + b*x**S(2))**p)/(a*x**S(2)), expand=True, _diff=True, _numerical=True) or rubi_test(rubi_integrate(log(c*(a + b*x**S(2))**p)/x**S(3), x), x, -log(c*(a + b*x**S(2))**p)/(S(2)*x**S(2)) + b*p*log(x)/a - b*p*log(a + b*x**S(2))/(S(2)*a), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(2))**p)/x**S(4), x), x, -log(c*(a + b*x**S(2))**p)/(S(3)*x**S(3)) - S(2)*b*p/(S(3)*a*x) - S(2)*b**(S(3)/2)*p*atan(sqrt(b)*x/sqrt(a))/(S(3)*a**(S(3)/2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(2))**p)/x**S(5), x), x, -log(c*(a + b*x**S(2))**p)/(S(4)*x**S(4)) - b*p/(S(4)*a*x**S(2)) - b**S(2)*p*log(x)/(S(2)*a**S(2)) + b**S(2)*p*log(a + b*x**S(2))/(S(4)*a**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(2))**p)/x**S(6), x), x, -log(c*(a + b*x**S(2))**p)/(S(5)*x**S(5)) - S(2)*b*p/(S(15)*a*x**S(3)) + S(2)*b**S(2)*p/(S(5)*a**S(2)*x) + S(2)*b**(S(5)/2)*p*atan(sqrt(b)*x/sqrt(a))/(S(5)*a**(S(5)/2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(2))**p)/x**S(7), x), x, -log(c*(a + b*x**S(2))**p)/(S(6)*x**S(6)) - b*p/(S(12)*a*x**S(4)) + b**S(2)*p/(S(6)*a**S(2)*x**S(2)) + b**S(3)*p*log(x)/(S(3)*a**S(3)) - b**S(3)*p*log(a + b*x**S(2))/(S(6)*a**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**m*log(c*(a + b*x**S(3))**p), x), x, x**(m + S(1))*log(c*(a + b*x**S(3))**p)/(m + S(1)) - S(3)*b*p*x**(m + S(4))*hyper((S(1), m/S(3) + S(4)/3), (m/S(3) + S(7)/3,), -b*x**S(3)/a)/(a*(m**S(2) + S(5)*m + S(4))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(5)*log(c*(a + b*x**S(3))**p), x), x, -a**S(2)*p*log(a + b*x**S(3))/(S(6)*b**S(2)) + a*p*x**S(3)/(S(6)*b) - p*x**S(6)/S(12) + x**S(6)*log(c*(a + b*x**S(3))**p)/S(6), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(4)*log(c*(a + b*x**S(3))**p), x), x, a**(S(5)/3)*p*log(a**(S(1)/3) + b**(S(1)/3)*x)/(S(5)*b**(S(5)/3)) - a**(S(5)/3)*p*log(a**(S(2)/3) - a**(S(1)/3)*b**(S(1)/3)*x + b**(S(2)/3)*x**S(2))/(S(10)*b**(S(5)/3)) + sqrt(S(3))*a**(S(5)/3)*p*atan(sqrt(S(3))*(a**(S(1)/3) - S(2)*b**(S(1)/3)*x)/(S(3)*a**(S(1)/3)))/(S(5)*b**(S(5)/3)) + S(3)*a*p*x**S(2)/(S(10)*b) - S(3)*p*x**S(5)/S(25) + x**S(5)*log(c*(a + b*x**S(3))**p)/S(5), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)*log(c*(a + b*x**S(3))**p), x), x, -a**(S(4)/3)*p*log(a**(S(1)/3) + b**(S(1)/3)*x)/(S(4)*b**(S(4)/3)) + a**(S(4)/3)*p*log(a**(S(2)/3) - a**(S(1)/3)*b**(S(1)/3)*x + b**(S(2)/3)*x**S(2))/(S(8)*b**(S(4)/3)) + sqrt(S(3))*a**(S(4)/3)*p*atan(sqrt(S(3))*(a**(S(1)/3) - S(2)*b**(S(1)/3)*x)/(S(3)*a**(S(1)/3)))/(S(4)*b**(S(4)/3)) + S(3)*a*p*x/(S(4)*b) - S(3)*p*x**S(4)/S(16) + x**S(4)*log(c*(a + b*x**S(3))**p)/S(4), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)*log(c*(a + b*x**S(3))**p), x), x, -p*x**S(3)/S(3) + (a/S(3) + b*x**S(3)/S(3))*log(c*(a + b*x**S(3))**p)/b, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(c*(a + b*x**S(3))**p), x), x, -a**(S(2)/3)*p*log(a**(S(1)/3) + b**(S(1)/3)*x)/(S(2)*b**(S(2)/3)) + a**(S(2)/3)*p*log(a**(S(2)/3) - a**(S(1)/3)*b**(S(1)/3)*x + b**(S(2)/3)*x**S(2))/(S(4)*b**(S(2)/3)) - sqrt(S(3))*a**(S(2)/3)*p*atan(sqrt(S(3))*(a**(S(1)/3) - S(2)*b**(S(1)/3)*x)/(S(3)*a**(S(1)/3)))/(S(2)*b**(S(2)/3)) - S(3)*p*x**S(2)/S(4) + x**S(2)*log(c*(a + b*x**S(3))**p)/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(3))**p), x), x, a**(S(1)/3)*p*log(a**(S(1)/3) + b**(S(1)/3)*x)/b**(S(1)/3) - a**(S(1)/3)*p*log(a**(S(2)/3) - a**(S(1)/3)*b**(S(1)/3)*x + b**(S(2)/3)*x**S(2))/(S(2)*b**(S(1)/3)) - sqrt(S(3))*a**(S(1)/3)*p*atan(sqrt(S(3))*(a**(S(1)/3) - S(2)*b**(S(1)/3)*x)/(S(3)*a**(S(1)/3)))/b**(S(1)/3) - S(3)*p*x + x*log(c*(a + b*x**S(3))**p), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(3))**p)/x, x), x, p*polylog(S(2), (a + b*x**S(3))/a)/S(3) + log(c*(a + b*x**S(3))**p)*log(-b*x**S(3)/a)/S(3), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(3))**p)/x**S(2), x), x, -log(c*(a + b*x**S(3))**p)/x - b**(S(1)/3)*p*log(a**(S(1)/3) + b**(S(1)/3)*x)/a**(S(1)/3) + b**(S(1)/3)*p*log(a**(S(2)/3) - a**(S(1)/3)*b**(S(1)/3)*x + b**(S(2)/3)*x**S(2))/(S(2)*a**(S(1)/3)) - sqrt(S(3))*b**(S(1)/3)*p*atan(sqrt(S(3))*(a**(S(1)/3) - S(2)*b**(S(1)/3)*x)/(S(3)*a**(S(1)/3)))/a**(S(1)/3), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(3))**p)/x**S(3), x), x, -log(c*(a + b*x**S(3))**p)/(S(2)*x**S(2)) + b**(S(2)/3)*p*log(a**(S(1)/3) + b**(S(1)/3)*x)/(S(2)*a**(S(2)/3)) - b**(S(2)/3)*p*log(a**(S(2)/3) - a**(S(1)/3)*b**(S(1)/3)*x + b**(S(2)/3)*x**S(2))/(S(4)*a**(S(2)/3)) - sqrt(S(3))*b**(S(2)/3)*p*atan(sqrt(S(3))*(a**(S(1)/3) - S(2)*b**(S(1)/3)*x)/(S(3)*a**(S(1)/3)))/(S(2)*a**(S(2)/3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(3))**p)/x**S(4), x), x, -log(c*(a + b*x**S(3))**p)/(S(3)*x**S(3)) + b*p*log(x)/a - b*p*log(a + b*x**S(3))/(S(3)*a), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(3))**p)/x**S(5), x), x, -log(c*(a + b*x**S(3))**p)/(S(4)*x**S(4)) - S(3)*b*p/(S(4)*a*x) + b**(S(4)/3)*p*log(a**(S(1)/3) + b**(S(1)/3)*x)/(S(4)*a**(S(4)/3)) - b**(S(4)/3)*p*log(a**(S(2)/3) - a**(S(1)/3)*b**(S(1)/3)*x + b**(S(2)/3)*x**S(2))/(S(8)*a**(S(4)/3)) + sqrt(S(3))*b**(S(4)/3)*p*atan(sqrt(S(3))*(a**(S(1)/3) - S(2)*b**(S(1)/3)*x)/(S(3)*a**(S(1)/3)))/(S(4)*a**(S(4)/3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(3))**p)/x**S(6), x), x, -log(c*(a + b*x**S(3))**p)/(S(5)*x**S(5)) - S(3)*b*p/(S(10)*a*x**S(2)) - b**(S(5)/3)*p*log(a**(S(1)/3) + b**(S(1)/3)*x)/(S(5)*a**(S(5)/3)) + b**(S(5)/3)*p*log(a**(S(2)/3) - a**(S(1)/3)*b**(S(1)/3)*x + b**(S(2)/3)*x**S(2))/(S(10)*a**(S(5)/3)) + sqrt(S(3))*b**(S(5)/3)*p*atan(sqrt(S(3))*(a**(S(1)/3) - S(2)*b**(S(1)/3)*x)/(S(3)*a**(S(1)/3)))/(S(5)*a**(S(5)/3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(3))**p)/x**S(7), x), x, -log(c*(a + b*x**S(3))**p)/(S(6)*x**S(6)) - b*p/(S(6)*a*x**S(3)) - b**S(2)*p*log(x)/(S(2)*a**S(2)) + b**S(2)*p*log(a + b*x**S(3))/(S(6)*a**S(2)), expand=True, _diff=True, _numerical=True)
# Fixtures: x**m * log(c*(a + b*sqrt(x))**p) — fractional-power binomials —
# followed by x**m * log(c*(a + b/x)**p) — negative-power binomials.
# Symbolic m expects hyper; integer m expects finite telescoping sums of
# power/log terms (from repeated reduction in sqrt(x) or 1/x respectively).
assert rubi_test(rubi_integrate(x**m*log(c*(a + b*sqrt(x))**p), x), x, x**(m + S(1))*log(c*(a + b*sqrt(x))**p)/(m + S(1)) - b*p*x**(m + S(3)/2)*hyper((S(1), S(2)*m + S(3)), (S(2)*m + S(4),), -b*sqrt(x)/a)/(a*(S(2)*m**S(2) + S(5)*m + S(3))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)*log(c*(a + b*sqrt(x))**p), x), x, -a**S(8)*p*log(a + b*sqrt(x))/(S(4)*b**S(8)) + a**S(7)*p*sqrt(x)/(S(4)*b**S(7)) - a**S(6)*p*x/(S(8)*b**S(6)) + a**S(5)*p*x**(S(3)/2)/(S(12)*b**S(5)) - a**S(4)*p*x**S(2)/(S(16)*b**S(4)) + a**S(3)*p*x**(S(5)/2)/(S(20)*b**S(3)) - a**S(2)*p*x**S(3)/(S(24)*b**S(2)) + a*p*x**(S(7)/2)/(S(28)*b) - p*x**S(4)/S(32) + x**S(4)*log(c*(a + b*sqrt(x))**p)/S(4), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)*log(c*(a + b*sqrt(x))**p), x), x, -a**S(6)*p*log(a + b*sqrt(x))/(S(3)*b**S(6)) + a**S(5)*p*sqrt(x)/(S(3)*b**S(5)) - a**S(4)*p*x/(S(6)*b**S(4)) + a**S(3)*p*x**(S(3)/2)/(S(9)*b**S(3)) - a**S(2)*p*x**S(2)/(S(12)*b**S(2)) + a*p*x**(S(5)/2)/(S(15)*b) - p*x**S(3)/S(18) + x**S(3)*log(c*(a + b*sqrt(x))**p)/S(3), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(c*(a + b*sqrt(x))**p), x), x, -a**S(4)*p*log(a + b*sqrt(x))/(S(2)*b**S(4)) + a**S(3)*p*sqrt(x)/(S(2)*b**S(3)) - a**S(2)*p*x/(S(4)*b**S(2)) + a*p*x**(S(3)/2)/(S(6)*b) - p*x**S(2)/S(8) + x**S(2)*log(c*(a + b*sqrt(x))**p)/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*sqrt(x))**p), x), x, -a**S(2)*p*log(a + b*sqrt(x))/b**S(2) + a*p*sqrt(x)/b - p*x/S(2) + x*log(c*(a + b*sqrt(x))**p), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*sqrt(x))**p)/x, x), x, S(2)*p*polylog(S(2), (a + b*sqrt(x))/a) + S(2)*log(c*(a + b*sqrt(x))**p)*log(-b*sqrt(x)/a), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*sqrt(x))**p)/x**S(2), x), x, -log(c*(a + b*sqrt(x))**p)/x - b*p/(a*sqrt(x)) - b**S(2)*p*log(x)/(S(2)*a**S(2)) + b**S(2)*p*log(a + b*sqrt(x))/a**S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*sqrt(x))**p)/x**S(3), x), x, -log(c*(a + b*sqrt(x))**p)/(S(2)*x**S(2)) - b*p/(S(6)*a*x**(S(3)/2)) + b**S(2)*p/(S(4)*a**S(2)*x) - b**S(3)*p/(S(2)*a**S(3)*sqrt(x)) - b**S(4)*p*log(x)/(S(4)*a**S(4)) + b**S(4)*p*log(a + b*sqrt(x))/(S(2)*a**S(4)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*sqrt(x))**p)/x**S(4), x), x, -log(c*(a + b*sqrt(x))**p)/(S(3)*x**S(3)) - b*p/(S(15)*a*x**(S(5)/2)) + b**S(2)*p/(S(12)*a**S(2)*x**S(2)) - b**S(3)*p/(S(9)*a**S(3)*x**(S(3)/2)) + b**S(4)*p/(S(6)*a**S(4)*x) - b**S(5)*p/(S(3)*a**S(5)*sqrt(x)) - b**S(6)*p*log(x)/(S(6)*a**S(6)) + b**S(6)*p*log(a + b*sqrt(x))/(S(3)*a**S(6)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a + b*sqrt(x))/sqrt(x), x), x, -S(2)*sqrt(x) + S(2)*(a + b*sqrt(x))*log(a + b*sqrt(x))/b, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**m*log(c*(a + b/x)**p), x), x, p*x**(m + S(1))*hyper((S(1), m + S(1)), (m + S(2),), -a*x/b)/(m + S(1))**S(2) + x**(m + S(1))*log(c*(a + b/x)**p)/(m + S(1)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(4)*log(c*(a + b/x)**p), x), x, x**S(5)*log(c*(a + b/x)**p)/S(5) + b*p*x**S(4)/(S(20)*a) - b**S(2)*p*x**S(3)/(S(15)*a**S(2)) + b**S(3)*p*x**S(2)/(S(10)*a**S(3)) - b**S(4)*p*x/(S(5)*a**S(4)) + b**S(5)*p*log(a*x + b)/(S(5)*a**S(5)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)*log(c*(a + b/x)**p), x), x, x**S(4)*log(c*(a + b/x)**p)/S(4) + b*p*x**S(3)/(S(12)*a) - b**S(2)*p*x**S(2)/(S(8)*a**S(2)) + b**S(3)*p*x/(S(4)*a**S(3)) - b**S(4)*p*log(a*x + b)/(S(4)*a**S(4)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)*log(c*(a + b/x)**p), x), x, x**S(3)*log(c*(a + b/x)**p)/S(3) + b*p*x**S(2)/(S(6)*a) - b**S(2)*p*x/(S(3)*a**S(2)) + b**S(3)*p*log(a*x + b)/(S(3)*a**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(c*(a + b/x)**p), x), x, x**S(2)*log(c*(a + b/x)**p)/S(2) + b*p*x/(S(2)*a) - b**S(2)*p*log(a*x + b)/(S(2)*a**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b/x)**p), x), x, x*log(c*(a + b/x)**p) + b*p*log(a*x + b)/a, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b/x)**p)/x, x), x, -p*polylog(S(2), (a + b/x)/a) - log(c*(a + b/x)**p)*log(-b/(a*x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b/x)**p)/x**S(2), x), x, p/x - (a + b/x)*log(c*(a + b/x)**p)/b, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b/x)**p)/x**S(3), x), x, -a**S(2)*p*log(x)/(S(2)*b**S(2)) + a**S(2)*p*log(a*x + b)/(S(2)*b**S(2)) - a*p/(S(2)*b*x) + p/(S(4)*x**S(2)) - log(c*(a + b/x)**p)/(S(2)*x**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b/x)**p)/x**S(4), x), x, a**S(3)*p*log(x)/(S(3)*b**S(3)) - a**S(3)*p*log(a*x + b)/(S(3)*b**S(3)) + a**S(2)*p/(S(3)*b**S(2)*x) - a*p/(S(6)*b*x**S(2)) + p/(S(9)*x**S(3)) - log(c*(a + b/x)**p)/(S(3)*x**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b/x)**p)/x**S(5), x), x, -a**S(4)*p*log(x)/(S(4)*b**S(4)) + a**S(4)*p*log(a*x + b)/(S(4)*b**S(4)) - a**S(3)*p/(S(4)*b**S(3)*x) + a**S(2)*p/(S(8)*b**S(2)*x**S(2)) - a*p/(S(12)*b*x**S(3)) + p/(S(16)*x**S(4)) - log(c*(a + b/x)**p)/(S(4)*x**S(4)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(b/x + S(1))/x, x), x, polylog(S(2), -b/x), expand=True, _diff=True, _numerical=True)
# Fixtures: x**m * log(c*(a + b/x**2)**p) — inverse-square binomials — then
# the fully general x**m * log(c*(a + b*x**n)**p) with symbolic exponent n,
# whose closed forms are hypergeometric except for the 1/x case (polylog).
assert rubi_test(rubi_integrate(x**m*log(c*(a + b/x**S(2))**p), x), x, S(2)*p*x**(m + S(1))*hyper((S(1), m/S(2) + S(1)/2), (m/S(2) + S(3)/2,), -a*x**S(2)/b)/(m + S(1))**S(2) + x**(m + S(1))*log(c*(a + b/x**S(2))**p)/(m + S(1)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(4)*log(c*(a + b/x**S(2))**p), x), x, x**S(5)*log(c*(a + b/x**S(2))**p)/S(5) + S(2)*b*p*x**S(3)/(S(15)*a) - S(2)*b**S(2)*p*x/(S(5)*a**S(2)) + S(2)*b**(S(5)/2)*p*atan(sqrt(a)*x/sqrt(b))/(S(5)*a**(S(5)/2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)*log(c*(a + b/x**S(2))**p), x), x, x**S(4)*log(c*(a + b/x**S(2))**p)/S(4) + b*p*x**S(2)/(S(4)*a) - b**S(2)*p*log(a*x**S(2) + b)/(S(4)*a**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)*log(c*(a + b/x**S(2))**p), x), x, x**S(3)*log(c*(a + b/x**S(2))**p)/S(3) + S(2)*b*p*x/(S(3)*a) - S(2)*b**(S(3)/2)*p*atan(sqrt(a)*x/sqrt(b))/(S(3)*a**(S(3)/2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(c*(a + b/x**S(2))**p), x), x, x**S(2)*log(c*(a + b/x**S(2))**p)/S(2) + b*p*log(a*x**S(2) + b)/(S(2)*a), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b/x**S(2))**p), x), x, x*log(c*(a + b/x**S(2))**p) + S(2)*sqrt(b)*p*atan(sqrt(a)*x/sqrt(b))/sqrt(a), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b/x**S(2))**p)/x, x), x, -p*polylog(S(2), (a + b/x**S(2))/a)/S(2) - log(c*(a + b/x**S(2))**p)*log(-b/(a*x**S(2)))/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b/x**S(2))**p)/x**S(2), x), x, S(2)*sqrt(a)*p*atan(sqrt(a)*x/sqrt(b))/sqrt(b) + S(2)*p/x - log(c*(a + b/x**S(2))**p)/x, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b/x**S(2))**p)/x**S(3), x), x, p/(S(2)*x**S(2)) - (a/S(2) + b/(S(2)*x**S(2)))*log(c*(a + b/x**S(2))**p)/b, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b/x**S(2))**p)/x**S(4), x), x, -S(2)*a**(S(3)/2)*p*atan(sqrt(a)*x/sqrt(b))/(S(3)*b**(S(3)/2)) - S(2)*a*p/(S(3)*b*x) + S(2)*p/(S(9)*x**S(3)) - log(c*(a + b/x**S(2))**p)/(S(3)*x**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**m*log(c*(a + b*x**n)**p), x), x, x**(m + S(1))*log(c*(a + b*x**n)**p)/(m + S(1)) - b*n*p*x**(m + n + S(1))*hyper((S(1), (m + n + S(1))/n), ((m + S(2)*n + S(1))/n,), -b*x**n/a)/(a*(m + S(1))*(m + n + S(1))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)*log(c*(a + b*x**n)**p), x), x, x**S(3)*log(c*(a + b*x**n)**p)/S(3) - b*n*p*x**(n + S(3))*hyper((S(1), (n + S(3))/n), (S(2) + S(3)/n,), -b*x**n/a)/(S(3)*a*(n + S(3))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(c*(a + b*x**n)**p), x), x, x**S(2)*log(c*(a + b*x**n)**p)/S(2) - b*n*p*x**(n + S(2))*hyper((S(1), (n + S(2))/n), (S(2) + S(2)/n,), -b*x**n/a)/(S(2)*a*(n + S(2))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**n)**p), x), x, x*log(c*(a + b*x**n)**p) - b*n*p*x**(n + S(1))*hyper((S(1), S(1) + S(1)/n), (S(2) + S(1)/n,), -b*x**n/a)/(a*(n + S(1))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**n)**p)/x, x), x, p*polylog(S(2), (a + b*x**n)/a)/n + log(c*(a + b*x**n)**p)*log(-b*x**n/a)/n, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**n)**p)/x**S(2), x), x, -log(c*(a + b*x**n)**p)/x - b*n*p*x**(n + S(-1))*hyper((S(1), (n + S(-1))/n), (S(2) - S(1)/n,), -b*x**n/a)/(a*(-n + S(1))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**n)**p)/x**S(3), x), x, -log(c*(a + b*x**n)**p)/(S(2)*x**S(2)) - b*n*p*x**(n + S(-2))*hyper((S(1), (n + S(-2))/n), (S(2) - S(2)/n,), -b*x**n/a)/(S(2)*a*(-n + S(2))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**n)**p)/x**S(4), x), x, -log(c*(a + b*x**n)**p)/(S(3)*x**S(3)) - b*n*p*x**(n + S(-3))*hyper((S(1), (n + S(-3))/n), (S(2) - S(3)/n,), -b*x**n/a)/(S(3)*a*(-n + S(3))), expand=True, _diff=True, _numerical=True)
# Fixtures: (d + e*x)**m multiplied or divided by log(c*(a + b*x)**p) and
# log(c*(a + b*x**2)**p). The discriminant-like factor (-a*e + b*d) recurs
# throughout the linear cases; the quadratic cases introduce sqrt(-a) terms
# from factoring a + b*x**2 over its complex roots.
assert rubi_test(rubi_integrate((d + e*x)**m*log(c*(a + b*x)**p), x), x, b*p*(d + e*x)**(m + S(2))*hyper((S(1), m + S(2)), (m + S(3),), b*(d + e*x)/(-a*e + b*d))/(e*(m + S(1))*(m + S(2))*(-a*e + b*d)) + (d + e*x)**(m + S(1))*log(c*(a + b*x)**p)/(e*(m + S(1))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((d + e*x)**S(3)*log(c*(a + b*x)**p), x), x, -p*(d + e*x)**S(4)/(S(16)*e) + (d + e*x)**S(4)*log(c*(a + b*x)**p)/(S(4)*e) - p*(d + e*x)**S(3)*(-a*e/S(12) + b*d/S(12))/(b*e) - p*(d + e*x)**S(2)*(-a*e + b*d)**S(2)/(S(8)*b**S(2)*e) - p*x*(-a*e + b*d)**S(3)/(S(4)*b**S(3)) - p*(-a*e + b*d)**S(4)*log(a + b*x)/(S(4)*b**S(4)*e), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((d + e*x)**S(2)*log(c*(a + b*x)**p), x), x, -p*(d + e*x)**S(3)/(S(9)*e) + (d + e*x)**S(3)*log(c*(a + b*x)**p)/(S(3)*e) - p*(d + e*x)**S(2)*(-a*e/S(6) + b*d/S(6))/(b*e) - p*x*(-a*e + b*d)**S(2)/(S(3)*b**S(2)) - p*(-a*e + b*d)**S(3)*log(a + b*x)/(S(3)*b**S(3)*e), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((d + e*x)*log(c*(a + b*x)**p), x), x, -p*(d + e*x)**S(2)/(S(4)*e) + (d + e*x)**S(2)*log(c*(a + b*x)**p)/(S(2)*e) + p*x*(a*e/S(2) - b*d/S(2))/b - p*(-a*e + b*d)**S(2)*log(a + b*x)/(S(2)*b**S(2)*e), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x)**p), x), x, -p*x + (a + b*x)*log(c*(a + b*x)**p)/b, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x)**p)/(d + e*x), x), x, p*polylog(S(2), -e*(a + b*x)/(-a*e + b*d))/e + log(c*(a + b*x)**p)*log(b*(d + e*x)/(-a*e + b*d))/e, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x)**p)/(d + e*x)**S(2), x), x, b*p*log(a + b*x)/(e*(-a*e + b*d)) - b*p*log(d + e*x)/(e*(-a*e + b*d)) - log(c*(a + b*x)**p)/(e*(d + e*x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x)**p)/(d + e*x)**S(3), x), x, b**S(2)*p*log(a + b*x)/(S(2)*e*(-a*e + b*d)**S(2)) - b**S(2)*p*log(d + e*x)/(S(2)*e*(-a*e + b*d)**S(2)) + b*p/(S(2)*e*(d + e*x)*(-a*e + b*d)) - log(c*(a + b*x)**p)/(S(2)*e*(d + e*x)**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x)**p)/(d + e*x)**S(4), x), x, b**S(3)*p*log(a + b*x)/(S(3)*e*(-a*e + b*d)**S(3)) - b**S(3)*p*log(d + e*x)/(S(3)*e*(-a*e + b*d)**S(3)) + b**S(2)*p/(S(3)*e*(d + e*x)*(-a*e + b*d)**S(2)) + b*p/(S(6)*e*(d + e*x)**S(2)*(-a*e + b*d)) - log(c*(a + b*x)**p)/(S(3)*e*(d + e*x)**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((d + e*x)**m*log(c*(a + b*x**S(2))**p), x), x, sqrt(b)*p*(d + e*x)**(m + S(2))*hyper((S(1), m + S(2)), (m + S(3),), sqrt(b)*(d + e*x)/(sqrt(b)*d + e*sqrt(-a)))/(e*(m + S(1))*(m + S(2))*(sqrt(b)*d + e*sqrt(-a))) + sqrt(b)*p*(d + e*x)**(m + S(2))*hyper((S(1), m + S(2)), (m + S(3),), sqrt(b)*(d + e*x)/(sqrt(b)*d - e*sqrt(-a)))/(e*(m + S(1))*(m + S(2))*(sqrt(b)*d - e*sqrt(-a))) + (d + e*x)**(m + S(1))*log(c*(a + b*x**S(2))**p)/(e*(m + S(1))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((d + e*x)**S(3)*log(c*(a + b*x**S(2))**p), x), x, S(2)*sqrt(a)*d*p*(-a*e**S(2) + b*d**S(2))*atan(sqrt(b)*x/sqrt(a))/b**(S(3)/2) - S(2)*d*e**S(2)*p*x**S(3)/S(3) - e**S(3)*p*x**S(4)/S(8) + (d + e*x)**S(4)*log(c*(a + b*x**S(2))**p)/(S(4)*e) - S(2)*d*p*x*(-a*e**S(2) + b*d**S(2))/b - e*p*x**S(2)*(-a*e**S(2) + S(6)*b*d**S(2))/(S(4)*b) - p*(a**S(2)*e**S(4)/S(4) - S(3)*a*b*d**S(2)*e**S(2)/S(2) + b**S(2)*d**S(4)/S(4))*log(a + b*x**S(2))/(b**S(2)*e), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((d + e*x)**S(2)*log(c*(a + b*x**S(2))**p), x), x, sqrt(a)*p*(-S(2)*a*e**S(2)/S(3) + S(2)*b*d**S(2))*atan(sqrt(b)*x/sqrt(a))/b**(S(3)/2) - d*e*p*x**S(2) - S(2)*e**S(2)*p*x**S(3)/S(9) + (d + e*x)**S(3)*log(c*(a + b*x**S(2))**p)/(S(3)*e) - d*p*(-S(3)*a*e**S(2) + b*d**S(2))*log(a + b*x**S(2))/(S(3)*b*e) + p*x*(S(2)*a*e**S(2)/S(3) - S(2)*b*d**S(2))/b, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((d + e*x)*log(c*(a + b*x**S(2))**p), x), x, S(2)*sqrt(a)*d*p*atan(sqrt(b)*x/sqrt(a))/sqrt(b) - S(2)*d*p*x - e*p*x**S(2)/S(2) + (d + e*x)**S(2)*log(c*(a + b*x**S(2))**p)/(S(2)*e) - p*(-a*e**S(2)/S(2) + b*d**S(2)/S(2))*log(a + b*x**S(2))/(b*e), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(2))**p), x), x, S(2)*sqrt(a)*p*atan(sqrt(b)*x/sqrt(a))/sqrt(b) - S(2)*p*x + x*log(c*(a + b*x**S(2))**p), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(2))**p)/(d + e*x), x), x, -p*log(-e*(sqrt(b)*x + sqrt(-a))/(sqrt(b)*d - e*sqrt(-a)))*log(d + e*x)/e - p*log(e*(-sqrt(b)*x + sqrt(-a))/(sqrt(b)*d + e*sqrt(-a)))*log(d + e*x)/e - p*polylog(S(2), sqrt(b)*(d + e*x)/(sqrt(b)*d - e*sqrt(-a)))/e - p*polylog(S(2), sqrt(b)*(d + e*x)/(sqrt(b)*d + e*sqrt(-a)))/e + log(c*(a + b*x**S(2))**p)*log(d + e*x)/e, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(2))**p)/(d + e*x)**S(2), x), x, S(2)*sqrt(a)*sqrt(b)*p*atan(sqrt(b)*x/sqrt(a))/(a*e**S(2) + b*d**S(2)) + b*d*p*log(a + b*x**S(2))/(e*(a*e**S(2) + b*d**S(2))) - S(2)*b*d*p*log(d + e*x)/(e*(a*e**S(2) + b*d**S(2))) - log(c*(a + b*x**S(2))**p)/(e*(d + e*x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(2))**p)/(d + e*x)**S(3), x), x, S(2)*sqrt(a)*b**(S(3)/2)*d*p*atan(sqrt(b)*x/sqrt(a))/(a*e**S(2) + b*d**S(2))**S(2) + b*d*p/(e*(d + e*x)*(a*e**S(2) + b*d**S(2))) + b*p*(-a*e**S(2) + b*d**S(2))*log(a + b*x**S(2))/(S(2)*e*(a*e**S(2) + b*d**S(2))**S(2)) - b*p*(-a*e**S(2) + b*d**S(2))*log(d + e*x)/(e*(a*e**S(2) + b*d**S(2))**S(2)) - log(c*(a + b*x**S(2))**p)/(S(2)*e*(d + e*x)**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((d + e*x)**m*log(c*(a + b*x**S(3))**p), x), x, b**(S(1)/3)*p*(d + e*x)**(m + S(2))*hyper((S(1), m + S(2)), (m + S(3),), b**(S(1)/3)*(d + e*x)/(-(S(-1))**(S(2)/3)*a**(S(1)/3)*e + b**(S(1)/3)*d))/(e*(m + S(1))*(m + S(2))*(-(S(-1))**(S(2)/3)*a**(S(1)/3)*e + b**(S(1)/3)*d)) + b**(S(1)/3)*p*(d + e*x)**(m + S(2))*hyper((S(1), m + S(2)), (m + S(3),), b**(S(1)/3)*(d + e*x)/((S(-1))**(S(1)/3)*a**(S(1)/3)*e + b**(S(1)/3)*d))/(e*(m + S(1))*(m + S(2))*((S(-1))**(S(1)/3)*a**(S(1)/3)*e + b**(S(1)/3)*d)) + b**(S(1)/3)*p*(d + e*x)**(m + S(2))*hyper((S(1), m + S(2)), (m + S(3),), b**(S(1)/3)*(d + e*x)/(-a**(S(1)/3)*e + b**(S(1)/3)*d))/(e*(m + S(1))*(m + S(2))*(-a**(S(1)/3)*e + b**(S(1)/3)*d)) + (d + e*x)**(m + S(1))*log(c*(a + b*x**S(3))**p)/(e*(m + S(1))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((d + e*x)**S(3)*log(c*(a + b*x**S(3))**p), x), x, a**(S(1)/3)*p*(-S(6)*a**(S(1)/3)*b**(S(2)/3)*d**S(2)*e - a*e**S(3) + S(4)*b*d**S(3))*log(a**(S(1)/3) + b**(S(1)/3)*x)/(S(4)*b**(S(4)/3)) - a**(S(1)/3)*p*(-S(6)*a**(S(1)/3)*b**(S(2)/3)*d**S(2)*e - a*e**S(3) + S(4)*b*d**S(3))*log(a**(S(2)/3) - a**(S(1)/3)*b**(S(1)/3)*x + b**(S(2)/3)*x**S(2))/(S(8)*b**(S(4)/3)) - sqrt(S(3))*a**(S(1)/3)*p*(S(6)*a**(S(1)/3)*b**(S(2)/3)*d**S(2)*e - a*e**S(3) + S(4)*b*d**S(3))*atan(sqrt(S(3))*(a**(S(1)/3) - S(2)*b**(S(1)/3)*x)/(S(3)*a**(S(1)/3)))/(S(4)*b**(S(4)/3)) - S(9)*d**S(2)*e*p*x**S(2)/S(4) - d*e**S(2)*p*x**S(3) - S(3)*e**S(3)*p*x**S(4)/S(16) + (d + e*x)**S(4)*log(c*(a + b*x**S(3))**p)/(S(4)*e) - d*p*(-S(4)*a*e**S(3) + b*d**S(3))*log(a + b*x**S(3))/(S(4)*b*e) + p*x*(S(3)*a*e**S(3)/S(4) - S(3)*b*d**S(3))/b, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((d + e*x)**S(2)*log(c*(a + b*x**S(3))**p), x), x, a**(S(1)/3)*d*p*(-a**(S(1)/3)*e + b**(S(1)/3)*d)*log(a**(S(1)/3) + b**(S(1)/3)*x)/b**(S(2)/3) - a**(S(1)/3)*d*p*(-a**(S(1)/3)*e + b**(S(1)/3)*d)*log(a**(S(2)/3) - a**(S(1)/3)*b**(S(1)/3)*x + b**(S(2)/3)*x**S(2))/(S(2)*b**(S(2)/3)) - sqrt(S(3))*a**(S(1)/3)*d*p*(a**(S(1)/3)*e + b**(S(1)/3)*d)*atan(sqrt(S(3))*(a**(S(1)/3) - S(2)*b**(S(1)/3)*x)/(S(3)*a**(S(1)/3)))/b**(S(2)/3) - S(3)*d**S(2)*p*x - S(3)*d*e*p*x**S(2)/S(2) - e**S(2)*p*x**S(3)/S(3) + (d + e*x)**S(3)*log(c*(a + b*x**S(3))**p)/(S(3)*e) - p*(-a*e**S(3)/S(3) + b*d**S(3)/S(3))*log(a + b*x**S(3))/(b*e), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((d + e*x)*log(c*(a + b*x**S(3))**p), x), x, a**(S(1)/3)*p*(-a**(S(1)/3)*e + S(2)*b**(S(1)/3)*d)*log(a**(S(1)/3) + b**(S(1)/3)*x)/(S(2)*b**(S(2)/3)) - a**(S(1)/3)*p*(-a**(S(1)/3)*e + S(2)*b**(S(1)/3)*d)*log(a**(S(2)/3) - a**(S(1)/3)*b**(S(1)/3)*x + b**(S(2)/3)*x**S(2))/(S(4)*b**(S(2)/3)) - sqrt(S(3))*a**(S(1)/3)*p*(a**(S(1)/3)*e + S(2)*b**(S(1)/3)*d)*atan(sqrt(S(3))*(a**(S(1)/3) - S(2)*b**(S(1)/3)*x)/(S(3)*a**(S(1)/3)))/(S(2)*b**(S(2)/3)) - d**S(2)*p*log(a + b*x**S(3))/(S(2)*e) - S(3)*d*p*x - S(3)*e*p*x**S(2)/S(4) + (d + e*x)**S(2)*log(c*(a + b*x**S(3))**p)/(S(2)*e), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(3))**p), x), x, a**(S(1)/3)*p*log(a**(S(1)/3) + b**(S(1)/3)*x)/b**(S(1)/3) - a**(S(1)/3)*p*log(a**(S(2)/3) - a**(S(1)/3)*b**(S(1)/3)*x + b**(S(2)/3)*x**S(2))/(S(2)*b**(S(1)/3)) - sqrt(S(3))*a**(S(1)/3)*p*atan(sqrt(S(3))*(a**(S(1)/3) - S(2)*b**(S(1)/3)*x)/(S(3)*a**(S(1)/3)))/b**(S(1)/3) - S(3)*p*x + x*log(c*(a + b*x**S(3))**p), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(3))**p)/(d + e*x), x), x, -p*log(-e*(a**(S(1)/3) + b**(S(1)/3)*x)/(-a**(S(1)/3)*e + b**(S(1)/3)*d))*log(d + e*x)/e - p*log(-e*((S(-1))**(S(2)/3)*a**(S(1)/3) + b**(S(1)/3)*x)/(-(S(-1))**(S(2)/3)*a**(S(1)/3)*e + b**(S(1)/3)*d))*log(d + e*x)/e - p*log((S(-1))**(S(1)/3)*e*(a**(S(1)/3) + (S(-1))**(S(2)/3)*b**(S(1)/3)*x)/((S(-1))**(S(1)/3)*a**(S(1)/3)*e + b**(S(1)/3)*d))*log(d + e*x)/e - p*polylog(S(2), b**(S(1)/3)*(d + e*x)/(-a**(S(1)/3)*e + b**(S(1)/3)*d))/e - p*polylog(S(2), b**(S(1)/3)*(d + e*x)/((S(-1))**(S(1)/3)*a**(S(1)/3)*e + b**(S(1)/3)*d))/e - p*polylog(S(2), b**(S(1)/3)*(d + e*x)/(-(S(-1))**(S(2)/3)*a**(S(1)/3)*e + b**(S(1)/3)*d))/e + log(c*(a + b*x**S(3))**p)*log(d + e*x)/e, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(3))**p)/(d + e*x)**S(2), x), x, a**(S(1)/3)*b**(S(1)/3)*p*(a**(S(1)/3)*e + b**(S(1)/3)*d)*log(a**(S(1)/3) + b**(S(1)/3)*x)/(-a*e**S(3) + b*d**S(3)) - a**(S(1)/3)*b**(S(1)/3)*p*(a**(S(1)/3)*e + b**(S(1)/3)*d)*log(a**(S(2)/3) - a**(S(1)/3)*b**(S(1)/3)*x + b**(S(2)/3)*x**S(2))/(S(2)*(-a*e**S(3) + b*d**S(3))) - sqrt(S(3))*a**(S(1)/3)*b**(S(1)/3)*p*atan(sqrt(S(3))*(a**(S(1)/3) - S(2)*b**(S(1)/3)*x)/(S(3)*a**(S(1)/3)))/(a**(S(2)/3)*e**S(2) + a**(S(1)/3)*b**(S(1)/3)*d*e + b**(S(2)/3)*d**S(2)) + b*d**S(2)*p*log(a + b*x**S(3))/(e*(-a*e**S(3) + b*d**S(3))) - S(3)*b*d**S(2)*p*log(d + e*x)/(e*(-a*e**S(3) + b*d**S(3))) - log(c*(a + b*x**S(3))**p)/(e*(d + e*x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(3))**p)/(d + e*x)**S(3), x), x, -sqrt(S(3))*a**(S(1)/3)*b**(S(2)/3)*p*(-S(3)*a**(S(1)/3)*b**(S(2)/3)*d**S(2)*e + a*e**S(3) + S(2)*b*d**S(3))*atan(sqrt(S(3))*(a**(S(1)/3) - S(2)*b**(S(1)/3)*x)/(S(3)*a**(S(1)/3)))/(S(2)*(-a*e**S(3) + b*d**S(3))**S(2)) + a**(S(1)/3)*b**(S(2)/3)*p*(S(3)*a**(S(1)/3)*b**(S(2)/3)*d**S(2)*e + a*e**S(3) + S(2)*b*d**S(3))*log(a**(S(1)/3) + b**(S(1)/3)*x)/(S(2)*(-a*e**S(3) + b*d**S(3))**S(2)) - a**(S(1)/3)*b**(S(2)/3)*p*(S(3)*a**(S(1)/3)*b**(S(2)/3)*d**S(2)*e + a*e**S(3) + S(2)*b*d**S(3))*log(a**(S(2)/3) - a**(S(1)/3)*b**(S(1)/3)*x + b**(S(2)/3)*x**S(2))/(S(4)*(-a*e**S(3) + b*d**S(3))**S(2)) + S(3)*b*d**S(2)*p/(S(2)*e*(d + e*x)*(-a*e**S(3) + b*d**S(3))) + b*d*p*(S(2)*a*e**S(3) + b*d**S(3))*log(a + b*x**S(3))/(S(2)*e*(-a*e**S(3) + b*d**S(3))**S(2)) - S(3)*b*d*p*(S(2)*a*e**S(3) + b*d**S(3))*log(d + e*x)/(S(2)*e*(-a*e**S(3) + b*d**S(3))**S(2)) - log(c*(a + b*x**S(3))**p)/(S(2)*e*(d + e*x)**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a + b/x)/(c + d*x), x), x, log(-d*x/c)*log(c + d*x)/d - log(-d*(a*x + b)/(a*c - b*d))*log(c + d*x)/d + log(a + b/x)*log(c + d*x)/d + polylog(S(2), (c + d*x)/c)/d - polylog(S(2), a*(c + d*x)/(a*c - b*d))/d, expand=True, _diff=True, _numerical=True)
# recursion sympy and mathematica assert rubi_test(rubi_integrate(log(a + b*x**n)/(c + d*x), x), x, -b*n*Integral(x**(n + S(-1))*log(c + d*x)/(a + b*x**n), x)/d + log(a + b*x**n)*log(c + d*x)/d, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*x)/(c + d*x), x), x, log(a*x)*log((c + d*x)/c)/d + polylog(S(2), -d*x/c)/d, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a/x)/(c + d*x), x), x, log(a/x)*log((c + d*x)/c)/d - polylog(S(2), -d*x/c)/d, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*x**n)/(c + d*x), x), x, n*polylog(S(2), -d*x/c)/d + log(a*x**n)*log((c + d*x)/c)/d, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x**n)/(a + b*x), x), x, n*polylog(S(2), -b*x/a)/b + log(x**n)*log((a + b*x)/a)/b, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)*log(c*(a + b*x)**p)/(d + e*x), x), x, a**S(3)*p*log(a + b*x)/(S(3)*b**S(3)*e) + a**S(2)*d*p*log(a + b*x)/(S(2)*b**S(2)*e**S(2)) - a**S(2)*p*x/(S(3)*b**S(2)*e) - a*d*p*x/(S(2)*b*e**S(2)) + a*p*x**S(2)/(S(6)*b*e) - d**S(3)*p*polylog(S(2), -e*(a + b*x)/(-a*e + b*d))/e**S(4) - d**S(3)*log(c*(a + b*x)**p)*log(b*(d + e*x)/(-a*e + b*d))/e**S(4) - d**S(2)*p*x/e**S(3) + d*p*x**S(2)/(S(4)*e**S(2)) - d*x**S(2)*log(c*(a + b*x)**p)/(S(2)*e**S(2)) - p*x**S(3)/(S(9)*e) + x**S(3)*log(c*(a + b*x)**p)/(S(3)*e) + d**S(2)*(a + b*x)*log(c*(a + b*x)**p)/(b*e**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)*log(c*(a + b*x)**p)/(d + e*x), x), x, -a**S(2)*p*log(a + b*x)/(S(2)*b**S(2)*e) + a*p*x/(S(2)*b*e) + d**S(2)*p*polylog(S(2), -e*(a + b*x)/(-a*e + b*d))/e**S(3) + d**S(2)*log(c*(a + b*x)**p)*log(b*(d + e*x)/(-a*e + b*d))/e**S(3) + d*p*x/e**S(2) - p*x**S(2)/(S(4)*e) + x**S(2)*log(c*(a + b*x)**p)/(S(2)*e) - d*(a + b*x)*log(c*(a + b*x)**p)/(b*e**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(c*(a + b*x)**p)/(d + e*x), x), x, -d*p*polylog(S(2), -e*(a + b*x)/(-a*e + b*d))/e**S(2) - d*log(c*(a + b*x)**p)*log(b*(d + e*x)/(-a*e + b*d))/e**S(2) - p*x/e + (a + b*x)*log(c*(a + b*x)**p)/(b*e), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x)**p)/(d + e*x), x), x, p*polylog(S(2), -e*(a + b*x)/(-a*e + b*d))/e + log(c*(a + b*x)**p)*log(b*(d + e*x)/(-a*e + b*d))/e, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x)**p)/(x*(d + e*x)), x), x, p*polylog(S(2), (a + b*x)/a)/d - p*polylog(S(2), -e*(a + b*x)/(-a*e + b*d))/d + log(c*(a + b*x)**p)*log(-b*x/a)/d - log(c*(a + b*x)**p)*log(b*(d + e*x)/(-a*e + b*d))/d, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x)**p)/(x**S(2)*(d + e*x)), x), x, -log(c*(a + b*x)**p)/(d*x) - e*p*polylog(S(2), (a + b*x)/a)/d**S(2) + e*p*polylog(S(2), -e*(a + b*x)/(-a*e + b*d))/d**S(2) - e*log(c*(a + b*x)**p)*log(-b*x/a)/d**S(2) + e*log(c*(a + b*x)**p)*log(b*(d + e*x)/(-a*e + b*d))/d**S(2) + b*p*log(x)/(a*d) - b*p*log(a + b*x)/(a*d), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x)**p)/(x**S(3)*(d + e*x)), x), x, -log(c*(a + b*x)**p)/(S(2)*d*x**S(2)) + e*log(c*(a + b*x)**p)/(d**S(2)*x) + e**S(2)*p*polylog(S(2), (a + b*x)/a)/d**S(3) - e**S(2)*p*polylog(S(2), -e*(a + b*x)/(-a*e + b*d))/d**S(3) + e**S(2)*log(c*(a + b*x)**p)*log(-b*x/a)/d**S(3) - e**S(2)*log(c*(a + b*x)**p)*log(b*(d + e*x)/(-a*e + b*d))/d**S(3) - b*p/(S(2)*a*d*x) - b*e*p*log(x)/(a*d**S(2)) + b*e*p*log(a + b*x)/(a*d**S(2)) - b**S(2)*p*log(x)/(S(2)*a**S(2)*d) + b**S(2)*p*log(a + b*x)/(S(2)*a**S(2)*d), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)*log(c*(a + b*x**S(2))**p)/(d + e*x), x), x, -S(2)*a**(S(3)/2)*p*atan(sqrt(b)*x/sqrt(a))/(S(3)*b**(S(3)/2)*e) + S(2)*sqrt(a)*d**S(2)*p*atan(sqrt(b)*x/sqrt(a))/(sqrt(b)*e**S(3)) + S(2)*a*p*x/(S(3)*b*e) + d**S(3)*p*log(-e*(sqrt(b)*x + sqrt(-a))/(sqrt(b)*d - e*sqrt(-a)))*log(d + e*x)/e**S(4) + d**S(3)*p*log(e*(-sqrt(b)*x + sqrt(-a))/(sqrt(b)*d + e*sqrt(-a)))*log(d + e*x)/e**S(4) + d**S(3)*p*polylog(S(2), sqrt(b)*(d + e*x)/(sqrt(b)*d - e*sqrt(-a)))/e**S(4) + d**S(3)*p*polylog(S(2), sqrt(b)*(d + e*x)/(sqrt(b)*d + e*sqrt(-a)))/e**S(4) - d**S(3)*log(c*(a + b*x**S(2))**p)*log(d + e*x)/e**S(4) - S(2)*d**S(2)*p*x/e**S(3) + d**S(2)*x*log(c*(a + b*x**S(2))**p)/e**S(3) + d*p*x**S(2)/(S(2)*e**S(2)) - S(2)*p*x**S(3)/(S(9)*e) + x**S(3)*log(c*(a + b*x**S(2))**p)/(S(3)*e) - d*(a + b*x**S(2))*log(c*(a + b*x**S(2))**p)/(S(2)*b*e**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)*log(c*(a + b*x**S(2))**p)/(d + e*x), x), x, -S(2)*sqrt(a)*d*p*atan(sqrt(b)*x/sqrt(a))/(sqrt(b)*e**S(2)) - d**S(2)*p*log(-e*(sqrt(b)*x + sqrt(-a))/(sqrt(b)*d - e*sqrt(-a)))*log(d + e*x)/e**S(3) - d**S(2)*p*log(e*(-sqrt(b)*x + sqrt(-a))/(sqrt(b)*d + e*sqrt(-a)))*log(d + e*x)/e**S(3) - d**S(2)*p*polylog(S(2), sqrt(b)*(d + e*x)/(sqrt(b)*d - e*sqrt(-a)))/e**S(3) - d**S(2)*p*polylog(S(2), sqrt(b)*(d + e*x)/(sqrt(b)*d + e*sqrt(-a)))/e**S(3) + d**S(2)*log(c*(a + b*x**S(2))**p)*log(d + e*x)/e**S(3) + S(2)*d*p*x/e**S(2) - d*x*log(c*(a + b*x**S(2))**p)/e**S(2) - p*x**S(2)/(S(2)*e) + (a/S(2) + b*x**S(2)/S(2))*log(c*(a + b*x**S(2))**p)/(b*e), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(c*(a + b*x**S(2))**p)/(d + e*x), x), x, S(2)*sqrt(a)*p*atan(sqrt(b)*x/sqrt(a))/(sqrt(b)*e) + d*p*log(-e*(sqrt(b)*x + sqrt(-a))/(sqrt(b)*d - e*sqrt(-a)))*log(d + e*x)/e**S(2) + d*p*log(e*(-sqrt(b)*x + sqrt(-a))/(sqrt(b)*d + e*sqrt(-a)))*log(d + e*x)/e**S(2) + d*p*polylog(S(2), sqrt(b)*(d + e*x)/(sqrt(b)*d - e*sqrt(-a)))/e**S(2) + d*p*polylog(S(2), sqrt(b)*(d + e*x)/(sqrt(b)*d + e*sqrt(-a)))/e**S(2) - d*log(c*(a + b*x**S(2))**p)*log(d + e*x)/e**S(2) - S(2)*p*x/e + x*log(c*(a + b*x**S(2))**p)/e, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(2))**p)/(d + e*x), x), x, -p*log(-e*(sqrt(b)*x + sqrt(-a))/(sqrt(b)*d - e*sqrt(-a)))*log(d + e*x)/e - p*log(e*(-sqrt(b)*x + sqrt(-a))/(sqrt(b)*d + e*sqrt(-a)))*log(d + e*x)/e - p*polylog(S(2), sqrt(b)*(d + e*x)/(sqrt(b)*d - e*sqrt(-a)))/e - p*polylog(S(2), sqrt(b)*(d + e*x)/(sqrt(b)*d + e*sqrt(-a)))/e + log(c*(a + b*x**S(2))**p)*log(d + e*x)/e, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(2))**p)/(x*(d + e*x)), x), x, p*log(-e*(sqrt(b)*x + sqrt(-a))/(sqrt(b)*d - e*sqrt(-a)))*log(d + e*x)/d + p*log(e*(-sqrt(b)*x + sqrt(-a))/(sqrt(b)*d + e*sqrt(-a)))*log(d + e*x)/d + p*polylog(S(2), (a + b*x**S(2))/a)/(S(2)*d) + p*polylog(S(2), sqrt(b)*(d + e*x)/(sqrt(b)*d - e*sqrt(-a)))/d + p*polylog(S(2), sqrt(b)*(d + e*x)/(sqrt(b)*d + e*sqrt(-a)))/d + log(c*(a + b*x**S(2))**p)*log(-b*x**S(2)/a)/(S(2)*d) - log(c*(a + b*x**S(2))**p)*log(d + e*x)/d, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(2))**p)/(x**S(2)*(d + e*x)), x), x, -log(c*(a + b*x**S(2))**p)/(d*x) - e*p*log(-e*(sqrt(b)*x + sqrt(-a))/(sqrt(b)*d - e*sqrt(-a)))*log(d + e*x)/d**S(2) - e*p*log(e*(-sqrt(b)*x + sqrt(-a))/(sqrt(b)*d + e*sqrt(-a)))*log(d + e*x)/d**S(2) - e*p*polylog(S(2), (a + b*x**S(2))/a)/(S(2)*d**S(2)) - e*p*polylog(S(2), sqrt(b)*(d + e*x)/(sqrt(b)*d - e*sqrt(-a)))/d**S(2) - e*p*polylog(S(2), sqrt(b)*(d + e*x)/(sqrt(b)*d + e*sqrt(-a)))/d**S(2) - e*log(c*(a + b*x**S(2))**p)*log(-b*x**S(2)/a)/(S(2)*d**S(2)) + e*log(c*(a + b*x**S(2))**p)*log(d + e*x)/d**S(2) + S(2)*sqrt(b)*p*atan(sqrt(b)*x/sqrt(a))/(sqrt(a)*d), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(2))**p)/(x**S(3)*(d + e*x)), x), x, -log(c*(a + b*x**S(2))**p)/(S(2)*d*x**S(2)) + e*log(c*(a + b*x**S(2))**p)/(d**S(2)*x) + e**S(2)*p*log(-e*(sqrt(b)*x + sqrt(-a))/(sqrt(b)*d - e*sqrt(-a)))*log(d + e*x)/d**S(3) + e**S(2)*p*log(e*(-sqrt(b)*x + sqrt(-a))/(sqrt(b)*d + e*sqrt(-a)))*log(d + e*x)/d**S(3) + e**S(2)*p*polylog(S(2), (a + b*x**S(2))/a)/(S(2)*d**S(3)) + e**S(2)*p*polylog(S(2), sqrt(b)*(d + e*x)/(sqrt(b)*d - e*sqrt(-a)))/d**S(3) + e**S(2)*p*polylog(S(2), sqrt(b)*(d + e*x)/(sqrt(b)*d + e*sqrt(-a)))/d**S(3) + e**S(2)*log(c*(a + b*x**S(2))**p)*log(-b*x**S(2)/a)/(S(2)*d**S(3)) - e**S(2)*log(c*(a + b*x**S(2))**p)*log(d + e*x)/d**S(3) + b*p*log(x)/(a*d) - b*p*log(a + b*x**S(2))/(S(2)*a*d) - S(2)*sqrt(b)*e*p*atan(sqrt(b)*x/sqrt(a))/(sqrt(a)*d**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)*log(c*(a + b*x**S(3))**p)/(d + e*x), x), x, a**(S(2)/3)*d*p*log(a**(S(1)/3) + b**(S(1)/3)*x)/(S(2)*b**(S(2)/3)*e**S(2)) - a**(S(2)/3)*d*p*log(a**(S(2)/3) - a**(S(1)/3)*b**(S(1)/3)*x + b**(S(2)/3)*x**S(2))/(S(4)*b**(S(2)/3)*e**S(2)) + sqrt(S(3))*a**(S(2)/3)*d*p*atan(sqrt(S(3))*(a**(S(1)/3) - S(2)*b**(S(1)/3)*x)/(S(3)*a**(S(1)/3)))/(S(2)*b**(S(2)/3)*e**S(2)) + a**(S(1)/3)*d**S(2)*p*log(a**(S(1)/3) + b**(S(1)/3)*x)/(b**(S(1)/3)*e**S(3)) - a**(S(1)/3)*d**S(2)*p*log(a**(S(2)/3) - a**(S(1)/3)*b**(S(1)/3)*x + b**(S(2)/3)*x**S(2))/(S(2)*b**(S(1)/3)*e**S(3)) - sqrt(S(3))*a**(S(1)/3)*d**S(2)*p*atan(sqrt(S(3))*(a**(S(1)/3) - S(2)*b**(S(1)/3)*x)/(S(3)*a**(S(1)/3)))/(b**(S(1)/3)*e**S(3)) + d**S(3)*p*log(-e*(a**(S(1)/3) + b**(S(1)/3)*x)/(-a**(S(1)/3)*e + b**(S(1)/3)*d))*log(d + e*x)/e**S(4) + d**S(3)*p*log(-e*((S(-1))**(S(2)/3)*a**(S(1)/3) + b**(S(1)/3)*x)/(-(S(-1))**(S(2)/3)*a**(S(1)/3)*e + b**(S(1)/3)*d))*log(d + e*x)/e**S(4) + d**S(3)*p*log((S(-1))**(S(1)/3)*e*(a**(S(1)/3) + (S(-1))**(S(2)/3)*b**(S(1)/3)*x)/((S(-1))**(S(1)/3)*a**(S(1)/3)*e + b**(S(1)/3)*d))*log(d + e*x)/e**S(4) + d**S(3)*p*polylog(S(2), b**(S(1)/3)*(d + e*x)/(-a**(S(1)/3)*e + b**(S(1)/3)*d))/e**S(4) + d**S(3)*p*polylog(S(2), b**(S(1)/3)*(d + e*x)/((S(-1))**(S(1)/3)*a**(S(1)/3)*e + b**(S(1)/3)*d))/e**S(4) + d**S(3)*p*polylog(S(2), b**(S(1)/3)*(d + e*x)/(-(S(-1))**(S(2)/3)*a**(S(1)/3)*e + b**(S(1)/3)*d))/e**S(4) - d**S(3)*log(c*(a + b*x**S(3))**p)*log(d + e*x)/e**S(4) - S(3)*d**S(2)*p*x/e**S(3) + d**S(2)*x*log(c*(a + b*x**S(3))**p)/e**S(3) + S(3)*d*p*x**S(2)/(S(4)*e**S(2)) - d*x**S(2)*log(c*(a + b*x**S(3))**p)/(S(2)*e**S(2)) - p*x**S(3)/(S(3)*e) + (a/S(3) + b*x**S(3)/S(3))*log(c*(a + b*x**S(3))**p)/(b*e), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)*log(c*(a + b*x**S(3))**p)/(d + e*x), x), x, -a**(S(2)/3)*p*log(a**(S(1)/3) + b**(S(1)/3)*x)/(S(2)*b**(S(2)/3)*e) + a**(S(2)/3)*p*log(a**(S(2)/3) - a**(S(1)/3)*b**(S(1)/3)*x + b**(S(2)/3)*x**S(2))/(S(4)*b**(S(2)/3)*e) - sqrt(S(3))*a**(S(2)/3)*p*atan(sqrt(S(3))*(a**(S(1)/3) - S(2)*b**(S(1)/3)*x)/(S(3)*a**(S(1)/3)))/(S(2)*b**(S(2)/3)*e) - a**(S(1)/3)*d*p*log(a**(S(1)/3) + b**(S(1)/3)*x)/(b**(S(1)/3)*e**S(2)) + a**(S(1)/3)*d*p*log(a**(S(2)/3) - a**(S(1)/3)*b**(S(1)/3)*x + b**(S(2)/3)*x**S(2))/(S(2)*b**(S(1)/3)*e**S(2)) + sqrt(S(3))*a**(S(1)/3)*d*p*atan(sqrt(S(3))*(a**(S(1)/3) - S(2)*b**(S(1)/3)*x)/(S(3)*a**(S(1)/3)))/(b**(S(1)/3)*e**S(2)) - d**S(2)*p*log(-e*(a**(S(1)/3) + b**(S(1)/3)*x)/(-a**(S(1)/3)*e + b**(S(1)/3)*d))*log(d + e*x)/e**S(3) - d**S(2)*p*log(-e*((S(-1))**(S(2)/3)*a**(S(1)/3) + b**(S(1)/3)*x)/(-(S(-1))**(S(2)/3)*a**(S(1)/3)*e + b**(S(1)/3)*d))*log(d + e*x)/e**S(3) - d**S(2)*p*log((S(-1))**(S(1)/3)*e*(a**(S(1)/3) + (S(-1))**(S(2)/3)*b**(S(1)/3)*x)/((S(-1))**(S(1)/3)*a**(S(1)/3)*e + b**(S(1)/3)*d))*log(d + e*x)/e**S(3) - d**S(2)*p*polylog(S(2), b**(S(1)/3)*(d + e*x)/(-a**(S(1)/3)*e + b**(S(1)/3)*d))/e**S(3) - d**S(2)*p*polylog(S(2), b**(S(1)/3)*(d + e*x)/((S(-1))**(S(1)/3)*a**(S(1)/3)*e + b**(S(1)/3)*d))/e**S(3) - d**S(2)*p*polylog(S(2), b**(S(1)/3)*(d + e*x)/(-(S(-1))**(S(2)/3)*a**(S(1)/3)*e + b**(S(1)/3)*d))/e**S(3) + d**S(2)*log(c*(a + b*x**S(3))**p)*log(d + e*x)/e**S(3) + S(3)*d*p*x/e**S(2) - d*x*log(c*(a + b*x**S(3))**p)/e**S(2) - S(3)*p*x**S(2)/(S(4)*e) + x**S(2)*log(c*(a + b*x**S(3))**p)/(S(2)*e), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(c*(a + b*x**S(3))**p)/(d + e*x), x), x, a**(S(1)/3)*p*log(a**(S(1)/3) + b**(S(1)/3)*x)/(b**(S(1)/3)*e) - a**(S(1)/3)*p*log(a**(S(2)/3) - a**(S(1)/3)*b**(S(1)/3)*x + b**(S(2)/3)*x**S(2))/(S(2)*b**(S(1)/3)*e) - sqrt(S(3))*a**(S(1)/3)*p*atan(sqrt(S(3))*(a**(S(1)/3) - S(2)*b**(S(1)/3)*x)/(S(3)*a**(S(1)/3)))/(b**(S(1)/3)*e) + d*p*log(-e*(a**(S(1)/3) + b**(S(1)/3)*x)/(-a**(S(1)/3)*e + b**(S(1)/3)*d))*log(d + e*x)/e**S(2) + d*p*log(-e*((S(-1))**(S(2)/3)*a**(S(1)/3) + b**(S(1)/3)*x)/(-(S(-1))**(S(2)/3)*a**(S(1)/3)*e + b**(S(1)/3)*d))*log(d + e*x)/e**S(2) + d*p*log((S(-1))**(S(1)/3)*e*(a**(S(1)/3) + (S(-1))**(S(2)/3)*b**(S(1)/3)*x)/((S(-1))**(S(1)/3)*a**(S(1)/3)*e + b**(S(1)/3)*d))*log(d + e*x)/e**S(2) + d*p*polylog(S(2), b**(S(1)/3)*(d + e*x)/(-a**(S(1)/3)*e + b**(S(1)/3)*d))/e**S(2) + d*p*polylog(S(2), b**(S(1)/3)*(d + e*x)/((S(-1))**(S(1)/3)*a**(S(1)/3)*e + b**(S(1)/3)*d))/e**S(2) + d*p*polylog(S(2), b**(S(1)/3)*(d + e*x)/(-(S(-1))**(S(2)/3)*a**(S(1)/3)*e + b**(S(1)/3)*d))/e**S(2) - d*log(c*(a + b*x**S(3))**p)*log(d + e*x)/e**S(2) - S(3)*p*x/e + x*log(c*(a + b*x**S(3))**p)/e, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(3))**p)/(d + e*x), x), x, -p*log(-e*(a**(S(1)/3) + b**(S(1)/3)*x)/(-a**(S(1)/3)*e + b**(S(1)/3)*d))*log(d + e*x)/e - p*log(-e*((S(-1))**(S(2)/3)*a**(S(1)/3) + b**(S(1)/3)*x)/(-(S(-1))**(S(2)/3)*a**(S(1)/3)*e + b**(S(1)/3)*d))*log(d + e*x)/e - p*log((S(-1))**(S(1)/3)*e*(a**(S(1)/3) + (S(-1))**(S(2)/3)*b**(S(1)/3)*x)/((S(-1))**(S(1)/3)*a**(S(1)/3)*e + b**(S(1)/3)*d))*log(d + e*x)/e - p*polylog(S(2), b**(S(1)/3)*(d + e*x)/(-a**(S(1)/3)*e + b**(S(1)/3)*d))/e - p*polylog(S(2), b**(S(1)/3)*(d + e*x)/((S(-1))**(S(1)/3)*a**(S(1)/3)*e + b**(S(1)/3)*d))/e - p*polylog(S(2), b**(S(1)/3)*(d + e*x)/(-(S(-1))**(S(2)/3)*a**(S(1)/3)*e + b**(S(1)/3)*d))/e + log(c*(a + b*x**S(3))**p)*log(d + e*x)/e, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(3))**p)/(x*(d + e*x)), x), x, p*log(-e*(a**(S(1)/3) + b**(S(1)/3)*x)/(-a**(S(1)/3)*e + b**(S(1)/3)*d))*log(d + e*x)/d + p*log(-e*((S(-1))**(S(2)/3)*a**(S(1)/3) + b**(S(1)/3)*x)/(-(S(-1))**(S(2)/3)*a**(S(1)/3)*e + b**(S(1)/3)*d))*log(d + e*x)/d + p*log((S(-1))**(S(1)/3)*e*(a**(S(1)/3) + (S(-1))**(S(2)/3)*b**(S(1)/3)*x)/((S(-1))**(S(1)/3)*a**(S(1)/3)*e + b**(S(1)/3)*d))*log(d + e*x)/d + p*polylog(S(2), (a + b*x**S(3))/a)/(S(3)*d) + p*polylog(S(2), b**(S(1)/3)*(d + e*x)/(-a**(S(1)/3)*e + b**(S(1)/3)*d))/d + p*polylog(S(2), b**(S(1)/3)*(d + e*x)/((S(-1))**(S(1)/3)*a**(S(1)/3)*e + b**(S(1)/3)*d))/d + p*polylog(S(2), b**(S(1)/3)*(d + e*x)/(-(S(-1))**(S(2)/3)*a**(S(1)/3)*e + b**(S(1)/3)*d))/d + log(c*(a + b*x**S(3))**p)*log(-b*x**S(3)/a)/(S(3)*d) - log(c*(a + b*x**S(3))**p)*log(d + e*x)/d, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(3))**p)/(x**S(2)*(d + e*x)), x), x, -log(c*(a + b*x**S(3))**p)/(d*x) - e*p*log(-e*(a**(S(1)/3) + b**(S(1)/3)*x)/(-a**(S(1)/3)*e + b**(S(1)/3)*d))*log(d + e*x)/d**S(2) - e*p*log(-e*((S(-1))**(S(2)/3)*a**(S(1)/3) + b**(S(1)/3)*x)/(-(S(-1))**(S(2)/3)*a**(S(1)/3)*e + b**(S(1)/3)*d))*log(d + e*x)/d**S(2) - e*p*log((S(-1))**(S(1)/3)*e*(a**(S(1)/3) + (S(-1))**(S(2)/3)*b**(S(1)/3)*x)/((S(-1))**(S(1)/3)*a**(S(1)/3)*e + b**(S(1)/3)*d))*log(d + e*x)/d**S(2) - e*p*polylog(S(2), (a + b*x**S(3))/a)/(S(3)*d**S(2)) - e*p*polylog(S(2), b**(S(1)/3)*(d + e*x)/(-a**(S(1)/3)*e + b**(S(1)/3)*d))/d**S(2) - e*p*polylog(S(2), b**(S(1)/3)*(d + e*x)/((S(-1))**(S(1)/3)*a**(S(1)/3)*e + b**(S(1)/3)*d))/d**S(2) - e*p*polylog(S(2), b**(S(1)/3)*(d + e*x)/(-(S(-1))**(S(2)/3)*a**(S(1)/3)*e + b**(S(1)/3)*d))/d**S(2) - e*log(c*(a + b*x**S(3))**p)*log(-b*x**S(3)/a)/(S(3)*d**S(2)) + e*log(c*(a + b*x**S(3))**p)*log(d + e*x)/d**S(2) - b**(S(1)/3)*p*log(a**(S(1)/3) + b**(S(1)/3)*x)/(a**(S(1)/3)*d) + b**(S(1)/3)*p*log(a**(S(2)/3) - a**(S(1)/3)*b**(S(1)/3)*x + b**(S(2)/3)*x**S(2))/(S(2)*a**(S(1)/3)*d) - sqrt(S(3))*b**(S(1)/3)*p*atan(sqrt(S(3))*(a**(S(1)/3) - S(2)*b**(S(1)/3)*x)/(S(3)*a**(S(1)/3)))/(a**(S(1)/3)*d), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(3))**p)/(x**S(3)*(d + e*x)), x), x, -log(c*(a + b*x**S(3))**p)/(S(2)*d*x**S(2)) + e*log(c*(a + b*x**S(3))**p)/(d**S(2)*x) + e**S(2)*p*log(-e*(a**(S(1)/3) + b**(S(1)/3)*x)/(-a**(S(1)/3)*e + b**(S(1)/3)*d))*log(d + e*x)/d**S(3) + e**S(2)*p*log(-e*((S(-1))**(S(2)/3)*a**(S(1)/3) + b**(S(1)/3)*x)/(-(S(-1))**(S(2)/3)*a**(S(1)/3)*e + b**(S(1)/3)*d))*log(d + e*x)/d**S(3) + e**S(2)*p*log((S(-1))**(S(1)/3)*e*(a**(S(1)/3) + (S(-1))**(S(2)/3)*b**(S(1)/3)*x)/((S(-1))**(S(1)/3)*a**(S(1)/3)*e + b**(S(1)/3)*d))*log(d + e*x)/d**S(3) + e**S(2)*p*polylog(S(2), (a + b*x**S(3))/a)/(S(3)*d**S(3)) + e**S(2)*p*polylog(S(2), b**(S(1)/3)*(d + e*x)/(-a**(S(1)/3)*e + b**(S(1)/3)*d))/d**S(3) + e**S(2)*p*polylog(S(2), b**(S(1)/3)*(d + e*x)/((S(-1))**(S(1)/3)*a**(S(1)/3)*e + b**(S(1)/3)*d))/d**S(3) + e**S(2)*p*polylog(S(2), b**(S(1)/3)*(d + e*x)/(-(S(-1))**(S(2)/3)*a**(S(1)/3)*e + b**(S(1)/3)*d))/d**S(3) + e**S(2)*log(c*(a + b*x**S(3))**p)*log(-b*x**S(3)/a)/(S(3)*d**S(3)) - e**S(2)*log(c*(a + b*x**S(3))**p)*log(d + e*x)/d**S(3) + b**(S(1)/3)*e*p*log(a**(S(1)/3) + b**(S(1)/3)*x)/(a**(S(1)/3)*d**S(2)) - b**(S(1)/3)*e*p*log(a**(S(2)/3) - a**(S(1)/3)*b**(S(1)/3)*x + b**(S(2)/3)*x**S(2))/(S(2)*a**(S(1)/3)*d**S(2)) + sqrt(S(3))*b**(S(1)/3)*e*p*atan(sqrt(S(3))*(a**(S(1)/3) - S(2)*b**(S(1)/3)*x)/(S(3)*a**(S(1)/3)))/(a**(S(1)/3)*d**S(2)) + b**(S(2)/3)*p*log(a**(S(1)/3) + b**(S(1)/3)*x)/(S(2)*a**(S(2)/3)*d) - b**(S(2)/3)*p*log(a**(S(2)/3) - a**(S(1)/3)*b**(S(1)/3)*x + b**(S(2)/3)*x**S(2))/(S(4)*a**(S(2)/3)*d) - sqrt(S(3))*b**(S(2)/3)*p*atan(sqrt(S(3))*(a**(S(1)/3) - S(2)*b**(S(1)/3)*x)/(S(3)*a**(S(1)/3)))/(S(2)*a**(S(2)/3)*d), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)*log(c*(a + b/x)**p)/(d + e*x), x), x, -d**S(3)*p*log(-e*x/d)*log(d + e*x)/e**S(4) + d**S(3)*p*log(-e*(a*x + b)/(a*d - b*e))*log(d + e*x)/e**S(4) - d**S(3)*p*polylog(S(2), (d + e*x)/d)/e**S(4) + d**S(3)*p*polylog(S(2), a*(d + e*x)/(a*d - b*e))/e**S(4) - d**S(3)*log(c*(a + b/x)**p)*log(d + e*x)/e**S(4) + d**S(2)*x*log(c*(a + b/x)**p)/e**S(3) - d*x**S(2)*log(c*(a + b/x)**p)/(S(2)*e**S(2)) + x**S(3)*log(c*(a + b/x)**p)/(S(3)*e) + b*d**S(2)*p*log(a*x + b)/(a*e**S(3)) - b*d*p*x/(S(2)*a*e**S(2)) + b*p*x**S(2)/(S(6)*a*e) + b**S(2)*d*p*log(a*x + b)/(S(2)*a**S(2)*e**S(2)) - b**S(2)*p*x/(S(3)*a**S(2)*e) + b**S(3)*p*log(a*x + b)/(S(3)*a**S(3)*e), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)*log(c*(a + b/x)**p)/(d + e*x), x), x, d**S(2)*p*log(-e*x/d)*log(d + e*x)/e**S(3) - d**S(2)*p*log(-e*(a*x + b)/(a*d - b*e))*log(d + e*x)/e**S(3) + d**S(2)*p*polylog(S(2), (d + e*x)/d)/e**S(3) - d**S(2)*p*polylog(S(2), a*(d + e*x)/(a*d - b*e))/e**S(3) + d**S(2)*log(c*(a + b/x)**p)*log(d + e*x)/e**S(3) - d*x*log(c*(a + b/x)**p)/e**S(2) + x**S(2)*log(c*(a + b/x)**p)/(S(2)*e) - b*d*p*log(a*x + b)/(a*e**S(2)) + b*p*x/(S(2)*a*e) - b**S(2)*p*log(a*x + b)/(S(2)*a**S(2)*e), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(c*(a + b/x)**p)/(d + e*x), x), x, -d*p*log(-e*x/d)*log(d + e*x)/e**S(2) + d*p*log(-e*(a*x + b)/(a*d - b*e))*log(d + e*x)/e**S(2) - d*p*polylog(S(2), (d + e*x)/d)/e**S(2) + d*p*polylog(S(2), a*(d + e*x)/(a*d - b*e))/e**S(2) - d*log(c*(a + b/x)**p)*log(d + e*x)/e**S(2) + x*log(c*(a + b/x)**p)/e + b*p*log(a*x + b)/(a*e), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b/x)**p)/(d + e*x), x), x, p*log(-e*x/d)*log(d + e*x)/e - p*log(-e*(a*x + b)/(a*d - b*e))*log(d + e*x)/e + p*polylog(S(2), (d + e*x)/d)/e - p*polylog(S(2), a*(d + e*x)/(a*d - b*e))/e + log(c*(a + b/x)**p)*log(d + e*x)/e, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b/x)**p)/(x*(d + e*x)), x), x, -p*log(-e*x/d)*log(d + e*x)/d + p*log(-e*(a*x + b)/(a*d - b*e))*log(d + e*x)/d - p*polylog(S(2), (a + b/x)/a)/d - p*polylog(S(2), (d + e*x)/d)/d + p*polylog(S(2), a*(d + e*x)/(a*d - b*e))/d - log(c*(a + b/x)**p)*log(-b/(a*x))/d - log(c*(a + b/x)**p)*log(d + e*x)/d, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b/x)**p)/(x**S(2)*(d + e*x)), x), x, p/(d*x) + e*p*log(-e*x/d)*log(d + e*x)/d**S(2) - e*p*log(-e*(a*x + b)/(a*d - b*e))*log(d + e*x)/d**S(2) + e*p*polylog(S(2), (a + b/x)/a)/d**S(2) + e*p*polylog(S(2), (d + e*x)/d)/d**S(2) - e*p*polylog(S(2), a*(d + e*x)/(a*d - b*e))/d**S(2) + e*log(c*(a + b/x)**p)*log(-b/(a*x))/d**S(2) + e*log(c*(a + b/x)**p)*log(d + e*x)/d**S(2) - (a + b/x)*log(c*(a + b/x)**p)/(b*d), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b/x)**p)/(x**S(3)*(d + e*x)), x), x, -a**S(2)*p*log(x)/(S(2)*b**S(2)*d) + a**S(2)*p*log(a*x + b)/(S(2)*b**S(2)*d) - a*p/(S(2)*b*d*x) + p/(S(4)*d*x**S(2)) - log(c*(a + b/x)**p)/(S(2)*d*x**S(2)) - e*p/(d**S(2)*x) - e**S(2)*p*log(-e*x/d)*log(d + e*x)/d**S(3) + e**S(2)*p*log(-e*(a*x + b)/(a*d - b*e))*log(d + e*x)/d**S(3) - e**S(2)*p*polylog(S(2), (a + b/x)/a)/d**S(3) - e**S(2)*p*polylog(S(2), (d + e*x)/d)/d**S(3) + e**S(2)*p*polylog(S(2), a*(d + e*x)/(a*d - b*e))/d**S(3) - e**S(2)*log(c*(a + b/x)**p)*log(-b/(a*x))/d**S(3) - e**S(2)*log(c*(a + b/x)**p)*log(d + e*x)/d**S(3) + e*(a + b/x)*log(c*(a + b/x)**p)/(b*d**S(2)), expand=True, _diff=True, _numerical=True)
# Auto-generated regression tests for SymPy's Rubi (rule-based integration)
# module, covering integrands built from logarithms of quadratic/cubic
# expressions. Each assertion runs rubi_integrate(integrand, x) and checks
# the result against a reference antiderivative (ported from the Mathematica
# Rubi test suite) via rubi_test, which verifies equivalence by expansion,
# differentiation, and numerical sampling (expand=True, _diff=True,
# _numerical=True). A few asserts accept either of two algebraically
# equivalent closed forms, joined with `or`.
# NOTE(review): these expression lines are machine-generated — do not
# hand-edit them; regenerate from the upstream Rubi test files instead.

# --- x**m * log(c*(a + b/x**2)**p) / (d + e*x), m = 3 down to -3 ---
assert rubi_test(rubi_integrate(x**S(3)*log(c*(a + b/x**S(2))**p)/(d + e*x), x), x, -S(2)*d**S(3)*p*log(-e*x/d)*log(d + e*x)/e**S(4) + d**S(3)*p*log(e*(sqrt(b) - x*sqrt(-a))/(sqrt(b)*e + d*sqrt(-a)))*log(d + e*x)/e**S(4) + d**S(3)*p*log(-e*(sqrt(b) + x*sqrt(-a))/(-sqrt(b)*e + d*sqrt(-a)))*log(d + e*x)/e**S(4) - S(2)*d**S(3)*p*polylog(S(2), (d + e*x)/d)/e**S(4) + d**S(3)*p*polylog(S(2), sqrt(-a)*(d + e*x)/(-sqrt(b)*e + d*sqrt(-a)))/e**S(4) + d**S(3)*p*polylog(S(2), sqrt(-a)*(d + e*x)/(sqrt(b)*e + d*sqrt(-a)))/e**S(4) - d**S(3)*log(c*(a + b/x**S(2))**p)*log(d + e*x)/e**S(4) + d**S(2)*x*log(c*(a + b/x**S(2))**p)/e**S(3) - d*x**S(2)*log(c*(a + b/x**S(2))**p)/(S(2)*e**S(2)) + x**S(3)*log(c*(a + b/x**S(2))**p)/(S(3)*e) - b*d*p*log(a*x**S(2) + b)/(S(2)*a*e**S(2)) + S(2)*b*p*x/(S(3)*a*e) + S(2)*sqrt(b)*d**S(2)*p*atan(sqrt(a)*x/sqrt(b))/(sqrt(a)*e**S(3)) - S(2)*b**(S(3)/2)*p*atan(sqrt(a)*x/sqrt(b))/(S(3)*a**(S(3)/2)*e), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)*log(c*(a + b/x**S(2))**p)/(d + e*x), x), x, S(2)*d**S(2)*p*log(-e*x/d)*log(d + e*x)/e**S(3) - d**S(2)*p*log(e*(sqrt(b) - x*sqrt(-a))/(sqrt(b)*e + d*sqrt(-a)))*log(d + e*x)/e**S(3) - d**S(2)*p*log(-e*(sqrt(b) + x*sqrt(-a))/(-sqrt(b)*e + d*sqrt(-a)))*log(d + e*x)/e**S(3) + S(2)*d**S(2)*p*polylog(S(2), (d + e*x)/d)/e**S(3) - d**S(2)*p*polylog(S(2), sqrt(-a)*(d + e*x)/(-sqrt(b)*e + d*sqrt(-a)))/e**S(3) - d**S(2)*p*polylog(S(2), sqrt(-a)*(d + e*x)/(sqrt(b)*e + d*sqrt(-a)))/e**S(3) + d**S(2)*log(c*(a + b/x**S(2))**p)*log(d + e*x)/e**S(3) - d*x*log(c*(a + b/x**S(2))**p)/e**S(2) + x**S(2)*log(c*(a + b/x**S(2))**p)/(S(2)*e) + b*p*log(a*x**S(2) + b)/(S(2)*a*e) - S(2)*sqrt(b)*d*p*atan(sqrt(a)*x/sqrt(b))/(sqrt(a)*e**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(c*(a + b/x**S(2))**p)/(d + e*x), x), x, -S(2)*d*p*log(-e*x/d)*log(d + e*x)/e**S(2) + d*p*log(e*(sqrt(b) - x*sqrt(-a))/(sqrt(b)*e + d*sqrt(-a)))*log(d + e*x)/e**S(2) + d*p*log(-e*(sqrt(b) + x*sqrt(-a))/(-sqrt(b)*e + d*sqrt(-a)))*log(d + e*x)/e**S(2) - S(2)*d*p*polylog(S(2), (d + e*x)/d)/e**S(2) + d*p*polylog(S(2), sqrt(-a)*(d + e*x)/(-sqrt(b)*e + d*sqrt(-a)))/e**S(2) + d*p*polylog(S(2), sqrt(-a)*(d + e*x)/(sqrt(b)*e + d*sqrt(-a)))/e**S(2) - d*log(c*(a + b/x**S(2))**p)*log(d + e*x)/e**S(2) + x*log(c*(a + b/x**S(2))**p)/e + S(2)*sqrt(b)*p*atan(sqrt(a)*x/sqrt(b))/(sqrt(a)*e), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b/x**S(2))**p)/(d + e*x), x), x, S(2)*p*log(-e*x/d)*log(d + e*x)/e - p*log(e*(sqrt(b) - x*sqrt(-a))/(sqrt(b)*e + d*sqrt(-a)))*log(d + e*x)/e - p*log(-e*(sqrt(b) + x*sqrt(-a))/(-sqrt(b)*e + d*sqrt(-a)))*log(d + e*x)/e + S(2)*p*polylog(S(2), (d + e*x)/d)/e - p*polylog(S(2), sqrt(-a)*(d + e*x)/(-sqrt(b)*e + d*sqrt(-a)))/e - p*polylog(S(2), sqrt(-a)*(d + e*x)/(sqrt(b)*e + d*sqrt(-a)))/e + log(c*(a + b/x**S(2))**p)*log(d + e*x)/e, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b/x**S(2))**p)/(x*(d + e*x)), x), x, -S(2)*p*log(-e*x/d)*log(d + e*x)/d + p*log(e*(sqrt(b) - x*sqrt(-a))/(sqrt(b)*e + d*sqrt(-a)))*log(d + e*x)/d + p*log(-e*(sqrt(b) + x*sqrt(-a))/(-sqrt(b)*e + d*sqrt(-a)))*log(d + e*x)/d - p*polylog(S(2), (a + b/x**S(2))/a)/(S(2)*d) - S(2)*p*polylog(S(2), (d + e*x)/d)/d + p*polylog(S(2), sqrt(-a)*(d + e*x)/(-sqrt(b)*e + d*sqrt(-a)))/d + p*polylog(S(2), sqrt(-a)*(d + e*x)/(sqrt(b)*e + d*sqrt(-a)))/d - log(c*(a + b/x**S(2))**p)*log(-b/(a*x**S(2)))/(S(2)*d) - log(c*(a + b/x**S(2))**p)*log(d + e*x)/d, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b/x**S(2))**p)/(x**S(2)*(d + e*x)), x), x, S(2)*sqrt(a)*p*atan(sqrt(a)*x/sqrt(b))/(sqrt(b)*d) + S(2)*p/(d*x) - log(c*(a + b/x**S(2))**p)/(d*x) + S(2)*e*p*log(-e*x/d)*log(d + e*x)/d**S(2) - e*p*log(e*(sqrt(b) - x*sqrt(-a))/(sqrt(b)*e + d*sqrt(-a)))*log(d + e*x)/d**S(2) - e*p*log(-e*(sqrt(b) + x*sqrt(-a))/(-sqrt(b)*e + d*sqrt(-a)))*log(d + e*x)/d**S(2) + e*p*polylog(S(2), (a + b/x**S(2))/a)/(S(2)*d**S(2)) + S(2)*e*p*polylog(S(2), (d + e*x)/d)/d**S(2) - e*p*polylog(S(2), sqrt(-a)*(d + e*x)/(-sqrt(b)*e + d*sqrt(-a)))/d**S(2) - e*p*polylog(S(2), sqrt(-a)*(d + e*x)/(sqrt(b)*e + d*sqrt(-a)))/d**S(2) + e*log(c*(a + b/x**S(2))**p)*log(-b/(a*x**S(2)))/(S(2)*d**S(2)) + e*log(c*(a + b/x**S(2))**p)*log(d + e*x)/d**S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b/x**S(2))**p)/(x**S(3)*(d + e*x)), x), x, -S(2)*sqrt(a)*e*p*atan(sqrt(a)*x/sqrt(b))/(sqrt(b)*d**S(2)) + p/(S(2)*d*x**S(2)) - S(2)*e*p/(d**S(2)*x) + e*log(c*(a + b/x**S(2))**p)/(d**S(2)*x) - S(2)*e**S(2)*p*log(-e*x/d)*log(d + e*x)/d**S(3) + e**S(2)*p*log(e*(sqrt(b) - x*sqrt(-a))/(sqrt(b)*e + d*sqrt(-a)))*log(d + e*x)/d**S(3) + e**S(2)*p*log(-e*(sqrt(b) + x*sqrt(-a))/(-sqrt(b)*e + d*sqrt(-a)))*log(d + e*x)/d**S(3) - e**S(2)*p*polylog(S(2), (a + b/x**S(2))/a)/(S(2)*d**S(3)) - S(2)*e**S(2)*p*polylog(S(2), (d + e*x)/d)/d**S(3) + e**S(2)*p*polylog(S(2), sqrt(-a)*(d + e*x)/(-sqrt(b)*e + d*sqrt(-a)))/d**S(3) + e**S(2)*p*polylog(S(2), sqrt(-a)*(d + e*x)/(sqrt(b)*e + d*sqrt(-a)))/d**S(3) - e**S(2)*log(c*(a + b/x**S(2))**p)*log(-b/(a*x**S(2)))/(S(2)*d**S(3)) - e**S(2)*log(c*(a + b/x**S(2))**p)*log(d + e*x)/d**S(3) - (a/S(2) + b/(S(2)*x**S(2)))*log(c*(a + b/x**S(2))**p)/(b*d), expand=True, _diff=True, _numerical=True)
# --- x**m * log(c*(a + b/x**3)**p) / (d + e*x), m = 3 down to -3
# (cube-root branch factors introduce the (-1)**(1/3), (-1)**(2/3) terms) ---
assert rubi_test(rubi_integrate(x**S(3)*log(c*(a + b/x**S(3))**p)/(d + e*x), x), x, -S(3)*d**S(3)*p*log(-e*x/d)*log(d + e*x)/e**S(4) + d**S(3)*p*log(-e*(a**(S(1)/3)*x + b**(S(1)/3))/(a**(S(1)/3)*d - b**(S(1)/3)*e))*log(d + e*x)/e**S(4) + d**S(3)*p*log(-e*(a**(S(1)/3)*x + (S(-1))**(S(2)/3)*b**(S(1)/3))/(a**(S(1)/3)*d - (S(-1))**(S(2)/3)*b**(S(1)/3)*e))*log(d + e*x)/e**S(4) + d**S(3)*p*log((S(-1))**(S(1)/3)*e*((S(-1))**(S(2)/3)*a**(S(1)/3)*x + b**(S(1)/3))/(a**(S(1)/3)*d + (S(-1))**(S(1)/3)*b**(S(1)/3)*e))*log(d + e*x)/e**S(4) - S(3)*d**S(3)*p*polylog(S(2), (d + e*x)/d)/e**S(4) + d**S(3)*p*polylog(S(2), a**(S(1)/3)*(d + e*x)/(a**(S(1)/3)*d - b**(S(1)/3)*e))/e**S(4) + d**S(3)*p*polylog(S(2), a**(S(1)/3)*(d + e*x)/(a**(S(1)/3)*d + (S(-1))**(S(1)/3)*b**(S(1)/3)*e))/e**S(4) + d**S(3)*p*polylog(S(2), a**(S(1)/3)*(d + e*x)/(a**(S(1)/3)*d - (S(-1))**(S(2)/3)*b**(S(1)/3)*e))/e**S(4) - d**S(3)*log(c*(a + b/x**S(3))**p)*log(d + e*x)/e**S(4) + d**S(2)*x*log(c*(a + b/x**S(3))**p)/e**S(3) - d*x**S(2)*log(c*(a + b/x**S(3))**p)/(S(2)*e**S(2)) + x**S(3)*log(c*(a + b/x**S(3))**p)/(S(3)*e) + b*p*log(a*x**S(3) + b)/(S(3)*a*e) + b**(S(1)/3)*d**S(2)*p*log(a**(S(1)/3)*x + b**(S(1)/3))/(a**(S(1)/3)*e**S(3)) - b**(S(1)/3)*d**S(2)*p*log(a**(S(2)/3)*x**S(2) - a**(S(1)/3)*b**(S(1)/3)*x + b**(S(2)/3))/(S(2)*a**(S(1)/3)*e**S(3)) - sqrt(S(3))*b**(S(1)/3)*d**S(2)*p*atan(sqrt(S(3))*(-S(2)*a**(S(1)/3)*x + b**(S(1)/3))/(S(3)*b**(S(1)/3)))/(a**(S(1)/3)*e**S(3)) + b**(S(2)/3)*d*p*log(a**(S(1)/3)*x + b**(S(1)/3))/(S(2)*a**(S(2)/3)*e**S(2)) - b**(S(2)/3)*d*p*log(a**(S(2)/3)*x**S(2) - a**(S(1)/3)*b**(S(1)/3)*x + b**(S(2)/3))/(S(4)*a**(S(2)/3)*e**S(2)) + sqrt(S(3))*b**(S(2)/3)*d*p*atan(sqrt(S(3))*(-S(2)*a**(S(1)/3)*x + b**(S(1)/3))/(S(3)*b**(S(1)/3)))/(S(2)*a**(S(2)/3)*e**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)*log(c*(a + b/x**S(3))**p)/(d + e*x), x), x, S(3)*d**S(2)*p*log(-e*x/d)*log(d + e*x)/e**S(3) - d**S(2)*p*log(-e*(a**(S(1)/3)*x + b**(S(1)/3))/(a**(S(1)/3)*d - b**(S(1)/3)*e))*log(d + e*x)/e**S(3) - d**S(2)*p*log(-e*(a**(S(1)/3)*x + (S(-1))**(S(2)/3)*b**(S(1)/3))/(a**(S(1)/3)*d - (S(-1))**(S(2)/3)*b**(S(1)/3)*e))*log(d + e*x)/e**S(3) - d**S(2)*p*log((S(-1))**(S(1)/3)*e*((S(-1))**(S(2)/3)*a**(S(1)/3)*x + b**(S(1)/3))/(a**(S(1)/3)*d + (S(-1))**(S(1)/3)*b**(S(1)/3)*e))*log(d + e*x)/e**S(3) + S(3)*d**S(2)*p*polylog(S(2), (d + e*x)/d)/e**S(3) - d**S(2)*p*polylog(S(2), a**(S(1)/3)*(d + e*x)/(a**(S(1)/3)*d - b**(S(1)/3)*e))/e**S(3) - d**S(2)*p*polylog(S(2), a**(S(1)/3)*(d + e*x)/(a**(S(1)/3)*d + (S(-1))**(S(1)/3)*b**(S(1)/3)*e))/e**S(3) - d**S(2)*p*polylog(S(2), a**(S(1)/3)*(d + e*x)/(a**(S(1)/3)*d - (S(-1))**(S(2)/3)*b**(S(1)/3)*e))/e**S(3) + d**S(2)*log(c*(a + b/x**S(3))**p)*log(d + e*x)/e**S(3) - d*x*log(c*(a + b/x**S(3))**p)/e**S(2) + x**S(2)*log(c*(a + b/x**S(3))**p)/(S(2)*e) - b**(S(1)/3)*d*p*log(a**(S(1)/3)*x + b**(S(1)/3))/(a**(S(1)/3)*e**S(2)) + b**(S(1)/3)*d*p*log(a**(S(2)/3)*x**S(2) - a**(S(1)/3)*b**(S(1)/3)*x + b**(S(2)/3))/(S(2)*a**(S(1)/3)*e**S(2)) + sqrt(S(3))*b**(S(1)/3)*d*p*atan(sqrt(S(3))*(-S(2)*a**(S(1)/3)*x + b**(S(1)/3))/(S(3)*b**(S(1)/3)))/(a**(S(1)/3)*e**S(2)) - b**(S(2)/3)*p*log(a**(S(1)/3)*x + b**(S(1)/3))/(S(2)*a**(S(2)/3)*e) + b**(S(2)/3)*p*log(a**(S(2)/3)*x**S(2) - a**(S(1)/3)*b**(S(1)/3)*x + b**(S(2)/3))/(S(4)*a**(S(2)/3)*e) - sqrt(S(3))*b**(S(2)/3)*p*atan(sqrt(S(3))*(-S(2)*a**(S(1)/3)*x + b**(S(1)/3))/(S(3)*b**(S(1)/3)))/(S(2)*a**(S(2)/3)*e), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(c*(a + b/x**S(3))**p)/(d + e*x), x), x, -S(3)*d*p*log(-e*x/d)*log(d + e*x)/e**S(2) + d*p*log(-e*(a**(S(1)/3)*x + b**(S(1)/3))/(a**(S(1)/3)*d - b**(S(1)/3)*e))*log(d + e*x)/e**S(2) + d*p*log(-e*(a**(S(1)/3)*x + (S(-1))**(S(2)/3)*b**(S(1)/3))/(a**(S(1)/3)*d - (S(-1))**(S(2)/3)*b**(S(1)/3)*e))*log(d + e*x)/e**S(2) + d*p*log((S(-1))**(S(1)/3)*e*((S(-1))**(S(2)/3)*a**(S(1)/3)*x + b**(S(1)/3))/(a**(S(1)/3)*d + (S(-1))**(S(1)/3)*b**(S(1)/3)*e))*log(d + e*x)/e**S(2) - S(3)*d*p*polylog(S(2), (d + e*x)/d)/e**S(2) + d*p*polylog(S(2), a**(S(1)/3)*(d + e*x)/(a**(S(1)/3)*d - b**(S(1)/3)*e))/e**S(2) + d*p*polylog(S(2), a**(S(1)/3)*(d + e*x)/(a**(S(1)/3)*d + (S(-1))**(S(1)/3)*b**(S(1)/3)*e))/e**S(2) + d*p*polylog(S(2), a**(S(1)/3)*(d + e*x)/(a**(S(1)/3)*d - (S(-1))**(S(2)/3)*b**(S(1)/3)*e))/e**S(2) - d*log(c*(a + b/x**S(3))**p)*log(d + e*x)/e**S(2) + x*log(c*(a + b/x**S(3))**p)/e + b**(S(1)/3)*p*log(a**(S(1)/3)*x + b**(S(1)/3))/(a**(S(1)/3)*e) - b**(S(1)/3)*p*log(a**(S(2)/3)*x**S(2) - a**(S(1)/3)*b**(S(1)/3)*x + b**(S(2)/3))/(S(2)*a**(S(1)/3)*e) - sqrt(S(3))*b**(S(1)/3)*p*atan(sqrt(S(3))*(-S(2)*a**(S(1)/3)*x + b**(S(1)/3))/(S(3)*b**(S(1)/3)))/(a**(S(1)/3)*e), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b/x**S(3))**p)/(d + e*x), x), x, S(3)*p*log(-e*x/d)*log(d + e*x)/e - p*log(-e*(a**(S(1)/3)*x + b**(S(1)/3))/(a**(S(1)/3)*d - b**(S(1)/3)*e))*log(d + e*x)/e - p*log(-e*(a**(S(1)/3)*x + (S(-1))**(S(2)/3)*b**(S(1)/3))/(a**(S(1)/3)*d - (S(-1))**(S(2)/3)*b**(S(1)/3)*e))*log(d + e*x)/e - p*log((S(-1))**(S(1)/3)*e*((S(-1))**(S(2)/3)*a**(S(1)/3)*x + b**(S(1)/3))/(a**(S(1)/3)*d + (S(-1))**(S(1)/3)*b**(S(1)/3)*e))*log(d + e*x)/e + S(3)*p*polylog(S(2), (d + e*x)/d)/e - p*polylog(S(2), a**(S(1)/3)*(d + e*x)/(a**(S(1)/3)*d - b**(S(1)/3)*e))/e - p*polylog(S(2), a**(S(1)/3)*(d + e*x)/(a**(S(1)/3)*d + (S(-1))**(S(1)/3)*b**(S(1)/3)*e))/e - p*polylog(S(2), a**(S(1)/3)*(d + e*x)/(a**(S(1)/3)*d - (S(-1))**(S(2)/3)*b**(S(1)/3)*e))/e + log(c*(a + b/x**S(3))**p)*log(d + e*x)/e, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b/x**S(3))**p)/(x*(d + e*x)), x), x, -S(3)*p*log(-e*x/d)*log(d + e*x)/d + p*log(-e*(a**(S(1)/3)*x + b**(S(1)/3))/(a**(S(1)/3)*d - b**(S(1)/3)*e))*log(d + e*x)/d + p*log(-e*(a**(S(1)/3)*x + (S(-1))**(S(2)/3)*b**(S(1)/3))/(a**(S(1)/3)*d - (S(-1))**(S(2)/3)*b**(S(1)/3)*e))*log(d + e*x)/d + p*log((S(-1))**(S(1)/3)*e*((S(-1))**(S(2)/3)*a**(S(1)/3)*x + b**(S(1)/3))/(a**(S(1)/3)*d + (S(-1))**(S(1)/3)*b**(S(1)/3)*e))*log(d + e*x)/d - p*polylog(S(2), (a + b/x**S(3))/a)/(S(3)*d) - S(3)*p*polylog(S(2), (d + e*x)/d)/d + p*polylog(S(2), a**(S(1)/3)*(d + e*x)/(a**(S(1)/3)*d - b**(S(1)/3)*e))/d + p*polylog(S(2), a**(S(1)/3)*(d + e*x)/(a**(S(1)/3)*d + (S(-1))**(S(1)/3)*b**(S(1)/3)*e))/d + p*polylog(S(2), a**(S(1)/3)*(d + e*x)/(a**(S(1)/3)*d - (S(-1))**(S(2)/3)*b**(S(1)/3)*e))/d - log(c*(a + b/x**S(3))**p)*log(-b/(a*x**S(3)))/(S(3)*d) - log(c*(a + b/x**S(3))**p)*log(d + e*x)/d, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b/x**S(3))**p)/(x**S(2)*(d + e*x)), x), x, -a**(S(1)/3)*p*log(a**(S(1)/3)*x + b**(S(1)/3))/(b**(S(1)/3)*d) + a**(S(1)/3)*p*log(a**(S(2)/3)*x**S(2) - a**(S(1)/3)*b**(S(1)/3)*x + b**(S(2)/3))/(S(2)*b**(S(1)/3)*d) - sqrt(S(3))*a**(S(1)/3)*p*atan(sqrt(S(3))*(-S(2)*a**(S(1)/3)*x + b**(S(1)/3))/(S(3)*b**(S(1)/3)))/(b**(S(1)/3)*d) + S(3)*p/(d*x) - log(c*(a + b/x**S(3))**p)/(d*x) + S(3)*e*p*log(-e*x/d)*log(d + e*x)/d**S(2) - e*p*log(-e*(a**(S(1)/3)*x + b**(S(1)/3))/(a**(S(1)/3)*d - b**(S(1)/3)*e))*log(d + e*x)/d**S(2) - e*p*log(-e*(a**(S(1)/3)*x + (S(-1))**(S(2)/3)*b**(S(1)/3))/(a**(S(1)/3)*d - (S(-1))**(S(2)/3)*b**(S(1)/3)*e))*log(d + e*x)/d**S(2) - e*p*log((S(-1))**(S(1)/3)*e*((S(-1))**(S(2)/3)*a**(S(1)/3)*x + b**(S(1)/3))/(a**(S(1)/3)*d + (S(-1))**(S(1)/3)*b**(S(1)/3)*e))*log(d + e*x)/d**S(2) + e*p*polylog(S(2), (a + b/x**S(3))/a)/(S(3)*d**S(2)) + S(3)*e*p*polylog(S(2), (d + e*x)/d)/d**S(2) - e*p*polylog(S(2), a**(S(1)/3)*(d + e*x)/(a**(S(1)/3)*d - b**(S(1)/3)*e))/d**S(2) - e*p*polylog(S(2), a**(S(1)/3)*(d + e*x)/(a**(S(1)/3)*d + (S(-1))**(S(1)/3)*b**(S(1)/3)*e))/d**S(2) - e*p*polylog(S(2), a**(S(1)/3)*(d + e*x)/(a**(S(1)/3)*d - (S(-1))**(S(2)/3)*b**(S(1)/3)*e))/d**S(2) + e*log(c*(a + b/x**S(3))**p)*log(-b/(a*x**S(3)))/(S(3)*d**S(2)) + e*log(c*(a + b/x**S(3))**p)*log(d + e*x)/d**S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b/x**S(3))**p)/(x**S(3)*(d + e*x)), x), x, a**(S(2)/3)*p*log(a**(S(1)/3)*x + b**(S(1)/3))/(S(2)*b**(S(2)/3)*d) - a**(S(2)/3)*p*log(a**(S(2)/3)*x**S(2) - a**(S(1)/3)*b**(S(1)/3)*x + b**(S(2)/3))/(S(4)*b**(S(2)/3)*d) - sqrt(S(3))*a**(S(2)/3)*p*atan(sqrt(S(3))*(-S(2)*a**(S(1)/3)*x + b**(S(1)/3))/(S(3)*b**(S(1)/3)))/(S(2)*b**(S(2)/3)*d) + a**(S(1)/3)*e*p*log(a**(S(1)/3)*x + b**(S(1)/3))/(b**(S(1)/3)*d**S(2)) - a**(S(1)/3)*e*p*log(a**(S(2)/3)*x**S(2) - a**(S(1)/3)*b**(S(1)/3)*x + b**(S(2)/3))/(S(2)*b**(S(1)/3)*d**S(2)) + sqrt(S(3))*a**(S(1)/3)*e*p*atan(sqrt(S(3))*(-S(2)*a**(S(1)/3)*x + b**(S(1)/3))/(S(3)*b**(S(1)/3)))/(b**(S(1)/3)*d**S(2)) + S(3)*p/(S(4)*d*x**S(2)) - log(c*(a + b/x**S(3))**p)/(S(2)*d*x**S(2)) - S(3)*e*p/(d**S(2)*x) + e*log(c*(a + b/x**S(3))**p)/(d**S(2)*x) - S(3)*e**S(2)*p*log(-e*x/d)*log(d + e*x)/d**S(3) + e**S(2)*p*log(-e*(a**(S(1)/3)*x + b**(S(1)/3))/(a**(S(1)/3)*d - b**(S(1)/3)*e))*log(d + e*x)/d**S(3) + e**S(2)*p*log(-e*(a**(S(1)/3)*x + (S(-1))**(S(2)/3)*b**(S(1)/3))/(a**(S(1)/3)*d - (S(-1))**(S(2)/3)*b**(S(1)/3)*e))*log(d + e*x)/d**S(3) + e**S(2)*p*log((S(-1))**(S(1)/3)*e*((S(-1))**(S(2)/3)*a**(S(1)/3)*x + b**(S(1)/3))/(a**(S(1)/3)*d + (S(-1))**(S(1)/3)*b**(S(1)/3)*e))*log(d + e*x)/d**S(3) - e**S(2)*p*polylog(S(2), (a + b/x**S(3))/a)/(S(3)*d**S(3)) - S(3)*e**S(2)*p*polylog(S(2), (d + e*x)/d)/d**S(3) + e**S(2)*p*polylog(S(2), a**(S(1)/3)*(d + e*x)/(a**(S(1)/3)*d - b**(S(1)/3)*e))/d**S(3) + e**S(2)*p*polylog(S(2), a**(S(1)/3)*(d + e*x)/(a**(S(1)/3)*d + (S(-1))**(S(1)/3)*b**(S(1)/3)*e))/d**S(3) + e**S(2)*p*polylog(S(2), a**(S(1)/3)*(d + e*x)/(a**(S(1)/3)*d - (S(-1))**(S(2)/3)*b**(S(1)/3)*e))/d**S(3) - e**S(2)*log(c*(a + b/x**S(3))**p)*log(-b/(a*x**S(3)))/(S(3)*d**S(3)) - e**S(2)*log(c*(a + b/x**S(3))**p)*log(d + e*x)/d**S(3), expand=True, _diff=True, _numerical=True)
# --- log(d + e*x**2) over quadratic denominators (last two accept either
# of two equivalent antiderivatives) ---
assert rubi_test(rubi_integrate(log(d + e*x**S(2))/(-x**S(2) + S(1)), x), x, log((-sqrt(e)*x + sqrt(-d))/(-sqrt(e) + sqrt(-d)))*log(-x + S(1))/S(2) - log((sqrt(e)*x + sqrt(-d))/(-sqrt(e) + sqrt(-d)))*log(x + S(1))/S(2) - log((-sqrt(e)*x + sqrt(-d))/(sqrt(e) + sqrt(-d)))*log(x + S(1))/S(2) + log((sqrt(e)*x + sqrt(-d))/(sqrt(e) + sqrt(-d)))*log(-x + S(1))/S(2) + log(d + e*x**S(2))*atanh(x) - polylog(S(2), sqrt(e)*(-x + S(-1))/(-sqrt(e) + sqrt(-d)))/S(2) + polylog(S(2), sqrt(e)*(x + S(-1))/(-sqrt(e) + sqrt(-d)))/S(2) + polylog(S(2), sqrt(e)*(-x + S(1))/(sqrt(e) + sqrt(-d)))/S(2) - polylog(S(2), sqrt(e)*(x + S(1))/(sqrt(e) + sqrt(-d)))/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(d + e*x**S(2))/(a + b*x**S(2)), x), x, I*log(sqrt(b)*(-sqrt(e)*x + sqrt(-d))/(-I*sqrt(a)*sqrt(e) + sqrt(b)*sqrt(-d)))*log(S(1) + I*sqrt(b)*x/sqrt(a))/(S(2)*sqrt(a)*sqrt(b)) - I*log(sqrt(b)*(-sqrt(e)*x + sqrt(-d))/(I*sqrt(a)*sqrt(e) + sqrt(b)*sqrt(-d)))*log(S(1) - I*sqrt(b)*x/sqrt(a))/(S(2)*sqrt(a)*sqrt(b)) - I*log(sqrt(b)*(sqrt(e)*x + sqrt(-d))/(-I*sqrt(a)*sqrt(e) + sqrt(b)*sqrt(-d)))*log(S(1) - I*sqrt(b)*x/sqrt(a))/(S(2)*sqrt(a)*sqrt(b)) + I*log(sqrt(b)*(sqrt(e)*x + sqrt(-d))/(I*sqrt(a)*sqrt(e) + sqrt(b)*sqrt(-d)))*log(S(1) + I*sqrt(b)*x/sqrt(a))/(S(2)*sqrt(a)*sqrt(b)) + log(d + e*x**S(2))*atan(sqrt(b)*x/sqrt(a))/(sqrt(a)*sqrt(b)) + I*polylog(S(2), sqrt(e)*(-sqrt(a) - I*sqrt(b)*x)/(-sqrt(a)*sqrt(e) + I*sqrt(b)*sqrt(-d)))/(S(2)*sqrt(a)*sqrt(b)) - I*polylog(S(2), sqrt(e)*(-sqrt(a) + I*sqrt(b)*x)/(-sqrt(a)*sqrt(e) + I*sqrt(b)*sqrt(-d)))/(S(2)*sqrt(a)*sqrt(b)) - I*polylog(S(2), sqrt(e)*(sqrt(a) - I*sqrt(b)*x)/(sqrt(a)*sqrt(e) + I*sqrt(b)*sqrt(-d)))/(S(2)*sqrt(a)*sqrt(b)) + I*polylog(S(2), sqrt(e)*(sqrt(a) + I*sqrt(b)*x)/(sqrt(a)*sqrt(e) + I*sqrt(b)*sqrt(-d)))/(S(2)*sqrt(a)*sqrt(b)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(-x**S(2) + S(1))/(-x**S(2) + S(2)), x), x, sqrt(S(2))*log(-x**S(2) + S(1))*atanh(sqrt(S(2))*x/S(2))/S(2) - sqrt(S(2))*log(-S(2)*sqrt(S(2)) + S(3))*atanh(x)/S(2) + sqrt(S(2))*polylog(S(2), sqrt(S(2))*(-x + S(-1))/(-sqrt(S(2)) + S(2)))/S(4) - sqrt(S(2))*polylog(S(2), sqrt(S(2))*(x + S(-1))/(-sqrt(S(2)) + S(2)))/S(4) + sqrt(S(2))*polylog(S(2), sqrt(S(2))*(-x + S(1))/(sqrt(S(2)) + S(2)))/S(4) - sqrt(S(2))*polylog(S(2), sqrt(S(2))*(x + S(1))/(sqrt(S(2)) + S(2)))/S(4), expand=True, _diff=True, _numerical=True) or rubi_test(rubi_integrate(log(-x**S(2) + S(1))/(-x**S(2) + S(2)), x), x, -sqrt(S(2))*log(-x + S(1))*atanh(sqrt(S(2))/S(2))/S(2) + sqrt(S(2))*log(x + S(1))*atanh(sqrt(S(2))/S(2))/S(2) + sqrt(S(2))*log(-x**S(2) + S(1))*atanh(sqrt(S(2))*x/S(2))/S(2) + sqrt(S(2))*polylog(S(2), sqrt(S(2))*(-x + S(-1))/(-sqrt(S(2)) + S(2)))/S(4) - sqrt(S(2))*polylog(S(2), sqrt(S(2))*(x + S(-1))/(-sqrt(S(2)) + S(2)))/S(4) + sqrt(S(2))*polylog(S(2), sqrt(S(2))*(-x + S(1))/(sqrt(S(2)) + S(2)))/S(4) - sqrt(S(2))*polylog(S(2), sqrt(S(2))*(x + S(1))/(sqrt(S(2)) + S(2)))/S(4), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + c*x**S(2))*log(d + e*x**S(2))/x**S(2), x), x, -a*log(d + e*x**S(2))/x + c*x*log(d + e*x**S(2)) - S(2)*c*x + (S(2)*a*e + S(2)*c*d)*atan(sqrt(e)*x/sqrt(d))/(sqrt(d)*sqrt(e)), expand=True, _diff=True, _numerical=True) or rubi_test(rubi_integrate((a + c*x**S(2))*log(d + e*x**S(2))/x**S(2), x), x, -a*log(d + e*x**S(2))/x + S(2)*a*sqrt(e)*atan(sqrt(e)*x/sqrt(d))/sqrt(d) + S(2)*c*sqrt(d)*atan(sqrt(e)*x/sqrt(d))/sqrt(e) + c*x*log(d + e*x**S(2)) - S(2)*c*x, expand=True, _diff=True, _numerical=True)
# --- x**m * log(c*(a + b*x**2)**n)**2: odd m gives rational/log forms,
# m <= -1 odd brings in dilogarithms, even m brings in arctangent terms ---
assert rubi_test(rubi_integrate(x**S(5)*log(c*(a + b*x**S(2))**n)**S(2), x), x, -S(5)*a**S(3)*n**S(2)*log(a + b*x**S(2))/(S(18)*b**S(3)) + a**S(3)*log(c*(a + b*x**S(2))**n)**S(2)/(S(6)*b**S(3)) + S(11)*a**S(2)*n**S(2)*x**S(2)/(S(18)*b**S(2)) - a**S(2)*n*(a + b*x**S(2))*log(c*(a + b*x**S(2))**n)/(S(3)*b**S(3)) - S(5)*a*n**S(2)*x**S(4)/(S(36)*b) + a*n*x**S(4)*log(c*(a + b*x**S(2))**n)/(S(6)*b) + n**S(2)*x**S(6)/S(27) - n*x**S(6)*log(c*(a + b*x**S(2))**n)/S(9) + x**S(6)*log(c*(a + b*x**S(2))**n)**S(2)/S(6), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)*log(c*(a + b*x**S(2))**n)**S(2), x), x, -S(3)*a*n**S(2)*x**S(2)/(S(4)*b) + a*n*(a + b*x**S(2))*log(c*(a + b*x**S(2))**n)/b**S(2) - a*(a + b*x**S(2))*log(c*(a + b*x**S(2))**n)**S(2)/(S(2)*b**S(2)) + n**S(2)*x**S(4)/S(8) - n*(a + b*x**S(2))**S(2)*log(c*(a + b*x**S(2))**n)/(S(4)*b**S(2)) + (a + b*x**S(2))**S(2)*log(c*(a + b*x**S(2))**n)**S(2)/(S(4)*b**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(c*(a + b*x**S(2))**n)**S(2), x), x, n**S(2)*x**S(2) - n*(a + b*x**S(2))*log(c*(a + b*x**S(2))**n)/b + (a/S(2) + b*x**S(2)/S(2))*log(c*(a + b*x**S(2))**n)**S(2)/b, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(2))**n)**S(2)/x, x), x, -n**S(2)*polylog(S(3), (a + b*x**S(2))/a) + n*log(c*(a + b*x**S(2))**n)*polylog(S(2), (a + b*x**S(2))/a) + log(c*(a + b*x**S(2))**n)**S(2)*log(-b*x**S(2)/a)/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(2))**n)**S(2)/x**S(3), x), x, b*n**S(2)*polylog(S(2), (a + b*x**S(2))/a)/a + b*n*log(c*(a + b*x**S(2))**n)*log(-b*x**S(2)/a)/a - (a/S(2) + b*x**S(2)/S(2))*log(c*(a + b*x**S(2))**n)**S(2)/(a*x**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(2))**n)**S(2)/x**S(5), x), x, -log(c*(a + b*x**S(2))**n)**S(2)/(S(4)*x**S(4)) - b*n*log(c*(a + b*x**S(2))**n)/(S(2)*a*x**S(2)) + b**S(2)*n**S(2)*log(x)/a**S(2) - b**S(2)*n**S(2)*log(a + b*x**S(2))/(S(2)*a**S(2)) - b**S(2)*n**S(2)*polylog(S(2), (a + b*x**S(2))/a)/(S(2)*a**S(2)) - b**S(2)*n*log(c*(a + b*x**S(2))**n)*log(-b*x**S(2)/a)/(S(2)*a**S(2)) + b**S(2)*log(c*(a + b*x**S(2))**n)**S(2)/(S(4)*a**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(2))**n)**S(2)/x**S(7), x), x, -log(c*(a + b*x**S(2))**n)**S(2)/(S(6)*x**S(6)) - b*n*log(c*(a + b*x**S(2))**n)/(S(6)*a*x**S(4)) - b**S(2)*n**S(2)/(S(6)*a**S(2)*x**S(2)) + b**S(2)*n*log(c*(a + b*x**S(2))**n)/(S(3)*a**S(2)*x**S(2)) - b**S(3)*n**S(2)*log(x)/a**S(3) + b**S(3)*n**S(2)*log(a + b*x**S(2))/(S(2)*a**S(3)) + b**S(3)*n**S(2)*polylog(S(2), (a + b*x**S(2))/a)/(S(3)*a**S(3)) + b**S(3)*n*log(c*(a + b*x**S(2))**n)*log(-b*x**S(2)/a)/(S(3)*a**S(3)) - b**S(3)*log(c*(a + b*x**S(2))**n)**S(2)/(S(6)*a**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(4)*log(c*(a + b*x**S(2))**n)**S(2), x), x, S(8)*a**(S(5)/2)*n**S(2)*log(S(2)*I*sqrt(a)/(I*sqrt(a) - sqrt(b)*x))*atan(sqrt(b)*x/sqrt(a))/(S(5)*b**(S(5)/2)) + S(4)*I*a**(S(5)/2)*n**S(2)*atan(sqrt(b)*x/sqrt(a))**S(2)/(S(5)*b**(S(5)/2)) - S(184)*a**(S(5)/2)*n**S(2)*atan(sqrt(b)*x/sqrt(a))/(S(75)*b**(S(5)/2)) + S(4)*I*a**(S(5)/2)*n**S(2)*polylog(S(2), (-sqrt(a) + I*sqrt(b)*x)/(sqrt(a) + I*sqrt(b)*x))/(S(5)*b**(S(5)/2)) + S(4)*a**(S(5)/2)*n*log(c*(a + b*x**S(2))**n)*atan(sqrt(b)*x/sqrt(a))/(S(5)*b**(S(5)/2)) + S(184)*a**S(2)*n**S(2)*x/(S(75)*b**S(2)) - S(4)*a**S(2)*n*x*log(c*(a + b*x**S(2))**n)/(S(5)*b**S(2)) - S(64)*a*n**S(2)*x**S(3)/(S(225)*b) + S(4)*a*n*x**S(3)*log(c*(a + b*x**S(2))**n)/(S(15)*b) + S(8)*n**S(2)*x**S(5)/S(125) - S(4)*n*x**S(5)*log(c*(a + b*x**S(2))**n)/S(25) + x**S(5)*log(c*(a + b*x**S(2))**n)**S(2)/S(5), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)*log(c*(a + b*x**S(2))**n)**S(2), x), x, -S(8)*a**(S(3)/2)*n**S(2)*log(S(2)*I*sqrt(a)/(I*sqrt(a) - sqrt(b)*x))*atan(sqrt(b)*x/sqrt(a))/(S(3)*b**(S(3)/2)) - S(4)*I*a**(S(3)/2)*n**S(2)*atan(sqrt(b)*x/sqrt(a))**S(2)/(S(3)*b**(S(3)/2)) + S(32)*a**(S(3)/2)*n**S(2)*atan(sqrt(b)*x/sqrt(a))/(S(9)*b**(S(3)/2)) - S(4)*I*a**(S(3)/2)*n**S(2)*polylog(S(2), (-sqrt(a) + I*sqrt(b)*x)/(sqrt(a) + I*sqrt(b)*x))/(S(3)*b**(S(3)/2)) - S(4)*a**(S(3)/2)*n*log(c*(a + b*x**S(2))**n)*atan(sqrt(b)*x/sqrt(a))/(S(3)*b**(S(3)/2)) - S(32)*a*n**S(2)*x/(S(9)*b) + S(4)*a*n*x*log(c*(a + b*x**S(2))**n)/(S(3)*b) + S(8)*n**S(2)*x**S(3)/S(27) - S(4)*n*x**S(3)*log(c*(a + b*x**S(2))**n)/S(9) + x**S(3)*log(c*(a + b*x**S(2))**n)**S(2)/S(3), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(2))**n)**S(2), x), x, S(8)*sqrt(a)*n**S(2)*log(S(2)*I*sqrt(a)/(I*sqrt(a) - sqrt(b)*x))*atan(sqrt(b)*x/sqrt(a))/sqrt(b) + S(4)*I*sqrt(a)*n**S(2)*atan(sqrt(b)*x/sqrt(a))**S(2)/sqrt(b) - S(8)*sqrt(a)*n**S(2)*atan(sqrt(b)*x/sqrt(a))/sqrt(b) + S(4)*I*sqrt(a)*n**S(2)*polylog(S(2), (-sqrt(a) + I*sqrt(b)*x)/(sqrt(a) + I*sqrt(b)*x))/sqrt(b) + S(4)*sqrt(a)*n*log(c*(a + b*x**S(2))**n)*atan(sqrt(b)*x/sqrt(a))/sqrt(b) + S(8)*n**S(2)*x - S(4)*n*x*log(c*(a + b*x**S(2))**n) + x*log(c*(a + b*x**S(2))**n)**S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(2))**n)**S(2)/x**S(2), x), x, -log(c*(a + b*x**S(2))**n)**S(2)/x + S(8)*sqrt(b)*n**S(2)*log(S(2)*I*sqrt(a)/(I*sqrt(a) - sqrt(b)*x))*atan(sqrt(b)*x/sqrt(a))/sqrt(a) + S(4)*I*sqrt(b)*n**S(2)*atan(sqrt(b)*x/sqrt(a))**S(2)/sqrt(a) + S(4)*I*sqrt(b)*n**S(2)*polylog(S(2), (-sqrt(a) + I*sqrt(b)*x)/(sqrt(a) + I*sqrt(b)*x))/sqrt(a) + S(4)*sqrt(b)*n*log(c*(a + b*x**S(2))**n)*atan(sqrt(b)*x/sqrt(a))/sqrt(a), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(2))**n)**S(2)/x**S(4), x), x, -log(c*(a + b*x**S(2))**n)**S(2)/(S(3)*x**S(3)) - S(4)*b*n*log(c*(a + b*x**S(2))**n)/(S(3)*a*x) - S(8)*b**(S(3)/2)*n**S(2)*log(S(2)*I*sqrt(a)/(I*sqrt(a) - sqrt(b)*x))*atan(sqrt(b)*x/sqrt(a))/(S(3)*a**(S(3)/2)) - S(4)*I*b**(S(3)/2)*n**S(2)*atan(sqrt(b)*x/sqrt(a))**S(2)/(S(3)*a**(S(3)/2)) + S(8)*b**(S(3)/2)*n**S(2)*atan(sqrt(b)*x/sqrt(a))/(S(3)*a**(S(3)/2)) - S(4)*I*b**(S(3)/2)*n**S(2)*polylog(S(2), (-sqrt(a) + I*sqrt(b)*x)/(sqrt(a) + I*sqrt(b)*x))/(S(3)*a**(S(3)/2)) - S(4)*b**(S(3)/2)*n*log(c*(a + b*x**S(2))**n)*atan(sqrt(b)*x/sqrt(a))/(S(3)*a**(S(3)/2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(2))**n)**S(2)/x**S(6), x), x, -log(c*(a + b*x**S(2))**n)**S(2)/(S(5)*x**S(5)) - S(4)*b*n*log(c*(a + b*x**S(2))**n)/(S(15)*a*x**S(3)) - S(8)*b**S(2)*n**S(2)/(S(15)*a**S(2)*x) + S(4)*b**S(2)*n*log(c*(a + b*x**S(2))**n)/(S(5)*a**S(2)*x) + S(8)*b**(S(5)/2)*n**S(2)*log(S(2)*I*sqrt(a)/(I*sqrt(a) - sqrt(b)*x))*atan(sqrt(b)*x/sqrt(a))/(S(5)*a**(S(5)/2)) + S(4)*I*b**(S(5)/2)*n**S(2)*atan(sqrt(b)*x/sqrt(a))**S(2)/(S(5)*a**(S(5)/2)) - S(32)*b**(S(5)/2)*n**S(2)*atan(sqrt(b)*x/sqrt(a))/(S(15)*a**(S(5)/2)) + S(4)*I*b**(S(5)/2)*n**S(2)*polylog(S(2), (-sqrt(a) + I*sqrt(b)*x)/(sqrt(a) + I*sqrt(b)*x))/(S(5)*a**(S(5)/2)) + S(4)*b**(S(5)/2)*n*log(c*(a + b*x**S(2))**n)*atan(sqrt(b)*x/sqrt(a))/(S(5)*a**(S(5)/2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(2))**n)**S(2)/x**S(8), x), x, -log(c*(a + b*x**S(2))**n)**S(2)/(S(7)*x**S(7)) - S(4)*b*n*log(c*(a + b*x**S(2))**n)/(S(35)*a*x**S(5)) - S(8)*b**S(2)*n**S(2)/(S(105)*a**S(2)*x**S(3)) + S(4)*b**S(2)*n*log(c*(a + b*x**S(2))**n)/(S(21)*a**S(2)*x**S(3)) + S(64)*b**S(3)*n**S(2)/(S(105)*a**S(3)*x) - S(4)*b**S(3)*n*log(c*(a + b*x**S(2))**n)/(S(7)*a**S(3)*x) - S(8)*b**(S(7)/2)*n**S(2)*log(S(2)*I*sqrt(a)/(I*sqrt(a) - sqrt(b)*x))*atan(sqrt(b)*x/sqrt(a))/(S(7)*a**(S(7)/2)) - S(4)*I*b**(S(7)/2)*n**S(2)*atan(sqrt(b)*x/sqrt(a))**S(2)/(S(7)*a**(S(7)/2)) + S(184)*b**(S(7)/2)*n**S(2)*atan(sqrt(b)*x/sqrt(a))/(S(105)*a**(S(7)/2)) - S(4)*I*b**(S(7)/2)*n**S(2)*polylog(S(2), (-sqrt(a) + I*sqrt(b)*x)/(sqrt(a) + I*sqrt(b)*x))/(S(7)*a**(S(7)/2)) - S(4)*b**(S(7)/2)*n*log(c*(a + b*x**S(2))**n)*atan(sqrt(b)*x/sqrt(a))/(S(7)*a**(S(7)/2)), expand=True, _diff=True, _numerical=True)
# --- x**m * log(c*(a + b*x**2)**n)**3, odd m only ---
assert rubi_test(rubi_integrate(x**S(5)*log(c*(a + b*x**S(2))**n)**S(3), x), x, -S(9)*a**S(2)*n**S(3)*x**S(2)/(S(4)*b**S(2)) + S(3)*a**S(2)*n**S(2)*(a + b*x**S(2))*log(c*(a + b*x**S(2))**n)/b**S(3) - S(3)*a**S(2)*n*(a + b*x**S(2))*log(c*(a + b*x**S(2))**n)**S(2)/(S(2)*b**S(3)) + a**S(2)*(a + b*x**S(2))*log(c*(a + b*x**S(2))**n)**S(3)/(S(2)*b**S(3)) + S(3)*a*n**S(3)*x**S(4)/(S(8)*b) - S(3)*a*n**S(2)*(a + b*x**S(2))**S(2)*log(c*(a + b*x**S(2))**n)/(S(4)*b**S(3)) + S(3)*a*n*(a + b*x**S(2))**S(2)*log(c*(a + b*x**S(2))**n)**S(2)/(S(4)*b**S(3)) - a*(a + b*x**S(2))**S(2)*log(c*(a + b*x**S(2))**n)**S(3)/(S(2)*b**S(3)) - n**S(3)*(a + b*x**S(2))**S(3)/(S(27)*b**S(3)) + n**S(2)*(a + b*x**S(2))**S(3)*log(c*(a + b*x**S(2))**n)/(S(9)*b**S(3)) - n*(a + b*x**S(2))**S(3)*log(c*(a + b*x**S(2))**n)**S(2)/(S(6)*b**S(3)) + (a + b*x**S(2))**S(3)*log(c*(a + b*x**S(2))**n)**S(3)/(S(6)*b**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)*log(c*(a + b*x**S(2))**n)**S(3), x), x, S(21)*a*n**S(3)*x**S(2)/(S(8)*b) - S(3)*a*n**S(2)*(a + b*x**S(2))*log(c*(a + b*x**S(2))**n)/b**S(2) + S(3)*a*n*(a + b*x**S(2))*log(c*(a + b*x**S(2))**n)**S(2)/(S(2)*b**S(2)) - a*(a + b*x**S(2))*log(c*(a + b*x**S(2))**n)**S(3)/(S(2)*b**S(2)) - S(3)*n**S(3)*x**S(4)/S(16) + S(3)*n**S(2)*(a + b*x**S(2))**S(2)*log(c*(a + b*x**S(2))**n)/(S(8)*b**S(2)) - S(3)*n*(a + b*x**S(2))**S(2)*log(c*(a + b*x**S(2))**n)**S(2)/(S(8)*b**S(2)) + (a + b*x**S(2))**S(2)*log(c*(a + b*x**S(2))**n)**S(3)/(S(4)*b**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(c*(a + b*x**S(2))**n)**S(3), x), x, -S(3)*n**S(3)*x**S(2) + S(3)*n**S(2)*(a + b*x**S(2))*log(c*(a + b*x**S(2))**n)/b - S(3)*n*(a + b*x**S(2))*log(c*(a + b*x**S(2))**n)**S(2)/(S(2)*b) + (a/S(2) + b*x**S(2)/S(2))*log(c*(a + b*x**S(2))**n)**S(3)/b, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(2))**n)**S(3)/x, x), x, S(3)*n**S(3)*polylog(S(4), (a + b*x**S(2))/a) - S(3)*n**S(2)*log(c*(a + b*x**S(2))**n)*polylog(S(3), (a + b*x**S(2))/a) + S(3)*n*log(c*(a + b*x**S(2))**n)**S(2)*polylog(S(2), (a + b*x**S(2))/a)/S(2) + log(c*(a + b*x**S(2))**n)**S(3)*log(-b*x**S(2)/a)/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(2))**n)**S(3)/x**S(3), x), x, -S(3)*b*n**S(3)*polylog(S(3), (a + b*x**S(2))/a)/a + S(3)*b*n**S(2)*log(c*(a + b*x**S(2))**n)*polylog(S(2), (a + b*x**S(2))/a)/a + S(3)*b*n*log(c*(a + b*x**S(2))**n)**S(2)*log(-b*x**S(2)/a)/(S(2)*a) - (a/S(2) + b*x**S(2)/S(2))*log(c*(a + b*x**S(2))**n)**S(3)/(a*x**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(2))**n)**S(3)/x**S(5), x), x, -log(c*(a + b*x**S(2))**n)**S(3)/(S(4)*x**S(4)) + S(3)*b**S(2)*n**S(3)*polylog(S(2), (a + b*x**S(2))/a)/(S(2)*a**S(2)) + S(3)*b**S(2)*n**S(3)*polylog(S(3), (a + b*x**S(2))/a)/(S(2)*a**S(2)) + S(3)*b**S(2)*n**S(2)*log(c*(a + b*x**S(2))**n)*log(-b*x**S(2)/a)/(S(2)*a**S(2)) - S(3)*b**S(2)*n**S(2)*log(c*(a + b*x**S(2))**n)*polylog(S(2), (a + b*x**S(2))/a)/(S(2)*a**S(2)) - S(3)*b**S(2)*n*log(c*(a + b*x**S(2))**n)**S(2)*log(-b*x**S(2)/a)/(S(4)*a**S(2)) + b**S(2)*log(c*(a + b*x**S(2))**n)**S(3)/(S(4)*a**S(2)) - S(3)*b*n*(a + b*x**S(2))*log(c*(a + b*x**S(2))**n)**S(2)/(S(4)*a**S(2)*x**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(2))**n)**S(3)/x**S(7), x), x, -log(c*(a + b*x**S(2))**n)**S(3)/(S(6)*x**S(6)) - b*n*log(c*(a + b*x**S(2))**n)**S(2)/(S(4)*a*x**S(4)) - b**S(2)*n**S(2)*log(c*(a + b*x**S(2))**n)/(S(2)*a**S(2)*x**S(2)) + b**S(3)*n**S(3)*log(x)/a**S(3) - b**S(3)*n**S(3)*log(a + b*x**S(2))/(S(2)*a**S(3)) - S(3)*b**S(3)*n**S(3)*polylog(S(2), (a + b*x**S(2))/a)/(S(2)*a**S(3)) - b**S(3)*n**S(3)*polylog(S(3), (a + b*x**S(2))/a)/a**S(3) - S(3)*b**S(3)*n**S(2)*log(c*(a + b*x**S(2))**n)*log(-b*x**S(2)/a)/(S(2)*a**S(3)) + b**S(3)*n**S(2)*log(c*(a + b*x**S(2))**n)*polylog(S(2), (a + b*x**S(2))/a)/a**S(3) + b**S(3)*n*log(c*(a + b*x**S(2))**n)**S(2)*log(-b*x**S(2)/a)/(S(2)*a**S(3)) + b**S(3)*n*log(c*(a + b*x**S(2))**n)**S(2)/(S(4)*a**S(3)) - b**S(3)*log(c*(a + b*x**S(2))**n)**S(3)/(S(6)*a**S(3)) + b**S(2)*n*(a + b*x**S(2))*log(c*(a + b*x**S(2))**n)**S(2)/(S(2)*a**S(3)*x**S(2)), expand=True, _diff=True, _numerical=True)
# --- x**m / log(c*(a + b*x**2)**n)**k: odd m solved via Ei; odd negative m
# returned as an unevaluated Integral after the u = x**2 substitution ---
assert rubi_test(rubi_integrate(x**S(3)/log(c*(a + b*x**S(2))**n), x), x, -a*(c*(a + b*x**S(2))**n)**(-S(1)/n)*(a + b*x**S(2))*Ei(log(c*(a + b*x**S(2))**n)/n)/(S(2)*b**S(2)*n) + (c*(a + b*x**S(2))**n)**(-S(2)/n)*(a + b*x**S(2))**S(2)*Ei(S(2)*log(c*(a + b*x**S(2))**n)/n)/(S(2)*b**S(2)*n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x/log(c*(a + b*x**S(2))**n), x), x, (c*(a + b*x**S(2))**n)**(-S(1)/n)*(a/S(2) + b*x**S(2)/S(2))*Ei(log(c*(a + b*x**S(2))**n)/n)/(b*n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x*log(c*(a + b*x**S(2))**n)), x), x, Integral(S(1)/(x*log(c*(a + b*x)**n)), (x, x**S(2)))/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x**S(3)*log(c*(a + b*x**S(2))**n)), x), x, Integral(S(1)/(x**S(2)*log(c*(a + b*x)**n)), (x, x**S(2)))/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)/log(c*(a + b*x**S(2))**n)**S(2), x), x, -a*(c*(a + b*x**S(2))**n)**(-S(1)/n)*(a + b*x**S(2))*Ei(log(c*(a + b*x**S(2))**n)/n)/(S(2)*b**S(2)*n**S(2)) - x**S(2)*(a + b*x**S(2))/(S(2)*b*n*log(c*(a + b*x**S(2))**n)) + (c*(a + b*x**S(2))**n)**(-S(2)/n)*(a + b*x**S(2))**S(2)*Ei(S(2)*log(c*(a + b*x**S(2))**n)/n)/(b**S(2)*n**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x/log(c*(a + b*x**S(2))**n)**S(2), x), x, (-a/S(2) - b*x**S(2)/S(2))/(b*n*log(c*(a + b*x**S(2))**n)) + (c*(a + b*x**S(2))**n)**(-S(1)/n)*(a/S(2) + b*x**S(2)/S(2))*Ei(log(c*(a + b*x**S(2))**n)/n)/(b*n**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x*log(c*(a + b*x**S(2))**n)**S(2)), x), x, Integral(S(1)/(x*log(c*(a + b*x)**n)**S(2)), (x, x**S(2)))/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x**S(3)*log(c*(a + b*x**S(2))**n)**S(2)), x), x, Integral(S(1)/(x**S(2)*log(c*(a + b*x)**n)**S(2)), (x, x**S(2)))/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)/log(c*(a + b*x**S(2))**n)**S(3), x), x, -a*(a + b*x**S(2))/(S(4)*b**S(2)*n**S(2)*log(c*(a + b*x**S(2))**n)) - a*(c*(a + b*x**S(2))**n)**(-S(1)/n)*(a + b*x**S(2))*Ei(log(c*(a + b*x**S(2))**n)/n)/(S(4)*b**S(2)*n**S(3)) - x**S(2)*(a + b*x**S(2))/(S(4)*b*n*log(c*(a + b*x**S(2))**n)**S(2)) - x**S(2)*(a + b*x**S(2))/(S(2)*b*n**S(2)*log(c*(a + b*x**S(2))**n)) + (c*(a + b*x**S(2))**n)**(-S(2)/n)*(a + b*x**S(2))**S(2)*Ei(S(2)*log(c*(a + b*x**S(2))**n)/n)/(b**S(2)*n**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x/log(c*(a + b*x**S(2))**n)**S(3), x), x, (-a/S(4) - b*x**S(2)/S(4))/(b*n*log(c*(a + b*x**S(2))**n)**S(2)) + (-a/S(4) - b*x**S(2)/S(4))/(b*n**S(2)*log(c*(a + b*x**S(2))**n)) + (c*(a + b*x**S(2))**n)**(-S(1)/n)*(a/S(4) + b*x**S(2)/S(4))*Ei(log(c*(a + b*x**S(2))**n)/n)/(b*n**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x*log(c*(a + b*x**S(2))**n)**S(3)), x), x, Integral(S(1)/(x*log(c*(a + b*x)**n)**S(3)), (x, x**S(2)))/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x**S(3)*log(c*(a + b*x**S(2))**n)**S(3)), x), x, Integral(S(1)/(x**S(2)*log(c*(a + b*x)**n)**S(3)), (x, x**S(2)))/S(2), expand=True, _diff=True, _numerical=True)
# --- x**m / log(c*(a + b*x**2))**k (n = 1 special case): odd m gives
# li/Ei closed forms; the rest stay as unevaluated Integral results ---
assert rubi_test(rubi_integrate(x**m/log(c*(a + b*x**S(2))), x), x, Integral(x**m/log(c*(a + b*x**S(2))), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)/log(c*(a + b*x**S(2))), x), x, -a*li(a*c + b*c*x**S(2))/(S(2)*b**S(2)*c) + Ei(S(2)*log(a*c + b*c*x**S(2)))/(S(2)*b**S(2)*c**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)/log(c*(a + b*x**S(2))), x), x, Integral(x**S(2)/log(c*(a + b*x**S(2))), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x/log(c*(a + b*x**S(2))), x), x, li(c*(a + b*x**S(2)))/(S(2)*b*c), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/log(c*(a + b*x**S(2))), x), x, Integral(S(1)/log(c*(a + b*x**S(2))), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x*log(c*(a + b*x**S(2)))), x), x, Integral(S(1)/(x*log(a*c + b*c*x)), (x, x**S(2)))/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x**S(2)*log(c*(a + b*x**S(2)))), x), x, Integral(S(1)/(x**S(2)*log(c*(a + b*x**S(2)))), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x**S(3)*log(c*(a + b*x**S(2)))), x), x, Integral(S(1)/(x**S(2)*log(a*c + b*c*x)), (x, x**S(2)))/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**m/log(c*(a + b*x**S(2)))**S(2), x), x, Integral(x**m/log(c*(a + b*x**S(2)))**S(2), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)/log(c*(a + b*x**S(2)))**S(2), x), x, -a*li(a*c + b*c*x**S(2))/(S(2)*b**S(2)*c) - x**S(2)*(a + b*x**S(2))/(S(2)*b*log(a*c + b*c*x**S(2))) + Ei(S(2)*log(a*c + b*c*x**S(2)))/(b**S(2)*c**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)/log(c*(a + b*x**S(2)))**S(2), x), x, Integral(x**S(2)/log(c*(a + b*x**S(2)))**S(2), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x/log(c*(a + b*x**S(2)))**S(2), x), x, (-a/S(2) - b*x**S(2)/S(2))/(b*log(c*(a + b*x**S(2)))) + li(c*(a + b*x**S(2)))/(S(2)*b*c), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(2)))**(S(-2)), x), x, Integral(log(c*(a + b*x**S(2)))**(S(-2)), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x*log(c*(a + b*x**S(2)))**S(2)), x), x, Integral(S(1)/(x*log(a*c + b*c*x)**S(2)), (x, x**S(2)))/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x**S(2)*log(c*(a + b*x**S(2)))**S(2)), x), x, Integral(S(1)/(x**S(2)*log(c*(a + b*x**S(2)))**S(2)), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x**S(3)*log(c*(a + b*x**S(2)))**S(2)), x), x, Integral(S(1)/(x**S(2)*log(a*c + b*c*x)**S(2)), (x, x**S(2)))/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**m/log(c*(a + b*x**S(2)))**S(3), x), x, Integral(x**m/log(c*(a + b*x**S(2)))**S(3), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)/log(c*(a + b*x**S(2)))**S(3), x), x, -a*(a + b*x**S(2))/(S(4)*b**S(2)*log(a*c + b*c*x**S(2))) - a*li(a*c + b*c*x**S(2))/(S(4)*b**S(2)*c) - x**S(2)*(a + b*x**S(2))/(S(2)*b*log(a*c + b*c*x**S(2))) - x**S(2)*(a + b*x**S(2))/(S(4)*b*log(a*c + b*c*x**S(2))**S(2)) + Ei(S(2)*log(a*c + b*c*x**S(2)))/(b**S(2)*c**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)/log(c*(a + b*x**S(2)))**S(3), x), x, Integral(x**S(2)/log(c*(a + b*x**S(2)))**S(3), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x/log(c*(a + b*x**S(2)))**S(3), x), x, (-a/S(4) - b*x**S(2)/S(4))/(b*log(c*(a + b*x**S(2)))) + (-a/S(4) - b*x**S(2)/S(4))/(b*log(c*(a + b*x**S(2)))**S(2)) + li(c*(a + b*x**S(2)))/(S(4)*b*c), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(2)))**(S(-3)), x), x, Integral(log(c*(a + b*x**S(2)))**(S(-3)), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x*log(c*(a + b*x**S(2)))**S(3)), x), x, Integral(S(1)/(x*log(a*c + b*c*x)**S(3)), (x, x**S(2)))/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x**S(2)*log(c*(a + b*x**S(2)))**S(3)), x), x, Integral(S(1)/(x**S(2)*log(c*(a + b*x**S(2)))**S(3)), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x**S(3)*log(c*(a + b*x**S(2)))**S(3)), x), x, Integral(S(1)/(x**S(2)*log(a*c + b*c*x)**S(3)), (x, x**S(2)))/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(b*x**m + S(1))/x, x), x, -polylog(S(2), -b*x**m)/m, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(b*x**m + S(2))/x, x), x, log(S(2))*log(x) - polylog(S(2), -b*x**m/S(2))/m, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(S(2)*b*x**m + S(6))/x, x), x, log(S(6))*log(x) - polylog(S(2), -b*x**m/S(3))/m, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**m))/x, x), x, log(c*(a + b*x**m))*log(-b*x**m/a)/m + polylog(S(2), (a + b*x**m)/a)/m, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**m)**n)/x, x), x, n*polylog(S(2), (a + b*x**m)/a)/m + log(c*(a + b*x**m)**n)*log(-b*x**m/a)/m, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**m)**n)**S(2)/x, x), x, -S(2)*n**S(2)*polylog(S(3), (a + b*x**m)/a)/m + S(2)*n*log(c*(a + b*x**m)**n)*polylog(S(2), (a + b*x**m)/a)/m + log(c*(a + b*x**m)**n)**S(2)*log(-b*x**m/a)/m, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**m)**n)**S(3)/x, x), x, S(6)*n**S(3)*polylog(S(4), (a + b*x**m)/a)/m - S(6)*n**S(2)*log(c*(a + b*x**m)**n)*polylog(S(3), (a + b*x**m)/a)/m + S(3)*n*log(c*(a + b*x**m)**n)**S(2)*polylog(S(2), (a + b*x**m)/a)/m + log(c*(a + b*x**m)**n)**S(3)*log(-b*x**m/a)/m, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**m*log(d*(b*x + c*x**S(2))**n), x), x, n*x**(m + S(1))*hyper((S(1), m + S(1)), (m + S(2),), -c*x/b)/(m + S(1))**S(2) - S(2)*n*x**(m + S(1))/(m + S(1))**S(2) + x**(m + S(1))*log(d*(b*x + c*x**S(2))**n)/(m + S(1)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(4)*log(d*(b*x + c*x**S(2))**n), x), x, b**S(5)*n*log(b + c*x)/(S(5)*c**S(5)) - b**S(4)*n*x/(S(5)*c**S(4)) + b**S(3)*n*x**S(2)/(S(10)*c**S(3)) - b**S(2)*n*x**S(3)/(S(15)*c**S(2)) + b*n*x**S(4)/(S(20)*c) - S(2)*n*x**S(5)/S(25) + x**S(5)*log(d*(b*x + c*x**S(2))**n)/S(5), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)*log(d*(b*x + c*x**S(2))**n), x), x, -b**S(4)*n*log(b + c*x)/(S(4)*c**S(4)) + b**S(3)*n*x/(S(4)*c**S(3)) - b**S(2)*n*x**S(2)/(S(8)*c**S(2)) + b*n*x**S(3)/(S(12)*c) - n*x**S(4)/S(8) + x**S(4)*log(d*(b*x + c*x**S(2))**n)/S(4), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)*log(d*(b*x + c*x**S(2))**n), x), x, b**S(3)*n*log(b + c*x)/(S(3)*c**S(3)) - b**S(2)*n*x/(S(3)*c**S(2)) + b*n*x**S(2)/(S(6)*c) - S(2)*n*x**S(3)/S(9) + x**S(3)*log(d*(b*x + c*x**S(2))**n)/S(3), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(d*(b*x + c*x**S(2))**n), x), x, -b**S(2)*n*log(b + c*x)/(S(2)*c**S(2)) + b*n*x/(S(2)*c) - n*x**S(2)/S(2) + x**S(2)*log(d*(b*x + c*x**S(2))**n)/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(d*(b*x + c*x**S(2))**n), x), x, b*n*log(b + c*x)/c - S(2)*n*x + x*log(d*(b*x + c*x**S(2))**n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(d*(b*x + c*x**S(2))**n)/x, x), x, -n*log(x)**S(2)/S(2) - n*log(x)*log((b + c*x)/b) - n*polylog(S(2), -c*x/b) + log(x)*log(d*(b*x + c*x**S(2))**n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(d*(b*x + c*x**S(2))**n)/x**S(2), x), x, -n/x - log(d*(b*x + c*x**S(2))**n)/x + c*n*log(x)/b - c*n*log(b + c*x)/b, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(d*(b*x + c*x**S(2))**n)/x**S(3), x), x, -n/(S(4)*x**S(2)) - log(d*(b*x + c*x**S(2))**n)/(S(2)*x**S(2)) - c*n/(S(2)*b*x) - c**S(2)*n*log(x)/(S(2)*b**S(2)) + c**S(2)*n*log(b + c*x)/(S(2)*b**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(d*(b*x + c*x**S(2))**n)/x**S(4), x), x, -n/(S(9)*x**S(3)) - log(d*(b*x + c*x**S(2))**n)/(S(3)*x**S(3)) - c*n/(S(6)*b*x**S(2)) + c**S(2)*n/(S(3)*b**S(2)*x) + c**S(3)*n*log(x)/(S(3)*b**S(3)) - c**S(3)*n*log(b + c*x)/(S(3)*b**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(d*(b*x + c*x**S(2))**n)/x**S(5), x), x, -n/(S(16)*x**S(4)) - log(d*(b*x + c*x**S(2))**n)/(S(4)*x**S(4)) - c*n/(S(12)*b*x**S(3)) + c**S(2)*n/(S(8)*b**S(2)*x**S(2)) - c**S(3)*n/(S(4)*b**S(3)*x) - c**S(4)*n*log(x)/(S(4)*b**S(4)) + c**S(4)*n*log(b + c*x)/(S(4)*b**S(4)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**m*log(d*(a + b*x + c*x**S(2))**n), x), x, -S(2)*c*n*x**(m + S(2))*hyper((S(1), m + S(2)), (m + S(3),), -S(2)*c*x/(b + sqrt(-S(4)*a*c + b**S(2))))/((b + sqrt(-S(4)*a*c + b**S(2)))*(m + S(1))*(m + S(2))) - S(2)*c*n*x**(m + S(2))*hyper((S(1), m + S(2)), (m + S(3),), -S(2)*c*x/(b - sqrt(-S(4)*a*c + b**S(2))))/((b - sqrt(-S(4)*a*c + b**S(2)))*(m + S(1))*(m + S(2))) + x**(m + S(1))*log(d*(a + b*x + c*x**S(2))**n)/(m + S(1)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(4)*log(d*(a + b*x + c*x**S(2))**n), x), x, b*n*x**S(4)/(S(20)*c) + b*n*x**S(2)*(-S(3)*a*c + b**S(2))/(S(10)*c**S(3)) + b*n*(S(5)*a**S(2)*c**S(2) - S(5)*a*b**S(2)*c + b**S(4))*log(a + b*x + c*x**S(2))/(S(10)*c**S(5)) - S(2)*n*x**S(5)/S(25) + x**S(5)*log(d*(a + b*x + c*x**S(2))**n)/S(5) - n*x**S(3)*(-S(2)*a*c/S(15) + b**S(2)/S(15))/c**S(2) + n*x*(-S(2)*a**S(2)*c**S(2)/S(5) + S(4)*a*b**S(2)*c/S(5) - b**S(4)/S(5))/c**S(4) + n*sqrt(-S(4)*a*c + b**S(2))*(a**S(2)*c**S(2)/S(5) - S(3)*a*b**S(2)*c/S(5) + b**S(4)/S(5))*atanh((b + S(2)*c*x)/sqrt(-S(4)*a*c + b**S(2)))/c**S(5), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)*log(d*(a + b*x + c*x**S(2))**n), x), x, b*n*x**S(3)/(S(12)*c) + b*n*x*(-S(3)*a*c + b**S(2))/(S(4)*c**S(3)) - b*n*sqrt(-S(4)*a*c + b**S(2))*(-S(2)*a*c + b**S(2))*atanh((b + S(2)*c*x)/sqrt(-S(4)*a*c + b**S(2)))/(S(4)*c**S(4)) - n*x**S(4)/S(8) + x**S(4)*log(d*(a + b*x + c*x**S(2))**n)/S(4) - n*x**S(2)*(-a*c/S(4) + b**S(2)/S(8))/c**S(2) - n*(a**S(2)*c**S(2)/S(4) - a*b**S(2)*c/S(2) + b**S(4)/S(8))*log(a + b*x + c*x**S(2))/c**S(4), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)*log(d*(a + b*x + c*x**S(2))**n), x), x, b*n*x**S(2)/(S(6)*c) + b*n*(-S(3)*a*c + b**S(2))*log(a + b*x + c*x**S(2))/(S(6)*c**S(3)) - S(2)*n*x**S(3)/S(9) + x**S(3)*log(d*(a + b*x + c*x**S(2))**n)/S(3) + n*x*(S(2)*a*c/S(3) - b**S(2)/S(3))/c**S(2) + n*sqrt(-S(4)*a*c + b**S(2))*(-a*c/S(3) + b**S(2)/S(3))*atanh((b + S(2)*c*x)/sqrt(-S(4)*a*c + b**S(2)))/c**S(3), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(d*(a + b*x + c*x**S(2))**n), x), x, b*n*x/(S(2)*c) - b*n*sqrt(-S(4)*a*c + b**S(2))*atanh((b + S(2)*c*x)/sqrt(-S(4)*a*c + b**S(2)))/(S(2)*c**S(2)) - n*x**S(2)/S(2) + x**S(2)*log(d*(a + b*x + c*x**S(2))**n)/S(2) - n*(-a*c/S(2) + b**S(2)/S(4))*log(a + b*x + c*x**S(2))/c**S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(d*(a + b*x + c*x**S(2))**n), x), x, b*n*log(a + b*x + c*x**S(2))/(S(2)*c) - S(2)*n*x + x*log(d*(a + b*x + c*x**S(2))**n) + n*sqrt(-S(4)*a*c + b**S(2))*atanh((b + S(2)*c*x)/sqrt(-S(4)*a*c + b**S(2)))/c, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(d*(a + b*x + c*x**S(2))**n)/x, x), x, -n*log(x)*log((b + S(2)*c*x - sqrt(-S(4)*a*c + b**S(2)))/(b - sqrt(-S(4)*a*c + b**S(2)))) - n*log(x)*log((b + S(2)*c*x + sqrt(-S(4)*a*c + b**S(2)))/(b + sqrt(-S(4)*a*c + b**S(2)))) - n*polylog(S(2), -S(2)*c*x/(b - sqrt(-S(4)*a*c + b**S(2)))) - n*polylog(S(2), -S(2)*c*x/(b + sqrt(-S(4)*a*c + b**S(2)))) + log(x)*log(d*(a + b*x + c*x**S(2))**n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(d*(a + b*x + c*x**S(2))**n)/x**S(2), x), x, -log(d*(a + b*x + c*x**S(2))**n)/x + b*n*log(x)/a - b*n*log(a + b*x + c*x**S(2))/(S(2)*a) + n*sqrt(-S(4)*a*c + b**S(2))*atanh((b + S(2)*c*x)/sqrt(-S(4)*a*c + b**S(2)))/a, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(d*(a + b*x + c*x**S(2))**n)/x**S(3), x), x, -log(d*(a + b*x + c*x**S(2))**n)/(S(2)*x**S(2)) - b*n/(S(2)*a*x) - b*n*sqrt(-S(4)*a*c + b**S(2))*atanh((b + S(2)*c*x)/sqrt(-S(4)*a*c + b**S(2)))/(S(2)*a**S(2)) - n*(-a*c + b**S(2)/S(2))*log(x)/a**S(2) + n*(-a*c/S(2) + b**S(2)/S(4))*log(a + b*x + c*x**S(2))/a**S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(d*(a + b*x + c*x**S(2))**n)/x**S(4), x), x, -log(d*(a + b*x + c*x**S(2))**n)/(S(3)*x**S(3)) - b*n/(S(6)*a*x**S(2)) + n*(-S(2)*a*c/S(3) + b**S(2)/S(3))/(a**S(2)*x) + b*n*(-S(3)*a*c + b**S(2))*log(x)/(S(3)*a**S(3)) - b*n*(-S(3)*a*c + b**S(2))*log(a + b*x + c*x**S(2))/(S(6)*a**S(3)) + n*sqrt(-S(4)*a*c + b**S(2))*(-a*c/S(3) + b**S(2)/S(3))*atanh((b + S(2)*c*x)/sqrt(-S(4)*a*c + b**S(2)))/a**S(3), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(d*(a + b*x + c*x**S(2))**n)/x**S(5), x), x, -log(d*(a + b*x + c*x**S(2))**n)/(S(4)*x**S(4)) - b*n/(S(12)*a*x**S(3)) + n*(-a*c/S(4) + b**S(2)/S(8))/(a**S(2)*x**S(2)) - b*n*(-S(3)*a*c + b**S(2))/(S(4)*a**S(3)*x) - b*n*sqrt(-S(4)*a*c + b**S(2))*(-S(2)*a*c + b**S(2))*atanh((b + S(2)*c*x)/sqrt(-S(4)*a*c + b**S(2)))/(S(4)*a**S(4)) + n*(a**S(2)*c**S(2)/S(4) - a*b**S(2)*c/S(2) + b**S(4)/S(8))*log(a + b*x + c*x**S(2))/a**S(4) - n*(a**S(2)*c**S(2)/S(2) - a*b**S(2)*c + b**S(4)/S(4))*log(x)/a**S(4), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x**S(2) + x + S(1)), x), x, x*log(x**S(2) + x + S(1)) - S(2)*x + log(x**S(2) + x + S(1))/S(2) + sqrt(S(3))*atan(sqrt(S(3))*(S(2)*x + S(1))/S(3)), expand=True, _diff=True, _numerical=True)
# long time assert rubi_test(rubi_integrate((d + e*x)**S(4)*log(d*(a + b*x + c*x**S(2))**n), x), x, -S(2)*e**S(4)*n*x**S(5)/S(25) + (d + e*x)**S(5)*log(d*(a + b*x + c*x**S(2))**n)/(S(5)*e) - e**S(3)*n*x**S(4)*(-b*e + S(10)*c*d)/(S(20)*c) - e**S(2)*n*x**S(3)*(b**S(2)*e**S(2) + S(20)*c**S(2)*d**S(2) - c*e*(S(2)*a*e + S(5)*b*d))/(S(15)*c**S(2)) - e*n*x**S(2)*(-b**S(3)*e**S(3) + b*c*e**S(2)*(S(3)*a*e + S(5)*b*d) + S(20)*c**S(3)*d**S(3) - S(10)*c**S(2)*d*e*(a*e + b*d))/(S(10)*c**S(3)) + n*x*(-b**S(4)*e**S(4)/S(5) + b**S(2)*c*e**S(3)*(S(4)*a*e + S(5)*b*d)/S(5) - S(2)*c**S(4)*d**S(4) + S(2)*c**S(3)*d**S(2)*e*(S(2)*a*e + b*d) - c**S(2)*e**S(2)*(S(2)*a**S(2)*e**S(2) + S(15)*a*b*d*e + S(10)*b**S(2)*d**S(2))/S(5))/c**S(4) + n*sqrt(-S(4)*a*c + b**S(2))*(b**S(4)*e**S(4)/S(5) - b**S(2)*c*e**S(3)*(S(3)*a*e + S(5)*b*d)/S(5) + c**S(4)*d**S(4) - S(2)*c**S(3)*d**S(2)*e*(a*e + b*d) + c**S(2)*e**S(2)*(a**S(2)*e**S(2) + S(10)*a*b*d*e + S(10)*b**S(2)*d**S(2))/S(5))*atanh((b + S(2)*c*x)/sqrt(-S(4)*a*c + b**S(2)))/c**S(5) - n*(-b*e/S(10) + c*d/S(5))*(b**S(4)*e**S(4) - b**S(2)*c*e**S(3)*(S(5)*a*e + S(3)*b*d) + c**S(4)*d**S(4) - S(2)*c**S(3)*d**S(2)*e*(S(5)*a*e + b*d) + c**S(2)*e**S(2)*(S(5)*a**S(2)*e**S(2) + S(10)*a*b*d*e + S(4)*b**S(2)*d**S(2)))*log(a + b*x + c*x**S(2))/(c**S(5)*e), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((d + e*x)**S(3)*log(d*(a + b*x + c*x**S(2))**n), x), x, -e**S(3)*n*x**S(4)/S(8) + (d + e*x)**S(4)*log(d*(a + b*x + c*x**S(2))**n)/(S(4)*e) - e**S(2)*n*x**S(3)*(-b*e + S(8)*c*d)/(S(12)*c) - e*n*x**S(2)*(b**S(2)*e**S(2) + S(12)*c**S(2)*d**S(2) - S(2)*c*e*(a*e + S(2)*b*d))/(S(8)*c**S(2)) + n*x*(b**S(3)*e**S(3)/S(4) - b*c*e**S(2)*(S(3)*a*e + S(4)*b*d)/S(4) - S(2)*c**S(3)*d**S(3) + c**S(2)*d*e*(S(4)*a*e + S(3)*b*d)/S(2))/c**S(3) + n*sqrt(-S(4)*a*c + b**S(2))*(-b*e/S(4) + c*d/S(2))*(b**S(2)*e**S(2) + S(2)*c**S(2)*d**S(2) - S(2)*c*e*(a*e + b*d))*atanh((b + S(2)*c*x)/sqrt(-S(4)*a*c + b**S(2)))/c**S(4) - n*(b**S(4)*e**S(4)/S(8) - b**S(2)*c*e**S(3)*(a*e + b*d)/S(2) + c**S(4)*d**S(4)/S(4) - c**S(3)*d**S(2)*e*(S(3)*a*e + b*d)/S(2) + c**S(2)*e**S(2)*(a**S(2)*e**S(2) + S(6)*a*b*d*e + S(3)*b**S(2)*d**S(2))/S(4))*log(a + b*x + c*x**S(2))/(c**S(4)*e), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((d + e*x)**S(2)*log(d*(a + b*x + c*x**S(2))**n), x), x, -S(2)*e**S(2)*n*x**S(3)/S(9) + (d + e*x)**S(3)*log(d*(a + b*x + c*x**S(2))**n)/(S(3)*e) - e*n*x**S(2)*(-b*e + S(6)*c*d)/(S(6)*c) + n*x*(-b**S(2)*e**S(2)/S(3) - S(2)*c**S(2)*d**S(2) + c*e*(S(2)*a*e + S(3)*b*d)/S(3))/c**S(2) + n*sqrt(-S(4)*a*c + b**S(2))*(b**S(2)*e**S(2)/S(3) + c**S(2)*d**S(2) - c*e*(a*e + S(3)*b*d)/S(3))*atanh((b + S(2)*c*x)/sqrt(-S(4)*a*c + b**S(2)))/c**S(3) - n*(-b*e/S(6) + c*d/S(3))*(b**S(2)*e**S(2) + c**S(2)*d**S(2) - c*e*(S(3)*a*e + b*d))*log(a + b*x + c*x**S(2))/(c**S(3)*e), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((d + e*x)*log(d*(a + b*x + c*x**S(2))**n), x), x, -e*n*x**S(2)/S(2) + n*x*(b*e/(S(2)*c) - S(2)*d) + (d + e*x)**S(2)*log(d*(a + b*x + c*x**S(2))**n)/(S(2)*e) + n*sqrt(-S(4)*a*c + b**S(2))*(-b*e/S(2) + c*d)*atanh((b + S(2)*c*x)/sqrt(-S(4)*a*c + b**S(2)))/c**S(2) - n*(b**S(2)*e**S(2)/S(4) + c**S(2)*d**S(2)/S(2) - c*e*(a*e + b*d)/S(2))*log(a + b*x + c*x**S(2))/(c**S(2)*e), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(d*(a + b*x + c*x**S(2))**n), x), x, b*n*log(a + b*x + c*x**S(2))/(S(2)*c) - S(2)*n*x + x*log(d*(a + b*x + c*x**S(2))**n) + n*sqrt(-S(4)*a*c + b**S(2))*atanh((b + S(2)*c*x)/sqrt(-S(4)*a*c + b**S(2)))/c, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(d*(a + b*x + c*x**S(2))**n)/(d + e*x), x), x, -n*log(-e*(b + S(2)*c*x - sqrt(-S(4)*a*c + b**S(2)))/(S(2)*c*d - e*(b - sqrt(-S(4)*a*c + b**S(2)))))*log(d + e*x)/e - n*log(-e*(b + S(2)*c*x + sqrt(-S(4)*a*c + b**S(2)))/(S(2)*c*d - e*(b + sqrt(-S(4)*a*c + b**S(2)))))*log(d + e*x)/e - n*polylog(S(2), S(2)*c*(d + e*x)/(S(2)*c*d - e*(b + sqrt(-S(4)*a*c + b**S(2)))))/e - n*polylog(S(2), S(2)*c*(d + e*x)/(-b*e + S(2)*c*d + e*sqrt(-S(4)*a*c + b**S(2))))/e + log(d*(a + b*x + c*x**S(2))**n)*log(d + e*x)/e, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(d*(a + b*x + c*x**S(2))**n)/(d + e*x)**S(2), x), x, n*sqrt(-S(4)*a*c + b**S(2))*atanh((b + S(2)*c*x)/sqrt(-S(4)*a*c + b**S(2)))/(a*e**S(2) - b*d*e + c*d**S(2)) + n*(-b*e/S(2) + c*d)*log(a + b*x + c*x**S(2))/(e*(a*e**S(2) - b*d*e + c*d**S(2))) + n*(b*e - S(2)*c*d)*log(d + e*x)/(e*(a*e**S(2) - b*d*e + c*d**S(2))) - log(d*(a + b*x + c*x**S(2))**n)/(e*(d + e*x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(d*(a + b*x + c*x**S(2))**n)/(d + e*x)**S(3), x), x, n*sqrt(-S(4)*a*c + b**S(2))*(-b*e/S(2) + c*d)*atanh((b + S(2)*c*x)/sqrt(-S(4)*a*c + b**S(2)))/(a*e**S(2) - b*d*e + c*d**S(2))**S(2) + n*(b**S(2)*e**S(2)/S(4) + c**S(2)*d**S(2)/S(2) - c*e*(a*e + b*d)/S(2))*log(a + b*x + c*x**S(2))/(e*(a*e**S(2) - b*d*e + c*d**S(2))**S(2)) - n*(b**S(2)*e**S(2)/S(2) + c**S(2)*d**S(2) - c*e*(a*e + b*d))*log(d + e*x)/(e*(a*e**S(2) - b*d*e + c*d**S(2))**S(2)) + n*(-b*e/S(2) + c*d)/(e*(d + e*x)*(a*e**S(2) - b*d*e + c*d**S(2))) - log(d*(a + b*x + c*x**S(2))**n)/(S(2)*e*(d + e*x)**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(d*(a + b*x + c*x**S(2))**n)/(d + e*x)**S(4), x), x, n*sqrt(-S(4)*a*c + b**S(2))*(b**S(2)*e**S(2)/S(3) + c**S(2)*d**S(2) - c*e*(a*e + S(3)*b*d)/S(3))*atanh((b + S(2)*c*x)/sqrt(-S(4)*a*c + b**S(2)))/(a*e**S(2) - b*d*e + c*d**S(2))**S(3) - n*(-b*e/S(3) + S(2)*c*d/S(3))*(b**S(2)*e**S(2) + c**S(2)*d**S(2) - c*e*(S(3)*a*e + b*d))*log(d + e*x)/(e*(a*e**S(2) - b*d*e + c*d**S(2))**S(3)) + n*(-b*e/S(6) + c*d/S(3))*(b**S(2)*e**S(2) + c**S(2)*d**S(2) - c*e*(S(3)*a*e + b*d))*log(a + b*x + c*x**S(2))/(e*(a*e**S(2) - b*d*e + c*d**S(2))**S(3)) + n*(b**S(2)*e**S(2)/S(3) + S(2)*c**S(2)*d**S(2)/S(3) - S(2)*c*e*(a*e + b*d)/S(3))/(e*(d + e*x)*(a*e**S(2) - b*d*e + c*d**S(2))**S(2)) + n*(-b*e/S(6) + c*d/S(3))/(e*(d + e*x)**S(2)*(a*e**S(2) - b*d*e + c*d**S(2))) - log(d*(a + b*x + c*x**S(2))**n)/(S(3)*e*(d + e*x)**S(3)), expand=True, _diff=True, _numerical=True)
# long time assert rubi_test(rubi_integrate(log(d*(a + b*x + c*x**S(2))**n)/(d + e*x)**S(5), x), x, n*sqrt(-S(4)*a*c + b**S(2))*(-b*e/S(4) + c*d/S(2))*(b**S(2)*e**S(2) + S(2)*c**S(2)*d**S(2) - S(2)*c*e*(a*e + b*d))*atanh((b + S(2)*c*x)/sqrt(-S(4)*a*c + b**S(2)))/(a*e**S(2) - b*d*e + c*d**S(2))**S(4) + n*(b**S(4)*e**S(4)/S(8) - b**S(2)*c*e**S(3)*(a*e + b*d)/S(2) + c**S(4)*d**S(4)/S(4) - c**S(3)*d**S(2)*e*(S(3)*a*e + b*d)/S(2) + c**S(2)*e**S(2)*(a**S(2)*e**S(2) + S(6)*a*b*d*e + S(3)*b**S(2)*d**S(2))/S(4))*log(a + b*x + c*x**S(2))/(e*(a*e**S(2) - b*d*e + c*d**S(2))**S(4)) - n*(b**S(4)*e**S(4)/S(4) - b**S(2)*c*e**S(3)*(a*e + b*d) + c**S(4)*d**S(4)/S(2) - c**S(3)*d**S(2)*e*(S(3)*a*e + b*d) + c**S(2)*e**S(2)*(a**S(2)*e**S(2) + S(6)*a*b*d*e + S(3)*b**S(2)*d**S(2))/S(2))*log(d + e*x)/(e*(a*e**S(2) - b*d*e + c*d**S(2))**S(4)) + n*(-b*e/S(4) + c*d/S(2))*(b**S(2)*e**S(2) + c**S(2)*d**S(2) - c*e*(S(3)*a*e + b*d))/(e*(d + e*x)*(a*e**S(2) - b*d*e + c*d**S(2))**S(3)) + n*(b**S(2)*e**S(2)/S(8) + c**S(2)*d**S(2)/S(4) - c*e*(a*e + b*d)/S(4))/(e*(d + e*x)**S(2)*(a*e**S(2) - b*d*e + c*d**S(2))**S(2)) + n*(-b*e/S(12) + c*d/S(6))/(e*(d + e*x)**S(3)*(a*e**S(2) - b*d*e + c*d**S(2))) - log(d*(a + b*x + c*x**S(2))**n)/(S(4)*e*(d + e*x)**S(4)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(d*(a + c*x**S(2))**n)/(a*e + c*e*x**S(2)), x), x, S(2)*n*log(S(2)*I*sqrt(a)/(I*sqrt(a) - sqrt(c)*x))*atan(sqrt(c)*x/sqrt(a))/(sqrt(a)*sqrt(c)*e) + I*n*atan(sqrt(c)*x/sqrt(a))**S(2)/(sqrt(a)*sqrt(c)*e) + I*n*polylog(S(2), (-sqrt(a) + I*sqrt(c)*x)/(sqrt(a) + I*sqrt(c)*x))/(sqrt(a)*sqrt(c)*e) + log(d*(a + c*x**S(2))**n)*atan(sqrt(c)*x/sqrt(a))/(sqrt(a)*sqrt(c)*e), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(d*(a + b*x + c*x**S(2))**n)/(a*e + b*e*x + c*e*x**S(2)), x), x, -S(4)*n*log(S(2)/(-b/sqrt(-S(4)*a*c + b**S(2)) - S(2)*c*x/sqrt(-S(4)*a*c + b**S(2)) + S(1)))*atanh(b/sqrt(-S(4)*a*c + b**S(2)) + S(2)*c*x/sqrt(-S(4)*a*c + b**S(2)))/(e*sqrt(-S(4)*a*c + b**S(2))) + S(2)*n*atanh(b/sqrt(-S(4)*a*c + b**S(2)) + S(2)*c*x/sqrt(-S(4)*a*c + b**S(2)))**S(2)/(e*sqrt(-S(4)*a*c + b**S(2))) - S(2)*n*polylog(S(2), (-b/sqrt(-S(4)*a*c + b**S(2)) - S(2)*c*x/sqrt(-S(4)*a*c + b**S(2)) + S(-1))/(-b/sqrt(-S(4)*a*c + b**S(2)) - S(2)*c*x/sqrt(-S(4)*a*c + b**S(2)) + S(1)))/(e*sqrt(-S(4)*a*c + b**S(2))) - S(2)*log(d*(a + b*x + c*x**S(2))**n)*atanh((b + S(2)*c*x)/sqrt(-S(4)*a*c + b**S(2)))/(e*sqrt(-S(4)*a*c + b**S(2))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(g*(a + b*x + c*x**S(2))**n)/(d + e*x**S(2)), x), x, n*log(sqrt(e)*(-b - S(2)*c*x + sqrt(-S(4)*a*c + b**S(2)))/(S(2)*c*sqrt(-d) - sqrt(e)*(b - sqrt(-S(4)*a*c + b**S(2)))))*log(sqrt(e)*x + sqrt(-d))/(S(2)*sqrt(e)*sqrt(-d)) - n*log(sqrt(e)*(b + S(2)*c*x - sqrt(-S(4)*a*c + b**S(2)))/(S(2)*c*sqrt(-d) + sqrt(e)*(b - sqrt(-S(4)*a*c + b**S(2)))))*log(-sqrt(e)*x + sqrt(-d))/(S(2)*sqrt(e)*sqrt(-d)) + n*log(sqrt(e)*(-b - S(2)*c*x - sqrt(-S(4)*a*c + b**S(2)))/(S(2)*c*sqrt(-d) - sqrt(e)*(b + sqrt(-S(4)*a*c + b**S(2)))))*log(sqrt(e)*x + sqrt(-d))/(S(2)*sqrt(e)*sqrt(-d)) - n*log(sqrt(e)*(b + S(2)*c*x + sqrt(-S(4)*a*c + b**S(2)))/(S(2)*c*sqrt(-d) + sqrt(e)*(b + sqrt(-S(4)*a*c + b**S(2)))))*log(-sqrt(e)*x + sqrt(-d))/(S(2)*sqrt(e)*sqrt(-d)) + n*polylog(S(2), S(2)*c*(sqrt(e)*x + sqrt(-d))/(S(2)*c*sqrt(-d) - sqrt(e)*(b - sqrt(-S(4)*a*c + b**S(2)))))/(S(2)*sqrt(e)*sqrt(-d)) - n*polylog(S(2), S(2)*c*(-sqrt(e)*x + sqrt(-d))/(S(2)*c*sqrt(-d) + sqrt(e)*(b - sqrt(-S(4)*a*c + b**S(2)))))/(S(2)*sqrt(e)*sqrt(-d)) + n*polylog(S(2), S(2)*c*(sqrt(e)*x + sqrt(-d))/(S(2)*c*sqrt(-d) - sqrt(e)*(b + sqrt(-S(4)*a*c + b**S(2)))))/(S(2)*sqrt(e)*sqrt(-d)) - n*polylog(S(2), S(2)*c*(-sqrt(e)*x + sqrt(-d))/(S(2)*c*sqrt(-d) + sqrt(e)*(b + sqrt(-S(4)*a*c + b**S(2)))))/(S(2)*sqrt(e)*sqrt(-d)) + log(g*(a + b*x + c*x**S(2))**n)*log(-sqrt(e)*x + sqrt(-d))/(S(2)*sqrt(e)*sqrt(-d)) - log(g*(a + b*x + c*x**S(2))**n)*log(sqrt(e)*x + sqrt(-d))/(S(2)*sqrt(e)*sqrt(-d)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(g*(a + b*x + c*x**S(2))**n)/(d + e*x + f*x**S(2)), x), x, -n*log(f*(b + S(2)*c*x + sqrt(-S(4)*a*c + b**S(2)))/(-c*(e - sqrt(-S(4)*d*f + e**S(2))) + f*(b + sqrt(-S(4)*a*c + b**S(2)))))*log(e + S(2)*f*x - sqrt(-S(4)*d*f + e**S(2)))/sqrt(-S(4)*d*f + e**S(2)) + n*log(f*(b + S(2)*c*x - sqrt(-S(4)*a*c + b**S(2)))/(-c*(e + sqrt(-S(4)*d*f + e**S(2))) + f*(b - sqrt(-S(4)*a*c + b**S(2)))))*log(e + S(2)*f*x + sqrt(-S(4)*d*f + e**S(2)))/sqrt(-S(4)*d*f + e**S(2)) + n*log(f*(b + S(2)*c*x + sqrt(-S(4)*a*c + b**S(2)))/(-c*(e + sqrt(-S(4)*d*f + e**S(2))) + f*(b + sqrt(-S(4)*a*c + b**S(2)))))*log(e + S(2)*f*x + sqrt(-S(4)*d*f + e**S(2)))/sqrt(-S(4)*d*f + e**S(2)) - n*log(-f*(b + S(2)*c*x - sqrt(-S(4)*a*c + b**S(2)))/(-b*f + c*e - c*sqrt(-S(4)*d*f + e**S(2)) + f*sqrt(-S(4)*a*c + b**S(2))))*log(e + S(2)*f*x - sqrt(-S(4)*d*f + e**S(2)))/sqrt(-S(4)*d*f + e**S(2)) - n*polylog(S(2), -c*(e + S(2)*f*x - sqrt(-S(4)*d*f + e**S(2)))/(-c*(e - sqrt(-S(4)*d*f + e**S(2))) + f*(b - sqrt(-S(4)*a*c + b**S(2)))))/sqrt(-S(4)*d*f + e**S(2)) - n*polylog(S(2), -c*(e + S(2)*f*x - sqrt(-S(4)*d*f + e**S(2)))/(-c*(e - sqrt(-S(4)*d*f + e**S(2))) + f*(b + sqrt(-S(4)*a*c + b**S(2)))))/sqrt(-S(4)*d*f + e**S(2)) + n*polylog(S(2), -c*(e + S(2)*f*x + sqrt(-S(4)*d*f + e**S(2)))/(-c*(e + sqrt(-S(4)*d*f + e**S(2))) + f*(b - sqrt(-S(4)*a*c + b**S(2)))))/sqrt(-S(4)*d*f + e**S(2)) + n*polylog(S(2), -c*(e + S(2)*f*x + sqrt(-S(4)*d*f + e**S(2)))/(-c*(e + sqrt(-S(4)*d*f + e**S(2))) + f*(b + sqrt(-S(4)*a*c + b**S(2)))))/sqrt(-S(4)*d*f + e**S(2)) + log(g*(a + b*x + c*x**S(2))**n)*log(e + S(2)*f*x - sqrt(-S(4)*d*f + e**S(2)))/sqrt(-S(4)*d*f + e**S(2)) - log(g*(a + b*x + c*x**S(2))**n)*log(e + S(2)*f*x + sqrt(-S(4)*d*f + e**S(2)))/sqrt(-S(4)*d*f + e**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(d*(b*x + c*x**S(2))**n)**S(2), x), x, -S(2)*b*n**S(2)*log(-c*x/b)*log(b + c*x)/c - b*n**S(2)*log(b + c*x)**S(2)/c - S(4)*b*n**S(2)*log(b + c*x)/c - S(2)*b*n**S(2)*polylog(S(2), (b + c*x)/b)/c + S(2)*b*n*log(d*(b*x + c*x**S(2))**n)*log(b + c*x)/c + S(8)*n**S(2)*x - S(4)*n*x*log(d*(b*x + c*x**S(2))**n) + x*log(d*(b*x + c*x**S(2))**n)**S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(d*(a + b*x + c*x**S(2))**n)**S(2), x), x, -S(2)*b*n**S(2)*log(a + b*x + c*x**S(2))/c + S(8)*n**S(2)*x - S(4)*n*x*log(d*(a + b*x + c*x**S(2))**n) + x*log(d*(a + b*x + c*x**S(2))**n)**S(2) - n**S(2)*(b - sqrt(-S(4)*a*c + b**S(2)))*log((b/S(2) + c*x + sqrt(-S(4)*a*c + b**S(2))/S(2))/sqrt(-S(4)*a*c + b**S(2)))*log(b + S(2)*c*x - sqrt(-S(4)*a*c + b**S(2)))/c - n**S(2)*(b - sqrt(-S(4)*a*c + b**S(2)))*log(b + S(2)*c*x - sqrt(-S(4)*a*c + b**S(2)))**S(2)/(S(2)*c) - n**S(2)*(b - sqrt(-S(4)*a*c + b**S(2)))*polylog(S(2), (-b/S(2) - c*x + sqrt(-S(4)*a*c + b**S(2))/S(2))/sqrt(-S(4)*a*c + b**S(2)))/c - n**S(2)*(b + sqrt(-S(4)*a*c + b**S(2)))*log((-b/S(2) - c*x + sqrt(-S(4)*a*c + b**S(2))/S(2))/sqrt(-S(4)*a*c + b**S(2)))*log(b + S(2)*c*x + sqrt(-S(4)*a*c + b**S(2)))/c - n**S(2)*(b + sqrt(-S(4)*a*c + b**S(2)))*log(b + S(2)*c*x + sqrt(-S(4)*a*c + b**S(2)))**S(2)/(S(2)*c) - n**S(2)*(b + sqrt(-S(4)*a*c + b**S(2)))*polylog(S(2), (b/S(2) + c*x + sqrt(-S(4)*a*c + b**S(2))/S(2))/sqrt(-S(4)*a*c + b**S(2)))/c - S(4)*n**S(2)*sqrt(-S(4)*a*c + b**S(2))*atanh((b + S(2)*c*x)/sqrt(-S(4)*a*c + b**S(2)))/c + n*(b - sqrt(-S(4)*a*c + b**S(2)))*log(d*(a + b*x + c*x**S(2))**n)*log(b + S(2)*c*x - sqrt(-S(4)*a*c + b**S(2)))/c + n*(b + sqrt(-S(4)*a*c + b**S(2)))*log(d*(a + b*x + c*x**S(2))**n)*log(b + S(2)*c*x + sqrt(-S(4)*a*c + b**S(2)))/c, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)*log(x**S(2) + x + S(1))/(x**S(2) + S(3)*x + S(2)), x), x, x*log(x**S(2) + x + S(1)) - S(2)*x - log((-S(2)*x + S(-1) - sqrt(S(3))*I)/(S(1) - sqrt(S(3))*I))*log(S(2)*x + S(2)) - log((-S(2)*x + S(-1) + sqrt(S(3))*I)/(S(1) + sqrt(S(3))*I))*log(S(2)*x + S(2)) + S(4)*log((-S(2)*x + S(-1) - sqrt(S(3))*I)/(S(3) - sqrt(S(3))*I))*log(S(2)*x + S(4)) + S(4)*log((-S(2)*x + S(-1) + sqrt(S(3))*I)/(S(3) + sqrt(S(3))*I))*log(S(2)*x + S(4)) + log(S(2)*x + S(2))*log(x**S(2) + x + S(1)) - S(4)*log(S(2)*x + S(4))*log(x**S(2) + x + S(1)) + log(x**S(2) + x + S(1))/S(2) + sqrt(S(3))*atan(sqrt(S(3))*(S(2)*x + S(1))/S(3)) - polylog(S(2), (S(2)*x + S(2))/(S(1) - sqrt(S(3))*I)) - polylog(S(2), (S(2)*x + S(2))/(S(1) + sqrt(S(3))*I)) + S(4)*polylog(S(2), (S(2)*x + S(4))/(S(3) - sqrt(S(3))*I)) + S(4)*polylog(S(2), (S(2)*x + S(4))/(S(3) + sqrt(S(3))*I)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x**S(2) + x + S(1))**S(2), x), x, x*log(x**S(2) + x + S(1))**S(2) - S(4)*x*log(x**S(2) + x + S(1)) + S(8)*x - (S(1) + sqrt(S(3))*I)*log(sqrt(S(3))*I*(S(2)*x + S(1) - sqrt(S(3))*I)/S(6))*log(S(2)*x + S(1) + sqrt(S(3))*I) - (S(1) - sqrt(S(3))*I)*log(-sqrt(S(3))*I*(S(2)*x + S(1) + sqrt(S(3))*I)/S(6))*log(S(2)*x + S(1) - sqrt(S(3))*I) - (S(1) - sqrt(S(3))*I)*log(S(2)*x + S(1) - sqrt(S(3))*I)**S(2)/S(2) + (S(1) - sqrt(S(3))*I)*log(S(2)*x + S(1) - sqrt(S(3))*I)*log(x**S(2) + x + S(1)) - (S(1) + sqrt(S(3))*I)*log(S(2)*x + S(1) + sqrt(S(3))*I)**S(2)/S(2) + (S(1) + sqrt(S(3))*I)*log(S(2)*x + S(1) + sqrt(S(3))*I)*log(x**S(2) + x + S(1)) - S(2)*log(x**S(2) + x + S(1)) - S(4)*sqrt(S(3))*atan(sqrt(S(3))*(S(2)*x + S(1))/S(3)) - (S(1) - sqrt(S(3))*I)*polylog(S(2), sqrt(S(3))*I*(S(2)*x + S(1) - sqrt(S(3))*I)/S(6)) - (S(1) + sqrt(S(3))*I)*polylog(S(2), -sqrt(S(3))*I*(S(2)*x + S(1) + sqrt(S(3))*I)/S(6)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x**S(2) + x + S(-1))**S(2)/x**S(3), x), x, S(3)*log(x)*log((S(2)*x + S(1) + sqrt(S(5)))/(S(1) + sqrt(S(5)))) - S(3)*log(x)*log(x**S(2) + x + S(-1)) + log(x) - (sqrt(S(5)) + S(3))*log(sqrt(S(5))*(x + S(1)/2 + sqrt(S(5))/S(2))/S(5))*log(S(2)*x - sqrt(S(5)) + S(1))/S(2) - (-sqrt(S(5)) + S(3))*log(S(2)*x + S(1) + sqrt(S(5)))**S(2)/S(4) + (-sqrt(S(5)) + S(3))*log(S(2)*x + S(1) + sqrt(S(5)))*log(x**S(2) + x + S(-1))/S(2) - (-sqrt(S(5)) + S(1))*log(S(2)*x + S(1) + sqrt(S(5)))/S(2) - (sqrt(S(5)) + S(3))*log(S(2)*x - sqrt(S(5)) + S(1))**S(2)/S(4) + (sqrt(S(5)) + S(3))*log(S(2)*x - sqrt(S(5)) + S(1))*log(x**S(2) + x + S(-1))/S(2) - (S(1) + sqrt(S(5)))*log(S(2)*x - sqrt(S(5)) + S(1))/S(2) + S(3)*log(S(-1)/2 + sqrt(S(5))/S(2))*log(S(2)*x - sqrt(S(5)) + S(1)) - (-sqrt(S(5)) + S(3))*log(S(2)*sqrt(S(5)))*log(S(2)*x - sqrt(S(5)) + S(1))/S(2) - (sqrt(S(5)) + S(3))*polylog(S(2), sqrt(S(5))*(-x + S(-1)/2 + sqrt(S(5))/S(2))/S(5))/S(2) + (-sqrt(S(5)) + S(3))*polylog(S(2), sqrt(S(5))*(-x + S(-1)/2 + sqrt(S(5))/S(2))/S(5))/S(2) + S(3)*polylog(S(2), -S(2)*x/(S(1) + sqrt(S(5)))) - S(3)*polylog(S(2), (S(2)*x - sqrt(S(5)) + S(1))/(-sqrt(S(5)) + S(1))) + log(x**S(2) + x + S(-1))/x - log(x**S(2) + x + S(-1))**S(2)/(S(2)*x**S(2)), expand=True, _diff=True, _numerical=True)
# Family: x**m * log(4*x + 4*sqrt(x*(x - 1)) - 1) for integer m = 3..-3 and
# half-integer m = 3/2..-5/2. Antiderivatives combine sqrt(x**2 - x) terms,
# log(8*x + 1), and atanh/atan of algebraic arguments. One Integral(...) result
# (m = -1) records that Rubi intentionally returns the input unevaluated.
assert rubi_test(rubi_integrate(x**S(3)*log(S(4)*x + S(4)*sqrt(x*(x + S(-1))) + S(-1)), x), x, x**S(4)*log(S(4)*x + S(4)*sqrt(x**S(2) - x) + S(-1))/S(4) - x**S(4)/S(32) + x**S(3)/S(192) - x**S(2)/S(1024) - x*(x**S(2) - x)**(S(3)/2)/S(32) + x/S(4096) + (-S(149)*x/S(1024) + S(149)/2048)*sqrt(x**S(2) - x) - (x**S(2) - x)**(S(3)/2)/S(12) - S(683)*sqrt(x**S(2) - x)/S(4096) - log(S(8)*x + S(1))/S(32768) - S(1537)*atanh(x/sqrt(x**S(2) - x))/S(16384) + atanh((-S(5)*x/S(3) + S(1)/6)/sqrt(x**S(2) - x))/S(32768), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)*log(S(4)*x + S(4)*sqrt(x*(x + S(-1))) + S(-1)), x), x, x**S(3)*log(S(4)*x + S(4)*sqrt(x**S(2) - x) + S(-1))/S(3) - x**S(3)/S(18) + x**S(2)/S(96) - x/S(384) + (-S(5)*x/S(32) + S(5)/64)*sqrt(x**S(2) - x) - (x**S(2) - x)**(S(3)/2)/S(18) - S(85)*sqrt(x**S(2) - x)/S(384) + log(S(8)*x + S(1))/S(3072) - S(223)*atanh(x/sqrt(x**S(2) - x))/S(1536) - atanh((-S(5)*x/S(3) + S(1)/6)/sqrt(x**S(2) - x))/S(3072), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(S(4)*x + S(4)*sqrt(x*(x + S(-1))) + S(-1)), x), x, x**S(2)*log(S(4)*x + S(4)*sqrt(x**S(2) - x) + S(-1))/S(2) - x**S(2)/S(8) + x/S(32) + (-x/S(8) + S(1)/16)*sqrt(x**S(2) - x) - S(11)*sqrt(x**S(2) - x)/S(32) - log(S(8)*x + S(1))/S(256) - S(33)*atanh(x/sqrt(x**S(2) - x))/S(128) + atanh((-S(5)*x/S(3) + S(1)/6)/sqrt(x**S(2) - x))/S(256), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(S(4)*x + S(4)*sqrt(x*(x + S(-1))) + S(-1)), x), x, x*log(S(4)*x + S(4)*sqrt(x**S(2) - x) + S(-1)) - x/S(2) - sqrt(x**S(2) - x)/S(2) + log(S(8)*x + S(1))/S(16) - S(7)*atanh(x/sqrt(x**S(2) - x))/S(8) - atanh((-S(5)*x/S(3) + S(1)/6)/sqrt(x**S(2) - x))/S(16), expand=True, _diff=True, _numerical=True)
# m = -1: expected to remain an unevaluated Integral (no known closed form).
assert rubi_test(rubi_integrate(log(S(4)*x + S(4)*sqrt(x*(x + S(-1))) + S(-1))/x, x), x, Integral(log(S(4)*x + S(4)*sqrt(x**S(2) - x) + S(-1))/x, x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(S(4)*x + S(4)*sqrt(x*(x + S(-1))) + S(-1))/x**S(2), x), x, S(4)*log(x) - S(4)*log(S(8)*x + S(1)) + S(4)*atanh((-S(5)*x/S(3) + S(1)/6)/sqrt(x**S(2) - x)) + S(4)*sqrt(x**S(2) - x)/x - log(S(4)*x + S(4)*sqrt(x**S(2) - x) + S(-1))/x, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(S(4)*x + S(4)*sqrt(x*(x + S(-1))) + S(-1))/x**S(3), x), x, -S(16)*log(x) + S(16)*log(S(8)*x + S(1)) - S(16)*atanh((-S(5)*x/S(3) + S(1)/6)/sqrt(x**S(2) - x)) - S(10)*sqrt(x**S(2) - x)/x - S(2)/x - log(S(4)*x + S(4)*sqrt(x**S(2) - x) + S(-1))/(S(2)*x**S(2)) - S(2)*(x**S(2) - x)**(S(3)/2)/(S(3)*x**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**(S(3)/2)*log(S(4)*x + S(4)*sqrt(x*(x + S(-1))) + S(-1)), x), x, S(2)*x**(S(5)/2)*log(S(4)*x + S(4)*sqrt(x**S(2) - x) + S(-1))/S(5) - S(2)*x**(S(5)/2)/S(25) + x**(S(3)/2)/S(60) - sqrt(x)/S(160) + sqrt(S(2))*atan(S(2)*sqrt(S(2))*sqrt(x))/S(640) - S(2)*(x**S(2) - x)**(S(3)/2)/(S(25)*sqrt(x)) - S(127)*sqrt(x**S(2) - x)/(S(480)*sqrt(x)) - sqrt(S(2))*sqrt(x**S(2) - x)*atan(S(2)*sqrt(S(2))*sqrt(x + S(-1))/S(3))/(S(640)*sqrt(x)*sqrt(x + S(-1))) - (-S(2)*x/S(15) + S(2)/15)*sqrt(x**S(2) - x)/(sqrt(x)*(sqrt(x) + S(1))) - (-S(2)*x/S(15) + S(2)/15)*sqrt(x**S(2) - x)/(sqrt(x)*(-sqrt(x) + S(1))) - S(71)*(x**S(2) - x)**(S(3)/2)/(S(300)*x**(S(3)/2)), expand=True, _diff=True, _numerical=True)
# NOTE(review): the next test is intentionally disabled — per the original
# comment it fails because of SymPy's apart() during verification.
# failing due to apart assert rubi_test(rubi_integrate(sqrt(x)*log(S(4)*x + S(4)*sqrt(x*(x + S(-1))) + S(-1)), x), x, S(2)*x**(S(3)/2)*log(S(4)*x + S(4)*sqrt(x**S(2) - x) + S(-1))/S(3) - S(2)*x**(S(3)/2)/S(9) + sqrt(x)/S(12) - sqrt(S(2))*atan(S(2)*sqrt(S(2))*sqrt(x))/S(48) - S(17)*sqrt(x**S(2) - x)/(S(36)*sqrt(x)) + sqrt(S(2))*sqrt(x**S(2) - x)*atan(S(2)*sqrt(S(2))*sqrt(x + S(-1))/S(3))/(S(48)*sqrt(x)*sqrt(x + S(-1))) - (-S(2)*x/S(9) + S(2)/9)*sqrt(x**S(2) - x)/(sqrt(x)*(sqrt(x) + S(1))) - (-S(2)*x/S(9) + S(2)/9)*sqrt(x**S(2) - x)/(sqrt(x)*(-sqrt(x) + S(1))) - S(2)*(x**S(2) - x)**(S(3)/2)/(S(9)*x**(S(3)/2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(S(4)*x + S(4)*sqrt(x*(x + S(-1))) + S(-1))/sqrt(x), x), x, S(2)*sqrt(x)*log(S(4)*x + S(4)*sqrt(x**S(2) - x) + S(-1)) - S(2)*sqrt(x) + sqrt(S(2))*atan(S(2)*sqrt(S(2))*sqrt(x))/S(2) - S(2)*sqrt(x**S(2) - x)/(S(3)*sqrt(x)) - sqrt(S(2))*sqrt(x**S(2) - x)*atan(S(2)*sqrt(S(2))*sqrt(x + S(-1))/S(3))/(S(2)*sqrt(x)*sqrt(x + S(-1))) - (-S(2)*x/S(3) + S(2)/3)*sqrt(x**S(2) - x)/(sqrt(x)*(sqrt(x) + S(1))) - (-S(2)*x/S(3) + S(2)/3)*sqrt(x**S(2) - x)/(sqrt(x)*(-sqrt(x) + S(1))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(S(4)*x + S(4)*sqrt(x*(x + S(-1))) + S(-1))/x**(S(3)/2), x), x, S(4)*sqrt(S(2))*atan(S(2)*sqrt(S(2))*sqrt(x)) - S(8)*atan(sqrt(x)/sqrt(x**S(2) - x)) - S(4)*sqrt(x**S(2) - x)/(S(3)*sqrt(x)) - S(2)*log(S(4)*x + S(4)*sqrt(x**S(2) - x) + S(-1))/sqrt(x) - S(4)*sqrt(S(2))*sqrt(x**S(2) - x)*atan(S(2)*sqrt(S(2))*sqrt(x + S(-1))/S(3))/(sqrt(x)*sqrt(x + S(-1))) + (-S(2)*x/S(3) + S(2)/3)*sqrt(x**S(2) - x)/(sqrt(x)*(sqrt(x) + S(1))) + (-S(2)*x/S(3) + S(2)/3)*sqrt(x**S(2) - x)/(sqrt(x)*(-sqrt(x) + S(1))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(S(4)*x + S(4)*sqrt(x*(x + S(-1))) + S(-1))/x**(S(5)/2), x), x, -S(32)*sqrt(S(2))*atan(S(2)*sqrt(S(2))*sqrt(x))/S(3) + S(44)*atan(sqrt(x)/sqrt(x**S(2) - x))/S(3) - S(4)*sqrt(x**S(2) - x)/(S(9)*sqrt(x)) - S(16)/(S(3)*sqrt(x)) + S(32)*sqrt(S(2))*sqrt(x**S(2) - x)*atan(S(2)*sqrt(S(2))*sqrt(x + S(-1))/S(3))/(S(3)*sqrt(x)*sqrt(x + S(-1))) + (-S(2)*x/S(9) + S(2)/9)*sqrt(x**S(2) - x)/(sqrt(x)*(sqrt(x) + S(1))) + (-S(2)*x/S(9) + S(2)/9)*sqrt(x**S(2) - x)/(sqrt(x)*(-sqrt(x) + S(1))) + S(4)*sqrt(x**S(2) - x)/(S(3)*x**(S(3)/2)) - S(2)*log(S(4)*x + S(4)*sqrt(x**S(2) - x) + S(-1))/(S(3)*x**(S(3)/2)), expand=True, _diff=True, _numerical=True)
# Family: log((a + b*x**n)/x**n) divided by x or by (c + d*x).
# Closed forms use dilogarithms polylog(2, ...); the generic-exponent cases
# (x**(-n) forms) parallel the explicit n = 1 and n = 2 cases above them.
assert rubi_test(rubi_integrate(log((a + x)/x)/x, x), x, polylog(S(2), -a/x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log((a + x**S(2))/x**S(2))/x, x), x, polylog(S(2), -a/x**S(2))/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x**(-n)*(a + x**n))/x, x), x, polylog(S(2), -a*x**(-n))/n, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log((a + b*x)/x)/x, x), x, -log(-a/(b*x))*log(a/x + b) - polylog(S(2), (a/x + b)/b), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log((a + b*x**S(2))/x**S(2))/x, x), x, -log(-a/(b*x**S(2)))*log(a/x**S(2) + b)/S(2) - polylog(S(2), (a/x**S(2) + b)/b)/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x**(-n)*(a + b*x**n))/x, x), x, -log(-a*x**(-n)/b)*log(a*x**(-n) + b)/n - polylog(S(2), (a*x**(-n) + b)/b)/n, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log((a + b*x)/x)/(c + d*x), x), x, log((a + b*x)/x)*log(c + d*x)/d + log(-d*x/c)*log(c + d*x)/d - log(-d*(a + b*x)/(-a*d + b*c))*log(c + d*x)/d + polylog(S(2), (c + d*x)/c)/d - polylog(S(2), b*(c + d*x)/(-a*d + b*c))/d, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log((a + b*x**S(2))/x**S(2))/(c + d*x), x), x, S(2)*log(-d*x/c)*log(c + d*x)/d - log(-d*(sqrt(b)*x + sqrt(-a))/(sqrt(b)*c - d*sqrt(-a)))*log(c + d*x)/d - log(d*(-sqrt(b)*x + sqrt(-a))/(sqrt(b)*c + d*sqrt(-a)))*log(c + d*x)/d + log(c + d*x)*log(a/x**S(2) + b)/d + S(2)*polylog(S(2), (c + d*x)/c)/d - polylog(S(2), sqrt(b)*(c + d*x)/(sqrt(b)*c - d*sqrt(-a)))/d - polylog(S(2), sqrt(b)*(c + d*x)/(sqrt(b)*c + d*sqrt(-a)))/d, expand=True, _diff=True, _numerical=True)
# NOTE(review): the next test is intentionally disabled — per the original
# comment it hits recursion when checked against SymPy and Mathematica.
# recursion sympy and mathematica assert rubi_test(rubi_integrate(log(x**(-n)*(a + b*x**n))/(c + d*x), x), x, a*n*Integral(log(c + d*x)/(x*(a + b*x**n)), x)/d + log(c + d*x)*log(a*x**(-n) + b)/d, expand=True, _diff=True, _numerical=True)
# Family: log(e*((a + b*x)/(c + d*x))**n)**p integrated alone (p = 4..1),
# its reciprocal powers, and the same over x. Positive powers reduce to
# polylog(2..5); reciprocal powers are expected to stay unevaluated Integrals.
assert rubi_test(rubi_integrate(log(e*((a + b*x)/(c + d*x))**n)**S(4), x), x, (a + b*x)*log(e*((a + b*x)/(c + d*x))**n)**S(4)/b + n**S(4)*(-S(24)*a*d + S(24)*b*c)*polylog(S(4), d*(a + b*x)/(b*(c + d*x)))/(b*d) - n**S(3)*(-S(24)*a*d + S(24)*b*c)*log(e*((a + b*x)/(c + d*x))**n)*polylog(S(3), d*(a + b*x)/(b*(c + d*x)))/(b*d) + n**S(2)*(-S(12)*a*d + S(12)*b*c)*log(e*((a + b*x)/(c + d*x))**n)**S(2)*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/(b*d) + n*(-S(4)*a*d + S(4)*b*c)*log(e*((a + b*x)/(c + d*x))**n)**S(3)*log((-a*d + b*c)/(b*(c + d*x)))/(b*d), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*((a + b*x)/(c + d*x))**n)**S(3), x), x, (a + b*x)*log(e*((a + b*x)/(c + d*x))**n)**S(3)/b - n**S(3)*(-S(6)*a*d + S(6)*b*c)*polylog(S(3), d*(a + b*x)/(b*(c + d*x)))/(b*d) + n**S(2)*(-S(6)*a*d + S(6)*b*c)*log(e*((a + b*x)/(c + d*x))**n)*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/(b*d) + n*(-S(3)*a*d + S(3)*b*c)*log(e*((a + b*x)/(c + d*x))**n)**S(2)*log((-a*d + b*c)/(b*(c + d*x)))/(b*d), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*((a + b*x)/(c + d*x))**n)**S(2), x), x, (a + b*x)*log(e*((a + b*x)/(c + d*x))**n)**S(2)/b + n**S(2)*(-S(2)*a*d + S(2)*b*c)*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/(b*d) + n*(-S(2)*a*d + S(2)*b*c)*log(e*((a + b*x)/(c + d*x))**n)*log((-a*d + b*c)/(b*(c + d*x)))/(b*d), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*((a + b*x)/(c + d*x))**n), x), x, (a + b*x)*log(e*((a + b*x)/(c + d*x))**n)/b - n*(-a*d + b*c)*log(c + d*x)/(b*d), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/log(e*((a + b*x)/(c + d*x))**n), x), x, Integral(S(1)/log(e*((a + b*x)/(c + d*x))**n), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*((a + b*x)/(c + d*x))**n)**(S(-2)), x), x, Integral(log(e*((a + b*x)/(c + d*x))**n)**(S(-2)), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*((a + b*x)/(c + d*x))**n)**S(3)/x, x), x, S(6)*n**S(3)*polylog(S(4), c*(a + b*x)/(a*(c + d*x))) - S(6)*n**S(3)*polylog(S(4), d*(a + b*x)/(b*(c + d*x))) - S(6)*n**S(2)*log(e*((a + b*x)/(c + d*x))**n)*polylog(S(3), c*(a + b*x)/(a*(c + d*x))) + S(6)*n**S(2)*log(e*((a + b*x)/(c + d*x))**n)*polylog(S(3), d*(a + b*x)/(b*(c + d*x))) + S(3)*n*log(e*((a + b*x)/(c + d*x))**n)**S(2)*polylog(S(2), c*(a + b*x)/(a*(c + d*x))) - S(3)*n*log(e*((a + b*x)/(c + d*x))**n)**S(2)*polylog(S(2), d*(a + b*x)/(b*(c + d*x))) - log(e*((a + b*x)/(c + d*x))**n)**S(3)*log((-a*d + b*c)/(b*(c + d*x))) + log(e*((a + b*x)/(c + d*x))**n)**S(3)*log(x*(a*d - b*c)/(a*(c + d*x))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*((a + b*x)/(c + d*x))**n)**S(2)/x, x), x, -S(2)*n**S(2)*polylog(S(3), c*(a + b*x)/(a*(c + d*x))) + S(2)*n**S(2)*polylog(S(3), d*(a + b*x)/(b*(c + d*x))) + S(2)*n*log(e*((a + b*x)/(c + d*x))**n)*polylog(S(2), c*(a + b*x)/(a*(c + d*x))) - S(2)*n*log(e*((a + b*x)/(c + d*x))**n)*polylog(S(2), d*(a + b*x)/(b*(c + d*x))) - log(e*((a + b*x)/(c + d*x))**n)**S(2)*log((-a*d + b*c)/(b*(c + d*x))) + log(e*((a + b*x)/(c + d*x))**n)**S(2)*log(x*(a*d - b*c)/(a*(c + d*x))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*((a + b*x)/(c + d*x))**n)/x, x), x, -n*log(x)*log((a + b*x)/a) + n*log(x)*log((c + d*x)/c) - n*polylog(S(2), -b*x/a) + n*polylog(S(2), -d*x/c) + log(x)*log(e*((a + b*x)/(c + d*x))**n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x*log(e*((a + b*x)/(c + d*x))**n)), x), x, Integral(S(1)/(x*log(e*((a + b*x)/(c + d*x))**n)), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x*log(e*((a + b*x)/(c + d*x))**n)**S(2)), x), x, Integral(S(1)/(x*log(e*((a + b*x)/(c + d*x))**n)**S(2)), x), expand=True, _diff=True, _numerical=True)
# Family: (a + b*x)**m * log(e*(a + b*x)/(c + d*x))**p for integer m = 3..-3
# and p = 1..3. Results involve log and polylog(2..4) of Möbius-transformed
# arguments; the recurring factor (-a*d + b*c) is the 2x2 determinant of the
# linear fractional transform.
assert rubi_test(rubi_integrate((a + b*x)**S(3)*log(e*(a + b*x)/(c + d*x)), x), x, -x*(-a*d + b*c)**S(3)/(S(4)*d**S(3)) + (a + b*x)**S(4)*log(e*(a + b*x)/(c + d*x))/(S(4)*b) - (a + b*x)**S(3)*(-a*d/S(12) + b*c/S(12))/(b*d) + (a + b*x)**S(2)*(-a*d + b*c)**S(2)/(S(8)*b*d**S(2)) + (-a*d + b*c)**S(4)*log(c + d*x)/(S(4)*b*d**S(4)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*x)**S(2)*log(e*(a + b*x)/(c + d*x)), x), x, x*(-a*d + b*c)**S(2)/(S(3)*d**S(2)) + (a + b*x)**S(3)*log(e*(a + b*x)/(c + d*x))/(S(3)*b) - (a + b*x)**S(2)*(-a*d/S(6) + b*c/S(6))/(b*d) - (-a*d + b*c)**S(3)*log(c + d*x)/(S(3)*b*d**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*x)*log(e*(a + b*x)/(c + d*x)), x), x, x*(a*d/S(2) - b*c/S(2))/d + (a + b*x)**S(2)*log(e*(a + b*x)/(c + d*x))/(S(2)*b) + (-a*d + b*c)**S(2)*log(c + d*x)/(S(2)*b*d**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*(a + b*x)/(c + d*x)), x), x, (a + b*x)*log(e*(a + b*x)/(c + d*x))/b - (-a*d + b*c)*log(c + d*x)/(b*d), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*(a + b*x)/(c + d*x))/(a + b*x), x), x, -log((a*d - b*c)/(d*(a + b*x)))*log(e*(a + b*x)/(c + d*x))/b + polylog(S(2), b*(c + d*x)/(d*(a + b*x)))/b, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*(a + b*x)/(c + d*x))/(a + b*x)**S(2), x), x, -(c + d*x)*log(e*(a + b*x)/(c + d*x))/((a + b*x)*(-a*d + b*c)) - S(1)/(b*(a + b*x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*(a + b*x)/(c + d*x))/(a + b*x)**S(3), x), x, d**S(2)*log(a + b*x)/(S(2)*b*(-a*d + b*c)**S(2)) - d**S(2)*log(c + d*x)/(S(2)*b*(-a*d + b*c)**S(2)) + d/(S(2)*b*(a + b*x)*(-a*d + b*c)) - log(e*(a + b*x)/(c + d*x))/(S(2)*b*(a + b*x)**S(2)) - S(1)/(S(4)*b*(a + b*x)**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*x)**S(3)*log(e*(a + b*x)/(c + d*x))**S(2), x), x, -S(5)*x*(-a*d + b*c)**S(3)/(S(12)*d**S(3)) + (a + b*x)**S(4)*log(e*(a + b*x)/(c + d*x))**S(2)/(S(4)*b) - (a + b*x)**S(3)*(-a*d/S(6) + b*c/S(6))*log(e*(a + b*x)/(c + d*x))/(b*d) + (a + b*x)**S(2)*(-a*d + b*c)**S(2)*log(e*(a + b*x)/(c + d*x))/(S(4)*b*d**S(2)) + (a + b*x)**S(2)*(-a*d + b*c)**S(2)/(S(12)*b*d**S(2)) - (a + b*x)*(-a*d + b*c)**S(3)*log(e*(a + b*x)/(c + d*x))/(S(2)*b*d**S(3)) - (-a*d + b*c)**S(4)*log((-a*d + b*c)/(b*(c + d*x)))*log(e*(a + b*x)/(c + d*x))/(S(2)*b*d**S(4)) + S(11)*(-a*d + b*c)**S(4)*log(c + d*x)/(S(12)*b*d**S(4)) - (-a*d + b*c)**S(4)*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/(S(2)*b*d**S(4)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*x)**S(2)*log(e*(a + b*x)/(c + d*x))**S(2), x), x, x*(-a*d + b*c)**S(2)/(S(3)*d**S(2)) + (a + b*x)**S(3)*log(e*(a + b*x)/(c + d*x))**S(2)/(S(3)*b) - (a + b*x)**S(2)*(-a*d/S(3) + b*c/S(3))*log(e*(a + b*x)/(c + d*x))/(b*d) + S(2)*(a + b*x)*(-a*d + b*c)**S(2)*log(e*(a + b*x)/(c + d*x))/(S(3)*b*d**S(2)) + S(2)*(-a*d + b*c)**S(3)*log((-a*d + b*c)/(b*(c + d*x)))*log(e*(a + b*x)/(c + d*x))/(S(3)*b*d**S(3)) - (-a*d + b*c)**S(3)*log(c + d*x)/(b*d**S(3)) + S(2)*(-a*d + b*c)**S(3)*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/(S(3)*b*d**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*x)*log(e*(a + b*x)/(c + d*x))**S(2), x), x, (a + b*x)**S(2)*log(e*(a + b*x)/(c + d*x))**S(2)/(S(2)*b) + (a + b*x)*(a*d - b*c)*log(e*(a + b*x)/(c + d*x))/(b*d) - (-a*d + b*c)**S(2)*log((-a*d + b*c)/(b*(c + d*x)))*log(e*(a + b*x)/(c + d*x))/(b*d**S(2)) + (-a*d + b*c)**S(2)*log(c + d*x)/(b*d**S(2)) - (-a*d + b*c)**S(2)*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/(b*d**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*(a + b*x)/(c + d*x))**S(2), x), x, (a + b*x)*log(e*(a + b*x)/(c + d*x))**S(2)/b + (-S(2)*a*d + S(2)*b*c)*log((-a*d + b*c)/(b*(c + d*x)))*log(e*(a + b*x)/(c + d*x))/(b*d) + (-S(2)*a*d + S(2)*b*c)*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/(b*d), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*(a + b*x)/(c + d*x))**S(2)/(a + b*x), x), x, -log((a*d - b*c)/(d*(a + b*x)))*log(e*(a + b*x)/(c + d*x))**S(2)/b + S(2)*log(e*(a + b*x)/(c + d*x))*polylog(S(2), b*(c + d*x)/(d*(a + b*x)))/b + S(2)*polylog(S(3), b*(c + d*x)/(d*(a + b*x)))/b, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*(a + b*x)/(c + d*x))**S(2)/(a + b*x)**S(2), x), x, -(c + d*x)*log(e*(a + b*x)/(c + d*x))**S(2)/((a + b*x)*(-a*d + b*c)) - (S(2)*c + S(2)*d*x)*log(e*(a + b*x)/(c + d*x))/((a + b*x)*(-a*d + b*c)) - S(2)/(b*(a + b*x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*(a + b*x)/(c + d*x))**S(2)/(a + b*x)**S(3), x), x, -b*(c + d*x)**S(2)*log(e*(a + b*x)/(c + d*x))**S(2)/(S(2)*(a + b*x)**S(2)*(-a*d + b*c)**S(2)) - b*(c + d*x)**S(2)*log(e*(a + b*x)/(c + d*x))/(S(2)*(a + b*x)**S(2)*(-a*d + b*c)**S(2)) - b*(c + d*x)**S(2)/(S(4)*(a + b*x)**S(2)*(-a*d + b*c)**S(2)) + d*(c + d*x)*log(e*(a + b*x)/(c + d*x))**S(2)/((a + b*x)*(-a*d + b*c)**S(2)) + S(2)*d*(c + d*x)*log(e*(a + b*x)/(c + d*x))/((a + b*x)*(-a*d + b*c)**S(2)) + S(2)*d/(b*(a + b*x)*(-a*d + b*c)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*x)**S(2)*log(e*(a + b*x)/(c + d*x))**S(3), x), x, (a + b*x)**S(3)*log(e*(a + b*x)/(c + d*x))**S(3)/(S(3)*b) - (a + b*x)**S(2)*(-a*d/S(2) + b*c/S(2))*log(e*(a + b*x)/(c + d*x))**S(2)/(b*d) + (a + b*x)*(-a*d + b*c)**S(2)*log(e*(a + b*x)/(c + d*x))**S(2)/(b*d**S(2)) + (a + b*x)*(-a*d + b*c)**S(2)*log(e*(a + b*x)/(c + d*x))/(b*d**S(2)) + (-a*d + b*c)**S(3)*log((-a*d + b*c)/(b*(c + d*x)))*log(e*(a + b*x)/(c + d*x))**S(2)/(b*d**S(3)) + S(3)*(-a*d + b*c)**S(3)*log((-a*d + b*c)/(b*(c + d*x)))*log(e*(a + b*x)/(c + d*x))/(b*d**S(3)) + S(2)*(-a*d + b*c)**S(3)*log(e*(a + b*x)/(c + d*x))*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/(b*d**S(3)) - (-a*d + b*c)**S(3)*log(c + d*x)/(b*d**S(3)) + S(3)*(-a*d + b*c)**S(3)*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/(b*d**S(3)) - S(2)*(-a*d + b*c)**S(3)*polylog(S(3), d*(a + b*x)/(b*(c + d*x)))/(b*d**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*x)*log(e*(a + b*x)/(c + d*x))**S(3), x), x, (a + b*x)**S(2)*log(e*(a + b*x)/(c + d*x))**S(3)/(S(2)*b) - (a + b*x)*(-S(3)*a*d/S(2) + S(3)*b*c/S(2))*log(e*(a + b*x)/(c + d*x))**S(2)/(b*d) - S(3)*(-a*d + b*c)**S(2)*log((-a*d + b*c)/(b*(c + d*x)))*log(e*(a + b*x)/(c + d*x))**S(2)/(S(2)*b*d**S(2)) - S(3)*(-a*d + b*c)**S(2)*log((-a*d + b*c)/(b*(c + d*x)))*log(e*(a + b*x)/(c + d*x))/(b*d**S(2)) - S(3)*(-a*d + b*c)**S(2)*log(e*(a + b*x)/(c + d*x))*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/(b*d**S(2)) - S(3)*(-a*d + b*c)**S(2)*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/(b*d**S(2)) + S(3)*(-a*d + b*c)**S(2)*polylog(S(3), d*(a + b*x)/(b*(c + d*x)))/(b*d**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*(a + b*x)/(c + d*x))**S(3), x), x, (a + b*x)*log(e*(a + b*x)/(c + d*x))**S(3)/b + (-S(6)*a*d + S(6)*b*c)*log(e*(a + b*x)/(c + d*x))*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/(b*d) - (-S(6)*a*d + S(6)*b*c)*polylog(S(3), d*(a + b*x)/(b*(c + d*x)))/(b*d) + (-S(3)*a*d + S(3)*b*c)*log((-a*d + b*c)/(b*(c + d*x)))*log(e*(a + b*x)/(c + d*x))**S(2)/(b*d), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*(a + b*x)/(c + d*x))**S(3)/(a + b*x), x), x, -log((a*d - b*c)/(d*(a + b*x)))*log(e*(a + b*x)/(c + d*x))**S(3)/b + S(3)*log(e*(a + b*x)/(c + d*x))**S(2)*polylog(S(2), b*(c + d*x)/(d*(a + b*x)))/b + S(6)*log(e*(a + b*x)/(c + d*x))*polylog(S(3), b*(c + d*x)/(d*(a + b*x)))/b + S(6)*polylog(S(4), b*(c + d*x)/(d*(a + b*x)))/b, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*(a + b*x)/(c + d*x))**S(3)/(a + b*x)**S(2), x), x, -(c + d*x)*log(e*(a + b*x)/(c + d*x))**S(3)/((a + b*x)*(-a*d + b*c)) - (S(3)*c + S(3)*d*x)*log(e*(a + b*x)/(c + d*x))**S(2)/((a + b*x)*(-a*d + b*c)) - (S(6)*c + S(6)*d*x)*log(e*(a + b*x)/(c + d*x))/((a + b*x)*(-a*d + b*c)) - S(6)/(b*(a + b*x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*(a + b*x)/(c + d*x))**S(3)/(a + b*x)**S(3), x), x, -b*(c + d*x)**S(2)*log(e*(a + b*x)/(c + d*x))**S(3)/(S(2)*(a + b*x)**S(2)*(-a*d + b*c)**S(2)) - S(3)*b*(c + d*x)**S(2)*log(e*(a + b*x)/(c + d*x))**S(2)/(S(4)*(a + b*x)**S(2)*(-a*d + b*c)**S(2)) - S(3)*b*(c + d*x)**S(2)*log(e*(a + b*x)/(c + d*x))/(S(4)*(a + b*x)**S(2)*(-a*d + b*c)**S(2)) - S(3)*b*(c + d*x)**S(2)/(S(8)*(a + b*x)**S(2)*(-a*d + b*c)**S(2)) + d*(c + d*x)*log(e*(a + b*x)/(c + d*x))**S(3)/((a + b*x)*(-a*d + b*c)**S(2)) + S(3)*d*(c + d*x)*log(e*(a + b*x)/(c + d*x))**S(2)/((a + b*x)*(-a*d + b*c)**S(2)) + S(6)*d*(c + d*x)*log(e*(a + b*x)/(c + d*x))/((a + b*x)*(-a*d + b*c)**S(2)) + S(6)*d/(b*(a + b*x)*(-a*d + b*c)), expand=True, _diff=True, _numerical=True)
# Family: (c + d*x)**m * log(e*((a + b*x)/(c + d*x))**n)**p (the denominator
# factor this time), m = 3..-4, p = 1..3, then the high powers p = 4, 5 of the
# bare logarithm, and three special small forms:
#   log(d*(a+b*x)/(b*(c+d*x)))/(c*f + d*f*x) and log(1 ± 1/(a+b*x))/(a+b*x),
# each reducing directly to a single dilogarithm.
assert rubi_test(rubi_integrate((c + d*x)**S(3)*log(e*((a + b*x)/(c + d*x))**n), x), x, (c + d*x)**S(4)*log(e*((a + b*x)/(c + d*x))**n)/(S(4)*d) - n*(c + d*x)**S(3)*(-a*d/S(12) + b*c/S(12))/(b*d) - n*(c + d*x)**S(2)*(-a*d + b*c)**S(2)/(S(8)*b**S(2)*d) - n*x*(-a*d + b*c)**S(3)/(S(4)*b**S(3)) - n*(-a*d + b*c)**S(4)*log(a + b*x)/(S(4)*b**S(4)*d), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((c + d*x)**S(2)*log(e*((a + b*x)/(c + d*x))**n), x), x, (c + d*x)**S(3)*log(e*((a + b*x)/(c + d*x))**n)/(S(3)*d) - n*(c + d*x)**S(2)*(-a*d/S(6) + b*c/S(6))/(b*d) - n*x*(-a*d + b*c)**S(2)/(S(3)*b**S(2)) - n*(-a*d + b*c)**S(3)*log(a + b*x)/(S(3)*b**S(3)*d), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((c + d*x)*log(e*((a + b*x)/(c + d*x))**n), x), x, (c + d*x)**S(2)*log(e*((a + b*x)/(c + d*x))**n)/(S(2)*d) + n*x*(a*d/S(2) - b*c/S(2))/b - n*(-a*d + b*c)**S(2)*log(a + b*x)/(S(2)*b**S(2)*d), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*((a + b*x)/(c + d*x))**n), x), x, (a + b*x)*log(e*((a + b*x)/(c + d*x))**n)/b - n*(-a*d + b*c)*log(c + d*x)/(b*d), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*((a + b*x)/(c + d*x))**n)/(c + d*x), x), x, -n*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/d - log(e*((a + b*x)/(c + d*x))**n)*log((-a*d + b*c)/(b*(c + d*x)))/d, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*((a + b*x)/(c + d*x))**n)/(c + d*x)**S(2), x), x, (a + b*x)*log(e*((a + b*x)/(c + d*x))**n)/((c + d*x)*(-a*d + b*c)) + n/(d*(c + d*x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*((a + b*x)/(c + d*x))**n)/(c + d*x)**S(3), x), x, b**S(2)*n*log(a + b*x)/(S(2)*d*(-a*d + b*c)**S(2)) - b**S(2)*n*log(c + d*x)/(S(2)*d*(-a*d + b*c)**S(2)) + b*n/(S(2)*d*(c + d*x)*(-a*d + b*c)) + n/(S(4)*d*(c + d*x)**S(2)) - log(e*((a + b*x)/(c + d*x))**n)/(S(2)*d*(c + d*x)**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*((a + b*x)/(c + d*x))**n)/(c + d*x)**S(4), x), x, b**S(3)*n*log(a + b*x)/(S(3)*d*(-a*d + b*c)**S(3)) - b**S(3)*n*log(c + d*x)/(S(3)*d*(-a*d + b*c)**S(3)) + b**S(2)*n/(S(3)*d*(c + d*x)*(-a*d + b*c)**S(2)) + b*n/(S(6)*d*(c + d*x)**S(2)*(-a*d + b*c)) + n/(S(9)*d*(c + d*x)**S(3)) - log(e*((a + b*x)/(c + d*x))**n)/(S(3)*d*(c + d*x)**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((c + d*x)**S(3)*log(e*(a + b*x)/(c + d*x))**S(2), x), x, (c + d*x)**S(4)*log(e*(a + b*x)/(c + d*x))**S(2)/(S(4)*d) - (c + d*x)**S(3)*(-a*d/S(6) + b*c/S(6))*log(e*(a + b*x)/(c + d*x))/(b*d) - (c + d*x)**S(2)*(-a*d + b*c)**S(2)*log(e*(a + b*x)/(c + d*x))/(S(4)*b**S(2)*d) + (c + d*x)**S(2)*(-a*d + b*c)**S(2)/(S(12)*b**S(2)*d) + S(5)*x*(-a*d + b*c)**S(3)/(S(12)*b**S(3)) - (a + b*x)*(-a*d + b*c)**S(3)*log(e*(a + b*x)/(c + d*x))/(S(2)*b**S(4)) + (-a*d + b*c)**S(4)*log((a*d - b*c)/(d*(a + b*x)))*log(e*(a + b*x)/(c + d*x))/(S(2)*b**S(4)*d) + S(5)*(-a*d + b*c)**S(4)*log(a + b*x)/(S(12)*b**S(4)*d) + (-a*d + b*c)**S(4)*log(c + d*x)/(S(2)*b**S(4)*d) - (-a*d + b*c)**S(4)*polylog(S(2), b*(c + d*x)/(d*(a + b*x)))/(S(2)*b**S(4)*d), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((c + d*x)**S(2)*log(e*(a + b*x)/(c + d*x))**S(2), x), x, (c + d*x)**S(3)*log(e*(a + b*x)/(c + d*x))**S(2)/(S(3)*d) - (c + d*x)**S(2)*(-a*d/S(3) + b*c/S(3))*log(e*(a + b*x)/(c + d*x))/(b*d) + x*(-a*d + b*c)**S(2)/(S(3)*b**S(2)) - S(2)*(a + b*x)*(-a*d + b*c)**S(2)*log(e*(a + b*x)/(c + d*x))/(S(3)*b**S(3)) + S(2)*(-a*d + b*c)**S(3)*log((a*d - b*c)/(d*(a + b*x)))*log(e*(a + b*x)/(c + d*x))/(S(3)*b**S(3)*d) + (-a*d + b*c)**S(3)*log(a + b*x)/(S(3)*b**S(3)*d) + S(2)*(-a*d + b*c)**S(3)*log(c + d*x)/(S(3)*b**S(3)*d) - S(2)*(-a*d + b*c)**S(3)*polylog(S(2), b*(c + d*x)/(d*(a + b*x)))/(S(3)*b**S(3)*d), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((c + d*x)*log(e*(a + b*x)/(c + d*x))**S(2), x), x, (c + d*x)**S(2)*log(e*(a + b*x)/(c + d*x))**S(2)/(S(2)*d) + (a + b*x)*(a*d - b*c)*log(e*(a + b*x)/(c + d*x))/b**S(2) + (-a*d + b*c)**S(2)*log((a*d - b*c)/(d*(a + b*x)))*log(e*(a + b*x)/(c + d*x))/(b**S(2)*d) + (-a*d + b*c)**S(2)*log(c + d*x)/(b**S(2)*d) - (-a*d + b*c)**S(2)*polylog(S(2), b*(c + d*x)/(d*(a + b*x)))/(b**S(2)*d), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*(a + b*x)/(c + d*x))**S(2), x), x, (a + b*x)*log(e*(a + b*x)/(c + d*x))**S(2)/b + (-S(2)*a*d + S(2)*b*c)*log((-a*d + b*c)/(b*(c + d*x)))*log(e*(a + b*x)/(c + d*x))/(b*d) + (-S(2)*a*d + S(2)*b*c)*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/(b*d), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*(a + b*x)/(c + d*x))**S(2)/(c + d*x), x), x, -log((-a*d + b*c)/(b*(c + d*x)))*log(e*(a + b*x)/(c + d*x))**S(2)/d - S(2)*log(e*(a + b*x)/(c + d*x))*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/d + S(2)*polylog(S(3), d*(a + b*x)/(b*(c + d*x)))/d, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*(a + b*x)/(c + d*x))**S(2)/(c + d*x)**S(2), x), x, (a + b*x)*log(e*(a + b*x)/(c + d*x))**S(2)/((c + d*x)*(-a*d + b*c)) - (S(2)*a + S(2)*b*x)*log(e*(a + b*x)/(c + d*x))/((c + d*x)*(-a*d + b*c)) - S(2)/(d*(c + d*x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*(a + b*x)/(c + d*x))**S(2)/(c + d*x)**S(3), x), x, b*(a + b*x)*log(e*(a + b*x)/(c + d*x))**S(2)/((c + d*x)*(-a*d + b*c)**S(2)) - S(2)*b*(a + b*x)*log(e*(a + b*x)/(c + d*x))/((c + d*x)*(-a*d + b*c)**S(2)) - S(2)*b/(d*(c + d*x)*(-a*d + b*c)) - d*(a + b*x)**S(2)*log(e*(a + b*x)/(c + d*x))**S(2)/(S(2)*(c + d*x)**S(2)*(-a*d + b*c)**S(2)) + d*(a + b*x)**S(2)*log(e*(a + b*x)/(c + d*x))/(S(2)*(c + d*x)**S(2)*(-a*d + b*c)**S(2)) - d*(a + b*x)**S(2)/(S(4)*(c + d*x)**S(2)*(-a*d + b*c)**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((c + d*x)**S(2)*log(e*(a + b*x)/(c + d*x))**S(3), x), x, (c + d*x)**S(3)*log(e*(a + b*x)/(c + d*x))**S(3)/(S(3)*d) - (c + d*x)**S(2)*(-a*d/S(2) + b*c/S(2))*log(e*(a + b*x)/(c + d*x))**S(2)/(b*d) - (a + b*x)*(-a*d + b*c)**S(2)*log(e*(a + b*x)/(c + d*x))**S(2)/b**S(3) + (a + b*x)*(-a*d + b*c)**S(2)*log(e*(a + b*x)/(c + d*x))/b**S(3) - S(2)*(-a*d + b*c)**S(3)*log((-a*d + b*c)/(b*(c + d*x)))*log(e*(a + b*x)/(c + d*x))/(b**S(3)*d) + (-a*d + b*c)**S(3)*log((a*d - b*c)/(d*(a + b*x)))*log(e*(a + b*x)/(c + d*x))**S(2)/(b**S(3)*d) - (-a*d + b*c)**S(3)*log((a*d - b*c)/(d*(a + b*x)))*log(e*(a + b*x)/(c + d*x))/(b**S(3)*d) - S(2)*(-a*d + b*c)**S(3)*log(e*(a + b*x)/(c + d*x))*polylog(S(2), b*(c + d*x)/(d*(a + b*x)))/(b**S(3)*d) - (-a*d + b*c)**S(3)*log(c + d*x)/(b**S(3)*d) - S(2)*(-a*d + b*c)**S(3)*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/(b**S(3)*d) + (-a*d + b*c)**S(3)*polylog(S(2), b*(c + d*x)/(d*(a + b*x)))/(b**S(3)*d) - S(2)*(-a*d + b*c)**S(3)*polylog(S(3), b*(c + d*x)/(d*(a + b*x)))/(b**S(3)*d), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((c + d*x)*log(e*(a + b*x)/(c + d*x))**S(3), x), x, (c + d*x)**S(2)*log(e*(a + b*x)/(c + d*x))**S(3)/(S(2)*d) - (a + b*x)*(-S(3)*a*d/S(2) + S(3)*b*c/S(2))*log(e*(a + b*x)/(c + d*x))**S(2)/b**S(2) - S(3)*(-a*d + b*c)**S(2)*log((-a*d + b*c)/(b*(c + d*x)))*log(e*(a + b*x)/(c + d*x))/(b**S(2)*d) + S(3)*(-a*d + b*c)**S(2)*log((a*d - b*c)/(d*(a + b*x)))*log(e*(a + b*x)/(c + d*x))**S(2)/(S(2)*b**S(2)*d) - S(3)*(-a*d + b*c)**S(2)*log(e*(a + b*x)/(c + d*x))*polylog(S(2), b*(c + d*x)/(d*(a + b*x)))/(b**S(2)*d) - S(3)*(-a*d + b*c)**S(2)*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/(b**S(2)*d) - S(3)*(-a*d + b*c)**S(2)*polylog(S(3), b*(c + d*x)/(d*(a + b*x)))/(b**S(2)*d), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*(a + b*x)/(c + d*x))**S(3), x), x, (a + b*x)*log(e*(a + b*x)/(c + d*x))**S(3)/b + (-S(6)*a*d + S(6)*b*c)*log(e*(a + b*x)/(c + d*x))*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/(b*d) - (-S(6)*a*d + S(6)*b*c)*polylog(S(3), d*(a + b*x)/(b*(c + d*x)))/(b*d) + (-S(3)*a*d + S(3)*b*c)*log((-a*d + b*c)/(b*(c + d*x)))*log(e*(a + b*x)/(c + d*x))**S(2)/(b*d), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*(a + b*x)/(c + d*x))**S(3)/(c + d*x), x), x, -log((-a*d + b*c)/(b*(c + d*x)))*log(e*(a + b*x)/(c + d*x))**S(3)/d - S(3)*log(e*(a + b*x)/(c + d*x))**S(2)*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/d + S(6)*log(e*(a + b*x)/(c + d*x))*polylog(S(3), d*(a + b*x)/(b*(c + d*x)))/d - S(6)*polylog(S(4), d*(a + b*x)/(b*(c + d*x)))/d, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*(a + b*x)/(c + d*x))**S(3)/(c + d*x)**S(2), x), x, (a + b*x)*log(e*(a + b*x)/(c + d*x))**S(3)/((c + d*x)*(-a*d + b*c)) - (S(3)*a + S(3)*b*x)*log(e*(a + b*x)/(c + d*x))**S(2)/((c + d*x)*(-a*d + b*c)) + (S(6)*a + S(6)*b*x)*log(e*(a + b*x)/(c + d*x))/((c + d*x)*(-a*d + b*c)) + S(6)/(d*(c + d*x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*(a + b*x)/(c + d*x))**S(3)/(c + d*x)**S(3), x), x, b*(a + b*x)*log(e*(a + b*x)/(c + d*x))**S(3)/((c + d*x)*(-a*d + b*c)**S(2)) - S(3)*b*(a + b*x)*log(e*(a + b*x)/(c + d*x))**S(2)/((c + d*x)*(-a*d + b*c)**S(2)) + S(6)*b*(a + b*x)*log(e*(a + b*x)/(c + d*x))/((c + d*x)*(-a*d + b*c)**S(2)) + S(6)*b/(d*(c + d*x)*(-a*d + b*c)) - d*(a + b*x)**S(2)*log(e*(a + b*x)/(c + d*x))**S(3)/(S(2)*(c + d*x)**S(2)*(-a*d + b*c)**S(2)) + S(3)*d*(a + b*x)**S(2)*log(e*(a + b*x)/(c + d*x))**S(2)/(S(4)*(c + d*x)**S(2)*(-a*d + b*c)**S(2)) - S(3)*d*(a + b*x)**S(2)*log(e*(a + b*x)/(c + d*x))/(S(4)*(c + d*x)**S(2)*(-a*d + b*c)**S(2)) + S(3)*d*(a + b*x)**S(2)/(S(8)*(c + d*x)**S(2)*(-a*d + b*c)**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*(a + b*x)/(c + d*x))**S(4), x), x, (a + b*x)*log(e*(a + b*x)/(c + d*x))**S(4)/b - (-S(24)*a*d + S(24)*b*c)*log(e*(a + b*x)/(c + d*x))*polylog(S(3), d*(a + b*x)/(b*(c + d*x)))/(b*d) + (-S(24)*a*d + S(24)*b*c)*polylog(S(4), d*(a + b*x)/(b*(c + d*x)))/(b*d) + (-S(12)*a*d + S(12)*b*c)*log(e*(a + b*x)/(c + d*x))**S(2)*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/(b*d) + (-S(4)*a*d + S(4)*b*c)*log((-a*d + b*c)/(b*(c + d*x)))*log(e*(a + b*x)/(c + d*x))**S(3)/(b*d), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*(a + b*x)/(c + d*x))**S(5), x), x, (a + b*x)*log(e*(a + b*x)/(c + d*x))**S(5)/b + (-S(120)*a*d + S(120)*b*c)*log(e*(a + b*x)/(c + d*x))*polylog(S(4), d*(a + b*x)/(b*(c + d*x)))/(b*d) - (-S(120)*a*d + S(120)*b*c)*polylog(S(5), d*(a + b*x)/(b*(c + d*x)))/(b*d) - (-S(60)*a*d + S(60)*b*c)*log(e*(a + b*x)/(c + d*x))**S(2)*polylog(S(3), d*(a + b*x)/(b*(c + d*x)))/(b*d) + (-S(20)*a*d + S(20)*b*c)*log(e*(a + b*x)/(c + d*x))**S(3)*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/(b*d) + (-S(5)*a*d + S(5)*b*c)*log((-a*d + b*c)/(b*(c + d*x)))*log(e*(a + b*x)/(c + d*x))**S(4)/(b*d), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(d*(a + b*x)/(b*(c + d*x)))/(c*f + d*f*x), x), x, polylog(S(2), (-a*d + b*c)/(b*(c + d*x)))/(d*f), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(S(1) + S(1)/(a + b*x))/(a + b*x), x), x, polylog(S(2), -S(1)/(a + b*x))/b, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(S(1) - S(1)/(a + b*x))/(a + b*x), x), x, polylog(S(2), S(1)/(a + b*x))/b, expand=True, _diff=True, _numerical=True)
# Family: (f + g*x)**m * log(e*((a + b*x)/(c + d*x))**n)**p with a third,
# independent linear factor (f + g*x), m = 3..-4 and p up to 2. Antiderivatives
# carry the cross-determinants (-a*g + b*f), (-c*g + d*f), (-a*d + b*c) of the
# three linear forms.
assert rubi_test(rubi_integrate((f + g*x)**S(3)*log(e*((a + b*x)/(c + d*x))**n), x), x, (f + g*x)**S(4)*log(e*((a + b*x)/(c + d*x))**n)/(S(4)*g) + n*(-c*g + d*f)**S(4)*log(c + d*x)/(S(4)*d**S(4)*g) - g**S(3)*n*x**S(3)*(-a*d/S(12) + b*c/S(12))/(b*d) - g**S(2)*n*x**S(2)*(-a*d/S(8) + b*c/S(8))*(-a*d*g - b*c*g + S(4)*b*d*f)/(b**S(2)*d**S(2)) + g*n*x*(a*d/S(4) - b*c/S(4))*(a**S(2)*d**S(2)*g**S(2) - a*b*d*g*(-c*g + S(4)*d*f) + b**S(2)*(c**S(2)*g**S(2) - S(4)*c*d*f*g + S(6)*d**S(2)*f**S(2)))/(b**S(3)*d**S(3)) - n*(-a*g + b*f)**S(4)*log(a + b*x)/(S(4)*b**S(4)*g), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((f + g*x)**S(2)*log(e*((a + b*x)/(c + d*x))**n), x), x, (f + g*x)**S(3)*log(e*((a + b*x)/(c + d*x))**n)/(S(3)*g) + n*(-c*g + d*f)**S(3)*log(c + d*x)/(S(3)*d**S(3)*g) - g**S(2)*n*x**S(2)*(-a*d/S(6) + b*c/S(6))/(b*d) + g*n*x*(a*d/S(3) - b*c/S(3))*(-a*d*g - b*c*g + S(3)*b*d*f)/(b**S(2)*d**S(2)) - n*(-a*g + b*f)**S(3)*log(a + b*x)/(S(3)*b**S(3)*g), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((f + g*x)*log(e*((a + b*x)/(c + d*x))**n), x), x, (f + g*x)**S(2)*log(e*((a + b*x)/(c + d*x))**n)/(S(2)*g) + n*(-c*g + d*f)**S(2)*log(c + d*x)/(S(2)*d**S(2)*g) + g*n*x*(a*d/S(2) - b*c/S(2))/(b*d) - n*(-a*g + b*f)**S(2)*log(a + b*x)/(S(2)*b**S(2)*g), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*((a + b*x)/(c + d*x))**n), x), x, (a + b*x)*log(e*((a + b*x)/(c + d*x))**n)/b - n*(-a*d + b*c)*log(c + d*x)/(b*d), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*((a + b*x)/(c + d*x))**n)/(f + g*x), x), x, -n*log(-g*(a + b*x)/(-a*g + b*f))*log(f + g*x)/g + n*log(-g*(c + d*x)/(-c*g + d*f))*log(f + g*x)/g - n*polylog(S(2), b*(f + g*x)/(-a*g + b*f))/g + n*polylog(S(2), d*(f + g*x)/(-c*g + d*f))/g + log(e*((a + b*x)/(c + d*x))**n)*log(f + g*x)/g, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*((a + b*x)/(c + d*x))**n)/(f + g*x)**S(2), x), x, -n*(-a*d + b*c)*log(c + d*x)/((-a*g + b*f)*(-c*g + d*f)) + n*(-a*d + b*c)*log(f + g*x)/((-a*g + b*f)*(-c*g + d*f)) + (a + b*x)*log(e*((a + b*x)/(c + d*x))**n)/((f + g*x)*(-a*g + b*f)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*((a + b*x)/(c + d*x))**n)/(f + g*x)**S(3), x), x, b**S(2)*n*log(a + b*x)/(S(2)*g*(-a*g + b*f)**S(2)) - d**S(2)*n*log(c + d*x)/(S(2)*g*(-c*g + d*f)**S(2)) + n*(-a*d/S(2) + b*c/S(2))*(-a*d*g - b*c*g + S(2)*b*d*f)*log(f + g*x)/((-a*g + b*f)**S(2)*(-c*g + d*f)**S(2)) + n*(a*d/S(2) - b*c/S(2))/((f + g*x)*(-a*g + b*f)*(-c*g + d*f)) - log(e*((a + b*x)/(c + d*x))**n)/(S(2)*g*(f + g*x)**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*((a + b*x)/(c + d*x))**n)/(f + g*x)**S(4), x), x, b**S(3)*n*log(a + b*x)/(S(3)*g*(-a*g + b*f)**S(3)) - d**S(3)*n*log(c + d*x)/(S(3)*g*(-c*g + d*f)**S(3)) + n*(-a*d/S(3) + b*c/S(3))*(a**S(2)*d**S(2)*g**S(2) - a*b*d*g*(-c*g + S(3)*d*f) + b**S(2)*(c**S(2)*g**S(2) - S(3)*c*d*f*g + S(3)*d**S(2)*f**S(2)))*log(f + g*x)/((-a*g + b*f)**S(3)*(-c*g + d*f)**S(3)) - n*(-a*d/S(3) + b*c/S(3))*(-a*d*g - b*c*g + S(2)*b*d*f)/((f + g*x)*(-a*g + b*f)**S(2)*(-c*g + d*f)**S(2)) + n*(a*d/S(6) - b*c/S(6))/((f + g*x)**S(2)*(-a*g + b*f)*(-c*g + d*f)) - log(e*((a + b*x)/(c + d*x))**n)/(S(3)*g*(f + g*x)**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((f + g*x)**S(3)*log(e*((a + b*x)/(c + d*x))**n)**S(2), x), x, -a**S(3)*g**S(3)*n**S(2)*(-a*d + b*c)*log(a + b*x)/(S(6)*b**S(4)*d) + a**S(2)*g**S(2)*n**S(2)*(-a*d + b*c)*(-a*d*g - b*c*g + S(4)*b*d*f)*log(a + b*x)/(S(4)*b**S(4)*d**S(2)) + (f + g*x)**S(4)*log(e*((a + b*x)/(c + d*x))**n)**S(2)/(S(4)*g) - n**S(2)*(-c*g + d*f)**S(4)*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/(S(2)*d**S(4)*g) - n*(-c*g + d*f)**S(4)*log(e*((a + b*x)/(c + d*x))**n)*log((-a*d + b*c)/(b*(c + d*x)))/(S(2)*d**S(4)*g) + c**S(3)*g**S(3)*n**S(2)*(-a*d + b*c)*log(c + d*x)/(S(6)*b*d**S(4)) - g**S(3)*n*x**S(3)*(-a*d/S(6) + b*c/S(6))*log(e*((a + b*x)/(c + d*x))**n)/(b*d) - c**S(2)*g**S(2)*n**S(2)*(-a*d + b*c)*(-a*d*g - b*c*g + S(4)*b*d*f)*log(c + d*x)/(S(4)*b**S(2)*d**S(4)) + g**S(3)*n**S(2)*x**S(2)*(-a*d + b*c)**S(2)/(S(12)*b**S(2)*d**S(2)) - g**S(2)*n*x**S(2)*(-a*d/S(4) + b*c/S(4))*(-a*d*g - b*c*g + S(4)*b*d*f)*log(e*((a + b*x)/(c + d*x))**n)/(b**S(2)*d**S(2)) - g**S(3)*n**S(2)*x*(-a*d + b*c)**S(2)*(a*d + b*c)/(S(6)*b**S(3)*d**S(3)) + g**S(2)*n**S(2)*x*(-a*d + b*c)**S(2)*(-a*d*g - b*c*g + S(4)*b*d*f)/(S(4)*b**S(3)*d**S(3)) - n**S(2)*(-a*g + b*f)**S(4)*polylog(S(2), b*(c + d*x)/(d*(a + b*x)))/(S(2)*b**S(4)*g) + n*(-a*g + b*f)**S(4)*log(e*((a + b*x)/(c + d*x))**n)*log((a*d - b*c)/(d*(a + b*x)))/(S(2)*b**S(4)*g) - g*n*(a + b*x)*(-a*d/S(2) + b*c/S(2))*(a**S(2)*d**S(2)*g**S(2) - a*b*d*g*(-c*g + S(4)*d*f) + b**S(2)*(c**S(2)*g**S(2) - S(4)*c*d*f*g + S(6)*d**S(2)*f**S(2)))*log(e*((a + b*x)/(c + d*x))**n)/(b**S(4)*d**S(3)) + g*n**S(2)*(-a*d + b*c)**S(2)*(a**S(2)*d**S(2)*g**S(2) - a*b*d*g*(-c*g + S(4)*d*f) + b**S(2)*(c**S(2)*g**S(2) - S(4)*c*d*f*g + S(6)*d**S(2)*f**S(2)))*log(c + d*x)/(S(2)*b**S(4)*d**S(4)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((f + g*x)**S(2)*log(e*((a + b*x)/(c + d*x))**n)**S(2), x), x, a**S(2)*g**S(2)*n**S(2)*(-a*d + b*c)*log(a + b*x)/(S(3)*b**S(3)*d) + (f + g*x)**S(3)*log(e*((a + b*x)/(c + d*x))**n)**S(2)/(S(3)*g) - S(2)*n**S(2)*(-c*g + d*f)**S(3)*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/(S(3)*d**S(3)*g) - S(2)*n*(-c*g + d*f)**S(3)*log(e*((a + b*x)/(c + d*x))**n)*log((-a*d + b*c)/(b*(c + d*x)))/(S(3)*d**S(3)*g) - c**S(2)*g**S(2)*n**S(2)*(-a*d + b*c)*log(c + d*x)/(S(3)*b*d**S(3)) - g**S(2)*n*x**S(2)*(-a*d/S(3) + b*c/S(3))*log(e*((a + b*x)/(c + d*x))**n)/(b*d) + g**S(2)*n**S(2)*x*(-a*d + b*c)**S(2)/(S(3)*b**S(2)*d**S(2)) - S(2)*n**S(2)*(-a*g + b*f)**S(3)*polylog(S(2), b*(c + d*x)/(d*(a + b*x)))/(S(3)*b**S(3)*g) + S(2)*n*(-a*g + b*f)**S(3)*log(e*((a + b*x)/(c + d*x))**n)*log((a*d - b*c)/(d*(a + b*x)))/(S(3)*b**S(3)*g) - g*n*(a + b*x)*(-S(2)*a*d/S(3) + S(2)*b*c/S(3))*(-a*d*g - b*c*g + S(3)*b*d*f)*log(e*((a + b*x)/(c + d*x))**n)/(b**S(3)*d**S(2)) + S(2)*g*n**S(2)*(-a*d + b*c)**S(2)*(-a*d*g - b*c*g + S(3)*b*d*f)*log(c + d*x)/(S(3)*b**S(3)*d**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((f + g*x)*log(e*((a + b*x)/(c + d*x))**n)**S(2), x), x, (f + g*x)**S(2)*log(e*((a + b*x)/(c + d*x))**n)**S(2)/(S(2)*g) - n**S(2)*(-c*g + d*f)**S(2)*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/(d**S(2)*g) - n*(-c*g + d*f)**S(2)*log(e*((a + b*x)/(c + d*x))**n)*log((-a*d + b*c)/(b*(c + d*x)))/(d**S(2)*g) - n**S(2)*(-a*g + b*f)**S(2)*polylog(S(2), b*(c + d*x)/(d*(a + b*x)))/(b**S(2)*g) + n*(-a*g + b*f)**S(2)*log(e*((a + b*x)/(c + d*x))**n)*log((a*d - b*c)/(d*(a + b*x)))/(b**S(2)*g) + g*n*(a + b*x)*(a*d - b*c)*log(e*((a + b*x)/(c + d*x))**n)/(b**S(2)*d) + g*n**S(2)*(-a*d + b*c)**S(2)*log(c + d*x)/(b**S(2)*d**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*((a + b*x)/(c + d*x))**n)**S(2), x), x, (a + b*x)*log(e*((a + b*x)/(c + d*x))**n)**S(2)/b + n**S(2)*(-S(2)*a*d + S(2)*b*c)*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/(b*d) + n*(-S(2)*a*d + S(2)*b*c)*log(e*((a + b*x)/(c + d*x))**n)*log((-a*d + b*c)/(b*(c + d*x)))/(b*d), expand=True, _diff=True, _numerical=True)
# taking long time in rubi_test assert rubi_test(rubi_integrate(log(e*((a + b*x)/(c + d*x))**n)**S(2)/(f + g*x), x), x, -S(2)*n**S(2)*polylog(S(3), (a*(-c*g + d*f) - b*c*g*x + b*d*f*x)/((c + d*x)*(-a*g + b*f)))/g + S(2)*n**S(2)*polylog(S(3), d*(a + b*x)/(b*(c + d*x)))/g + S(2)*n*log(e*((a + b*x)/(c + d*x))**n)*polylog(S(2), (a*(-c*g + d*f) - b*c*g*x + b*d*f*x)/((c + d*x)*(-a*g + b*f)))/g - S(2)*n*log(e*((a + b*x)/(c + d*x))**n)*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/g - log(e*((a + b*x)/(c + d*x))**n)**S(2)*log((-a*d + b*c)/(b*(c + d*x)))/g + log(e*((a + b*x)/(c + d*x))**n)**S(2)*log((f + g*x)*(-a*d + b*c)/((c + d*x)*(-a*g + b*f)))/g, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*((a + b*x)/(c + d*x))**n)**S(2)/(f + g*x)**S(2), x), x, n**S(2)*(-S(2)*a*d + S(2)*b*c)*polylog(S(2), (a + b*x)*(-c*g + d*f)/((c + d*x)*(-a*g + b*f)))/((-a*g + b*f)*(-c*g + d*f)) + n*(-S(2)*a*d + S(2)*b*c)*log(e*((a + b*x)/(c + d*x))**n)*log((f + g*x)*(-a*d + b*c)/((c + d*x)*(-a*g + b*f)))/((-a*g + b*f)*(-c*g + d*f)) + (a + b*x)*log(e*((a + b*x)/(c + d*x))**n)**S(2)/((f + g*x)*(-a*g + b*f)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*((a + b*x)/(c + d*x))**n)**S(2)/(f + g*x)**S(3), x), x, b**S(2)*n**S(2)*polylog(S(2), b*(c + d*x)/(d*(a + b*x)))/(g*(-a*g + b*f)**S(2)) - b**S(2)*n*log(e*((a + b*x)/(c + d*x))**n)*log((a*d - b*c)/(d*(a + b*x)))/(g*(-a*g + b*f)**S(2)) + d**S(2)*n**S(2)*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/(g*(-c*g + d*f)**S(2)) + d**S(2)*n*log(e*((a + b*x)/(c + d*x))**n)*log((-a*d + b*c)/(b*(c + d*x)))/(g*(-c*g + d*f)**S(2)) - g*n**S(2)*(-a*d + b*c)**S(2)*log(c + d*x)/((-a*g + b*f)**S(2)*(-c*g + d*f)**S(2)) + g*n**S(2)*(-a*d + b*c)**S(2)*log(f + g*x)/((-a*g + b*f)**S(2)*(-c*g + d*f)**S(2)) + g*n*(a + b*x)*(-a*d + b*c)*log(e*((a + b*x)/(c + d*x))**n)/((f + g*x)*(-a*g + b*f)**S(2)*(-c*g + d*f)) - n**S(2)*(-a*d + b*c)*(-a*d*g - b*c*g + S(2)*b*d*f)*log(-g*(a + b*x)/(-a*g + b*f))*log(f + g*x)/((-a*g + b*f)**S(2)*(-c*g + d*f)**S(2)) + n**S(2)*(-a*d + b*c)*(-a*d*g - b*c*g + S(2)*b*d*f)*log(-g*(c + d*x)/(-c*g + d*f))*log(f + g*x)/((-a*g + b*f)**S(2)*(-c*g + d*f)**S(2)) - n**S(2)*(-a*d + b*c)*(-a*d*g - b*c*g + S(2)*b*d*f)*polylog(S(2), b*(f + g*x)/(-a*g + b*f))/((-a*g + b*f)**S(2)*(-c*g + d*f)**S(2)) + n**S(2)*(-a*d + b*c)*(-a*d*g - b*c*g + S(2)*b*d*f)*polylog(S(2), d*(f + g*x)/(-c*g + d*f))/((-a*g + b*f)**S(2)*(-c*g + d*f)**S(2)) + n*(-a*d + b*c)*(-a*d*g - b*c*g + S(2)*b*d*f)*log(e*((a + b*x)/(c + d*x))**n)*log(f + g*x)/((-a*g + b*f)**S(2)*(-c*g + d*f)**S(2)) - log(e*((a + b*x)/(c + d*x))**n)**S(2)/(S(2)*g*(f + g*x)**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((f + g*x)**S(2)*log(e*((a + b*x)/(c + d*x))**n)**S(3), x), x, a**S(2)*g**S(2)*n**S(3)*(-a*d + b*c)*polylog(S(2), b*(c + d*x)/(d*(a + b*x)))/(b**S(3)*d) - a**S(2)*g**S(2)*n**S(2)*(-a*d + b*c)*log(e*((a + b*x)/(c + d*x))**n)*log((a*d - b*c)/(d*(a + b*x)))/(b**S(3)*d) + (f + g*x)**S(3)*log(e*((a + b*x)/(c + d*x))**n)**S(3)/(S(3)*g) + S(2)*n**S(3)*(-c*g + d*f)**S(3)*polylog(S(3), d*(a + b*x)/(b*(c + d*x)))/(d**S(3)*g) - S(2)*n**S(2)*(-c*g + d*f)**S(3)*log(e*((a + b*x)/(c + d*x))**n)*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/(d**S(3)*g) - n*(-c*g + d*f)**S(3)*log(e*((a + b*x)/(c + d*x))**n)**S(2)*log((-a*d + b*c)/(b*(c + d*x)))/(d**S(3)*g) + c**S(2)*g**S(2)*n**S(3)*(-a*d + b*c)*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/(b*d**S(3)) + c**S(2)*g**S(2)*n**S(2)*(-a*d + b*c)*log(e*((a + b*x)/(c + d*x))**n)*log((-a*d + b*c)/(b*(c + d*x)))/(b*d**S(3)) - g**S(2)*n*x**S(2)*(-a*d/S(2) + b*c/S(2))*log(e*((a + b*x)/(c + d*x))**n)**S(2)/(b*d) - S(2)*n**S(3)*(-a*g + b*f)**S(3)*polylog(S(3), b*(c + d*x)/(d*(a + b*x)))/(b**S(3)*g) - S(2)*n**S(2)*(-a*g + b*f)**S(3)*log(e*((a + b*x)/(c + d*x))**n)*polylog(S(2), b*(c + d*x)/(d*(a + b*x)))/(b**S(3)*g) + n*(-a*g + b*f)**S(3)*log(e*((a + b*x)/(c + d*x))**n)**S(2)*log((a*d - b*c)/(d*(a + b*x)))/(b**S(3)*g) + g**S(2)*n**S(2)*(a + b*x)*(-a*d + b*c)**S(2)*log(e*((a + b*x)/(c + d*x))**n)/(b**S(3)*d**S(2)) - g*n*(a + b*x)*(-a*d + b*c)*(-a*d*g - b*c*g + S(3)*b*d*f)*log(e*((a + b*x)/(c + d*x))**n)**S(2)/(b**S(3)*d**S(2)) - g**S(2)*n**S(3)*(-a*d + b*c)**S(3)*log(c + d*x)/(b**S(3)*d**S(3)) - S(2)*g*n**S(3)*(-a*d + b*c)**S(2)*(-a*d*g - b*c*g + S(3)*b*d*f)*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/(b**S(3)*d**S(3)) - S(2)*g*n**S(2)*(-a*d + b*c)**S(2)*(-a*d*g - b*c*g + S(3)*b*d*f)*log(e*((a + b*x)/(c + d*x))**n)*log((-a*d + b*c)/(b*(c + d*x)))/(b**S(3)*d**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((f + g*x)*log(e*((a + b*x)/(c + d*x))**n)**S(3), x), x, (f + g*x)**S(2)*log(e*((a + b*x)/(c + d*x))**n)**S(3)/(S(2)*g) + S(3)*n**S(3)*(-c*g + d*f)**S(2)*polylog(S(3), d*(a + b*x)/(b*(c + d*x)))/(d**S(2)*g) - S(3)*n**S(2)*(-c*g + d*f)**S(2)*log(e*((a + b*x)/(c + d*x))**n)*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/(d**S(2)*g) - S(3)*n*(-c*g + d*f)**S(2)*log(e*((a + b*x)/(c + d*x))**n)**S(2)*log((-a*d + b*c)/(b*(c + d*x)))/(S(2)*d**S(2)*g) - S(3)*n**S(3)*(-a*g + b*f)**S(2)*polylog(S(3), b*(c + d*x)/(d*(a + b*x)))/(b**S(2)*g) - S(3)*n**S(2)*(-a*g + b*f)**S(2)*log(e*((a + b*x)/(c + d*x))**n)*polylog(S(2), b*(c + d*x)/(d*(a + b*x)))/(b**S(2)*g) + S(3)*n*(-a*g + b*f)**S(2)*log(e*((a + b*x)/(c + d*x))**n)**S(2)*log((a*d - b*c)/(d*(a + b*x)))/(S(2)*b**S(2)*g) + g*n*(a + b*x)*(S(3)*a*d/S(2) - S(3)*b*c/S(2))*log(e*((a + b*x)/(c + d*x))**n)**S(2)/(b**S(2)*d) - S(3)*g*n**S(3)*(-a*d + b*c)**S(2)*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/(b**S(2)*d**S(2)) - S(3)*g*n**S(2)*(-a*d + b*c)**S(2)*log(e*((a + b*x)/(c + d*x))**n)*log((-a*d + b*c)/(b*(c + d*x)))/(b**S(2)*d**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*((a + b*x)/(c + d*x))**n)**S(3), x), x, (a + b*x)*log(e*((a + b*x)/(c + d*x))**n)**S(3)/b - n**S(3)*(-S(6)*a*d + S(6)*b*c)*polylog(S(3), d*(a + b*x)/(b*(c + d*x)))/(b*d) + n**S(2)*(-S(6)*a*d + S(6)*b*c)*log(e*((a + b*x)/(c + d*x))**n)*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/(b*d) + n*(-S(3)*a*d + S(3)*b*c)*log(e*((a + b*x)/(c + d*x))**n)**S(2)*log((-a*d + b*c)/(b*(c + d*x)))/(b*d), expand=True, _diff=True, _numerical=True)
# takes long time in test assert rubi_test(rubi_integrate(log(e*((a + b*x)/(c + d*x))**n)**S(3)/(f + g*x), x), x, -S(6)*n**S(3)*polylog(S(4), d*(a + b*x)/(b*(c + d*x)))/g + S(6)*n**S(3)*polylog(S(4), (a + b*x)*(-c*g + d*f)/((c + d*x)*(-a*g + b*f)))/g - S(6)*n**S(2)*log(e*((a + b*x)/(c + d*x))**n)*polylog(S(3), (a*(-c*g + d*f) - b*c*g*x + b*d*f*x)/((c + d*x)*(-a*g + b*f)))/g + S(6)*n**S(2)*log(e*((a + b*x)/(c + d*x))**n)*polylog(S(3), d*(a + b*x)/(b*(c + d*x)))/g + S(3)*n*log(e*((a + b*x)/(c + d*x))**n)**S(2)*polylog(S(2), (a*(-c*g + d*f) - b*c*g*x + b*d*f*x)/((c + d*x)*(-a*g + b*f)))/g - S(3)*n*log(e*((a + b*x)/(c + d*x))**n)**S(2)*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/g - log(e*((a + b*x)/(c + d*x))**n)**S(3)*log((-a*d + b*c)/(b*(c + d*x)))/g + log(e*((a + b*x)/(c + d*x))**n)**S(3)*log((f + g*x)*(-a*d + b*c)/((c + d*x)*(-a*g + b*f)))/g, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*((a + b*x)/(c + d*x))**n)**S(3)/(f + g*x)**S(2), x), x, -n**S(3)*(-S(6)*a*d + S(6)*b*c)*polylog(S(3), (a*(-c*g + d*f) - b*c*g*x + b*d*f*x)/((c + d*x)*(-a*g + b*f)))/((-a*g + b*f)*(-c*g + d*f)) + n**S(2)*(-S(6)*a*d + S(6)*b*c)*log(e*((a + b*x)/(c + d*x))**n)*polylog(S(2), (a*(-c*g + d*f) - b*c*g*x + b*d*f*x)/((c + d*x)*(-a*g + b*f)))/((-a*g + b*f)*(-c*g + d*f)) + n*(-S(3)*a*d + S(3)*b*c)*log(e*((a + b*x)/(c + d*x))**n)**S(2)*log((f + g*x)*(-a*d + b*c)/((c + d*x)*(-a*g + b*f)))/((-a*g + b*f)*(-c*g + d*f)) + (a + b*x)*log(e*((a + b*x)/(c + d*x))**n)**S(3)/((f + g*x)*(-a*g + b*f)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*((a + b*x)/(c + d*x))**n)**S(3)/(f + g*x)**S(3), x), x, S(3)*b**S(2)*n**S(3)*polylog(S(3), b*(c + d*x)/(d*(a + b*x)))/(g*(-a*g + b*f)**S(2)) + S(3)*b**S(2)*n**S(2)*log(e*((a + b*x)/(c + d*x))**n)*polylog(S(2), b*(c + d*x)/(d*(a + b*x)))/(g*(-a*g + b*f)**S(2)) - S(3)*b**S(2)*n*log(e*((a + b*x)/(c + d*x))**n)**S(2)*log((a*d - b*c)/(d*(a + b*x)))/(S(2)*g*(-a*g + b*f)**S(2)) - S(3)*d**S(2)*n**S(3)*polylog(S(3), d*(a + b*x)/(b*(c + d*x)))/(g*(-c*g + d*f)**S(2)) + S(3)*d**S(2)*n**S(2)*log(e*((a + b*x)/(c + d*x))**n)*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/(g*(-c*g + d*f)**S(2)) + S(3)*d**S(2)*n*log(e*((a + b*x)/(c + d*x))**n)**S(2)*log((-a*d + b*c)/(b*(c + d*x)))/(S(2)*g*(-c*g + d*f)**S(2)) + S(3)*g*n**S(3)*(-a*d + b*c)**S(2)*polylog(S(2), (a + b*x)*(-c*g + d*f)/((c + d*x)*(-a*g + b*f)))/((-a*g + b*f)**S(2)*(-c*g + d*f)**S(2)) + S(3)*g*n**S(2)*(-a*d + b*c)**S(2)*log(e*((a + b*x)/(c + d*x))**n)*log((f + g*x)*(-a*d + b*c)/((c + d*x)*(-a*g + b*f)))/((-a*g + b*f)**S(2)*(-c*g + d*f)**S(2)) + g*n*(a + b*x)*(-S(3)*a*d/S(2) + S(3)*b*c/S(2))*log(e*((a + b*x)/(c + d*x))**n)**S(2)/((f + g*x)*(-a*g + b*f)**S(2)*(-c*g + d*f)) - n**S(3)*(-S(3)*a*d + S(3)*b*c)*(-a*d*g - b*c*g + S(2)*b*d*f)*polylog(S(3), (a*(-c*g + d*f) - b*c*g*x + b*d*f*x)/((c + d*x)*(-a*g + b*f)))/((-a*g + b*f)**S(2)*(-c*g + d*f)**S(2)) + n**S(3)*(-S(3)*a*d + S(3)*b*c)*(-a*d*g - b*c*g + S(2)*b*d*f)*polylog(S(3), d*(a + b*x)/(b*(c + d*x)))/((-a*g + b*f)**S(2)*(-c*g + d*f)**S(2)) + n**S(2)*(-S(3)*a*d + S(3)*b*c)*(-a*d*g - b*c*g + S(2)*b*d*f)*log(e*((a + b*x)/(c + d*x))**n)*polylog(S(2), (a*(-c*g + d*f) - b*c*g*x + b*d*f*x)/((c + d*x)*(-a*g + b*f)))/((-a*g + b*f)**S(2)*(-c*g + d*f)**S(2)) - n**S(2)*(-S(3)*a*d + S(3)*b*c)*(-a*d*g - b*c*g + S(2)*b*d*f)*log(e*((a + b*x)/(c + d*x))**n)*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/((-a*g + b*f)**S(2)*(-c*g + d*f)**S(2)) - n*(-S(3)*a*d/S(2) + S(3)*b*c/S(2))*(-a*d*g - b*c*g + S(2)*b*d*f)*log(e*((a + b*x)/(c + d*x))**n)**S(2)*log((-a*d + 
b*c)/(b*(c + d*x)))/((-a*g + b*f)**S(2)*(-c*g + d*f)**S(2)) + n*(-S(3)*a*d/S(2) + S(3)*b*c/S(2))*(-a*d*g - b*c*g + S(2)*b*d*f)*log(e*((a + b*x)/(c + d*x))**n)**S(2)*log((f + g*x)*(-a*d + b*c)/((c + d*x)*(-a*g + b*f)))/((-a*g + b*f)**S(2)*(-c*g + d*f)**S(2)) - log(e*((a + b*x)/(c + d*x))**n)**S(3)/(S(2)*g*(f + g*x)**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*((a + b*x)/(c + d*x))**n)**S(4), x), x, (a + b*x)*log(e*((a + b*x)/(c + d*x))**n)**S(4)/b + n**S(4)*(-S(24)*a*d + S(24)*b*c)*polylog(S(4), d*(a + b*x)/(b*(c + d*x)))/(b*d) - n**S(3)*(-S(24)*a*d + S(24)*b*c)*log(e*((a + b*x)/(c + d*x))**n)*polylog(S(3), d*(a + b*x)/(b*(c + d*x)))/(b*d) + n**S(2)*(-S(12)*a*d + S(12)*b*c)*log(e*((a + b*x)/(c + d*x))**n)**S(2)*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/(b*d) + n*(-S(4)*a*d + S(4)*b*c)*log(e*((a + b*x)/(c + d*x))**n)**S(3)*log((-a*d + b*c)/(b*(c + d*x)))/(b*d), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*((a + b*x)/(c + d*x))**n)**S(5), x), x, (a + b*x)*log(e*((a + b*x)/(c + d*x))**n)**S(5)/b - n**S(5)*(-S(120)*a*d + S(120)*b*c)*polylog(S(5), d*(a + b*x)/(b*(c + d*x)))/(b*d) + n**S(4)*(-S(120)*a*d + S(120)*b*c)*log(e*((a + b*x)/(c + d*x))**n)*polylog(S(4), d*(a + b*x)/(b*(c + d*x)))/(b*d) - n**S(3)*(-S(60)*a*d + S(60)*b*c)*log(e*((a + b*x)/(c + d*x))**n)**S(2)*polylog(S(3), d*(a + b*x)/(b*(c + d*x)))/(b*d) + n**S(2)*(-S(20)*a*d + S(20)*b*c)*log(e*((a + b*x)/(c + d*x))**n)**S(3)*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/(b*d) + n*(-S(5)*a*d + S(5)*b*c)*log(e*((a + b*x)/(c + d*x))**n)**S(4)*log((-a*d + b*c)/(b*(c + d*x)))/(b*d), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*x)**m*(c + d*x)**(-m + S(-2))/log(e*((a + b*x)/(c + d*x))**n), x), x, (e*((a + b*x)/(c + d*x))**n)**(-(m + S(1))/n)*(a + b*x)**(m + S(1))*(c + d*x)**(-m + S(-1))*Ei((m + S(1))*log(e*((a + b*x)/(c + d*x))**n)/n)/(n*(-a*d + b*c)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*x)**S(3)/((c + d*x)**S(5)*log(e*((a + b*x)/(c + d*x))**n)), x), x, (e*((a + b*x)/(c + d*x))**n)**(-S(4)/n)*(a + b*x)**S(4)*Ei(S(4)*log(e*((a + b*x)/(c + d*x))**n)/n)/(n*(c + d*x)**S(4)*(-a*d + b*c)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*x)**S(2)/((c + d*x)**S(4)*log(e*((a + b*x)/(c + d*x))**n)), x), x, (e*((a + b*x)/(c + d*x))**n)**(-S(3)/n)*(a + b*x)**S(3)*Ei(S(3)*log(e*((a + b*x)/(c + d*x))**n)/n)/(n*(c + d*x)**S(3)*(-a*d + b*c)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*x)/((c + d*x)**S(3)*log(e*((a + b*x)/(c + d*x))**n)), x), x, (e*((a + b*x)/(c + d*x))**n)**(-S(2)/n)*(a + b*x)**S(2)*Ei(S(2)*log(e*((a + b*x)/(c + d*x))**n)/n)/(n*(c + d*x)**S(2)*(-a*d + b*c)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/((c + d*x)**S(2)*log(e*((a + b*x)/(c + d*x))**n)), x), x, (e*((a + b*x)/(c + d*x))**n)**(-S(1)/n)*(a + b*x)*Ei(log(e*((a + b*x)/(c + d*x))**n)/n)/(n*(c + d*x)*(-a*d + b*c)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/((a + b*x)*(c + d*x)*log(e*((a + b*x)/(c + d*x))**n)), x), x, log(log(e*((a + b*x)/(c + d*x))**n))/(n*(-a*d + b*c)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/((a + b*x)**S(2)*log(e*((a + b*x)/(c + d*x))**n)), x), x, (e*((a + b*x)/(c + d*x))**n)**(S(1)/n)*(c + d*x)*Ei(-log(e*((a + b*x)/(c + d*x))**n)/n)/(n*(a + b*x)*(-a*d + b*c)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((c + d*x)/((a + b*x)**S(3)*log(e*((a + b*x)/(c + d*x))**n)), x), x, (e*((a + b*x)/(c + d*x))**n)**(S(2)/n)*(c + d*x)**S(2)*Ei(-S(2)*log(e*((a + b*x)/(c + d*x))**n)/n)/(n*(a + b*x)**S(2)*(-a*d + b*c)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((c + d*x)**S(2)/((a + b*x)**S(4)*log(e*((a + b*x)/(c + d*x))**n)), x), x, (e*((a + b*x)/(c + d*x))**n)**(S(3)/n)*(c + d*x)**S(3)*Ei(-S(3)*log(e*((a + b*x)/(c + d*x))**n)/n)/(n*(a + b*x)**S(3)*(-a*d + b*c)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*((a + b*x)/(c + d*x))**n)**p/((a + b*x)*(c + d*x)), x), x, log(e*((a + b*x)/(c + d*x))**n)**(p + S(1))/(n*(p + S(1))*(-a*d + b*c)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*((a + b*x)/(c + d*x))**n)**p/(a*c + b*d*x**S(2) + x*(a*d + b*c)), x), x, log(e*((a + b*x)/(c + d*x))**n)**(p + S(1))/(n*(p + S(1))*(-a*d + b*c)), expand=True, _diff=True, _numerical=True)
# Tests with log(c*x/(a + b*x)) integrands and mixed log/polylog
# integrands over (c + d*x)*(f + g*x) denominators.  Two assertions are
# left commented out because they run too long in rubi_test.
assert rubi_test(rubi_integrate(log(c*x/(a + b*x))/(a + b*x), x), x, -log(a/(a + b*x))*log(c*x/(a + b*x))/b - polylog(S(2), b*x/(a + b*x))/b, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*x/(a + b*x))**S(2)/(x*(a + b*x)), x), x, log(c*x/(a + b*x))**S(3)/(S(3)*a), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a/(a + b*x))*log(c*x/(a + b*x))**S(2)/(x*(a + b*x)), x), x, -log(c*x/(a + b*x))**S(2)*polylog(S(2), b*x/(a + b*x))/a + S(2)*log(c*x/(a + b*x))*polylog(S(3), b*x/(a + b*x))/a - S(2)*polylog(S(4), b*x/(a + b*x))/a, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*((a + b*x)/(c + d*x))**n)/((c + d*x)*(f + g*x)), x), x, -n*polylog(S(2), (a + b*x)*(-c*g + d*f)/((c + d*x)*(-a*g + b*f)))/(-c*g + d*f) - log(e*((a + b*x)/(c + d*x))**n)*log((f + g*x)*(-a*d + b*c)/((c + d*x)*(-a*g + b*f)))/(-c*g + d*f), expand=True, _diff=True, _numerical=True)
# long time in test assert rubi_test(rubi_integrate(log(e*((a + b*x)/(c + d*x))**n)**S(2)/((c + d*x)*(f + g*x)), x), x, S(2)*n**S(2)*polylog(S(3), (a*(-c*g + d*f) - b*c*g*x + b*d*f*x)/((c + d*x)*(-a*g + b*f)))/(-c*g + d*f) - S(2)*n*log(e*((a + b*x)/(c + d*x))**n)*polylog(S(2), (a*(-c*g + d*f) - b*c*g*x + b*d*f*x)/((c + d*x)*(-a*g + b*f)))/(-c*g + d*f) - log(e*((a + b*x)/(c + d*x))**n)**S(2)*log((f + g*x)*(-a*d + b*c)/((c + d*x)*(-a*g + b*f)))/(-c*g + d*f), expand=True, _diff=True, _numerical=True)
# takes a long time in rubi_test: assert rubi_test(rubi_integrate(log(e*((a + b*x)/(c + d*x))**n)**S(3)/((c + d*x)*(f + g*x)), x), x, -S(6)*n**S(3)*polylog(S(4), (a + b*x)*(-c*g + d*f)/((c + d*x)*(-a*g + b*f)))/(-c*g + d*f) + S(6)*n**S(2)*log(e*((a + b*x)/(c + d*x))**n)*polylog(S(3), (a*(-c*g + d*f) - b*c*g*x + b*d*f*x)/((c + d*x)*(-a*g + b*f)))/(-c*g + d*f) - S(3)*n*log(e*((a + b*x)/(c + d*x))**n)**S(2)*polylog(S(2), (a*(-c*g + d*f) - b*c*g*x + b*d*f*x)/((c + d*x)*(-a*g + b*f)))/(-c*g + d*f) - log(e*((a + b*x)/(c + d*x))**n)**S(3)*log((f + g*x)*(-a*d + b*c)/((c + d*x)*(-a*g + b*f)))/(-c*g + d*f), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log((-a*d + b*c)/(b*(c + d*x)))*log(e*(a + b*x)/(c + d*x))**S(2)/((c + d*x)*(a*g + b*g*x)), x), x, -log(e*(a + b*x)/(c + d*x))**S(2)*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/(g*(-a*d + b*c)) + S(2)*log(e*(a + b*x)/(c + d*x))*polylog(S(3), d*(a + b*x)/(b*(c + d*x)))/(g*(-a*d + b*c)) - S(2)*polylog(S(4), d*(a + b*x)/(b*(c + d*x)))/(g*(-a*d + b*c)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*((a + b*x)/(c + d*x))**n)**S(2)*log((-a*d + b*c)/(b*(c + d*x)))/((c + d*x)*(a*g + b*g*x)), x), x, -S(2)*n**S(2)*polylog(S(4), d*(a + b*x)/(b*(c + d*x)))/(g*(-a*d + b*c)) + S(2)*n*log(e*((a + b*x)/(c + d*x))**n)*polylog(S(3), d*(a + b*x)/(b*(c + d*x)))/(g*(-a*d + b*c)) - log(e*((a + b*x)/(c + d*x))**n)**S(2)*polylog(S(2), d*(a + b*x)/(b*(c + d*x)))/(g*(-a*d + b*c)), expand=True, _diff=True, _numerical=True)
# Tests with log(c*(a*x + b)/x) and its squared-argument relatives
# log(c*(a*x + b)**2/x**2), log(c*x**2/(a*x + b)**2), plus the
# atanh-like log(sqrt(1 - c*x)/sqrt(1 + c*x)) family over 1 - c**2*x**2
# (whose derivative makes the log its own "inner" function, so powers of
# the log integrate to the next power divided by the exponent).
assert rubi_test(rubi_integrate(log(c*(a*x + b)/x), x), x, b*log(x)/a + (a*x + b)*log(c*(a*x + b)/x)/a, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a*x + b)/x)**S(2), x), x, -S(2)*b*log(-b/(a*x))*log(c*(a*x + b)/x)/a - S(2)*b*polylog(S(2), S(1) + b/(a*x))/a + (a*x + b)*log(c*(a*x + b)/x)**S(2)/a, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a*x + b)/x)**S(3), x), x, -S(3)*b*log(-b/(a*x))*log(c*(a*x + b)/x)**S(2)/a - S(6)*b*log(c*(a*x + b)/x)*polylog(S(2), (a*x + b)/(a*x))/a + S(6)*b*polylog(S(3), (a*x + b)/(a*x))/a + (a*x + b)*log(c*(a*x + b)/x)**S(3)/a, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a*x + b)**S(2)/x**S(2)), x), x, x*log(c*(a*x + b)**S(2)/x**S(2)) + S(2)*b*log(a*x + b)/a, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a*x + b)**S(2)/x**S(2))**S(2), x), x, x*log(c*(a*x + b)**S(2)/x**S(2))**S(2) - S(4)*b*log(b/(a*x + b))*log(c*(a*x + b)**S(2)/x**S(2))/a + S(8)*b*polylog(S(2), a*x/(a*x + b))/a, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a*x + b)**S(2)/x**S(2))**S(3), x), x, x*log(c*(a*x + b)**S(2)/x**S(2))**S(3) - S(6)*b*log(b/(a*x + b))*log(c*(a*x + b)**S(2)/x**S(2))**S(2)/a + S(24)*b*log(c*(a*x + b)**S(2)/x**S(2))*polylog(S(2), a*x/(a*x + b))/a + S(48)*b*polylog(S(3), a*x/(a*x + b))/a, expand=True, _diff=True, _numerical=True)
# Same family with the ratio inverted (x**2 over (a*x + b)**2).
assert rubi_test(rubi_integrate(log(c*x**S(2)/(a*x + b)**S(2)), x), x, x*log(c*x**S(2)/(a*x + b)**S(2)) - S(2)*b*log(a*x + b)/a, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*x**S(2)/(a*x + b)**S(2))**S(2), x), x, x*log(c*x**S(2)/(a*x + b)**S(2))**S(2) + S(4)*b*log(b/(a*x + b))*log(c*x**S(2)/(a*x + b)**S(2))/a + S(8)*b*polylog(S(2), a*x/(a*x + b))/a, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*x**S(2)/(a*x + b)**S(2))**S(3), x), x, x*log(c*x**S(2)/(a*x + b)**S(2))**S(3) + S(6)*b*log(b/(a*x + b))*log(c*x**S(2)/(a*x + b)**S(2))**S(2)/a + S(24)*b*log(c*x**S(2)/(a*x + b)**S(2))*polylog(S(2), a*x/(a*x + b))/a - S(48)*b*polylog(S(3), a*x/(a*x + b))/a, expand=True, _diff=True, _numerical=True)
# Quadratic denominators: the closed forms use complex (I) splittings of
# atan into logarithms with polylog corrections.
assert rubi_test(rubi_integrate(log(a + b/x)/(d + e*x**S(2)), x), x, -I*log(sqrt(e)*(-a*x - b)/(I*a*sqrt(d) - b*sqrt(e)))*log(S(1) - I*sqrt(e)*x/sqrt(d))/(S(2)*sqrt(d)*sqrt(e)) + I*log(sqrt(e)*(a*x + b)/(I*a*sqrt(d) + b*sqrt(e)))*log(S(1) + I*sqrt(e)*x/sqrt(d))/(S(2)*sqrt(d)*sqrt(e)) + log(a + b/x)*atan(sqrt(e)*x/sqrt(d))/(sqrt(d)*sqrt(e)) - I*polylog(S(2), a*(sqrt(d) - I*sqrt(e)*x)/(a*sqrt(d) + I*b*sqrt(e)))/(S(2)*sqrt(d)*sqrt(e)) + I*polylog(S(2), a*(sqrt(d) + I*sqrt(e)*x)/(a*sqrt(d) - I*b*sqrt(e)))/(S(2)*sqrt(d)*sqrt(e)) + I*polylog(S(2), -I*sqrt(e)*x/sqrt(d))/(S(2)*sqrt(d)*sqrt(e)) - I*polylog(S(2), I*sqrt(e)*x/sqrt(d))/(S(2)*sqrt(d)*sqrt(e)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*((a + b*x)/(c + d*x))**n)/(f + g*x**S(2)), x), x, -I*n*log(sqrt(g)*(-a - b*x)/(-a*sqrt(g) + I*b*sqrt(f)))*log(S(1) - I*sqrt(g)*x/sqrt(f))/(S(2)*sqrt(f)*sqrt(g)) + I*n*log(sqrt(g)*(a + b*x)/(a*sqrt(g) + I*b*sqrt(f)))*log(S(1) + I*sqrt(g)*x/sqrt(f))/(S(2)*sqrt(f)*sqrt(g)) + I*n*log(sqrt(g)*(-c - d*x)/(-c*sqrt(g) + I*d*sqrt(f)))*log(S(1) - I*sqrt(g)*x/sqrt(f))/(S(2)*sqrt(f)*sqrt(g)) - I*n*log(sqrt(g)*(c + d*x)/(c*sqrt(g) + I*d*sqrt(f)))*log(S(1) + I*sqrt(g)*x/sqrt(f))/(S(2)*sqrt(f)*sqrt(g)) - I*n*polylog(S(2), b*(sqrt(f) - I*sqrt(g)*x)/(I*a*sqrt(g) + b*sqrt(f)))/(S(2)*sqrt(f)*sqrt(g)) + I*n*polylog(S(2), b*(sqrt(f) + I*sqrt(g)*x)/(-I*a*sqrt(g) + b*sqrt(f)))/(S(2)*sqrt(f)*sqrt(g)) + I*n*polylog(S(2), d*(sqrt(f) - I*sqrt(g)*x)/(I*c*sqrt(g) + d*sqrt(f)))/(S(2)*sqrt(f)*sqrt(g)) - I*n*polylog(S(2), d*(sqrt(f) + I*sqrt(g)*x)/(-I*c*sqrt(g) + d*sqrt(f)))/(S(2)*sqrt(f)*sqrt(g)) + log(e*((a + b*x)/(c + d*x))**n)*atan(sqrt(g)*x/sqrt(f))/(sqrt(f)*sqrt(g)), expand=True, _diff=True, _numerical=True)
# Disabled: too slow in the test run.
# long time assert rubi_test(rubi_integrate(log(e*((a + b*x)/(c + d*x))**n)/(f + g*x + h*x**S(2)), x), x, n*log((S(2)*a*h - b*g + b*(g + S(2)*h*x))/(S(2)*a*h - b*(g + sqrt(-S(4)*f*h + g**S(2)))))*log(g/sqrt(-S(4)*f*h + g**S(2)) + S(2)*h*x/sqrt(-S(4)*f*h + g**S(2)) + S(1))/sqrt(-S(4)*f*h + g**S(2)) - n*log((S(2)*c*h - d*g + d*(g + S(2)*h*x))/(S(2)*c*h - d*(g + sqrt(-S(4)*f*h + g**S(2)))))*log(g/sqrt(-S(4)*f*h + g**S(2)) + S(2)*h*x/sqrt(-S(4)*f*h + g**S(2)) + S(1))/sqrt(-S(4)*f*h + g**S(2)) - n*log((-S(2)*a*h + b*g - b*(g + S(2)*h*x))/(-S(2)*a*h + b*g - b*sqrt(-S(4)*f*h + g**S(2))))*log(-g/sqrt(-S(4)*f*h + g**S(2)) - S(2)*h*x/sqrt(-S(4)*f*h + g**S(2)) + S(1))/sqrt(-S(4)*f*h + g**S(2)) + n*log((-S(2)*c*h + d*g - d*(g + S(2)*h*x))/(-S(2)*c*h + d*g - d*sqrt(-S(4)*f*h + g**S(2))))*log(-g/sqrt(-S(4)*f*h + g**S(2)) - S(2)*h*x/sqrt(-S(4)*f*h + g**S(2)) + S(1))/sqrt(-S(4)*f*h + g**S(2)) - n*polylog(S(2), b*sqrt(-S(4)*f*h + g**S(2))*(-g/sqrt(-S(4)*f*h + g**S(2)) - S(2)*h*x/sqrt(-S(4)*f*h + g**S(2)) + S(1))/(S(2)*a*h - b*(g - sqrt(-S(4)*f*h + g**S(2)))))/sqrt(-S(4)*f*h + g**S(2)) + n*polylog(S(2), -b*sqrt(-S(4)*f*h + g**S(2))*(g/sqrt(-S(4)*f*h + g**S(2)) + S(2)*h*x/sqrt(-S(4)*f*h + g**S(2)) + S(1))/(S(2)*a*h - b*(g + sqrt(-S(4)*f*h + g**S(2)))))/sqrt(-S(4)*f*h + g**S(2)) + n*polylog(S(2), d*sqrt(-S(4)*f*h + g**S(2))*(-g/sqrt(-S(4)*f*h + g**S(2)) - S(2)*h*x/sqrt(-S(4)*f*h + g**S(2)) + S(1))/(S(2)*c*h - d*(g - sqrt(-S(4)*f*h + g**S(2)))))/sqrt(-S(4)*f*h + g**S(2)) - n*polylog(S(2), -d*sqrt(-S(4)*f*h + g**S(2))*(g/sqrt(-S(4)*f*h + g**S(2)) + S(2)*h*x/sqrt(-S(4)*f*h + g**S(2)) + S(1))/(S(2)*c*h - d*(g + sqrt(-S(4)*f*h + g**S(2)))))/sqrt(-S(4)*f*h + g**S(2)) - S(2)*log(e*((a + b*x)/(c + d*x))**n)*atanh((g + S(2)*h*x)/sqrt(-S(4)*f*h + g**S(2)))/sqrt(-S(4)*f*h + g**S(2)), expand=True, _diff=True, _numerical=True)
# atanh-like family: d/dx log(sqrt(1-c*x)/sqrt(1+c*x)) = -c/(1-c**2*x**2),
# so (a + b*log(...))**k / (1 - c**2*x**2) integrates by power rule.
assert rubi_test(rubi_integrate((a + b*log(sqrt(-c*x + S(1))/sqrt(c*x + S(1))))**n/(-c**S(2)*x**S(2) + S(1)), x), x, -(a + b*log(sqrt(-c*x + S(1))/sqrt(c*x + S(1))))**(n + S(1))/(b*c*(n + S(1))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(sqrt(-c*x + S(1))/sqrt(c*x + S(1))))**S(3)/(-c**S(2)*x**S(2) + S(1)), x), x, -(a + b*log(sqrt(-c*x + S(1))/sqrt(c*x + S(1))))**S(4)/(S(4)*b*c), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(sqrt(-c*x + S(1))/sqrt(c*x + S(1))))**S(2)/(-c**S(2)*x**S(2) + S(1)), x), x, -(a + b*log(sqrt(-c*x + S(1))/sqrt(c*x + S(1))))**S(3)/(S(3)*b*c), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*log(sqrt(-c*x + S(1))/sqrt(c*x + S(1))))/(-c**S(2)*x**S(2) + S(1)), x), x, -(a + b*log(sqrt(-c*x + S(1))/sqrt(c*x + S(1))))**S(2)/(S(2)*b*c), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/((a + b*log(sqrt(-c*x + S(1))/sqrt(c*x + S(1))))*(-c**S(2)*x**S(2) + S(1))), x), x, -log(a + b*log(sqrt(-c*x + S(1))/sqrt(c*x + S(1))))/(b*c), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/((a + b*log(sqrt(-c*x + S(1))/sqrt(c*x + S(1))))**S(2)*(-c**S(2)*x**S(2) + S(1))), x), x, S(1)/(b*c*(a + b*log(sqrt(-c*x + S(1))/sqrt(c*x + S(1))))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/((a + b*log(sqrt(-c*x + S(1))/sqrt(c*x + S(1))))**S(3)*(-c**S(2)*x**S(2) + S(1))), x), x, S(1)/(S(2)*b*c*(a + b*log(sqrt(-c*x + S(1))/sqrt(c*x + S(1))))**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(sqrt(-a*x + S(1))/sqrt(a*x + S(1)))/(-a**S(2)*x**S(2) + S(1)), x), x, -log(sqrt(-a*x + S(1))/sqrt(a*x + S(1)))**S(2)/(S(2)*a), expand=True, _diff=True, _numerical=True)
# Tests with exponential arguments: x**k * log(a + b*exp(x)) and
# x**k * log(d + e*(f**(c*(a + b*x)))**n).  The antiderivatives carry a
# ladder of polylogs up to order k + 2; dividing by x instead yields no
# elementary closed form, so rubi_integrate returns an unevaluated
# Integral and the test checks exactly that.
assert rubi_test(rubi_integrate(x**S(3)*log(a + b*exp(x)), x), x, -x**S(4)*log(S(1) + b*exp(x)/a)/S(4) + x**S(4)*log(a + b*exp(x))/S(4) - x**S(3)*polylog(S(2), -b*exp(x)/a) + S(3)*x**S(2)*polylog(S(3), -b*exp(x)/a) - S(6)*x*polylog(S(4), -b*exp(x)/a) + S(6)*polylog(S(5), -b*exp(x)/a), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)*log(a + b*exp(x)), x), x, -x**S(3)*log(S(1) + b*exp(x)/a)/S(3) + x**S(3)*log(a + b*exp(x))/S(3) - x**S(2)*polylog(S(2), -b*exp(x)/a) + S(2)*x*polylog(S(3), -b*exp(x)/a) - S(2)*polylog(S(4), -b*exp(x)/a), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(a + b*exp(x)), x), x, -x**S(2)*log(S(1) + b*exp(x)/a)/S(2) + x**S(2)*log(a + b*exp(x))/S(2) - x*polylog(S(2), -b*exp(x)/a) + polylog(S(3), -b*exp(x)/a), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a + b*exp(x)), x), x, -x*log(S(1) + b*exp(x)/a) + x*log(a + b*exp(x)) - polylog(S(2), -b*exp(x)/a), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a + b*exp(x))/x, x), x, Integral(log(a + b*exp(x))/x, x), expand=True, _diff=True, _numerical=True)
# General base f raised to c*(a + b*x), first with d = 1 ...
assert rubi_test(rubi_integrate(x**S(3)*log(e*(f**(c*(a + b*x)))**n + S(1)), x), x, -x**S(3)*polylog(S(2), -e*(f**(c*(a + b*x)))**n)/(b*c*n*log(f)) + S(3)*x**S(2)*polylog(S(3), -e*(f**(c*(a + b*x)))**n)/(b**S(2)*c**S(2)*n**S(2)*log(f)**S(2)) - S(6)*x*polylog(S(4), -e*(f**(c*(a + b*x)))**n)/(b**S(3)*c**S(3)*n**S(3)*log(f)**S(3)) + S(6)*polylog(S(5), -e*(f**(c*(a + b*x)))**n)/(b**S(4)*c**S(4)*n**S(4)*log(f)**S(4)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)*log(e*(f**(c*(a + b*x)))**n + S(1)), x), x, -x**S(2)*polylog(S(2), -e*(f**(c*(a + b*x)))**n)/(b*c*n*log(f)) + S(2)*x*polylog(S(3), -e*(f**(c*(a + b*x)))**n)/(b**S(2)*c**S(2)*n**S(2)*log(f)**S(2)) - S(2)*polylog(S(4), -e*(f**(c*(a + b*x)))**n)/(b**S(3)*c**S(3)*n**S(3)*log(f)**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(e*(f**(c*(a + b*x)))**n + S(1)), x), x, -x*polylog(S(2), -e*(f**(c*(a + b*x)))**n)/(b*c*n*log(f)) + polylog(S(3), -e*(f**(c*(a + b*x)))**n)/(b**S(2)*c**S(2)*n**S(2)*log(f)**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*(f**(c*(a + b*x)))**n + S(1)), x), x, -polylog(S(2), -e*(f**(c*(a + b*x)))**n)/(b*c*n*log(f)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(e*(f**(c*(a + b*x)))**n + S(1))/x, x), x, Integral(log(e*(f**(c*(a + b*x)))**n + S(1))/x, x), expand=True, _diff=True, _numerical=True)
# ... then with a general constant term d.
assert rubi_test(rubi_integrate(x**S(3)*log(d + e*(f**(c*(a + b*x)))**n), x), x, -x**S(4)*log(S(1) + e*(f**(c*(a + b*x)))**n/d)/S(4) + x**S(4)*log(d + e*(f**(c*(a + b*x)))**n)/S(4) - x**S(3)*polylog(S(2), -e*(f**(c*(a + b*x)))**n/d)/(b*c*n*log(f)) + S(3)*x**S(2)*polylog(S(3), -e*(f**(c*(a + b*x)))**n/d)/(b**S(2)*c**S(2)*n**S(2)*log(f)**S(2)) - S(6)*x*polylog(S(4), -e*(f**(c*(a + b*x)))**n/d)/(b**S(3)*c**S(3)*n**S(3)*log(f)**S(3)) + S(6)*polylog(S(5), -e*(f**(c*(a + b*x)))**n/d)/(b**S(4)*c**S(4)*n**S(4)*log(f)**S(4)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)*log(d + e*(f**(c*(a + b*x)))**n), x), x, -x**S(3)*log(S(1) + e*(f**(c*(a + b*x)))**n/d)/S(3) + x**S(3)*log(d + e*(f**(c*(a + b*x)))**n)/S(3) - x**S(2)*polylog(S(2), -e*(f**(c*(a + b*x)))**n/d)/(b*c*n*log(f)) + S(2)*x*polylog(S(3), -e*(f**(c*(a + b*x)))**n/d)/(b**S(2)*c**S(2)*n**S(2)*log(f)**S(2)) - S(2)*polylog(S(4), -e*(f**(c*(a + b*x)))**n/d)/(b**S(3)*c**S(3)*n**S(3)*log(f)**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(d + e*(f**(c*(a + b*x)))**n), x), x, -x**S(2)*log(S(1) + e*(f**(c*(a + b*x)))**n/d)/S(2) + x**S(2)*log(d + e*(f**(c*(a + b*x)))**n)/S(2) - x*polylog(S(2), -e*(f**(c*(a + b*x)))**n/d)/(b*c*n*log(f)) + polylog(S(3), -e*(f**(c*(a + b*x)))**n/d)/(b**S(2)*c**S(2)*n**S(2)*log(f)**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(d + e*(f**(c*(a + b*x)))**n), x), x, -x*log(S(1) + e*(f**(c*(a + b*x)))**n/d) + x*log(d + e*(f**(c*(a + b*x)))**n) - polylog(S(2), -e*(f**(c*(a + b*x)))**n/d)/(b*c*n*log(f)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(d + e*(f**(c*(a + b*x)))**n)/x, x), x, Integral(log(d + e*(f**(c*(a + b*x)))**n)/x, x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(b*(F**(e*(c + d*x)))**n + pi), x), x, x*log(pi) - polylog(S(2), -b*(F**(e*(c + d*x)))**n/pi)/(d*e*n*log(F)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x)*sin(a + b*x), x), x, -log(x)*cos(a + b*x)/b - sin(a)*Si(b*x)/b + cos(a)*Ci(b*x)/b, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x)*sin(a + b*x)**S(2), x), x, x*log(x)/S(2) - x/S(2) - log(x)*sin(a + b*x)*cos(a + b*x)/(S(2)*b) + sin(S(2)*a)*Ci(S(2)*b*x)/(S(4)*b) + cos(S(2)*a)*Si(S(2)*b*x)/(S(4)*b), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x)*sin(a + b*x)**S(3), x), x, log(x)*cos(a + b*x)**S(3)/(S(3)*b) - log(x)*cos(a + b*x)/b - S(3)*sin(a)*Si(b*x)/(S(4)*b) + sin(S(3)*a)*Si(S(3)*b*x)/(S(12)*b) + S(3)*cos(a)*Ci(b*x)/(S(4)*b) - cos(S(3)*a)*Ci(S(3)*b*x)/(S(12)*b), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x)*cos(a + b*x), x), x, log(x)*sin(a + b*x)/b - sin(a)*Ci(b*x)/b - cos(a)*Si(b*x)/b, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x)*cos(a + b*x)**S(2), x), x, x*log(x)/S(2) - x/S(2) + log(x)*sin(a + b*x)*cos(a + b*x)/(S(2)*b) - sin(S(2)*a)*Ci(S(2)*b*x)/(S(4)*b) - cos(S(2)*a)*Si(S(2)*b*x)/(S(4)*b), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x)*cos(a + b*x)**S(3), x), x, -log(x)*sin(a + b*x)**S(3)/(S(3)*b) + log(x)*sin(a + b*x)/b - S(3)*sin(a)*Ci(b*x)/(S(4)*b) - sin(S(3)*a)*Ci(S(3)*b*x)/(S(12)*b) - S(3)*cos(a)*Si(b*x)/(S(4)*b) - cos(S(3)*a)*Si(S(3)*b*x)/(S(12)*b), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x)*cos(x) + sin(x)/x, x), x, log(x)*sin(x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*sin(x)), x), x, I*x**S(2)/S(2) + x*log(a*sin(x)) - x*log(-exp(S(2)*I*x) + S(1)) + I*polylog(S(2), exp(S(2)*I*x))/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*sin(x)**S(2)), x), x, I*x**S(2) + x*log(a*sin(x)**S(2)) - S(2)*x*log(-exp(S(2)*I*x) + S(1)) + I*polylog(S(2), exp(S(2)*I*x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*sin(x)**n), x), x, I*n*x**S(2)/S(2) - n*x*log(-exp(S(2)*I*x) + S(1)) + I*n*polylog(S(2), exp(S(2)*I*x))/S(2) + x*log(a*sin(x)**n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*cos(x)), x), x, I*x**S(2)/S(2) + x*log(a*cos(x)) - x*log(exp(S(2)*I*x) + S(1)) + I*polylog(S(2), -exp(S(2)*I*x))/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*cos(x)**S(2)), x), x, I*x**S(2) + x*log(a*cos(x)**S(2)) - S(2)*x*log(exp(S(2)*I*x) + S(1)) + I*polylog(S(2), -exp(S(2)*I*x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*cos(x)**n), x), x, I*n*x**S(2)/S(2) - n*x*log(exp(S(2)*I*x) + S(1)) + I*n*polylog(S(2), -exp(S(2)*I*x))/S(2) + x*log(a*cos(x)**n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*tan(x)), x), x, x*log(a*tan(x)) + S(2)*x*atanh(exp(S(2)*I*x)) - I*polylog(S(2), -exp(S(2)*I*x))/S(2) + I*polylog(S(2), exp(S(2)*I*x))/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*tan(x)**S(2)), x), x, x*log(a*tan(x)**S(2)) + S(4)*x*atanh(exp(S(2)*I*x)) - I*polylog(S(2), -exp(S(2)*I*x)) + I*polylog(S(2), exp(S(2)*I*x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*tan(x)**n), x), x, S(2)*n*x*atanh(exp(S(2)*I*x)) - I*n*polylog(S(2), -exp(S(2)*I*x))/S(2) + I*n*polylog(S(2), exp(S(2)*I*x))/S(2) + x*log(a*tan(x)**n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*cot(x)), x), x, x*log(a*cot(x)) - S(2)*x*atanh(exp(S(2)*I*x)) + I*polylog(S(2), -exp(S(2)*I*x))/S(2) - I*polylog(S(2), exp(S(2)*I*x))/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*cot(x)**S(2)), x), x, x*log(a*cot(x)**S(2)) - S(4)*x*atanh(exp(S(2)*I*x)) + I*polylog(S(2), -exp(S(2)*I*x)) - I*polylog(S(2), exp(S(2)*I*x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*cot(x)**n), x), x, -S(2)*n*x*atanh(exp(S(2)*I*x)) + I*n*polylog(S(2), -exp(S(2)*I*x))/S(2) - I*n*polylog(S(2), exp(S(2)*I*x))/S(2) + x*log(a*cot(x)**n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*sec(x)), x), x, -I*x**S(2)/S(2) + x*log(a*sec(x)) + x*log(exp(S(2)*I*x) + S(1)) - I*polylog(S(2), -exp(S(2)*I*x))/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*sec(x)**S(2)), x), x, -I*x**S(2) + x*log(a*sec(x)**S(2)) + S(2)*x*log(exp(S(2)*I*x) + S(1)) - I*polylog(S(2), -exp(S(2)*I*x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*sec(x)**n), x), x, -I*n*x**S(2)/S(2) + n*x*log(exp(S(2)*I*x) + S(1)) - I*n*polylog(S(2), -exp(S(2)*I*x))/S(2) + x*log(a*sec(x)**n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*csc(x)), x), x, -I*x**S(2)/S(2) + x*log(a*csc(x)) + x*log(-exp(S(2)*I*x) + S(1)) - I*polylog(S(2), exp(S(2)*I*x))/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*csc(x)**S(2)), x), x, -I*x**S(2) + x*log(a*csc(x)**S(2)) + S(2)*x*log(-exp(S(2)*I*x) + S(1)) - I*polylog(S(2), exp(S(2)*I*x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*csc(x)**n), x), x, -I*n*x**S(2)/S(2) + n*x*log(-exp(S(2)*I*x) + S(1)) - I*n*polylog(S(2), exp(S(2)*I*x))/S(2) + x*log(a*csc(x)**n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(-cos(S(2)*x)/S(2) + S(1)/2)*cos(x), x), x, log(-cos(S(2)*x)/S(2) + S(1)/2)*sin(x) - S(2)*sin(x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(cot(x)/log(E*sin(x)), x), x, log(log(E*sin(x))), expand=True, _diff=True, _numerical=True) or rubi_test(rubi_integrate(cot(x)/log(E*sin(x)), x), x, log(log(sin(x)) + S(1)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(cot(x)/log(exp(sin(x))), x), x, log(log(exp(sin(x))))/(-log(exp(sin(x))) + sin(x)) - log(sin(x))/(-log(exp(sin(x))) + sin(x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(cos(x))*sec(x)**S(2), x), x, -x + log(cos(x))*tan(x) + tan(x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(sin(x))*cot(x), x), x, log(sin(x))**S(2)/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(sin(x))*sin(x)**S(2)*cos(x), x), x, log(sin(x))*sin(x)**S(3)/S(3) - sin(x)**S(3)/S(9), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(sin(a/S(2) + b*x/S(2))*cos(a/S(2) + b*x/S(2)))*cos(a + b*x), x), x, log(sin(a/S(2) + b*x/S(2))*cos(a/S(2) + b*x/S(2)))*sin(a + b*x)/b - sin(a + b*x)/b, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(tan(x)/log(cos(x)), x), x, -log(log(cos(x))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(cos(x))*tan(x), x), x, -log(cos(x))**S(2)/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(cos(x))*sin(x), x), x, -log(cos(x))*cos(x) + cos(x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(cos(x))*cos(x), x), x, log(cos(x))*sin(x) - sin(x) + atanh(sin(x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(sin(x))*cos(x), x), x, log(sin(x))*sin(x) - sin(x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(sin(x))*sin(x)**S(2), x), x, I*x**S(2)/S(4) - x*log(-exp(S(2)*I*x) + S(1))/S(2) + x*log(sin(x))/S(2) + x/S(4) - log(sin(x))*sin(x)*cos(x)/S(2) + sin(x)*cos(x)/S(4) + I*polylog(S(2), exp(S(2)*I*x))/S(4), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(sin(x))*sin(x)**S(3), x), x, log(sin(x))*cos(x)**S(3)/S(3) - log(sin(x))*cos(x) - cos(x)**S(3)/S(9) + S(2)*cos(x)/S(3) - S(2)*atanh(cos(x))/S(3), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(sin(sqrt(x))), x), x, I*x**(S(3)/2)/S(3) + I*sqrt(x)*polylog(S(2), exp(S(2)*I*sqrt(x))) - x*log(-exp(S(2)*I*sqrt(x)) + S(1)) + x*log(sin(sqrt(x))) - polylog(S(3), exp(S(2)*I*sqrt(x)))/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(sin(x))*csc(x)**S(2), x), x, -x - log(sin(x))*cot(x) - cot(x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x)*sinh(a + b*x), x), x, log(x)*cosh(a + b*x)/b - sinh(a)*Shi(b*x)/b - cosh(a)*Chi(b*x)/b, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x)*sinh(a + b*x)**S(2), x), x, -x*log(x)/S(2) + x/S(2) + log(x)*sinh(a + b*x)*cosh(a + b*x)/(S(2)*b) - sinh(S(2)*a)*Chi(S(2)*b*x)/(S(4)*b) - cosh(S(2)*a)*Shi(S(2)*b*x)/(S(4)*b), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x)*sinh(a + b*x)**S(3), x), x, log(x)*cosh(a + b*x)**S(3)/(S(3)*b) - log(x)*cosh(a + b*x)/b + S(3)*sinh(a)*Shi(b*x)/(S(4)*b) - sinh(S(3)*a)*Shi(S(3)*b*x)/(S(12)*b) + S(3)*cosh(a)*Chi(b*x)/(S(4)*b) - cosh(S(3)*a)*Chi(S(3)*b*x)/(S(12)*b), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x)*cosh(a + b*x), x), x, log(x)*sinh(a + b*x)/b - sinh(a)*Chi(b*x)/b - cosh(a)*Shi(b*x)/b, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x)*cosh(a + b*x)**S(2), x), x, x*log(x)/S(2) - x/S(2) + log(x)*sinh(a + b*x)*cosh(a + b*x)/(S(2)*b) - sinh(S(2)*a)*Chi(S(2)*b*x)/(S(4)*b) - cosh(S(2)*a)*Shi(S(2)*b*x)/(S(4)*b), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x)*cosh(a + b*x)**S(3), x), x, log(x)*sinh(a + b*x)**S(3)/(S(3)*b) + log(x)*sinh(a + b*x)/b - S(3)*sinh(a)*Chi(b*x)/(S(4)*b) - sinh(S(3)*a)*Chi(S(3)*b*x)/(S(12)*b) - S(3)*cosh(a)*Shi(b*x)/(S(4)*b) - cosh(S(3)*a)*Shi(S(3)*b*x)/(S(12)*b), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*sinh(x)), x), x, x**S(2)/S(2) + x*log(a*sinh(x)) - x*log(-exp(S(2)*x) + S(1)) - polylog(S(2), exp(S(2)*x))/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*sinh(x)**S(2)), x), x, x**S(2) + x*log(a*sinh(x)**S(2)) - S(2)*x*log(-exp(S(2)*x) + S(1)) - polylog(S(2), exp(S(2)*x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*sinh(x)**n), x), x, n*x**S(2)/S(2) - n*x*log(-exp(S(2)*x) + S(1)) - n*polylog(S(2), exp(S(2)*x))/S(2) + x*log(a*sinh(x)**n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*cosh(x)), x), x, x**S(2)/S(2) + x*log(a*cosh(x)) - x*log(exp(S(2)*x) + S(1)) - polylog(S(2), -exp(S(2)*x))/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*cosh(x)**S(2)), x), x, x**S(2) + x*log(a*cosh(x)**S(2)) - S(2)*x*log(exp(S(2)*x) + S(1)) - polylog(S(2), -exp(S(2)*x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*cosh(x)**n), x), x, n*x**S(2)/S(2) - n*x*log(exp(S(2)*x) + S(1)) - n*polylog(S(2), -exp(S(2)*x))/S(2) + x*log(a*cosh(x)**n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(tanh(x)), x), x, x*log(tanh(x)) + S(2)*x*atanh(exp(S(2)*x)) + polylog(S(2), -exp(S(2)*x))/S(2) - polylog(S(2), exp(S(2)*x))/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*tanh(x)), x), x, x*log(a*tanh(x)) + S(2)*x*atanh(exp(S(2)*x)) + polylog(S(2), -exp(S(2)*x))/S(2) - polylog(S(2), exp(S(2)*x))/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*tanh(x)**S(2)), x), x, x*log(a*tanh(x)**S(2)) + S(4)*x*atanh(exp(S(2)*x)) + polylog(S(2), -exp(S(2)*x)) - polylog(S(2), exp(S(2)*x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*tanh(x)**n), x), x, S(2)*n*x*atanh(exp(S(2)*x)) + n*polylog(S(2), -exp(S(2)*x))/S(2) - n*polylog(S(2), exp(S(2)*x))/S(2) + x*log(a*tanh(x)**n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(coth(x)), x), x, x*log(coth(x)) - S(2)*x*atanh(exp(S(2)*x)) - polylog(S(2), -exp(S(2)*x))/S(2) + polylog(S(2), exp(S(2)*x))/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*coth(x)), x), x, x*log(a*coth(x)) - S(2)*x*atanh(exp(S(2)*x)) - polylog(S(2), -exp(S(2)*x))/S(2) + polylog(S(2), exp(S(2)*x))/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*coth(x)**S(2)), x), x, x*log(a*coth(x)**S(2)) - S(4)*x*atanh(exp(S(2)*x)) - polylog(S(2), -exp(S(2)*x)) + polylog(S(2), exp(S(2)*x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*coth(x)**n), x), x, -S(2)*n*x*atanh(exp(S(2)*x)) - n*polylog(S(2), -exp(S(2)*x))/S(2) + n*polylog(S(2), exp(S(2)*x))/S(2) + x*log(a*coth(x)**n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*sech(x)), x), x, -x**S(2)/S(2) + x*log(a*sech(x)) + x*log(exp(S(2)*x) + S(1)) + polylog(S(2), -exp(S(2)*x))/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*sech(x)**S(2)), x), x, -x**S(2) + x*log(a*sech(x)**S(2)) + S(2)*x*log(exp(S(2)*x) + S(1)) + polylog(S(2), -exp(S(2)*x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*sech(x)**n), x), x, -n*x**S(2)/S(2) + n*x*log(exp(S(2)*x) + S(1)) + n*polylog(S(2), -exp(S(2)*x))/S(2) + x*log(a*sech(x)**n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*csch(x)), x), x, -x**S(2)/S(2) + x*log(a*csch(x)) + x*log(-exp(S(2)*x) + S(1)) + polylog(S(2), exp(S(2)*x))/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*csch(x)**S(2)), x), x, -x**S(2) + x*log(a*csch(x)**S(2)) + S(2)*x*log(-exp(S(2)*x) + S(1)) + polylog(S(2), exp(S(2)*x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*csch(x)**n), x), x, -n*x**S(2)/S(2) + n*x*log(-exp(S(2)*x) + S(1)) + n*polylog(S(2), exp(S(2)*x))/S(2) + x*log(a*csch(x)**n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(cosh(x)**S(2))*sinh(x), x), x, log(cosh(x)**S(2))*cosh(x) - S(2)*cosh(x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x)/sqrt(x), x), x, S(2)*sqrt(x)*log(x) - S(4)*sqrt(x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(-S(3)*x**S(2) + S(2)), x), x, -x**S(2)/S(2) - (-x**S(2)/S(2) + S(1)/3)*log(-S(3)*x**S(2) + S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x*sqrt(-log(x)**S(2) + S(1))), x), x, asin(log(x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(16)*x**S(3)*log(x)**S(2), x), x, S(4)*x**S(4)*log(x)**S(2) - S(2)*x**S(4)*log(x) + x**S(4)/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(sqrt(a + b*x)), x), x, -x/S(2) + (a + b*x)*log(sqrt(a + b*x))/b, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(sqrt(x + S(2))), x), x, x**S(2)*log(sqrt(x + S(2)))/S(2) - x**S(2)/S(8) + x/S(2) - log(x + S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log((S(3)*x + S(1))**(S(1)/3)), x), x, x**S(2)*log((S(3)*x + S(1))**(S(1)/3))/S(2) - x**S(2)/S(12) + x/S(18) - log(S(3)*x + S(1))/S(54), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(x**S(3) + x), x), x, x**S(2)*log(x**S(3) + x)/S(2) - S(3)*x**S(2)/S(4) + log(x**S(2) + S(1))/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x + sqrt(x**S(2) + S(1))), x), x, x*log(x + sqrt(x**S(2) + S(1))) - sqrt(x**S(2) + S(1)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x + sqrt(x**S(2) + S(-1))), x), x, x*log(x + sqrt(x**S(2) + S(-1))) - sqrt(x**S(2) + S(-1)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x - sqrt(x**S(2) + S(-1))), x), x, x*log(x - sqrt(x**S(2) + S(-1))) + sqrt(x**S(2) + S(-1)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(sqrt(x) + sqrt(x + S(1))), x), x, -sqrt(x)*sqrt(x + S(1))/S(2) + x*log(sqrt(x) + sqrt(x + S(1))) + asinh(sqrt(x))/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**(S(1)/3)*log(x), x), x, S(3)*x**(S(4)/3)*log(x)/S(4) - S(9)*x**(S(4)/3)/S(16), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(2)**log(x), x), x, x**(log(S(2)) + S(1))/(log(S(2)) + S(1)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((-log(x) + S(1))/x**S(2), x), x, log(x)/x, expand=True, _diff=True, _numerical=True) or rubi_test(rubi_integrate((-log(x) + S(1))/x**S(2), x), x, (log(x) + S(-1))/x + S(1)/x, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x + sqrt(x + S(1)) + S(1)), x), x, x*log(x + sqrt(x + S(1)) + S(1)) - x + sqrt(x + S(1)) + log(x + S(1))/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x**S(3) + x), x), x, x*log(x**S(3) + x) - S(3)*x + S(2)*atan(x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(2)**log(S(7)*x + S(-8)), x), x, (S(7)*x + S(-8))**(log(S(2)) + S(1))/(S(7)*(log(S(2)) + S(1))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log((S(5)*x + S(-11))/(S(76)*x + S(5))), x), x, (x + S(-11)/5)*log((S(5)*x + S(-11))/(S(76)*x + S(5))) - S(861)*log(S(76)*x + S(5))/S(380), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log((x + S(1))/(x + S(-1)))/x**S(2), x), x, S(2)*log(x) - S(2)*log(-x + S(1)) - (x + S(1))*log((-x + S(-1))/(-x + S(1)))/x, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(S(1)/(x + S(13))), x), x, x + (x + S(13))*log(S(1)/(x + S(13))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log((x + S(1))/x**S(2)), x), x, x**S(2)*log((x + S(1))/x**S(2))/S(2) + x**S(2)/S(4) + x/S(2) - log(x + S(1))/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(3)*log((S(5)*x + S(7))/x**S(2)), x), x, x**S(4)*log((S(5)*x + S(7))/x**S(2))/S(4) + x**S(4)/S(16) + S(7)*x**S(3)/S(60) - S(49)*x**S(2)/S(200) + S(343)*x/S(500) - S(2401)*log(S(5)*x + S(7))/S(2500), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*x)*log(a + b*x), x), x, -a*x/S(2) - b*x**S(2)/S(4) + (a + b*x)**S(2)*log(a + b*x)/(S(2)*b), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*x)**S(2)*log(a + b*x), x), x, (a + b*x)**S(3)*log(a + b*x)/(S(3)*b) - (a + b*x)**S(3)/(S(9)*b), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a + b*x)/(a + b*x), x), x, log(a + b*x)**S(2)/(S(2)*b), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a + b*x)/(a + b*x)**S(2), x), x, -log(a + b*x)/(b*(a + b*x)) - S(1)/(b*(a + b*x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((a + b*x)**n*log(a + b*x), x), x, (a + b*x)**(n + S(1))*log(a + b*x)/(b*(n + S(1))) - (a + b*x)**(n + S(1))/(b*(n + S(1))**S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*log(b*x)**p), x), x, x*log(a*log(b*x)**p) - p*li(b*x)/b, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*log(b*x**n)**p), x), x, -p*x*(b*x**n)**(-S(1)/n)*Ei(log(b*x**n)/n) + x*log(a*log(b*x**n)**p), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*log(b*x)**p)/x, x), x, -(p - log(a*log(b*x)**p))*log(b*x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a*log(b*x**n)**p)/x, x), x, -(p - log(a*log(b*x**n)**p))*log(b*x**n)/n, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**m*log(a*log(b*x)**p), x), x, -p*x**(m + S(1))*(b*x)**(-m + S(-1))*Ei((m + S(1))*log(b*x))/(m + S(1)) + x**(m + S(1))*log(a*log(b*x)**p)/(m + S(1)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**m*log(a*log(b*x**n)**p), x), x, -p*x**(m + S(1))*(b*x**n)**(-(m + S(1))/n)*Ei((m + S(1))*log(b*x**n)/n)/(m + S(1)) + x**(m + S(1))*log(a*log(b*x**n)**p)/(m + S(1)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x)/sqrt(a + b*log(x)), x), x, -sqrt(pi)*a*exp(-a/b)*erfi(sqrt(a + b*log(x))/sqrt(b))/b**(S(3)/2) + x*sqrt(a + b*log(x))/b - sqrt(pi)*exp(-a/b)*erfi(sqrt(a + b*log(x))/sqrt(b))/(S(2)*sqrt(b)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x)/sqrt(a - b*log(x)), x), x, -sqrt(pi)*a*exp(a/b)*erf(sqrt(a - b*log(x))/sqrt(b))/b**(S(3)/2) - x*sqrt(a - b*log(x))/b + sqrt(pi)*exp(a/b)*erf(sqrt(a - b*log(x))/sqrt(b))/(S(2)*sqrt(b)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((A + B*log(x))/sqrt(a + b*log(x)), x), x, B*x*sqrt(a + b*log(x))/b - sqrt(pi)*B*exp(-a/b)*erfi(sqrt(a + b*log(x))/sqrt(b))/(S(2)*sqrt(b)) + sqrt(pi)*(A*b - B*a)*exp(-a/b)*erfi(sqrt(a + b*log(x))/sqrt(b))/b**(S(3)/2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((A + B*log(x))/sqrt(a - b*log(x)), x), x, -B*x*sqrt(a - b*log(x))/b + sqrt(pi)*B*exp(a/b)*erf(sqrt(a - b*log(x))/sqrt(b))/(S(2)*sqrt(b)) + sqrt(pi)*(-A*b - B*a)*exp(a/b)*erf(sqrt(a - b*log(x))/sqrt(b))/b**(S(3)/2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(x)/sqrt(x**S(2) + S(-1)), x), x, sqrt(x**S(2) + S(-1))*log(x) - sqrt(x**S(2) + S(-1)) + atan(sqrt(x**S(2) + S(-1))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*sqrt(x**S(2) + S(4))*log(x), x), x, (x**S(2) + S(4))**(S(3)/2)*log(x)/S(3) - (x**S(2) + S(4))**(S(3)/2)/S(9) - S(4)*sqrt(x**S(2) + S(4))/S(3) + S(8)*atanh(sqrt(x**S(2) + S(4))/S(2))/S(3), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(a*x + b*x*log(c*x**n)), x), x, log(a + b*log(c*x**n))/(b*n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(a*x + b*x*log(c*x**n)**S(2)), x), x, atan(sqrt(b)*log(c*x**n)/sqrt(a))/(sqrt(a)*sqrt(b)*n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(a*x + b*x*log(c*x**n)**S(3)), x), x, log(a**(S(1)/3) + b**(S(1)/3)*log(c*x**n))/(S(3)*a**(S(2)/3)*b**(S(1)/3)*n) - log(a**(S(2)/3) - a**(S(1)/3)*b**(S(1)/3)*log(c*x**n) + b**(S(2)/3)*log(c*x**n)**S(2))/(S(6)*a**(S(2)/3)*b**(S(1)/3)*n) - sqrt(S(3))*atan(sqrt(S(3))*(a**(S(1)/3) - S(2)*b**(S(1)/3)*log(c*x**n))/(S(3)*a**(S(1)/3)))/(S(3)*a**(S(2)/3)*b**(S(1)/3)*n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(a*x + b*x*log(c*x**n)**S(4)), x), x, -sqrt(S(2))*log(-sqrt(S(2))*a**(S(1)/4)*b**(S(1)/4)*log(c*x**n) + sqrt(a) + sqrt(b)*log(c*x**n)**S(2))/(S(8)*a**(S(3)/4)*b**(S(1)/4)*n) + sqrt(S(2))*log(sqrt(S(2))*a**(S(1)/4)*b**(S(1)/4)*log(c*x**n) + sqrt(a) + sqrt(b)*log(c*x**n)**S(2))/(S(8)*a**(S(3)/4)*b**(S(1)/4)*n) - sqrt(S(2))*atan(S(1) - sqrt(S(2))*b**(S(1)/4)*log(c*x**n)/a**(S(1)/4))/(S(4)*a**(S(3)/4)*b**(S(1)/4)*n) + sqrt(S(2))*atan(S(1) + sqrt(S(2))*b**(S(1)/4)*log(c*x**n)/a**(S(1)/4))/(S(4)*a**(S(3)/4)*b**(S(1)/4)*n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(a*x + b*x/log(c*x**n)), x), x, log(x)/a - b*log(a*log(c*x**n) + b)/(a**S(2)*n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(a*x + b*x/log(c*x**n)**S(2)), x), x, log(x)/a - sqrt(b)*atan(sqrt(a)*log(c*x**n)/sqrt(b))/(a**(S(3)/2)*n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(a*x + b*x/log(c*x**n)**S(3)), x), x, log(x)/a - b**(S(1)/3)*log(a**(S(1)/3)*log(c*x**n) + b**(S(1)/3))/(S(3)*a**(S(4)/3)*n) + b**(S(1)/3)*log(a**(S(2)/3)*log(c*x**n)**S(2) - a**(S(1)/3)*b**(S(1)/3)*log(c*x**n) + b**(S(2)/3))/(S(6)*a**(S(4)/3)*n) + sqrt(S(3))*b**(S(1)/3)*atan(sqrt(S(3))*(-S(2)*a**(S(1)/3)*log(c*x**n) + b**(S(1)/3))/(S(3)*b**(S(1)/3)))/(S(3)*a**(S(4)/3)*n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(a*x + b*x/log(c*x**n)**S(4)), x), x, log(x)/a + sqrt(S(2))*b**(S(1)/4)*log(-sqrt(S(2))*a**(S(1)/4)*b**(S(1)/4)*log(c*x**n) + sqrt(a)*log(c*x**n)**S(2) + sqrt(b))/(S(8)*a**(S(5)/4)*n) - sqrt(S(2))*b**(S(1)/4)*log(sqrt(S(2))*a**(S(1)/4)*b**(S(1)/4)*log(c*x**n) + sqrt(a)*log(c*x**n)**S(2) + sqrt(b))/(S(8)*a**(S(5)/4)*n) - sqrt(S(2))*b**(S(1)/4)*atan(sqrt(S(2))*a**(S(1)/4)*log(c*x**n)/b**(S(1)/4) + S(-1))/(S(4)*a**(S(5)/4)*n) - sqrt(S(2))*b**(S(1)/4)*atan(sqrt(S(2))*a**(S(1)/4)*log(c*x**n)/b**(S(1)/4) + S(1))/(S(4)*a**(S(5)/4)*n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x)/(S(4)*x*log(x)**S(2) + x), x), x, log(S(4)*log(x)**S(2) + S(1))/S(8), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x*log(S(7)*x)**S(2) + x*log(S(7)*x) + x), x), x, S(2)*sqrt(S(3))*atan(sqrt(S(3))*(S(2)*log(S(7)*x) + S(1))/S(3))/S(3), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((log(S(3)*x) + S(-1))/(x*(log(S(3)*x)**S(2) - log(S(3)*x) + S(1))), x), x, log(log(S(3)*x)**S(2) - log(S(3)*x) + S(1))/S(2) + sqrt(S(3))*atan(sqrt(S(3))*(-S(2)*log(S(3)*x) + S(1))/S(3))/S(3), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((log(S(3)*x)**S(2) + S(-1))/(x*log(S(3)*x)**S(3) + x), x), x, log(log(S(3)*x)**S(2) - log(S(3)*x) + S(1))/S(2) + sqrt(S(3))*atan(sqrt(S(3))*(-S(2)*log(S(3)*x) + S(1))/S(3))/S(3), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((log(S(3)*x)**S(2) + S(-1))/(x*log(S(3)*x)**S(2) + x*log(S(3)*x) + x), x), x, log(x) - log(log(S(3)*x)**S(2) + log(S(3)*x) + S(1))/S(2) - sqrt(S(3))*atan(sqrt(S(3))*(S(2)*log(S(3)*x) + S(1))/S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x*(log(x) + S(3))), x), x, log(log(x) + S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(sqrt(log(x) + S(1))/x, x), x, S(2)*(log(x) + S(1))**(S(3)/2)/S(3), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((log(x) + S(1))**S(5)/x, x), x, (log(x) + S(1))**S(6)/S(6), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x*sqrt(log(x))), x), x, S(2)*sqrt(log(x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x*(log(x)**S(2) + S(1))), x), x, atan(log(x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x*sqrt(log(x)**S(2) + S(-3))), x), x, atanh(log(x)/sqrt(log(x)**S(2) + S(-3))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x*sqrt(-S(9)*log(x)**S(2) + S(4))), x), x, asin(S(3)*log(x)/S(2))/S(3), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x*sqrt(log(x)**S(2) + S(4))), x), x, asinh(log(x)/S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x*(S(3)*log(S(6)*x)**S(3) + S(2))), x), x, S(2)**(S(1)/3)*S(3)**(S(2)/3)*log(S(3)**(S(1)/3)*log(S(6)*x) + S(2)**(S(1)/3))/S(18) - S(2)**(S(1)/3)*S(3)**(S(2)/3)*log(S(3)**(S(2)/3)*log(S(6)*x)**S(2) - S(6)**(S(1)/3)*log(S(6)*x) + S(2)**(S(2)/3))/S(36) - S(2)**(S(1)/3)*S(3)**(S(1)/6)*atan(sqrt(S(3))*(-S(2)**(S(2)/3)*S(3)**(S(1)/3)*log(S(6)*x) + S(1))/S(3))/S(6), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(log(S(6)*x))/(x*log(S(6)*x)), x), x, log(log(S(6)*x))**S(2)/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(2)**log(x)/x, x), x, S(2)**log(x)/log(S(2)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(sin(log(x))**S(2)/x, x), x, log(x)/S(2) - sin(log(x))*cos(log(x))/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((-log(x) + S(7))/(x*(log(x) + S(3))), x), x, -log(x) + S(10)*log(log(x) + S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((-log(x) + S(2))*(log(x) + S(3))**S(2)/x, x), x, -log(x)**S(4)/S(4) - S(4)*log(x)**S(3)/S(3) + S(3)*log(x)**S(2)/S(2) + S(18)*log(x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(sqrt(log(x)**S(2) + S(1))*log(x)**S(2)/x, x), x, sqrt(log(x)**S(2) + S(1))*log(x)**S(3)/S(4) + sqrt(log(x)**S(2) + S(1))*log(x)/S(8) - asinh(log(x))/S(8), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((log(x) + S(1))/(x*(S(2)*log(x) + S(3))**S(2)), x), x, log(S(2)*log(x) + S(3))/S(4) + S(1)/(S(4)*(S(2)*log(x) + S(3))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x)/(x*sqrt(log(x) + S(1))), x), x, S(2)*(log(x) + S(1))**(S(3)/2)/S(3) - S(2)*sqrt(log(x) + S(1)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x)/(x*sqrt(S(4)*log(x) + S(-1))), x), x, (S(4)*log(x) + S(-1))**(S(3)/2)/S(24) + sqrt(S(4)*log(x) + S(-1))/S(8), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(sqrt(log(x) + S(1))/(x*log(x)), x), x, S(2)*sqrt(log(x) + S(1)) - S(2)*atanh(sqrt(log(x) + S(1))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((log(x)**S(2) - S(4)*log(x) + S(1))/(x*(log(x) + S(-1))**S(4)), x), x, (log(x) + S(-1))**(S(-2)) + S(1)/(-log(x) + S(1)) - S(2)/(S(3)*(-log(x) + S(1))**S(3)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(S(1)/x)**S(2)/x**S(5), x), x, -log(S(1)/x)**S(2)/(S(4)*x**S(4)) + log(S(1)/x)/(S(8)*x**S(4)) - S(1)/(S(32)*x**S(4)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((log(a*x**n)**S(2))**p/x, x), x, (log(a*x**n)**S(2))**p*log(a*x**n)/(n*(S(2)*p + S(1))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((log(a*x**n)**m)**p/x, x), x, (log(a*x**n)**m)**p*log(a*x**n)/(n*(m*p + S(1))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(sqrt(log(a*x**n)**S(2))/x, x), x, sqrt(log(a*x**n)**S(2))*log(a*x**n)/(S(2)*n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((b*log(a*x**n)**m)**p/x, x), x, (b*log(a*x**n)**m)**p*log(a*x**n)/(n*(m*p + S(1))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/sqrt(-log(a*x**S(2))), x), x, -sqrt(S(2))*sqrt(pi)*x*erf(sqrt(S(2))*sqrt(-log(a*x**S(2)))/S(2))/(S(2)*sqrt(a*x**S(2))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/sqrt(-log(a/x**S(2))), x), x, sqrt(S(2))*sqrt(pi)*x*sqrt(a/x**S(2))*erfi(sqrt(S(2))*sqrt(-log(a/x**S(2)))/S(2))/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/sqrt(-log(a*x**n)), x), x, -sqrt(pi)*x*(a*x**n)**(-S(1)/n)*erf(sqrt(-log(a*x**n))/sqrt(n))/sqrt(n), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(sqrt(x) - x + S(1))/x, x), x, -S(2)*log(sqrt(x))*log((-S(2)*sqrt(x) - sqrt(S(5)) + S(1))/(-sqrt(S(5)) + S(1))) + S(2)*log(sqrt(x))*log(sqrt(x) - x + S(1)) - S(2)*log(S(1)/2 + sqrt(S(5))/S(2))*log(-S(2)*sqrt(x) + S(1) + sqrt(S(5))) - S(2)*polylog(S(2), S(2)*sqrt(x)/(-sqrt(S(5)) + S(1))) + S(2)*polylog(S(2), (-S(2)*sqrt(x) + S(1) + sqrt(S(5)))/(S(1) + sqrt(S(5)))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(c + d*x)/(a + b*x), x), x, -a*log(-d*(a + b*x)/(-a*d + b*c))*log(c + d*x)/b**S(2) - a*polylog(S(2), b*(c + d*x)/(-a*d + b*c))/b**S(2) - x/b + (c + d*x)*log(c + d*x)/(b*d), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x)/(x + S(-1)), x), x, -polylog(S(2), -x + S(1)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x*log(-a - b*x + S(1))/(a + b*x), x), x, a*polylog(S(2), a + b*x)/b**S(2) - x/b - (-a - b*x + S(1))*log(-a - b*x + S(1))/b**S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((b + S(2)*c*x)*log(x)/(x*(b + c*x)), x), x, log(x)**S(2)/S(2) + log(x)*log((b + c*x)/b) + polylog(S(2), -c*x/b), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x)*sin(x*log(x)) + sin(x*log(x)), x), x, -cos(x*log(x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log((-(x + S(-1))**S(2) + S(1))/((x + S(-1))**S(2) + S(1)))/x**S(2), x), x, log(x)/S(2) + log(-x + S(2))/S(2) - log(x**S(2) - S(2)*x + S(2))/S(2) - atan(x + S(-1)) - log((-(-x + S(1))**S(2) + S(1))/((x + S(-1))**S(2) + S(1)))/x - S(1)/x, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(sqrt(x) + x), x), x, sqrt(x) + x*log(sqrt(x) + x) - x - log(sqrt(x) + S(1)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(-x/(x + S(1))), x), x, x*log(-x/(x + S(1))) - log(x + S(1)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log((x + S(-1))/(x + S(1))), x), x, (x + S(-1))*log((x + S(-1))/(x + S(1))) - S(2)*log(x + S(1)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log((-x**S(2) + S(1))/(x**S(2) + S(1)))/(x + S(1))**S(2), x), x, log(-x**S(2) + S(1))/S(2) - log(x**S(2) + S(1))/S(2) - atan(x) - log((-x**S(2) + S(1))/(x**S(2) + S(1)))/(x + S(1)) - S(1)/(x + S(1)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x)/(-x**S(2) + S(1)), x), x, log(x)*atanh(x) + polylog(S(2), -x)/S(2) - polylog(S(2), x)/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x)/(x**S(2) + S(1)), x), x, log(x)*atan(x) - I*polylog(S(2), -I*x)/S(2) + I*polylog(S(2), I*x)/S(2), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(x**S(2) + S(1))**n)/(x**S(2) + S(1)), x), x, S(2)*n*log(S(2)*I/(-x + I))*atan(x) + I*n*atan(x)**S(2) + I*n*polylog(S(2), (-x - I)/(-x + I)) + log(c*(x**S(2) + S(1))**n)*atan(x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(x**S(2)/(x**S(2) + S(1)))/(x**S(2) + S(1)), x), x, -S(2)*log(S(2)*x/(x + I))*atan(x) + log(x**S(2)/(x**S(2) + S(1)))*atan(x) + I*atan(x)**S(2) + I*polylog(S(2), (-x + I)/(x + I)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*x**n)/(a + b*x**S(2)), x), x, -I*n*polylog(S(2), -I*sqrt(b)*x/sqrt(a))/(S(2)*sqrt(a)*sqrt(b)) + I*n*polylog(S(2), I*sqrt(b)*x/sqrt(a))/(S(2)*sqrt(a)*sqrt(b)) + log(c*x**n)*atan(sqrt(b)*x/sqrt(a))/(sqrt(a)*sqrt(b)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*(a + b*x**S(2))**n)/(a + b*x**S(2)), x), x, S(2)*n*log(S(2)*I*sqrt(a)/(I*sqrt(a) - sqrt(b)*x))*atan(sqrt(b)*x/sqrt(a))/(sqrt(a)*sqrt(b)) + I*n*atan(sqrt(b)*x/sqrt(a))**S(2)/(sqrt(a)*sqrt(b)) + I*n*polylog(S(2), (-sqrt(a) + I*sqrt(b)*x)/(sqrt(a) + I*sqrt(b)*x))/(sqrt(a)*sqrt(b)) + log(c*(a + b*x**S(2))**n)*atan(sqrt(b)*x/sqrt(a))/(sqrt(a)*sqrt(b)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(c*x**S(2)/(a + b*x**S(2)))/(a + b*x**S(2)), x), x, -S(2)*log(S(2)*sqrt(b)*x/(I*sqrt(a) + sqrt(b)*x))*atan(sqrt(b)*x/sqrt(a))/(sqrt(a)*sqrt(b)) + log(c*x**S(2)/(a + b*x**S(2)))*atan(sqrt(b)*x/sqrt(a))/(sqrt(a)*sqrt(b)) + I*atan(sqrt(b)*x/sqrt(a))**S(2)/(sqrt(a)*sqrt(b)) + I*polylog(S(2), (sqrt(a) + I*sqrt(b)*x)/(sqrt(a) - I*sqrt(b)*x))/(sqrt(a)*sqrt(b)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(I*sqrt(-a*x + S(1))/sqrt(a*x + S(1)) + S(1))/(-a**S(2)*x**S(2) + S(1)), x), x, polylog(S(2), -I*sqrt(-a*x + S(1))/sqrt(a*x + S(1)))/a, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(-I*sqrt(-a*x + S(1))/sqrt(a*x + S(1)) + S(1))/(-a**S(2)*x**S(2) + S(1)), x), x, polylog(S(2), I*sqrt(-a*x + S(1))/sqrt(a*x + S(1)))/a, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(exp(a + b*x)), x), x, log(exp(a + b*x))**S(2)/(S(2)*b), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(exp(a + b*x**n)), x), x, -b*n*x**(n + S(1))/(n + S(1)) + x*log(exp(a + b*x**n)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(exp(x)*log(a + b*exp(x)), x), x, -exp(x) + (a + b*exp(x))*log(a + b*exp(x))/b, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x*log(exp(x))), x), x, -log(x)/(x - log(exp(x))) + log(log(exp(x)))/(x - log(exp(x))), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(exp(a + b*x)*log(x), x), x, -exp(a)*Ei(b*x)/b + exp(a + b*x)*log(x)/b, expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x**S(2)/(x + log(x)), x), x, Integral(x**S(2)/(x + log(x)), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(x/(x + log(x)), x), x, Integral(x/(x + log(x)), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x + log(x)), x), x, Integral(S(1)/(x + log(x)), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x*(x + log(x))), x), x, Integral(S(1)/(x*(x + log(x))), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(S(1)/(x**S(2)*(x + log(x))), x), x, Integral(S(1)/(x**S(2)*(x + log(x))), x), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate((-log(x) + S(1))/(x*(x + log(x))), x), x, log(S(1) + log(x)/x), expand=True, _diff=True, _numerical=True)
''' apart
# apart assert rubi_test(rubi_integrate((x + S(1))/((x + log(x))*log(x)), x), x, -log(x + log(x)) + log(log(x)) + li(x), expand=True, _diff=True, _numerical=True) or rubi_test(rubi_integrate((x + S(1))/((x + log(x))*log(x)), x), x, -log(x + log(x)) + log(log(x)) + Ei(log(x)), expand=True, _diff=True, _numerical=True)
# assert rubi_test(rubi_integrate(log(sqrt((x + S(1))/x) + S(2)), x), x, x*log(sqrt((x + S(1))/x) + S(2)) - log(-sqrt((x + S(1))/x) + S(1))/S(6) + log(sqrt((x + S(1))/x) + S(1))/S(2) - log(sqrt((x + S(1))/x) + S(2))/S(3), expand=True, _diff=True, _numerical=True)
# assert rubi_test(rubi_integrate(log(sqrt((x + S(1))/x) + S(1)), x), x, x*log(sqrt((x + S(1))/x) + S(1)) + atanh(sqrt((x + S(1))/x))/S(2) - S(1)/(S(2)*(sqrt((x + S(1))/x) + S(1))), expand=True, _diff=True, _numerical=True)
# assert rubi_test(rubi_integrate(log(sqrt((x + S(1))/x)), x), x, (x + S(1))*log(sqrt((x + S(1))/x)) + log(x)/S(2), expand=True, _diff=True, _numerical=True)
# assert rubi_test(rubi_integrate(log(sqrt((x + S(1))/x) + S(-1)), x), x, x*log(sqrt((x + S(1))/x) + S(-1)) - atanh(sqrt(S(1) + S(1)/x))/S(2) - S(1)/(S(2)*(-sqrt(S(1) + S(1)/x) + S(1))), expand=True, _diff=True, _numerical=True)
# assert rubi_test(rubi_integrate(log(sqrt((x + S(1))/x) + S(-2)), x), x, x*log(sqrt((x + S(1))/x) + S(-2)) + log(-sqrt(S(1) + S(1)/x) + S(1))/S(2) - log(-sqrt(S(1) + S(1)/x) + S(2))/S(3) - log(sqrt(S(1) + S(1)/x) + S(1))/S(6), expand=True, _diff=True, _numerical=True)
'''
assert rubi_test(rubi_integrate(x**(a*x)*log(x) + x**(a*x), x), x, x**(a*x)/a, expand=True, _diff=True, _numerical=True)
# fails in mathematica too assert rubi_test(rubi_integrate((log(x)**m)**p, x), x, (-log(x))**(-m*p)*(log(x)**m)**p*Gamma(m*p + S(1), -log(x)), expand=True, _diff=True, _numerical=True)
assert rubi_test(rubi_integrate(log(a + b*x + c*sqrt(d + e*x))/(f + g*x**S(2)), x), x, -log((a*e - b*d + b*(d + e*x) + c*e*sqrt(d + e*x))/e)*log(-g**(S(1)/4)*sqrt(d + e*x) + sqrt(d*sqrt(g) - e*sqrt(-f)))/(S(2)*sqrt(g)*sqrt(-f)) + log((a*e - b*d + b*(d + e*x) + c*e*sqrt(d + e*x))/e)*log(-g**(S(1)/4)*sqrt(d + e*x) + sqrt(d*sqrt(g) + e*sqrt(-f)))/(S(2)*sqrt(g)*sqrt(-f)) - log((a*e - b*d + b*(d + e*x) + c*e*sqrt(d + e*x))/e)*log(g**(S(1)/4)*sqrt(d + e*x) + sqrt(d*sqrt(g) - e*sqrt(-f)))/(S(2)*sqrt(g)*sqrt(-f)) + log((a*e - b*d + b*(d + e*x) + c*e*sqrt(d + e*x))/e)*log(g**(S(1)/4)*sqrt(d + e*x) + sqrt(d*sqrt(g) + e*sqrt(-f)))/(S(2)*sqrt(g)*sqrt(-f)) + log(-g**(S(1)/4)*(S(2)*b*sqrt(d + e*x) + c*e - sqrt(-S(4)*a*b*e + S(4)*b**S(2)*d + c**S(2)*e**S(2)))/(S(2)*b*sqrt(d*sqrt(g) - e*sqrt(-f)) - g**(S(1)/4)*(c*e - sqrt(-S(4)*a*b*e + S(4)*b**S(2)*d + c**S(2)*e**S(2)))))*log(g**(S(1)/4)*sqrt(d + e*x) + sqrt(d*sqrt(g) - e*sqrt(-f)))/(S(2)*sqrt(g)*sqrt(-f)) + log(g**(S(1)/4)*(S(2)*b*sqrt(d + e*x) + c*e - sqrt(-S(4)*a*b*e + S(4)*b**S(2)*d + c**S(2)*e**S(2)))/(S(2)*b*sqrt(d*sqrt(g) - e*sqrt(-f)) + g**(S(1)/4)*(c*e - sqrt(-S(4)*a*b*e + S(4)*b**S(2)*d + c**S(2)*e**S(2)))))*log(-g**(S(1)/4)*sqrt(d + e*x) + sqrt(d*sqrt(g) - e*sqrt(-f)))/(S(2)*sqrt(g)*sqrt(-f)) + log(-g**(S(1)/4)*(S(2)*b*sqrt(d + e*x) + c*e + sqrt(-S(4)*a*b*e + S(4)*b**S(2)*d + c**S(2)*e**S(2)))/(S(2)*b*sqrt(d*sqrt(g) - e*sqrt(-f)) - g**(S(1)/4)*(c*e + sqrt(-S(4)*a*b*e + S(4)*b**S(2)*d + c**S(2)*e**S(2)))))*log(g**(S(1)/4)*sqrt(d + e*x) + sqrt(d*sqrt(g) - e*sqrt(-f)))/(S(2)*sqrt(g)*sqrt(-f)) + log(g**(S(1)/4)*(S(2)*b*sqrt(d + e*x) + c*e + sqrt(-S(4)*a*b*e + S(4)*b**S(2)*d + c**S(2)*e**S(2)))/(S(2)*b*sqrt(d*sqrt(g) - e*sqrt(-f)) + g**(S(1)/4)*(c*e + sqrt(-S(4)*a*b*e + S(4)*b**S(2)*d + c**S(2)*e**S(2)))))*log(-g**(S(1)/4)*sqrt(d + e*x) + sqrt(d*sqrt(g) - e*sqrt(-f)))/(S(2)*sqrt(g)*sqrt(-f)) - log(-g**(S(1)/4)*(S(2)*b*sqrt(d + e*x) + c*e - sqrt(-S(4)*a*b*e + S(4)*b**S(2)*d + c**S(2)*e**S(2)))/(S(2)*b*sqrt(d*sqrt(g) + 
e*sqrt(-f)) - g**(S(1)/4)*(c*e - sqrt(-S(4)*a*b*e + S(4)*b**S(2)*d + c**S(2)*e**S(2)))))*log(g**(S(1)/4)*sqrt(d + e*x) + sqrt(d*sqrt(g) + e*sqrt(-f)))/(S(2)*sqrt(g)*sqrt(-f)) - log(g**(S(1)/4)*(S(2)*b*sqrt(d + e*x) + c*e - sqrt(-S(4)*a*b*e + S(4)*b**S(2)*d + c**S(2)*e**S(2)))/(S(2)*b*sqrt(d*sqrt(g) + e*sqrt(-f)) + g**(S(1)/4)*(c*e - sqrt(-S(4)*a*b*e + S(4)*b**S(2)*d + c**S(2)*e**S(2)))))*log(-g**(S(1)/4)*sqrt(d + e*x) + sqrt(d*sqrt(g) + e*sqrt(-f)))/(S(2)*sqrt(g)*sqrt(-f)) - log(-g**(S(1)/4)*(S(2)*b*sqrt(d + e*x) + c*e + sqrt(-S(4)*a*b*e + S(4)*b**S(2)*d + c**S(2)*e**S(2)))/(S(2)*b*sqrt(d*sqrt(g) + e*sqrt(-f)) - g**(S(1)/4)*(c*e + sqrt(-S(4)*a*b*e + S(4)*b**S(2)*d + c**S(2)*e**S(2)))))*log(g**(S(1)/4)*sqrt(d + e*x) + sqrt(d*sqrt(g) + e*sqrt(-f)))/(S(2)*sqrt(g)*sqrt(-f)) - log(g**(S(1)/4)*(S(2)*b*sqrt(d + e*x) + c*e + sqrt(-S(4)*a*b*e + S(4)*b**S(2)*d + c**S(2)*e**S(2)))/(S(2)*b*sqrt(d*sqrt(g) + e*sqrt(-f)) + g**(S(1)/4)*(c*e + sqrt(-S(4)*a*b*e + S(4)*b**S(2)*d + c**S(2)*e**S(2)))))*log(-g**(S(1)/4)*sqrt(d + e*x) + sqrt(d*sqrt(g) + e*sqrt(-f)))/(S(2)*sqrt(g)*sqrt(-f)) + polylog(S(2), S(2)*b*(g**(S(1)/4)*sqrt(d + e*x) + sqrt(d*sqrt(g) - e*sqrt(-f)))/(S(2)*b*sqrt(d*sqrt(g) - e*sqrt(-f)) - g**(S(1)/4)*(c*e - sqrt(-S(4)*a*b*e + S(4)*b**S(2)*d + c**S(2)*e**S(2)))))/(S(2)*sqrt(g)*sqrt(-f)) + polylog(S(2), S(2)*b*(-g**(S(1)/4)*sqrt(d + e*x) + sqrt(d*sqrt(g) - e*sqrt(-f)))/(S(2)*b*sqrt(d*sqrt(g) - e*sqrt(-f)) + g**(S(1)/4)*(c*e - sqrt(-S(4)*a*b*e + S(4)*b**S(2)*d + c**S(2)*e**S(2)))))/(S(2)*sqrt(g)*sqrt(-f)) + polylog(S(2), S(2)*b*(g**(S(1)/4)*sqrt(d + e*x) + sqrt(d*sqrt(g) - e*sqrt(-f)))/(S(2)*b*sqrt(d*sqrt(g) - e*sqrt(-f)) - g**(S(1)/4)*(c*e + sqrt(-S(4)*a*b*e + S(4)*b**S(2)*d + c**S(2)*e**S(2)))))/(S(2)*sqrt(g)*sqrt(-f)) + polylog(S(2), S(2)*b*(-g**(S(1)/4)*sqrt(d + e*x) + sqrt(d*sqrt(g) - e*sqrt(-f)))/(S(2)*b*sqrt(d*sqrt(g) - e*sqrt(-f)) + g**(S(1)/4)*(c*e + sqrt(-S(4)*a*b*e + S(4)*b**S(2)*d + c**S(2)*e**S(2)))))/(S(2)*sqrt(g)*sqrt(-f)) - polylog(S(2), 
S(2)*b*(g**(S(1)/4)*sqrt(d + e*x) + sqrt(d*sqrt(g) + e*sqrt(-f)))/(S(2)*b*sqrt(d*sqrt(g) + e*sqrt(-f)) - g**(S(1)/4)*(c*e - sqrt(-S(4)*a*b*e + S(4)*b**S(2)*d + c**S(2)*e**S(2)))))/(S(2)*sqrt(g)*sqrt(-f)) - polylog(S(2), S(2)*b*(-g**(S(1)/4)*sqrt(d + e*x) + sqrt(d*sqrt(g) + e*sqrt(-f)))/(S(2)*b*sqrt(d*sqrt(g) + e*sqrt(-f)) + g**(S(1)/4)*(c*e - sqrt(-S(4)*a*b*e + S(4)*b**S(2)*d + c**S(2)*e**S(2)))))/(S(2)*sqrt(g)*sqrt(-f)) - polylog(S(2), S(2)*b*(g**(S(1)/4)*sqrt(d + e*x) + sqrt(d*sqrt(g) + e*sqrt(-f)))/(S(2)*b*sqrt(d*sqrt(g) + e*sqrt(-f)) - g**(S(1)/4)*(c*e + sqrt(-S(4)*a*b*e + S(4)*b**S(2)*d + c**S(2)*e**S(2)))))/(S(2)*sqrt(g)*sqrt(-f)) - polylog(S(2), S(2)*b*(-g**(S(1)/4)*sqrt(d + e*x) + sqrt(d*sqrt(g) + e*sqrt(-f)))/(S(2)*b*sqrt(d*sqrt(g) + e*sqrt(-f)) + g**(S(1)/4)*(c*e + sqrt(-S(4)*a*b*e + S(4)*b**S(2)*d + c**S(2)*e**S(2)))))/(S(2)*sqrt(g)*sqrt(-f)), expand=True, _diff=True, _numerical=True)
| 384.46
| 4,886
| 0.458676
| 118,296
| 442,129
| 1.677512
| 0.001817
| 0.084195
| 0.033501
| 0.114819
| 0.990133
| 0.983925
| 0.97361
| 0.961908
| 0.944815
| 0.926604
| 0
| 0.051553
| 0.099636
| 442,129
| 1,149
| 4,887
| 384.794604
| 0.446951
| 0.022688
| 0
| 0.034515
| 0
| 0.000933
| 0.000196
| 0
| 0
| 0
| 0
| 0
| 0.981343
| 1
| 0.001866
| false
| 0
| 0.010261
| 0
| 0.012127
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
c7f321de62548926cf92c332a03163894751761d
| 67,338
|
py
|
Python
|
tests/ut/python/dataset/test_cache_map.py
|
GeekHee/mindspore
|
896b8e5165dd0a900ed5a39e0fb23525524bf8b0
|
[
"Apache-2.0"
] | null | null | null |
tests/ut/python/dataset/test_cache_map.py
|
GeekHee/mindspore
|
896b8e5165dd0a900ed5a39e0fb23525524bf8b0
|
[
"Apache-2.0"
] | null | null | null |
tests/ut/python/dataset/test_cache_map.py
|
GeekHee/mindspore
|
896b8e5165dd0a900ed5a39e0fb23525524bf8b0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing cache operator with mappable datasets
"""
import os
import pytest
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as c_vision
from mindspore import log as logger
from util import save_and_check_md5
# Fixture data locations used by the cache tests in this file.
# (Per the tests below: DATA_DIR holds 2 images, the COCO dir holds 6,
# CelebA/Manifest hold 4 records each, and the MindRecord file holds 5.)
DATA_DIR = "../data/dataset/testImageNetData/train/"
COCO_DATA_DIR = "../data/dataset/testCOCO/train/"
COCO_ANNOTATION_FILE = "../data/dataset/testCOCO/annotations/train.json"
NO_IMAGE_DIR = "../data/dataset/testRandomData/"
MNIST_DATA_DIR = "../data/dataset/testMnistData/"
CELEBA_DATA_DIR = "../data/dataset/testCelebAData/"
VOC_DATA_DIR = "../data/dataset/testVOC2012/"
MANIFEST_DATA_FILE = "../data/dataset/testManifestData/test.manifest"
CIFAR10_DATA_DIR = "../data/dataset/testCifar10Data/"
CIFAR100_DATA_DIR = "../data/dataset/testCifar100Data/"
MIND_RECORD_DATA_DIR = "../data/mindrecord/testTwoImageData/twobytes.mindrecord"
# When True, save_and_check_md5 regenerates the golden .npz result files
# instead of comparing the pipeline output against them.
GENERATE_GOLDEN = False
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_basic1():
    """
    Cache op placed directly over the mappable leaf, with the decode map
    and a repeat above it; output is checked against the golden md5 file.

        Repeat
           |
        Map(decode)
           |
        Cache
           |
        ImageFolder
    """
    logger.info("Test cache map basic 1")
    # Fall back to session 1 when the environment does not provide one.
    session_id = int(os.environ['SESSION_ID']) if "SESSION_ID" in os.environ else 1
    some_cache = ds.DatasetCache(session_id=session_id, size=0)
    # This DATA_DIR only has 2 images in it
    ds1 = ds.ImageFolderDataset(dataset_dir=DATA_DIR, cache=some_cache)
    ds1 = ds1.map(operations=c_vision.Decode(), input_columns=["image"])
    ds1 = ds1.repeat(4)
    # Compare the pipeline output against the stored golden result.
    save_and_check_md5(ds1, "cache_map_01_result.npz", generate_golden=GENERATE_GOLDEN)
    logger.info("test_cache_map_basic1 Ended.\n")
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_basic2():
    """
    Cache op placed higher in the tree, above the map(decode); output is
    checked against the golden md5 file.

        Repeat
           |
        Cache
           |
        Map(decode)
           |
        ImageFolder
    """
    logger.info("Test cache map basic 2")
    raw_session = os.environ.get("SESSION_ID")
    if raw_session is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    session_id = int(raw_session)
    some_cache = ds.DatasetCache(session_id=session_id, size=0)
    # This DATA_DIR only has 2 images in it
    ds1 = ds.ImageFolderDataset(dataset_dir=DATA_DIR)
    ds1 = ds1.map(operations=c_vision.Decode(), input_columns=["image"], cache=some_cache)
    ds1 = ds1.repeat(4)
    # Compare the pipeline output against the stored golden result.
    save_and_check_md5(ds1, "cache_map_02_result.npz", generate_golden=GENERATE_GOLDEN)
    logger.info("test_cache_map_basic2 Ended.\n")
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_basic3():
    """
    Test different rows result in core dump

    Cache sits below a repeat; iterating the full pipeline must yield
    2 images * 4 repeats = 8 rows.
    """
    logger.info("Test cache basic 3")
    if "SESSION_ID" in os.environ:
        session_id = int(os.environ['SESSION_ID'])
    else:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    some_cache = ds.DatasetCache(session_id=session_id, size=0)
    # This DATA_DIR only has 2 images in it
    ds1 = ds.ImageFolderDataset(dataset_dir=DATA_DIR, cache=some_cache)
    decode_op = c_vision.Decode()
    ds1 = ds1.repeat(4)
    ds1 = ds1.map(operations=decode_op, input_columns=["image"])
    # Fix: the original call was logger.info("ds1.dataset_size is ", size),
    # which passes the size as a lazy %-format argument to a message with no
    # placeholder, so the value was silently dropped from the log output.
    logger.info("ds1.dataset_size is %s", ds1.get_dataset_size())
    shape = ds1.output_shapes()
    logger.info(shape)
    num_iter = 0
    for _ in ds1.create_dict_iterator(num_epochs=1):
        logger.info("get data from dataset")
        num_iter += 1
    logger.info("Number of data in ds1: {} ".format(num_iter))
    assert num_iter == 8
    logger.info('test_cache_basic3 Ended.\n')
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_basic4():
    """
    Map ops containing a random operation sit above the cache, so caching
    is still legal; expects 2 images * 4 repeats = 8 rows.

        repeat
           |
        Map(decode, randomCrop)
           |
        Cache
           |
        ImageFolder
    """
    logger.info("Test cache basic 4")
    raw_session = os.environ.get("SESSION_ID")
    if raw_session is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    session_id = int(raw_session)
    some_cache = ds.DatasetCache(session_id=session_id, size=0)
    # This DATA_DIR only has 2 images in it
    data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, cache=some_cache)
    data = data.map(input_columns=["image"], operations=c_vision.Decode())
    data = data.map(input_columns=["image"],
                    operations=c_vision.RandomCrop([512, 512], [200, 200, 200, 200]))
    data = data.repeat(4)
    num_iter = sum(1 for _ in data.create_dict_iterator())
    logger.info("Number of data in ds1: {} ".format(num_iter))
    assert num_iter == 8
    logger.info('test_cache_basic4 Ended.\n')
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_basic5():
    """
    Cache as the root node of the tree; expects the 2 images of DATA_DIR.

        cache
          |
        ImageFolder
    """
    logger.info("Test cache basic 5")
    raw_session = os.environ.get("SESSION_ID")
    if raw_session is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    session_id = int(raw_session)
    some_cache = ds.DatasetCache(session_id=session_id, size=0)
    # This DATA_DIR only has 2 images in it
    ds1 = ds.ImageFolderDataset(dataset_dir=DATA_DIR, cache=some_cache)
    num_iter = 0
    for _ in ds1.create_dict_iterator(num_epochs=1):
        logger.info("get data from dataset")
        num_iter += 1
    logger.info("Number of data in ds1: {} ".format(num_iter))
    assert num_iter == 2
    logger.info('test_cache_basic5 Ended.\n')
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_failure1():
    """
    Nested caches must be rejected at runtime.

        Repeat
           |
        Cache
           |
        Map(decode)
           |
        Cache
           |
        Coco
    """
    logger.info("Test cache failure 1")
    raw_session = os.environ.get("SESSION_ID")
    if raw_session is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    session_id = int(raw_session)
    some_cache = ds.DatasetCache(session_id=session_id, size=0)
    # This DATA_DIR has 6 images in it
    ds1 = ds.CocoDataset(COCO_DATA_DIR, annotation_file=COCO_ANNOTATION_FILE, task="Detection", decode=True,
                         cache=some_cache)
    # Re-using the same cache on the map creates the illegal nesting.
    ds1 = ds1.map(operations=c_vision.Decode(), input_columns=["image"], cache=some_cache)
    ds1 = ds1.repeat(4)
    with pytest.raises(RuntimeError) as err:
        ds1.get_batch_size()
    assert "Nested cache operations" in str(err.value)
    num_iter = 0
    with pytest.raises(RuntimeError) as err:
        for _ in ds1.create_dict_iterator(num_epochs=1):
            num_iter += 1
    assert "Nested cache operations" in str(err.value)
    assert num_iter == 0
    logger.info('test_cache_failure1 Ended.\n')
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_failure2():
    """
    A zip node below a cache must be rejected at runtime.

        repeat
           |
        Cache
           |
        Map(decode)
           |
          Zip
          |  |
        ImageFolder ImageFolder
    """
    logger.info("Test cache failure 2")
    raw_session = os.environ.get("SESSION_ID")
    if raw_session is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    session_id = int(raw_session)
    some_cache = ds.DatasetCache(session_id=session_id, size=0)
    # This DATA_DIR only has 2 images in it
    ds1 = ds.ImageFolderDataset(dataset_dir=DATA_DIR)
    ds2 = ds.ImageFolderDataset(dataset_dir=DATA_DIR)
    dsz = ds.zip((ds1, ds2))
    dsz = dsz.map(input_columns=["image"], operations=c_vision.Decode(), cache=some_cache)
    dsz = dsz.repeat(4)
    num_iter = 0
    with pytest.raises(RuntimeError) as err:
        for _ in dsz.create_dict_iterator():
            num_iter += 1
    assert "ZipNode is not supported as a descendant operator under a cache" in str(err.value)
    assert num_iter == 0
    logger.info('test_cache_failure2 Ended.\n')
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_failure3():
    """
    A batch node below a cache must be rejected at runtime.

        repeat
           |
        Cache
           |
        Map(resize)
           |
        Batch
           |
        Mnist
    """
    logger.info("Test cache failure 3")
    raw_session = os.environ.get("SESSION_ID")
    if raw_session is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    session_id = int(raw_session)
    some_cache = ds.DatasetCache(session_id=session_id, size=0)
    ds1 = ds.MnistDataset(MNIST_DATA_DIR, num_samples=10)
    ds1 = ds1.batch(2)
    ds1 = ds1.map(input_columns=["image"], operations=c_vision.Resize((224, 224)), cache=some_cache)
    ds1 = ds1.repeat(4)
    num_iter = 0
    with pytest.raises(RuntimeError) as err:
        for _ in ds1.create_dict_iterator():
            num_iter += 1
    assert "BatchNode is not supported as a descendant operator under a cache" in str(err.value)
    assert num_iter == 0
    logger.info('test_cache_failure3 Ended.\n')
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_failure4():
    """
    A filter node below a cache must be rejected at runtime.

        repeat
           |
        Cache
           |
        Map(decode)
           |
        Filter
           |
        CelebA
    """
    logger.info("Test cache failure 4")
    raw_session = os.environ.get("SESSION_ID")
    if raw_session is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    session_id = int(raw_session)
    some_cache = ds.DatasetCache(session_id=session_id, size=0)
    # This dataset has 4 records
    ds1 = ds.CelebADataset(CELEBA_DATA_DIR, shuffle=False, decode=True)
    ds1 = ds1.filter(predicate=lambda data: data < 11, input_columns=["label"])
    ds1 = ds1.map(input_columns=["image"], operations=c_vision.Decode(), cache=some_cache)
    ds1 = ds1.repeat(4)
    num_iter = 0
    with pytest.raises(RuntimeError) as err:
        for _ in ds1.create_dict_iterator():
            num_iter += 1
    assert "FilterNode is not supported as a descendant operator under a cache" in str(err.value)
    assert num_iter == 0
    logger.info('test_cache_failure4 Ended.\n')
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_failure5():
    """
    A map containing a random op below a cache must be rejected at runtime.

        repeat
           |
        Cache
           |
        Map(decode, randomCrop)
           |
        Manifest
    """
    logger.info("Test cache failure 5")
    raw_session = os.environ.get("SESSION_ID")
    if raw_session is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    session_id = int(raw_session)
    some_cache = ds.DatasetCache(session_id=session_id, size=0)
    # This dataset has 4 records
    data = ds.ManifestDataset(MANIFEST_DATA_FILE, decode=True)
    data = data.map(input_columns=["image"], operations=c_vision.Decode())
    # The random crop under the cache is what triggers the failure.
    data = data.map(input_columns=["image"],
                    operations=c_vision.RandomCrop([512, 512], [200, 200, 200, 200]),
                    cache=some_cache)
    data = data.repeat(4)
    num_iter = 0
    with pytest.raises(RuntimeError) as err:
        for _ in data.create_dict_iterator():
            num_iter += 1
    assert "MapNode containing random operation is not supported as a descendant of cache" in str(err.value)
    assert num_iter == 0
    logger.info('test_cache_failure5 Ended.\n')
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_failure6():
    """
    MindRecord leaves do not support caching; the pipeline must fail.

        repeat
           |
        Cache
           |
        Map(resize)
           |
        MindRecord
    """
    logger.info("Test cache failure 6")
    raw_session = os.environ.get("SESSION_ID")
    if raw_session is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    session_id = int(raw_session)
    some_cache = ds.DatasetCache(session_id=session_id, size=0)
    columns_list = ["id", "file_name", "label_name", "img_data", "label_data"]
    num_readers = 1
    # The dataset has 5 records
    data = ds.MindDataset(MIND_RECORD_DATA_DIR, columns_list, num_readers)
    data = data.map(input_columns=["img_data"], operations=c_vision.Resize((224, 224)), cache=some_cache)
    data = data.repeat(4)
    num_iter = 0
    with pytest.raises(RuntimeError) as err:
        for _ in data.create_dict_iterator():
            num_iter += 1
    assert "There is currently no support for MindRecordOp under cache" in str(err.value)
    assert num_iter == 0
    logger.info('test_cache_failure6 Ended.\n')
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_failure7():
    """
    Generator leaves do not support caching; the pipeline must fail.

        repeat
           |
        Cache
           |
        Map(lambda x: x)
           |
        Generator
    """
    def generator_1d():
        # Emit 64 single-element rows.
        for i in range(64):
            yield (np.array(i),)
    logger.info("Test cache failure 7")
    raw_session = os.environ.get("SESSION_ID")
    if raw_session is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    session_id = int(raw_session)
    some_cache = ds.DatasetCache(session_id=session_id, size=0)
    data = ds.GeneratorDataset(generator_1d, ["data"])
    data = data.map(operations=(lambda x: x), input_columns=["data"], cache=some_cache)
    data = data.repeat(4)
    num_iter = 0
    with pytest.raises(RuntimeError) as err:
        for _ in data.create_dict_iterator():
            num_iter += 1
    assert "There is currently no support for GeneratorOp under cache" in str(err.value)
    assert num_iter == 0
    logger.info('test_cache_failure7 Ended.\n')
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_failure8():
    """
    A repeat below a cache over a mappable source must be rejected.

        Cache
           |
        Map(decode)
           |
        Repeat
           |
        Cifar10
    """
    logger.info("Test cache failure 8")
    raw_session = os.environ.get("SESSION_ID")
    if raw_session is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    session_id = int(raw_session)
    some_cache = ds.DatasetCache(session_id=session_id, size=0)
    ds1 = ds.Cifar10Dataset(CIFAR10_DATA_DIR, num_samples=10)
    ds1 = ds1.repeat(4)
    ds1 = ds1.map(operations=c_vision.Decode(), input_columns=["image"], cache=some_cache)
    num_iter = 0
    with pytest.raises(RuntimeError) as err:
        for _ in ds1.create_dict_iterator(num_epochs=1):
            num_iter += 1
    assert "A cache over a RepeatNode of a mappable dataset is not supported" in str(err.value)
    assert num_iter == 0
    logger.info('test_cache_failure8 Ended.\n')
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_failure9():
    """
    A take node below a cache must be rejected at runtime.

        repeat
           |
        Cache
           |
        Map(decode)
           |
        Take
           |
        Cifar100
    """
    logger.info("Test cache failure 9")
    raw_session = os.environ.get("SESSION_ID")
    if raw_session is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    session_id = int(raw_session)
    some_cache = ds.DatasetCache(session_id=session_id, size=0)
    ds1 = ds.Cifar100Dataset(CIFAR100_DATA_DIR, num_samples=10)
    ds1 = ds1.take(2)
    ds1 = ds1.map(input_columns=["image"], operations=c_vision.Decode(), cache=some_cache)
    ds1 = ds1.repeat(4)
    num_iter = 0
    with pytest.raises(RuntimeError) as err:
        for _ in ds1.create_dict_iterator():
            num_iter += 1
    assert "TakeNode (possibly from Split) is not supported as a descendant operator under a cache" in str(err.value)
    assert num_iter == 0
    logger.info('test_cache_failure9 Ended.\n')
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_failure10():
    """
    A skip node below a cache must be rejected at runtime.

        repeat
           |
        Cache
           |
        Map(decode)
           |
        Skip
           |
        VOC
    """
    logger.info("Test cache failure 10")
    raw_session = os.environ.get("SESSION_ID")
    if raw_session is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    session_id = int(raw_session)
    some_cache = ds.DatasetCache(session_id=session_id, size=0)
    # This dataset has 9 records
    ds1 = ds.VOCDataset(VOC_DATA_DIR, task="Detection", usage="train", shuffle=False, decode=True)
    ds1 = ds1.skip(1)
    ds1 = ds1.map(input_columns=["image"], operations=c_vision.Decode(), cache=some_cache)
    ds1 = ds1.repeat(4)
    num_iter = 0
    with pytest.raises(RuntimeError) as err:
        for _ in ds1.create_dict_iterator():
            num_iter += 1
    assert "SkipNode is not supported as a descendant operator under a cache" in str(err.value)
    assert num_iter == 0
    logger.info('test_cache_failure10 Ended.\n')
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_failure11():
    """
    Requesting spilling against a server started without spill support
    must fail at runtime.

        Cache(spilling=true)
           |
        ImageFolder
    """
    logger.info("Test cache failure 11")
    raw_session = os.environ.get("SESSION_ID")
    if raw_session is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    session_id = int(raw_session)
    some_cache = ds.DatasetCache(session_id=session_id, size=0, spilling=True)
    # This DATA_DIR only has 2 images in it
    ds1 = ds.ImageFolderDataset(dataset_dir=DATA_DIR, cache=some_cache)
    num_iter = 0
    with pytest.raises(RuntimeError) as err:
        for _ in ds1.create_dict_iterator():
            num_iter += 1
    assert "Unexpected error. Server is not set up with spill support" in str(err.value)
    assert num_iter == 0
    logger.info('test_cache_failure11 Ended.\n')
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_split1():
    """
    Test split (after a non-source node) under cache (failure).
    Split after a non-source node is implemented with TakeOp/SkipOp, hence the failure.

    repeat <- Cache <- Map(resize) <- Split <- Map(decode) <- ImageFolder
    """
    logger.info("Test cache split 1")
    sid = os.environ.get('SESSION_ID')
    if sid is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    some_cache = ds.DatasetCache(session_id=int(sid), size=0)

    # DATA_DIR holds only 2 images.
    base = ds.ImageFolderDataset(dataset_dir=DATA_DIR)
    base = base.map(input_columns=["image"], operations=c_vision.Decode())
    ds1, ds2 = base.split([0.5, 0.5])
    resize = c_vision.Resize((224, 224))
    ds1 = ds1.map(input_columns=["image"], operations=resize, cache=some_cache).repeat(4)
    ds2 = ds2.map(input_columns=["image"], operations=resize, cache=some_cache).repeat(4)

    # Both split branches must be rejected: the split is lowered to Take/Skip,
    # which cannot sit below a cache.
    with pytest.raises(RuntimeError) as e:
        for _ in ds1.create_dict_iterator():
            pass
    assert "TakeNode (possibly from Split) is not supported as a descendant operator under a cache" in str(e.value)
    with pytest.raises(RuntimeError) as e:
        for _ in ds2.create_dict_iterator():
            pass
    assert "TakeNode (possibly from Split) is not supported as a descendant operator under a cache" in str(e.value)
    logger.info('test_cache_split1 Ended.\n')
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_split2():
    """
    Test split (after a source node) under cache (ok).
    Split after a source node is implemented with a subset sampler, hence ok.

    repeat <- Cache <- Map(resize) <- Split <- VOCDataset
    """
    logger.info("Test cache split 2")
    sid = os.environ.get('SESSION_ID')
    if sid is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    some_cache = ds.DatasetCache(session_id=int(sid), size=0)

    # The VOC dataset used here has 9 records: split 0.3/0.7 -> 3 and 6 rows.
    base = ds.VOCDataset(VOC_DATA_DIR, task="Detection", usage="train", shuffle=False, decode=True)
    ds1, ds2 = base.split([0.3, 0.7])
    resize = c_vision.Resize((224, 224))
    ds1 = ds1.map(input_columns=["image"], operations=resize, cache=some_cache).repeat(4)
    ds2 = ds2.map(input_columns=["image"], operations=resize, cache=some_cache).repeat(4)

    # 3 rows * repeat(4) and 6 rows * repeat(4).
    assert sum(1 for _ in ds1.create_dict_iterator()) == 12
    assert sum(1 for _ in ds2.create_dict_iterator()) == 24
    logger.info('test_cache_split2 Ended.\n')
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_parameter_check():
    """
    Test illegal parameters for DatasetCache.
    """
    logger.info("Test cache map parameter check")

    # session_id: must be a non-negative int.
    with pytest.raises(ValueError) as exc:
        ds.DatasetCache(session_id=-1, size=0)
    assert "Input is not within the required interval" in str(exc.value)
    with pytest.raises(TypeError) as exc:
        ds.DatasetCache(session_id="1", size=0)
    assert "Argument session_id with value 1 is not of type (<class 'int'>,)" in str(exc.value)
    with pytest.raises(TypeError) as exc:
        ds.DatasetCache(session_id=None, size=0)
    assert "Argument session_id with value None is not of type (<class 'int'>,)" in str(exc.value)

    # size: must be a non-negative int.
    with pytest.raises(ValueError) as exc:
        ds.DatasetCache(session_id=1, size=-1)
    assert "Input size must be greater than 0" in str(exc.value)
    with pytest.raises(TypeError) as exc:
        ds.DatasetCache(session_id=1, size="1")
    assert "Argument size with value 1 is not of type (<class 'int'>,)" in str(exc.value)
    with pytest.raises(TypeError) as exc:
        ds.DatasetCache(session_id=1, size=None)
    assert "Argument size with value None is not of type (<class 'int'>,)" in str(exc.value)

    # spilling: must be a bool.
    with pytest.raises(TypeError) as exc:
        ds.DatasetCache(session_id=1, size=0, spilling="illegal")
    assert "Argument spilling with value illegal is not of type (<class 'bool'>,)" in str(exc.value)

    # hostname: must be a string naming the local host.
    with pytest.raises(TypeError) as exc:
        ds.DatasetCache(session_id=1, size=0, hostname=50052)
    assert "Argument hostname with value 50052 is not of type (<class 'str'>,)" in str(exc.value)
    with pytest.raises(RuntimeError) as exc:
        ds.DatasetCache(session_id=1, size=0, hostname="illegal")
    assert "now cache client has to be on the same host with cache server" in str(exc.value)
    with pytest.raises(RuntimeError) as exc:
        ds.DatasetCache(session_id=1, size=0, hostname="127.0.0.2")
    assert "now cache client has to be on the same host with cache server" in str(exc.value)

    # port: must be an int within (1025, 65535).
    with pytest.raises(TypeError) as exc:
        ds.DatasetCache(session_id=1, size=0, port="illegal")
    assert "Argument port with value illegal is not of type (<class 'int'>,)" in str(exc.value)
    with pytest.raises(TypeError) as exc:
        ds.DatasetCache(session_id=1, size=0, port="50052")
    assert "Argument port with value 50052 is not of type (<class 'int'>,)" in str(exc.value)
    with pytest.raises(ValueError) as exc:
        ds.DatasetCache(session_id=1, size=0, port=0)
    assert "Input port is not within the required interval of (1025 to 65535)" in str(exc.value)
    with pytest.raises(ValueError) as exc:
        ds.DatasetCache(session_id=1, size=0, port=65536)
    assert "Input port is not within the required interval of (1025 to 65535)" in str(exc.value)

    # cache argument of a dataset: must be a DatasetCache, not a bool.
    with pytest.raises(TypeError) as exc:
        ds.ImageFolderDataset(dataset_dir=DATA_DIR, cache=True)
    assert "Argument cache with value True is not of type" in str(exc.value)
    logger.info("test_cache_map_parameter_check Ended.\n")
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_running_twice1():
    """
    Executing the same pipeline twice (from python), with cache injected after map.

    Repeat <- Cache <- Map(decode) <- ImageFolder
    """
    logger.info("Test cache map running twice 1")
    sid = os.environ.get('SESSION_ID')
    if sid is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    some_cache = ds.DatasetCache(session_id=int(sid), size=0)

    # DATA_DIR holds 2 images; repeat(4) -> 8 rows per pass.
    ds1 = ds.ImageFolderDataset(dataset_dir=DATA_DIR)
    ds1 = ds1.map(input_columns=["image"], operations=c_vision.Decode(), cache=some_cache)
    ds1 = ds1.repeat(4)

    # First pass populates the cache.
    num_iter = sum(1 for _ in ds1.create_dict_iterator())
    logger.info("Number of data in ds1: {} ".format(num_iter))
    assert num_iter == 8

    # Second pass is served from the cache and must yield the same row count.
    num_iter = sum(1 for _ in ds1.create_dict_iterator())
    logger.info("Number of data in ds1: {} ".format(num_iter))
    assert num_iter == 8
    logger.info("test_cache_map_running_twice1 Ended.\n")
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_running_twice2():
    """
    Executing the same pipeline twice (from shell), with cache injected after leaf.

    Repeat <- Map(decode) <- Cache <- ImageFolder
    """
    logger.info("Test cache map running twice 2")
    sid = os.environ.get('SESSION_ID')
    if sid is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    some_cache = ds.DatasetCache(session_id=int(sid), size=0)

    # DATA_DIR holds 2 images; repeat(4) -> 8 rows.
    ds1 = ds.ImageFolderDataset(dataset_dir=DATA_DIR, cache=some_cache)
    ds1 = ds1.map(input_columns=["image"], operations=c_vision.Decode())
    ds1 = ds1.repeat(4)

    num_iter = sum(1 for _ in ds1.create_dict_iterator())
    logger.info("Number of data in ds1: {} ".format(num_iter))
    assert num_iter == 8
    logger.info("test_cache_map_running_twice2 Ended.\n")
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_extra_small_size1():
    """
    Test running pipeline with cache of extra small size and spilling true.

    Repeat <- Map(decode) <- Cache <- ImageFolder
    """
    logger.info("Test cache map extra small size 1")
    sid = os.environ.get('SESSION_ID')
    if sid is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    # size=1 forces eviction; spilling=True lets overflow go to disk.
    some_cache = ds.DatasetCache(session_id=int(sid), size=1, spilling=True)

    # DATA_DIR holds 2 images; repeat(4) -> 8 rows.
    ds1 = ds.ImageFolderDataset(dataset_dir=DATA_DIR, cache=some_cache)
    ds1 = ds1.map(input_columns=["image"], operations=c_vision.Decode())
    ds1 = ds1.repeat(4)

    num_iter = sum(1 for _ in ds1.create_dict_iterator())
    logger.info("Number of data in ds1: {} ".format(num_iter))
    assert num_iter == 8
    logger.info("test_cache_map_extra_small_size1 Ended.\n")
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_extra_small_size2():
    """
    Test running pipeline with cache of extra small size and spilling false.

    Repeat <- Cache <- Map(decode) <- ImageFolder
    """
    logger.info("Test cache map extra small size 2")
    sid = os.environ.get('SESSION_ID')
    if sid is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    # size=1 with no spilling: rows beyond capacity are simply not cached.
    some_cache = ds.DatasetCache(session_id=int(sid), size=1, spilling=False)

    # DATA_DIR holds 2 images; repeat(4) -> 8 rows.
    ds1 = ds.ImageFolderDataset(dataset_dir=DATA_DIR)
    ds1 = ds1.map(input_columns=["image"], operations=c_vision.Decode(), cache=some_cache)
    ds1 = ds1.repeat(4)

    num_iter = sum(1 for _ in ds1.create_dict_iterator())
    logger.info("Number of data in ds1: {} ".format(num_iter))
    assert num_iter == 8
    logger.info("test_cache_map_extra_small_size2 Ended.\n")
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_no_image():
    """
    Test cache with no dataset existing in the path.

    Repeat <- Map(decode) <- Cache <- ImageFolder
    """
    logger.info("Test cache map no image")
    sid = os.environ.get('SESSION_ID')
    if sid is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    some_cache = ds.DatasetCache(session_id=int(sid), size=1, spilling=False)

    # NO_IMAGE_DIR contains no images, so the pipeline must fail at iteration time.
    ds1 = ds.ImageFolderDataset(dataset_dir=NO_IMAGE_DIR, cache=some_cache)
    ds1 = ds1.map(input_columns=["image"], operations=c_vision.Decode())
    ds1 = ds1.repeat(4)

    with pytest.raises(RuntimeError):
        num_iter = 0
        for _ in ds1.create_dict_iterator():
            num_iter += 1
    assert num_iter == 0
    logger.info("test_cache_map_no_image Ended.\n")
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_parallel_pipeline1(shard):
    """
    Test running two parallel pipelines (sharing cache) with cache injected after leaf op.

    Repeat <- Map(decode) <- Cache <- ImageFolder

    Args:
        shard: shard id (0 or 1) for this pipeline instance.
    """
    logger.info("Test cache map parallel pipeline 1")
    sid = os.environ.get('SESSION_ID')
    if sid is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    some_cache = ds.DatasetCache(session_id=int(sid), size=0)

    # DATA_DIR holds 2 images; 2 shards -> 1 row per shard, repeat(4) -> 4 rows.
    ds1 = ds.ImageFolderDataset(dataset_dir=DATA_DIR, num_shards=2, shard_id=int(shard), cache=some_cache)
    ds1 = ds1.map(input_columns=["image"], operations=c_vision.Decode())
    ds1 = ds1.repeat(4)

    num_iter = sum(1 for _ in ds1.create_dict_iterator())
    logger.info("Number of data in ds1: {} ".format(num_iter))
    assert num_iter == 4
    logger.info("test_cache_map_parallel_pipeline1 Ended.\n")
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_parallel_pipeline2(shard):
    """
    Test running two parallel pipelines (sharing cache) with cache injected after map op.

    Repeat <- Cache <- Map(decode) <- ImageFolder

    Args:
        shard: shard id (0 or 1) for this pipeline instance.
    """
    logger.info("Test cache map parallel pipeline 2")
    sid = os.environ.get('SESSION_ID')
    if sid is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    some_cache = ds.DatasetCache(session_id=int(sid), size=0)

    # DATA_DIR holds 2 images; 2 shards -> 1 row per shard, repeat(4) -> 4 rows.
    ds1 = ds.ImageFolderDataset(dataset_dir=DATA_DIR, num_shards=2, shard_id=int(shard))
    ds1 = ds1.map(input_columns=["image"], operations=c_vision.Decode(), cache=some_cache)
    ds1 = ds1.repeat(4)

    num_iter = sum(1 for _ in ds1.create_dict_iterator())
    logger.info("Number of data in ds1: {} ".format(num_iter))
    assert num_iter == 4
    logger.info("test_cache_map_parallel_pipeline2 Ended.\n")
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_parallel_workers():
    """
    Test cache with num_parallel_workers > 1 set for map op and leaf op.

    Repeat <- cache <- Map(decode) <- ImageFolder
    """
    logger.info("Test cache map parallel workers")
    sid = os.environ.get('SESSION_ID')
    if sid is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    some_cache = ds.DatasetCache(session_id=int(sid), size=0)

    # DATA_DIR holds 2 images; repeat(4) -> 8 rows.
    ds1 = ds.ImageFolderDataset(dataset_dir=DATA_DIR, num_parallel_workers=4)
    ds1 = ds1.map(input_columns=["image"], operations=c_vision.Decode(),
                  num_parallel_workers=4, cache=some_cache)
    ds1 = ds1.repeat(4)

    num_iter = sum(1 for _ in ds1.create_dict_iterator())
    logger.info("Number of data in ds1: {} ".format(num_iter))
    assert num_iter == 8
    logger.info("test_cache_map_parallel_workers Ended.\n")
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_server_workers_1():
    """
    Start cache server with --workers 1 and then test cache function.

    Repeat <- cache <- Map(decode) <- ImageFolder
    """
    logger.info("Test cache map server workers 1")
    sid = os.environ.get('SESSION_ID')
    if sid is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    some_cache = ds.DatasetCache(session_id=int(sid), size=0)

    # DATA_DIR holds 2 images; repeat(4) -> 8 rows.
    ds1 = ds.ImageFolderDataset(dataset_dir=DATA_DIR)
    ds1 = ds1.map(input_columns=["image"], operations=c_vision.Decode(), cache=some_cache)
    ds1 = ds1.repeat(4)

    num_iter = sum(1 for _ in ds1.create_dict_iterator())
    logger.info("Number of data in ds1: {} ".format(num_iter))
    assert num_iter == 8
    logger.info("test_cache_map_server_workers_1 Ended.\n")
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_server_workers_100():
    """
    Start cache server with --workers 100 and then test cache function.

    Repeat <- Map(decode) <- cache <- ImageFolder
    """
    logger.info("Test cache map server workers 100")
    sid = os.environ.get('SESSION_ID')
    if sid is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    some_cache = ds.DatasetCache(session_id=int(sid), size=0)

    # DATA_DIR holds 2 images; repeat(4) -> 8 rows.
    ds1 = ds.ImageFolderDataset(dataset_dir=DATA_DIR, cache=some_cache)
    ds1 = ds1.map(input_columns=["image"], operations=c_vision.Decode())
    ds1 = ds1.repeat(4)

    num_iter = sum(1 for _ in ds1.create_dict_iterator())
    logger.info("Number of data in ds1: {} ".format(num_iter))
    assert num_iter == 8
    logger.info("test_cache_map_server_workers_100 Ended.\n")
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_num_connections_1():
    """
    Test setting num_connections=1 in DatasetCache.

    Repeat <- cache <- Map(decode) <- ImageFolder
    """
    logger.info("Test cache map num_connections 1")
    sid = os.environ.get('SESSION_ID')
    if sid is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    some_cache = ds.DatasetCache(session_id=int(sid), size=0, num_connections=1)

    # DATA_DIR holds 2 images; repeat(4) -> 8 rows.
    ds1 = ds.ImageFolderDataset(dataset_dir=DATA_DIR)
    ds1 = ds1.map(input_columns=["image"], operations=c_vision.Decode(), cache=some_cache)
    ds1 = ds1.repeat(4)

    num_iter = sum(1 for _ in ds1.create_dict_iterator())
    logger.info("Number of data in ds1: {} ".format(num_iter))
    assert num_iter == 8
    logger.info("test_cache_map_num_connections_1 Ended.\n")
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_num_connections_100():
    """
    Test setting num_connections=100 in DatasetCache.

    Repeat <- Map(decode) <- cache <- ImageFolder
    """
    logger.info("Test cache map num_connections 100")
    sid = os.environ.get('SESSION_ID')
    if sid is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    some_cache = ds.DatasetCache(session_id=int(sid), size=0, num_connections=100)

    # DATA_DIR holds 2 images; repeat(4) -> 8 rows.
    ds1 = ds.ImageFolderDataset(dataset_dir=DATA_DIR, cache=some_cache)
    ds1 = ds1.map(input_columns=["image"], operations=c_vision.Decode())
    ds1 = ds1.repeat(4)

    num_iter = sum(1 for _ in ds1.create_dict_iterator())
    logger.info("Number of data in ds1: {} ".format(num_iter))
    assert num_iter == 8
    logger.info("test_cache_map_num_connections_100 Ended.\n")
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_prefetch_size_1():
    """
    Test setting prefetch_size=1 in DatasetCache.

    Repeat <- cache <- Map(decode) <- ImageFolder
    """
    logger.info("Test cache map prefetch_size 1")
    sid = os.environ.get('SESSION_ID')
    if sid is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    some_cache = ds.DatasetCache(session_id=int(sid), size=0, prefetch_size=1)

    # DATA_DIR holds 2 images; repeat(4) -> 8 rows.
    ds1 = ds.ImageFolderDataset(dataset_dir=DATA_DIR)
    ds1 = ds1.map(input_columns=["image"], operations=c_vision.Decode(), cache=some_cache)
    ds1 = ds1.repeat(4)

    num_iter = sum(1 for _ in ds1.create_dict_iterator())
    logger.info("Number of data in ds1: {} ".format(num_iter))
    assert num_iter == 8
    logger.info("test_cache_map_prefetch_size_1 Ended.\n")
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_prefetch_size_100():
    """
    Test setting prefetch_size=100 in DatasetCache.

    Repeat <- Map(decode) <- cache <- ImageFolder
    """
    logger.info("Test cache map prefetch_size 100")
    sid = os.environ.get('SESSION_ID')
    if sid is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    some_cache = ds.DatasetCache(session_id=int(sid), size=0, prefetch_size=100)

    # DATA_DIR holds 2 images; repeat(4) -> 8 rows.
    ds1 = ds.ImageFolderDataset(dataset_dir=DATA_DIR, cache=some_cache)
    ds1 = ds1.map(input_columns=["image"], operations=c_vision.Decode())
    ds1 = ds1.repeat(4)

    num_iter = sum(1 for _ in ds1.create_dict_iterator())
    logger.info("Number of data in ds1: {} ".format(num_iter))
    assert num_iter == 8
    logger.info("test_cache_map_prefetch_size_100 Ended.\n")
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_to_device():
    """
    Test cache with to_device.

    DeviceQueue <- EpochCtrl <- Repeat <- Map(decode) <- cache <- ImageFolder
    """
    logger.info("Test cache map to_device")
    sid = os.environ.get('SESSION_ID')
    if sid is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    some_cache = ds.DatasetCache(session_id=int(sid), size=0)

    # DATA_DIR holds only 2 images.
    ds1 = ds.ImageFolderDataset(dataset_dir=DATA_DIR)
    ds1 = ds1.map(input_columns=["image"], operations=c_vision.Decode(), cache=some_cache)
    ds1 = ds1.repeat(4)

    # Only checks that the transfer pipeline launches cleanly; rows are not
    # observable from here once sent to the device queue.
    ds1 = ds1.to_device()
    ds1.send()
    logger.info("test_cache_map_to_device Ended.\n")
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_epoch_ctrl1():
    """
    Test using two-loops method to run several epochs.

    Map(decode) <- cache <- ImageFolder
    """
    logger.info("Test cache map epoch ctrl1")
    sid = os.environ.get('SESSION_ID')
    if sid is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    some_cache = ds.DatasetCache(session_id=int(sid), size=0)

    # DATA_DIR holds only 2 images.
    ds1 = ds.ImageFolderDataset(dataset_dir=DATA_DIR, cache=some_cache)
    ds1 = ds1.map(input_columns=["image"], operations=c_vision.Decode())

    num_epoch = 5
    iter1 = ds1.create_dict_iterator(num_epochs=num_epoch)
    epochs_run = 0
    for _ in range(num_epoch):
        row_count = sum(1 for _ in iter1)
        logger.info("Number of data in ds1: {} ".format(row_count))
        assert row_count == 2
        epochs_run += 1
    assert epochs_run == num_epoch
    logger.info("test_cache_map_epoch_ctrl1 Ended.\n")
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_epoch_ctrl2():
    """
    Test using two-loops method with infinite epochs.

    cache <- Map(decode) <- ImageFolder
    """
    logger.info("Test cache map epoch ctrl2")
    sid = os.environ.get('SESSION_ID')
    if sid is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    some_cache = ds.DatasetCache(session_id=int(sid), size=0)

    # DATA_DIR holds only 2 images.
    ds1 = ds.ImageFolderDataset(dataset_dir=DATA_DIR)
    ds1 = ds1.map(input_columns=["image"], operations=c_vision.Decode(), cache=some_cache)

    num_epoch = 5
    # Without num_epochs the iterator assumes there is always a next epoch
    # and never shuts down on its own.
    iter1 = ds1.create_dict_iterator()
    epochs_run = 0
    for _ in range(num_epoch):
        row_count = sum(1 for _ in iter1)
        logger.info("Number of data in ds1: {} ".format(row_count))
        assert row_count == 2
        epochs_run += 1
    assert epochs_run == num_epoch

    # Shut the infinite iterator down explicitly.
    iter1.stop()
    logger.info("test_cache_map_epoch_ctrl2 Ended.\n")
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_epoch_ctrl3():
    """
    Test using two-loops method with infinite epochs over repeat.

    repeat <- Map(decode) <- cache <- ImageFolder
    """
    logger.info("Test cache map epoch ctrl3")
    sid = os.environ.get('SESSION_ID')
    if sid is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    some_cache = ds.DatasetCache(session_id=int(sid), size=0)

    # DATA_DIR holds 2 images; repeat(2) -> 4 rows per epoch.
    ds1 = ds.ImageFolderDataset(dataset_dir=DATA_DIR, cache=some_cache)
    ds1 = ds1.map(input_columns=["image"], operations=c_vision.Decode())
    ds1 = ds1.repeat(2)

    num_epoch = 5
    # Without num_epochs the iterator assumes there is always a next epoch
    # and never shuts down on its own.
    iter1 = ds1.create_dict_iterator()
    epochs_run = 0
    for _ in range(num_epoch):
        row_count = sum(1 for _ in iter1)
        logger.info("Number of data in ds1: {} ".format(row_count))
        assert row_count == 4
        epochs_run += 1
    assert epochs_run == num_epoch

    # Rely on the garbage collector to destroy iter1.
    logger.info("test_cache_map_epoch_ctrl3 Ended.\n")
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_coco1():
    """
    Test mappable coco leaf with cache op right over the leaf.

    cache <- Coco
    """
    logger.info("Test cache map coco1")
    sid = os.environ.get('SESSION_ID')
    if sid is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    some_cache = ds.DatasetCache(session_id=int(sid), size=0)

    # This dataset has 6 records.
    ds1 = ds.CocoDataset(COCO_DATA_DIR, annotation_file=COCO_ANNOTATION_FILE, task="Detection", decode=True,
                         cache=some_cache)

    num_epoch = 4
    iter1 = ds1.create_dict_iterator(num_epochs=num_epoch)
    epochs_run = 0
    for _ in range(num_epoch):
        assert sum(1 for _ in iter1) == 6
        epochs_run += 1
    assert epochs_run == num_epoch
    logger.info("test_cache_map_coco1 Ended.\n")
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_coco2():
    """
    Test mappable coco leaf with the cache op later in the tree above the map(resize).

    cache <- Map(resize) <- Coco
    """
    logger.info("Test cache map coco2")
    sid = os.environ.get('SESSION_ID')
    if sid is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    some_cache = ds.DatasetCache(session_id=int(sid), size=0)

    # This dataset has 6 records.
    ds1 = ds.CocoDataset(COCO_DATA_DIR, annotation_file=COCO_ANNOTATION_FILE, task="Detection", decode=True)
    ds1 = ds1.map(input_columns=["image"], operations=c_vision.Resize((224, 224)), cache=some_cache)

    num_epoch = 4
    iter1 = ds1.create_dict_iterator(num_epochs=num_epoch)
    epochs_run = 0
    for _ in range(num_epoch):
        assert sum(1 for _ in iter1) == 6
        epochs_run += 1
    assert epochs_run == num_epoch
    logger.info("test_cache_map_coco2 Ended.\n")
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_mnist1():
    """
    Test mappable mnist leaf with cache op right over the leaf.

    cache <- Mnist
    """
    logger.info("Test cache map mnist1")
    sid = os.environ.get('SESSION_ID')
    if sid is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    some_cache = ds.DatasetCache(session_id=int(sid), size=0)

    ds1 = ds.MnistDataset(MNIST_DATA_DIR, num_samples=10, cache=some_cache)

    num_epoch = 4
    iter1 = ds1.create_dict_iterator(num_epochs=num_epoch)
    epochs_run = 0
    for _ in range(num_epoch):
        assert sum(1 for _ in iter1) == 10
        epochs_run += 1
    assert epochs_run == num_epoch
    logger.info("test_cache_map_mnist1 Ended.\n")
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_mnist2():
    """
    Test mappable mnist leaf with the cache op later in the tree above the map(resize).

    cache <- Map(resize) <- Mnist
    """
    logger.info("Test cache map mnist2")
    sid = os.environ.get('SESSION_ID')
    if sid is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    some_cache = ds.DatasetCache(session_id=int(sid), size=0)

    ds1 = ds.MnistDataset(MNIST_DATA_DIR, num_samples=10)
    ds1 = ds1.map(input_columns=["image"], operations=c_vision.Resize((224, 224)), cache=some_cache)

    num_epoch = 4
    iter1 = ds1.create_dict_iterator(num_epochs=num_epoch)
    epochs_run = 0
    for _ in range(num_epoch):
        assert sum(1 for _ in iter1) == 10
        epochs_run += 1
    assert epochs_run == num_epoch
    logger.info("test_cache_map_mnist2 Ended.\n")
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_celeba1():
    """
    Test mappable celeba leaf with cache op right over the leaf.

    cache <- CelebA
    """
    logger.info("Test cache map celeba1")
    sid = os.environ.get('SESSION_ID')
    if sid is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    some_cache = ds.DatasetCache(session_id=int(sid), size=0)

    # This dataset has 4 records.
    ds1 = ds.CelebADataset(CELEBA_DATA_DIR, shuffle=False, decode=True, cache=some_cache)

    num_epoch = 4
    iter1 = ds1.create_dict_iterator(num_epochs=num_epoch)
    epochs_run = 0
    for _ in range(num_epoch):
        assert sum(1 for _ in iter1) == 4
        epochs_run += 1
    assert epochs_run == num_epoch
    logger.info("test_cache_map_celeba1 Ended.\n")
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_celeba2():
    """
    Test mappable celeba leaf with the cache op later in the tree above the map(resize).

    cache <- Map(resize) <- CelebA
    """
    logger.info("Test cache map celeba2")
    sid = os.environ.get('SESSION_ID')
    if sid is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    some_cache = ds.DatasetCache(session_id=int(sid), size=0)

    # This dataset has 4 records.
    ds1 = ds.CelebADataset(CELEBA_DATA_DIR, shuffle=False, decode=True)
    ds1 = ds1.map(input_columns=["image"], operations=c_vision.Resize((224, 224)), cache=some_cache)

    num_epoch = 4
    iter1 = ds1.create_dict_iterator(num_epochs=num_epoch)
    epochs_run = 0
    for _ in range(num_epoch):
        assert sum(1 for _ in iter1) == 4
        epochs_run += 1
    assert epochs_run == num_epoch
    logger.info("test_cache_map_celeba2 Ended.\n")
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_manifest1():
    """
    Test mappable manifest leaf with cache op right over the leaf.

    cache <- Manifest
    """
    logger.info("Test cache map manifest1")
    sid = os.environ.get('SESSION_ID')
    if sid is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    some_cache = ds.DatasetCache(session_id=int(sid), size=0)

    # This dataset has 4 records.
    ds1 = ds.ManifestDataset(MANIFEST_DATA_FILE, decode=True, cache=some_cache)

    num_epoch = 4
    iter1 = ds1.create_dict_iterator(num_epochs=num_epoch)
    epochs_run = 0
    for _ in range(num_epoch):
        assert sum(1 for _ in iter1) == 4
        epochs_run += 1
    assert epochs_run == num_epoch
    logger.info("test_cache_map_manifest1 Ended.\n")
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_manifest2():
    """
    Test mappable manifest leaf with the cache op later in the tree above the map(resize).

    cache <- Map(resize) <- Manifest
    """
    logger.info("Test cache map manifest2")
    sid = os.environ.get('SESSION_ID')
    if sid is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    some_cache = ds.DatasetCache(session_id=int(sid), size=0)

    # This dataset has 4 records.
    ds1 = ds.ManifestDataset(MANIFEST_DATA_FILE, decode=True)
    ds1 = ds1.map(input_columns=["image"], operations=c_vision.Resize((224, 224)), cache=some_cache)

    num_epoch = 4
    iter1 = ds1.create_dict_iterator(num_epochs=num_epoch)
    epochs_run = 0
    for _ in range(num_epoch):
        assert sum(1 for _ in iter1) == 4
        epochs_run += 1
    assert epochs_run == num_epoch
    logger.info("test_cache_map_manifest2 Ended.\n")
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_cifar1():
    """
    Test mappable cifar10 leaf with cache op right over the leaf.

    cache <- Cifar10
    """
    logger.info("Test cache map cifar1")
    sid = os.environ.get('SESSION_ID')
    if sid is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    some_cache = ds.DatasetCache(session_id=int(sid), size=0)

    ds1 = ds.Cifar10Dataset(CIFAR10_DATA_DIR, num_samples=10, cache=some_cache)

    num_epoch = 4
    iter1 = ds1.create_dict_iterator(num_epochs=num_epoch)
    epochs_run = 0
    for _ in range(num_epoch):
        assert sum(1 for _ in iter1) == 10
        epochs_run += 1
    assert epochs_run == num_epoch
    logger.info("test_cache_map_cifar1 Ended.\n")
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_cifar2():
    """
    Test mappable cifar100 leaf with the cache op later in the tree above the map(resize).

    cache <- Map(resize) <- Cifar100
    """
    logger.info("Test cache map cifar2")
    sid = os.environ.get('SESSION_ID')
    if sid is None:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    some_cache = ds.DatasetCache(session_id=int(sid), size=0)

    ds1 = ds.Cifar100Dataset(CIFAR100_DATA_DIR, num_samples=10)
    ds1 = ds1.map(input_columns=["image"], operations=c_vision.Resize((224, 224)), cache=some_cache)

    num_epoch = 4
    iter1 = ds1.create_dict_iterator(num_epochs=num_epoch)
    epochs_run = 0
    for _ in range(num_epoch):
        assert sum(1 for _ in iter1) == 10
        epochs_run += 1
    assert epochs_run == num_epoch
    logger.info("test_cache_map_cifar2 Ended.\n")
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_cifar3():
    """
    Test a mappable Cifar10 leaf with the cache op directly over the leaf.
    An extra-small cache (size=1) is used while the dataset has 10000 rows,
    exercising cache eviction/spill behavior.

       cache
         |
      Cifar10
    """
    logger.info("Test cache map cifar3")
    # A running cache server session is a hard prerequisite for this test.
    if "SESSION_ID" not in os.environ:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    session_id = int(os.environ['SESSION_ID'])

    # size=1 forces the cache far below the dataset's footprint.
    some_cache = ds.DatasetCache(session_id=session_id, size=1)
    ds1 = ds.Cifar10Dataset(CIFAR10_DATA_DIR, cache=some_cache)

    num_epoch = 2
    iter1 = ds1.create_dict_iterator(num_epochs=num_epoch)
    completed_epochs = 0
    for _ in range(num_epoch):
        rows_seen = sum(1 for _ in iter1)
        assert rows_seen == 10000
        completed_epochs += 1
    assert completed_epochs == num_epoch
    logger.info("test_cache_map_cifar3 Ended.\n")
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_cifar4():
    """
    Test a mappable Cifar10 leaf with the cache op over the leaf and a shuffle op above the cache.

       shuffle
         |
       cache
         |
      Cifar10
    """
    logger.info("Test cache map cifar4")
    # A running cache server session is a hard prerequisite for this test.
    if "SESSION_ID" not in os.environ:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    session_id = int(os.environ['SESSION_ID'])

    some_cache = ds.DatasetCache(session_id=session_id, size=0)
    ds1 = ds.Cifar10Dataset(CIFAR10_DATA_DIR, num_samples=10, cache=some_cache)
    # Shuffle sits above the cache, so cached rows get reshuffled on read.
    ds1 = ds1.shuffle(10)

    num_epoch = 1
    iter1 = ds1.create_dict_iterator(num_epochs=num_epoch)
    completed_epochs = 0
    for _ in range(num_epoch):
        rows_seen = sum(1 for _ in iter1)
        assert rows_seen == 10
        completed_epochs += 1
    assert completed_epochs == num_epoch
    logger.info("test_cache_map_cifar4 Ended.\n")
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_voc1():
    """
    Test a mappable VOC leaf with the cache op placed directly over the leaf.

       cache
         |
        VOC
    """
    logger.info("Test cache map voc1")
    # A running cache server session is a hard prerequisite for this test.
    if "SESSION_ID" not in os.environ:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    session_id = int(os.environ['SESSION_ID'])

    some_cache = ds.DatasetCache(session_id=session_id, size=0)
    # This dataset has 9 records
    ds1 = ds.VOCDataset(VOC_DATA_DIR, task="Detection", usage="train", shuffle=False, decode=True, cache=some_cache)

    num_epoch = 4
    iter1 = ds1.create_dict_iterator(num_epochs=num_epoch)
    completed_epochs = 0
    for _ in range(num_epoch):
        rows_seen = sum(1 for _ in iter1)
        assert rows_seen == 9
        completed_epochs += 1
    assert completed_epochs == num_epoch
    logger.info("test_cache_map_voc1 Ended.\n")
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_voc2():
    """
    Test a mappable VOC leaf with the cache op above the map(resize) in the tree.

       cache
         |
     Map(resize)
         |
        VOC
    """
    logger.info("Test cache map voc2")
    # A running cache server session is a hard prerequisite for this test.
    if "SESSION_ID" not in os.environ:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    session_id = int(os.environ['SESSION_ID'])

    some_cache = ds.DatasetCache(session_id=session_id, size=0)
    # This dataset has 9 records
    ds1 = ds.VOCDataset(VOC_DATA_DIR, task="Detection", usage="train", shuffle=False, decode=True)
    resize_op = c_vision.Resize((224, 224))
    # Cache the resized output instead of the raw leaf rows.
    ds1 = ds1.map(input_columns=["image"], operations=resize_op, cache=some_cache)

    num_epoch = 4
    iter1 = ds1.create_dict_iterator(num_epochs=num_epoch)
    completed_epochs = 0
    for _ in range(num_epoch):
        rows_seen = sum(1 for _ in iter1)
        assert rows_seen == 9
        completed_epochs += 1
    assert completed_epochs == num_epoch
    logger.info("test_cache_map_voc2 Ended.\n")
class ReverseSampler(ds.Sampler):
    """Python sampler that yields dataset indices in reverse order (last row first)."""

    def __iter__(self):
        # Equivalent to range(self.dataset_size - 1, -1, -1).
        yield from reversed(range(self.dataset_size))
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_python_sampler1():
    """
    Test using a python sampler, with the cache right after the leaf.

       Repeat
         |
     Map(decode)
         |
       cache
         |
     ImageFolder
    """
    logger.info("Test cache map python sampler1")
    # A running cache server session is a hard prerequisite for this test.
    if "SESSION_ID" not in os.environ:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    session_id = int(os.environ['SESSION_ID'])

    some_cache = ds.DatasetCache(session_id=session_id, size=0)
    # This DATA_DIR only has 2 images in it
    ds1 = ds.ImageFolderDataset(dataset_dir=DATA_DIR, sampler=ReverseSampler(), cache=some_cache)
    decode_op = c_vision.Decode()
    ds1 = ds1.map(input_columns=["image"], operations=decode_op)
    ds1 = ds1.repeat(4)

    # 2 images x 4 repeats = 8 rows total.
    num_iter = sum(1 for _ in ds1.create_dict_iterator())
    logger.info("Number of data in ds1: {} ".format(num_iter))
    assert num_iter == 8
    logger.info("test_cache_map_python_sampler1 Ended.\n")
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_python_sampler2():
    """
    Test using a python sampler, with the cache after the map op.

       Repeat
         |
       cache
         |
     Map(decode)
         |
     ImageFolder
    """
    logger.info("Test cache map python sampler2")
    # A running cache server session is a hard prerequisite for this test.
    if "SESSION_ID" not in os.environ:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    session_id = int(os.environ['SESSION_ID'])

    some_cache = ds.DatasetCache(session_id=session_id, size=0)
    # This DATA_DIR only has 2 images in it
    ds1 = ds.ImageFolderDataset(dataset_dir=DATA_DIR, sampler=ReverseSampler())
    decode_op = c_vision.Decode()
    # Cache the decoded output instead of the raw leaf rows.
    ds1 = ds1.map(input_columns=["image"], operations=decode_op, cache=some_cache)
    ds1 = ds1.repeat(4)

    # 2 images x 4 repeats = 8 rows total.
    num_iter = sum(1 for _ in ds1.create_dict_iterator())
    logger.info("Number of data in ds1: {} ".format(num_iter))
    assert num_iter == 8
    logger.info("test_cache_map_python_sampler2 Ended.\n")
@pytest.mark.skipif(os.environ.get('RUN_CACHE_TEST') != 'TRUE', reason="Require to bring up cache server")
def test_cache_map_nested_repeat():
    """
    Test cache on a pipeline with nested repeat ops.

       Repeat
         |
     Map(decode)
         |
       Repeat
         |
       Cache
         |
     ImageFolder
    """
    logger.info("Test cache map nested repeat")
    # A running cache server session is a hard prerequisite for this test.
    if "SESSION_ID" not in os.environ:
        raise RuntimeError("Testcase requires SESSION_ID environment variable")
    session_id = int(os.environ['SESSION_ID'])

    some_cache = ds.DatasetCache(session_id=session_id, size=0)
    # This DATA_DIR only has 2 images in it
    ds1 = ds.ImageFolderDataset(dataset_dir=DATA_DIR, cache=some_cache)
    decode_op = c_vision.Decode()
    ds1 = ds1.repeat(4)
    ds1 = ds1.map(operations=decode_op, input_columns=["image"])
    ds1 = ds1.repeat(2)

    # 2 images x 4 inner repeats x 2 outer repeats = 16 rows total.
    num_iter = 0
    for _ in ds1.create_dict_iterator(num_epochs=1):
        logger.info("get data from dataset")
        num_iter += 1
    logger.info("Number of data in ds1: {} ".format(num_iter))
    assert num_iter == 16
    logger.info('test_cache_map_nested_repeat Ended.\n')
if __name__ == '__main__':
    # This is just a list of tests, don't try to run these tests with 'python test_cache_map.py'
    # since cache server is required to be brought up first.
    # The tests are listed in file order; the two parallel-pipeline tests are
    # given explicit shard ids so each process covers a distinct shard.
    test_cache_map_basic1()
    test_cache_map_basic2()
    test_cache_map_basic3()
    test_cache_map_basic4()
    test_cache_map_basic5()
    test_cache_map_failure1()
    test_cache_map_failure2()
    test_cache_map_failure3()
    test_cache_map_failure4()
    test_cache_map_failure5()
    test_cache_map_failure6()
    test_cache_map_failure7()
    test_cache_map_failure8()
    test_cache_map_failure9()
    test_cache_map_failure10()
    test_cache_map_failure11()
    test_cache_map_split1()
    test_cache_map_split2()
    test_cache_map_parameter_check()
    test_cache_map_running_twice1()
    test_cache_map_running_twice2()
    test_cache_map_extra_small_size1()
    test_cache_map_extra_small_size2()
    test_cache_map_no_image()
    test_cache_map_parallel_pipeline1(shard=0)
    test_cache_map_parallel_pipeline2(shard=1)
    test_cache_map_parallel_workers()
    test_cache_map_server_workers_1()
    test_cache_map_server_workers_100()
    test_cache_map_num_connections_1()
    test_cache_map_num_connections_100()
    test_cache_map_prefetch_size_1()
    test_cache_map_prefetch_size_100()
    test_cache_map_to_device()
    test_cache_map_epoch_ctrl1()
    test_cache_map_epoch_ctrl2()
    test_cache_map_epoch_ctrl3()
    test_cache_map_coco1()
    test_cache_map_coco2()
    test_cache_map_mnist1()
    test_cache_map_mnist2()
    test_cache_map_celeba1()
    test_cache_map_celeba2()
    test_cache_map_manifest1()
    test_cache_map_manifest2()
    test_cache_map_cifar1()
    test_cache_map_cifar2()
    test_cache_map_cifar3()
    test_cache_map_cifar4()
    test_cache_map_voc1()
    test_cache_map_voc2()
    test_cache_map_python_sampler1()
    test_cache_map_python_sampler2()
    test_cache_map_nested_repeat()
| 31.045643
| 116
| 0.658781
| 9,287
| 67,338
| 4.560353
| 0.046732
| 0.070977
| 0.052418
| 0.048451
| 0.917902
| 0.871553
| 0.842062
| 0.808321
| 0.795783
| 0.784355
| 0
| 0.024069
| 0.235543
| 67,338
| 2,168
| 117
| 31.059963
| 0.798667
| 0.148846
| 0
| 0.700877
| 0
| 0
| 0.230576
| 0.024748
| 0
| 0
| 0
| 0
| 0.085088
| 1
| 0.049123
| false
| 0
| 0.00614
| 0
| 0.05614
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
404ae5955528ee804b0a42d0720de97677a0871d
| 101,135
|
py
|
Python
|
tests/test_scorefiles.py
|
MednickLab/mednickdb_pysleep
|
aaa18a4aee51242997f6d8a4beb5c9e1c23d5a92
|
[
"MIT"
] | null | null | null |
tests/test_scorefiles.py
|
MednickLab/mednickdb_pysleep
|
aaa18a4aee51242997f6d8a4beb5c9e1c23d5a92
|
[
"MIT"
] | null | null | null |
tests/test_scorefiles.py
|
MednickLab/mednickdb_pysleep
|
aaa18a4aee51242997f6d8a4beb5c9e1c23d5a92
|
[
"MIT"
] | 1
|
2020-05-19T16:55:11.000Z
|
2020-05-19T16:55:11.000Z
|
import sys, os
file_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, file_dir + '/../mednickdb_pysleep')
from scorefiles import extract_epochstages_from_scorefile, score_wake_as_waso_wbso_wase
from sleep_architecture import lights_on_off_and_sleep_latency
import yaml
from datetime import datetime
def test_extract_epochstages_from_scorefile():
    """Parse two .mat scorefiles and check stage extraction and wake re-scoring.

    Fix: the study-settings YAML is now opened with a context manager so the
    file handle is closed deterministically (the original leaked the handle
    returned by open()).
    """
    settings_path = os.path.join(os.path.dirname(__file__),
                                 "testfiles/study_settings/MednickHumeType_study_settings.yaml")
    with open(settings_path, 'rb') as settings_fh:
        stagemap = yaml.safe_load(settings_fh)['stage_map']

    # File 1: a few unknown epochs followed by wake only.
    testfile1 = os.path.join(file_dir, 'testfiles/example3_scorefile.mat')
    epochstages, epochoffset, starttime = extract_epochstages_from_scorefile(testfile1, stagemap)
    correct_epochstages = ['unknown', 'unknown', 'unknown', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake']
    assert epochstages == correct_epochstages
    # Wake before sleep onset should be re-scored as wbso.
    epochstages = score_wake_as_waso_wbso_wase(epochstages)
    correct_epochstages = ['unknown', 'unknown', 'unknown', 'wbso', 'wbso', 'wbso', 'wbso', 'wbso', 'wbso', 'wbso']
    assert epochstages == correct_epochstages

    # File 2: wake then a descent through n1/n2/n3.
    testfile2 = os.path.join(file_dir, 'testfiles/example4_scorefile.mat')
    epochstages, epochoffset, starttime = extract_epochstages_from_scorefile(testfile2, stagemap)
    correct_epochstages = ['wake', 'wake', 'n1', 'n1', 'n2', 'n2', 'n3', 'n3', 'n2', 'n2']
    assert epochstages == correct_epochstages
    epochstages = score_wake_as_waso_wbso_wase(epochstages)
    correct_epochstages = ['wbso', 'wbso', 'n1', 'n1', 'n2', 'n2', 'n3', 'n3', 'n2', 'n2']
    assert epochstages == correct_epochstages
def test_choc_scorefile():
    """Parse a CHOC-type .csv scorefile and check the first 300 epoch stages.

    Fix: the study-settings YAML is now opened with a context manager so the
    file handle is closed deterministically (the original leaked the handle
    returned by open()).
    """
    settings_path = os.path.join(os.path.dirname(__file__),
                                 "testfiles/study_settings/choc_type_study_settings.yaml")
    with open(settings_path, 'rb') as settings_fh:
        study_settings_file = yaml.safe_load(settings_fh)
    epoch_stages, epoch_offset, start_time = extract_epochstages_from_scorefile(
        os.path.join(os.path.dirname(__file__), "testfiles/example_choc_scorefile.csv"),
        study_settings_file['stage_map'])
    assert epoch_stages[:300] == ['unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'n1', 'n1', 'n1', 'n1', 'n1', 'n1', 'n2', 'n2', 'n3', 'n3', 'n3',
                                 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n2', 'wake', 'n2', 'n2', 'n2', 'n2', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake']
    # CHOC csv files carry no offset or clock start time.
    assert epoch_offset == 0
    assert start_time is None
def test_hume1_scorefile():
    """Parse a Hume-type-1 .mat scorefile: stages, epoch offset and start time.

    Fix: the study-settings YAML is now opened with a context manager so the
    file handle is closed deterministically (the original leaked the handle
    returned by open()).
    """
    settings_path = os.path.join(os.path.dirname(__file__),
                                 "testfiles/study_settings/MednickHumeType_study_settings.yaml")
    with open(settings_path, 'rb') as settings_fh:
        study_settings_file = yaml.safe_load(settings_fh)
    epoch_stages, epoch_offset, start_time = extract_epochstages_from_scorefile(
        os.path.join(os.path.dirname(__file__), "testfiles/humetype1_scorefile.mat"),
        study_settings_file['stage_map'])
    assert epoch_stages == ['wake', 'wake', 'n1', 'n1', 'n2', 'n2', 'n3', 'n3', 'n2', 'n2']
    # Scoring starts 102 seconds into the recording.
    assert epoch_offset == 102.0
    assert start_time == datetime.strptime('2016-01-01 00:00:00', "%Y-%m-%d %H:%M:%S")
def test_pkl_scorefile():
    """Parse a pickle-type scorefile and check the first 300 epoch stages, offset and start time.

    Fix: the study-settings YAML is now opened with a context manager so the
    file handle is closed deterministically (the original leaked the handle
    returned by open()).
    """
    settings_path = os.path.join(os.path.dirname(__file__),
                                 "testfiles/study_settings/MednickHumeType_study_settings.yaml")
    with open(settings_path, 'rb') as settings_fh:
        study_settings_file = yaml.safe_load(settings_fh)  # this is actually not needed...
    epoch_stages, epoch_offset, start_time = extract_epochstages_from_scorefile(
        os.path.join(os.path.dirname(__file__), "testfiles/pkl_type_scorefile.pkl"),
        study_settings_file['stage_map'])
    epochs300 = ['unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'n1', 'n1', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'n1', 'n2', 'n1', 'n1', 'n1', 'n1', 'wake', 'n1', 'n2', 'n2', 'n2', 'n3', 'n3', 'n2', 'n3', 'n2', 'n2', 'n3', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'n2', 'n1', 'n1', 'n2', 'n2', 'n2', 'n3', 'n2', 'n2', 'n2', 'n2', 'n3', 'n3', 'n3', 'n3', 'n2', 'n3', 'n3', 'n3', 'n2', 'n3', 'n3', 'n2', 'n3', 'n3', 'wake', 'n2', 'n3', 'n3', 'n3', 'n2', 'n2', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n2', 'n3', 'n3', 'n3', 'n3', 'n3', 'n2', 'n3', 'n3', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'n1', 'wake', 'n1', 'n1', 'wake', 'n2', 'n2', 'wake', 'wake', 'wake', 'n1', 'n2', 'n2', 'n3', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n1', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n2', 'wake', 'wake', 'n1', 'n1', 'n2', 'n2', 'n2', 'n3', 'n2', 'n3', 'n2', 'n1', 'n3', 'n2', 'n2', 'n2', 'n3', 'n2', 'n2', 'n2', 'n2', 'n3', 'n3', 'n2', 'n3', 'n3', 'n2', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n2', 'n2', 'rem', 'n2', 'rem', 'n2', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'n1', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'wake', 'n1', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'rem', 'rem', 'n2', 'n2', 'n2', 'n2', 'rem', 'n2', 'n1', 'rem', 'rem', 'n1', 'n2', 'n2', 'wake', 'n2', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n2', 'n2', 'n2', 'n2', 'n2',
                 'n3', 'n2', 'n2', 'n2', 'n2', 'n1', 'n1']
    assert epochs300 == epoch_stages[0:300]
    # Pickle scorefiles carry a sub-second epoch offset and a clock start time.
    assert epoch_offset == 1.88412058372
    assert start_time == datetime.strptime('2000-01-01 23:15:24', "%Y-%m-%d %H:%M:%S")
def test_hume2_scorefile():
    """Parse a Hume-type-2 .mat scorefile and check the full epoch-stage sequence.

    Fix: the study-settings YAML is now opened with a context manager so the
    file handle is closed deterministically (the original leaked the handle
    returned by open()).
    """
    settings_path = os.path.join(os.path.dirname(__file__),
                                 "testfiles/study_settings/MednickHumeType_study_settings.yaml")
    with open(settings_path, 'rb') as settings_fh:
        study_settings_file = yaml.safe_load(settings_fh)
    epoch_stages, epoch_offset, start_time = extract_epochstages_from_scorefile(
        os.path.join(os.path.dirname(__file__), "testfiles/humetype2_scorefile.mat"),
        study_settings_file['stage_map'])
    assert epoch_stages == ['unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown',
                            'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown',
                            'unknown', 'unknown', 'unknown', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'n1', 'wake', 'wake', 'n1', 'n1', 'n1',
                            'n1', 'n1', 'n1', 'n1', 'n2', 'wake', 'n1', 'wake', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'wake', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'wake', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'n2', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'n2', 'n2', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'wake', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n3', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n2',
                            'n2', 'n3', 'n2', 'n2', 'wake', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'wake', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n3', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n2', 'n3',
                            'n2', 'n2', 'n2', 'n3', 'n2', 'n3', 'n3', 'n3', 'n3', 'n2', 'n3', 'n3', 'n3', 'wake',
                            'wake', 'wake', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n3', 'n2', 'rem', 'n2', 'n2', 'wake', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'wake', 'rem', 'rem', 'wake', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'wake', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'wake', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'n2', 'rem', 'n2', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3',
                            'n2', 'n3', 'n3', 'n3', 'n2', 'n3', 'n3', 'n2', 'n3', 'n3', 'n2', 'n3', 'n2', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n2', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'wake', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'wake', 'n3', 'n2', 'n2', 'n2', 'n2', 'wake', 'wake',
                            'wake', 'n2', 'n2', 'n2', 'n2', 'n2', 'wake', 'n2', 'n2', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'wake', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n3', 'n2', 'n3', 'n2', 'n3', 'n3', 'n2', 'n2', 'n3', 'n2', 'n2', 'wake',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'wake', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'wake', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'wake', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'wake', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'wake', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n3', 'n2', 'n2', 'n2', 'n2', 'n2', 'wake', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'wake', 'wake', 'wake', 'wake', 'n1', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'wake', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'wake', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'wake', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'wake', 'wake', 'n1', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n3', 'n3', 'n2', 'n2', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n2', 'wake', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'wake', 'unknown', 'unknown', 'unknown', 'unknown',
                            'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown',
                            'unknown']
    # Hume-type-2 files carry neither a clock start time nor an epoch offset.
    assert start_time is None
    assert epoch_offset == 0
def test_grass_scorefile():
    """Parse a Grass-type .xls scorefile and check the full epoch-stage sequence.

    Fix: the study-settings YAML is now opened with a context manager so the
    file handle is closed deterministically (the original leaked the handle
    returned by open()).
    """
    settings_path = os.path.join(os.path.dirname(__file__),
                                 "testfiles/study_settings/MednickGrassType_study_settings.yaml")
    with open(settings_path, 'rb') as settings_fh:
        study_settings_file = yaml.safe_load(settings_fh)
    epoch_stages, epoch_offset, start_time = extract_epochstages_from_scorefile(
        os.path.join(os.path.dirname(__file__), "testfiles/grasstype_scorefile.xls"),
        study_settings_file['stage_map'])
    assert epoch_stages == ['unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown',
                            'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown',
                            'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown',
                            'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown',
                            'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown',
                            'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown',
                            'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'n1', 'wake', 'n1', 'n1', 'n1', 'n2', 'n2', 'wake', 'n1', 'wake', 'n1', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'wake', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'n1', 'n1', 'wake', 'n1', 'wake', 'n1', 'wake', 'n1', 'n2', 'n1',
                            'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'n1', 'n1', 'n2', 'wake', 'n1', 'n2', 'wake', 'n1', 'n2', 'rem', 'rem', 'wake', 'n1', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'wake', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'wake',
                            'n1', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'wake', 'unknown', 'unknown', 'unknown',
                            'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown',
                            'unknown']
    # Grass files carry a clock start time but no epoch offset.
    assert start_time == datetime.strptime('2013-02-18 12:58:59', "%Y-%m-%d %H:%M:%S")
    assert epoch_offset == 0
def test_lat_scorefile():
    """Parse a LAT-type .txt scorefile and check the full epoch-stage sequence.

    Fix: the study-settings YAML is now opened with a context manager so the
    file handle is closed deterministically (the original leaked the handle
    returned by open()).
    """
    settings_path = os.path.join(os.path.dirname(__file__),
                                 "testfiles/study_settings/SpencerLab_study_settings.yaml")
    with open(settings_path, 'rb') as settings_fh:
        study_settings_file = yaml.safe_load(settings_fh)
    epoch_stages, epoch_offset, start_time = extract_epochstages_from_scorefile(
        os.path.join(os.path.dirname(__file__), "testfiles/lattype_scorefile.txt"),
        study_settings_file['stage_map'])
    assert epoch_stages == ['unknown', 'unknown', 'unknown', 'unknown', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'n1', 'wake', 'wake', 'n1', 'wake', 'n1', 'n1', 'wake', 'n1', 'n1', 'n2', 'n2',
                            'n1', 'wake', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n3', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'wake', 'n1', 'wake',
                            'n1', 'n2', 'n2', 'wake', 'n2', 'n2', 'n1', 'n2', 'n2', 'n2', 'wake', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'rem', 'n1', 'n2', 'n2', 'rem', 'rem', 'n2', 'n2', 'n2', 'n1', 'n2',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'wake', 'n1', 'n1', 'wake', 'n1', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'wake', 'rem', 'rem', 'rem', 'rem',
                            'n1', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'n1', 'wake', 'wake', 'n1',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'wake', 'wake',
                            'wake']
    # LAT text files carry neither a clock start time nor an epoch offset.
    assert start_time is None
    assert epoch_offset == 0
def test_basic_scorefile():
    """Extract epoch stages from a basic-type scorefile and verify stages, offset and start time.

    Uses the DinklemannLab study settings to map raw score labels to canonical
    stage names. A basic-type scorefile carries no start timestamp, so
    ``start_time`` is expected to be ``None``.
    """
    settings_path = os.path.join(os.path.dirname(__file__),
                                 "testfiles/study_settings/DinklemannLab_study_settings.yaml")
    # Use a context manager so the settings file handle is closed deterministically;
    # the previous bare open() leaked the handle to the garbage collector.
    with open(settings_path, 'rb') as settings_file:
        study_settings_file = yaml.safe_load(settings_file)
    epoch_stages, epoch_offset, start_time = extract_epochstages_from_scorefile(
        os.path.join(os.path.dirname(__file__), "testfiles/basictype_scorefile.txt"),
        study_settings_file['stage_map'])
    assert epoch_stages == ['wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'n1', 'n1', 'n1', 'n1', 'wake', 'wake', 'n1', 'n1', 'n1', 'n1', 'n1', 'n1',
                            'wake', 'n1', 'wake', 'wake', 'wake', 'wake', 'wake', 'n1', 'wake', 'n1', 'wake', 'n1',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n2', 'n2', 'n2', 'n3',
                            'n2', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n2',
                            'n2', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n2', 'n2', 'n2', 'wake', 'wake', 'wake', 'n1', 'n1', 'n1', 'n2', 'n2', 'n2', 'n2', 'wake',
                            'n1', 'wake', 'wake', 'n1', 'wake', 'n1', 'wake', 'wake', 'n1', 'n1', 'wake', 'n1', 'wake',
                            'wake', 'wake', 'n1', 'n1', 'wake', 'n1', 'n2', 'n2', 'n1', 'n1', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n2', 'n2', 'n2', 'n3', 'n2',
                            'n3', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n3',
                            'n3', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'wake', 'wake', 'wake', 'n1', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'n1', 'rem', 'rem', 'n2', 'n2', 'n2', 'n2', 'n1', 'n1', 'n2', 'n2', 'n2', 'n2', 'movement',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n2', 'n3', 'n2', 'n2', 'n2',
                            'n3', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'wake', 'n1', 'n2',
                            'wake', 'n1', 'n2', 'wake', 'n1', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n3', 'n3', 'n3', 'n2', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n2', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n1', 'n1', 'n1', 'n2', 'wake', 'wake', 'wake', 'wake', 'n1', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'movement', 'n1', 'n1', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3',
                            'n2', 'n3', 'n3', 'n2', 'n2', 'n3', 'n2', 'n3', 'n3', 'n3', 'n3', 'n2', 'n3', 'n3', 'n3',
                            'n2', 'n2', 'n1', 'n1', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n1',
                            'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n1', 'n1', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'movement',
                            'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'rem', 'rem', 'rem', 'rem', 'rem', 'n2', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'movement', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n3', 'n2', 'n2', 'n3', 'n2', 'n3', 'n2', 'n3', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n3', 'n3', 'n3', 'n2', 'n2', 'n2', 'n2', 'movement', 'n1', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'wake', 'wake', 'n1', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'movement', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n3', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'wake', 'wake']
    assert start_time is None
    assert epoch_offset == 0
def test_full_scorefile():
    """Extract epoch stages from a full-type scorefile and verify stages, offset and start time.

    Uses the CAPStudy study settings to map raw score labels to canonical
    stage names. A full-type scorefile includes a recording start timestamp,
    which is checked against the known fixture value.
    """
    settings_path = os.path.join(os.path.dirname(__file__),
                                 "testfiles/study_settings/CAPStudy_study_settings.yaml")
    # Use a context manager so the settings file handle is closed deterministically;
    # the previous bare open() leaked the handle to the garbage collector.
    with open(settings_path, 'rb') as settings_file:
        study_settings_file = yaml.safe_load(settings_file)
    epoch_stages, epoch_offset, start_time = extract_epochstages_from_scorefile(
        os.path.join(os.path.dirname(__file__), "testfiles/fulltype_scorefile.txt"),
        study_settings_file['stage_map'])
    assert epoch_stages == ['wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'n1', 'n1', 'wake', 'wake', 'wake', 'wake', 'wake', 'n1', 'wake', 'n1',
                            'n1', 'n1', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'unknown', 'wake', 'n1', 'n1', 'n1', 'n1', 'n1', 'n1', 'n1', 'n1', 'n1', 'n1',
                            'n1', 'n1', 'n2', 'n2', 'wake', 'n1', 'n1', 'n1', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'rem', 'rem', 'rem',
                            'rem', 'wake', 'wake', 'wake', 'wake', 'n1', 'n1', 'n2', 'n2', 'n2', 'n2', 'unknown', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'n1', 'n1', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'wake', 'wake', 'n1', 'n2', 'wake', 'wake', 'n1', 'n1', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'wake', 'wake', 'n1',
                            'n1', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'unknown', 'n2', 'n2',
                            'n2', 'n2', 'unknown', 'unknown', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n3', 'n3', 'n3', 'n2', 'n2', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'n1', 'n1', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n3', 'n3', 'n3', 'n3', 'n2', 'n2', 'n1', 'n1', 'n1', 'n1', 'n1', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'wake', 'wake', 'wake', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n1', 'n1', 'n1',
                            'n1', 'n1', 'n1', 'n1', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'wake', 'wake', 'n1', 'n1', 'n1', 'n1',
                            'n1', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'wake', 'wake',
                            'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n3', 'n3', 'n3', 'n3', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n2', 'n2', 'n1', 'n1', 'n1', 'n1', 'n1', 'n1', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'n1', 'n1',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'wake', 'wake', 'n1',
                            'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'unknown', 'n1', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'wake', 'wake', 'wake', 'wake', 'n1', 'n1',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'n1', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'n1', 'n1', 'n1', 'n1', 'n1', 'n1', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n1', 'n1',
                            'n1', 'n2', 'n2', 'n2', 'n2', 'wake', 'n1', 'n1', 'n1', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'wake', 'wake', 'n1', 'n1', 'n1', 'n1', 'n1', 'n1', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake']
    assert start_time == datetime(2010, 1, 28, 22, 18, 17)
    assert epoch_offset == 0
def test_XML_scorefile():
    """Extract epoch stages from an NSRR XML scorefile and verify stages, offset and start time.

    Uses the NSRR study settings to map raw score labels to canonical stage
    names. The XML scorefile includes a recording start timestamp, which is
    checked against the known fixture value.
    """
    settings_path = os.path.join(os.path.dirname(__file__),
                                 "testfiles/study_settings/NSRR_study_settings.yaml")
    # Use a context manager so the settings file handle is closed deterministically;
    # the previous bare open() leaked the handle to the garbage collector.
    with open(settings_path, 'rb') as settings_file:
        study_settings_file = yaml.safe_load(settings_file)
    epoch_stages, epoch_offset, start_time = extract_epochstages_from_scorefile(
        os.path.join(os.path.dirname(__file__), "testfiles/xmltype_scorefile.xml"),
        study_settings_file['stage_map'])
    assert epoch_stages == ['wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'n1', 'n1', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'wake', 'wake', 'wake', 'n1', 'n2', 'n2', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'n1', 'n1',
                            'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n2',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n2', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'rem', 'rem', 'rem',
                            'n2', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'wake', 'wake', 'wake', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n2', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'wake', 'n3', 'n3', 'n3', 'n2', 'n2', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'wake', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'wake', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'wake', 'n1', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n3', 'n3', 'n2', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'wake', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n2', 'n3', 'n2', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'n1', 'n1', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'wake', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n2', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'wake', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'n1', 'wake', 'rem', 'rem', 'rem', 'rem', 'rem', 'n1', 'rem',
                            'rem', 'wake', 'wake', 'n1', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'wake', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'wake', 'n1', 'n2', 'n2',
                            'n2', 'n2', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'wake', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'n1', 'n1', 'wake', 'n1', 'n1', 'n1', 'n1', 'n1', 'n1', 'n1',
                            'wake', 'n1', 'n1', 'n1', 'n1', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'wake', 'wake', 'n1', 'n1', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'wake', 'n1', 'rem',
                            'rem', 'wake', 'n1', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2']
    assert start_time == datetime(2000, 1, 1, 20, 33, 32)
    assert epoch_offset == 0
def test_edf1_scorefile():
    """Extract epoch stages from an EDF (type 1) scorefile and verify stages, offset and start time.

    Uses the WamsleyLab study settings to map raw score labels to canonical
    stage names. The EDF header carries a recording start timestamp, which is
    checked against the known fixture value.
    """
    settings_path = os.path.join(os.path.dirname(__file__),
                                 "testfiles/study_settings/WamsleyLab_study_settings.yaml")
    # Use a context manager so the settings file handle is closed deterministically;
    # the previous bare open() leaked the handle to the garbage collector.
    with open(settings_path, 'rb') as settings_file:
        study_settings_file = yaml.safe_load(settings_file)
    epoch_stages, epoch_offset, start_time = extract_epochstages_from_scorefile(
        os.path.join(os.path.dirname(__file__), "testfiles/edftype1_scorefile.edf"),
        study_settings_file['stage_map'])
    assert epoch_stages == ['wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'n1', 'n1', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'n1', 'n1', 'n1', 'n1', 'n1', 'n1', 'n1', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'n1', 'n1', 'n1', 'n1',
                            'n1', 'n1', 'n1', 'n1', 'n1', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'wake', 'wake',
                            'n3', 'n3', 'n2', 'n3', 'n2', 'n2', 'n2', 'n3', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'n1', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'wake', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'wake', 'n1', 'n2',
                            'n2', 'n3', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'wake', 'n2', 'n2', 'n2', 'n2', 'wake', 'n2', 'n2', 'n2', 'n1', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'wake', 'n1',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'n1', 'n1', 'n1', 'n1',
                            'n1', 'n1', 'n1', 'n1', 'n1', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'n1', 'n1', 'wake', 'wake', 'wake',
                            'n1', 'wake', 'wake', 'n1', 'n1', 'n1', 'wake', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'wake', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'n1', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'n1', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'wake', 'wake', 'wake',
                            'wake', 'wake', 'wake', 'n1', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'wake',
                            'wake', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
                            'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n2', 'wake',
                            'wake', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'rem',
                            'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
                            'rem', 'wake', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'rem', 'rem', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
                            'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'wake', 'wake', 'wake', 'n2', 'wake',
                            'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
                            'n1', 'n1', 'n1', 'n1', 'n1', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2']
    # datetime(...) constructor instead of strptime for consistency with the
    # other tests in this file (same timestamp, no runtime parsing).
    assert start_time == datetime(2014, 7, 8, 23, 5, 9)
    assert epoch_offset == 0
def test_edf2_scorefile():
study_settings_file = yaml.safe_load(
open(os.path.join(os.path.dirname(__file__), "testfiles/study_settings/Kemp_study_settings.yaml"), 'rb'))
epoch_stages, epoch_offset, start_time = extract_epochstages_from_scorefile(os.path.join(os.path.dirname(__file__),
"testfiles/edftype2_scorefile.edf"),
study_settings_file['stage_map'])
assert epoch_stages == ['wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'n1', 'n1', 'n1',
'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3',
'n2', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'wake', 'n3', 'n3', 'n2', 'n2', 'n3', 'n3', 'n3', 'n3',
'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n2',
'n2', 'n3', 'n3', 'n3', 'n3', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n3', 'n2', 'n3',
'n2', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n2', 'n3', 'n3', 'n3',
'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n1', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n2',
'n2', 'n3', 'n3', 'n3', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'rem', 'rem', 'rem', 'rem',
'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'n1',
'n1', 'n1', 'n1', 'n1', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n1',
'n1', 'n1', 'n1', 'n2', 'n2', 'n2', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n2', 'n3', 'n3', 'n3', 'n3', 'n3',
'n3', 'n3', 'n3', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n1', 'n2', 'n2', 'n1', 'n2',
'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n1', 'n2', 'n2', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2',
'n2', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
'rem', 'rem', 'rem', 'rem', 'n2', 'n1', 'n1', 'wake', 'n1', 'n1', 'n1', 'n1', 'n2', 'n2',
'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n1', 'n2', 'n1',
'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n2', 'n2', 'n2', 'n2',
'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n2', 'n3', 'n2', 'n3', 'n3',
'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
'n3', 'n3', 'n2', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
'n3', 'n3', 'n3', 'n3', 'n3', 'n2', 'n2', 'n2', 'n2', 'n3', 'n3', 'n2', 'rem', 'rem', 'rem',
'rem', 'rem', 'rem', 'rem', 'rem', 'n1', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'wake', 'n1', 'n1',
'n1', 'n1', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'n1', 'n1', 'n1', 'wake', 'n1', 'n1', 'n1',
'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'wake', 'wake', 'n1', 'n2', 'n2',
'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
'n2', 'n3', 'n2', 'n3', 'n3', 'n3', 'n2', 'n2', 'n2', 'n2', 'n3', 'n3', 'n2', 'n2', 'n3',
'n2', 'n3', 'n2', 'n3', 'n3', 'n3', 'n3', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3',
'n2', 'n2', 'n2', 'n2', 'n1', 'n1', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'wake',
'n1', 'n1', 'n1', 'n1', 'n1', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'n1', 'n1', 'n1', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'n1', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'n1', 'n1', 'n1', 'n1', 'n1', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake']
assert start_time == datetime.strptime('1989-04-24 16:13:00', "%Y-%m-%d %H:%M:%S")
assert epoch_offset == 0
def test_edf3_scorefile():
study_settings_file = yaml.safe_load(
open(os.path.join(os.path.dirname(__file__), "testfiles/study_settings/MASS_study_settings.yaml"), 'rb'))
epoch_stages, epoch_offset, start_time = extract_epochstages_from_scorefile(os.path.join(os.path.dirname(__file__),
"testfiles/edftype3_scorefile.edf"),
study_settings_file['stage_map'])
assert epoch_stages == ['unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown',
'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown',
'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown',
'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown',
'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown',
'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown',
'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown',
'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown',
'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'unknown',
'unknown', 'unknown', 'unknown', 'unknown', 'unknown', 'wake', 'wake', 'wake', 'wake',
'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake', 'wake',
'wake', 'n1', 'n1', 'wake', 'n1', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
'n2', 'n2', 'n2', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3',
'n3', 'n2', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
'wake', 'rem', 'rem', 'n1', 'rem', 'n2', 'n2', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
'n2', 'rem', 'rem', 'rem', 'rem', 'rem', 'n1', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
'wake', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n2',
'n2', 'n3', 'n2', 'n2', 'n3', 'n3', 'n2', 'n3', 'n3', 'n2', 'n3', 'n3', 'n3', 'n3', 'n2',
'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3',
'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n3', 'n2', 'n2', 'wake', 'wake', 'n2', 'n2', 'n2',
'n2', 'n2', 'n2', 'n2', 'n2', 'wake', 'n1', 'n2', 'n2', 'n2', 'n2', 'wake', 'n1', 'n2',
'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n3', 'n2', 'n2', 'n2',
'n3', 'n2', 'n3', 'n2', 'n2', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'wake', 'n1', 'n1', 'n1', 'n1', 'n2', 'n2', 'n2',
'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n3', 'n2', 'n2', 'n2', 'n3', 'n2', 'n3', 'n3', 'n2',
'n2', 'n2', 'n2', 'n2', 'n3', 'n2', 'n3', 'n3', 'n3', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
'n3', 'n3', 'n2', 'n2', 'n3', 'n2', 'n3', 'n3', 'n2', 'n2', 'n3', 'n2', 'n2', 'n3', 'n3',
'n3', 'n3', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'wake', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
'n2', 'n2', 'wake', 'n1', 'n2', 'n2', 'n2', 'n2', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
'n1', 'n1', 'n1', 'n2', 'n2', 'rem', 'rem', 'rem', 'rem', 'rem', 'n1', 'n2', 'rem', 'rem',
'rem', 'rem', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
'wake', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
'n2', 'n2', 'wake', 'wake', 'n1', 'n1', 'wake', 'wake', 'n1', 'n1', 'n1', 'n2', 'n2', 'n2',
'n2', 'n2', 'n1', 'n1', 'n2', 'n2', 'n2', 'wake', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2',
'n2', 'n2', 'n2', 'wake', 'n1', 'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'n1', 'n2', 'n2', 'n2',
'n2', 'n2', 'n2', 'n2', 'n2', 'n2', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem', 'rem',
'rem', 'rem', 'unknown', 'unknown', 'unknown']
assert start_time == datetime.strptime('2000-01-01 01:33:55', "%Y-%m-%d %H:%M:%S")
assert epoch_offset == 15.2460797491
| 109.335135
| 2,669
| 0.354388
| 11,332
| 101,135
| 3.126368
| 0.013237
| 0.331715
| 0.95958
| 1.254149
| 0.973665
| 0.968556
| 0.963278
| 0.958507
| 0.949729
| 0.941685
| 0
| 0.081264
| 0.330548
| 101,135
| 924
| 2,670
| 109.453463
| 0.442006
| 0.000297
| 0
| 0.650739
| 0
| 0
| 0.314181
| 0.011849
| 0
| 0
| 0
| 0
| 0.045506
| 1
| 0.01479
| false
| 0
| 0.005688
| 0
| 0.020478
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|
4072ffbe95f7318909ad601520a9858f4805883c
| 8,512
|
py
|
Python
|
tests/base_tests/test_napalm_get_config.py
|
marcom4rtinez/nuts
|
b6020296ed6965889e35b9c7d4c0db13c2aa7b94
|
[
"MIT"
] | null | null | null |
tests/base_tests/test_napalm_get_config.py
|
marcom4rtinez/nuts
|
b6020296ed6965889e35b9c7d4c0db13c2aa7b94
|
[
"MIT"
] | null | null | null |
tests/base_tests/test_napalm_get_config.py
|
marcom4rtinez/nuts
|
b6020296ed6965889e35b9c7d4c0db13c2aa7b94
|
[
"MIT"
] | 1
|
2021-12-15T08:44:40.000Z
|
2021-12-15T08:44:40.000Z
|
import pytest
from nornir.core.task import AggregatedResult
from nornir_napalm.plugins import tasks
from nuts.base_tests.napalm_get_config import CONTEXT
from tests.utils import create_multi_result, SelfTestData
nornir_raw_result_s1 = {
"startup": "!\n\n!\nversion 15.2\nservice timestamps debug datetime msec\nservice timestamps log datetime msec\nno service password-encryption\nservice compress-config\n!\nhostname viosswitch-1\n!\nboot-start-marker\nboot-end-marker\n!\n!\nvrf definition mgmt\n !\n address-family ipv4\n exit-address-family\n!\n!\nusername cisco privilege 15 password 0 cisco\nno aaa new-model\n!\n!\n!\n!\n!\n!\n!\n!\nno ip domain-lookup\nip domain-name lab\nip cef\nno ipv6 cef\n!\n!\narchive\n path flash:backup\n write-memory\n!\nspanning-tree mode pvst\nspanning-tree extend system-id\n!\nvlan internal allocation policy ascending\n!\n! \n!\n!\n!\n!\n!\n!\n!\n!\n!\n!\n!\n!\ninterface GigabitEthernet0/0\n description OOB-Mgmt\n no switchport\n vrf forwarding mgmt\n ip address dhcp\n negotiation auto\n no cdp enable\n!\ninterface GigabitEthernet0/1\n description R3\n switchport trunk encapsulation dot1q\n switchport mode trunk\n media-type rj45\n negotiation auto\n!\ninterface GigabitEthernet0/2\n description sw01\n switchport trunk encapsulation dot1q\n switchport mode trunk\n media-type rj45\n negotiation auto\n!\ninterface GigabitEthernet0/3\n description client\n media-type rj45\n negotiation auto\n!\ninterface Vlan1\n ip address 10.0.0.110 255.255.255.0\n!\ninterface Vlan200\n ip address 10.0.200.110 255.255.255.0\n!\nip forward-protocol nd\n!\nno ip http server\nno ip http secure-server\n!\nip ssh version 2\nip scp server enable\n!\n!\n!\n!\n!\ncontrol-plane\n!\nbanner exec ^C\n**************************************************************************\n* IOSv is strictly limited to use for evaluation, demonstration and IOS *\n* education. IOSv is provided as-is and is not supported by Cisco's *\n* Technical Advisory Center. Any use or disclosure, in whole or in part, *\n* of the IOSv Software or Documentation to any third party for any *\n* purposes is expressly prohibited except as otherwise authorized by *\n* Cisco in writing. 
*\n**************************************************************************^C\nbanner incoming ^C\n**************************************************************************\n* IOSv is strictly limited to use for evaluation, demonstration and IOS *\n* education. IOSv is provided as-is and is not supported by Cisco's *\n* Technical Advisory Center. Any use or disclosure, in whole or in part, *\n* of the IOSv Software or Documentation to any third party for any *\n* purposes is expressly prohibited except as otherwise authorized by *\n* Cisco in writing. *\n**************************************************************************^C\nbanner login ^C\n**************************************************************************\n* IOSv is strictly limited to use for evaluation, demonstration and IOS *\n* education. IOSv is provided as-is and is not supported by Cisco's *\n* Technical Advisory Center. Any use or disclosure, in whole or in part, *\n* of the IOSv Software or Documentation to any third party for any *\n* purposes is expressly prohibited except as otherwise authorized by *\n* Cisco in writing. *\n**************************************************************************^C\n!\nline con 0\nline aux 0\nline vty 0 4\n login local\nline vty 5 15\n login local\n!\nntp server pnpntpserver.ins.local\n!\nend",
"running": "!\n\n!\nversion 15.2\nservice timestamps debug datetime msec\nservice timestamps log datetime msec\nno service password-encryption\nservice compress-config\n!\nhostname viosswitch-1\n!\nboot-start-marker\nboot-end-marker\n!\n!\nvrf definition mgmt\n !\n address-family ipv4\n exit-address-family\n!\n!\nusername cisco privilege 15 password 0 cisco\nno aaa new-model\n!\n!\n!\n!\n!\n!\n!\n!\nno ip domain-lookup\nip domain-name lab\nip cef\nno ipv6 cef\n!\n!\narchive\n path flash:backup\n write-memory\n!\nspanning-tree mode pvst\nspanning-tree extend system-id\n!\nvlan internal allocation policy ascending\n!\n! \n!\n!\n!\n!\n!\n!\n!\n!\n!\n!\n!\n!\ninterface GigabitEthernet0/0\n description OOB-Mgmt\n no switchport\n vrf forwarding mgmt\n ip address dhcp\n negotiation auto\n no cdp enable\n!\ninterface GigabitEthernet0/1\n description R3\n switchport trunk encapsulation dot1q\n switchport mode trunk\n media-type rj45\n negotiation auto\n!\ninterface GigabitEthernet0/2\n description sw01\n switchport trunk encapsulation dot1q\n switchport mode trunk\n media-type rj45\n negotiation auto\n!\ninterface GigabitEthernet0/3\n description client\n media-type rj45\n negotiation auto\n!\ninterface Vlan1\n ip address 10.0.0.110 255.255.255.0\n!\ninterface Vlan200\n ip address 10.0.200.110 255.255.255.0\n!\nip forward-protocol nd\n!\nno ip http server\nno ip http secure-server\n!\nip ssh version 2\nip scp server enable\n!\n!\n!\n!\n!\ncontrol-plane\n!\nbanner exec ^C\n**************************************************************************\n* IOSv is strictly limited to use for evaluation, demonstration and IOS *\n* education. IOSv is provided as-is and is not supported by Cisco's *\n* Technical Advisory Center. Any use or disclosure, in whole or in part, *\n* of the IOSv Software or Documentation to any third party for any *\n* purposes is expressly prohibited except as otherwise authorized by *\n* Cisco in writing. 
*\n**************************************************************************^C\nbanner incoming ^C\n**************************************************************************\n* IOSv is strictly limited to use for evaluation, demonstration and IOS *\n* education. IOSv is provided as-is and is not supported by Cisco's *\n* Technical Advisory Center. Any use or disclosure, in whole or in part, *\n* of the IOSv Software or Documentation to any third party for any *\n* purposes is expressly prohibited except as otherwise authorized by *\n* Cisco in writing. *\n**************************************************************************^C\nbanner login ^C\n**************************************************************************\n* IOSv is strictly limited to use for evaluation, demonstration and IOS *\n* education. IOSv is provided as-is and is not supported by Cisco's *\n* Technical Advisory Center. Any use or disclosure, in whole or in part, *\n* of the IOSv Software or Documentation to any third party for any *\n* purposes is expressly prohibited except as otherwise authorized by *\n* Cisco in writing. *\n**************************************************************************^C\n!\nline con 0\nline aux 0\nline vty 0 4\n login local\nline vty 5 15\n login local\n!\nntp server pnpntpserver.ins.local\n!\nend",
"candidate": "",
}
# Self-test scenario for host S1: pairs the raw napalm "get_config" payload
# above with the expected nuts outcome (startup config equals running config).
config_s1 = SelfTestData(
    name="s1",
    nornir_raw_result=nornir_raw_result_s1,
    test_data={"host": "S1", "startup_equals_running_config": True},
)
@pytest.fixture
def general_result(timeouted_multiresult):
    """Build an AggregatedResult with one good host (S1) and one timed-out host (S3).

    NOTE(review): the task name here is "napalm_get_facts" while
    test_integration below passes task_name="napalm_get" -- looks
    inconsistent; confirm which name the result transformation expects.
    """
    task_name = "napalm_get_facts"
    result = AggregatedResult(task_name)
    result["S1"] = create_multi_result(
        [config_s1.create_nornir_result()],
        task_name,
    )
    # S3 simulates a device that did not answer in time.
    result["S3"] = timeouted_multiresult
    return result
@pytest.fixture(
    params=[config_s1],
    ids=lambda data: data.name,
)
def selftestdata(request):
    # Parametrized over all SelfTestData scenarios (currently only s1).
    return request.param
@pytest.fixture
def testdata(selftestdata):
    # Expected-outcome dict of the current scenario (host name + expected flag).
    return selftestdata.test_data


# Attach the napalm_get_config CONTEXT to every test in this module.
pytestmark = [pytest.mark.nuts_test_ctx(CONTEXT())]
@pytest.fixture
def single_result(transformed_result, testdata):
    # Validate and unwrap the transformed result for the host under test.
    host_result = transformed_result[testdata["host"]]
    host_result.validate()
    return host_result.result
def test_startup_equals_running_config_isBoolean(testdata):
assert isinstance(testdata["startup_equals_running_config"], bool)
def test_integration(selftestdata, integration_tester):
    # End-to-end check: run the TestNapalmConfig test class for the scenario
    # through the napalm_get task and expect exactly one test to execute.
    integration_tester(
        selftestdata,
        test_class="TestNapalmConfig",
        task_module=tasks,
        task_name="napalm_get",
        test_count=1,
    )
| 119.887324
| 3,489
| 0.646969
| 1,206
| 8,512
| 4.518242
| 0.181592
| 0.023491
| 0.023124
| 0.026427
| 0.812993
| 0.800147
| 0.800147
| 0.800147
| 0.800147
| 0.800147
| 0
| 0.022619
| 0.1586
| 8,512
| 70
| 3,490
| 121.6
| 0.738202
| 0
| 0
| 0.06
| 0
| 0.04
| 0.831884
| 0.212641
| 0
| 0
| 0
| 0
| 0.02
| 1
| 0.12
| false
| 0.04
| 0.1
| 0.04
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
9071831a9fd199e6dfe817ce73105064d63dd93f
| 29,771
|
py
|
Python
|
data_link/utils.py
|
EricToshio/tcp-timestamp-covert-channel-analysis
|
1b5cfa9c12fd787977b9040ab9e65a0bf93488b4
|
[
"MIT"
] | null | null | null |
data_link/utils.py
|
EricToshio/tcp-timestamp-covert-channel-analysis
|
1b5cfa9c12fd787977b9040ab9e65a0bf93488b4
|
[
"MIT"
] | null | null | null |
data_link/utils.py
|
EricToshio/tcp-timestamp-covert-channel-analysis
|
1b5cfa9c12fd787977b9040ab9e65a0bf93488b4
|
[
"MIT"
] | null | null | null |
import re
class Utils:
    """Helpers for extracting and scoring flag-delimited covert messages.

    FLAG is a sequence of symbols (typically bits) whose concatenated string
    form delimits an embedded message; MESSAGE_SIZE is the nominal payload
    length in characters.  Assumes len(FLAG) > 2 and MESSAGE_SIZE >= 0.
    """

    def __init__(self, FLAG, MESSAGE_SIZE):
        self.FLAG = FLAG
        self.MESSAGE_SIZE = MESSAGE_SIZE

    def parse(self, message):
        """Return candidate flag-delimited substrings of *message*.

        A candidate runs from the start of one flag occurrence to the end of
        the next one, and is kept only if its length fits the expected window
        [MESSAGE_SIZE, MESSAGE_SIZE + slack] plus the two flags themselves.
        """
        # Slack on top of MESSAGE_SIZE: presumably accounts for bit stuffing,
        # which can add roughly one symbol per len(FLAG)-2 payload symbols.
        max_increase = self.MESSAGE_SIZE // (len(self.FLAG) - 2) + 1
        min_payload = self.MESSAGE_SIZE
        max_payload = self.MESSAGE_SIZE + max_increase
        flag_overhead = 2 * len(self.FLAG)

        # re.escape keeps behavior identical for the usual digit flags while
        # also tolerating regex metacharacters in FLAG symbols.
        pattern = re.compile(re.escape("".join(map(str, self.FLAG))))
        spans = [(m.start(0), m.end(0)) for m in pattern.finditer(message)]

        # Each candidate spans from one flag's start to the next flag's end.
        candidates = [
            message[prev_start:curr_end]
            for (prev_start, _), (_, curr_end) in zip(spans, spans[1:])
        ]
        return [
            c
            for c in candidates
            if flag_overhead + min_payload <= len(c) <= flag_overhead + max_payload
        ]

    def compare(self, send, received):
        """Count how many characters of *received* can be matched, in order,
        against *send*, allowing skips in *send* and limited backtracking.

        Returns the number of matched characters.
        """
        # The backtracking state machine below is order-sensitive; it is kept
        # statement-for-statement equivalent to the proven original.
        idxSent = 0
        idxRec = 0
        match = 0
        last_rec = 0   # received index just past the last confirmed match
        last_sent = 0  # send index just past the last confirmed match
        while idxRec < len(received):
            if idxSent == len(send):
                # Exhausted send without a match: rewind send and advance
                # received past the position we already tried.
                idxSent = last_sent
                if idxRec == last_rec + 1:
                    idxRec += 1
                else:
                    idxRec = last_rec + 1
            elif send[idxSent] == received[idxRec]:
                match += 1
                idxRec += 1
                idxSent += 1
                last_rec = idxRec
                last_sent = idxSent
            else:
                # Skip an unmatched symbol in send and retry.
                idxSent += 1
        return match
# se = ['01111011100011101011110', '0111100001001001011110', '0111100001001100011110', '0111100110001100011110', '0111100110001101011110', '0111101010110000011110', '0111101101010000011110', '01111000001011101011110', '01111001101110100011110', '0111100111001001011110', '01111000101011101011110', '0111100111010111011110', '0111100110100010011110', '01111011101011011011110', '0111101010010001011110', '0111101010101110011110', '0111101010110001011110', '0111100100110010011110', '01111010010111011011110', '0111100111010001011110', '0111100100001100011110', '01111001101110111011110', '0111100101101000011110', '0111101001110010011110', '0111101110110110011110', '01111000111011000011110', '01111011101100000011110', '0111101000001001011110', '0111101001001110011110', '0111101101101010011110', '0111101110111000011110', '01111000011101010011110', '0111101010100111011110', '0111100111000111011110', '0111101110111011101011110', '0111100100100001011110', '01111011011101110011110', '0111100000010001011110', '0111100101101000011110', '0111101000001100011110', '0111101100010010011110', '0111100111001001011110', '0111100110110110011110', '0111101011101100011110', '0111101010110010011110', '0111100001100100011110', '01111010001110110011110', '0111100010110011011110', '0111100110000001011110', '01111001100111011011110', '01111011001110100011110', '01111011101101011011110', '01111010001011101011110', '01111011011101001011110', '0111101000001010011110', '0111100100111010011110', '0111100010101001011110', '0111101100101100011110', '0111101001010110011110', '01111011011011101011110', '0111100001011100011110', '0111100101001000011110', '0111100101101010011110', '0111101000101110011110', '0111101101000000011110', '01111000111010110011110', '01111001110101010011110', '0111100100010011011110', '0111101101000111011110', '0111101010010001011110', '0111100010110010011110', '0111101100100000011110', '011110001110111011011110', '0111100010100110011110', '011110111010111011011110', 
'0111100010000010011110', '0111101110010010011110', '01111001110110101011110', '0111101011011010011110', '0111100100011000011110', '0111101010100010011110', '0111101011001000011110', '0111101010100110011110', '0111101011100110011110', '0111100110001001011110', '0111101110110101011110', '01111010111011101011110', '0111100100100101011110', '0111101100101011011110', '01111011101011010011110', '0111101100001001011110', '0111101101001000011110', '0111101010000000011110', '0111101011010100011110', '0111101110000011011110', '0111101010100100011110', '0111101110000001011110', '0111100110100010011110', '0111101010110101011110', '0111101000011001011110', '0111100000110111011110', '0111101001100011011110', '0111100000111010011110', '0111100101100111011110', '011110111011101000011110', '0111100011101010011110', '0111100110100111011110', '0111101100111010011110', '0111100010010000011110', '0111100110100100011110', '0111101001110100011110', '0111100000100010011110', '01111010111010000011110', '0111100010010101011110', '0111101100101010011110', '01111000001110100011110', '01111011100011101011110', '01111011101011001011110', '0111100111010101011110', '0111100100101001011110', '0111100100001100011110', '0111101010110011011110', '0111101000000110011110', '01111001011101100011110', '011110111011101011011110', '0111101110110100011110', '01111000101110101011110', '0111101010110000011110', '0111101001100111011110', '0111101110101101011110', '0111100010000100011110', '0111101011100111011110', '0111100011100110011110', '0111100010001101011110', '01111000111010111011110', '0111101011001001011110', '0111100010010001011110', '0111101010001110011110', '0111101011001101011110', '0111100001100101011110', '011110111011101000011110', '0111100000110001011110', '01111010111011011011110', '0111100001001010011110', '01111001011101100011110', '0111100001110000011110', '0111100011001101011110', '01111011101110010011110', '0111100111001000011110', '0111101011011000011110', '0111100100100101011110', 
'0111101100011101011110', '0111100001000011011110', '01111001110011101011110', '0111101010011100011110', '0111101001101010011110', '01111011101100100011110', '0111100100011011011110', '0111100001001100011110', '01111011101000000011110', '0111101101000110011110', '0111100001101100011110', '0111100011101001011110', '0111101011100100011110', '0111101011000001011110', '0111101010001100011110', '0111101010011100011110', '0111101001100000011110', '0111101010101001011110', '0111101000011100011110', '0111100101100000011110', '011110010111011101011110', '01111011101010110011110', '0111101110111011011110', '0111100110010000011110', '0111100000011000011110', '0111100001011000011110', '0111101100010101011110', '0111101000110101011110', '0111101001001001011110', '0111100101110101011110', '0111101101000000011110', '0111101101100110011110', '0111100101110100011110', '0111100010000101011110', '0111100000111001011110', '0111100000101011011110', '0111100110000000011110', '0111101100110111011110', '0111100001000110011110', '0111101101001100011110', '01111010111010001011110', '0111100001101010011110', '0111100011100010011110', '0111100000011001011110', '0111100000000110011110', '011110111011101110011110', '011110111011101001011110', '0111100000010110011110', '0111101110111000011110', '0111100100000111011110', '0111101110011010011110', '0111100010010110011110', '0111101011100101011110', '0111101010000111011110', '0111101100111010011110', '01111000101110100011110', '0111101101001011011110', '01111000111011010011110', '0111101100010110011110', '0111101000101010011110', '0111101010100101011110', '0111100010000000011110', '0111101101110000011110', '0111100101100111011110', '011110111010111011011110', '0111100110111010011110', '0111100100000001011110', '01111011000111011011110', '01111000100111010011110', '01111011101110001011110', '0111101001110111011110', '0111101011011011011110', '0111101110011011011110', '0111100010010010011110', '0111101100000100011110', '0111101100101000011110', 
'0111100000101010011110', '0111100111010011011110', '0111101100100100011110', '0111101010000001011110', '0111101001000010011110', '0111100101001001011110', '0111101110001011011110', '0111101101010000011110', '0111100011011001011110', '01111010001110110011110', '0111100001010110011110', '0111101101010110011110', '01111001011101110011110', '0111100010010110011110', '0111100010000101011110', '0111101101010100011110', '0111100101100001011110', '011110011101110100011110', '01111000011101001011110', '0111100111000110011110', '0111100111000111011110', '0111100001010101011110', '0111100000011010011110', '0111100110001001011110', '0111100100110100011110', '0111101100110000011110', '01111010001110110011110', '0111101000001011011110', '0111101001011101011110', '0111100110010001011110', '0111101001110011011110', '0111100101101110011110', '01111011101001001011110', '0111101001101011011110', '0111101101010011011110', '0111100101000000011110', '01111000111010101011110', '0111101011010111011110', '0111100010010110011110', '01111000110111010011110', '0111101110011010011110', '0111100000111000011110', '01111011101101101011110', '01111011101001000011110', '0111100011011001011110', '011110100111011101011110', '0111101000101100011110', '0111100110000010011110', '0111101100101101011110', '011110111010111010011110', '0111101110011101011110', '0111100000100101011110']
# re = ['01111011100011101011110', '0111100001001001011110', '0111100001001100011110', '0111100110001100011110', '0111100110001101011110', '0111101010110000011110', '0111101101010000011110', '01111000001011101011110', '0111100101101011110', '01111000101011101011110', '0111100111010111011110', '0111100110100010011110', '01111011101011011011110', '0111101010010001011110', '0111101010101011110', '0111100100110010011110', '01111010010111011011110', '0111100111010001011110', '0111100100001100011110', '01111001101110111011110', '0111100101101000011110', '0111101001110010011110', '0111101110110110011110', '01111000111011000011110', '01111011101100000011110', '0111101000001001011110', '0111101001001110011110', '0111101101101010011110', '0111101110111000011110', '01111000011101010011110', '0111101010100111011110', '0111100111000111011110', '0111101110111011101011110', '0111100100100001011110', '01111011011101110011110', '0111100000010001011110', '0111100101101000011110', '0111101000001100011110', '0111101100010010011110', '0111100111001001011110', '0111100110110110011110', '0111101011101100011110', '0111101010110010011110', '0111100110000001011110', '01111001100111011011110', '01111011001110100011110', '01111011101101011011110', '01111010001011101011110', '01111011011101001011110', '0111101000001010011110', '0111100100111010011110', '0111100010101001011110', '0111101100101100011110', '0111101001010110011110', '01111011011011101011110', '0111100001011100011110', '0111100101001000011110', '0111100101101010011110', '0111101000101110011110', '0111101101000000011110', '01111000111010110011110', '01111001110101010011110', '0111100100010011011110', '0111101101000111011110', '0111101010010001011110', '0111100010110010011110', '0111101100100000011110', '011110001110111011011110', '0111100010100110011110', '011110111010111011011110', '0111100010000010011110', '0111101011011010011110', '0111101010100110011110', '0111101011100110011110', '0111100110001001011110', 
'0111101110110101011110', '01111010111011101011110', '0111100100100101011110', '0111101100101011011110', '01111011101011010011110', '0111101100001001011110', '0111101101001000011110', '0111101010000000011110', '0111101011010100011110', '0111101110000011011110', '0111101010100100011110', '0111101110000001011110', '0111100110100010011110', '0111101010110101011110', '0111101000011001011110', '0111100000110111011110', '0111101001100011011110', '0111100011101010011110', '0111100110100111011110', '0111101100111010011110', '0111100010010000011110', '0111100110100100011110', '0111101001110100011110', '0111100000100010011110', '0111100101011110', '0111101100101010011110', '01111000001110100011110', '01111011100011101011110', '01111011101011001011110', '0111100111010101011110', '0111100100101001011110', '0111100100001100011110', '0111101010110011011110', '0111101000000110011110', '01111001011101100011110', '011110111011101011011110', '0111101110110100011110', '01111000101110101011110', '0111101010110000011110', '0111101001100111011110', '0111101110101101011110', '0111100010000100011110', '0111101011100111011110', '0111100011100110011110', '0111100010001101011110', '01111000111010111011110', '0111101011001001011110', '0111100010010001011110', '0111101011001101011110', '0111100001100101011110', '011110111011101000011110', '0111100000110001011110', '01111010111011011011110', '01111000010010101011110', '0111100001110000011110', '0111100011001101011110', '01111011101110010011110', '0111100111001000011110', '0111101011011000011110', '0111100100100101011110', '0111101100011101011110', '0111100001000011011110', '01111001110011101011110', '0111101010011100011110', '0111101001101010011110', '01111011101100100011110', '0111100100011011011110', '0111100001001100011110', '01111011101000000011110', '0111101101000110011110', '0111100001101100011110', '0111100011101001011110', '0111101011100100011110', '0111101011000001011110', '0111101010001100011110', '0111101010011100011110', 
'011110000011110', '0111101000011100011110', '0111100101100000011110', '011110010111011101011110', '01111011101010110011110', '0111101110111011011110', '011110011000100011110', '0111101100010101011110', '0111101000110101011110', '0111101001001001011110', '0111100101110101011110', '0111101101000000011110', '0111101101100110011110', '0111100101110100011110', '0111100010000101011110', '0111100000111001011110', '0111100000101011011110', '0111100110000000011110', '0111101100110111011110', '0111100001000110011110', '0111101101001100011110', '01111010111010001011110', '0111100001101010011110', '0111100011100010011110', '0111100000011001011110', '0111100000000110011110', '011110111011101110011110', '011110111011101001011110', '0111100000010110011110', '0111100100000111011110', '0111101110011010011110', '0111100010010110011110', '0111101011100101011110', '0111101010000111011110', '0111101100111010011110', '0111101100010110011110', '0111101000101010011110', '0111101010100101011110', '0111100010000000011110', '0111101101110000011110', '0111100101100111011110', '011110111010111011011110', '0111100110111010011110', '0111100100000001011110', '01111011000111011011110', '01111000100111010011110', '01111011101110001011110', '0111101001110111011110', '0111101011011011011110', '0111101110011011011110', '0111100010010010011110', '0111101100000100011110', '0111101100101000011110', '0111100000101010011110', '0111100111010011011110', '0111101100100100011110', '0111101010000001011110', '0111101001000010011110', '0111100101001001011110', '0111101110001011011110', '0111101101010000011110', '0111100011011001011110', '01111010001110110011110', '0111100001010110011110', '0111101101010110011110', '01111001011101110011110', '0111100010010110011110', '0111100010000101011110', '0111101101010100011110', '0111100101100001011110', '011110011101110100011110', '01111000011101001011110', '0111100111000110011110', '0111100111000111011110', '0111100001010101011110', '0111100000011010011110', 
'0111100110001001011110', '0111100100110100011110', '0111101100110000011110', '01111010001110110011110', '0111101000001011011110', '0111101001011101011110', '0111100110010001011110', '0111101001110011011110', '0111100101101110011110', '01111011101001001011110', '0111101001101011011110', '0111101101010011011110', '0111100101000000011110', '011110100111011101011110', '011110111010111010011110', '01111000010110101011110']
if __name__ == '__main__':
    # Script entry point intentionally disabled; the original driver code
    # is kept below as commented-out reference.
    pass
# send = insertMessage()
# rawMessage = readMessage()
# receive = parse(rawMessage)
# print("inserted:", len(send))
# print("read:", len(receive))
# print("sent:",send )
# print("read:", receive)
# # send = ['0111101000100010011110', '0111100001010110011110', '01111001110101011011110', '0111101010001001011110', '01111011101000111011110', '01111011101010110011110', '0111101110010100011110', '01111000110111010011110', '0111101010100000011110', '0111100110000000011110', '0111101100000000011110', '0111100111000011011110', '0111101011000101011110', '011110111011101000011110', '01111001110111010011110', '01111000111010100011110', '011110111011101001011110', '0111101011000110011110', '0111100010000100011110', '0111101101001010011110', '0111101101001000011110', '01111011101010100011110', '0111100011101010011110', '0111101110101000011110', '0111101010111011011110', '0111100010001011011110', '0111100101110111011110', '01111010110011101011110', '011110101110111011011110', '0111100100101011011110', '0111100010101001011110', '01111000001110101011110', '0111100101110000011110', '0111101110010111011110', '0111101100110100011110', '0111101101100000011110', '0111101101110010011110', '0111101110001011011110', '0111101011010000011110', '0111100000110110011110', '0111100111010001011110', '0111100010010011011110', '01111011101000111011110', '01111001011101101011110', '0111100111000000011110', '0111101001110101011110', '0111100001110000011110', '011110111010111010011110', '0111100001010111011110', '0111101011001000011110', '0111101100011101011110', '0111100100000010011110', '01111001110011101011110', '0111101100010110011110', '0111101100010110011110', '0111100101000011011110', '0111100001110111011110', '01111001110100001011110', '0111100000011001011110', '0111100000110111011110', '01111001110101011011110', '0111100100000001011110', '0111100001011001011110', '0111100101101110011110', '0111101001110100011110', '01111011101000100011110', '0111100100000001011110', '0111100010010011011110', '0111100110110100011110', '0111101010010011011110', '0111101000000101011110', '0111101001100110011110', '0111100100110100011110', '01111011101110101011110', '0111100100110011011110', 
'01111011011101000011110', '0111101000010000011110', '0111100001000101011110', '01111000001110110011110', '0111100100011011011110', '0111100111001000011110', '011110111011101010011110', '0111101100101010011110', '0111100010001000011110', '01111011101011101011110', '0111100000100011011110', '0111101100110110011110', '01111001101110101011110', '01111001110111011011110', '01111011011101110011110', '0111101101011010011110', '01111001101110111011110', '0111101101101110011110', '0111101110011010011110', '0111101001001101011110', '0111100000110110011110', '0111100010011101011110', '0111101101000101011110', '01111001110100000011110', '0111101100111001011110', '0111101100101100011110', '0111100110010010011110', '0111101000000001011110', '0111100010000110011110', '01111010110111011011110', '0111101101110010011110', '01111001110110011011110', '0111101001000110011110', '0111100010010101011110', '0111100111000000011110', '0111100001011010011110', '0111100001001000011110', '01111010101110100011110', '0111100000010101011110', '01111001110110100011110', '0111101010100001011110', '01111011101000110011110', '0111101110000011011110', '0111101110111010011110', '0111101100000010011110', '0111100011001011011110', '011110111011101000011110', '0111100001000010011110', '0111101010001110011110', '0111100000100100011110', '01111011101011011011110', '0111100010101010011110', '0111100010110011011110', '0111100011001100011110', '0111100101011000011110', '01111011101000000011110', '0111101011100010011110', '01111010101011101011110', '0111101110101101011110', '01111001110111010011110', '0111101110110000011110', '0111101000100100011110', '0111101110001010011110', '0111100101010111011110', '01111000011011101011110', '0111100110001011011110', '01111011101000111011110', '0111100000000100011110', '01111000001110110011110', '0111101110001110011110', '0111101011000000011110', '0111101001001100011110', '0111100101110101011110', '011110011101110111011110', '0111100011101110011110', 
'0111100100011010011110', '0111101001100111011110', '0111100100001001011110', '0111101101100110011110', '0111100011001100011110', '0111101001000101011110', '0111101110011011011110', '0111101011010101011110', '0111101110110111011110', '0111100000001101011110', '0111100011000101011110', '01111001100111010011110', '0111100110000111011110', '01111011101000010011110', '0111100000000001011110', '0111100010010101011110', '0111101100000001011110', '0111101000110000011110', '0111100010101110011110', '0111101110001110011110', '0111100011010111011110', '01111011101110100011110', '0111101100101100011110', '0111100110101001011110', '01111000001011101011110', '0111100110111011011110', '01111000111010111011110', '0111101101100101011110', '01111001110111000011110', '01111001110111011011110', '01111000010111011011110', '0111101000101000011110', '0111101010001010011110', '01111001110101100011110', '0111100100000100011110', '011110011101110100011110', '01111010111010000011110', '01111011101000100011110', '0111101110110111011110', '0111100000010001011110', '0111100001001010011110', '0111101010011100011110', '01111000111011010011110', '01111000011101010011110', '011110010111011101011110', '0111100010100010011110', '0111101011001100011110', '0111101001000010011110', '0111100001010011011110', '0111100011100100011110', '01111011101000101011110', '0111100100011011011110', '0111100011001010011110', '0111101001011011011110', '011110111010111010011110', '0111101001110011011110', '0111101100010010011110', '0111100011011010011110', '0111100110100111011110', '0111101110011010011110', '0111100000010000011110', '0111101000100110011110', '0111100000010001011110', '01111000110111010011110', '01111001100111010011110', '011110010111011101011110', '0111100010000101011110', '0111100101001100011110', '0111100011000110011110', '0111100011100101011110', '011110111011011101011110', '0111100100101101011110', '0111100001101000011110', '0111100100100011011110', '0111101010110101011110', 
'0111100100111000011110', '0111101001110010011110', '0111100110010001011110', '0111101101101110011110', '0111101110001110011110', '0111100000011011011110', '0111100101100110011110', '0111101011001100011110', '0111101000011101011110', '01111011001011101011110', '01111000001110101011110', '01111011101010110011110', '0111101001000001011110', '01111001110011101011110', '01111010001110100011110', '0111100001010000011110', '0111101011001000011110', '0111101100010100011110', '0111100110010100011110', '01111010110011101011110', '0111100111010101011110', '0111101000011000011110', '0111101110011010011110', '0111101110010111011110', '0111100111000010011110', '01111000100111011011110', '011110110111011101011110', '01111011001011101011110', '0111101110001100011110', '01111011101000011011110', '0111101001001100011110', '01111011010011101011110', '0111100011100010011110', '01111000101011101011110', '0111101011010111011110', '0111101011011101011110', '0111101000011011011110', '0111101101010010011110', '0111100001000011011110', '0111100101010000011110', '0111100110011011011110', '01111000110011101011110', '0111100010001001011110', '0111100011101011011110', '01111000011101000011110', '0111100011000011011110', '01111001110101011011110', '0111101000101011011110', '0111100011010000011110', '01111000011011101011110', '01111000111010110011110', '0111100110010111011110', '011110011101110111011110', '01111001110111011011110', '0111100001110111011110', '0111100110100111011110']
# # receive = ['0111101000100010011110', '0111100001010110011110', '01111001110101011011110', '0111101010001001011110', '01111011101000111011110', '01111011101010110011110', '0111101110010100011110', '01111000110111010011110', '0111101010100000011110', '0111100110000000011110', '0111101100000000011110', '0111100111000011011110', '0111101011000101011110', '011110111011101000011110', '01111001110111010011110', '01111001001011110', '0111101011000110011110', '0111100010000100011110', '0111101101001010011110', '0111101101001000011110', '01111011101010101110011110', '0111101110101000011110', '0111101010111011011110', '0111100010001011011110', '0111100101110111011110', '01111010110011101011110', '011110101110111011011110', '0111100100101011011110', '0111100010101001011110', '01111000001110101011110', '0111100101110000011110', '0111101110010111011110', '0111101100110100011110', '0111101101100000011110', '0111101101110010011110', '0111101110001011011110', '0111101011010000011110', '0111100000110110011110', '0111100111010001011110', '0111100010010011011110', '01111011101000111011110', '01111001011101101011110', '0111100111000000011110', '0111101001110101011110', '0111101011001000011110', '0111101100011101011110', '0111100100000010011110', '01111001110011101011110', '0111101100010110011110', '0111101100010110011110', '0111100000011001011110', '0111100000110111011110', '01111001110101011011110', '0111100100000001011110', '0111100001011001011110', '0111100101101110011110', '0111101001110100011110', '01111011101000100011110', '0111100100000001011110', '0111100010010011011110', '0111100110110100011110', '0111101010010011011110', '0111101000000101011110', '0111101001100110011110', '0111100100110100011110', '01111011101110101011110', '0111100100110011011110', '01111011011101000011110', '0111101000010000011110', '0111100001000101011110', '01111000001110110011110', '0111100100011011011110', '0111100111001000011110', '011110111011101010011110', '0111101100101010011110', 
'0111100010001000011110', '01111011101011101011110', '0111100000100011011110', '01111000100011011110', '01111001110111011011110', '01111011011101110011110', '0111101101011010011110', '01111001101110111011110', '0111101101101110011110', '0111101110011010011110', '0111101001001000011110', '01111011110011110', '0111101101000101011110', '01111001110100000011110', '0111101100111001011110', '0111101100101100011110', '0111100110010010011110', '0111101000000001011110', '0111100010000110011110', '01111010110111011011110', '0111101101110010011110', '01111001110110011011110', '0111101001000110011110', '0111100010010101011110', '0111100111000000011110', '0111100001011010011110', '0111100001001000011110', '01111010101110100011110', '0111100000010101011110', '01111001110110100011110', '0111101010100001011110', '01111011101000110011110', '0111101110000011011110', '0111101110111010011110', '0111101100001011110', '011110111011101000011110', '0111100001000010011110', '0111101010001110011110', '0111100000100100011110', '01111011101011011011110', '0111100110110011011110', '0111100011001100011110', '0111100101011000011110', '01111011101000000011110', '0111101011100010011110', '01111010101011101011110', '0111101110101101011110', '01111001110111010011110', '0111101110110000011110', '0111101000100100011110', '0111101110001010011110', '0111100101010111011110', '01111000011011101011110', '0111100110001011011110', '01111011101000111011110', '0111100000000100011110', '01111000001110110011110', '0111101110001110011110', '0111101011000000011110', '0111101001001100011110', '0111100101110101011110', '011110011101110111011110', '0111100011101110011110', '0111100100011010011110', '0111101001100111011110', '0111100100001001011110', '0111101101100110011110', '0111100011001100011110', '01111010010000100110011110', '0111101110110111011110', '0111100000001101011110', '0111100011000101011110', '01111001100111010011110', '0111100110000111011110', '01111011101000010011110', '0111100000000001011110', 
'0111100010010101011110', '0111101100000001011110', '0111101000110000011110', '0111100010101110011110', '0111101110001110011110', '0111100011010111011110', '01111011101110100011110', '0111101100101100011110', '0111100110101001011110', '01111000001011101011110', '0111100110111011011110', '01111000111010111011110', '0111101101100101011110', '01111001110111000011110', '01111001110111011011110', '01111000010111011011110', '0111101000101000011110', '0111101010001010011110', '011110010100011110', '011110011101110100011110', '01111010111010000011110', '01111011101000100011110', '0111101110110111011110', '0111100000010001011110', '0111100001001010011110', '0111101010011100011110', '01111000111011010011110', '01111000011101010011110', '011110010111011101011110', '0111100001010011011110', '0111100011100100011110', '01111011101000101011110', '0111100100011011011110', '0111100011001010011110', '0111101001011011011110', '011110111010111010011110', '0111101001110011011110', '0111101100010010011110', '0111100011011010011110', '0111100110100111011110', '0111101110011010011110', '0111100000010000011110', '0111101000100110011110', '0111100000010001011110', '01111000110111010011110', '01111001100111010011110', '011110010111011101011110', '0111100010000101011110', '0111100101001100011110', '01111001100100101011110', '011110111011011101011110', '0111100100101101011110', '0111100001101000011110', '0111100100100011011110', '0111101010110101011110', '0111100100111000011110', '0111101001001011110', '0111101101101110011110', '0111101110001110011110', '0111100000011011011110', '0111100101100110011110', '0111101011001100011110', '0111101000011101011110', '01111011001011101011110', '01111000001110101011110', '01111011101010110011110', '0111101001000001011110', '01111001110011101011110', '01111010001110100011110', '0111100001010000011110', '0111101011001000011110', '0111101100010100011110', '0111100110010100011110', '01111010110011101011110', '0111100111010101011110', '0111101000011000011110', 
'0111101110011010011110', '0111101110010111011110', '01111011001011101011110', '0111101110001100011110', '01111011101000011011110', '0111101001001100011110', '01111011010011101011110', '0111101011011101011110', '0111101000011011011110', '0111101101010010011110', '0111100001000011011110', '0111100101010000011110', '0111100110011011011110', '01111000110011101011110', '0111100010001001011110', '0111100011101011011110', '01111000011101000011110', '0111100011000011011110', '01111001110101011011110', '0111101000101011011110', '0111100011010000011110', '01111000011011101011110', '01111000111010110011110', '0111100110010111011110', '011110011101110111011110', '01111001110111011011110', '0111100001110111011110', '0111100110100111011110']
# match = matcher(send,receive)
# print(match)
# pass
| 419.309859
| 7,418
| 0.825736
| 1,269
| 29,771
| 19.352246
| 0.401891
| 0.002688
| 0.002443
| 0.00114
| 0.859353
| 0.813421
| 0.769932
| 0.757391
| 0.742894
| 0.742894
| 0
| 0.842782
| 0.059723
| 29,771
| 71
| 7,419
| 419.309859
| 0.034509
| 0.94273
| 0
| 0.133333
| 0
| 0
| 0.004737
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0.022222
| 0.022222
| 0
| 0.155556
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|
9076fefe1c2945ee101634ede8b9edd3d8d63f45
| 87,976
|
py
|
Python
|
LightningF/Models/pdp_simple.py
|
aripakman/Lightning
|
97c1d4deec2a2c7886b8be568a9486108bc4cba4
|
[
"MIT"
] | 1
|
2021-01-10T18:29:36.000Z
|
2021-01-10T18:29:36.000Z
|
LightningF/Models/pdp_simple.py
|
aripakman/Lightning
|
97c1d4deec2a2c7886b8be568a9486108bc4cba4
|
[
"MIT"
] | null | null | null |
LightningF/Models/pdp_simple.py
|
aripakman/Lightning
|
97c1d4deec2a2c7886b8be568a9486108bc4cba4
|
[
"MIT"
] | 1
|
2021-01-11T09:17:06.000Z
|
2021-01-11T09:17:06.000Z
|
import sys
import copy
import numpy as np
from sklearn.cluster import KMeans
from scipy.special import psi, gammaln, digamma
from abc import ABCMeta, abstractmethod
from scipy.spatial import cKDTree as Tree
from ..Utils.fw import fw_bw
from ..Utils.QT.python import LightC as LightC
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from ..Utils.ParamBag3 import ParamBag
from ..Utils.initial_condition import initiliaze, priors, handle_birth, handle_split
from ..Utils.NumericUtil import convert_to_n0, e_log_beta, e_log_n, inplace_exp_normalize_rows_numpy as e_norm
from ..Utils.NumericUtil import dotatb, calc_beta_expectations, calc_entropy, Lalloc
from ..Utils.NumericUtil import c_h, c_alpha, c_beta, e_gamma, elog_gamma, c_dir, c_gamma, delta_c, c_obs
# Responsibility threshold for cluster membership, presumably used by the
# concrete models below — TODO confirm exact use at the call sites.
CLT_BELONGING = 0.4
# Minimum expected count for a move-target cluster (presumably — verify
# against the birth/split/merge routines).
dtarget_min_count = 1.5
# Global flag toggling extra debug output.
debug = False
# Abstract class model
class AbstractModel(object):
__meta__ = ABCMeta
def __init__(self):
self.Post = []
self.elbo = np.array(())
self.move_counter = np.zeros((4, 2))
# Save Best Configuration Explored
self.best_configuration = []
self.best_elbo = - np.inf
# #########################################################################################################
# Abstract Methods
    @abstractmethod
    def vb_update(self, empty=False, prt=False, iteration=-1):
        """Run one variational-Bayes update pass (subclass-defined).

        Args:
            empty: subclass-defined empty-cluster handling switch.
            prt: print progress when True.
            iteration: current iteration index; -1 means "unspecified".
        """
        raise NotImplementedError
    @abstractmethod
    def find_nearby_centers(self, target_radius):
        """Locate centers within *target_radius* (semantics defined by the
        concrete subclass)."""
        raise NotImplementedError
    @abstractmethod
    def propagate(self, params, mus):
        """Propagate *params* given *mus* (subclass-defined).

        NOTE(review): unlike the sibling abstract methods, this stub
        returns None instead of raising NotImplementedError — confirm
        whether any subclass relies on calling super().propagate().
        """
        return None
    @abstractmethod
    def get_rnk_k(self, k):
        """Return r_nk for component *k* — presumably the per-point
        responsibilities of that component (confirm in subclasses)."""
        raise NotImplementedError
# #########################################################################################################
# #########################################################################################################
# Fitting Methods
def fit(self, prt=False, pl=False, iterations=100, empty=True, circles=True, tolerance=1e-10):
# Init ELBO
ax = None
self.elbo = np.zeros([iterations])
self.vb_update(empty=False, prt=prt, iteration=0)
self.elbo[0] = self.calc_elbo()
# Iterating
if prt:
print("Iterating")
for i_ in np.arange(1, iterations):
self.vb_update(empty=empty, prt=prt, iteration=i_)
self.elbo[i_] = self.calc_elbo()
# Save Best Configuration
if np.sum(self.elbo[i_]) > self.best_elbo:
self.best_elbo = np.sum(self.elbo[i_])
self.save_configuration()
# Check Elbo
if np.abs((self.elbo[i_] - self.elbo[i_ - 1])) < tolerance:
break
if (self.elbo[i_] - self.elbo[i_ - 1]) < - 0.1:
print("ELBO INCREASED BY:{}".format(self.elbo[i_] - self.elbo[i_ - 1]))
# Print, Plot
if prt and (i_ % 1 == 0):
self.print_iteration(elbo=self.elbo[i_], iteration=i_, components=self.Post.K)
if pl:
ax = self.pl_bl(ax=ax, circles=circles)
if prt:
print("Finish Looping")
def fit_moves(self, iterations=100, pl=False, prt=False, tolerance=1e-5,
              which_moves=np.array([True, True, True, True])):
    """Fit with VB updates interleaved with scheduled moves (see `moves`).

    :param which_moves: boolean flags selecting [birth, dead, split, merge].
        NOTE: this is a shared mutable default -- do not modify in place.
    :param tolerance: ELBO-improvement threshold below which the VB update
        is skipped (unless the component count changed).
    """
    # Init ELBO
    ax = None
    self.elbo = np.zeros([iterations])
    self.vb_update(empty=False, prt=prt, iteration=0)
    self.elbo[0] = self.calc_elbo()
    # Iterating
    if prt:
        print("Iterating")
    clusters = -1  # K seen at the last VB update; forces a refresh when K changes
    for i_ in np.arange(1, iterations):
        # Updates: re-run VB only while the ELBO is still improving or the
        # number of components changed since the last update.
        # NOTE(review): at i_ == 1, `elbo[i_ - 2]` wraps to elbo[-1]
        # (0 initially), so the first test compares against 0 -- confirm.
        if ((self.elbo[i_ - 1] - self.elbo[i_ - 2]) > tolerance) or (self.Post.K != clusters):
            clusters = self.Post.K
            self.vb_update(empty=1, prt=prt, iteration=i_)
            self.elbo[i_] = self.calc_elbo()
        else:
            self.elbo[i_] = self.elbo[i_ - 1]
        # Save Best Configuration
        if np.sum(self.elbo[i_]) > self.best_elbo:
            self.best_elbo = np.sum(self.elbo[i_])
            self.save_configuration()
        # Print
        if prt:
            self.print_iteration(elbo=self.elbo[i_], iteration=i_, components=self.Post.K)
        # Moves
        self.moves(iteration=i_, which=which_moves, prt=prt)
        if pl:
            ax = self.pl_bl(ax=ax, circles=1)
    if prt:
        print("Finish Looping")
def sweep_moves(self, prt=False, which_moves=np.array([True, True, True, True]), iterations=None):
    """Apply every enabled move once per sweep (default K/2 sweeps), with a
    cleanup + VB update after each sweep, then record the final ELBO.

    :param prt: print progress.
    :param which_moves: boolean flags [birth, dead, split, merge].
    :param iterations: number of sweeps; defaults to Post.K // 2.
    """
    # Sweeping
    if iterations is None:
        iterations = int(self.Post.K / 2)
    if prt:
        print("Sweeping through Clusters")
        self.print_iteration(elbo=self.elbo[-1], iteration=-1, components=self.Post.K)
    # Grow the ELBO trace by one slot for the post-sweep value.
    self.elbo = np.hstack([self.elbo, 0])
    for _ in np.arange(iterations):
        if which_moves[0]:
            self.birth_move(iteration=-1)
        if which_moves[2]:
            self.split_move(iteration=-1)
        if which_moves[3]:
            self.merge_move(iteration=-1)
        if which_moves[1]:
            self.dead_move(iteration=-1)
        self.redundant(prt=prt, iteration=-1)
        self.vb_update(empty=1, prt=prt)
    # Wrap Up
    self.vb_update(empty=1, prt=prt)
    self.elbo[-1] = self.calc_elbo()
    # Save Best Configuration
    if np.sum(self.elbo[-1]) > self.best_elbo:
        self.best_elbo = np.sum(self.elbo[-1])
        self.save_configuration()
    if prt:
        self.print_iteration(elbo=self.elbo[-1], iteration=-1, components=self.Post.K)
        print("Finish Sweeping")
def refine_clusters(self, prt=False, which_moves=np.array([True, True, True, True]), update=True):
    """
    Sweeps a number of times through the moves trying to find a better optima.
    Runs a batch of birth moves, then tries split, merge and dead on every
    component index in turn.
    :param prt: Print Elbo after Sweeping.
    :param which_moves: Boolean Vector. Birth, Dead, Split, Merge.
    :param update: run a final VB update (and record the ELBO) at the end.
    :return: -
    """
    # Refining
    if prt:
        print("Refining through Clusters")
        self.print_iteration(elbo=self.elbo[-1], iteration=-1, components=self.Post.K)
    self.elbo = np.hstack([self.elbo, 0])
    for _ in np.arange(int(self.Post.C / 10)):
        if which_moves[0]:
            self.birth_move(iteration=-1)
    # NOTE: Post.C can change as moves are accepted, so each while-loop
    # re-reads it on every test (a for-range would go stale).
    i_ = 0
    while i_ < self.Post.C:
        if which_moves[2]:
            self.split_move(iteration=-1, k_split=i_ + 1)
        i_ += 1
    i_ = 0
    while i_ < self.Post.C:
        if which_moves[3]:
            self.merge_move(iteration=-1, merge_1=i_)
        i_ += 1
    i_ = 0
    while i_ < self.Post.C:
        if which_moves[1]:
            self.dead_move(iteration=-1, random=i_ + 1)
        i_ += 1
    # Wrap Up
    self.redundant(prt=prt, iteration=-1)
    if update:
        self.vb_update(empty=1, prt=prt)
        self.elbo[-1] = self.calc_elbo()
    if prt:
        self.print_iteration(elbo=self.elbo[-1], iteration=-1, components=self.Post.K)
        print("Finish Sweeping")
# #########################################################################################################
# #########################################################################################################
# COMPONENT MODIFICATIONS
def delete_components(self, comp):
    """Remove component(s) `comp` via Post.removeComps(..., noise=True)."""
    self.Post.removeComps(comp, noise=True)

def add_component(self, params):
    """Insert one new component built from noise points (see gap_birth)."""
    self.Post.insert1Comp_fromNoise(params)

def merge_components(self, comp1, comp2, params=None):
    """Merge components comp1 and comp2 using precomputed `params`."""
    self.Post.mergeComps(comp1, comp2, params)

def split_component(self, comp, params):
    """Split component `comp` in two using precomputed `params`."""
    self.Post.splitComp(comp, params)
# #########################################################################################################
# #########################################################################################################
# MOVES
def moves(self, iteration, which=np.array([True, True, True, True]), prt=False):
# Birth Move
if which[0]:
birth_coordinator = 10
birth_start = 15
if ((iteration + birth_start) % birth_coordinator) == 0:
self.birth_move(iteration=iteration, prt=prt)
# Delete Move
if which[1]:
del_coordinator = 10
del_start = 13
if ((iteration + del_start) % del_coordinator) == 0:
self.dead_move(iteration=iteration, prt=prt)
# Split Move
if which[2]:
split_coordinator = 10
split_start = 5
if ((iteration + split_start) % split_coordinator) == 0:
self.split_move(iteration=iteration, prt=prt)
# Merge Move
if which[3]:
merge_coordinator = 10
merge_start = 8
if ((iteration + merge_start) % merge_coordinator) == 0:
self.merge_move(iteration=iteration, prt=prt)
def empty(self, prt=0, iteration=-1):
# First Plan Components to erase
empty_components = np.sort(np.flatnonzero(self.Post.rk < dtarget_min_count) + 1)[::-1]
if empty_components.shape[0] > 0:
self.Post.removeComps(empty_components, noise=True)
if prt == 1:
self.print_iteration(iteration=iteration, message='Empty Move. Delete Component:{}'.
format(empty_components))
self.vb_update(empty=0)
def redundant(self, prt=0, iteration=-1):
# CHECK REDUNDANT CLUSTERS
target_radius = 1.
# First Plan Components to erase
if self.Post.C > 1:
centers = self.find_nearby_centers(target_radius)
if len(centers) > 1:
self.delete_components(centers)
if prt == 1:
self.print_iteration(iteration=iteration,
message='Redundant Move. Delete Components:{}'.format(centers[:-1]))
self.vb_update_global()
def birth_move(self, iteration=-1, prt=0):
    """Propose a new component built from points currently assigned to
    noise; accept when the estimated ELBO gap is positive (gap_birth)."""
    # #################################################################
    # Parameters
    thres_noise = 0.5
    # Identify Noise points
    noise_index = np.flatnonzero(self.Post.rn0_vector > thres_noise)
    noise_n = noise_index.shape[0]
    # If there are more than 5 points. Try to search for clusters
    if noise_n > 5:
        gap, evaluate, params = handle_birth(noise_points=self.x[noise_index], noise_index=noise_index,
                                             gap_routine=self.gap_birth)
        # if improved, update best_model:
        if debug:
            # Force-accept and visualize the proposal.
            evaluate = True
            self.pl()
            plt.plot(self.x[noise_index, 0], self.x[noise_index, 1], '.')
            plt.plot(self.x[params["idx"], 0], self.x[params["idx"], 1], '+')
            plt.plot(params['mu'][0, 0], params['mu'][0, 1], 'or')
            plt.show()
        if evaluate:
            self.move_counter[0, 0] += 1  # accepted birth
            if debug:
                old_elbo = self.calc_elbo(deb=True)
            self.add_component(params)
            if debug:
                # Compare the analytic gap estimate with the realized change.
                new_elbo = self.calc_elbo(deb=True)
                print("Old ELbo:{}".format(old_elbo))
                print("New ELbo:{}".format(new_elbo))
                print("True Gap:{}".format(new_elbo - old_elbo))
                print("Estimated Gap:{}".format(gap))
                print("EstimatedGap:{} TrueGap{}".format(np.sum(gap), np.sum(new_elbo - old_elbo)))
                self.pl_bl()
                plt.show()
            self.vb_update(empty=False)
            if prt == 1:
                self.print_iteration(iteration=iteration, message='Birth Move. Add 1 Component')
        else:
            self.move_counter[0, 1] += 1  # rejected birth
def dead_move(self, random=None, prt=0, iteration=-1):
    """Propose deleting one component (chosen uniformly unless `random`
    gives an index); accept on positive estimated gap (gap_delete)."""
    # Select Component to erase
    if (random is None) and (self.Post.K > 1):
        choices = np.arange(1, self.Post.K)
        random_component = np.random.choice(choices)
    else:
        random_component = random
    if random_component is not None:
        gap, evaluate = self.gap_delete(random_component)
        # if improved, update best_model:
        if debug:
            evaluate = True
        if evaluate:
            self.move_counter[1, 0] += 1  # accepted dead move
            if debug:
                old_elbo = self.calc_elbo(deb=True)
            self.delete_components(random_component)
            if debug:
                # Compare the analytic gap estimate with the realized change.
                new_elbo = self.calc_elbo(deb=True)
                print("Old ELbo:{}".format(old_elbo))
                print("New ELbo:{}".format(new_elbo))
                print("True Gap:{}".format(new_elbo - old_elbo))
                print("Estimated Gap:{}".format(gap))
                print("EstimatedGap:{} TrueGap{}".format(np.sum(gap), np.sum(new_elbo - old_elbo)))
            if prt == 1:
                self.print_iteration(iteration=iteration,
                                     message='Dead Move. Delete Component:{}'.format(random_component))
        else:
            self.move_counter[1, 1] += 1  # rejected dead move
def merge_move(self, prt=0, iteration=-1, merge_1=None):
    """Try merging component `merge_1` (random unless given, 0-based here)
    with each neighbor within 4*mean_sigma; accept on positive gap."""
    # Parameters
    target_radius = 4 * self.mean_sigma
    # Randomly Select Component to Merge
    if self.Post.K > 2:
        if merge_1 is None:
            merge_1 = np.random.choice(np.arange(self.Post.C))
        p_qt = Tree(self.Post.mu)
        centers = p_qt.query_ball_point(self.Post.mu[merge_1], target_radius)
        if len(centers) > 1:
            centers.remove(merge_1)
            # Visit candidates from the highest index down so accepted
            # merges do not shift the indices of remaining candidates.
            centers = np.sort(centers)[::-1]
            for merge_2 in centers:
                # gap_* routines take 1-based component indices (0 = noise).
                gap, evaluate, params = self.gap_merge(merge_1 + 1, merge_2 + 1)
                if debug:
                    evaluate = True
                    plt.plot(self.x[:, 0], self.x[:, 1], ".")
                    plt.plot(self.Post.mu[merge_1, 0], self.Post.mu[merge_1, 1], '+')
                    plt.plot(self.Post.mu[merge_2, 0], self.Post.mu[merge_2, 1], '+')
                    plt.plot(params['mu'][0, 0], params['mu'][0, 1], 'or')
                if evaluate:
                    self.move_counter[2, 0] += 1  # accepted merge
                    if debug:
                        old_elbo = self.calc_elbo(deb=True)
                    self.merge_components(merge_1 + 1, merge_2 + 1, params)
                    # Keep merge_1 pointing at the same component after the
                    # removal shifted indices above merge_2.
                    if merge_2 < merge_1:
                        merge_1 += -1
                    if debug:
                        new_elbo = self.calc_elbo(deb=True)
                        print("DEBUG MERGE MOVE")
                        print("Old ELbo:{}".format(old_elbo))
                        print("New ELbo:{}".format(new_elbo))
                        print("True Gap:{}".format(new_elbo - old_elbo))
                        print("Estimated Gap:{}".format(gap))
                        print("EstimatedGap:{} TrueGap{}".format(np.sum(gap), np.sum(new_elbo - old_elbo)))
                        self.vb_update(empty=False, iteration=1)
                        new_elbo = self.calc_elbo(deb=True)
                        print("EvolvedGap{}".format(new_elbo - old_elbo))
                        print("EvolvedGap{}".format(np.sum(new_elbo - old_elbo)))
                    self.vb_update(empty=False)
                    if prt == 1:
                        self.print_iteration(iteration=iteration,
                                             message='Merge Move. Components:{}'.format([merge_1, merge_2]))
                else:
                    self.move_counter[2, 1] += 1  # rejected merge
def split_move(self, prt=0, iteration=-1, k_split=None):
    """Propose splitting component k_split (sampled proportional to rk^2
    when None) into two; accept on positive estimated gap (handle_split)."""
    # ##########################################################################################
    # Randomly Select Component to erase, weighted by the number of observations in the cluster
    if (k_split is None) and (self.Post.K > 1):
        pvals = self.Post.rk**2 / np.sum(self.Post.rk**2)
        k_split = np.argmax(np.random.multinomial(1, pvals)) + 1
    # ##########################################################################################
    if (k_split is not None) and (k_split <= self.Post.K):
        # ##########################################################################################
        # Isolate Cluster Points
        rnk = self.get_rnk_k(k_split)
        idx_points = rnk > CLT_BELONGING
        # Add nearby Noise Points
        p_qt = Tree(self.x)
        # NOTE(review): the 50-unit search radius is hard-coded -- confirm
        # it matches the data scale.
        idx = np.array(p_qt.query_ball_point(self.Post.mu[k_split-1], 50))
        sub_idx = np.where(self.Post.rn0_vector[idx] > CLT_BELONGING)[0]
        idx_points[idx[sub_idx]] = True
        # ##########################################################################################
        # ##########################################################################################
        if np.sum(idx_points) > 5:
            param = {"points": self.x[idx_points], "sigmas2": self.sigma2_x[idx_points], "k_split": k_split,
                     "idx_points": idx_points, "rnk_split": rnk}
            if self.infer_pi1:
                param["a"] = self.Post.a
                param["b"] = self.Post.b
            gap, evaluate, split_params = handle_split(param, self.gap_split, self.propagate)
            if debug:
                evaluate = True
            if evaluate:
                self.move_counter[3, 0] += 1  # accepted split
                if debug:
                    old_elbo = self.calc_elbo(deb=True)
                    plt.plot(self.x[:, 0], self.x[:, 1], '+k')
                    plt.plot(self.x[idx_points, 0], self.x[idx_points, 1], '+r')
                    plt.plot(self.Post.mu[k_split-1, 0], self.Post.mu[k_split-1, 1], 'og')
                    plt.plot(split_params['mu'][:, 0], split_params['mu'][:, 1], 'or')
                self.split_component(comp=k_split, params=split_params)
                if debug:
                    # Compare the analytic gap estimate with the realized change.
                    new_elbo = self.calc_elbo(deb=True)
                    print("DEBUG SPLIT MOVE")
                    print("Old ELbo:{}".format(old_elbo))
                    print("New ELbo:{}".format(new_elbo))
                    print("True Gap:{}".format(new_elbo - old_elbo))
                    print("Estimated Gap:{}".format(gap))
                    print("")
                    print("EstimatedGap:{} TrueGap{}".format(np.sum(gap), np.sum(new_elbo - old_elbo)))
                if prt == 1:
                    self.print_iteration(iteration=iteration,
                                         message='Split Move. Component:{}'.format(k_split))
            else:
                self.move_counter[3, 1] += 1  # rejected split
        # ##########################################################################################
# ##########################################################################################
# #########################################################################################################
# #########################################################################################################
# Plotting/Printing/Saving/Loading
def save_configuration(self):
self.best_configuration = copy.deepcopy(self.Post)
if hasattr(self.Post, 'rnk'):
self.best_configuration.removeField('rnk')
def load_configuration(self):
self.Post = copy.deepcopy(self.best_configuration)
self.vb_update_local()
def pl(self, ax=None, circles=False):
    """Scatter-plot the data, coloring points whose responsibility exceeds
    CLT_BELONGING, with optional per-point uncertainty circles.

    :param ax: existing axes to draw on (created when None).
    :param circles: draw a sqrt(sigma2) circle per point (max 1000 points).
    :return: the axes used.
    """
    # Bind Data
    x = self.x
    n = x.shape[0]
    sigma2_x = self.sigma2_x
    if ax is None:
        fig, ax = plt.subplots()
    ax.cla()
    # Plot all points in black
    ax.plot(x[:, 0], x[:, 1], '.k', markersize=0.5)
    if circles:
        if n < 1000:
            for n_ in np.arange(n):
                ax.add_artist(plt.Circle((x[n_, 0], x[n_, 1]), np.sqrt(sigma2_x[n_, 0]), fill=False, lw=0.5))
        else:
            print("Cannot Show Circles for more than 1000 points")
    # Points in Clusters have colors
    colors = (i for i in 'bgrmy' * self.Post.K)
    for k_, c in zip(np.arange(self.Post.C), colors):
        if hasattr(self.Post, 'rnk'):
            idx = self.Post.rnk[:, k_ + 1] > CLT_BELONGING
        else:
            # BUG FIX: get_rnk_k takes a 1-based component index (0 is the
            # noise column); this previously passed k_, off by one relative
            # to every other call site (cf. pl_bl).
            idx = self.get_rnk_k(k_ + 1) > CLT_BELONGING
        ax.scatter(x[idx, 0], x[idx, 1], c=c, s=1)
        ax.plot(self.Post.mu[k_, 0], self.Post.mu[k_, 1], '+' + c)
    plt.show()
    return ax
def bl(self):
    """
    Plots the assignment of each point to Noise and clusters.
    :return:
    """
    if self.x.shape[0] > 1000:
        print("Cannot display cluster assignment for more than 1000points")
        return
    if hasattr(self.Post, 'rnk'):
        rnk = self.Post.rnk
    else:
        # BUG FIX: this branch read the undefined name `post` (NameError)
        # and 1-D-concatenated over arange(K) (including the noise column
        # twice); build the (K, N) matrix as pl_bl does and transpose to
        # (N, K) for display.
        rnk = self.Post.rn0_vector
        for k_ in np.arange(1, self.Post.K):
            rnk = np.vstack([rnk, self.get_rnk_k(k_)])
        rnk = rnk.transpose()
    plt.imshow(rnk, aspect='auto')
    plt.show()
def pl_bl(self, ax=None, circles=False, pdf_name=None):
    """Two-panel plot: clustered scatter (left) and the responsibility
    matrix as an image (right); optionally saved to a PDF.

    :param ax: pair of axes from a previous call (created when None).
    :param circles: draw a per-point sqrt(sigma2) circle.
    :param pdf_name: when given, save the figure to this PDF file.
    :return: the pair of axes used.
    """
    # Bind Data
    x = self.x
    sigma2_x = self.sigma2_x
    if (ax is None) or (pdf_name is not None):
        fig, ax = plt.subplots(1, 2)
    else:
        fig = []
    ax[0].cla()
    # plot all points in red with a circle
    ax[0].plot(x[:, 0], x[:, 1], '.k', markersize=0.5)
    if circles:
        for n_ in np.arange(x.shape[0]):
            ax[0].add_artist(plt.Circle((x[n_, 0], x[n_, 1]), np.sqrt(sigma2_x[n_, 0]), fill=False, lw=0.5))
    # Points in Clusters have colors
    colors = (i for i in 'bgrmy' * self.Post.K)
    for k_, c in zip(np.arange(self.Post.C), colors):
        if hasattr(self.Post, 'rnk'):
            idx = self.Post.rnk[:, k_ + 1] > CLT_BELONGING
        else:
            idx = self.get_rnk_k(k_ + 1) > CLT_BELONGING
        ax[0].scatter(x[idx, 0], x[idx, 1], c=c, s=1)
        # BUG FIX: the center marker was plotted twice (duplicate line).
        ax[0].plot(self.Post.mu[k_, 0], self.Post.mu[k_, 1], '+' + c)
    if hasattr(self.Post, 'rnk'):
        rnk = self.Post.rnk
    else:
        # Assemble (K, N) from the per-component vectors, then transpose.
        rnk = self.Post.rn0_vector
        for k_ in np.arange(1, self.Post.K):
            rnk = np.vstack([rnk, self.get_rnk_k(k_)])
        rnk = rnk.transpose()
    ax[1].imshow(rnk, aspect='auto')
    plt.show()
    if pdf_name is not None:
        pp = PdfPages(pdf_name)
        pp.savefig(fig)
        pp.close()
    return ax
@staticmethod
def print_iteration(message=None, elbo=None, iteration=None, components=None):
"""
Prints Iteration Specific Information.
A message can be added to the statistics collected.
:param message:
:param elbo:
:param iteration:
:param components:
:return:
"""
if message is not None:
print('iteration:{0:<5d} {1:10s}'.format(iteration, message))
else:
print('iteration:{0:<5d} ELBO: {1:<7.0f} K Comps:{2:<5d}'.format(iteration, elbo, components))
def print_number_moves(self):
print("NUMBER OF ACCEPTED MOVES. Birth:{}, Dead:{}, Merge{}:, Split:{}".format(self.move_counter[0],
self.move_counter[1],
self.move_counter[2],
self.move_counter[3]))
# #########################################################################################################
class TimeIndependentModel(AbstractModel):
def __init__(self, data=None, x=None, sigma2=None, time=None,
             mu0=None, sigma02=None, log_p0=None, rnk=None,
             infer_pi1=False, pi1=None, a=None, b=None,
             infer_alpha0=False, alpha0=None, gamma1=None, gamma2=None,
             init_type='points', condition=None, prt=False, post=None, **kwargs):
    """Time-independent mixture model with an explicit noise component.

    Data may be supplied packed in `data` (its column count selects the
    layout: 4 = 2D + variances, 6 = 3D + variances, 5/7 = the same with a
    leading time column) or as explicit `x`, `sigma2` and `time` arrays.
    Priors not supplied are filled in by `priors(...)`; the posterior is
    seeded from `rnk` when given, otherwise via `initiliaze(...)`.
    """
    super(TimeIndependentModel, self).__init__()
    # #############################################################################################
    # #############################################################################################
    # Verify Data Dim and Bind it Inside Class
    if x is None:
        # 2D data
        if data.shape[1] == 4:
            if prt:
                print("2D data detected.")
            self.x = data[:, :2]
            self.sigma2_x = data[:, 2:]
        # 3D data
        elif data.shape[1] == 6:
            if prt:
                print("3D data detected.")
            self.x = data[:, :3]
            self.sigma2_x = data[:, 3:]
        # 2D data + Time
        elif data.shape[1] == 5:
            if prt:
                print("2D data detected + time component.")
            self.time = data[:, 0]
            self.T = int(np.max(self.time))
            self.x = data[:, 1:3]
            self.sigma2_x = data[:, 3:]
        # 3D data + Time
        elif data.shape[1] == 7:
            if prt:
                print("3D data detected + time component.")
            self.time = data[:, 0]
            self.T = int(np.max(self.time))
            self.x = data[:, 1:4]
            self.sigma2_x = data[:, 4:]
        else:
            sys.exit('Wrong Data Format')
    else:
        # NOTE(review): when x is given with time=None, np.max(self.time)
        # raises -- presumably callers always pass `time` here; confirm.
        self.time = time
        self.T = int(np.max(self.time))
        self.x = x
        self.sigma2_x = sigma2
    # Typical observation scale; used e.g. as the merge search radius.
    self.mean_sigma = np.sqrt(np.mean(self.sigma2_x))
    # #############################################################################################
    # #############################################################################################
    # #############################################################################################
    # #############################################################################################
    # Compute prior
    mu0, sigma02, alpha0, log_p0, pi1, a, b, gamma1, gamma2, gamma0, _ = \
        priors(x=self.x, mu0=mu0, sigma02=sigma02, alpha0=alpha0,
               pi1=pi1, log_p0=log_p0, gamma1=gamma1, gamma2=gamma2, a=a, b=b)
    # #############################################################################################
    # #############################################################################################
    # #############################################################################################
    # #############################################################################################
    # Build PRIOR containers
    self.Prior = ParamBag(K=0, D=self.x.shape[1])
    self.Prior.setField('mu0', mu0, dims='D')
    self.Prior.setField('sigma02', sigma02, dims='D')
    self.Prior.setField('logp0', log_p0, dims=None)
    # Pi1 containers: fixed value, or Beta(a, b) hyperparameters when inferred.
    self.infer_pi1 = infer_pi1
    if infer_pi1 is False:
        self.Prior.setField('pi1', pi1, dims=None)
    elif infer_pi1 is True:
        self.Prior.setField('a', a, dims=None)
        self.Prior.setField('b', b, dims=None)
    # Alpha0 containers: fixed value, or Gamma(gamma1, gamma2) when inferred.
    self.infer_alpha0 = infer_alpha0
    if infer_alpha0 is False:
        self.Prior.setField('alpha0', alpha0, dims=None)
    else:
        self.Prior.setField('gamma1', gamma1, dims=None)
        self.Prior.setField('gamma2', gamma2, dims=None)
    # #############################################################################################
    # #############################################################################################
    # #############################################################################################
    # #############################################################################################
    # Build POSTERIOR containers
    if rnk is None:
        # Compute Initial Condition
        mu_init, sigma2_init, eta0_init, eta1_init, k0 = \
            initiliaze(x=self.x, sigma2_x=self.sigma2_x, alpha0=alpha0, condition=condition,
                       init_type=init_type, post=post)
        # K counts the noise component (K = C + 1).
        self.Post = ParamBag(K=k0 + 1, D=self.x.shape[1], N=self.x.shape[0], C=k0)
        self.Post.setField('mu', mu_init.copy(), dims=('C', 'D'))
        self.Post.setField('sigma2', sigma2_init.copy(), dims=('C', 'D'))
        self.Post.setField('eta0', eta0_init, dims='C')
        self.Post.setField('eta1', eta1_init, dims='C')
        if infer_pi1 is True:
            self.Post.setField('a', a, dims=None)
            self.Post.setField('b', b, dims=None)
        if infer_alpha0 is True:
            self.Post.setField('gamma1', gamma1, dims=None)
            self.Post.setField('gamma2', gamma2, dims=None)
    else:
        # Posterior seeded from given responsibilities; derive the global
        # parameters (mu, sigma2, eta) from them.
        self.Post = ParamBag(K=rnk.shape[1], D=self.x.shape[1], N=self.x.shape[0], C=rnk.shape[1] - 1)
        self.Post.setField('rnk', rnk, dims=('N', 'K'))
        self.Post.setField('rn0_vector', np.zeros(self.Post.N), dims='N')
        if infer_pi1 is True:
            self.Post.setField('a', a, dims=None)
            self.Post.setField('b', b, dims=None)
        if infer_alpha0 is True:
            self.Post.setField('gamma1', gamma1, dims=None)
            self.Post.setField('gamma2', gamma2, dims=None)
        self.vb_update_global()
# #############################################################################################
# #############################################################################################
def vb_update(self, empty=False, prt=False, iteration=-1):
    """One VB sweep: local responsibilities, then global parameters;
    optionally followed by the empty/redundant cleanup moves."""
    self.vb_update_local()
    self.vb_update_global()
    if not empty:
        return
    # Empty-Redundant Cluster Move.
    self.empty(prt=prt, iteration=iteration)
    self.redundant(prt=prt, iteration=iteration)
def calc_elbo(self, deb=False):
    """
    Calculates ELBO terms after an update !
    :param deb: when True return the four terms
        [noise, stick-break, entropy, obs] separately instead of the sum.
    :return: scalar ELBO, or a length-4 array when deb is True.
    """
    # Binding Data
    post = self.Post
    prior = self.Prior
    # Parameters
    n, k = post.N, post.K
    c = k - 1  # number of real clusters (component 0 is noise)
    # Calculating Poisson Noise ELBO terms
    if self.infer_pi1 is True:
        e_noise = post.rn0_vector.sum() * prior.logp0
        e_noise += c_beta(eta1=prior.a, eta0=prior.b) - c_beta(eta1=post.a, eta0=post.b)
    else:
        e_noise = post.rn0_vector.sum() * (np.log(prior.pi1 / (1 - prior.pi1)) + prior.logp0)
        e_noise += n * np.log(1 - prior.pi1)
    # Observation terms: prior minus posterior normalizers plus the data
    # term accumulated during the update (post.cobs).
    e_obs = c * c_obs(prior.mu0, prior.sigma02)
    e_obs += post.cobs
    e_obs += - c_obs(post.mu, post.sigma2)
    # Stick-breaking terms (alpha0 either fixed or Gamma-inferred).
    if self.infer_alpha0 is True:
        e_sb = c_alpha(prior.gamma1, prior.gamma2, post.gamma1 / post.gamma2) - \
            c_alpha(post.gamma1, post.gamma2, post.gamma1 / post.gamma2)
    else:
        e_sb = c * c_beta(1, prior.alpha0)
    e_sb += - c_beta(post.eta1, post.eta0)
    e_entropy = - self.calc_all_entropy()
    if deb:
        return np.array([e_noise, e_sb, e_entropy, e_obs])
    else:
        return e_noise + e_sb + e_entropy + e_obs
def propagate(self, params, mus):
    """Run a short local VB on the candidate split points to refine the
    proposed centers `mus` and produce per-point responsibilities.

    :param params: dict with "points", "sigmas2", "idx_points",
        "rnk_split" (and "a"/"b" when pi1 is inferred).
    :param mus: (C, D) array of proposed cluster centers.
    :return: (N, C) responsibilities embedded back into the full data set,
        rescaled by the mass of the cluster being split.
    """
    # 0: Project Points To Centers' Proposals -> Get Initial rnk
    prior = self.Prior
    points = params["points"]
    sigmas2 = params["sigmas2"]
    n = points.shape[0]
    c, d = mus.shape
    new_rnk = np.zeros((n, c + 1))
    # Column 0 is the noise component; -100 makes its initial weight
    # negligible after normalization.
    new_rnk[:, 0] = -100
    new_rnk[:, 1:] = e_log_n(points, sigmas2, mus, np.zeros((c, d)))
    e_norm(new_rnk)
    # 1: Iterate through the model n_iterations
    n_iterations = 10
    # Pi1
    if self.infer_pi1 is False:
        value0 = np.log(prior.pi1 / (1 - prior.pi1)) + prior.logp0
        l_1mpi1 = 0.
    else:
        l_pi1 = psi(params["a"])
        l_1mpi1 = psi(params["b"])
        value0 = l_pi1 - l_1mpi1 + prior.logp0
        l_1mpi1 = 0.
    for _ in np.arange(n_iterations):
        # Global Update
        # rc = np.sum(new_rnk[:, 1:], axis=0)
        sigma2c = (dotatb(new_rnk[:, 1:], np.reciprocal(sigmas2)) + prior.sigma02 ** -1) ** -1
        mus = (dotatb(new_rnk[:, 1:], points * np.reciprocal(sigmas2)) + prior.mu0 * (prior.sigma02 ** -1)) * \
            sigma2c
        # alphakt
        """
        eta1 = 1 + rc
        if self.infer_alpha0 is True:
            post.setField('eta0', post.gamma1 / post.gamma2 + convert_to_n0(post.rk), dims='C')
        else:
            post.setField('eta0', prior.alpha0 + convert_to_n0(post.rk), dims='C')
        elog_u, elog1m_u = calc_beta_expectations(post.eta1, post.eta0)
        if self.infer_alpha0 is True:
            post.setField('gamma1', prior.gamma1 + post.C, dims=None)
            post.setField('gamma2', prior.gamma2 - post.elog1m_u.sum(), dims=None)
        """
        # Local update with the refined centers.
        new_rnk[:, 0] = value0
        new_rnk[:, 1:] = l_1mpi1 + e_log_n(points, sigmas2, mus, sigma2c)
        e_norm(new_rnk)
    # 2: Validate New Clusters (a_ points in them, b_ they are not too close)
    out_rnk = np.zeros((self.Post.N, new_rnk.shape[1] - 1))
    # Renormalize over the clusters only, then rescale by the split
    # cluster's original responsibilities so total mass is conserved.
    out_rnk[params['idx_points'], :] = new_rnk[:, 1:] / new_rnk[:, 1:].sum(axis=1)[:, None]
    out_rnk *= params['rnk_split'][:, None]
    out_rnk[np.isnan(out_rnk)] = 0.
    return out_rnk
# ##############################################################
# ##############################################################
# GAPS
def gap_delete(self, k):
    """Estimate the ELBO change from deleting component k (1-based),
    moving its responsibilities back to the noise component.

    :return: (gap, evaluation) -- gap is the per-term array
        [poisson, stick-break, entropy, obs]; evaluation is True when the
        total is positive (accept the move).
    """
    # Binding Parameters
    prior = self.Prior
    post = self.Post
    # Points in cluster
    rnk_k = self.get_rnk_k(k)
    idx_k = np.flatnonzero(rnk_k > CLT_BELONGING)
    if len(idx_k) < 1:
        # Nothing meaningfully assigned to k: no gap, reject.
        gap = 0
        evaluation = False
    else:
        # Calculating mu, sigma ELBO terms
        gap_obs = - c_obs(mu=self.x, sigma2=self.sigma2_x, mult=rnk_k) \
            - c_obs(mu=prior.mu0, sigma2=prior.sigma02) \
            + c_obs(mu=post.mu[k - 1], sigma2=post.sigma2[k - 1])
        # Calculating Poisson Noise ELBO terms
        add_rn0 = np.sum(rnk_k)  # mass returned to noise
        if self.infer_pi1 is True:
            gap_pois = add_rn0 * prior.logp0
            added = post.rn0_vector + rnk_k
            after_a = prior.a + added.sum()
            after_b = prior.b + (1. - added).sum()
            gap_pois += - c_beta(eta1=after_a, eta0=after_b) + c_beta(eta1=post.a, eta0=post.b)
        else:
            gap_pois = add_rn0 * (np.log(prior.pi1 / (1. - prior.pi1)) + prior.logp0)
        # Calculating Stick Break ELBO terms
        if self.infer_alpha0 is True:
            # After Erasing Cluster k
            after_gamma1 = post.gamma1 - 1.
            after_gamma2 = post.gamma2 + post.elog1m_u[k - 1]
            gap_sb = - c_alpha(after_gamma1, after_gamma2, after_gamma1 / after_gamma2)
            new_nn = np.delete(post.rk, k - 1, axis=0)
            after_eta1 = 1. + new_nn
            after_eta0 = after_gamma1 / after_gamma2 + convert_to_n0(new_nn)
            gap_sb += - c_beta(eta1=after_eta1, eta0=after_eta0)
            # Before
            gap_sb += c_alpha(post.gamma1, post.gamma2, post.gamma1 / post.gamma2)
            gap_sb += c_beta(eta1=post.eta1, eta0=post.eta0)
        else:
            new_nn = np.delete(post.rk, k - 1, axis=0)
            after_eta1 = 1. + new_nn
            after_eta0 = prior.alpha0 + convert_to_n0(new_nn)
            gap_sb = - c_beta(eta1=1, eta0=prior.alpha0)
            gap_sb += - c_beta(eta1=after_eta1, eta0=after_eta0) + c_beta(eta1=post.eta1, eta0=post.eta0)
            # gap_sb += - c_beta(eta1=after_eta1[:k-1], eta0=after_eta0[:k-1]) \
            #     + c_beta(eta1=post.eta1[:k], eta0=post.eta0[:k])
            # Subtraction of terms after k and k-1 respectively are zero.
        # Calculating Entropy ELBO terms
        # After
        gap_entropy = - calc_entropy(post.rn0_vector + rnk_k)
        # Before
        gap_entropy += calc_entropy(post.rn0_vector) + calc_entropy(rnk_k)
        gap = np.array([gap_pois, gap_sb, gap_entropy, gap_obs])
        evaluation = np.sum(gap) > 0
    return gap, evaluation
def gap_merge(self, k1, k2):
    """Estimate the ELBO change from merging components k1 and k2
    (1-based; the lower index absorbs the higher one).

    :return: (gap, evaluation, params) -- gap is the per-term array
        [0, stick-break, entropy, obs]; evaluation is True when the total
        is positive; params holds the merged component's parameters.
    """
    # Check k1 < k2
    if k1 > k2:
        k1, k2 = k2, k1
    # Binding Parameters
    prior = self.Prior
    post = self.Post
    # Estimate New Center
    rnk_k1 = self.get_rnk_k(k1)
    rnk_k2 = self.get_rnk_k(k2)
    rnk_k1_k2 = np.vstack([rnk_k1, rnk_k2]).transpose()
    # Calculating mu, sigma ELBO terms
    new_rnk = np.sum(rnk_k1_k2, axis=1)  # combined responsibilities
    new_sigma2 = (np.sum(new_rnk[:, None] * (self.sigma2_x ** -1), axis=0) + self.Prior.sigma02 ** -1) ** -1
    new_mu = (np.sum(new_rnk[:, None] * self.x * (self.sigma2_x ** -1), axis=0) +
              self.Prior.mu0 * (self.Prior.sigma02 ** -1)) * new_sigma2
    params = {"mu": new_mu[None, :], "sigma2": new_sigma2[None, :]}
    gap_obs = - c_obs(mu=prior.mu0, sigma2=prior.sigma02) - c_obs(mu=new_mu, sigma2=new_sigma2) + c_obs(
        mu=post.mu[[k1 - 1, k2 - 1], :], sigma2=post.sigma2[[k1 - 1, k2 - 1], :])
    # The next terms should be always zero
    # gap_obs += c_obs(mu=self.x, sigma2=self.sigma2_x, mult=new_rnk) + \
    #     - c_obs(mu=self.x, sigma2=self.sigma2_x, mult=post.rnk[:, k1]) \
    #     - c_obs(mu=self.x, sigma2=self.sigma2_x, mult=post.rnk[:, k2])
    # Calculating Stick Break ELBO terms
    if self.infer_alpha0 is True:
        # After Adding Cluster k at the beginning
        after_gamma1 = post.gamma1 - 1
        after_gamma2 = post.gamma2 + np.sum(post.elog1m_u[k2 - 1])
        after_eta1 = copy.copy(post.eta1[k1 - 1:k2 - 1])
        after_eta1[0] += post.eta1[k2 - 1] - 1
        after_eta0 = copy.copy(post.eta0[k1 - 1:k2 - 1])
        after_eta0 += - (post.eta1[k2 - 1] - 1) - post.gamma1 / post.gamma2 + after_gamma1 / after_gamma2
        gap_sb = - c_beta(eta1=after_eta1, eta0=after_eta0)
        gap_sb += c_beta(eta1=post.eta1[k1 - 1:k2], eta0=post.eta0[k1 - 1:k2])
        gap_sb += - c_alpha(after_gamma1, after_gamma2, after_gamma1 / after_gamma2)
        gap_sb += c_alpha(post.gamma1, post.gamma2, post.gamma1 / post.gamma2)
        params["eta0"] = after_eta0
        params["eta1"] = after_eta1
        params["gamma1"] = after_gamma1
        params["gamma2"] = after_gamma2
    else:
        # Merging Clusters k1, k2. Everything in between gets modify.
        after_eta1 = copy.copy(post.eta1[k1 - 1:k2 - 1])
        after_eta1[0] += post.eta1[k2 - 1] - 1
        after_eta0 = copy.copy(post.eta0[k1 - 1:k2 - 1])
        after_eta0 += - (post.eta1[k2 - 1] - 1)
        gap_sb = - c_beta(eta1=1., eta0=prior.alpha0) - c_beta(eta1=after_eta1, eta0=after_eta0) + c_beta(
            eta1=post.eta1[k1 - 1:k2], eta0=post.eta0[k1 - 1:k2])
        params["eta0"] = after_eta0
        params["eta1"] = after_eta1
    # Calculating Entropy ELBO terms
    gap_entropy = - calc_entropy(np.sum(rnk_k1_k2, axis=1)) + calc_entropy(rnk_k1) + calc_entropy(rnk_k2)
    gap = np.array([0, gap_sb, gap_entropy, gap_obs])
    evaluation = (gap_obs + gap_sb + gap_entropy) > 0
    return gap, evaluation, params
def gap_birth(self, idx_points):
    """Estimate the ELBO change from creating a new component out of the
    noise points `idx_points` (inserted at stick position 0).

    :return: (gap, evaluation, params) -- gap is the per-term array
        [poisson, stick-break, entropy, obs]; params holds the proposed
        component's parameters.
    """
    # Initial check
    if len(idx_points) == 0:
        # NOTE(review): this early return has 4 values while the normal
        # path returns 3 -- confirm how handle_birth unpacks it.
        return -1, False, 0, 0
    # Binding Parameters
    post = self.Post
    prior = self.Prior
    x_k = self.x[idx_points]
    sigma2_xk = self.sigma2_x[idx_points]
    r_n0 = post.rn0_vector[idx_points]
    add_r_n0 = np.sum(r_n0)  # noise mass moved into the new component
    # New Sigma, mu (precision-weighted posterior from the noise points)
    sx = np.sum(r_n0[:, None] * (sigma2_xk ** -1), axis=0)
    sigma2_k = (sx + self.Prior.sigma02 ** -1) ** -1
    xx = np.sum(r_n0[:, None] * x_k * (sigma2_xk ** -1), axis=0)
    mu_k = (xx + self.Prior.mu0 * (self.Prior.sigma02 ** -1)) * sigma2_k
    params = {"mu": mu_k[None, :], "sigma2": sigma2_k[None, :], "idx": idx_points}
    # Calculating mu, sigma ELBO terms
    gap_obs = + c_obs(mu=x_k, sigma2=sigma2_xk, mult=r_n0) + c_obs(mu=prior.mu0, sigma2=prior.sigma02) - c_obs(
        mu=mu_k, sigma2=sigma2_k)
    # Calculating Stick Break ELBO terms
    # Inserting Cluster at the beginning creates minimum disturbances
    if self.infer_alpha0 is True:
        after_gamma1 = prior.gamma1 + post.C + 1
        after_gamma2 = prior.gamma2 - post.elog1m_u.sum()
        new_nn = np.insert(post.rk, 0, add_r_n0, axis=0)
        after_eta1 = 1. + new_nn
        after_eta0 = after_gamma1 / after_gamma2 + convert_to_n0(new_nn)
        params["rk"] = [add_r_n0]
        params["eta0"] = after_eta0
        params["eta1"] = after_eta1
        params["gamma1"] = after_gamma1
        params["gamma2"] = after_gamma2
        gap_sb = - c_alpha(after_gamma1, after_gamma2, after_gamma1 / after_gamma2)
        gap_sb += - c_beta(eta1=after_eta1, eta0=after_eta0)
        gap_sb += c_alpha(post.gamma1, post.gamma2, post.gamma1 / post.gamma2)
        gap_sb += c_beta(eta1=post.eta1, eta0=post.eta0)
    else:
        new_nn = np.insert(post.rk, 0, add_r_n0, axis=0)
        after_eta1 = 1. + new_nn
        after_eta0 = prior.alpha0 + convert_to_n0(new_nn)
        params["rk"] = [add_r_n0]
        params["eta0"] = after_eta0
        params["eta1"] = after_eta1
        gap_sb = c_beta(eta1=1, eta0=prior.alpha0)
        gap_sb += - c_beta(eta1=after_eta1[0], eta0=after_eta0[0])
    # Calculating Poisson Noise ELBO terms
    if self.infer_pi1 is True:
        gap_pois = - add_r_n0 * prior.logp0
        added = copy.copy(post.rn0_vector)
        added[idx_points] += - r_n0
        after_a = prior.a + added.sum()
        after_b = prior.b + (1. - added).sum()
        gap_pois += - c_beta(eta1=after_a, eta0=after_b) + c_beta(eta1=post.a, eta0=post.b)
        params["a"] = after_a
        params["b"] = after_b
    else:
        gap_pois = - add_r_n0 * (np.log(prior.pi1 / (1. - prior.pi1)) + prior.logp0)
    # Calculating Entropy ELBO terms
    # Entropy should be 0 as we just move the terms in r_n0 to r_nk_new
    """
    # After
    new_rn0 = copy.copy(self.Post.rnk[:, 0])
    new_rn0[idx_points] += - r_n0
    gap_entropy = - calc_entropy(new_rn0) - calc_entropy(r_n0)
    # Before
    gap_entropy += calc_entropy(self.Post.rnk[:, 0])
    """
    gap_entropy = 0
    gap = np.array([gap_pois, gap_sb, gap_entropy, gap_obs])
    evaluation = np.sum(gap) > 0
    return gap, evaluation, params
def gap_split(self, k, new_rnk):
    """Evaluate the ELBO gap of splitting component k into two children.

    :param k: 1-based component index (index 0 is the noise component).
    :param new_rnk: proposed (N, 2) responsibilities for the two children,
        or None to reject the proposal outright.
    :return: (gap, evaluation, params) — gap holds the per-term ELBO
        differences [noise, stick-break, entropy, obs], evaluation is True
        when the proposed split increases the ELBO, params holds the
        candidate posterior fields for the two children.
    """
    # Initial Validation
    if new_rnk is None:
        return -np.inf, False
    # Binding Parameters
    prior = self.Prior
    post = self.Post
    xk = self.x
    sigma2_xk = self.sigma2_x
    rnk_k = self.get_rnk_k(k)
    # Calculating mu, sigma ELBO terms
    # Precision-weighted sufficient statistics of the two proposed children.
    sx = np.sum(new_rnk[:, :, None] * (sigma2_xk ** -1)[:, None, :], axis=0)
    new_sigma2 = (sx + (self.Prior.sigma02 ** -1)) ** -1
    xx = np.sum(new_rnk[:, :, None] * xk[:, None, :] * (sigma2_xk ** -1)[:, None, :], axis=0)
    new_mu = (xx + self.Prior.mu0 * (self.Prior.sigma02 ** -1)) * new_sigma2
    params = {"mu": new_mu, "sigma2": new_sigma2}
    # Calculating Obs Model GAP terms.
    gap_obs = + c_obs(mu=prior.mu0, sigma2=prior.sigma02) - c_obs(mu=new_mu, sigma2=new_sigma2) + c_obs(
        mu=post.mu[k - 1], sigma2=post.sigma2[k - 1])
    # The next terms should be zero
    # gap_obs += c_obs(mu=self.x, sigma2=self.sigma2_x, mult=new_rnk) \
    #     - c_obs(mu=self.x, sigma2=self.sigma2_x, mult=post.rnk[:, k])
    # Calculating Stick Break GAP terms
    if self.infer_alpha0 is True:
        # After Adding Cluster k
        after_gamma1 = prior.gamma1 + post.C + 1
        after_gamma2 = prior.gamma2 - np.sum(post.elog1m_u) + post.elog1m_u[k - 1]
        new_nn = np.insert(post.rk, k - 1, 0, axis=0)
        new_nn[k - 1:k + 1] = np.sum(new_rnk, axis=0)
        after_eta1 = 1. + new_nn
        after_eta0 = after_gamma1 / after_gamma2 + convert_to_n0(new_nn)
        params["rk"] = np.sum(new_rnk, axis=0)
        params["eta0"] = after_eta0[k-1:k+1]
        params["eta1"] = after_eta1[k-1:k+1]
        params["gamma1"] = after_gamma1
        params["gamma2"] = after_gamma2
        gap_sb = - c_alpha(after_gamma1, after_gamma2, after_gamma1 / after_gamma2)
        gap_sb += c_alpha(post.gamma1, post.gamma2, post.gamma1 / post.gamma2)
        gap_sb += - c_beta(eta1=after_eta1, eta0=after_eta0)
        gap_sb += c_beta(eta1=post.eta1, eta0=post.eta0)
    else:
        # Splitting Cluster k into cluster k, k+1.
        """
        new_nn = np.insert(post.nn, k-1, 0, axis=0)
        new_nn[k-1:k+1] = np.sum(new_rnk, axis=0)
        after_eta1 = 1. + new_nn
        after_eta0 = prior.alpha0 + convert_to_n0(new_nn)
        gap_sb = c_beta(eta1=1, eta0=prior.alpha0)
        gap_sb += - c_beta(eta1=after_eta1[k - 1:k + 1], eta0=after_eta0[k - 1:k + 1])
        gap_sb += c_beta(eta1=post.eta1[k - 1], eta0=post.eta0[k - 1])
        """
        new_nn = np.sum(new_rnk, axis=0)
        after_eta1 = 1. + new_nn
        after_eta0 = post.eta0[k - 1] * np.ones(2)
        after_eta0[0] += new_nn[1]
        params["rk"] = np.sum(new_rnk, axis=0)
        # NOTE(review): after_eta0/after_eta1 have length 2 here, so the
        # [k-1:k+1] slice is the full array only when k == 1; for k >= 2 it
        # yields fewer than two entries — confirm whether the plain arrays
        # were intended.
        params["eta0"] = after_eta0[k-1:k+1]
        params["eta1"] = after_eta1[k-1:k+1]
        gap_sb = c_beta(eta1=1, eta0=prior.alpha0)
        gap_sb += - c_beta(eta1=after_eta1, eta0=after_eta0)
        gap_sb += c_beta(eta1=post.eta1[k - 1], eta0=post.eta0[k - 1])
    # Calculating Entropy GAP terms
    gap_entropy = - calc_entropy(new_rnk) + calc_entropy(rnk_k)
    gap = np.array([0, gap_sb, gap_entropy, gap_obs])
    evaluation = (gap_obs + gap_sb + gap_entropy) > 0
    return gap, evaluation, params
# ##############################################################
# ##############################################################
# #########################################################################################################
# Abstract Methods
@abstractmethod
def calc_all_entropy(self):
    """Return the total entropy of the assignment responsibilities (subclass hook)."""
    raise NotImplementedError
@abstractmethod
def get_rnk_k(self, k):
    """Return the length-N responsibility vector of component k (subclass hook)."""
    raise NotImplementedError
@abstractmethod
def vb_update_local(self):
    """Perform the local variational update of the responsibilities (subclass hook)."""
    raise NotImplementedError
@abstractmethod
def vb_update_global(self):
    """Perform the global variational update of the cluster parameters (subclass hook)."""
    raise NotImplementedError
@abstractmethod
def find_nearby_centers(self, target_radius):
    """Return indices of components whose centers lie within target_radius of another center (subclass hook)."""
    raise NotImplementedError
# #########################################################################################################
class TimeIndepentModelPython(TimeIndependentModel):
    """Pure-NumPy implementation of the time-independent model's inference hooks."""

    def __init__(self, **kwargs):
        super(TimeIndepentModelPython, self).__init__(**kwargs)

    def calc_all_entropy(self):
        """Entropy of the full (N, K) responsibility matrix."""
        return calc_entropy(self.Post.rnk)

    def get_rnk_k(self, k):
        """Length-N responsibility vector of component k (column k of rnk)."""
        return self.Post.rnk[:, k]

    def find_nearby_centers(self, target_radius):
        """Return 1-based indices of components whose centers lie within
        target_radius of some other component's center."""
        p_qt = Tree(self.Post.mu)
        nearby = list()
        for c_ in np.arange(self.Post.C):
            centers = p_qt.query_ball_point(self.Post.mu[c_], target_radius)
            centers.remove(c_)  # drop the query center itself
            # NOTE(review): this requires at least two *other* centers in
            # range — confirm whether a single nearby neighbour is meant to
            # be ignored.
            if len(centers) > 1:
                centers = np.sort(centers)[::-1]
                elements = centers[[not (x in nearby) for x in centers]]
                # Idiom fix: extend the accumulator directly instead of a
                # side-effect-only list comprehension (same elements, same
                # order).
                nearby.extend(elements)
        return np.sort(nearby) + 1

    # INFERENCE ROUTINES
    def vb_update_local(self):
        """Local VB step: refresh responsibilities rnk from the current
        global posterior, then cache rn0_vector and the obs constant."""
        # Bind Data
        x = self.x
        sigma2_x = self.sigma2_x
        post = self.Post
        prior = self.Prior
        # Pi1
        if self.infer_pi1 is False:
            value0 = np.log(prior.pi1 / (1 - prior.pi1)) + prior.logp0
            l_1mpi1 = 0.
        else:
            l_pi1 = psi(post.a)  # - psi(self.Post.a + self.Post.b)
            l_1mpi1 = psi(post.b)  # - psi(self.Post.a + self.Post.b)
            value0 = l_pi1 - l_1mpi1 + prior.logp0
            l_1mpi1 = 0.
        # rnk update
        elog_beta_k = e_log_beta(eta1=post.eta1, eta0=post.eta0)
        elog_n_nk = e_log_n(x, sigma2_x, post.mu, post.sigma2)
        rnk = np.zeros((post.N, post.K))
        rnk[:, 0] = value0
        rnk[:, 1:] = l_1mpi1 + elog_n_nk + elog_beta_k[None, :]
        e_norm(rnk)
        post.setField('rnk', rnk, dims=('N', 'K'))
        post.setField('rn0_vector', rnk[:, 0], dims='N')
        post.setField('cobs', c_obs(x, sigma2_x, (1. - post.rn0_vector)), dims=None)

    def vb_update_global(self):
        """Global VB step: refresh sufficient statistics, cluster (mu, sigma2),
        stick-breaking (eta1, eta0) and, when inferred, alpha0 and pi1."""
        # Bind data
        x = self.x
        sigma2_x = self.sigma2_x
        post = self.Post
        prior = self.Prior
        # Calculate Sufficient Stats
        post.setField('rk', np.sum(post.rnk[:, 1:], axis=0), dims='C')
        post.setField('xx', dotatb(post.rnk[:, 1:], x * np.reciprocal(sigma2_x)), dims=('C', 'D'))
        post.setField('sx', dotatb(post.rnk[:, 1:], np.reciprocal(sigma2_x)), dims=('C', 'D'))
        # sigma, mu
        post.setField('sigma2', (post.sx + prior.sigma02 ** -1) ** -1, dims=('C', 'D'))
        post.setField('mu', (post.xx + prior.mu0 * (prior.sigma02 ** -1)) * post.sigma2, dims=('C', 'D'))
        # alphakt
        post.setField('eta1', 1 + post.rk, dims='C')
        if self.infer_alpha0 is True:
            post.setField('eta0', post.gamma1 / post.gamma2 + convert_to_n0(post.rk), dims='C')
        else:
            post.setField('eta0', prior.alpha0 + convert_to_n0(post.rk), dims='C')
        elog_u, elog1m_u = calc_beta_expectations(post.eta1, post.eta0)
        post.setField('elog_u', elog_u, dims='C')
        post.setField('elog1m_u', elog1m_u, dims='C')
        if self.infer_alpha0 is True:
            post.setField('gamma1', prior.gamma1 + post.C, dims=None)
            post.setField('gamma2', prior.gamma2 - post.elog1m_u.sum(), dims=None)
        # pi1
        if self.infer_pi1 is True:
            post.setField('a', prior.a + np.sum(post.rn0_vector), dims=None)
            post.setField('b', prior.b + np.sum(1 - post.rn0_vector), dims=None)
# ##############################################################
# ##############################################################
class TimeIndepentModelC(TimeIndependentModel):
    """Time-independent model backed by the LightC C extension for the heavy loops."""

    def __init__(self, **kwargs):
        super(TimeIndepentModelC, self).__init__(**kwargs)
        self.LC = LightC.LightC()
        # perm_forward, perm_reverse = self.LC.find_cache_friendly_permutation(np.float64(self.x).copy())
        # self.x = self.x[perm_forward, :]
        # self.sigma2_x = self.sigma2_x[perm_forward, :]
        self.LC.load_points(np.float64(self.x).copy(), np.float64(self.sigma2_x).copy())

    def calc_all_entropy(self):
        """Entropy of the responsibilities, computed inside the C backend."""
        return self.LC.calc_entropy()

    def get_rnk_k(self, k):
        """Return the dense length-N responsibility vector of component k
        (k == 0 addresses the noise component stored in Post.rn0_vector)."""
        if k == 0:
            rnk_k = self.Post.rn0_vector
            idx_k = np.flatnonzero(rnk_k)
            # Bug fix: restrict the weights to their non-zero support so the
            # scatter below is shape-consistent; previously this assigned the
            # full length-N vector into len(idx_k) slots and raised whenever
            # rn0_vector contained exact zeros. Result is unchanged when
            # every entry is non-zero.
            rnk_k = rnk_k[idx_k]
        else:
            # The C backend returns a sparse (values, indices) pair.
            rnk_k, idx_k = self.LC.get_rnk_given_c(k)
        out_rnk_k = np.zeros(self.Post.N)
        out_rnk_k[idx_k] = rnk_k
        return out_rnk_k

    def find_nearby_centers(self, target_radius):
        """Indices of components flagged by the C backend as lying within
        target_radius of another component's center."""
        near = self.LC.get_nearby_centers(target_radius)
        collapsed_components = np.array(np.where(near)[0])
        return collapsed_components

    def vb_update_local(self, search_radius=4):
        """Fused local+global VB step delegated to the C backend: computes the
        responsibilities and sufficient statistics in C, then refreshes the
        posterior fields. No-op when only the noise component exists."""
        # Bind Data
        post = self.Post
        prior = self.Prior
        if post.K > 1:
            # ###### LOCAL UPDATE
            # Pi1
            if self.infer_pi1 is False:
                value0 = np.log(prior.pi1 / (1 - prior.pi1)) + prior.logp0
                l_1mpi1 = 0.
            else:
                l_pi1 = psi(post.a)  # - psi(self.Post.a + self.Post.b)
                l_1mpi1 = psi(post.b)  # - psi(self.Post.a + self.Post.b)
                value0 = l_pi1 - l_1mpi1 + prior.logp0
                l_1mpi1 = 0.
            # rnk update
            elog_beta_k = e_log_beta(eta1=post.eta1, eta0=post.eta0)
            self.LC.load_centers(post.mu, post.sigma2, elog_beta_k)
            self.LC.build_kdtree_centers()
            xx, sx, rk, rn0_vector, cobs = self.LC.points_to_centers(l_1mpi1, value0, search_radius)
            # ###### GLOBAL UPDATE
            # mu, sigma2
            post.setField('sigma2', (sx + prior.sigma02 ** -1) ** -1, dims=('C', 'D'))
            post.setField('mu', (xx + prior.mu0 * (prior.sigma02 ** -1)) * post.sigma2, dims=('C', 'D'))
            post.setField('rk', rk, dims='C')
            post.setField('rn0_vector', rn0_vector, dims='N')
            post.setField('cobs', cobs, dims=None)
            # alphakt
            post.setField('eta1', 1 + rk, dims='C')
            if self.infer_alpha0 is True:
                post.setField('eta0', post.gamma1 / post.gamma2 + convert_to_n0(rk), dims='C')
            else:
                post.setField('eta0', prior.alpha0 + convert_to_n0(rk), dims='C')
            elog_u, elog1m_u = calc_beta_expectations(post.eta1, post.eta0)
            post.setField('elog_u', elog_u, dims='C')
            post.setField('elog1m_u', elog1m_u, dims='C')
            if self.infer_alpha0 is True:
                post.setField('gamma1', prior.gamma1 + post.C, dims=None)
                post.setField('gamma2', prior.gamma2 - post.elog1m_u.sum(), dims=None)
            # pi1
            if self.infer_pi1 is True:
                post.setField('a', prior.a + post.rn0_vector.sum(), dims=None)
                post.setField('b', prior.b + (1. - post.rn0_vector).sum(), dims=None)

    def vb_update_global(self):
        """No-op: the C-backed local step already refreshed the global parameters."""
        pass
# ##############################################################
# ##############################################################
class TimeDependentModel(AbstractModel):
    """Variational DP mixture with a hidden-Markov time component over cluster activity.

    Binds the data (x, sigma2_x, time), builds Prior/Post ParamBag containers,
    and implements the ELBO, the transition-count bookkeeping (aij) and the
    birth/merge/delete/split proposal gaps. The local/global VB updates and
    the entropy/responsibility accessors are abstract subclass hooks.
    """

    def __init__(self, data=None, x=None, sigma2=None, time=None,
                 mu0=None, sigma02=None, log_p0=None, rnk=None,
                 infer_pi1=False, pi1=None, a=None, b=None,
                 infer_alpha0=False, alpha0=None, gamma1=None, gamma2=None,
                 init_type='points', condition=None, prt=False, post=None, **kwargs):
        super(TimeDependentModel, self).__init__()
        # #############################################################################################
        # Verify Data Dim and Bind it Inside Class
        # `data` packs [time?], coordinates and per-point variances column-wise;
        # the column count selects the layout.
        if x is None:
            # 2D data
            if data.shape[1] == 4:
                if prt:
                    print("2D data detected.")
                self.x = data[:, :2]
                self.sigma2_x = data[:, 2:]
            # 3D data
            elif data.shape[1] == 6:
                if prt:
                    print("3D data detected.")
                self.x = data[:, :3]
                self.sigma2_x = data[:, 3:]
            # 2D data + Time
            elif data.shape[1] == 5:
                if prt:
                    print("2D data detected + time component.")
                self.time = data[:, 0]
                self.T = int(np.max(self.time))
                self.x = data[:, 1:3]
                self.sigma2_x = data[:, 3:]
            # 3D data + Time
            elif data.shape[1] == 7:
                if prt:
                    print("3D data detected + time component.")
                self.time = data[:, 0]
                self.T = int(np.max(self.time))
                self.x = data[:, 1:4]
                self.sigma2_x = data[:, 4:]
            else:
                sys.exit('Wrong Data Format')
        else:
            self.time = time
            self.T = int(np.max(self.time))
            self.x = x
            self.sigma2_x = sigma2
        self.mean_sigma = np.sqrt(np.mean(self.sigma2_x))
        # #############################################################################################
        # Compute prior
        mu0, sigma02, alpha0, log_p0, pi1, a, b, gamma1, gamma2, gamma0, _ = \
            priors(x=self.x, mu0=mu0, sigma02=sigma02, alpha0=alpha0,
                   pi1=pi1, log_p0=log_p0, gamma1=gamma1, gamma2=gamma2, a=a, b=b)
        # #############################################################################################
        # Build PRIOR containers
        self.Prior = ParamBag(K=0, D=self.x.shape[1], A=4)
        self.Prior.setField('mu0', mu0, dims='D')
        self.Prior.setField('sigma02', sigma02, dims='D')
        self.Prior.setField('logp0', log_p0, dims=None)
        # Pi1 containers
        self.infer_pi1 = infer_pi1
        if infer_pi1 is False:
            self.Prior.setField('pi1', pi1, dims=None)
        elif infer_pi1 is True:
            self.Prior.setField('a', a, dims=None)
            self.Prior.setField('b', b, dims=None)
        # Alpha0 containers
        self.infer_alpha0 = infer_alpha0
        self.Prior.setField('alpha0', alpha0, dims=None)
        # Time dependent terms: prior pseudo-counts of the 4-state activity chain.
        aij = np.zeros((4, 4))
        aij[0, 0] = self.Prior.alpha0
        aij[0, 1] = 1
        aij[1, 1:] = 1
        aij[2, 1:3] = 1
        aij[3, 3] = 1
        self.Prior.setField('aij', aij, dims=('A', 'A'))
        # #############################################################################################
        # Build POSTERIOR containers
        if rnk is None:
            # Compute Initial Condition
            mu_init, sigma2_init, eta0_init, eta1_init, k0 = \
                initiliaze(x=self.x, sigma2_x=self.sigma2_x, alpha0=alpha0, condition=condition,
                           init_type=init_type, post=post)
            self.Post = ParamBag(K=k0 + 1, D=self.x.shape[1], N=self.x.shape[0], C=k0, A=4)
            self.Post.setField('mu', mu_init.copy(), dims=('C', 'D'))
            self.Post.setField('sigma2', sigma2_init.copy(), dims=('C', 'D'))
            if infer_pi1 is True:
                self.Post.setField('a', a, dims=None)
                self.Post.setField('b', b, dims=None)
            self.Post.setField('alpha', eta0_init, dims='C')
        else:
            self.Post = ParamBag(K=rnk.shape[1], D=self.x.shape[1], N=self.x.shape[0], C=rnk.shape[1] - 1, A=4)
            self.Post.setField('rnk', rnk, dims=('N', 'K'))
            self.Post.setField('rn0_vector', np.zeros(self.Post.N), dims='N')
            self.Post.setField('alpha', np.ones(self.Post.C), dims='C')
            self.vb_update_global()
        # Time dependent terms
        # NOTE(review): init_aij reads self.Post.rnk, which is only assigned on
        # the rnk-provided branch above — confirm the rnk=None path.
        self.Post.setField('aij', self.Prior.aij + self.init_aij(), dims=('A', 'A'))
        self.Post.setField('const', None, dims=None)

    # #############################################################################################
    def init_aij(self):
        """Seed the transition pseudo-counts of the activity chain from the
        first few components' responsibility time traces."""
        counts = np.zeros((4, 4))
        counts[0, 1] = 10  # self.Post.C
        counts[1, 2] = 10  # self.Post.C
        counts[1, 3] = 10
        for c_ in np.arange(np.min([10, self.Post.C])):
            # NOTE(review): rnk is indexed as (N, K) everywhere else
            # (e.g. get_rnk_k), so rnk[c_ + 1, :] selects a point row, not a
            # component column — confirm whether rnk[:, c_ + 1] was intended.
            times = self.time[np.where(self.Post.rnk[c_ + 1, :] > 0)]
            n_times = len(times)
            d_times = np.diff(times)
            if n_times > 1:
                a11 = np.sum(d_times == 1)
                length = times[-1] - times[0]
                counts[0, 0] += times[0]
                counts[1, 1] += a11
                counts[1, 2] += n_times - a11 - 1
                counts[2, 1] += counts[1, 2] + 1
                counts[2, 2] += length - n_times - counts[2, 1] - counts[1, 2]
        return counts

    def vb_update(self, empty=False, prt=False, iteration=-1):
        """One VB sweep: local then global update, optionally followed by the
        empty-cluster and redundant-cluster moves."""
        self.vb_update_local()
        self.vb_update_global()
        # Empty-Redundant Cluster Move.
        if empty:
            self.empty(prt=prt, iteration=iteration)
            self.redundant(prt=prt, iteration=iteration)

    def calc_elbo(self, deb=False):
        """
        Calculates ELBO terms after an update !

        :param deb: when True return the individual terms
            [noise, stick-break, entropy, obs] instead of their sum.
        :return: scalar ELBO, or the per-term array when deb is True.
        """
        # Binding Data
        post = self.Post
        prior = self.Prior
        n, k = post.N, post.K
        c = k - 1
        # Calculating Poisson Noise ELBO terms
        if self.infer_pi1 is True:
            e_noise = post.rn0_vector.sum() * prior.logp0
            e_noise += c_beta(eta1=prior.a, eta0=prior.b) - c_beta(eta1=post.a, eta0=post.b)
        else:
            e_noise = post.rn0_vector.sum() * (np.log(prior.pi1 / (1 - prior.pi1)) + prior.logp0)
            e_noise += n * np.log(1 - prior.pi1)
        e_obs = c * c_obs(prior.mu0, prior.sigma02)
        e_obs += post.cobs
        e_obs += - c_obs(post.mu, post.sigma2)
        e_sb = c * c_gamma(prior.alpha0, 1) - c_gamma(post.alpha, 1)
        e_entropy = - self.calc_all_entropy()
        if deb:
            return np.array([e_noise, e_sb, e_entropy, e_obs])
        else:
            return e_noise + e_sb + e_entropy + e_obs

    def vb_update_tterms(self):
        """Refresh the posterior transition pseudo-counts aij and the
        forward-backward normalization constant."""
        counts, const = self.calc_time_series()
        self.Post.setField('aij', self.Prior.aij + counts, dims=('A', 'A'))
        self.Post.setField('const', const.squeeze(), dims=None)

    def calc_elbo_tterms(self):
        """ELBO contribution of the time-transition terms (Dirichlet rows of aij
        plus the forward-backward constant per component)."""
        # NOTE(review): Post.const is initialized to None in __init__;
        # np.isnan(None) raises TypeError — confirm ParamBag converts it.
        if np.isnan(self.Post.const):
            self.vb_update_tterms()
        t_elbo = 0
        for r_ in np.arange(self.Post.A):
            aij_prior = self.Prior.aij[r_]
            aij_post = self.Post.aij[r_]
            t_elbo += c_dir(aij_prior[aij_prior > 0]) - c_dir(aij_post[aij_post > 0])
        t_elbo += self.Post.const * self.Post.C
        return t_elbo

    def calc_time_series(self):
        """Accumulate activity-chain transition counts from the components'
        responsibility time traces plus a forward-backward pass over an
        average trace; returns (counts, const)."""
        # Calculate an average decaying time trace for an average fluorophore
        c = self.Post.C
        log_tmat = psi(self.Post.aij) - psi(np.sum(self.Post.aij, axis=1))[:, None]
        log_tmat = log_tmat[1:, 1:]
        e_g_c = self.Post.alpha / self.Post.alpha.sum()
        log_likelihood = np.array([-np.mean(e_g_c), -1e-10, -1e-10])[None, :] * np.ones((10000, 3))
        resp, resp_pair, const = fw_bw(np.array([0, -1e10, -1e10]), log_tmat, log_likelihood)
        # First Order Approximation, Only 1 when rnk is 1.
        # Then, keep initial position and final position.
        # Second Order approximation,
        # In between one, calculate exact decay.
        counts = np.zeros((4, 4))
        counts[1:, 1:] += resp_pair * c
        counts[0, 1] = c
        counts[3, 3] = 0
        for c_ in np.arange(c):
            times = self.time[np.where(self.get_rnk_k(c_ + 1) > 0)]
            n_times = len(times)
            d_times = np.diff(times)
            if n_times > 1:
                a11 = np.sum(d_times == 1)
                length = times[-1] - times[0]
                counts[0, 0] += times[0]
                counts[1, 1] += a11
                counts[1, 2] += n_times - a11 - 1
                counts[2, 1] += n_times - a11
                counts[2, 2] += length - n_times - n_times + a11 - n_times + a11 + 1
        return counts, const

    def propagate(self, params, mus):
        """Run a few VB fixed-point iterations on a proposal: project the
        proposal points onto the candidate centers `mus`, refine (mu, sigma2)
        and the responsibilities, and return the (N, C_new) responsibilities
        scattered back to the full data set."""
        # 0: Project Points To Centers' Proposals -> Get Initial rnk
        prior = self.Prior
        points = params["points"]
        sigmas2 = params["sigmas2"]
        n = points.shape[0]
        c, d = mus.shape
        new_rnk = np.zeros((n, c + 1))
        new_rnk[:, 0] = -100
        new_rnk[:, 1:] = e_log_n(points, sigmas2, mus, np.zeros((c, d)))
        e_norm(new_rnk)
        # 1: Iterate through the model n_iterations
        n_iterations = 10
        # Pi1
        if self.infer_pi1 is False:
            value0 = np.log(prior.pi1 / (1 - prior.pi1)) + prior.logp0
            l_1mpi1 = 0.
        else:
            l_pi1 = psi(params["a"])
            l_1mpi1 = psi(params["b"])
            value0 = l_pi1 - l_1mpi1 + prior.logp0
            l_1mpi1 = 0.
        for _ in np.arange(n_iterations):
            # Global Update
            # rc = np.sum(new_rnk[:, 1:], axis=0)
            sigma2c = (dotatb(new_rnk[:, 1:], np.reciprocal(sigmas2)) + prior.sigma02 ** -1) ** -1
            mus = (dotatb(new_rnk[:, 1:], points * np.reciprocal(sigmas2)) + prior.mu0 * (prior.sigma02 ** -1)) * \
                sigma2c
            # alphakt
            """
            eta1 = 1 + rc
            if self.infer_alpha0 is True:
                post.setField('eta0', post.gamma1 / post.gamma2 + convert_to_n0(post.rk), dims='C')
            else:
                post.setField('eta0', prior.alpha0 + convert_to_n0(post.rk), dims='C')
            elog_u, elog1m_u = calc_beta_expectations(post.eta1, post.eta0)
            if self.infer_alpha0 is True:
                post.setField('gamma1', prior.gamma1 + post.C, dims=None)
                post.setField('gamma2', prior.gamma2 - post.elog1m_u.sum(), dims=None)
            """
            new_rnk[:, 0] = value0
            new_rnk[:, 1:] = l_1mpi1 + e_log_n(points, sigmas2, mus, sigma2c)
            e_norm(new_rnk)
        # 2: Validate New Clusters (a_ points in them, b_ they are not too close)
        out_rnk = np.zeros((self.Post.N, new_rnk.shape[1] - 1))
        out_rnk[params['idx_points'], :] = new_rnk[:, 1:] / new_rnk[:, 1:].sum(axis=1)[:, None]
        out_rnk *= params['rnk_split'][:, None]
        out_rnk[np.isnan(out_rnk)] = 0.
        return out_rnk

    # ##############################################################
    # GAPS
    def gap_delete(self, k):
        """Evaluate the ELBO gap of deleting component k (its mass is returned
        to the noise component); returns (gap, evaluation)."""
        # Binding Parameters
        prior = self.Prior
        post = self.Post
        # Points in cluster
        rnk_k = self.get_rnk_k(k)
        idx_k = np.flatnonzero(rnk_k > CLT_BELONGING)
        if len(idx_k) < 1:
            gap = 0
            evaluation = False
        else:
            # Calculating mu, sigma ELBO terms
            gap_obs = - c_obs(mu=self.x, sigma2=self.sigma2_x, mult=rnk_k) \
                - c_obs(mu=prior.mu0, sigma2=prior.sigma02) \
                + c_obs(mu=post.mu[k - 1], sigma2=post.sigma2[k - 1])
            # Calculating Poisson Noise ELBO terms
            add_rn0 = np.sum(rnk_k)
            if self.infer_pi1 is True:
                gap_pois = add_rn0 * prior.logp0
                added = post.rn0_vector + rnk_k
                after_a = prior.a + added.sum()
                after_b = prior.b + (1. - added).sum()
                gap_pois += - c_beta(eta1=after_a, eta0=after_b) + c_beta(eta1=post.a, eta0=post.b)
            else:
                gap_pois = add_rn0 * (np.log(prior.pi1 / (1. - prior.pi1)) + prior.logp0)
            # Calculating Stick Break ELBO terms
            if self.infer_alpha0 is True:
                # After Erasing Cluster k
                after_gamma1 = post.gamma1 - 1.
                after_gamma2 = post.gamma2 + post.elog1m_u[k - 1]
                gap_sb = - c_alpha(after_gamma1, after_gamma2, after_gamma1 / after_gamma2)
                new_nn = np.delete(post.rk, k - 1, axis=0)
                after_eta1 = 1. + new_nn
                after_eta0 = after_gamma1 / after_gamma2 + convert_to_n0(new_nn)
                gap_sb += - c_beta(eta1=after_eta1, eta0=after_eta0)
                # Before
                gap_sb += c_alpha(post.gamma1, post.gamma2, post.gamma1 / post.gamma2)
                gap_sb += c_beta(eta1=post.eta1, eta0=post.eta0)
            else:
                new_nn = np.delete(post.rk, k - 1, axis=0)
                after_eta1 = 1. + new_nn
                after_eta0 = prior.alpha0 + convert_to_n0(new_nn)
                gap_sb = - c_beta(eta1=1, eta0=prior.alpha0)
                gap_sb += - c_beta(eta1=after_eta1, eta0=after_eta0) + c_beta(eta1=post.eta1, eta0=post.eta0)
                # gap_sb += - c_beta(eta1=after_eta1[:k-1], eta0=after_eta0[:k-1]) \
                #     + c_beta(eta1=post.eta1[:k], eta0=post.eta0[:k])
                # Subtraction of terms after k and k-1 respectively are zero.
            # Calculating Entropy ELBO terms
            # After
            gap_entropy = - calc_entropy(post.rn0_vector + rnk_k)
            # Before
            gap_entropy += calc_entropy(post.rn0_vector) + calc_entropy(rnk_k)
            gap = np.array([gap_pois, gap_sb, gap_entropy, gap_obs])
            evaluation = np.sum(gap) > 0
        return gap, evaluation

    def gap_merge(self, k1, k2):
        """Evaluate the ELBO gap of merging components k1 and k2 into one;
        returns (gap, evaluation, params) with the merged candidate fields."""
        # Check k1 < k2
        if k1 > k2:
            k1, k2 = k2, k1
        # Binding Parameters
        prior = self.Prior
        post = self.Post
        # Estimate New Center
        rnk_k1 = self.get_rnk_k(k1)
        rnk_k2 = self.get_rnk_k(k2)
        rnk_k1_k2 = np.vstack([rnk_k1, rnk_k2]).transpose()
        # Calculating mu, sigma ELBO terms
        new_rnk = np.sum(rnk_k1_k2, axis=1)
        new_sigma2 = (np.sum(new_rnk[:, None] * (self.sigma2_x ** -1), axis=0) + prior.sigma02 ** -1) ** -1
        new_mu = (np.sum(new_rnk[:, None] * self.x * (self.sigma2_x ** -1), axis=0) +
                  prior.mu0 * (prior.sigma02 ** -1)) * new_sigma2
        params = {"mu": new_mu[None, :], "sigma2": new_sigma2[None, :]}
        gap_obs = - c_obs(mu=prior.mu0, sigma2=prior.sigma02) - c_obs(mu=new_mu, sigma2=new_sigma2) + c_obs(
            mu=post.mu[[k1 - 1, k2 - 1], :], sigma2=post.sigma2[[k1 - 1, k2 - 1], :])
        # The next terms should be always zero
        # gap_obs += c_obs(mu=self.x, sigma2=self.sigma2_x, mult=new_rnk) + \
        #     - c_obs(mu=self.x, sigma2=self.sigma2_x, mult=post.rnk[:, k1]) \
        #     - c_obs(mu=self.x, sigma2=self.sigma2_x, mult=post.rnk[:, k2])
        # Calculating Stick Break ELBO terms
        # Calculating Gamma ELBO terms, Merging Clusters k1, k2.
        alpha_k12 = post.alpha[[k1 - 1, k2 - 1]]
        alpha_merge = np.sum(alpha_k12) - prior.alpha0 / post.C
        gap_gamma = - c_gamma(alpha_merge, 1) + c_gamma(alpha_k12, 1) - c_gamma(self.Prior.alpha0, 1)
        params["alpha"] = alpha_merge
        # Calculating Entropy ELBO terms
        gap_entropy = - calc_entropy(np.sum(rnk_k1_k2, axis=1)) + calc_entropy(rnk_k1) + calc_entropy(rnk_k2)
        gap = np.array([0, gap_gamma, gap_entropy, gap_obs])
        evaluation = (gap_obs + gap_gamma + gap_entropy) > 0
        return gap, evaluation, params

    def gap_birth(self, idx_points):
        """Evaluate the ELBO gap of creating a new component out of the noise
        responsibilities of the points idx_points; returns
        (gap, evaluation, params)."""
        # Initial check
        if len(idx_points) == 0:
            return -1, False, 0, 0
        # Binding Parameters
        post = self.Post
        prior = self.Prior
        x_k = self.x[idx_points]
        sigma2_xk = self.sigma2_x[idx_points]
        r_n0 = post.rn0_vector[idx_points]
        add_r_n0 = np.sum(r_n0)
        # New Sigma, mu
        sx = np.sum(r_n0[:, None] * (sigma2_xk ** -1), axis=0)
        sigma2_k = (sx + self.Prior.sigma02 ** -1) ** -1
        xx = np.sum(r_n0[:, None] * x_k * (sigma2_xk ** -1), axis=0)
        mu_k = (xx + self.Prior.mu0 * (self.Prior.sigma02 ** -1)) * sigma2_k
        params = {"mu": mu_k[None, :], "sigma2": sigma2_k[None, :], "idx": idx_points}
        # Calculating mu, sigma ELBO terms
        gap_obs = + c_obs(mu=x_k, sigma2=sigma2_xk, mult=r_n0) + c_obs(mu=prior.mu0, sigma2=prior.sigma02) - c_obs(
            mu=mu_k, sigma2=sigma2_k)
        # Calculating Stick Break ELBO terms
        # Inserting Cluster at the beginning creates minimum disturbances
        if self.infer_alpha0 is True:
            after_gamma1 = prior.gamma1 + post.C + 1
            after_gamma2 = prior.gamma2 - post.elog1m_u.sum()
            new_nn = np.insert(post.rk, 0, add_r_n0, axis=0)
            after_eta1 = 1. + new_nn
            after_eta0 = after_gamma1 / after_gamma2 + convert_to_n0(new_nn)
            params["rk"] = [add_r_n0]
            params["eta0"] = after_eta0
            params["eta1"] = after_eta1
            params["gamma1"] = after_gamma1
            params["gamma2"] = after_gamma2
            gap_sb = - c_alpha(after_gamma1, after_gamma2, after_gamma1 / after_gamma2)
            gap_sb += - c_beta(eta1=after_eta1, eta0=after_eta0)
            gap_sb += c_alpha(post.gamma1, post.gamma2, post.gamma1 / post.gamma2)
            gap_sb += c_beta(eta1=post.eta1, eta0=post.eta0)
        else:
            new_nn = np.insert(post.rk, 0, add_r_n0, axis=0)
            after_eta1 = 1. + new_nn
            after_eta0 = prior.alpha0 + convert_to_n0(new_nn)
            params["rk"] = [add_r_n0]
            params["eta0"] = after_eta0
            params["eta1"] = after_eta1
            gap_sb = c_beta(eta1=1, eta0=prior.alpha0)
            gap_sb += - c_beta(eta1=after_eta1[0], eta0=after_eta0[0])
        # Calculating Poisson Noise ELBO terms
        if self.infer_pi1 is True:
            gap_pois = - add_r_n0 * prior.logp0
            added = copy.copy(post.rn0_vector)
            added[idx_points] += - r_n0
            after_a = prior.a + added.sum()
            after_b = prior.b + (1. - added).sum()
            gap_pois += - c_beta(eta1=after_a, eta0=after_b) + c_beta(eta1=post.a, eta0=post.b)
            params["a"] = after_a
            params["b"] = after_b
        else:
            gap_pois = - add_r_n0 * (np.log(prior.pi1 / (1. - prior.pi1)) + prior.logp0)
        # Calculating Entropy ELBO terms
        # Entropy should be 0 as we just move the terms in r_n0 to r_nk_new
        """
        # After
        new_rn0 = copy.copy(self.Post.rnk[:, 0])
        new_rn0[idx_points] += - r_n0
        gap_entropy = - calc_entropy(new_rn0) - calc_entropy(r_n0)
        # Before
        gap_entropy += calc_entropy(self.Post.rnk[:, 0])
        """
        gap_entropy = 0
        gap = np.array([gap_pois, gap_sb, gap_entropy, gap_obs])
        evaluation = np.sum(gap) > 0
        return gap, evaluation, params

    def gap_split(self, k, new_rnk):
        """Evaluate the ELBO gap of splitting component k into two children;
        returns (gap, evaluation, params) with the candidate fields."""
        # Initial Validation
        if new_rnk is None:
            return -np.inf, False
        # Binding Parameters
        prior = self.Prior
        post = self.Post
        xk = self.x
        sigma2_xk = self.sigma2_x
        rnk_k = self.get_rnk_k(k)
        # Calculating mu, sigma ELBO terms
        sx = np.sum(new_rnk[:, :, None] * (sigma2_xk ** -1)[:, None, :], axis=0)
        new_sigma2 = (sx + (self.Prior.sigma02 ** -1)) ** -1
        xx = np.sum(new_rnk[:, :, None] * xk[:, None, :] * (sigma2_xk ** -1)[:, None, :], axis=0)
        new_mu = (xx + self.Prior.mu0 * (self.Prior.sigma02 ** -1)) * new_sigma2
        params = {"mu": new_mu, "sigma2": new_sigma2}
        # Calculating Obs Model GAP terms.
        gap_obs = + c_obs(mu=prior.mu0, sigma2=prior.sigma02) - c_obs(mu=new_mu, sigma2=new_sigma2) + c_obs(
            mu=post.mu[k - 1], sigma2=post.sigma2[k - 1])
        # The next terms should be zero
        # gap_obs += c_obs(mu=self.x, sigma2=self.sigma2_x, mult=new_rnk) \
        #     - c_obs(mu=self.x, sigma2=self.sigma2_x, mult=post.rnk[:, k])
        # Calculating Stick Break GAP terms
        if self.infer_alpha0 is True:
            # After Adding Cluster k
            after_gamma1 = prior.gamma1 + post.C + 1
            after_gamma2 = prior.gamma2 - np.sum(post.elog1m_u) + post.elog1m_u[k - 1]
            new_nn = np.insert(post.rk, k - 1, 0, axis=0)
            new_nn[k - 1:k + 1] = np.sum(new_rnk, axis=0)
            after_eta1 = 1. + new_nn
            after_eta0 = after_gamma1 / after_gamma2 + convert_to_n0(new_nn)
            params["rk"] = np.sum(new_rnk, axis=0)
            params["eta0"] = after_eta0[k-1:k+1]
            params["eta1"] = after_eta1[k-1:k+1]
            params["gamma1"] = after_gamma1
            params["gamma2"] = after_gamma2
            gap_sb = - c_alpha(after_gamma1, after_gamma2, after_gamma1 / after_gamma2)
            gap_sb += c_alpha(post.gamma1, post.gamma2, post.gamma1 / post.gamma2)
            gap_sb += - c_beta(eta1=after_eta1, eta0=after_eta0)
            gap_sb += c_beta(eta1=post.eta1, eta0=post.eta0)
        else:
            # Splitting Cluster k into cluster k, k+1.
            """
            new_nn = np.insert(post.nn, k-1, 0, axis=0)
            new_nn[k-1:k+1] = np.sum(new_rnk, axis=0)
            after_eta1 = 1. + new_nn
            after_eta0 = prior.alpha0 + convert_to_n0(new_nn)
            gap_sb = c_beta(eta1=1, eta0=prior.alpha0)
            gap_sb += - c_beta(eta1=after_eta1[k - 1:k + 1], eta0=after_eta0[k - 1:k + 1])
            gap_sb += c_beta(eta1=post.eta1[k - 1], eta0=post.eta0[k - 1])
            """
            new_nn = np.sum(new_rnk, axis=0)
            after_eta1 = 1. + new_nn
            after_eta0 = post.eta0[k - 1] * np.ones(2)
            after_eta0[0] += new_nn[1]
            params["rk"] = np.sum(new_rnk, axis=0)
            # NOTE(review): after_eta0/after_eta1 have length 2, so the
            # [k-1:k+1] slice is the full array only when k == 1 — confirm
            # whether the plain arrays were intended.
            params["eta0"] = after_eta0[k-1:k+1]
            params["eta1"] = after_eta1[k-1:k+1]
            gap_sb = c_beta(eta1=1, eta0=prior.alpha0)
            gap_sb += - c_beta(eta1=after_eta1, eta0=after_eta0)
            gap_sb += c_beta(eta1=post.eta1[k - 1], eta0=post.eta0[k - 1])
        # Calculating Entropy GAP terms
        gap_entropy = - calc_entropy(new_rnk) + calc_entropy(rnk_k)
        gap = np.array([0, gap_sb, gap_entropy, gap_obs])
        evaluation = (gap_obs + gap_sb + gap_entropy) > 0
        return gap, evaluation, params

    # #########################################################################################################
    # Abstract Methods
    @abstractmethod
    def calc_all_entropy(self):
        """Return the total entropy of the assignment responsibilities (subclass hook)."""
        raise NotImplementedError

    @abstractmethod
    def get_rnk_k(self, k):
        """Return the length-N responsibility vector of component k (subclass hook)."""
        raise NotImplementedError

    @abstractmethod
    def vb_update_local(self):
        """Perform the local variational update (subclass hook)."""
        raise NotImplementedError

    @abstractmethod
    def vb_update_global(self):
        """Perform the global variational update (subclass hook)."""
        raise NotImplementedError

    @abstractmethod
    def find_nearby_centers(self, target_radius):
        """Return indices of components with centers within target_radius of another center (subclass hook)."""
        raise NotImplementedError
# #########################################################################################################
class TimeDepentModelPython(TimeDependentModel):
    """Pure-NumPy implementation of the time-dependent model's inference hooks."""

    def __init__(self, **kwargs):
        super(TimeDepentModelPython, self).__init__(**kwargs)
        # Per-component list of the time frames in which the component was active.
        self.active_c = []

    def calc_all_entropy(self):
        """Entropy of the full (N, K) responsibility matrix."""
        return calc_entropy(self.Post.rnk)

    def get_rnk_k(self, k):
        """Length-N responsibility vector of component k (column k of rnk)."""
        return self.Post.rnk[:, k]

    def find_nearby_centers(self, target_radius):
        """Return 1-based indices of components whose centers lie within
        target_radius of some other component's center."""
        p_qt = Tree(self.Post.mu)
        nearby = list()
        for c_ in np.arange(self.Post.C):
            centers = p_qt.query_ball_point(self.Post.mu[c_], target_radius)
            centers.remove(c_)  # drop the query center itself
            if len(centers) > 1:
                centers = np.sort(centers)[::-1]
                elements = centers[[not (x in nearby) for x in centers]]
                [nearby.append(e_) for e_ in elements]
        return np.sort(nearby) + 1

    # INFERENCE ROUTINES
    def vb_update_local(self):
        """Local VB step: per time frame, score only the centers near that
        frame's points (KD-tree ball query), then normalize rnk."""
        # Bind Data
        x = self.x
        sigma2_x = self.sigma2_x
        post = self.Post
        prior = self.Prior
        if post.K == 1:
            # Only the noise component exists: everything is assigned to it.
            post.setField('rnk', np.ones((post.N, post.K)), dims=('N', 'K'))
        else:
            # Pi1
            if self.infer_pi1 is False:
                value0 = np.log(prior.pi1 / (1 - prior.pi1)) + prior.logp0
                l_1mpi1 = 0.
            else:
                l_pi1 = psi(post.a)  # - psi(self.Post.a + self.Post.b)
                l_1mpi1 = psi(post.b)  # - psi(self.Post.a + self.Post.b)
                value0 = l_pi1 - l_1mpi1 + prior.logp0
                l_1mpi1 = 0.
            # rnk update
            elog_n_nk = e_log_n(x, sigma2_x, post.mu, post.sigma2)
            tree = Tree(post.mu)
            rnk = -np.inf * np.ones((post.N, post.K))
            rnk[:, 0] = value0
            active_c = [[] for _ in np.arange(post.C)]
            for t_ in np.arange(self.T):
                idx_nt = np.where(self.time == t_)[0]
                if len(idx_nt) > 0:
                    points = self.x[idx_nt, :]
                    centers = np.int32(np.unique(np.concatenate(tree.query_ball_point(points, 30))))
                    for c_ in centers:
                        active_c[c_].append(t_)
                    # TO DO: VERIFY THAT I AM SUMMING ALL ACTIVE CENTERS AT TIME T.
                    elog_pi_k = digamma(post.alpha[centers]) - digamma(post.alpha[centers].sum())
                    rnk[np.ix_(idx_nt, centers + 1)] = l_1mpi1 + elog_n_nk[np.ix_(idx_nt, centers)] + elog_pi_k[None, :]
            self.active_c = active_c
            e_norm(rnk)
            post.setField('rnk', rnk, dims=('N', 'K'))
        post.setField('rn0_vector', post.rnk[:, 0], dims='N')
        post.setField('cobs', c_obs(x, sigma2_x, (1. - post.rn0_vector)), dims=None)

    def vb_update_global(self):
        """Global VB step: refresh sufficient statistics, cluster (mu, sigma2),
        eta1 and, when inferred, the noise weight (a, b)."""
        # Bind data
        x = self.x
        sigma2_x = self.sigma2_x
        post = self.Post
        prior = self.Prior
        # Calculate Sufficient Stats
        post.setField('rk', np.sum(post.rnk[:, 1:], axis=0), dims='C')
        post.setField('xx', dotatb(post.rnk[:, 1:], x * np.reciprocal(sigma2_x)), dims=('C', 'D'))
        post.setField('sx', dotatb(post.rnk[:, 1:], np.reciprocal(sigma2_x)), dims=('C', 'D'))
        # sigma, mu
        post.setField('sigma2', (post.sx + prior.sigma02 ** -1) ** -1, dims=('C', 'D'))
        post.setField('mu', (post.xx + prior.mu0 * (prior.sigma02 ** -1)) * post.sigma2, dims=('C', 'D'))
        # alphakt
        post.setField('eta1', 1 + post.rk, dims='C')
        # pi1
        if self.infer_pi1 is True:
            post.setField('a', prior.a + np.sum(post.rn0_vector), dims=None)
            post.setField('b', prior.b + np.sum(1 - post.rn0_vector), dims=None)
# ##############################################################
# ##############################################################
class TimeDepentModelC(TimeDependentModel):
def __init__(self, **kwargs):
    """Build the time-dependent model and hand the point cloud to the LightC backend."""
    super(TimeDepentModelC, self).__init__(**kwargs)
    self.LC = LightC.LightC()
    # perm_forward, perm_reverse = self.LC.find_cache_friendly_permutation(np.float64(self.x).copy())
    # self.x = self.x[perm_forward, :]
    # self.sigma2_x = self.sigma2_x[perm_forward, :]
    self.LC.load_points(np.float64(self.x).copy(), np.float64(self.sigma2_x).copy())
def calc_all_entropy(self):
return self.LC.calc_entropy()
def get_rnk_k(self, k):
if k == 0:
rnk_k = self.Post.rn0_vector
idx_k = np.flatnonzero(rnk_k)
else:
rnk_k, idx_k = self.LC.get_rnk_given_c(k)
out_rnk_k = np.zeros(self.Post.N)
out_rnk_k[idx_k] = rnk_k
return out_rnk_k
def find_nearby_centers(self, target_radius):
near = self.LC.get_nearby_centers(target_radius)
collapsed_components = np.array(np.where(near)[0])
return collapsed_components
def vb_update_local(self, search_radius=4):
# Bind Data
post = self.Post
prior = self.Prior
if post.K > 1:
# ###### LOCAL UPDATE
# Pi1
if self.infer_pi1 is False:
value0 = np.log(prior.pi1 / (1 - prior.pi1)) + prior.logp0
l_1mpi1 = 0.
else:
l_pi1 = psi(post.a) # - psi(self.Post.a + self.Post.b)
l_1mpi1 = psi(post.b) # - psi(self.Post.a + self.Post.b)
value0 = l_pi1 - l_1mpi1 + prior.logp0
l_1mpi1 = 0.
# rnk update
elog_beta_k = e_log_beta(eta1=post.eta1, eta0=post.eta0)
self.LC.load_centers(post.mu, post.sigma2, elog_beta_k)
self.LC.build_kdtree_centers()
xx, sx, rk, rn0_vector, cobs = self.LC.points_to_centers(l_1mpi1, value0, search_radius)
# ###### GLOBAL UPDATE
# mu, sigma2
post.setField('sigma2', (sx + prior.sigma02 ** -1) ** -1, dims=('C', 'D'))
post.setField('mu', (xx + prior.mu0 * (prior.sigma02 ** -1)) * post.sigma2, dims=('C', 'D'))
post.setField('rk', rk, dims='C')
post.setField('rn0_vector', rn0_vector, dims='N')
post.setField('cobs', cobs, dims=None)
# alphakt
post.setField('eta1', 1 + rk, dims='C')
if self.infer_alpha0 is True:
post.setField('eta0', post.gamma1 / post.gamma2 + convert_to_n0(rk), dims='C')
else:
post.setField('eta0', prior.alpha0 + convert_to_n0(rk), dims='C')
elog_u, elog1m_u = calc_beta_expectations(post.eta1, post.eta0)
post.setField('elog_u', elog_u, dims='C')
post.setField('elog1m_u', elog1m_u, dims='C')
if self.infer_alpha0 is True:
post.setField('gamma1', prior.gamma1 + post.C, dims=None)
post.setField('gamma2', prior.gamma2 - post.elog1m_u.sum(), dims=None)
# pi1
if self.infer_pi1 is True:
post.setField('a', prior.a + post.rn0_vector.sum(), dims=None)
post.setField('b', prior.b + (1. - post.rn0_vector).sum(), dims=None)
def vb_update_global(self):
pass
# ##############################################################
# ##############################################################
| 41.071895
| 120
| 0.498556
| 10,931
| 87,976
| 3.83652
| 0.046839
| 0.031476
| 0.011803
| 0.008823
| 0.815056
| 0.794215
| 0.771038
| 0.758256
| 0.752462
| 0.734244
| 0
| 0.03752
| 0.311687
| 87,976
| 2,141
| 121
| 41.091079
| 0.655024
| 0.082091
| 0
| 0.745884
| 0
| 0.000716
| 0.029093
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057981
| false
| 0.001432
| 0.012169
| 0.005011
| 0.102362
| 0.043665
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
291255ad2ae597a2147f400131e5fac9e437333b
| 3,572
|
py
|
Python
|
ArgsParserTest.py
|
fgsalomon/KataArgsPython
|
485d2c4fede5f1ab1491cc5552e024b21b761ad1
|
[
"MIT"
] | null | null | null |
ArgsParserTest.py
|
fgsalomon/KataArgsPython
|
485d2c4fede5f1ab1491cc5552e024b21b761ad1
|
[
"MIT"
] | null | null | null |
ArgsParserTest.py
|
fgsalomon/KataArgsPython
|
485d2c4fede5f1ab1491cc5552e024b21b761ad1
|
[
"MIT"
] | null | null | null |
import unittest
import ArgsParser
class ArgsParserTest(unittest.TestCase):
    """Unit tests for ArgsParser.ArgsParser: flag parsing, defaults,
    ordering, negative numbers, list values, and type errors."""

    @staticmethod
    def _make_parser(schema, args):
        """Return a fresh parser for the given schema/argument list."""
        return ArgsParser.ArgsParser(schema=schema, args=args)

    def _verify(self, parser, parse_ok, flag_l, flag_p, flag_d):
        """Assert parse() result and the resolved value of each flag."""
        self.assertEqual(parse_ok, parser.parse())
        self.assertEqual(flag_l, parser.get('-l'))
        self.assertEqual(flag_p, parser.get('-p'))
        self.assertEqual(flag_d, parser.get('-d'))

    def testCompleteAndCorrectArgs(self):
        parser = self._make_parser({'-l': True, '-p': 8080, '-d': '/usr/logs'},
                                   ['-l', 'False', '-p', '8081', '-d', '/var/logs'])
        self._verify(parser, True, False, 8081, '/var/logs')

    def testMissingArgValue(self):
        # '-l' has no explicit value: the schema default applies.
        parser = self._make_parser({'-l': True, '-p': 8080, '-d': '/usr/logs'},
                                   ['-l', '-p', '8081', '-d', '/var/logs'])
        self._verify(parser, True, True, 8081, '/var/logs')

    def testMissingArg(self):
        # '-d' absent entirely: parse() fails, default value still readable.
        parser = self._make_parser({'-l': True, '-p': 8080, '-d': '/usr/logs'},
                                   ['-l', 'False', '-p', '8081'])
        self._verify(parser, False, False, 8081, '/usr/logs')

    def testUnorderedArgs(self):
        parser = self._make_parser({'-l': True, '-p': 8080, '-d': '/usr/logs'},
                                   ['-l', '-d', '/var/logs', '-p', '8081'])
        self._verify(parser, True, True, 8081, '/var/logs')

    def testNegativeArgValue(self):
        # A leading '-' on a number must not be mistaken for a flag.
        parser = self._make_parser({'-l': True, '-p': 8080, '-d': '/usr/logs'},
                                   ['-l', '-d', '/var/logs', '-p', '-8081'])
        self._verify(parser, True, True, -8081, '/var/logs')

    def testArgList(self):
        parser = self._make_parser({'-l': ['this', 'is', 'a', 'list'], '-p': 8080, '-d': '/usr/logs'},
                                   ['-l', 'list,a,is,this', '-p', '8081', '-d', '/var/logs'])
        self._verify(parser, True, ['list', 'a', 'is', 'this'], 8081, '/var/logs')

    def testDefaultArgList(self):
        parser = self._make_parser({'-l': ['this', 'is', 'a', 'list'], '-p': 8080, '-d': '/usr/logs'},
                                   ['-l', '-p', '8081', '-d', '/var/logs'])
        self._verify(parser, True, ['this', 'is', 'a', 'list'], 8081, '/var/logs')

    def testIncorrectArgIntegerType(self):
        # Non-numeric value for an int flag: parse() fails, default kept.
        parser = self._make_parser({'-l': True, '-p': 8080, '-d': '/usr/logs'},
                                   ['-l', '-d', '/var/logs', '-p', 'verbose'])
        self._verify(parser, False, True, 8080, '/var/logs')
def main():
    """Entry point: discover and run all tests in this module."""
    unittest.main()


if __name__ == '__main__':
    main()
| 46.38961
| 113
| 0.522396
| 391
| 3,572
| 4.751918
| 0.102302
| 0.258342
| 0.102261
| 0.134553
| 0.840151
| 0.831001
| 0.831001
| 0.809473
| 0.809473
| 0.809473
| 0
| 0.03438
| 0.25084
| 3,572
| 76
| 114
| 47
| 0.65994
| 0
| 0
| 0.587302
| 0
| 0
| 0.129059
| 0
| 0
| 0
| 0
| 0
| 0.507937
| 1
| 0.142857
| false
| 0
| 0.031746
| 0
| 0.190476
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
294b9bbef033469b1450b6e4a5d635d9ac80f11d
| 66,640
|
py
|
Python
|
object_branch/stitching/stitch_methods.py
|
JasonQSY/Associative3D
|
c50818b593ec48c38ed7ee3e109c23531089da32
|
[
"MIT"
] | 25
|
2020-08-26T02:41:12.000Z
|
2021-09-30T21:50:36.000Z
|
object_branch/stitching/stitch_methods.py
|
JasonQSY/Associative3D
|
c50818b593ec48c38ed7ee3e109c23531089da32
|
[
"MIT"
] | null | null | null |
object_branch/stitching/stitch_methods.py
|
JasonQSY/Associative3D
|
c50818b593ec48c38ed7ee3e109c23531089da32
|
[
"MIT"
] | 1
|
2020-11-17T15:25:26.000Z
|
2020-11-17T15:25:26.000Z
|
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import sys
import os.path as osp
import os
import scipy.misc
import scipy.io as sio
import torch
import imageio
import pdb
import cv2
import random
import shutil
from pyquaternion import Quaternion
import pickle
import itertools
from copy import deepcopy
from PIL import Image
import math
from time import time
from absl import app, flags
code_root = osp.dirname(osp.dirname(osp.abspath(__file__)))
sys.path.append(osp.join(code_root, '..'))
from object_branch.renderer import utils as render_utils
from object_branch.utils import suncg_parse
from object_branch.stitching.stitch_utils import *
from object_branch.utils import metrics
opts = flags.FLAGS
def stitch_objects_singleview_left(objects1, objects2, rel_rot, rel_tran):
    """
    only keep view 1
    """
    # Transform every view-1 object into the common frame; view 2 is dropped.
    rotation = Quaternion(rel_rot)
    for entry in objects1:
        if 'quat' in entry:
            # Some entries store the quaternion wrapped in an extra
            # dimension; fall back to the first element in that case.
            try:
                composed = rotation * Quaternion(entry['quat'])
            except ValueError:
                composed = rotation * Quaternion(entry['quat'][0])
            entry['quat'] = composed.elements
        if 'trans' in entry:
            entry['trans'] = rotation.rotate(entry['trans']) + rel_tran
    # No affinity computation happens in this baseline — report all-None info.
    affinity_info = {key: None for key in (
        'hit_gt_match', 'gt_match_in_proposal',
        'affinity_pred', 'affinity_gt', 'matching')}
    return objects1, rel_tran, rel_rot, affinity_info
def stitch_objects_singleview_random(objects1, objects2, rel_rot, rel_tran):
    """
    keep view 1 or view 2 randomly
    """
    # Fair coin flip between the two single-view baselines.
    pick_right = random.random() > 0.5
    chosen = stitch_objects_singleview_right if pick_right else stitch_objects_singleview_left
    return chosen(objects1, objects2, rel_rot, rel_tran)
def stitch_objects_singleview_right(objects1, objects2, rel_rot, rel_tran):
    """
    only keep view2
    """
    # View-2 objects are already in their own frame, so no transform is
    # needed; the relative pose passes straight through.
    affinity_info = dict.fromkeys(
        ['hit_gt_match', 'gt_match_in_proposal',
         'affinity_pred', 'affinity_gt', 'matching'])
    return objects2, rel_tran, rel_rot, affinity_info
def stitch_objects_naive(objects1, objects2, rel_rot, rel_tran):
    """Naive stitching: transform view-1 objects into view 2's frame and
    concatenate both object lists without any matching or merging."""
    rotation = Quaternion(rel_rot)
    for entry in objects1:
        if 'quat' in entry:
            # Quaternion may be stored wrapped in an extra dimension.
            try:
                composed = rotation * Quaternion(entry['quat'])
            except ValueError:
                composed = rotation * Quaternion(entry['quat'][0])
            entry['quat'] = composed.elements
        if 'trans' in entry:
            entry['trans'] = rotation.rotate(entry['trans']) + rel_tran
    # No affinity computation in this baseline.
    affinity_info = {key: None for key in (
        'hit_gt_match', 'gt_match_in_proposal',
        'affinity_pred', 'affinity_gt', 'matching')}
    return objects1 + objects2, rel_tran, rel_rot, affinity_info
def stitch_objects_semantic(objects1, objects2, objects1_gt, objects2_gt, rel_rot, rel_tran, mesh_dir=None, gt_affinity_m=None):
    """
    Category level stitching.

    Collates the per-object 'id' embeddings of both views, predicts an
    affinity matrix between them, and dumps all inputs plus the predicted
    and ground-truth affinity to ``<mesh_dir>/semantic.pkl``.

    NOTE: unlike the other stitch_* helpers, this function returns None —
    it only writes the pickle file for offline processing.
    """
    # collate id embeddings into (num_objects, nz_id) tensors
    id1 = np.zeros((len(objects1), opts.nz_id))
    for idx, obj in enumerate(objects1):
        id1[idx] = obj['id']
    id2 = np.zeros((len(objects2), opts.nz_id))
    for idx, obj in enumerate(objects2):
        id2[idx] = obj['id']
    id1 = torch.FloatTensor(id1)
    id2 = torch.FloatTensor(id2)
    # calculate affinity matrix and matching
    affinity_pred = forward_affinity(id1, id2, mesh_dir=mesh_dir)
    results = {
        'mesh_dir': mesh_dir,
        'objects1': objects1,
        'objects2': objects2,
        'objects1_gt': objects1_gt,
        'objects2_gt': objects2_gt,
        'relpose': {
            'rotation': rel_rot,
            'translation': rel_tran,
        },
        'affinity_pred': affinity_pred,
        'affinity_gt': gt_affinity_m,
    }
    prefix = 'semantic'
    # Context manager guarantees the handle is closed even if pickling fails
    # (the original open/close pair leaked the file on exceptions).
    with open(os.path.join(mesh_dir, prefix + '.pkl'), 'wb') as f:
        pickle.dump(results, f)
def stitch_objects_nms(objects1, objects2, rel_rot, rel_tran):
    """NMS-style stitching: transform view-1 objects into view 2's frame,
    then merge object pairs whose translation, scale, shape IoU, and
    rotation all agree within fixed thresholds; unmerged objects from both
    views are kept as-is.

    Returns (codes, rel_tran, rel_rot, affinity_info); affinity_info is
    all-None because no learned affinity is used here.
    """
    rel_quat = Quaternion(rel_rot)
    for obj in objects1:
        for k in obj.keys():
            if k == 'quat':
                # Quaternion may be stored wrapped in an extra dimension.
                try:
                    quat = rel_quat * Quaternion(obj[k])
                except ValueError:
                    quat = rel_quat * Quaternion(obj[k][0])
                obj[k] = quat.elements
            elif k == 'trans':
                obj[k] = rel_quat.rotate(obj[k]) + rel_tran
    affinity_info = {}
    affinity_info['hit_gt_match'] = None
    affinity_info['gt_match_in_proposal'] = None
    affinity_info['affinity_pred'] = None
    affinity_info['affinity_gt'] = None
    affinity_info['matching'] = None
    # Degenerate case: one view empty — nothing to merge.
    if len(objects1) == 0 or len(objects2) == 0:
        codes = objects1 + objects2
        return codes, rel_tran, rel_rot, affinity_info

    def collect_info(objects):
        """Stack shape/scale/quat/trans fields into parallel arrays."""
        shapes = []
        scales = []
        rots = []
        trans = []
        for obj in objects:
            shapes.append(obj['shape'])
            scales.append(obj['scale'])
            rots.append(obj['quat'])
            trans.append(obj['trans'])
        return np.array(shapes), np.array(scales), np.array(rots), np.array(trans)

    shapes1, scales1, rots1, trans1 = collect_info(objects1)
    shapes2, scales2, rots2, trans2 = collect_info(objects2)
    # Pairwise errors between every view-1 / view-2 object pair.
    err_trans = np.linalg.norm(np.expand_dims(trans1, 1) - np.expand_dims(trans2, 0), axis=2)
    err_scales = np.mean(np.abs(np.expand_dims(np.log(scales1), 1) - np.expand_dims(np.log(scales2), 0)), axis=2)
    err_scales /= np.log(2.0)
    ndt, ngt = err_scales.shape
    err_shapes = err_scales * 0.
    err_rots = err_scales * 0.
    for i in range(ndt):
        for j in range(ngt):
            err_shapes[i, j] = volume_iou(shapes1[i], shapes2[j])
            err_rots[i, j] = metrics.quat_dist(rots1[i], rots2[j])
    # A pair is mergeable only if all four criteria pass.
    ov = []
    ov.append(err_trans < 1.)
    ov.append(err_scales < 0.2)
    ov.append(err_shapes > 0.25)
    ov.append(err_rots < 30)
    _ov = np.all(np.array(ov), 0)
    merge_list = []
    for i in range(ndt):
        for j in range(ngt):
            if _ov[i, j]:
                merge_list.append([i, j])
    # FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24 —
    # use the builtin bool, which NumPy maps to np.bool_.
    merged1 = np.zeros(len(objects1), dtype=bool)
    merged2 = np.zeros(len(objects2), dtype=bool)
    codes = []
    for i, j in merge_list:
        # Keep one of the two merged objects at random.
        obj = random.choice([objects1[i], objects2[j]])
        codes.append(obj)
        merged1[i] = True
        merged2[j] = True
    for i, obj in enumerate(objects1):
        if not merged1[i]:
            codes.append(obj)
    for j, obj in enumerate(objects2):
        if not merged2[j]:
            codes.append(obj)
    return codes, rel_tran, rel_rot, affinity_info
def stitch_objects_affinity(objects1, objects2, rel_rot, rel_tran, mesh_dir=None):
    """Stitch two views using a predicted affinity matrix and a given
    relative pose: greedily match each view-1 object to its best view-2
    candidate (score > 0.5), resolve conflicts by keeping the highest-scoring
    claimant, transform view-1 objects, and average matched pairs.

    Returns (codes, rel_tran, rel_rot, affinity_info).
    """
    # collate id embeddings into (num_objects, nz_id) tensors
    id1 = np.zeros((len(objects1), opts.nz_id))
    for idx, obj in enumerate(objects1):
        id1[idx] = obj['id']
    id2 = np.zeros((len(objects2), opts.nz_id))
    for idx, obj in enumerate(objects2):
        id2[idx] = obj['id']
    id1 = torch.FloatTensor(id1)
    id2 = torch.FloatTensor(id2)
    # calculate affinity matrix and matching
    affinity_pred = forward_affinity(id1, id2, mesh_dir=mesh_dir)
    has_matching = np.max(affinity_pred, axis=1) > 0.5
    matching = np.argmax(affinity_pred, axis=1)
    # Conflict resolution: if several view-1 objects claim the same view-2
    # object, only the one with the highest affinity keeps the match.
    matching = list(matching)
    for i, j in enumerate(matching):
        if not has_matching[i]:
            continue
        if j == -1:
            continue
        boys = [i]
        for m, n in enumerate(matching):
            if i == m:
                continue
            if j == n:
                boys.append(m)
        boy_scores = affinity_pred[:, j][boys]
        boy_idx = boys[np.argmax(boy_scores)]
        for boy in boys:
            if boy != boy_idx:
                matching[boy] = -1
                has_matching[boy] = False
    # stitch accordingly
    codes = []
    rel_quat = Quaternion(rel_rot)
    # FIX: np.bool was removed in NumPy 1.24 — use the builtin bool.
    merged = np.zeros(len(objects2), dtype=bool)
    for i, obj in enumerate(objects1):
        for k in obj.keys():
            if k == 'quat':
                try:
                    quat = rel_quat * Quaternion(obj[k])
                except ValueError:
                    quat = rel_quat * Quaternion(obj[k][0])
                obj[k] = quat.elements
            elif k == 'trans':
                obj[k] = rel_quat.rotate(obj[k]) + rel_tran
        if has_matching[i]:
            j = matching[i]
            # NOTE: if j was already merged, this view-1 object is dropped
            # entirely (the continue skips codes.append) — preserved as-is.
            if merged[j]:
                continue
            mobj = objects2[j]
            merged[j] = True
            obj = merge_codes(obj, mobj, 'avg')
        codes.append(obj)
    # add unmerged objs in objects2 to codes
    for j, obj in enumerate(objects2):
        if not merged[j]:
            codes.append(obj)
    affinity_info = {}
    affinity_info['hit_gt_match'] = None
    affinity_info['gt_match_in_proposal'] = None
    affinity_info['affinity_pred'] = None
    affinity_info['affinity_gt'] = None
    affinity_info['matching'] = None
    return codes, rel_tran, rel_rot, affinity_info
def stitch_objects_af_chamfer(objects1, objects2, rot_logits, tran_logits, mesh_dir=None, gt_affinity_m=None, use_gt_affinity=False):
    """Jointly search over (rotation, translation, matching) proposals and
    pick the combination whose matched object point clouds have the lowest
    average chamfer distance. Matched pairs are then merged (fields averaged),
    view-1 objects are transformed by the chosen pose, and unmerged view-2
    objects are appended.

    Returns (codes, rel_tran, rot, affinity_info).

    NOTE(review): if no matching proposal produces any matched pair,
    ``best_comb`` stays None and the unpack below raises TypeError — the
    sibling stitch_objects_rand_chamfer handles this case explicitly.
    """
    # TOP-k sample
    topk_match = 2
    topk_rot = 5
    topk_tran = 10
    upperbound_match = 32
    # collate id embeddings into (num_objects, nz_id) tensors
    id1 = np.zeros((len(objects1), opts.nz_id))
    for idx, obj in enumerate(objects1):
        id1[idx] = obj['id']
    id2 = np.zeros((len(objects2), opts.nz_id))
    for idx, obj in enumerate(objects2):
        id2[idx] = obj['id']
    id1 = torch.FloatTensor(id1)
    id2 = torch.FloatTensor(id2)
    # calculate affinity matrix and matching
    if not use_gt_affinity:
        affinity_m = forward_affinity(id1, id2, mesh_dir=mesh_dir)
    else:
        affinity_m = gt_affinity_m
    # Top-k view-2 candidates per view-1 object, best first.
    affinity_idx = np.argsort(affinity_m, axis=1)[:, ::-1][:, :topk_match]
    gt_matching_idx = np.argsort(gt_affinity_m, axis=1)[:, ::-1]
    gt_matching_list = get_matching_list(gt_affinity_m, gt_matching_idx, len(objects1))
    matching_list = get_matching_list(affinity_m, affinity_idx, len(objects1))
    # top k pose classes by logit score
    rot_proposals = np.argsort(rot_logits)[::-1][:topk_rot]
    tran_proposals = np.argsort(tran_logits)[::-1][:topk_tran]
    # Cartesian product of per-object candidate lists = matching proposals.
    matching_proposals = itertools.product(*matching_list)
    matching_proposals = list([m for m in matching_proposals])
    if len(matching_proposals) > upperbound_match:
        #matching_proposals = random.sample(matching_proposals, upperbound_match)
        matching_proposals = matching_proposals[:upperbound_match]
    # tqdm presumably comes from the star import of stitch_utils — verify.
    tqdm.write(str(matching_list))
    # pick up the best combination of (rot, tran, matching)
    min_dis = float('inf')
    best_comb = None
    gt_match_in_proposal = False
    for matching in (matching_proposals):
        matching = list(matching)
        # adjust matching: when several view-1 objects claim the same view-2
        # object, keep only the highest-affinity claimant.
        for i, j in enumerate(matching):
            if j == -1:
                continue
            boys = [i]
            for m, n in enumerate(matching):
                if i == m:
                    continue
                if j == n:
                    boys.append(m)
            boy_scores = affinity_m[:, j][boys]
            boy_idx = boys[np.argmax(boy_scores)]
            for boy in boys:
                if boy != boy_idx:
                    matching[boy] = -1
        if (matching == np.array(gt_matching_list).flatten()).all():
            gt_match_in_proposal = True
        # for each matching: gather the matched point-cloud pairs.
        left_objs = []
        corresponding_right_objs = []
        for i, j in enumerate(matching):
            if j == -1:
                continue
            left_objs.append(object_to_pcd(objects1[i]))
            corresponding_right_objs.append(object_to_pcd(objects2[j]))
        assert(len(left_objs) == len(corresponding_right_objs))
        transformation_proposals = []
        for rot_class in rot_proposals:
            for tran_class in tran_proposals:
                rot = class2quat(rot_class)
                tran = class2tran(tran_class)
                transformation_proposals.append({'tran': tran, 'rot': rot})
        assert(len(transformation_proposals) == 50)
        cd_pairs = []
        for pcd1, pcd2 in zip(left_objs, corresponding_right_objs):
            pcd1s = []
            pcd2s = []
            for transformation_proposal in transformation_proposals:
                rot = transformation_proposal['rot']
                tran = transformation_proposal['tran']
                rot_quat = Quaternion(rot).rotation_matrix
                transformed_obj1 = (rot_quat@pcd1.T).T+tran
                if pcd1.shape[0] == 0 or pcd2.shape[0] == 0:
                    # NOTE(review): the raise makes the two sentinel appends
                    # and the continue below unreachable; the sibling
                    # stitch_objects_rand_chamfer uses them instead of raising.
                    raise RuntimeError('pcd1.shape[0] == 0 or pcd2.shape[0] == 0:')
                    pcd1s.append([[1000,1000,1000]])
                    pcd2s.append([[-1000,-1000,-1000]])
                    continue
                pcd1s.append(transformed_obj1)
                pcd2s.append(pcd2)
            cd_pair = chamfer_distance(pcd1s, pcd2s)
            cd_pairs.append(cd_pair.flatten())
            if len(cd_pair) != 50:
                raise RuntimeError('len(cd_pair) != 50')
        if len(cd_pairs) == 0: # no matching
            continue
        best_pair_id, dis = chamfer_metric(cd_pairs, metric="avg")
        if dis < min_dis:
            tran = transformation_proposals[best_pair_id]['tran']
            rot = transformation_proposals[best_pair_id]['rot']
            min_dis = dis
            best_comb = (rot, tran, matching)
    # stitch accordingly
    codes = []
    rot, rel_tran, matching = best_comb
    affinity_info = {}
    affinity_info['hit_gt_match'] = (matching == np.array(gt_matching_list).flatten()).all()
    affinity_info['gt_match_in_proposal'] = gt_match_in_proposal
    save_affinity_after_stitch(affinity_m, len(objects1), len(objects2), matching, mesh_dir)
    #rot = class2quat(rot_class)
    #rel_tran = class2tran(tran_class)
    rel_quat = Quaternion(rot)
    # NOTE(review): np.bool was removed in NumPy 1.24; this line fails on
    # modern NumPy and should become dtype=bool.
    merged = np.zeros(len(objects2), dtype=np.bool)
    for i, obj in enumerate(objects1):
        for k in obj.keys():
            if k == 'quat':
                try:
                    quat = rel_quat * Quaternion(obj[k])
                except ValueError:
                    quat = rel_quat * Quaternion(obj[k][0])
                obj[k] = quat.elements
            elif k == 'trans':
                obj[k] = rel_quat.rotate(obj[k]) + rel_tran
        if matching[i] != -1:
            j = matching[i]
            mobj = objects2[j]
            merged[j] = True
            # average everything
            obj['shape'] = obj['shape'] / 2 + mobj['shape'] / 2
            obj['trans'] = obj['trans'] / 2 + mobj['trans'] / 2
            obj['scale'] = obj['scale'] / 2 + mobj['scale'] / 2
            obj['quat'] = random.choice([obj['quat'], mobj['quat']])
            obj['cmap'] = 'green'
        codes.append(obj)
    # add unmerged objs in objects2 to codes
    for j, obj in enumerate(objects2):
        if not merged[j]:
            codes.append(obj)
    return codes, rel_tran, rot, affinity_info
def stitch_objects_af_matching(objects1, objects2, rel_rot, rel_tran, mesh_dir=None, gt_affinity_m=None):
    """
    Stitch objects using affinity matrix and gt camera pose.

    The relative pose is fixed; only the matching is searched: candidate
    matchings are enumerated from the top-k affinity entries, scored by the
    summed chamfer distance of matched pairs under the given pose, and the
    best one is applied (matched pairs averaged, view-1 transformed).

    NOTE(review): unlike the sibling stitch_* functions this returns only
    ``codes`` (no pose / affinity_info tuple) — verify against callers.
    NOTE(review): if every proposal yields no matched pair, best_comb stays
    None and the unpack below raises TypeError.
    """
    # TOP-k sample
    topk_match = 2
    # topk_rot = 5
    # topk_tran = 5
    upperbound_match = 2000
    # collate id embeddings into (num_objects, nz_id) tensors
    id1 = np.zeros((len(objects1), opts.nz_id))
    for idx, obj in enumerate(objects1):
        id1[idx] = obj['id']
    id2 = np.zeros((len(objects2), opts.nz_id))
    for idx, obj in enumerate(objects2):
        id2[idx] = obj['id']
    id1 = torch.FloatTensor(id1)
    id2 = torch.FloatTensor(id2)
    # calculate affinity matrix and matching
    if gt_affinity_m is None:
        affinity_m = forward_affinity(id1, id2, mesh_dir=mesh_dir)
    else:
        affinity_m = gt_affinity_m
    affinity_idx = np.argsort(affinity_m, axis=1)[:, ::-1][:, :topk_match]
    # top k candidates per view-1 object; -1 means "no match".
    matching_list = []
    for i in range(len(objects1)):
        options = []
        for j in affinity_idx[i]:
            if affinity_m[i][j] <= 0.5:
                continue
            options.append(j)
        if len(options) == 0:
            options.append(-1)
        matching_list.append(options)
    matching_proposals = itertools.product(*matching_list)
    matching_proposals = list([m for m in matching_proposals])
    if len(matching_proposals) > upperbound_match:
        matching_proposals = random.sample(matching_proposals, upperbound_match)
    # tqdm presumably comes from the star import of stitch_utils — verify.
    tqdm.write(str(matching_list))
    # pick up the best combination of (matching)
    min_dis = float('inf')
    best_comb = None
    rot = rel_rot
    tran = rel_tran
    rot_quat = Quaternion(rot)
    for matching in matching_proposals:
        matching = list(matching)
        # adjust matching: resolve multiple claims on the same view-2 object.
        for i, j in enumerate(matching):
            if j == -1:
                continue
            boys = [i]
            for m, n in enumerate(matching):
                if i == m:
                    continue
                if j == n:
                    boys.append(m)
            boy_scores = affinity_m[:, j][boys]
            boy_idx = boys[np.argmax(boy_scores)]
            for boy in boys:
                if boy != boy_idx:
                    # NOTE(review): siblings write matching[boy] = -1 here;
                    # clearing matching[i] instead looks like a bug — confirm.
                    matching[i] = -1
        dis = 0
        diss = []
        for i, j in enumerate(matching):
            if j == -1:
                continue
            transformed_obj1 = deepcopy(objects1[i])
            quat = rot_quat * Quaternion(transformed_obj1['quat'])
            transformed_obj1['quat'] = quat.elements
            transformed_obj1['trans'] = rot_quat.rotate(transformed_obj1['trans']) + tran
            pcd1 = object_to_pcd(transformed_obj1)
            pcd2 = object_to_pcd(objects2[j])
            if pcd1.shape[0] == 0 or pcd2.shape[0] == 0:
                # Penalty distance for empty point clouds.
                diss.append(400.0)
                continue
            #dis += chamfer_distance(pcd1, pcd2)
            diss.append(chamfer_distance(pcd1, pcd2))
        dis = sum(diss)
        if dis < min_dis:
            min_dis = dis
            best_comb = (rot, tran, matching)
    # stitch accordingly
    codes = []
    rot, rel_tran, matching = best_comb
    rel_quat = Quaternion(rot)
    # NOTE(review): np.bool was removed in NumPy 1.24; should be dtype=bool.
    merged = np.zeros(len(objects2), dtype=np.bool)
    for i, obj in enumerate(objects1):
        for k in obj.keys():
            if k == 'quat':
                try:
                    quat = rel_quat * Quaternion(obj[k])
                except ValueError:
                    quat = rel_quat * Quaternion(obj[k][0])
                obj[k] = quat.elements
            elif k == 'trans':
                obj[k] = rel_quat.rotate(obj[k]) + rel_tran
        if matching[i] != -1:
            j = matching[i]
            mobj = objects2[j]
            merged[j] = True
            # average everything
            obj['shape'] = obj['shape'] / 2 + mobj['shape'] / 2
            obj['trans'] = obj['trans'] / 2 + mobj['trans'] / 2
            obj['scale'] = obj['scale'] / 2 + mobj['scale'] / 2
            obj['quat'] = random.choice([obj['quat'], mobj['quat']])
            # obj['quat'] = mean_quaternion(obj['quat'], mobj['quat'])
            # set up mixed colormap
            obj['cmap'] = 'green'
        codes.append(obj)
    # add unmerged objs in objects2 to codes
    for j, obj in enumerate(objects2):
        if not merged[j]:
            codes.append(obj)
    return codes
def stitch_objects_rand_chamfer(objects1, objects2, rot_logits, tran_logits, mesh_dir=None, gt_affinity_m=None, use_gt_affinity=False):
    """Randomized joint search over (rotation, translation, matching):
    sample random valid matchings from the top-k affinity candidates, score
    each against the top-k pose proposals by average chamfer distance plus a
    no-match penalty, and stitch with the best combination. Falls back to the
    argmax pose with no matches when no proposal yields a matched pair.

    Returns (codes, rel_tran, rot, affinity_info).
    """
    # lambdas
    lambda_nomatch = 1
    # TOP-k sample
    topk_match = 2
    topk_rot = 5
    topk_tran = 10
    upperbound_match = 32
    # collate id embeddings into (num_objects, nz_id) tensors
    id1 = np.zeros((len(objects1), opts.nz_id))
    for idx, obj in enumerate(objects1):
        id1[idx] = obj['id']
    id2 = np.zeros((len(objects2), opts.nz_id))
    for idx, obj in enumerate(objects2):
        id2[idx] = obj['id']
    id1 = torch.FloatTensor(id1)
    id2 = torch.FloatTensor(id2)
    # calculate affinity matrix and matching
    if not use_gt_affinity:
        affinity_m = forward_affinity(id1, id2, mesh_dir=mesh_dir)
    else:
        affinity_m = gt_affinity_m
    affinity_idx = np.argsort(affinity_m, axis=1)[:, ::-1][:, :topk_match]
    gt_matching_idx = np.argsort(gt_affinity_m, axis=1)[:, ::-1]
    gt_matching_list = get_matching_list(gt_affinity_m, gt_matching_idx, len(objects1))
    # Per-object candidate list: top-k view-2 indices with affinity > 0.5.
    af_options = []
    for i in range(len(objects1)):
        options = []
        for j in affinity_idx[i]:
            if affinity_m[i][j] <= 0.5:
                continue
            options.append(j)
        af_options.append(options)
    # top k pose classes by logit score
    rot_proposals = np.argsort(rot_logits)[::-1][:topk_rot]
    tran_proposals = np.argsort(tran_logits)[::-1][:topk_tran]
    # Sample random one-to-one matchings (-1 = no match).
    matching_proposals = []
    gt_match_in_proposal = False
    for _ in range(upperbound_match):
        matching = []
        num_nomatch = 0
        for i in range(len(objects1)):
            options = []
            for op in af_options[i]:
                if op not in matching:
                    options.append(int(op))
            options.append(-1)
            m = random.choice(options)
            if m == -1:
                num_nomatch += 1
            matching.append(m)
        # check matching is valid
        if not is_valid_matching(matching):
            raise RuntimeError('invalid matching')
        # check if gt_matching_list in matching_proposals
        if (matching == np.array(gt_matching_list).flatten()).all():
            gt_match_in_proposal = True
        matching_proposals.append([matching, num_nomatch])
    # pick up the best combination of (rot, tran, matching)
    min_loss = float('inf')
    best_comb = None
    for matching, num_nomatch in (matching_proposals):
        # for each matching: gather the matched point-cloud pairs
        left_objs = []
        corresponding_right_objs = []
        for i, j in enumerate(matching):
            if j == -1:
                continue
            left_objs.append(object_to_pcd(objects1[i]))
            corresponding_right_objs.append(object_to_pcd(objects2[j]))
        assert(len(left_objs) == len(corresponding_right_objs))
        transformation_proposals = []
        for rot_class in rot_proposals:
            for tran_class in tran_proposals:
                rot = class2quat(rot_class)
                tran = class2tran(tran_class)
                transformation_proposals.append({'tran': tran, 'rot': rot})
        assert(len(transformation_proposals) == 50)
        cd_pairs = []
        for pcd1, pcd2 in zip(left_objs, corresponding_right_objs):
            pcd1s = []
            pcd2s = []
            for transformation_proposal in transformation_proposals:
                rot = transformation_proposal['rot']
                tran = transformation_proposal['tran']
                rot_quat = Quaternion(rot).rotation_matrix
                transformed_obj1 = (rot_quat@pcd1.T).T+tran
                if pcd1.shape[0] == 0 or pcd2.shape[0] == 0:
                    # Sentinel far-apart points yield a large chamfer distance.
                    pcd1s.append([[1000,1000,1000]])
                    pcd2s.append([[-1000,-1000,-1000]])
                    continue
                pcd1s.append(transformed_obj1)
                pcd2s.append(pcd2)
            cd_pair = chamfer_distance(pcd1s, pcd2s)
            cd_pairs.append(cd_pair.flatten())
            if len(cd_pair) != 50:
                raise RuntimeError('len(cd_pair) != 50:')
        if len(cd_pairs) == 0: # no matching
            continue
        best_pair_id, dis = chamfer_metric(cd_pairs, metric="avg")
        # FIX: the original added `lambda_1 * tran_logits` here — lambda_1 is
        # undefined (NameError on first use) and tran_logits is a whole logits
        # array, not a scalar; the scalar loss below matches the sibling
        # stitch_objects_rand_chamfer_prob's commented-out baseline.
        loss = dis + lambda_nomatch * num_nomatch
        if loss < min_loss:
            tran = transformation_proposals[best_pair_id]['tran']
            rot = transformation_proposals[best_pair_id]['rot']
            min_loss = loss
            best_comb = (rot, tran, matching)
    if best_comb is None:
        # if there is no matching at all, we should trust relative pose.
        matching = [-1 for _ in range(len(objects1))]
        rot_class = rot_logits.argmax()
        tran_class = tran_logits.argmax()
        rot = class2quat(rot_class)
        rel_tran = class2tran(tran_class)
    else:
        rot, rel_tran, matching = best_comb
    # collect affinity info
    affinity_info = {}
    affinity_info['hit_gt_match'] = (matching == np.array(gt_matching_list).flatten()).all()
    affinity_info['gt_match_in_proposal'] = gt_match_in_proposal
    affinity_info['affinity_pred'] = affinity_m
    affinity_info['affinity_gt'] = gt_affinity_m
    affinity_info['matching'] = matching
    save_affinity_after_stitch(affinity_m, len(objects1), len(objects2), matching, mesh_dir)
    # stitch accordingly
    codes = []
    rel_quat = Quaternion(rot)
    # FIX: np.bool was removed in NumPy 1.24 — use the builtin bool.
    merged = np.zeros(len(objects2), dtype=bool)
    for i, obj in enumerate(objects1):
        for k in obj.keys():
            if k == 'quat':
                try:
                    quat = rel_quat * Quaternion(obj[k])
                except ValueError:
                    quat = rel_quat * Quaternion(obj[k][0])
                obj[k] = quat.elements
            elif k == 'trans':
                obj[k] = rel_quat.rotate(obj[k]) + rel_tran
        if matching[i] != -1:
            j = matching[i]
            mobj = objects2[j]
            merged[j] = True
            obj = merge_codes(obj, mobj, 'avg')
        codes.append(obj)
    # add unmerged objs in objects2 to codes
    for j, obj in enumerate(objects2):
        if not merged[j]:
            codes.append(obj)
    return codes, rel_tran, rot, affinity_info
def stitch_objects_rand_chamfer_prob(objects1, objects2, rot_logits, tran_logits, mesh_dir=None, gt_affinity_m=None, use_gt_affinity=False):
    """Probability-weighted variant of stitch_objects_rand_chamfer: randomly
    sampled matchings are scored by chamfer distance plus weighted penalties
    for no-matches, low pose-proposal probability, and low affinity scores.

    Returns (codes, rel_tran, rot, affinity_info).
    """
    # lambdas: weights of the individual loss terms
    lambda_nomatch = 3
    lambda_rots = 5
    lambda_trans = 1
    lambda_af = 10
    # TOP-k sample
    topk_match = 2
    topk_rot = 3
    topk_tran = 10
    upperbound_match = 128
    #thres = 0.75
    thres = 0.5
    # collate id embeddings into (num_objects, nz_id) tensors
    id1 = np.zeros((len(objects1), opts.nz_id))
    for idx, obj in enumerate(objects1):
        id1[idx] = obj['id']
    id2 = np.zeros((len(objects2), opts.nz_id))
    for idx, obj in enumerate(objects2):
        id2[idx] = obj['id']
    id1 = torch.FloatTensor(id1)
    id2 = torch.FloatTensor(id2)
    # calculate affinity matrix and matching
    if not use_gt_affinity:
        affinity_m = forward_affinity(id1, id2, mesh_dir=mesh_dir)
    else:
        affinity_m = gt_affinity_m
    affinity_idx = np.argsort(affinity_m, axis=1)[:, ::-1][:, :topk_match]
    gt_matching_idx = np.argsort(gt_affinity_m, axis=1)[:, ::-1]
    gt_matching_list = get_matching_list(gt_affinity_m, gt_matching_idx, len(objects1))
    # Per-object candidate list: top-k view-2 indices above the threshold.
    af_options = []
    for i in range(len(objects1)):
        options = []
        for j in affinity_idx[i]:
            if affinity_m[i][j] <= thres:
                continue
            options.append(j)
        af_options.append(options)
    # top k pose classes and their probabilities
    rot_proposals = np.argsort(rot_logits)[::-1][:topk_rot]
    rot_proposals_prob = rot_logits[rot_proposals]
    tran_proposals = np.argsort(tran_logits)[::-1][:topk_tran]
    tran_proposals_prob = tran_logits[tran_proposals]
    # Sample random one-to-one matchings (-1 = no match).
    matching_proposals = []
    gt_match_in_proposal = False
    for _ in range(upperbound_match):
        matching = []
        num_nomatch = 0
        for i in range(len(objects1)):
            options = []
            for op in af_options[i]:
                if op not in matching:
                    options.append(int(op))
            options.append(-1)
            m = random.choice(options)
            if m == -1:
                num_nomatch += 1
            matching.append(m)
        # check matching is valid
        if not is_valid_matching(matching):
            raise RuntimeError('invalid matching')
        # compute scores of the matched pairs
        scores = []
        for i, j in enumerate(matching):
            if j == -1:
                continue
            scores.append(affinity_m[i, j])
        scores = np.array(scores)
        # check if gt_matching_list in matching_proposals
        if (matching == np.array(gt_matching_list).flatten()).all():
            gt_match_in_proposal = True
        matching_proposals.append([matching, num_nomatch, scores])
    # pick up the best combination of (rot, tran, matching)
    min_loss = float('inf')
    best_comb = None
    for matching, num_nomatch, scores in (matching_proposals):
        # for each matching: gather the matched point-cloud pairs
        left_objs = []
        corresponding_right_objs = []
        for i, j in enumerate(matching):
            if j == -1:
                continue
            left_objs.append(object_to_pcd(objects1[i]))
            corresponding_right_objs.append(object_to_pcd(objects2[j]))
        assert(len(left_objs) == len(corresponding_right_objs))
        transformation_proposals = []
        transformation_probs = {'tran':[], 'rot':[]}
        for rot_class, rot_prob in zip(rot_proposals, rot_proposals_prob):
            for tran_class, tran_prob in zip(tran_proposals, tran_proposals_prob):
                rot = class2quat(rot_class)
                tran = class2tran(tran_class)
                transformation_proposals.append({'tran': tran, 'rot': rot})
                transformation_probs['tran'].append(tran_prob)
                transformation_probs['rot'].append(rot_prob)
        for key in transformation_probs.keys():
            transformation_probs[key] = np.array(transformation_probs[key])
        assert(len(transformation_proposals) == (topk_rot * topk_tran))
        cd_pairs = []
        for pcd1, pcd2 in zip(left_objs, corresponding_right_objs):
            pcd1s = []
            pcd2s = []
            for transformation_proposal in transformation_proposals:
                rot = transformation_proposal['rot']
                tran = transformation_proposal['tran']
                rot_quat = Quaternion(rot).rotation_matrix
                transformed_obj1 = (rot_quat@pcd1.T).T+tran
                if pcd1.shape[0] == 0 or pcd2.shape[0] == 0:
                    # Sentinel far-apart points yield a large chamfer distance.
                    pcd1s.append([[1000,1000,1000]])
                    pcd2s.append([[-1000,-1000,-1000]])
                    continue
                pcd1s.append(transformed_obj1)
                pcd2s.append(pcd2)
            cd_pair = chamfer_distance(pcd1s, pcd2s)
            cd_pairs.append(cd_pair.flatten())
            if len(cd_pair) != (topk_rot * topk_tran):
                raise RuntimeError('len(cd_pair) != topk_rot * topk_tran:')
            pass
        if len(cd_pairs) == 0: # no matching
            continue
        #best_pair_id, dis = chamfer_metric(cd_pairs, metric="avg")
        # NOTE(review): here chamfer_metric's result is used as a per-proposal
        # cost array, while the siblings unpack it as (best_pair_id, dis) —
        # confirm the function supports both call patterns.
        cd_cost = chamfer_metric(cd_pairs, metric='avg')
        assert(len(cd_cost) == len(transformation_proposals))
        # NOTE(review): scores cannot be empty here (cd_pairs non-empty
        # implies at least one match), so .mean() is well-defined.
        losses = cd_cost + lambda_nomatch * num_nomatch \
            + lambda_trans * (1 - transformation_probs['tran']) \
            + lambda_rots * (1 - transformation_probs['rot']) \
            + lambda_af * (1 - scores).mean()
        #loss = dis + lambda_nomatch * num_nomatch
        loss = losses.min()
        if loss < min_loss:
            best_pair_id = losses.argmin()
            tran = transformation_proposals[best_pair_id]['tran']
            rot = transformation_proposals[best_pair_id]['rot']
            min_loss = loss
            best_comb = (rot, tran, matching)
    if best_comb is None:
        # if there is no matching at all, we should trust relative pose.
        matching = [-1 for _ in range(len(objects1))]
        rot_class = rot_logits.argmax()
        tran_class = tran_logits.argmax()
        rot = class2quat(rot_class)
        rel_tran = class2tran(tran_class)
    else:
        rot, rel_tran, matching = best_comb
    # collect affinity info
    affinity_info = {}
    affinity_info['hit_gt_match'] = (matching == np.array(gt_matching_list).flatten()).all()
    affinity_info['gt_match_in_proposal'] = gt_match_in_proposal
    affinity_info['affinity_pred'] = affinity_m
    affinity_info['affinity_gt'] = gt_affinity_m
    affinity_info['matching'] = matching
    save_affinity_after_stitch(affinity_m, len(objects1), len(objects2), matching, mesh_dir)
    # stitch accordingly
    codes = []
    rel_quat = Quaternion(rot)
    # NOTE(review): np.bool was removed in NumPy 1.24; should be dtype=bool.
    merged = np.zeros(len(objects2), dtype=np.bool)
    for i, obj in enumerate(objects1):
        for k in obj.keys():
            if k == 'quat':
                try:
                    quat = rel_quat * Quaternion(obj[k])
                except ValueError:
                    quat = rel_quat * Quaternion(obj[k][0])
                obj[k] = quat.elements
            elif k == 'trans':
                obj[k] = rel_quat.rotate(obj[k]) + rel_tran
        if matching[i] != -1:
            j = matching[i]
            mobj = objects2[j]
            merged[j] = True
            obj = merge_codes(obj, mobj, 'avg')
        codes.append(obj)
    # add unmerged objs in objects2 to codes
    for j, obj in enumerate(objects2):
        if not merged[j]:
            codes.append(obj)
    return codes, rel_tran, rot, affinity_info
def stitch_objects_asso_shape(objects1, objects2, rot_logits, tran_logits, tester, mesh_dir=None, gt_affinity_m=None, use_gt_affinity=False):
    """
    stitch_objects_rand_chamfer_prob + average shape space.

    Randomly samples candidate object matchings guided by the affinity
    matrix, scores every (rotation, translation, matching) combination with
    a chamfer-distance-based loss, then stitches the two object sets under
    the best combination, merging matched objects with the 'avg_shape'
    strategy (latent shape averaging via ``tester``).

    :param objects1: list of object code dicts from view 1 ('quat'/'trans'
        entries are transformed in place into view 2's frame)
    :param objects2: list of object code dicts from view 2
    :param rot_logits: 1-D array of scores over rotation classes
    :param tran_logits: 1-D array of scores over translation classes
    :param tester: model wrapper forwarded to merge_codes for shape averaging
    :param mesh_dir: output directory for the affinity visualization
    :param gt_affinity_m: ground-truth affinity matrix (bookkeeping; also the
        affinity source when use_gt_affinity is True)
    :param use_gt_affinity: use gt_affinity_m instead of the predicted one
    :return: (codes, rel_tran, rot, affinity_info)
    """
    # loss weights
    lambda_nomatch = 1
    lambda_rots = 5
    lambda_trans = 1
    lambda_af = 5
    # top-k sampling sizes
    topk_match = 2
    topk_rot = 3
    topk_tran = 10
    upperbound_match = 64  # number of random matching proposals
    thres = 0.5  # minimum affinity for a candidate match
    # collate object identity embeddings
    id1 = np.zeros((len(objects1), opts.nz_id))
    for idx, obj in enumerate(objects1):
        id1[idx] = obj['id']
    id2 = np.zeros((len(objects2), opts.nz_id))
    for idx, obj in enumerate(objects2):
        id2[idx] = obj['id']
    id1 = torch.FloatTensor(id1)
    id2 = torch.FloatTensor(id2)
    # predicted (or ground-truth) affinity matrix
    if not use_gt_affinity:
        affinity_m = forward_affinity(id1, id2, mesh_dir=mesh_dir)
    else:
        affinity_m = gt_affinity_m
    # per-row top-k candidate matches, descending affinity
    affinity_idx = np.argsort(affinity_m, axis=1)[:, ::-1][:, :topk_match]
    gt_matching_idx = np.argsort(gt_affinity_m, axis=1)[:, ::-1]
    gt_matching_list = get_matching_list(gt_affinity_m, gt_matching_idx, len(objects1))
    # candidate right-objects for each left-object, filtered by threshold
    af_options = []
    for i in range(len(objects1)):
        options = []
        for j in affinity_idx[i]:
            if affinity_m[i][j] <= thres:
                continue
            options.append(j)
        af_options.append(options)
    # top-k pose proposals
    rot_proposals = np.argsort(rot_logits)[::-1][:topk_rot]
    rot_proposals_prob = rot_logits[rot_proposals]
    tran_proposals = np.argsort(tran_logits)[::-1][:topk_tran]
    tran_proposals_prob = tran_logits[tran_proposals]
    # randomly sample matching proposals (-1 means "no match")
    matching_proposals = []
    gt_match_in_proposal = False
    for _ in range(upperbound_match):
        matching = []
        num_nomatch = 0
        for i in range(len(objects1)):
            options = []
            for op in af_options[i]:
                if op not in matching:  # keep the matching injective
                    options.append(int(op))
            options.append(-1)
            m = random.choice(options)
            if m == -1:
                num_nomatch += 1
            matching.append(m)
        if not is_valid_matching(matching):
            raise RuntimeError('invalid matching')
        # affinity scores of the matched pairs
        scores = []
        for i, j in enumerate(matching):
            if j == -1:
                continue
            scores.append(affinity_m[i, j])
        scores = np.array(scores)
        # record whether the ground-truth matching was sampled
        if (matching == np.array(gt_matching_list).flatten()).all():
            gt_match_in_proposal = True
        matching_proposals.append([matching, num_nomatch, scores])
    # pick the best combination of (rot, tran, matching)
    min_loss = float('inf')
    best_comb = None
    for matching, num_nomatch, scores in matching_proposals:
        left_objs = []
        corresponding_right_objs = []
        for i, j in enumerate(matching):
            if j == -1:
                continue
            left_objs.append(object_to_pcd(objects1[i]))
            corresponding_right_objs.append(object_to_pcd(objects2[j]))
        assert(len(left_objs) == len(corresponding_right_objs))
        # cartesian product of rotation and translation proposals
        transformation_proposals = []
        transformation_probs = {'tran': [], 'rot': []}
        for rot_class, rot_prob in zip(rot_proposals, rot_proposals_prob):
            for tran_class, tran_prob in zip(tran_proposals, tran_proposals_prob):
                rot = class2quat(rot_class)
                tran = class2tran(tran_class)
                transformation_proposals.append({'tran': tran, 'rot': rot})
                transformation_probs['tran'].append(tran_prob)
                transformation_probs['rot'].append(rot_prob)
        for key in transformation_probs.keys():
            transformation_probs[key] = np.array(transformation_probs[key])
        assert(len(transformation_proposals) == (topk_rot * topk_tran))
        # chamfer distance of every matched pair under every proposal
        cd_pairs = []
        for pcd1, pcd2 in zip(left_objs, corresponding_right_objs):
            pcd1s = []
            pcd2s = []
            for transformation_proposal in transformation_proposals:
                rot = transformation_proposal['rot']
                tran = transformation_proposal['tran']
                rot_quat = Quaternion(rot).rotation_matrix
                transformed_obj1 = (rot_quat @ pcd1.T).T + tran
                if pcd1.shape[0] == 0 or pcd2.shape[0] == 0:
                    # empty cloud: substitute far-apart dummy points so the
                    # pair contributes a large but finite distance
                    pcd1s.append([[1000, 1000, 1000]])
                    pcd2s.append([[-1000, -1000, -1000]])
                    continue
                pcd1s.append(transformed_obj1)
                pcd2s.append(pcd2)
            cd_pair = chamfer_distance(pcd1s, pcd2s)
            cd_pairs.append(cd_pair.flatten())
            if len(cd_pair) != (topk_rot * topk_tran):
                raise RuntimeError('len(cd_pair) != topk_rot * topk_tran:')
        if len(cd_pairs) == 0:  # no matching
            continue
        cd_cost = chamfer_metric(cd_pairs, metric='avg')
        assert(len(cd_cost) == len(transformation_proposals))
        # combined loss: chamfer cost + penalties for unmatched objects,
        # unlikely poses, and low-affinity matches
        losses = cd_cost + lambda_nomatch * num_nomatch \
            + lambda_trans * (1 - transformation_probs['tran']) \
            + lambda_rots * (1 - transformation_probs['rot']) \
            + lambda_af * (1 - scores).mean()
        loss = losses.min()
        if loss < min_loss:
            best_pair_id = losses.argmin()
            tran = transformation_proposals[best_pair_id]['tran']
            rot = transformation_proposals[best_pair_id]['rot']
            min_loss = loss
            best_comb = (rot, tran, matching)
    if best_comb is None:
        # if there is no matching at all, we should trust relative pose.
        matching = [-1 for _ in range(len(objects1))]
        rot_class = rot_logits.argmax()
        tran_class = tran_logits.argmax()
        rot = class2quat(rot_class)
        rel_tran = class2tran(tran_class)
    else:
        rot, rel_tran, matching = best_comb
    # collect affinity diagnostics
    affinity_info = {}
    affinity_info['hit_gt_match'] = (matching == np.array(gt_matching_list).flatten()).all()
    affinity_info['gt_match_in_proposal'] = gt_match_in_proposal
    affinity_info['affinity_pred'] = affinity_m
    affinity_info['affinity_gt'] = gt_affinity_m
    affinity_info['matching'] = matching
    save_affinity_after_stitch(affinity_m, len(objects1), len(objects2), matching, mesh_dir)
    # stitch: transform objects1 into objects2's frame and merge matches
    codes = []
    rel_quat = Quaternion(rot)
    # fix: np.bool was removed in NumPy >= 1.24; plain bool is equivalent
    merged = np.zeros(len(objects2), dtype=bool)
    for i, obj in enumerate(objects1):
        for k in obj.keys():
            if k == 'quat':
                try:
                    quat = rel_quat * Quaternion(obj[k])
                except ValueError:
                    # some codes store the quaternion nested one level deeper
                    quat = rel_quat * Quaternion(obj[k][0])
                obj[k] = quat.elements
            elif k == 'trans':
                obj[k] = rel_quat.rotate(obj[k]) + rel_tran
        if matching[i] != -1:
            j = matching[i]
            mobj = objects2[j]
            merged[j] = True
            # average in latent shape space -- the difference from the
            # plain-'avg' variant of this function
            obj = merge_codes(obj, mobj, 'avg_shape', tester=tester)
        codes.append(obj)
    # add unmerged objs in objects2 to codes
    for j, obj in enumerate(objects2):
        if not merged[j]:
            codes.append(obj)
    return codes, rel_tran, rot, affinity_info
def stitch_objects_rand_chamfer_ambiguity(objects1, objects2, rot_logits, tran_logits, mesh_dir=None, gt_affinity_m=None, use_gt_affinity=False):
    """
    Ambiguity-aware variant of random-chamfer stitching.

    Instead of committing to the single best (rotation, translation,
    matching) combination, this ranks all sampled combinations by loss and
    returns up to 10 stitched scene alternatives whose losses are spaced by
    at least 0.1, so that ambiguous scenes yield several plausible options.

    :param objects1: list of object code dicts from view 1 (deep-copied
        before transformation, so inputs are not mutated)
    :param objects2: list of object code dicts from view 2
    :param rot_logits: 1-D array of scores over rotation classes
    :param tran_logits: 1-D array of scores over translation classes
    :param mesh_dir: directory forwarded to forward_affinity
    :param gt_affinity_m: ground-truth affinity matrix (also the affinity
        source when use_gt_affinity is True)
    :param use_gt_affinity: use gt_affinity_m instead of the predicted one
    :return: list of stitched code lists, best-loss first
    """
    # loss weights
    lambda_nomatch = 1
    lambda_rots = 5
    lambda_trans = 1
    lambda_af = 5
    # top-k sampling sizes
    topk_match = 2
    topk_rot = 3
    topk_tran = 10
    upperbound_match = 64  # number of random matching proposals
    thres = 0.5  # minimum affinity for a candidate match
    # collate object identity embeddings
    id1 = np.zeros((len(objects1), opts.nz_id))
    for idx, obj in enumerate(objects1):
        id1[idx] = obj['id']
    id2 = np.zeros((len(objects2), opts.nz_id))
    for idx, obj in enumerate(objects2):
        id2[idx] = obj['id']
    id1 = torch.FloatTensor(id1)
    id2 = torch.FloatTensor(id2)
    # predicted (or ground-truth) affinity matrix
    if not use_gt_affinity:
        affinity_m = forward_affinity(id1, id2, mesh_dir=mesh_dir)
    else:
        affinity_m = gt_affinity_m
    # per-row top-k candidate matches, descending affinity
    affinity_idx = np.argsort(affinity_m, axis=1)[:, ::-1][:, :topk_match]
    gt_matching_idx = np.argsort(gt_affinity_m, axis=1)[:, ::-1]
    gt_matching_list = get_matching_list(gt_affinity_m, gt_matching_idx, len(objects1))
    # candidate right-objects for each left-object, filtered by threshold
    af_options = []
    for i in range(len(objects1)):
        options = []
        for j in affinity_idx[i]:
            if affinity_m[i][j] <= thres:
                continue
            options.append(j)
        af_options.append(options)
    # top-k pose proposals
    rot_proposals = np.argsort(rot_logits)[::-1][:topk_rot]
    rot_proposals_prob = rot_logits[rot_proposals]
    tran_proposals = np.argsort(tran_logits)[::-1][:topk_tran]
    tran_proposals_prob = tran_logits[tran_proposals]
    # randomly sample matching proposals (-1 means "no match")
    matching_proposals = []
    gt_match_in_proposal = False
    for _ in range(upperbound_match):
        matching = []
        num_nomatch = 0
        for i in range(len(objects1)):
            options = []
            for op in af_options[i]:
                if op not in matching:  # keep the matching injective
                    options.append(int(op))
            options.append(-1)
            m = random.choice(options)
            if m == -1:
                num_nomatch += 1
            matching.append(m)
        if not is_valid_matching(matching):
            raise RuntimeError('invalid matching')
        # affinity scores of the matched pairs
        scores = []
        for i, j in enumerate(matching):
            if j == -1:
                continue
            scores.append(affinity_m[i, j])
        scores = np.array(scores)
        # record whether the ground-truth matching was sampled
        if (matching == np.array(gt_matching_list).flatten()).all():
            gt_match_in_proposal = True
        matching_proposals.append([matching, num_nomatch, scores])
    # score every (rot, tran, matching) combination and keep them all
    combs = []
    for matching, num_nomatch, scores in matching_proposals:
        left_objs = []
        corresponding_right_objs = []
        for i, j in enumerate(matching):
            if j == -1:
                continue
            left_objs.append(object_to_pcd(objects1[i]))
            corresponding_right_objs.append(object_to_pcd(objects2[j]))
        assert(len(left_objs) == len(corresponding_right_objs))
        # cartesian product of rotation and translation proposals
        transformation_proposals = []
        transformation_probs = {'tran': [], 'rot': []}
        for rot_class, rot_prob in zip(rot_proposals, rot_proposals_prob):
            for tran_class, tran_prob in zip(tran_proposals, tran_proposals_prob):
                rot = class2quat(rot_class)
                tran = class2tran(tran_class)
                transformation_proposals.append({'tran': tran, 'rot': rot})
                transformation_probs['tran'].append(tran_prob)
                transformation_probs['rot'].append(rot_prob)
        for key in transformation_probs.keys():
            transformation_probs[key] = np.array(transformation_probs[key])
        assert(len(transformation_proposals) == (topk_rot * topk_tran))
        # chamfer distance of every matched pair under every proposal
        cd_pairs = []
        for pcd1, pcd2 in zip(left_objs, corresponding_right_objs):
            pcd1s = []
            pcd2s = []
            for transformation_proposal in transformation_proposals:
                rot = transformation_proposal['rot']
                tran = transformation_proposal['tran']
                rot_quat = Quaternion(rot).rotation_matrix
                transformed_obj1 = (rot_quat @ pcd1.T).T + tran
                if pcd1.shape[0] == 0 or pcd2.shape[0] == 0:
                    # empty cloud: substitute far-apart dummy points so the
                    # pair contributes a large but finite distance
                    pcd1s.append([[1000, 1000, 1000]])
                    pcd2s.append([[-1000, -1000, -1000]])
                    continue
                pcd1s.append(transformed_obj1)
                pcd2s.append(pcd2)
            cd_pair = chamfer_distance(pcd1s, pcd2s)
            cd_pairs.append(cd_pair.flatten())
            if len(cd_pair) != (topk_rot * topk_tran):
                raise RuntimeError('len(cd_pair) != topk_rot * topk_tran:')
        if len(cd_pairs) == 0:  # no matching
            continue
        cd_cost = chamfer_metric(cd_pairs, metric='avg')
        assert(len(cd_cost) == len(transformation_proposals))
        # combined loss: chamfer cost + penalties for unmatched objects,
        # unlikely poses, and low-affinity matches
        losses = cd_cost + lambda_nomatch * num_nomatch \
            + lambda_trans * (1 - transformation_probs['tran']) \
            + lambda_rots * (1 - transformation_probs['rot']) \
            + lambda_af * (1 - scores).mean()
        for i, loss in enumerate(losses):
            comb = {
                'tran': transformation_proposals[i]['tran'],
                'rot': transformation_proposals[i]['rot'],
                'loss': loss,
                'matching': matching
            }
            combs.append(comb)
    combs.sort(key=lambda comb: comb['loss'])
    # keep up to 10 combinations whose losses are at least 0.1 apart
    final_combs = []
    cur_loss = -1
    for comb in combs:
        if comb['loss'] - cur_loss > 1e-1:
            final_combs.append(comb)
            # fix: advance the threshold; cur_loss was previously never
            # updated, so near-identical combinations were not filtered out
            cur_loss = comb['loss']
        if len(final_combs) >= 10:
            break
    # stitch one scene per kept combination
    codes_options = []
    for comb in final_combs:
        rot = comb['rot']
        rel_tran = comb['tran']
        matching = comb['matching']
        codes = []
        rel_quat = Quaternion(rot)
        # fix: np.bool was removed in NumPy >= 1.24; plain bool is equivalent
        merged = np.zeros(len(objects2), dtype=bool)
        for i, obj_cp in enumerate(objects1):
            obj = deepcopy(obj_cp)  # keep inputs pristine across options
            for k in obj.keys():
                if k == 'quat':
                    try:
                        quat = rel_quat * Quaternion(obj[k])
                    except ValueError:
                        # some codes store the quaternion nested one level deeper
                        quat = rel_quat * Quaternion(obj[k][0])
                    obj[k] = quat.elements
                elif k == 'trans':
                    obj[k] = rel_quat.rotate(obj[k]) + rel_tran
            if matching[i] != -1:
                j = matching[i]
                mobj = deepcopy(objects2[j])
                merged[j] = True
                obj = merge_codes(obj, mobj, 'avg')
            codes.append(obj)
        # add unmerged objs in objects2 to codes
        for j, obj in enumerate(objects2):
            if not merged[j]:
                codes.append(deepcopy(obj))
        codes_options.append(codes)
    return codes_options
def generate_volume_trans(objects1, objects2, rel_rot, rel_tran, mesh_dir=None):
    """
    Render both object sets to (volume, transform) entries and pickle them.

    Writes ``output.pkl`` under ``mesh_dir`` with keys 'view1'/'view2', one
    dict per object in input order, and always returns an empty list.
    ``rel_rot`` and ``rel_tran`` are accepted for interface parity but not
    used here.
    """
    def _to_entities(objects):
        # convert one view's codes into a list of renderable entity dicts
        entities = []
        for code in suncg_parse.convert_codes_list_to_old_format(objects):
            volume, transform = render_utils.prediction_to_entity(code)
            entities.append({'volume': volume, 'transform': transform})
        return entities

    results = {
        'view1': _to_entities(objects1),
        'view2': _to_entities(objects2),
    }
    with open(os.path.join(mesh_dir, 'output.pkl'), 'wb') as f:
        pickle.dump(results, f)
    return []
def stitch_objects_edge(objects1, objects2, rot_logits, tran_logits, tester,
                        mesh_dir=None, gt_affinity_m=None, use_gt_affinity=False, opts=None):
    """
    Stitch two object sets using edge point clouds.

    Same pipeline as the other random-chamfer stitchers, but samples object
    point clouds with ``edge=True``, reads the loss weights from ``opts``
    when given, and merges matched objects with the 'avg_voxel' strategy.

    :param objects1: list of object code dicts from view 1 ('quat'/'trans'
        entries are transformed in place into view 2's frame)
    :param objects2: list of object code dicts from view 2
    :param rot_logits: 1-D array of scores over rotation classes
    :param tran_logits: 1-D array of scores over translation classes
    :param tester: model wrapper forwarded to merge_codes
    :param mesh_dir: output directory for the affinity visualization
    :param gt_affinity_m: ground-truth affinity matrix (bookkeeping; also the
        affinity source when use_gt_affinity is True)
    :param use_gt_affinity: use gt_affinity_m instead of the predicted one
    :param opts: options object supplying lambda_* weights and nz_id
    :return: (codes, rel_tran, rot, affinity_info)
    """
    # loss weights (overridable through opts)
    if opts is None:
        lambda_nomatch = 1
        lambda_rots = 5
        lambda_trans = 1
        lambda_af = 5
    else:
        lambda_nomatch = opts.lambda_nomatch
        lambda_rots = opts.lambda_rots
        lambda_trans = opts.lambda_trans
        lambda_af = opts.lambda_af
    # top-k sampling sizes
    topk_match = 3
    topk_rot = 3
    topk_tran = 10
    upperbound_match = 128  # number of random matching proposals
    thres = 0.5  # minimum affinity for a candidate match
    # collate object identity embeddings
    # NOTE(review): opts.nz_id below raises AttributeError when opts is None,
    # even though the lambda defaults above suggest opts=None is supported --
    # this parameter shadows the module-level opts; confirm callers always
    # pass a non-None opts.
    id1 = np.zeros((len(objects1), opts.nz_id))
    for idx, obj in enumerate(objects1):
        id1[idx] = obj['id']
    id2 = np.zeros((len(objects2), opts.nz_id))
    for idx, obj in enumerate(objects2):
        id2[idx] = obj['id']
    id1 = torch.FloatTensor(id1)
    id2 = torch.FloatTensor(id2)
    # predicted (or ground-truth) affinity matrix
    if not use_gt_affinity:
        affinity_m = forward_affinity(id1, id2, mesh_dir=mesh_dir)
    else:
        affinity_m = gt_affinity_m
    # per-row top-k candidate matches, descending affinity
    affinity_idx = np.argsort(affinity_m, axis=1)[:, ::-1][:, :topk_match]
    gt_matching_idx = np.argsort(gt_affinity_m, axis=1)[:, ::-1]
    gt_matching_list = get_matching_list(gt_affinity_m, gt_matching_idx, len(objects1))
    # candidate right-objects for each left-object, filtered by threshold
    af_options = []
    for i in range(len(objects1)):
        options = []
        for j in affinity_idx[i]:
            if affinity_m[i][j] <= thres:
                continue
            options.append(j)
        af_options.append(options)
    # top-k pose proposals
    rot_proposals = np.argsort(rot_logits)[::-1][:topk_rot]
    rot_proposals_prob = rot_logits[rot_proposals]
    tran_proposals = np.argsort(tran_logits)[::-1][:topk_tran]
    tran_proposals_prob = tran_logits[tran_proposals]
    # randomly sample matching proposals (-1 means "no match")
    matching_proposals = []
    gt_match_in_proposal = False
    for _ in range(upperbound_match):
        matching = []
        num_nomatch = 0
        for i in range(len(objects1)):
            options = []
            for op in af_options[i]:
                if op not in matching:  # keep the matching injective
                    options.append(int(op))
            options.append(-1)
            m = random.choice(options)
            if m == -1:
                num_nomatch += 1
            matching.append(m)
        if not is_valid_matching(matching):
            raise RuntimeError('invalid matching')
        # affinity scores of the matched pairs
        scores = []
        for i, j in enumerate(matching):
            if j == -1:
                continue
            scores.append(affinity_m[i, j])
        scores = np.array(scores)
        # record whether the ground-truth matching was sampled
        if (matching == np.array(gt_matching_list).flatten()).all():
            gt_match_in_proposal = True
        matching_proposals.append([matching, num_nomatch, scores])
    # pick the best combination of (rot, tran, matching)
    min_loss = float('inf')
    best_comb = None
    for matching, num_nomatch, scores in matching_proposals:
        left_objs = []
        corresponding_right_objs = []
        for i, j in enumerate(matching):
            if j == -1:
                continue
            # edge=True: use edge point clouds for the chamfer comparison
            left_objs.append(object_to_pcd(objects1[i], edge=True))
            corresponding_right_objs.append(object_to_pcd(objects2[j], edge=True))
        assert(len(left_objs) == len(corresponding_right_objs))
        # cartesian product of rotation and translation proposals
        transformation_proposals = []
        transformation_probs = {'tran': [], 'rot': []}
        for rot_class, rot_prob in zip(rot_proposals, rot_proposals_prob):
            for tran_class, tran_prob in zip(tran_proposals, tran_proposals_prob):
                rot = class2quat(rot_class)
                tran = class2tran(tran_class)
                transformation_proposals.append({'tran': tran, 'rot': rot})
                transformation_probs['tran'].append(tran_prob)
                transformation_probs['rot'].append(rot_prob)
        for key in transformation_probs.keys():
            transformation_probs[key] = np.array(transformation_probs[key])
        assert(len(transformation_proposals) == (topk_rot * topk_tran))
        # chamfer distance of every matched pair under every proposal
        cd_pairs = []
        for pcd1, pcd2 in zip(left_objs, corresponding_right_objs):
            pcd1s = []
            pcd2s = []
            for transformation_proposal in transformation_proposals:
                rot = transformation_proposal['rot']
                tran = transformation_proposal['tran']
                rot_quat = Quaternion(rot).rotation_matrix
                transformed_obj1 = (rot_quat @ pcd1.T).T + tran
                if pcd1.shape[0] == 0 or pcd2.shape[0] == 0:
                    # empty cloud: substitute far-apart dummy points so the
                    # pair contributes a large but finite distance
                    pcd1s.append([[1000, 1000, 1000]])
                    pcd2s.append([[-1000, -1000, -1000]])
                    continue
                pcd1s.append(transformed_obj1)
                pcd2s.append(pcd2)
            cd_pair = chamfer_distance(pcd1s, pcd2s)
            cd_pairs.append(cd_pair.flatten())
            if len(cd_pair) != (topk_rot * topk_tran):
                raise RuntimeError('len(cd_pair) != topk_rot * topk_tran:')
        if len(cd_pairs) == 0:  # no matching
            continue
        cd_cost = chamfer_metric(cd_pairs, metric='avg')
        assert(len(cd_cost) == len(transformation_proposals))
        # combined loss: chamfer cost + penalties for unmatched objects,
        # unlikely poses, and low-affinity matches
        losses = cd_cost + lambda_nomatch * num_nomatch \
            + lambda_trans * (1 - transformation_probs['tran']) \
            + lambda_rots * (1 - transformation_probs['rot']) \
            + lambda_af * (1 - scores).mean()
        loss = losses.min()
        if loss < min_loss:
            best_pair_id = losses.argmin()
            tran = transformation_proposals[best_pair_id]['tran']
            rot = transformation_proposals[best_pair_id]['rot']
            min_loss = loss
            best_comb = (rot, tran, matching)
    if best_comb is None:
        # if there is no matching at all, we should trust relative pose.
        matching = [-1 for _ in range(len(objects1))]
        rot_class = rot_logits.argmax()
        tran_class = tran_logits.argmax()
        rot = class2quat(rot_class)
        rel_tran = class2tran(tran_class)
    else:
        rot, rel_tran, matching = best_comb
    # collect affinity diagnostics
    affinity_info = {}
    affinity_info['hit_gt_match'] = (matching == np.array(gt_matching_list).flatten()).all()
    affinity_info['gt_match_in_proposal'] = gt_match_in_proposal
    affinity_info['affinity_pred'] = affinity_m
    affinity_info['affinity_gt'] = gt_affinity_m
    affinity_info['matching'] = matching
    save_affinity_after_stitch(affinity_m, len(objects1), len(objects2), matching, mesh_dir)
    # stitch: transform objects1 into objects2's frame and merge matches
    codes = []
    rel_quat = Quaternion(rot)
    # fix: np.bool was removed in NumPy >= 1.24; plain bool is equivalent
    merged = np.zeros(len(objects2), dtype=bool)
    for i, obj in enumerate(objects1):
        for k in obj.keys():
            if k == 'quat':
                try:
                    quat = rel_quat * Quaternion(obj[k])
                except ValueError:
                    # some codes store the quaternion nested one level deeper
                    quat = rel_quat * Quaternion(obj[k][0])
                obj[k] = quat.elements
            elif k == 'trans':
                obj[k] = rel_quat.rotate(obj[k]) + rel_tran
        if matching[i] != -1:
            j = matching[i]
            mobj = objects2[j]
            merged[j] = True
            # voxel-space averaging (other strategies: 'avg', 'avg_shape',
            # 'avg_rot')
            obj = merge_codes(obj, mobj, 'avg_voxel', tester=tester)
        codes.append(obj)
    # add unmerged objs in objects2 to codes
    for j, obj in enumerate(objects2):
        if not merged[j]:
            codes.append(obj)
    return codes, rel_tran, rot, affinity_info
def stitch_objects_edge_wpose(objects1, objects2, rel_rot, rel_tran, tester,
                              mesh_dir=None, gt_affinity_m=None, use_gt_affinity=False, opts=None):
    """
    The same as stitch_objects_edge but use input relative camera pose.

    Instead of searching over pose proposals derived from logits, the single
    given (rel_rot, rel_tran) is used (with probability 1.0) for every
    matching proposal; only the object matching is searched.

    :param objects1: list of object code dicts from view 1 ('quat'/'trans'
        entries are transformed in place into view 2's frame)
    :param objects2: list of object code dicts from view 2
    :param rel_rot: relative rotation (quaternion-compatible)
    :param rel_tran: relative translation vector
    :param tester: model wrapper forwarded to merge_codes
    :param mesh_dir: output directory for the affinity visualization
    :param gt_affinity_m: ground-truth affinity matrix (bookkeeping; also the
        affinity source when use_gt_affinity is True)
    :param use_gt_affinity: use gt_affinity_m instead of the predicted one
    :param opts: options object supplying lambda_* weights and nz_id
    :return: (codes, rel_tran, rot, affinity_info)
    """
    # loss weights (overridable through opts)
    if opts is None:
        lambda_nomatch = 1
        lambda_rots = 5
        lambda_trans = 1
        lambda_af = 5
    else:
        lambda_nomatch = opts.lambda_nomatch
        lambda_rots = opts.lambda_rots
        lambda_trans = opts.lambda_trans
        lambda_af = opts.lambda_af
    # sampling sizes (no pose top-k here: the pose is given)
    topk_match = 3
    upperbound_match = 128  # number of random matching proposals
    thres = 0.5  # minimum affinity for a candidate match
    # collate object identity embeddings
    # NOTE(review): opts.nz_id below raises AttributeError when opts is None,
    # even though the lambda defaults above suggest opts=None is supported --
    # this parameter shadows the module-level opts; confirm callers always
    # pass a non-None opts.
    id1 = np.zeros((len(objects1), opts.nz_id))
    for idx, obj in enumerate(objects1):
        id1[idx] = obj['id']
    id2 = np.zeros((len(objects2), opts.nz_id))
    for idx, obj in enumerate(objects2):
        id2[idx] = obj['id']
    id1 = torch.FloatTensor(id1)
    id2 = torch.FloatTensor(id2)
    # predicted (or ground-truth) affinity matrix
    if not use_gt_affinity:
        affinity_m = forward_affinity(id1, id2, mesh_dir=mesh_dir)
    else:
        affinity_m = gt_affinity_m
    # per-row top-k candidate matches, descending affinity
    affinity_idx = np.argsort(affinity_m, axis=1)[:, ::-1][:, :topk_match]
    gt_matching_idx = np.argsort(gt_affinity_m, axis=1)[:, ::-1]
    gt_matching_list = get_matching_list(gt_affinity_m, gt_matching_idx, len(objects1))
    # candidate right-objects for each left-object, filtered by threshold
    af_options = []
    for i in range(len(objects1)):
        options = []
        for j in affinity_idx[i]:
            if affinity_m[i][j] <= thres:
                continue
            options.append(j)
        af_options.append(options)
    # randomly sample matching proposals (-1 means "no match")
    matching_proposals = []
    gt_match_in_proposal = False
    for _ in range(upperbound_match):
        matching = []
        num_nomatch = 0
        for i in range(len(objects1)):
            options = []
            for op in af_options[i]:
                if op not in matching:  # keep the matching injective
                    options.append(int(op))
            options.append(-1)
            m = random.choice(options)
            if m == -1:
                num_nomatch += 1
            matching.append(m)
        if not is_valid_matching(matching):
            raise RuntimeError('invalid matching')
        # affinity scores of the matched pairs
        scores = []
        for i, j in enumerate(matching):
            if j == -1:
                continue
            scores.append(affinity_m[i, j])
        scores = np.array(scores)
        # record whether the ground-truth matching was sampled
        if (matching == np.array(gt_matching_list).flatten()).all():
            gt_match_in_proposal = True
        matching_proposals.append([matching, num_nomatch, scores])
    # pick the best matching under the single given pose
    min_loss = float('inf')
    best_comb = None
    for matching, num_nomatch, scores in matching_proposals:
        left_objs = []
        corresponding_right_objs = []
        for i, j in enumerate(matching):
            if j == -1:
                continue
            # edge=True: use edge point clouds for the chamfer comparison
            left_objs.append(object_to_pcd(objects1[i], edge=True))
            corresponding_right_objs.append(object_to_pcd(objects2[j], edge=True))
        assert(len(left_objs) == len(corresponding_right_objs))
        # single pose proposal: the input relative pose, with prob 1.0 so
        # the pose-probability penalty terms vanish
        transformation_proposals = [{'tran': rel_tran, 'rot': rel_rot}]
        transformation_probs = {'tran': np.array([1.0]), 'rot': np.array([1.0])}
        # chamfer distance of every matched pair under the proposal
        cd_pairs = []
        for pcd1, pcd2 in zip(left_objs, corresponding_right_objs):
            pcd1s = []
            pcd2s = []
            for transformation_proposal in transformation_proposals:
                rot = transformation_proposal['rot']
                tran = transformation_proposal['tran']
                rot_quat = Quaternion(rot).rotation_matrix
                transformed_obj1 = (rot_quat @ pcd1.T).T + tran
                if pcd1.shape[0] == 0 or pcd2.shape[0] == 0:
                    # empty cloud: substitute far-apart dummy points so the
                    # pair contributes a large but finite distance
                    pcd1s.append([[1000, 1000, 1000]])
                    pcd2s.append([[-1000, -1000, -1000]])
                    continue
                pcd1s.append(transformed_obj1)
                pcd2s.append(pcd2)
            cd_pair = chamfer_distance(pcd1s, pcd2s)
            cd_pairs.append(cd_pair.flatten())
        if len(cd_pairs) == 0:  # no matching
            continue
        cd_cost = chamfer_metric(cd_pairs, metric='avg')
        assert(len(cd_cost) == len(transformation_proposals))
        # combined loss (pose-probability terms are zero here)
        losses = cd_cost + lambda_nomatch * num_nomatch \
            + lambda_trans * (1 - transformation_probs['tran']) \
            + lambda_rots * (1 - transformation_probs['rot']) \
            + lambda_af * (1 - scores).mean()
        loss = losses.min()
        if loss < min_loss:
            best_pair_id = losses.argmin()
            tran = transformation_proposals[best_pair_id]['tran']
            rot = transformation_proposals[best_pair_id]['rot']
            min_loss = loss
            best_comb = (rot, tran, matching)
    if best_comb is None:
        # no matching at all: trust the provided relative pose as-is
        # (rel_tran already holds the input translation; removed the old
        # no-op self-assignment)
        matching = [-1 for _ in range(len(objects1))]
        rot = rel_rot
    else:
        rot, rel_tran, matching = best_comb
    # collect affinity diagnostics
    affinity_info = {}
    affinity_info['hit_gt_match'] = (matching == np.array(gt_matching_list).flatten()).all()
    affinity_info['gt_match_in_proposal'] = gt_match_in_proposal
    affinity_info['affinity_pred'] = affinity_m
    affinity_info['affinity_gt'] = gt_affinity_m
    affinity_info['matching'] = matching
    save_affinity_after_stitch(affinity_m, len(objects1), len(objects2), matching, mesh_dir)
    # stitch: transform objects1 into objects2's frame and merge matches
    codes = []
    rel_quat = Quaternion(rot)
    # fix: np.bool was removed in NumPy >= 1.24; plain bool is equivalent
    merged = np.zeros(len(objects2), dtype=bool)
    for i, obj in enumerate(objects1):
        for k in obj.keys():
            if k == 'quat':
                try:
                    quat = rel_quat * Quaternion(obj[k])
                except ValueError:
                    # some codes store the quaternion nested one level deeper
                    quat = rel_quat * Quaternion(obj[k][0])
                obj[k] = quat.elements
            elif k == 'trans':
                obj[k] = rel_quat.rotate(obj[k]) + rel_tran
        if matching[i] != -1:
            j = matching[i]
            mobj = objects2[j]
            merged[j] = True
            # voxel-space averaging (other strategies: 'avg', 'avg_shape',
            # 'avg_rot')
            obj = merge_codes(obj, mobj, 'avg_voxel', tester=tester)
        codes.append(obj)
    # add unmerged objs in objects2 to codes
    for j, obj in enumerate(objects2):
        if not merged[j]:
            codes.append(obj)
    return codes, rel_tran, rot, affinity_info
| 36.415301
| 145
| 0.583358
| 8,152
| 66,640
| 4.542566
| 0.041708
| 0.021387
| 0.014744
| 0.014231
| 0.90354
| 0.89552
| 0.888283
| 0.887392
| 0.880425
| 0.875375
| 0
| 0.022265
| 0.307158
| 66,640
| 1,829
| 146
| 36.435211
| 0.779775
| 0.082968
| 0
| 0.862456
| 0
| 0
| 0.030817
| 0
| 0
| 0
| 0
| 0
| 0.012632
| 1
| 0.01193
| false
| 0.003509
| 0.018947
| 0
| 0.043509
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
468fc0c0cc429dde07bd535e909723bbe3a11943
| 5,945
|
py
|
Python
|
ormar/decorators/signals.py
|
ivangirko/ormar
|
1f5d993716da0da83874cbdfd5b44dbf7af1b9c5
|
[
"MIT"
] | 905
|
2020-08-31T19:13:34.000Z
|
2022-03-31T08:38:10.000Z
|
ormar/decorators/signals.py
|
ivangirko/ormar
|
1f5d993716da0da83874cbdfd5b44dbf7af1b9c5
|
[
"MIT"
] | 359
|
2020-08-28T14:14:54.000Z
|
2022-03-29T07:40:32.000Z
|
ormar/decorators/signals.py
|
ivangirko/ormar
|
1f5d993716da0da83874cbdfd5b44dbf7af1b9c5
|
[
"MIT"
] | 56
|
2020-10-26T02:22:14.000Z
|
2022-03-20T06:41:31.000Z
|
from typing import Callable, List, TYPE_CHECKING, Type, Union
if TYPE_CHECKING: # pragma: no cover
from ormar import Model
def receiver(
    signal: str, senders: Union[Type["Model"], List[Type["Model"]]]
) -> Callable:
    """
    Connect the decorated function to the named signal on every sender.

    :param signal: name of the signal to register to
    :type signal: str
    :param senders: one or a list of "Model" classes
    that should have the signal receiver registered
    :type senders: Union[Type["Model"], List[Type["Model"]]]
    :return: returns the original function untouched
    :rtype: Callable
    """
    def _decorator(func: Callable) -> Callable:
        """
        Register ``func`` on each sender's signal and hand it back unchanged.

        :param func: function to register as receiver
        :type func: Callable
        :return: untouched function already registered for given signal
        :rtype: Callable
        """
        sender_list = senders if isinstance(senders, list) else [senders]
        for model_cls in sender_list:
            getattr(model_cls.Meta.signals, signal).connect(func)
        return func

    return _decorator
def post_save(senders: Union[Type["Model"], List[Type["Model"]]]) -> Callable:
    """
    Shortcut for ``receiver`` bound to the ``post_save`` signal.

    :param senders: one or a list of "Model" classes
    that should have the signal receiver registered
    :type senders: Union[Type["Model"], List[Type["Model"]]]
    :return: returns the original function untouched
    :rtype: Callable
    """
    return receiver("post_save", senders)
def post_update(senders: Union[Type["Model"], List[Type["Model"]]]) -> Callable:
    """
    Shortcut for ``receiver`` bound to the ``post_update`` signal.

    :param senders: one or a list of "Model" classes
    that should have the signal receiver registered
    :type senders: Union[Type["Model"], List[Type["Model"]]]
    :return: returns the original function untouched
    :rtype: Callable
    """
    return receiver("post_update", senders)
def post_delete(senders: Union[Type["Model"], List[Type["Model"]]]) -> Callable:
    """
    Shortcut for ``receiver`` bound to the ``post_delete`` signal.

    :param senders: one or a list of "Model" classes
    that should have the signal receiver registered
    :type senders: Union[Type["Model"], List[Type["Model"]]]
    :return: returns the original function untouched
    :rtype: Callable
    """
    return receiver("post_delete", senders)
def pre_save(senders: Union[Type["Model"], List[Type["Model"]]]) -> Callable:
    """
    Shortcut for ``receiver`` bound to the ``pre_save`` signal.

    :param senders: one or a list of "Model" classes
    that should have the signal receiver registered
    :type senders: Union[Type["Model"], List[Type["Model"]]]
    :return: returns the original function untouched
    :rtype: Callable
    """
    return receiver("pre_save", senders)
def pre_update(senders: Union[Type["Model"], List[Type["Model"]]]) -> Callable:
    """
    Shortcut for ``receiver`` bound to the ``pre_update`` signal.

    :param senders: one or a list of "Model" classes
    that should have the signal receiver registered
    :type senders: Union[Type["Model"], List[Type["Model"]]]
    :return: returns the original function untouched
    :rtype: Callable
    """
    return receiver("pre_update", senders)
def pre_delete(senders: Union[Type["Model"], List[Type["Model"]]]) -> Callable:
    """
    Shortcut for ``receiver`` bound to the ``pre_delete`` signal.

    :param senders: one or a list of "Model" classes
    that should have the signal receiver registered
    :type senders: Union[Type["Model"], List[Type["Model"]]]
    :return: returns the original function untouched
    :rtype: Callable
    """
    return receiver("pre_delete", senders)
def pre_relation_add(senders: Union[Type["Model"], List[Type["Model"]]]) -> Callable:
    """Register the decorated function as a pre_relation_add signal receiver.

    :param senders: one "Model" class, or a list of them, whose
        pre_relation_add signal should invoke the decorated function
    :type senders: Union[Type["Model"], List[Type["Model"]]]
    :return: returns the original function untouched
    :rtype: Callable
    """
    decorate = receiver(signal="pre_relation_add", senders=senders)
    return decorate
def post_relation_add(senders: Union[Type["Model"], List[Type["Model"]]]) -> Callable:
    """Register the decorated function as a post_relation_add signal receiver.

    :param senders: one "Model" class, or a list of them, whose
        post_relation_add signal should invoke the decorated function
    :type senders: Union[Type["Model"], List[Type["Model"]]]
    :return: returns the original function untouched
    :rtype: Callable
    """
    decorate = receiver(signal="post_relation_add", senders=senders)
    return decorate
def pre_relation_remove(senders: Union[Type["Model"], List[Type["Model"]]]) -> Callable:
    """Register the decorated function as a pre_relation_remove signal receiver.

    :param senders: one "Model" class, or a list of them, whose
        pre_relation_remove signal should invoke the decorated function
    :type senders: Union[Type["Model"], List[Type["Model"]]]
    :return: returns the original function untouched
    :rtype: Callable
    """
    decorate = receiver(signal="pre_relation_remove", senders=senders)
    return decorate
def post_relation_remove(
    senders: Union[Type["Model"], List[Type["Model"]]]
) -> Callable:
    """Register the decorated function as a post_relation_remove signal receiver.

    :param senders: one "Model" class, or a list of them, whose
        post_relation_remove signal should invoke the decorated function
    :type senders: Union[Type["Model"], List[Type["Model"]]]
    :return: returns the original function untouched
    :rtype: Callable
    """
    decorate = receiver(signal="post_relation_remove", senders=senders)
    return decorate
| 34.166667
| 88
| 0.684272
| 756
| 5,945
| 5.316138
| 0.087302
| 0.098532
| 0.087584
| 0.114954
| 0.834038
| 0.80418
| 0.80418
| 0.80418
| 0.80418
| 0.80418
| 0
| 0
| 0.204373
| 5,945
| 173
| 89
| 34.364162
| 0.849683
| 0.574601
| 0
| 0.052632
| 0
| 0
| 0.115921
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.315789
| false
| 0
| 0.052632
| 0
| 0.684211
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
46a2014d998212d9785d52be4f13d5b3ea1d97aa
| 19,003
|
py
|
Python
|
src/models/modules/MSResNet/MSResNet.py
|
kendreaditya/PCG-arrhythmia-detection
|
a6f414bf101ef4a35d001425646e375f0b8ae86d
|
[
"Apache-2.0"
] | null | null | null |
src/models/modules/MSResNet/MSResNet.py
|
kendreaditya/PCG-arrhythmia-detection
|
a6f414bf101ef4a35d001425646e375f0b8ae86d
|
[
"Apache-2.0"
] | null | null | null |
src/models/modules/MSResNet/MSResNet.py
|
kendreaditya/PCG-arrhythmia-detection
|
a6f414bf101ef4a35d001425646e375f0b8ae86d
|
[
"Apache-2.0"
] | null | null | null |
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torch
from . import pblm
def conv3x3(in_planes, out_planes, stride=1):
    """1D convolution, kernel 3, padding 1 (length-preserving at stride 1), no bias."""
    return nn.Conv1d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
def conv5x5(in_planes, out_planes, stride=1):
    """1D convolution, kernel 5, no bias.

    Note: padding=1 is NOT 'same' padding for kernel 5 -- at stride 1 the
    output is 2 samples shorter than the input (callers compensate for this).
    """
    return nn.Conv1d(
        in_planes,
        out_planes,
        kernel_size=5,
        stride=stride,
        padding=1,
        bias=False,
    )
def conv7x7(in_planes, out_planes, stride=1):
    """1D convolution, kernel 7, no bias.

    Note: padding=1 is NOT 'same' padding for kernel 7 -- at stride 1 the
    output is 4 samples shorter than the input.
    """
    return nn.Conv1d(
        in_planes,
        out_planes,
        kernel_size=7,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock5x5_1(nn.Module):
    """Residual block for branch 1: two 5x5 1D convolutions with batch norm.

    Because conv5x5 uses padding=1, each convolution shortens the sequence,
    so the (longer) residual is truncated before the addition.
    NOTE(review): the truncation slice assumes the length difference ``d`` is
    strictly positive; with the kernel/stride combinations used in this file
    that always holds -- confirm before reusing elsewhere.
    """

    expansion = 1

    def __init__(self, inplanes5_1, planes, stride=1, downsample=None):
        super(BasicBlock5x5_1, self).__init__()
        self.conv1 = conv5x5(inplanes5_1, planes, stride)
        self.bn1 = nn.BatchNorm1d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv5x5(planes, planes)
        self.bn2 = nn.BatchNorm1d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Residual path (optionally projected by ``downsample``).
        residual = x if self.downsample is None else self.downsample(x)
        # Main path: conv -> BN -> ReLU -> conv -> BN.
        branch = self.relu(self.bn1(self.conv1(x)))
        branch = self.bn2(self.conv2(branch))
        # Trim the residual to the branch length before merging.
        d = residual.shape[2] - branch.shape[2]
        merged = residual[:, :, 0:-d] + branch
        return self.relu(merged)
class BasicBlock5x5_2(nn.Module):
    """Residual block for branch 2 (identical structure to BasicBlock5x5_1).

    Two 5x5 1D convolutions with batch norm; the residual is truncated to the
    convolution output length before the addition (conv5x5 shrinks sequences).
    """

    expansion = 1

    def __init__(self, inplanes5_2, planes, stride=1, downsample=None):
        super(BasicBlock5x5_2, self).__init__()
        self.conv1 = conv5x5(inplanes5_2, planes, stride)
        self.bn1 = nn.BatchNorm1d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv5x5(planes, planes)
        self.bn2 = nn.BatchNorm1d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x if self.downsample is None else self.downsample(x)
        branch = self.relu(self.bn1(self.conv1(x)))
        branch = self.bn2(self.conv2(branch))
        # Trim the residual to the branch length before merging.
        d = residual.shape[2] - branch.shape[2]
        merged = residual[:, :, 0:-d] + branch
        return self.relu(merged)
class BasicBlock5x5_3(nn.Module):
    """Residual block for branch 3 (identical structure to BasicBlock5x5_1).

    Two 5x5 1D convolutions with batch norm; the residual is truncated to the
    convolution output length before the addition (conv5x5 shrinks sequences).
    """

    expansion = 1

    def __init__(self, inplanes5_3, planes, stride=1, downsample=None):
        super(BasicBlock5x5_3, self).__init__()
        self.conv1 = conv5x5(inplanes5_3, planes, stride)
        self.bn1 = nn.BatchNorm1d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv5x5(planes, planes)
        self.bn2 = nn.BatchNorm1d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x if self.downsample is None else self.downsample(x)
        branch = self.relu(self.bn1(self.conv1(x)))
        branch = self.bn2(self.conv2(branch))
        # Trim the residual to the branch length before merging.
        d = residual.shape[2] - branch.shape[2]
        merged = residual[:, :, 0:-d] + branch
        return self.relu(merged)
class MSResNet(pblm.PrebuiltLightningModule):
    """Multi-scale 1D ResNet classifier.

    Three parallel residual branches (all built from 5x5 basic blocks) process
    the same stem output; their average-pooled feature maps are concatenated,
    flattened and fed to one linear classifier.

    NOTE(review): ``nn.Linear(48384, num_classes)`` hard-codes the flattened
    feature size, so the model only works for one specific input length
    (presumably 2500 samples) -- confirm against the data pipeline.
    """

    def __init__(self, input_channel, layers=(1, 1, 1, 1), num_classes=10):
        # ``layers`` default changed from a mutable list to a tuple (same
        # values); it is only indexed, so behavior is unchanged.
        super().__init__(self.__class__.__name__)
        # Per-branch running channel count used while stacking blocks.
        self.inplanes5_1 = 64
        self.inplanes5_2 = 64
        self.inplanes5_3 = 64
        # Shared stem: conv -> BN -> ReLU -> max-pool.
        self.conv1 = nn.Conv1d(input_channel, 64, kernel_size=7, stride=2,
                               padding=3, bias=False)
        self.bn1 = nn.BatchNorm1d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)
        # Branch 1.
        self.layer5x5_11 = self._make_layer5_1(BasicBlock5x5_1, 64, layers[0], stride=2)
        self.layer5x5_12 = self._make_layer5_1(BasicBlock5x5_1, 128, layers[1], stride=2)
        self.layer5x5_13 = self._make_layer5_1(BasicBlock5x5_1, 256, layers[2], stride=2)
        self.maxpool5_1 = nn.AvgPool1d(kernel_size=11, stride=1, padding=0)
        # Branch 2.
        self.layer5x5_21 = self._make_layer5_2(BasicBlock5x5_2, 64, layers[0], stride=2)
        self.layer5x5_22 = self._make_layer5_2(BasicBlock5x5_2, 128, layers[1], stride=2)
        self.layer5x5_23 = self._make_layer5_2(BasicBlock5x5_2, 256, layers[2], stride=2)
        self.maxpool5_2 = nn.AvgPool1d(kernel_size=11, stride=1, padding=0)
        # Branch 3.
        self.layer5x5_31 = self._make_layer5_3(BasicBlock5x5_3, 64, layers[0], stride=2)
        self.layer5x5_32 = self._make_layer5_3(BasicBlock5x5_3, 128, layers[1], stride=2)
        self.layer5x5_33 = self._make_layer5_3(BasicBlock5x5_3, 256, layers[2], stride=2)
        self.maxpool5_3 = nn.AvgPool1d(kernel_size=11, stride=1, padding=0)
        # Classifier over the flattened concatenation of all three branches.
        self.fc = nn.Linear(48384, num_classes)

    def _make_layer(self, inplanes_attr, block, planes, blocks, stride=2):
        """Stack ``blocks`` residual blocks, tracking channel count in the
        instance attribute named ``inplanes_attr``.

        Consolidates the four copy-pasted ``_make_layer*`` bodies from the
        original; the per-branch wrappers below keep the original interface.
        """
        inplanes = getattr(self, inplanes_attr)
        downsample = None
        if stride != 1 or inplanes != planes * block.expansion:
            # Project the residual to the new channel count / resolution.
            downsample = nn.Sequential(
                nn.Conv1d(inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm1d(planes * block.expansion),
            )
        stacked = [block(inplanes, planes, stride, downsample)]
        inplanes = planes * block.expansion
        for _ in range(1, blocks):
            stacked.append(block(inplanes, planes))
        setattr(self, inplanes_attr, inplanes)
        return nn.Sequential(*stacked)

    def _make_layer3(self, block, planes, blocks, stride=2):
        # NOTE(review): dead code -- ``self.inplanes3`` is never initialized,
        # so calling this raises AttributeError (same as the original).
        return self._make_layer('inplanes3', block, planes, blocks, stride)

    def _make_layer5_1(self, block, planes, blocks, stride=2):
        return self._make_layer('inplanes5_1', block, planes, blocks, stride)

    def _make_layer5_2(self, block, planes, blocks, stride=2):
        return self._make_layer('inplanes5_2', block, planes, blocks, stride)

    def _make_layer5_3(self, block, planes, blocks, stride=2):
        return self._make_layer('inplanes5_3', block, planes, blocks, stride)

    def _make_layer7(self, block, planes, blocks, stride=2):
        # NOTE(review): dead code -- ``self.inplanes7`` is never initialized.
        return self._make_layer('inplanes7', block, planes, blocks, stride)

    def forward(self, x0):
        # Shared stem.
        x0 = self.conv1(x0)
        x0 = self.bn1(x0)
        x0 = self.relu(x0)
        x0 = self.maxpool(x0)
        # Three parallel branches over the same stem output.
        x = self.maxpool5_1(self.layer5x5_13(self.layer5x5_12(self.layer5x5_11(x0))))
        y = self.maxpool5_2(self.layer5x5_23(self.layer5x5_22(self.layer5x5_21(x0))))
        z = self.maxpool5_3(self.layer5x5_33(self.layer5x5_32(self.layer5x5_31(x0))))
        # Concatenate along channels, flatten, classify.
        out = torch.cat([x, y, z], dim=1)
        out = out.reshape(out.shape[0], -1)
        return self.fc(out)
class MSResEncoder(nn.Module):
    """Encoder variant of MSResNet: identical three-branch trunk, but
    ``forward`` returns the concatenated branch feature maps instead of logits.

    NOTE(review): ``self.fc`` is created but never used in ``forward``; it is
    kept here for state-dict/interface compatibility with existing code.
    """

    def __init__(self, input_channel, layers=(1, 1, 1, 1), num_classes=10):
        # ``layers`` default changed from a mutable list to a tuple (same
        # values); it is only indexed, so behavior is unchanged.
        super().__init__()
        # Per-branch running channel count used while stacking blocks.
        self.inplanes5_1 = 64
        self.inplanes5_2 = 64
        self.inplanes5_3 = 64
        # Shared stem: conv -> BN -> ReLU -> max-pool.
        self.conv1 = nn.Conv1d(input_channel, 64, kernel_size=7, stride=2,
                               padding=3, bias=False)
        self.bn1 = nn.BatchNorm1d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)
        # Branch 1.
        self.layer5x5_11 = self._make_layer5_1(BasicBlock5x5_1, 64, layers[0], stride=2)
        self.layer5x5_12 = self._make_layer5_1(BasicBlock5x5_1, 128, layers[1], stride=2)
        self.layer5x5_13 = self._make_layer5_1(BasicBlock5x5_1, 256, layers[2], stride=2)
        self.maxpool5_1 = nn.AvgPool1d(kernel_size=11, stride=1, padding=0)
        # Branch 2.
        self.layer5x5_21 = self._make_layer5_2(BasicBlock5x5_2, 64, layers[0], stride=2)
        self.layer5x5_22 = self._make_layer5_2(BasicBlock5x5_2, 128, layers[1], stride=2)
        self.layer5x5_23 = self._make_layer5_2(BasicBlock5x5_2, 256, layers[2], stride=2)
        self.maxpool5_2 = nn.AvgPool1d(kernel_size=11, stride=1, padding=0)
        # Branch 3.
        self.layer5x5_31 = self._make_layer5_3(BasicBlock5x5_3, 64, layers[0], stride=2)
        self.layer5x5_32 = self._make_layer5_3(BasicBlock5x5_3, 128, layers[1], stride=2)
        self.layer5x5_33 = self._make_layer5_3(BasicBlock5x5_3, 256, layers[2], stride=2)
        self.maxpool5_3 = nn.AvgPool1d(kernel_size=11, stride=1, padding=0)
        # Unused in forward -- see class NOTE.
        self.fc = nn.Linear(48384, num_classes)

    def _make_layer(self, inplanes_attr, block, planes, blocks, stride=2):
        """Stack ``blocks`` residual blocks, tracking channel count in the
        instance attribute named ``inplanes_attr``.

        Consolidates the four copy-pasted ``_make_layer*`` bodies from the
        original; the per-branch wrappers below keep the original interface.
        """
        inplanes = getattr(self, inplanes_attr)
        downsample = None
        if stride != 1 or inplanes != planes * block.expansion:
            # Project the residual to the new channel count / resolution.
            downsample = nn.Sequential(
                nn.Conv1d(inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm1d(planes * block.expansion),
            )
        stacked = [block(inplanes, planes, stride, downsample)]
        inplanes = planes * block.expansion
        for _ in range(1, blocks):
            stacked.append(block(inplanes, planes))
        setattr(self, inplanes_attr, inplanes)
        return nn.Sequential(*stacked)

    def _make_layer3(self, block, planes, blocks, stride=2):
        # NOTE(review): dead code -- ``self.inplanes3`` is never initialized,
        # so calling this raises AttributeError (same as the original).
        return self._make_layer('inplanes3', block, planes, blocks, stride)

    def _make_layer5_1(self, block, planes, blocks, stride=2):
        return self._make_layer('inplanes5_1', block, planes, blocks, stride)

    def _make_layer5_2(self, block, planes, blocks, stride=2):
        return self._make_layer('inplanes5_2', block, planes, blocks, stride)

    def _make_layer5_3(self, block, planes, blocks, stride=2):
        return self._make_layer('inplanes5_3', block, planes, blocks, stride)

    def _make_layer7(self, block, planes, blocks, stride=2):
        # NOTE(review): dead code -- ``self.inplanes7`` is never initialized.
        return self._make_layer('inplanes7', block, planes, blocks, stride)

    def forward(self, x0):
        # Shared stem.
        x0 = self.conv1(x0)
        x0 = self.bn1(x0)
        x0 = self.relu(x0)
        x0 = self.maxpool(x0)
        # Three parallel branches over the same stem output.
        x = self.maxpool5_1(self.layer5x5_13(self.layer5x5_12(self.layer5x5_11(x0))))
        y = self.maxpool5_2(self.layer5x5_23(self.layer5x5_22(self.layer5x5_21(x0))))
        z = self.maxpool5_3(self.layer5x5_33(self.layer5x5_32(self.layer5x5_31(x0))))
        # Return the channel-wise concatenation (no flatten, no classifier).
        return torch.cat([x, y, z], dim=1)
class MSResDecoder(nn.Module):
    """Decoder mirroring MSResEncoder: splits the 768-channel input into three
    256-channel slices, upsamples each through transposed-conv stacks, then
    merges the branches and projects back to ``input_channel`` channels.

    Fix: the original assigned ``self.conv1`` twice; the first assignment
    (a ``ConvTranspose1d(input_channel, 64, ...)``) was immediately shadowed
    by the later definition and its weights were never used, so it has been
    removed.

    NOTE(review): ``layers`` and ``num_classes`` are accepted but unused, and
    ``bn1``/``relu``/``maxpool`` are registered but never called in
    ``forward``; they are kept for interface/state-dict compatibility.
    The hard-coded Linear sizes (1233->1250, 2499->2500) tie this decoder to
    a 63-sample-per-channel input and a 2500-sample output -- confirm.
    """

    def __init__(self, input_channel, layers=(256, 128, 64), num_classes=10):
        super().__init__()
        self.bn1 = nn.BatchNorm1d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)

        def block(in_feat, out_feat, normalize=True):
            # One upsampling unit: refine -> upsample (stride 2) -> ReLU -> refine.
            # ``normalize`` is accepted for signature compatibility but unused.
            return nn.Sequential(
                nn.ConvTranspose1d(in_feat, in_feat, kernel_size=5, stride=1),
                nn.ConvTranspose1d(in_feat, out_feat, kernel_size=5, stride=2),
                nn.ReLU(),
                nn.ConvTranspose1d(out_feat, out_feat, kernel_size=5, stride=1),
            )

        # Three parallel branches, each decoding one 256-channel slice.
        self.block1_1 = block(256, 128)
        self.block2_1 = block(128, 64)
        self.block3_1 = block(64, 64)
        self.block4_1 = block(64, 64)
        self.block5_1 = nn.Linear(1233, 1250)
        self.block1_2 = block(256, 128)
        self.block2_2 = block(128, 64)
        self.block3_2 = block(64, 64)
        self.block4_2 = block(64, 64)
        self.block5_2 = nn.Linear(1233, 1250)
        self.block1_3 = block(256, 128)
        self.block2_3 = block(128, 64)
        self.block3_3 = block(64, 64)
        self.block4_3 = block(64, 64)
        self.block5_3 = nn.Linear(1233, 1250)
        # Merge branches and map back to the input channel count / length.
        self.conv2 = nn.ConvTranspose1d(64 * 3, 64, kernel_size=1)
        self.conv1 = nn.ConvTranspose1d(64, input_channel, kernel_size=7, stride=2,
                                        padding=3, bias=False)
        self.fc = nn.Linear(2499, 2500)

    def forward(self, x):
        # Split the 768-channel input into three 256-channel branch inputs.
        x, y, z = x[:, :256], x[:, 256:512], x[:, 512:768]
        x = self.block1_1(x)
        x = self.block2_1(x)
        x = self.block3_1(x)
        x = self.block4_1(x)
        x = self.block5_1(x)
        y = self.block1_2(y)
        y = self.block2_2(y)
        y = self.block3_2(y)
        y = self.block4_2(y)
        y = self.block5_2(y)
        z = self.block1_3(z)
        z = self.block2_3(z)
        z = self.block3_3(z)
        z = self.block4_3(z)
        z = self.block5_3(z)
        cat_values = torch.cat([x, y, z], dim=1)
        x = self.conv2(cat_values)
        x = self.conv1(x)
        x = self.fc(x)
        return x
if __name__ == "__main__":
    # Smoke check: build the encoder for single-channel input and print its
    # layer structure (no forward pass is run).
    model = MSResEncoder(1, num_classes=10)
    print(model)
| 36.614644
| 235
| 0.59459
| 2,468
| 19,003
| 4.419773
| 0.062399
| 0.026953
| 0.073341
| 0.038504
| 0.917125
| 0.890631
| 0.867528
| 0.842776
| 0.828199
| 0.824441
| 0
| 0.082657
| 0.286955
| 19,003
| 518
| 236
| 36.685328
| 0.722362
| 0.082829
| 0
| 0.766304
| 0
| 0
| 0.00046
| 0
| 0
| 0
| 0
| 0.001931
| 0
| 1
| 0.070652
| false
| 0
| 0.013587
| 0.008152
| 0.163043
| 0.002717
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d3b28f1433d7307662ca123422b3d40203489700
| 69
|
py
|
Python
|
currencypy/utils/__init__.py
|
LucasFrassetto/CurrencyPy
|
0e1618290a9d86a1364439c2840dae29e004889c
|
[
"MIT"
] | null | null | null |
currencypy/utils/__init__.py
|
LucasFrassetto/CurrencyPy
|
0e1618290a9d86a1364439c2840dae29e004889c
|
[
"MIT"
] | null | null | null |
currencypy/utils/__init__.py
|
LucasFrassetto/CurrencyPy
|
0e1618290a9d86a1364439c2840dae29e004889c
|
[
"MIT"
] | null | null | null |
from .aliases import iso_code_alias
from .aliases import symbol_alias
| 34.5
| 35
| 0.869565
| 11
| 69
| 5.181818
| 0.636364
| 0.385965
| 0.596491
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101449
| 69
| 2
| 36
| 34.5
| 0.919355
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d3e7f842fc018f7145c02894c39c2e4e0c203618
| 210
|
py
|
Python
|
neurotin/logs/__init__.py
|
mscheltienne/neurotin-analysis
|
841b7d86c0c990169cceb02b40d9eb6bd0d07612
|
[
"MIT"
] | null | null | null |
neurotin/logs/__init__.py
|
mscheltienne/neurotin-analysis
|
841b7d86c0c990169cceb02b40d9eb6bd0d07612
|
[
"MIT"
] | null | null | null |
neurotin/logs/__init__.py
|
mscheltienne/neurotin-analysis
|
841b7d86c0c990169cceb02b40d9eb6bd0d07612
|
[
"MIT"
] | null | null | null |
"""Logs module to analyze log files."""
from .mml import lineplot_mml_evolution # noqa: F401
from .scores import boxplot_scores_evolution # noqa: F401
from .scores import boxplot_scores_between_participants
| 35
| 58
| 0.804762
| 29
| 210
| 5.586207
| 0.586207
| 0.160494
| 0.209877
| 0.259259
| 0.567901
| 0.567901
| 0.567901
| 0.567901
| 0
| 0
| 0
| 0.032787
| 0.128571
| 210
| 5
| 59
| 42
| 0.852459
| 0.266667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
3189443416db70af41b44d613642f912a86f5c66
| 14,518
|
py
|
Python
|
ulfs/mycells.py
|
asappresearch/neural-ilm
|
fd7e09960525391f4084a5753429deabd7ff00aa
|
[
"MIT"
] | null | null | null |
ulfs/mycells.py
|
asappresearch/neural-ilm
|
fd7e09960525391f4084a5753429deabd7ff00aa
|
[
"MIT"
] | null | null | null |
ulfs/mycells.py
|
asappresearch/neural-ilm
|
fd7e09960525391f4084a5753429deabd7ff00aa
|
[
"MIT"
] | 2
|
2021-02-25T04:42:14.000Z
|
2021-02-25T04:43:06.000Z
|
import math
import torch
from torch import nn
import torch.nn.functional as F
from ulfs.tensor_utils import Hadamard, concat
# Registry mapping cell names (as referenced by configuration) to cell
# classes.  Populated below as each class is defined.
# (fix) Removed the dead, commented-out ``register`` decorator sketch.
cells_by_name = {}
class MyLSTMCell_concatfused(nn.Module):
    """LSTM-style cell computing all four gate pre-activations with a single
    fused linear layer over the concatenation of input and hidden state.

    NOTE(review): gate activations are nonstandard -- ``i`` and ``o`` use
    tanh where a textbook LSTM uses sigmoid; confirm this is intentional.
    """

    def __init__(self, input_size, hidden_size):
        super().__init__()
        self.input_size = input_size
        self.embedding_size = hidden_size
        # One projection producing all four gate pre-activations at once.
        self.h1 = nn.Linear(self.input_size + self.embedding_size,
                            self.embedding_size * 4)

    def forward(self, x, state_cell_tuple):
        state, cell = state_cell_tuple
        n = x.size()[0]
        gates = self.h1(concat(x, state)).view(n, 4, self.embedding_size)
        i = torch.tanh(gates[:, 0])
        j = torch.sigmoid(gates[:, 1])
        f = torch.sigmoid(gates[:, 2])
        o = torch.tanh(gates[:, 3])
        new_cell = Hadamard(cell, f) + Hadamard(i, j)
        new_state = Hadamard(torch.tanh(new_cell), o)
        return (new_state, new_cell)


cells_by_name['mylstm_concatfused'] = MyLSTMCell_concatfused
class MyLSTMCell_concat(nn.Module):
    """LSTM-style cell with four separate linear layers, each applied to the
    concatenation of input and hidden state.

    NOTE(review): gate activations are nonstandard -- ``i`` and ``o`` use
    tanh where a textbook LSTM uses sigmoid; confirm this is intentional.
    """

    def __init__(self, input_size, hidden_size):
        super().__init__()
        self.input_size = input_size
        self.embedding_size = hidden_size
        in_features = self.input_size + self.embedding_size
        self.h1 = nn.Linear(in_features, self.embedding_size)
        self.h2 = nn.Linear(in_features, self.embedding_size)
        self.h3 = nn.Linear(in_features, self.embedding_size)
        self.h4 = nn.Linear(in_features, self.embedding_size)

    def forward(self, x, state_cell_tuple):
        # (fix) removed the unused local ``batch_size`` and dead comments.
        state, cell = state_cell_tuple
        in_concat = concat(x, state)
        i = torch.tanh(self.h1(in_concat))
        j = torch.sigmoid(self.h2(in_concat))
        f = torch.sigmoid(self.h3(in_concat))
        o = torch.tanh(self.h4(in_concat))
        celldot = Hadamard(cell, f) + Hadamard(i, j)
        statedot = Hadamard(torch.tanh(celldot), o)
        return (statedot, celldot)


cells_by_name['mylstm_concat'] = MyLSTMCell_concat
class MyLSTMCell_concatgrouped(nn.Module):
    """LSTM-style cell where input and state are split into two halves
    ("groups"); each gate sums separate projections of the two groups.

    NOTE(review): assumes ``input_size`` and ``hidden_size`` are even; gate
    activations are nonstandard (tanh for ``i`` and ``o``).
    """

    def __init__(self, input_size, hidden_size):
        super().__init__()
        self.input_size = input_size
        self.embedding_size = hidden_size
        # (fix) deduplicated the repeated in-features expression.
        in_features = self.input_size // 2 + self.embedding_size // 2
        self.h1a = nn.Linear(in_features, self.embedding_size)
        self.h2a = nn.Linear(in_features, self.embedding_size)
        self.h3a = nn.Linear(in_features, self.embedding_size)
        self.h4a = nn.Linear(in_features, self.embedding_size)
        self.h1b = nn.Linear(in_features, self.embedding_size)
        self.h2b = nn.Linear(in_features, self.embedding_size)
        self.h3b = nn.Linear(in_features, self.embedding_size)
        self.h4b = nn.Linear(in_features, self.embedding_size)

    def forward(self, x, state_cell_tuple):
        # (fix) removed the unused local ``batch_size`` and dead comments.
        state, cell = state_cell_tuple
        # Split input and state into their two halves.
        x1 = x[:, :self.input_size // 2]
        x2 = x[:, self.input_size // 2:]
        h1 = state[:, :self.embedding_size // 2]
        h2 = state[:, self.embedding_size // 2:]
        in_concat1 = concat(x1, h1)
        in_concat2 = concat(x2, h2)
        i = torch.tanh(self.h1a(in_concat1) + self.h1b(in_concat2))
        j = torch.sigmoid(self.h2a(in_concat1) + self.h2b(in_concat2))
        f = torch.sigmoid(self.h3a(in_concat1) + self.h3b(in_concat2))
        o = torch.tanh(self.h4a(in_concat1) + self.h4b(in_concat2))
        celldot = Hadamard(cell, f) + Hadamard(i, j)
        statedot = Hadamard(torch.tanh(celldot), o)
        return (statedot, celldot)


cells_by_name['mylstm_concatgrouped'] = MyLSTMCell_concatgrouped
class MyLSTMCellFused(nn.Module):
    """LSTM-style cell with fused 4-gate weight matrices ``Wx``/``Wh`` and a
    per-gate bias, initialized like ``nn.Linear`` (uniform +-1/sqrt(hidden)).

    NOTE(review): gate activations are nonstandard -- ``i`` and ``o`` use
    tanh where a textbook LSTM uses sigmoid; confirm this is intentional.
    """

    def __init__(self, input_size, hidden_size):
        super().__init__()
        self.input_size = input_size
        self.embedding_size = hidden_size
        self.Wx = nn.Parameter(torch.Tensor(input_size, 4 * hidden_size))
        self.Wh = nn.Parameter(torch.Tensor(hidden_size, 4 * hidden_size))
        self.bias = nn.Parameter(torch.Tensor(4, hidden_size))
        # Initialization adapted from pytorch nn.Linear (same order as the
        # original: bias, Wx, Wh -- keeps the RNG stream identical).
        bound = 1. / math.sqrt(hidden_size)
        self.bias.data.uniform_(-bound, bound)
        self.Wx.data.uniform_(-bound, bound)
        self.Wh.data.uniform_(-bound, bound)

    def forward(self, x, state_cell_tuple):
        state, cell = state_cell_tuple
        n = x.size()[0]
        x_gates = (x @ self.Wx).view(n, 4, self.embedding_size)
        h_gates = (state @ self.Wh).view(n, 4, self.embedding_size)
        # Broadcasting the (4, hidden) bias reproduces the per-gate sums.
        pre = x_gates + h_gates + self.bias
        i = torch.tanh(pre[:, 0])
        j = torch.sigmoid(pre[:, 1])
        f = torch.sigmoid(pre[:, 2])
        o = torch.tanh(pre[:, 3])
        new_cell = Hadamard(cell, f) + Hadamard(i, j)
        new_state = Hadamard(torch.tanh(new_cell), o)
        return (new_state, new_cell)


cells_by_name['mylstmfused'] = MyLSTMCellFused
class MyLSTMCell(nn.Module):
    """LSTM-style cell with separate input and state projections per gate.

    NOTE(review): gate activations are nonstandard -- ``i`` and ``o`` use
    tanh where a textbook LSTM uses sigmoid; confirm this is intentional.
    """

    def __init__(self, input_size, hidden_size):
        super().__init__()
        self.input_size = input_size
        self.embedding_size = hidden_size
        self.fc_x1 = nn.Linear(self.input_size, self.embedding_size)
        self.fc_x2 = nn.Linear(self.input_size, self.embedding_size)
        self.fc_x3 = nn.Linear(self.input_size, self.embedding_size)
        self.fc_x4 = nn.Linear(self.input_size, self.embedding_size)
        self.fc_h1 = nn.Linear(self.embedding_size, self.embedding_size)
        self.fc_h2 = nn.Linear(self.embedding_size, self.embedding_size)
        self.fc_h3 = nn.Linear(self.embedding_size, self.embedding_size)
        self.fc_h4 = nn.Linear(self.embedding_size, self.embedding_size)

    def forward(self, x, state_cell_tuple):
        # (fix) removed the unused local ``batch_size``.
        state, cell = state_cell_tuple
        i = torch.tanh(self.fc_x1(x) + self.fc_h1(state))
        j = torch.sigmoid(self.fc_x2(x) + self.fc_h2(state))
        f = torch.sigmoid(self.fc_x3(x) + self.fc_h3(state))
        o = torch.tanh(self.fc_x4(x) + self.fc_h4(state))
        celldot = Hadamard(cell, f) + Hadamard(i, j)
        statedot = Hadamard(torch.tanh(celldot), o)
        return (statedot, celldot)


cells_by_name['mylstm'] = MyLSTMCell
class NegOut(nn.Module):
    """Randomly flips the sign of activations with probability ``prob`` while
    training; identity in eval mode (a dropout-like regularizer).
    """

    def __init__(self, prob):
        super().__init__()
        self.prob = prob
        # Mirrors nn.Module.training; kept because forward reads it.
        self._train = True

    def forward(self, x):
        if not self._train:
            # Eval mode: pass activations through unchanged.
            return x
        shape = x.size()
        # Mask entries are +1 with probability (1 - prob) and -1 with
        # probability ``prob``.  (fix) The random draw is created on x's
        # device so CUDA inputs no longer fail with a device mismatch.
        self.mask = 1 - 2 * (torch.rand(*shape, device=x.device) < self.prob).int()
        return x * self.mask.float()

    def train(self, arg=True):
        # (fix) The original signature ``train(self, arg)`` had no default,
        # which broke the standard no-argument ``module.train()`` call; it
        # also returned None where nn.Module.train returns self.
        super().train(arg)
        self._train = arg
        return self
class MyNegoutGRUv3Cell(nn.Module):
    """GRUv3 cell with NegOut(0.1) applied to each input-projection
    pre-activation as a regularizer (state projections are untouched).
    """

    def __init__(self, input_size, hidden_size):
        super().__init__()
        self.input_size = input_size
        self.embedding_size = hidden_size
        self.fc_x1 = nn.Linear(self.input_size, self.embedding_size)
        self.fc_x2 = nn.Linear(self.input_size, self.embedding_size)
        self.fc_x3 = nn.Linear(self.input_size, self.embedding_size)
        self.fc_h1 = nn.Linear(self.embedding_size, self.embedding_size)
        self.fc_h2 = nn.Linear(self.embedding_size, self.embedding_size)
        self.fc_rh = nn.Linear(self.embedding_size, self.embedding_size)
        self.negout = NegOut(0.1)

    def forward(self, x, state):
        # (fix) removed the unused local ``batch_size`` and dead comments.
        r = torch.sigmoid(self.negout(self.fc_x1(x)) + self.fc_h1(state))
        z = torch.sigmoid(self.negout(self.fc_x2(x)) + self.fc_h2(state))
        htilde = torch.tanh(
            self.negout(self.fc_x3(x)) + self.fc_rh(Hadamard(r, state)))
        # h' = z * h + (1 - z) * htilde  (z acts as a "keep" gate here).
        hdot = Hadamard(z, state) + Hadamard(1 - z, htilde)
        return hdot
class MyGRUv3Cell(nn.Module):
    """GRU cell (v3): reset gate applied to the state before its candidate
    projection; ``z`` acts as a "keep" gate.

    h' = z * h + (1 - z) * htilde, where r, z are sigmoid gates and
    htilde = tanh(fc_x3(x) + fc_rh(r * h)).
    """

    def __init__(self, input_size, hidden_size):
        super().__init__()
        self.input_size = input_size
        self.embedding_size = hidden_size
        self.fc_x1 = nn.Linear(self.input_size, self.embedding_size)
        self.fc_x2 = nn.Linear(self.input_size, self.embedding_size)
        self.fc_x3 = nn.Linear(self.input_size, self.embedding_size)
        self.fc_h1 = nn.Linear(self.embedding_size, self.embedding_size)
        self.fc_h2 = nn.Linear(self.embedding_size, self.embedding_size)
        self.fc_rh = nn.Linear(self.embedding_size, self.embedding_size)

    def forward(self, x, state):
        # (fix) removed the unused local ``batch_size``.
        r = torch.sigmoid(self.fc_x1(x) + self.fc_h1(state))
        z = torch.sigmoid(self.fc_x2(x) + self.fc_h2(state))
        htilde = torch.tanh(
            self.fc_x3(x) + self.fc_rh(Hadamard(r, state)))
        hdot = Hadamard(z, state) + Hadamard(1 - z, htilde)
        return hdot
class MyNoisyGRUv3Cell(nn.Module):
    """GRUv3 cell with a noise helper.

    NOTE(review): ``get_noise`` is defined but never called in ``forward``,
    so this class currently behaves identically to MyGRUv3Cell -- the noise
    injection appears unfinished.  Kept as-is pending confirmation.
    """

    def __init__(self, input_size, hidden_size):
        super().__init__()
        self.input_size = input_size
        self.embedding_size = hidden_size
        self.fc_x1 = nn.Linear(self.input_size, self.embedding_size)
        self.fc_x2 = nn.Linear(self.input_size, self.embedding_size)
        self.fc_x3 = nn.Linear(self.input_size, self.embedding_size)
        self.fc_h1 = nn.Linear(self.embedding_size, self.embedding_size)
        self.fc_h2 = nn.Linear(self.embedding_size, self.embedding_size)
        self.fc_rh = nn.Linear(self.embedding_size, self.embedding_size)

    def get_noise(self, batch_size):
        """Return (batch_size, embedding_size) Gaussian noise scaled by 0.1."""
        return torch.randn(batch_size, self.embedding_size) * 0.1

    def forward(self, x, state):
        # (fix) removed the unused local ``batch_size``.
        r = torch.sigmoid(self.fc_x1(x) + self.fc_h1(state))
        z = torch.sigmoid(self.fc_x2(x) + self.fc_h2(state))
        htilde = torch.tanh(
            self.fc_x3(x) + self.fc_rh(Hadamard(r, state)))
        hdot = Hadamard(z, state) + Hadamard(1 - z, htilde)
        return hdot
class MyUGRUCell(nn.Module):
    """GRU variant with an extra, independent "input" gate ``i`` used instead
    of ``1 - z`` when mixing in the candidate state.

    NOTE(review): the gate tensors (r, z, i, htilde) are stored on ``self``,
    presumably for external inspection -- confirm before refactoring.
    ``batch_size`` is computed but unused.
    """
    def __init__(self, input_size, hidden_size):
        super().__init__()
        self.input_size = input_size
        self.embedding_size = hidden_size
        self.fc_x1 = nn.Linear(self.input_size, self.embedding_size)
        self.fc_x2 = nn.Linear(self.input_size, self.embedding_size)
        self.fc_x3 = nn.Linear(self.input_size, self.embedding_size)
        self.fc_x4 = nn.Linear(self.input_size, self.embedding_size)
        self.fc_h1 = nn.Linear(self.embedding_size, self.embedding_size)
        self.fc_h2 = nn.Linear(self.embedding_size, self.embedding_size)
        self.fc_h4 = nn.Linear(self.embedding_size, self.embedding_size)
        self.fc_rh = nn.Linear(self.embedding_size, self.embedding_size)
    def forward(self, x, state):
        batch_size = x.size()[0]
        # r: reset gate, z: keep gate, i: independent input gate.
        self.r = torch.sigmoid(self.fc_x1(x) + self.fc_h1(state))
        self.z = torch.sigmoid(self.fc_x2(x) + self.fc_h2(state))
        self.i = torch.sigmoid(self.fc_x4(x) + self.fc_h4(state))
        self.htilde = torch.tanh(
            self.fc_x3(x) + self.fc_rh(Hadamard(self.r, state)))
        # h' = z * h + i * htilde  (unlike standard GRU, i is not tied to 1-z).
        hdot = Hadamard(self.z, state) + Hadamard(self.i, self.htilde)
        return hdot
class MyGRUv3Cellz(nn.Module):
    """GRUv3 variant with the update mix inverted relative to MyGRUv3Cell:

    h' = (1 - z) * h + z * htilde, i.e. ``z`` gates the candidate instead of
    the old state.
    """

    def __init__(self, input_size, hidden_size):
        super().__init__()
        self.input_size = input_size
        self.embedding_size = hidden_size
        self.fc_x1 = nn.Linear(self.input_size, self.embedding_size)
        self.fc_x2 = nn.Linear(self.input_size, self.embedding_size)
        self.fc_x3 = nn.Linear(self.input_size, self.embedding_size)
        self.fc_h1 = nn.Linear(self.embedding_size, self.embedding_size)
        self.fc_h2 = nn.Linear(self.embedding_size, self.embedding_size)
        self.fc_rh = nn.Linear(self.embedding_size, self.embedding_size)

    def forward(self, x, state):
        # (fix) removed the unused local ``batch_size``.
        r = torch.sigmoid(self.fc_x1(x) + self.fc_h1(state))
        z = torch.sigmoid(self.fc_x2(x) + self.fc_h2(state))
        htilde = torch.tanh(
            self.fc_x3(x) + self.fc_rh(Hadamard(r, state)))
        hdot = Hadamard(1 - z, state) + Hadamard(z, htilde)
        return hdot
class MyGRUv1Cell(nn.Module):
    """Standard GRU cell (same equations as torch.nn.GRUCell):
    r, z gates; candidate n = tanh(W_x x + r * W_h h);
    h' = (1 - z) * n + z * h.
    """

    def __init__(self, input_size, hidden_size):
        super().__init__()
        self.input_size = input_size
        self.embedding_size = hidden_size
        # Input projections: reset (1), update (2), candidate (3).
        self.fc_x1 = nn.Linear(self.input_size, self.embedding_size)
        self.fc_x2 = nn.Linear(self.input_size, self.embedding_size)
        self.fc_x3 = nn.Linear(self.input_size, self.embedding_size)
        # Hidden-state projections for the matching gates.
        self.fc_h1 = nn.Linear(self.embedding_size, self.embedding_size)
        self.fc_h2 = nn.Linear(self.embedding_size, self.embedding_size)
        self.fc_h3 = nn.Linear(self.embedding_size, self.embedding_size)

    def forward(self, x, state):
        """One recurrent step; returns the new hidden state.

        Fix: removed an unused ``batch_size`` local.
        """
        r = torch.sigmoid(self.fc_x1(x) + self.fc_h1(state))
        z = torch.sigmoid(self.fc_x2(x) + self.fc_h2(state))
        # Reset gate is applied AFTER the hidden projection (v1/original
        # GRU formulation), not to the raw state.
        n = torch.tanh(self.fc_x3(x) + Hadamard(r, self.fc_h3(state)))
        hdot = Hadamard(1 - z, n) + Hadamard(z, state)
        return hdot
# Register the cell implementations under their lookup names.
cells_by_name.update({
    'mygruv3': MyGRUv3Cell,
    'mygruv1': MyGRUv1Cell,
    'mygruv3z': MyGRUv3Cellz,
    'ugru': MyUGRUCell,
    'noisygruv3': MyNoisyGRUv3Cell,
    'mynegoutgru': MyNegoutGRUv3Cell,
})
| 39.237838
| 98
| 0.642651
| 2,072
| 14,518
| 4.260135
| 0.064672
| 0.122352
| 0.227257
| 0.185567
| 0.781353
| 0.756203
| 0.733205
| 0.723575
| 0.716098
| 0.711227
| 0
| 0.01951
| 0.226822
| 14,518
| 369
| 99
| 39.344173
| 0.76686
| 0.058961
| 0
| 0.601476
| 0
| 0
| 0.008433
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095941
| false
| 0
| 0.01845
| 0.00369
| 0.210332
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9eda6d91c9c6858b27a575aea55c59c1f06db163
| 22,675
|
py
|
Python
|
optimization/first_sdEta_mjj_optimization/sdEta_mistake_analyses/dEta_mmjj_cuts_plots/tight_analysis_sdeta_3.6_mmjj_1250/Output/Histos/MadAnalysis5job_0/selection_9.py
|
sheride/axion_pheno
|
7d3fc08f5ae5b17a3500eba19a2e43f87f076ce5
|
[
"MIT"
] | null | null | null |
optimization/first_sdEta_mjj_optimization/sdEta_mistake_analyses/dEta_mmjj_cuts_plots/tight_analysis_sdeta_3.6_mmjj_1250/Output/Histos/MadAnalysis5job_0/selection_9.py
|
sheride/axion_pheno
|
7d3fc08f5ae5b17a3500eba19a2e43f87f076ce5
|
[
"MIT"
] | null | null | null |
optimization/first_sdEta_mjj_optimization/sdEta_mistake_analyses/dEta_mmjj_cuts_plots/tight_analysis_sdeta_3.6_mmjj_1250/Output/Histos/MadAnalysis5job_0/selection_9.py
|
sheride/axion_pheno
|
7d3fc08f5ae5b17a3500eba19a2e43f87f076ce5
|
[
"MIT"
] | null | null | null |
def selection_9():
    """Plot the stacked Delta-eta(j1, j2) histogram for selection 9.

    Auto-generated MadAnalysis5 layout, refactored: the 17 per-sample
    weight arrays are drawn as outline ("step") histograms of cumulative
    sums, from the full stack (all 17 samples) down to the signal alone,
    so the legend order matches the original generated script.  Images
    are written into the HTML/PDF/DVI output folders.

    Fixes vs. the generated code:
      * dropped the ``normed=False`` keyword — removed from
        ``Axes.hist`` in matplotlib 3.1, and ``False`` was the default;
      * the PDF-folder output now uses a ``.pdf`` extension (the
        generator wrote a PNG-format file named ``.png`` there);
      * the x-axis label is wrapped in ``$...$`` so mathtext renders
        the TeX instead of showing it literally (usetex is off).
    """
    # Library import (kept local, as in the generated script)
    import numpy
    import matplotlib
    import matplotlib.pyplot as plt
    import matplotlib.gridspec as gridspec
    # Library version (kept for traceability of the generated output)
    matplotlib_version = matplotlib.__version__
    numpy_version = numpy.__version__
    # Histo binning: 100 uniform bins over [-8, 8]
    xBinning = numpy.linspace(-8.0,8.0,101,endpoint=True)
    # Data sequence: middle of each bin
    xData = numpy.array([-7.92,-7.76,-7.6,-7.44,-7.28,-7.12,-6.96,-6.8,-6.64,-6.48,-6.32,-6.16,-6.0,-5.84,-5.68,-5.52,-5.36,-5.2,-5.04,-4.88,-4.72,-4.56,-4.4,-4.24,-4.08,-3.92,-3.76,-3.6,-3.44,-3.28,-3.12,-2.96,-2.8,-2.64,-2.48,-2.32,-2.16,-2.0,-1.84,-1.68,-1.52,-1.36,-1.2,-1.04,-0.88,-0.72,-0.56,-0.4,-0.24,-0.08,0.08,0.24,0.4,0.56,0.72,0.88,1.04,1.2,1.36,1.52,1.68,1.84,2.0,2.16,2.32,2.48,2.64,2.8,2.96,3.12,3.28,3.44,3.6,3.76,3.92,4.08,4.24,4.4,4.56,4.72,4.88,5.04,5.2,5.36,5.52,5.68,5.84,6.0,6.16,6.32,6.48,6.64,6.8,6.96,7.12,7.28,7.44,7.6,7.76,7.92])
    # Per-sample bin weights.  Index 0 is the signal; 1..16 are the
    # backgrounds added on top of it in the stack.
    y10_sdETA_0_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,22.0916763836,40.3922441335,35.6758483214,32.044395546,28.1713909851,26.0547488645,22.4519560637,21.2728611106,18.52572755,17.5881843825,15.2095224946,13.956731607,12.6343407813,11.2628259991,10.5054186717,9.61700346053,8.33146060203,7.74600512189,7.04182574717,6.37039434337,5.50654311043,5.17082740854,4.28241219741,3.84434458639,3.34486662991,3.00505773164,2.69800120429,2.223087626])
    y10_sdETA_1_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.644172887547,3.91333399002,6.9501295767,9.79327628555,10.9726153107,11.2996491787,10.2795353412,9.71984596919,8.17677994739,7.24148367272])
    y10_sdETA_2_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.662754313473,7.36933241212,16.8768826742,24.8597028633,32.9320903091,39.4572897542,43.3812785466,44.2076145873,46.6465827242,45.3603446867,40.7429534308,31.5163855208,26.9170101922,20.5415536803,15.2095540575,11.2955236103,8.47409717118,6.38575782077,4.66903829144])
    y10_sdETA_3_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.291536785339,3.87227105074,9.25193869613,16.07136371,22.088969664,29.3777396914,35.9494636392,41.7948220452,49.719980523,54.12860612,52.8306252335,48.3158048935,41.7136525362,36.4626596877,31.072447725,25.5384666055,20.1650532373,16.4516294531,12.2380706799,8.69660600359,5.7975098336,4.13088206259,2.85468044219,1.94141662163,1.16046072568,0.885411060131,0.467632653767,0.225502546022])
    y10_sdETA_4_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,8.31910091163,16.8698974436,16.2683722595,15.1756422594,13.8751782406,12.4412739946,11.0253589871,9.29903742411,7.98161429922,6.63260178287,5.23729343206,4.0716765529,3.02466879994,2.21946679571,1.61352246379,1.01049657318,0.690820429241,0.44799456883,0.243754582908,0.157864226021,0.0829021712088,0.0503376010896,0.0128240062676,0.00789518338335,0.00394714757233,0.00197022806036,0.0,0.0])
    y10_sdETA_5_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2.16209978871,3.98233789334,3.52678664624,3.09273748778,2.60421733167,2.1920335512,1.74889135623,1.37509581771,1.03652574322,0.745142874876,0.519015656767,0.346860033477,0.227361921796,0.130339018103,0.0710893650958,0.039070297706,0.0199094089058,0.0115970283979,0.00630153392135,0.00226916572258,0.00126035879706,0.000504370454642,0.000755979140157,0.000252122091792,0.0,0.0,0.0,0.0])
    y10_sdETA_6_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.757801323999,1.31918122267,1.05552731652,0.842588672683,0.648478466347,0.485541326017,0.34591900715,0.225023660071,0.14975779389,0.0979009673338,0.0512255698704,0.0271876227665,0.0145912093428,0.00859080699702,0.00515475610633,0.0017188171778,0.000569216350578,0.000569993487968,0.0,0.000286301353131,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
    y10_sdETA_7_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0643296314281,0.0984671238263,0.0671363000682,0.0435057945916,0.0284634596304,0.0167314489324,0.00867745989312,0.00472931544587,0.00192257220931,0.0011215757503,0.000604806247598,0.000431904781183,0.000151239383951,0.000172771593242,8.64014073849e-05,8.63438675766e-05,2.16172449975e-05,0.0,2.15976152479e-05,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
    y10_sdETA_8_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00700371696988,0.00830171500248,0.00510904270934,0.0022068977061,0.00130513661909,0.000568267949524,0.000424047729026,5.67887549836e-05,0.000113580452121,5.68589357542e-05,2.82370391286e-05,2.81631435173e-05,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
    y10_sdETA_9_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,5.21372681349,7.82912472841,10.4207614034,13.0433553817,39.0918935478,13.0232248654,10.419107578,10.4330112495,2.60449843645])
    y10_sdETA_10_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.05238923318,18.9496204359,29.4832167291,42.1437813166,48.4376198867,49.5090957578,55.8105143453,47.3996192197,62.1406369585,53.7138507302,31.5995345216,16.8601289794,15.8029089279,8.42471615248,6.31932205018,2.10407895003,0.0,3.16002078734,3.16190540285])
    y10_sdETA_11_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.38083353168,18.4267088703,38.9315845919,50.9075336514,58.0450651134,74.6179739024,84.9993557978,80.6222979425,87.7417700159,83.6169672374,69.3334125583,58.5089224306,34.5514105315,26.0264607773,14.2829053096,13.1221861382,7.37202685925,3.68731293712,2.99227700234,2.76534925743,0.921450331968,1.15181819829,0.0,0.0,0.230097590283,0.0,0.0,0.0])
    y10_sdETA_12_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,38.9332171624,66.2363747776,50.4255861855,38.2401902466,29.4073104259,21.2418660887,16.1732351404,10.6886818605,7.30950864128,5.3158506487,3.57220425408,2.13253113937,1.32894015618,0.91361644556,0.553595158676,0.276956775591,0.0276984205974,0.110836216797,0.0,0.0553508597156,0.0,0.0276411938149,0.0276861634863,0.0,0.0,0.0,0.0,0.0])
    y10_sdETA_13_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,6.58325110175,11.5135187288,8.60097393345,5.82747972258,4.315238553,2.66117803224,1.9756767757,1.28039351422,0.695775365328,0.413384883469,0.221796176542,0.241899437587,0.0907137908915,0.0705727877277,0.0100433721581,0.0201712818095,0.0301952307321,0.0100789116426,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
    y10_sdETA_14_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2.02278221684,3.01301154384,2.13027547918,1.36376377765,0.96195193315,0.568683825104,0.345169568796,0.223509524107,0.10469137693,0.0594176915783,0.0537829883174,0.019797565187,0.00283431906706,0.00849898830914,0.00565621745651,0.00282576609704,0.0,0.00282698954379,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
    y10_sdETA_15_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.18409652559,0.202787789776,0.143069319471,0.0701257270632,0.0381396853571,0.0106563416458,0.010669139381,0.00762451102707,0.00151715620158,0.00305892697777,0.0,0.00152248849276,0.0,0.00152632580425,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
    y10_sdETA_16_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0139029420888,0.0166115524095,0.0066803034538,0.00307040062161,0.000902981664435,0.000902687814978,0.000541096237976,0.000722822678131,0.000180820513449,0.000180182440343,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
    # Creating a new Canvas
    fig = plt.figure(figsize=(12,6),dpi=80)
    frame = gridspec.GridSpec(1,1,right=0.7)
    pad = fig.add_subplot(frame[0])
    # Stack definition, one entry per drawn histogram, in the original
    # draw/legend order: (number of leading samples summed, label,
    # edge colour, line width, line style).
    all_weights = [
        y10_sdETA_0_weights, y10_sdETA_1_weights, y10_sdETA_2_weights,
        y10_sdETA_3_weights, y10_sdETA_4_weights, y10_sdETA_5_weights,
        y10_sdETA_6_weights, y10_sdETA_7_weights, y10_sdETA_8_weights,
        y10_sdETA_9_weights, y10_sdETA_10_weights, y10_sdETA_11_weights,
        y10_sdETA_12_weights, y10_sdETA_13_weights, y10_sdETA_14_weights,
        y10_sdETA_15_weights, y10_sdETA_16_weights,
    ]
    stack = [
        (17, r"$bg\_dip\_1600\_inf$", "#e5e5e5", 4, "dashdot"),
        (16, r"$bg\_dip\_1200\_1600$", "#f2f2f2", 4, "dashdot"),
        (15, r"$bg\_dip\_800\_1200$", "#ccc6aa", 4, "dashdot"),
        (14, r"$bg\_dip\_600\_800$", "#ccc6aa", 4, "dashdot"),
        (13, r"$bg\_dip\_400\_600$", "#c1bfa8", 4, "dashdot"),
        (12, r"$bg\_dip\_200\_400$", "#bab5a3", 4, "dashdot"),
        (11, r"$bg\_dip\_100\_200$", "#b2a596", 4, "dashdot"),
        (10, r"$bg\_dip\_0\_100$", "#b7a39b", 4, "dashdot"),
        (9, r"$bg\_vbf\_1600\_inf$", "#ad998c", 4, "dashdot"),
        (8, r"$bg\_vbf\_1200\_1600$", "#9b8e82", 4, "dashdot"),
        (7, r"$bg\_vbf\_800\_1200$", "#876656", 4, "dashdot"),
        (6, r"$bg\_vbf\_600\_800$", "#afcec6", 4, "dashdot"),
        (5, r"$bg\_vbf\_400\_600$", "#84c1a3", 4, "dashdot"),
        (4, r"$bg\_vbf\_200\_400$", "#89a8a0", 4, "dashdot"),
        (3, r"$bg\_vbf\_100\_200$", "#829e8c", 4, "dashdot"),
        (2, r"$bg\_vbf\_0\_100$", "#adbcc6", 4, "dashdot"),
        (1, r"$signal$", "#7a8e99", 3, "dashed"),
    ]
    # ``normed`` keyword dropped (removed from Axes.hist in matplotlib
    # 3.1; ``False`` was the default, so behaviour is unchanged).
    for count, label, edge, width, style in stack:
        pad.hist(x=xData, bins=xBinning, weights=sum(all_weights[:count]),
                 label=label, histtype="step", rwidth=1.0,
                 color=None, edgecolor=edge, linewidth=width,
                 linestyle=style, bottom=None, cumulative=False,
                 align="mid", orientation="vertical")
    # Axis: mathtext rendering (usetex off).  The label is wrapped in
    # $...$ so the TeX renders instead of showing the raw backslashes.
    plt.rc('text',usetex=False)
    plt.xlabel(r"$\Delta\eta ( j_{1} , j_{2} )$ ",\
        fontsize=16,color="black")
    plt.ylabel(r"$\mathrm{Events}$ $(\mathcal{L}_{\mathrm{int}} = 40.0\ \mathrm{fb}^{-1})$ ",\
        fontsize=16,color="black")
    # Boundary of y-axis: 10% headroom above the tallest stacked bin.
    ymax = sum(all_weights).max()*1.1
    ymin = 0  # linear scale
    plt.gca().set_ylim(ymin,ymax)
    # Log/Linear scale for both axes
    plt.gca().set_xscale("linear")
    plt.gca().set_yscale("linear")
    # Legend outside the axes on the right
    plt.legend(bbox_to_anchor=(1.05,1), loc=2, borderaxespad=0.)
    # Saving the image
    plt.savefig('../../HTML/MadAnalysis5job_0/selection_9.png')
    # Fixed: the generator wrote a PNG-named file into the PDF folder;
    # the '.pdf' extension makes savefig emit an actual PDF there.
    plt.savefig('../../PDF/MadAnalysis5job_0/selection_9.pdf')
    plt.savefig('../../DVI/MadAnalysis5job_0/selection_9.eps')
# Running!
# Entry point: generate and save the plot when executed as a script.
if __name__ == '__main__':
    selection_9()
| 116.881443
| 717
| 0.700992
| 5,398
| 22,675
| 2.809189
| 0.09837
| 0.363229
| 0.537523
| 0.707465
| 0.626616
| 0.626286
| 0.611976
| 0.606832
| 0.604326
| 0.604326
| 0
| 0.381582
| 0.072856
| 22,675
| 193
| 718
| 117.487047
| 0.339723
| 0.06258
| 0
| 0.185841
| 0
| 0.00885
| 0.05066
| 0.009425
| 0
| 0
| 0
| 0
| 0
| 1
| 0.00885
| false
| 0
| 0.035398
| 0
| 0.044248
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7303a689862ba54673f224cfd4a8d91188a886a6
| 35,892
|
py
|
Python
|
test/new_tests/test_operate_map.py
|
syaiful6/aerospike-client-python
|
59fa0d36aa899a164282643fe49b27d12aaf323f
|
[
"Apache-2.0"
] | 105
|
2015-01-07T09:51:13.000Z
|
2022-03-24T04:23:54.000Z
|
test/new_tests/test_operate_map.py
|
syaiful6/aerospike-client-python
|
59fa0d36aa899a164282643fe49b27d12aaf323f
|
[
"Apache-2.0"
] | 180
|
2015-01-01T19:29:50.000Z
|
2022-03-19T14:14:06.000Z
|
test/new_tests/test_operate_map.py
|
syaiful6/aerospike-client-python
|
59fa0d36aa899a164282643fe49b27d12aaf323f
|
[
"Apache-2.0"
] | 94
|
2015-01-21T19:17:48.000Z
|
2022-01-31T07:17:47.000Z
|
# -*- coding: utf-8 -*-
import sys
import pytest
from .test_base_class import TestBaseClass
aerospike = pytest.importorskip("aerospike")
try:
    import aerospike
    from aerospike import exception as e
# Fixed: bare ``except:`` also swallowed SystemExit/KeyboardInterrupt
# and unrelated errors; only a failed import should trigger the message.
except ImportError:
    print("Please install aerospike python client.")
    sys.exit(1)
# aerospike.OP_MAP_SET_POLICY
# aerospike.OP_MAP_PUT
# aerospike.OP_MAP_PUT_ITEMS
# aerospike.OP_MAP_INCREMENT
# aerospike.OP_MAP_DECREMENT
# aerospike.OP_MAP_SIZE
# aerospike.OP_MAP_CLEAR
# aerospike.OP_MAP_REMOVE_BY_KEY
# aerospike.OP_MAP_REMOVE_BY_KEY_LIST
# aerospike.OP_MAP_REMOVE_BY_KEY_RANGE
# aerospike.OP_MAP_REMOVE_BY_VALUE
# aerospike.OP_MAP_REMOVE_BY_VALUE_LIST
# aerospike.OP_MAP_REMOVE_BY_VALUE_RANGE
# aerospike.OP_MAP_REMOVE_BY_INDEX
# aerospike.OP_MAP_REMOVE_BY_INDEX_RANGE
# aerospike.OP_MAP_REMOVE_BY_RANK
# aerospike.OP_MAP_REMOVE_BY_RANK_RANGE
# aerospike.OP_MAP_GET_BY_KEY
# aerospike.OP_MAP_GET_BY_KEY_RANGE
# aerospike.OP_MAP_GET_BY_VALUE
# aerospike.OP_MAP_GET_BY_VALUE_RANGE
# aerospike.OP_MAP_GET_BY_INDEX
# aerospike.OP_MAP_GET_BY_INDEX_RANGE
# aerospike.OP_MAP_GET_BY_RANK
# aerospike.OP_MAP_GET_BY_RANK_RANGE
class TestOperate(object):
def setup_class(cls):
    """Open a shared class-level client with strict type checking
    disabled (``strict_types: False``); closed in ``teardown_class``."""
    cls.client_no_typechecks = TestBaseClass.get_new_connection(
        {'strict_types': False})
def teardown_class(cls):
    """Close the class-level client opened in ``setup_class``.

    Fixed: use ``cls`` instead of the hard-coded ``TestOperate`` name,
    so a subclass closes its own client rather than the base class's.
    """
    cls.client_no_typechecks.close()
@pytest.fixture(autouse=True)
def setup(self, request, as_connection):
    """Per-test fixture: populate two key-ordered map bins
    ('test_map' and 'test_map2') with the same five entries, and
    register a finalizer that removes the record afterwards.
    """
    key = ('test', 'demo', 'test_op_map')
    self.test_map_key = key
    test_map = {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5}
    self.test_map = test_map
    self.test_map_bin = 'test_map'
    key_order_policy = {'map_order': aerospike.MAP_KEY_ORDERED}
    as_connection.map_put_items(
        key, bin='test_map', items=test_map,
        map_policy=key_order_policy)
    as_connection.map_put_items(
        key, bin='test_map2', items=test_map,
        map_policy=key_order_policy)

    def teardown():
        """Best-effort removal of the test record."""
        # Fixed: the bare ``except:`` is narrowed to ``Exception`` so
        # SystemExit/KeyboardInterrupt are not swallowed; removal stays
        # best-effort (the record may already be gone).
        try:
            as_connection.remove(key)
        except Exception:
            pass
    # (A stray no-op '"""Teardown Method"""' string literal that sat
    # between the function and this registration was removed.)
    request.addfinalizer(teardown)
# Three scenarios: put + increment + get (98), put + decrement + get
# (21), and put_items + get-by-key returning a [key, value] pair.
@pytest.mark.parametrize("key, llist, expected", [
    (('test', 'map_test', 1),
     [{"op": aerospike.OP_MAP_PUT,
       "bin": "my_map",
       "key": "age",
       "val": 97},
      {"op": aerospike.OP_MAP_INCREMENT,
       "bin": "my_map",
       "key": "age",
       "val": 1},
      {"op": aerospike.OP_MAP_GET_BY_KEY,
       "bin": "my_map",
       "key": "age",
       "return_type": aerospike.MAP_RETURN_VALUE}],
     {'my_map': 98}),
    (('test', 'map_test', 1),
     [{"op": aerospike.OP_MAP_PUT,
       "bin": "my_map",
       "key": "age",
       "val": 22},
      {"op": aerospike.OP_MAP_DECREMENT,
       "bin": "my_map",
       "key": "age",
       "val": 1},
      {"op": aerospike.OP_MAP_GET_BY_KEY,
       "bin": "my_map",
       "key": "age",
       "return_type": aerospike.MAP_RETURN_VALUE}],
     {'my_map': 21}),
    (('test', 'map_test', 1),
     [{"op": aerospike.OP_MAP_PUT_ITEMS,
       "bin": "my_map",
       "val": {'name': 'bubba', 'occupation': 'dancer'}},
      {"op": aerospike.OP_MAP_GET_BY_KEY,
       "bin": "my_map",
       "key": "name",
       "return_type": aerospike.MAP_RETURN_KEY_VALUE}],
     {'my_map': ['name', 'bubba']})
])
def test_pos_operate_with_correct_paramters(self, key, llist, expected):
    """
    Invoke operate() with correct parameters
    """
    # operate() returns (key, meta, bins); only the bins are asserted.
    key, _, bins = self.as_connection.operate(key, llist)
    assert bins == expected
    self.as_connection.remove(key)
# NOTE(review): skipped without a reason — presumably the
# operate_ordered() result format changed at some point; confirm and
# either update the expectations or add a skip reason.
@pytest.mark.skip()
# Same three scenarios as the unordered test, but operate_ordered()
# returns a list of per-op results instead of a merged bins dict.
@pytest.mark.parametrize("key, llist, expected", [
    (('test', 'map_test', 1),
     [{"op": aerospike.OP_MAP_PUT,
       "bin": "my_map",
       "key": "age",
       "val": 97},
      {"op": aerospike.OP_MAP_INCREMENT,
       "bin": "my_map",
       "key": "age",
       "val": 1},
      {"op": aerospike.OP_MAP_GET_BY_KEY,
       "bin": "my_map",
       "key": "age",
       "return_type": aerospike.MAP_RETURN_VALUE}],
     [None, None, ('my_map', 98)]),
    (('test', 'map_test', 1),
     [{"op": aerospike.OP_MAP_PUT,
       "bin": "my_map",
       "key": "age",
       "val": 22},
      {"op": aerospike.OP_MAP_DECREMENT,
       "bin": "my_map",
       "key": "age",
       "val": 1},
      {"op": aerospike.OP_MAP_GET_BY_KEY,
       "bin": "my_map",
       "key": "age",
       "return_type": aerospike.MAP_RETURN_VALUE}],
     [None, None, ('my_map', 21)]),
    (('test', 'map_test', 1),
     [{"op": aerospike.OP_MAP_PUT_ITEMS,
       "bin": "my_map",
       "val": {'name': 'bubba', 'occupation': 'dancer'}},
      {"op": aerospike.OP_MAP_GET_BY_KEY,
       "bin": "my_map",
       "key": "name",
       "return_type": aerospike.MAP_RETURN_KEY_VALUE}],
     [None, ('my_map', [('name', 'bubba')])])
])
def test_pos_operate_ordered_with_correct_paramters(self, key, llist, expected):
    """
    Invoke operate() with correct parameters
    """
    key, _, bins = self.as_connection.operate_ordered(key, llist)
    assert bins == expected
    self.as_connection.remove(key)
def test_pos_operate_set_map_policy(self):
    """Apply a map policy to a bin via OP_MAP_SET_POLICY (smoke test:
    only checks the call succeeds).

    Fixed: removed a dead trailing ``pass`` statement.
    NOTE(review): the setup fixture uses the 'map_order' policy key
    while this test uses 'map_sort' — confirm against the client docs
    which key the bound client version actually honours.
    """
    key = ('test', 'map_test', 1)
    llist = [{"op": aerospike.OP_MAP_SET_POLICY,
              "bin": "my_map",
              "map_policy": {'map_sort': aerospike.MAP_KEY_ORDERED}}]
    key, _, _ = self.as_connection.operate(key, llist)
    self.as_connection.remove(key)
def test_pos_map_clear(self):
    """Put one map entry, verify size == 1, clear the map, verify
    size == 0.

    Fixed: removed a redundant duplicate OP_MAP_SIZE operate() call
    that ran between the size assertion and the clear.
    """
    key = ('test', 'map_test', 1)
    binname = 'my_map'
    put_ops = [{"op": aerospike.OP_MAP_PUT,
                "bin": binname,
                "key": "age",
                "val": 97}]
    key, _, _ = self.as_connection.operate(key, put_ops)
    size_ops = [{"op": aerospike.OP_MAP_SIZE,
                 "bin": binname}]
    key, _, bins = self.as_connection.operate(key, size_ops)
    assert bins == {binname: 1}
    clear_ops = [{"op": aerospike.OP_MAP_CLEAR,
                  "bin": binname}]
    key, _, _ = self.as_connection.operate(key, clear_ops)
    key, _, bins = self.as_connection.operate(key, size_ops)
    assert bins == {binname: 0}
    self.as_connection.remove(key)
def test_map_remove_by_index_range_correct(self):
    """Removing 3 entries starting at index 1 from the key-ordered
    fixture map {'a':1..'e':5} drops 'b', 'c', 'd', leaving {'a','e'}."""
    ops = [{
        'op': aerospike.OP_MAP_REMOVE_BY_INDEX_RANGE,
        'bin': self.test_map_bin,
        'index': 1,
        'val': 3,  # count of entries to remove
    }]
    _, _, bins = self.as_connection.operate(
        self.test_map_key, ops)
    # Re-read the record to verify the server-side state.
    _, _, bins = self.as_connection.get(self.test_map_key)
    assert bins[self.test_map_bin] == {'a': 1, 'e': 5}
# Omitting the required 'index' entry should raise ParamError; marked
# xfail because an older client version accepted it.
@pytest.mark.xfail(reason="previously worked")
def test_map_remove_by_index_range_no_index(self):
    ops = [{
        'op': aerospike.OP_MAP_REMOVE_BY_INDEX_RANGE,
        'bin': self.test_map_bin,
        'val': 3,
    }]
    with pytest.raises(e.ParamError):
        _, _, bins = self.as_connection.operate(
            self.test_map_key, ops)
def test_op_map_put_existing_key(self):
    """OP_MAP_PUT on an existing key overwrites its value.

    Fixed: the final assertion used a hard-coded ``'test_map'`` literal;
    it now uses ``self.test_map_bin``, consistent with the sibling
    tests, so renaming the fixture bin cannot silently break it.
    """
    result_map = self.test_map.copy()
    result_map['a'] = 'b'
    ops = [{
        'op': aerospike.OP_MAP_PUT,
        'bin': self.test_map_bin,
        'key': 'a',
        'val': 'b'
    }]
    self.as_connection.operate(self.test_map_key, ops)
    _, _, bins = self.as_connection.get(self.test_map_key)
    assert bins[self.test_map_bin] == result_map
def test_op_map_put_new_key(self):
    """OP_MAP_PUT with a previously absent key adds the entry."""
    expected = dict(self.test_map, new='value')
    ops = [{
        'op': aerospike.OP_MAP_PUT,
        'bin': self.test_map_bin,
        'key': 'new',
        'val': 'value'
    }]
    self.as_connection.operate(self.test_map_key, ops)
    _, _, bins = self.as_connection.get(self.test_map_key)
    assert bins[self.test_map_bin] == expected
@pytest.mark.parametrize(
"required_key",
(
'bin',
'key',
'val'
))
def test_op_map_put_missing_required_keys(self, required_key):
op = {
'op': aerospike.OP_MAP_PUT,
'bin': 'test_map',
'key': 'new',
'val': 'value'
}
del op[required_key]
ops = [op]
with pytest.raises(e.ParamError):
self.as_connection.operate(self.test_map_key, ops)
def test_op_map_put_items(self):
result_map = self.test_map.copy()
result_map['new'] = 'value'
result_map['new2'] = 'value2'
ops = [{
'op': aerospike.OP_MAP_PUT_ITEMS,
'bin': self.test_map_bin,
'val': {"new": "value", "new2": "value2"}
}]
self.as_connection.operate(self.test_map_key, ops)
_, _, bins = self.as_connection.get(self.test_map_key)
assert bins[self.test_map_bin] == result_map
@pytest.mark.parametrize(
"key",
(
'bin',
'val'
))
def test_op_map_put_items_missing_required_entry(self, key):
op = {
'op': aerospike.OP_MAP_PUT_ITEMS,
'bin': 'test_map',
'val': {"new": "value", "new2": "value2"}
}
del op[key]
ops = [op]
with pytest.raises(e.ParamError):
self.as_connection.operate(self.test_map_key, ops)
def test_map_increment(self):
    """OP_MAP_INCREMENT adds 'val' to the numeric entry stored at 'key'."""
    expected = self.test_map.copy()
    expected['a'] += 2
    increment_op = {
        'op': aerospike.OP_MAP_INCREMENT,
        'bin': self.test_map_bin,
        'key': 'a',
        'val': 2
    }
    self.as_connection.operate(self.test_map_key, [increment_op])
    _, _, bins = self.as_connection.get(self.test_map_key)
    assert bins[self.test_map_bin] == expected
@pytest.mark.parametrize(
"val",
(
"str",
[1, 2, 3],
(),
{'a': 'b'}
))
def test_map_increment_invalid_type(self, val):
ops = [{
'op': aerospike.OP_MAP_INCREMENT,
'bin': self.test_map_bin,
'key': 'a',
'val': val
}]
with pytest.raises(Exception):
self.as_connection.operate(self.test_map_key, ops)
@pytest.mark.parametrize(
"key",
(
'bin',
'key',
'val'
))
def test_op_map_put_incr_missing_required_entry(self, key):
op = {
'op': aerospike.OP_MAP_INCREMENT,
'bin': self.test_map_bin,
'key': 'a',
'val': 1
}
del op[key]
ops = [op]
with pytest.raises(e.ParamError):
self.as_connection.operate(self.test_map_key, ops)
def test_map_decrement(self):
    """OP_MAP_DECREMENT subtracts 'val' from the numeric entry at 'key'."""
    expected = self.test_map.copy()
    expected['a'] -= 2
    decrement_op = {
        'op': aerospike.OP_MAP_DECREMENT,
        'bin': self.test_map_bin,
        'key': 'a',
        'val': 2
    }
    self.as_connection.operate(self.test_map_key, [decrement_op])
    _, _, bins = self.as_connection.get(self.test_map_key)
    assert bins[self.test_map_bin] == expected
@pytest.mark.parametrize(
"val",
(
"str",
[1, 2, 3],
(),
{'a': 'b'}
))
def test_map_decrement_invalid_type(self, val):
ops = [{
'op': aerospike.OP_MAP_DECREMENT,
'bin': self.test_map_bin,
'key': 'a',
'val': val
}]
with pytest.raises(Exception):
self.as_connection.operate(self.test_map_key, ops)
@pytest.mark.parametrize(
"key",
(
'bin',
'key',
'val'
))
def test_op_map_put_decr_missing_required_entry(self, key):
op = {
'op': aerospike.OP_MAP_DECREMENT,
'bin': self.test_map_bin,
'key': 'a',
'val': 1
}
del op[key]
ops = [op]
with pytest.raises(e.ParamError):
self.as_connection.operate(self.test_map_key, ops)
def test_map_size(self):
    """OP_MAP_SIZE returns the number of entries in the map bin."""
    size_op = {
        'op': aerospike.OP_MAP_SIZE,
        'bin': self.test_map_bin
    }
    _, _, bins = self.as_connection.operate(self.test_map_key, [size_op])
    expected_size = len(self.test_map)
    assert bins[self.test_map_bin] == expected_size
def test_map_size_no_bin(self):
op = {
'op': aerospike.OP_MAP_SIZE,
}
ops = [op]
with pytest.raises(e.ParamError):
self.as_connection.operate(self.test_map_key, ops)
def test_map_size_non_existent_bin(self):
op = {
'op': aerospike.OP_MAP_SIZE,
'bin': 'fake_bin'
}
ops = [op]
_, _, bins = self.as_connection.operate(self.test_map_key, ops)
assert bins['fake_bin'] is None
def test_map_clear(self):
    """OP_MAP_CLEAR removes every entry from the map bin."""
    clear_op = {
        'op': aerospike.OP_MAP_CLEAR,
        'bin': self.test_map_bin
    }
    # Sanity check: the fixture map is fully populated before clearing.
    _, _, bins = self.as_connection.get(self.test_map_key)
    assert len(bins[self.test_map_bin]) == len(self.test_map)
    self.as_connection.operate(self.test_map_key, [clear_op])
    _, _, bins = self.as_connection.get(self.test_map_key)
    assert len(bins[self.test_map_bin]) == 0
def test_map_remove_by_key(self):
    """OP_MAP_REMOVE_BY_KEY deletes a single map entry by its key."""
    ops = [{
        'op': aerospike.OP_MAP_REMOVE_BY_KEY,
        'bin': self.test_map_bin,
        'key': 'c'
    }]
    # Removed the dead `result_map` local (computed but never used) and
    # the unused unpack of operate()'s return value.
    self.as_connection.operate(self.test_map_key, ops)
    _, _, bins = self.as_connection.get(self.test_map_key)
    assert 'c' not in bins[self.test_map_bin]
def test_map_remove_by_key_ret_key(self):
    """MAP_RETURN_KEY makes the remove op return the removed key."""
    ops = [{
        'op': aerospike.OP_MAP_REMOVE_BY_KEY,
        # Was the hard-coded literal 'test_map'; the assertion below
        # already uses self.test_map_bin, so both must name the same bin.
        'bin': self.test_map_bin,
        'key': 'c',
        'return_type': aerospike.MAP_RETURN_KEY
    }]
    _, _, bins = self.as_connection.operate(self.test_map_key, ops)
    assert bins[self.test_map_bin] == 'c'
def test_map_remove_by_key_ret_val(self):
    """MAP_RETURN_VALUE makes the remove op return the removed value."""
    ops = [{
        'op': aerospike.OP_MAP_REMOVE_BY_KEY,
        # Was the hard-coded literal 'test_map'; use the shared attribute
        # like the sibling tests do. Removed the dead `result_map` local.
        'bin': self.test_map_bin,
        'key': 'c',
        'return_type': aerospike.MAP_RETURN_VALUE
    }]
    _, _, bins = self.as_connection.operate(self.test_map_key, ops)
    assert bins[self.test_map_bin] == self.test_map['c']
def test_map_remove_by_key_ret_key_val(self):
    """MAP_RETURN_KEY_VALUE returns the removed [key, value] pair."""
    # Removed the dead `result_map` local (computed but never used).
    ops = [{
        'op': aerospike.OP_MAP_REMOVE_BY_KEY,
        'bin': self.test_map_bin,
        'key': 'c',
        'return_type': aerospike.MAP_RETURN_KEY_VALUE
    }]
    _, _, bins = self.as_connection.operate(self.test_map_key, ops)
    assert bins[self.test_map_bin] == ['c', self.test_map['c']]
def test_map_remove_by_key_ret_key_val_test_with_list_read_odd(self):
    """A map remove op mixed with a plain read of an odd-length list bin.

    Verifies the read result is not corrupted by the map op's
    key/value return in the same operate() call.
    """
    # Removed the dead `result_map` local (computed but never used).
    self.as_connection.put(self.test_map_key, {'cool_list': [1, 2, 3]})
    ops = [
        {
            'op': aerospike.OPERATOR_READ,
            'bin': 'cool_list'
        },
        {
            'op': aerospike.OP_MAP_REMOVE_BY_KEY,
            'bin': self.test_map_bin,
            'key': 'c',
            'return_type': aerospike.MAP_RETURN_KEY_VALUE
        }
    ]
    _, _, bins = self.as_connection.operate(self.test_map_key, ops)
    assert bins['cool_list'] == [1, 2, 3]
def test_map_remove_by_key_ret_key_val_test_with_list_read_even(self):
    """A map remove op mixed with a plain read of an even-length list bin.

    Even-length counterpart of the `_odd` test above; exercises a
    different element parity in the mixed operate() result.
    """
    # Removed the dead `result_map` local (computed but never used).
    self.as_connection.put(self.test_map_key, {'cool_list': [1, 2, 3, 4]})
    ops = [
        {
            'op': aerospike.OPERATOR_READ,
            'bin': 'cool_list'
        },
        {
            'op': aerospike.OP_MAP_REMOVE_BY_KEY,
            'bin': self.test_map_bin,
            'key': 'c',
            'return_type': aerospike.MAP_RETURN_KEY_VALUE
        }
    ]
    _, _, bins = self.as_connection.operate(self.test_map_key, ops)
    assert bins['cool_list'] == [1, 2, 3, 4]
@pytest.mark.parametrize(
"entry", ('bin', 'key'))
def test_map_remove_by_key_missing_required_entries(self, entry):
op = {
'op': aerospike.OP_MAP_REMOVE_BY_KEY,
'bin': self.test_map_bin,
'key': 'c',
'return_type': aerospike.MAP_RETURN_KEY_VALUE
}
del op[entry]
ops = [op]
with pytest.raises(e.ParamError):
self.as_connection.operate(self.test_map_key, ops)
@pytest.mark.parametrize(
"entry", ([], {}, ()))
def test_map_remove_by_key_invalid_bin(self, entry):
op = {
'op': aerospike.OP_MAP_REMOVE_BY_KEY,
'bin': entry,
'key': 'a',
'return_type': aerospike.MAP_RETURN_KEY_VALUE
}
ops = [op]
with pytest.raises(e.ParamError):
self.as_connection.operate(self.test_map_key, ops)
def test_map_remove_by_key_list(self):
    """OP_MAP_REMOVE_BY_KEY_LIST removes every key named in 'val'.

    Bug fix: copy the fixture map before deleting from it. The original
    did `result_map = self.test_map` (no .copy()) and then deleted keys
    from it, mutating the shared fixture map and corrupting every test
    that runs after this one.
    """
    result_map = self.test_map.copy()
    del result_map['a']
    del result_map['c']
    del result_map['d']
    ops = [{
        'op': aerospike.OP_MAP_REMOVE_BY_KEY_LIST,
        'bin': self.test_map_bin,
        'val': ['a', 'c', 'd'],
    }]
    self.as_connection.operate(self.test_map_key, ops)
    _, _, bins = self.as_connection.get(self.test_map_key)
    assert bins[self.test_map_bin] == result_map
@pytest.mark.parametrize(
"val", ('a', {'a': 'b'}, 2, ('a', 'b')))
def test_map_remove_by_key_list_wrong_val_type(self, val):
ops = [{
'op': aerospike.OP_MAP_REMOVE_BY_KEY_LIST,
'bin': self.test_map_bin,
'val': val,
}]
with pytest.raises(Exception):
self.as_connection.operate(self.test_map_key, ops)
@pytest.mark.parametrize(
"val", ('bin', 'val'))
def test_map_remove_by_key_missing_required_entry(self, val):
ops = [{
'op': aerospike.OP_MAP_REMOVE_BY_KEY_LIST,
'bin': self.test_map_bin,
'val': val,
}]
del ops[0][val]
with pytest.raises(e.ParamError):
self.as_connection.operate(self.test_map_key, ops)
def test_map_remove_by_key_range(self):
ops = [{
'op': aerospike.OP_MAP_REMOVE_BY_KEY_RANGE,
'bin': self.test_map_bin,
'key': 'b',
'val': 'd',
}]
result_map = self.test_map.copy()
del result_map['b']
del result_map['c']
self.as_connection.operate(self.test_map_key, ops)
_, _, bins = self.as_connection.get(self.test_map_key)
assert result_map == bins[self.test_map_bin]
@pytest.mark.parametrize(
'entry', ('bin', 'key', 'val'))
def test_map_remove_by_key_range_missing_entry(self, entry):
op = {
'op': aerospike.OP_MAP_REMOVE_BY_KEY_RANGE,
'bin': self.test_map_bin,
'key': 'b',
'val': 'd'
}
del op[entry]
ops = [op]
with pytest.raises(e.ParamError):
self.as_connection.operate(self.test_map_key, ops)
def test_map_remove_by_value(self):
    """OP_MAP_REMOVE_BY_VALUE removes the entry holding 'val', returning its key."""
    expected = self.test_map.copy()
    expected.pop('d')
    remove_op = {
        'op': aerospike.OP_MAP_REMOVE_BY_VALUE,
        'bin': self.test_map_bin,
        'val': 4,
        'return_type': aerospike.MAP_RETURN_KEY
    }
    _, _, bins = self.as_connection.operate(self.test_map_key, [remove_op])
    assert bins[self.test_map_bin] == ['d']
    _, _, bins = self.as_connection.get(self.test_map_key)
    assert expected == bins[self.test_map_bin]
@pytest.mark.parametrize(
'entry', ('bin', 'val'))
def test_map_remove_by_value_missing_required_entry(self, entry):
ops = [{
'op': aerospike.OP_MAP_REMOVE_BY_VALUE,
'bin': self.test_map_bin,
'val': 4,
'return_type': aerospike.MAP_RETURN_KEY
}]
del ops[0][entry]
with pytest.raises(e.ParamError):
self.as_connection.operate(self.test_map_key, ops)
def test_map_remove_by_value_list(self):
result_map = self.test_map.copy()
del result_map['b']
del result_map['d']
del result_map['e']
ops = [{
'op': aerospike.OP_MAP_REMOVE_BY_VALUE_LIST,
'bin': self.test_map_bin,
'val': [2, 4, 5],
'return_type': aerospike.MAP_RETURN_KEY
}]
_, _, bins = self.as_connection.operate(self.test_map_key, ops)
assert bins[self.test_map_bin] == ['b', 'd', 'e']
_, _, bins = self.as_connection.get(self.test_map_key)
assert result_map == bins[self.test_map_bin]
@pytest.mark.parametrize(
'entry', ('bin', 'val'))
def test_map_remove_by_value_list_missing_required_entry(self, entry):
ops = [{
'op': aerospike.OP_MAP_REMOVE_BY_VALUE_LIST,
'bin': self.test_map_bin,
'val': [2, 4, 5],
'return_type': aerospike.MAP_RETURN_KEY
}]
del ops[0][entry]
with pytest.raises(e.ParamError):
self.as_connection.operate(self.test_map_key, ops)
def test_map_remove_by_value_range(self):
result_map = self.test_map.copy()
del result_map['b']
del result_map['c']
del result_map['d']
ops = [{
'op': aerospike.OP_MAP_REMOVE_BY_VALUE_RANGE,
'bin': self.test_map_bin,
'val': 2,
'range': 5,
'return_type': aerospike.MAP_RETURN_KEY
}]
_, _, bins = self.as_connection.operate(self.test_map_key, ops)
assert bins[self.test_map_bin] == ['b', 'c', 'd']
_, _, bins = self.as_connection.get(self.test_map_key)
assert result_map == bins[self.test_map_bin]
@pytest.mark.parametrize(
'entry', ('bin', 'val', 'range'))
def test_map_remove_by_value_range_missing_required_entry(self, entry):
op = {
'op': aerospike.OP_MAP_REMOVE_BY_VALUE_RANGE,
'bin': self.test_map_bin,
'val': 2,
'range': 5,
'return_type': aerospike.MAP_RETURN_KEY
}
del op[entry]
ops = [op]
with pytest.raises(e.ParamError):
self.as_connection.operate(self.test_map_key, ops)
def test_map_remove_by_index(self):
result_map = self.test_map.copy()
del result_map['b']
ops = [{
"op": aerospike.OP_MAP_REMOVE_BY_INDEX,
"bin": self.test_map_bin,
"index": 1,
"return_type": aerospike.MAP_RETURN_KEY
}]
_, _, res = self.as_connection.operate(self.test_map_key, ops)
assert res[self.test_map_bin] == 'b'
_, _, bins = self.as_connection.get(self.test_map_key)
assert bins[self.test_map_bin] == result_map
@pytest.mark.parametrize(
'entry', ('bin', 'index'))
def test_map_remove_by_index_missing_required_entry(self, entry):
op = {
"op": aerospike.OP_MAP_REMOVE_BY_INDEX,
"bin": self.test_map_bin,
"index": 1,
"return_type": aerospike.MAP_RETURN_KEY
}
del op[entry]
ops = [op]
with pytest.raises(e.ParamError):
self.as_connection.operate(self.test_map_key, ops)
def test_map_remove_by_index_range(self):
result_map = self.test_map.copy()
del result_map['b']
del result_map['c']
del result_map['d']
ops = [{
"op": aerospike.OP_MAP_REMOVE_BY_INDEX_RANGE,
"bin": self.test_map_bin,
"index": 1,
"val": 3,
"return_type": aerospike.MAP_RETURN_KEY
}]
_, _, res = self.as_connection.operate(self.test_map_key, ops)
assert res[self.test_map_bin] == ['b', 'c', 'd']
_, _, bins = self.as_connection.get(self.test_map_key)
assert bins[self.test_map_bin] == result_map
@pytest.mark.parametrize(
'entry', ('bin', 'index', 'val'))
def test_map_remove_by_index_range_missing_required_entry(self, entry):
op = {
"op": aerospike.OP_MAP_REMOVE_BY_INDEX_RANGE,
"bin": self.test_map_bin,
"index": 1,
"val": 3,
"return_type": aerospike.MAP_RETURN_KEY
}
del op[entry]
ops = [op]
with pytest.raises(e.ParamError):
self.as_connection.operate(self.test_map_key, ops)
@pytest.mark.xfail(reason="This works, but shouldn't")
def test_map_remove_by_index_range_no_strict_types_val(self):
result_map = self.test_map.copy()
del result_map['b']
del result_map['c']
del result_map['d']
ops = [{
"op": aerospike.OP_MAP_REMOVE_BY_INDEX_RANGE,
"bin": self.test_map_bin,
"index": 1,
"val": 3,
"return_type": aerospike.MAP_RETURN_KEY
}]
_, _, res = self.client_no_typechecks.operate(self.test_map_key, ops)
assert res[self.test_map_bin] == ['b', 'c', 'd']
_, _, bins = self.as_connection.get(self.test_map_key)
assert bins[self.test_map_bin] == result_map
def test_map_remove_by_rank(self):
'''
remove the 3rd item ordered by val
'''
result_map = self.test_map.copy()
del result_map['c']
ops = [{
"op": aerospike.OP_MAP_REMOVE_BY_RANK,
"bin": self.test_map_bin,
"index": 2,
}]
self.as_connection.operate(self.test_map_key, ops)
_, _, bins = self.as_connection.get(self.test_map_key)
assert bins[self.test_map_bin] == result_map
@pytest.mark.parametrize(
'entry', ('bin', 'index'))
def test_map_remove_by_rank_missing_required_entry(self, entry):
op = {
"op": aerospike.OP_MAP_REMOVE_BY_RANK,
"bin": self.test_map_bin,
"index": 2,
}
del op[entry]
ops = [op]
with pytest.raises(e.ParamError):
self.as_connection.operate(self.test_map_key, ops)
def test_map_remove_by_rank_range(self):
result_map = self.test_map.copy()
del result_map['c']
del result_map['d']
ops = [{
"op": aerospike.OP_MAP_REMOVE_BY_RANK_RANGE,
"bin": self.test_map_bin,
"index": 2,
"val": 2,
"return_type": aerospike.MAP_RETURN_KEY
}]
_, _, res = self.as_connection.operate(self.test_map_key, ops)
assert res[self.test_map_bin] == ['c', 'd']
_, _, bins = self.as_connection.get(self.test_map_key)
assert bins[self.test_map_bin] == result_map
def test_map_remove_by_rank_range_val_instead_of_range(self):
    """Duplicate of test_map_remove_by_rank_range, kept to flag a client quirk.

    NOTE(review): the original docstring said "TODO: Should not work" -
    presumably the author expected the count for
    OP_MAP_REMOVE_BY_RANK_RANGE to require a 'range' key rather than
    'val', yet 'val' is accepted. Confirm against the client's op
    parsing before changing.
    """
    result_map = self.test_map.copy()
    del result_map['c']
    del result_map['d']
    ops = [{
        "op": aerospike.OP_MAP_REMOVE_BY_RANK_RANGE,
        "bin": self.test_map_bin,
        "index": 2,
        "val": 2,
        "return_type": aerospike.MAP_RETURN_KEY
    }]
    _, _, res = self.as_connection.operate(self.test_map_key, ops)
    assert res[self.test_map_bin] == ['c', 'd']
    _, _, bins = self.as_connection.get(self.test_map_key)
    assert bins[self.test_map_bin] == result_map
def test_op_map_get_by_key(self):
ops = [{
'op': aerospike.OP_MAP_GET_BY_KEY,
'bin': self.test_map_bin,
'key': 'b',
'return_type': aerospike.MAP_RETURN_VALUE
}]
_, _, res = self.as_connection.operate(self.test_map_key, ops)
assert res[self.test_map_bin] == 2
@pytest.mark.parametrize(
'entry', ('bin', 'key'))
def test_op_map_get_by_key_missing_required_entry(self, entry):
op = {
'op': aerospike.OP_MAP_GET_BY_KEY,
'bin': self.test_map_bin,
'key': 'b',
'return_type': aerospike.MAP_RETURN_VALUE
}
del op[entry]
ops = [op]
with pytest.raises(e.ParamError):
self.as_connection.operate(self.test_map_key, ops)
def test_op_map_get_by_key_range(self):
ops = [{
'op': aerospike.OP_MAP_GET_BY_KEY_RANGE,
'bin': self.test_map_bin,
'key': 'b',
'range': 'e',
'return_type': aerospike.MAP_RETURN_VALUE
}]
_, _, res = self.as_connection.operate(self.test_map_key, ops)
assert res[self.test_map_bin] == [2, 3, 4]
@pytest.mark.parametrize(
'entry', ('bin', 'key', 'range'))
def test_op_map_get_by_key_range_missing_required_entry(self, entry):
'''
TODO Figure out if this is correct
'''
op = {
'op': aerospike.OP_MAP_GET_BY_KEY_RANGE,
'bin': self.test_map_bin,
'key': 'b',
'range': 'e',
'return_type': aerospike.MAP_RETURN_VALUE
}
del op[entry]
ops = [op]
with pytest.raises(e.ParamError):
self.as_connection.operate(self.test_map_key, ops)
def test_op_map_get_by_value(self):
ops = [{
'op': aerospike.OP_MAP_GET_BY_VALUE,
'bin': self.test_map_bin,
'val': 2,
'return_type': aerospike.MAP_RETURN_KEY
}]
_, _, res = self.as_connection.operate(self.test_map_key, ops)
assert res[self.test_map_bin] == ['b']
@pytest.mark.parametrize(
'entry', ('bin', 'val'))
def test_op_map_get_by_value_missing_required_entry(self, entry):
op = {
'op': aerospike.OP_MAP_GET_BY_VALUE,
'bin': self.test_map_bin,
'val': 2,
'return_type': aerospike.MAP_RETURN_KEY
}
del op[entry]
ops = [op]
with pytest.raises(e.ParamError):
self.as_connection.operate(self.test_map_key, ops)
def test_op_map_get_by_value_range(self):
ops = [{
'op': aerospike.OP_MAP_GET_BY_VALUE_RANGE,
'bin': self.test_map_bin,
'val': 2,
'range': 5,
'return_type': aerospike.MAP_RETURN_KEY
}]
_, _, res = self.as_connection.operate(self.test_map_key, ops)
assert res[self.test_map_bin] == ['b', 'c', 'd']
@pytest.mark.parametrize(
'entry', ('bin', 'val', 'range'))
def test_op_map_get_by_value_range_missing_required_entry(self, entry):
op = {
'op': aerospike.OP_MAP_GET_BY_VALUE_RANGE,
'bin': self.test_map_bin,
'val': 2,
'range': 5,
'return_type': aerospike.MAP_RETURN_KEY
}
del op[entry]
ops = [op]
with pytest.raises(e.ParamError):
self.as_connection.operate(self.test_map_key, ops)
def test_op_map_get_by_index(self):
ops = [{
'op': aerospike.OP_MAP_GET_BY_INDEX,
'bin': self.test_map_bin,
'index': 2,
'return_type': aerospike.MAP_RETURN_KEY
}]
_, _, res = self.as_connection.operate(self.test_map_key, ops)
assert res[self.test_map_bin] == 'c'
@pytest.mark.parametrize(
'entry', ('bin', 'index'))
def test_op_map_get_by_index_missing_required_entry(self, entry):
op = {
'op': aerospike.OP_MAP_GET_BY_INDEX,
'bin': self.test_map_bin,
'index': 2,
'return_type': aerospike.MAP_RETURN_KEY
}
del op[entry]
ops = [op]
with pytest.raises(e.ParamError):
self.as_connection.operate(self.test_map_key, ops)
def test_op_map_get_by_index_range(self):
ops = [{
"op": aerospike.OP_MAP_GET_BY_INDEX_RANGE,
"bin": self.test_map_bin,
"index": 1,
"val": 2,
"return_type": aerospike.MAP_RETURN_KEY
}]
_, _, res = self.as_connection.operate(self.test_map_key, ops)
assert res[self.test_map_bin] == ['b', 'c']
@pytest.mark.parametrize(
'entry', ('bin', 'index', 'val'))
def test_op_map_get_by_index_range_missing_required(self, entry):
op = {
"op": aerospike.OP_MAP_GET_BY_INDEX_RANGE,
"bin": self.test_map_bin,
"index": 1,
"val": 2,
"return_type": aerospike.MAP_RETURN_KEY
}
del op[entry]
ops = [op]
with pytest.raises(e.ParamError):
self.as_connection.operate(self.test_map_key, ops)
def test_op_map_get_by_rank(self):
ops = [{
'op': aerospike.OP_MAP_GET_BY_RANK,
'bin': self.test_map_bin,
'index': 1,
"return_type": aerospike.MAP_RETURN_KEY
}]
_, _, res = self.as_connection.operate(self.test_map_key, ops)
assert res[self.test_map_bin] == 'b'
@pytest.mark.parametrize(
'entry', ('bin', 'index'))
def test_op_map_get_by_rank_missing_required_entry(self, entry):
'''
TODO: this shouldn't be needed
'''
op = {
'op': aerospike.OP_MAP_GET_BY_RANK,
'bin': self.test_map_bin,
'index': 1,
"return_type": aerospike.MAP_RETURN_KEY
}
del op[entry]
ops = [op]
with pytest.raises(e.ParamError):
self.as_connection.operate(self.test_map_key, ops)
def test_op_map_get_by_rank_range(self):
ops = [{
'op': aerospike.OP_MAP_GET_BY_RANK_RANGE,
'bin': self.test_map_bin,
'index': 1,
'val': 2,
'return_type': aerospike.MAP_RETURN_KEY
}]
_, _, res = self.as_connection.operate(self.test_map_key, ops)
assert res[self.test_map_bin] == ['b', 'c']
@pytest.mark.parametrize(
'entry', ('bin', 'index', 'val'))
def test_op_map_get_by_rank_range_val_missing_required_values(self, entry):
op = {
'op': aerospike.OP_MAP_GET_BY_RANK_RANGE,
'bin': self.test_map_bin,
'index': 1,
'val': 2,
'return_type': aerospike.MAP_RETURN_KEY
}
del op[entry]
ops = [op]
with pytest.raises(e.ParamError):
self.as_connection.operate(self.test_map_key, ops)
| 31.237598
| 84
| 0.549593
| 4,455
| 35,892
| 4.051852
| 0.037262
| 0.101601
| 0.124314
| 0.072129
| 0.916958
| 0.902443
| 0.877071
| 0.837682
| 0.805329
| 0.787269
| 0
| 0.005369
| 0.320211
| 35,892
| 1,148
| 85
| 31.264808
| 0.734456
| 0.029032
| 0
| 0.788108
| 0
| 0
| 0.070332
| 0
| 0
| 0
| 0
| 0.002613
| 0.050811
| 1
| 0.074595
| false
| 0.002162
| 0.006486
| 0
| 0.082162
| 0.001081
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7305eb789285f29b3032a57825aed59ae37428c1
| 6,407
|
py
|
Python
|
loldib/getratings/models/NA/na_xayah/na_xayah_jng.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_xayah/na_xayah_jng.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_xayah/na_xayah_jng.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
from getratings.models.ratings import Ratings
class NA_Xayah_Jng_Aatrox(Ratings):
pass
class NA_Xayah_Jng_Ahri(Ratings):
pass
class NA_Xayah_Jng_Akali(Ratings):
pass
class NA_Xayah_Jng_Alistar(Ratings):
pass
class NA_Xayah_Jng_Amumu(Ratings):
pass
class NA_Xayah_Jng_Anivia(Ratings):
pass
class NA_Xayah_Jng_Annie(Ratings):
pass
class NA_Xayah_Jng_Ashe(Ratings):
pass
class NA_Xayah_Jng_AurelionSol(Ratings):
pass
class NA_Xayah_Jng_Azir(Ratings):
pass
class NA_Xayah_Jng_Bard(Ratings):
pass
class NA_Xayah_Jng_Blitzcrank(Ratings):
pass
class NA_Xayah_Jng_Brand(Ratings):
pass
class NA_Xayah_Jng_Braum(Ratings):
pass
class NA_Xayah_Jng_Caitlyn(Ratings):
pass
class NA_Xayah_Jng_Camille(Ratings):
pass
class NA_Xayah_Jng_Cassiopeia(Ratings):
pass
class NA_Xayah_Jng_Chogath(Ratings):
pass
class NA_Xayah_Jng_Corki(Ratings):
pass
class NA_Xayah_Jng_Darius(Ratings):
pass
class NA_Xayah_Jng_Diana(Ratings):
pass
class NA_Xayah_Jng_Draven(Ratings):
pass
class NA_Xayah_Jng_DrMundo(Ratings):
pass
class NA_Xayah_Jng_Ekko(Ratings):
pass
class NA_Xayah_Jng_Elise(Ratings):
pass
class NA_Xayah_Jng_Evelynn(Ratings):
pass
class NA_Xayah_Jng_Ezreal(Ratings):
pass
class NA_Xayah_Jng_Fiddlesticks(Ratings):
pass
class NA_Xayah_Jng_Fiora(Ratings):
pass
class NA_Xayah_Jng_Fizz(Ratings):
pass
class NA_Xayah_Jng_Galio(Ratings):
pass
class NA_Xayah_Jng_Gangplank(Ratings):
pass
class NA_Xayah_Jng_Garen(Ratings):
pass
class NA_Xayah_Jng_Gnar(Ratings):
pass
class NA_Xayah_Jng_Gragas(Ratings):
pass
class NA_Xayah_Jng_Graves(Ratings):
pass
class NA_Xayah_Jng_Hecarim(Ratings):
pass
class NA_Xayah_Jng_Heimerdinger(Ratings):
pass
class NA_Xayah_Jng_Illaoi(Ratings):
pass
class NA_Xayah_Jng_Irelia(Ratings):
pass
class NA_Xayah_Jng_Ivern(Ratings):
pass
class NA_Xayah_Jng_Janna(Ratings):
pass
class NA_Xayah_Jng_JarvanIV(Ratings):
pass
class NA_Xayah_Jng_Jax(Ratings):
pass
class NA_Xayah_Jng_Jayce(Ratings):
pass
class NA_Xayah_Jng_Jhin(Ratings):
pass
class NA_Xayah_Jng_Jinx(Ratings):
pass
class NA_Xayah_Jng_Kalista(Ratings):
pass
class NA_Xayah_Jng_Karma(Ratings):
pass
class NA_Xayah_Jng_Karthus(Ratings):
pass
class NA_Xayah_Jng_Kassadin(Ratings):
pass
class NA_Xayah_Jng_Katarina(Ratings):
pass
class NA_Xayah_Jng_Kayle(Ratings):
pass
class NA_Xayah_Jng_Kayn(Ratings):
pass
class NA_Xayah_Jng_Kennen(Ratings):
pass
class NA_Xayah_Jng_Khazix(Ratings):
pass
class NA_Xayah_Jng_Kindred(Ratings):
pass
class NA_Xayah_Jng_Kled(Ratings):
pass
class NA_Xayah_Jng_KogMaw(Ratings):
pass
class NA_Xayah_Jng_Leblanc(Ratings):
pass
class NA_Xayah_Jng_LeeSin(Ratings):
pass
class NA_Xayah_Jng_Leona(Ratings):
pass
class NA_Xayah_Jng_Lissandra(Ratings):
pass
class NA_Xayah_Jng_Lucian(Ratings):
pass
class NA_Xayah_Jng_Lulu(Ratings):
pass
class NA_Xayah_Jng_Lux(Ratings):
pass
class NA_Xayah_Jng_Malphite(Ratings):
pass
class NA_Xayah_Jng_Malzahar(Ratings):
pass
class NA_Xayah_Jng_Maokai(Ratings):
pass
class NA_Xayah_Jng_MasterYi(Ratings):
pass
class NA_Xayah_Jng_MissFortune(Ratings):
pass
class NA_Xayah_Jng_MonkeyKing(Ratings):
pass
class NA_Xayah_Jng_Mordekaiser(Ratings):
pass
class NA_Xayah_Jng_Morgana(Ratings):
pass
class NA_Xayah_Jng_Nami(Ratings):
pass
class NA_Xayah_Jng_Nasus(Ratings):
pass
class NA_Xayah_Jng_Nautilus(Ratings):
pass
class NA_Xayah_Jng_Nidalee(Ratings):
pass
class NA_Xayah_Jng_Nocturne(Ratings):
pass
class NA_Xayah_Jng_Nunu(Ratings):
pass
class NA_Xayah_Jng_Olaf(Ratings):
pass
class NA_Xayah_Jng_Orianna(Ratings):
pass
class NA_Xayah_Jng_Ornn(Ratings):
pass
class NA_Xayah_Jng_Pantheon(Ratings):
pass
class NA_Xayah_Jng_Poppy(Ratings):
pass
class NA_Xayah_Jng_Quinn(Ratings):
pass
class NA_Xayah_Jng_Rakan(Ratings):
pass
class NA_Xayah_Jng_Rammus(Ratings):
pass
class NA_Xayah_Jng_RekSai(Ratings):
pass
class NA_Xayah_Jng_Renekton(Ratings):
pass
class NA_Xayah_Jng_Rengar(Ratings):
pass
class NA_Xayah_Jng_Riven(Ratings):
pass
class NA_Xayah_Jng_Rumble(Ratings):
pass
class NA_Xayah_Jng_Ryze(Ratings):
pass
class NA_Xayah_Jng_Sejuani(Ratings):
pass
class NA_Xayah_Jng_Shaco(Ratings):
pass
class NA_Xayah_Jng_Shen(Ratings):
pass
class NA_Xayah_Jng_Shyvana(Ratings):
pass
class NA_Xayah_Jng_Singed(Ratings):
pass
class NA_Xayah_Jng_Sion(Ratings):
pass
class NA_Xayah_Jng_Sivir(Ratings):
pass
class NA_Xayah_Jng_Skarner(Ratings):
pass
class NA_Xayah_Jng_Sona(Ratings):
pass
class NA_Xayah_Jng_Soraka(Ratings):
pass
class NA_Xayah_Jng_Swain(Ratings):
pass
class NA_Xayah_Jng_Syndra(Ratings):
pass
class NA_Xayah_Jng_TahmKench(Ratings):
pass
class NA_Xayah_Jng_Taliyah(Ratings):
pass
class NA_Xayah_Jng_Talon(Ratings):
pass
class NA_Xayah_Jng_Taric(Ratings):
pass
class NA_Xayah_Jng_Teemo(Ratings):
pass
class NA_Xayah_Jng_Thresh(Ratings):
pass
class NA_Xayah_Jng_Tristana(Ratings):
pass
class NA_Xayah_Jng_Trundle(Ratings):
pass
class NA_Xayah_Jng_Tryndamere(Ratings):
pass
class NA_Xayah_Jng_TwistedFate(Ratings):
pass
class NA_Xayah_Jng_Twitch(Ratings):
pass
class NA_Xayah_Jng_Udyr(Ratings):
pass
class NA_Xayah_Jng_Urgot(Ratings):
pass
class NA_Xayah_Jng_Varus(Ratings):
pass
class NA_Xayah_Jng_Vayne(Ratings):
pass
class NA_Xayah_Jng_Veigar(Ratings):
pass
class NA_Xayah_Jng_Velkoz(Ratings):
pass
class NA_Xayah_Jng_Vi(Ratings):
pass
class NA_Xayah_Jng_Viktor(Ratings):
pass
class NA_Xayah_Jng_Vladimir(Ratings):
pass
class NA_Xayah_Jng_Volibear(Ratings):
pass
class NA_Xayah_Jng_Warwick(Ratings):
pass
class NA_Xayah_Jng_Xayah(Ratings):
pass
class NA_Xayah_Jng_Xerath(Ratings):
pass
class NA_Xayah_Jng_XinZhao(Ratings):
pass
class NA_Xayah_Jng_Yasuo(Ratings):
pass
class NA_Xayah_Jng_Yorick(Ratings):
pass
class NA_Xayah_Jng_Zac(Ratings):
pass
class NA_Xayah_Jng_Zed(Ratings):
pass
class NA_Xayah_Jng_Ziggs(Ratings):
pass
class NA_Xayah_Jng_Zilean(Ratings):
pass
class NA_Xayah_Jng_Zyra(Ratings):
pass
| 15.364508
| 46
| 0.761667
| 972
| 6,407
| 4.59465
| 0.151235
| 0.216301
| 0.370802
| 0.463502
| 0.797582
| 0.797582
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173404
| 6,407
| 416
| 47
| 15.401442
| 0.843278
| 0
| 0
| 0.498195
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.498195
| 0.00361
| 0
| 0.501805
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
731dda6088a1a5d4dcb4b5b982a4c0921598ff96
| 301
|
py
|
Python
|
17_Scripting.py
|
mhaythornthwaite/Python-Zero-to-Mastery-Course
|
33b857aa62bfe3b93c57eaa31cae99e7b2b1a6a9
|
[
"MIT"
] | null | null | null |
17_Scripting.py
|
mhaythornthwaite/Python-Zero-to-Mastery-Course
|
33b857aa62bfe3b93c57eaa31cae99e7b2b1a6a9
|
[
"MIT"
] | null | null | null |
17_Scripting.py
|
mhaythornthwaite/Python-Zero-to-Mastery-Course
|
33b857aa62bfe3b93c57eaa31cae99e7b2b1a6a9
|
[
"MIT"
] | null | null | null |
# Banner prints framing the (empty) scripting exercise section.
# Fix: dropped the extraneous `f` prefix from strings that contain no
# placeholders (flake8 F541); output is unchanged.
print('\n\n')
print(' ---------------- START ---------------- ')


# -------------------------------- SCRIPTING ----------------------------------


# ----------------------------------- END -------------------------------------

print(' ----------------- END ----------------- ')
print('\n')
| 16.722222
| 79
| 0.149502
| 13
| 301
| 3.461538
| 0.461538
| 0.266667
| 0.311111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.10299
| 301
| 18
| 80
| 16.722222
| 0.166667
| 0.51495
| 0
| 0
| 0
| 0
| 0.611111
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
b4462d3972746e9bbda943f3da81288afefe368b
| 7,793
|
py
|
Python
|
tests/test_fabrik.py
|
utiasSTARS/graphIK
|
df5ca000485593540113dad42940680919dc4eb9
|
[
"MIT"
] | 16
|
2020-12-14T17:55:17.000Z
|
2022-03-29T05:26:05.000Z
|
tests/test_fabrik.py
|
utiasSTARS/graphIK
|
df5ca000485593540113dad42940680919dc4eb9
|
[
"MIT"
] | 1
|
2020-11-30T14:20:01.000Z
|
2022-01-16T17:54:48.000Z
|
tests/test_fabrik.py
|
utiasSTARS/graphIK
|
df5ca000485593540113dad42940680919dc4eb9
|
[
"MIT"
] | 2
|
2021-11-20T15:04:47.000Z
|
2022-01-11T13:42:43.000Z
|
import numpy as np
import numpy.linalg as la
import math
import unittest
import random
from graphik.solvers.solver_fabrik import solver_fabrik
class TestFabrik(unittest.TestCase):
def __init__(self, *args, **kwargs):
    """Forward unittest's constructor args and set test-size knobs."""
    super(TestFabrik, self).__init__(*args, **kwargs)
    # Cap on FABRIK iterations per solve attempt.
    self.max_iteration = 100
    # Number of random configurations exercised by each test method.
    self.n_tests = 1000
def initialize_solver_plug(self, immediateParent=False):
    """Build a solver_fabrik for a branched ("plug") mechanism.

    Parameters
    ----------
    immediateParent : bool
        When True, both end-effectors share the same immediate parent
        joint (6 joints); otherwise each branch has one extra link
        (8 joints).

    Returns
    -------
    A solver_fabrik configured with random link lengths in [0.5, 1.5)
    and unconstrained (pi) joint-angle limits.
    """
    # The original scattered this choice over three separate
    # `if immediateParent` blocks; they are merged here.
    if immediateParent:
        N = 6                       # number of joints
        goal_index = [4, 5]         # indices of the end-effector joints
        # parents[i] is the parent joint of joint i; the base has -1.
        # A parent's index always precedes its children's.
        parents = [-1, 0, 1, 2, 3, 3]
    else:
        N = 8
        goal_index = [5, 7]
        parents = [-1, 0, 1, 2, 3, 4, 3, 6]
    # Workspace dimension.
    dim = 3
    # Random link lengths; append-loops replaced with a comprehension.
    r = [random.random() + 0.5 for _ in range(N)]
    angle_limit = [math.pi] * N
    # Goal coordinates for the end-effectors, in goal_index order.
    goal_position = [[2.5, 0.5, 0.5], [3, 0.5, 0.5]]
    params = {
        "N": N,
        "r": r,
        "parents": parents,
        "angle_limit": angle_limit,
        "goal_index": goal_index,
        "goal_position": goal_position,
        "dim": dim,
    }
    return solver_fabrik(params)
def initialize_solver_chain(self, dim):
    """Build a solver_fabrik for a serial chain of 3-10 joints.

    Parameters
    ----------
    dim : int
        Workspace dimension (2 or 3); the goal's z-coordinate is 0
        when dim == 2.

    Returns
    -------
    A solver_fabrik with random link lengths in [0.5, 1.5), a single
    end-effector (the last joint), and unconstrained angle limits.
    """
    N = random.randint(3, 10)
    # Random link lengths; append-loops replaced with a comprehension.
    r = [random.random() + 0.5 for _ in range(N)]
    angle_limit = [math.pi] * N
    goal_index = [N - 1]  # the last joint is the only end-effector
    goal_position = [[2.5, 0.5, 0.5 * (dim - 2)]]
    # Serial chain: each joint's parent is the previous one; base is -1.
    parents = [-1] + list(range(N - 1))
    params = {
        "N": N,
        "r": r,
        "parents": parents,
        "angle_limit": angle_limit,
        "goal_index": goal_index,
        "goal_position": goal_position,
        "dim": dim,
    }
    return solver_fabrik(params)
def test_chain3d(self):
    """Solve 3-D random serial-chain IK n_tests times; require end-effector
    error within delta=0.02 of zero for every goal."""
    n_tests = self.n_tests
    max_iteration = self.max_iteration
    initial_guess = None
    error_threshold = 0.010
    sensitivity = 0.0000001
    sensitivity_range = 5
    solver = self.initialize_solver_chain(dim=3)
    for i in range(n_tests):
        # Goals are sampled from random joint configurations, so every
        # goal is reachable by construction.
        solver.goal_position = solver.generate_random_configuration()
        solution = solver.solve(
            initial_guess,
            max_iteration,
            error_threshold,
            sensitivity,
            sensitivity_range,
        )
        p = solution["positions"]
        # NOTE(review): the inner loop reuses the outer loop's name `i`;
        # harmless here since the outer index is never read again.
        for i in range(len(solver.goal_index)):
            distance = la.norm(solver.goal_position[i] - p[solver.goal_index[i], :])
            self.assertAlmostEqual(0, distance, delta=0.02)
def test_chain2d(self):
    """A random 2D serial chain must reach randomly drawn, reachable goals."""
    solver = self.initialize_solver_chain(dim=2)
    for _ in range(self.n_tests):
        # Reachability is guaranteed: goals are sampled from an actual
        # random configuration of the same chain.
        solver.goal_position = solver.generate_random_configuration()
        result = solver.solve(
            None,  # no initial guess
            self.max_iteration,
            0.010,  # error threshold
            0.0000001,  # sensitivity
            5,  # sensitivity range
        )
        positions = result["positions"]
        # The 2D tolerance is looser (0.2) than the 3D test's.
        for k, joint in enumerate(solver.goal_index):
            gap = la.norm(solver.goal_position[k] - positions[joint, :])
            self.assertAlmostEqual(0, gap, delta=0.2)
def test_chain2d_orientation(self):
    """Track position and final-link orientation of a 2D chain.

    Constraining the last two joints fixes both the end-effector
    position and the direction of the final link.
    """
    solver = self.initialize_solver_chain(dim=2)
    # Two goals: the tip joint and the one just before it.
    solver.goal_index = [solver.N - 1, solver.N - 2]
    for _ in range(self.n_tests):
        # Sample a full set of joint positions and reuse its last two
        # joints as goals, so a solution is known to exist.
        config = solver.generate_random_configuration(
            returnAllPositions=True
        )
        solver.goal_position = [config[-1, :], config[-2, :]]
        result = solver.solve(
            None,  # no initial guess
            self.max_iteration,
            0.001,  # error threshold
            0.0000001,  # sensitivity
            5,  # sensitivity range
        )
        positions = result["positions"]
        for k, joint in enumerate(solver.goal_index):
            gap = la.norm(solver.goal_position[k] - positions[joint, :])
            self.assertAlmostEqual(0, gap, delta=0.2)
def test_chain2d_orientation_angle_constraints(self):
    """Joint-angle limits must hold while tracking position + orientation.

    Unlike the other tests, this one checks the solved joint angles
    against the limits rather than the end-effector error.
    """
    solver = self.initialize_solver_chain(dim=2)
    # Constrain both the tip joint and the one before it.
    solver.goal_index = [solver.N - 1, solver.N - 2]
    # Base joint gets no limit contribution; the rest draw a random
    # limit from [pi/2, pi).
    solver.angle_limit = [0] + [
        np.random.random() * math.pi / 2 + math.pi / 2
        for _ in range(solver.N - 1)
    ]
    for _ in range(self.n_tests):
        # Goals are the last two joints of a random configuration, so
        # they are reachable by construction.
        config = solver.generate_random_configuration(
            returnAllPositions=True
        )
        solver.goal_position = [config[-1, :], config[-2, :]]
        result = solver.solve(
            None,  # no initial guess
            self.max_iteration,
            0.001,  # error threshold
            0.0000001,  # sensitivity
            5,  # sensitivity range
        )
        angles = result["angles"]
        # Allow a small numerical slack of 0.03 rad past each limit.
        for j in range(1, solver.N):
            self.assertLess(angles[j], solver.angle_limit[j] + 0.03)
def test_plug(self):
    """A branched ("plug") mechanism must reach randomly drawn goals."""
    solver = self.initialize_solver_plug(immediateParent=False)
    for _ in range(self.n_tests):
        # Goals come from an actual random configuration of the same
        # mechanism, so they are reachable.
        solver.goal_position = solver.generate_random_configuration()
        result = solver.solve(
            None,  # no initial guess
            self.max_iteration,
            0.020,  # error threshold
            0.0000001,  # sensitivity
            5,  # sensitivity range
        )
        positions = result["positions"]
        # The branched topology is harder to solve; tolerate up to 0.5.
        for k, joint in enumerate(solver.goal_index):
            gap = la.norm(solver.goal_position[k] - positions[joint, :])
            self.assertAlmostEqual(0, gap, delta=0.5)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| 30.924603
| 123
| 0.55614
| 902
| 7,793
| 4.627494
| 0.150776
| 0.040968
| 0.023
| 0.042166
| 0.819118
| 0.816243
| 0.810494
| 0.810494
| 0.810494
| 0.810494
| 0
| 0.032578
| 0.354036
| 7,793
| 251
| 124
| 31.047809
| 0.796583
| 0.114077
| 0
| 0.706522
| 0
| 0
| 0.020616
| 0
| 0
| 0
| 0
| 0
| 0.027174
| 1
| 0.043478
| false
| 0
| 0.032609
| 0
| 0.092391
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b4600bed94373f8305003884afb41e79e0a568d9
| 2,104
|
py
|
Python
|
delta_node/entity/task.py
|
delta-mpc/delta-node
|
674fc61f951e41ed353597f93ca6ea6bc74a102b
|
[
"Apache-2.0"
] | 4
|
2021-07-22T01:11:15.000Z
|
2022-03-17T03:26:20.000Z
|
delta_node/entity/task.py
|
delta-mpc/delta-node
|
674fc61f951e41ed353597f93ca6ea6bc74a102b
|
[
"Apache-2.0"
] | 10
|
2021-09-13T09:55:02.000Z
|
2022-03-23T09:41:26.000Z
|
delta_node/entity/task.py
|
delta-mpc/delta-node
|
674fc61f951e41ed353597f93ca6ea6bc74a102b
|
[
"Apache-2.0"
] | null | null | null |
from dataclasses import dataclass, field
from enum import Enum
import sqlalchemy as sa
from delta_node.db import mapper_registry
from .base import BaseTable
__all__ = ["TaskStatus", "Task", "RunnerTask"]
class TaskStatus(Enum):
    """Lifecycle states of a task.

    Used as the ``status`` column type (via ``sa.Enum``) on both the
    ``Task`` and ``RunnerTask`` tables below.
    """

    PENDING = 0  # created, not yet running
    RUNNING = 1  # currently executing
    FINISHED = 2  # completed
    ERROR = 3  # terminated with an error
@mapper_registry.mapped
@dataclass
class Task(BaseTable):
    """ORM-mapped record for the ``task`` table.

    Declared as a dataclass and mapped through ``mapper_registry.mapped``;
    each field carries its SQLAlchemy ``Column`` in the field metadata
    under the ``"sa"`` key (see ``__sa_dataclass_metadata_key__``).
    Field order matters: it defines the dataclass ``__init__`` signature.
    """

    __tablename__ = "task"
    # Tells SQLAlchemy which metadata key holds each field's Column.
    __sa_dataclass_metadata_key__ = "sa"
    # Identifier of the task's creator (indexed for lookup).
    creator: str = field(
        metadata={"sa": sa.Column(sa.String, nullable=False, index=True)}
    )
    # External task identifier (indexed); distinct from the table's own
    # primary key, which presumably comes from BaseTable — TODO confirm.
    task_id: str = field(
        metadata={"sa": sa.Column(sa.String, nullable=False, index=True)}
    )
    # Name of the dataset the task operates on.
    dataset: str = field(
        metadata={"sa": sa.Column(sa.String, nullable=False, index=False)}
    )
    # Opaque binary commitment associated with the task; exact semantics
    # (e.g. a hash of the task payload) not visible here — TODO confirm.
    commitment: bytes = field(
        metadata={"sa": sa.Column(sa.BINARY, nullable=False, index=False)}
    )
    # Human-readable task name (nullable).
    name: str = field(metadata={"sa": sa.Column(sa.String, nullable=True, index=False)})
    # Task type tag (nullable); note this shadows the builtin ``type``
    # inside the class namespace.
    type: str = field(metadata={"sa": sa.Column(sa.String, nullable=True, index=False)})
    # Current lifecycle state; new tasks start as PENDING. Indexed so
    # tasks can be filtered by status.
    status: TaskStatus = field(
        default=TaskStatus.PENDING,
        metadata={"sa": sa.Column(sa.Enum(TaskStatus), nullable=False, index=True)},
    )
@mapper_registry.mapped
@dataclass
class RunnerTask(BaseTable):
    """ORM-mapped record for the ``runner_task`` table.

    Mirrors ``Task`` column-for-column except that it stores a ``url``
    instead of a ``name`` — presumably the runner-side view of a task,
    fetched from elsewhere; confirm against callers. Mapped the same
    way: a dataclass whose fields carry their SQLAlchemy ``Column`` in
    metadata under the ``"sa"`` key.
    """

    __tablename__ = "runner_task"
    # Tells SQLAlchemy which metadata key holds each field's Column.
    __sa_dataclass_metadata_key__ = "sa"
    # Identifier of the task's creator (indexed for lookup).
    creator: str = field(
        metadata={"sa": sa.Column(sa.String, nullable=False, index=True)}
    )
    # External task identifier (indexed).
    task_id: str = field(
        metadata={"sa": sa.Column(sa.String, nullable=False, index=True)}
    )
    # Name of the dataset the task operates on.
    dataset: str = field(
        metadata={"sa": sa.Column(sa.String, nullable=False, index=False)}
    )
    # Opaque binary commitment associated with the task — TODO confirm
    # semantics (same field exists on Task).
    commitment: bytes = field(
        metadata={"sa": sa.Column(sa.BINARY, nullable=False, index=False)}
    )
    # URL related to the task (nullable); likely where the task payload
    # is fetched from — verify against callers.
    url: str = field(metadata={"sa": sa.Column(sa.String, nullable=True, index=False)})
    # Task type tag (nullable); shadows the builtin ``type`` in the
    # class namespace.
    type: str = field(metadata={"sa": sa.Column(sa.String, nullable=True, index=False)})
    # Current lifecycle state; starts as PENDING, indexed for filtering.
    status: TaskStatus = field(
        default=TaskStatus.PENDING,
        metadata={"sa": sa.Column(sa.Enum(TaskStatus), nullable=False, index=True)},
    )
| 29.633803
| 88
| 0.654943
| 260
| 2,104
| 5.173077
| 0.2
| 0.104089
| 0.124907
| 0.187361
| 0.798513
| 0.747955
| 0.747955
| 0.747955
| 0.747955
| 0.747955
| 0
| 0.002371
| 0.198194
| 2,104
| 70
| 89
| 30.057143
| 0.794902
| 0
| 0
| 0.526316
| 0
| 0
| 0.033745
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.087719
| 0
| 0.526316
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.